author    Roland Dreier <roland@eddore.topspincom.com>  2005-09-09 18:55:08 -0400
committer Roland Dreier <rolandd@cisco.com>             2005-09-09 18:55:08 -0400
commit    63aaf647529e8a56bdf31fd8f2979d4371c6a332 (patch)
tree      dd1ed29d591da4ef6ec0c4260d59b1910010c314 /drivers/infiniband
parent    2e9f7cb7869059e55cd91f5e23c6380f3763db56 (diff)
Make sure that userspace does not retrieve stale asynchronous or completion events after destroying a CQ, QP or SRQ. We do this by sweeping the event lists before returning from a destroy call, and then returning the number of events already reported before the destroy call. This allows userspace to wait until it has processed all events for an object returned from the kernel before it frees its context for the object.

The ABI of the destroy CQ, destroy QP and destroy SRQ commands has to change to return the event count, so bump the ABI version from 1 to 2. The userspace libibverbs library has already been updated to handle both the old and new ABI versions.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
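To make the new contract concrete, the sketch below shows how a userspace library might consume the event count returned by the version 2 destroy commands. It is only an illustration, not the actual libibverbs code: my_cq, destroy_cq_cmd() and read_one_comp_event() are hypothetical stand-ins for the library's per-CQ context, the write() of the DESTROY_CQ command, and a read() from the completion-event file descriptor.

#include <stdint.h>
#include <stdlib.h>

/*
 * Hypothetical per-CQ userspace state; the library keeps its own count of
 * completion events it has already read for this CQ.
 */
struct my_cq {
	uint32_t     handle;
	unsigned int comp_events_completed;	/* events read so far for this CQ */
};

/* Hypothetical helpers: issue the destroy command (returning the kernel's
 * comp_events_reported) and read one completion event for this CQ. */
extern int destroy_cq_cmd(uint32_t handle, unsigned int *events_reported);
extern int read_one_comp_event(struct my_cq *cq);

static int my_destroy_cq(struct my_cq *cq)
{
	unsigned int events_reported;
	int ret;

	/* The ABI v2 destroy response carries the number of completion
	 * events the kernel queued for this CQ before it was destroyed. */
	ret = destroy_cq_cmd(cq->handle, &events_reported);
	if (ret)
		return ret;

	/* Drain the event stream until every event the kernel already
	 * reported has been consumed; only then is it safe to free the
	 * userspace context, since no stale event can still reference it. */
	while (cq->comp_events_completed < events_reported)
		if (read_one_comp_event(cq))
			break;

	free(cq);
	return 0;
}

The same pattern applies to the QP and SRQ destroy commands, whose responses carry a single events_reported count for asynchronous events.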
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/uverbs.h        26
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c   155
-rw-r--r--  drivers/infiniband/core/uverbs_main.c   98
3 files changed, 191 insertions, 88 deletions
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 180b3d4765e4..b1897bed14ad 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -76,20 +76,28 @@ struct ib_uverbs_file {
 	struct ib_uverbs_event_file	comp_file[1];
 };
 
-struct ib_uverbs_async_event {
-	struct ib_uverbs_async_event_desc	desc;
+struct ib_uverbs_event {
+	union {
+		struct ib_uverbs_async_event_desc	async;
+		struct ib_uverbs_comp_event_desc	comp;
+	} desc;
 	struct list_head		list;
+	struct list_head		obj_list;
+	u32			       *counter;
 };
 
-struct ib_uverbs_comp_event {
-	struct ib_uverbs_comp_event_desc	desc;
-	struct list_head			list;
+struct ib_uevent_object {
+	struct ib_uobject	uobject;
+	struct list_head	event_list;
+	u32			events_reported;
 };
 
-struct ib_uobject_mr {
-	struct ib_uobject	uobj;
-	struct page	       *page_list;
-	struct scatterlist     *sg_list;
+struct ib_ucq_object {
+	struct ib_uobject	uobject;
+	struct list_head	comp_list;
+	struct list_head	async_list;
+	u32			comp_events_reported;
+	u32			async_events_reported;
 };
 
 extern struct semaphore ib_uverbs_idr_mutex;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index ebccf9f38af9..e91ebde46481 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -590,7 +590,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
 	struct ib_uverbs_create_cq      cmd;
 	struct ib_uverbs_create_cq_resp resp;
 	struct ib_udata                 udata;
-	struct ib_uobject              *uobj;
+	struct ib_ucq_object           *uobj;
 	struct ib_cq                   *cq;
 	int                             ret;
 
@@ -611,8 +611,12 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
 	if (!uobj)
 		return -ENOMEM;
 
-	uobj->user_handle = cmd.user_handle;
-	uobj->context     = file->ucontext;
+	uobj->uobject.user_handle   = cmd.user_handle;
+	uobj->uobject.context       = file->ucontext;
+	uobj->comp_events_reported  = 0;
+	uobj->async_events_reported = 0;
+	INIT_LIST_HEAD(&uobj->comp_list);
+	INIT_LIST_HEAD(&uobj->async_list);
 
 	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
 					     file->ucontext, &udata);
@@ -622,7 +626,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
 	}
 
 	cq->device        = file->device->ib_dev;
-	cq->uobject       = uobj;
+	cq->uobject       = &uobj->uobject;
 	cq->comp_handler  = ib_uverbs_comp_handler;
 	cq->event_handler = ib_uverbs_cq_event_handler;
 	cq->cq_context    = file;
@@ -635,7 +639,7 @@ retry:
 	}
 
 	down(&ib_uverbs_idr_mutex);
-	ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->id);
+	ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->uobject.id);
 	up(&ib_uverbs_idr_mutex);
 
 	if (ret == -EAGAIN)
@@ -644,11 +648,11 @@ retry:
 		goto err_cq;
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_add_tail(&uobj->list, &file->ucontext->cq_list);
+	list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 	memset(&resp, 0, sizeof resp);
-	resp.cq_handle = uobj->id;
+	resp.cq_handle = uobj->uobject.id;
 	resp.cqe       = cq->cqe;
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
@@ -661,11 +665,11 @@ retry:
 
 err_list:
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 	down(&ib_uverbs_idr_mutex);
-	idr_remove(&ib_uverbs_cq_idr, uobj->id);
+	idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);
 	up(&ib_uverbs_idr_mutex);
 
 err_cq:
@@ -680,21 +684,27 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
 			     const char __user *buf, int in_len,
 			     int out_len)
 {
-	struct ib_uverbs_destroy_cq cmd;
-	struct ib_cq               *cq;
-	struct ib_uobject          *uobj;
-	int                         ret = -EINVAL;
+	struct ib_uverbs_destroy_cq      cmd;
+	struct ib_uverbs_destroy_cq_resp resp;
+	struct ib_cq                    *cq;
+	struct ib_ucq_object            *uobj;
+	struct ib_uverbs_event          *evt, *tmp;
+	u64                              user_handle;
+	int                              ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
+	memset(&resp, 0, sizeof resp);
+
 	down(&ib_uverbs_idr_mutex);
 
 	cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
 	if (!cq || cq->uobject->context != file->ucontext)
 		goto out;
 
-	uobj = cq->uobject;
+	user_handle = cq->uobject->user_handle;
+	uobj        = container_of(cq->uobject, struct ib_ucq_object, uobject);
 
 	ret = ib_destroy_cq(cq);
 	if (ret)
@@ -703,11 +713,32 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
 	idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
+	spin_lock_irq(&file->comp_file[0].lock);
+	list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
+		list_del(&evt->list);
+		kfree(evt);
+	}
+	spin_unlock_irq(&file->comp_file[0].lock);
+
+	spin_lock_irq(&file->async_file.lock);
+	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
+		list_del(&evt->list);
+		kfree(evt);
+	}
+	spin_unlock_irq(&file->async_file.lock);
+
+	resp.comp_events_reported  = uobj->comp_events_reported;
+	resp.async_events_reported = uobj->async_events_reported;
+
 	kfree(uobj);
 
+	if (copy_to_user((void __user *) (unsigned long) cmd.response,
+			 &resp, sizeof resp))
+		ret = -EFAULT;
+
 out:
 	up(&ib_uverbs_idr_mutex);
 
@@ -721,7 +752,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	struct ib_uverbs_create_qp      cmd;
 	struct ib_uverbs_create_qp_resp resp;
 	struct ib_udata                 udata;
-	struct ib_uobject              *uobj;
+	struct ib_uevent_object        *uobj;
 	struct ib_pd                   *pd;
 	struct ib_cq                   *scq, *rcq;
 	struct ib_srq                  *srq;
@@ -772,8 +803,10 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	attr.cap.max_recv_sge    = cmd.max_recv_sge;
 	attr.cap.max_inline_data = cmd.max_inline_data;
 
-	uobj->user_handle = cmd.user_handle;
-	uobj->context     = file->ucontext;
+	uobj->uobject.user_handle = cmd.user_handle;
+	uobj->uobject.context     = file->ucontext;
+	uobj->events_reported     = 0;
+	INIT_LIST_HEAD(&uobj->event_list);
 
 	qp = pd->device->create_qp(pd, &attr, &udata);
 	if (IS_ERR(qp)) {
@@ -786,7 +819,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	qp->send_cq       = attr.send_cq;
 	qp->recv_cq       = attr.recv_cq;
 	qp->srq           = attr.srq;
-	qp->uobject       = uobj;
+	qp->uobject       = &uobj->uobject;
 	qp->event_handler = attr.event_handler;
 	qp->qp_context    = attr.qp_context;
 	qp->qp_type       = attr.qp_type;
@@ -805,17 +838,17 @@ retry:
 		goto err_destroy;
 	}
 
-	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->id);
+	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uobject.id);
 
 	if (ret == -EAGAIN)
 		goto retry;
 	if (ret)
 		goto err_destroy;
 
-	resp.qp_handle = uobj->id;
+	resp.qp_handle = uobj->uobject.id;
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_add_tail(&uobj->list, &file->ucontext->qp_list);
+	list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
@@ -830,7 +863,7 @@ retry:
 
 err_list:
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 err_destroy:
@@ -930,21 +963,25 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 			     const char __user *buf, int in_len,
 			     int out_len)
 {
-	struct ib_uverbs_destroy_qp cmd;
-	struct ib_qp               *qp;
-	struct ib_uobject          *uobj;
-	int                         ret = -EINVAL;
+	struct ib_uverbs_destroy_qp      cmd;
+	struct ib_uverbs_destroy_qp_resp resp;
+	struct ib_qp                    *qp;
+	struct ib_uevent_object         *uobj;
+	struct ib_uverbs_event          *evt, *tmp;
+	int                              ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
+	memset(&resp, 0, sizeof resp);
+
 	down(&ib_uverbs_idr_mutex);
 
 	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
 	if (!qp || qp->uobject->context != file->ucontext)
 		goto out;
 
-	uobj = qp->uobject;
+	uobj = container_of(qp->uobject, struct ib_uevent_object, uobject);
 
 	ret = ib_destroy_qp(qp);
 	if (ret)
@@ -953,11 +990,24 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
+	spin_lock_irq(&file->async_file.lock);
+	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
+		list_del(&evt->list);
+		kfree(evt);
+	}
+	spin_unlock_irq(&file->async_file.lock);
+
+	resp.events_reported = uobj->events_reported;
+
 	kfree(uobj);
 
+	if (copy_to_user((void __user *) (unsigned long) cmd.response,
+			 &resp, sizeof resp))
+		ret = -EFAULT;
+
 out:
 	up(&ib_uverbs_idr_mutex);
 
@@ -1015,7 +1065,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
 	struct ib_uverbs_create_srq      cmd;
 	struct ib_uverbs_create_srq_resp resp;
 	struct ib_udata                  udata;
-	struct ib_uobject               *uobj;
+	struct ib_uevent_object         *uobj;
 	struct ib_pd                    *pd;
 	struct ib_srq                   *srq;
 	struct ib_srq_init_attr          attr;
@@ -1050,8 +1100,10 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
 	attr.attr.max_sge   = cmd.max_sge;
 	attr.attr.srq_limit = cmd.srq_limit;
 
-	uobj->user_handle = cmd.user_handle;
-	uobj->context     = file->ucontext;
+	uobj->uobject.user_handle = cmd.user_handle;
+	uobj->uobject.context     = file->ucontext;
+	uobj->events_reported     = 0;
+	INIT_LIST_HEAD(&uobj->event_list);
 
 	srq = pd->device->create_srq(pd, &attr, &udata);
 	if (IS_ERR(srq)) {
@@ -1061,7 +1113,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
 
 	srq->device        = pd->device;
 	srq->pd            = pd;
-	srq->uobject       = uobj;
+	srq->uobject       = &uobj->uobject;
 	srq->event_handler = attr.event_handler;
 	srq->srq_context   = attr.srq_context;
 	atomic_inc(&pd->usecnt);
@@ -1075,17 +1127,17 @@ retry:
 		goto err_destroy;
 	}
 
-	ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->id);
+	ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->uobject.id);
 
 	if (ret == -EAGAIN)
 		goto retry;
 	if (ret)
 		goto err_destroy;
 
-	resp.srq_handle = uobj->id;
+	resp.srq_handle = uobj->uobject.id;
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_add_tail(&uobj->list, &file->ucontext->srq_list);
+	list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
@@ -1100,7 +1152,7 @@ retry:
 
 err_list:
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 err_destroy:
@@ -1149,21 +1201,25 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 			      const char __user *buf, int in_len,
 			      int out_len)
 {
-	struct ib_uverbs_destroy_srq cmd;
-	struct ib_srq               *srq;
-	struct ib_uobject           *uobj;
-	int                          ret = -EINVAL;
+	struct ib_uverbs_destroy_srq      cmd;
+	struct ib_uverbs_destroy_srq_resp resp;
+	struct ib_srq                    *srq;
+	struct ib_uevent_object          *uobj;
+	struct ib_uverbs_event           *evt, *tmp;
+	int                               ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
 	down(&ib_uverbs_idr_mutex);
 
+	memset(&resp, 0, sizeof resp);
+
 	srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
 	if (!srq || srq->uobject->context != file->ucontext)
 		goto out;
 
-	uobj = srq->uobject;
+	uobj = container_of(srq->uobject, struct ib_uevent_object, uobject);
 
 	ret = ib_destroy_srq(srq);
 	if (ret)
@@ -1172,11 +1228,24 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 	idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
+	spin_lock_irq(&file->async_file.lock);
+	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
+		list_del(&evt->list);
+		kfree(evt);
+	}
+	spin_unlock_irq(&file->async_file.lock);
+
+	resp.events_reported = uobj->events_reported;
+
 	kfree(uobj);
 
+	if (copy_to_user((void __user *) (unsigned long) cmd.response,
+			 &resp, sizeof resp))
+		ret = -EFAULT;
+
 out:
 	up(&ib_uverbs_idr_mutex);
 
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 09caf5b1ef36..ce5bdb7af306 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -120,7 +120,7 @@ static int ib_dealloc_ucontext(struct ib_ucontext *context)
 		idr_remove(&ib_uverbs_qp_idr, uobj->id);
 		ib_destroy_qp(qp);
 		list_del(&uobj->list);
-		kfree(uobj);
+		kfree(container_of(uobj, struct ib_uevent_object, uobject));
 	}
 
 	list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
@@ -128,7 +128,7 @@ static int ib_dealloc_ucontext(struct ib_ucontext *context)
 		idr_remove(&ib_uverbs_cq_idr, uobj->id);
 		ib_destroy_cq(cq);
 		list_del(&uobj->list);
-		kfree(uobj);
+		kfree(container_of(uobj, struct ib_ucq_object, uobject));
 	}
 
 	list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
@@ -136,7 +136,7 @@ static int ib_dealloc_ucontext(struct ib_ucontext *context)
 		idr_remove(&ib_uverbs_srq_idr, uobj->id);
 		ib_destroy_srq(srq);
 		list_del(&uobj->list);
-		kfree(uobj);
+		kfree(container_of(uobj, struct ib_uevent_object, uobject));
 	}
 
 	/* XXX Free MWs */
@@ -182,7 +182,7 @@ static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf,
 				    size_t count, loff_t *pos)
 {
 	struct ib_uverbs_event_file *file = filp->private_data;
-	void *event;
+	struct ib_uverbs_event *event;
 	int eventsz;
 	int ret = 0;
 
@@ -207,21 +207,23 @@ static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf,
 		return -ENODEV;
 	}
 
-	if (file->is_async) {
-		event = list_entry(file->event_list.next,
-				   struct ib_uverbs_async_event, list);
-		eventsz = sizeof (struct ib_uverbs_async_event_desc);
-	} else {
-		event = list_entry(file->event_list.next,
-				   struct ib_uverbs_comp_event, list);
-		eventsz = sizeof (struct ib_uverbs_comp_event_desc);
-	}
+	event = list_entry(file->event_list.next, struct ib_uverbs_event, list);
+
+	if (file->is_async)
+		eventsz = sizeof (struct ib_uverbs_async_event_desc);
+	else
+		eventsz = sizeof (struct ib_uverbs_comp_event_desc);
 
 	if (eventsz > count) {
 		ret = -EINVAL;
 		event = NULL;
-	} else
+	} else {
 		list_del(file->event_list.next);
+		if (event->counter) {
+			++(*event->counter);
+			list_del(&event->obj_list);
+		}
+	}
 
 	spin_unlock_irq(&file->lock);
 
@@ -257,16 +259,13 @@ static unsigned int ib_uverbs_event_poll(struct file *filp,
 
 static void ib_uverbs_event_release(struct ib_uverbs_event_file *file)
 {
-	struct list_head *entry, *tmp;
+	struct ib_uverbs_event *entry, *tmp;
 
 	spin_lock_irq(&file->lock);
 	if (file->fd != -1) {
 		file->fd = -1;
-		list_for_each_safe(entry, tmp, &file->event_list)
-			if (file->is_async)
-				kfree(list_entry(entry, struct ib_uverbs_async_event, list));
-			else
-				kfree(list_entry(entry, struct ib_uverbs_comp_event, list));
+		list_for_each_entry_safe(entry, tmp, &file->event_list, list)
+			kfree(entry);
 	}
 	spin_unlock_irq(&file->lock);
 }
@@ -304,18 +303,23 @@ static struct file_operations uverbs_event_fops = {
 
 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
 {
-	struct ib_uverbs_file       *file = cq_context;
-	struct ib_uverbs_comp_event *entry;
-	unsigned long                flags;
+	struct ib_uverbs_file  *file = cq_context;
+	struct ib_ucq_object   *uobj;
+	struct ib_uverbs_event *entry;
+	unsigned long           flags;
 
 	entry = kmalloc(sizeof *entry, GFP_ATOMIC);
 	if (!entry)
 		return;
 
-	entry->desc.cq_handle = cq->uobject->user_handle;
+	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
+
+	entry->desc.comp.cq_handle = cq->uobject->user_handle;
+	entry->counter             = &uobj->comp_events_reported;
 
 	spin_lock_irqsave(&file->comp_file[0].lock, flags);
 	list_add_tail(&entry->list, &file->comp_file[0].event_list);
+	list_add_tail(&entry->obj_list, &uobj->comp_list);
 	spin_unlock_irqrestore(&file->comp_file[0].lock, flags);
 
 	wake_up_interruptible(&file->comp_file[0].poll_wait);
@@ -323,20 +327,25 @@ void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
 }
 
 static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
-				    __u64 element, __u64 event)
+				    __u64 element, __u64 event,
+				    struct list_head *obj_list,
+				    u32 *counter)
 {
-	struct ib_uverbs_async_event *entry;
+	struct ib_uverbs_event *entry;
 	unsigned long flags;
 
 	entry = kmalloc(sizeof *entry, GFP_ATOMIC);
 	if (!entry)
 		return;
 
-	entry->desc.element    = element;
-	entry->desc.event_type = event;
+	entry->desc.async.element    = element;
+	entry->desc.async.event_type = event;
+	entry->counter               = counter;
 
 	spin_lock_irqsave(&file->async_file.lock, flags);
 	list_add_tail(&entry->list, &file->async_file.event_list);
+	if (obj_list)
+		list_add_tail(&entry->obj_list, obj_list);
 	spin_unlock_irqrestore(&file->async_file.lock, flags);
 
 	wake_up_interruptible(&file->async_file.poll_wait);
@@ -345,23 +354,39 @@ static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
 
 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
 {
-	ib_uverbs_async_handler(context_ptr,
-				event->element.cq->uobject->user_handle,
-				event->event);
+	struct ib_ucq_object *uobj;
+
+	uobj = container_of(event->element.cq->uobject,
+			    struct ib_ucq_object, uobject);
+
+	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
+				event->event, &uobj->async_list,
+				&uobj->async_events_reported);
+
 }
 
 void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
 {
-	ib_uverbs_async_handler(context_ptr,
-				event->element.qp->uobject->user_handle,
-				event->event);
+	struct ib_uevent_object *uobj;
+
+	uobj = container_of(event->element.qp->uobject,
+			    struct ib_uevent_object, uobject);
+
+	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
+				event->event, &uobj->event_list,
+				&uobj->events_reported);
 }
 
 void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
 {
-	ib_uverbs_async_handler(context_ptr,
-				event->element.srq->uobject->user_handle,
-				event->event);
+	struct ib_uevent_object *uobj;
+
+	uobj = container_of(event->element.srq->uobject,
+			    struct ib_uevent_object, uobject);
+
+	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
+				event->event, &uobj->event_list,
+				&uobj->events_reported);
 }
 
 static void ib_uverbs_event_handler(struct ib_event_handler *handler,
@@ -370,7 +395,8 @@ static void ib_uverbs_event_handler(struct ib_event_handler *handler,
 	struct ib_uverbs_file *file =
 		container_of(handler, struct ib_uverbs_file, event_handler);
 
-	ib_uverbs_async_handler(file, event->element.port_num, event->event);
+	ib_uverbs_async_handler(file, event->element.port_num, event->event,
+				NULL, NULL);
 }
 
 static int ib_uverbs_event_init(struct ib_uverbs_event_file *file,