author     Roland Dreier <rolandd@cisco.com>    2005-09-26 16:53:25 -0400
committer  Roland Dreier <rolandd@cisco.com>    2005-10-17 18:20:25 -0400
commit     6b73597e7062118c0549c2702bfb7d273518c906
tree       6034aae7493b32d75d1c8818a801b09b77979acd /drivers/infiniband
parent     33033b797225553e48ca68d8d8dc5e64ec22e02b
[IB] uverbs: ABI-breaking fixes for userspace verbs
Introduce new userspace verbs ABI version 3. This eliminates some
unneeded commands, and adds support for user-created completion
channels. This cleans up problems with file leaks on error paths, and
also makes sure that file descriptors are always installed into the
correct process.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')

 drivers/infiniband/core/uverbs.h      |  18
 drivers/infiniband/core/uverbs_cmd.c  | 186
 drivers/infiniband/core/uverbs_main.c | 292
 3 files changed, 244 insertions, 252 deletions
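For orientation, here is a minimal userspace sketch of how the new completion-channel path is expected to be driven. It is not part of this patch and assumes the standard libibverbs ibv_* API: ibv_create_comp_channel() would issue the new IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL command, and the channel is then passed to ibv_create_cq() so completion events arrive on the channel's file descriptor.

/*
 * Minimal sketch (not part of this patch): consuming the new
 * completion-channel ABI through libibverbs.
 */
#include <infiniband/verbs.h>

static int wait_one_completion(struct ibv_context *ctx)
{
        struct ibv_comp_channel *channel;
        struct ibv_cq *cq, *ev_cq;
        void *ev_ctx;

        channel = ibv_create_comp_channel(ctx);   /* user-created comp channel */
        if (!channel)
                return -1;

        /* Bind the CQ to the channel and to completion vector 0. */
        cq = ibv_create_cq(ctx, 16, NULL, channel, 0);
        if (!cq)
                goto err_channel;

        if (ibv_req_notify_cq(cq, 0))              /* arm for the next completion */
                goto err_cq;

        /* Blocks reading the channel fd until a completion event arrives. */
        if (ibv_get_cq_event(channel, &ev_cq, &ev_ctx))
                goto err_cq;
        ibv_ack_cq_events(ev_cq, 1);

        ibv_destroy_cq(cq);
        ibv_destroy_comp_channel(channel);
        return 0;

err_cq:
        ibv_destroy_cq(cq);
err_channel:
        ibv_destroy_comp_channel(channel);
        return -1;
}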
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index cc124344dd2c..475153e510a9 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -53,14 +53,14 @@ struct ib_uverbs_device {
         struct cdev dev;
         struct class_device class_dev;
         struct ib_device *ib_dev;
-        int num_comp;
+        int num_comp_vectors;
 };
 
 struct ib_uverbs_event_file {
         struct kref ref;
+        struct file *file;
         struct ib_uverbs_file *uverbs_file;
         spinlock_t lock;
-        int fd;
         int is_async;
         wait_queue_head_t poll_wait;
         struct fasync_struct *async_queue;
@@ -73,8 +73,7 @@ struct ib_uverbs_file {
         struct ib_uverbs_device *device;
         struct ib_ucontext *ucontext;
         struct ib_event_handler event_handler;
-        struct ib_uverbs_event_file async_file;
-        struct ib_uverbs_event_file comp_file[1];
+        struct ib_uverbs_event_file *async_file;
 };
 
 struct ib_uverbs_event {
@@ -110,10 +109,17 @@ extern struct idr ib_uverbs_cq_idr;
 extern struct idr ib_uverbs_qp_idr;
 extern struct idr ib_uverbs_srq_idr;
 
+struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
+                                        int is_async, int *fd);
+void ib_uverbs_release_event_file(struct kref *ref);
+struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd);
+
 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
 void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
 void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
+void ib_uverbs_event_handler(struct ib_event_handler *handler,
+                             struct ib_event *event);
 
 int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
                 void *addr, size_t size, int write);
@@ -125,16 +131,14 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem);
                 const char __user *buf, int in_len, \
                 int out_len)
 
-IB_UVERBS_DECLARE_CMD(query_params);
 IB_UVERBS_DECLARE_CMD(get_context);
 IB_UVERBS_DECLARE_CMD(query_device);
 IB_UVERBS_DECLARE_CMD(query_port);
-IB_UVERBS_DECLARE_CMD(query_gid);
-IB_UVERBS_DECLARE_CMD(query_pkey);
 IB_UVERBS_DECLARE_CMD(alloc_pd);
 IB_UVERBS_DECLARE_CMD(dealloc_pd);
 IB_UVERBS_DECLARE_CMD(reg_mr);
 IB_UVERBS_DECLARE_CMD(dereg_mr);
+IB_UVERBS_DECLARE_CMD(create_comp_channel);
 IB_UVERBS_DECLARE_CMD(create_cq);
 IB_UVERBS_DECLARE_CMD(destroy_cq);
 IB_UVERBS_DECLARE_CMD(create_qp);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 562445165d2b..79b60c3dc8d0 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -33,6 +33,8 @@
  * $Id: uverbs_cmd.c 2708 2005-06-24 17:27:21Z roland $
  */
 
+#include <linux/file.h>
+
 #include <asm/uaccess.h>
 
 #include "uverbs.h"
@@ -45,29 +47,6 @@
                 (udata)->outlen = (olen); \
         } while (0)
 
-ssize_t ib_uverbs_query_params(struct ib_uverbs_file *file,
-                               const char __user *buf,
-                               int in_len, int out_len)
-{
-        struct ib_uverbs_query_params cmd;
-        struct ib_uverbs_query_params_resp resp;
-
-        if (out_len < sizeof resp)
-                return -ENOSPC;
-
-        if (copy_from_user(&cmd, buf, sizeof cmd))
-                return -EFAULT;
-
-        memset(&resp, 0, sizeof resp);
-
-        resp.num_cq_events = file->device->num_comp;
-
-        if (copy_to_user((void __user *) (unsigned long) cmd.response, &resp, sizeof resp))
-                return -EFAULT;
-
-        return in_len;
-}
-
 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
                               const char __user *buf,
                               int in_len, int out_len)
@@ -77,7 +56,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
         struct ib_udata udata;
         struct ib_device *ibdev = file->device->ib_dev;
         struct ib_ucontext *ucontext;
-        int i;
+        struct file *filp;
         int ret;
 
         if (out_len < sizeof resp)
@@ -110,26 +89,42 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
         INIT_LIST_HEAD(&ucontext->srq_list);
         INIT_LIST_HEAD(&ucontext->ah_list);
 
-        resp.async_fd = file->async_file.fd;
-        for (i = 0; i < file->device->num_comp; ++i)
-                if (copy_to_user((void __user *) (unsigned long) cmd.cq_fd_tab +
-                                 i * sizeof (__u32),
-                                 &file->comp_file[i].fd, sizeof (__u32))) {
-                        ret = -EFAULT;
-                        goto err_free;
-                }
+        resp.num_comp_vectors = file->device->num_comp_vectors;
+
+        filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd);
+        if (IS_ERR(filp)) {
+                ret = PTR_ERR(filp);
+                goto err_free;
+        }
 
         if (copy_to_user((void __user *) (unsigned long) cmd.response,
                          &resp, sizeof resp)) {
                 ret = -EFAULT;
-                goto err_free;
+                goto err_file;
         }
 
-        file->ucontext = ucontext;
+        file->async_file = filp->private_data;
+
+        INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
+                              ib_uverbs_event_handler);
+        ret = ib_register_event_handler(&file->event_handler);
+        if (ret)
+                goto err_file;
+
+        kref_get(&file->async_file->ref);
+        kref_get(&file->ref);
+        file->ucontext = ucontext;
+
+        fd_install(resp.async_fd, filp);
+
         up(&file->mutex);
 
         return in_len;
 
+err_file:
+        put_unused_fd(resp.async_fd);
+        fput(filp);
+
 err_free:
         ibdev->dealloc_ucontext(ucontext);
 
@@ -255,62 +250,6 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
         return in_len;
 }
 
-ssize_t ib_uverbs_query_gid(struct ib_uverbs_file *file,
-                            const char __user *buf,
-                            int in_len, int out_len)
-{
-        struct ib_uverbs_query_gid cmd;
-        struct ib_uverbs_query_gid_resp resp;
-        int ret;
-
-        if (out_len < sizeof resp)
-                return -ENOSPC;
-
-        if (copy_from_user(&cmd, buf, sizeof cmd))
-                return -EFAULT;
-
-        memset(&resp, 0, sizeof resp);
-
-        ret = ib_query_gid(file->device->ib_dev, cmd.port_num, cmd.index,
-                           (union ib_gid *) resp.gid);
-        if (ret)
-                return ret;
-
-        if (copy_to_user((void __user *) (unsigned long) cmd.response,
-                         &resp, sizeof resp))
-                return -EFAULT;
-
-        return in_len;
-}
-
-ssize_t ib_uverbs_query_pkey(struct ib_uverbs_file *file,
-                             const char __user *buf,
-                             int in_len, int out_len)
-{
-        struct ib_uverbs_query_pkey cmd;
-        struct ib_uverbs_query_pkey_resp resp;
-        int ret;
-
-        if (out_len < sizeof resp)
-                return -ENOSPC;
-
-        if (copy_from_user(&cmd, buf, sizeof cmd))
-                return -EFAULT;
-
-        memset(&resp, 0, sizeof resp);
-
-        ret = ib_query_pkey(file->device->ib_dev, cmd.port_num, cmd.index,
-                            &resp.pkey);
-        if (ret)
-                return ret;
-
-        if (copy_to_user((void __user *) (unsigned long) cmd.response,
-                         &resp, sizeof resp))
-                return -EFAULT;
-
-        return in_len;
-}
-
 ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
                            const char __user *buf,
                            int in_len, int out_len)
@@ -595,6 +534,35 @@ out:
         return ret ? ret : in_len;
 }
 
+ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
+                                      const char __user *buf, int in_len,
+                                      int out_len)
+{
+        struct ib_uverbs_create_comp_channel cmd;
+        struct ib_uverbs_create_comp_channel_resp resp;
+        struct file *filp;
+
+        if (out_len < sizeof resp)
+                return -ENOSPC;
+
+        if (copy_from_user(&cmd, buf, sizeof cmd))
+                return -EFAULT;
+
+        filp = ib_uverbs_alloc_event_file(file, 0, &resp.fd);
+        if (IS_ERR(filp))
+                return PTR_ERR(filp);
+
+        if (copy_to_user((void __user *) (unsigned long) cmd.response,
+                         &resp, sizeof resp)) {
+                put_unused_fd(resp.fd);
+                fput(filp);
+                return -EFAULT;
+        }
+
+        fd_install(resp.fd, filp);
+        return in_len;
+}
+
 ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
@@ -603,6 +571,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
         struct ib_uverbs_create_cq_resp resp;
         struct ib_udata udata;
         struct ib_ucq_object *uobj;
+        struct ib_uverbs_event_file *ev_file = NULL;
         struct ib_cq *cq;
         int ret;
 
@@ -616,9 +585,12 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
                    (unsigned long) cmd.response + sizeof resp,
                    in_len - sizeof cmd, out_len - sizeof resp);
 
-        if (cmd.event_handler >= file->device->num_comp)
+        if (cmd.comp_vector >= file->device->num_comp_vectors)
                 return -EINVAL;
 
+        if (cmd.comp_channel >= 0)
+                ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
+
         uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
         if (!uobj)
                 return -ENOMEM;
@@ -641,7 +613,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
         cq->uobject = &uobj->uobject;
         cq->comp_handler = ib_uverbs_comp_handler;
         cq->event_handler = ib_uverbs_cq_event_handler;
-        cq->cq_context = file;
+        cq->cq_context = ev_file;
         atomic_set(&cq->usecnt, 0);
 
 retry:
@@ -700,6 +672,7 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
         struct ib_uverbs_destroy_cq_resp resp;
         struct ib_cq *cq;
         struct ib_ucq_object *uobj;
+        struct ib_uverbs_event_file *ev_file;
         struct ib_uverbs_event *evt, *tmp;
         u64 user_handle;
         int ret = -EINVAL;
@@ -716,7 +689,8 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
                 goto out;
 
         user_handle = cq->uobject->user_handle;
         uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
+        ev_file = cq->cq_context;
 
         ret = ib_destroy_cq(cq);
         if (ret)
@@ -728,19 +702,23 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
         list_del(&uobj->uobject.list);
         up(&file->mutex);
 
-        spin_lock_irq(&file->comp_file[0].lock);
-        list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
-                list_del(&evt->list);
-                kfree(evt);
+        if (ev_file) {
+                spin_lock_irq(&ev_file->lock);
+                list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
+                        list_del(&evt->list);
+                        kfree(evt);
+                }
+                spin_unlock_irq(&ev_file->lock);
+
+                kref_put(&ev_file->ref, ib_uverbs_release_event_file);
         }
-        spin_unlock_irq(&file->comp_file[0].lock);
 
-        spin_lock_irq(&file->async_file.lock);
+        spin_lock_irq(&file->async_file->lock);
         list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
                 list_del(&evt->list);
                 kfree(evt);
         }
-        spin_unlock_irq(&file->async_file.lock);
+        spin_unlock_irq(&file->async_file->lock);
 
         resp.comp_events_reported = uobj->comp_events_reported;
         resp.async_events_reported = uobj->async_events_reported;
@@ -1005,12 +983,12 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
         list_del(&uobj->uobject.list);
         up(&file->mutex);
 
-        spin_lock_irq(&file->async_file.lock);
+        spin_lock_irq(&file->async_file->lock);
         list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
                 list_del(&evt->list);
                 kfree(evt);
         }
-        spin_unlock_irq(&file->async_file.lock);
+        spin_unlock_irq(&file->async_file->lock);
 
         resp.events_reported = uobj->events_reported;
 
@@ -1243,12 +1221,12 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
         list_del(&uobj->uobject.list);
         up(&file->mutex);
 
-        spin_lock_irq(&file->async_file.lock);
+        spin_lock_irq(&file->async_file->lock);
         list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
                 list_del(&evt->list);
                 kfree(evt);
         }
-        spin_unlock_irq(&file->async_file.lock);
+        spin_unlock_irq(&file->async_file->lock);
 
         resp.events_reported = uobj->events_reported;
 
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 12511808de21..e7058fbc607d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -77,26 +77,24 @@ static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
 static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
                                      const char __user *buf, int in_len,
                                      int out_len) = {
-        [IB_USER_VERBS_CMD_QUERY_PARAMS] = ib_uverbs_query_params,
-        [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context,
-        [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device,
-        [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port,
-        [IB_USER_VERBS_CMD_QUERY_GID] = ib_uverbs_query_gid,
-        [IB_USER_VERBS_CMD_QUERY_PKEY] = ib_uverbs_query_pkey,
-        [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd,
-        [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd,
-        [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr,
-        [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr,
-        [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq,
-        [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq,
-        [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp,
-        [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp,
-        [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp,
-        [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast,
-        [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast,
-        [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq,
-        [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq,
-        [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq,
+        [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context,
+        [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device,
+        [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port,
+        [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd,
+        [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd,
+        [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr,
+        [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr,
+        [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
+        [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq,
+        [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq,
+        [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp,
+        [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp,
+        [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp,
+        [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast,
+        [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast,
+        [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq,
+        [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq,
+        [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq,
 };
 
 static struct vfsmount *uverbs_event_mnt;
@@ -188,25 +186,19 @@ static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf,
 
         spin_lock_irq(&file->lock);
 
-        while (list_empty(&file->event_list) && file->fd >= 0) {
+        while (list_empty(&file->event_list)) {
                 spin_unlock_irq(&file->lock);
 
                 if (filp->f_flags & O_NONBLOCK)
                         return -EAGAIN;
 
                 if (wait_event_interruptible(file->poll_wait,
-                                             !list_empty(&file->event_list) ||
-                                             file->fd < 0))
+                                             !list_empty(&file->event_list)))
                         return -ERESTARTSYS;
 
                 spin_lock_irq(&file->lock);
         }
 
-        if (file->fd < 0) {
-                spin_unlock_irq(&file->lock);
-                return -ENODEV;
-        }
-
         event = list_entry(file->event_list.next, struct ib_uverbs_event, list);
 
         if (file->is_async)
@@ -248,26 +240,19 @@ static unsigned int ib_uverbs_event_poll(struct file *filp,
         poll_wait(filp, &file->poll_wait, wait);
 
         spin_lock_irq(&file->lock);
-        if (file->fd < 0)
-                pollflags = POLLERR;
-        else if (!list_empty(&file->event_list))
+        if (!list_empty(&file->event_list))
                 pollflags = POLLIN | POLLRDNORM;
         spin_unlock_irq(&file->lock);
 
         return pollflags;
 }
 
-static void ib_uverbs_event_release(struct ib_uverbs_event_file *file)
+void ib_uverbs_release_event_file(struct kref *ref)
 {
-        struct ib_uverbs_event *entry, *tmp;
+        struct ib_uverbs_event_file *file =
+                container_of(ref, struct ib_uverbs_event_file, ref);
 
-        spin_lock_irq(&file->lock);
-        if (file->fd != -1) {
-                file->fd = -1;
-                list_for_each_entry_safe(entry, tmp, &file->event_list, list)
-                        kfree(entry);
-        }
-        spin_unlock_irq(&file->lock);
+        kfree(file);
 }
 
 static int ib_uverbs_event_fasync(int fd, struct file *filp, int on)
@@ -280,21 +265,30 @@ static int ib_uverbs_event_fasync(int fd, struct file *filp, int on)
 static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
 {
         struct ib_uverbs_event_file *file = filp->private_data;
+        struct ib_uverbs_event *entry, *tmp;
+
+        spin_lock_irq(&file->lock);
+        file->file = NULL;
+        list_for_each_entry_safe(entry, tmp, &file->event_list, list) {
+                if (entry->counter)
+                        list_del(&entry->obj_list);
+                kfree(entry);
+        }
+        spin_unlock_irq(&file->lock);
 
-        ib_uverbs_event_release(file);
         ib_uverbs_event_fasync(-1, filp, 0);
-        kref_put(&file->uverbs_file->ref, ib_uverbs_release_file);
+
+        if (file->is_async) {
+                ib_unregister_event_handler(&file->uverbs_file->event_handler);
+                kref_put(&file->uverbs_file->ref, ib_uverbs_release_file);
+        }
+        kref_put(&file->ref, ib_uverbs_release_event_file);
 
         return 0;
 }
 
 static struct file_operations uverbs_event_fops = {
-        /*
-         * No .owner field since we artificially create event files,
-         * so there is no increment to the module reference count in
-         * the open path.  All event files come from a uverbs command
-         * file, which already takes a module reference, so this is OK.
-         */
+        .owner   = THIS_MODULE,
         .read    = ib_uverbs_event_read,
         .poll    = ib_uverbs_event_poll,
         .release = ib_uverbs_event_close,
@@ -303,10 +297,19 @@ static struct file_operations uverbs_event_fops = {
 
 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
 {
-        struct ib_uverbs_file *file = cq_context;
+        struct ib_uverbs_event_file *file = cq_context;
         struct ib_ucq_object *uobj;
         struct ib_uverbs_event *entry;
         unsigned long flags;
+
+        if (!file)
+                return;
+
+        spin_lock_irqsave(&file->lock, flags);
+        if (!file->file) {
+                spin_unlock_irqrestore(&file->lock, flags);
+                return;
+        }
 
         entry = kmalloc(sizeof *entry, GFP_ATOMIC);
         if (!entry)
@@ -317,13 +320,12 @@ void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
         entry->desc.comp.cq_handle = cq->uobject->user_handle;
         entry->counter = &uobj->comp_events_reported;
 
-        spin_lock_irqsave(&file->comp_file[0].lock, flags);
-        list_add_tail(&entry->list, &file->comp_file[0].event_list);
+        list_add_tail(&entry->list, &file->event_list);
         list_add_tail(&entry->obj_list, &uobj->comp_list);
-        spin_unlock_irqrestore(&file->comp_file[0].lock, flags);
+        spin_unlock_irqrestore(&file->lock, flags);
 
-        wake_up_interruptible(&file->comp_file[0].poll_wait);
-        kill_fasync(&file->comp_file[0].async_queue, SIGIO, POLL_IN);
+        wake_up_interruptible(&file->poll_wait);
+        kill_fasync(&file->async_queue, SIGIO, POLL_IN);
 }
 
 static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
@@ -334,6 +336,12 @@ static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
         struct ib_uverbs_event *entry;
         unsigned long flags;
 
+        spin_lock_irqsave(&file->async_file->lock, flags);
+        if (!file->async_file->file) {
+                spin_unlock_irqrestore(&file->async_file->lock, flags);
+                return;
+        }
+
         entry = kmalloc(sizeof *entry, GFP_ATOMIC);
         if (!entry)
                 return;
@@ -342,24 +350,24 @@ static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
         entry->desc.async.event_type = event;
         entry->counter = counter;
 
-        spin_lock_irqsave(&file->async_file.lock, flags);
-        list_add_tail(&entry->list, &file->async_file.event_list);
+        list_add_tail(&entry->list, &file->async_file->event_list);
         if (obj_list)
                 list_add_tail(&entry->obj_list, obj_list);
-        spin_unlock_irqrestore(&file->async_file.lock, flags);
+        spin_unlock_irqrestore(&file->async_file->lock, flags);
 
-        wake_up_interruptible(&file->async_file.poll_wait);
-        kill_fasync(&file->async_file.async_queue, SIGIO, POLL_IN);
+        wake_up_interruptible(&file->async_file->poll_wait);
+        kill_fasync(&file->async_file->async_queue, SIGIO, POLL_IN);
 }
 
 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
 {
+        struct ib_uverbs_event_file *ev_file = context_ptr;
         struct ib_ucq_object *uobj;
 
         uobj = container_of(event->element.cq->uobject,
                             struct ib_ucq_object, uobject);
 
-        ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
+        ib_uverbs_async_handler(ev_file->uverbs_file, uobj->uobject.user_handle,
                                 event->event, &uobj->async_list,
                                 &uobj->async_events_reported);
 
@@ -389,8 +397,8 @@ void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
                                 &uobj->events_reported);
 }
 
-static void ib_uverbs_event_handler(struct ib_event_handler *handler,
-                                    struct ib_event *event)
+void ib_uverbs_event_handler(struct ib_event_handler *handler,
+                             struct ib_event *event)
 {
         struct ib_uverbs_file *file =
                 container_of(handler, struct ib_uverbs_file, event_handler);
@@ -399,38 +407,90 @@ static void ib_uverbs_event_handler(struct ib_event_handler *handler,
                                 NULL, NULL);
 }
 
-static int ib_uverbs_event_init(struct ib_uverbs_event_file *file,
-                                struct ib_uverbs_file *uverbs_file)
+struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
+                                        int is_async, int *fd)
 {
+        struct ib_uverbs_event_file *ev_file;
         struct file *filp;
+        int ret;
 
-        spin_lock_init(&file->lock);
-        INIT_LIST_HEAD(&file->event_list);
-        init_waitqueue_head(&file->poll_wait);
-        file->uverbs_file = uverbs_file;
-        file->async_queue = NULL;
-
-        file->fd = get_unused_fd();
-        if (file->fd < 0)
-                return file->fd;
+        ev_file = kmalloc(sizeof *ev_file, GFP_KERNEL);
+        if (!ev_file)
+                return ERR_PTR(-ENOMEM);
+
+        kref_init(&ev_file->ref);
+        spin_lock_init(&ev_file->lock);
+        INIT_LIST_HEAD(&ev_file->event_list);
+        init_waitqueue_head(&ev_file->poll_wait);
+        ev_file->uverbs_file = uverbs_file;
+        ev_file->async_queue = NULL;
+        ev_file->is_async = is_async;
+
+        *fd = get_unused_fd();
+        if (*fd < 0) {
+                ret = *fd;
+                goto err;
+        }
 
         filp = get_empty_filp();
         if (!filp) {
-                put_unused_fd(file->fd);
-                return -ENFILE;
+                ret = -ENFILE;
+                goto err_fd;
         }
 
-        filp->f_op = &uverbs_event_fops;
+        ev_file->file = filp;
+
+        /*
+         * fops_get() can't fail here, because we're coming from a
+         * system call on a uverbs file, which will already have a
+         * module reference.
+         */
+        filp->f_op = fops_get(&uverbs_event_fops);
         filp->f_vfsmnt = mntget(uverbs_event_mnt);
         filp->f_dentry = dget(uverbs_event_mnt->mnt_root);
         filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
         filp->f_flags = O_RDONLY;
         filp->f_mode = FMODE_READ;
-        filp->private_data = file;
+        filp->private_data = ev_file;
 
-        fd_install(file->fd, filp);
+        return filp;
 
-        return 0;
+err_fd:
+        put_unused_fd(*fd);
+
+err:
+        kfree(ev_file);
+        return ERR_PTR(ret);
+}
+
+/*
+ * Look up a completion event file by FD.  If lookup is successful,
+ * takes a ref to the event file struct that it returns; if
+ * unsuccessful, returns NULL.
+ */
+struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd)
+{
+        struct ib_uverbs_event_file *ev_file = NULL;
+        struct file *filp;
+
+        filp = fget(fd);
+        if (!filp)
+                return NULL;
+
+        if (filp->f_op != &uverbs_event_fops)
+                goto out;
+
+        ev_file = filp->private_data;
+        if (ev_file->is_async) {
+                ev_file = NULL;
+                goto out;
+        }
+
+        kref_get(&ev_file->ref);
+
+out:
+        fput(filp);
+        return ev_file;
 }
 
 static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
@@ -453,8 +513,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
             !uverbs_cmd_table[hdr.command])
                 return -EINVAL;
 
         if (!file->ucontext &&
-            hdr.command != IB_USER_VERBS_CMD_QUERY_PARAMS &&
             hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT)
                 return -EINVAL;
 
@@ -477,82 +536,33 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
         struct ib_uverbs_device *dev =
                 container_of(inode->i_cdev, struct ib_uverbs_device, dev);
         struct ib_uverbs_file *file;
-        int i = 0;
-        int ret;
 
         if (!try_module_get(dev->ib_dev->owner))
                 return -ENODEV;
 
-        file = kmalloc(sizeof *file +
-                       (dev->num_comp - 1) * sizeof (struct ib_uverbs_event_file),
-                       GFP_KERNEL);
+        file = kmalloc(sizeof *file, GFP_KERNEL);
         if (!file) {
-                ret = -ENOMEM;
-                goto err;
+                module_put(dev->ib_dev->owner);
+                return -ENOMEM;
         }
 
         file->device = dev;
+        file->ucontext = NULL;
         kref_init(&file->ref);
         init_MUTEX(&file->mutex);
 
-        file->ucontext = NULL;
-
-        kref_get(&file->ref);
-        ret = ib_uverbs_event_init(&file->async_file, file);
-        if (ret)
-                goto err_kref;
-
-        file->async_file.is_async = 1;
-
-        for (i = 0; i < dev->num_comp; ++i) {
-                kref_get(&file->ref);
-                ret = ib_uverbs_event_init(&file->comp_file[i], file);
-                if (ret)
-                        goto err_async;
-                file->comp_file[i].is_async = 0;
-        }
-
-
         filp->private_data = file;
 
-        INIT_IB_EVENT_HANDLER(&file->event_handler, dev->ib_dev,
-                              ib_uverbs_event_handler);
-        if (ib_register_event_handler(&file->event_handler))
-                goto err_async;
-
         return 0;
-
-err_async:
-        while (i--)
-                ib_uverbs_event_release(&file->comp_file[i]);
-
-        ib_uverbs_event_release(&file->async_file);
-
-err_kref:
-        /*
-         * One extra kref_put() because we took a reference before the
-         * event file creation that failed and got us here.
-         */
-        kref_put(&file->ref, ib_uverbs_release_file);
-        kref_put(&file->ref, ib_uverbs_release_file);
-
-err:
-        module_put(dev->ib_dev->owner);
-        return ret;
 }
 
 static int ib_uverbs_close(struct inode *inode, struct file *filp)
 {
         struct ib_uverbs_file *file = filp->private_data;
-        int i;
 
-        ib_unregister_event_handler(&file->event_handler);
-        ib_uverbs_event_release(&file->async_file);
         ib_dealloc_ucontext(file->ucontext);
 
-        for (i = 0; i < file->device->num_comp; ++i)
-                ib_uverbs_event_release(&file->comp_file[i]);
-
+        kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
         kref_put(&file->ref, ib_uverbs_release_file);
 
         return 0;
@@ -631,8 +641,8 @@ static void ib_uverbs_add_one(struct ib_device *device)
         set_bit(uverbs_dev->devnum, dev_map);
         spin_unlock(&map_lock);
 
         uverbs_dev->ib_dev = device;
-        uverbs_dev->num_comp = 1;
+        uverbs_dev->num_comp_vectors = 1;
 
         if (device->mmap)
                 cdev_init(&uverbs_dev->dev, &uverbs_mmap_fops);