author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-08-04 20:10:31 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-08-04 20:10:31 -0400
commit | 0cda611386b2fcbf8bb32e9a5d82bfed4856fc36 (patch)
tree | 1647e00675ab924edfb22b69ea3872db091b8900 /drivers/infiniband/core
parent | fdf1f7ff1bd7f1c6d1d5dc2b29b6b11a4f722276 (diff)
parent | 7f1d25b47d919cef29388aff37e7b074e65bf512 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull base rdma updates from Doug Ledford:
"Round one of 4.8 code: while this is mostly normal, there is a new
driver in here (the driver was hosted outside the kernel for several
years and is actually a fairly mature and well coded driver). It
amounts to 13,000 of the 16,000 lines of added code in here.
Summary:
- Updates/fixes for iw_cxgb4 driver
- Updates/fixes for mlx5 driver
- Add flow steering and RSS API
- Add hardware stats to mlx4 and mlx5 drivers
- Add firmware version API for RDMA driver use
- Add the rxe driver (this is a software RoCE driver that makes any
Ethernet device a RoCE device)
- Fixes for i40iw driver
- Support for send only multicast joins in the cma layer
- Other minor fixes"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (72 commits)
Soft RoCE driver
IB/core: Support for CMA multicast join flags
IB/sa: Add cached attribute containing SM information to SA port
IB/uverbs: Fix race between uverbs_close and remove_one
IB/mthca: Clean up error unwind flow in mthca_reset()
IB/mthca: NULL arg to pci_dev_put is OK
IB/hfi1: NULL arg to sc_return_credits is OK
IB/mlx4: Add diagnostic hardware counters
net/mlx4: Query performance and diagnostics counters
net/mlx4: Add diagnostic counters capability bit
Use smaller 512 byte messages for portmapper messages
IB/ipoib: Report SG feature regardless of HW UD CSUM capability
IB/mlx4: Don't use GFP_ATOMIC for CQ resize struct
IB/hfi1: Disable by default
IB/rdmavt: Disable by default
IB/mlx5: Fix port counter ID association to QP offset
IB/mlx5: Fix iteration overrun in GSI qps
i40iw: Add NULL check for puda buffer
i40iw: Change dup_ack_thresh to u8
i40iw: Remove unnecessary check for moving CQ head
...
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r-- | drivers/infiniband/core/cma.c | 98
-rw-r--r-- | drivers/infiniband/core/device.c | 9
-rw-r--r-- | drivers/infiniband/core/iwcm.c | 54
-rw-r--r-- | drivers/infiniband/core/iwcm.h | 2
-rw-r--r-- | drivers/infiniband/core/iwpm_util.c | 3
-rw-r--r-- | drivers/infiniband/core/multicast.c | 12
-rw-r--r-- | drivers/infiniband/core/netlink.c | 6
-rw-r--r-- | drivers/infiniband/core/sa_query.c | 41
-rw-r--r-- | drivers/infiniband/core/sysfs.c | 15
-rw-r--r-- | drivers/infiniband/core/ucma.c | 18
-rw-r--r-- | drivers/infiniband/core/uverbs.h | 14
-rw-r--r-- | drivers/infiniband/core/uverbs_cmd.c | 535
-rw-r--r-- | drivers/infiniband/core/uverbs_main.c | 75
-rw-r--r-- | drivers/infiniband/core/verbs.c | 163
14 files changed, 956 insertions(+), 89 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index ad1b1adcf6f0..e6dfa1bd3def 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -68,6 +68,7 @@ MODULE_DESCRIPTION("Generic RDMA CM Agent"); | |||
68 | MODULE_LICENSE("Dual BSD/GPL"); | 68 | MODULE_LICENSE("Dual BSD/GPL"); |
69 | 69 | ||
70 | #define CMA_CM_RESPONSE_TIMEOUT 20 | 70 | #define CMA_CM_RESPONSE_TIMEOUT 20 |
71 | #define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000 | ||
71 | #define CMA_MAX_CM_RETRIES 15 | 72 | #define CMA_MAX_CM_RETRIES 15 |
72 | #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) | 73 | #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) |
73 | #define CMA_IBOE_PACKET_LIFETIME 18 | 74 | #define CMA_IBOE_PACKET_LIFETIME 18 |
@@ -162,6 +163,14 @@ struct rdma_bind_list { | |||
162 | unsigned short port; | 163 | unsigned short port; |
163 | }; | 164 | }; |
164 | 165 | ||
166 | struct class_port_info_context { | ||
167 | struct ib_class_port_info *class_port_info; | ||
168 | struct ib_device *device; | ||
169 | struct completion done; | ||
170 | struct ib_sa_query *sa_query; | ||
171 | u8 port_num; | ||
172 | }; | ||
173 | |||
165 | static int cma_ps_alloc(struct net *net, enum rdma_port_space ps, | 174 | static int cma_ps_alloc(struct net *net, enum rdma_port_space ps, |
166 | struct rdma_bind_list *bind_list, int snum) | 175 | struct rdma_bind_list *bind_list, int snum) |
167 | { | 176 | { |
@@ -306,6 +315,7 @@ struct cma_multicast { | |||
306 | struct sockaddr_storage addr; | 315 | struct sockaddr_storage addr; |
307 | struct kref mcref; | 316 | struct kref mcref; |
308 | bool igmp_joined; | 317 | bool igmp_joined; |
318 | u8 join_state; | ||
309 | }; | 319 | }; |
310 | 320 | ||
311 | struct cma_work { | 321 | struct cma_work { |
@@ -3752,10 +3762,63 @@ static void cma_set_mgid(struct rdma_id_private *id_priv, | |||
3752 | } | 3762 | } |
3753 | } | 3763 | } |
3754 | 3764 | ||
3765 | static void cma_query_sa_classport_info_cb(int status, | ||
3766 | struct ib_class_port_info *rec, | ||
3767 | void *context) | ||
3768 | { | ||
3769 | struct class_port_info_context *cb_ctx = context; | ||
3770 | |||
3771 | WARN_ON(!context); | ||
3772 | |||
3773 | if (status || !rec) { | ||
3774 | pr_debug("RDMA CM: %s port %u failed query ClassPortInfo status: %d\n", | ||
3775 | cb_ctx->device->name, cb_ctx->port_num, status); | ||
3776 | goto out; | ||
3777 | } | ||
3778 | |||
3779 | memcpy(cb_ctx->class_port_info, rec, sizeof(struct ib_class_port_info)); | ||
3780 | |||
3781 | out: | ||
3782 | complete(&cb_ctx->done); | ||
3783 | } | ||
3784 | |||
3785 | static int cma_query_sa_classport_info(struct ib_device *device, u8 port_num, | ||
3786 | struct ib_class_port_info *class_port_info) | ||
3787 | { | ||
3788 | struct class_port_info_context *cb_ctx; | ||
3789 | int ret; | ||
3790 | |||
3791 | cb_ctx = kmalloc(sizeof(*cb_ctx), GFP_KERNEL); | ||
3792 | if (!cb_ctx) | ||
3793 | return -ENOMEM; | ||
3794 | |||
3795 | cb_ctx->device = device; | ||
3796 | cb_ctx->class_port_info = class_port_info; | ||
3797 | cb_ctx->port_num = port_num; | ||
3798 | init_completion(&cb_ctx->done); | ||
3799 | |||
3800 | ret = ib_sa_classport_info_rec_query(&sa_client, device, port_num, | ||
3801 | CMA_QUERY_CLASSPORT_INFO_TIMEOUT, | ||
3802 | GFP_KERNEL, cma_query_sa_classport_info_cb, | ||
3803 | cb_ctx, &cb_ctx->sa_query); | ||
3804 | if (ret < 0) { | ||
3805 | pr_err("RDMA CM: %s port %u failed to send ClassPortInfo query, ret: %d\n", | ||
3806 | device->name, port_num, ret); | ||
3807 | goto out; | ||
3808 | } | ||
3809 | |||
3810 | wait_for_completion(&cb_ctx->done); | ||
3811 | |||
3812 | out: | ||
3813 | kfree(cb_ctx); | ||
3814 | return ret; | ||
3815 | } | ||
3816 | |||
3755 | static int cma_join_ib_multicast(struct rdma_id_private *id_priv, | 3817 | static int cma_join_ib_multicast(struct rdma_id_private *id_priv, |
3756 | struct cma_multicast *mc) | 3818 | struct cma_multicast *mc) |
3757 | { | 3819 | { |
3758 | struct ib_sa_mcmember_rec rec; | 3820 | struct ib_sa_mcmember_rec rec; |
3821 | struct ib_class_port_info class_port_info; | ||
3759 | struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; | 3822 | struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; |
3760 | ib_sa_comp_mask comp_mask; | 3823 | ib_sa_comp_mask comp_mask; |
3761 | int ret; | 3824 | int ret; |
@@ -3774,7 +3837,24 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv, | |||
3774 | rec.qkey = cpu_to_be32(id_priv->qkey); | 3837 | rec.qkey = cpu_to_be32(id_priv->qkey); |
3775 | rdma_addr_get_sgid(dev_addr, &rec.port_gid); | 3838 | rdma_addr_get_sgid(dev_addr, &rec.port_gid); |
3776 | rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); | 3839 | rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); |
3777 | rec.join_state = 1; | 3840 | rec.join_state = mc->join_state; |
3841 | |||
3842 | if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) { | ||
3843 | ret = cma_query_sa_classport_info(id_priv->id.device, | ||
3844 | id_priv->id.port_num, | ||
3845 | &class_port_info); | ||
3846 | |||
3847 | if (ret) | ||
3848 | return ret; | ||
3849 | |||
3850 | if (!(ib_get_cpi_capmask2(&class_port_info) & | ||
3851 | IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) { | ||
3852 | pr_warn("RDMA CM: %s port %u Unable to multicast join\n" | ||
3853 | "RDMA CM: SM doesn't support Send Only Full Member option\n", | ||
3854 | id_priv->id.device->name, id_priv->id.port_num); | ||
3855 | return -EOPNOTSUPP; | ||
3856 | } | ||
3857 | } | ||
3778 | 3858 | ||
3779 | comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | | 3859 | comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | |
3780 | IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE | | 3860 | IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE | |
@@ -3843,6 +3923,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, | |||
3843 | struct sockaddr *addr = (struct sockaddr *)&mc->addr; | 3923 | struct sockaddr *addr = (struct sockaddr *)&mc->addr; |
3844 | struct net_device *ndev = NULL; | 3924 | struct net_device *ndev = NULL; |
3845 | enum ib_gid_type gid_type; | 3925 | enum ib_gid_type gid_type; |
3926 | bool send_only; | ||
3927 | |||
3928 | send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); | ||
3846 | 3929 | ||
3847 | if (cma_zero_addr((struct sockaddr *)&mc->addr)) | 3930 | if (cma_zero_addr((struct sockaddr *)&mc->addr)) |
3848 | return -EINVAL; | 3931 | return -EINVAL; |
@@ -3878,10 +3961,12 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, | |||
3878 | if (addr->sa_family == AF_INET) { | 3961 | if (addr->sa_family == AF_INET) { |
3879 | if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { | 3962 | if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { |
3880 | mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; | 3963 | mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; |
3881 | err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, | 3964 | if (!send_only) { |
3882 | true); | 3965 | err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, |
3883 | if (!err) | 3966 | true); |
3884 | mc->igmp_joined = true; | 3967 | if (!err) |
3968 | mc->igmp_joined = true; | ||
3969 | } | ||
3885 | } | 3970 | } |
3886 | } else { | 3971 | } else { |
3887 | if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) | 3972 | if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) |
@@ -3911,7 +3996,7 @@ out1: | |||
3911 | } | 3996 | } |
3912 | 3997 | ||
3913 | int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, | 3998 | int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, |
3914 | void *context) | 3999 | u8 join_state, void *context) |
3915 | { | 4000 | { |
3916 | struct rdma_id_private *id_priv; | 4001 | struct rdma_id_private *id_priv; |
3917 | struct cma_multicast *mc; | 4002 | struct cma_multicast *mc; |
@@ -3930,6 +4015,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, | |||
3930 | mc->context = context; | 4015 | mc->context = context; |
3931 | mc->id_priv = id_priv; | 4016 | mc->id_priv = id_priv; |
3932 | mc->igmp_joined = false; | 4017 | mc->igmp_joined = false; |
4018 | mc->join_state = join_state; | ||
3933 | spin_lock(&id_priv->lock); | 4019 | spin_lock(&id_priv->lock); |
3934 | list_add(&mc->list, &id_priv->mc_list); | 4020 | list_add(&mc->list, &id_priv->mc_list); |
3935 | spin_unlock(&id_priv->lock); | 4021 | spin_unlock(&id_priv->lock); |
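The cma.c changes above add a join_state argument to rdma_join_multicast() and, for a send-only full-member join, first query the SM's ClassPortInfo and fail with -EOPNOTSUPP unless IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT is advertised. Below is a minimal sketch of a kernel ULP using the new signature; the cm_id setup and the header that now carries the join-state enum (moved out of multicast.c, presumably rdma/ib_sa.h) are assumptions, only the rdma_join_multicast() prototype comes from the diff.

#include <rdma/rdma_cm.h>
#include <rdma/ib_sa.h>

/* Sketch: request a send-only full-member join on an already
 * address-resolved cm_id.  BIT(SENDONLY_FULLMEMBER_JOIN) selects
 * JoinState bit 3 of the MCMemberRecord; cma_join_ib_multicast()
 * above verifies SM support before issuing the SA query. */
static int my_sendonly_mcast_join(struct rdma_cm_id *cm_id,
				  struct sockaddr *mcast_addr, void *ctx)
{
	return rdma_join_multicast(cm_id, mcast_addr,
				   BIT(SENDONLY_FULLMEMBER_JOIN), ctx);
}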
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 5c155fa91eec..760ef603a468 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -311,6 +311,15 @@ static int read_port_immutable(struct ib_device *device) | |||
311 | return 0; | 311 | return 0; |
312 | } | 312 | } |
313 | 313 | ||
314 | void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len) | ||
315 | { | ||
316 | if (dev->get_dev_fw_str) | ||
317 | dev->get_dev_fw_str(dev, str, str_len); | ||
318 | else | ||
319 | str[0] = '\0'; | ||
320 | } | ||
321 | EXPORT_SYMBOL(ib_get_device_fw_str); | ||
322 | |||
314 | /** | 323 | /** |
315 | * ib_register_device - Register an IB device with IB core | 324 | * ib_register_device - Register an IB device with IB core |
316 | * @device:Device to register | 325 | * @device:Device to register |
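The device.c hunk adds ib_get_device_fw_str(), which calls an optional per-driver get_dev_fw_str hook and falls back to an empty string. A sketch of how a provider might wire this up; the driver structure, field names and version numbers are hypothetical placeholders, only the hook signature comes from the diff.

static void mydrv_get_dev_fw_str(struct ib_device *ibdev, char *str,
				 size_t str_len)
{
	struct mydrv_dev *dev = container_of(ibdev, struct mydrv_dev, ibdev);

	/* Format whatever version information the firmware reports. */
	snprintf(str, str_len, "%d.%d.%04d",
		 dev->fw_major, dev->fw_minor, dev->fw_subminor);
}

static int mydrv_register(struct mydrv_dev *dev)
{
	dev->ibdev.get_dev_fw_str = mydrv_get_dev_fw_str;
	return ib_register_device(&dev->ibdev, NULL);
}

Consumers such as the new fw_ver sysfs attribute then retrieve the string with ib_get_device_fw_str(ibdev, buf, len).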
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index f0572049d291..357624f8b9d3 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -183,15 +183,14 @@ static void free_cm_id(struct iwcm_id_private *cm_id_priv) | |||
183 | 183 | ||
184 | /* | 184 | /* |
185 | * Release a reference on cm_id. If the last reference is being | 185 | * Release a reference on cm_id. If the last reference is being |
186 | * released, enable the waiting thread (in iw_destroy_cm_id) to | 186 | * released, free the cm_id and return 1. |
187 | * get woken up, and return 1 if a thread is already waiting. | ||
188 | */ | 187 | */ |
189 | static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv) | 188 | static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv) |
190 | { | 189 | { |
191 | BUG_ON(atomic_read(&cm_id_priv->refcount)==0); | 190 | BUG_ON(atomic_read(&cm_id_priv->refcount)==0); |
192 | if (atomic_dec_and_test(&cm_id_priv->refcount)) { | 191 | if (atomic_dec_and_test(&cm_id_priv->refcount)) { |
193 | BUG_ON(!list_empty(&cm_id_priv->work_list)); | 192 | BUG_ON(!list_empty(&cm_id_priv->work_list)); |
194 | complete(&cm_id_priv->destroy_comp); | 193 | free_cm_id(cm_id_priv); |
195 | return 1; | 194 | return 1; |
196 | } | 195 | } |
197 | 196 | ||
@@ -208,19 +207,10 @@ static void add_ref(struct iw_cm_id *cm_id) | |||
208 | static void rem_ref(struct iw_cm_id *cm_id) | 207 | static void rem_ref(struct iw_cm_id *cm_id) |
209 | { | 208 | { |
210 | struct iwcm_id_private *cm_id_priv; | 209 | struct iwcm_id_private *cm_id_priv; |
211 | int cb_destroy; | ||
212 | 210 | ||
213 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | 211 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); |
214 | 212 | ||
215 | /* | 213 | (void)iwcm_deref_id(cm_id_priv); |
216 | * Test bit before deref in case the cm_id gets freed on another | ||
217 | * thread. | ||
218 | */ | ||
219 | cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags); | ||
220 | if (iwcm_deref_id(cm_id_priv) && cb_destroy) { | ||
221 | BUG_ON(!list_empty(&cm_id_priv->work_list)); | ||
222 | free_cm_id(cm_id_priv); | ||
223 | } | ||
224 | } | 214 | } |
225 | 215 | ||
226 | static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event); | 216 | static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event); |
@@ -370,6 +360,12 @@ static void destroy_cm_id(struct iw_cm_id *cm_id) | |||
370 | wait_event(cm_id_priv->connect_wait, | 360 | wait_event(cm_id_priv->connect_wait, |
371 | !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags)); | 361 | !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags)); |
372 | 362 | ||
363 | /* | ||
364 | * Since we're deleting the cm_id, drop any events that | ||
365 | * might arrive before the last dereference. | ||
366 | */ | ||
367 | set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags); | ||
368 | |||
373 | spin_lock_irqsave(&cm_id_priv->lock, flags); | 369 | spin_lock_irqsave(&cm_id_priv->lock, flags); |
374 | switch (cm_id_priv->state) { | 370 | switch (cm_id_priv->state) { |
375 | case IW_CM_STATE_LISTEN: | 371 | case IW_CM_STATE_LISTEN: |
@@ -433,13 +429,7 @@ void iw_destroy_cm_id(struct iw_cm_id *cm_id) | |||
433 | struct iwcm_id_private *cm_id_priv; | 429 | struct iwcm_id_private *cm_id_priv; |
434 | 430 | ||
435 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | 431 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); |
436 | BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)); | ||
437 | |||
438 | destroy_cm_id(cm_id); | 432 | destroy_cm_id(cm_id); |
439 | |||
440 | wait_for_completion(&cm_id_priv->destroy_comp); | ||
441 | |||
442 | free_cm_id(cm_id_priv); | ||
443 | } | 433 | } |
444 | EXPORT_SYMBOL(iw_destroy_cm_id); | 434 | EXPORT_SYMBOL(iw_destroy_cm_id); |
445 | 435 | ||
@@ -809,10 +799,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv, | |||
809 | ret = cm_id->cm_handler(cm_id, iw_event); | 799 | ret = cm_id->cm_handler(cm_id, iw_event); |
810 | if (ret) { | 800 | if (ret) { |
811 | iw_cm_reject(cm_id, NULL, 0); | 801 | iw_cm_reject(cm_id, NULL, 0); |
812 | set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags); | 802 | iw_destroy_cm_id(cm_id); |
813 | destroy_cm_id(cm_id); | ||
814 | if (atomic_read(&cm_id_priv->refcount)==0) | ||
815 | free_cm_id(cm_id_priv); | ||
816 | } | 803 | } |
817 | 804 | ||
818 | out: | 805 | out: |
@@ -1000,7 +987,6 @@ static void cm_work_handler(struct work_struct *_work) | |||
1000 | unsigned long flags; | 987 | unsigned long flags; |
1001 | int empty; | 988 | int empty; |
1002 | int ret = 0; | 989 | int ret = 0; |
1003 | int destroy_id; | ||
1004 | 990 | ||
1005 | spin_lock_irqsave(&cm_id_priv->lock, flags); | 991 | spin_lock_irqsave(&cm_id_priv->lock, flags); |
1006 | empty = list_empty(&cm_id_priv->work_list); | 992 | empty = list_empty(&cm_id_priv->work_list); |
@@ -1013,20 +999,14 @@ static void cm_work_handler(struct work_struct *_work) | |||
1013 | put_work(work); | 999 | put_work(work); |
1014 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 1000 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
1015 | 1001 | ||
1016 | ret = process_event(cm_id_priv, &levent); | 1002 | if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) { |
1017 | if (ret) { | 1003 | ret = process_event(cm_id_priv, &levent); |
1018 | set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags); | 1004 | if (ret) |
1019 | destroy_cm_id(&cm_id_priv->id); | 1005 | destroy_cm_id(&cm_id_priv->id); |
1020 | } | 1006 | } else |
1021 | BUG_ON(atomic_read(&cm_id_priv->refcount)==0); | 1007 | pr_debug("dropping event %d\n", levent.event); |
1022 | destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags); | 1008 | if (iwcm_deref_id(cm_id_priv)) |
1023 | if (iwcm_deref_id(cm_id_priv)) { | ||
1024 | if (destroy_id) { | ||
1025 | BUG_ON(!list_empty(&cm_id_priv->work_list)); | ||
1026 | free_cm_id(cm_id_priv); | ||
1027 | } | ||
1028 | return; | 1009 | return; |
1029 | } | ||
1030 | if (empty) | 1010 | if (empty) |
1031 | return; | 1011 | return; |
1032 | spin_lock_irqsave(&cm_id_priv->lock, flags); | 1012 | spin_lock_irqsave(&cm_id_priv->lock, flags); |
diff --git a/drivers/infiniband/core/iwcm.h b/drivers/infiniband/core/iwcm.h
index 3f6cc82564c8..82c2cd1b0a80 100644
--- a/drivers/infiniband/core/iwcm.h
+++ b/drivers/infiniband/core/iwcm.h
@@ -56,7 +56,7 @@ struct iwcm_id_private { | |||
56 | struct list_head work_free_list; | 56 | struct list_head work_free_list; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | #define IWCM_F_CALLBACK_DESTROY 1 | 59 | #define IWCM_F_DROP_EVENTS 1 |
60 | #define IWCM_F_CONNECT_WAIT 2 | 60 | #define IWCM_F_CONNECT_WAIT 2 |
61 | 61 | ||
62 | #endif /* IWCM_H */ | 62 | #endif /* IWCM_H */ |
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index b65e06c560d7..ade71e7f0131 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -37,6 +37,7 @@ | |||
37 | #define IWPM_MAPINFO_HASH_MASK (IWPM_MAPINFO_HASH_SIZE - 1) | 37 | #define IWPM_MAPINFO_HASH_MASK (IWPM_MAPINFO_HASH_SIZE - 1) |
38 | #define IWPM_REMINFO_HASH_SIZE 64 | 38 | #define IWPM_REMINFO_HASH_SIZE 64 |
39 | #define IWPM_REMINFO_HASH_MASK (IWPM_REMINFO_HASH_SIZE - 1) | 39 | #define IWPM_REMINFO_HASH_MASK (IWPM_REMINFO_HASH_SIZE - 1) |
40 | #define IWPM_MSG_SIZE 512 | ||
40 | 41 | ||
41 | static LIST_HEAD(iwpm_nlmsg_req_list); | 42 | static LIST_HEAD(iwpm_nlmsg_req_list); |
42 | static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock); | 43 | static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock); |
@@ -452,7 +453,7 @@ struct sk_buff *iwpm_create_nlmsg(u32 nl_op, struct nlmsghdr **nlh, | |||
452 | { | 453 | { |
453 | struct sk_buff *skb = NULL; | 454 | struct sk_buff *skb = NULL; |
454 | 455 | ||
455 | skb = dev_alloc_skb(NLMSG_GOODSIZE); | 456 | skb = dev_alloc_skb(IWPM_MSG_SIZE); |
456 | if (!skb) { | 457 | if (!skb) { |
457 | pr_err("%s Unable to allocate skb\n", __func__); | 458 | pr_err("%s Unable to allocate skb\n", __func__); |
458 | goto create_nlmsg_exit; | 459 | goto create_nlmsg_exit; |
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index a83ec28a147b..3a3c5d73bbfc 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -93,18 +93,6 @@ enum { | |||
93 | 93 | ||
94 | struct mcast_member; | 94 | struct mcast_member; |
95 | 95 | ||
96 | /* | ||
97 | * There are 4 types of join states: | ||
98 | * FullMember, NonMember, SendOnlyNonMember, SendOnlyFullMember. | ||
99 | */ | ||
100 | enum { | ||
101 | FULLMEMBER_JOIN, | ||
102 | NONMEMBER_JOIN, | ||
103 | SENDONLY_NONMEBER_JOIN, | ||
104 | SENDONLY_FULLMEMBER_JOIN, | ||
105 | NUM_JOIN_MEMBERSHIP_TYPES, | ||
106 | }; | ||
107 | |||
108 | struct mcast_group { | 96 | struct mcast_group { |
109 | struct ib_sa_mcmember_rec rec; | 97 | struct ib_sa_mcmember_rec rec; |
110 | struct rb_node node; | 98 | struct rb_node node; |
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 9b8c20c8209b..10469b0088b5 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -229,7 +229,10 @@ static void ibnl_rcv(struct sk_buff *skb) | |||
229 | int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh, | 229 | int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh, |
230 | __u32 pid) | 230 | __u32 pid) |
231 | { | 231 | { |
232 | return nlmsg_unicast(nls, skb, pid); | 232 | int err; |
233 | |||
234 | err = netlink_unicast(nls, skb, pid, 0); | ||
235 | return (err < 0) ? err : 0; | ||
233 | } | 236 | } |
234 | EXPORT_SYMBOL(ibnl_unicast); | 237 | EXPORT_SYMBOL(ibnl_unicast); |
235 | 238 | ||
@@ -252,6 +255,7 @@ int __init ibnl_init(void) | |||
252 | return -ENOMEM; | 255 | return -ENOMEM; |
253 | } | 256 | } |
254 | 257 | ||
258 | nls->sk_sndtimeo = 10 * HZ; | ||
255 | return 0; | 259 | return 0; |
256 | } | 260 | } |
257 | 261 | ||
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index e95538650dc6..b9bf7aa055e7 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -65,10 +65,17 @@ struct ib_sa_sm_ah { | |||
65 | u8 src_path_mask; | 65 | u8 src_path_mask; |
66 | }; | 66 | }; |
67 | 67 | ||
68 | struct ib_sa_classport_cache { | ||
69 | bool valid; | ||
70 | struct ib_class_port_info data; | ||
71 | }; | ||
72 | |||
68 | struct ib_sa_port { | 73 | struct ib_sa_port { |
69 | struct ib_mad_agent *agent; | 74 | struct ib_mad_agent *agent; |
70 | struct ib_sa_sm_ah *sm_ah; | 75 | struct ib_sa_sm_ah *sm_ah; |
71 | struct work_struct update_task; | 76 | struct work_struct update_task; |
77 | struct ib_sa_classport_cache classport_info; | ||
78 | spinlock_t classport_lock; /* protects class port info set */ | ||
72 | spinlock_t ah_lock; | 79 | spinlock_t ah_lock; |
73 | u8 port_num; | 80 | u8 port_num; |
74 | }; | 81 | }; |
@@ -998,6 +1005,13 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event | |||
998 | port->sm_ah = NULL; | 1005 | port->sm_ah = NULL; |
999 | spin_unlock_irqrestore(&port->ah_lock, flags); | 1006 | spin_unlock_irqrestore(&port->ah_lock, flags); |
1000 | 1007 | ||
1008 | if (event->event == IB_EVENT_SM_CHANGE || | ||
1009 | event->event == IB_EVENT_CLIENT_REREGISTER || | ||
1010 | event->event == IB_EVENT_LID_CHANGE) { | ||
1011 | spin_lock_irqsave(&port->classport_lock, flags); | ||
1012 | port->classport_info.valid = false; | ||
1013 | spin_unlock_irqrestore(&port->classport_lock, flags); | ||
1014 | } | ||
1001 | queue_work(ib_wq, &sa_dev->port[event->element.port_num - | 1015 | queue_work(ib_wq, &sa_dev->port[event->element.port_num - |
1002 | sa_dev->start_port].update_task); | 1016 | sa_dev->start_port].update_task); |
1003 | } | 1017 | } |
@@ -1719,6 +1733,7 @@ static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query, | |||
1719 | int status, | 1733 | int status, |
1720 | struct ib_sa_mad *mad) | 1734 | struct ib_sa_mad *mad) |
1721 | { | 1735 | { |
1736 | unsigned long flags; | ||
1722 | struct ib_sa_classport_info_query *query = | 1737 | struct ib_sa_classport_info_query *query = |
1723 | container_of(sa_query, struct ib_sa_classport_info_query, sa_query); | 1738 | container_of(sa_query, struct ib_sa_classport_info_query, sa_query); |
1724 | 1739 | ||
@@ -1728,6 +1743,16 @@ static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query, | |||
1728 | ib_unpack(classport_info_rec_table, | 1743 | ib_unpack(classport_info_rec_table, |
1729 | ARRAY_SIZE(classport_info_rec_table), | 1744 | ARRAY_SIZE(classport_info_rec_table), |
1730 | mad->data, &rec); | 1745 | mad->data, &rec); |
1746 | |||
1747 | spin_lock_irqsave(&sa_query->port->classport_lock, flags); | ||
1748 | if (!status && !sa_query->port->classport_info.valid) { | ||
1749 | memcpy(&sa_query->port->classport_info.data, &rec, | ||
1750 | sizeof(sa_query->port->classport_info.data)); | ||
1751 | |||
1752 | sa_query->port->classport_info.valid = true; | ||
1753 | } | ||
1754 | spin_unlock_irqrestore(&sa_query->port->classport_lock, flags); | ||
1755 | |||
1731 | query->callback(status, &rec, query->context); | 1756 | query->callback(status, &rec, query->context); |
1732 | } else { | 1757 | } else { |
1733 | query->callback(status, NULL, query->context); | 1758 | query->callback(status, NULL, query->context); |
@@ -1754,7 +1779,9 @@ int ib_sa_classport_info_rec_query(struct ib_sa_client *client, | |||
1754 | struct ib_sa_port *port; | 1779 | struct ib_sa_port *port; |
1755 | struct ib_mad_agent *agent; | 1780 | struct ib_mad_agent *agent; |
1756 | struct ib_sa_mad *mad; | 1781 | struct ib_sa_mad *mad; |
1782 | struct ib_class_port_info cached_class_port_info; | ||
1757 | int ret; | 1783 | int ret; |
1784 | unsigned long flags; | ||
1758 | 1785 | ||
1759 | if (!sa_dev) | 1786 | if (!sa_dev) |
1760 | return -ENODEV; | 1787 | return -ENODEV; |
@@ -1762,6 +1789,17 @@ int ib_sa_classport_info_rec_query(struct ib_sa_client *client, | |||
1762 | port = &sa_dev->port[port_num - sa_dev->start_port]; | 1789 | port = &sa_dev->port[port_num - sa_dev->start_port]; |
1763 | agent = port->agent; | 1790 | agent = port->agent; |
1764 | 1791 | ||
1792 | /* Use cached ClassPortInfo attribute if valid instead of sending mad */ | ||
1793 | spin_lock_irqsave(&port->classport_lock, flags); | ||
1794 | if (port->classport_info.valid && callback) { | ||
1795 | memcpy(&cached_class_port_info, &port->classport_info.data, | ||
1796 | sizeof(cached_class_port_info)); | ||
1797 | spin_unlock_irqrestore(&port->classport_lock, flags); | ||
1798 | callback(0, &cached_class_port_info, context); | ||
1799 | return 0; | ||
1800 | } | ||
1801 | spin_unlock_irqrestore(&port->classport_lock, flags); | ||
1802 | |||
1765 | query = kzalloc(sizeof(*query), gfp_mask); | 1803 | query = kzalloc(sizeof(*query), gfp_mask); |
1766 | if (!query) | 1804 | if (!query) |
1767 | return -ENOMEM; | 1805 | return -ENOMEM; |
@@ -1885,6 +1923,9 @@ static void ib_sa_add_one(struct ib_device *device) | |||
1885 | sa_dev->port[i].sm_ah = NULL; | 1923 | sa_dev->port[i].sm_ah = NULL; |
1886 | sa_dev->port[i].port_num = i + s; | 1924 | sa_dev->port[i].port_num = i + s; |
1887 | 1925 | ||
1926 | spin_lock_init(&sa_dev->port[i].classport_lock); | ||
1927 | sa_dev->port[i].classport_info.valid = false; | ||
1928 | |||
1888 | sa_dev->port[i].agent = | 1929 | sa_dev->port[i].agent = |
1889 | ib_register_mad_agent(device, i + s, IB_QPT_GSI, | 1930 | ib_register_mad_agent(device, i + s, IB_QPT_GSI, |
1890 | NULL, 0, send_handler, | 1931 | NULL, 0, send_handler, |
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 60df4f8e81be..15defefecb4f 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/stat.h> | 38 | #include <linux/stat.h> |
39 | #include <linux/string.h> | 39 | #include <linux/string.h> |
40 | #include <linux/netdevice.h> | 40 | #include <linux/netdevice.h> |
41 | #include <linux/ethtool.h> | ||
41 | 42 | ||
42 | #include <rdma/ib_mad.h> | 43 | #include <rdma/ib_mad.h> |
43 | #include <rdma/ib_pma.h> | 44 | #include <rdma/ib_pma.h> |
@@ -1200,16 +1201,28 @@ static ssize_t set_node_desc(struct device *device, | |||
1200 | return count; | 1201 | return count; |
1201 | } | 1202 | } |
1202 | 1203 | ||
1204 | static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, | ||
1205 | char *buf) | ||
1206 | { | ||
1207 | struct ib_device *dev = container_of(device, struct ib_device, dev); | ||
1208 | |||
1209 | ib_get_device_fw_str(dev, buf, PAGE_SIZE); | ||
1210 | strlcat(buf, "\n", PAGE_SIZE); | ||
1211 | return strlen(buf); | ||
1212 | } | ||
1213 | |||
1203 | static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL); | 1214 | static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL); |
1204 | static DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL); | 1215 | static DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL); |
1205 | static DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL); | 1216 | static DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL); |
1206 | static DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc, set_node_desc); | 1217 | static DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc, set_node_desc); |
1218 | static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); | ||
1207 | 1219 | ||
1208 | static struct device_attribute *ib_class_attributes[] = { | 1220 | static struct device_attribute *ib_class_attributes[] = { |
1209 | &dev_attr_node_type, | 1221 | &dev_attr_node_type, |
1210 | &dev_attr_sys_image_guid, | 1222 | &dev_attr_sys_image_guid, |
1211 | &dev_attr_node_guid, | 1223 | &dev_attr_node_guid, |
1212 | &dev_attr_node_desc | 1224 | &dev_attr_node_desc, |
1225 | &dev_attr_fw_ver, | ||
1213 | }; | 1226 | }; |
1214 | 1227 | ||
1215 | static void free_port_list_attributes(struct ib_device *device) | 1228 | static void free_port_list_attributes(struct ib_device *device) |
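With the attribute registered above, each RDMA device exposes its firmware version alongside node_type, node_guid and node_desc. A small userspace sketch reading it; the /sys/class/infiniband/<device>/fw_ver path follows the standard ib_device class layout.

#include <stdio.h>

/* Print the firmware version of an RDMA device, e.g. "mlx5_0". */
static int print_fw_ver(const char *ibdev)
{
	char path[128], ver[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/infiniband/%s/fw_ver", ibdev);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fgets(ver, sizeof(ver), f))
		printf("%s firmware: %s", ibdev, ver);
	fclose(f);
	return 0;
}

Devices whose driver does not implement get_dev_fw_str report an empty string here, matching the fallback in device.c.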
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index c0f3826abb30..2825ece91d3c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -106,6 +106,7 @@ struct ucma_multicast { | |||
106 | int events_reported; | 106 | int events_reported; |
107 | 107 | ||
108 | u64 uid; | 108 | u64 uid; |
109 | u8 join_state; | ||
109 | struct list_head list; | 110 | struct list_head list; |
110 | struct sockaddr_storage addr; | 111 | struct sockaddr_storage addr; |
111 | }; | 112 | }; |
@@ -1317,12 +1318,20 @@ static ssize_t ucma_process_join(struct ucma_file *file, | |||
1317 | struct ucma_multicast *mc; | 1318 | struct ucma_multicast *mc; |
1318 | struct sockaddr *addr; | 1319 | struct sockaddr *addr; |
1319 | int ret; | 1320 | int ret; |
1321 | u8 join_state; | ||
1320 | 1322 | ||
1321 | if (out_len < sizeof(resp)) | 1323 | if (out_len < sizeof(resp)) |
1322 | return -ENOSPC; | 1324 | return -ENOSPC; |
1323 | 1325 | ||
1324 | addr = (struct sockaddr *) &cmd->addr; | 1326 | addr = (struct sockaddr *) &cmd->addr; |
1325 | if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr))) | 1327 | if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr))) |
1328 | return -EINVAL; | ||
1329 | |||
1330 | if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER) | ||
1331 | join_state = BIT(FULLMEMBER_JOIN); | ||
1332 | else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER) | ||
1333 | join_state = BIT(SENDONLY_FULLMEMBER_JOIN); | ||
1334 | else | ||
1326 | return -EINVAL; | 1335 | return -EINVAL; |
1327 | 1336 | ||
1328 | ctx = ucma_get_ctx(file, cmd->id); | 1337 | ctx = ucma_get_ctx(file, cmd->id); |
@@ -1335,10 +1344,11 @@ static ssize_t ucma_process_join(struct ucma_file *file, | |||
1335 | ret = -ENOMEM; | 1344 | ret = -ENOMEM; |
1336 | goto err1; | 1345 | goto err1; |
1337 | } | 1346 | } |
1338 | 1347 | mc->join_state = join_state; | |
1339 | mc->uid = cmd->uid; | 1348 | mc->uid = cmd->uid; |
1340 | memcpy(&mc->addr, addr, cmd->addr_size); | 1349 | memcpy(&mc->addr, addr, cmd->addr_size); |
1341 | ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc); | 1350 | ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr, |
1351 | join_state, mc); | ||
1342 | if (ret) | 1352 | if (ret) |
1343 | goto err2; | 1353 | goto err2; |
1344 | 1354 | ||
@@ -1382,7 +1392,7 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file, | |||
1382 | join_cmd.uid = cmd.uid; | 1392 | join_cmd.uid = cmd.uid; |
1383 | join_cmd.id = cmd.id; | 1393 | join_cmd.id = cmd.id; |
1384 | join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr); | 1394 | join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr); |
1385 | join_cmd.reserved = 0; | 1395 | join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER; |
1386 | memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); | 1396 | memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); |
1387 | 1397 | ||
1388 | return ucma_process_join(file, &join_cmd, out_len); | 1398 | return ucma_process_join(file, &join_cmd, out_len); |
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 612ccfd39bf9..df26a741cda6 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -116,6 +116,7 @@ struct ib_uverbs_event_file { | |||
116 | struct ib_uverbs_file { | 116 | struct ib_uverbs_file { |
117 | struct kref ref; | 117 | struct kref ref; |
118 | struct mutex mutex; | 118 | struct mutex mutex; |
119 | struct mutex cleanup_mutex; /* protect cleanup */ | ||
119 | struct ib_uverbs_device *device; | 120 | struct ib_uverbs_device *device; |
120 | struct ib_ucontext *ucontext; | 121 | struct ib_ucontext *ucontext; |
121 | struct ib_event_handler event_handler; | 122 | struct ib_event_handler event_handler; |
@@ -162,6 +163,10 @@ struct ib_uqp_object { | |||
162 | struct ib_uxrcd_object *uxrcd; | 163 | struct ib_uxrcd_object *uxrcd; |
163 | }; | 164 | }; |
164 | 165 | ||
166 | struct ib_uwq_object { | ||
167 | struct ib_uevent_object uevent; | ||
168 | }; | ||
169 | |||
165 | struct ib_ucq_object { | 170 | struct ib_ucq_object { |
166 | struct ib_uobject uobject; | 171 | struct ib_uobject uobject; |
167 | struct ib_uverbs_file *uverbs_file; | 172 | struct ib_uverbs_file *uverbs_file; |
@@ -181,6 +186,8 @@ extern struct idr ib_uverbs_qp_idr; | |||
181 | extern struct idr ib_uverbs_srq_idr; | 186 | extern struct idr ib_uverbs_srq_idr; |
182 | extern struct idr ib_uverbs_xrcd_idr; | 187 | extern struct idr ib_uverbs_xrcd_idr; |
183 | extern struct idr ib_uverbs_rule_idr; | 188 | extern struct idr ib_uverbs_rule_idr; |
189 | extern struct idr ib_uverbs_wq_idr; | ||
190 | extern struct idr ib_uverbs_rwq_ind_tbl_idr; | ||
184 | 191 | ||
185 | void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj); | 192 | void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj); |
186 | 193 | ||
@@ -199,6 +206,7 @@ void ib_uverbs_release_uevent(struct ib_uverbs_file *file, | |||
199 | void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context); | 206 | void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context); |
200 | void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr); | 207 | void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr); |
201 | void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr); | 208 | void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr); |
209 | void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr); | ||
202 | void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr); | 210 | void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr); |
203 | void ib_uverbs_event_handler(struct ib_event_handler *handler, | 211 | void ib_uverbs_event_handler(struct ib_event_handler *handler, |
204 | struct ib_event *event); | 212 | struct ib_event *event); |
@@ -219,6 +227,7 @@ struct ib_uverbs_flow_spec { | |||
219 | struct ib_uverbs_flow_spec_eth eth; | 227 | struct ib_uverbs_flow_spec_eth eth; |
220 | struct ib_uverbs_flow_spec_ipv4 ipv4; | 228 | struct ib_uverbs_flow_spec_ipv4 ipv4; |
221 | struct ib_uverbs_flow_spec_tcp_udp tcp_udp; | 229 | struct ib_uverbs_flow_spec_tcp_udp tcp_udp; |
230 | struct ib_uverbs_flow_spec_ipv6 ipv6; | ||
222 | }; | 231 | }; |
223 | }; | 232 | }; |
224 | 233 | ||
@@ -275,5 +284,10 @@ IB_UVERBS_DECLARE_EX_CMD(destroy_flow); | |||
275 | IB_UVERBS_DECLARE_EX_CMD(query_device); | 284 | IB_UVERBS_DECLARE_EX_CMD(query_device); |
276 | IB_UVERBS_DECLARE_EX_CMD(create_cq); | 285 | IB_UVERBS_DECLARE_EX_CMD(create_cq); |
277 | IB_UVERBS_DECLARE_EX_CMD(create_qp); | 286 | IB_UVERBS_DECLARE_EX_CMD(create_qp); |
287 | IB_UVERBS_DECLARE_EX_CMD(create_wq); | ||
288 | IB_UVERBS_DECLARE_EX_CMD(modify_wq); | ||
289 | IB_UVERBS_DECLARE_EX_CMD(destroy_wq); | ||
290 | IB_UVERBS_DECLARE_EX_CMD(create_rwq_ind_table); | ||
291 | IB_UVERBS_DECLARE_EX_CMD(destroy_rwq_ind_table); | ||
278 | 292 | ||
279 | #endif /* UVERBS_H */ | 293 | #endif /* UVERBS_H */ |
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 825021d1008b..f6647318138d 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -57,6 +57,8 @@ static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" }; | |||
57 | static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" }; | 57 | static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" }; |
58 | static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" }; | 58 | static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" }; |
59 | static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" }; | 59 | static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" }; |
60 | static struct uverbs_lock_class wq_lock_class = { .name = "WQ-uobj" }; | ||
61 | static struct uverbs_lock_class rwq_ind_table_lock_class = { .name = "IND_TBL-uobj" }; | ||
60 | 62 | ||
61 | /* | 63 | /* |
62 | * The ib_uobject locking scheme is as follows: | 64 | * The ib_uobject locking scheme is as follows: |
@@ -243,6 +245,27 @@ static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context) | |||
243 | return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0); | 245 | return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0); |
244 | } | 246 | } |
245 | 247 | ||
248 | static struct ib_wq *idr_read_wq(int wq_handle, struct ib_ucontext *context) | ||
249 | { | ||
250 | return idr_read_obj(&ib_uverbs_wq_idr, wq_handle, context, 0); | ||
251 | } | ||
252 | |||
253 | static void put_wq_read(struct ib_wq *wq) | ||
254 | { | ||
255 | put_uobj_read(wq->uobject); | ||
256 | } | ||
257 | |||
258 | static struct ib_rwq_ind_table *idr_read_rwq_indirection_table(int ind_table_handle, | ||
259 | struct ib_ucontext *context) | ||
260 | { | ||
261 | return idr_read_obj(&ib_uverbs_rwq_ind_tbl_idr, ind_table_handle, context, 0); | ||
262 | } | ||
263 | |||
264 | static void put_rwq_indirection_table_read(struct ib_rwq_ind_table *ind_table) | ||
265 | { | ||
266 | put_uobj_read(ind_table->uobject); | ||
267 | } | ||
268 | |||
246 | static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context) | 269 | static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context) |
247 | { | 270 | { |
248 | struct ib_uobject *uobj; | 271 | struct ib_uobject *uobj; |
@@ -326,6 +349,8 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, | |||
326 | INIT_LIST_HEAD(&ucontext->qp_list); | 349 | INIT_LIST_HEAD(&ucontext->qp_list); |
327 | INIT_LIST_HEAD(&ucontext->srq_list); | 350 | INIT_LIST_HEAD(&ucontext->srq_list); |
328 | INIT_LIST_HEAD(&ucontext->ah_list); | 351 | INIT_LIST_HEAD(&ucontext->ah_list); |
352 | INIT_LIST_HEAD(&ucontext->wq_list); | ||
353 | INIT_LIST_HEAD(&ucontext->rwq_ind_tbl_list); | ||
329 | INIT_LIST_HEAD(&ucontext->xrcd_list); | 354 | INIT_LIST_HEAD(&ucontext->xrcd_list); |
330 | INIT_LIST_HEAD(&ucontext->rule_list); | 355 | INIT_LIST_HEAD(&ucontext->rule_list); |
331 | rcu_read_lock(); | 356 | rcu_read_lock(); |
@@ -1750,6 +1775,8 @@ static int create_qp(struct ib_uverbs_file *file, | |||
1750 | struct ib_qp_init_attr attr = {}; | 1775 | struct ib_qp_init_attr attr = {}; |
1751 | struct ib_uverbs_ex_create_qp_resp resp; | 1776 | struct ib_uverbs_ex_create_qp_resp resp; |
1752 | int ret; | 1777 | int ret; |
1778 | struct ib_rwq_ind_table *ind_tbl = NULL; | ||
1779 | bool has_sq = true; | ||
1753 | 1780 | ||
1754 | if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW)) | 1781 | if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW)) |
1755 | return -EPERM; | 1782 | return -EPERM; |
@@ -1761,6 +1788,32 @@ static int create_qp(struct ib_uverbs_file *file, | |||
1761 | init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, | 1788 | init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, |
1762 | &qp_lock_class); | 1789 | &qp_lock_class); |
1763 | down_write(&obj->uevent.uobject.mutex); | 1790 | down_write(&obj->uevent.uobject.mutex); |
1791 | if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) + | ||
1792 | sizeof(cmd->rwq_ind_tbl_handle) && | ||
1793 | (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) { | ||
1794 | ind_tbl = idr_read_rwq_indirection_table(cmd->rwq_ind_tbl_handle, | ||
1795 | file->ucontext); | ||
1796 | if (!ind_tbl) { | ||
1797 | ret = -EINVAL; | ||
1798 | goto err_put; | ||
1799 | } | ||
1800 | |||
1801 | attr.rwq_ind_tbl = ind_tbl; | ||
1802 | } | ||
1803 | |||
1804 | if ((cmd_sz >= offsetof(typeof(*cmd), reserved1) + | ||
1805 | sizeof(cmd->reserved1)) && cmd->reserved1) { | ||
1806 | ret = -EOPNOTSUPP; | ||
1807 | goto err_put; | ||
1808 | } | ||
1809 | |||
1810 | if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) { | ||
1811 | ret = -EINVAL; | ||
1812 | goto err_put; | ||
1813 | } | ||
1814 | |||
1815 | if (ind_tbl && !cmd->max_send_wr) | ||
1816 | has_sq = false; | ||
1764 | 1817 | ||
1765 | if (cmd->qp_type == IB_QPT_XRC_TGT) { | 1818 | if (cmd->qp_type == IB_QPT_XRC_TGT) { |
1766 | xrcd = idr_read_xrcd(cmd->pd_handle, file->ucontext, | 1819 | xrcd = idr_read_xrcd(cmd->pd_handle, file->ucontext, |
@@ -1784,20 +1837,24 @@ static int create_qp(struct ib_uverbs_file *file, | |||
1784 | } | 1837 | } |
1785 | } | 1838 | } |
1786 | 1839 | ||
1787 | if (cmd->recv_cq_handle != cmd->send_cq_handle) { | 1840 | if (!ind_tbl) { |
1788 | rcq = idr_read_cq(cmd->recv_cq_handle, | 1841 | if (cmd->recv_cq_handle != cmd->send_cq_handle) { |
1789 | file->ucontext, 0); | 1842 | rcq = idr_read_cq(cmd->recv_cq_handle, |
1790 | if (!rcq) { | 1843 | file->ucontext, 0); |
1791 | ret = -EINVAL; | 1844 | if (!rcq) { |
1792 | goto err_put; | 1845 | ret = -EINVAL; |
1846 | goto err_put; | ||
1847 | } | ||
1793 | } | 1848 | } |
1794 | } | 1849 | } |
1795 | } | 1850 | } |
1796 | 1851 | ||
1797 | scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq); | 1852 | if (has_sq) |
1798 | rcq = rcq ?: scq; | 1853 | scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq); |
1854 | if (!ind_tbl) | ||
1855 | rcq = rcq ?: scq; | ||
1799 | pd = idr_read_pd(cmd->pd_handle, file->ucontext); | 1856 | pd = idr_read_pd(cmd->pd_handle, file->ucontext); |
1800 | if (!pd || !scq) { | 1857 | if (!pd || (!scq && has_sq)) { |
1801 | ret = -EINVAL; | 1858 | ret = -EINVAL; |
1802 | goto err_put; | 1859 | goto err_put; |
1803 | } | 1860 | } |
@@ -1864,16 +1921,20 @@ static int create_qp(struct ib_uverbs_file *file, | |||
1864 | qp->send_cq = attr.send_cq; | 1921 | qp->send_cq = attr.send_cq; |
1865 | qp->recv_cq = attr.recv_cq; | 1922 | qp->recv_cq = attr.recv_cq; |
1866 | qp->srq = attr.srq; | 1923 | qp->srq = attr.srq; |
1924 | qp->rwq_ind_tbl = ind_tbl; | ||
1867 | qp->event_handler = attr.event_handler; | 1925 | qp->event_handler = attr.event_handler; |
1868 | qp->qp_context = attr.qp_context; | 1926 | qp->qp_context = attr.qp_context; |
1869 | qp->qp_type = attr.qp_type; | 1927 | qp->qp_type = attr.qp_type; |
1870 | atomic_set(&qp->usecnt, 0); | 1928 | atomic_set(&qp->usecnt, 0); |
1871 | atomic_inc(&pd->usecnt); | 1929 | atomic_inc(&pd->usecnt); |
1872 | atomic_inc(&attr.send_cq->usecnt); | 1930 | if (attr.send_cq) |
1931 | atomic_inc(&attr.send_cq->usecnt); | ||
1873 | if (attr.recv_cq) | 1932 | if (attr.recv_cq) |
1874 | atomic_inc(&attr.recv_cq->usecnt); | 1933 | atomic_inc(&attr.recv_cq->usecnt); |
1875 | if (attr.srq) | 1934 | if (attr.srq) |
1876 | atomic_inc(&attr.srq->usecnt); | 1935 | atomic_inc(&attr.srq->usecnt); |
1936 | if (ind_tbl) | ||
1937 | atomic_inc(&ind_tbl->usecnt); | ||
1877 | } | 1938 | } |
1878 | qp->uobject = &obj->uevent.uobject; | 1939 | qp->uobject = &obj->uevent.uobject; |
1879 | 1940 | ||
@@ -1913,6 +1974,8 @@ static int create_qp(struct ib_uverbs_file *file, | |||
1913 | put_cq_read(rcq); | 1974 | put_cq_read(rcq); |
1914 | if (srq) | 1975 | if (srq) |
1915 | put_srq_read(srq); | 1976 | put_srq_read(srq); |
1977 | if (ind_tbl) | ||
1978 | put_rwq_indirection_table_read(ind_tbl); | ||
1916 | 1979 | ||
1917 | mutex_lock(&file->mutex); | 1980 | mutex_lock(&file->mutex); |
1918 | list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list); | 1981 | list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list); |
@@ -1940,6 +2003,8 @@ err_put: | |||
1940 | put_cq_read(rcq); | 2003 | put_cq_read(rcq); |
1941 | if (srq) | 2004 | if (srq) |
1942 | put_srq_read(srq); | 2005 | put_srq_read(srq); |
2006 | if (ind_tbl) | ||
2007 | put_rwq_indirection_table_read(ind_tbl); | ||
1943 | 2008 | ||
1944 | put_uobj_write(&obj->uevent.uobject); | 2009 | put_uobj_write(&obj->uevent.uobject); |
1945 | return ret; | 2010 | return ret; |
@@ -2033,7 +2098,7 @@ int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file, | |||
2033 | if (err) | 2098 | if (err) |
2034 | return err; | 2099 | return err; |
2035 | 2100 | ||
2036 | if (cmd.comp_mask) | 2101 | if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK) |
2037 | return -EINVAL; | 2102 | return -EINVAL; |
2038 | 2103 | ||
2039 | if (cmd.reserved) | 2104 | if (cmd.reserved) |
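The create_qp changes above allow a QP to be created against an RWQ indirection table instead of its own receive queue: when IB_UVERBS_CREATE_QP_MASK_IND_TABLE is set, max_recv_wr, max_recv_sge and is_srq must be zero, and the send side becomes optional. Roughly, the kernel-side init attribute ends up looking like the sketch below; the field names are taken from the diff, while the surrounding setup and whether a kernel ULP passes rwq_ind_tbl straight to ib_create_qp() are assumptions.

	struct ib_qp_init_attr attr = {};
	struct ib_qp *qp;

	/* RSS-style raw packet QP: receive traffic is spread across the
	 * WQs behind the indirection table, so no recv CQ/SRQ and no
	 * receive capabilities on the QP itself. */
	attr.qp_type          = IB_QPT_RAW_PACKET;
	attr.send_cq          = send_cq;   /* only if a send queue is wanted */
	attr.rwq_ind_tbl      = ind_tbl;   /* struct ib_rwq_ind_table * */
	attr.cap.max_recv_wr  = 0;
	attr.cap.max_recv_sge = 0;

	qp = ib_create_qp(pd, &attr);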
@@ -3040,6 +3105,15 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, | |||
3040 | memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask, | 3105 | memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask, |
3041 | sizeof(struct ib_flow_ipv4_filter)); | 3106 | sizeof(struct ib_flow_ipv4_filter)); |
3042 | break; | 3107 | break; |
3108 | case IB_FLOW_SPEC_IPV6: | ||
3109 | ib_spec->ipv6.size = sizeof(struct ib_flow_spec_ipv6); | ||
3110 | if (ib_spec->ipv6.size != kern_spec->ipv6.size) | ||
3111 | return -EINVAL; | ||
3112 | memcpy(&ib_spec->ipv6.val, &kern_spec->ipv6.val, | ||
3113 | sizeof(struct ib_flow_ipv6_filter)); | ||
3114 | memcpy(&ib_spec->ipv6.mask, &kern_spec->ipv6.mask, | ||
3115 | sizeof(struct ib_flow_ipv6_filter)); | ||
3116 | break; | ||
3043 | case IB_FLOW_SPEC_TCP: | 3117 | case IB_FLOW_SPEC_TCP: |
3044 | case IB_FLOW_SPEC_UDP: | 3118 | case IB_FLOW_SPEC_UDP: |
3045 | ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp); | 3119 | ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp); |
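The large hunk that follows adds the extended WQ verbs (ib_uverbs_ex_create_wq/modify_wq/destroy_wq plus the RWQ indirection-table commands). The init attribute it builds maps onto the kernel WQ object roughly as sketched below; ib_create_wq() is added by the verbs.c part of this series and the IB_WQT_RQ type name is assumed, while ib_destroy_wq() and the attribute fields appear in the hunk itself.

	struct ib_wq_init_attr wq_attr = {};
	struct ib_wq *wq;

	wq_attr.wq_type       = IB_WQT_RQ;   /* receive work queue */
	wq_attr.max_wr        = 256;
	wq_attr.max_sge       = 1;
	wq_attr.cq            = cq;
	wq_attr.wq_context    = my_ctx;
	wq_attr.event_handler = my_wq_event_handler;

	wq = ib_create_wq(pd, &wq_attr);
	if (IS_ERR(wq))
		return PTR_ERR(wq);

	/* ... typically placed behind an ib_rwq_ind_table for RSS ... */

	ib_destroy_wq(wq);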
@@ -3056,6 +3130,445 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, | |||
3056 | return 0; | 3130 | return 0; |
3057 | } | 3131 | } |
3058 | 3132 | ||
3133 | int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file, | ||
3134 | struct ib_device *ib_dev, | ||
3135 | struct ib_udata *ucore, | ||
3136 | struct ib_udata *uhw) | ||
3137 | { | ||
3138 | struct ib_uverbs_ex_create_wq cmd = {}; | ||
3139 | struct ib_uverbs_ex_create_wq_resp resp = {}; | ||
3140 | struct ib_uwq_object *obj; | ||
3141 | int err = 0; | ||
3142 | struct ib_cq *cq; | ||
3143 | struct ib_pd *pd; | ||
3144 | struct ib_wq *wq; | ||
3145 | struct ib_wq_init_attr wq_init_attr = {}; | ||
3146 | size_t required_cmd_sz; | ||
3147 | size_t required_resp_len; | ||
3148 | |||
3149 | required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge); | ||
3150 | required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn); | ||
3151 | |||
3152 | if (ucore->inlen < required_cmd_sz) | ||
3153 | return -EINVAL; | ||
3154 | |||
3155 | if (ucore->outlen < required_resp_len) | ||
3156 | return -ENOSPC; | ||
3157 | |||
3158 | if (ucore->inlen > sizeof(cmd) && | ||
3159 | !ib_is_udata_cleared(ucore, sizeof(cmd), | ||
3160 | ucore->inlen - sizeof(cmd))) | ||
3161 | return -EOPNOTSUPP; | ||
3162 | |||
3163 | err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); | ||
3164 | if (err) | ||
3165 | return err; | ||
3166 | |||
3167 | if (cmd.comp_mask) | ||
3168 | return -EOPNOTSUPP; | ||
3169 | |||
3170 | obj = kmalloc(sizeof(*obj), GFP_KERNEL); | ||
3171 | if (!obj) | ||
3172 | return -ENOMEM; | ||
3173 | |||
3174 | init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, | ||
3175 | &wq_lock_class); | ||
3176 | down_write(&obj->uevent.uobject.mutex); | ||
3177 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); | ||
3178 | if (!pd) { | ||
3179 | err = -EINVAL; | ||
3180 | goto err_uobj; | ||
3181 | } | ||
3182 | |||
3183 | cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); | ||
3184 | if (!cq) { | ||
3185 | err = -EINVAL; | ||
3186 | goto err_put_pd; | ||
3187 | } | ||
3188 | |||
3189 | wq_init_attr.cq = cq; | ||
3190 | wq_init_attr.max_sge = cmd.max_sge; | ||
3191 | wq_init_attr.max_wr = cmd.max_wr; | ||
3192 | wq_init_attr.wq_context = file; | ||
3193 | wq_init_attr.wq_type = cmd.wq_type; | ||
3194 | wq_init_attr.event_handler = ib_uverbs_wq_event_handler; | ||
3195 | obj->uevent.events_reported = 0; | ||
3196 | INIT_LIST_HEAD(&obj->uevent.event_list); | ||
3197 | wq = pd->device->create_wq(pd, &wq_init_attr, uhw); | ||
3198 | if (IS_ERR(wq)) { | ||
3199 | err = PTR_ERR(wq); | ||
3200 | goto err_put_cq; | ||
3201 | } | ||
3202 | |||
3203 | wq->uobject = &obj->uevent.uobject; | ||
3204 | obj->uevent.uobject.object = wq; | ||
3205 | wq->wq_type = wq_init_attr.wq_type; | ||
3206 | wq->cq = cq; | ||
3207 | wq->pd = pd; | ||
3208 | wq->device = pd->device; | ||
3209 | wq->wq_context = wq_init_attr.wq_context; | ||
3210 | atomic_set(&wq->usecnt, 0); | ||
3211 | atomic_inc(&pd->usecnt); | ||
3212 | atomic_inc(&cq->usecnt); | ||
3213 | wq->uobject = &obj->uevent.uobject; | ||
3214 | obj->uevent.uobject.object = wq; | ||
3215 | err = idr_add_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject); | ||
3216 | if (err) | ||
3217 | goto destroy_wq; | ||
3218 | |||
3219 | memset(&resp, 0, sizeof(resp)); | ||
3220 | resp.wq_handle = obj->uevent.uobject.id; | ||
3221 | resp.max_sge = wq_init_attr.max_sge; | ||
3222 | resp.max_wr = wq_init_attr.max_wr; | ||
3223 | resp.wqn = wq->wq_num; | ||
3224 | resp.response_length = required_resp_len; | ||
3225 | err = ib_copy_to_udata(ucore, | ||
3226 | &resp, resp.response_length); | ||
3227 | if (err) | ||
3228 | goto err_copy; | ||
3229 | |||
3230 | put_pd_read(pd); | ||
3231 | put_cq_read(cq); | ||
3232 | |||
3233 | mutex_lock(&file->mutex); | ||
3234 | list_add_tail(&obj->uevent.uobject.list, &file->ucontext->wq_list); | ||
3235 | mutex_unlock(&file->mutex); | ||
3236 | |||
3237 | obj->uevent.uobject.live = 1; | ||
3238 | up_write(&obj->uevent.uobject.mutex); | ||
3239 | return 0; | ||
3240 | |||
3241 | err_copy: | ||
3242 | idr_remove_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject); | ||
3243 | destroy_wq: | ||
3244 | ib_destroy_wq(wq); | ||
3245 | err_put_cq: | ||
3246 | put_cq_read(cq); | ||
3247 | err_put_pd: | ||
3248 | put_pd_read(pd); | ||
3249 | err_uobj: | ||
3250 | put_uobj_write(&obj->uevent.uobject); | ||
3251 | |||
3252 | return err; | ||
3253 | } | ||
3254 | |||
3255 | int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file, | ||
3256 | struct ib_device *ib_dev, | ||
3257 | struct ib_udata *ucore, | ||
3258 | struct ib_udata *uhw) | ||
3259 | { | ||
3260 | struct ib_uverbs_ex_destroy_wq cmd = {}; | ||
3261 | struct ib_uverbs_ex_destroy_wq_resp resp = {}; | ||
3262 | struct ib_wq *wq; | ||
3263 | struct ib_uobject *uobj; | ||
3264 | struct ib_uwq_object *obj; | ||
3265 | size_t required_cmd_sz; | ||
3266 | size_t required_resp_len; | ||
3267 | int ret; | ||
3268 | |||
3269 | required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle); | ||
3270 | required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); | ||
3271 | |||
3272 | if (ucore->inlen < required_cmd_sz) | ||
3273 | return -EINVAL; | ||
3274 | |||
3275 | if (ucore->outlen < required_resp_len) | ||
3276 | return -ENOSPC; | ||
3277 | |||
3278 | if (ucore->inlen > sizeof(cmd) && | ||
3279 | !ib_is_udata_cleared(ucore, sizeof(cmd), | ||
3280 | ucore->inlen - sizeof(cmd))) | ||
3281 | return -EOPNOTSUPP; | ||
3282 | |||
3283 | ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); | ||
3284 | if (ret) | ||
3285 | return ret; | ||
3286 | |||
3287 | if (cmd.comp_mask) | ||
3288 | return -EOPNOTSUPP; | ||
3289 | |||
3290 | resp.response_length = required_resp_len; | ||
3291 | uobj = idr_write_uobj(&ib_uverbs_wq_idr, cmd.wq_handle, | ||
3292 | file->ucontext); | ||
3293 | if (!uobj) | ||
3294 | return -EINVAL; | ||
3295 | |||
3296 | wq = uobj->object; | ||
3297 | obj = container_of(uobj, struct ib_uwq_object, uevent.uobject); | ||
3298 | ret = ib_destroy_wq(wq); | ||
3299 | if (!ret) | ||
3300 | uobj->live = 0; | ||
3301 | |||
3302 | put_uobj_write(uobj); | ||
3303 | if (ret) | ||
3304 | return ret; | ||
3305 | |||
3306 | idr_remove_uobj(&ib_uverbs_wq_idr, uobj); | ||
3307 | |||
3308 | mutex_lock(&file->mutex); | ||
3309 | list_del(&uobj->list); | ||
3310 | mutex_unlock(&file->mutex); | ||
3311 | |||
3312 | ib_uverbs_release_uevent(file, &obj->uevent); | ||
3313 | resp.events_reported = obj->uevent.events_reported; | ||
3314 | put_uobj(uobj); | ||
3315 | |||
3316 | ret = ib_copy_to_udata(ucore, &resp, resp.response_length); | ||
3317 | if (ret) | ||
3318 | return ret; | ||
3319 | |||
3320 | return 0; | ||
3321 | } | ||
3322 | |||
3323 | int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file, | ||
3324 | struct ib_device *ib_dev, | ||
3325 | struct ib_udata *ucore, | ||
3326 | struct ib_udata *uhw) | ||
3327 | { | ||
3328 | struct ib_uverbs_ex_modify_wq cmd = {}; | ||
3329 | struct ib_wq *wq; | ||
3330 | struct ib_wq_attr wq_attr = {}; | ||
3331 | size_t required_cmd_sz; | ||
3332 | int ret; | ||
3333 | |||
3334 | required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state); | ||
3335 | if (ucore->inlen < required_cmd_sz) | ||
3336 | return -EINVAL; | ||
3337 | |||
3338 | if (ucore->inlen > sizeof(cmd) && | ||
3339 | !ib_is_udata_cleared(ucore, sizeof(cmd), | ||
3340 | ucore->inlen - sizeof(cmd))) | ||
3341 | return -EOPNOTSUPP; | ||
3342 | |||
3343 | ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); | ||
3344 | if (ret) | ||
3345 | return ret; | ||
3346 | |||
3347 | if (!cmd.attr_mask) | ||
3348 | return -EINVAL; | ||
3349 | |||
3350 | if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE)) | ||
3351 | return -EINVAL; | ||
3352 | |||
3353 | wq = idr_read_wq(cmd.wq_handle, file->ucontext); | ||
3354 | if (!wq) | ||
3355 | return -EINVAL; | ||
3356 | |||
3357 | wq_attr.curr_wq_state = cmd.curr_wq_state; | ||
3358 | wq_attr.wq_state = cmd.wq_state; | ||
3359 | ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw); | ||
3360 | put_wq_read(wq); | ||
3361 | return ret; | ||
3362 | } | ||
3363 | |||
3364 | int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file, | ||
3365 | struct ib_device *ib_dev, | ||
3366 | struct ib_udata *ucore, | ||
3367 | struct ib_udata *uhw) | ||
3368 | { | ||
3369 | struct ib_uverbs_ex_create_rwq_ind_table cmd = {}; | ||
3370 | struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {}; | ||
3371 | struct ib_uobject *uobj; | ||
3372 | int err = 0; | ||
3373 | struct ib_rwq_ind_table_init_attr init_attr = {}; | ||
3374 | struct ib_rwq_ind_table *rwq_ind_tbl; | ||
3375 | struct ib_wq **wqs = NULL; | ||
3376 | u32 *wqs_handles = NULL; | ||
3377 | struct ib_wq *wq = NULL; | ||
3378 | int i, j, num_read_wqs; | ||
3379 | u32 num_wq_handles; | ||
3380 | u32 expected_in_size; | ||
3381 | size_t required_cmd_sz_header; | ||
3382 | size_t required_resp_len; | ||
3383 | |||
3384 | required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size); | ||
3385 | required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num); | ||
3386 | |||
3387 | if (ucore->inlen < required_cmd_sz_header) | ||
3388 | return -EINVAL; | ||
3389 | |||
3390 | if (ucore->outlen < required_resp_len) | ||
3391 | return -ENOSPC; | ||
3392 | |||
3393 | err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header); | ||
3394 | if (err) | ||
3395 | return err; | ||
3396 | |||
3397 | ucore->inbuf += required_cmd_sz_header; | ||
3398 | ucore->inlen -= required_cmd_sz_header; | ||
3399 | |||
3400 | if (cmd.comp_mask) | ||
3401 | return -EOPNOTSUPP; | ||
3402 | |||
3403 | if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE) | ||
3404 | return -EINVAL; | ||
3405 | |||
3406 | num_wq_handles = 1 << cmd.log_ind_tbl_size; | ||
3407 | expected_in_size = num_wq_handles * sizeof(__u32); | ||
3408 | if (num_wq_handles == 1) | ||
3409 | /* input size for wq handles is u64 aligned */ | ||
3410 | expected_in_size += sizeof(__u32); | ||
3411 | |||
3412 | if (ucore->inlen < expected_in_size) | ||
3413 | return -EINVAL; | ||
3414 | |||
3415 | if (ucore->inlen > expected_in_size && | ||
3416 | !ib_is_udata_cleared(ucore, expected_in_size, | ||
3417 | ucore->inlen - expected_in_size)) | ||
3418 | return -EOPNOTSUPP; | ||
3419 | |||
3420 | wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles), | ||
3421 | GFP_KERNEL); | ||
3422 | if (!wqs_handles) | ||
3423 | return -ENOMEM; | ||
3424 | |||
3425 | err = ib_copy_from_udata(wqs_handles, ucore, | ||
3426 | num_wq_handles * sizeof(__u32)); | ||
3427 | if (err) | ||
3428 | goto err_free; | ||
3429 | |||
3430 | wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL); | ||
3431 | if (!wqs) { | ||
3432 | err = -ENOMEM; | ||
3433 | goto err_free; | ||
3434 | } | ||
3435 | |||
3436 | for (num_read_wqs = 0; num_read_wqs < num_wq_handles; | ||
3437 | num_read_wqs++) { | ||
3438 | wq = idr_read_wq(wqs_handles[num_read_wqs], file->ucontext); | ||
3439 | if (!wq) { | ||
3440 | err = -EINVAL; | ||
3441 | goto put_wqs; | ||
3442 | } | ||
3443 | |||
3444 | wqs[num_read_wqs] = wq; | ||
3445 | } | ||
3446 | |||
3447 | uobj = kmalloc(sizeof(*uobj), GFP_KERNEL); | ||
3448 | if (!uobj) { | ||
3449 | err = -ENOMEM; | ||
3450 | goto put_wqs; | ||
3451 | } | ||
3452 | |||
3453 | init_uobj(uobj, 0, file->ucontext, &rwq_ind_table_lock_class); | ||
3454 | down_write(&uobj->mutex); | ||
3455 | init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size; | ||
3456 | init_attr.ind_tbl = wqs; | ||
3457 | rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw); | ||
3458 | |||
3459 | if (IS_ERR(rwq_ind_tbl)) { | ||
3460 | err = PTR_ERR(rwq_ind_tbl); | ||
3461 | goto err_uobj; | ||
3462 | } | ||
3463 | |||
3464 | rwq_ind_tbl->ind_tbl = wqs; | ||
3465 | rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size; | ||
3466 | rwq_ind_tbl->uobject = uobj; | ||
3467 | uobj->object = rwq_ind_tbl; | ||
3468 | rwq_ind_tbl->device = ib_dev; | ||
3469 | atomic_set(&rwq_ind_tbl->usecnt, 0); | ||
3470 | |||
3471 | for (i = 0; i < num_wq_handles; i++) | ||
3472 | atomic_inc(&wqs[i]->usecnt); | ||
3473 | |||
3474 | err = idr_add_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj); | ||
3475 | if (err) | ||
3476 | goto destroy_ind_tbl; | ||
3477 | |||
3478 | resp.ind_tbl_handle = uobj->id; | ||
3479 | resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num; | ||
3480 | resp.response_length = required_resp_len; | ||
3481 | |||
3482 | err = ib_copy_to_udata(ucore, | ||
3483 | &resp, resp.response_length); | ||
3484 | if (err) | ||
3485 | goto err_copy; | ||
3486 | |||
3487 | kfree(wqs_handles); | ||
3488 | |||
3489 | for (j = 0; j < num_read_wqs; j++) | ||
3490 | put_wq_read(wqs[j]); | ||
3491 | |||
3492 | mutex_lock(&file->mutex); | ||
3493 | list_add_tail(&uobj->list, &file->ucontext->rwq_ind_tbl_list); | ||
3494 | mutex_unlock(&file->mutex); | ||
3495 | |||
3496 | uobj->live = 1; | ||
3497 | |||
3498 | up_write(&uobj->mutex); | ||
3499 | return 0; | ||
3500 | |||
3501 | err_copy: | ||
3502 | idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj); | ||
3503 | destroy_ind_tbl: | ||
3504 | ib_destroy_rwq_ind_table(rwq_ind_tbl); | ||
3505 | err_uobj: | ||
3506 | put_uobj_write(uobj); | ||
3507 | put_wqs: | ||
3508 | for (j = 0; j < num_read_wqs; j++) | ||
3509 | put_wq_read(wqs[j]); | ||
3510 | err_free: | ||
3511 | kfree(wqs_handles); | ||
3512 | kfree(wqs); | ||
3513 | return err; | ||
3514 | } | ||
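
As the handler above parses it, the extended create_rwq_ind_table request is a fixed header followed by a u64-aligned array of WQ handles; the real layout is defined by the uAPI header, so the sketch below only mirrors what the code reads:

        /*
         * <fixed header, ending at log_ind_tbl_size>   required_cmd_sz_header
         * __u32 wq_handles[1 << log_ind_tbl_size];     one handle per table entry
         * __u32 padding;                               only when there is a single
         *                                              handle, keeping the handle
         *                                              area u64 aligned
         * <anything beyond this must be zero, as checked with ib_is_udata_cleared()>
         */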
3515 | |||
3516 | int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file, | ||
3517 | struct ib_device *ib_dev, | ||
3518 | struct ib_udata *ucore, | ||
3519 | struct ib_udata *uhw) | ||
3520 | { | ||
3521 | struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {}; | ||
3522 | struct ib_rwq_ind_table *rwq_ind_tbl; | ||
3523 | struct ib_uobject *uobj; | ||
3524 | int ret; | ||
3525 | struct ib_wq **ind_tbl; | ||
3526 | size_t required_cmd_sz; | ||
3527 | |||
3528 | required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle); | ||
3529 | |||
3530 | if (ucore->inlen < required_cmd_sz) | ||
3531 | return -EINVAL; | ||
3532 | |||
3533 | if (ucore->inlen > sizeof(cmd) && | ||
3534 | !ib_is_udata_cleared(ucore, sizeof(cmd), | ||
3535 | ucore->inlen - sizeof(cmd))) | ||
3536 | return -EOPNOTSUPP; | ||
3537 | |||
3538 | ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); | ||
3539 | if (ret) | ||
3540 | return ret; | ||
3541 | |||
3542 | if (cmd.comp_mask) | ||
3543 | return -EOPNOTSUPP; | ||
3544 | |||
3545 | uobj = idr_write_uobj(&ib_uverbs_rwq_ind_tbl_idr, cmd.ind_tbl_handle, | ||
3546 | file->ucontext); | ||
3547 | if (!uobj) | ||
3548 | return -EINVAL; | ||
3549 | rwq_ind_tbl = uobj->object; | ||
3550 | ind_tbl = rwq_ind_tbl->ind_tbl; | ||
3551 | |||
3552 | ret = ib_destroy_rwq_ind_table(rwq_ind_tbl); | ||
3553 | if (!ret) | ||
3554 | uobj->live = 0; | ||
3555 | |||
3556 | put_uobj_write(uobj); | ||
3557 | |||
3558 | if (ret) | ||
3559 | return ret; | ||
3560 | |||
3561 | idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj); | ||
3562 | |||
3563 | mutex_lock(&file->mutex); | ||
3564 | list_del(&uobj->list); | ||
3565 | mutex_unlock(&file->mutex); | ||
3566 | |||
3567 | put_uobj(uobj); | ||
3568 | kfree(ind_tbl); | ||
3569 | return ret; | ||
3570 | } | ||
3571 | |||
3059 | int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, | 3572 | int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, |
3060 | struct ib_device *ib_dev, | 3573 | struct ib_device *ib_dev, |
3061 | struct ib_udata *ucore, | 3574 | struct ib_udata *ucore, |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 31f422a70623..0012fa58c105 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -76,6 +76,8 @@ DEFINE_IDR(ib_uverbs_qp_idr); | |||
76 | DEFINE_IDR(ib_uverbs_srq_idr); | 76 | DEFINE_IDR(ib_uverbs_srq_idr); |
77 | DEFINE_IDR(ib_uverbs_xrcd_idr); | 77 | DEFINE_IDR(ib_uverbs_xrcd_idr); |
78 | DEFINE_IDR(ib_uverbs_rule_idr); | 78 | DEFINE_IDR(ib_uverbs_rule_idr); |
79 | DEFINE_IDR(ib_uverbs_wq_idr); | ||
80 | DEFINE_IDR(ib_uverbs_rwq_ind_tbl_idr); | ||
79 | 81 | ||
80 | static DEFINE_SPINLOCK(map_lock); | 82 | static DEFINE_SPINLOCK(map_lock); |
81 | static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); | 83 | static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); |
@@ -130,6 +132,11 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file, | |||
130 | [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device, | 132 | [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device, |
131 | [IB_USER_VERBS_EX_CMD_CREATE_CQ] = ib_uverbs_ex_create_cq, | 133 | [IB_USER_VERBS_EX_CMD_CREATE_CQ] = ib_uverbs_ex_create_cq, |
132 | [IB_USER_VERBS_EX_CMD_CREATE_QP] = ib_uverbs_ex_create_qp, | 134 | [IB_USER_VERBS_EX_CMD_CREATE_QP] = ib_uverbs_ex_create_qp, |
135 | [IB_USER_VERBS_EX_CMD_CREATE_WQ] = ib_uverbs_ex_create_wq, | ||
136 | [IB_USER_VERBS_EX_CMD_MODIFY_WQ] = ib_uverbs_ex_modify_wq, | ||
137 | [IB_USER_VERBS_EX_CMD_DESTROY_WQ] = ib_uverbs_ex_destroy_wq, | ||
138 | [IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table, | ||
139 | [IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table, | ||
133 | }; | 140 | }; |
134 | 141 | ||
135 | static void ib_uverbs_add_one(struct ib_device *device); | 142 | static void ib_uverbs_add_one(struct ib_device *device); |
@@ -265,6 +272,27 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, | |||
265 | kfree(uqp); | 272 | kfree(uqp); |
266 | } | 273 | } |
267 | 274 | ||
275 | list_for_each_entry_safe(uobj, tmp, &context->rwq_ind_tbl_list, list) { | ||
276 | struct ib_rwq_ind_table *rwq_ind_tbl = uobj->object; | ||
277 | struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl; | ||
278 | |||
279 | idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj); | ||
280 | ib_destroy_rwq_ind_table(rwq_ind_tbl); | ||
281 | kfree(ind_tbl); | ||
282 | kfree(uobj); | ||
283 | } | ||
284 | |||
285 | list_for_each_entry_safe(uobj, tmp, &context->wq_list, list) { | ||
286 | struct ib_wq *wq = uobj->object; | ||
287 | struct ib_uwq_object *uwq = | ||
288 | container_of(uobj, struct ib_uwq_object, uevent.uobject); | ||
289 | |||
290 | idr_remove_uobj(&ib_uverbs_wq_idr, uobj); | ||
291 | ib_destroy_wq(wq); | ||
292 | ib_uverbs_release_uevent(file, &uwq->uevent); | ||
293 | kfree(uwq); | ||
294 | } | ||
295 | |||
268 | list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) { | 296 | list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) { |
269 | struct ib_srq *srq = uobj->object; | 297 | struct ib_srq *srq = uobj->object; |
270 | struct ib_uevent_object *uevent = | 298 | struct ib_uevent_object *uevent = |
@@ -568,6 +596,16 @@ void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr) | |||
568 | &uobj->events_reported); | 596 | &uobj->events_reported); |
569 | } | 597 | } |
570 | 598 | ||
599 | void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr) | ||
600 | { | ||
601 | struct ib_uevent_object *uobj = container_of(event->element.wq->uobject, | ||
602 | struct ib_uevent_object, uobject); | ||
603 | |||
604 | ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle, | ||
605 | event->event, &uobj->event_list, | ||
606 | &uobj->events_reported); | ||
607 | } | ||
608 | |||
571 | void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr) | 609 | void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr) |
572 | { | 610 | { |
573 | struct ib_uevent_object *uobj; | 611 | struct ib_uevent_object *uobj; |
@@ -931,6 +969,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp) | |||
931 | file->async_file = NULL; | 969 | file->async_file = NULL; |
932 | kref_init(&file->ref); | 970 | kref_init(&file->ref); |
933 | mutex_init(&file->mutex); | 971 | mutex_init(&file->mutex); |
972 | mutex_init(&file->cleanup_mutex); | ||
934 | 973 | ||
935 | filp->private_data = file; | 974 | filp->private_data = file; |
936 | kobject_get(&dev->kobj); | 975 | kobject_get(&dev->kobj); |
@@ -956,18 +995,20 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp) | |||
956 | { | 995 | { |
957 | struct ib_uverbs_file *file = filp->private_data; | 996 | struct ib_uverbs_file *file = filp->private_data; |
958 | struct ib_uverbs_device *dev = file->device; | 997 | struct ib_uverbs_device *dev = file->device; |
959 | struct ib_ucontext *ucontext = NULL; | 998 | |
999 | mutex_lock(&file->cleanup_mutex); | ||
1000 | if (file->ucontext) { | ||
1001 | ib_uverbs_cleanup_ucontext(file, file->ucontext); | ||
1002 | file->ucontext = NULL; | ||
1003 | } | ||
1004 | mutex_unlock(&file->cleanup_mutex); | ||
960 | 1005 | ||
961 | mutex_lock(&file->device->lists_mutex); | 1006 | mutex_lock(&file->device->lists_mutex); |
962 | ucontext = file->ucontext; | ||
963 | file->ucontext = NULL; | ||
964 | if (!file->is_closed) { | 1007 | if (!file->is_closed) { |
965 | list_del(&file->list); | 1008 | list_del(&file->list); |
966 | file->is_closed = 1; | 1009 | file->is_closed = 1; |
967 | } | 1010 | } |
968 | mutex_unlock(&file->device->lists_mutex); | 1011 | mutex_unlock(&file->device->lists_mutex); |
969 | if (ucontext) | ||
970 | ib_uverbs_cleanup_ucontext(file, ucontext); | ||
971 | 1012 | ||
972 | if (file->async_file) | 1013 | if (file->async_file) |
973 | kref_put(&file->async_file->ref, ib_uverbs_release_event_file); | 1014 | kref_put(&file->async_file->ref, ib_uverbs_release_event_file); |
@@ -1181,22 +1222,30 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev, | |||
1181 | mutex_lock(&uverbs_dev->lists_mutex); | 1222 | mutex_lock(&uverbs_dev->lists_mutex); |
1182 | while (!list_empty(&uverbs_dev->uverbs_file_list)) { | 1223 | while (!list_empty(&uverbs_dev->uverbs_file_list)) { |
1183 | struct ib_ucontext *ucontext; | 1224 | struct ib_ucontext *ucontext; |
1184 | |||
1185 | file = list_first_entry(&uverbs_dev->uverbs_file_list, | 1225 | file = list_first_entry(&uverbs_dev->uverbs_file_list, |
1186 | struct ib_uverbs_file, list); | 1226 | struct ib_uverbs_file, list); |
1187 | file->is_closed = 1; | 1227 | file->is_closed = 1; |
1188 | ucontext = file->ucontext; | ||
1189 | list_del(&file->list); | 1228 | list_del(&file->list); |
1190 | file->ucontext = NULL; | ||
1191 | kref_get(&file->ref); | 1229 | kref_get(&file->ref); |
1192 | mutex_unlock(&uverbs_dev->lists_mutex); | 1230 | mutex_unlock(&uverbs_dev->lists_mutex); |
1193 | /* We must release the mutex before going ahead and calling | 1231 | |
1194 | * disassociate_ucontext. disassociate_ucontext might end up | ||
1195 | * indirectly calling uverbs_close, for example due to freeing | ||
1196 | * the resources (e.g mmput). | ||
1197 | */ | ||
1198 | ib_uverbs_event_handler(&file->event_handler, &event); | 1232 | ib_uverbs_event_handler(&file->event_handler, &event); |
1233 | |||
1234 | mutex_lock(&file->cleanup_mutex); | ||
1235 | ucontext = file->ucontext; | ||
1236 | file->ucontext = NULL; | ||
1237 | mutex_unlock(&file->cleanup_mutex); | ||
1238 | |||
1239 | /* At this point ib_uverbs_close cannot be running | ||
1240 | * ib_uverbs_cleanup_ucontext | ||
1241 | */ | ||
1199 | if (ucontext) { | 1242 | if (ucontext) { |
1243 | /* We must release the mutex before going ahead and | ||
1244 | * calling disassociate_ucontext. disassociate_ucontext | ||
1245 | * might end up indirectly calling uverbs_close, | ||
1246 | * for example due to freeing the resources | ||
1247 | * (e.g mmput). | ||
1248 | */ | ||
1200 | ib_dev->disassociate_ucontext(ucontext); | 1249 | ib_dev->disassociate_ucontext(ucontext); |
1201 | ib_uverbs_cleanup_ucontext(file, ucontext); | 1250 | ib_uverbs_cleanup_ucontext(file, ucontext); |
1202 | } | 1251 | } |
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 6298f54b4137..2e813edcddab 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c | |||
@@ -758,6 +758,12 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, | |||
758 | struct ib_qp *qp; | 758 | struct ib_qp *qp; |
759 | int ret; | 759 | int ret; |
760 | 760 | ||
761 | if (qp_init_attr->rwq_ind_tbl && | ||
762 | (qp_init_attr->recv_cq || | ||
763 | qp_init_attr->srq || qp_init_attr->cap.max_recv_wr || | ||
764 | qp_init_attr->cap.max_recv_sge)) | ||
765 | return ERR_PTR(-EINVAL); | ||
766 | |||
761 | /* | 767 | /* |
762 | * If the callers is using the RDMA API calculate the resources | 768 | * If the callers is using the RDMA API calculate the resources |
763 | * needed for the RDMA READ/WRITE operations. | 769 | * needed for the RDMA READ/WRITE operations. |
@@ -775,6 +781,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, | |||
775 | qp->real_qp = qp; | 781 | qp->real_qp = qp; |
776 | qp->uobject = NULL; | 782 | qp->uobject = NULL; |
777 | qp->qp_type = qp_init_attr->qp_type; | 783 | qp->qp_type = qp_init_attr->qp_type; |
784 | qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl; | ||
778 | 785 | ||
779 | atomic_set(&qp->usecnt, 0); | 786 | atomic_set(&qp->usecnt, 0); |
780 | qp->mrs_used = 0; | 787 | qp->mrs_used = 0; |
@@ -792,7 +799,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, | |||
792 | qp->srq = NULL; | 799 | qp->srq = NULL; |
793 | } else { | 800 | } else { |
794 | qp->recv_cq = qp_init_attr->recv_cq; | 801 | qp->recv_cq = qp_init_attr->recv_cq; |
795 | atomic_inc(&qp_init_attr->recv_cq->usecnt); | 802 | if (qp_init_attr->recv_cq) |
803 | atomic_inc(&qp_init_attr->recv_cq->usecnt); | ||
796 | qp->srq = qp_init_attr->srq; | 804 | qp->srq = qp_init_attr->srq; |
797 | if (qp->srq) | 805 | if (qp->srq) |
798 | atomic_inc(&qp_init_attr->srq->usecnt); | 806 | atomic_inc(&qp_init_attr->srq->usecnt); |
@@ -803,7 +811,10 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, | |||
803 | qp->xrcd = NULL; | 811 | qp->xrcd = NULL; |
804 | 812 | ||
805 | atomic_inc(&pd->usecnt); | 813 | atomic_inc(&pd->usecnt); |
806 | atomic_inc(&qp_init_attr->send_cq->usecnt); | 814 | if (qp_init_attr->send_cq) |
815 | atomic_inc(&qp_init_attr->send_cq->usecnt); | ||
816 | if (qp_init_attr->rwq_ind_tbl) | ||
817 | atomic_inc(&qp->rwq_ind_tbl->usecnt); | ||
807 | 818 | ||
808 | if (qp_init_attr->cap.max_rdma_ctxs) { | 819 | if (qp_init_attr->cap.max_rdma_ctxs) { |
809 | ret = rdma_rw_init_mrs(qp, qp_init_attr); | 820 | ret = rdma_rw_init_mrs(qp, qp_init_attr); |
@@ -1283,6 +1294,7 @@ int ib_destroy_qp(struct ib_qp *qp) | |||
1283 | struct ib_pd *pd; | 1294 | struct ib_pd *pd; |
1284 | struct ib_cq *scq, *rcq; | 1295 | struct ib_cq *scq, *rcq; |
1285 | struct ib_srq *srq; | 1296 | struct ib_srq *srq; |
1297 | struct ib_rwq_ind_table *ind_tbl; | ||
1286 | int ret; | 1298 | int ret; |
1287 | 1299 | ||
1288 | WARN_ON_ONCE(qp->mrs_used > 0); | 1300 | WARN_ON_ONCE(qp->mrs_used > 0); |
@@ -1297,6 +1309,7 @@ int ib_destroy_qp(struct ib_qp *qp) | |||
1297 | scq = qp->send_cq; | 1309 | scq = qp->send_cq; |
1298 | rcq = qp->recv_cq; | 1310 | rcq = qp->recv_cq; |
1299 | srq = qp->srq; | 1311 | srq = qp->srq; |
1312 | ind_tbl = qp->rwq_ind_tbl; | ||
1300 | 1313 | ||
1301 | if (!qp->uobject) | 1314 | if (!qp->uobject) |
1302 | rdma_rw_cleanup_mrs(qp); | 1315 | rdma_rw_cleanup_mrs(qp); |
@@ -1311,6 +1324,8 @@ int ib_destroy_qp(struct ib_qp *qp) | |||
1311 | atomic_dec(&rcq->usecnt); | 1324 | atomic_dec(&rcq->usecnt); |
1312 | if (srq) | 1325 | if (srq) |
1313 | atomic_dec(&srq->usecnt); | 1326 | atomic_dec(&srq->usecnt); |
1327 | if (ind_tbl) | ||
1328 | atomic_dec(&ind_tbl->usecnt); | ||
1314 | } | 1329 | } |
1315 | 1330 | ||
1316 | return ret; | 1331 | return ret; |
@@ -1558,6 +1573,150 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd) | |||
1558 | } | 1573 | } |
1559 | EXPORT_SYMBOL(ib_dealloc_xrcd); | 1574 | EXPORT_SYMBOL(ib_dealloc_xrcd); |
1560 | 1575 | ||
1576 | /** | ||
1577 | * ib_create_wq - Creates a WQ associated with the specified protection | ||
1578 | * domain. | ||
1579 | * @pd: The protection domain associated with the WQ. | ||
1580 | * @wq_init_attr: A list of initial attributes required to create the | ||
1581 | * WQ. If WQ creation succeeds, then the attributes are updated to | ||
1582 | * the actual capabilities of the created WQ. | ||
1583 | * | ||
1584 | * wq_init_attr->max_wr and wq_init_attr->max_sge determine | ||
1585 | * the requested size of the WQ, and are set to the actual values allocated | ||
1586 | * on return. | ||
1587 | * If ib_create_wq() succeeds, then max_wr and max_sge will always be | ||
1588 | * at least as large as the requested values. | ||
1589 | */ | ||
1590 | struct ib_wq *ib_create_wq(struct ib_pd *pd, | ||
1591 | struct ib_wq_init_attr *wq_attr) | ||
1592 | { | ||
1593 | struct ib_wq *wq; | ||
1594 | |||
1595 | if (!pd->device->create_wq) | ||
1596 | return ERR_PTR(-ENOSYS); | ||
1597 | |||
1598 | wq = pd->device->create_wq(pd, wq_attr, NULL); | ||
1599 | if (!IS_ERR(wq)) { | ||
1600 | wq->event_handler = wq_attr->event_handler; | ||
1601 | wq->wq_context = wq_attr->wq_context; | ||
1602 | wq->wq_type = wq_attr->wq_type; | ||
1603 | wq->cq = wq_attr->cq; | ||
1604 | wq->device = pd->device; | ||
1605 | wq->pd = pd; | ||
1606 | wq->uobject = NULL; | ||
1607 | atomic_inc(&pd->usecnt); | ||
1608 | atomic_inc(&wq_attr->cq->usecnt); | ||
1609 | atomic_set(&wq->usecnt, 0); | ||
1610 | } | ||
1611 | return wq; | ||
1612 | } | ||
1613 | EXPORT_SYMBOL(ib_create_wq); | ||
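
A minimal kernel-side usage sketch of the new call, assuming an already allocated pd and cq and a receive-type WQ; IB_WQT_RQ and the sizes are illustrative assumptions, and error handling is trimmed:

        struct ib_wq_init_attr wq_attr = {
                .wq_type       = IB_WQT_RQ,     /* assumed: receive WQ type */
                .max_wr        = 256,           /* requested queue depth */
                .max_sge       = 1,             /* scatter entries per WR */
                .cq            = cq,            /* completions are reported here */
                .wq_context    = NULL,
                .event_handler = NULL,          /* optional async event callback */
        };
        struct ib_wq *wq;

        wq = ib_create_wq(pd, &wq_attr);
        if (IS_ERR(wq))
                return PTR_ERR(wq);
        /* On success, max_wr/max_sge now hold the values actually allocated. */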
1614 | |||
1615 | /** | ||
1616 | * ib_destroy_wq - Destroys the specified WQ. | ||
1617 | * @wq: The WQ to destroy. | ||
1618 | */ | ||
1619 | int ib_destroy_wq(struct ib_wq *wq) | ||
1620 | { | ||
1621 | int err; | ||
1622 | struct ib_cq *cq = wq->cq; | ||
1623 | struct ib_pd *pd = wq->pd; | ||
1624 | |||
1625 | if (atomic_read(&wq->usecnt)) | ||
1626 | return -EBUSY; | ||
1627 | |||
1628 | err = wq->device->destroy_wq(wq); | ||
1629 | if (!err) { | ||
1630 | atomic_dec(&pd->usecnt); | ||
1631 | atomic_dec(&cq->usecnt); | ||
1632 | } | ||
1633 | return err; | ||
1634 | } | ||
1635 | EXPORT_SYMBOL(ib_destroy_wq); | ||
1636 | |||
1637 | /** | ||
1638 | * ib_modify_wq - Modifies the specified WQ. | ||
1639 | * @wq: The WQ to modify. | ||
1640 | * @wq_attr: On input, specifies the WQ attributes to modify; on output, | ||
1641 | * the current values of the selected attributes are returned. | ||
1642 | * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ | ||
1643 | * are being modified. | ||
1644 | */ | ||
1645 | int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, | ||
1646 | u32 wq_attr_mask) | ||
1647 | { | ||
1648 | int err; | ||
1649 | |||
1650 | if (!wq->device->modify_wq) | ||
1651 | return -ENOSYS; | ||
1652 | |||
1653 | err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL); | ||
1654 | return err; | ||
1655 | } | ||
1656 | EXPORT_SYMBOL(ib_modify_wq); | ||
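
Continuing the sketch, a freshly created WQ normally has to be moved to the ready state before it can receive; IB_WQS_RDY is the expected state name but is an assumption here, while IB_WQ_STATE is the mask bit checked by the uverbs handler above:

        struct ib_wq_attr attr = {
                .wq_state = IB_WQS_RDY,         /* assumed enum value */
        };
        int err;

        err = ib_modify_wq(wq, &attr, IB_WQ_STATE);
        if (err) {
                ib_destroy_wq(wq);
                return err;
        }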
1657 | |||
1658 | /* | ||
1659 | * ib_create_rwq_ind_table - Creates an RQ Indirection Table. | ||
1660 | * @device: The device on which to create the rwq indirection table. | ||
1661 | * @ib_rwq_ind_table_init_attr: A list of initial attributes required to | ||
1662 | * create the Indirection Table. | ||
1663 | * | ||
1664 | * Note: The lifetime of ib_rwq_ind_table_init_attr->ind_tbl must be at | ||
1665 | * least as long as that of the created ib_rwq_ind_table object; the | ||
1666 | * caller is responsible for allocating and freeing that memory. | ||
1667 | */ | ||
1668 | struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, | ||
1669 | struct ib_rwq_ind_table_init_attr *init_attr) | ||
1670 | { | ||
1671 | struct ib_rwq_ind_table *rwq_ind_table; | ||
1672 | int i; | ||
1673 | u32 table_size; | ||
1674 | |||
1675 | if (!device->create_rwq_ind_table) | ||
1676 | return ERR_PTR(-ENOSYS); | ||
1677 | |||
1678 | table_size = (1 << init_attr->log_ind_tbl_size); | ||
1679 | rwq_ind_table = device->create_rwq_ind_table(device, | ||
1680 | init_attr, NULL); | ||
1681 | if (IS_ERR(rwq_ind_table)) | ||
1682 | return rwq_ind_table; | ||
1683 | |||
1684 | rwq_ind_table->ind_tbl = init_attr->ind_tbl; | ||
1685 | rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size; | ||
1686 | rwq_ind_table->device = device; | ||
1687 | rwq_ind_table->uobject = NULL; | ||
1688 | atomic_set(&rwq_ind_table->usecnt, 0); | ||
1689 | |||
1690 | for (i = 0; i < table_size; i++) | ||
1691 | atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt); | ||
1692 | |||
1693 | return rwq_ind_table; | ||
1694 | } | ||
1695 | EXPORT_SYMBOL(ib_create_rwq_ind_table); | ||
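
Putting the pieces together for RSS: several WQs are grouped in an indirection table, and a QP is then created that points at the table instead of owning its own receive resources, which is exactly what the new check in ib_create_qp() above enforces. In this sketch wqs[] is a caller-owned array of four WQs created as above, and IB_QPT_RAW_PACKET is an assumed QP type:

        struct ib_rwq_ind_table_init_attr ind_attr = {
                .log_ind_tbl_size = 2,          /* 1 << 2 == 4 WQs */
                .ind_tbl          = wqs,        /* struct ib_wq *wqs[4], caller-owned */
        };
        struct ib_rwq_ind_table *ind_tbl;
        struct ib_qp_init_attr qp_attr = {};
        struct ib_qp *qp;

        ind_tbl = ib_create_rwq_ind_table(pd->device, &ind_attr);
        if (IS_ERR(ind_tbl))
                return PTR_ERR(ind_tbl);

        qp_attr.qp_type     = IB_QPT_RAW_PACKET;        /* assumed for RSS use */
        qp_attr.rwq_ind_tbl = ind_tbl;
        /*
         * recv_cq, srq and cap.max_recv_* must stay zero/NULL here (see the
         * new check in ib_create_qp()); send_cq may also be NULL, which is
         * why its usecnt bump is now guarded.
         */
        qp = ib_create_qp(pd, &qp_attr);
        if (IS_ERR(qp))
                return PTR_ERR(qp);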
1696 | |||
1697 | /* | ||
1698 | * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table. | ||
1699 | * @wq_ind_table: The Indirection Table to destroy. | ||
1700 | */ | ||
1701 | int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table) | ||
1702 | { | ||
1703 | int err, i; | ||
1704 | u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size); | ||
1705 | struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl; | ||
1706 | |||
1707 | if (atomic_read(&rwq_ind_table->usecnt)) | ||
1708 | return -EBUSY; | ||
1709 | |||
1710 | err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table); | ||
1711 | if (!err) { | ||
1712 | for (i = 0; i < table_size; i++) | ||
1713 | atomic_dec(&ind_tbl[i]->usecnt); | ||
1714 | } | ||
1715 | |||
1716 | return err; | ||
1717 | } | ||
1718 | EXPORT_SYMBOL(ib_destroy_rwq_ind_table); | ||
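
Teardown runs in the opposite order, since both destroy calls above return -EBUSY while usecnt is non-zero and the ind_tbl array itself stays owned by the caller (names continue the sketch above):

        int i;

        ib_destroy_qp(qp);                      /* drops ind_tbl->usecnt */
        ib_destroy_rwq_ind_table(ind_tbl);      /* drops each wqs[i]->usecnt */
        for (i = 0; i < ARRAY_SIZE(wqs); i++)
                ib_destroy_wq(wqs[i]);          /* succeeds now that usecnt is 0 */
        /* wqs[] is caller-owned; free it here only if it was allocated dynamically. */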
1719 | |||
1561 | struct ib_flow *ib_create_flow(struct ib_qp *qp, | 1720 | struct ib_flow *ib_create_flow(struct ib_qp *qp, |
1562 | struct ib_flow_attr *flow_attr, | 1721 | struct ib_flow_attr *flow_attr, |
1563 | int domain) | 1722 | int domain) |