author	Linus Torvalds <torvalds@linux-foundation.org>	2015-07-15 20:03:03 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-07-15 20:03:03 -0400
commit	9090fdb9c95a68a44129d1392f7bd675aec8b0c2 (patch)
tree	03bb9951d204505e1a5d55d4269579c4db685925
parent	16ff49a08bac27f3f4799e4169f0d72a2ea66c2e (diff)
parent	d8b2ba7c5928173fe1c12bd2545f5ed85d1c3c7a (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull rdma fixes from Doug Ledford:
 "Mainly fix-ups for the various 4.2 items"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (24 commits)
  IB/core: Destroy ocrdma_dev_id IDR on module exit
  IB/core: Destroy multcast_idr on module exit
  IB/mlx4: Optimize do_slave_init
  IB/mlx4: Fix memory leak in do_slave_init
  IB/mlx4: Optimize freeing of items on error unwind
  IB/mlx4: Fix use of flow-counters for process_mad
  IB/ipath: Convert use of __constant_<foo> to <foo>
  IB/ipoib: Set MTU to max allowed by mode when mode changes
  IB/ipoib: Scatter-Gather support in connected mode
  IB/ucm: Fix bitmap wrap when devnum > IB_UCM_MAX_DEVICES
  IB/ipoib: Prevent lockdep warning in __ipoib_ib_dev_flush
  IB/ucma: Fix lockdep warning in ucma_lock_files
  rds: rds_ib_device.refcount overflow
  RDMA/nes: Fix for incorrect recording of the MAC address
  RDMA/nes: Fix for resolving the neigh
  RDMA/core: Fixes for port mapper client registration
  IB/IPoIB: Fix bad error flow in ipoib_add_port()
  IB/mlx4: Do not attemp to report HCA clock offset on VFs
  IB/cm: Do not queue work to a device that's going away
  IB/srp: Avoid using uninitialized variable
  ...
-rw-r--r--  drivers/infiniband/core/agent.c              4
-rw-r--r--  drivers/infiniband/core/cm.c                61
-rw-r--r--  drivers/infiniband/core/iwpm_msg.c          33
-rw-r--r--  drivers/infiniband/core/iwpm_util.c         12
-rw-r--r--  drivers/infiniband/core/iwpm_util.h         28
-rw-r--r--  drivers/infiniband/core/mad.c               47
-rw-r--r--  drivers/infiniband/core/multicast.c          8
-rw-r--r--  drivers/infiniband/core/opa_smi.h            4
-rw-r--r--  drivers/infiniband/core/sa_query.c           8
-rw-r--r--  drivers/infiniband/core/smi.c               37
-rw-r--r--  drivers/infiniband/core/smi.h                4
-rw-r--r--  drivers/infiniband/core/sysfs.c              2
-rw-r--r--  drivers/infiniband/core/ucm.c                4
-rw-r--r--  drivers/infiniband/core/ucma.c               5
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_sqp.c        5
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mad.c      5
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c    4
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c            34
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c           33
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c             5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c      5
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c           5
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c           2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.c     5
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c   1
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c          5
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h        29
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c     33
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c     49
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c   21
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c         23
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c       71
-rw-r--r--  drivers/scsi/scsi_transport_srp.c            3
-rw-r--r--  include/rdma/ib_verbs.h                     20
-rw-r--r--  include/scsi/scsi_transport_srp.h            1
-rw-r--r--  net/rds/ib_rdma.c                            4
36 files changed, 351 insertions, 269 deletions
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index c7dcfe4ca5f1..0429040304fd 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -88,7 +88,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
 	struct ib_ah *ah;
 	struct ib_mad_send_wr_private *mad_send_wr;
 
-	if (device->node_type == RDMA_NODE_IB_SWITCH)
+	if (rdma_cap_ib_switch(device))
 		port_priv = ib_get_agent_port(device, 0);
 	else
 		port_priv = ib_get_agent_port(device, port_num);
@@ -122,7 +122,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
 	memcpy(send_buf->mad, mad_hdr, resp_mad_len);
 	send_buf->ah = ah;
 
-	if (device->node_type == RDMA_NODE_IB_SWITCH) {
+	if (rdma_cap_ib_switch(device)) {
 		mad_send_wr = container_of(send_buf,
 					   struct ib_mad_send_wr_private,
 					   send_buf);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index dbddddd6fb5d..3a972ebf3c0d 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -169,6 +169,7 @@ struct cm_device {
 	struct ib_device *ib_device;
 	struct device *device;
 	u8 ack_delay;
+	int going_down;
 	struct cm_port *port[0];
 };
 
@@ -805,6 +806,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
 {
 	int wait_time;
 	unsigned long flags;
+	struct cm_device *cm_dev;
+
+	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
+	if (!cm_dev)
+		return;
 
 	spin_lock_irqsave(&cm.lock, flags);
 	cm_cleanup_timewait(cm_id_priv->timewait_info);
@@ -818,8 +824,14 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
 	 */
 	cm_id_priv->id.state = IB_CM_TIMEWAIT;
 	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
-	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
-			   msecs_to_jiffies(wait_time));
+
+	/* Check if the device started its remove_one */
+	spin_lock_irq(&cm.lock);
+	if (!cm_dev->going_down)
+		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
+				   msecs_to_jiffies(wait_time));
+	spin_unlock_irq(&cm.lock);
+
 	cm_id_priv->timewait_info = NULL;
 }
 
@@ -3305,6 +3317,11 @@ static int cm_establish(struct ib_cm_id *cm_id)
 	struct cm_work *work;
 	unsigned long flags;
 	int ret = 0;
+	struct cm_device *cm_dev;
+
+	cm_dev = ib_get_client_data(cm_id->device, &cm_client);
+	if (!cm_dev)
+		return -ENODEV;
 
 	work = kmalloc(sizeof *work, GFP_ATOMIC);
 	if (!work)
@@ -3343,7 +3360,17 @@ static int cm_establish(struct ib_cm_id *cm_id)
 	work->remote_id = cm_id->remote_id;
 	work->mad_recv_wc = NULL;
 	work->cm_event.event = IB_CM_USER_ESTABLISHED;
-	queue_delayed_work(cm.wq, &work->work, 0);
+
+	/* Check if the device started its remove_one */
+	spin_lock_irq(&cm.lock);
+	if (!cm_dev->going_down) {
+		queue_delayed_work(cm.wq, &work->work, 0);
+	} else {
+		kfree(work);
+		ret = -ENODEV;
+	}
+	spin_unlock_irq(&cm.lock);
+
 out:
 	return ret;
 }
@@ -3394,6 +3421,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
 	enum ib_cm_event_type event;
 	u16 attr_id;
 	int paths = 0;
+	int going_down = 0;
 
 	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
 	case CM_REQ_ATTR_ID:
@@ -3452,7 +3480,19 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
 	work->cm_event.event = event;
 	work->mad_recv_wc = mad_recv_wc;
 	work->port = port;
-	queue_delayed_work(cm.wq, &work->work, 0);
+
+	/* Check if the device started its remove_one */
+	spin_lock_irq(&cm.lock);
+	if (!port->cm_dev->going_down)
+		queue_delayed_work(cm.wq, &work->work, 0);
+	else
+		going_down = 1;
+	spin_unlock_irq(&cm.lock);
+
+	if (going_down) {
+		kfree(work);
+		ib_free_recv_mad(mad_recv_wc);
+	}
 }
 
 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
@@ -3771,7 +3811,7 @@ static void cm_add_one(struct ib_device *ib_device)
 
 	cm_dev->ib_device = ib_device;
 	cm_get_ack_delay(cm_dev);
-
+	cm_dev->going_down = 0;
 	cm_dev->device = device_create(&cm_class, &ib_device->dev,
 				       MKDEV(0, 0), NULL,
 				       "%s", ib_device->name);
@@ -3864,14 +3904,23 @@ static void cm_remove_one(struct ib_device *ib_device)
 	list_del(&cm_dev->list);
 	write_unlock_irqrestore(&cm.device_lock, flags);
 
+	spin_lock_irq(&cm.lock);
+	cm_dev->going_down = 1;
+	spin_unlock_irq(&cm.lock);
+
 	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
 		if (!rdma_cap_ib_cm(ib_device, i))
 			continue;
 
 		port = cm_dev->port[i-1];
 		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
-		ib_unregister_mad_agent(port->mad_agent);
+		/*
+		 * We flush the queue here after the going_down set, this
+		 * verify that no new works will be queued in the recv handler,
+		 * after that we can call the unregister_mad_agent
+		 */
 		flush_workqueue(cm.wq);
+		ib_unregister_mad_agent(port->mad_agent);
 		cm_remove_port_fs(port);
 	}
 	device_unregister(cm_dev->device);
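
The cm.c hunks above all follow one teardown pattern: cm_remove_one() raises a going_down flag under cm.lock, every path that queues work re-checks that flag under the same lock, and the workqueue is flushed before the MAD agent is unregistered so nothing new can slip in. A minimal userspace C sketch of the same ordering, with a pthread mutex standing in for the spinlock and hypothetical queue_to_pool()/flush_pool()/unregister_agent() helpers (assumptions for illustration, not kernel APIs):

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical helpers standing in for the workqueue and MAD agent. */
extern int queue_to_pool(void (*fn)(void *), void *arg);
extern void flush_pool(void);
extern void unregister_agent(void *dev);

struct dev_ctx {
	pthread_mutex_t lock;
	bool going_down;	/* mirrors cm_dev->going_down */
};

/* Producer side: refuse to queue once teardown has started. */
static int submit_work(struct dev_ctx *dev, void (*fn)(void *), void *arg)
{
	int ret;

	pthread_mutex_lock(&dev->lock);
	if (!dev->going_down)
		ret = queue_to_pool(fn, arg);
	else
		ret = -1;	/* caller frees its work item, as cm_establish() does */
	pthread_mutex_unlock(&dev->lock);
	return ret;
}

/* Teardown side: set the flag first, then drain, then unregister the work source. */
static void remove_one(struct dev_ctx *dev)
{
	pthread_mutex_lock(&dev->lock);
	dev->going_down = true;
	pthread_mutex_unlock(&dev->lock);

	flush_pool();		/* drains anything queued before the flag was set */
	unregister_agent(dev);	/* safe: no new work can be queued any more */
}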
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index e6ffa2e66c1a..22a3abee2a54 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -67,7 +67,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
 		err_str = "Invalid port mapper client";
 		goto pid_query_error;
 	}
-	if (iwpm_registered_client(nl_client))
+	if (iwpm_check_registration(nl_client, IWPM_REG_VALID) ||
+			iwpm_user_pid == IWPM_PID_UNAVAILABLE)
 		return 0;
 	skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client);
 	if (!skb) {
@@ -106,7 +107,6 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
 	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
 	if (ret) {
 		skb = NULL; /* skb is freed in the netlink send-op handling */
-		iwpm_set_registered(nl_client, 1);
 		iwpm_user_pid = IWPM_PID_UNAVAILABLE;
 		err_str = "Unable to send a nlmsg";
 		goto pid_query_error;
@@ -144,12 +144,12 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
 		err_str = "Invalid port mapper client";
 		goto add_mapping_error;
 	}
-	if (!iwpm_registered_client(nl_client)) {
+	if (!iwpm_valid_pid())
+		return 0;
+	if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
 		err_str = "Unregistered port mapper client";
 		goto add_mapping_error;
 	}
-	if (!iwpm_valid_pid())
-		return 0;
 	skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client);
 	if (!skb) {
 		err_str = "Unable to create a nlmsg";
@@ -214,12 +214,12 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
 		err_str = "Invalid port mapper client";
 		goto query_mapping_error;
 	}
-	if (!iwpm_registered_client(nl_client)) {
+	if (!iwpm_valid_pid())
+		return 0;
+	if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
 		err_str = "Unregistered port mapper client";
 		goto query_mapping_error;
 	}
-	if (!iwpm_valid_pid())
-		return 0;
 	ret = -ENOMEM;
 	skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client);
 	if (!skb) {
@@ -288,12 +288,12 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
 		err_str = "Invalid port mapper client";
 		goto remove_mapping_error;
 	}
-	if (!iwpm_registered_client(nl_client)) {
+	if (!iwpm_valid_pid())
+		return 0;
+	if (iwpm_check_registration(nl_client, IWPM_REG_UNDEF)) {
 		err_str = "Unregistered port mapper client";
 		goto remove_mapping_error;
 	}
-	if (!iwpm_valid_pid())
-		return 0;
 	skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client);
 	if (!skb) {
 		ret = -ENOMEM;
@@ -388,7 +388,7 @@ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
 	pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
 		 __func__, iwpm_user_pid);
 	if (iwpm_valid_client(nl_client))
-		iwpm_set_registered(nl_client, 1);
+		iwpm_set_registration(nl_client, IWPM_REG_VALID);
 register_pid_response_exit:
 	nlmsg_request->request_done = 1;
 	/* always for found nlmsg_request */
@@ -644,7 +644,6 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX];
 	const char *msg_type = "Mapping Info response";
-	int iwpm_pid;
 	u8 nl_client;
 	char *iwpm_name;
 	u16 iwpm_version;
@@ -669,14 +668,14 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
 			__func__, nl_client);
 		return ret;
 	}
-	iwpm_set_registered(nl_client, 0);
+	iwpm_set_registration(nl_client, IWPM_REG_INCOMPL);
 	atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+	iwpm_user_pid = cb->nlh->nlmsg_pid;
 	if (!iwpm_mapinfo_available())
 		return 0;
-	iwpm_pid = cb->nlh->nlmsg_pid;
 	pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
-		 __func__, iwpm_pid);
-	ret = iwpm_send_mapinfo(nl_client, iwpm_pid);
+		 __func__, iwpm_user_pid);
+	ret = iwpm_send_mapinfo(nl_client, iwpm_user_pid);
 	return ret;
 }
 EXPORT_SYMBOL(iwpm_mapping_info_cb);
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index a626795bf9c7..5fb089e91353 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -78,6 +78,7 @@ init_exit:
 	mutex_unlock(&iwpm_admin_lock);
 	if (!ret) {
 		iwpm_set_valid(nl_client, 1);
+		iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
 		pr_debug("%s: Mapinfo and reminfo tables are created\n",
 			 __func__);
 	}
@@ -106,6 +107,7 @@ int iwpm_exit(u8 nl_client)
 	}
 	mutex_unlock(&iwpm_admin_lock);
 	iwpm_set_valid(nl_client, 0);
+	iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
 	return 0;
 }
 EXPORT_SYMBOL(iwpm_exit);
@@ -397,17 +399,23 @@ void iwpm_set_valid(u8 nl_client, int valid)
 }
 
 /* valid client */
-int iwpm_registered_client(u8 nl_client)
+u32 iwpm_get_registration(u8 nl_client)
 {
 	return iwpm_admin.reg_list[nl_client];
 }
 
 /* valid client */
-void iwpm_set_registered(u8 nl_client, int reg)
+void iwpm_set_registration(u8 nl_client, u32 reg)
 {
 	iwpm_admin.reg_list[nl_client] = reg;
 }
 
+/* valid client */
+u32 iwpm_check_registration(u8 nl_client, u32 reg)
+{
+	return (iwpm_get_registration(nl_client) & reg);
+}
+
 int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
 			  struct sockaddr_storage *b_sockaddr)
 {
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index ee2d9ff095be..b7b9e194ce81 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -58,6 +58,10 @@
 #define IWPM_PID_UNDEFINED     -1
 #define IWPM_PID_UNAVAILABLE   -2
 
+#define IWPM_REG_UNDEF          0x01
+#define IWPM_REG_VALID          0x02
+#define IWPM_REG_INCOMPL        0x04
+
 struct iwpm_nlmsg_request {
 	struct list_head    inprocess_list;
 	__u32               nlmsg_seq;
@@ -88,7 +92,7 @@ struct iwpm_admin_data {
 	atomic_t refcount;
 	atomic_t nlmsg_seq;
 	int      client_list[RDMA_NL_NUM_CLIENTS];
-	int      reg_list[RDMA_NL_NUM_CLIENTS];
+	u32      reg_list[RDMA_NL_NUM_CLIENTS];
 };
 
 /**
@@ -159,19 +163,31 @@ int iwpm_valid_client(u8 nl_client);
 void iwpm_set_valid(u8 nl_client, int valid);
 
 /**
- * iwpm_registered_client - Check if the port mapper client is registered
+ * iwpm_check_registration - Check if the client registration
+ *                           matches the given one
  * @nl_client: The index of the netlink client
+ * @reg: The given registration type to compare with
  *
  * Call iwpm_register_pid() to register a client
+ * Returns true if the client registration matches reg,
+ * otherwise returns false
+ */
+u32 iwpm_check_registration(u8 nl_client, u32 reg);
+
+/**
+ * iwpm_set_registration - Set the client registration
+ * @nl_client: The index of the netlink client
+ * @reg: Registration type to set
  */
-int iwpm_registered_client(u8 nl_client);
+void iwpm_set_registration(u8 nl_client, u32 reg);
 
 /**
- * iwpm_set_registered - Set the port mapper client to registered or not
+ * iwpm_get_registration
  * @nl_client: The index of the netlink client
- * @reg: 1 if registered or 0 if not
+ *
+ * Returns the client registration type
  */
-void iwpm_set_registered(u8 nl_client, int reg);
+u32 iwpm_get_registration(u8 nl_client);
 
 /**
  * iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of
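
The IWPM_REG_* values above are one-hot states, so iwpm_check_registration() can match either a single state or any of several with one bitwise AND. A small standalone sketch of that idea (the macro names are reused from the header, the rest is a userspace demo, not the kernel code):

#include <stdint.h>
#include <stdio.h>

#define IWPM_REG_UNDEF   0x01
#define IWPM_REG_VALID   0x02
#define IWPM_REG_INCOMPL 0x04

static uint32_t reg_state = IWPM_REG_UNDEF;	/* set at client init */

/* Non-zero when the current state matches any bit in 'reg'. */
static uint32_t check_registration(uint32_t reg)
{
	return reg_state & reg;
}

int main(void)
{
	/* e.g. a remove-mapping path bails out while registration is undefined */
	printf("undef?  %s\n", check_registration(IWPM_REG_UNDEF) ? "yes" : "no");

	reg_state = IWPM_REG_VALID;	/* e.g. after a register-pid response */

	/* add/query paths only proceed once the state is VALID */
	printf("valid?  %s\n", check_registration(IWPM_REG_VALID) ? "yes" : "no");
	printf("either? %s\n",
	       check_registration(IWPM_REG_VALID | IWPM_REG_INCOMPL) ? "yes" : "no");
	return 0;
}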
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index a4b1466c1bf6..786fc51bf04b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -769,7 +769,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
 				    mad_agent_priv->qp_info->port_priv->port_num);
 
-	if (device->node_type == RDMA_NODE_IB_SWITCH &&
+	if (rdma_cap_ib_switch(device) &&
 	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
 		port_num = send_wr->wr.ud.port_num;
 	else
@@ -787,14 +787,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 		if ((opa_get_smp_direction(opa_smp)
 		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
 		     OPA_LID_PERMISSIVE &&
-		    opa_smi_handle_dr_smp_send(opa_smp, device->node_type,
+		    opa_smi_handle_dr_smp_send(opa_smp,
+					       rdma_cap_ib_switch(device),
 					       port_num) == IB_SMI_DISCARD) {
 			ret = -EINVAL;
 			dev_err(&device->dev, "OPA Invalid directed route\n");
 			goto out;
 		}
 		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
-		if (opa_drslid != OPA_LID_PERMISSIVE &&
+		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
 		    opa_drslid & 0xffff0000) {
 			ret = -EINVAL;
 			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
@@ -810,7 +811,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	} else {
 		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
 		    IB_LID_PERMISSIVE &&
-		    smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
+		    smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
 		    IB_SMI_DISCARD) {
 			ret = -EINVAL;
 			dev_err(&device->dev, "Invalid directed route\n");
@@ -2030,7 +2031,7 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
 	struct ib_smp *smp = (struct ib_smp *)recv->mad;
 
 	if (smi_handle_dr_smp_recv(smp,
-				   port_priv->device->node_type,
+				   rdma_cap_ib_switch(port_priv->device),
 				   port_num,
 				   port_priv->device->phys_port_cnt) ==
 	    IB_SMI_DISCARD)
@@ -2042,13 +2043,13 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
 
 	if (retsmi == IB_SMI_SEND) { /* don't forward */
 		if (smi_handle_dr_smp_send(smp,
-					   port_priv->device->node_type,
+					   rdma_cap_ib_switch(port_priv->device),
 					   port_num) == IB_SMI_DISCARD)
 			return IB_SMI_DISCARD;
 
 		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
 			return IB_SMI_DISCARD;
-	} else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
+	} else if (rdma_cap_ib_switch(port_priv->device)) {
 		/* forward case for switches */
 		memcpy(response, recv, mad_priv_size(response));
 		response->header.recv_wc.wc = &response->header.wc;
@@ -2115,7 +2116,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
 	struct opa_smp *smp = (struct opa_smp *)recv->mad;
 
 	if (opa_smi_handle_dr_smp_recv(smp,
-				       port_priv->device->node_type,
+				       rdma_cap_ib_switch(port_priv->device),
 				       port_num,
 				       port_priv->device->phys_port_cnt) ==
 	    IB_SMI_DISCARD)
@@ -2127,7 +2128,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
 
 	if (retsmi == IB_SMI_SEND) { /* don't forward */
 		if (opa_smi_handle_dr_smp_send(smp,
-					       port_priv->device->node_type,
+					       rdma_cap_ib_switch(port_priv->device),
 					       port_num) == IB_SMI_DISCARD)
 			return IB_SMI_DISCARD;
 
@@ -2135,7 +2136,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
 		    IB_SMI_DISCARD)
 			return IB_SMI_DISCARD;
 
-	} else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
+	} else if (rdma_cap_ib_switch(port_priv->device)) {
 		/* forward case for switches */
 		memcpy(response, recv, mad_priv_size(response));
 		response->header.recv_wc.wc = &response->header.wc;
@@ -2235,7 +2236,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 		goto out;
 	}
 
-	if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
+	if (rdma_cap_ib_switch(port_priv->device))
 		port_num = wc->port_num;
 	else
 		port_num = port_priv->port_num;
@@ -3297,17 +3298,11 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
 
 static void ib_mad_init_device(struct ib_device *device)
 {
-	int start, end, i;
+	int start, i;
 
-	if (device->node_type == RDMA_NODE_IB_SWITCH) {
-		start = 0;
-		end = 0;
-	} else {
-		start = 1;
-		end = device->phys_port_cnt;
-	}
+	start = rdma_start_port(device);
 
-	for (i = start; i <= end; i++) {
+	for (i = start; i <= rdma_end_port(device); i++) {
 		if (!rdma_cap_ib_mad(device, i))
 			continue;
 
@@ -3342,17 +3337,9 @@ error:
 
 static void ib_mad_remove_device(struct ib_device *device)
 {
-	int start, end, i;
-
-	if (device->node_type == RDMA_NODE_IB_SWITCH) {
-		start = 0;
-		end = 0;
-	} else {
-		start = 1;
-		end = device->phys_port_cnt;
-	}
+	int i;
 
-	for (i = start; i <= end; i++) {
+	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
 		if (!rdma_cap_ib_mad(device, i))
 			continue;
 
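
mad.c (and multicast.c and sa_query.c below) now derive the port range from rdma_start_port()/rdma_end_port() instead of special-casing switches by hand. Roughly, those helpers encode "a switch has only the management port 0, an HCA has ports 1..phys_port_cnt"; a simplified standalone sketch of that behaviour (stand-in types, not the real ib_verbs.h definitions):

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel types/capability check. */
struct ib_device {
	bool is_switch;		/* what rdma_cap_ib_switch() reports */
	uint8_t phys_port_cnt;
};

/* A switch's management port is port 0; HCAs number ports from 1. */
static uint8_t rdma_start_port(const struct ib_device *dev)
{
	return dev->is_switch ? 0 : 1;
}

static uint8_t rdma_end_port(const struct ib_device *dev)
{
	return dev->is_switch ? 0 : dev->phys_port_cnt;
}

/* Usage mirroring ib_mad_remove_device() after this series. */
static void for_each_port(const struct ib_device *dev, void (*fn)(uint8_t))
{
	for (uint8_t i = rdma_start_port(dev); i <= rdma_end_port(dev); i++)
		fn(i);
}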
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 1244f02a5c6d..2cb865c7ce7a 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -812,12 +812,8 @@ static void mcast_add_one(struct ib_device *device)
 	if (!dev)
 		return;
 
-	if (device->node_type == RDMA_NODE_IB_SWITCH)
-		dev->start_port = dev->end_port = 0;
-	else {
-		dev->start_port = 1;
-		dev->end_port = device->phys_port_cnt;
-	}
+	dev->start_port = rdma_start_port(device);
+	dev->end_port = rdma_end_port(device);
 
 	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
 		if (!rdma_cap_ib_mcast(device, dev->start_port + i))
diff --git a/drivers/infiniband/core/opa_smi.h b/drivers/infiniband/core/opa_smi.h
index 62d91bfa4cb7..3bfab3505a29 100644
--- a/drivers/infiniband/core/opa_smi.h
+++ b/drivers/infiniband/core/opa_smi.h
@@ -39,12 +39,12 @@
 
 #include "smi.h"
 
-enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type,
+enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
 					    int port_num, int phys_port_cnt);
 int opa_smi_get_fwd_port(struct opa_smp *smp);
 extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp);
 extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
-						   u8 node_type, int port_num);
+						   bool is_switch, int port_num);
 
 /*
  * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 0fae85062a65..ca919f429666 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1156,12 +1156,8 @@ static void ib_sa_add_one(struct ib_device *device)
 	int s, e, i;
 	int count = 0;
 
-	if (device->node_type == RDMA_NODE_IB_SWITCH)
-		s = e = 0;
-	else {
-		s = 1;
-		e = device->phys_port_cnt;
-	}
+	s = rdma_start_port(device);
+	e = rdma_end_port(device);
 
 	sa_dev = kzalloc(sizeof *sa_dev +
 			 (e - s + 1) * sizeof (struct ib_sa_port),
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index 368a561d1a5d..f19b23817c2b 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -41,7 +41,7 @@
 #include "smi.h"
 #include "opa_smi.h"
 
-static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
+static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
 						 u8 *hop_ptr, u8 hop_cnt,
 						 const u8 *initial_path,
 						 const u8 *return_path,
@@ -64,7 +64,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
 
 	/* C14-9:2 */
 	if (*hop_ptr && *hop_ptr < hop_cnt) {
-		if (node_type != RDMA_NODE_IB_SWITCH)
+		if (!is_switch)
 			return IB_SMI_DISCARD;
 
 		/* return_path set when received */
@@ -77,7 +77,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
 	if (*hop_ptr == hop_cnt) {
 		/* return_path set when received */
 		(*hop_ptr)++;
-		return (node_type == RDMA_NODE_IB_SWITCH ||
+		return (is_switch ||
 			dr_dlid_is_permissive ?
 			IB_SMI_HANDLE : IB_SMI_DISCARD);
 	}
@@ -96,7 +96,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
 
 	/* C14-13:2 */
 	if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
-		if (node_type != RDMA_NODE_IB_SWITCH)
+		if (!is_switch)
 			return IB_SMI_DISCARD;
 
 		(*hop_ptr)--;
@@ -108,7 +108,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
 	if (*hop_ptr == 1) {
 		(*hop_ptr)--;
 		/* C14-13:3 -- SMPs destined for SM shouldn't be here */
-		return (node_type == RDMA_NODE_IB_SWITCH ||
+		return (is_switch ||
 			dr_slid_is_permissive ?
 			IB_SMI_HANDLE : IB_SMI_DISCARD);
 	}
@@ -127,9 +127,9 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
  * Return IB_SMI_DISCARD if the SMP should be discarded
  */
 enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
-				       u8 node_type, int port_num)
+				       bool is_switch, int port_num)
 {
-	return __smi_handle_dr_smp_send(node_type, port_num,
+	return __smi_handle_dr_smp_send(is_switch, port_num,
 					&smp->hop_ptr, smp->hop_cnt,
 					smp->initial_path,
 					smp->return_path,
@@ -139,9 +139,9 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
 }
 
 enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
-					   u8 node_type, int port_num)
+					   bool is_switch, int port_num)
 {
-	return __smi_handle_dr_smp_send(node_type, port_num,
+	return __smi_handle_dr_smp_send(is_switch, port_num,
 					&smp->hop_ptr, smp->hop_cnt,
 					smp->route.dr.initial_path,
 					smp->route.dr.return_path,
@@ -152,7 +152,7 @@ enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
 					OPA_LID_PERMISSIVE);
 }
 
-static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
+static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
 						 int phys_port_cnt,
 						 u8 *hop_ptr, u8 hop_cnt,
 						 const u8 *initial_path,
@@ -173,7 +173,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
 
 	/* C14-9:2 -- intermediate hop */
 	if (*hop_ptr && *hop_ptr < hop_cnt) {
-		if (node_type != RDMA_NODE_IB_SWITCH)
+		if (!is_switch)
 			return IB_SMI_DISCARD;
 
 		return_path[*hop_ptr] = port_num;
@@ -188,7 +188,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
 		return_path[*hop_ptr] = port_num;
 		/* hop_ptr updated when sending */
 
-		return (node_type == RDMA_NODE_IB_SWITCH ||
+		return (is_switch ||
 			dr_dlid_is_permissive ?
 			IB_SMI_HANDLE : IB_SMI_DISCARD);
 	}
@@ -208,7 +208,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
 
 	/* C14-13:2 */
 	if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
-		if (node_type != RDMA_NODE_IB_SWITCH)
+		if (!is_switch)
 			return IB_SMI_DISCARD;
 
 		/* hop_ptr updated when sending */
@@ -224,8 +224,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
 			return IB_SMI_HANDLE;
 		}
 		/* hop_ptr updated when sending */
-		return (node_type == RDMA_NODE_IB_SWITCH ?
-			IB_SMI_HANDLE : IB_SMI_DISCARD);
+		return (is_switch ? IB_SMI_HANDLE : IB_SMI_DISCARD);
 	}
 
 	/* C14-13:4 -- hop_ptr = 0 -> give to SM */
@@ -238,10 +237,10 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
  * Adjust information for a received SMP
  * Return IB_SMI_DISCARD if the SMP should be dropped
  */
-enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
+enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
 				       int port_num, int phys_port_cnt)
 {
-	return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt,
+	return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
 					&smp->hop_ptr, smp->hop_cnt,
 					smp->initial_path,
 					smp->return_path,
@@ -254,10 +253,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
  * Adjust information for a received SMP
  * Return IB_SMI_DISCARD if the SMP should be dropped
  */
-enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type,
+enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
 					   int port_num, int phys_port_cnt)
 {
-	return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt,
+	return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
 					&smp->hop_ptr, smp->hop_cnt,
 					smp->route.dr.initial_path,
 					smp->route.dr.return_path,
diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h
index aff96bac49b4..33c91c8a16e9 100644
--- a/drivers/infiniband/core/smi.h
+++ b/drivers/infiniband/core/smi.h
@@ -51,12 +51,12 @@ enum smi_forward_action {
 	IB_SMI_FORWARD	/* SMP should be forwarded (for switches only) */
 };
 
-enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
+enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
 				       int port_num, int phys_port_cnt);
 int smi_get_fwd_port(struct ib_smp *smp);
 extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
 extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
-					      u8 node_type, int port_num);
+					      bool is_switch, int port_num);
 
 /*
  * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index ed6b6c85c334..0b84a9cdfe5b 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -870,7 +870,7 @@ int ib_device_register_sysfs(struct ib_device *device,
 		goto err_put;
 	}
 
-	if (device->node_type == RDMA_NODE_IB_SWITCH) {
+	if (rdma_cap_ib_switch(device)) {
 		ret = add_port(device, 0, port_callback);
 		if (ret)
 			goto err_put;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 62c24b1452b8..009481073644 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1193,6 +1193,7 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
 static void ib_ucm_release_dev(struct device *dev)
 {
 	struct ib_ucm_device *ucm_dev;
@@ -1202,7 +1203,7 @@ static void ib_ucm_release_dev(struct device *dev)
 	if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
 		clear_bit(ucm_dev->devnum, dev_map);
 	else
-		clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map);
+		clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map);
 	kfree(ucm_dev);
 }
 
@@ -1226,7 +1227,6 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
 
 static dev_t overflow_maj;
-static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
 static int find_overflow_devnum(void)
 {
 	int ret;
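
The ucm.c change only hoists overflow_map above ib_ucm_release_dev(), so device numbers at or beyond IB_UCM_MAX_DEVICES clear a bit in the overflow bitmap instead of wrapping into dev_map. The base/overflow split, reduced to a standalone sketch (plain uint32_t words instead of the kernel bitmap helpers):

#include <stdint.h>

#define MAX_DEVICES 32		/* stand-in for IB_UCM_MAX_DEVICES */

static uint32_t dev_map;	/* minors 0 .. MAX_DEVICES-1             */
static uint32_t overflow_map;	/* minors MAX_DEVICES .. 2*MAX_DEVICES-1 */

static void release_devnum(unsigned int devnum)
{
	if (devnum < MAX_DEVICES)
		dev_map &= ~(UINT32_C(1) << devnum);
	else
		/* before the fix this bit was cleared in dev_map, freeing the
		 * wrong minor and leaking the overflow one */
		overflow_map &= ~(UINT32_C(1) << (devnum - MAX_DEVICES));
}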
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ad45469f7582..29b21213ea75 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1354,10 +1354,10 @@ static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
 	/* Acquire mutex's based on pointer comparison to prevent deadlock. */
 	if (file1 < file2) {
 		mutex_lock(&file1->mut);
-		mutex_lock(&file2->mut);
+		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
 	} else {
 		mutex_lock(&file2->mut);
-		mutex_lock(&file1->mut);
+		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
 	}
 }
 
@@ -1616,6 +1616,7 @@ static void __exit ucma_cleanup(void)
 	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
 	misc_deregister(&ucma_misc);
 	idr_destroy(&ctx_idr);
+	idr_destroy(&multicast_idr);
 }
 
 module_init(ucma_init);
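
ucma_lock_files() already avoids deadlock by always taking the lower-addressed mutex first; the change only marks the second acquisition with mutex_lock_nested(SINGLE_DEPTH_NESTING) so lockdep knows the double lock of one lock class is intentional. The ordering half of the pattern, as a small pthread sketch:

#include <pthread.h>

struct file_ctx {
	pthread_mutex_t mut;
	/* ... per-file state ... */
};

/* Always lock the lower-addressed context first, so two threads migrating
 * ids between the same pair of files can never lock in opposite order. */
static void lock_pair(struct file_ctx *f1, struct file_ctx *f2)
{
	if (f1 < f2) {
		pthread_mutex_lock(&f1->mut);
		pthread_mutex_lock(&f2->mut);
	} else {
		pthread_mutex_lock(&f2->mut);
		pthread_mutex_lock(&f1->mut);
	}
}

static void unlock_pair(struct file_ctx *f1, struct file_ctx *f2)
{
	pthread_mutex_unlock(&f1->mut);
	pthread_mutex_unlock(&f2->mut);
}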
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 12b5bc23832b..376b031c2c7f 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -226,8 +226,9 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	const struct ib_mad *in_mad = (const struct ib_mad *)in;
 	struct ib_mad *out_mad = (struct ib_mad *)out;
 
-	BUG_ON(in_mad_size != sizeof(*in_mad) ||
-	       *out_mad_size != sizeof(*out_mad));
+	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+			 *out_mad_size != sizeof(*out_mad)))
+		return IB_MAD_RESULT_FAILURE;
 
 	if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
 		return IB_MAD_RESULT_FAILURE;
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 948188e37f95..ad3a926ab3c5 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -1499,8 +1499,9 @@ int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	const struct ib_mad *in_mad = (const struct ib_mad *)in;
 	struct ib_mad *out_mad = (struct ib_mad *)out;
 
-	BUG_ON(in_mad_size != sizeof(*in_mad) ||
-	       *out_mad_size != sizeof(*out_mad));
+	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+			 *out_mad_size != sizeof(*out_mad)))
+		return IB_MAD_RESULT_FAILURE;
 
 	switch (in_mad->mad_hdr.mgmt_class) {
 	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 48253b839a6f..30ba49c4a98c 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -2044,9 +2044,9 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 
 	spin_lock_init(&idev->qp_table.lock);
 	spin_lock_init(&idev->lk_table.lock);
-	idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
+	idev->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
 	/* Set the prefix to the default value (see ch. 4.1.1) */
-	idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);
+	idev->gid_prefix = cpu_to_be64(0xfe80000000000000ULL);
 
 	ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
 	if (ret)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 85a50df2f203..68b3dfa922bf 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -860,21 +860,31 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
 	const struct ib_mad *in_mad = (const struct ib_mad *)in;
 	struct ib_mad *out_mad = (struct ib_mad *)out;
+	enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
 
-	BUG_ON(in_mad_size != sizeof(*in_mad) ||
-	       *out_mad_size != sizeof(*out_mad));
+	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+			 *out_mad_size != sizeof(*out_mad)))
+		return IB_MAD_RESULT_FAILURE;
 
-	switch (rdma_port_get_link_layer(ibdev, port_num)) {
-	case IB_LINK_LAYER_INFINIBAND:
-		if (!mlx4_is_slave(dev->dev))
-			return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
-					      in_grh, in_mad, out_mad);
-	case IB_LINK_LAYER_ETHERNET:
-		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
-					in_grh, in_mad, out_mad);
-	default:
-		return -EINVAL;
+	/* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA
+	 * queries, should be called only by VFs and for that specific purpose
+	 */
+	if (link == IB_LINK_LAYER_INFINIBAND) {
+		if (mlx4_is_slave(dev->dev) &&
+		    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+		    in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
+			return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+						in_grh, in_mad, out_mad);
+
+		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
+				      in_grh, in_mad, out_mad);
 	}
+
+	if (link == IB_LINK_LAYER_ETHERNET)
+		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+					in_grh, in_mad, out_mad);
+
+	return -EINVAL;
 }
 
 static void send_handler(struct ib_mad_agent *agent,
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 067a691ecbed..8be6db816460 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -253,14 +253,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
 	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
 
-	err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
-	if (err)
-		goto out;
+	if (!mlx4_is_slave(dev->dev))
+		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
 
 	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
-		resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
 		resp.response_length += sizeof(resp.hca_core_clock_offset);
-		resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+		if (!err && !mlx4_is_slave(dev->dev)) {
+			resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
+		}
 	}
 
 	if (uhw->outlen) {
@@ -2669,31 +2670,33 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
 	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
 	if (!dm) {
 		pr_err("failed to allocate memory for tunneling qp update\n");
-		goto out;
+		return;
 	}
 
 	for (i = 0; i < ports; i++) {
 		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
 		if (!dm[i]) {
 			pr_err("failed to allocate memory for tunneling qp update work struct\n");
-			for (i = 0; i < dev->caps.num_ports; i++) {
-				if (dm[i])
-					kfree(dm[i]);
-			}
+			while (--i >= 0)
+				kfree(dm[i]);
 			goto out;
 		}
-	}
-	/* initialize or tear down tunnel QPs for the slave */
-	for (i = 0; i < ports; i++) {
 		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
 		dm[i]->port = first_port + i + 1;
 		dm[i]->slave = slave;
 		dm[i]->do_init = do_init;
 		dm[i]->dev = ibdev;
-		spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
-		if (!ibdev->sriov.is_going_down)
+	}
+	/* initialize or tear down tunnel QPs for the slave */
+	spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
+	if (!ibdev->sriov.is_going_down) {
+		for (i = 0; i < ports; i++)
 			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
 		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+	} else {
+		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+		for (i = 0; i < ports; i++)
+			kfree(dm[i]);
 	}
 out:
 	kfree(dm);
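
The do_slave_init() rework above also switches the allocation-failure path to the usual backward unwind: when dm[i] fails, free only the 0..i-1 entries that were actually allocated instead of rescanning the whole array. The idiom in a standalone form:

#include <stdlib.h>

/* Allocate n items of 'size' bytes; on failure free exactly the ones
 * already allocated and return NULL. */
static void **alloc_all(int n, size_t size)
{
	void **items = calloc(n, sizeof(*items));
	int i;

	if (!items)
		return NULL;

	for (i = 0; i < n; i++) {
		items[i] = malloc(size);
		if (!items[i]) {
			while (--i >= 0)	/* unwind only what succeeded */
				free(items[i]);
			free(items);
			return NULL;
		}
	}
	return items;
}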
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 01fc97db45d6..b84d13a487cc 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -68,8 +68,9 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	const struct ib_mad *in_mad = (const struct ib_mad *)in;
 	struct ib_mad *out_mad = (struct ib_mad *)out;
 
-	BUG_ON(in_mad_size != sizeof(*in_mad) ||
-	       *out_mad_size != sizeof(*out_mad));
+	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+			 *out_mad_size != sizeof(*out_mad)))
+		return IB_MAD_RESULT_FAILURE;
 
 	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
 
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 6b2418b74c99..7c3f2fb44ba5 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -209,8 +209,9 @@ int mthca_process_mad(struct ib_device *ibdev,
 	const struct ib_mad *in_mad = (const struct ib_mad *)in;
 	struct ib_mad *out_mad = (struct ib_mad *)out;
 
-	BUG_ON(in_mad_size != sizeof(*in_mad) ||
-	       *out_mad_size != sizeof(*out_mad));
+	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+			 *out_mad_size != sizeof(*out_mad)))
+		return IB_MAD_RESULT_FAILURE;
 
 	/* Forward locally generated traps to the SM */
 	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 9047af429906..8a3ad170d790 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1520,8 +1520,9 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
 	int rc = arpindex;
 	struct net_device *netdev;
 	struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
+	__be32 dst_ipaddr = htonl(dst_ip);
 
-	rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0);
+	rt = ip_route_output(&init_net, dst_ipaddr, nesvnic->local_ipaddr, 0, 0);
 	if (IS_ERR(rt)) {
 		printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
 		       __func__, dst_ip);
@@ -1533,7 +1534,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
 	else
 		netdev = nesvnic->netdev;
 
-	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
+	neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
 
 	rcu_read_lock();
 	if (neigh) {
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 02120d340d50..4713dd7ed764 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -3861,7 +3861,7 @@ void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr,
 			(((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) |
 			(((u32)mac_addr[4]) << 8)  | (u32)mac_addr[5]);
 		cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32(
-			(((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]);
+			(((u32)mac_addr[0]) << 8) | (u32)mac_addr[1]);
 	} else {
 		cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0;
 		cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0;
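
The nes_hw.c one-liner corrects how the first two MAC bytes are packed into the "high" CQP word: mac_addr[0] belongs immediately above mac_addr[1], i.e. a shift of 8, not 16; with the old shift byte 0 lands 8 bits too high and the adapter records a wrong MAC. A tiny standalone check of the packing (sample MAC chosen here, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x25, 0x90, 0x12, 0x34, 0x56 };

	/* Low word carries bytes 2..5, high word carries bytes 0..1. */
	uint32_t lo       = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
			    ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];
	uint32_t hi_fixed = ((uint32_t)mac[0] << 8)  |  (uint32_t)mac[1];
	uint32_t hi_buggy = ((uint32_t)mac[0] << 16) |  (uint32_t)mac[1];

	printf("lo=0x%08x hi(fixed)=0x%04x hi(buggy)=0x%06x\n",
	       lo, hi_fixed, hi_buggy);
	/* fixed: 0x0225 -- buggy: 0x020025; byte 0 sits outside the two-byte
	 * slot, so the recorded MAC no longer matches the real address. */
	return 0;
}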
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 4bafa15708d0..29b27675dd70 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -215,8 +215,9 @@ int ocrdma_process_mad(struct ib_device *ibdev,
 	const struct ib_mad *in_mad = (const struct ib_mad *)in;
 	struct ib_mad *out_mad = (struct ib_mad *)out;
 
-	BUG_ON(in_mad_size != sizeof(*in_mad) ||
-	       *out_mad_size != sizeof(*out_mad));
+	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+			 *out_mad_size != sizeof(*out_mad)))
+		return IB_MAD_RESULT_FAILURE;
 
 	switch (in_mad->mad_hdr.mgmt_class) {
 	case IB_MGMT_CLASS_PERF_MGMT:
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 8a1398b253a2..d98a707a5eb9 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -696,6 +696,7 @@ static void __exit ocrdma_exit_module(void)
 	ocrdma_unregister_inet6addr_notifier();
 	ocrdma_unregister_inetaddr_notifier();
 	ocrdma_rem_debugfs();
+	idr_destroy(&ocrdma_dev_id);
 }
 
 module_init(ocrdma_init_module);
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 05e3242d8442..9625e7c438e5 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -2412,8 +2412,9 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
 	const struct ib_mad *in_mad = (const struct ib_mad *)in;
 	struct ib_mad *out_mad = (struct ib_mad *)out;
 
-	BUG_ON(in_mad_size != sizeof(*in_mad) ||
-	       *out_mad_size != sizeof(*out_mad));
+	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+			 *out_mad_size != sizeof(*out_mad)))
+		return IB_MAD_RESULT_FAILURE;
 
 	switch (in_mad->mad_hdr.mgmt_class) {
 	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index bd94b0a6e9e5..79859c4d43c9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -239,7 +239,7 @@ struct ipoib_cm_tx {
 	struct net_device *dev;
 	struct ipoib_neigh *neigh;
 	struct ipoib_path *path;
-	struct ipoib_cm_tx_buf *tx_ring;
+	struct ipoib_tx_buf *tx_ring;
 	unsigned tx_head;
 	unsigned tx_tail;
 	unsigned long flags;
@@ -504,6 +504,33 @@ int ipoib_mcast_stop_thread(struct net_device *dev);
 void ipoib_mcast_dev_down(struct net_device *dev);
 void ipoib_mcast_dev_flush(struct net_device *dev);
 
+int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
+void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
+			struct ipoib_tx_buf *tx_req);
+
+static inline void ipoib_build_sge(struct ipoib_dev_priv *priv,
+				   struct ipoib_tx_buf *tx_req)
+{
+	int i, off;
+	struct sk_buff *skb = tx_req->skb;
+	skb_frag_t *frags = skb_shinfo(skb)->frags;
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	u64 *mapping = tx_req->mapping;
+
+	if (skb_headlen(skb)) {
+		priv->tx_sge[0].addr = mapping[0];
+		priv->tx_sge[0].length = skb_headlen(skb);
+		off = 1;
+	} else
+		off = 0;
+
+	for (i = 0; i < nr_frags; ++i) {
+		priv->tx_sge[i + off].addr = mapping[i + off];
+		priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
+	}
+	priv->tx_wr.num_sge = nr_frags + off;
+}
+
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
 int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index cf32a778e7d0..ee39be6ccfb0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -694,14 +694,12 @@ repost:
 static inline int post_send(struct ipoib_dev_priv *priv,
 			    struct ipoib_cm_tx *tx,
 			    unsigned int wr_id,
-			    u64 addr, int len)
+			    struct ipoib_tx_buf *tx_req)
 {
 	struct ib_send_wr *bad_wr;
 
-	priv->tx_sge[0].addr = addr;
-	priv->tx_sge[0].length = len;
+	ipoib_build_sge(priv, tx_req);
 
-	priv->tx_wr.num_sge = 1;
 	priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;
 
 	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
@@ -710,8 +708,7 @@ static inline int post_send(struct ipoib_dev_priv *priv,
 void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	struct ipoib_cm_tx_buf *tx_req;
-	u64 addr;
+	struct ipoib_tx_buf *tx_req;
 	int rc;
 
 	if (unlikely(skb->len > tx->mtu)) {
@@ -735,24 +732,21 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 	 */
 	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
 	tx_req->skb = skb;
-	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
-	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
+
+	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
 		++dev->stats.tx_errors;
 		dev_kfree_skb_any(skb);
 		return;
 	}
 
-	tx_req->mapping = addr;
-
 	skb_orphan(skb);
 	skb_dst_drop(skb);
 
-	rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
-		       addr, skb->len);
+	rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
 	if (unlikely(rc)) {
 		ipoib_warn(priv, "post_send failed, error %d\n", rc);
 		++dev->stats.tx_errors;
-		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
+		ipoib_dma_unmap_tx(priv, tx_req);
 		dev_kfree_skb_any(skb);
 	} else {
 		dev->trans_start = jiffies;
@@ -777,7 +771,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_cm_tx *tx = wc->qp->qp_context;
 	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
-	struct ipoib_cm_tx_buf *tx_req;
+	struct ipoib_tx_buf *tx_req;
 	unsigned long flags;
 
 	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
@@ -791,7 +785,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
 	tx_req = &tx->tx_ring[wr_id];
 
-	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
+	ipoib_dma_unmap_tx(priv, tx_req);
 
 	/* FIXME: is this right? Shouldn't we only increment on success? */
 	++dev->stats.tx_packets;
@@ -1036,6 +1030,9 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
 
 	struct ib_qp *tx_qp;
 
+	if (dev->features & NETIF_F_SG)
+		attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+
 	tx_qp = ib_create_qp(priv->pd, &attr);
 	if (PTR_ERR(tx_qp) == -EINVAL) {
 		ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
@@ -1170,7 +1167,7 @@ err_tx:
 static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
-	struct ipoib_cm_tx_buf *tx_req;
+	struct ipoib_tx_buf *tx_req;
 	unsigned long begin;
 
 	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
@@ -1197,8 +1194,7 @@ timeout:
 
 	while ((int) p->tx_tail - (int) p->tx_head < 0) {
 		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
-		ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
-				    DMA_TO_DEVICE);
+		ipoib_dma_unmap_tx(priv, tx_req);
 		dev_kfree_skb_any(tx_req->skb);
 		++p->tx_tail;
 		netif_tx_lock_bh(p->dev);
@@ -1455,7 +1451,6 @@ static void ipoib_cm_stale_task(struct work_struct *work)
 	spin_unlock_irq(&priv->lock);
 }
 
-
 static ssize_t show_mode(struct device *d, struct device_attribute *attr,
 			 char *buf)
 {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 63b92cbb29ad..d266667ca9b8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -263,8 +263,7 @@ repost:
263 "for buf %d\n", wr_id); 263 "for buf %d\n", wr_id);
264} 264}
265 265
266static int ipoib_dma_map_tx(struct ib_device *ca, 266int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
267 struct ipoib_tx_buf *tx_req)
268{ 267{
269 struct sk_buff *skb = tx_req->skb; 268 struct sk_buff *skb = tx_req->skb;
270 u64 *mapping = tx_req->mapping; 269 u64 *mapping = tx_req->mapping;
@@ -305,8 +304,8 @@ partial_error:
 	return -EIO;
 }
 
-static void ipoib_dma_unmap_tx(struct ib_device *ca,
-			       struct ipoib_tx_buf *tx_req)
+void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
+			struct ipoib_tx_buf *tx_req)
 {
 	struct sk_buff *skb = tx_req->skb;
 	u64 *mapping = tx_req->mapping;
@@ -314,7 +313,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
 	int off;
 
 	if (skb_headlen(skb)) {
-		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+		ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
+				    DMA_TO_DEVICE);
 		off = 1;
 	} else
 		off = 0;
@@ -322,8 +322,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-		ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag),
-				  DMA_TO_DEVICE);
+		ib_dma_unmap_page(priv->ca, mapping[i + off],
+				  skb_frag_size(frag), DMA_TO_DEVICE);
 	}
 }
 
@@ -389,7 +389,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
 	tx_req = &priv->tx_ring[wr_id];
 
-	ipoib_dma_unmap_tx(priv->ca, tx_req);
+	ipoib_dma_unmap_tx(priv, tx_req);
 
 	++dev->stats.tx_packets;
 	dev->stats.tx_bytes += tx_req->skb->len;
@@ -514,24 +514,10 @@ static inline int post_send(struct ipoib_dev_priv *priv,
 			    void *head, int hlen)
 {
 	struct ib_send_wr *bad_wr;
-	int i, off;
 	struct sk_buff *skb = tx_req->skb;
-	skb_frag_t *frags = skb_shinfo(skb)->frags;
-	int nr_frags = skb_shinfo(skb)->nr_frags;
-	u64 *mapping = tx_req->mapping;
 
-	if (skb_headlen(skb)) {
-		priv->tx_sge[0].addr = mapping[0];
-		priv->tx_sge[0].length = skb_headlen(skb);
-		off = 1;
-	} else
-		off = 0;
+	ipoib_build_sge(priv, tx_req);
 
-	for (i = 0; i < nr_frags; ++i) {
-		priv->tx_sge[i + off].addr = mapping[i + off];
-		priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
-	}
-	priv->tx_wr.num_sge = nr_frags + off;
 	priv->tx_wr.wr_id = wr_id;
 	priv->tx_wr.wr.ud.remote_qpn = qpn;
 	priv->tx_wr.wr.ud.ah = address;
@@ -617,7 +603,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		ipoib_warn(priv, "post_send failed, error %d\n", rc);
 		++dev->stats.tx_errors;
 		--priv->tx_outstanding;
-		ipoib_dma_unmap_tx(priv->ca, tx_req);
+		ipoib_dma_unmap_tx(priv, tx_req);
 		dev_kfree_skb_any(skb);
 		if (netif_queue_stopped(dev))
 			netif_wake_queue(dev);
@@ -868,7 +854,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
 	while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
 		tx_req = &priv->tx_ring[priv->tx_tail &
 					(ipoib_sendq_size - 1)];
-		ipoib_dma_unmap_tx(priv->ca, tx_req);
+		ipoib_dma_unmap_tx(priv, tx_req);
 		dev_kfree_skb_any(tx_req->skb);
 		++priv->tx_tail;
 		--priv->tx_outstanding;
@@ -985,20 +971,21 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv)
 }
 
 static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
-				 enum ipoib_flush_level level)
+				 enum ipoib_flush_level level,
+				 int nesting)
 {
 	struct ipoib_dev_priv *cpriv;
 	struct net_device *dev = priv->dev;
 	int result;
 
-	down_read(&priv->vlan_rwsem);
+	down_read_nested(&priv->vlan_rwsem, nesting);
 
 	/*
 	 * Flush any child interfaces too -- they might be up even if
 	 * the parent is down.
 	 */
 	list_for_each_entry(cpriv, &priv->child_intfs, list)
-		__ipoib_ib_dev_flush(cpriv, level);
+		__ipoib_ib_dev_flush(cpriv, level, nesting + 1);
 
 	up_read(&priv->vlan_rwsem);
 
@@ -1076,7 +1063,7 @@ void ipoib_ib_dev_flush_light(struct work_struct *work)
 	struct ipoib_dev_priv *priv =
 		container_of(work, struct ipoib_dev_priv, flush_light);
 
-	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
+	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
 }
 
 void ipoib_ib_dev_flush_normal(struct work_struct *work)
@@ -1084,7 +1071,7 @@ void ipoib_ib_dev_flush_normal(struct work_struct *work)
 	struct ipoib_dev_priv *priv =
 		container_of(work, struct ipoib_dev_priv, flush_normal);
 
-	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
+	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
 }
 
 void ipoib_ib_dev_flush_heavy(struct work_struct *work)
@@ -1092,7 +1079,7 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
 	struct ipoib_dev_priv *priv =
 		container_of(work, struct ipoib_dev_priv, flush_heavy);
 
-	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
+	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
 }
 
 void ipoib_ib_dev_cleanup(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index da149c278cb8..b2943c84a5dd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -190,7 +190,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
 	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
-		features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
 
 	return features;
 }
@@ -232,6 +232,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
 		ipoib_warn(priv, "enabling connected mode "
 			   "will cause multicast packet drops\n");
 		netdev_update_features(dev);
+		dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
 		rtnl_unlock();
 		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
 
@@ -1577,7 +1578,8 @@ static struct net_device *ipoib_add_port(const char *format,
 	SET_NETDEV_DEV(priv->dev, hca->dma_device);
 	priv->dev->dev_id = port - 1;
 
-	if (!ib_query_port(hca, port, &attr))
+	result = ib_query_port(hca, port, &attr);
+	if (!result)
 		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
 	else {
 		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
@@ -1598,7 +1600,8 @@ static struct net_device *ipoib_add_port(const char *format,
 		goto device_init_failed;
 	}
 
-	if (ipoib_set_dev_features(priv, hca))
+	result = ipoib_set_dev_features(priv, hca);
+	if (result)
 		goto device_init_failed;
 
 	/*
@@ -1684,7 +1687,7 @@ static void ipoib_add_one(struct ib_device *device)
 	struct list_head *dev_list;
 	struct net_device *dev;
 	struct ipoib_dev_priv *priv;
-	int s, e, p;
+	int p;
 	int count = 0;
 
 	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
@@ -1693,15 +1696,7 @@ static void ipoib_add_one(struct ib_device *device)
 
 	INIT_LIST_HEAD(dev_list);
 
-	if (device->node_type == RDMA_NODE_IB_SWITCH) {
-		s = 0;
-		e = 0;
-	} else {
-		s = 1;
-		e = device->phys_port_cnt;
-	}
-
-	for (p = s; p <= e; ++p) {
+	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
 		if (!rdma_protocol_ib(device, p))
 			continue;
 		dev = ipoib_add_port("ib%d", device, p);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 267dc4f75502..31a20b462266 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -161,13 +161,10 @@ static int srp_tmo_set(const char *val, const struct kernel_param *kp)
 {
 	int tmo, res;
 
-	if (strncmp(val, "off", 3) != 0) {
-		res = kstrtoint(val, 0, &tmo);
-		if (res)
-			goto out;
-	} else {
-		tmo = -1;
-	}
+	res = srp_parse_tmo(&tmo, val);
+	if (res)
+		goto out;
+
 	if (kp->arg == &srp_reconnect_delay)
 		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
 				    srp_dev_loss_tmo);
@@ -3379,7 +3376,7 @@ static void srp_add_one(struct ib_device *device)
 	struct srp_device *srp_dev;
 	struct ib_device_attr *dev_attr;
 	struct srp_host *host;
-	int mr_page_shift, s, e, p;
+	int mr_page_shift, p;
 	u64 max_pages_per_mr;
 
 	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
@@ -3443,15 +3440,7 @@ static void srp_add_one(struct ib_device *device)
 	if (IS_ERR(srp_dev->mr))
 		goto err_pd;
 
-	if (device->node_type == RDMA_NODE_IB_SWITCH) {
-		s = 0;
-		e = 0;
-	} else {
-		s = 1;
-		e = device->phys_port_cnt;
-	}
-
-	for (p = s; p <= e; ++p) {
+	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
 		host = srp_add_port(srp_dev, p);
 		if (host)
 			list_add_tail(&host->list, &srp_dev->dev_list);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 82897ca17f32..60ff0a2390e5 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -302,7 +302,7 @@ static void srpt_get_iou(struct ib_dm_mad *mad)
 	int i;
 
 	ioui = (struct ib_dm_iou_info *)mad->data;
-	ioui->change_id = __constant_cpu_to_be16(1);
+	ioui->change_id = cpu_to_be16(1);
 	ioui->max_controllers = 16;
 
 	/* set present for slot 1 and empty for the rest */
@@ -330,13 +330,13 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
 
 	if (!slot || slot > 16) {
 		mad->mad_hdr.status
-			= __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
 		return;
 	}
 
 	if (slot > 2) {
 		mad->mad_hdr.status
-			= __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
 		return;
 	}
 
@@ -348,10 +348,10 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
 	iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
 	iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
 	iocp->subsys_device_id = 0x0;
-	iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
-	iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
-	iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
-	iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
+	iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
+	iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
+	iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
+	iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
 	iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
 	iocp->rdma_read_depth = 4;
 	iocp->send_size = cpu_to_be32(srp_max_req_size);
@@ -379,13 +379,13 @@ static void srpt_get_svc_entries(u64 ioc_guid,
 
 	if (!slot || slot > 16) {
 		mad->mad_hdr.status
-			= __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
 		return;
 	}
 
 	if (slot > 2 || lo > hi || hi > 1) {
 		mad->mad_hdr.status
-			= __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
 		return;
 	}
 
@@ -436,7 +436,7 @@ static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
 		break;
 	default:
 		rsp_mad->mad_hdr.status =
-		    __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
 		break;
 	}
 }
@@ -493,11 +493,11 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
 		break;
 	case IB_MGMT_METHOD_SET:
 		dm_mad->mad_hdr.status =
-		    __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
 		break;
 	default:
 		dm_mad->mad_hdr.status =
-		    __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
+		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
 		break;
 	}
 
@@ -1535,7 +1535,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
 	memset(srp_rsp, 0, sizeof *srp_rsp);
 	srp_rsp->opcode = SRP_RSP;
 	srp_rsp->req_lim_delta =
-		__constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
+		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
 	srp_rsp->tag = tag;
 	srp_rsp->status = status;
 
@@ -1585,8 +1585,8 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
 	memset(srp_rsp, 0, sizeof *srp_rsp);
 
 	srp_rsp->opcode = SRP_RSP;
-	srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
-				 + atomic_xchg(&ch->req_lim_delta, 0));
+	srp_rsp->req_lim_delta =
+		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
 	srp_rsp->tag = tag;
 
 	srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
@@ -1630,7 +1630,7 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
 	switch (len) {
 	case 8:
 		if ((*((__be64 *)lun) &
-		     __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
+		     cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
 			goto out_err;
 		break;
 	case 4:
@@ -2449,8 +2449,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 	}
 
 	if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
-		rej->reason = __constant_cpu_to_be32(
+		rej->reason = cpu_to_be32(
 				SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
 		ret = -EINVAL;
 		pr_err("rejected SRP_LOGIN_REQ because its"
 		       " length (%d bytes) is out of range (%d .. %d)\n",
@@ -2459,8 +2459,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 	}
 
 	if (!sport->enabled) {
-		rej->reason = __constant_cpu_to_be32(
+		rej->reason = cpu_to_be32(
 				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
 		ret = -EINVAL;
 		pr_err("rejected SRP_LOGIN_REQ because the target port"
 		       " has not yet been enabled\n");
@@ -2505,8 +2505,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 	if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
 	    || *(__be64 *)(req->target_port_id + 8) !=
 	       cpu_to_be64(srpt_service_guid)) {
-		rej->reason = __constant_cpu_to_be32(
+		rej->reason = cpu_to_be32(
 				SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
 		ret = -ENOMEM;
 		pr_err("rejected SRP_LOGIN_REQ because it"
 		       " has an invalid target port identifier.\n");
@@ -2515,8 +2515,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 
 	ch = kzalloc(sizeof *ch, GFP_KERNEL);
 	if (!ch) {
-		rej->reason = __constant_cpu_to_be32(
+		rej->reason = cpu_to_be32(
 				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
 		pr_err("rejected SRP_LOGIN_REQ because no memory.\n");
 		ret = -ENOMEM;
 		goto reject;
@@ -2552,8 +2552,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 
 	ret = srpt_create_ch_ib(ch);
 	if (ret) {
-		rej->reason = __constant_cpu_to_be32(
+		rej->reason = cpu_to_be32(
 				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
 		pr_err("rejected SRP_LOGIN_REQ because creating"
 		       " a new RDMA channel failed.\n");
 		goto free_ring;
@@ -2561,8 +2561,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 
 	ret = srpt_ch_qp_rtr(ch, ch->qp);
 	if (ret) {
-		rej->reason = __constant_cpu_to_be32(
-				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
 		pr_err("rejected SRP_LOGIN_REQ because enabling"
 		       " RTR failed (error code = %d)\n", ret);
 		goto destroy_ib;
@@ -2580,15 +2579,15 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 	if (!nacl) {
 		pr_info("Rejected login because no ACL has been"
 			" configured yet for initiator %s.\n", ch->sess_name);
-		rej->reason = __constant_cpu_to_be32(
+		rej->reason = cpu_to_be32(
 				SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
 		goto destroy_ib;
 	}
 
 	ch->sess = transport_init_session(TARGET_PROT_NORMAL);
 	if (IS_ERR(ch->sess)) {
-		rej->reason = __constant_cpu_to_be32(
+		rej->reason = cpu_to_be32(
 				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
 		pr_debug("Failed to create session\n");
 		goto deregister_session;
 	}
@@ -2604,8 +2603,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 	rsp->max_it_iu_len = req->req_it_iu_len;
 	rsp->max_ti_iu_len = req->req_it_iu_len;
 	ch->max_ti_iu_len = it_iu_len;
-	rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+	rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
 				   | SRP_BUF_FORMAT_INDIRECT);
 	rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
 	atomic_set(&ch->req_lim, ch->rq_size);
 	atomic_set(&ch->req_lim_delta, 0);
@@ -2655,8 +2654,8 @@ free_ch:
 reject:
 	rej->opcode = SRP_LOGIN_REJ;
 	rej->tag = req->tag;
-	rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+	rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
 				   | SRP_BUF_FORMAT_INDIRECT);
 
 	ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
 		       (void *)rej, sizeof *rej);
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index a85292b1d09d..e3cd3ece4412 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -203,7 +203,7 @@ static ssize_t srp_show_tmo(char *buf, int tmo)
 	return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
 }
 
-static int srp_parse_tmo(int *tmo, const char *buf)
+int srp_parse_tmo(int *tmo, const char *buf)
 {
 	int res = 0;
 
@@ -214,6 +214,7 @@ static int srp_parse_tmo(int *tmo, const char *buf)
 
 	return res;
 }
+EXPORT_SYMBOL(srp_parse_tmo);
 
 static ssize_t show_reconnect_delay(struct device *dev,
 				    struct device_attribute *attr, char *buf)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 986fddb08579..b0f898e3b2e7 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1745,6 +1745,7 @@ struct ib_device {
 	char node_desc[64];
 	__be64 node_guid;
 	u32 local_dma_lkey;
+	u16 is_switch:1;
 	u8 node_type;
 	u8 phys_port_cnt;
 
@@ -1824,6 +1825,20 @@ enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
 					       u8 port_num);
 
 /**
+ * rdma_cap_ib_switch - Check if the device is IB switch
+ * @device: Device to check
+ *
+ * Device driver is responsible for setting is_switch bit on
+ * in ib_device structure at init time.
+ *
+ * Return: true if the device is IB switch.
+ */
+static inline bool rdma_cap_ib_switch(const struct ib_device *device)
+{
+	return device->is_switch;
+}
+
+/**
  * rdma_start_port - Return the first valid port number for the device
  * specified
  *
@@ -1833,7 +1848,7 @@ enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
  */
 static inline u8 rdma_start_port(const struct ib_device *device)
 {
-	return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
+	return rdma_cap_ib_switch(device) ? 0 : 1;
 }
 
 /**
@@ -1846,8 +1861,7 @@ static inline u8 rdma_start_port(const struct ib_device *device)
  */
 static inline u8 rdma_end_port(const struct ib_device *device)
 {
-	return (device->node_type == RDMA_NODE_IB_SWITCH) ?
-		0 : device->phys_port_cnt;
+	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
 }
 
 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h
index cdb05dd1d440..d40d3ef25707 100644
--- a/include/scsi/scsi_transport_srp.h
+++ b/include/scsi/scsi_transport_srp.h
@@ -119,6 +119,7 @@ extern struct srp_rport *srp_rport_add(struct Scsi_Host *,
 extern void srp_rport_del(struct srp_rport *);
 extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo,
 			 int dev_loss_tmo);
+int srp_parse_tmo(int *tmo, const char *buf);
 extern int srp_reconnect_rport(struct srp_rport *rport);
 extern void srp_start_tl_fail_timers(struct srp_rport *rport);
 extern void srp_remove_host(struct Scsi_Host *);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 273b8bff6ba4..657ba9f5d308 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -759,8 +759,10 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
 	}
 
 	ibmr = rds_ib_alloc_fmr(rds_ibdev);
-	if (IS_ERR(ibmr))
+	if (IS_ERR(ibmr)) {
+		rds_ib_dev_put(rds_ibdev);
 		return ibmr;
+	}
 
 	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
 	if (ret == 0)