-rw-r--r--  drivers/infiniband/core/agent.c             | 24
-rw-r--r--  drivers/infiniband/core/agent.h             |  6
-rw-r--r--  drivers/infiniband/core/device.c            |  2
-rw-r--r--  drivers/infiniband/core/mad.c               | 25
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c          |  8
-rw-r--r--  drivers/infiniband/core/sa_query.c          |  4
-rw-r--r--  drivers/infiniband/core/umem.c              |  5
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c       | 16
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c             |  2
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c            |  2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c  |  1
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c         | 31
-rw-r--r--  drivers/net/mlx4/reset.c                    |  3
-rw-r--r--  include/rdma/ib_mad.h                       |  2
-rw-r--r--  include/rdma/ib_verbs.h                     |  7
15 files changed, 77 insertions(+), 61 deletions(-)
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index db2633e4aae6..ae7c2880e624 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -78,15 +78,14 @@ ib_get_agent_port(struct ib_device *device, int port_num)
 	return entry;
 }
 
-int agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
+void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
 			struct ib_wc *wc, struct ib_device *device,
 			int port_num, int qpn)
 {
 	struct ib_agent_port_private *port_priv;
 	struct ib_mad_agent *agent;
 	struct ib_mad_send_buf *send_buf;
 	struct ib_ah *ah;
-	int ret;
 	struct ib_mad_send_wr_private *mad_send_wr;
 
 	if (device->node_type == RDMA_NODE_IB_SWITCH)
@@ -96,23 +95,21 @@ int agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
 
 	if (!port_priv) {
 		printk(KERN_ERR SPFX "Unable to find port agent\n");
-		return -ENODEV;
+		return;
 	}
 
 	agent = port_priv->agent[qpn];
 	ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
 	if (IS_ERR(ah)) {
-		ret = PTR_ERR(ah);
-		printk(KERN_ERR SPFX "ib_create_ah_from_wc error:%d\n", ret);
-		return ret;
+		printk(KERN_ERR SPFX "ib_create_ah_from_wc error\n");
+		return;
 	}
 
 	send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0,
 				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 				      GFP_KERNEL);
 	if (IS_ERR(send_buf)) {
-		ret = PTR_ERR(send_buf);
-		printk(KERN_ERR SPFX "ib_create_send_mad error:%d\n", ret);
+		printk(KERN_ERR SPFX "ib_create_send_mad error\n");
 		goto err1;
 	}
 
@@ -126,16 +123,15 @@ int agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
 		mad_send_wr->send_wr.wr.ud.port_num = port_num;
 	}
 
-	if ((ret = ib_post_send_mad(send_buf, NULL))) {
-		printk(KERN_ERR SPFX "ib_post_send_mad error:%d\n", ret);
+	if (ib_post_send_mad(send_buf, NULL)) {
+		printk(KERN_ERR SPFX "ib_post_send_mad error\n");
 		goto err2;
 	}
-	return 0;
+	return;
 err2:
 	ib_free_send_mad(send_buf);
 err1:
 	ib_destroy_ah(ah);
-	return ret;
 }
 
 static void agent_send_handler(struct ib_mad_agent *mad_agent,
diff --git a/drivers/infiniband/core/agent.h b/drivers/infiniband/core/agent.h
index 86d72fab37b0..fb9ed1489f95 100644
--- a/drivers/infiniband/core/agent.h
+++ b/drivers/infiniband/core/agent.h
@@ -46,8 +46,8 @@ extern int ib_agent_port_open(struct ib_device *device, int port_num);
 
 extern int ib_agent_port_close(struct ib_device *device, int port_num);
 
-extern int agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
+extern void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
 			       struct ib_wc *wc, struct ib_device *device,
 			       int port_num, int qpn);
 
 #endif	/* __AGENT_H_ */
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 3ada17c0f239..2506c43ba041 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -702,7 +702,7 @@ int ib_find_pkey(struct ib_device *device,
 		if (ret)
 			return ret;
 
-		if (pkey == tmp_pkey) {
+		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
 			*index = i;
 			return 0;
 		}
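
Why the mask: the top bit of a PKey (0x8000) is the membership bit,
distinguishing full from limited partition members; only the low 15 bits
name the partition, so two PKeys differing only in that bit refer to the
same partition. A minimal illustrative sketch (the helper name is
hypothetical, not part of this patch):

	/* Bit 15 of a PKey is the membership bit (set = full member);
	 * the low 15 bits are the partition number proper. */
	static inline int pkey_same_partition(u16 a, u16 b)
	{
		return (a & 0x7fff) == (b & 0x7fff);
	}

For example, pkey_same_partition(0xffff, 0x7fff) is nonzero: the full and
limited default PKeys name the same partition, which is why ib_find_pkey()
can now match either form.
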
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index bc547f1d34ba..6f4287716ab1 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1842,16 +1842,11 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 {
 	struct ib_mad_qp_info *qp_info;
 	struct ib_mad_private_header *mad_priv_hdr;
-	struct ib_mad_private *recv, *response;
+	struct ib_mad_private *recv, *response = NULL;
 	struct ib_mad_list_head *mad_list;
 	struct ib_mad_agent_private *mad_agent;
 	int port_num;
 
-	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
-	if (!response)
-		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
-		       "for response buffer\n");
-
 	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
 	qp_info = mad_list->mad_queue->qp_info;
 	dequeue_mad(mad_list);
@@ -1879,6 +1874,13 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
 		goto out;
 
+	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
+	if (!response) {
+		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
+		       "for response buffer\n");
+		goto out;
+	}
+
 	if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
 		port_num = wc->port_num;
 	else
@@ -1914,12 +1916,11 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 		response->header.recv_wc.recv_buf.mad = &response->mad.mad;
 		response->header.recv_wc.recv_buf.grh = &response->grh;
 
-		if (!agent_send_response(&response->mad.mad,
-					 &response->grh, wc,
-					 port_priv->device,
-					 smi_get_fwd_port(&recv->mad.smp),
-					 qp_info->qp->qp_num))
-			response = NULL;
+		agent_send_response(&response->mad.mad,
+				    &response->grh, wc,
+				    port_priv->device,
+				    smi_get_fwd_port(&recv->mad.smp),
+				    qp_info->qp->qp_num);
 
 		goto out;
 	}
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 3663fd7022be..d43bc62005b3 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -163,8 +163,10 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
 				 hdr_len, 0, GFP_KERNEL);
 	if (IS_ERR(msg))
 		ib_destroy_ah(ah);
-	else
+	else {
 		msg->ah = ah;
+		msg->context[0] = ah;
+	}
 
 	return msg;
 }
@@ -197,9 +199,7 @@ static void ack_ds_ack(struct ib_mad_agent_private *agent,
 
 void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
 {
-	struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad;
-
-	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_ACK)
+	if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah)
 		ib_destroy_ah(mad_send_wc->send_buf->ah);
 	ib_free_send_mad(mad_send_wc->send_buf);
 }
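
The pattern above: alloc_response_msg() records the AH it creates in the
send buffer's opaque context[0] slot, and the completion handler destroys
the AH only when that slot still matches msg->ah, i.e. only for AHs the
RMPP code itself owns; the old check keyed off the RMPP message type rather
than actual ownership. A sketch of the test as a standalone predicate (the
helper name is hypothetical, not part of this patch):

	/* True only for send buffers whose AH was created by
	 * alloc_response_msg(), which stamped context[0] = ah. */
	static inline int rmpp_owns_ah(struct ib_mad_send_buf *msg)
	{
		return msg->context[0] == msg->ah;
	}
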
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 20ab6b3e484d..d271bd715c12 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -385,9 +385,7 @@ static void update_sm_ah(struct work_struct *work)
 
 	new_ah->pkey_index = 0;
 	if (ib_find_pkey(port->agent->device, port->port_num,
-			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index) &&
-	    ib_find_pkey(port->agent->device, port->port_num,
-			 IB_DEFAULT_PKEY_PARTIAL, &new_ah->pkey_index))
+			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
 		printk(KERN_ERR "Couldn't find index for default PKey\n");
 
 	memset(&ah_attr, 0, sizeof ah_attr);
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 26d0470eef6e..664d2faa9e74 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -40,6 +40,11 @@
 
 #include "uverbs.h"
 
+#define IB_UMEM_MAX_PAGE_CHUNK						\
+	((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /	\
+	 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -	\
+	  (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
+
 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
 {
 	struct ib_umem_chunk *chunk, *tmp;
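
What the moved macro computes: the number of page_list entries that fit in
one page after the struct ib_umem_chunk header. The divisor is the size of
one array element, written as pointer arithmetic on a null struct pointer,
an old idiom equivalent to sizeof on the element. A hedged sketch of the
same arithmetic spelled with sizeof (macro names here are hypothetical):

	#include <linux/stddef.h>	/* offsetof */

	#define UMEM_CHUNK_HDR_BYTES	offsetof(struct ib_umem_chunk, page_list)
	#define UMEM_ENTRIES_PER_PAGE					\
		((PAGE_SIZE - UMEM_CHUNK_HDR_BYTES) /			\
		 sizeof(((struct ib_umem_chunk *) 0)->page_list[0]))

Moving the definition into umem.c keeps it private to its only user; the
matching hunk in include/rdma/ib_verbs.h below removes the exported copy.
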
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 9574088f0d4e..1cdfcd43b0bc 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -139,7 +139,7 @@ static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
 	skb->priority = CPL_PRIORITY_SETUP;
-	tdev->send(tdev, skb);
+	cxgb3_ofld_send(tdev, skb);
 	return;
 }
 
@@ -161,7 +161,7 @@ int iwch_quiesce_tid(struct iwch_ep *ep)
 	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);
 
 	skb->priority = CPL_PRIORITY_DATA;
-	ep->com.tdev->send(ep->com.tdev, skb);
+	cxgb3_ofld_send(ep->com.tdev, skb);
 	return 0;
 }
 
@@ -183,7 +183,7 @@ int iwch_resume_tid(struct iwch_ep *ep)
 	req->val = 0;
 
 	skb->priority = CPL_PRIORITY_DATA;
-	ep->com.tdev->send(ep->com.tdev, skb);
+	cxgb3_ofld_send(ep->com.tdev, skb);
 	return 0;
 }
 
@@ -784,7 +784,7 @@ static int update_rx_credits(struct iwch_ep *ep, u32 credits)
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
 	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
 	skb->priority = CPL_PRIORITY_ACK;
-	ep->com.tdev->send(ep->com.tdev, skb);
+	cxgb3_ofld_send(ep->com.tdev, skb);
 	return credits;
 }
 
@@ -1152,7 +1152,7 @@ static int listen_start(struct iwch_listen_ep *ep)
 	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));
 
 	skb->priority = 1;
-	ep->com.tdev->send(ep->com.tdev, skb);
+	cxgb3_ofld_send(ep->com.tdev, skb);
 	return 0;
 }
 
@@ -1186,7 +1186,7 @@ static int listen_stop(struct iwch_listen_ep *ep)
 	req->cpu_idx = 0;
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
 	skb->priority = 1;
-	ep->com.tdev->send(ep->com.tdev, skb);
+	cxgb3_ofld_send(ep->com.tdev, skb);
 	return 0;
 }
 
@@ -1264,7 +1264,7 @@ static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
 		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
 		rpl->opt2 = 0;
 		rpl->rsvd = rpl->opt2;
-		tdev->send(tdev, skb);
+		cxgb3_ofld_send(tdev, skb);
 	}
 }
 
@@ -1557,7 +1557,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
 	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
 	rpl->cmd = CPL_ABORT_NO_RST;
-	ep->com.tdev->send(ep->com.tdev, rpl_skb);
+	cxgb3_ofld_send(ep->com.tdev, rpl_skb);
 	if (state != ABORTING) {
 		state_set(&ep->com, DEAD);
 		release_ep_resources(ep);
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 660b27aecae5..8bf44daf45ec 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -389,7 +389,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 		wc->opcode = IB_WC_SEND;
 		break;
 	case MLX4_OPCODE_RDMA_READ:
-		wc->opcode = IB_WC_SEND;
+		wc->opcode = IB_WC_RDMA_READ;
 		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
 		break;
 	case MLX4_OPCODE_ATOMIC_CS:
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 333091787c5f..0ed02b7834da 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -109,7 +109,7 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
 			   in_modifier, op_modifier,
 			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
 
-	if (!err);
+	if (!err)
 		memcpy(response_mad, outmailbox->buf, 256);
 
 	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
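
The bug fixed above is a stray semicolon: "if (!err);" ends the conditional
with an empty statement, so the memcpy() beneath it ran unconditionally,
copying a response even when the MAD_IFC command had failed. A minimal
standalone demonstration of the hazard (not from the patch):

	#include <string.h>

	static void stray_semicolon_demo(int err, char *dst, const char *src)
	{
		if (!err);			/* BUG: ";" is the entire if body */
			memcpy(dst, src, 4);	/* always executes */
	}
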
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 982eb88e27ec..563aeacf9e14 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -211,6 +211,7 @@ out_free_cq:
 
 out_free_mr:
 	ib_dereg_mr(priv->mr);
+	ipoib_cm_dev_cleanup(dev);
 
 out_free_pd:
 	ib_dealloc_pd(priv->pd);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index f01ca182f226..f6a051428144 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -75,16 +75,12 @@ module_param(topspin_workarounds, int, 0444);
 MODULE_PARM_DESC(topspin_workarounds,
 		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
 
-static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
-
 static int mellanox_workarounds = 1;
 
 module_param(mellanox_workarounds, int, 0444);
 MODULE_PARM_DESC(mellanox_workarounds,
 		 "Enable workarounds for Mellanox SRP target bugs if != 0");
 
-static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 };
-
 static void srp_add_one(struct ib_device *device);
 static void srp_remove_one(struct ib_device *device);
 static void srp_completion(struct ib_cq *cq, void *target_ptr);
@@ -108,6 +104,24 @@ static const char *srp_target_info(struct Scsi_Host *host)
 	return host_to_target(host)->target_name;
 }
 
+static int srp_target_is_topspin(struct srp_target_port *target)
+{
+	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
+	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
+
+	return topspin_workarounds &&
+		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
+		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
+}
+
+static int srp_target_is_mellanox(struct srp_target_port *target)
+{
+	static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 };
+
+	return mellanox_workarounds &&
+		!memcmp(&target->ioc_guid, mellanox_oui, sizeof mellanox_oui);
+}
+
 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
 				   gfp_t gfp_mask,
 				   enum dma_data_direction direction)
@@ -360,7 +374,7 @@ static int srp_send_req(struct srp_target_port *target)
 	 * zero out the first 8 bytes of our initiator port ID and set
 	 * the second 8 bytes to the local node GUID.
 	 */
-	if (topspin_workarounds && !memcmp(&target->ioc_guid, topspin_oui, 3)) {
+	if (srp_target_is_topspin(target)) {
 		printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
 		       "activated for target GUID %016llx\n",
 		       (unsigned long long) be64_to_cpu(target->ioc_guid));
@@ -585,8 +599,8 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
 	if (!dev->fmr_pool)
 		return -ENODEV;
 
-	if ((ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask) &&
-	    mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3))
+	if (srp_target_is_mellanox(target) &&
+	    (ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask))
 		return -EINVAL;
 
 	len = page_cnt = 0;
@@ -1087,8 +1101,7 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
 		break;
 
 	case IB_CM_REJ_PORT_REDIRECT:
-		if (topspin_workarounds &&
-		    !memcmp(&target->ioc_guid, topspin_oui, 3)) {
+		if (srp_target_is_topspin(target)) {
 			/*
 			 * Topspin/Cisco SRP gateways incorrectly send
 			 * reject reason code 25 when they mean 24
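
Both new helpers rely on the same property: an IB GUID is stored big-endian,
so its first three bytes are the manufacturer's IEEE OUI, and memcmp()
against the GUID's address tests the vendor prefix directly. A generic
sketch of that test (the helper name is hypothetical, not part of this
patch):

	/* Bytes 0..2 of a big-endian EUI-64 style GUID hold the vendor
	 * OUI, regardless of host endianness. */
	static int guid_matches_oui(const __be64 *guid, const u8 oui[3])
	{
		return !memcmp(guid, oui, 3);
	}

Folding the OUI tables into the helpers lets the patch recognize Cisco's
own OUI (00:1b:0d) alongside the legacy Topspin one without touching any
call site.
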
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c
index e4dfd4b11a4a..e199715fabd0 100644
--- a/drivers/net/mlx4/reset.c
+++ b/drivers/net/mlx4/reset.c
@@ -119,6 +119,9 @@ int mlx4_reset(struct mlx4_dev *dev)
 	writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET);
 	iounmap(reset);
 
+	/* Docs say to wait one second before accessing device */
+	msleep(1000);
+
 	end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
 	do {
 		if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) &&
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 30712ddd8a5e..8ec3799e42e1 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -39,6 +39,8 @@
 #if !defined( IB_MAD_H )
 #define IB_MAD_H
 
+#include <linux/list.h>
+
 #include <rdma/ib_verbs.h>
 
 /* Management base version */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0627a6aa282a..4bea182d7116 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -46,6 +46,8 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/rwsem.h>
 
 #include <asm/atomic.h>
 #include <asm/scatterlist.h>
@@ -731,11 +733,6 @@ struct ib_udata {
 	size_t	outlen;
 };
 
-#define IB_UMEM_MAX_PAGE_CHUNK						\
-	((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /	\
-	 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -	\
-	  (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
-
 struct ib_pd {
 	struct ib_device       *device;
 	struct ib_uobject      *uobject;