Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--  drivers/infiniband/ulp/ipoib/Kconfig     |  2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c  | 50
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c  | 33
-rw-r--r--  drivers/infiniband/ulp/iser/Kconfig      |  2
-rw-r--r--  drivers/infiniband/ulp/srp/Kconfig       |  2
5 files changed, 43 insertions, 46 deletions
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index af78ccc4ce71..1f76bad020f3 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -1,6 +1,6 @@
 config INFINIBAND_IPOIB
         tristate "IP-over-InfiniBand"
-        depends on INFINIBAND && NETDEVICES && INET && (IPV6 || IPV6=n)
+        depends on NETDEVICES && INET && (IPV6 || IPV6=n)
         ---help---
           Support for the IP-over-InfiniBand protocol (IPoIB). This
           transports IP packets over InfiniBand so you can use your IB
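
The "INFINIBAND &&" term dropped here (and in the iser and srp hunks below) is consistent with a menuconfig conversion: the parent drivers/infiniband/Kconfig wraps the sourced ULP entries in an if/endif block, so each entry inherits the INFINIBAND dependency and no longer needs to repeat it. A sketch of that presumed parent structure:

menuconfig INFINIBAND
        tristate "InfiniBand support"

if INFINIBAND
# ... core options ...
source "drivers/infiniband/ulp/ipoib/Kconfig"
source "drivers/infiniband/ulp/srp/Kconfig"
source "drivers/infiniband/ulp/iser/Kconfig"
endif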
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 076a0bbb63d7..08b4676a3820 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -56,13 +56,6 @@ MODULE_PARM_DESC(cm_data_debug_level,
 #define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
 #define IPOIB_CM_RX_UPDATE_MASK (0x3)
 
-struct ipoib_cm_id {
-        struct ib_cm_id *id;
-        int flags;
-        u32 remote_qpn;
-        u32 remote_mtu;
-};
-
 static struct ib_qp_attr ipoib_cm_err_attr = {
         .qp_state = IB_QPS_ERR
 };
@@ -155,8 +148,8 @@ partial_error:
 
         ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
 
-        for (; i >= 0; --i)
-                ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
+        for (; i > 0; --i)
+                ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
 
         dev_kfree_skb_any(skb);
         return NULL;
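
The two-line change fixes an off-by-one in the partial_error unwind: frag i is the one whose allocation or mapping just failed, so only mapping[1]..mapping[i] are live, yet the old loop also unmapped mapping[i + 1], which was never mapped. A standalone sketch of the indexing (plain C with illustrative names, not the kernel allocation path):

#include <assert.h>
#include <stdio.h>

#define FRAGS 4

int main(void)
{
        int mapped[FRAGS + 1] = { 1 };  /* mapping[0]: head, mapped first */
        int i;

        for (i = 0; i < FRAGS; i++) {
                if (i == 2)             /* pretend mapping frag 2 fails */
                        break;
                mapped[i + 1] = 1;      /* frag i lives in mapping[i + 1] */
        }

        /* Old unwind -- for (; i >= 0; --i) unmap(mapping[i + 1]); --
         * would touch mapping[3] here, which was never mapped.  Fixed: */
        for (; i > 0; --i) {
                assert(mapped[i]);      /* only live mappings are undone */
                mapped[i] = 0;
        }
        mapped[0] = 0;                  /* head is unmapped separately */
        puts("partial_error unwind ok");
        return 0;
}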
@@ -288,7 +281,6 @@ static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
         rep.private_data_len = sizeof data;
         rep.flow_control = 0;
         rep.rnr_retry_count = req->rnr_retry_count;
-        rep.target_ack_delay = 20; /* FIXME */
         rep.srq = 1;
         rep.qp_num = qp->qp_num;
         rep.starting_psn = psn;
@@ -309,6 +301,11 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
                 return -ENOMEM;
         p->dev = dev;
         p->id = cm_id;
+        cm_id->context = p;
+        p->state = IPOIB_CM_RX_LIVE;
+        p->jiffies = jiffies;
+        INIT_LIST_HEAD(&p->list);
+
         p->qp = ipoib_cm_create_rx_qp(dev, p);
         if (IS_ERR(p->qp)) {
                 ret = PTR_ERR(p->qp);
@@ -320,24 +317,24 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
         if (ret)
                 goto err_modify;
 
+        spin_lock_irq(&priv->lock);
+        queue_delayed_work(ipoib_workqueue,
+                           &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
+        /* Add this entry to passive ids list head, but do not re-add it
+         * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
+        p->jiffies = jiffies;
+        if (p->state == IPOIB_CM_RX_LIVE)
+                list_move(&p->list, &priv->cm.passive_ids);
+        spin_unlock_irq(&priv->lock);
+
         ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
         if (ret) {
                 ipoib_warn(priv, "failed to send REP: %d\n", ret);
-                goto err_rep;
+                if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
+                        ipoib_warn(priv, "unable to move qp to error state\n");
         }
-
-        cm_id->context = p;
-        p->jiffies = jiffies;
-        p->state = IPOIB_CM_RX_LIVE;
-        spin_lock_irq(&priv->lock);
-        if (list_empty(&priv->cm.passive_ids))
-                queue_delayed_work(ipoib_workqueue,
-                                   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
-        list_add(&p->list, &priv->cm.passive_ids);
-        spin_unlock_irq(&priv->lock);
         return 0;
 
-err_rep:
 err_modify:
         ib_destroy_qp(p->qp);
 err_qp:
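
Taken together with the previous hunk, the REQ handler now fully initializes the connection (cm_id->context, state, jiffies, list head) before the QP is created and the REP is sent, queues the stale task unconditionally, and uses list_move() instead of list_add() so an entry that IB_EVENT_QP_LAST_WQE_REACHED has already moved to a flush list is not re-added; a failed REP no longer unwinds through err_rep but moves the QP to the error state in place. The unconditional list_move() on an entry that may never have been listed is safe because INIT_LIST_HEAD() leaves the node self-linked, so the internal list_del() unlinks nothing. A standalone re-implementation of just that property (illustrative, not the kernel's <linux/list.h>):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_del(struct list_head *e)
{
        e->prev->next = e->next;        /* no-op when e is self-linked */
        e->next->prev = e->prev;
}

static void list_add(struct list_head *e, struct list_head *h)
{
        e->next = h->next;
        e->prev = h;
        h->next->prev = e;
        h->next = e;
}

static void list_move(struct list_head *e, struct list_head *h)
{
        list_del(e);
        list_add(e, h);
}

int main(void)
{
        struct list_head passive_ids, p;

        INIT_LIST_HEAD(&passive_ids);
        INIT_LIST_HEAD(&p);             /* as in the REQ handler above */
        list_move(&p, &passive_ids);    /* safe: p was never on a list */
        printf("p on passive_ids: %d\n",
               passive_ids.next == &p && passive_ids.prev == &p);
        return 0;
}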
@@ -754,9 +751,9 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 
         p->mtu = be32_to_cpu(data->mtu);
 
-        if (p->mtu < priv->dev->mtu + IPOIB_ENCAP_LEN) {
-                ipoib_warn(priv, "Rejecting connection: mtu %d < device mtu %d + 4\n",
-                           p->mtu, priv->dev->mtu);
+        if (p->mtu <= IPOIB_ENCAP_LEN) {
+                ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
+                           p->mtu, IPOIB_ENCAP_LEN);
                 return -EINVAL;
         }
 
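
The relaxed test accepts any peer whose advertised MTU can carry at least one byte past the 4-byte IPoIB encapsulation header, where the old test rejected peers advertising less than the local device MTU plus that header, presumably so that ends configured with different MTUs can still connect. A minimal sketch of the new condition (ipoib_cm_mtu_ok is a hypothetical helper, not a driver function):

#include <stdbool.h>

#define IPOIB_ENCAP_LEN 4       /* 4-byte IPoIB encapsulation header */

/* hypothetical helper, not in the driver */
static inline bool ipoib_cm_mtu_ok(unsigned int mtu)
{
        /* was: mtu >= dev->mtu + IPOIB_ENCAP_LEN -- rejected peers
         * whose configured MTU was smaller than the local one */
        return mtu > IPOIB_ENCAP_LEN;
}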
@@ -1150,7 +1147,6 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
 {
         struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                    cm.skb_task);
-        struct net_device *dev = priv->dev;
         struct sk_buff *skb;
 
         unsigned mtu = priv->mcast_mtu;
@@ -1164,7 +1160,7 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                 else if (skb->protocol == htons(ETH_P_IPV6))
-                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
 #endif
                 dev_kfree_skb_any(skb);
                 spin_lock_irq(&priv->tx_lock);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 8404f05b2b6e..10944888cffd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -197,6 +197,13 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
         }
 
         /*
+         * Drop packets that this interface sent, ie multicast packets
+         * that the HCA has replicated.
+         */
+        if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
+                goto repost;
+
+        /*
          * If we can't allocate a new RX buffer, dump
          * this packet and reuse the old buffer.
          */
@@ -213,24 +220,18 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
         skb_put(skb, wc->byte_len);
         skb_pull(skb, IB_GRH_BYTES);
 
-        if (wc->slid != priv->local_lid ||
-            wc->src_qp != priv->qp->qp_num) {
-                skb->protocol = ((struct ipoib_header *) skb->data)->proto;
-                skb_reset_mac_header(skb);
-                skb_pull(skb, IPOIB_ENCAP_LEN);
+        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
+        skb_reset_mac_header(skb);
+        skb_pull(skb, IPOIB_ENCAP_LEN);
 
-                dev->last_rx = jiffies;
-                ++priv->stats.rx_packets;
-                priv->stats.rx_bytes += skb->len;
+        dev->last_rx = jiffies;
+        ++priv->stats.rx_packets;
+        priv->stats.rx_bytes += skb->len;
 
-                skb->dev = dev;
-                /* XXX get correct PACKET_ type here */
-                skb->pkt_type = PACKET_HOST;
-                netif_receive_skb(skb);
-        } else {
-                ipoib_dbg_data(priv, "dropping loopback packet\n");
-                dev_kfree_skb_any(skb);
-        }
+        skb->dev = dev;
+        /* XXX get correct PACKET_ type here */
+        skb->pkt_type = PACKET_HOST;
+        netif_receive_skb(skb);
 
 repost:
         if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
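
This hunk pairs with the previous one: the loopback test (packets this interface sent, replicated back by the HCA) moves ahead of the replacement-buffer allocation, so dropped copies cost neither an allocation nor a dev_kfree_skb_any(), and the delivery path loses a level of indentation. A standalone sketch of the reordered control flow (all names are illustrative stand-ins, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct wc { int slid, src_qp; };

static const int local_lid = 7, local_qpn = 42;

static bool alloc_rx_buffer(void) { puts("alloc replacement buffer"); return true; }
static void repost_buffer(void)   { puts("repost receive buffer"); }
static void deliver(void)         { puts("deliver skb to stack"); }

static void handle_rx(const struct wc *wc)
{
        /* drop HCA-replicated copies of our own multicast sends,
         * before any allocation happens */
        if (wc->slid == local_lid && wc->src_qp == local_qpn)
                goto repost;

        if (!alloc_rx_buffer())
                goto repost;    /* dump this packet, reuse old buffer */

        deliver();              /* single, unconditional delivery path */
repost:
        repost_buffer();
}

int main(void)
{
        struct wc loopback = { 7, 42 }, remote = { 9, 11 };

        handle_rx(&loopback);   /* repost only */
        handle_rx(&remote);     /* alloc + deliver + repost */
        return 0;
}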
diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig
index aecbb9083f0c..fe604c8d2996 100644
--- a/drivers/infiniband/ulp/iser/Kconfig
+++ b/drivers/infiniband/ulp/iser/Kconfig
@@ -1,6 +1,6 @@
 config INFINIBAND_ISER
         tristate "iSCSI Extensions for RDMA (iSER)"
-        depends on INFINIBAND && SCSI && INET
+        depends on SCSI && INET
         select SCSI_ISCSI_ATTRS
         ---help---
           Support for the iSCSI Extensions for RDMA (iSER) Protocol
diff --git a/drivers/infiniband/ulp/srp/Kconfig b/drivers/infiniband/ulp/srp/Kconfig
index 8fe3be4e9910..3432dce29520 100644
--- a/drivers/infiniband/ulp/srp/Kconfig
+++ b/drivers/infiniband/ulp/srp/Kconfig
@@ -1,6 +1,6 @@
 config INFINIBAND_SRP
         tristate "InfiniBand SCSI RDMA Protocol"
-        depends on INFINIBAND && SCSI
+        depends on SCSI
         ---help---
           Support for the SCSI RDMA Protocol over InfiniBand. This
           allows you to access storage devices that speak SRP over