aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband/ulp/ipoib
diff options
context:
space:
mode:
authorPradeep Satyanarayana <pradeeps@linux.vnet.ibm.com>2007-12-21 16:08:23 -0500
committerRoland Dreier <rolandd@cisco.com>2008-01-25 17:15:37 -0500
commit586a693448676de5174e752426ced69ec79ab174 (patch)
treea42a760115ccdde07853428c485296ce364a9b7d /drivers/infiniband/ulp/ipoib
parentfff09a8e6e726f0752254e1f46f7224e3bebb302 (diff)
IPoIB/CM: Enable SRQ support on HCAs that support fewer than 16 SG entries
Some HCAs (such as ehca2) support SRQ, but only support fewer than 16 SG entries for SRQs. Currently IPoIB/CM implicitly assumes all HCAs will support 16 SG entries for SRQs (to handle a 64K MTU with 4K pages). This patch removes that restriction by limiting the maximum MTU in connected mode to what the maximum number of SRQ SG entries allows.

This patch addresses <https://bugs.openfabrics.org/show_bug.cgi?id=728>

Signed-off-by: Pradeep Satyanarayana <pradeeps@linux.vnet.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/ulp/ipoib')
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c41
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c9
3 files changed, 49 insertions, 14 deletions
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index d35025f0652b..fe250c60607d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -238,6 +238,8 @@ struct ipoib_cm_dev_priv {
238 struct ib_sge rx_sge[IPOIB_CM_RX_SG]; 238 struct ib_sge rx_sge[IPOIB_CM_RX_SG];
239 struct ib_recv_wr rx_wr; 239 struct ib_recv_wr rx_wr;
240 int nonsrq_conn_qp; 240 int nonsrq_conn_qp;
241 int max_cm_mtu;
242 int num_frags;
241}; 243};
242 244
243/* 245/*
@@ -503,6 +505,12 @@ static inline int ipoib_cm_has_srq(struct net_device *dev)
503 return !!priv->cm.srq; 505 return !!priv->cm.srq;
504} 506}
505 507
508static inline unsigned int ipoib_cm_max_mtu(struct net_device *dev)
509{
510 struct ipoib_dev_priv *priv = netdev_priv(dev);
511 return priv->cm.max_cm_mtu;
512}
513
506void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx); 514void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx);
507int ipoib_cm_dev_open(struct net_device *dev); 515int ipoib_cm_dev_open(struct net_device *dev);
508void ipoib_cm_dev_stop(struct net_device *dev); 516void ipoib_cm_dev_stop(struct net_device *dev);
@@ -552,6 +560,11 @@ static inline int ipoib_cm_has_srq(struct net_device *dev)
552 return 0; 560 return 0;
553} 561}
554 562
563static inline unsigned int ipoib_cm_max_mtu(struct net_device *dev)
564{
565 return 0;
566}
567
555static inline 568static inline
556void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx) 569void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
557{ 570{
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index fdf33cecc6d5..1818f958c250 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -96,13 +96,13 @@ static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
96 96
97 priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV; 97 priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
98 98
99 for (i = 0; i < IPOIB_CM_RX_SG; ++i) 99 for (i = 0; i < priv->cm.num_frags; ++i)
100 priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i]; 100 priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];
101 101
102 ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr); 102 ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
103 if (unlikely(ret)) { 103 if (unlikely(ret)) {
104 ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret); 104 ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
105 ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1, 105 ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
106 priv->cm.srq_ring[id].mapping); 106 priv->cm.srq_ring[id].mapping);
107 dev_kfree_skb_any(priv->cm.srq_ring[id].skb); 107 dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
108 priv->cm.srq_ring[id].skb = NULL; 108 priv->cm.srq_ring[id].skb = NULL;
@@ -1399,13 +1399,13 @@ int ipoib_cm_add_mode_attr(struct net_device *dev)
1399 return device_create_file(&dev->dev, &dev_attr_mode); 1399 return device_create_file(&dev->dev, &dev_attr_mode);
1400} 1400}
1401 1401
1402static void ipoib_cm_create_srq(struct net_device *dev) 1402static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
1403{ 1403{
1404 struct ipoib_dev_priv *priv = netdev_priv(dev); 1404 struct ipoib_dev_priv *priv = netdev_priv(dev);
1405 struct ib_srq_init_attr srq_init_attr = { 1405 struct ib_srq_init_attr srq_init_attr = {
1406 .attr = { 1406 .attr = {
1407 .max_wr = ipoib_recvq_size, 1407 .max_wr = ipoib_recvq_size,
1408 .max_sge = IPOIB_CM_RX_SG 1408 .max_sge = max_sge
1409 } 1409 }
1410 }; 1410 };
1411 1411
@@ -1431,7 +1431,8 @@ static void ipoib_cm_create_srq(struct net_device *dev)
1431int ipoib_cm_dev_init(struct net_device *dev) 1431int ipoib_cm_dev_init(struct net_device *dev)
1432{ 1432{
1433 struct ipoib_dev_priv *priv = netdev_priv(dev); 1433 struct ipoib_dev_priv *priv = netdev_priv(dev);
1434 int i; 1434 int i, ret;
1435 struct ib_device_attr attr;
1435 1436
1436 INIT_LIST_HEAD(&priv->cm.passive_ids); 1437 INIT_LIST_HEAD(&priv->cm.passive_ids);
1437 INIT_LIST_HEAD(&priv->cm.reap_list); 1438 INIT_LIST_HEAD(&priv->cm.reap_list);
@@ -1448,22 +1449,40 @@ int ipoib_cm_dev_init(struct net_device *dev)
1448 1449
1449 skb_queue_head_init(&priv->cm.skb_queue); 1450 skb_queue_head_init(&priv->cm.skb_queue);
1450 1451
1451 for (i = 0; i < IPOIB_CM_RX_SG; ++i) 1452 ret = ib_query_device(priv->ca, &attr);
1453 if (ret) {
1454 printk(KERN_WARNING "ib_query_device() failed with %d\n", ret);
1455 return ret;
1456 }
1457
1458 ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);
1459
1460 attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
1461 ipoib_cm_create_srq(dev, attr.max_srq_sge);
1462 if (ipoib_cm_has_srq(dev)) {
1463 priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10;
1464 priv->cm.num_frags = attr.max_srq_sge;
1465 ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
1466 priv->cm.max_cm_mtu, priv->cm.num_frags);
1467 } else {
1468 priv->cm.max_cm_mtu = IPOIB_CM_MTU;
1469 priv->cm.num_frags = IPOIB_CM_RX_SG;
1470 }
1471
1472 for (i = 0; i < priv->cm.num_frags; ++i)
1452 priv->cm.rx_sge[i].lkey = priv->mr->lkey; 1473 priv->cm.rx_sge[i].lkey = priv->mr->lkey;
1453 1474
1454 priv->cm.rx_sge[0].length = IPOIB_CM_HEAD_SIZE; 1475 priv->cm.rx_sge[0].length = IPOIB_CM_HEAD_SIZE;
1455 for (i = 1; i < IPOIB_CM_RX_SG; ++i) 1476 for (i = 1; i < priv->cm.num_frags; ++i)
1456 priv->cm.rx_sge[i].length = PAGE_SIZE; 1477 priv->cm.rx_sge[i].length = PAGE_SIZE;
1457 priv->cm.rx_wr.next = NULL; 1478 priv->cm.rx_wr.next = NULL;
1458 priv->cm.rx_wr.sg_list = priv->cm.rx_sge; 1479 priv->cm.rx_wr.sg_list = priv->cm.rx_sge;
1459 priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG; 1480 priv->cm.rx_wr.num_sge = priv->cm.num_frags;
1460
1461 ipoib_cm_create_srq(dev);
1462 1481
1463 if (ipoib_cm_has_srq(dev)) { 1482 if (ipoib_cm_has_srq(dev)) {
1464 for (i = 0; i < ipoib_recvq_size; ++i) { 1483 for (i = 0; i < ipoib_recvq_size; ++i) {
1465 if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i, 1484 if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
1466 IPOIB_CM_RX_SG - 1, 1485 priv->cm.num_frags - 1,
1467 priv->cm.srq_ring[i].mapping)) { 1486 priv->cm.srq_ring[i].mapping)) {
1468 ipoib_warn(priv, "failed to allocate " 1487 ipoib_warn(priv, "failed to allocate "
1469 "receive buffer %d\n", i); 1488 "receive buffer %d\n", i);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 3bfc2ef1303e..d7330451685c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -182,17 +182,20 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
182 struct ipoib_dev_priv *priv = netdev_priv(dev); 182 struct ipoib_dev_priv *priv = netdev_priv(dev);
183 183
184 /* dev->mtu > 2K ==> connected mode */ 184 /* dev->mtu > 2K ==> connected mode */
185 if (ipoib_cm_admin_enabled(dev) && new_mtu <= IPOIB_CM_MTU) { 185 if (ipoib_cm_admin_enabled(dev)) {
186 if (new_mtu > ipoib_cm_max_mtu(dev))
187 return -EINVAL;
188
186 if (new_mtu > priv->mcast_mtu) 189 if (new_mtu > priv->mcast_mtu)
187 ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n", 190 ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
188 priv->mcast_mtu); 191 priv->mcast_mtu);
192
189 dev->mtu = new_mtu; 193 dev->mtu = new_mtu;
190 return 0; 194 return 0;
191 } 195 }
192 196
193 if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN) { 197 if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
194 return -EINVAL; 198 return -EINVAL;
195 }
196 199
197 priv->admin_mtu = new_mtu; 200 priv->admin_mtu = new_mtu;
198 201