path: root/drivers/infiniband/ulp/ipoib/ipoib_ib.c
author    Eli Cohen <eli@dev.mellanox.co.il>    2008-04-17 00:09:27 -0400
committer Roland Dreier <rolandd@cisco.com>     2008-04-17 00:09:27 -0400
commit    40ca1988e03c001747d0b4cc1b25cf38297c9f9e
tree      5eb1d1e32b41409bd722df9c80deb326a227b09f
parent    c93570f23a98c633570397aedc6d1808f5d5846a
IPoIB: Add LSO support
For HCAs that support TCP segmentation offload (IB_DEVICE_UD_TSO), set NETIF_F_TSO and use HW LSO to offload TCP segmentation.

Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
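The netdev-facing half of the feature lives outside this file, so it is not in the diff below: the driver advertises NETIF_F_TSO only when the HCA reports the IB_DEVICE_UD_TSO capability. Below is a minimal sketch of that check, assuming an era-appropriate ib_query_device() call; example_enable_tso() and the NETIF_F_SG gate are illustrative assumptions, not code from this patch.

#include <linux/netdevice.h>
#include <rdma/ib_verbs.h>

/* Illustrative only: decide whether to advertise TSO on an IPoIB netdev. */
static void example_enable_tso(struct ib_device *ca, struct net_device *dev)
{
        struct ib_device_attr attr;

        if (ib_query_device(ca, &attr))
                return;         /* leave TSO off if the capability query fails */

        /*
         * The HCA must be able to build UD LSO headers, and the netdev should
         * already do scatter/gather TX, since segmented payload is posted as
         * page fragments.
         */
        if ((attr.device_cap_flags & IB_DEVICE_UD_TSO) &&
            (dev->features & NETIF_F_SG))
                dev->features |= NETIF_F_TSO;
}

With the flag set, the stack hands the driver large TCP skbs carrying gso_size, and the transmit path changed below posts each one as a single IB_WR_LSO work request instead of sending software-segmented packets.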
Diffstat (limited to 'drivers/infiniband/ulp/ipoib/ipoib_ib.c')
 -rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 107 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------------
 1 file changed, 76 insertions(+), 31 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index d13f4fb3853f..8b4ff69ecb80 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -39,6 +39,8 @@
 #include <linux/dma-mapping.h>
 
 #include <rdma/ib_cache.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
 
 #include "ipoib.h"
 
@@ -249,29 +251,37 @@ static int ipoib_dma_map_tx(struct ib_device *ca,
 	struct sk_buff *skb = tx_req->skb;
 	u64 *mapping = tx_req->mapping;
 	int i;
+	int off;
 
-	mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
-				       DMA_TO_DEVICE);
-	if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
-		return -EIO;
+	if (skb_headlen(skb)) {
+		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
+					       DMA_TO_DEVICE);
+		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
+			return -EIO;
+
+		off = 1;
+	} else
+		off = 0;
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		mapping[i + 1] = ib_dma_map_page(ca, frag->page,
+		mapping[i + off] = ib_dma_map_page(ca, frag->page,
 						 frag->page_offset, frag->size,
 						 DMA_TO_DEVICE);
-		if (unlikely(ib_dma_mapping_error(ca, mapping[i + 1])))
+		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
 			goto partial_error;
 	}
 	return 0;
 
 partial_error:
-	ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
-
 	for (; i > 0; --i) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
-		ib_dma_unmap_page(ca, mapping[i], frag->size, DMA_TO_DEVICE);
+		ib_dma_unmap_page(ca, mapping[i - !off], frag->size, DMA_TO_DEVICE);
 	}
+
+	if (off)
+		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+
 	return -EIO;
 }
 
@@ -281,12 +291,17 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
 	struct sk_buff *skb = tx_req->skb;
 	u64 *mapping = tx_req->mapping;
 	int i;
+	int off;
 
-	ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+	if (skb_headlen(skb)) {
+		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+		off = 1;
+	} else
+		off = 0;
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		ib_dma_unmap_page(ca, mapping[i + 1], frag->size,
+		ib_dma_unmap_page(ca, mapping[i + off], frag->size,
 				  DMA_TO_DEVICE);
 	}
 }
@@ -392,24 +407,40 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
 static inline int post_send(struct ipoib_dev_priv *priv,
 			    unsigned int wr_id,
 			    struct ib_ah *address, u32 qpn,
-			    u64 *mapping, int headlen,
-			    skb_frag_t *frags,
-			    int nr_frags)
+			    struct ipoib_tx_buf *tx_req,
+			    void *head, int hlen)
 {
 	struct ib_send_wr *bad_wr;
-	int i;
+	int i, off;
+	struct sk_buff *skb = tx_req->skb;
+	skb_frag_t *frags = skb_shinfo(skb)->frags;
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	u64 *mapping = tx_req->mapping;
+
+	if (skb_headlen(skb)) {
+		priv->tx_sge[0].addr = mapping[0];
+		priv->tx_sge[0].length = skb_headlen(skb);
+		off = 1;
+	} else
+		off = 0;
 
-	priv->tx_sge[0].addr = mapping[0];
-	priv->tx_sge[0].length = headlen;
 	for (i = 0; i < nr_frags; ++i) {
-		priv->tx_sge[i + 1].addr = mapping[i + 1];
-		priv->tx_sge[i + 1].length = frags[i].size;
+		priv->tx_sge[i + off].addr = mapping[i + off];
+		priv->tx_sge[i + off].length = frags[i].size;
 	}
-	priv->tx_wr.num_sge = nr_frags + 1;
+	priv->tx_wr.num_sge = nr_frags + off;
 	priv->tx_wr.wr_id = wr_id;
 	priv->tx_wr.wr.ud.remote_qpn = qpn;
 	priv->tx_wr.wr.ud.ah = address;
 
+	if (head) {
+		priv->tx_wr.wr.ud.mss = skb_shinfo(skb)->gso_size;
+		priv->tx_wr.wr.ud.header = head;
+		priv->tx_wr.wr.ud.hlen = hlen;
+		priv->tx_wr.opcode = IB_WR_LSO;
+	} else
+		priv->tx_wr.opcode = IB_WR_SEND;
+
 	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
 }
 
@@ -418,14 +449,30 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_tx_buf *tx_req;
-
-	if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
-		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
-			   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
-		++dev->stats.tx_dropped;
-		++dev->stats.tx_errors;
-		ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
-		return;
+	int hlen;
+	void *phead;
+
+	if (skb_is_gso(skb)) {
+		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		phead = skb->data;
+		if (unlikely(!skb_pull(skb, hlen))) {
+			ipoib_warn(priv, "linear data too small\n");
+			++dev->stats.tx_dropped;
+			++dev->stats.tx_errors;
+			dev_kfree_skb_any(skb);
+			return;
+		}
+	} else {
+		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
+			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
+				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
+			++dev->stats.tx_dropped;
+			++dev->stats.tx_errors;
+			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
+			return;
+		}
+		phead = NULL;
+		hlen  = 0;
 	}
 
 	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
@@ -452,9 +499,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
 
 	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
-			       address->ah, qpn,
-			       tx_req->mapping, skb_headlen(skb),
-			       skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags))) {
+			       address->ah, qpn, tx_req, phead, hlen))) {
 		ipoib_warn(priv, "post_send failed\n");
 		++dev->stats.tx_errors;
 		ipoib_dma_unmap_tx(priv->ca, tx_req);
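The new off bookkeeping in the hunks above exists because, for a GSO skb, the IP/TCP headers are pulled out of the linear area and handed to the HCA through the LSO work request, so skb_headlen() can end up zero and mapping[0]/tx_sge[0] may go unused. A condensed, illustrative restatement of that GSO branch follows (field names as in the hunks above; drop_it is a hypothetical error label, not driver code):

        if (skb_is_gso(skb)) {
                int hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
                void *phead = skb->data;        /* IP + TCP headers, replicated per segment by the HCA */

                if (unlikely(!skb_pull(skb, hlen)))
                        goto drop_it;           /* headers did not fit in the linear area */

                priv->tx_wr.wr.ud.header = phead;
                priv->tx_wr.wr.ud.hlen   = hlen;
                priv->tx_wr.wr.ud.mss    = skb_shinfo(skb)->gso_size;
                priv->tx_wr.opcode       = IB_WR_LSO;   /* hardware segments the payload into MSS-sized frames */
        } else {
                priv->tx_wr.opcode = IB_WR_SEND;        /* ordinary UD send, no offload */
        }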