 drivers/infiniband/ulp/ipoib/ipoib_cm.c | 46 +++++++++++++++++++++++++++-------------------
 1 file changed, 27 insertions(+), 19 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 4d59682f7d4a..3484e8ba24a4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -65,14 +65,14 @@ struct ipoib_cm_id {
 static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event);
 
-static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv,
+static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
                                  u64 mapping[IPOIB_CM_RX_SG])
 {
        int i;
 
        ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
 
-       for (i = 0; i < IPOIB_CM_RX_SG - 1; ++i)
+       for (i = 0; i < frags; ++i)
                ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
 }
 
@@ -90,7 +90,8 @@ static int ipoib_cm_post_receive(struct net_device *dev, int id)
        ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
-               ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[id].mapping);
+               ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+                                     priv->cm.srq_ring[id].mapping);
                dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
                priv->cm.srq_ring[id].skb = NULL;
        }
@@ -98,8 +99,8 @@ static int ipoib_cm_post_receive(struct net_device *dev, int id)
        return ret;
 }
 
-static int ipoib_cm_alloc_rx_skb(struct net_device *dev, int id,
-                                u64 mapping[IPOIB_CM_RX_SG])
+static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int frags,
+                                            u64 mapping[IPOIB_CM_RX_SG])
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
@@ -107,7 +108,7 @@ static int ipoib_cm_alloc_rx_skb(struct net_device *dev, int id,
 
        skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
        if (unlikely(!skb))
-               return -ENOMEM;
+               return NULL;
 
        /*
         * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
@@ -119,10 +120,10 @@ static int ipoib_cm_alloc_rx_skb(struct net_device *dev, int id,
                                       DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
                dev_kfree_skb_any(skb);
-               return -EIO;
+               return NULL;
        }
 
-       for (i = 0; i < IPOIB_CM_RX_SG - 1; i++) {
+       for (i = 0; i < frags; i++) {
                struct page *page = alloc_page(GFP_ATOMIC);
 
                if (!page)
@@ -136,7 +137,7 @@ static int ipoib_cm_alloc_rx_skb(struct net_device *dev, int id,
        }
 
        priv->cm.srq_ring[id].skb = skb;
-       return 0;
+       return skb;
 
 partial_error:
 
@@ -146,7 +147,7 @@ partial_error:
                ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
 
        dev_kfree_skb_any(skb);
-       return -ENOMEM;
+       return NULL;
 }
 
 static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
@@ -309,7 +310,7 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
 }
 /* Adjust length of skb with fragments to match received data */
 static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
-                         unsigned int length)
+                         unsigned int length, struct sk_buff *toskb)
 {
        int i, num_frags;
        unsigned int size;
@@ -326,7 +327,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
 
                if (length == 0) {
                        /* don't need this page */
-                       __free_page(frag->page);
+                       skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
                        --skb_shinfo(skb)->nr_frags;
                } else {
                        size = min(length, (unsigned) PAGE_SIZE);
@@ -344,10 +345,11 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id & ~IPOIB_CM_OP_SRQ;
-       struct sk_buff *skb;
+       struct sk_buff *skb, *newskb;
        struct ipoib_cm_rx *p;
        unsigned long flags;
        u64 mapping[IPOIB_CM_RX_SG];
+       int frags;
 
        ipoib_dbg_data(priv, "cm recv completion: id %d, op %d, status: %d\n",
                       wr_id, wc->opcode, wc->status);
@@ -383,7 +385,11 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
                }
        }
 
-       if (unlikely(ipoib_cm_alloc_rx_skb(dev, wr_id, mapping))) {
+       frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
+                                             (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
+
+       newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, mapping);
+       if (unlikely(!newskb)) {
                /*
                 * If we can't allocate a new RX buffer, dump
                 * this packet and reuse the old buffer.
@@ -393,13 +399,13 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
                goto repost;
        }
 
-       ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[wr_id].mapping);
-       memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, sizeof mapping);
+       ipoib_cm_dma_unmap_rx(priv, frags, priv->cm.srq_ring[wr_id].mapping);
+       memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
 
        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);
 
-       skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len);
+       skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);
 
        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
        skb->mac.raw = skb->data;
@@ -1193,7 +1199,8 @@ int ipoib_cm_dev_init(struct net_device *dev)
        priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG;
 
        for (i = 0; i < ipoib_recvq_size; ++i) {
-               if (ipoib_cm_alloc_rx_skb(dev, i, priv->cm.srq_ring[i].mapping)) {
+               if (!ipoib_cm_alloc_rx_skb(dev, i, IPOIB_CM_RX_SG - 1,
+                                          priv->cm.srq_ring[i].mapping)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        ipoib_cm_dev_cleanup(dev);
                        return -ENOMEM;
@@ -1228,7 +1235,8 @@ void ipoib_cm_dev_cleanup(struct net_device *dev)
                return;
        for (i = 0; i < ipoib_recvq_size; ++i)
                if (priv->cm.srq_ring[i].skb) {
-                       ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[i].mapping);
+                       ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+                                             priv->cm.srq_ring[i].mapping);
                        dev_kfree_skb_any(priv->cm.srq_ring[i].skb);
                        priv->cm.srq_ring[i].skb = NULL;
                }
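
For context on the hunk in ipoib_cm_handle_rx_wc(): the new "frags" value decides how many PAGE_SIZE fragments the replacement skb needs beyond the head buffer, and the same value then bounds the unmap loop and the memcpy of (frags + 1) mappings, while pages the received skb did not use are handed to the new skb through the added "toskb" argument of skb_put_frags() instead of being freed. Below is a minimal userspace sketch of that arithmetic only; the PAGE_SIZE, PAGE_ALIGN and IPOIB_CM_HEAD_SIZE values are illustrative assumptions for a 4 KB page, not copied from the driver headers.

/* sketch: how many extra PAGE_SIZE fragments a completion of byte_len needs */
#include <stdio.h>

#define PAGE_SIZE		4096u
#define PAGE_ALIGN(x)		(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define IPOIB_CM_HEAD_SIZE	4084u	/* assumed: head buffer just under one page */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Same expression as the patch: bytes beyond the head buffer, rounded up to pages. */
static unsigned int rx_frags(unsigned int byte_len)
{
	return PAGE_ALIGN(byte_len - min_u(byte_len, IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
}

int main(void)
{
	printf("%u\n", rx_frags(256));		/* small packet: 0 extra pages  */
	printf("%u\n", rx_frags(9000));		/* jumbo frame:  2 extra pages  */
	printf("%u\n", rx_frags(65524));	/* full CM MTU: 15 extra pages  */
	return 0;
}

With these assumed constants a small message allocates and DMA-maps no fragment pages at all, which is the point of passing frags through ipoib_cm_alloc_rx_skb() and ipoib_cm_dma_unmap_rx() rather than always using IPOIB_CM_RX_SG - 1.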