about summary refs log tree commit diff stats
path: root/drivers/net/ethernet
diff options
context:
space:
mode:
author    Ian Campbell <Ian.Campbell@citrix.com>  2011-10-19 19:01:45 -0400
committer David S. Miller <davem@davemloft.net>   2011-10-21 02:52:52 -0400
commit    311761c8a553adaa3ad7482b1fdde1ce9042d3e2 (patch)
tree      6b8d42d30f6ed77da03d57c3f2f35ac43e40a08d /drivers/net/ethernet
parent    6cc7a765c2987f03ba278dac03c7cc759ee198e7 (diff)
mlx4: convert to SKB paged frag API.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: netdev@vger.kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet')
 drivers/net/ethernet/mellanox/mlx4/en_rx.c | 32 ++++++++--------------
 drivers/net/ethernet/mellanox/mlx4/en_tx.c | 20 ++++----------
 2 files changed, 20 insertions(+), 32 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9aec8b836fe..b89c36dbf5b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -44,7 +44,7 @@
 
 static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
 			      struct mlx4_en_rx_desc *rx_desc,
-			      struct skb_frag_struct *skb_frags,
+			      struct page_frag *skb_frags,
 			      struct mlx4_en_rx_alloc *ring_alloc,
 			      int i)
 {
@@ -61,7 +61,7 @@ static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
 			return -ENOMEM;
 
 		skb_frags[i].page = page_alloc->page;
-		skb_frags[i].page_offset = page_alloc->offset;
+		skb_frags[i].offset = page_alloc->offset;
 		page_alloc->page = page;
 		page_alloc->offset = frag_info->frag_align;
 	} else {
@@ -69,11 +69,11 @@ static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
 		get_page(page);
 
 		skb_frags[i].page = page;
-		skb_frags[i].page_offset = page_alloc->offset;
+		skb_frags[i].offset = page_alloc->offset;
 		page_alloc->offset += frag_info->frag_stride;
 	}
 	dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
-			     skb_frags[i].page_offset, frag_info->frag_size,
+			     skb_frags[i].offset, frag_info->frag_size,
 			     PCI_DMA_FROMDEVICE);
 	rx_desc->data[i].addr = cpu_to_be64(dma);
 	return 0;
@@ -157,8 +157,8 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
 				   struct mlx4_en_rx_ring *ring, int index)
 {
 	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
-	struct skb_frag_struct *skb_frags = ring->rx_info +
-					(index << priv->log_rx_info);
+	struct page_frag *skb_frags = ring->rx_info +
+					(index << priv->log_rx_info);
 	int i;
 
 	for (i = 0; i < priv->num_frags; i++)
@@ -183,7 +183,7 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
 				 int index)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
-	struct skb_frag_struct *skb_frags;
+	struct page_frag *skb_frags;
 	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
 	dma_addr_t dma;
 	int nr;
@@ -194,7 +194,7 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
 		dma = be64_to_cpu(rx_desc->data[nr].addr);
 
 		en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
-		pci_unmap_single(mdev->pdev, dma, skb_frag_size(&skb_frags[nr]),
+		pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
 				 PCI_DMA_FROMDEVICE);
 		put_page(skb_frags[nr].page);
 	}
@@ -403,7 +403,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
 /* Unmap a completed descriptor and free unused pages */
 static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 				    struct mlx4_en_rx_desc *rx_desc,
-				    struct skb_frag_struct *skb_frags,
+				    struct page_frag *skb_frags,
 				    struct sk_buff *skb,
 				    struct mlx4_en_rx_alloc *page_alloc,
 				    int length)
@@ -421,9 +421,9 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 			break;
 
 		/* Save page reference in skb */
-		skb_frags_rx[nr].page = skb_frags[nr].page;
-		skb_frag_size_set(&skb_frags_rx[nr], skb_frag_size(&skb_frags[nr]));
-		skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
+		__skb_frag_set_page(&skb_frags_rx[nr], skb_frags[nr].page);
+		skb_frag_size_set(&skb_frags_rx[nr], skb_frags[nr].size);
+		skb_frags_rx[nr].page_offset = skb_frags[nr].offset;
 		skb->truesize += frag_info->frag_stride;
 		dma = be64_to_cpu(rx_desc->data[nr].addr);
 
@@ -446,7 +446,7 @@ fail:
 	 * the descriptor) of this packet; remaining fragments are reused... */
 	while (nr > 0) {
 		nr--;
-		put_page(skb_frags_rx[nr].page);
+		__skb_frag_unref(&skb_frags_rx[nr]);
 	}
 	return 0;
 }
@@ -454,7 +454,7 @@ fail:
 
 static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 				      struct mlx4_en_rx_desc *rx_desc,
-				      struct skb_frag_struct *skb_frags,
+				      struct page_frag *skb_frags,
 				      struct mlx4_en_rx_alloc *page_alloc,
 				      unsigned int length)
 {
@@ -475,7 +475,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 
 	/* Get pointer to first fragment so we could copy the headers into the
 	 * (linear part of the) skb */
-	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
+	va = page_address(skb_frags[0].page) + skb_frags[0].offset;
 
 	if (length <= SMALL_PACKET_SIZE) {
 		/* We are copying all relevant data to the skb - temporarily
@@ -533,7 +533,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_cqe *cqe;
 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
-	struct skb_frag_struct *skb_frags;
+	struct page_frag *skb_frags;
 	struct mlx4_en_rx_desc *rx_desc;
 	struct sk_buff *skb;
 	int index;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 75dda26189f..75338eb88e8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -460,26 +460,13 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 	}
 }
 
-static void *get_frag_ptr(struct sk_buff *skb)
-{
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-	struct page *page = frag->page;
-	void *ptr;
-
-	ptr = page_address(page);
-	if (unlikely(!ptr))
-		return NULL;
-
-	return ptr + frag->page_offset;
-}
-
 static int is_inline(struct sk_buff *skb, void **pfrag)
 {
 	void *ptr;
 
 	if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
 		if (skb_shinfo(skb)->nr_frags == 1) {
-			ptr = get_frag_ptr(skb);
+			ptr = skb_frag_address_safe(&skb_shinfo(skb)->frags[0]);
 			if (unlikely(!ptr))
 				return 0;
 
@@ -756,8 +743,9 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* Map fragments */
 		for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
 			frag = &skb_shinfo(skb)->frags[i];
-			dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
-					   skb_frag_size(frag), PCI_DMA_TODEVICE);
+			dma = skb_frag_dma_map(&mdev->dev->pdev->dev, frag,
+					       0, skb_frag_size(frag),
+					       DMA_TO_DEVICE);
 			data->addr = cpu_to_be64(dma);
 			data->lkey = cpu_to_be32(mdev->mr.key);
 			wmb();