Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/en_rx.c')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_rx.c | 40
1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index dec455c8f627..066fc2747854 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -70,14 +70,15 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
 		put_page(page);
 		return -ENOMEM;
 	}
-	page_alloc->size = PAGE_SIZE << order;
+	page_alloc->page_size = PAGE_SIZE << order;
 	page_alloc->page = page;
 	page_alloc->dma = dma;
-	page_alloc->offset = frag_info->frag_align;
+	page_alloc->page_offset = frag_info->frag_align;
 	/* Not doing get_page() for each frag is a big win
 	 * on asymetric workloads.
 	 */
-	atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride);
+	atomic_set(&page->_count,
+		   page_alloc->page_size / frag_info->frag_stride);
 	return 0;
 }
 
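Note: the hunk above is the heart of the scheme being renamed here: one page of
PAGE_SIZE << order is carved into frag_stride-sized slots, and the page refcount
is set once to the slot count so each fragment handed out implicitly owns a
reference. A minimal standalone sketch of that idea follows; struct frag_page
and frag_page_init are hypothetical, simplified analogues of the driver's
mlx4_en_rx_alloc and mlx4_alloc_pages, not the actual code.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical, simplified analogue of mlx4_en_rx_alloc. */
struct frag_page {
	struct page	*page;
	dma_addr_t	dma;		/* mapping of the whole page */
	u32		page_size;	/* PAGE_SIZE << order */
	u32		page_offset;	/* start of the next free slot */
};

static int frag_page_init(struct device *dev, struct frag_page *fp,
			  unsigned int order, unsigned int frag_stride,
			  gfp_t gfp)
{
	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
	dma_addr_t dma;

	if (!page)
		return -ENOMEM;

	dma = dma_map_page(dev, page, 0, PAGE_SIZE << order, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		put_page(page);
		return -ENOMEM;
	}

	fp->page = page;
	fp->dma = dma;
	fp->page_size = PAGE_SIZE << order;
	fp->page_offset = 0;
	/* One reference per slot, paid with a single atomic_set()
	 * instead of a get_page() per fragment handed out.
	 */
	atomic_set(&page->_count, fp->page_size / frag_stride);
	return 0;
}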
@@ -96,16 +97,19 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
 	for (i = 0; i < priv->num_frags; i++) {
 		frag_info = &priv->frag_info[i];
 		page_alloc[i] = ring_alloc[i];
-		page_alloc[i].offset += frag_info->frag_stride;
-		if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size)
+		page_alloc[i].page_offset += frag_info->frag_stride;
+
+		if (page_alloc[i].page_offset + frag_info->frag_stride <=
+		    ring_alloc[i].page_size)
 			continue;
+
 		if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
 			goto out;
 	}
 
 	for (i = 0; i < priv->num_frags; i++) {
 		frags[i] = ring_alloc[i];
-		dma = ring_alloc[i].dma + ring_alloc[i].offset;
+		dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
 		ring_alloc[i] = page_alloc[i];
 		rx_desc->data[i].addr = cpu_to_be64(dma);
 	}
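Note: mlx4_en_alloc_frags() reserves the next slot in a scratch copy and only
commits it to the ring once every fragment is satisfied, so a mid-loop
allocation failure leaves the ring's allocator state untouched. A hedged
sketch of that reserve step, reusing the hypothetical frag_page helpers from
the previous note:

/* Advance a scratch copy to the next slot; refill when the page is
 * spent.  'cur' (the ring's live state) is not modified, mirroring the
 * commit-on-success structure above.  Hypothetical helper.
 */
static int frag_page_take_next(struct device *dev,
			       const struct frag_page *cur,
			       struct frag_page *next, unsigned int order,
			       unsigned int frag_stride, gfp_t gfp)
{
	*next = *cur;
	next->page_offset += frag_stride;

	/* Does a whole slot still fit at the advanced offset? */
	if (next->page_offset + frag_stride <= next->page_size)
		return 0;

	/* Page exhausted: start a fresh one for later slots. */
	return frag_page_init(dev, next, order, frag_stride, gfp);
}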
@@ -117,7 +121,7 @@ out:
 		frag_info = &priv->frag_info[i];
 		if (page_alloc[i].page != ring_alloc[i].page) {
 			dma_unmap_page(priv->ddev, page_alloc[i].dma,
-				page_alloc[i].size, PCI_DMA_FROMDEVICE);
+				page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
 			page = page_alloc[i].page;
 			atomic_set(&page->_count, 1);
 			put_page(page);
@@ -132,9 +136,10 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
 {
 	const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
 
-	if (frags[i].offset + frag_info->frag_stride > frags[i].size)
-		dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size,
-			       PCI_DMA_FROMDEVICE);
+	if (frags[i].page_offset + frag_info->frag_stride >
+	    frags[i].page_size)
+		dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
+			       PCI_DMA_FROMDEVICE);
 
 	if (frags[i].page)
 		put_page(frags[i].page);
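Note: mlx4_en_free_frag() shows the matching release rule: the DMA mapping
covers the whole page, so it is torn down only when the slot being freed is
the page's last one, while every slot drops the single reference it was
granted at init time. A sketch with the same assumed struct:

/* Hypothetical release helper matching mlx4_en_free_frag() above. */
static void frag_page_put_slot(struct device *dev, struct frag_page *fp,
			       unsigned int frag_stride)
{
	/* No further slot fits: this was the mapping's last user. */
	if (fp->page_offset + frag_stride > fp->page_size)
		dma_unmap_page(dev, fp->dma, fp->page_size, DMA_FROM_DEVICE);

	if (fp->page)
		put_page(fp->page);	/* drop this slot's reference */
}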
@@ -161,7 +166,7 @@ out:
 
 		page_alloc = &ring->page_alloc[i];
 		dma_unmap_page(priv->ddev, page_alloc->dma,
-			page_alloc->size, PCI_DMA_FROMDEVICE);
+			page_alloc->page_size, PCI_DMA_FROMDEVICE);
 		page = page_alloc->page;
 		atomic_set(&page->_count, 1);
 		put_page(page);
@@ -184,10 +189,11 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
 			i, page_count(page_alloc->page));
 
 		dma_unmap_page(priv->ddev, page_alloc->dma,
-			page_alloc->size, PCI_DMA_FROMDEVICE);
-		while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) {
+			page_alloc->page_size, PCI_DMA_FROMDEVICE);
+		while (page_alloc->page_offset + frag_info->frag_stride <
+		       page_alloc->page_size) {
 			put_page(page_alloc->page);
-			page_alloc->offset += frag_info->frag_stride;
+			page_alloc->page_offset += frag_info->frag_stride;
 		}
 		page_alloc->page = NULL;
 	}
@@ -478,7 +484,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 		/* Save page reference in skb */
 		__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
 		skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
-		skb_frags_rx[nr].page_offset = frags[nr].offset;
+		skb_frags_rx[nr].page_offset = frags[nr].page_offset;
 		skb->truesize += frag_info->frag_stride;
 		frags[nr].page = NULL;
 	}
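Note: in mlx4_en_complete_rx_desc() the slot is attached to the skb and its
page pointer is cleared, so ownership of the page reference moves to the skb
and a later mlx4_en_free_frag() cannot drop it twice. A sketch of the same
hand-off using the generic skb_fill_page_desc() helper (assumed simplified
types, not the driver's code):

#include <linux/skbuff.h>

/* Hypothetical hand-off helper: the skb frag inherits the slot's page
 * reference, so the source descriptor is cleared to disarm any later
 * release path.
 */
static void frag_page_attach(struct sk_buff *skb, int nr,
			     struct frag_page *fp, unsigned int size,
			     unsigned int truesize)
{
	skb_fill_page_desc(skb, nr, fp->page, fp->page_offset, size);
	skb->truesize += truesize;	/* account the whole stride */
	fp->page = NULL;		/* reference now owned by the skb */
}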
@@ -517,7 +523,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 
 	/* Get pointer to first fragment so we could copy the headers into the
 	 * (linear part of the) skb */
-	va = page_address(frags[0].page) + frags[0].offset;
+	va = page_address(frags[0].page) + frags[0].page_offset;
 
 	if (length <= SMALL_PACKET_SIZE) {
 		/* We are copying all relevant data to the skb - temporarily
@@ -645,7 +651,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget
 			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
 						DMA_FROM_DEVICE);
 			ethh = (struct ethhdr *)(page_address(frags[0].page) +
-						 frags[0].offset);
+						 frags[0].page_offset);
 
 			if (is_multicast_ether_addr(ethh->h_dest)) {
 				struct mlx4_mac_entry *entry;
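Note: the last hunk peeks at the Ethernet header while the buffer is still
mapped for DMA: only sizeof(*ethh) bytes are synced back to the CPU before
reading through the page's kernel mapping. A sketch with the assumed
frag_page type from the earlier notes:

#include <linux/etherdevice.h>

/* Hypothetical header peek: sync just the MAC header, not the frame. */
static struct ethhdr *frag_page_peek_ethhdr(struct device *dev,
					    const struct frag_page *fp)
{
	dma_addr_t dma = fp->dma + fp->page_offset;

	dma_sync_single_for_cpu(dev, dma, sizeof(struct ethhdr),
				DMA_FROM_DEVICE);
	return page_address(fp->page) + fp->page_offset;
}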