author		Lendacky, Thomas <Thomas.Lendacky@amd.com>	2014-11-04 17:06:44 -0500
committer	David S. Miller <davem@davemloft.net>		2014-11-05 21:50:12 -0500
commit		08dcc47c06c79de31b9b2c0b4637f6119e5701fa
tree		55a56cf66275b02c33afb6e4a2d2296b3d1450d8
parent		aa96bd3c9fda52b9f57128798b49d662e5d4659c
amd-xgbe: Use page allocations for Rx buffers
Use page allocations for Rx buffers instead of pre-allocating skbs
of a set size.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 143
 drivers/net/ethernet/amd/xgbe/xgbe-dev.c  |  60
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c  |  95
 drivers/net/ethernet/amd/xgbe/xgbe.h      |  25
 4 files changed, 196 insertions(+), 127 deletions(-)
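In outline: instead of allocating an skb per Rx descriptor, each Rx ring now
keeps a multi-page allocation that is DMA-mapped once and carved into
rx_buf_size slices. Every descriptor takes a page reference (get_page()) on
the allocation backing its slice, and the descriptor handed the final slice
also records the allocation in rx_unmap, making it responsible for the
eventual dma_unmap_page()/put_page(). A minimal user-space analogue of that
lifecycle, with malloc() standing in for alloc_pages()/dma_map_page() and a
plain counter for the page refcount (all names here are illustrative, not
driver symbols):

/* User-space analogue of the scheme (illustrative only): one large
 * allocation is carved into fixed-size Rx buffers, a counter stands in
 * for the page refcount, and the memory is freed only when the last
 * slice taken from it is released. */
#include <stdlib.h>

#define ALLOC_LEN 8192		/* stands in for PAGE_SIZE << order */
#define BUF_LEN   2048		/* stands in for pdata->rx_buf_size */

struct page_alloc {
	unsigned char *base;
	size_t len, offset;
	int refs;
};

struct rx_buf {
	struct page_alloc *pa;	/* allocation this slice came from */
	unsigned char *data;
};

/* Hand out the next slice, starting a fresh allocation once the
 * current one cannot fit another buffer (cf. xgbe_map_rx_buffer()). */
static int map_rx_buffer(struct page_alloc **ring_pa, struct rx_buf *buf)
{
	struct page_alloc *pa = *ring_pa;

	if (!pa) {
		pa = calloc(1, sizeof(*pa));
		if (!pa || !(pa->base = malloc(ALLOC_LEN))) {
			free(pa);
			return -1;
		}
		pa->len = ALLOC_LEN;
		pa->refs = 1;		/* the ring's own reference */
		*ring_pa = pa;
	}

	pa->refs++;			/* like get_page(): the slice holds a ref */
	buf->pa = pa;
	buf->data = pa->base + pa->offset;
	pa->offset += BUF_LEN;

	if (pa->offset + BUF_LEN > pa->len) {
		pa->refs--;		/* drop the ring's reference... */
		*ring_pa = NULL;	/* ...and allocate anew next time */
	}
	return 0;
}

/* Release a slice; the last holder frees the whole allocation
 * (cf. the rx_pa/rx_unmap handling in xgbe_unmap_rdata()). */
static void put_rx_buffer(struct rx_buf *buf)
{
	if (--buf->pa->refs == 0) {
		free(buf->pa->base);
		free(buf->pa);
	}
	buf->pa = NULL;
}

With these sizes, four slices share each allocation; the driver gets the
analogous effect per ring, trimming allocator and DMA-mapping work on the
Rx fast path.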
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 6fc5da01437d..99911f45f334 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -117,7 +117,7 @@
 #include "xgbe.h"
 #include "xgbe-common.h"
 
-static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
+static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
 
 static void xgbe_free_ring(struct xgbe_prv_data *pdata,
 			   struct xgbe_ring *ring)
@@ -131,13 +131,24 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata,
 	if (ring->rdata) {
 		for (i = 0; i < ring->rdesc_count; i++) {
 			rdata = XGBE_GET_DESC_DATA(ring, i);
-			xgbe_unmap_skb(pdata, rdata);
+			xgbe_unmap_rdata(pdata, rdata);
 		}
 
 		kfree(ring->rdata);
 		ring->rdata = NULL;
 	}
 
+	if (ring->rx_pa.pages) {
+		dma_unmap_page(pdata->dev, ring->rx_pa.pages_dma,
+			       ring->rx_pa.pages_len, DMA_FROM_DEVICE);
+		put_page(ring->rx_pa.pages);
+
+		ring->rx_pa.pages = NULL;
+		ring->rx_pa.pages_len = 0;
+		ring->rx_pa.pages_offset = 0;
+		ring->rx_pa.pages_dma = 0;
+	}
+
 	if (ring->rdesc) {
 		dma_free_coherent(pdata->dev,
 				  (sizeof(struct xgbe_ring_desc) *
@@ -233,6 +244,65 @@ err_ring:
 	return ret;
 }
 
+static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+			      struct xgbe_ring *ring,
+			      struct xgbe_ring_data *rdata)
+{
+	if (!ring->rx_pa.pages) {
+		struct page *pages = NULL;
+		dma_addr_t pages_dma;
+		gfp_t gfp;
+		int order, ret;
+
+		/* Try to obtain pages, decreasing order if necessary */
+		gfp = GFP_ATOMIC | __GFP_COLD | __GFP_COMP;
+		order = max_t(int, PAGE_ALLOC_COSTLY_ORDER, 1);
+		while (--order >= 0) {
+			pages = alloc_pages(gfp, order);
+			if (pages)
+				break;
+		}
+		if (!pages)
+			return -ENOMEM;
+
+		/* Map the pages */
+		pages_dma = dma_map_page(pdata->dev, pages, 0,
+					 PAGE_SIZE << order, DMA_FROM_DEVICE);
+		ret = dma_mapping_error(pdata->dev, pages_dma);
+		if (ret) {
+			put_page(pages);
+			return ret;
+		}
+
+		/* Set the values for this ring */
+		ring->rx_pa.pages = pages;
+		ring->rx_pa.pages_len = PAGE_SIZE << order;
+		ring->rx_pa.pages_offset = 0;
+		ring->rx_pa.pages_dma = pages_dma;
+	}
+
+	get_page(ring->rx_pa.pages);
+	rdata->rx_pa = ring->rx_pa;
+
+	rdata->rx_dma = ring->rx_pa.pages_dma + ring->rx_pa.pages_offset;
+	rdata->rx_dma_len = pdata->rx_buf_size;
+
+	ring->rx_pa.pages_offset += pdata->rx_buf_size;
+	if ((ring->rx_pa.pages_offset + pdata->rx_buf_size) >
+	    ring->rx_pa.pages_len) {
+		/* This data descriptor is responsible for unmapping page(s) */
+		rdata->rx_unmap = ring->rx_pa;
+
+		/* Get a new allocation next time */
+		ring->rx_pa.pages = NULL;
+		ring->rx_pa.pages_len = 0;
+		ring->rx_pa.pages_offset = 0;
+		ring->rx_pa.pages_dma = 0;
+	}
+
+	return 0;
+}
+
 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -281,8 +351,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
 	struct xgbe_ring *ring;
 	struct xgbe_ring_desc *rdesc;
 	struct xgbe_ring_data *rdata;
-	dma_addr_t rdesc_dma, skb_dma;
-	struct sk_buff *skb = NULL;
+	dma_addr_t rdesc_dma;
 	unsigned int i, j;
 
 	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
@@ -302,22 +371,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
 			rdata->rdesc = rdesc;
 			rdata->rdesc_dma = rdesc_dma;
 
-			/* Allocate skb & assign to each rdesc */
-			skb = dev_alloc_skb(pdata->rx_buf_size);
-			if (skb == NULL)
-				break;
-			skb_dma = dma_map_single(pdata->dev, skb->data,
-						 pdata->rx_buf_size,
-						 DMA_FROM_DEVICE);
-			if (dma_mapping_error(pdata->dev, skb_dma)) {
-				netdev_alert(pdata->netdev,
-					     "failed to do the dma map\n");
-				dev_kfree_skb_any(skb);
+			if (xgbe_map_rx_buffer(pdata, ring, rdata))
 				break;
-			}
-			rdata->skb = skb;
-			rdata->skb_dma = skb_dma;
-			rdata->skb_dma_len = pdata->rx_buf_size;
 
 			rdesc++;
 			rdesc_dma += sizeof(struct xgbe_ring_desc);
@@ -334,8 +389,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
 	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
 }
 
-static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
-			   struct xgbe_ring_data *rdata)
+static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
+			     struct xgbe_ring_data *rdata)
 {
 	if (rdata->skb_dma) {
 		if (rdata->mapped_as_page) {
@@ -354,6 +409,21 @@ static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
 		rdata->skb = NULL;
 	}
 
+	if (rdata->rx_pa.pages)
+		put_page(rdata->rx_pa.pages);
+
+	if (rdata->rx_unmap.pages) {
+		dma_unmap_page(pdata->dev, rdata->rx_unmap.pages_dma,
+			       rdata->rx_unmap.pages_len, DMA_FROM_DEVICE);
+		put_page(rdata->rx_unmap.pages);
+	}
+
+	memset(&rdata->rx_pa, 0, sizeof(rdata->rx_pa));
+	memset(&rdata->rx_unmap, 0, sizeof(rdata->rx_unmap));
+
+	rdata->rx_dma = 0;
+	rdata->rx_dma_len = 0;
+
 	rdata->tso_header = 0;
 	rdata->len = 0;
 	rdata->interrupt = 0;
@@ -494,7 +564,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 err_out:
 	while (start_index < cur_index) {
 		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
-		xgbe_unmap_skb(pdata, rdata);
+		xgbe_unmap_rdata(pdata, rdata);
 	}
 
 	DBGPR("<--xgbe_map_tx_skb: count=0\n");
@@ -502,40 +572,25 @@ err_out:
 	return 0;
 }
 
-static void xgbe_realloc_skb(struct xgbe_channel *channel)
+static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
-	struct sk_buff *skb = NULL;
-	dma_addr_t skb_dma;
 	int i;
 
-	DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
+	DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
 	      ring->rx.realloc_index);
 
 	for (i = 0; i < ring->dirty; i++) {
 		rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
 
 		/* Reset rdata values */
-		xgbe_unmap_skb(pdata, rdata);
+		xgbe_unmap_rdata(pdata, rdata);
 
-		/* Allocate skb & assign to each rdesc */
-		skb = dev_alloc_skb(pdata->rx_buf_size);
-		if (skb == NULL)
+		if (xgbe_map_rx_buffer(pdata, ring, rdata))
 			break;
-		skb_dma = dma_map_single(pdata->dev, skb->data,
-					 pdata->rx_buf_size, DMA_FROM_DEVICE);
-		if (dma_mapping_error(pdata->dev, skb_dma)) {
-			netdev_alert(pdata->netdev,
-				     "failed to do the dma map\n");
-			dev_kfree_skb_any(skb);
-			break;
-		}
-		rdata->skb = skb;
-		rdata->skb_dma = skb_dma;
-		rdata->skb_dma_len = pdata->rx_buf_size;
 
 		hw_if->rx_desc_reset(rdata);
 
@@ -543,7 +598,7 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
 	}
 	ring->dirty = 0;
 
-	DBGPR("<--xgbe_realloc_skb\n");
+	DBGPR("<--xgbe_realloc_rx_buffer\n");
 }
 
 void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
@@ -553,8 +608,8 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
 	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
 	desc_if->free_ring_resources = xgbe_free_ring_resources;
 	desc_if->map_tx_skb = xgbe_map_tx_skb;
-	desc_if->realloc_skb = xgbe_realloc_skb;
-	desc_if->unmap_skb = xgbe_unmap_skb;
+	desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
+	desc_if->unmap_rdata = xgbe_unmap_rdata;
 	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
 	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
 
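A subtlety in xgbe_map_rx_buffer() above: the loop pre-decrements order, so
with PAGE_ALLOC_COSTLY_ORDER at its conventional value of 3 the first
alloc_pages() attempt is order 2 (four pages), falling back through order 1
to order 0 under memory pressure. At order 2 on 4 KiB pages, one 16 KiB
allocation feeds ten 1536-byte Rx buffers (the size a 1500-byte MTU works
out to; see the sizing note further down) before a fresh allocation is
needed. A quick stand-alone check of that slicing arithmetic (plain C, not
driver code):

#include <stdio.h>

int main(void)
{
	unsigned int pages_len = 4096u << 2;	/* PAGE_SIZE << order, order 2 */
	unsigned int rx_buf_size = 1536;	/* aligned Rx buffer size */
	unsigned int offset = 0, slices = 0;

	while (offset + rx_buf_size <= pages_len) {
		offset += rx_buf_size;		/* one descriptor's slice */
		slices++;
	}
	printf("%u buffers per allocation\n", slices);	/* prints 10 */
	return 0;
}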
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 7b97d3852091..7748b758baf6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -880,13 +880,15 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
 	rdesc->desc1 = 0;
 	rdesc->desc2 = 0;
 	rdesc->desc3 = 0;
+
+	/* Make sure ownership is written to the descriptor */
+	wmb();
 }
 
 static void xgbe_tx_desc_init(struct xgbe_channel *channel)
 {
 	struct xgbe_ring *ring = channel->tx_ring;
 	struct xgbe_ring_data *rdata;
-	struct xgbe_ring_desc *rdesc;
 	int i;
 	int start_index = ring->cur;
 
@@ -895,26 +897,11 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
 	/* Initialze all descriptors */
 	for (i = 0; i < ring->rdesc_count; i++) {
 		rdata = XGBE_GET_DESC_DATA(ring, i);
-		rdesc = rdata->rdesc;
 
-		/* Initialize Tx descriptor
-		 *   Set buffer 1 (lo) address to zero
-		 *   Set buffer 1 (hi) address to zero
-		 *   Reset all other control bits (IC, TTSE, B2L & B1L)
-		 *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC,
-		 *     etc)
-		 */
-		rdesc->desc0 = 0;
-		rdesc->desc1 = 0;
-		rdesc->desc2 = 0;
-		rdesc->desc3 = 0;
+		/* Initialize Tx descriptor */
+		xgbe_tx_desc_reset(rdata);
 	}
 
-	/* Make sure everything is written to the descriptor(s) before
-	 * telling the device about them
-	 */
-	wmb();
-
 	/* Update the total number of Tx descriptors */
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
 
@@ -939,8 +926,8 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
 	 *   Set buffer 2 (hi) address to zero and set control bits
 	 *   OWN and INTE
 	 */
-	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
-	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx_dma));
+	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx_dma));
 	rdesc->desc2 = 0;
 
 	rdesc->desc3 = 0;
@@ -964,7 +951,6 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
 	struct xgbe_prv_data *pdata = channel->pdata;
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
-	struct xgbe_ring_desc *rdesc;
 	unsigned int start_index = ring->cur;
 	unsigned int rx_coalesce, rx_frames;
 	unsigned int i;
@@ -977,34 +963,16 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
 	/* Initialize all descriptors */
 	for (i = 0; i < ring->rdesc_count; i++) {
 		rdata = XGBE_GET_DESC_DATA(ring, i);
-		rdesc = rdata->rdesc;
 
-		/* Initialize Rx descriptor
-		 *   Set buffer 1 (lo) address to dma address (lo)
-		 *   Set buffer 1 (hi) address to dma address (hi)
-		 *   Set buffer 2 (lo) address to zero
-		 *   Set buffer 2 (hi) address to zero and set control
-		 *     bits OWN and INTE appropriateley
-		 */
-		rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
-		rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
-		rdesc->desc2 = 0;
-		rdesc->desc3 = 0;
-		XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
-		XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
-		rdata->interrupt = 1;
-		if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) {
-			/* Clear interrupt on completion bit */
-			XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
-					  0);
+		/* Set interrupt on completion bit as appropriate */
+		if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
 			rdata->interrupt = 0;
-		}
-	}
+		else
+			rdata->interrupt = 1;
 
-	/* Make sure everything is written to the descriptors before
-	 * telling the device about them
-	 */
-	wmb();
+		/* Initialize Rx descriptor */
+		xgbe_rx_desc_reset(rdata);
+	}
 
 	/* Update the total number of Rx descriptors */
 	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
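Two things fall out of the consolidation above. First, the raw descriptor
writes move into xgbe_tx_desc_reset() and xgbe_rx_desc_reset(), and the
single batch wmb() after each init loop goes with them, so ownership is now
published per descriptor reset rather than once per loop. Second, the Rx
interrupt bookkeeping reduces to one rule: with coalescing enabled, only
every rx_frames-th descriptor keeps its completion interrupt. A stand-alone
check of that expression (illustrative values; plain C, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int rx_coalesce = 1;	/* coalescing enabled */
	unsigned int rx_frames = 4;	/* interrupt every 4th frame */
	unsigned int rdesc_count = 8, i;

	for (i = 0; i < rdesc_count; i++) {
		/* Same condition as in xgbe_rx_desc_init() above */
		int interrupt = !(rx_coalesce &&
				  (!rx_frames || ((i + 1) % rx_frames)));
		printf("desc %u: INTE=%d\n", i, interrupt);
	}
	return 0;	/* INTE=1 only on descriptors 3 and 7 */
}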
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 8cb2372f8fa9..d65f5aa8fdce 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -218,8 +218,8 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
 	}
 
 	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-	if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE)
-		rx_buf_size = XGBE_RX_MIN_BUF_SIZE;
+	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
+
 	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
 		      ~(XGBE_RX_BUF_ALIGN - 1);
 
@@ -546,7 +546,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
 	DBGPR("<--xgbe_init_rx_coalesce\n");
 }
 
-static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
+static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
 	struct xgbe_channel *channel;
@@ -554,7 +554,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
 	struct xgbe_ring_data *rdata;
 	unsigned int i, j;
 
-	DBGPR("-->xgbe_free_tx_skbuff\n");
+	DBGPR("-->xgbe_free_tx_data\n");
 
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
@@ -564,14 +564,14 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
 
 		for (j = 0; j < ring->rdesc_count; j++) {
 			rdata = XGBE_GET_DESC_DATA(ring, j);
-			desc_if->unmap_skb(pdata, rdata);
+			desc_if->unmap_rdata(pdata, rdata);
 		}
 	}
 
-	DBGPR("<--xgbe_free_tx_skbuff\n");
+	DBGPR("<--xgbe_free_tx_data\n");
 }
 
-static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
+static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
 	struct xgbe_channel *channel;
@@ -579,7 +579,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
 	struct xgbe_ring_data *rdata;
 	unsigned int i, j;
 
-	DBGPR("-->xgbe_free_rx_skbuff\n");
+	DBGPR("-->xgbe_free_rx_data\n");
 
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
@@ -589,11 +589,11 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
 
 		for (j = 0; j < ring->rdesc_count; j++) {
 			rdata = XGBE_GET_DESC_DATA(ring, j);
-			desc_if->unmap_skb(pdata, rdata);
+			desc_if->unmap_rdata(pdata, rdata);
 		}
 	}
 
-	DBGPR("<--xgbe_free_rx_skbuff\n");
+	DBGPR("<--xgbe_free_rx_data\n");
 }
 
 static void xgbe_adjust_link(struct net_device *netdev)
@@ -839,8 +839,8 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
 	xgbe_stop(pdata);
 	synchronize_irq(pdata->irq_number);
 
-	xgbe_free_tx_skbuff(pdata);
-	xgbe_free_rx_skbuff(pdata);
+	xgbe_free_tx_data(pdata);
+	xgbe_free_rx_data(pdata);
 
 	/* Issue software reset to device if requested */
 	if (reset)
@@ -1609,7 +1609,7 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
 
-	desc_if->realloc_skb(channel);
+	desc_if->realloc_rx_buffer(channel);
 
 	/* Update the Rx Tail Pointer Register with address of
 	 * the last cleaned entry */
@@ -1618,6 +1618,37 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
 			  lower_32_bits(rdata->rdesc_dma));
 }
 
+static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+				       struct xgbe_ring_data *rdata,
+				       unsigned int len)
+{
+	struct net_device *netdev = pdata->netdev;
+	struct sk_buff *skb;
+	u8 *packet;
+	unsigned int copy_len;
+
+	skb = netdev_alloc_skb_ip_align(netdev, XGBE_SKB_ALLOC_SIZE);
+	if (!skb)
+		return NULL;
+
+	packet = page_address(rdata->rx_pa.pages) + rdata->rx_pa.pages_offset;
+	copy_len = min_t(unsigned int, XGBE_SKB_ALLOC_SIZE, len);
+	skb_copy_to_linear_data(skb, packet, copy_len);
+	skb_put(skb, copy_len);
+
+	rdata->rx_pa.pages_offset += copy_len;
+	len -= copy_len;
+	if (len)
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				rdata->rx_pa.pages,
+				rdata->rx_pa.pages_offset,
+				len, rdata->rx_dma_len);
+	else
+		put_page(rdata->rx_pa.pages);
+
+	return skb;
+}
+
 static int xgbe_tx_poll(struct xgbe_channel *channel)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;
@@ -1651,7 +1682,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 #endif
 
 		/* Free the SKB and reset the descriptor for re-use */
-		desc_if->unmap_skb(pdata, rdata);
+		desc_if->unmap_rdata(pdata, rdata);
 		hw_if->tx_desc_reset(rdata);
 
 		processed++;
@@ -1726,9 +1757,9 @@ read_again:
 		ring->cur++;
 		ring->dirty++;
 
-		dma_unmap_single(pdata->dev, rdata->skb_dma,
-				 rdata->skb_dma_len, DMA_FROM_DEVICE);
-		rdata->skb_dma = 0;
+		dma_sync_single_for_cpu(pdata->dev, rdata->rx_dma,
+					rdata->rx_dma_len,
+					DMA_FROM_DEVICE);
 
 		incomplete = XGMAC_GET_BITS(packet->attributes,
 					    RX_PACKET_ATTRIBUTES,
@@ -1753,26 +1784,22 @@ read_again:
 
 		if (!context) {
 			put_len = rdata->len - len;
-			if (skb) {
-				if (pskb_expand_head(skb, 0, put_len,
-						     GFP_ATOMIC)) {
-					DBGPR("pskb_expand_head error\n");
-					if (incomplete) {
-						error = 1;
-						goto read_again;
-					}
-
-					dev_kfree_skb(skb);
-					goto next_packet;
+			len += put_len;
+
+			if (!skb) {
+				skb = xgbe_create_skb(pdata, rdata, put_len);
+				if (!skb) {
+					error = 1;
+					goto read_again;
 				}
-				memcpy(skb_tail_pointer(skb), rdata->skb->data,
-				       put_len);
 			} else {
-				skb = rdata->skb;
-				rdata->skb = NULL;
+				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+						rdata->rx_pa.pages,
+						rdata->rx_pa.pages_offset,
+						put_len, rdata->rx_dma_len);
 			}
-			skb_put(skb, put_len);
-			len += put_len;
+
+			rdata->rx_pa.pages = NULL;
 		}
 
 		if (incomplete || context_next)
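On the sizing change at the top of this file: xgbe_calc_rx_buf_size() now
clamps the frame-derived size into [XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE] before
rounding up to XGBE_RX_BUF_ALIGN; the PAGE_SIZE cap ensures one buffer
always fits even an order-0 fallback allocation. For a standard 1500-byte
MTU this works out to 1536 bytes. A worked check (constants as defined in
the mainline headers, reproduced here for illustration):

#include <stdio.h>

#define ETH_HLEN		14
#define ETH_FCS_LEN		4
#define VLAN_HLEN		4
#define ETH_FRAME_LEN		1514
#define PAGE_SIZE		4096	/* typical; architecture-dependent */
#define XGBE_RX_MIN_BUF_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
#define XGBE_RX_BUF_ALIGN	64

static unsigned int clamp_val(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int sz = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; /* 1522 */

	sz = clamp_val(sz, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);	/* 1522 */
	sz = (sz + XGBE_RX_BUF_ALIGN - 1) & ~(XGBE_RX_BUF_ALIGN - 1);
	printf("rx_buf_size for MTU %u: %u\n", mtu, sz);	/* 1536 */
	return 0;
}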
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 19f1d9007a6d..d3aa05501ee8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -142,6 +142,7 @@
 
 #define XGBE_RX_MIN_BUF_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
 #define XGBE_RX_BUF_ALIGN	64
+#define XGBE_SKB_ALLOC_SIZE	256
 
 #define XGBE_MAX_DMA_CHANNELS	16
 #define XGBE_MAX_QUEUES		16
@@ -240,6 +241,15 @@ struct xgbe_ring_desc {
 	u32 desc3;
 };
 
+/* Page allocation related values */
+struct xgbe_page_alloc {
+	struct page *pages;
+	unsigned int pages_len;
+	unsigned int pages_offset;
+
+	dma_addr_t pages_dma;
+};
+
 /* Structure used to hold information related to the descriptor
  * and the packet associated with the descriptor (always use
  * use the XGBE_GET_DESC_DATA macro to access this data from the ring)
@@ -253,6 +263,12 @@ struct xgbe_ring_data {
 	unsigned int skb_dma_len;	/* Length of SKB DMA area */
 	unsigned int tso_header;	/* TSO header indicator */
 
+	struct xgbe_page_alloc rx_pa;	/* Rx buffer page allocation */
+	struct xgbe_page_alloc rx_unmap;
+
+	dma_addr_t rx_dma;		/* DMA address of Rx buffer */
+	unsigned int rx_dma_len;	/* Length of the Rx DMA buffer */
+
 	unsigned short len;		/* Length of received Rx packet */
 
 	unsigned int interrupt;		/* Interrupt indicator */
@@ -291,6 +307,9 @@ struct xgbe_ring {
 	 */
 	struct xgbe_ring_data *rdata;
 
+	/* Page allocation for RX buffers */
+	struct xgbe_page_alloc rx_pa;
+
 	/* Ring index values
 	 *  cur   - Tx: index of descriptor to be used for current transfer
 	 *          Rx: index of descriptor to check for packet availability
@@ -515,8 +534,8 @@ struct xgbe_desc_if {
 	int (*alloc_ring_resources)(struct xgbe_prv_data *);
 	void (*free_ring_resources)(struct xgbe_prv_data *);
 	int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
-	void (*realloc_skb)(struct xgbe_channel *);
-	void (*unmap_skb)(struct xgbe_prv_data *, struct xgbe_ring_data *);
+	void (*realloc_rx_buffer)(struct xgbe_channel *);
+	void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
 	void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
 	void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
 };
@@ -624,7 +643,7 @@ struct xgbe_prv_data {
 	unsigned int rx_riwt;
 	unsigned int rx_frames;
 
-	/* Current MTU */
+	/* Current Rx buffer size */
 	unsigned int rx_buf_size;
 
 	/* Flow control settings */
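XGBE_SKB_ALLOC_SIZE, added above, bounds how much of a received frame
xgbe_create_skb() copies into the skb's linear area; anything past 256 bytes
stays in the Rx page and is attached by reference with skb_add_rx_frag(), so
only the protocol headers are memcpy'd. A user-space analogue of that
header/payload split (illustrative; fake_skb is not a kernel type):

#include <stdio.h>
#include <string.h>

#define XGBE_SKB_ALLOC_SIZE 256

struct fake_skb {
	unsigned char linear[XGBE_SKB_ALLOC_SIZE];	/* small skb head */
	size_t linear_len;
	const unsigned char *frag;	/* points into the Rx page */
	size_t frag_len;
};

/* Copy at most XGBE_SKB_ALLOC_SIZE bytes (enough for the headers)
 * into the linear area and reference the remainder in place. */
static void create_skb(struct fake_skb *skb,
		       const unsigned char *packet, size_t len)
{
	size_t copy_len = len < XGBE_SKB_ALLOC_SIZE ? len : XGBE_SKB_ALLOC_SIZE;

	memcpy(skb->linear, packet, copy_len);
	skb->linear_len = copy_len;
	skb->frag = len > copy_len ? packet + copy_len : NULL;
	skb->frag_len = len - copy_len;
}

int main(void)
{
	unsigned char page[2048] = { 0 };	/* stands in for the Rx page */
	struct fake_skb skb;

	create_skb(&skb, page, 1536);
	printf("linear=%zu frag=%zu\n", skb.linear_len, skb.frag_len);
	return 0;		/* prints: linear=256 frag=1280 */
}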