author	Lendacky, Thomas <Thomas.Lendacky@amd.com>	2014-11-04 17:06:50 -0500
committer	David S. Miller <davem@davemloft.net>	2014-11-05 21:50:12 -0500
commit	174fd2597b0bd8c19fce6a97e8b0f753ef4ce7cb (patch)
tree	0770a468c71da5dcaa7353537f822584df34907e
parent	08dcc47c06c79de31b9b2c0b4637f6119e5701fa (diff)
amd-xgbe: Implement split header receive support
Provide support for splitting IP packets so that the header and payload
can be sent to different DMA addresses.  This will allow the IP header to
be put into the linear part of the skb while the payload can be added
as frags.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe-common.h	8
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe-desc.c	176
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe-dev.c	44
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe-drv.c	63
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe.h	21
5 files changed, 201 insertions(+), 111 deletions(-)
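For readers new to split header (SPH) receive: each Rx descriptor now carries two buffer addresses, the hardware writes the packet header into the first and the payload into the second, and the driver builds the skb from the two pieces. Below is a minimal, stand-alone user-space sketch of that assembly step only; the names rx_desc_model and build_packet and the 256-byte linear buffer (mirroring XGBE_SKB_ALLOC_SIZE) are illustrative and are not part of the driver.

/*
 * Hypothetical user-space model of split header receive; the struct and
 * function names here are illustrative and do not exist in the driver.
 */
#include <stdio.h>
#include <string.h>

struct rx_desc_model {
	const unsigned char *hdr;	/* header area (descriptor buffer 1) */
	unsigned int hdr_len;		/* HL field from RX_NORMAL_DESC2 */
	const unsigned char *buf;	/* payload area (descriptor buffer 2) */
	unsigned int pkt_len;		/* PL field from RX_NORMAL_DESC3 */
};

static void build_packet(const struct rx_desc_model *d)
{
	unsigned char linear[256];	/* models the skb linear area */

	/* Copy only the header; fall back to the whole packet length if
	 * the hardware did not report a split header. */
	unsigned int copy_len = d->hdr_len ? d->hdr_len : d->pkt_len;

	if (copy_len > sizeof(linear))
		copy_len = sizeof(linear);
	memcpy(linear, d->hdr, copy_len);

	/* The remaining bytes stay in the payload buffer; the driver
	 * attaches them as a page fragment instead of copying them. */
	printf("linear (header) = %u bytes, frag (payload) = %u bytes\n",
	       copy_len, d->pkt_len - copy_len);
}

int main(void)
{
	unsigned char hdr[64] = { 0x45 };	/* pretend IPv4 header bytes */
	unsigned char payload[1400] = { 0 };	/* pretend payload bytes */
	struct rx_desc_model desc = {
		.hdr = hdr, .hdr_len = 20,
		.buf = payload, .pkt_len = 1420,
	};

	build_packet(&desc);
	return 0;
}

In the patch itself this corresponds to xgbe_create_skb() copying the received header into the skb linear area and the read_again: receive loop attaching the payload pages with skb_add_rx_frag(); see the xgbe-drv.c hunks below.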
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index caade30820d5..39bcb1140198 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -207,6 +207,8 @@
 /* DMA channel register entry bit positions and sizes */
 #define DMA_CH_CR_PBLX8_INDEX		16
 #define DMA_CH_CR_PBLX8_WIDTH		1
+#define DMA_CH_CR_SPH_INDEX		24
+#define DMA_CH_CR_SPH_WIDTH		1
 #define DMA_CH_IER_AIE_INDEX		15
 #define DMA_CH_IER_AIE_WIDTH		1
 #define DMA_CH_IER_FBEE_INDEX		12
@@ -429,6 +431,8 @@
 #define MAC_RCR_CST_WIDTH		1
 #define MAC_RCR_DCRCC_INDEX		3
 #define MAC_RCR_DCRCC_WIDTH		1
+#define MAC_RCR_HDSMS_INDEX		12
+#define MAC_RCR_HDSMS_WIDTH		3
 #define MAC_RCR_IPC_INDEX		9
 #define MAC_RCR_IPC_WIDTH		1
 #define MAC_RCR_JE_INDEX		8
@@ -847,6 +851,8 @@
 
 #define RX_NORMAL_DESC0_OVT_INDEX	0
 #define RX_NORMAL_DESC0_OVT_WIDTH	16
+#define RX_NORMAL_DESC2_HL_INDEX	0
+#define RX_NORMAL_DESC2_HL_WIDTH	10
 #define RX_NORMAL_DESC3_CDA_INDEX	27
 #define RX_NORMAL_DESC3_CDA_WIDTH	1
 #define RX_NORMAL_DESC3_CTXT_INDEX	30
@@ -855,6 +861,8 @@
 #define RX_NORMAL_DESC3_ES_WIDTH	1
 #define RX_NORMAL_DESC3_ETLT_INDEX	16
 #define RX_NORMAL_DESC3_ETLT_WIDTH	4
+#define RX_NORMAL_DESC3_FD_INDEX	29
+#define RX_NORMAL_DESC3_FD_WIDTH	1
 #define RX_NORMAL_DESC3_INTE_INDEX	30
 #define RX_NORMAL_DESC3_INTE_WIDTH	1
 #define RX_NORMAL_DESC3_LD_INDEX	28
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 99911f45f334..e6b9f54b9697 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -138,15 +138,26 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata,
 		ring->rdata = NULL;
 	}
 
-	if (ring->rx_pa.pages) {
-		dma_unmap_page(pdata->dev, ring->rx_pa.pages_dma,
-			       ring->rx_pa.pages_len, DMA_FROM_DEVICE);
-		put_page(ring->rx_pa.pages);
+	if (ring->rx_hdr_pa.pages) {
+		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
+			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
+		put_page(ring->rx_hdr_pa.pages);
 
-		ring->rx_pa.pages = NULL;
-		ring->rx_pa.pages_len = 0;
-		ring->rx_pa.pages_offset = 0;
-		ring->rx_pa.pages_dma = 0;
+		ring->rx_hdr_pa.pages = NULL;
+		ring->rx_hdr_pa.pages_len = 0;
+		ring->rx_hdr_pa.pages_offset = 0;
+		ring->rx_hdr_pa.pages_dma = 0;
+	}
+
+	if (ring->rx_buf_pa.pages) {
+		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
+			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
+		put_page(ring->rx_buf_pa.pages);
+
+		ring->rx_buf_pa.pages = NULL;
+		ring->rx_buf_pa.pages_len = 0;
+		ring->rx_buf_pa.pages_offset = 0;
+		ring->rx_buf_pa.pages_dma = 0;
 	}
 
 	if (ring->rdesc) {
@@ -244,62 +255,93 @@ err_ring:
 	return ret;
 }
 
-static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
-			      struct xgbe_ring *ring,
-			      struct xgbe_ring_data *rdata)
+static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+			    struct xgbe_page_alloc *pa, gfp_t gfp, int order)
 {
-	if (!ring->rx_pa.pages) {
-		struct page *pages = NULL;
-		dma_addr_t pages_dma;
-		gfp_t gfp;
-		int order, ret;
-
-		/* Try to obtain pages, decreasing order if necessary */
-		gfp = GFP_ATOMIC | __GFP_COLD | __GFP_COMP;
-		order = max_t(int, PAGE_ALLOC_COSTLY_ORDER, 1);
-		while (--order >= 0) {
-			pages = alloc_pages(gfp, order);
-			if (pages)
-				break;
-		}
-		if (!pages)
-			return -ENOMEM;
+	struct page *pages = NULL;
+	dma_addr_t pages_dma;
+	int ret;
 
-		/* Map the pages */
-		pages_dma = dma_map_page(pdata->dev, pages, 0,
-					 PAGE_SIZE << order, DMA_FROM_DEVICE);
-		ret = dma_mapping_error(pdata->dev, pages_dma);
-		if (ret) {
-			put_page(pages);
-			return ret;
-		}
+	/* Try to obtain pages, decreasing order if necessary */
+	gfp |= __GFP_COLD | __GFP_COMP;
+	while (order >= 0) {
+		pages = alloc_pages(gfp, order);
+		if (pages)
+			break;
 
-		/* Set the values for this ring */
-		ring->rx_pa.pages = pages;
-		ring->rx_pa.pages_len = PAGE_SIZE << order;
-		ring->rx_pa.pages_offset = 0;
-		ring->rx_pa.pages_dma = pages_dma;
+		order--;
 	}
+	if (!pages)
+		return -ENOMEM;
 
-	get_page(ring->rx_pa.pages);
-	rdata->rx_pa = ring->rx_pa;
+	/* Map the pages */
+	pages_dma = dma_map_page(pdata->dev, pages, 0,
+				 PAGE_SIZE << order, DMA_FROM_DEVICE);
+	ret = dma_mapping_error(pdata->dev, pages_dma);
+	if (ret) {
+		put_page(pages);
+		return ret;
+	}
 
-	rdata->rx_dma = ring->rx_pa.pages_dma + ring->rx_pa.pages_offset;
-	rdata->rx_dma_len = pdata->rx_buf_size;
+	pa->pages = pages;
+	pa->pages_len = PAGE_SIZE << order;
+	pa->pages_offset = 0;
+	pa->pages_dma = pages_dma;
 
-	ring->rx_pa.pages_offset += pdata->rx_buf_size;
-	if ((ring->rx_pa.pages_offset + pdata->rx_buf_size) >
-	    ring->rx_pa.pages_len) {
+	return 0;
+}
+
+static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
+				 struct xgbe_page_alloc *pa,
+				 unsigned int len)
+{
+	get_page(pa->pages);
+	bd->pa = *pa;
+
+	bd->dma = pa->pages_dma + pa->pages_offset;
+	bd->dma_len = len;
+
+	pa->pages_offset += len;
+	if ((pa->pages_offset + len) > pa->pages_len) {
 		/* This data descriptor is responsible for unmapping page(s) */
-		rdata->rx_unmap = ring->rx_pa;
+		bd->pa_unmap = *pa;
 
 		/* Get a new allocation next time */
-		ring->rx_pa.pages = NULL;
-		ring->rx_pa.pages_len = 0;
-		ring->rx_pa.pages_offset = 0;
-		ring->rx_pa.pages_dma = 0;
+		pa->pages = NULL;
+		pa->pages_len = 0;
+		pa->pages_offset = 0;
+		pa->pages_dma = 0;
+	}
+}
+
+static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+			      struct xgbe_ring *ring,
+			      struct xgbe_ring_data *rdata)
+{
+	int order, ret;
+
+	if (!ring->rx_hdr_pa.pages) {
+		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
+		if (ret)
+			return ret;
+	}
+
+	if (!ring->rx_buf_pa.pages) {
+		order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
+		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
+				       order);
+		if (ret)
+			return ret;
 	}
 
+	/* Set up the header page info */
+	xgbe_set_buffer_data(&rdata->rx_hdr, &ring->rx_hdr_pa,
+			     XGBE_SKB_ALLOC_SIZE);
+
+	/* Set up the buffer page info */
+	xgbe_set_buffer_data(&rdata->rx_buf, &ring->rx_buf_pa,
+			     pdata->rx_buf_size);
+
 	return 0;
 }
 
@@ -409,20 +451,28 @@ static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
 		rdata->skb = NULL;
 	}
 
-	if (rdata->rx_pa.pages)
-		put_page(rdata->rx_pa.pages);
+	if (rdata->rx_hdr.pa.pages)
+		put_page(rdata->rx_hdr.pa.pages);
 
-	if (rdata->rx_unmap.pages) {
-		dma_unmap_page(pdata->dev, rdata->rx_unmap.pages_dma,
-			       rdata->rx_unmap.pages_len, DMA_FROM_DEVICE);
-		put_page(rdata->rx_unmap.pages);
+	if (rdata->rx_hdr.pa_unmap.pages) {
+		dma_unmap_page(pdata->dev, rdata->rx_hdr.pa_unmap.pages_dma,
+			       rdata->rx_hdr.pa_unmap.pages_len,
+			       DMA_FROM_DEVICE);
+		put_page(rdata->rx_hdr.pa_unmap.pages);
 	}
 
-	memset(&rdata->rx_pa, 0, sizeof(rdata->rx_pa));
-	memset(&rdata->rx_unmap, 0, sizeof(rdata->rx_unmap));
+	if (rdata->rx_buf.pa.pages)
+		put_page(rdata->rx_buf.pa.pages);
+
+	if (rdata->rx_buf.pa_unmap.pages) {
+		dma_unmap_page(pdata->dev, rdata->rx_buf.pa_unmap.pages_dma,
+			       rdata->rx_buf.pa_unmap.pages_len,
+			       DMA_FROM_DEVICE);
+		put_page(rdata->rx_buf.pa_unmap.pages);
+	}
 
-	rdata->rx_dma = 0;
-	rdata->rx_dma_len = 0;
+	memset(&rdata->rx_hdr, 0, sizeof(rdata->rx_hdr));
+	memset(&rdata->rx_buf, 0, sizeof(rdata->rx_buf));
 
 	rdata->tso_header = 0;
 	rdata->len = 0;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 7748b758baf6..b3719f154637 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -335,6 +335,22 @@ static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
 	}
 }
 
+static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_channel *channel;
+	unsigned int i;
+
+	channel = pdata->channel;
+	for (i = 0; i < pdata->channel_count; i++, channel++) {
+		if (!channel->rx_ring)
+			break;
+
+		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
+	}
+
+	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+}
+
 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
 {
 	unsigned int max_q_count, q_count;
@@ -920,19 +936,19 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
 	struct xgbe_ring_desc *rdesc = rdata->rdesc;
 
 	/* Reset the Rx descriptor
-	 * Set buffer 1 (lo) address to dma address (lo)
-	 * Set buffer 1 (hi) address to dma address (hi)
-	 * Set buffer 2 (lo) address to zero
-	 * Set buffer 2 (hi) address to zero and set control bits
-	 * OWN and INTE
+	 * Set buffer 1 (lo) address to header dma address (lo)
+	 * Set buffer 1 (hi) address to header dma address (hi)
+	 * Set buffer 2 (lo) address to buffer dma address (lo)
+	 * Set buffer 2 (hi) address to buffer dma address (hi) and
+	 * set control bits OWN and INTE
 	 */
-	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx_dma));
-	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx_dma));
-	rdesc->desc2 = 0;
+	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx_hdr.dma));
+	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx_hdr.dma));
+	rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx_buf.dma));
+	rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx_buf.dma));
 
-	rdesc->desc3 = 0;
-	if (rdata->interrupt)
-		XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
+	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
+			  rdata->interrupt ? 1 : 0);
 
 	/* Since the Rx DMA engine is likely running, make sure everything
 	 * is written to the descriptor(s) before setting the OWN bit
@@ -1422,6 +1438,11 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
 		       CONTEXT_NEXT, 1);
 
+	/* Get the header length */
+	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+		rdata->hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
+						   RX_NORMAL_DESC2, HL);
+
 	/* Get the packet length */
 	rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
 
@@ -2453,6 +2474,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
 	xgbe_config_tx_coalesce(pdata);
 	xgbe_config_rx_buffer_size(pdata);
 	xgbe_config_tso_mode(pdata);
+	xgbe_config_sph_mode(pdata);
 	desc_if->wrapper_tx_desc_init(pdata);
 	desc_if->wrapper_rx_desc_init(pdata);
 	xgbe_enable_dma_interrupts(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index d65f5aa8fdce..07e2d216323a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1620,31 +1620,25 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
 
 static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
 				       struct xgbe_ring_data *rdata,
-				       unsigned int len)
+				       unsigned int *len)
 {
 	struct net_device *netdev = pdata->netdev;
 	struct sk_buff *skb;
 	u8 *packet;
 	unsigned int copy_len;
 
-	skb = netdev_alloc_skb_ip_align(netdev, XGBE_SKB_ALLOC_SIZE);
+	skb = netdev_alloc_skb_ip_align(netdev, rdata->rx_hdr.dma_len);
 	if (!skb)
 		return NULL;
 
-	packet = page_address(rdata->rx_pa.pages) + rdata->rx_pa.pages_offset;
-	copy_len = min_t(unsigned int, XGBE_SKB_ALLOC_SIZE, len);
+	packet = page_address(rdata->rx_hdr.pa.pages) +
+		 rdata->rx_hdr.pa.pages_offset;
+	copy_len = (rdata->hdr_len) ? rdata->hdr_len : *len;
+	copy_len = min(rdata->rx_hdr.dma_len, copy_len);
 	skb_copy_to_linear_data(skb, packet, copy_len);
 	skb_put(skb, copy_len);
 
-	rdata->rx_pa.pages_offset += copy_len;
-	len -= copy_len;
-	if (len)
-		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-				rdata->rx_pa.pages,
-				rdata->rx_pa.pages_offset,
-				len, rdata->rx_dma_len);
-	else
-		put_page(rdata->rx_pa.pages);
+	*len -= copy_len;
 
 	return skb;
 }
@@ -1757,10 +1751,6 @@ read_again:
 		ring->cur++;
 		ring->dirty++;
 
-		dma_sync_single_for_cpu(pdata->dev, rdata->rx_dma,
-					rdata->rx_dma_len,
-					DMA_FROM_DEVICE);
-
 		incomplete = XGMAC_GET_BITS(packet->attributes,
 					    RX_PACKET_ATTRIBUTES,
 					    INCOMPLETE);
@@ -1787,19 +1777,30 @@ read_again:
 			len += put_len;
 
 			if (!skb) {
-				skb = xgbe_create_skb(pdata, rdata, put_len);
+				dma_sync_single_for_cpu(pdata->dev,
+							rdata->rx_hdr.dma,
+							rdata->rx_hdr.dma_len,
+							DMA_FROM_DEVICE);
+
+				skb = xgbe_create_skb(pdata, rdata, &put_len);
 				if (!skb) {
 					error = 1;
 					goto read_again;
 				}
-			} else {
-				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-						rdata->rx_pa.pages,
-						rdata->rx_pa.pages_offset,
-						put_len, rdata->rx_dma_len);
 			}
 
-			rdata->rx_pa.pages = NULL;
+			if (put_len) {
+				dma_sync_single_for_cpu(pdata->dev,
+							rdata->rx_buf.dma,
+							rdata->rx_buf.dma_len,
+							DMA_FROM_DEVICE);
+
+				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+						rdata->rx_buf.pa.pages,
+						rdata->rx_buf.pa.pages_offset,
+						put_len, rdata->rx_buf.dma_len);
+				rdata->rx_buf.pa.pages = NULL;
+			}
 		}
 
 		if (incomplete || context_next)
@@ -1924,10 +1925,10 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
 	while (count--) {
 		rdata = XGBE_GET_DESC_DATA(ring, idx);
 		rdesc = rdata->rdesc;
-		DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
-		      (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
-		      le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
-		      le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
+		pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+			 (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+			 le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
+			 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
 		idx++;
 	}
 }
@@ -1935,9 +1936,9 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
 void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
 		       unsigned int idx)
 {
-	DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
-	      le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
-	      le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
+	pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
+		 le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
+		 le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
 }
 
 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index d3aa05501ee8..1480c9d41821 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -143,6 +143,7 @@
 #define XGBE_RX_MIN_BUF_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
 #define XGBE_RX_BUF_ALIGN	64
 #define XGBE_SKB_ALLOC_SIZE	256
+#define XGBE_SPH_HDSMS_SIZE	2	/* Keep in sync with SKB_ALLOC_SIZE */
 
 #define XGBE_MAX_DMA_CHANNELS	16
 #define XGBE_MAX_QUEUES		16
@@ -250,6 +251,15 @@ struct xgbe_page_alloc {
 	dma_addr_t pages_dma;
 };
 
+/* Ring entry buffer data */
+struct xgbe_buffer_data {
+	struct xgbe_page_alloc pa;
+	struct xgbe_page_alloc pa_unmap;
+
+	dma_addr_t dma;
+	unsigned int dma_len;
+};
+
 /* Structure used to hold information related to the descriptor
  * and the packet associated with the descriptor (always use
  * use the XGBE_GET_DESC_DATA macro to access this data from the ring)
@@ -263,12 +273,10 @@ struct xgbe_ring_data {
 	unsigned int skb_dma_len;	/* Length of SKB DMA area */
 	unsigned int tso_header;	/* TSO header indicator */
 
-	struct xgbe_page_alloc rx_pa;	/* Rx buffer page allocation */
-	struct xgbe_page_alloc rx_unmap;
-
-	dma_addr_t rx_dma;		/* DMA address of Rx buffer */
-	unsigned int rx_dma_len;	/* Length of the Rx DMA buffer */
+	struct xgbe_buffer_data rx_hdr;	/* Header locations */
+	struct xgbe_buffer_data rx_buf;	/* Payload locations */
 
+	unsigned short hdr_len;		/* Length of received header */
 	unsigned short len;		/* Length of received Rx packet */
 
 	unsigned int interrupt;		/* Interrupt indicator */
@@ -308,7 +316,8 @@ struct xgbe_ring {
 	struct xgbe_ring_data *rdata;
 
 	/* Page allocation for RX buffers */
-	struct xgbe_page_alloc rx_pa;
+	struct xgbe_page_alloc rx_hdr_pa;
+	struct xgbe_page_alloc rx_buf_pa;
 
 	/* Ring index values
 	 * cur - Tx: index of descriptor to be used for current transfer