author		Havard Skinnemoen <havard@skinnemoen.net>	2012-10-31 02:04:55 -0400
committer	David S. Miller <davem@davemloft.net>	2012-11-01 11:45:58 -0400
commit		55054a16a5ecf7202e698b07f00ad8e0dadf7d50 (patch)
tree		e024a8143ea810b333af3afbcc52f2d2bef93aae /drivers
parent		cde30a857ca10b8ba55a441193864aa04a4832f7 (diff)
net/macb: clean up ring buffer logic
Instead of masking head and tail every time we increment them, just let them
wrap through UINT_MAX and mask them when subscripting. Add simple accessor
functions to do the subscripting properly to minimize the chances of messing
this up.

This makes the code slightly smaller, and hopefully faster as well. Also,
doing the ring buffer management this way will simplify things a lot when
making the ring sizes configurable in the future.

Available number of descriptors in ring buffer function by David Laight.

Signed-off-by: Havard Skinnemoen <havard@skinnemoen.net>
[nicolas.ferre@atmel.com: split patch in topics, adapt to newer kernel]
Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Tested-by: Joachim Eastwood <manabian@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
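The indexing scheme is easier to see in isolation than in the driver itself. The following self-contained userspace C sketch uses hypothetical names (RING_SIZE, ring_wrap) and is not macb code; it only illustrates the idea the patch relies on: head and tail are free-running unsigned counters that are masked only when used as array subscripts (so the ring size must be a power of 2), and unsigned subtraction of the two counters keeps giving the number of in-flight entries even after they wrap through UINT_MAX.

/* Userspace sketch of the wrap-through-UINT_MAX ring indexing described
 * above; hypothetical example, not macb driver code. */
#include <stdio.h>

#define RING_SIZE 8	/* must be power of 2 */

static unsigned int ring_wrap(unsigned int index)
{
	return index & (RING_SIZE - 1);
}

int main(void)
{
	int ring[RING_SIZE];
	unsigned int head = 0, tail = 0;	/* free-running, never masked */
	unsigned int i;

	/* Producer side: the increment itself is never masked. */
	for (i = 0; i < 5; i++) {
		ring[ring_wrap(head)] = (int)(i * 10);
		head++;
	}

	/* Unsigned subtraction stays correct across wrap-around:
	 * head - tail entries are in flight, the rest of the ring is free. */
	printf("in flight: %u, free: %u\n",
	       head - tail, RING_SIZE - (head - tail));

	/* Consumer side: mask only at subscript time. */
	for (; tail != head; tail++)
		printf("slot %u holds %d\n",
		       ring_wrap(tail), ring[ring_wrap(tail)]);

	return 0;
}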
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ethernet/cadence/at91_ether.c	6
-rw-r--r--	drivers/net/ethernet/cadence/macb.c		172
-rw-r--r--	drivers/net/ethernet/cadence/macb.h		22
3 files changed, 127 insertions, 73 deletions
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index b92815aabc65..0d6392d24ff7 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -156,7 +156,7 @@ static int at91ether_start(struct net_device *dev)
 	int i;
 
 	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
-					 MAX_RX_DESCR * sizeof(struct dma_desc),
+					 MAX_RX_DESCR * sizeof(struct macb_dma_desc),
 					 &lp->rx_ring_dma, GFP_KERNEL);
 	if (!lp->rx_ring) {
 		netdev_err(lp->dev, "unable to alloc rx ring DMA buffer\n");
@@ -170,7 +170,7 @@ static int at91ether_start(struct net_device *dev)
 		netdev_err(lp->dev, "unable to alloc rx data DMA buffer\n");
 
 		dma_free_coherent(&lp->pdev->dev,
-				  MAX_RX_DESCR * sizeof(struct dma_desc),
+				  MAX_RX_DESCR * sizeof(struct macb_dma_desc),
 				  lp->rx_ring, lp->rx_ring_dma);
 		lp->rx_ring = NULL;
 		return -ENOMEM;
@@ -256,7 +256,7 @@ static int at91ether_close(struct net_device *dev)
 	netif_stop_queue(dev);
 
 	dma_free_coherent(&lp->pdev->dev,
-			  MAX_RX_DESCR * sizeof(struct dma_desc),
+			  MAX_RX_DESCR * sizeof(struct macb_dma_desc),
 			  lp->rx_ring, lp->rx_ring_dma);
 	lp->rx_ring = NULL;
 
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index cd6d431ff2b4..c432d417a0dc 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -30,25 +30,14 @@
 #include "macb.h"
 
 #define RX_BUFFER_SIZE		128
-#define RX_RING_SIZE		512
-#define RX_RING_BYTES		(sizeof(struct dma_desc) * RX_RING_SIZE)
+#define RX_RING_SIZE		512 /* must be power of 2 */
+#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 
 /* Make the IP header word-aligned (the ethernet header is 14 bytes) */
 #define RX_OFFSET		2
 
-#define TX_RING_SIZE		128
-#define DEF_TX_RING_PENDING	(TX_RING_SIZE - 1)
-#define TX_RING_BYTES		(sizeof(struct dma_desc) * TX_RING_SIZE)
-
-#define TX_RING_GAP(bp)						\
-	(TX_RING_SIZE - (bp)->tx_pending)
-#define TX_BUFFS_AVAIL(bp)					\
-	(((bp)->tx_tail <= (bp)->tx_head) ?			\
-	 (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head :	\
-	 (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
-#define NEXT_TX(n)		(((n) + 1) & (TX_RING_SIZE - 1))
-
-#define NEXT_RX(n)		(((n) + 1) & (RX_RING_SIZE - 1))
+#define TX_RING_SIZE		128 /* must be power of 2 */
+#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 
 /* minimum number of free TX descriptors before waking up TX process */
 #define MACB_TX_WAKEUP_THRESH	(TX_RING_SIZE / 4)
@@ -56,6 +45,51 @@
 #define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 				 | MACB_BIT(ISR_ROVR))
 
+/* Ring buffer accessors */
+static unsigned int macb_tx_ring_wrap(unsigned int index)
+{
+	return index & (TX_RING_SIZE - 1);
+}
+
+static unsigned int macb_tx_ring_avail(struct macb *bp)
+{
+	return (bp->tx_tail - bp->tx_head) & (TX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
+{
+	return &bp->tx_ring[macb_tx_ring_wrap(index)];
+}
+
+static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
+{
+	return &bp->tx_skb[macb_tx_ring_wrap(index)];
+}
+
+static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
+{
+	dma_addr_t offset;
+
+	offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
+
+	return bp->tx_ring_dma + offset;
+}
+
+static unsigned int macb_rx_ring_wrap(unsigned int index)
+{
+	return index & (RX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+{
+	return &bp->rx_ring[macb_rx_ring_wrap(index)];
+}
+
+static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+{
+	return bp->rx_buffers + RX_BUFFER_SIZE * macb_rx_ring_wrap(index);
+}
+
 static void __macb_set_hwaddr(struct macb *bp)
 {
 	u32 bottom;
@@ -336,17 +370,18 @@ static void macb_tx(struct macb *bp)
 	bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
 
 	/* free transmit buffer in upper layer*/
-	for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
-		struct ring_info *rp = &bp->tx_skb[tail];
-		struct sk_buff *skb = rp->skb;
-
-		BUG_ON(skb == NULL);
+	for (tail = bp->tx_tail; tail != head; tail++) {
+		struct macb_tx_skb *tx_skb;
+		struct sk_buff *skb;
 
 		rmb();
 
-		dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
-				 DMA_TO_DEVICE);
-		rp->skb = NULL;
+		tx_skb = macb_tx_skb(bp, tail);
+		skb = tx_skb->skb;
+
+		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
+				 skb->len, DMA_TO_DEVICE);
+		tx_skb->skb = NULL;
 		dev_kfree_skb_irq(skb);
 	}
 
@@ -366,34 +401,38 @@ static void macb_tx(struct macb *bp)
 		return;
 
 	head = bp->tx_head;
-	for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
-		struct ring_info *rp = &bp->tx_skb[tail];
-		struct sk_buff *skb = rp->skb;
-		u32 bufstat;
+	for (tail = bp->tx_tail; tail != head; tail++) {
+		struct macb_tx_skb *tx_skb;
+		struct sk_buff *skb;
+		struct macb_dma_desc *desc;
+		u32 ctrl;
 
-		BUG_ON(skb == NULL);
+		desc = macb_tx_desc(bp, tail);
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
-		bufstat = bp->tx_ring[tail].ctrl;
+		ctrl = desc->ctrl;
 
-		if (!(bufstat & MACB_BIT(TX_USED)))
+		if (!(ctrl & MACB_BIT(TX_USED)))
 			break;
 
+		tx_skb = macb_tx_skb(bp, tail);
+		skb = tx_skb->skb;
+
 		netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
-			    tail, skb->data);
-		dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
+			    macb_tx_ring_wrap(tail), skb->data);
+		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
 				 DMA_TO_DEVICE);
 		bp->stats.tx_packets++;
 		bp->stats.tx_bytes += skb->len;
-		rp->skb = NULL;
+		tx_skb->skb = NULL;
 		dev_kfree_skb_irq(skb);
 	}
 
 	bp->tx_tail = tail;
-	if (netif_queue_stopped(bp->dev) &&
-	    TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH)
+	if (netif_queue_stopped(bp->dev)
+	    && macb_tx_ring_avail(bp) > MACB_TX_WAKEUP_THRESH)
 		netif_wake_queue(bp->dev);
 }
 
@@ -404,17 +443,21 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 	unsigned int frag;
 	unsigned int offset = 0;
 	struct sk_buff *skb;
+	struct macb_dma_desc *desc;
 
-	len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);
+	desc = macb_rx_desc(bp, last_frag);
+	len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
 
 	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
-		    first_frag, last_frag, len);
+		    macb_rx_ring_wrap(first_frag),
+		    macb_rx_ring_wrap(last_frag), len);
 
 	skb = netdev_alloc_skb(bp->dev, len + RX_OFFSET);
 	if (!skb) {
 		bp->stats.rx_dropped++;
-		for (frag = first_frag; ; frag = NEXT_RX(frag)) {
-			bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+		for (frag = first_frag; ; frag++) {
+			desc = macb_rx_desc(bp, frag);
+			desc->addr &= ~MACB_BIT(RX_USED);
 			if (frag == last_frag)
 				break;
 		}
@@ -429,7 +472,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 	skb_checksum_none_assert(skb);
 	skb_put(skb, len);
 
-	for (frag = first_frag; ; frag = NEXT_RX(frag)) {
+	for (frag = first_frag; ; frag++) {
 		unsigned int frag_len = RX_BUFFER_SIZE;
 
 		if (offset + frag_len > len) {
@@ -437,11 +480,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 			frag_len = len - offset;
 		}
 		skb_copy_to_linear_data_offset(skb, offset,
-				(bp->rx_buffers +
-				 (RX_BUFFER_SIZE * frag)),
-				frag_len);
+				macb_rx_buffer(bp, frag), frag_len);
 		offset += RX_BUFFER_SIZE;
-		bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+		desc = macb_rx_desc(bp, frag);
+		desc->addr &= ~MACB_BIT(RX_USED);
 
 		if (frag == last_frag)
 			break;
@@ -467,8 +509,10 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
467{ 509{
468 unsigned int frag; 510 unsigned int frag;
469 511
470 for (frag = begin; frag != end; frag = NEXT_RX(frag)) 512 for (frag = begin; frag != end; frag++) {
471 bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); 513 struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
514 desc->addr &= ~MACB_BIT(RX_USED);
515 }
472 516
473 /* Make descriptor updates visible to hardware */ 517 /* Make descriptor updates visible to hardware */
474 wmb(); 518 wmb();
@@ -483,17 +527,18 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
 static int macb_rx(struct macb *bp, int budget)
 {
 	int received = 0;
-	unsigned int tail = bp->rx_tail;
+	unsigned int tail;
 	int first_frag = -1;
 
-	for (; budget > 0; tail = NEXT_RX(tail)) {
+	for (tail = bp->rx_tail; budget > 0; tail++) {
+		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
 		u32 addr, ctrl;
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
-		addr = bp->rx_ring[tail].addr;
-		ctrl = bp->rx_ring[tail].ctrl;
+		addr = desc->addr;
+		ctrl = desc->ctrl;
 
 		if (!(addr & MACB_BIT(RX_USED)))
 			break;
@@ -647,6 +692,8 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct macb *bp = netdev_priv(dev);
 	dma_addr_t mapping;
 	unsigned int len, entry;
+	struct macb_dma_desc *desc;
+	struct macb_tx_skb *tx_skb;
 	u32 ctrl;
 	unsigned long flags;
 
@@ -663,7 +710,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
-	if (TX_BUFFS_AVAIL(bp) < 1) {
+	if (macb_tx_ring_avail(bp) < 1) {
 		netif_stop_queue(dev);
 		spin_unlock_irqrestore(&bp->lock, flags);
 		netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
@@ -672,12 +719,15 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	entry = bp->tx_head;
+	entry = macb_tx_ring_wrap(bp->tx_head);
+	bp->tx_head++;
 	netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
 	mapping = dma_map_single(&bp->pdev->dev, skb->data,
 				 len, DMA_TO_DEVICE);
-	bp->tx_skb[entry].skb = skb;
-	bp->tx_skb[entry].mapping = mapping;
+
+	tx_skb = &bp->tx_skb[entry];
+	tx_skb->skb = skb;
+	tx_skb->mapping = mapping;
 	netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
 		    skb->data, (unsigned long)mapping);
 
@@ -686,20 +736,18 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (entry == (TX_RING_SIZE - 1))
 		ctrl |= MACB_BIT(TX_WRAP);
 
-	bp->tx_ring[entry].addr = mapping;
-	bp->tx_ring[entry].ctrl = ctrl;
+	desc = &bp->tx_ring[entry];
+	desc->addr = mapping;
+	desc->ctrl = ctrl;
 
 	/* Make newly initialized descriptor visible to hardware */
 	wmb();
 
-	entry = NEXT_TX(entry);
-	bp->tx_head = entry;
-
 	skb_tx_timestamp(skb);
 
 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
 
-	if (TX_BUFFS_AVAIL(bp) < 1)
+	if (macb_tx_ring_avail(bp) < 1)
 		netif_stop_queue(dev);
 
 	spin_unlock_irqrestore(&bp->lock, flags);
@@ -735,7 +783,7 @@ static int macb_alloc_consistent(struct macb *bp)
 {
 	int size;
 
-	size = TX_RING_SIZE * sizeof(struct ring_info);
+	size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
 	bp->tx_skb = kmalloc(size, GFP_KERNEL);
 	if (!bp->tx_skb)
 		goto out_err;
@@ -1412,8 +1460,6 @@ static int __init macb_probe(struct platform_device *pdev)
 	macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
 #endif
 
-	bp->tx_pending = DEF_TX_RING_PENDING;
-
 	err = register_netdev(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 33a050f85ddf..024a270a792a 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -362,7 +362,12 @@
 		__v; \
 	})
 
-struct dma_desc {
+/**
+ * struct macb_dma_desc - Hardware DMA descriptor
+ * @addr: DMA address of data buffer
+ * @ctrl: Control and status bits
+ */
+struct macb_dma_desc {
 	u32	addr;
 	u32	ctrl;
 };
@@ -427,7 +432,12 @@ struct dma_desc {
 #define MACB_TX_USED_OFFSET			31
 #define MACB_TX_USED_SIZE			1
 
-struct ring_info {
+/**
+ * struct macb_tx_skb - data about an skb which is being transmitted
+ * @skb: skb currently being transmitted
+ * @mapping: DMA address of the skb's data buffer
+ */
+struct macb_tx_skb {
 	struct sk_buff		*skb;
 	dma_addr_t		mapping;
 };
@@ -512,12 +522,12 @@ struct macb {
 	void __iomem		*regs;
 
 	unsigned int		rx_tail;
-	struct dma_desc		*rx_ring;
+	struct macb_dma_desc	*rx_ring;
 	void			*rx_buffers;
 
 	unsigned int		tx_head, tx_tail;
-	struct dma_desc		*tx_ring;
-	struct ring_info	*tx_skb;
+	struct macb_dma_desc	*tx_ring;
+	struct macb_tx_skb	*tx_skb;
 
 	spinlock_t		lock;
 	struct platform_device	*pdev;
@@ -535,8 +545,6 @@ struct macb {
 	dma_addr_t		tx_ring_dma;
 	dma_addr_t		rx_buffers_dma;
 
-	unsigned int		rx_pending, tx_pending;
-
 	struct mii_bus		*mii_bus;
 	struct phy_device	*phy_dev;
 	unsigned int		link;