author	Florian Fainelli <florian.fainelli@telecomint.eu>	2008-07-13 08:33:36 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-07-22 19:59:28 -0400
commit	9ca28dc4c75f018201e21b10e34b8161bcb0ffb2 (patch)
tree	4f7f87b570df4d12077a3854f1deb0a0b6cffc51 /drivers/net
parent	129cf9a7028fc50b226b8021bc0b76fb38efa81d (diff)
r6040: completely rework the RX path
This patch completely reworks the RX path in order to be more accurate
about what is going on with the MAC. We no longer read the error bits
from the MLSR register; instead we read the descriptor status register,
which reflects the error per descriptor. We now allocate skbs on the fly
in r6040_rx and handle allocation failure instead of simply dropping the
packet. The rx_free_desc counter is removed from the r6040_private
structure, since skbs are now allocated in the RX path.
r6040_rx_buf_alloc is now useless and is removed.

Signed-off-by: Joerg Albert <jal2@gmx.de>
Signed-off-by: Florian Fainelli <florian.fainelli@telecomint.eu>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
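As a quick key to the raw descriptor status values tested in the reworked
r6040_rx() below: the macro names here are purely illustrative (this patch
itself uses the literal constants), and the bit meanings are taken from the
comments in the hunk.

	#define RX_DSC_OWNER_MAC	0x8000	/* descriptor owned by the MAC, not the CPU */
	#define RX_DSC_ERR		0x0800	/* global error status set */
	#define RX_DSC_ERR_DRIBBLE	0x0400	/* RX dribble */
	#define RX_DSC_ERR_BUF		0x0200	/* buffer length exceeded */
	#define RX_DSC_ERR_LONG		0x0100	/* packet too long */
	#define RX_DSC_ERR_RUNT		0x0080	/* packet shorter than 64 bytes */
	#define RX_DSC_ERR_CRC		0x0040	/* CRC error */

The 0x8000 bit doubles as the ownership flag: the new loop only processes a
descriptor while this bit is clear, and sets it again to hand the descriptor
back to the MAC.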
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/r6040.c	144
1 file changed, 62 insertions(+), 82 deletions(-)
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 5eb057d37200..9061ec1aa4f7 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -175,7 +175,7 @@ struct r6040_private {
 	struct r6040_descriptor *tx_ring;
 	dma_addr_t rx_ring_dma;
 	dma_addr_t tx_ring_dma;
-	u16 tx_free_desc, rx_free_desc, phy_addr, phy_mode;
+	u16 tx_free_desc, phy_addr, phy_mode;
 	u16 mcr0, mcr1;
 	u16 switch_sig;
 	struct net_device *dev;
@@ -291,27 +291,6 @@ static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
 	desc->vndescp = desc_ring;
 }
 
-/* Allocate skb buffer for rx descriptor */
-static void r6040_rx_buf_alloc(struct r6040_private *lp, struct net_device *dev)
-{
-	struct r6040_descriptor *descptr;
-
-	descptr = lp->rx_insert_ptr;
-	while (lp->rx_free_desc < RX_DCNT) {
-		descptr->skb_ptr = netdev_alloc_skb(dev, MAX_BUF_SIZE);
-
-		if (!descptr->skb_ptr)
-			break;
-		descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
-			descptr->skb_ptr->data,
-			MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
-		descptr->status = 0x8000;
-		descptr = descptr->vndescp;
-		lp->rx_free_desc++;
-	}
-	lp->rx_insert_ptr = descptr;
-}
-
 static void r6040_init_txbufs(struct net_device *dev)
 {
 	struct r6040_private *lp = netdev_priv(dev);
@@ -556,71 +535,72 @@ static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 static int r6040_rx(struct net_device *dev, int limit)
 {
 	struct r6040_private *priv = netdev_priv(dev);
-	int count;
-	void __iomem *ioaddr = priv->base;
+	struct r6040_descriptor *descptr = priv->rx_remove_ptr;
+	struct sk_buff *skb_ptr, *new_skb;
+	int count = 0;
 	u16 err;
 
-	for (count = 0; count < limit; ++count) {
-		struct r6040_descriptor *descptr = priv->rx_remove_ptr;
-		struct sk_buff *skb_ptr;
-
-		descptr = priv->rx_remove_ptr;
-
-		/* Check for errors */
-		err = ioread16(ioaddr + MLSR);
-		if (err & 0x0400)
-			dev->stats.rx_errors++;
-		/* RX FIFO over-run */
-		if (err & 0x8000)
-			dev->stats.rx_fifo_errors++;
-		/* RX descriptor unavailable */
-		if (err & 0x0080)
-			dev->stats.rx_frame_errors++;
-		/* Received packet with length over buffer lenght */
-		if (err & 0x0020)
-			dev->stats.rx_over_errors++;
-		/* Received packet with too long or short */
-		if (err & (0x0010 | 0x0008))
-			dev->stats.rx_length_errors++;
-		/* Received packet with CRC errors */
-		if (err & 0x0004) {
-			spin_lock(&priv->lock);
-			dev->stats.rx_crc_errors++;
-			spin_unlock(&priv->lock);
-		}
-
-		while (priv->rx_free_desc) {
-			/* No RX packet */
-			if (descptr->status & 0x8000)
-				break;
-			skb_ptr = descptr->skb_ptr;
-			if (!skb_ptr) {
-				printk(KERN_ERR "%s: Inconsistent RX"
-					"descriptor chain\n",
-					dev->name);
-				break;
+	/* Limit not reached and the descriptor belongs to the CPU */
+	while (count < limit && !(descptr->status & 0x8000)) {
+		/* Read the descriptor status */
+		err = descptr->status;
+		/* Global error status set */
+		if (err & 0x0800) {
+			/* RX dribble */
+			if (err & 0x0400)
+				dev->stats.rx_frame_errors++;
+			/* Buffer lenght exceeded */
+			if (err & 0x0200)
+				dev->stats.rx_length_errors++;
+			/* Packet too long */
+			if (err & 0x0100)
+				dev->stats.rx_length_errors++;
+			/* Packet < 64 bytes */
+			if (err & 0x0080)
+				dev->stats.rx_length_errors++;
+			/* CRC error */
+			if (err & 0x0040) {
+				spin_lock(&priv->lock);
+				dev->stats.rx_crc_errors++;
+				spin_unlock(&priv->lock);
 			}
-			descptr->skb_ptr = NULL;
-			skb_ptr->dev = priv->dev;
-			/* Do not count the CRC */
-			skb_put(skb_ptr, descptr->len - 4);
-			pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
-				MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
-			skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
-			/* Send to upper layer */
-			netif_receive_skb(skb_ptr);
-			dev->last_rx = jiffies;
-			dev->stats.rx_packets++;
-			dev->stats.rx_bytes += descptr->len;
-			/* To next descriptor */
-			descptr = descptr->vndescp;
-			priv->rx_free_desc--;
+			goto next_descr;
+		}
+
+		/* Packet successfully received */
+		new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
+		if (!new_skb) {
+			dev->stats.rx_dropped++;
+			goto next_descr;
 		}
-		priv->rx_remove_ptr = descptr;
+		skb_ptr = descptr->skb_ptr;
+		skb_ptr->dev = priv->dev;
+
+		/* Do not count the CRC */
+		skb_put(skb_ptr, descptr->len - 4);
+		pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
+				MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+		skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
+
+		/* Send to upper layer */
+		netif_receive_skb(skb_ptr);
+		dev->last_rx = jiffies;
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += descptr->len - 4;
+
+		/* put new skb into descriptor */
+		descptr->skb_ptr = new_skb;
+		descptr->buf = cpu_to_le32(pci_map_single(priv->pdev,
+			descptr->skb_ptr->data,
+			MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
+
+next_descr:
+		/* put the descriptor back to the MAC */
+		descptr->status = 0x8000;
+		descptr = descptr->vndescp;
+		count++;
 	}
-	/* Allocate new RX buffer */
-	if (priv->rx_free_desc < RX_DCNT)
-		r6040_rx_buf_alloc(priv, priv->dev);
+	priv->rx_remove_ptr = descptr;
 
 	return count;
 }