author		Lennert Buytenhek <buytenh@wantstofly.org>	2008-03-18 14:34:34 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-04-16 20:06:48 -0400
commit		c0d0f2caa1cd0f015aa42bbdb10cb8913bb95e4e (patch)
tree		d9e4fcc57c20797298b33996e8b6b1a47103c65a /drivers
parent		4d64e718b46f4eedaf0379e0150de4d28b06b916 (diff)
mv643xx_eth: various cleanups
- Remove unused MV643XX_DEFAULT_[RT]X_QUEUE_SIZE definitions.
- Remove ETH_TARGET enum -- it isn't used anywhere in the driver, and
  isn't even valid for non-mv643xx chip models, as those use different
  MBUS target IDs.
- Clean up comment and control flow in mv643xx_eth_change_mtu().
- Use mp->dev instead of mp->mii.dev in mv643xx_eth_tx_timeout_task().
- Make mv643xx_eth_free_tx_descs() static.
- Remove overzealous NULL check in mv643xx_eth_start_xmit().
- Use symbolic NETDEV_TX_* constants in mv643xx_eth_start_xmit()
  (illustrated in the sketch after this list).

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Reviewed-by: Tzachi Perelstein <tzachi@marvell.com>
Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
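Several hunks below replace bare 0/1 return values in mv643xx_eth_start_xmit()
with the symbolic constants. As a hedged illustration of the convention those
constants encode (a hypothetical minimal transmit handler, not code from this
driver): NETDEV_TX_OK tells the core the skb was consumed, while NETDEV_TX_BUSY
tells it the skb was not consumed and should be requeued and retried.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hypothetical per-device state; a real driver tracks its ring here. */
	struct example_priv {
		unsigned int tx_desc_free;	/* free TX descriptors */
	};

	/* Minimal hard_start_xmit() sketch showing the NETDEV_TX_* convention. */
	static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct example_priv *ep = netdev_priv(dev);

		if (ep->tx_desc_free == 0) {
			/* No room: stop the queue; the core requeues the skb. */
			netif_stop_queue(dev);
			return NETDEV_TX_BUSY;	/* skb NOT consumed */
		}

		/* ... hand the skb to the hardware here ... */
		ep->tx_desc_free--;

		return NETDEV_TX_OK;		/* skb consumed */
	}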
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/mv643xx_eth.c	55
1 file changed, 16 insertions(+), 39 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 06e024f5d68b..b66d62768077 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -63,20 +63,6 @@
 #define MV643XX_TX_FAST_REFILL
 #undef MV643XX_COAL
 
-/*
- * Number of RX / TX descriptors on RX / TX rings.
- * Note that allocating RX descriptors is done by allocating the RX
- * ring AND a preallocated RX buffers (skb's) for each descriptor.
- * The TX descriptors only allocates the TX descriptors ring,
- * with no pre allocated TX buffers (skb's are allocated by higher layers.
- */
-
-/* Default TX ring size is 1000 descriptors */
-#define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000
-
-/* Default RX ring size is 400 descriptors */
-#define MV643XX_DEFAULT_RX_QUEUE_SIZE 400
-
 #define MV643XX_TX_COAL 100
 #ifdef MV643XX_COAL
 #define MV643XX_RX_COAL 100
@@ -434,14 +420,6 @@ typedef enum _eth_func_ret_status {
 	ETH_QUEUE_LAST_RESOURCE	/* Ring resources about to exhaust. */
 } ETH_FUNC_RET_STATUS;
 
-typedef enum _eth_target {
-	ETH_TARGET_DRAM,
-	ETH_TARGET_DEVICE,
-	ETH_TARGET_CBS,
-	ETH_TARGET_PCI0,
-	ETH_TARGET_PCI1
-} ETH_TARGET;
-
 /* These are for big-endian machines. Little endian needs different
  * definitions.
  */
@@ -615,7 +593,6 @@ static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num);
 static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num);
 static int mv643xx_eth_open(struct net_device *);
 static int mv643xx_eth_stop(struct net_device *);
-static int mv643xx_eth_change_mtu(struct net_device *, int);
 static void eth_port_init_mac_tables(unsigned int eth_port_num);
 #ifdef MV643XX_NAPI
 static int mv643xx_poll(struct napi_struct *napi, int budget);
@@ -659,18 +636,19 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
 		return -EINVAL;
 
 	dev->mtu = new_mtu;
+	if (!netif_running(dev))
+		return 0;
+
 	/*
-	 * Stop then re-open the interface. This will allocate RX skb's with
-	 * the new MTU.
-	 * There is a possible danger that the open will not successed, due
-	 * to memory is full, which might fail the open function.
+	 * Stop and then re-open the interface. This will allocate RX
+	 * skbs of the new MTU.
+	 * There is a possible danger that the open will not succeed,
+	 * due to memory being full, which might fail the open function.
 	 */
-	if (netif_running(dev)) {
-		mv643xx_eth_stop(dev);
-		if (mv643xx_eth_open(dev))
-			printk(KERN_ERR
-			       "%s: Fatal error on opening device\n",
-			       dev->name);
+	mv643xx_eth_stop(dev);
+	if (mv643xx_eth_open(dev)) {
+		printk(KERN_ERR "%s: Fatal error on opening device\n",
+		       dev->name);
 	}
 
 	return 0;
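Read as a whole, the reworked mv643xx_eth_change_mtu() comes out roughly as
below. This is a condensed sketch reconstructed from the hunk above; the MTU
bounds check is an assumption, since the hunk only shows its return -EINVAL.

	static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
	{
		if (new_mtu > 9500 || new_mtu < 64)	/* assumed bounds */
			return -EINVAL;

		dev->mtu = new_mtu;
		if (!netif_running(dev))
			return 0;	/* new MTU takes effect on next open */

		/*
		 * Stop and re-open the interface so RX skbs are reallocated
		 * at the new MTU; open may fail under memory pressure.
		 */
		mv643xx_eth_stop(dev);
		if (mv643xx_eth_open(dev)) {
			printk(KERN_ERR "%s: Fatal error on opening device\n",
			       dev->name);
		}

		return 0;
	}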
@@ -826,7 +804,7 @@ static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
 {
 	struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private,
 						  tx_timeout_task);
-	struct net_device *dev = mp->mii.dev; /* yuck */
+	struct net_device *dev = mp->dev;
 
 	if (!netif_running(dev))
 		return;
@@ -845,7 +823,7 @@ static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
  *
  * If force is non-zero, frees uncompleted descriptors as well
  */
-int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
+static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
 {
 	struct mv643xx_private *mp = netdev_priv(dev);
 	struct eth_tx_desc *desc;
@@ -1739,13 +1717,12 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned long flags;
 
 	BUG_ON(netif_queue_stopped(dev));
-	BUG_ON(skb == NULL);
 
 	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
 		stats->tx_dropped++;
 		printk(KERN_DEBUG "%s: failed to linearize tiny "
 				"unaligned fragment\n", dev->name);
-		return 1;
+		return NETDEV_TX_BUSY;
 	}
 
 	spin_lock_irqsave(&mp->lock, flags);
@@ -1754,7 +1731,7 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
 		netif_stop_queue(dev);
 		spin_unlock_irqrestore(&mp->lock, flags);
-		return 1;
+		return NETDEV_TX_BUSY;
 	}
 
 	eth_tx_submit_descs_for_skb(mp, skb);
@@ -1767,7 +1744,7 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	spin_unlock_irqrestore(&mp->lock, flags);
 
-	return 0;		/* success */
+	return NETDEV_TX_OK;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
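For reference, the NETDEV_TX_* codes used above were defined in
include/linux/netdevice.h in kernels of this era roughly as follows (values
assumed from 2.6.25-era headers; worth verifying against the exact tree):

	/* Driver transmit return codes */
	#define NETDEV_TX_OK		0	/* driver took care of packet */
	#define NETDEV_TX_BUSY		1	/* driver tried to transmit but failed */
	#define NETDEV_TX_LOCKED	-1	/* driver tx lock was already taken */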