aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/lib82596.c
diff options
context:
space:
mode:
authorJeff Garzik <jeff@garzik.org>2007-10-03 20:41:50 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2007-10-10 19:51:16 -0400
commit09f75cd7bf13720738e6a196cc0107ce9a5bd5a0 (patch)
tree4c85b0b395abe7f88c87162fc22570e5de255cb1 /drivers/net/lib82596.c
parentff8ac60948ba819b89e9c87083e8050fc2f89999 (diff)
[NET] drivers/net: statistics cleanup #1 -- save memory and shrink code
We now have struct net_device_stats embedded in struct net_device, and the default ->get_stats() hook does the obvious thing for us. Run through drivers/net/* and remove the driver-local storage of statistics, and driver-local ->get_stats() hook where applicable. This was just the low-hanging fruit in drivers/net; plenty more drivers remain to be updated. [ Resolved conflicts with napi_struct changes and fix sunqe build regression... -DaveM ] Signed-off-by: Jeff Garzik <jeff@garzik.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/lib82596.c')
-rw-r--r--drivers/net/lib82596.c64
1 file changed, 27 insertions, 37 deletions
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index 5884f5bd04a4..afa4638052a2 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -322,7 +322,6 @@ struct i596_private {
322 struct i596_cmd *cmd_head; 322 struct i596_cmd *cmd_head;
323 int cmd_backlog; 323 int cmd_backlog;
324 u32 last_cmd; 324 u32 last_cmd;
325 struct net_device_stats stats;
326 int next_tx_cmd; 325 int next_tx_cmd;
327 int options; 326 int options;
328 spinlock_t lock; /* serialize access to chip */ 327 spinlock_t lock; /* serialize access to chip */
@@ -352,7 +351,6 @@ static int i596_open(struct net_device *dev);
352static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); 351static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
353static irqreturn_t i596_interrupt(int irq, void *dev_id); 352static irqreturn_t i596_interrupt(int irq, void *dev_id);
354static int i596_close(struct net_device *dev); 353static int i596_close(struct net_device *dev);
355static struct net_device_stats *i596_get_stats(struct net_device *dev);
356static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); 354static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
357static void i596_tx_timeout (struct net_device *dev); 355static void i596_tx_timeout (struct net_device *dev);
358static void print_eth(unsigned char *buf, char *str); 356static void print_eth(unsigned char *buf, char *str);
@@ -725,7 +723,7 @@ memory_squeeze:
725 printk(KERN_ERR 723 printk(KERN_ERR
726 "%s: i596_rx Memory squeeze, dropping packet.\n", 724 "%s: i596_rx Memory squeeze, dropping packet.\n",
727 dev->name); 725 dev->name);
728 lp->stats.rx_dropped++; 726 dev->stats.rx_dropped++;
729 } else { 727 } else {
730 if (!rx_in_place) { 728 if (!rx_in_place) {
731 /* 16 byte align the data fields */ 729 /* 16 byte align the data fields */
@@ -742,28 +740,28 @@ memory_squeeze:
742 skb->protocol = eth_type_trans(skb, dev); 740 skb->protocol = eth_type_trans(skb, dev);
743 netif_rx(skb); 741 netif_rx(skb);
744 dev->last_rx = jiffies; 742 dev->last_rx = jiffies;
745 lp->stats.rx_packets++; 743 dev->stats.rx_packets++;
746 lp->stats.rx_bytes += pkt_len; 744 dev->stats.rx_bytes += pkt_len;
747 } 745 }
748 } else { 746 } else {
749 DEB(DEB_ERRORS, printk(KERN_DEBUG 747 DEB(DEB_ERRORS, printk(KERN_DEBUG
750 "%s: Error, rfd.stat = 0x%04x\n", 748 "%s: Error, rfd.stat = 0x%04x\n",
751 dev->name, rfd->stat)); 749 dev->name, rfd->stat));
752 lp->stats.rx_errors++; 750 dev->stats.rx_errors++;
753 if (rfd->stat & SWAP16(0x0100)) 751 if (rfd->stat & SWAP16(0x0100))
754 lp->stats.collisions++; 752 dev->stats.collisions++;
755 if (rfd->stat & SWAP16(0x8000)) 753 if (rfd->stat & SWAP16(0x8000))
756 lp->stats.rx_length_errors++; 754 dev->stats.rx_length_errors++;
757 if (rfd->stat & SWAP16(0x0001)) 755 if (rfd->stat & SWAP16(0x0001))
758 lp->stats.rx_over_errors++; 756 dev->stats.rx_over_errors++;
759 if (rfd->stat & SWAP16(0x0002)) 757 if (rfd->stat & SWAP16(0x0002))
760 lp->stats.rx_fifo_errors++; 758 dev->stats.rx_fifo_errors++;
761 if (rfd->stat & SWAP16(0x0004)) 759 if (rfd->stat & SWAP16(0x0004))
762 lp->stats.rx_frame_errors++; 760 dev->stats.rx_frame_errors++;
763 if (rfd->stat & SWAP16(0x0008)) 761 if (rfd->stat & SWAP16(0x0008))
764 lp->stats.rx_crc_errors++; 762 dev->stats.rx_crc_errors++;
765 if (rfd->stat & SWAP16(0x0010)) 763 if (rfd->stat & SWAP16(0x0010))
766 lp->stats.rx_length_errors++; 764 dev->stats.rx_length_errors++;
767 } 765 }
768 766
769 /* Clear the buffer descriptor count and EOF + F flags */ 767 /* Clear the buffer descriptor count and EOF + F flags */
@@ -821,8 +819,8 @@ static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private
821 819
822 dev_kfree_skb(skb); 820 dev_kfree_skb(skb);
823 821
824 lp->stats.tx_errors++; 822 dev->stats.tx_errors++;
825 lp->stats.tx_aborted_errors++; 823 dev->stats.tx_aborted_errors++;
826 824
827 ptr->v_next = NULL; 825 ptr->v_next = NULL;
828 ptr->b_next = I596_NULL; 826 ptr->b_next = I596_NULL;
@@ -951,10 +949,10 @@ static void i596_tx_timeout (struct net_device *dev)
951 "%s: transmit timed out, status resetting.\n", 949 "%s: transmit timed out, status resetting.\n",
952 dev->name)); 950 dev->name));
953 951
954 lp->stats.tx_errors++; 952 dev->stats.tx_errors++;
955 953
956 /* Try to restart the adaptor */ 954 /* Try to restart the adaptor */
957 if (lp->last_restart == lp->stats.tx_packets) { 955 if (lp->last_restart == dev->stats.tx_packets) {
958 DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n")); 956 DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
959 /* Shutdown and restart */ 957 /* Shutdown and restart */
960 i596_reset (dev, lp); 958 i596_reset (dev, lp);
@@ -964,7 +962,7 @@ static void i596_tx_timeout (struct net_device *dev)
964 lp->dma->scb.command = SWAP16(CUC_START | RX_START); 962 lp->dma->scb.command = SWAP16(CUC_START | RX_START);
965 DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb)); 963 DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
966 ca (dev); 964 ca (dev);
967 lp->last_restart = lp->stats.tx_packets; 965 lp->last_restart = dev->stats.tx_packets;
968 } 966 }
969 967
970 dev->trans_start = jiffies; 968 dev->trans_start = jiffies;
@@ -999,7 +997,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
999 DEB(DEB_ERRORS, printk(KERN_DEBUG 997 DEB(DEB_ERRORS, printk(KERN_DEBUG
1000 "%s: xmit ring full, dropping packet.\n", 998 "%s: xmit ring full, dropping packet.\n",
1001 dev->name)); 999 dev->name));
1002 lp->stats.tx_dropped++; 1000 dev->stats.tx_dropped++;
1003 1001
1004 dev_kfree_skb(skb); 1002 dev_kfree_skb(skb);
1005 } else { 1003 } else {
@@ -1025,8 +1023,8 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
1025 DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd)); 1023 DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd));
1026 i596_add_cmd(dev, &tx_cmd->cmd); 1024 i596_add_cmd(dev, &tx_cmd->cmd);
1027 1025
1028 lp->stats.tx_packets++; 1026 dev->stats.tx_packets++;
1029 lp->stats.tx_bytes += length; 1027 dev->stats.tx_bytes += length;
1030 } 1028 }
1031 1029
1032 netif_start_queue(dev); 1030 netif_start_queue(dev);
@@ -1076,7 +1074,6 @@ static int __devinit i82596_probe(struct net_device *dev)
1076 dev->open = i596_open; 1074 dev->open = i596_open;
1077 dev->stop = i596_close; 1075 dev->stop = i596_close;
1078 dev->hard_start_xmit = i596_start_xmit; 1076 dev->hard_start_xmit = i596_start_xmit;
1079 dev->get_stats = i596_get_stats;
1080 dev->set_multicast_list = set_multicast_list; 1077 dev->set_multicast_list = set_multicast_list;
1081 dev->tx_timeout = i596_tx_timeout; 1078 dev->tx_timeout = i596_tx_timeout;
1082 dev->watchdog_timeo = TX_TIMEOUT; 1079 dev->watchdog_timeo = TX_TIMEOUT;
@@ -1197,17 +1194,17 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
1197 DEB(DEB_TXADDR, 1194 DEB(DEB_TXADDR,
1198 print_eth(skb->data, "tx-done")); 1195 print_eth(skb->data, "tx-done"));
1199 } else { 1196 } else {
1200 lp->stats.tx_errors++; 1197 dev->stats.tx_errors++;
1201 if (ptr->status & SWAP16(0x0020)) 1198 if (ptr->status & SWAP16(0x0020))
1202 lp->stats.collisions++; 1199 dev->stats.collisions++;
1203 if (!(ptr->status & SWAP16(0x0040))) 1200 if (!(ptr->status & SWAP16(0x0040)))
1204 lp->stats.tx_heartbeat_errors++; 1201 dev->stats.tx_heartbeat_errors++;
1205 if (ptr->status & SWAP16(0x0400)) 1202 if (ptr->status & SWAP16(0x0400))
1206 lp->stats.tx_carrier_errors++; 1203 dev->stats.tx_carrier_errors++;
1207 if (ptr->status & SWAP16(0x0800)) 1204 if (ptr->status & SWAP16(0x0800))
1208 lp->stats.collisions++; 1205 dev->stats.collisions++;
1209 if (ptr->status & SWAP16(0x1000)) 1206 if (ptr->status & SWAP16(0x1000))
1210 lp->stats.tx_aborted_errors++; 1207 dev->stats.tx_aborted_errors++;
1211 } 1208 }
1212 dma_unmap_single(dev->dev.parent, 1209 dma_unmap_single(dev->dev.parent,
1213 tx_cmd->dma_addr, 1210 tx_cmd->dma_addr,
@@ -1292,8 +1289,8 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
1292 "%s: i596 interrupt receive unit inactive, status 0x%x\n", 1289 "%s: i596 interrupt receive unit inactive, status 0x%x\n",
1293 dev->name, status)); 1290 dev->name, status));
1294 ack_cmd |= RX_START; 1291 ack_cmd |= RX_START;
1295 lp->stats.rx_errors++; 1292 dev->stats.rx_errors++;
1296 lp->stats.rx_fifo_errors++; 1293 dev->stats.rx_fifo_errors++;
1297 rebuild_rx_bufs(dev); 1294 rebuild_rx_bufs(dev);
1298 } 1295 }
1299 } 1296 }
@@ -1346,13 +1343,6 @@ static int i596_close(struct net_device *dev)
1346 return 0; 1343 return 0;
1347} 1344}
1348 1345
1349static struct net_device_stats *i596_get_stats(struct net_device *dev)
1350{
1351 struct i596_private *lp = netdev_priv(dev);
1352
1353 return &lp->stats;
1354}
1355
1356/* 1346/*
1357 * Set or clear the multicast filter for this adaptor. 1347 * Set or clear the multicast filter for this adaptor.
1358 */ 1348 */