-rw-r--r--  drivers/net/cris/eth_v10.c  34
1 files changed, 15 insertions, 19 deletions
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 81475cc80e1..80c2feeefec 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -59,7 +59,6 @@ static struct sockaddr default_mac = {
 
 /* Information that need to be kept for each board. */
 struct net_local {
-	struct net_device_stats stats;
 	struct mii_if_info mii_if;
 
 	/* Tx control lock. This protects the transmit buffer ring
@@ -1059,7 +1058,7 @@ e100_tx_timeout(struct net_device *dev)
 
 	/* remember we got an error */
 
-	np->stats.tx_errors++;
+	dev->stats.tx_errors++;
 
 	/* reset the TX DMA in case it has hung on something */
 
@@ -1157,7 +1156,7 @@ e100rxtx_interrupt(int irq, void *dev_id)
			 * allocate a new buffer to put a packet in.
			 */
			e100_rx(dev);
-			np->stats.rx_packets++;
+			dev->stats.rx_packets++;
			/* restart/continue on the channel, for safety */
			*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
			/* clear dma channel 1 eop/descr irq bits */
@@ -1173,8 +1172,8 @@ e100rxtx_interrupt(int irq, void *dev_id)
 	/* Report any packets that have been sent */
 	while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
 	       (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
-		np->stats.tx_bytes += myFirstTxDesc->skb->len;
-		np->stats.tx_packets++;
+		dev->stats.tx_bytes += myFirstTxDesc->skb->len;
+		dev->stats.tx_packets++;
 
 		/* dma is ready with the transmission of the data in tx_skb, so now
 		   we can release the skb memory */
@@ -1197,7 +1196,6 @@ static irqreturn_t
 e100nw_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *)dev_id;
-	struct net_local *np = netdev_priv(dev);
 	unsigned long irqbits = *R_IRQ_MASK0_RD;
 
 	/* check for underrun irq */
@@ -1205,13 +1203,13 @@ e100nw_interrupt(int irq, void *dev_id)
 		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
 		*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
 		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
-		np->stats.tx_errors++;
+		dev->stats.tx_errors++;
 		D(printk("ethernet receiver underrun!\n"));
 	}
 
 	/* check for overrun irq */
 	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) {
-		update_rx_stats(&np->stats); /* this will ack the irq */
+		update_rx_stats(&dev->stats); /* this will ack the irq */
 		D(printk("ethernet receiver overrun!\n"));
 	}
 	/* check for excessive collision irq */
@@ -1219,7 +1217,7 @@ e100nw_interrupt(int irq, void *dev_id)
 		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
 		*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
 		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
-		np->stats.tx_errors++;
+		dev->stats.tx_errors++;
 		D(printk("ethernet excessive collisions!\n"));
 	}
 	return IRQ_HANDLED;
@@ -1250,7 +1248,7 @@ e100_rx(struct net_device *dev)
 	spin_unlock(&np->led_lock);
 
 	length = myNextRxDesc->descr.hw_len - 4;
-	np->stats.rx_bytes += length;
+	dev->stats.rx_bytes += length;
 
 #ifdef ETHDEBUG
 	printk("Got a packet of length %d:\n", length);
@@ -1268,7 +1266,7 @@ e100_rx(struct net_device *dev)
 		/* Small packet, copy data */
 		skb = dev_alloc_skb(length - ETHER_HEAD_LEN);
 		if (!skb) {
-			np->stats.rx_errors++;
+			dev->stats.rx_errors++;
 			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
 			goto update_nextrxdesc;
 		}
@@ -1294,7 +1292,7 @@ e100_rx(struct net_device *dev)
 		int align;
 		struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
 		if (!new_skb) {
-			np->stats.rx_errors++;
+			dev->stats.rx_errors++;
 			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
 			goto update_nextrxdesc;
 		}
@@ -1333,8 +1331,6 @@ e100_rx(struct net_device *dev)
 static int
 e100_close(struct net_device *dev)
 {
-	struct net_local *np = netdev_priv(dev);
-
 	printk(KERN_INFO "Closing %s.\n", dev->name);
 
 	netif_stop_queue(dev);
@@ -1366,8 +1362,8 @@ e100_close(struct net_device *dev)
 
 	/* Update the statistics here. */
 
-	update_rx_stats(&np->stats);
-	update_tx_stats(&np->stats);
+	update_rx_stats(&dev->stats);
+	update_tx_stats(&dev->stats);
 
 	/* Stop speed/duplex timers */
 	del_timer(&speed_timer);
@@ -1545,11 +1541,11 @@ e100_get_stats(struct net_device *dev)
 
 	spin_lock_irqsave(&lp->lock, flags);
 
-	update_rx_stats(&lp->stats);
-	update_tx_stats(&lp->stats);
+	update_rx_stats(&dev->stats);
+	update_tx_stats(&dev->stats);
 
 	spin_unlock_irqrestore(&lp->lock, flags);
-	return &lp->stats;
+	return &dev->stats;
 }
 
 /*
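
Every hunk applies the same pattern: the driver-private struct net_device_stats copy in struct net_local goes away, the counters are bumped in the core-owned dev->stats instead, and e100_get_stats() can hand back &dev->stats directly. Below is a minimal sketch of that pattern; it is not taken from the patch, and the example_* helper names are hypothetical, used only for illustration.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Account a received packet against the counters the networking core
 * already keeps in struct net_device; no driver-private stats copy is
 * needed. */
static void example_count_rx(struct net_device *dev, struct sk_buff *skb)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
}

/* With the counters living in the net_device itself, get_stats simply
 * returns a pointer to them. */
static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	return &dev->stats;
}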