Diffstat (limited to 'drivers/net/sundance.c')

 -rw-r--r--   drivers/net/sundance.c | 275

1 file changed, 207 insertions(+), 68 deletions(-)
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 2678588ea4b2..3ed2a67bd6d3 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -96,16 +96,10 @@ static char *media[MAX_UNITS];
 #include <asm/io.h>
 #include <linux/delay.h>
 #include <linux/spinlock.h>
-#ifndef _COMPAT_WITH_OLD_KERNEL
+#include <linux/dma-mapping.h>
 #include <linux/crc32.h>
 #include <linux/ethtool.h>
 #include <linux/mii.h>
-#else
-#include "crc32.h"
-#include "ethtool.h"
-#include "mii.h"
-#include "compat.h"
-#endif
 
 /* These identify the driver base version and may not be removed. */
 static const char version[] __devinitconst =
@@ -369,9 +363,21 @@ struct netdev_private {
 	dma_addr_t tx_ring_dma;
 	dma_addr_t rx_ring_dma;
 	struct timer_list timer;	/* Media monitoring timer. */
+	/* ethtool extra stats */
+	struct {
+		u64 tx_multiple_collisions;
+		u64 tx_single_collisions;
+		u64 tx_late_collisions;
+		u64 tx_deferred;
+		u64 tx_deferred_excessive;
+		u64 tx_aborted;
+		u64 tx_bcasts;
+		u64 rx_bcasts;
+		u64 tx_mcasts;
+		u64 rx_mcasts;
+	} xstats;
 	/* Frequently used values: keep some adjacent for cache effect. */
 	spinlock_t lock;
-	spinlock_t rx_lock;	/* Group with Tx control cache line. */
 	int msg_enable;
 	int chip_id;
 	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
@@ -396,6 +402,7 @@ struct netdev_private {
 	unsigned char phys[MII_CNT];	/* MII device addresses, only first one used. */
 	struct pci_dev *pci_dev;
 	void __iomem *base;
+	spinlock_t statlock;
 };
 
 /* The station address location in the EEPROM. */
@@ -520,16 +527,19 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
 	np->chip_id = chip_idx;
 	np->msg_enable = (1 << debug) - 1;
 	spin_lock_init(&np->lock);
+	spin_lock_init(&np->statlock);
 	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
 	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
 
-	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
+			&ring_dma, GFP_KERNEL);
 	if (!ring_space)
 		goto err_out_cleardev;
 	np->tx_ring = (struct netdev_desc *)ring_space;
 	np->tx_ring_dma = ring_dma;
 
-	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
+			&ring_dma, GFP_KERNEL);
 	if (!ring_space)
 		goto err_out_unmap_tx;
 	np->rx_ring = (struct netdev_desc *)ring_space;
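
Note on the allocation hunk above: pci_alloc_consistent() was a thin
compatibility wrapper that always allocated with GFP_ATOMIC, so the
conversion also lets this sleepable probe path request GFP_KERNEL
explicitly. For comparison, the legacy wrapper (roughly as defined in the
old pci-dma-compat layer) looked like:

	static inline void *
	pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
			     dma_addr_t *dma_handle)
	{
		return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
					  size, dma_handle, GFP_ATOMIC);
	}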
@@ -663,9 +673,11 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
 err_out_unregister:
 	unregister_netdev(dev);
 err_out_unmap_rx:
-	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
+		np->rx_ring, np->rx_ring_dma);
 err_out_unmap_tx:
-	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
+		np->tx_ring, np->tx_ring_dma);
 err_out_cleardev:
 	pci_set_drvdata(pdev, NULL);
 	pci_iounmap(pdev, ioaddr);
@@ -874,7 +886,7 @@ static int netdev_open(struct net_device *dev)
 	init_timer(&np->timer);
 	np->timer.expires = jiffies + 3*HZ;
 	np->timer.data = (unsigned long)dev;
-	np->timer.function = &netdev_timer;	/* timer handler */
+	np->timer.function = netdev_timer;	/* timer handler */
 	add_timer(&np->timer);
 
 	/* Enable interrupts by setting the interrupt mask. */
@@ -1011,8 +1023,14 @@ static void init_ring(struct net_device *dev)
 		skb->dev = dev;		/* Mark as being used by this device. */
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 		np->rx_ring[i].frag[0].addr = cpu_to_le32(
-			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
-				PCI_DMA_FROMDEVICE));
+			dma_map_single(&np->pci_dev->dev, skb->data,
+				np->rx_buf_sz, DMA_FROM_DEVICE));
+		if (dma_mapping_error(&np->pci_dev->dev,
+					np->rx_ring[i].frag[0].addr)) {
+			dev_kfree_skb(skb);
+			np->rx_skbuff[i] = NULL;
+			break;
+		}
 		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
 	}
 	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1063,9 +1081,11 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
 
 	txdesc->next_desc = 0;
 	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
-	txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
-							skb->len,
-							PCI_DMA_TODEVICE));
+	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
+				skb->data, skb->len, DMA_TO_DEVICE));
+	if (dma_mapping_error(&np->pci_dev->dev,
+				txdesc->frag[0].addr))
+		goto drop_frame;
 	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
 
 	/* Increment cur_tx before tasklet_schedule() */
@@ -1087,6 +1107,12 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
 			dev->name, np->cur_tx, entry);
 	}
 	return NETDEV_TX_OK;
+
+drop_frame:
+	dev_kfree_skb(skb);
+	np->tx_skbuff[entry] = NULL;
+	dev->stats.tx_dropped++;
+	return NETDEV_TX_OK;
 }
 
 /* Reset hardware tx and free all of tx buffers */
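
The two start_tx() hunks above introduce the standard mapping-error
pattern: dma_map_single() can fail (for example under swiotlb or an
IOMMU), and a failed handle must never be handed to the hardware. A
minimal sketch of the pattern, with hypothetical locals dev, pdev and
skb:

	dma_addr_t addr;

	addr = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, addr)) {
		dev_kfree_skb_any(skb);		/* drop, do not requeue */
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;		/* skb was consumed */
	}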
@@ -1097,7 +1123,6 @@ reset_tx (struct net_device *dev)
 	void __iomem *ioaddr = np->base;
 	struct sk_buff *skb;
 	int i;
-	int irq = in_interrupt();
 
 	/* Reset tx logic, TxListPtr will be cleaned */
 	iowrite16 (TxDisable, ioaddr + MACCtrl1);
@@ -1109,13 +1134,10 @@ reset_tx (struct net_device *dev)
 
 		skb = np->tx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(np->pci_dev,
+			dma_unmap_single(&np->pci_dev->dev,
 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
-				skb->len, PCI_DMA_TODEVICE);
-			if (irq)
-				dev_kfree_skb_irq (skb);
-			else
-				dev_kfree_skb (skb);
+				skb->len, DMA_TO_DEVICE);
+			dev_kfree_skb_any(skb);
 			np->tx_skbuff[i] = NULL;
 			dev->stats.tx_dropped++;
 		}
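
Dropping the in_interrupt() bookkeeping above is safe because
dev_kfree_skb_any() makes the same decision internally; its core
(net/core/dev.c) is essentially:

	void dev_kfree_skb_any(struct sk_buff *skb)
	{
		if (in_irq() || irqs_disabled())
			dev_kfree_skb_irq(skb);
		else
			dev_kfree_skb(skb);
	}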
@@ -1233,9 +1255,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
 				break;
 			skb = np->tx_skbuff[entry];
 			/* Free the original skb. */
-			pci_unmap_single(np->pci_dev,
+			dma_unmap_single(&np->pci_dev->dev,
 				le32_to_cpu(np->tx_ring[entry].frag[0].addr),
-				skb->len, PCI_DMA_TODEVICE);
+				skb->len, DMA_TO_DEVICE);
 			dev_kfree_skb_irq (np->tx_skbuff[entry]);
 			np->tx_skbuff[entry] = NULL;
 			np->tx_ring[entry].frag[0].addr = 0;
@@ -1252,9 +1274,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
 				break;
 			skb = np->tx_skbuff[entry];
 			/* Free the original skb. */
-			pci_unmap_single(np->pci_dev,
+			dma_unmap_single(&np->pci_dev->dev,
 				le32_to_cpu(np->tx_ring[entry].frag[0].addr),
-				skb->len, PCI_DMA_TODEVICE);
+				skb->len, DMA_TO_DEVICE);
 			dev_kfree_skb_irq (np->tx_skbuff[entry]);
 			np->tx_skbuff[entry] = NULL;
 			np->tx_ring[entry].frag[0].addr = 0;
@@ -1334,22 +1356,18 @@ static void rx_poll(unsigned long data)
 			if (pkt_len < rx_copybreak &&
 			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
-				pci_dma_sync_single_for_cpu(np->pci_dev,
-					le32_to_cpu(desc->frag[0].addr),
-					np->rx_buf_sz,
-					PCI_DMA_FROMDEVICE);
-
+				dma_sync_single_for_cpu(&np->pci_dev->dev,
+						le32_to_cpu(desc->frag[0].addr),
+						np->rx_buf_sz, DMA_FROM_DEVICE);
 				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
-				pci_dma_sync_single_for_device(np->pci_dev,
-					le32_to_cpu(desc->frag[0].addr),
-					np->rx_buf_sz,
-					PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_device(&np->pci_dev->dev,
+						le32_to_cpu(desc->frag[0].addr),
+						np->rx_buf_sz, DMA_FROM_DEVICE);
 				skb_put(skb, pkt_len);
 			} else {
-				pci_unmap_single(np->pci_dev,
+				dma_unmap_single(&np->pci_dev->dev,
 					le32_to_cpu(desc->frag[0].addr),
-					np->rx_buf_sz,
-					PCI_DMA_FROMDEVICE);
+					np->rx_buf_sz, DMA_FROM_DEVICE);
 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
 				np->rx_skbuff[entry] = NULL;
 			}
@@ -1396,8 +1414,14 @@ static void refill_rx (struct net_device *dev)
 			skb->dev = dev;		/* Mark as being used by this device. */
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
-				pci_map_single(np->pci_dev, skb->data,
-					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
+				dma_map_single(&np->pci_dev->dev, skb->data,
+					np->rx_buf_sz, DMA_FROM_DEVICE));
+			if (dma_mapping_error(&np->pci_dev->dev,
+				    np->rx_ring[entry].frag[0].addr)) {
+				dev_kfree_skb_irq(skb);
+				np->rx_skbuff[entry] = NULL;
+				break;
+			}
 		}
 		/* Perhaps we need not reset this field. */
 		np->rx_ring[entry].frag[0].length =
@@ -1475,27 +1499,41 @@ static struct net_device_stats *get_stats(struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem *ioaddr = np->base;
-	int i;
+	unsigned long flags;
+	u8 late_coll, single_coll, mult_coll;
 
-	/* We should lock this segment of code for SMP eventually, although
-	   the vulnerability window is very small and statistics are
-	   non-critical. */
+	spin_lock_irqsave(&np->statlock, flags);
 	/* The chip only need report frame silently dropped. */
 	dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
 	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
 	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
-	dev->stats.collisions += ioread8(ioaddr + StatsLateColl);
-	dev->stats.collisions += ioread8(ioaddr + StatsMultiColl);
-	dev->stats.collisions += ioread8(ioaddr + StatsOneColl);
 	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
-	ioread8(ioaddr + StatsTxDefer);
-	for (i = StatsTxDefer; i <= StatsMcastRx; i++)
-		ioread8(ioaddr + i);
+
+	mult_coll = ioread8(ioaddr + StatsMultiColl);
+	np->xstats.tx_multiple_collisions += mult_coll;
+	single_coll = ioread8(ioaddr + StatsOneColl);
+	np->xstats.tx_single_collisions += single_coll;
+	late_coll = ioread8(ioaddr + StatsLateColl);
+	np->xstats.tx_late_collisions += late_coll;
+	dev->stats.collisions += mult_coll
+		+ single_coll
+		+ late_coll;
+
+	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
+	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
+	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
+	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
+	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
+	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
+	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
+
 	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
 	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
 	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
 	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
 
+	spin_unlock_irqrestore(&np->statlock, flags);
+
 	return &dev->stats;
 }
 
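
The new statlock matters because the MIB registers are clear-on-read:
without serialization, get_stats() invoked concurrently from the normal
stats path and from the new ethtool path would each consume part of a
count. An illustrative interleaving (values hypothetical):

	/* CPU0: ioread8(ioaddr + StatsMultiColl) -> 5, register resets to 0 */
	/* CPU1: ioread8(ioaddr + StatsMultiColl) -> 0                       */
	/* Each reader clears the counter for the other; holding statlock    */
	/* makes the whole read-and-accumulate sequence atomic with respect  */
	/* to the other caller, so no events are split or torn.              */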
@@ -1554,6 +1592,21 @@ static int __set_mac_addr(struct net_device *dev)
 	return 0;
 }
 
+static const struct {
+	const char name[ETH_GSTRING_LEN];
+} sundance_stats[] = {
+	{ "tx_multiple_collisions" },
+	{ "tx_single_collisions" },
+	{ "tx_late_collisions" },
+	{ "tx_deferred" },
+	{ "tx_deferred_excessive" },
+	{ "tx_aborted" },
+	{ "tx_bcasts" },
+	{ "rx_bcasts" },
+	{ "tx_mcasts" },
+	{ "rx_mcasts" },
+};
+
 static int check_if_running(struct net_device *dev)
 {
 	if (!netif_running(dev))
@@ -1612,6 +1665,42 @@ static void set_msglevel(struct net_device *dev, u32 val)
 	np->msg_enable = val;
 }
 
+static void get_strings(struct net_device *dev, u32 stringset,
+		u8 *data)
+{
+	if (stringset == ETH_SS_STATS)
+		memcpy(data, sundance_stats, sizeof(sundance_stats));
+}
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(sundance_stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void get_ethtool_stats(struct net_device *dev,
+		struct ethtool_stats *stats, u64 *data)
+{
+	struct netdev_private *np = netdev_priv(dev);
+	int i = 0;
+
+	get_stats(dev);
+	data[i++] = np->xstats.tx_multiple_collisions;
+	data[i++] = np->xstats.tx_single_collisions;
+	data[i++] = np->xstats.tx_late_collisions;
+	data[i++] = np->xstats.tx_deferred;
+	data[i++] = np->xstats.tx_deferred_excessive;
+	data[i++] = np->xstats.tx_aborted;
+	data[i++] = np->xstats.tx_bcasts;
+	data[i++] = np->xstats.rx_bcasts;
+	data[i++] = np->xstats.tx_mcasts;
+	data[i++] = np->xstats.rx_mcasts;
+}
+
 static const struct ethtool_ops ethtool_ops = {
 	.begin = check_if_running,
 	.get_drvinfo = get_drvinfo,
@@ -1621,6 +1710,9 @@ static const struct ethtool_ops ethtool_ops = {
 	.get_link = get_link,
 	.get_msglevel = get_msglevel,
 	.set_msglevel = set_msglevel,
+	.get_strings = get_strings,
+	.get_sset_count = get_sset_count,
+	.get_ethtool_stats = get_ethtool_stats,
 };
 
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
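
Once these three ops are wired up, the xstats counters accumulated by
get_stats() become visible to userspace through the standard ethtool
stats query, e.g. (interface name and values hypothetical):

	$ ethtool -S eth0
	NIC statistics:
	     tx_multiple_collisions: 0
	     tx_single_collisions: 3
	     tx_late_collisions: 0
	     tx_deferred: 12
	     ...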
@@ -1715,9 +1807,9 @@ static int netdev_close(struct net_device *dev)
 		np->rx_ring[i].status = 0;
 		skb = np->rx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(np->pci_dev,
+			dma_unmap_single(&np->pci_dev->dev,
 				le32_to_cpu(np->rx_ring[i].frag[0].addr),
-				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+				np->rx_buf_sz, DMA_FROM_DEVICE);
 			dev_kfree_skb(skb);
 			np->rx_skbuff[i] = NULL;
 		}
@@ -1727,9 +1819,9 @@ static int netdev_close(struct net_device *dev)
 		np->tx_ring[i].next_desc = 0;
 		skb = np->tx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(np->pci_dev,
+			dma_unmap_single(&np->pci_dev->dev,
 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
-				skb->len, PCI_DMA_TODEVICE);
+				skb->len, DMA_TO_DEVICE);
 			dev_kfree_skb(skb);
 			np->tx_skbuff[i] = NULL;
 		}
@@ -1743,25 +1835,72 @@ static void __devexit sundance_remove1 (struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 
 	if (dev) {
-		struct netdev_private *np = netdev_priv(dev);
-
-		unregister_netdev(dev);
-		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
-			np->rx_ring_dma);
-		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
-			np->tx_ring_dma);
-		pci_iounmap(pdev, np->base);
-		pci_release_regions(pdev);
-		free_netdev(dev);
-		pci_set_drvdata(pdev, NULL);
+		struct netdev_private *np = netdev_priv(dev);
+		unregister_netdev(dev);
+		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
+			np->rx_ring, np->rx_ring_dma);
+		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
+			np->tx_ring, np->tx_ring_dma);
+		pci_iounmap(pdev, np->base);
+		pci_release_regions(pdev);
+		free_netdev(dev);
+		pci_set_drvdata(pdev, NULL);
+	}
+}
+
+#ifdef CONFIG_PM
+
+static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
+{
+	struct net_device *dev = pci_get_drvdata(pci_dev);
+
+	if (!netif_running(dev))
+		return 0;
+
+	netdev_close(dev);
+	netif_device_detach(dev);
+
+	pci_save_state(pci_dev);
+	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+
+	return 0;
+}
+
+static int sundance_resume(struct pci_dev *pci_dev)
+{
+	struct net_device *dev = pci_get_drvdata(pci_dev);
+	int err = 0;
+
+	if (!netif_running(dev))
+		return 0;
+
+	pci_set_power_state(pci_dev, PCI_D0);
+	pci_restore_state(pci_dev);
+
+	err = netdev_open(dev);
+	if (err) {
+		printk(KERN_ERR "%s: Can't resume interface!\n",
+			dev->name);
+		goto out;
 	}
+
+	netif_device_attach(dev);
+
+out:
+	return err;
 }
 
+#endif /* CONFIG_PM */
+
 static struct pci_driver sundance_driver = {
 	.name		= DRV_NAME,
 	.id_table	= sundance_pci_tbl,
 	.probe		= sundance_probe1,
 	.remove		= __devexit_p(sundance_remove1),
+#ifdef CONFIG_PM
+	.suspend	= sundance_suspend,
+	.resume		= sundance_resume,
+#endif /* CONFIG_PM */
 };
 
 static int __init sundance_init(void)
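
The new hooks use the legacy pci_driver suspend/resume entry points and
deliberately reuse netdev_close()/netdev_open(), so the rings and the
media timer are torn down and rebuilt by the existing paths rather than
by PM-specific code. A quick way to exercise them on hardware that
supports suspend-to-RAM (interface name hypothetical):

	# ip link set eth0 up            # netdev_open() runs
	# echo mem > /sys/power/state    # sundance_suspend() closes the NIC;
	                                 # on wakeup sundance_resume() reopens it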