author    Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/net/sundance.c
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent    6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c

Diffstat (limited to 'drivers/net/sundance.c')

 -rw-r--r--  drivers/net/sundance.c  302
 1 file changed, 229 insertions(+), 73 deletions(-)
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 2678588ea4b2..4793df843c24 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -96,16 +96,10 @@ static char *media[MAX_UNITS];
 #include <asm/io.h>
 #include <linux/delay.h>
 #include <linux/spinlock.h>
-#ifndef _COMPAT_WITH_OLD_KERNEL
+#include <linux/dma-mapping.h>
 #include <linux/crc32.h>
 #include <linux/ethtool.h>
 #include <linux/mii.h>
-#else
-#include "crc32.h"
-#include "ethtool.h"
-#include "mii.h"
-#include "compat.h"
-#endif
 
 /* These identify the driver base version and may not be removed. */
 static const char version[] __devinitconst =
@@ -300,6 +294,9 @@ enum alta_offsets {
 	/* Aliased and bogus values! */
 	RxStatus = 0x0c,
 };
+
+#define ASIC_HI_WORD(x)	((x) + 2)
+
 enum ASICCtrl_HiWord_bit {
 	GlobalReset = 0x0001,
 	RxReset = 0x0002,
@@ -369,9 +366,21 @@ struct netdev_private {
 	dma_addr_t tx_ring_dma;
 	dma_addr_t rx_ring_dma;
 	struct timer_list timer;	/* Media monitoring timer. */
+	/* ethtool extra stats */
+	struct {
+		u64 tx_multiple_collisions;
+		u64 tx_single_collisions;
+		u64 tx_late_collisions;
+		u64 tx_deferred;
+		u64 tx_deferred_excessive;
+		u64 tx_aborted;
+		u64 tx_bcasts;
+		u64 rx_bcasts;
+		u64 tx_mcasts;
+		u64 rx_mcasts;
+	} xstats;
 	/* Frequently used values: keep some adjacent for cache effect. */
 	spinlock_t lock;
-	spinlock_t rx_lock;	/* Group with Tx control cache line. */
 	int msg_enable;
 	int chip_id;
 	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
@@ -396,6 +405,7 @@ struct netdev_private {
 	unsigned char phys[MII_CNT];	/* MII device addresses, only first one used. */
 	struct pci_dev *pci_dev;
 	void __iomem *base;
+	spinlock_t statlock;
 };
 
 /* The station address location in the EEPROM. */
@@ -424,6 +434,7 @@ static void netdev_error(struct net_device *dev, int intr_status);
 static void netdev_error(struct net_device *dev, int intr_status);
 static void set_rx_mode(struct net_device *dev);
 static int __set_mac_addr(struct net_device *dev);
+static int sundance_set_mac_addr(struct net_device *dev, void *data);
 static struct net_device_stats *get_stats(struct net_device *dev);
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static int netdev_close(struct net_device *dev);
@@ -457,7 +468,7 @@ static const struct net_device_ops netdev_ops = {
 	.ndo_do_ioctl = netdev_ioctl,
 	.ndo_tx_timeout = tx_timeout,
 	.ndo_change_mtu = change_mtu,
-	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_set_mac_address = sundance_set_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
 };
 
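The handler swap above is needed because the stock helper never touches the adapter: eth_mac_addr() only validates the new address and copies it into dev->dev_addr, and (approximately, at this tree's vintage) refuses to run while the interface is up:

    /* net/ethernet/eth.c, approximately: */
    int eth_mac_addr(struct net_device *dev, void *p)
    {
            struct sockaddr *addr = p;

            if (netif_running(dev))
                    return -EBUSY;
            if (!is_valid_ether_addr(addr->sa_data))
                    return -EADDRNOTAVAIL;
            memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
            return 0;
    }

sundance_set_mac_addr(), added further down, performs the same validation and copy but then calls __set_mac_addr() to program the station address into the chip, so the hardware filter follows the software address.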
@@ -520,16 +531,19 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
 	np->chip_id = chip_idx;
 	np->msg_enable = (1 << debug) - 1;
 	spin_lock_init(&np->lock);
+	spin_lock_init(&np->statlock);
 	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
 	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
 
-	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
+			&ring_dma, GFP_KERNEL);
 	if (!ring_space)
 		goto err_out_cleardev;
 	np->tx_ring = (struct netdev_desc *)ring_space;
 	np->tx_ring_dma = ring_dma;
 
-	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
+			&ring_dma, GFP_KERNEL);
 	if (!ring_space)
 		goto err_out_unmap_tx;
 	np->rx_ring = (struct netdev_desc *)ring_space;
@@ -663,9 +677,11 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
 err_out_unregister:
 	unregister_netdev(dev);
 err_out_unmap_rx:
-	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
+		np->rx_ring, np->rx_ring_dma);
 err_out_unmap_tx:
-	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
+		np->tx_ring, np->tx_ring_dma);
 err_out_cleardev:
 	pci_set_drvdata(pdev, NULL);
 	pci_iounmap(pdev, ioaddr);
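The probe and error-path hunks above belong to a file-wide migration from the legacy pci_* DMA wrappers to the generic DMA API. The correspondence, sketched outside the driver (variable names illustrative):

    #include <linux/dma-mapping.h>

    /* legacy wrapper: takes the pci_dev and implies GFP_ATOMIC */
    ring = pci_alloc_consistent(pdev, size, &ring_dma);
    pci_free_consistent(pdev, size, ring, ring_dma);

    /* generic API: takes the underlying struct device plus an explicit
     * gfp_t -- GFP_KERNEL is safe here because probe may sleep */
    ring = dma_alloc_coherent(&pdev->dev, size, &ring_dma, GFP_KERNEL);
    dma_free_coherent(&pdev->dev, size, ring, ring_dma);

Besides shedding a deprecated compatibility layer, the conversion lets the ring allocations sleep instead of drawing on atomic reserves.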
@@ -874,7 +890,7 @@ static int netdev_open(struct net_device *dev)
 	init_timer(&np->timer);
 	np->timer.expires = jiffies + 3*HZ;
 	np->timer.data = (unsigned long)dev;
-	np->timer.function = &netdev_timer;	/* timer handler */
+	np->timer.function = netdev_timer;	/* timer handler */
 	add_timer(&np->timer);
 
 	/* Enable interrupts by setting the interrupt mask. */
@@ -1004,15 +1020,21 @@ static void init_ring(struct net_device *dev)
 
 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + 2);
 		np->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
 		skb->dev = dev;		/* Mark as being used by this device. */
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 		np->rx_ring[i].frag[0].addr = cpu_to_le32(
-			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
-				PCI_DMA_FROMDEVICE));
+			dma_map_single(&np->pci_dev->dev, skb->data,
+				np->rx_buf_sz, DMA_FROM_DEVICE));
+		if (dma_mapping_error(&np->pci_dev->dev,
+					np->rx_ring[i].frag[0].addr)) {
+			dev_kfree_skb(skb);
+			np->rx_skbuff[i] = NULL;
+			break;
+		}
 		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
 	}
 	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
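Two fixes ride along in this Rx-ring hunk. First, the skb is now allocated with room for the 2-byte skb_reserve() shift; previously the reserve left only rx_buf_sz - 2 bytes of tailroom while rx_buf_sz bytes were mapped, a potential two-byte overrun past the buffer. Second, the streaming mapping is checked before the descriptor is posted. The checking pattern in isolation (a sketch, names illustrative):

    dma_addr_t addr = dma_map_single(&pdev->dev, skb->data, len,
                                     DMA_FROM_DEVICE);
    if (dma_mapping_error(&pdev->dev, addr)) {
            dev_kfree_skb(skb);     /* never post a bad address to the NIC */
            /* leave the slot empty; a later refill can retry */
    }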
@@ -1063,9 +1085,11 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
 
 	txdesc->next_desc = 0;
 	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
-	txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
-			skb->len,
-			PCI_DMA_TODEVICE));
+	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
+			skb->data, skb->len, DMA_TO_DEVICE));
+	if (dma_mapping_error(&np->pci_dev->dev,
+			txdesc->frag[0].addr))
+		goto drop_frame;
 	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
 
 	/* Increment cur_tx before tasklet_schedule() */
@@ -1087,6 +1111,12 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
 			dev->name, np->cur_tx, entry);
 	}
 	return NETDEV_TX_OK;
+
+drop_frame:
+	dev_kfree_skb(skb);
+	np->tx_skbuff[entry] = NULL;
+	dev->stats.tx_dropped++;
+	return NETDEV_TX_OK;
 }
 
 /* Reset hardware tx and free all of tx buffers */
@@ -1097,7 +1127,6 @@ reset_tx (struct net_device *dev)
 	void __iomem *ioaddr = np->base;
 	struct sk_buff *skb;
 	int i;
-	int irq = in_interrupt();
 
 	/* Reset tx logic, TxListPtr will be cleaned */
 	iowrite16 (TxDisable, ioaddr + MACCtrl1);
@@ -1109,13 +1138,10 @@ reset_tx (struct net_device *dev)
 
 		skb = np->tx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(np->pci_dev,
+			dma_unmap_single(&np->pci_dev->dev,
 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
-				skb->len, PCI_DMA_TODEVICE);
-			if (irq)
-				dev_kfree_skb_irq (skb);
-			else
-				dev_kfree_skb (skb);
+				skb->len, DMA_TO_DEVICE);
+			dev_kfree_skb_any(skb);
 			np->tx_skbuff[i] = NULL;
 			dev->stats.tx_dropped++;
 		}
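The open-coded in_interrupt() branch disappears because dev_kfree_skb_any() makes the same decision internally; reset_tx() is reachable from both the Tx-timeout and interrupt paths, and the helper is safe in either context. It is approximately:

    void dev_kfree_skb_any(struct sk_buff *skb)
    {
            if (in_irq() || irqs_disabled())
                    dev_kfree_skb_irq(skb);  /* defer the free to softirq */
            else
                    dev_kfree_skb(skb);      /* free inline */
    }

This also drops a use of in_interrupt(), which is an unreliable way to decide "may I sleep here?" and is discouraged in driver code.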
@@ -1233,9 +1259,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
 				break;
 			skb = np->tx_skbuff[entry];
 			/* Free the original skb. */
-			pci_unmap_single(np->pci_dev,
+			dma_unmap_single(&np->pci_dev->dev,
 				le32_to_cpu(np->tx_ring[entry].frag[0].addr),
-				skb->len, PCI_DMA_TODEVICE);
+				skb->len, DMA_TO_DEVICE);
 			dev_kfree_skb_irq (np->tx_skbuff[entry]);
 			np->tx_skbuff[entry] = NULL;
 			np->tx_ring[entry].frag[0].addr = 0;
@@ -1252,9 +1278,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
 				break;
 			skb = np->tx_skbuff[entry];
 			/* Free the original skb. */
-			pci_unmap_single(np->pci_dev,
+			dma_unmap_single(&np->pci_dev->dev,
 				le32_to_cpu(np->tx_ring[entry].frag[0].addr),
-				skb->len, PCI_DMA_TODEVICE);
+				skb->len, DMA_TO_DEVICE);
 			dev_kfree_skb_irq (np->tx_skbuff[entry]);
 			np->tx_skbuff[entry] = NULL;
 			np->tx_ring[entry].frag[0].addr = 0;
@@ -1334,22 +1360,18 @@ static void rx_poll(unsigned long data)
 			if (pkt_len < rx_copybreak &&
 			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
-				pci_dma_sync_single_for_cpu(np->pci_dev,
+				dma_sync_single_for_cpu(&np->pci_dev->dev,
 					le32_to_cpu(desc->frag[0].addr),
-					np->rx_buf_sz,
-					PCI_DMA_FROMDEVICE);
-
+					np->rx_buf_sz, DMA_FROM_DEVICE);
 				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
-				pci_dma_sync_single_for_device(np->pci_dev,
+				dma_sync_single_for_device(&np->pci_dev->dev,
 					le32_to_cpu(desc->frag[0].addr),
-					np->rx_buf_sz,
-					PCI_DMA_FROMDEVICE);
+					np->rx_buf_sz, DMA_FROM_DEVICE);
 				skb_put(skb, pkt_len);
 			} else {
-				pci_unmap_single(np->pci_dev,
+				dma_unmap_single(&np->pci_dev->dev,
 					le32_to_cpu(desc->frag[0].addr),
-					np->rx_buf_sz,
-					PCI_DMA_FROMDEVICE);
+					np->rx_buf_sz, DMA_FROM_DEVICE);
 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
 				np->rx_skbuff[entry] = NULL;
 			}
@@ -1389,15 +1411,21 @@ static void refill_rx (struct net_device *dev)
 		struct sk_buff *skb;
 		entry = np->dirty_rx % RX_RING_SIZE;
 		if (np->rx_skbuff[entry] == NULL) {
-			skb = dev_alloc_skb(np->rx_buf_sz);
+			skb = dev_alloc_skb(np->rx_buf_sz + 2);
 			np->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;		/* Better luck next round. */
 			skb->dev = dev;		/* Mark as being used by this device. */
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
-				pci_map_single(np->pci_dev, skb->data,
-					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
+				dma_map_single(&np->pci_dev->dev, skb->data,
+					np->rx_buf_sz, DMA_FROM_DEVICE));
+			if (dma_mapping_error(&np->pci_dev->dev,
+				    np->rx_ring[entry].frag[0].addr)) {
+				dev_kfree_skb_irq(skb);
+				np->rx_skbuff[entry] = NULL;
+				break;
+			}
 		}
 		/* Perhaps we need not reset this field. */
 		np->rx_ring[entry].frag[0].length =
@@ -1475,27 +1503,41 @@ static struct net_device_stats *get_stats(struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem *ioaddr = np->base;
-	int i;
+	unsigned long flags;
+	u8 late_coll, single_coll, mult_coll;
 
-	/* We should lock this segment of code for SMP eventually, although
-	   the vulnerability window is very small and statistics are
-	   non-critical. */
+	spin_lock_irqsave(&np->statlock, flags);
 	/* The chip only need report frame silently dropped. */
 	dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
 	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
 	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
-	dev->stats.collisions += ioread8(ioaddr + StatsLateColl);
-	dev->stats.collisions += ioread8(ioaddr + StatsMultiColl);
-	dev->stats.collisions += ioread8(ioaddr + StatsOneColl);
 	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
-	ioread8(ioaddr + StatsTxDefer);
-	for (i = StatsTxDefer; i <= StatsMcastRx; i++)
-		ioread8(ioaddr + i);
+
+	mult_coll = ioread8(ioaddr + StatsMultiColl);
+	np->xstats.tx_multiple_collisions += mult_coll;
+	single_coll = ioread8(ioaddr + StatsOneColl);
+	np->xstats.tx_single_collisions += single_coll;
+	late_coll = ioread8(ioaddr + StatsLateColl);
+	np->xstats.tx_late_collisions += late_coll;
+	dev->stats.collisions += mult_coll
+		+ single_coll
+		+ late_coll;
+
+	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
+	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
+	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
+	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
+	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
+	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
+	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
+
 	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
 	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
 	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
 	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
 
+	spin_unlock_irqrestore(&np->statlock, flags);
+
 	return &dev->stats;
 }
 
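The old "we should lock this eventually" comment becomes real locking. The driver treats the chip's statistics registers as small clear-on-read counters -- every value is read once and folded into a running total -- so two concurrent readers would each consume part of a count and both totals would end up wrong. With the ethtool statistics path added below, get_stats() gains a second caller and the race stops being theoretical. The core pattern, reduced:

    spin_lock_irqsave(&np->statlock, flags);
    /* each ioread8() drains a hardware counter; it must be
     * accumulated exactly once, hence the lock */
    mult_coll = ioread8(ioaddr + StatsMultiColl);
    np->xstats.tx_multiple_collisions += mult_coll;
    ...
    spin_unlock_irqrestore(&np->statlock, flags);

The hunk also stops throwing away the deferral/abort/broadcast/multicast counters (the old for-loop read them only to clear them) and accumulates them into the new xstats block instead.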
@@ -1554,6 +1596,34 @@ static int __set_mac_addr(struct net_device *dev)
 	return 0;
 }
 
+/* Invoked with rtnl_lock held */
+static int sundance_set_mac_addr(struct net_device *dev, void *data)
+{
+	const struct sockaddr *addr = data;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EINVAL;
+	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+	__set_mac_addr(dev);
+
+	return 0;
+}
+
+static const struct {
+	const char name[ETH_GSTRING_LEN];
+} sundance_stats[] = {
+	{ "tx_multiple_collisions" },
+	{ "tx_single_collisions" },
+	{ "tx_late_collisions" },
+	{ "tx_deferred" },
+	{ "tx_deferred_excessive" },
+	{ "tx_aborted" },
+	{ "tx_bcasts" },
+	{ "rx_bcasts" },
+	{ "tx_mcasts" },
+	{ "rx_mcasts" },
+};
+
 static int check_if_running(struct net_device *dev)
 {
 	if (!netif_running(dev))
@@ -1612,6 +1682,42 @@ static void set_msglevel(struct net_device *dev, u32 val)
 	np->msg_enable = val;
 }
 
+static void get_strings(struct net_device *dev, u32 stringset,
+		u8 *data)
+{
+	if (stringset == ETH_SS_STATS)
+		memcpy(data, sundance_stats, sizeof(sundance_stats));
+}
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(sundance_stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void get_ethtool_stats(struct net_device *dev,
+		struct ethtool_stats *stats, u64 *data)
+{
+	struct netdev_private *np = netdev_priv(dev);
+	int i = 0;
+
+	get_stats(dev);
+	data[i++] = np->xstats.tx_multiple_collisions;
+	data[i++] = np->xstats.tx_single_collisions;
+	data[i++] = np->xstats.tx_late_collisions;
+	data[i++] = np->xstats.tx_deferred;
+	data[i++] = np->xstats.tx_deferred_excessive;
+	data[i++] = np->xstats.tx_aborted;
+	data[i++] = np->xstats.tx_bcasts;
+	data[i++] = np->xstats.rx_bcasts;
+	data[i++] = np->xstats.tx_mcasts;
+	data[i++] = np->xstats.rx_mcasts;
+}
+
 static const struct ethtool_ops ethtool_ops = {
 	.begin = check_if_running,
 	.get_drvinfo = get_drvinfo,
@@ -1621,6 +1727,9 @@ static const struct ethtool_ops ethtool_ops = {
 	.get_link = get_link,
 	.get_msglevel = get_msglevel,
 	.set_msglevel = set_msglevel,
+	.get_strings = get_strings,
+	.get_sset_count = get_sset_count,
+	.get_ethtool_stats = get_ethtool_stats,
 };
 
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
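With get_sset_count(), get_strings(), and get_ethtool_stats() wired into ethtool_ops, the accumulated xstats become visible to standard tooling. Userspace pairs names with values purely by array index, so sundance_stats[] and the data[i++] sequence in get_ethtool_stats() must be kept in the same order. Because .begin is check_if_running, the interface has to be up when queried. Expected usage (output values illustrative):

    $ ethtool -S eth0
    NIC statistics:
         tx_multiple_collisions: 12
         tx_single_collisions: 3
         tx_late_collisions: 0
         ...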
@@ -1680,10 +1789,10 @@ static int netdev_close(struct net_device *dev)
 	}
 
 	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
-			ioaddr +ASICCtrl + 2);
+			ioaddr + ASIC_HI_WORD(ASICCtrl));
 
 	for (i = 2000; i > 0; i--) {
-		if ((ioread16(ioaddr + ASICCtrl +2) & ResetBusy) == 0)
+		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
 			break;
 		mdelay(1);
 	}
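ASICCtrl is a 32-bit register that this driver accesses as two 16-bit halves, and the reset bits live in the upper half at byte offset +2. The ASIC_HI_WORD() macro introduced earlier names that intent; the old spelling read like a stray magic constant:

    iowrite16(GlobalReset | ..., ioaddr +ASICCtrl + 2);             /* before */
    iowrite16(GlobalReset | ..., ioaddr + ASIC_HI_WORD(ASICCtrl));  /* after */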
@@ -1715,9 +1824,9 @@ static int netdev_close(struct net_device *dev)
 		np->rx_ring[i].status = 0;
 		skb = np->rx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(np->pci_dev,
+			dma_unmap_single(&np->pci_dev->dev,
 				le32_to_cpu(np->rx_ring[i].frag[0].addr),
-				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+				np->rx_buf_sz, DMA_FROM_DEVICE);
 			dev_kfree_skb(skb);
 			np->rx_skbuff[i] = NULL;
 		}
@@ -1727,9 +1836,9 @@ static int netdev_close(struct net_device *dev)
 		np->tx_ring[i].next_desc = 0;
 		skb = np->tx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(np->pci_dev,
+			dma_unmap_single(&np->pci_dev->dev,
 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
-				skb->len, PCI_DMA_TODEVICE);
+				skb->len, DMA_TO_DEVICE);
 			dev_kfree_skb(skb);
 			np->tx_skbuff[i] = NULL;
 		}
@@ -1743,25 +1852,72 @@ static void __devexit sundance_remove1 (struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 
 	if (dev) {
 		struct netdev_private *np = netdev_priv(dev);
-
-		unregister_netdev(dev);
-		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
-			np->rx_ring_dma);
-		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
-			np->tx_ring_dma);
-		pci_iounmap(pdev, np->base);
-		pci_release_regions(pdev);
-		free_netdev(dev);
-		pci_set_drvdata(pdev, NULL);
+		unregister_netdev(dev);
+		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
+			np->rx_ring, np->rx_ring_dma);
+		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
+			np->tx_ring, np->tx_ring_dma);
+		pci_iounmap(pdev, np->base);
+		pci_release_regions(pdev);
+		free_netdev(dev);
+		pci_set_drvdata(pdev, NULL);
 	}
 }
 
+#ifdef CONFIG_PM
+
+static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
+{
+	struct net_device *dev = pci_get_drvdata(pci_dev);
+
+	if (!netif_running(dev))
+		return 0;
+
+	netdev_close(dev);
+	netif_device_detach(dev);
+
+	pci_save_state(pci_dev);
+	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+
+	return 0;
+}
+
+static int sundance_resume(struct pci_dev *pci_dev)
+{
+	struct net_device *dev = pci_get_drvdata(pci_dev);
+	int err = 0;
+
+	if (!netif_running(dev))
+		return 0;
+
+	pci_set_power_state(pci_dev, PCI_D0);
+	pci_restore_state(pci_dev);
+
+	err = netdev_open(dev);
+	if (err) {
+		printk(KERN_ERR "%s: Can't resume interface!\n",
+			dev->name);
+		goto out;
+	}
+
+	netif_device_attach(dev);
+
+out:
+	return err;
+}
+
+#endif /* CONFIG_PM */
+
 static struct pci_driver sundance_driver = {
 	.name = DRV_NAME,
 	.id_table = sundance_pci_tbl,
 	.probe = sundance_probe1,
 	.remove = __devexit_p(sundance_remove1),
+#ifdef CONFIG_PM
+	.suspend = sundance_suspend,
+	.resume = sundance_resume,
+#endif /* CONFIG_PM */
 };
 
 static int __init sundance_init(void)
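The new power-management hooks use the legacy pci_driver suspend/resume callbacks, the prevailing style at this tree's vintage. Ordering is the important part of the contract -- suspend fully quiesces the driver before cutting power, and resume is its mirror image. Condensed from the hunk above:

    /* suspend: stop the device, detach it from the stack, power down */
    netdev_close(dev);             /* stops queues, timer, and tasklets */
    netif_device_detach(dev);      /* stack won't call a powered-down device */
    pci_save_state(pci_dev);
    pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

    /* resume: power up and restore config space, then reattach */
    pci_set_power_state(pci_dev, PCI_D0);
    pci_restore_state(pci_dev);
    netdev_open(dev);              /* error handling elided in this sketch */
    netif_device_attach(dev);

Both hooks short-circuit when the interface is down, since a closed device holds no DMA rings or IRQs that need quiescing.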