author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-07 22:21:56 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-07 22:21:56 -0500
commit     7677ced48e2bbbb8d847d34f37e5d96d2b0e41e4 (patch)
tree       0a859f403c02eb854d9ffa11bd17f77056891d07 /drivers/net/forcedeth.c
parent     21d37bbc65e39a26856de6b14be371ff24e0d03f (diff)
parent     ac38dfc39e7684f55174742e5f0d6c5a0093bbf6 (diff)
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (116 commits)
sk98lin: planned removal
AT91: MACB support
sky2: version 1.12
sky2: add new chip ids
sky2: Yukon Extreme support
sky2: safer transmit timeout
sky2: TSO support for EC_U
sky2: use dev_err for error reports
sky2: add Wake On Lan support
fix unaligned exception in /drivers/net/wireless/orinoco.c
Remove unused kernel config option DLCI_COUNT
z85230: spinlock logic
mips: declance: Driver model for the PMAD-A
Spidernet: Rework RX linked list
NET: turn local_save_flags() + local_irq_disable() into local_irq_save()
NET-3c59x: turn local_save_flags() + local_irq_disable() into local_irq_save()
hp100: convert pci_module_init() to pci_register_driver()
NetXen: Added ethtool support for user level tools.
NetXen: Firmware crb init changes.
maintainers: add atl1 maintainers
...
Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--  drivers/net/forcedeth.c  1342
1 files changed, 898 insertions, 444 deletions
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 93f2b7a22160..a363148d0198 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -111,6 +111,7 @@ | |||
111 | * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections. | 111 | * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections. |
112 | * 0.58: 30 Oct 2006: Added support for sideband management unit. | 112 | * 0.58: 30 Oct 2006: Added support for sideband management unit. |
113 | * 0.59: 30 Oct 2006: Added support for recoverable error. | 113 | * 0.59: 30 Oct 2006: Added support for recoverable error. |
114 | * 0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats. | ||
114 | * | 115 | * |
115 | * Known bugs: | 116 | * Known bugs: |
116 | * We suspect that on some hardware no TX done interrupts are generated. | 117 | * We suspect that on some hardware no TX done interrupts are generated. |
@@ -127,7 +128,7 @@ | |||
127 | #else | 128 | #else |
128 | #define DRIVERNAPI | 129 | #define DRIVERNAPI |
129 | #endif | 130 | #endif |
130 | #define FORCEDETH_VERSION "0.59" | 131 | #define FORCEDETH_VERSION "0.60" |
131 | #define DRV_NAME "forcedeth" | 132 | #define DRV_NAME "forcedeth" |
132 | 133 | ||
133 | #include <linux/module.h> | 134 | #include <linux/module.h> |
@@ -173,9 +174,10 @@ | |||
173 | #define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ | 174 | #define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ |
174 | #define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */ | 175 | #define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */ |
175 | #define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */ | 176 | #define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */ |
176 | #define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */ | 177 | #define DEV_HAS_STATISTICS_V1 0x0400 /* device supports hw statistics version 1 */ |
177 | #define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */ | 178 | #define DEV_HAS_STATISTICS_V2 0x0800 /* device supports hw statistics version 2 */ |
178 | #define DEV_HAS_MGMT_UNIT 0x1000 /* device supports management unit */ | 179 | #define DEV_HAS_TEST_EXTENDED 0x1000 /* device supports extended diagnostic test */ |
180 | #define DEV_HAS_MGMT_UNIT 0x2000 /* device supports management unit */ | ||
179 | 181 | ||
180 | enum { | 182 | enum { |
181 | NvRegIrqStatus = 0x000, | 183 | NvRegIrqStatus = 0x000, |
@@ -210,7 +212,7 @@ enum { | |||
210 | * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms | 212 | * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms |
211 | */ | 213 | */ |
212 | NvRegPollingInterval = 0x00c, | 214 | NvRegPollingInterval = 0x00c, |
213 | #define NVREG_POLL_DEFAULT_THROUGHPUT 970 | 215 | #define NVREG_POLL_DEFAULT_THROUGHPUT 970 /* backup tx cleanup if loop max reached */ |
214 | #define NVREG_POLL_DEFAULT_CPU 13 | 216 | #define NVREG_POLL_DEFAULT_CPU 13 |
215 | NvRegMSIMap0 = 0x020, | 217 | NvRegMSIMap0 = 0x020, |
216 | NvRegMSIMap1 = 0x024, | 218 | NvRegMSIMap1 = 0x024, |
@@ -304,8 +306,8 @@ enum { | |||
304 | #define NVREG_TXRXCTL_RESET 0x0010 | 306 | #define NVREG_TXRXCTL_RESET 0x0010 |
305 | #define NVREG_TXRXCTL_RXCHECK 0x0400 | 307 | #define NVREG_TXRXCTL_RXCHECK 0x0400 |
306 | #define NVREG_TXRXCTL_DESC_1 0 | 308 | #define NVREG_TXRXCTL_DESC_1 0 |
307 | #define NVREG_TXRXCTL_DESC_2 0x02100 | 309 | #define NVREG_TXRXCTL_DESC_2 0x002100 |
308 | #define NVREG_TXRXCTL_DESC_3 0x02200 | 310 | #define NVREG_TXRXCTL_DESC_3 0xc02200 |
309 | #define NVREG_TXRXCTL_VLANSTRIP 0x00040 | 311 | #define NVREG_TXRXCTL_VLANSTRIP 0x00040 |
310 | #define NVREG_TXRXCTL_VLANINS 0x00080 | 312 | #define NVREG_TXRXCTL_VLANINS 0x00080 |
311 | NvRegTxRingPhysAddrHigh = 0x148, | 313 | NvRegTxRingPhysAddrHigh = 0x148, |
@@ -487,7 +489,8 @@ union ring_type { | |||
487 | 489 | ||
488 | /* Miscelaneous hardware related defines: */ | 490 | /* Miscelaneous hardware related defines: */ |
489 | #define NV_PCI_REGSZ_VER1 0x270 | 491 | #define NV_PCI_REGSZ_VER1 0x270 |
490 | #define NV_PCI_REGSZ_VER2 0x604 | 492 | #define NV_PCI_REGSZ_VER2 0x2d4 |
493 | #define NV_PCI_REGSZ_VER3 0x604 | ||
491 | 494 | ||
492 | /* various timeout delays: all in usec */ | 495 | /* various timeout delays: all in usec */ |
493 | #define NV_TXRX_RESET_DELAY 4 | 496 | #define NV_TXRX_RESET_DELAY 4 |
@@ -518,12 +521,6 @@ union ring_type { | |||
518 | #define TX_RING_MIN 64 | 521 | #define TX_RING_MIN 64 |
519 | #define RING_MAX_DESC_VER_1 1024 | 522 | #define RING_MAX_DESC_VER_1 1024 |
520 | #define RING_MAX_DESC_VER_2_3 16384 | 523 | #define RING_MAX_DESC_VER_2_3 16384 |
521 | /* | ||
522 | * Difference between the get and put pointers for the tx ring. | ||
523 | * This is used to throttle the amount of data outstanding in the | ||
524 | * tx ring. | ||
525 | */ | ||
526 | #define TX_LIMIT_DIFFERENCE 1 | ||
527 | 524 | ||
528 | /* rx/tx mac addr + type + vlan + align + slack*/ | 525 | /* rx/tx mac addr + type + vlan + align + slack*/ |
529 | #define NV_RX_HEADERS (64) | 526 | #define NV_RX_HEADERS (64) |
@@ -611,9 +608,6 @@ static const struct nv_ethtool_str nv_estats_str[] = { | |||
611 | { "tx_carrier_errors" }, | 608 | { "tx_carrier_errors" }, |
612 | { "tx_excess_deferral" }, | 609 | { "tx_excess_deferral" }, |
613 | { "tx_retry_error" }, | 610 | { "tx_retry_error" }, |
614 | { "tx_deferral" }, | ||
615 | { "tx_packets" }, | ||
616 | { "tx_pause" }, | ||
617 | { "rx_frame_error" }, | 611 | { "rx_frame_error" }, |
618 | { "rx_extra_byte" }, | 612 | { "rx_extra_byte" }, |
619 | { "rx_late_collision" }, | 613 | { "rx_late_collision" }, |
@@ -626,11 +620,17 @@ static const struct nv_ethtool_str nv_estats_str[] = { | |||
626 | { "rx_unicast" }, | 620 | { "rx_unicast" }, |
627 | { "rx_multicast" }, | 621 | { "rx_multicast" }, |
628 | { "rx_broadcast" }, | 622 | { "rx_broadcast" }, |
623 | { "rx_packets" }, | ||
624 | { "rx_errors_total" }, | ||
625 | { "tx_errors_total" }, | ||
626 | |||
627 | /* version 2 stats */ | ||
628 | { "tx_deferral" }, | ||
629 | { "tx_packets" }, | ||
629 | { "rx_bytes" }, | 630 | { "rx_bytes" }, |
631 | { "tx_pause" }, | ||
630 | { "rx_pause" }, | 632 | { "rx_pause" }, |
631 | { "rx_drop_frame" }, | 633 | { "rx_drop_frame" } |
632 | { "rx_packets" }, | ||
633 | { "rx_errors_total" } | ||
634 | }; | 634 | }; |
635 | 635 | ||
636 | struct nv_ethtool_stats { | 636 | struct nv_ethtool_stats { |
@@ -643,9 +643,6 @@ struct nv_ethtool_stats { | |||
643 | u64 tx_carrier_errors; | 643 | u64 tx_carrier_errors; |
644 | u64 tx_excess_deferral; | 644 | u64 tx_excess_deferral; |
645 | u64 tx_retry_error; | 645 | u64 tx_retry_error; |
646 | u64 tx_deferral; | ||
647 | u64 tx_packets; | ||
648 | u64 tx_pause; | ||
649 | u64 rx_frame_error; | 646 | u64 rx_frame_error; |
650 | u64 rx_extra_byte; | 647 | u64 rx_extra_byte; |
651 | u64 rx_late_collision; | 648 | u64 rx_late_collision; |
@@ -658,13 +655,22 @@ struct nv_ethtool_stats { | |||
658 | u64 rx_unicast; | 655 | u64 rx_unicast; |
659 | u64 rx_multicast; | 656 | u64 rx_multicast; |
660 | u64 rx_broadcast; | 657 | u64 rx_broadcast; |
658 | u64 rx_packets; | ||
659 | u64 rx_errors_total; | ||
660 | u64 tx_errors_total; | ||
661 | |||
662 | /* version 2 stats */ | ||
663 | u64 tx_deferral; | ||
664 | u64 tx_packets; | ||
661 | u64 rx_bytes; | 665 | u64 rx_bytes; |
666 | u64 tx_pause; | ||
662 | u64 rx_pause; | 667 | u64 rx_pause; |
663 | u64 rx_drop_frame; | 668 | u64 rx_drop_frame; |
664 | u64 rx_packets; | ||
665 | u64 rx_errors_total; | ||
666 | }; | 669 | }; |
667 | 670 | ||
671 | #define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64)) | ||
672 | #define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6) | ||
673 | |||
668 | /* diagnostics */ | 674 | /* diagnostics */ |
669 | #define NV_TEST_COUNT_BASE 3 | 675 | #define NV_TEST_COUNT_BASE 3 |
670 | #define NV_TEST_COUNT_EXTENDED 4 | 676 | #define NV_TEST_COUNT_EXTENDED 4 |
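The two count macros above size the ethtool statistics sets straight from the struct layout: NV_DEV_STATISTICS_V2_COUNT covers every u64 field of struct nv_ethtool_stats, and the version 1 count subtracts exactly the six fields grouped under the "version 2 stats" comment (tx_deferral, tx_packets, rx_bytes, tx_pause, rx_pause, rx_drop_frame). A minimal sketch of how a get_sset_count-style helper could key off the new DEV_HAS_STATISTICS_V1/V2 flags (the helper name is hypothetical; the flag tests mirror the checks used in nv_get_stats() later in this diff):

    /* hypothetical helper, not from the patch */
    static int nv_stats_count(struct net_device *dev)
    {
        struct fe_priv *np = netdev_priv(dev);

        if (np->driver_data & DEV_HAS_STATISTICS_V2)
            return NV_DEV_STATISTICS_V2_COUNT;  /* all counters */
        if (np->driver_data & DEV_HAS_STATISTICS_V1)
            return NV_DEV_STATISTICS_V1_COUNT;  /* V2 count minus the 6 V2-only fields */
        return 0;                               /* no hardware counters */
    }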
@@ -691,6 +697,12 @@ static const struct register_test nv_registers_test[] = { | |||
691 | { 0,0 } | 697 | { 0,0 } |
692 | }; | 698 | }; |
693 | 699 | ||
700 | struct nv_skb_map { | ||
701 | struct sk_buff *skb; | ||
702 | dma_addr_t dma; | ||
703 | unsigned int dma_len; | ||
704 | }; | ||
705 | |||
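The new struct nv_skb_map bundles what the old code kept in parallel arrays indexed by ring position (rx_skbuff[]/rx_dma[], tx_skbuff[]/tx_dma[]/tx_dma_len[]), so each ring slot now carries one self-contained context entry. An illustrative before/after, not taken verbatim from the patch:

    /* before: three parallel arrays, three lookups per slot */
    struct sk_buff *skb = np->tx_skbuff[nr];
    dma_addr_t dma      = np->tx_dma[nr];
    unsigned int len    = np->tx_dma_len[nr];

    /* after: one context record per slot, advanced in step with the ring */
    struct nv_skb_map *ctx = &np->tx_skb[nr];
    skb = ctx->skb;
    dma = ctx->dma;
    len = ctx->dma_len;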
694 | /* | 706 | /* |
695 | * SMP locking: | 707 | * SMP locking: |
696 | * All hardware access under dev->priv->lock, except the performance | 708 | * All hardware access under dev->priv->lock, except the performance |
@@ -741,10 +753,12 @@ struct fe_priv { | |||
741 | /* rx specific fields. | 753 | /* rx specific fields. |
742 | * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); | 754 | * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); |
743 | */ | 755 | */ |
756 | union ring_type get_rx, put_rx, first_rx, last_rx; | ||
757 | struct nv_skb_map *get_rx_ctx, *put_rx_ctx; | ||
758 | struct nv_skb_map *first_rx_ctx, *last_rx_ctx; | ||
759 | struct nv_skb_map *rx_skb; | ||
760 | |||
744 | union ring_type rx_ring; | 761 | union ring_type rx_ring; |
745 | unsigned int cur_rx, refill_rx; | ||
746 | struct sk_buff **rx_skbuff; | ||
747 | dma_addr_t *rx_dma; | ||
748 | unsigned int rx_buf_sz; | 762 | unsigned int rx_buf_sz; |
749 | unsigned int pkt_limit; | 763 | unsigned int pkt_limit; |
750 | struct timer_list oom_kick; | 764 | struct timer_list oom_kick; |
@@ -761,15 +775,15 @@ struct fe_priv { | |||
761 | /* | 775 | /* |
762 | * tx specific fields. | 776 | * tx specific fields. |
763 | */ | 777 | */ |
778 | union ring_type get_tx, put_tx, first_tx, last_tx; | ||
779 | struct nv_skb_map *get_tx_ctx, *put_tx_ctx; | ||
780 | struct nv_skb_map *first_tx_ctx, *last_tx_ctx; | ||
781 | struct nv_skb_map *tx_skb; | ||
782 | |||
764 | union ring_type tx_ring; | 783 | union ring_type tx_ring; |
765 | unsigned int next_tx, nic_tx; | ||
766 | struct sk_buff **tx_skbuff; | ||
767 | dma_addr_t *tx_dma; | ||
768 | unsigned int *tx_dma_len; | ||
769 | u32 tx_flags; | 784 | u32 tx_flags; |
770 | int tx_ring_size; | 785 | int tx_ring_size; |
771 | int tx_limit_start; | 786 | int tx_stop; |
772 | int tx_limit_stop; | ||
773 | 787 | ||
774 | /* vlan fields */ | 788 | /* vlan fields */ |
775 | struct vlan_group *vlangrp; | 789 | struct vlan_group *vlangrp; |
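fe_priv now tracks each ring with four cursors instead of the old cur_rx/refill_rx and next_tx/nic_tx counters: first_*/last_* bound the descriptor ring in memory, put_* marks where the driver writes the next descriptor, and get_* marks where completion processing resumes, with a matching cursor set for the nv_skb_map context array. Every cursor advance in this patch uses the same post-increment-then-wrap idiom, e.g. for the tx put cursor:

    /* advance and wrap: after the last descriptor, snap back to the first */
    if (unlikely(np->put_tx.orig++ == np->last_tx.orig))
        np->put_tx.orig = np->first_tx.orig;
    if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
        np->put_tx_ctx = np->first_tx_ctx;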
@@ -921,16 +935,10 @@ static void free_rings(struct net_device *dev) | |||
921 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), | 935 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), |
922 | np->rx_ring.ex, np->ring_addr); | 936 | np->rx_ring.ex, np->ring_addr); |
923 | } | 937 | } |
924 | if (np->rx_skbuff) | 938 | if (np->rx_skb) |
925 | kfree(np->rx_skbuff); | 939 | kfree(np->rx_skb); |
926 | if (np->rx_dma) | 940 | if (np->tx_skb) |
927 | kfree(np->rx_dma); | 941 | kfree(np->tx_skb); |
928 | if (np->tx_skbuff) | ||
929 | kfree(np->tx_skbuff); | ||
930 | if (np->tx_dma) | ||
931 | kfree(np->tx_dma); | ||
932 | if (np->tx_dma_len) | ||
933 | kfree(np->tx_dma_len); | ||
934 | } | 942 | } |
935 | 943 | ||
936 | static int using_multi_irqs(struct net_device *dev) | 944 | static int using_multi_irqs(struct net_device *dev) |
@@ -1279,6 +1287,61 @@ static void nv_mac_reset(struct net_device *dev) | |||
1279 | pci_push(base); | 1287 | pci_push(base); |
1280 | } | 1288 | } |
1281 | 1289 | ||
1290 | static void nv_get_hw_stats(struct net_device *dev) | ||
1291 | { | ||
1292 | struct fe_priv *np = netdev_priv(dev); | ||
1293 | u8 __iomem *base = get_hwbase(dev); | ||
1294 | |||
1295 | np->estats.tx_bytes += readl(base + NvRegTxCnt); | ||
1296 | np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); | ||
1297 | np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); | ||
1298 | np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); | ||
1299 | np->estats.tx_late_collision += readl(base + NvRegTxLateCol); | ||
1300 | np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); | ||
1301 | np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); | ||
1302 | np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); | ||
1303 | np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); | ||
1304 | np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); | ||
1305 | np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); | ||
1306 | np->estats.rx_late_collision += readl(base + NvRegRxLateCol); | ||
1307 | np->estats.rx_runt += readl(base + NvRegRxRunt); | ||
1308 | np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); | ||
1309 | np->estats.rx_over_errors += readl(base + NvRegRxOverflow); | ||
1310 | np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); | ||
1311 | np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); | ||
1312 | np->estats.rx_length_error += readl(base + NvRegRxLenErr); | ||
1313 | np->estats.rx_unicast += readl(base + NvRegRxUnicast); | ||
1314 | np->estats.rx_multicast += readl(base + NvRegRxMulticast); | ||
1315 | np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); | ||
1316 | np->estats.rx_packets = | ||
1317 | np->estats.rx_unicast + | ||
1318 | np->estats.rx_multicast + | ||
1319 | np->estats.rx_broadcast; | ||
1320 | np->estats.rx_errors_total = | ||
1321 | np->estats.rx_crc_errors + | ||
1322 | np->estats.rx_over_errors + | ||
1323 | np->estats.rx_frame_error + | ||
1324 | (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + | ||
1325 | np->estats.rx_late_collision + | ||
1326 | np->estats.rx_runt + | ||
1327 | np->estats.rx_frame_too_long; | ||
1328 | np->estats.tx_errors_total = | ||
1329 | np->estats.tx_late_collision + | ||
1330 | np->estats.tx_fifo_errors + | ||
1331 | np->estats.tx_carrier_errors + | ||
1332 | np->estats.tx_excess_deferral + | ||
1333 | np->estats.tx_retry_error; | ||
1334 | |||
1335 | if (np->driver_data & DEV_HAS_STATISTICS_V2) { | ||
1336 | np->estats.tx_deferral += readl(base + NvRegTxDef); | ||
1337 | np->estats.tx_packets += readl(base + NvRegTxFrame); | ||
1338 | np->estats.rx_bytes += readl(base + NvRegRxCnt); | ||
1339 | np->estats.tx_pause += readl(base + NvRegTxPause); | ||
1340 | np->estats.rx_pause += readl(base + NvRegRxPause); | ||
1341 | np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); | ||
1342 | } | ||
1343 | } | ||
1344 | |||
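One subtlety in nv_get_hw_stats() above: rx_errors_total counts alignment errors as (rx_frame_align_error - rx_extra_byte) rather than summing both counters. The patch does not say why; a plausible reading (an assumption, not stated in the source) is that the hardware also bumps the frame-align counter for extra-byte events, so the difference avoids counting those frames twice:

    np->estats.rx_errors_total =
        np->estats.rx_crc_errors + np->estats.rx_over_errors + np->estats.rx_frame_error +
        (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + /* assumed de-duplication */
        np->estats.rx_late_collision + np->estats.rx_runt + np->estats.rx_frame_too_long;

tx_errors_total, by contrast, is the plain sum of the five tx error counters.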
1282 | /* | 1345 | /* |
1283 | * nv_get_stats: dev->get_stats function | 1346 | * nv_get_stats: dev->get_stats function |
1284 | * Get latest stats value from the nic. | 1347 | * Get latest stats value from the nic. |
@@ -1289,10 +1352,19 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev) | |||
1289 | { | 1352 | { |
1290 | struct fe_priv *np = netdev_priv(dev); | 1353 | struct fe_priv *np = netdev_priv(dev); |
1291 | 1354 | ||
1292 | /* It seems that the nic always generates interrupts and doesn't | 1355 | /* If the nic supports hw counters then retrieve latest values */ |
1293 | * accumulate errors internally. Thus the current values in np->stats | 1356 | if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) { |
1294 | * are already up to date. | 1357 | nv_get_hw_stats(dev); |
1295 | */ | 1358 | |
1359 | /* copy to net_device stats */ | ||
1360 | np->stats.tx_bytes = np->estats.tx_bytes; | ||
1361 | np->stats.tx_fifo_errors = np->estats.tx_fifo_errors; | ||
1362 | np->stats.tx_carrier_errors = np->estats.tx_carrier_errors; | ||
1363 | np->stats.rx_crc_errors = np->estats.rx_crc_errors; | ||
1364 | np->stats.rx_over_errors = np->estats.rx_over_errors; | ||
1365 | np->stats.rx_errors = np->estats.rx_errors_total; | ||
1366 | np->stats.tx_errors = np->estats.tx_errors_total; | ||
1367 | } | ||
1296 | return &np->stats; | 1368 | return &np->stats; |
1297 | } | 1369 | } |
1298 | 1370 | ||
@@ -1304,43 +1376,63 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev) | |||
1304 | static int nv_alloc_rx(struct net_device *dev) | 1376 | static int nv_alloc_rx(struct net_device *dev) |
1305 | { | 1377 | { |
1306 | struct fe_priv *np = netdev_priv(dev); | 1378 | struct fe_priv *np = netdev_priv(dev); |
1307 | unsigned int refill_rx = np->refill_rx; | 1379 | struct ring_desc* less_rx; |
1308 | int nr; | ||
1309 | 1380 | ||
1310 | while (np->cur_rx != refill_rx) { | 1381 | less_rx = np->get_rx.orig; |
1311 | struct sk_buff *skb; | 1382 | if (less_rx-- == np->first_rx.orig) |
1312 | 1383 | less_rx = np->last_rx.orig; | |
1313 | nr = refill_rx % np->rx_ring_size; | ||
1314 | if (np->rx_skbuff[nr] == NULL) { | ||
1315 | |||
1316 | skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); | ||
1317 | if (!skb) | ||
1318 | break; | ||
1319 | 1384 | ||
1385 | while (np->put_rx.orig != less_rx) { | ||
1386 | struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); | ||
1387 | if (skb) { | ||
1320 | skb->dev = dev; | 1388 | skb->dev = dev; |
1321 | np->rx_skbuff[nr] = skb; | 1389 | np->put_rx_ctx->skb = skb; |
1390 | np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data, | ||
1391 | skb->end-skb->data, PCI_DMA_FROMDEVICE); | ||
1392 | np->put_rx_ctx->dma_len = skb->end-skb->data; | ||
1393 | np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); | ||
1394 | wmb(); | ||
1395 | np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); | ||
1396 | if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) | ||
1397 | np->put_rx.orig = np->first_rx.orig; | ||
1398 | if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) | ||
1399 | np->put_rx_ctx = np->first_rx_ctx; | ||
1322 | } else { | 1400 | } else { |
1323 | skb = np->rx_skbuff[nr]; | 1401 | return 1; |
1324 | } | 1402 | } |
1325 | np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, | 1403 | } |
1326 | skb->end-skb->data, PCI_DMA_FROMDEVICE); | 1404 | return 0; |
1327 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1405 | } |
1328 | np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]); | 1406 | |
1407 | static int nv_alloc_rx_optimized(struct net_device *dev) | ||
1408 | { | ||
1409 | struct fe_priv *np = netdev_priv(dev); | ||
1410 | struct ring_desc_ex* less_rx; | ||
1411 | |||
1412 | less_rx = np->get_rx.ex; | ||
1413 | if (less_rx-- == np->first_rx.ex) | ||
1414 | less_rx = np->last_rx.ex; | ||
1415 | |||
1416 | while (np->put_rx.ex != less_rx) { | ||
1417 | struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); | ||
1418 | if (skb) { | ||
1419 | skb->dev = dev; | ||
1420 | np->put_rx_ctx->skb = skb; | ||
1421 | np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data, | ||
1422 | skb->end-skb->data, PCI_DMA_FROMDEVICE); | ||
1423 | np->put_rx_ctx->dma_len = skb->end-skb->data; | ||
1424 | np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32; | ||
1425 | np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF; | ||
1329 | wmb(); | 1426 | wmb(); |
1330 | np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); | 1427 | np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); |
1428 | if (unlikely(np->put_rx.ex++ == np->last_rx.ex)) | ||
1429 | np->put_rx.ex = np->first_rx.ex; | ||
1430 | if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) | ||
1431 | np->put_rx_ctx = np->first_rx_ctx; | ||
1331 | } else { | 1432 | } else { |
1332 | np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32; | 1433 | return 1; |
1333 | np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF; | ||
1334 | wmb(); | ||
1335 | np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); | ||
1336 | } | 1434 | } |
1337 | dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n", | ||
1338 | dev->name, refill_rx); | ||
1339 | refill_rx++; | ||
1340 | } | 1435 | } |
1341 | np->refill_rx = refill_rx; | ||
1342 | if (np->cur_rx - refill_rx == np->rx_ring_size) | ||
1343 | return 1; | ||
1344 | return 0; | 1436 | return 0; |
1345 | } | 1437 | } |
1346 | 1438 | ||
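Both refill loops stop one descriptor short of the consumer cursor: less_rx starts at get_rx, steps back one slot (wrapping to last_rx when it falls off the front), and the loop fills until put_rx catches up to it. Leaving that one slot permanently empty keeps full and empty distinguishable, so put == get can only ever mean "ring empty". A sketch of the invariant with hypothetical names (fill_one() stands in for the skb alloc/map/publish body above):

    /* ring of N slots, at most N-1 ever filled */
    stop = get;
    if (stop-- == first)
        stop = last;
    while (put != stop) {
        fill_one(put);
        put = (put == last) ? first : put + 1;
    }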
@@ -1358,6 +1450,7 @@ static void nv_do_rx_refill(unsigned long data) | |||
1358 | { | 1450 | { |
1359 | struct net_device *dev = (struct net_device *) data; | 1451 | struct net_device *dev = (struct net_device *) data; |
1360 | struct fe_priv *np = netdev_priv(dev); | 1452 | struct fe_priv *np = netdev_priv(dev); |
1453 | int retcode; | ||
1361 | 1454 | ||
1362 | if (!using_multi_irqs(dev)) { | 1455 | if (!using_multi_irqs(dev)) { |
1363 | if (np->msi_flags & NV_MSI_X_ENABLED) | 1456 | if (np->msi_flags & NV_MSI_X_ENABLED) |
@@ -1367,7 +1460,11 @@ static void nv_do_rx_refill(unsigned long data) | |||
1367 | } else { | 1460 | } else { |
1368 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | 1461 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); |
1369 | } | 1462 | } |
1370 | if (nv_alloc_rx(dev)) { | 1463 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1464 | retcode = nv_alloc_rx(dev); | ||
1465 | else | ||
1466 | retcode = nv_alloc_rx_optimized(dev); | ||
1467 | if (retcode) { | ||
1371 | spin_lock_irq(&np->lock); | 1468 | spin_lock_irq(&np->lock); |
1372 | if (!np->in_shutdown) | 1469 | if (!np->in_shutdown) |
1373 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | 1470 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); |
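This descriptor-version dispatch recurs throughout the patch (nv_init_ring(), nv_tx_timeout(), the two hard_start_xmit paths): DESC_VER_1/DESC_VER_2 hardware keeps the original functions operating on ring_desc, while newer hardware gets the *_optimized variants that assume the extended ring_desc_ex layout. The payoff is that the hot loops no longer test np->desc_ver per descriptor, as the old unified nv_alloc_rx() did:

    /* version check hoisted out of the per-descriptor loop */
    if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
        retcode = nv_alloc_rx(dev);            /* ring_desc path */
    else
        retcode = nv_alloc_rx_optimized(dev);  /* ring_desc_ex path */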
@@ -1388,56 +1485,81 @@ static void nv_init_rx(struct net_device *dev) | |||
1388 | { | 1485 | { |
1389 | struct fe_priv *np = netdev_priv(dev); | 1486 | struct fe_priv *np = netdev_priv(dev); |
1390 | int i; | 1487 | int i; |
1488 | np->get_rx = np->put_rx = np->first_rx = np->rx_ring; | ||
1489 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | ||
1490 | np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; | ||
1491 | else | ||
1492 | np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; | ||
1493 | np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb; | ||
1494 | np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; | ||
1391 | 1495 | ||
1392 | np->cur_rx = np->rx_ring_size; | 1496 | for (i = 0; i < np->rx_ring_size; i++) { |
1393 | np->refill_rx = 0; | 1497 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1394 | for (i = 0; i < np->rx_ring_size; i++) | ||
1395 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | ||
1396 | np->rx_ring.orig[i].flaglen = 0; | 1498 | np->rx_ring.orig[i].flaglen = 0; |
1397 | else | 1499 | np->rx_ring.orig[i].buf = 0; |
1500 | } else { | ||
1398 | np->rx_ring.ex[i].flaglen = 0; | 1501 | np->rx_ring.ex[i].flaglen = 0; |
1502 | np->rx_ring.ex[i].txvlan = 0; | ||
1503 | np->rx_ring.ex[i].bufhigh = 0; | ||
1504 | np->rx_ring.ex[i].buflow = 0; | ||
1505 | } | ||
1506 | np->rx_skb[i].skb = NULL; | ||
1507 | np->rx_skb[i].dma = 0; | ||
1508 | } | ||
1399 | } | 1509 | } |
1400 | 1510 | ||
1401 | static void nv_init_tx(struct net_device *dev) | 1511 | static void nv_init_tx(struct net_device *dev) |
1402 | { | 1512 | { |
1403 | struct fe_priv *np = netdev_priv(dev); | 1513 | struct fe_priv *np = netdev_priv(dev); |
1404 | int i; | 1514 | int i; |
1515 | np->get_tx = np->put_tx = np->first_tx = np->tx_ring; | ||
1516 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | ||
1517 | np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; | ||
1518 | else | ||
1519 | np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; | ||
1520 | np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb; | ||
1521 | np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; | ||
1405 | 1522 | ||
1406 | np->next_tx = np->nic_tx = 0; | ||
1407 | for (i = 0; i < np->tx_ring_size; i++) { | 1523 | for (i = 0; i < np->tx_ring_size; i++) { |
1408 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1524 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1409 | np->tx_ring.orig[i].flaglen = 0; | 1525 | np->tx_ring.orig[i].flaglen = 0; |
1410 | else | 1526 | np->tx_ring.orig[i].buf = 0; |
1527 | } else { | ||
1411 | np->tx_ring.ex[i].flaglen = 0; | 1528 | np->tx_ring.ex[i].flaglen = 0; |
1412 | np->tx_skbuff[i] = NULL; | 1529 | np->tx_ring.ex[i].txvlan = 0; |
1413 | np->tx_dma[i] = 0; | 1530 | np->tx_ring.ex[i].bufhigh = 0; |
1531 | np->tx_ring.ex[i].buflow = 0; | ||
1532 | } | ||
1533 | np->tx_skb[i].skb = NULL; | ||
1534 | np->tx_skb[i].dma = 0; | ||
1414 | } | 1535 | } |
1415 | } | 1536 | } |
1416 | 1537 | ||
1417 | static int nv_init_ring(struct net_device *dev) | 1538 | static int nv_init_ring(struct net_device *dev) |
1418 | { | 1539 | { |
1540 | struct fe_priv *np = netdev_priv(dev); | ||
1541 | |||
1419 | nv_init_tx(dev); | 1542 | nv_init_tx(dev); |
1420 | nv_init_rx(dev); | 1543 | nv_init_rx(dev); |
1421 | return nv_alloc_rx(dev); | 1544 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1545 | return nv_alloc_rx(dev); | ||
1546 | else | ||
1547 | return nv_alloc_rx_optimized(dev); | ||
1422 | } | 1548 | } |
1423 | 1549 | ||
1424 | static int nv_release_txskb(struct net_device *dev, unsigned int skbnr) | 1550 | static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb) |
1425 | { | 1551 | { |
1426 | struct fe_priv *np = netdev_priv(dev); | 1552 | struct fe_priv *np = netdev_priv(dev); |
1427 | 1553 | ||
1428 | dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n", | 1554 | if (tx_skb->dma) { |
1429 | dev->name, skbnr); | 1555 | pci_unmap_page(np->pci_dev, tx_skb->dma, |
1430 | 1556 | tx_skb->dma_len, | |
1431 | if (np->tx_dma[skbnr]) { | ||
1432 | pci_unmap_page(np->pci_dev, np->tx_dma[skbnr], | ||
1433 | np->tx_dma_len[skbnr], | ||
1434 | PCI_DMA_TODEVICE); | 1557 | PCI_DMA_TODEVICE); |
1435 | np->tx_dma[skbnr] = 0; | 1558 | tx_skb->dma = 0; |
1436 | } | 1559 | } |
1437 | 1560 | if (tx_skb->skb) { | |
1438 | if (np->tx_skbuff[skbnr]) { | 1561 | dev_kfree_skb_any(tx_skb->skb); |
1439 | dev_kfree_skb_any(np->tx_skbuff[skbnr]); | 1562 | tx_skb->skb = NULL; |
1440 | np->tx_skbuff[skbnr] = NULL; | ||
1441 | return 1; | 1563 | return 1; |
1442 | } else { | 1564 | } else { |
1443 | return 0; | 1565 | return 0; |
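nv_release_txskb() keeps its old contract under the new context type: unmap the DMA buffer if one is mapped, then return 1 only when an actual skb was freed. That return value lets nv_drain_tx() below count each dropped packet exactly once:

    /* caller pattern from nv_drain_tx(): one tx_dropped per freed skb */
    if (nv_release_txskb(dev, &np->tx_skb[i]))
        np->stats.tx_dropped++;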
@@ -1450,11 +1572,16 @@ static void nv_drain_tx(struct net_device *dev) | |||
1450 | unsigned int i; | 1572 | unsigned int i; |
1451 | 1573 | ||
1452 | for (i = 0; i < np->tx_ring_size; i++) { | 1574 | for (i = 0; i < np->tx_ring_size; i++) { |
1453 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1575 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1454 | np->tx_ring.orig[i].flaglen = 0; | 1576 | np->tx_ring.orig[i].flaglen = 0; |
1455 | else | 1577 | np->tx_ring.orig[i].buf = 0; |
1578 | } else { | ||
1456 | np->tx_ring.ex[i].flaglen = 0; | 1579 | np->tx_ring.ex[i].flaglen = 0; |
1457 | if (nv_release_txskb(dev, i)) | 1580 | np->tx_ring.ex[i].txvlan = 0; |
1581 | np->tx_ring.ex[i].bufhigh = 0; | ||
1582 | np->tx_ring.ex[i].buflow = 0; | ||
1583 | } | ||
1584 | if (nv_release_txskb(dev, &np->tx_skb[i])) | ||
1458 | np->stats.tx_dropped++; | 1585 | np->stats.tx_dropped++; |
1459 | } | 1586 | } |
1460 | } | 1587 | } |
@@ -1463,18 +1590,24 @@ static void nv_drain_rx(struct net_device *dev) | |||
1463 | { | 1590 | { |
1464 | struct fe_priv *np = netdev_priv(dev); | 1591 | struct fe_priv *np = netdev_priv(dev); |
1465 | int i; | 1592 | int i; |
1593 | |||
1466 | for (i = 0; i < np->rx_ring_size; i++) { | 1594 | for (i = 0; i < np->rx_ring_size; i++) { |
1467 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1595 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1468 | np->rx_ring.orig[i].flaglen = 0; | 1596 | np->rx_ring.orig[i].flaglen = 0; |
1469 | else | 1597 | np->rx_ring.orig[i].buf = 0; |
1598 | } else { | ||
1470 | np->rx_ring.ex[i].flaglen = 0; | 1599 | np->rx_ring.ex[i].flaglen = 0; |
1600 | np->rx_ring.ex[i].txvlan = 0; | ||
1601 | np->rx_ring.ex[i].bufhigh = 0; | ||
1602 | np->rx_ring.ex[i].buflow = 0; | ||
1603 | } | ||
1471 | wmb(); | 1604 | wmb(); |
1472 | if (np->rx_skbuff[i]) { | 1605 | if (np->rx_skb[i].skb) { |
1473 | pci_unmap_single(np->pci_dev, np->rx_dma[i], | 1606 | pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, |
1474 | np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, | 1607 | np->rx_skb[i].skb->end-np->rx_skb[i].skb->data, |
1475 | PCI_DMA_FROMDEVICE); | 1608 | PCI_DMA_FROMDEVICE); |
1476 | dev_kfree_skb(np->rx_skbuff[i]); | 1609 | dev_kfree_skb(np->rx_skb[i].skb); |
1477 | np->rx_skbuff[i] = NULL; | 1610 | np->rx_skb[i].skb = NULL; |
1478 | } | 1611 | } |
1479 | } | 1612 | } |
1480 | } | 1613 | } |
@@ -1485,6 +1618,11 @@ static void drain_ring(struct net_device *dev) | |||
1485 | nv_drain_rx(dev); | 1618 | nv_drain_rx(dev); |
1486 | } | 1619 | } |
1487 | 1620 | ||
1621 | static inline u32 nv_get_empty_tx_slots(struct fe_priv *np) | ||
1622 | { | ||
1623 | return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); | ||
1624 | } | ||
1625 | |||
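nv_get_empty_tx_slots() folds the ring wraparound into one expression: (put_tx_ctx - get_tx_ctx) is the number of in-flight slots (negative once put has wrapped past the end while get has not), adding tx_ring_size and taking the modulus normalizes that to 0..tx_ring_size-1, and subtracting from tx_ring_size gives the free count. Worked through for a hypothetical 64-entry ring:

    /* tx_ring_size = 64 (example value) */
    put - get =  10  ->  64 - ((64 + 10) % 64) = 54 free
    put - get = -54  ->  64 - ((64 - 54) % 64) = 54 free   /* same fill level, put wrapped */
    put - get =   0  ->  64 - ((64 +  0) % 64) = 64 free   /* empty ring */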
1488 | /* | 1626 | /* |
1489 | * nv_start_xmit: dev->hard_start_xmit function | 1627 | * nv_start_xmit: dev->hard_start_xmit function |
1490 | * Called with netif_tx_lock held. | 1628 | * Called with netif_tx_lock held. |
@@ -1495,14 +1633,16 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1495 | u32 tx_flags = 0; | 1633 | u32 tx_flags = 0; |
1496 | u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); | 1634 | u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); |
1497 | unsigned int fragments = skb_shinfo(skb)->nr_frags; | 1635 | unsigned int fragments = skb_shinfo(skb)->nr_frags; |
1498 | unsigned int nr = (np->next_tx - 1) % np->tx_ring_size; | ||
1499 | unsigned int start_nr = np->next_tx % np->tx_ring_size; | ||
1500 | unsigned int i; | 1636 | unsigned int i; |
1501 | u32 offset = 0; | 1637 | u32 offset = 0; |
1502 | u32 bcnt; | 1638 | u32 bcnt; |
1503 | u32 size = skb->len-skb->data_len; | 1639 | u32 size = skb->len-skb->data_len; |
1504 | u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); | 1640 | u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); |
1505 | u32 tx_flags_vlan = 0; | 1641 | u32 empty_slots; |
1642 | struct ring_desc* put_tx; | ||
1643 | struct ring_desc* start_tx; | ||
1644 | struct ring_desc* prev_tx; | ||
1645 | struct nv_skb_map* prev_tx_ctx; | ||
1506 | 1646 | ||
1507 | /* add fragments to entries count */ | 1647 | /* add fragments to entries count */ |
1508 | for (i = 0; i < fragments; i++) { | 1648 | for (i = 0; i < fragments; i++) { |
@@ -1510,34 +1650,35 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1510 | ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); | 1650 | ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); |
1511 | } | 1651 | } |
1512 | 1652 | ||
1513 | spin_lock_irq(&np->lock); | 1653 | empty_slots = nv_get_empty_tx_slots(np); |
1514 | 1654 | if (unlikely(empty_slots <= entries)) { | |
1515 | if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) { | 1655 | spin_lock_irq(&np->lock); |
1516 | spin_unlock_irq(&np->lock); | ||
1517 | netif_stop_queue(dev); | 1656 | netif_stop_queue(dev); |
1657 | np->tx_stop = 1; | ||
1658 | spin_unlock_irq(&np->lock); | ||
1518 | return NETDEV_TX_BUSY; | 1659 | return NETDEV_TX_BUSY; |
1519 | } | 1660 | } |
1520 | 1661 | ||
1662 | start_tx = put_tx = np->put_tx.orig; | ||
1663 | |||
1521 | /* setup the header buffer */ | 1664 | /* setup the header buffer */ |
1522 | do { | 1665 | do { |
1666 | prev_tx = put_tx; | ||
1667 | prev_tx_ctx = np->put_tx_ctx; | ||
1523 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; | 1668 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; |
1524 | nr = (nr + 1) % np->tx_ring_size; | 1669 | np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, |
1525 | |||
1526 | np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt, | ||
1527 | PCI_DMA_TODEVICE); | 1670 | PCI_DMA_TODEVICE); |
1528 | np->tx_dma_len[nr] = bcnt; | 1671 | np->put_tx_ctx->dma_len = bcnt; |
1672 | put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); | ||
1673 | put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); | ||
1529 | 1674 | ||
1530 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | ||
1531 | np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]); | ||
1532 | np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); | ||
1533 | } else { | ||
1534 | np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32; | ||
1535 | np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; | ||
1536 | np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); | ||
1537 | } | ||
1538 | tx_flags = np->tx_flags; | 1675 | tx_flags = np->tx_flags; |
1539 | offset += bcnt; | 1676 | offset += bcnt; |
1540 | size -= bcnt; | 1677 | size -= bcnt; |
1678 | if (unlikely(put_tx++ == np->last_tx.orig)) | ||
1679 | put_tx = np->first_tx.orig; | ||
1680 | if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) | ||
1681 | np->put_tx_ctx = np->first_tx_ctx; | ||
1541 | } while (size); | 1682 | } while (size); |
1542 | 1683 | ||
1543 | /* setup the fragments */ | 1684 | /* setup the fragments */ |
@@ -1547,58 +1688,174 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1547 | offset = 0; | 1688 | offset = 0; |
1548 | 1689 | ||
1549 | do { | 1690 | do { |
1691 | prev_tx = put_tx; | ||
1692 | prev_tx_ctx = np->put_tx_ctx; | ||
1550 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; | 1693 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; |
1551 | nr = (nr + 1) % np->tx_ring_size; | 1694 | np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, |
1552 | 1695 | PCI_DMA_TODEVICE); | |
1553 | np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, | 1696 | np->put_tx_ctx->dma_len = bcnt; |
1554 | PCI_DMA_TODEVICE); | 1697 | put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); |
1555 | np->tx_dma_len[nr] = bcnt; | 1698 | put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); |
1556 | 1699 | ||
1557 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | ||
1558 | np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]); | ||
1559 | np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); | ||
1560 | } else { | ||
1561 | np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32; | ||
1562 | np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; | ||
1563 | np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); | ||
1564 | } | ||
1565 | offset += bcnt; | 1700 | offset += bcnt; |
1566 | size -= bcnt; | 1701 | size -= bcnt; |
1702 | if (unlikely(put_tx++ == np->last_tx.orig)) | ||
1703 | put_tx = np->first_tx.orig; | ||
1704 | if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) | ||
1705 | np->put_tx_ctx = np->first_tx_ctx; | ||
1567 | } while (size); | 1706 | } while (size); |
1568 | } | 1707 | } |
1569 | 1708 | ||
1570 | /* set last fragment flag */ | 1709 | /* set last fragment flag */ |
1571 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1710 | prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); |
1572 | np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra); | 1711 | |
1573 | } else { | 1712 | /* save skb in this slot's context area */ |
1574 | np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra); | 1713 | prev_tx_ctx->skb = skb; |
1714 | |||
1715 | if (skb_is_gso(skb)) | ||
1716 | tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); | ||
1717 | else | ||
1718 | tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? | ||
1719 | NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; | ||
1720 | |||
1721 | spin_lock_irq(&np->lock); | ||
1722 | |||
1723 | /* set tx flags */ | ||
1724 | start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); | ||
1725 | np->put_tx.orig = put_tx; | ||
1726 | |||
1727 | spin_unlock_irq(&np->lock); | ||
1728 | |||
1729 | dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n", | ||
1730 | dev->name, entries, tx_flags_extra); | ||
1731 | { | ||
1732 | int j; | ||
1733 | for (j=0; j<64; j++) { | ||
1734 | if ((j%16) == 0) | ||
1735 | dprintk("\n%03x:", j); | ||
1736 | dprintk(" %02x", ((unsigned char*)skb->data)[j]); | ||
1737 | } | ||
1738 | dprintk("\n"); | ||
1739 | } | ||
1740 | |||
1741 | dev->trans_start = jiffies; | ||
1742 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | ||
1743 | return NETDEV_TX_OK; | ||
1744 | } | ||
1745 | |||
1746 | static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev) | ||
1747 | { | ||
1748 | struct fe_priv *np = netdev_priv(dev); | ||
1749 | u32 tx_flags = 0; | ||
1750 | u32 tx_flags_extra; | ||
1751 | unsigned int fragments = skb_shinfo(skb)->nr_frags; | ||
1752 | unsigned int i; | ||
1753 | u32 offset = 0; | ||
1754 | u32 bcnt; | ||
1755 | u32 size = skb->len-skb->data_len; | ||
1756 | u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); | ||
1757 | u32 empty_slots; | ||
1758 | struct ring_desc_ex* put_tx; | ||
1759 | struct ring_desc_ex* start_tx; | ||
1760 | struct ring_desc_ex* prev_tx; | ||
1761 | struct nv_skb_map* prev_tx_ctx; | ||
1762 | |||
1763 | /* add fragments to entries count */ | ||
1764 | for (i = 0; i < fragments; i++) { | ||
1765 | entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + | ||
1766 | ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); | ||
1767 | } | ||
1768 | |||
1769 | empty_slots = nv_get_empty_tx_slots(np); | ||
1770 | if (unlikely(empty_slots <= entries)) { | ||
1771 | spin_lock_irq(&np->lock); | ||
1772 | netif_stop_queue(dev); | ||
1773 | np->tx_stop = 1; | ||
1774 | spin_unlock_irq(&np->lock); | ||
1775 | return NETDEV_TX_BUSY; | ||
1776 | } | ||
1777 | |||
1778 | start_tx = put_tx = np->put_tx.ex; | ||
1779 | |||
1780 | /* setup the header buffer */ | ||
1781 | do { | ||
1782 | prev_tx = put_tx; | ||
1783 | prev_tx_ctx = np->put_tx_ctx; | ||
1784 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; | ||
1785 | np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, | ||
1786 | PCI_DMA_TODEVICE); | ||
1787 | np->put_tx_ctx->dma_len = bcnt; | ||
1788 | put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32; | ||
1789 | put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF; | ||
1790 | put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); | ||
1791 | |||
1792 | tx_flags = NV_TX2_VALID; | ||
1793 | offset += bcnt; | ||
1794 | size -= bcnt; | ||
1795 | if (unlikely(put_tx++ == np->last_tx.ex)) | ||
1796 | put_tx = np->first_tx.ex; | ||
1797 | if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) | ||
1798 | np->put_tx_ctx = np->first_tx_ctx; | ||
1799 | } while (size); | ||
1800 | |||
1801 | /* setup the fragments */ | ||
1802 | for (i = 0; i < fragments; i++) { | ||
1803 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
1804 | u32 size = frag->size; | ||
1805 | offset = 0; | ||
1806 | |||
1807 | do { | ||
1808 | prev_tx = put_tx; | ||
1809 | prev_tx_ctx = np->put_tx_ctx; | ||
1810 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; | ||
1811 | np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, | ||
1812 | PCI_DMA_TODEVICE); | ||
1813 | np->put_tx_ctx->dma_len = bcnt; | ||
1814 | put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32; | ||
1815 | put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF; | ||
1816 | put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); | ||
1817 | |||
1818 | offset += bcnt; | ||
1819 | size -= bcnt; | ||
1820 | if (unlikely(put_tx++ == np->last_tx.ex)) | ||
1821 | put_tx = np->first_tx.ex; | ||
1822 | if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) | ||
1823 | np->put_tx_ctx = np->first_tx_ctx; | ||
1824 | } while (size); | ||
1575 | } | 1825 | } |
1576 | 1826 | ||
1577 | np->tx_skbuff[nr] = skb; | 1827 | /* set last fragment flag */ |
1828 | prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET); | ||
1829 | |||
1830 | /* save skb in this slot's context area */ | ||
1831 | prev_tx_ctx->skb = skb; | ||
1578 | 1832 | ||
1579 | #ifdef NETIF_F_TSO | ||
1580 | if (skb_is_gso(skb)) | 1833 | if (skb_is_gso(skb)) |
1581 | tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); | 1834 | tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); |
1582 | else | 1835 | else |
1583 | #endif | 1836 | tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? |
1584 | tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? | ||
1585 | NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; | 1837 | NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; |
1586 | 1838 | ||
1587 | /* vlan tag */ | 1839 | /* vlan tag */ |
1588 | if (np->vlangrp && vlan_tx_tag_present(skb)) { | 1840 | if (likely(!np->vlangrp)) { |
1589 | tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb); | 1841 | start_tx->txvlan = 0; |
1842 | } else { | ||
1843 | if (vlan_tx_tag_present(skb)) | ||
1844 | start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb)); | ||
1845 | else | ||
1846 | start_tx->txvlan = 0; | ||
1590 | } | 1847 | } |
1591 | 1848 | ||
1849 | spin_lock_irq(&np->lock); | ||
1850 | |||
1592 | /* set tx flags */ | 1851 | /* set tx flags */ |
1593 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1852 | start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); |
1594 | np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); | 1853 | np->put_tx.ex = put_tx; |
1595 | } else { | 1854 | |
1596 | np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan); | 1855 | spin_unlock_irq(&np->lock); |
1597 | np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); | ||
1598 | } | ||
1599 | 1856 | ||
1600 | dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n", | 1857 | dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n", |
1601 | dev->name, np->next_tx, entries, tx_flags_extra); | 1858 | dev->name, entries, tx_flags_extra); |
1602 | { | 1859 | { |
1603 | int j; | 1860 | int j; |
1604 | for (j=0; j<64; j++) { | 1861 | for (j=0; j<64; j++) { |
@@ -1609,12 +1866,8 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1609 | dprintk("\n"); | 1866 | dprintk("\n"); |
1610 | } | 1867 | } |
1611 | 1868 | ||
1612 | np->next_tx += entries; | ||
1613 | |||
1614 | dev->trans_start = jiffies; | 1869 | dev->trans_start = jiffies; |
1615 | spin_unlock_irq(&np->lock); | ||
1616 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | 1870 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); |
1617 | pci_push(get_hwbase(dev)); | ||
1618 | return NETDEV_TX_OK; | 1871 | return NETDEV_TX_OK; |
1619 | } | 1872 | } |
1620 | 1873 | ||
@@ -1627,26 +1880,22 @@ static void nv_tx_done(struct net_device *dev) | |||
1627 | { | 1880 | { |
1628 | struct fe_priv *np = netdev_priv(dev); | 1881 | struct fe_priv *np = netdev_priv(dev); |
1629 | u32 flags; | 1882 | u32 flags; |
1630 | unsigned int i; | 1883 | struct ring_desc* orig_get_tx = np->get_tx.orig; |
1631 | struct sk_buff *skb; | ||
1632 | 1884 | ||
1633 | while (np->nic_tx != np->next_tx) { | 1885 | while ((np->get_tx.orig != np->put_tx.orig) && |
1634 | i = np->nic_tx % np->tx_ring_size; | 1886 | !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) { |
1635 | 1887 | ||
1636 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1888 | dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n", |
1637 | flags = le32_to_cpu(np->tx_ring.orig[i].flaglen); | 1889 | dev->name, flags); |
1638 | else | 1890 | |
1639 | flags = le32_to_cpu(np->tx_ring.ex[i].flaglen); | 1891 | pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, |
1892 | np->get_tx_ctx->dma_len, | ||
1893 | PCI_DMA_TODEVICE); | ||
1894 | np->get_tx_ctx->dma = 0; | ||
1640 | 1895 | ||
1641 | dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n", | ||
1642 | dev->name, np->nic_tx, flags); | ||
1643 | if (flags & NV_TX_VALID) | ||
1644 | break; | ||
1645 | if (np->desc_ver == DESC_VER_1) { | 1896 | if (np->desc_ver == DESC_VER_1) { |
1646 | if (flags & NV_TX_LASTPACKET) { | 1897 | if (flags & NV_TX_LASTPACKET) { |
1647 | skb = np->tx_skbuff[i]; | 1898 | if (flags & NV_TX_ERROR) { |
1648 | if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION| | ||
1649 | NV_TX_UNDERFLOW|NV_TX_ERROR)) { | ||
1650 | if (flags & NV_TX_UNDERFLOW) | 1899 | if (flags & NV_TX_UNDERFLOW) |
1651 | np->stats.tx_fifo_errors++; | 1900 | np->stats.tx_fifo_errors++; |
1652 | if (flags & NV_TX_CARRIERLOST) | 1901 | if (flags & NV_TX_CARRIERLOST) |
@@ -1654,14 +1903,14 @@ static void nv_tx_done(struct net_device *dev) | |||
1654 | np->stats.tx_errors++; | 1903 | np->stats.tx_errors++; |
1655 | } else { | 1904 | } else { |
1656 | np->stats.tx_packets++; | 1905 | np->stats.tx_packets++; |
1657 | np->stats.tx_bytes += skb->len; | 1906 | np->stats.tx_bytes += np->get_tx_ctx->skb->len; |
1658 | } | 1907 | } |
1908 | dev_kfree_skb_any(np->get_tx_ctx->skb); | ||
1909 | np->get_tx_ctx->skb = NULL; | ||
1659 | } | 1910 | } |
1660 | } else { | 1911 | } else { |
1661 | if (flags & NV_TX2_LASTPACKET) { | 1912 | if (flags & NV_TX2_LASTPACKET) { |
1662 | skb = np->tx_skbuff[i]; | 1913 | if (flags & NV_TX2_ERROR) { |
1663 | if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION| | ||
1664 | NV_TX2_UNDERFLOW|NV_TX2_ERROR)) { | ||
1665 | if (flags & NV_TX2_UNDERFLOW) | 1914 | if (flags & NV_TX2_UNDERFLOW) |
1666 | np->stats.tx_fifo_errors++; | 1915 | np->stats.tx_fifo_errors++; |
1667 | if (flags & NV_TX2_CARRIERLOST) | 1916 | if (flags & NV_TX2_CARRIERLOST) |
@@ -1669,15 +1918,56 @@ static void nv_tx_done(struct net_device *dev) | |||
1669 | np->stats.tx_errors++; | 1918 | np->stats.tx_errors++; |
1670 | } else { | 1919 | } else { |
1671 | np->stats.tx_packets++; | 1920 | np->stats.tx_packets++; |
1672 | np->stats.tx_bytes += skb->len; | 1921 | np->stats.tx_bytes += np->get_tx_ctx->skb->len; |
1673 | } | 1922 | } |
1923 | dev_kfree_skb_any(np->get_tx_ctx->skb); | ||
1924 | np->get_tx_ctx->skb = NULL; | ||
1674 | } | 1925 | } |
1675 | } | 1926 | } |
1676 | nv_release_txskb(dev, i); | 1927 | if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) |
1677 | np->nic_tx++; | 1928 | np->get_tx.orig = np->first_tx.orig; |
1929 | if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) | ||
1930 | np->get_tx_ctx = np->first_tx_ctx; | ||
1678 | } | 1931 | } |
1679 | if (np->next_tx - np->nic_tx < np->tx_limit_start) | 1932 | if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { |
1933 | np->tx_stop = 0; | ||
1680 | netif_wake_queue(dev); | 1934 | netif_wake_queue(dev); |
1935 | } | ||
1936 | } | ||
1937 | |||
1938 | static void nv_tx_done_optimized(struct net_device *dev, int limit) | ||
1939 | { | ||
1940 | struct fe_priv *np = netdev_priv(dev); | ||
1941 | u32 flags; | ||
1942 | struct ring_desc_ex* orig_get_tx = np->get_tx.ex; | ||
1943 | |||
1944 | while ((np->get_tx.ex != np->put_tx.ex) && | ||
1945 | !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) && | ||
1946 | (limit-- > 0)) { | ||
1947 | |||
1948 | dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n", | ||
1949 | dev->name, flags); | ||
1950 | |||
1951 | pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, | ||
1952 | np->get_tx_ctx->dma_len, | ||
1953 | PCI_DMA_TODEVICE); | ||
1954 | np->get_tx_ctx->dma = 0; | ||
1955 | |||
1956 | if (flags & NV_TX2_LASTPACKET) { | ||
1957 | if (!(flags & NV_TX2_ERROR)) | ||
1958 | np->stats.tx_packets++; | ||
1959 | dev_kfree_skb_any(np->get_tx_ctx->skb); | ||
1960 | np->get_tx_ctx->skb = NULL; | ||
1961 | } | ||
1962 | if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) | ||
1963 | np->get_tx.ex = np->first_tx.ex; | ||
1964 | if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) | ||
1965 | np->get_tx_ctx = np->first_tx_ctx; | ||
1966 | } | ||
1967 | if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { | ||
1968 | np->tx_stop = 0; | ||
1969 | netif_wake_queue(dev); | ||
1970 | } | ||
1681 | } | 1971 | } |
1682 | 1972 | ||
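nv_tx_done_optimized() takes a limit so each pass reaps a bounded number of completions; the timeout path below passes np->tx_ring_size to drain everything, and the cap pairs with the "backup tx cleanup if loop max reached" note this patch adds at NVREG_POLL_DEFAULT_THROUGHPUT. Caller sketch (TX_WORK_PER_LOOP is a hypothetical cap, not from this patch):

    nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);  /* bounded reap from irq context */
    nv_tx_done_optimized(dev, np->tx_ring_size);  /* full drain, as in nv_tx_timeout() */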
1683 | /* | 1973 | /* |
@@ -1700,9 +1990,8 @@ static void nv_tx_timeout(struct net_device *dev) | |||
1700 | { | 1990 | { |
1701 | int i; | 1991 | int i; |
1702 | 1992 | ||
1703 | printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n", | 1993 | printk(KERN_INFO "%s: Ring at %lx\n", |
1704 | dev->name, (unsigned long)np->ring_addr, | 1994 | dev->name, (unsigned long)np->ring_addr); |
1705 | np->next_tx, np->nic_tx); | ||
1706 | printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); | 1995 | printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); |
1707 | for (i=0;i<=np->register_size;i+= 32) { | 1996 | for (i=0;i<=np->register_size;i+= 32) { |
1708 | printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", | 1997 | printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", |
@@ -1750,13 +2039,16 @@ static void nv_tx_timeout(struct net_device *dev) | |||
1750 | nv_stop_tx(dev); | 2039 | nv_stop_tx(dev); |
1751 | 2040 | ||
1752 | /* 2) check that the packets were not sent already: */ | 2041 | /* 2) check that the packets were not sent already: */ |
1753 | nv_tx_done(dev); | 2042 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
2043 | nv_tx_done(dev); | ||
2044 | else | ||
2045 | nv_tx_done_optimized(dev, np->tx_ring_size); | ||
1754 | 2046 | ||
1755 | /* 3) if there are dead entries: clear everything */ | 2047 | /* 3) if there are dead entries: clear everything */ |
1756 | if (np->next_tx != np->nic_tx) { | 2048 | if (np->get_tx_ctx != np->put_tx_ctx) { |
1757 | printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); | 2049 | printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); |
1758 | nv_drain_tx(dev); | 2050 | nv_drain_tx(dev); |
1759 | np->next_tx = np->nic_tx = 0; | 2051 | nv_init_tx(dev); |
1760 | setup_hw_rings(dev, NV_SETUP_TX_RING); | 2052 | setup_hw_rings(dev, NV_SETUP_TX_RING); |
1761 | netif_wake_queue(dev); | 2053 | netif_wake_queue(dev); |
1762 | } | 2054 | } |
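The recovery step changes to match the new ring bookkeeping: with the index counters gone, resetting next_tx = nic_tx = 0 no longer suffices, so the timeout handler calls nv_init_tx() to rewind all of the tx cursors (first/last/get/put plus the nv_skb_map context pointers) and rezero the descriptors before re-pointing the hardware at the ring via setup_hw_rings().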
@@ -1823,40 +2115,27 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
1823 | { | 2115 | { |
1824 | struct fe_priv *np = netdev_priv(dev); | 2116 | struct fe_priv *np = netdev_priv(dev); |
1825 | u32 flags; | 2117 | u32 flags; |
1826 | u32 vlanflags = 0; | 2118 | u32 rx_processed_cnt = 0; |
1827 | int count; | 2119 | struct sk_buff *skb; |
1828 | 2120 | int len; | |
1829 | for (count = 0; count < limit; ++count) { | ||
1830 | struct sk_buff *skb; | ||
1831 | int len; | ||
1832 | int i; | ||
1833 | if (np->cur_rx - np->refill_rx >= np->rx_ring_size) | ||
1834 | break; /* we scanned the whole ring - do not continue */ | ||
1835 | |||
1836 | i = np->cur_rx % np->rx_ring_size; | ||
1837 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | ||
1838 | flags = le32_to_cpu(np->rx_ring.orig[i].flaglen); | ||
1839 | len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver); | ||
1840 | } else { | ||
1841 | flags = le32_to_cpu(np->rx_ring.ex[i].flaglen); | ||
1842 | len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver); | ||
1843 | vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow); | ||
1844 | } | ||
1845 | 2121 | ||
1846 | dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n", | 2122 | while((np->get_rx.orig != np->put_rx.orig) && |
1847 | dev->name, np->cur_rx, flags); | 2123 | !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && |
2124 | (rx_processed_cnt++ < limit)) { | ||
1848 | 2125 | ||
1849 | if (flags & NV_RX_AVAIL) | 2126 | dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n", |
1850 | break; /* still owned by hardware, */ | 2127 | dev->name, flags); |
1851 | 2128 | ||
1852 | /* | 2129 | /* |
1853 | * the packet is for us - immediately tear down the pci mapping. | 2130 | * the packet is for us - immediately tear down the pci mapping. |
1854 | * TODO: check if a prefetch of the first cacheline improves | 2131 | * TODO: check if a prefetch of the first cacheline improves |
1855 | * the performance. | 2132 | * the performance. |
1856 | */ | 2133 | */ |
1857 | pci_unmap_single(np->pci_dev, np->rx_dma[i], | 2134 | pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, |
1858 | np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, | 2135 | np->get_rx_ctx->dma_len, |
1859 | PCI_DMA_FROMDEVICE); | 2136 | PCI_DMA_FROMDEVICE); |
2137 | skb = np->get_rx_ctx->skb; | ||
2138 | np->get_rx_ctx->skb = NULL; | ||
1860 | 2139 | ||
1861 | { | 2140 | { |
1862 | int j; | 2141 | int j; |
@@ -1864,123 +2143,228 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
1864 | for (j=0; j<64; j++) { | 2143 | for (j=0; j<64; j++) { |
1865 | if ((j%16) == 0) | 2144 | if ((j%16) == 0) |
1866 | dprintk("\n%03x:", j); | 2145 | dprintk("\n%03x:", j); |
1867 | dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]); | 2146 | dprintk(" %02x", ((unsigned char*)skb->data)[j]); |
1868 | } | 2147 | } |
1869 | dprintk("\n"); | 2148 | dprintk("\n"); |
1870 | } | 2149 | } |
1871 | /* look at what we actually got: */ | 2150 | /* look at what we actually got: */ |
1872 | if (np->desc_ver == DESC_VER_1) { | 2151 | if (np->desc_ver == DESC_VER_1) { |
1873 | if (!(flags & NV_RX_DESCRIPTORVALID)) | 2152 | if (likely(flags & NV_RX_DESCRIPTORVALID)) { |
1874 | goto next_pkt; | 2153 | len = flags & LEN_MASK_V1; |
1875 | 2154 | if (unlikely(flags & NV_RX_ERROR)) { | |
1876 | if (flags & NV_RX_ERROR) { | 2155 | if (flags & NV_RX_ERROR4) { |
1877 | if (flags & NV_RX_MISSEDFRAME) { | 2156 | len = nv_getlen(dev, skb->data, len); |
1878 | np->stats.rx_missed_errors++; | 2157 | if (len < 0) { |
1879 | np->stats.rx_errors++; | 2158 | np->stats.rx_errors++; |
1880 | goto next_pkt; | 2159 | dev_kfree_skb(skb); |
1881 | } | 2160 | goto next_pkt; |
1882 | if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) { | 2161 | } |
1883 | np->stats.rx_errors++; | 2162 | } |
1884 | goto next_pkt; | 2163 | /* framing errors are soft errors */ |
1885 | } | 2164 | else if (flags & NV_RX_FRAMINGERR) { |
1886 | if (flags & NV_RX_CRCERR) { | 2165 | if (flags & NV_RX_SUBSTRACT1) { |
1887 | np->stats.rx_crc_errors++; | 2166 | len--; |
1888 | np->stats.rx_errors++; | 2167 | } |
1889 | goto next_pkt; | 2168 | } |
1890 | } | 2169 | /* the rest are hard errors */ |
1891 | if (flags & NV_RX_OVERFLOW) { | 2170 | else { |
1892 | np->stats.rx_over_errors++; | 2171 | if (flags & NV_RX_MISSEDFRAME) |
1893 | np->stats.rx_errors++; | 2172 | np->stats.rx_missed_errors++; |
1894 | goto next_pkt; | 2173 | if (flags & NV_RX_CRCERR) |
2174 | np->stats.rx_crc_errors++; | ||
2175 | if (flags & NV_RX_OVERFLOW) | ||
2176 | np->stats.rx_over_errors++; | ||
2177 | np->stats.rx_errors++; | ||
2178 | dev_kfree_skb(skb); | ||
2179 | goto next_pkt; | ||
2180 | } | ||
1895 | } | 2181 | } |
1896 | if (flags & NV_RX_ERROR4) { | 2182 | } else { |
1897 | len = nv_getlen(dev, np->rx_skbuff[i]->data, len); | 2183 | dev_kfree_skb(skb); |
1898 | if (len < 0) { | 2184 | goto next_pkt; |
2185 | } | ||
2186 | } else { | ||
2187 | if (likely(flags & NV_RX2_DESCRIPTORVALID)) { | ||
2188 | len = flags & LEN_MASK_V2; | ||
2189 | if (unlikely(flags & NV_RX2_ERROR)) { | ||
2190 | if (flags & NV_RX2_ERROR4) { | ||
2191 | len = nv_getlen(dev, skb->data, len); | ||
2192 | if (len < 0) { | ||
2193 | np->stats.rx_errors++; | ||
2194 | dev_kfree_skb(skb); | ||
2195 | goto next_pkt; | ||
2196 | } | ||
2197 | } | ||
2198 | /* framing errors are soft errors */ | ||
2199 | else if (flags & NV_RX2_FRAMINGERR) { | ||
2200 | if (flags & NV_RX2_SUBSTRACT1) { | ||
2201 | len--; | ||
2202 | } | ||
2203 | } | ||
2204 | /* the rest are hard errors */ | ||
2205 | else { | ||
2206 | if (flags & NV_RX2_CRCERR) | ||
2207 | np->stats.rx_crc_errors++; | ||
2208 | if (flags & NV_RX2_OVERFLOW) | ||
2209 | np->stats.rx_over_errors++; | ||
1899 | np->stats.rx_errors++; | 2210 | np->stats.rx_errors++; |
2211 | dev_kfree_skb(skb); | ||
1900 | goto next_pkt; | 2212 | goto next_pkt; |
1901 | } | 2213 | } |
1902 | } | 2214 | } |
1903 | /* framing errors are soft errors. */ | 2215 | if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ { |
1904 | if (flags & NV_RX_FRAMINGERR) { | 2216 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1905 | if (flags & NV_RX_SUBSTRACT1) { | 2217 | } else { |
1906 | len--; | 2218 | if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 || |
2219 | (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) { | ||
2220 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1907 | } | 2221 | } |
1908 | } | 2222 | } |
1909 | } | 2223 | } else { |
1910 | } else { | 2224 | dev_kfree_skb(skb); |
1911 | if (!(flags & NV_RX2_DESCRIPTORVALID)) | ||
1912 | goto next_pkt; | 2225 | goto next_pkt; |
2226 | } | ||
2227 | } | ||
2228 | /* got a valid packet - forward it to the network core */ | ||
2229 | skb_put(skb, len); | ||
2230 | skb->protocol = eth_type_trans(skb, dev); | ||
2231 | dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n", | ||
2232 | dev->name, len, skb->protocol); | ||
2233 | #ifdef CONFIG_FORCEDETH_NAPI | ||
2234 | netif_receive_skb(skb); | ||
2235 | #else | ||
2236 | netif_rx(skb); | ||
2237 | #endif | ||
2238 | dev->last_rx = jiffies; | ||
2239 | np->stats.rx_packets++; | ||
2240 | np->stats.rx_bytes += len; | ||
2241 | next_pkt: | ||
2242 | if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) | ||
2243 | np->get_rx.orig = np->first_rx.orig; | ||
2244 | if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) | ||
2245 | np->get_rx_ctx = np->first_rx_ctx; | ||
2246 | } | ||
1913 | 2247 | ||
1914 | if (flags & NV_RX2_ERROR) { | 2248 | return rx_processed_cnt; |
1915 | if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) { | 2249 | } |
1916 | np->stats.rx_errors++; | 2250 | |
1917 | goto next_pkt; | 2251 | static int nv_rx_process_optimized(struct net_device *dev, int limit) |
1918 | } | 2252 | { |
1919 | if (flags & NV_RX2_CRCERR) { | 2253 | struct fe_priv *np = netdev_priv(dev); |
1920 | np->stats.rx_crc_errors++; | 2254 | u32 flags; |
1921 | np->stats.rx_errors++; | 2255 | u32 vlanflags = 0; |
1922 | goto next_pkt; | 2256 | u32 rx_processed_cnt = 0; |
1923 | } | 2257 | struct sk_buff *skb; |
1924 | if (flags & NV_RX2_OVERFLOW) { | 2258 | int len; |
1925 | np->stats.rx_over_errors++; | 2259 | |
1926 | np->stats.rx_errors++; | 2260 | while((np->get_rx.ex != np->put_rx.ex) && |
1927 | goto next_pkt; | 2261 | !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && |
1928 | } | 2262 | (rx_processed_cnt++ < limit)) { |
2263 | |||
2264 | dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n", | ||
2265 | dev->name, flags); | ||
2266 | |||
2267 | /* | ||
2268 | * the packet is for us - immediately tear down the pci mapping. | ||
2269 | * TODO: check if a prefetch of the first cacheline improves | ||
2270 | * the performance. | ||
2271 | */ | ||
2272 | pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, | ||
2273 | np->get_rx_ctx->dma_len, | ||
2274 | PCI_DMA_FROMDEVICE); | ||
2275 | skb = np->get_rx_ctx->skb; | ||
2276 | np->get_rx_ctx->skb = NULL; | ||
2277 | |||
2278 | { | ||
2279 | int j; | ||
2280 | dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); | ||
2281 | for (j=0; j<64; j++) { | ||
2282 | if ((j%16) == 0) | ||
2283 | dprintk("\n%03x:", j); | ||
2284 | dprintk(" %02x", ((unsigned char*)skb->data)[j]); | ||
2285 | } | ||
2286 | dprintk("\n"); | ||
2287 | } | ||
2288 | /* look at what we actually got: */ | ||
2289 | if (likely(flags & NV_RX2_DESCRIPTORVALID)) { | ||
2290 | len = flags & LEN_MASK_V2; | ||
2291 | if (unlikely(flags & NV_RX2_ERROR)) { | ||
1929 | if (flags & NV_RX2_ERROR4) { | 2292 | if (flags & NV_RX2_ERROR4) { |
1930 | len = nv_getlen(dev, np->rx_skbuff[i]->data, len); | 2293 | len = nv_getlen(dev, skb->data, len); |
1931 | if (len < 0) { | 2294 | if (len < 0) { |
1932 | np->stats.rx_errors++; | 2295 | dev_kfree_skb(skb); |
1933 | goto next_pkt; | 2296 | goto next_pkt; |
1934 | } | 2297 | } |
1935 | } | 2298 | } |
1936 | /* framing errors are soft errors */ | 2299 | /* framing errors are soft errors */ |
1937 | if (flags & NV_RX2_FRAMINGERR) { | 2300 | else if (flags & NV_RX2_FRAMINGERR) { |
1938 | if (flags & NV_RX2_SUBSTRACT1) { | 2301 | if (flags & NV_RX2_SUBSTRACT1) { |
1939 | len--; | 2302 | len--; |
1940 | } | 2303 | } |
1941 | } | 2304 | } |
2305 | /* the rest are hard errors */ | ||
2306 | else { | ||
2307 | dev_kfree_skb(skb); | ||
2308 | goto next_pkt; | ||
2309 | } | ||
1942 | } | 2310 | } |
1943 | if (np->rx_csum) { | 2311 | |
1944 | flags &= NV_RX2_CHECKSUMMASK; | 2312 | if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ { |
1945 | if (flags == NV_RX2_CHECKSUMOK1 || | 2313 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1946 | flags == NV_RX2_CHECKSUMOK2 || | 2314 | } else { |
1947 | flags == NV_RX2_CHECKSUMOK3) { | 2315 | if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 || |
1948 | dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name); | 2316 | (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) { |
1949 | np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY; | 2317 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1950 | } else { | ||
1951 | dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name); | ||
1952 | } | 2318 | } |
1953 | } | 2319 | } |
1954 | } | ||
1955 | /* got a valid packet - forward it to the network core */ | ||
1956 | skb = np->rx_skbuff[i]; | ||
1957 | np->rx_skbuff[i] = NULL; | ||
1958 | 2320 | ||
1959 | skb_put(skb, len); | 2321 | /* got a valid packet - forward it to the network core */ |
1960 | skb->protocol = eth_type_trans(skb, dev); | 2322 | skb_put(skb, len); |
1961 | dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", | 2323 | skb->protocol = eth_type_trans(skb, dev); |
1962 | dev->name, np->cur_rx, len, skb->protocol); | 2324 | prefetch(skb->data); |
2325 | |||
2326 | dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n", | ||
2327 | dev->name, len, skb->protocol); | ||
2328 | |||
2329 | if (likely(!np->vlangrp)) { | ||
1963 | #ifdef CONFIG_FORCEDETH_NAPI | 2330 | #ifdef CONFIG_FORCEDETH_NAPI |
1964 | if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) | 2331 | netif_receive_skb(skb); |
1965 | vlan_hwaccel_receive_skb(skb, np->vlangrp, | ||
1966 | vlanflags & NV_RX3_VLAN_TAG_MASK); | ||
1967 | else | ||
1968 | netif_receive_skb(skb); | ||
1969 | #else | 2332 | #else |
1970 | if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) | 2333 | netif_rx(skb); |
1971 | vlan_hwaccel_rx(skb, np->vlangrp, | ||
1972 | vlanflags & NV_RX3_VLAN_TAG_MASK); | ||
1973 | else | ||
1974 | netif_rx(skb); | ||
1975 | #endif | 2334 | #endif |
1976 | dev->last_rx = jiffies; | 2335 | } else { |
1977 | np->stats.rx_packets++; | 2336 | vlanflags = le32_to_cpu(np->get_rx.ex->buflow); |
1978 | np->stats.rx_bytes += len; | 2337 | if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) { |
2338 | #ifdef CONFIG_FORCEDETH_NAPI | ||
2339 | vlan_hwaccel_receive_skb(skb, np->vlangrp, | ||
2340 | vlanflags & NV_RX3_VLAN_TAG_MASK); | ||
2341 | #else | ||
2342 | vlan_hwaccel_rx(skb, np->vlangrp, | ||
2343 | vlanflags & NV_RX3_VLAN_TAG_MASK); | ||
2344 | #endif | ||
2345 | } else { | ||
2346 | #ifdef CONFIG_FORCEDETH_NAPI | ||
2347 | netif_receive_skb(skb); | ||
2348 | #else | ||
2349 | netif_rx(skb); | ||
2350 | #endif | ||
2351 | } | ||
2352 | } | ||
2353 | |||
2354 | dev->last_rx = jiffies; | ||
2355 | np->stats.rx_packets++; | ||
2356 | np->stats.rx_bytes += len; | ||
2357 | } else { | ||
2358 | dev_kfree_skb(skb); | ||
2359 | } | ||
1979 | next_pkt: | 2360 | next_pkt: |
1980 | np->cur_rx++; | 2361 | if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) |
2362 | np->get_rx.ex = np->first_rx.ex; | ||
2363 | if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) | ||
2364 | np->get_rx_ctx = np->first_rx_ctx; | ||
1981 | } | 2365 | } |
1982 | 2366 | ||
1983 | return count; | 2367 | return rx_processed_cnt; |
1984 | } | 2368 | } |
1985 | 2369 | ||
1986 | static void set_bufsize(struct net_device *dev) | 2370 | static void set_bufsize(struct net_device *dev) |
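[Editor's note] The receive loops above drive two cursors in lockstep: get_rx walks the hardware descriptor ring while get_rx_ctx walks the driver's per-slot context array, and each wraps from last back to first at the next_pkt: label. Below is a compile-clean sketch of that cursor pattern with hypothetical stand-in types, not the driver's own:

    #include <stdio.h>

    #define RING_SIZE 8

    struct desc    { unsigned int flaglen; };   /* stands in for ring_desc */
    struct skb_ctx { void *skb; };              /* stands in for nv_skb_map */

    struct rx_ring {
        struct desc    d[RING_SIZE];
        struct skb_ctx c[RING_SIZE];
        struct desc    *get, *put, *first, *last;       /* descriptor cursors */
        struct skb_ctx *get_ctx, *first_ctx, *last_ctx; /* context cursors */
    };

    /* Consume up to 'limit' filled slots; mirrors the while() condition and
     * the next_pkt: wraparound in nv_rx_process() above. */
    static int ring_consume(struct rx_ring *r, int limit)
    {
        int done = 0;

        while (r->get != r->put && done < limit) {
            /* ... unmap, inspect flags, hand the skb to the stack ... */
            done++;
            if (r->get++ == r->last)            /* wrap descriptor cursor */
                r->get = r->first;
            if (r->get_ctx++ == r->last_ctx)    /* wrap context cursor */
                r->get_ctx = r->first_ctx;
        }
        return done;
    }

    int main(void)
    {
        struct rx_ring r;
        r.first = r.get = &r.d[0];
        r.last  = &r.d[RING_SIZE - 1];
        r.put   = &r.d[3];                      /* pretend 3 slots are full */
        r.first_ctx = r.get_ctx = &r.c[0];
        r.last_ctx  = &r.c[RING_SIZE - 1];
        printf("consumed %d\n", ring_consume(&r, 64));
        return 0;
    }

The compare-then-post-increment keeps the common case branch-free, which is why the driver wraps the wrap checks in unlikely().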
@@ -2456,7 +2840,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data) | |||
2456 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; | 2840 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; |
2457 | writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); | 2841 | writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); |
2458 | } | 2842 | } |
2459 | pci_push(base); | ||
2460 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); | 2843 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); |
2461 | if (!(events & np->irqmask)) | 2844 | if (!(events & np->irqmask)) |
2462 | break; | 2845 | break; |
@@ -2465,22 +2848,46 @@ static irqreturn_t nv_nic_irq(int foo, void *data) | |||
2465 | nv_tx_done(dev); | 2848 | nv_tx_done(dev); |
2466 | spin_unlock(&np->lock); | 2849 | spin_unlock(&np->lock); |
2467 | 2850 | ||
2468 | if (events & NVREG_IRQ_LINK) { | 2851 | #ifdef CONFIG_FORCEDETH_NAPI |
2852 | if (events & NVREG_IRQ_RX_ALL) { | ||
2853 | netif_rx_schedule(dev); | ||
2854 | |||
2855 | /* Disable further receive irqs */ | ||
2856 | spin_lock(&np->lock); | ||
2857 | np->irqmask &= ~NVREG_IRQ_RX_ALL; | ||
2858 | |||
2859 | if (np->msi_flags & NV_MSI_X_ENABLED) | ||
2860 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | ||
2861 | else | ||
2862 | writel(np->irqmask, base + NvRegIrqMask); | ||
2863 | spin_unlock(&np->lock); | ||
2864 | } | ||
2865 | #else | ||
2866 | if (nv_rx_process(dev, dev->weight)) { | ||
2867 | if (unlikely(nv_alloc_rx(dev))) { | ||
2868 | spin_lock(&np->lock); | ||
2869 | if (!np->in_shutdown) | ||
2870 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | ||
2871 | spin_unlock(&np->lock); | ||
2872 | } | ||
2873 | } | ||
2874 | #endif | ||
2875 | if (unlikely(events & NVREG_IRQ_LINK)) { | ||
2469 | spin_lock(&np->lock); | 2876 | spin_lock(&np->lock); |
2470 | nv_link_irq(dev); | 2877 | nv_link_irq(dev); |
2471 | spin_unlock(&np->lock); | 2878 | spin_unlock(&np->lock); |
2472 | } | 2879 | } |
2473 | if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { | 2880 | if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { |
2474 | spin_lock(&np->lock); | 2881 | spin_lock(&np->lock); |
2475 | nv_linkchange(dev); | 2882 | nv_linkchange(dev); |
2476 | spin_unlock(&np->lock); | 2883 | spin_unlock(&np->lock); |
2477 | np->link_timeout = jiffies + LINK_TIMEOUT; | 2884 | np->link_timeout = jiffies + LINK_TIMEOUT; |
2478 | } | 2885 | } |
2479 | if (events & (NVREG_IRQ_TX_ERR)) { | 2886 | if (unlikely(events & (NVREG_IRQ_TX_ERR))) { |
2480 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", | 2887 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", |
2481 | dev->name, events); | 2888 | dev->name, events); |
2482 | } | 2889 | } |
2483 | if (events & (NVREG_IRQ_UNKNOWN)) { | 2890 | if (unlikely(events & (NVREG_IRQ_UNKNOWN))) { |
2484 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", | 2891 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", |
2485 | dev->name, events); | 2892 | dev->name, events); |
2486 | } | 2893 | } |
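[Editor's note] With CONFIG_FORCEDETH_NAPI the handler above defers receive work: it schedules the poll routine with netif_rx_schedule() and clears the RX bits from its interrupt mask so the device stays quiet until the poller has drained the ring. A minimal sketch of that mask-and-schedule idea in plain C (hypothetical names; the real code writes NvRegIrqMask):

    #include <stdint.h>
    #include <stdio.h>

    #define IRQ_RX_ALL 0x000fu          /* hypothetical rx cause bits */

    struct nic {
        uint32_t mask_reg;              /* stands in for the MMIO mask register */
        uint32_t irqmask;               /* software copy of enabled causes */
        int poll_scheduled;
    };

    /* On an rx interrupt: hand the work to the poller, then mask rx sources
     * so no further rx interrupts arrive while the poller runs. */
    static void rx_irq(struct nic *n, uint32_t events)
    {
        if (events & IRQ_RX_ALL) {
            n->poll_scheduled = 1;      /* netif_rx_schedule() goes here */
            n->irqmask &= ~IRQ_RX_ALL;
            n->mask_reg = n->irqmask;   /* writel(..., NvRegIrqMask) */
        }
    }

    int main(void)
    {
        struct nic n = { .mask_reg = 0xffff, .irqmask = 0xffff };
        rx_irq(&n, 0x0001);
        printf("mask now 0x%04x, poll=%d\n", n.mask_reg, n.poll_scheduled);
        return 0;
    }

The re-enable half of the handshake lives in the poll routine, outside this hunk.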
@@ -2501,6 +2908,63 @@ static irqreturn_t nv_nic_irq(int foo, void *data) | |||
2501 | spin_unlock(&np->lock); | 2908 | spin_unlock(&np->lock); |
2502 | break; | 2909 | break; |
2503 | } | 2910 | } |
2911 | if (unlikely(i > max_interrupt_work)) { | ||
2912 | spin_lock(&np->lock); | ||
2913 | /* disable interrupts on the nic */ | ||
2914 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) | ||
2915 | writel(0, base + NvRegIrqMask); | ||
2916 | else | ||
2917 | writel(np->irqmask, base + NvRegIrqMask); | ||
2918 | pci_push(base); | ||
2919 | |||
2920 | if (!np->in_shutdown) { | ||
2921 | np->nic_poll_irq = np->irqmask; | ||
2922 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | ||
2923 | } | ||
2924 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); | ||
2925 | spin_unlock(&np->lock); | ||
2926 | break; | ||
2927 | } | ||
2928 | |||
2929 | } | ||
2930 | dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); | ||
2931 | |||
2932 | return IRQ_RETVAL(i); | ||
2933 | } | ||
2934 | |||
2935 | #define TX_WORK_PER_LOOP 64 | ||
2936 | #define RX_WORK_PER_LOOP 64 | ||
2937 | /** | ||
2938 | * All _optimized functions are used to help increase performance | ||
2939 | * (reduce CPU use and increase throughput). They use descriptor version 3, | ||
2940 | * compiler branch hints, and fewer memory accesses. | ||
2941 | */ | ||
2942 | static irqreturn_t nv_nic_irq_optimized(int foo, void *data) | ||
2943 | { | ||
2944 | struct net_device *dev = (struct net_device *) data; | ||
2945 | struct fe_priv *np = netdev_priv(dev); | ||
2946 | u8 __iomem *base = get_hwbase(dev); | ||
2947 | u32 events; | ||
2948 | int i; | ||
2949 | |||
2950 | dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name); | ||
2951 | |||
2952 | for (i=0; ; i++) { | ||
2953 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { | ||
2954 | events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; | ||
2955 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | ||
2956 | } else { | ||
2957 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; | ||
2958 | writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); | ||
2959 | } | ||
2960 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); | ||
2961 | if (!(events & np->irqmask)) | ||
2962 | break; | ||
2963 | |||
2964 | spin_lock(&np->lock); | ||
2965 | nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); | ||
2966 | spin_unlock(&np->lock); | ||
2967 | |||
2504 | #ifdef CONFIG_FORCEDETH_NAPI | 2968 | #ifdef CONFIG_FORCEDETH_NAPI |
2505 | if (events & NVREG_IRQ_RX_ALL) { | 2969 | if (events & NVREG_IRQ_RX_ALL) { |
2506 | netif_rx_schedule(dev); | 2970 | netif_rx_schedule(dev); |
@@ -2516,15 +2980,53 @@ static irqreturn_t nv_nic_irq(int foo, void *data) | |||
2516 | spin_unlock(&np->lock); | 2980 | spin_unlock(&np->lock); |
2517 | } | 2981 | } |
2518 | #else | 2982 | #else |
2519 | nv_rx_process(dev, dev->weight); | 2983 | if (nv_rx_process_optimized(dev, dev->weight)) { |
2520 | if (nv_alloc_rx(dev)) { | 2984 | if (unlikely(nv_alloc_rx_optimized(dev))) { |
2985 | spin_lock(&np->lock); | ||
2986 | if (!np->in_shutdown) | ||
2987 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | ||
2988 | spin_unlock(&np->lock); | ||
2989 | } | ||
2990 | } | ||
2991 | #endif | ||
2992 | if (unlikely(events & NVREG_IRQ_LINK)) { | ||
2521 | spin_lock(&np->lock); | 2993 | spin_lock(&np->lock); |
2522 | if (!np->in_shutdown) | 2994 | nv_link_irq(dev); |
2523 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | ||
2524 | spin_unlock(&np->lock); | 2995 | spin_unlock(&np->lock); |
2525 | } | 2996 | } |
2526 | #endif | 2997 | if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { |
2527 | if (i > max_interrupt_work) { | 2998 | spin_lock(&np->lock); |
2999 | nv_linkchange(dev); | ||
3000 | spin_unlock(&np->lock); | ||
3001 | np->link_timeout = jiffies + LINK_TIMEOUT; | ||
3002 | } | ||
3003 | if (unlikely(events & (NVREG_IRQ_TX_ERR))) { | ||
3004 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", | ||
3005 | dev->name, events); | ||
3006 | } | ||
3007 | if (unlikely(events & (NVREG_IRQ_UNKNOWN))) { | ||
3008 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", | ||
3009 | dev->name, events); | ||
3010 | } | ||
3011 | if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) { | ||
3012 | spin_lock(&np->lock); | ||
3013 | /* disable interrupts on the nic */ | ||
3014 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) | ||
3015 | writel(0, base + NvRegIrqMask); | ||
3016 | else | ||
3017 | writel(np->irqmask, base + NvRegIrqMask); | ||
3018 | pci_push(base); | ||
3019 | |||
3020 | if (!np->in_shutdown) { | ||
3021 | np->nic_poll_irq = np->irqmask; | ||
3022 | np->recover_error = 1; | ||
3023 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | ||
3024 | } | ||
3025 | spin_unlock(&np->lock); | ||
3026 | break; | ||
3027 | } | ||
3028 | |||
3029 | if (unlikely(i > max_interrupt_work)) { | ||
2528 | spin_lock(&np->lock); | 3030 | spin_lock(&np->lock); |
2529 | /* disable interrupts on the nic */ | 3031 | /* disable interrupts on the nic */ |
2530 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) | 3032 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) |
@@ -2543,7 +3045,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data) | |||
2543 | } | 3045 | } |
2544 | 3046 | ||
2545 | } | 3047 | } |
2546 | dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); | 3048 | dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name); |
2547 | 3049 | ||
2548 | return IRQ_RETVAL(i); | 3050 | return IRQ_RETVAL(i); |
2549 | } | 3051 | } |
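[Editor's note] Both nv_nic_irq() and nv_nic_irq_optimized() cap their status loop at max_interrupt_work; beyond that they mask the NIC and re-arm a poll timer so a storm of events cannot pin the CPU in interrupt context. The guard, reduced to a standalone sketch (all names illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_INTERRUPT_WORK 5

    struct nic {
        uint32_t irqmask;
        uint32_t pending;               /* stands in for the status register */
        int masked, timer_armed;
    };

    static uint32_t read_and_ack(struct nic *n)
    {
        uint32_t ev = n->pending;
        n->pending = 0;                 /* read + write-to-clear, collapsed */
        return ev;
    }

    static int handle_irq(struct nic *n)
    {
        int i;

        for (i = 0; ; i++) {
            uint32_t events = read_and_ack(n);
            if (!(events & n->irqmask))
                break;                  /* nothing left for us */
            /* ... service tx/rx/link causes here ... */
            if (i > MAX_INTERRUPT_WORK) {
                n->masked = 1;          /* silence the device */
                n->timer_armed = 1;     /* recover from a timer instead */
                break;
            }
        }
        return i;                       /* 0 => the irq was not ours */
    }

    int main(void)
    {
        struct nic n = { .irqmask = 0x1, .pending = 0x1 };
        printf("iterations: %d\n", handle_irq(&n));
        return 0;
    }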
@@ -2562,20 +3064,19 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data) | |||
2562 | for (i=0; ; i++) { | 3064 | for (i=0; ; i++) { |
2563 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; | 3065 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; |
2564 | writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); | 3066 | writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); |
2565 | pci_push(base); | ||
2566 | dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); | 3067 | dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); |
2567 | if (!(events & np->irqmask)) | 3068 | if (!(events & np->irqmask)) |
2568 | break; | 3069 | break; |
2569 | 3070 | ||
2570 | spin_lock_irqsave(&np->lock, flags); | 3071 | spin_lock_irqsave(&np->lock, flags); |
2571 | nv_tx_done(dev); | 3072 | nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); |
2572 | spin_unlock_irqrestore(&np->lock, flags); | 3073 | spin_unlock_irqrestore(&np->lock, flags); |
2573 | 3074 | ||
2574 | if (events & (NVREG_IRQ_TX_ERR)) { | 3075 | if (unlikely(events & (NVREG_IRQ_TX_ERR))) { |
2575 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", | 3076 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", |
2576 | dev->name, events); | 3077 | dev->name, events); |
2577 | } | 3078 | } |
2578 | if (i > max_interrupt_work) { | 3079 | if (unlikely(i > max_interrupt_work)) { |
2579 | spin_lock_irqsave(&np->lock, flags); | 3080 | spin_lock_irqsave(&np->lock, flags); |
2580 | /* disable interrupts on the nic */ | 3081 | /* disable interrupts on the nic */ |
2581 | writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); | 3082 | writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); |
@@ -2604,7 +3105,10 @@ static int nv_napi_poll(struct net_device *dev, int *budget) | |||
2604 | u8 __iomem *base = get_hwbase(dev); | 3105 | u8 __iomem *base = get_hwbase(dev); |
2605 | unsigned long flags; | 3106 | unsigned long flags; |
2606 | 3107 | ||
2607 | pkts = nv_rx_process(dev, limit); | 3108 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
3109 | pkts = nv_rx_process(dev, limit); | ||
3110 | else | ||
3111 | pkts = nv_rx_process_optimized(dev, limit); | ||
2608 | 3112 | ||
2609 | if (nv_alloc_rx(dev)) { | 3113 | if (nv_alloc_rx(dev)) { |
2610 | spin_lock_irqsave(&np->lock, flags); | 3114 | spin_lock_irqsave(&np->lock, flags); |
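[Editor's note] nv_napi_poll() now dispatches on ring format: DESC_VER_1/2 rings take the legacy nv_rx_process() path, while version-3 rings take nv_rx_process_optimized(). In sketch form, with plain-C stand-ins instead of kernel types:

    #include <stdio.h>

    enum desc_ver { DESC_VER_1, DESC_VER_2, DESC_VER_3 };

    struct fake_dev { enum desc_ver desc_ver; };

    static int rx_process(struct fake_dev *d, int limit)
    {
        (void)d;
        return limit;                   /* pretend the whole budget was used */
    }

    static int rx_process_optimized(struct fake_dev *d, int limit)
    {
        (void)d;
        return limit;
    }

    /* One ring format, one processing routine - chosen per poll call. */
    static int napi_poll(struct fake_dev *d, int limit)
    {
        if (d->desc_ver == DESC_VER_1 || d->desc_ver == DESC_VER_2)
            return rx_process(d, limit);
        return rx_process_optimized(d, limit);
    }

    int main(void)
    {
        struct fake_dev d = { .desc_ver = DESC_VER_3 };
        printf("processed %d packets\n", napi_poll(&d, 16));
        return 0;
    }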
@@ -2670,20 +3174,20 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data) | |||
2670 | for (i=0; ; i++) { | 3174 | for (i=0; ; i++) { |
2671 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; | 3175 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; |
2672 | writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); | 3176 | writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); |
2673 | pci_push(base); | ||
2674 | dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); | 3177 | dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); |
2675 | if (!(events & np->irqmask)) | 3178 | if (!(events & np->irqmask)) |
2676 | break; | 3179 | break; |
2677 | 3180 | ||
2678 | nv_rx_process(dev, dev->weight); | 3181 | if (nv_rx_process_optimized(dev, dev->weight)) { |
2679 | if (nv_alloc_rx(dev)) { | 3182 | if (unlikely(nv_alloc_rx_optimized(dev))) { |
2680 | spin_lock_irqsave(&np->lock, flags); | 3183 | spin_lock_irqsave(&np->lock, flags); |
2681 | if (!np->in_shutdown) | 3184 | if (!np->in_shutdown) |
2682 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | 3185 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); |
2683 | spin_unlock_irqrestore(&np->lock, flags); | 3186 | spin_unlock_irqrestore(&np->lock, flags); |
3187 | } | ||
2684 | } | 3188 | } |
2685 | 3189 | ||
2686 | if (i > max_interrupt_work) { | 3190 | if (unlikely(i > max_interrupt_work)) { |
2687 | spin_lock_irqsave(&np->lock, flags); | 3191 | spin_lock_irqsave(&np->lock, flags); |
2688 | /* disable interrupts on the nic */ | 3192 | /* disable interrupts on the nic */ |
2689 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | 3193 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); |
@@ -2718,11 +3222,15 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data) | |||
2718 | for (i=0; ; i++) { | 3222 | for (i=0; ; i++) { |
2719 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; | 3223 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; |
2720 | writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); | 3224 | writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); |
2721 | pci_push(base); | ||
2722 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); | 3225 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); |
2723 | if (!(events & np->irqmask)) | 3226 | if (!(events & np->irqmask)) |
2724 | break; | 3227 | break; |
2725 | 3228 | ||
3229 | /* check tx in case we reached max loop limit in tx isr */ | ||
3230 | spin_lock_irqsave(&np->lock, flags); | ||
3231 | nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); | ||
3232 | spin_unlock_irqrestore(&np->lock, flags); | ||
3233 | |||
2726 | if (events & NVREG_IRQ_LINK) { | 3234 | if (events & NVREG_IRQ_LINK) { |
2727 | spin_lock_irqsave(&np->lock, flags); | 3235 | spin_lock_irqsave(&np->lock, flags); |
2728 | nv_link_irq(dev); | 3236 | nv_link_irq(dev); |
@@ -2752,7 +3260,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data) | |||
2752 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", | 3260 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", |
2753 | dev->name, events); | 3261 | dev->name, events); |
2754 | } | 3262 | } |
2755 | if (i > max_interrupt_work) { | 3263 | if (unlikely(i > max_interrupt_work)) { |
2756 | spin_lock_irqsave(&np->lock, flags); | 3264 | spin_lock_irqsave(&np->lock, flags); |
2757 | /* disable interrupts on the nic */ | 3265 | /* disable interrupts on the nic */ |
2758 | writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); | 3266 | writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); |

@@ -2835,6 +3343,16 @@ static int nv_request_irq(struct net_device *dev, int intr_test) | |||
2835 | u8 __iomem *base = get_hwbase(dev); | 3343 | u8 __iomem *base = get_hwbase(dev); |
2836 | int ret = 1; | 3344 | int ret = 1; |
2837 | int i; | 3345 | int i; |
3346 | irqreturn_t (*handler)(int foo, void *data); | ||
3347 | |||
3348 | if (intr_test) { | ||
3349 | handler = nv_nic_irq_test; | ||
3350 | } else { | ||
3351 | if (np->desc_ver == DESC_VER_3) | ||
3352 | handler = nv_nic_irq_optimized; | ||
3353 | else | ||
3354 | handler = nv_nic_irq; | ||
3355 | } | ||
2838 | 3356 | ||
2839 | if (np->msi_flags & NV_MSI_X_CAPABLE) { | 3357 | if (np->msi_flags & NV_MSI_X_CAPABLE) { |
2840 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | 3358 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { |
@@ -2872,10 +3390,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test) | |||
2872 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); | 3390 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); |
2873 | } else { | 3391 | } else { |
2874 | /* Request irq for all interrupts */ | 3392 | /* Request irq for all interrupts */ |
2875 | if ((!intr_test && | 3393 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { |
2876 | request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || | ||
2877 | (intr_test && | ||
2878 | request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) { | ||
2879 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); | 3394 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); |
2880 | pci_disable_msix(np->pci_dev); | 3395 | pci_disable_msix(np->pci_dev); |
2881 | np->msi_flags &= ~NV_MSI_X_ENABLED; | 3396 | np->msi_flags &= ~NV_MSI_X_ENABLED; |
@@ -2891,8 +3406,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test) | |||
2891 | if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { | 3406 | if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { |
2892 | if ((ret = pci_enable_msi(np->pci_dev)) == 0) { | 3407 | if ((ret = pci_enable_msi(np->pci_dev)) == 0) { |
2893 | np->msi_flags |= NV_MSI_ENABLED; | 3408 | np->msi_flags |= NV_MSI_ENABLED; |
2894 | if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || | 3409 | if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { |
2895 | (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) { | ||
2896 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); | 3410 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); |
2897 | pci_disable_msi(np->pci_dev); | 3411 | pci_disable_msi(np->pci_dev); |
2898 | np->msi_flags &= ~NV_MSI_ENABLED; | 3412 | np->msi_flags &= ~NV_MSI_ENABLED; |
@@ -2907,8 +3421,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test) | |||
2907 | } | 3421 | } |
2908 | } | 3422 | } |
2909 | if (ret != 0) { | 3423 | if (ret != 0) { |
2910 | if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || | 3424 | if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) |
2911 | (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) | ||
2912 | goto out_err; | 3425 | goto out_err; |
2913 | 3426 | ||
2914 | } | 3427 | } |
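[Editor's note] nv_request_irq() now resolves the ISR once into a local function pointer (nv_nic_irq_test for the interrupt self-test, nv_nic_irq_optimized for DESC_VER_3, nv_nic_irq otherwise), so every request_irq() call site passes the same handler instead of repeating the intr_test conditional. The pattern as a standalone sketch:

    #include <stdio.h>

    typedef int irqreturn_t;
    typedef irqreturn_t (*irq_handler_t)(int irq, void *data);

    static irqreturn_t irq_legacy(int irq, void *data)    { (void)irq; (void)data; return 1; }
    static irqreturn_t irq_optimized(int irq, void *data) { (void)irq; (void)data; return 1; }
    static irqreturn_t irq_test(int irq, void *data)      { (void)irq; (void)data; return 1; }

    /* Decide once; every later registration reuses the same pointer. */
    static irq_handler_t pick_handler(int intr_test, int desc_ver)
    {
        if (intr_test)
            return irq_test;
        return desc_ver == 3 ? irq_optimized : irq_legacy;
    }

    int main(void)
    {
        irq_handler_t handler = pick_handler(0, 3);
        printf("handler returned %d\n", handler(0, NULL));
        return 0;
    }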
@@ -3051,47 +3564,8 @@ static void nv_do_stats_poll(unsigned long data) | |||
3051 | { | 3564 | { |
3052 | struct net_device *dev = (struct net_device *) data; | 3565 | struct net_device *dev = (struct net_device *) data; |
3053 | struct fe_priv *np = netdev_priv(dev); | 3566 | struct fe_priv *np = netdev_priv(dev); |
3054 | u8 __iomem *base = get_hwbase(dev); | ||
3055 | 3567 | ||
3056 | np->estats.tx_bytes += readl(base + NvRegTxCnt); | 3568 | nv_get_hw_stats(dev); |
3057 | np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); | ||
3058 | np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); | ||
3059 | np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); | ||
3060 | np->estats.tx_late_collision += readl(base + NvRegTxLateCol); | ||
3061 | np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); | ||
3062 | np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); | ||
3063 | np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); | ||
3064 | np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); | ||
3065 | np->estats.tx_deferral += readl(base + NvRegTxDef); | ||
3066 | np->estats.tx_packets += readl(base + NvRegTxFrame); | ||
3067 | np->estats.tx_pause += readl(base + NvRegTxPause); | ||
3068 | np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); | ||
3069 | np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); | ||
3070 | np->estats.rx_late_collision += readl(base + NvRegRxLateCol); | ||
3071 | np->estats.rx_runt += readl(base + NvRegRxRunt); | ||
3072 | np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); | ||
3073 | np->estats.rx_over_errors += readl(base + NvRegRxOverflow); | ||
3074 | np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); | ||
3075 | np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); | ||
3076 | np->estats.rx_length_error += readl(base + NvRegRxLenErr); | ||
3077 | np->estats.rx_unicast += readl(base + NvRegRxUnicast); | ||
3078 | np->estats.rx_multicast += readl(base + NvRegRxMulticast); | ||
3079 | np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); | ||
3080 | np->estats.rx_bytes += readl(base + NvRegRxCnt); | ||
3081 | np->estats.rx_pause += readl(base + NvRegRxPause); | ||
3082 | np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); | ||
3083 | np->estats.rx_packets = | ||
3084 | np->estats.rx_unicast + | ||
3085 | np->estats.rx_multicast + | ||
3086 | np->estats.rx_broadcast; | ||
3087 | np->estats.rx_errors_total = | ||
3088 | np->estats.rx_crc_errors + | ||
3089 | np->estats.rx_over_errors + | ||
3090 | np->estats.rx_frame_error + | ||
3091 | (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + | ||
3092 | np->estats.rx_late_collision + | ||
3093 | np->estats.rx_runt + | ||
3094 | np->estats.rx_frame_too_long; | ||
3095 | 3569 | ||
3096 | if (!np->in_shutdown) | 3570 | if (!np->in_shutdown) |
3097 | mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); | 3571 | mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); |
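[Editor's note] The stats timer body collapses to a single nv_get_hw_stats() call; the long register-by-register accumulation on the left (NvRegTxCnt through the rx_errors_total sum) moves into that shared helper so the ethtool path can reuse it. A hedged sketch of the helper's shape, using just three of the counters shown above (the helper's real body sits outside this hunk):

    #include <stdint.h>
    #include <stdio.h>

    struct hw_regs {                    /* stand-ins for the MMIO reads */
        uint32_t tx_cnt, rx_cnt, rx_fcs_err;
    };

    struct nv_stats_sw {
        uint64_t tx_bytes, rx_bytes, rx_crc_errors;
    };

    /* Fold hardware counters into the software copy; both the stats timer
     * and the ethtool handler can call this, mirroring the += accumulation
     * removed from nv_do_stats_poll() above. */
    static void get_hw_stats(struct nv_stats_sw *s, const struct hw_regs *r)
    {
        s->tx_bytes      += r->tx_cnt;
        s->rx_bytes      += r->rx_cnt;
        s->rx_crc_errors += r->rx_fcs_err;
    }

    int main(void)
    {
        struct nv_stats_sw s = { 0 };
        struct hw_regs r = { .tx_cnt = 1500, .rx_cnt = 900, .rx_fcs_err = 1 };
        get_hw_stats(&s, &r);
        printf("tx=%llu rx=%llu crc=%llu\n",
               (unsigned long long)s.tx_bytes,
               (unsigned long long)s.rx_bytes,
               (unsigned long long)s.rx_crc_errors);
        return 0;
    }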
@@ -3465,7 +3939,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri | |||
3465 | { | 3939 | { |
3466 | struct fe_priv *np = netdev_priv(dev); | 3940 | struct fe_priv *np = netdev_priv(dev); |
3467 | u8 __iomem *base = get_hwbase(dev); | 3941 | u8 __iomem *base = get_hwbase(dev); |
3468 | u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len; | 3942 | u8 *rxtx_ring, *rx_skbuff, *tx_skbuff; |
3469 | dma_addr_t ring_addr; | 3943 | dma_addr_t ring_addr; |
3470 | 3944 | ||
3471 | if (ring->rx_pending < RX_RING_MIN || | 3945 | if (ring->rx_pending < RX_RING_MIN || |
@@ -3491,12 +3965,9 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri | |||
3491 | sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), | 3965 | sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), |
3492 | &ring_addr); | 3966 | &ring_addr); |
3493 | } | 3967 | } |
3494 | rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL); | 3968 | rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL); |
3495 | rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL); | 3969 | tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); |
3496 | tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL); | 3970 | if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { |
3497 | tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL); | ||
3498 | tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL); | ||
3499 | if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) { | ||
3500 | /* fall back to old rings */ | 3971 | /* fall back to old rings */ |
3501 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 3972 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
3502 | if (rxtx_ring) | 3973 | if (rxtx_ring) |
@@ -3509,14 +3980,8 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri | |||
3509 | } | 3980 | } |
3510 | if (rx_skbuff) | 3981 | if (rx_skbuff) |
3511 | kfree(rx_skbuff); | 3982 | kfree(rx_skbuff); |
3512 | if (rx_dma) | ||
3513 | kfree(rx_dma); | ||
3514 | if (tx_skbuff) | 3983 | if (tx_skbuff) |
3515 | kfree(tx_skbuff); | 3984 | kfree(tx_skbuff); |
3516 | if (tx_dma) | ||
3517 | kfree(tx_dma); | ||
3518 | if (tx_dma_len) | ||
3519 | kfree(tx_dma_len); | ||
3520 | goto exit; | 3985 | goto exit; |
3521 | } | 3986 | } |
3522 | 3987 | ||
@@ -3538,8 +4003,6 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri | |||
3538 | /* set new values */ | 4003 | /* set new values */ |
3539 | np->rx_ring_size = ring->rx_pending; | 4004 | np->rx_ring_size = ring->rx_pending; |
3540 | np->tx_ring_size = ring->tx_pending; | 4005 | np->tx_ring_size = ring->tx_pending; |
3541 | np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE; | ||
3542 | np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1; | ||
3543 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 4006 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
3544 | np->rx_ring.orig = (struct ring_desc*)rxtx_ring; | 4007 | np->rx_ring.orig = (struct ring_desc*)rxtx_ring; |
3545 | np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; | 4008 | np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; |
@@ -3547,18 +4010,12 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri | |||
3547 | np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; | 4010 | np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; |
3548 | np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; | 4011 | np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; |
3549 | } | 4012 | } |
3550 | np->rx_skbuff = (struct sk_buff**)rx_skbuff; | 4013 | np->rx_skb = (struct nv_skb_map*)rx_skbuff; |
3551 | np->rx_dma = (dma_addr_t*)rx_dma; | 4014 | np->tx_skb = (struct nv_skb_map*)tx_skbuff; |
3552 | np->tx_skbuff = (struct sk_buff**)tx_skbuff; | ||
3553 | np->tx_dma = (dma_addr_t*)tx_dma; | ||
3554 | np->tx_dma_len = (unsigned int*)tx_dma_len; | ||
3555 | np->ring_addr = ring_addr; | 4015 | np->ring_addr = ring_addr; |
3556 | 4016 | ||
3557 | memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); | 4017 | memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); |
3558 | memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); | 4018 | memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); |
3559 | memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size); | ||
3560 | memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size); | ||
3561 | memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size); | ||
3562 | 4019 | ||
3563 | if (netif_running(dev)) { | 4020 | if (netif_running(dev)) { |
3564 | /* reinit driver view of the queues */ | 4021 | /* reinit driver view of the queues */ |
@@ -3727,8 +4184,10 @@ static int nv_get_stats_count(struct net_device *dev) | |||
3727 | { | 4184 | { |
3728 | struct fe_priv *np = netdev_priv(dev); | 4185 | struct fe_priv *np = netdev_priv(dev); |
3729 | 4186 | ||
3730 | if (np->driver_data & DEV_HAS_STATISTICS) | 4187 | if (np->driver_data & DEV_HAS_STATISTICS_V1) |
3731 | return sizeof(struct nv_ethtool_stats)/sizeof(u64); | 4188 | return NV_DEV_STATISTICS_V1_COUNT; |
4189 | else if (np->driver_data & DEV_HAS_STATISTICS_V2) | ||
4190 | return NV_DEV_STATISTICS_V2_COUNT; | ||
3732 | else | 4191 | else |
3733 | return 0; | 4192 | return 0; |
3734 | } | 4193 | } |
@@ -3955,7 +4414,7 @@ static int nv_loopback_test(struct net_device *dev) | |||
3955 | dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", | 4414 | dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", |
3956 | dev->name, len, pkt_len); | 4415 | dev->name, len, pkt_len); |
3957 | } else { | 4416 | } else { |
3958 | rx_skb = np->rx_skbuff[0]; | 4417 | rx_skb = np->rx_skb[0].skb; |
3959 | for (i = 0; i < pkt_len; i++) { | 4418 | for (i = 0; i < pkt_len; i++) { |
3960 | if (rx_skb->data[i] != (u8)(i & 0xff)) { | 4419 | if (rx_skb->data[i] != (u8)(i & 0xff)) { |
3961 | ret = 0; | 4420 | ret = 0; |
@@ -4315,7 +4774,7 @@ static int nv_open(struct net_device *dev) | |||
4315 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | 4774 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); |
4316 | 4775 | ||
4317 | /* start statistics timer */ | 4776 | /* start statistics timer */ |
4318 | if (np->driver_data & DEV_HAS_STATISTICS) | 4777 | if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) |
4319 | mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); | 4778 | mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); |
4320 | 4779 | ||
4321 | spin_unlock_irq(&np->lock); | 4780 | spin_unlock_irq(&np->lock); |
@@ -4412,7 +4871,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4412 | if (err < 0) | 4871 | if (err < 0) |
4413 | goto out_disable; | 4872 | goto out_disable; |
4414 | 4873 | ||
4415 | if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS)) | 4874 | if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2)) |
4875 | np->register_size = NV_PCI_REGSZ_VER3; | ||
4876 | else if (id->driver_data & DEV_HAS_STATISTICS_V1) | ||
4416 | np->register_size = NV_PCI_REGSZ_VER2; | 4877 | np->register_size = NV_PCI_REGSZ_VER2; |
4417 | else | 4878 | else |
4418 | np->register_size = NV_PCI_REGSZ_VER1; | 4879 | np->register_size = NV_PCI_REGSZ_VER1; |
@@ -4475,10 +4936,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4475 | np->rx_csum = 1; | 4936 | np->rx_csum = 1; |
4476 | np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; | 4937 | np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; |
4477 | dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; | 4938 | dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
4478 | #ifdef NETIF_F_TSO | ||
4479 | dev->features |= NETIF_F_TSO; | 4939 | dev->features |= NETIF_F_TSO; |
4480 | #endif | 4940 | } |
4481 | } | ||
4482 | 4941 | ||
4483 | np->vlanctl_bits = 0; | 4942 | np->vlanctl_bits = 0; |
4484 | if (id->driver_data & DEV_HAS_VLAN) { | 4943 | if (id->driver_data & DEV_HAS_VLAN) { |
@@ -4512,8 +4971,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4512 | 4971 | ||
4513 | np->rx_ring_size = RX_RING_DEFAULT; | 4972 | np->rx_ring_size = RX_RING_DEFAULT; |
4514 | np->tx_ring_size = TX_RING_DEFAULT; | 4973 | np->tx_ring_size = TX_RING_DEFAULT; |
4515 | np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE; | ||
4516 | np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1; | ||
4517 | 4974 | ||
4518 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 4975 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
4519 | np->rx_ring.orig = pci_alloc_consistent(pci_dev, | 4976 | np->rx_ring.orig = pci_alloc_consistent(pci_dev, |
@@ -4530,22 +4987,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4530 | goto out_unmap; | 4987 | goto out_unmap; |
4531 | np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; | 4988 | np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; |
4532 | } | 4989 | } |
4533 | np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL); | 4990 | np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL); |
4534 | np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL); | 4991 | np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL); |
4535 | np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL); | 4992 | if (!np->rx_skb || !np->tx_skb) |
4536 | np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL); | ||
4537 | np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL); | ||
4538 | if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len) | ||
4539 | goto out_freering; | 4993 | goto out_freering; |
4540 | memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); | 4994 | memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); |
4541 | memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); | 4995 | memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); |
4542 | memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size); | ||
4543 | memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size); | ||
4544 | memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size); | ||
4545 | 4996 | ||
4546 | dev->open = nv_open; | 4997 | dev->open = nv_open; |
4547 | dev->stop = nv_close; | 4998 | dev->stop = nv_close; |
4548 | dev->hard_start_xmit = nv_start_xmit; | 4999 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
5000 | dev->hard_start_xmit = nv_start_xmit; | ||
5001 | else | ||
5002 | dev->hard_start_xmit = nv_start_xmit_optimized; | ||
4549 | dev->get_stats = nv_get_stats; | 5003 | dev->get_stats = nv_get_stats; |
4550 | dev->change_mtu = nv_change_mtu; | 5004 | dev->change_mtu = nv_change_mtu; |
4551 | dev->set_mac_address = nv_set_mac_address; | 5005 | dev->set_mac_address = nv_set_mac_address; |
@@ -4553,7 +5007,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4553 | #ifdef CONFIG_NET_POLL_CONTROLLER | 5007 | #ifdef CONFIG_NET_POLL_CONTROLLER |
4554 | dev->poll_controller = nv_poll_controller; | 5008 | dev->poll_controller = nv_poll_controller; |
4555 | #endif | 5009 | #endif |
4556 | dev->weight = 64; | 5010 | dev->weight = RX_WORK_PER_LOOP; |
4557 | #ifdef CONFIG_FORCEDETH_NAPI | 5011 | #ifdef CONFIG_FORCEDETH_NAPI |
4558 | dev->poll = nv_napi_poll; | 5012 | dev->poll = nv_napi_poll; |
4559 | #endif | 5013 | #endif |
@@ -4868,83 +5322,83 @@ static struct pci_device_id pci_tbl[] = { | |||
4868 | }, | 5322 | }, |
4869 | { /* CK804 Ethernet Controller */ | 5323 | { /* CK804 Ethernet Controller */ |
4870 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8), | 5324 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8), |
4871 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, | 5325 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1, |
4872 | }, | 5326 | }, |
4873 | { /* CK804 Ethernet Controller */ | 5327 | { /* CK804 Ethernet Controller */ |
4874 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9), | 5328 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9), |
4875 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, | 5329 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1, |
4876 | }, | 5330 | }, |
4877 | { /* MCP04 Ethernet Controller */ | 5331 | { /* MCP04 Ethernet Controller */ |
4878 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10), | 5332 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10), |
4879 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, | 5333 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1, |
4880 | }, | 5334 | }, |
4881 | { /* MCP04 Ethernet Controller */ | 5335 | { /* MCP04 Ethernet Controller */ |
4882 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11), | 5336 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11), |
4883 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, | 5337 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1, |
4884 | }, | 5338 | }, |
4885 | { /* MCP51 Ethernet Controller */ | 5339 | { /* MCP51 Ethernet Controller */ |
4886 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12), | 5340 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12), |
4887 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL, | 5341 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1, |
4888 | }, | 5342 | }, |
4889 | { /* MCP51 Ethernet Controller */ | 5343 | { /* MCP51 Ethernet Controller */ |
4890 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13), | 5344 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13), |
4891 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL, | 5345 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1, |
4892 | }, | 5346 | }, |
4893 | { /* MCP55 Ethernet Controller */ | 5347 | { /* MCP55 Ethernet Controller */ |
4894 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), | 5348 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), |
4895 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5349 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4896 | }, | 5350 | }, |
4897 | { /* MCP55 Ethernet Controller */ | 5351 | { /* MCP55 Ethernet Controller */ |
4898 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), | 5352 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), |
4899 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5353 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4900 | }, | 5354 | }, |
4901 | { /* MCP61 Ethernet Controller */ | 5355 | { /* MCP61 Ethernet Controller */ |
4902 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), | 5356 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), |
4903 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5357 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4904 | }, | 5358 | }, |
4905 | { /* MCP61 Ethernet Controller */ | 5359 | { /* MCP61 Ethernet Controller */ |
4906 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), | 5360 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), |
4907 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5361 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4908 | }, | 5362 | }, |
4909 | { /* MCP61 Ethernet Controller */ | 5363 | { /* MCP61 Ethernet Controller */ |
4910 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), | 5364 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), |
4911 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5365 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4912 | }, | 5366 | }, |
4913 | { /* MCP61 Ethernet Controller */ | 5367 | { /* MCP61 Ethernet Controller */ |
4914 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), | 5368 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), |
4915 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5369 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4916 | }, | 5370 | }, |
4917 | { /* MCP65 Ethernet Controller */ | 5371 | { /* MCP65 Ethernet Controller */ |
4918 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), | 5372 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), |
4919 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5373 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4920 | }, | 5374 | }, |
4921 | { /* MCP65 Ethernet Controller */ | 5375 | { /* MCP65 Ethernet Controller */ |
4922 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), | 5376 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), |
4923 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5377 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4924 | }, | 5378 | }, |
4925 | { /* MCP65 Ethernet Controller */ | 5379 | { /* MCP65 Ethernet Controller */ |
4926 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), | 5380 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), |
4927 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5381 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4928 | }, | 5382 | }, |
4929 | { /* MCP65 Ethernet Controller */ | 5383 | { /* MCP65 Ethernet Controller */ |
4930 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), | 5384 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), |
4931 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5385 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4932 | }, | 5386 | }, |
4933 | { /* MCP67 Ethernet Controller */ | 5387 | { /* MCP67 Ethernet Controller */ |
4934 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), | 5388 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), |
4935 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5389 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4936 | }, | 5390 | }, |
4937 | { /* MCP67 Ethernet Controller */ | 5391 | { /* MCP67 Ethernet Controller */ |
4938 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25), | 5392 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25), |
4939 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5393 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4940 | }, | 5394 | }, |
4941 | { /* MCP67 Ethernet Controller */ | 5395 | { /* MCP67 Ethernet Controller */ |
4942 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26), | 5396 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26), |
4943 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5397 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4944 | }, | 5398 | }, |
4945 | { /* MCP67 Ethernet Controller */ | 5399 | { /* MCP67 Ethernet Controller */ |
4946 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), | 5400 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), |
4947 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | 5401 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
4948 | }, | 5402 | }, |
4949 | {0,}, | 5403 | {0,}, |
4950 | }; | 5404 | }; |