Diffstat (limited to 'drivers/net/forcedeth.c')

 -rw-r--r--  drivers/net/forcedeth.c  593
 1 files changed, 545 insertions, 48 deletions

diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3682ec61e8a8..e7fc28b07e5a 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -102,6 +102,9 @@
  * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
  * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
  * 0.49: 10 Dec 2005: Fix tso for large buffers.
+ * 0.50: 20 Jan 2006: Add 8021pq tagging support.
+ * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
+ * 0.52: 20 Jan 2006: Add MSI/MSIX support.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -113,7 +116,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION	"0.49"
+#define FORCEDETH_VERSION	"0.52"
 #define DRV_NAME	"forcedeth"
 
 #include <linux/module.h>
@@ -153,6 +156,9 @@
 #define DEV_HAS_LARGEDESC	0x0004  /* device supports jumbo frames and needs packet format 2 */
 #define DEV_HAS_HIGH_DMA	0x0008  /* device supports 64bit dma */
 #define DEV_HAS_CHECKSUM	0x0010  /* device supports tx and rx checksum offloads */
+#define DEV_HAS_VLAN	0x0020  /* device supports vlan tagging and striping */
+#define DEV_HAS_MSI	0x0040  /* device supports MSI */
+#define DEV_HAS_MSI_X	0x0080  /* device supports MSI-X */
 
 enum {
 	NvRegIrqStatus = 0x000,
@@ -166,14 +172,17 @@ enum {
 #define NVREG_IRQ_TX_OK			0x0010
 #define NVREG_IRQ_TIMER			0x0020
 #define NVREG_IRQ_LINK			0x0040
-#define NVREG_IRQ_TX_ERROR		0x0080
-#define NVREG_IRQ_TX1			0x0100
+#define NVREG_IRQ_RX_FORCED		0x0080
+#define NVREG_IRQ_TX_FORCED		0x0100
 #define NVREG_IRQMASK_THROUGHPUT	0x00df
 #define NVREG_IRQMASK_CPU		0x0040
+#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
+#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
+#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK)
 
-#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
-					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \
-					NVREG_IRQ_TX1))
+#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
+					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
+					NVREG_IRQ_TX_FORCED))
 
 	NvRegUnknownSetupReg6 = 0x008,
 #define NVREG_UNKSETUP6_VAL		3
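The three group masks above partition the individual status bits between the new MSI-X vectors. A standalone sanity check of the grouping; the NVREG_IRQ_RX*, NVREG_IRQ_TX_ERR and NVREG_IRQ_TX_OK values are assumed from the surrounding driver source, since this hunk only shows the two renamed 0x0080/0x0100 bits:

    #include <assert.h>

    #define NVREG_IRQ_RX_ERROR  0x0001  /* from the unchanged part of the enum */
    #define NVREG_IRQ_RX        0x0002
    #define NVREG_IRQ_RX_NOBUF  0x0004
    #define NVREG_IRQ_TX_ERR    0x0008
    #define NVREG_IRQ_TX_OK     0x0010
    #define NVREG_IRQ_TIMER     0x0020
    #define NVREG_IRQ_LINK      0x0040
    #define NVREG_IRQ_RX_FORCED 0x0080  /* was NVREG_IRQ_TX_ERROR */
    #define NVREG_IRQ_TX_FORCED 0x0100  /* was NVREG_IRQ_TX1 */

    #define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
    #define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
    #define NVREG_IRQ_OTHER  (NVREG_IRQ_TIMER|NVREG_IRQ_LINK)

    int main(void)
    {
        /* the groups are disjoint and together cover status bits 0..8 */
        assert((NVREG_IRQ_TX_ALL & NVREG_IRQ_RX_ALL) == 0);
        assert((NVREG_IRQ_TX_ALL & NVREG_IRQ_OTHER) == 0);
        assert((NVREG_IRQ_TX_ALL | NVREG_IRQ_RX_ALL | NVREG_IRQ_OTHER) == 0x01ff);
        return 0;
    }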
@@ -185,6 +194,10 @@ enum {
 	NvRegPollingInterval = 0x00c,
 #define NVREG_POLL_DEFAULT_THROUGHPUT	970
 #define NVREG_POLL_DEFAULT_CPU	13
+	NvRegMSIMap0 = 0x020,
+	NvRegMSIMap1 = 0x024,
+	NvRegMSIIrqMask = 0x030,
+#define NVREG_MSI_VECTOR_0_ENABLED 0x01
 	NvRegMisc1 = 0x080,
 #define NVREG_MISC1_HD		0x02
 #define NVREG_MISC1_FORCE	0x3b0f3c
@@ -254,6 +267,10 @@ enum {
 #define NVREG_TXRXCTL_DESC_1	0
 #define NVREG_TXRXCTL_DESC_2	0x02100
 #define NVREG_TXRXCTL_DESC_3	0x02200
+#define NVREG_TXRXCTL_VLANSTRIP 0x00040
+#define NVREG_TXRXCTL_VLANINS	0x00080
+	NvRegTxRingPhysAddrHigh = 0x148,
+	NvRegRxRingPhysAddrHigh = 0x14C,
 	NvRegMIIStatus = 0x180,
 #define NVREG_MIISTAT_ERROR		0x0001
 #define NVREG_MIISTAT_LINKCHANGE	0x0008
@@ -303,6 +320,11 @@ enum {
 #define NVREG_POWERSTATE_D1	0x0001
 #define NVREG_POWERSTATE_D2	0x0002
 #define NVREG_POWERSTATE_D3	0x0003
+	NvRegVlanControl = 0x300,
+#define NVREG_VLANCONTROL_ENABLE	0x2000
+	NvRegMSIXMap0 = 0x3e0,
+	NvRegMSIXMap1 = 0x3e4,
+	NvRegMSIXIrqStatus = 0x3f0,
 };
 
 /* Big endian: should work, but is untested */
@@ -314,7 +336,7 @@ struct ring_desc {
 struct ring_desc_ex {
 	u32 PacketBufferHigh;
 	u32 PacketBufferLow;
-	u32 Reserved;
+	u32 TxVlan;
 	u32 FlagLen;
 };
 
@@ -355,6 +377,8 @@ typedef union _ring_type {
 #define NV_TX2_CHECKSUM_L3	(1<<27)
 #define NV_TX2_CHECKSUM_L4	(1<<26)
 
+#define NV_TX3_VLAN_TAG_PRESENT (1<<18)
+
 #define NV_RX_DESCRIPTORVALID	(1<<16)
 #define NV_RX_MISSEDFRAME	(1<<17)
 #define NV_RX_SUBSTRACT1	(1<<18)
@@ -385,6 +409,9 @@ typedef union _ring_type {
 #define NV_RX2_ERROR		(1<<30)
 #define NV_RX2_AVAIL		(1<<31)
 
+#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
+#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
+
 /* Miscelaneous hardware related defines: */
 #define NV_PCI_REGSZ		0x270
 
@@ -475,6 +502,18 @@ typedef union _ring_type {
 #define LPA_1000FULL	0x0800
 #define LPA_1000HALF	0x0400
 
+/* MSI/MSI-X defines */
+#define NV_MSI_X_MAX_VECTORS  8
+#define NV_MSI_X_VECTORS_MASK 0x000f
+#define NV_MSI_CAPABLE        0x0010
+#define NV_MSI_X_CAPABLE      0x0020
+#define NV_MSI_ENABLED        0x0040
+#define NV_MSI_X_ENABLED      0x0080
+
+#define NV_MSI_X_VECTOR_ALL   0x0
+#define NV_MSI_X_VECTOR_RX    0x0
+#define NV_MSI_X_VECTOR_TX    0x1
+#define NV_MSI_X_VECTOR_OTHER 0x2
 
 /*
  * SMP locking:
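np->msi_flags packs two things behind these masks: the capability/enabled bits above, and in the low nibble (NV_MSI_X_VECTORS_MASK) the number of MSI-X vectors to request; nv_probe later in this patch sets that nibble to 0x0003 in throughput mode (rx, tx, other) and 0x0001 in CPU mode. A minimal sketch of reading the layout:

    #include <stdio.h>

    #define NV_MSI_X_VECTORS_MASK 0x000f
    #define NV_MSI_X_CAPABLE      0x0020
    #define NV_MSI_X_ENABLED      0x0080

    int main(void)
    {
        /* what nv_probe would produce for a throughput-mode MSI-X device */
        unsigned int msi_flags = NV_MSI_X_CAPABLE | 0x0003;

        printf("vectors requested: %u\n", msi_flags & NV_MSI_X_VECTORS_MASK); /* 3 */
        printf("msi-x capable: %u\n", !!(msi_flags & NV_MSI_X_CAPABLE));      /* 1 */
        printf("msi-x enabled: %u\n", !!(msi_flags & NV_MSI_X_ENABLED));      /* 0 until nv_open */
        return 0;
    }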
@@ -511,6 +550,7 @@ struct fe_priv {
 	u32 irqmask;
 	u32 desc_ver;
 	u32 txrxctl_bits;
+	u32 vlanctl_bits;
 
 	void __iomem *base;
 
@@ -525,6 +565,7 @@ struct fe_priv {
 	unsigned int pkt_limit;
 	struct timer_list oom_kick;
 	struct timer_list nic_poll;
+	u32 nic_poll_irq;
 
 	/* media detection workaround.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
@@ -540,6 +581,13 @@ struct fe_priv {
 	dma_addr_t tx_dma[TX_RING];
 	unsigned int tx_dma_len[TX_RING];
 	u32 tx_flags;
+
+	/* vlan fields */
+	struct vlan_group *vlangrp;
+
+	/* msi/msi-x fields */
+	u32 msi_flags;
+	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
 };
 
 /*
@@ -567,6 +615,16 @@ static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
  */
 static int poll_interval = -1;
 
+/*
+ * Disable MSI interrupts
+ */
+static int disable_msi = 0;
+
+/*
+ * Disable MSIX interrupts
+ */
+static int disable_msix = 0;
+
 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
 {
 	return netdev_priv(dev);
@@ -612,6 +670,33 @@ static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
 	return 0;
 }
 
+#define NV_SETUP_RX_RING 0x01
+#define NV_SETUP_TX_RING 0x02
+
+static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		if (rxtx_flags & NV_SETUP_RX_RING) {
+			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
+		}
+		if (rxtx_flags & NV_SETUP_TX_RING) {
+			writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+		}
+	} else {
+		if (rxtx_flags & NV_SETUP_RX_RING) {
+			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
+			writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
+		}
+		if (rxtx_flags & NV_SETUP_TX_RING) {
+			writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+			writel((u32) (cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
+		}
+	}
+}
+
 #define MII_READ	(-1)
 /* mii_rw: read/write a register on the PHY.
  *
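setup_hw_rings() centralizes the ring-base writes that were previously open-coded in three places; for the 64-bit descriptor format the base address is split across the new ...PhysAddrHigh registers. A minimal model of that split, using plain shifts where the driver routes the value through cpu_to_le64 (the same result on a little-endian host):

    #include <assert.h>
    #include <stdint.h>

    static void split_ring_addr(uint64_t ring_addr, uint32_t *lo, uint32_t *hi)
    {
        *lo = (uint32_t)ring_addr;         /* -> NvRegRxRingPhysAddr */
        *hi = (uint32_t)(ring_addr >> 32); /* -> NvRegRxRingPhysAddrHigh */
    }

    int main(void)
    {
        uint32_t lo, hi;

        split_ring_addr(0x0000000123456789ULL, &lo, &hi);
        assert(lo == 0x23456789 && hi == 0x00000001);
        return 0;
    }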
@@ -903,14 +988,27 @@ static void nv_do_rx_refill(unsigned long data)
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 
-	disable_irq(dev->irq);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+	} else {
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+	}
 	if (nv_alloc_rx(dev)) {
 		spin_lock(&np->lock);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
 		spin_unlock(&np->lock);
 	}
-	enable_irq(dev->irq);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		enable_irq(dev->irq);
+	} else {
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+	}
 }
 
 static void nv_init_rx(struct net_device *dev)
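The guard around disable_irq()/enable_irq() recurs below in nv_change_mtu() and nv_do_nic_poll(): take the single-interrupt path when MSI-X is off, or on but with only one vector allocated. A hypothetical helper (not part of the patch) that spells the predicate out and checks the three cases:

    #include <assert.h>

    #define NV_MSI_X_VECTORS_MASK 0x000f
    #define NV_MSI_X_ENABLED      0x0080

    /* hypothetical helper, equivalent to the repeated condition in the patch */
    static int nv_single_irq(unsigned int msi_flags)
    {
        return !(msi_flags & NV_MSI_X_ENABLED) ||
               (msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1;
    }

    int main(void)
    {
        assert(nv_single_irq(0));                        /* legacy irq or plain MSI */
        assert(nv_single_irq(NV_MSI_X_ENABLED | 0x1));   /* one MSI-X vector (cpu mode) */
        assert(!nv_single_irq(NV_MSI_X_ENABLED | 0x3));  /* rx/tx/other vectors */
        return 0;
    }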
@@ -965,7 +1063,7 @@ static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
 	}
 
 	if (np->tx_skbuff[skbnr]) {
-		dev_kfree_skb_irq(np->tx_skbuff[skbnr]);
+		dev_kfree_skb_any(np->tx_skbuff[skbnr]);
 		np->tx_skbuff[skbnr] = NULL;
 		return 1;
 	} else {
@@ -1031,6 +1129,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u32 bcnt;
 	u32 size = skb->len-skb->data_len;
 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+	u32 tx_flags_vlan = 0;
 
 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
@@ -1111,10 +1210,16 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif
 	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
 
+	/* vlan tag */
+	if (np->vlangrp && vlan_tx_tag_present(skb)) {
+		tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
+	}
+
 	/* set tx flags */
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 		np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	} else {
+		np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
 		np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	}
 
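For the third descriptor format the new TxVlan word is simply the tag from vlan_tx_tag_get() with bit 18 set, e.g.:

    #include <assert.h>
    #include <stdint.h>

    #define NV_TX3_VLAN_TAG_PRESENT (1u<<18)

    int main(void)
    {
        uint16_t vlan_tag = 100;  /* example VID, as returned by vlan_tx_tag_get() */
        uint32_t tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tag;

        assert(tx_flags_vlan == 0x00040064);  /* bit 18 set, tag in the low bits */
        return 0;
    }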
@@ -1209,9 +1314,14 @@ static void nv_tx_timeout(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
+	u32 status;
+
+	if (np->msi_flags & NV_MSI_X_ENABLED)
+		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+	else
+		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
 
-	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
-			readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
+	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
 
 	{
 		int i;
@@ -1273,10 +1383,7 @@ static void nv_tx_timeout(struct net_device *dev)
 		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
 		nv_drain_tx(dev);
 		np->next_tx = np->nic_tx = 0;
-		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
-		else
-			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+		setup_hw_rings(dev, NV_SETUP_TX_RING);
 		netif_wake_queue(dev);
 	}
 
@@ -1342,6 +1449,8 @@ static void nv_rx_process(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u32 Flags;
+	u32 vlanflags = 0;
+
 
 	for (;;) {
 		struct sk_buff *skb;
@@ -1357,6 +1466,7 @@ static void nv_rx_process(struct net_device *dev)
 		} else {
 			Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
 			len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
+			vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
 		}
 
 		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
@@ -1474,7 +1584,11 @@ static void nv_rx_process(struct net_device *dev)
 		skb->protocol = eth_type_trans(skb, dev);
 		dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
 					dev->name, np->cur_rx, len, skb->protocol);
-		netif_rx(skb);
+		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
+			vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
+		} else {
+			netif_rx(skb);
+		}
 		dev->last_rx = jiffies;
 		np->stats.rx_packets++;
 		np->stats.rx_bytes += len;
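On receive the equivalent word comes back in PacketBufferLow: bit 16 flags a hardware-stripped tag and the low 16 bits carry it, which is what gets handed to vlan_hwaccel_rx(). A standalone sketch:

    #include <assert.h>
    #include <stdint.h>

    #define NV_RX3_VLAN_TAG_PRESENT (1u<<16)
    #define NV_RX3_VLAN_TAG_MASK    (0x0000FFFFu)

    int main(void)
    {
        /* example word as read from rx_ring.ex[i].PacketBufferLow */
        uint32_t vlanflags = NV_RX3_VLAN_TAG_PRESENT | 100;

        if (vlanflags & NV_RX3_VLAN_TAG_PRESENT)
            assert((vlanflags & NV_RX3_VLAN_TAG_MASK) == 100);
        return 0;
    }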
@@ -1523,7 +1637,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 	 * guessed, there is probably a simpler approach.
 	 * Changing the MTU is a rare event, it shouldn't matter.
 	 */
-	disable_irq(dev->irq);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+	} else {
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
 	spin_lock_bh(&dev->xmit_lock);
 	spin_lock(&np->lock);
 	/* stop engines */
@@ -1544,11 +1666,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		}
 		/* reinit nic view of the rx queue */
 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
-		writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
-		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
-		else
-			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 		writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
 			base + NvRegRingSizes);
 		pci_push(base);
@@ -1560,7 +1678,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		nv_start_tx(dev);
 		spin_unlock(&np->lock);
 		spin_unlock_bh(&dev->xmit_lock);
-		enable_irq(dev->irq);
+		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+		    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+			enable_irq(dev->irq);
+		} else {
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+		}
 	}
 	return 0;
 }
@@ -1866,8 +1992,13 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
 
 	for (i=0; ; i++) {
-		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
-		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
+			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+		} else {
+			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+		}
 		pci_push(base);
 		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
 		if (!(events & np->irqmask))
@@ -1907,11 +2038,16 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 		if (i > max_interrupt_work) {
 			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
-			writel(0, base + NvRegIrqMask);
+			if (!(np->msi_flags & NV_MSI_X_ENABLED))
+				writel(0, base + NvRegIrqMask);
+			else
+				writel(np->irqmask, base + NvRegIrqMask);
 			pci_push(base);
 
-			if (!np->in_shutdown)
+			if (!np->in_shutdown) {
+				np->nic_poll_irq = np->irqmask;
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
 			spin_unlock(&np->lock);
 			break;
@@ -1923,22 +2059,212 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 	return IRQ_RETVAL(i);
 }
 
+static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 events;
+	int i;
+
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
+
+	for (i=0; ; i++) {
+		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
+		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
+		pci_push(base);
+		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
+		if (!(events & np->irqmask))
+			break;
+
+		spin_lock(&np->lock);
+		nv_tx_done(dev);
+		spin_unlock(&np->lock);
+
+		if (events & (NVREG_IRQ_TX_ERR)) {
+			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
+						dev->name, events);
+		}
+		if (i > max_interrupt_work) {
+			spin_lock(&np->lock);
+			/* disable interrupts on the nic */
+			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
+			pci_push(base);
+
+			if (!np->in_shutdown) {
+				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
+				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
+			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
+			spin_unlock(&np->lock);
+			break;
+		}
+
+	}
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
+
+	return IRQ_RETVAL(i);
+}
+
+static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 events;
+	int i;
+
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
+
+	for (i=0; ; i++) {
+		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
+		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
+		pci_push(base);
+		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
+		if (!(events & np->irqmask))
+			break;
+
+		nv_rx_process(dev);
+		if (nv_alloc_rx(dev)) {
+			spin_lock(&np->lock);
+			if (!np->in_shutdown)
+				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+			spin_unlock(&np->lock);
+		}
+
+		if (i > max_interrupt_work) {
+			spin_lock(&np->lock);
+			/* disable interrupts on the nic */
+			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+			pci_push(base);
+
+			if (!np->in_shutdown) {
+				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
+				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
+			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
+			spin_unlock(&np->lock);
+			break;
+		}
+
+	}
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
+
+	return IRQ_RETVAL(i);
+}
+
+static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 events;
+	int i;
+
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
+
+	for (i=0; ; i++) {
+		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
+		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
+		pci_push(base);
+		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
+		if (!(events & np->irqmask))
+			break;
+
+		if (events & NVREG_IRQ_LINK) {
+			spin_lock(&np->lock);
+			nv_link_irq(dev);
+			spin_unlock(&np->lock);
+		}
+		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
+			spin_lock(&np->lock);
+			nv_linkchange(dev);
+			spin_unlock(&np->lock);
+			np->link_timeout = jiffies + LINK_TIMEOUT;
+		}
+		if (events & (NVREG_IRQ_UNKNOWN)) {
+			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
+						dev->name, events);
+		}
+		if (i > max_interrupt_work) {
+			spin_lock(&np->lock);
+			/* disable interrupts on the nic */
+			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
+			pci_push(base);
+
+			if (!np->in_shutdown) {
+				np->nic_poll_irq |= NVREG_IRQ_OTHER;
+				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
+			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
+			spin_unlock(&np->lock);
+			break;
+		}
+
+	}
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
+
+	return IRQ_RETVAL(i);
+}
+
 static void nv_do_nic_poll(unsigned long data)
 {
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
+	u32 mask = 0;
 
-	disable_irq(dev->irq);
-	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
 	/*
+	 * First disable irq(s) and then
 	 * reenable interrupts on the nic, we have to do this before calling
 	 * nv_nic_irq because that may decide to do otherwise
 	 */
-	writel(np->irqmask, base + NvRegIrqMask);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+		mask = np->irqmask;
+	} else {
+		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+			mask |= NVREG_IRQ_RX_ALL;
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+			mask |= NVREG_IRQ_TX_ALL;
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+			mask |= NVREG_IRQ_OTHER;
+		}
+	}
+	np->nic_poll_irq = 0;
+
+	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
+
+	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
-	nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
-	enable_irq(dev->irq);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
+		enable_irq(dev->irq);
+	} else {
+		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
+			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
+			nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
+			nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+		}
+	}
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
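Each of the new per-vector handlers reads NvRegMSIXIrqStatus masked to its own group and writes only that group back, so pending bits belonging to the other vectors are left untouched. Assuming the status register is write-one-to-clear, which is what the pre-existing acknowledge in nv_nic_irq implies, a standalone model of that isolation:

    #include <assert.h>
    #include <stdint.h>

    #define NVREG_IRQ_TX_ALL 0x0118  /* per the defines added earlier */
    #define NVREG_IRQ_RX_ALL 0x0087

    int main(void)
    {
        /* simulated NvRegMSIXIrqStatus with rx and tx events pending */
        uint32_t status = NVREG_IRQ_TX_ALL | NVREG_IRQ_RX_ALL;

        /* the tx handler reads and acknowledges only its own group ... */
        uint32_t events = status & NVREG_IRQ_TX_ALL;
        status &= ~NVREG_IRQ_TX_ALL;  /* write-one-to-clear of the tx bits */

        /* ... leaving the rx group pending for the rx vector's handler */
        assert(events == NVREG_IRQ_TX_ALL);
        assert(status == NVREG_IRQ_RX_ALL);
        return 0;
    }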
@@ -2217,11 +2543,66 @@ static struct ethtool_ops ops = {
 	.get_perm_addr = ethtool_op_get_perm_addr,
 };
 
+static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	spin_lock_irq(&np->lock);
+
+	/* save vlan group */
+	np->vlangrp = grp;
+
+	if (grp) {
+		/* enable vlan on MAC */
+		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
+	} else {
+		/* disable vlan on MAC */
+		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
+		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
+	}
+
+	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+
+	spin_unlock_irq(&np->lock);
+};
+
+static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+	/* nothing to do */
+};
+
+static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
+{
+	u8 __iomem *base = get_hwbase(dev);
+	int i;
+	u32 msixmap = 0;
+
+	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
+	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
+	 * the remaining 8 interrupts.
+	 */
+	for (i = 0; i < 8; i++) {
+		if ((irqmask >> i) & 0x1) {
+			msixmap |= vector << (i << 2);
+		}
+	}
+	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
+
+	msixmap = 0;
+	for (i = 0; i < 8; i++) {
+		if ((irqmask >> (i + 8)) & 0x1) {
+			msixmap |= vector << (i << 2);
+		}
+	}
+	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
+}
+
 static int nv_open(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-	int ret, oom, i;
+	int ret = 1;
+	int oom, i;
 
 	dprintk(KERN_DEBUG "nv_open: begin\n");
 
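set_msix_vector_map() packs a 4-bit vector id per interrupt bit, eight nibbles per map register (status bits 0-7 in MSIXMap0, bits 8-15 in MSIXMap1). A standalone replica of that loop with a worked example, routing the tx group (NVREG_IRQ_TX_ALL = 0x0118, i.e. bits 3, 4 and 8, per the defines added earlier) to vector 1:

    #include <assert.h>
    #include <stdint.h>

    static void msix_map(uint32_t vector, uint32_t irqmask,
                         uint32_t *map0, uint32_t *map1)
    {
        int i;

        *map0 = *map1 = 0;
        for (i = 0; i < 8; i++)
            if ((irqmask >> i) & 0x1)
                *map0 |= vector << (i << 2);      /* nibble i of Map0 */
        for (i = 0; i < 8; i++)
            if ((irqmask >> (i + 8)) & 0x1)
                *map1 |= vector << (i << 2);      /* nibble i of Map1 */
    }

    int main(void)
    {
        uint32_t map0, map1;

        msix_map(0x1, 0x0118, &map0, &map1);
        assert(map0 == 0x00011000);  /* nibbles 3 and 4 hold vector 1 */
        assert(map1 == 0x00000001);  /* nibble 0 (status bit 8) holds vector 1 */
        return 0;
    }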
@@ -2253,11 +2634,7 @@ static int nv_open(struct net_device *dev)
 	nv_copy_mac_to_hw(dev);
 
 	/* 4) give hw rings */
-	writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
-	else
-		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 	writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
 		base + NvRegRingSizes);
 
@@ -2265,6 +2642,7 @@ static int nv_open(struct net_device *dev)
 	writel(np->linkspeed, base + NvRegLinkSpeed);
 	writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
 	writel(np->txrxctl_bits, base + NvRegTxRxControl);
+	writel(np->vlanctl_bits, base + NvRegVlanControl);
 	pci_push(base);
 	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
 	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
@@ -2315,9 +2693,77 @@ static int nv_open(struct net_device *dev)
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	pci_push(base);
 
-	ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev);
-	if (ret)
-		goto out_drain;
+	if (np->msi_flags & NV_MSI_X_CAPABLE) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			np->msi_x_entry[i].entry = i;
+		}
+		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
+			np->msi_flags |= NV_MSI_X_ENABLED;
+			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
+				/* Request irq for rx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_drain;
+				}
+				/* Request irq for tx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_drain;
+				}
+				/* Request irq for link and timer handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_drain;
+				}
+
+				/* map interrupts to their respective vector */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
+			} else {
+				/* Request irq for all interrupts */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_drain;
+				}
+
+				/* map interrupts to vector 0 */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+			}
+		}
+	}
+	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+			np->msi_flags |= NV_MSI_ENABLED;
+			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+				pci_disable_msi(np->pci_dev);
+				np->msi_flags &= ~NV_MSI_ENABLED;
+				goto out_drain;
+			}
+
+			/* map interrupts to vector 0 */
+			writel(0, base + NvRegMSIMap0);
+			writel(0, base + NvRegMSIMap1);
+			/* enable msi vector 0 */
+			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+		}
+	}
+	if (ret != 0) {
+		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
+			goto out_drain;
+	}
 
 	/* ask for interrupts */
 	writel(np->irqmask, base + NvRegIrqMask);
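Initializing ret to 1 in nv_open() is what drives the fallback order: MSI-X first (three vectors in throughput mode, one otherwise), then plain MSI, then the legacy shared interrupt. A toy model of just that control flow, where try_msix/try_msi stand in for the pci_enable_msix()/pci_enable_msi() return values:

    #include <stdio.h>

    static const char *pick_irq_mode(int try_msix, int try_msi)
    {
        int ret = 1;  /* mirrors "int ret = 1;" in nv_open */

        if (try_msix == 0)
            ret = 0;                     /* MSI-X enabled, handlers requested */
        if (ret != 0 && try_msi == 0)
            ret = 0;                     /* fall back to a single MSI */
        if (ret != 0)
            return "legacy shared irq";  /* final request_irq(np->pci_dev->irq, ...) */
        return try_msix == 0 ? "msi-x" : "msi";
    }

    int main(void)
    {
        printf("%s\n", pick_irq_mode(0, -1));   /* msi-x */
        printf("%s\n", pick_irq_mode(-1, 0));   /* msi */
        printf("%s\n", pick_irq_mode(-1, -1));  /* legacy shared irq */
        return 0;
    }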
@@ -2364,6 +2810,7 @@ static int nv_close(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base;
+	int i;
 
 	spin_lock_irq(&np->lock);
 	np->in_shutdown = 1;
@@ -2381,13 +2828,31 @@ static int nv_close(struct net_device *dev)
 
 	/* disable interrupts on the nic or we will lock up */
 	base = get_hwbase(dev);
-	writel(0, base + NvRegIrqMask);
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		writel(np->irqmask, base + NvRegIrqMask);
+	} else {
+		if (np->msi_flags & NV_MSI_ENABLED)
+			writel(0, base + NvRegMSIIrqMask);
+		writel(0, base + NvRegIrqMask);
+	}
 	pci_push(base);
 	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
 
 	spin_unlock_irq(&np->lock);
 
-	free_irq(dev->irq, dev);
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			free_irq(np->msi_x_entry[i].vector, dev);
+		}
+		pci_disable_msix(np->pci_dev);
+		np->msi_flags &= ~NV_MSI_X_ENABLED;
+	} else {
+		free_irq(np->pci_dev->irq, dev);
+		if (np->msi_flags & NV_MSI_ENABLED) {
+			pci_disable_msi(np->pci_dev);
+			np->msi_flags &= ~NV_MSI_ENABLED;
+		}
+	}
 
 	drain_ring(dev);
 
@@ -2471,7 +2936,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 			printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
 					pci_name(pci_dev));
 		} else {
-			dev->features |= NETIF_F_HIGHDMA;
+			if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
+				printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
+					pci_name(pci_dev));
+				goto out_relreg;
+			} else {
+				dev->features |= NETIF_F_HIGHDMA;
+				printk(KERN_INFO "forcedeth: using HIGHDMA\n");
+			}
 		}
 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
@@ -2496,6 +2968,22 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 #endif
 	}
 
+	np->vlanctl_bits = 0;
+	if (id->driver_data & DEV_HAS_VLAN) {
+		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
+		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
+		dev->vlan_rx_register = nv_vlan_rx_register;
+		dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
+	}
+
+	np->msi_flags = 0;
+	if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) {
+		np->msi_flags |= NV_MSI_CAPABLE;
+	}
+	if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) {
+		np->msi_flags |= NV_MSI_X_CAPABLE;
+	}
+
 	err = -ENOMEM;
 	np->base = ioremap(addr, NV_PCI_REGSZ);
 	if (!np->base)
@@ -2578,10 +3066,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	} else {
 		np->tx_flags = NV_TX2_VALID;
 	}
-	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
+	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
 		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
-	else
+		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
+			np->msi_flags |= 0x0003;
+	} else {
 		np->irqmask = NVREG_IRQMASK_CPU;
+		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
+			np->msi_flags |= 0x0001;
+	}
 
 	if (id->driver_data & DEV_NEED_TIMERIRQ)
 		np->irqmask |= NVREG_IRQ_TIMER;
@@ -2737,11 +3230,11 @@ static struct pci_device_id pci_tbl[] = {
 	},
 	{ /* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X,
 	},
 	{ /* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X,
 	},
 	{0,},
 };
@@ -2771,6 +3264,10 @@ module_param(optimization_mode, int, 0);
 MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
 module_param(poll_interval, int, 0);
 MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
+module_param(disable_msi, int, 0);
+MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1.");
+module_param(disable_msix, int, 0);
+MODULE_PARM_DESC(disable_msix, "Disable MSIX interrupts by setting to 1.");
 
 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
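Per the poll_interval description above, one register unit equals 2^10/100 = 10.24 microseconds, so for example the throughput default of 970 (NVREG_POLL_DEFAULT_THROUGHPUT) works out to roughly 9.9 ms:

    #include <stdio.h>

    int main(void)
    {
        unsigned int reg = 970;  /* NVREG_POLL_DEFAULT_THROUGHPUT */

        /* invert reg = us * 100 / 2^10  ->  us = reg * 1024 / 100 */
        printf("%u units -> %u us\n", reg, reg * 1024 / 100);  /* 9932 us */
        return 0;
    }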
