author | Ayaz Abdulla <aabdulla@nvidia.com> | 2006-02-04 13:13:31 -0500
committer | Jeff Garzik <jgarzik@pobox.com> | 2006-02-20 05:59:18 -0500
commit | d33a73c81241e3d9ab8da2d0558429bdd5b4ef9a (patch)
tree | 7e8054ebf23f4924f6bf5b5f678bfb2e5e6965dd /drivers/net/forcedeth.c
parent | 0832b25a75d128e4f9724156380ba071c4f3f20d (diff)
[PATCH] forcedeth: Add support for MSI/MSIX
This forcedeth patch adds support for MSI/MSIX interrupts.
Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r-- | drivers/net/forcedeth.c | 467
1 file changed, 436 insertions(+), 31 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 870613bf3fd6..e7fc28b07e5a 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -104,6 +104,7 @@
  *	0.49: 10 Dec 2005: Fix tso for large buffers.
  *	0.50: 20 Jan 2006: Add 8021pq tagging support.
  *	0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
+ *	0.52: 20 Jan 2006: Add MSI/MSIX support.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -115,7 +116,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION		"0.51"
+#define FORCEDETH_VERSION		"0.52"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -156,6 +157,8 @@
 #define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
 #define DEV_HAS_CHECKSUM	0x0010	/* device supports tx and rx checksum offloads */
 #define DEV_HAS_VLAN		0x0020	/* device supports vlan tagging and striping */
+#define DEV_HAS_MSI		0x0040	/* device supports MSI */
+#define DEV_HAS_MSI_X		0x0080	/* device supports MSI-X */
 
 enum {
 	NvRegIrqStatus = 0x000,
@@ -169,14 +172,17 @@ enum {
 #define NVREG_IRQ_TX_OK			0x0010
 #define NVREG_IRQ_TIMER			0x0020
 #define NVREG_IRQ_LINK			0x0040
-#define NVREG_IRQ_TX_ERROR		0x0080
-#define NVREG_IRQ_TX1			0x0100
+#define NVREG_IRQ_RX_FORCED		0x0080
+#define NVREG_IRQ_TX_FORCED		0x0100
 #define NVREG_IRQMASK_THROUGHPUT	0x00df
 #define NVREG_IRQMASK_CPU		0x0040
+#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
+#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
+#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK)
 
 #define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
-					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \
-					NVREG_IRQ_TX1))
+					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
+					NVREG_IRQ_TX_FORCED))
 
 	NvRegUnknownSetupReg6 = 0x008,
 #define NVREG_UNKSETUP6_VAL	3
@@ -188,6 +194,10 @@ enum {
 	NvRegPollingInterval = 0x00c,
 #define NVREG_POLL_DEFAULT_THROUGHPUT	970
 #define NVREG_POLL_DEFAULT_CPU	13
+	NvRegMSIMap0 = 0x020,
+	NvRegMSIMap1 = 0x024,
+	NvRegMSIIrqMask = 0x030,
+#define NVREG_MSI_VECTOR_0_ENABLED 0x01
 	NvRegMisc1 = 0x080,
 #define NVREG_MISC1_HD		0x02
 #define NVREG_MISC1_FORCE	0x3b0f3c
@@ -312,6 +322,9 @@ enum {
 #define NVREG_POWERSTATE_D3	0x0003
 	NvRegVlanControl = 0x300,
 #define NVREG_VLANCONTROL_ENABLE	0x2000
+	NvRegMSIXMap0 = 0x3e0,
+	NvRegMSIXMap1 = 0x3e4,
+	NvRegMSIXIrqStatus = 0x3f0,
 };
 
 /* Big endian: should work, but is untested */
@@ -489,6 +502,18 @@ typedef union _ring_type {
 #define LPA_1000FULL	0x0800
 #define LPA_1000HALF	0x0400
 
+/* MSI/MSI-X defines */
+#define NV_MSI_X_MAX_VECTORS	8
+#define NV_MSI_X_VECTORS_MASK	0x000f
+#define NV_MSI_CAPABLE		0x0010
+#define NV_MSI_X_CAPABLE	0x0020
+#define NV_MSI_ENABLED		0x0040
+#define NV_MSI_X_ENABLED	0x0080
+
+#define NV_MSI_X_VECTOR_ALL	0x0
+#define NV_MSI_X_VECTOR_RX	0x0
+#define NV_MSI_X_VECTOR_TX	0x1
+#define NV_MSI_X_VECTOR_OTHER	0x2
 
 /*
  * SMP locking:
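
For orientation, the flags above all share one u32 field (np->msi_flags, added to struct fe_priv below): the low four bits (NV_MSI_X_VECTORS_MASK) hold the requested MSI-X vector count, and the higher bits record capability and enable state. A minimal illustrative sketch, not driver source; the values come from the defines above and from the nv_probe() changes later in this patch:

	u32 msi_flags = 0;

	/* nv_probe(): device advertises MSI-X and the user did not disable it */
	msi_flags |= NV_MSI_X_CAPABLE;
	/* throughput mode splits rx/tx/other across 3 vectors */
	msi_flags |= 0x0003;

	/* nv_open(): pci_enable_msix() succeeded */
	msi_flags |= NV_MSI_X_ENABLED;

	/* vector count handed to pci_enable_msix() */
	int nvec = msi_flags & NV_MSI_X_VECTORS_MASK;	/* == 3 */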
@@ -540,6 +565,7 @@ struct fe_priv {
 	unsigned int pkt_limit;
 	struct timer_list oom_kick;
 	struct timer_list nic_poll;
+	u32 nic_poll_irq;
 
 	/* media detection workaround.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
@@ -558,6 +584,10 @@ struct fe_priv {
 
 	/* vlan fields */
 	struct vlan_group *vlangrp;
+
+	/* msi/msi-x fields */
+	u32 msi_flags;
+	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
 };
 
 /*
@@ -585,6 +615,16 @@ static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
  */
 static int poll_interval = -1;
 
+/*
+ * Disable MSI interrupts
+ */
+static int disable_msi = 0;
+
+/*
+ * Disable MSIX interrupts
+ */
+static int disable_msix = 0;
+
 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
 {
 	return netdev_priv(dev);
@@ -948,14 +988,27 @@ static void nv_do_rx_refill(unsigned long data)
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 
-	disable_irq(dev->irq);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+	} else {
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+	}
 	if (nv_alloc_rx(dev)) {
 		spin_lock(&np->lock);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
 		spin_unlock(&np->lock);
 	}
-	enable_irq(dev->irq);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		enable_irq(dev->irq);
+	} else {
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+	}
 }
 
 static void nv_init_rx(struct net_device *dev)
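
The guard introduced above recurs in nv_change_mtu() and nv_do_nic_poll() below, and it reduces to "the device is serviced by a single interrupt line": either MSI-X is off, or MSI-X is on with only one vector allocated. A hypothetical helper expressing the same predicate, shown only to make the repeated condition easier to read (it is not part of the patch):

	/* Hypothetical: equivalent to the repeated condition above,
	 * since !A || (A && B) simplifies to !A || B. */
	static int nv_using_single_irq(struct fe_priv *np)
	{
		return !(np->msi_flags & NV_MSI_X_ENABLED) ||
		       ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1);
	}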
@@ -1010,7 +1063,7 @@ static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
 	}
 
 	if (np->tx_skbuff[skbnr]) {
-		dev_kfree_skb_irq(np->tx_skbuff[skbnr]);
+		dev_kfree_skb_any(np->tx_skbuff[skbnr]);
 		np->tx_skbuff[skbnr] = NULL;
 		return 1;
 	} else {
@@ -1261,9 +1314,14 @@ static void nv_tx_timeout(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
+	u32 status;
+
+	if (np->msi_flags & NV_MSI_X_ENABLED)
+		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+	else
+		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
 
-	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
-			readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
+	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
 
 	{
 		int i;
@@ -1579,7 +1637,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 	 * guessed, there is probably a simpler approach.
 	 * Changing the MTU is a rare event, it shouldn't matter.
 	 */
-	disable_irq(dev->irq);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+	} else {
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
 	spin_lock_bh(&dev->xmit_lock);
 	spin_lock(&np->lock);
 	/* stop engines */
@@ -1612,7 +1678,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		nv_start_tx(dev);
 		spin_unlock(&np->lock);
 		spin_unlock_bh(&dev->xmit_lock);
-		enable_irq(dev->irq);
+		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+		    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+			enable_irq(dev->irq);
+		} else {
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+		}
 	}
 	return 0;
 }
@@ -1918,8 +1992,13 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
 
 	for (i=0; ; i++) {
-		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
-		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
+			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+		} else {
+			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+		}
 		pci_push(base);
 		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
 		if (!(events & np->irqmask))
@@ -1959,11 +2038,16 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 		if (i > max_interrupt_work) {
 			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
-			writel(0, base + NvRegIrqMask);
+			if (!(np->msi_flags & NV_MSI_X_ENABLED))
+				writel(0, base + NvRegIrqMask);
+			else
+				writel(np->irqmask, base + NvRegIrqMask);
 			pci_push(base);
 
-			if (!np->in_shutdown)
+			if (!np->in_shutdown) {
+				np->nic_poll_irq = np->irqmask;
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
 			spin_unlock(&np->lock);
 			break;
@@ -1975,22 +2059,212 @@
 	return IRQ_RETVAL(i);
 }
 
+static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 events;
+	int i;
+
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
+
+	for (i=0; ; i++) {
+		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
+		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
+		pci_push(base);
+		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
+		if (!(events & np->irqmask))
+			break;
+
+		spin_lock(&np->lock);
+		nv_tx_done(dev);
+		spin_unlock(&np->lock);
+
+		if (events & (NVREG_IRQ_TX_ERR)) {
+			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
+						dev->name, events);
+		}
+		if (i > max_interrupt_work) {
+			spin_lock(&np->lock);
+			/* disable interrupts on the nic */
+			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
+			pci_push(base);
+
+			if (!np->in_shutdown) {
+				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
+				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
+			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
+			spin_unlock(&np->lock);
+			break;
+		}
+
+	}
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
+
+	return IRQ_RETVAL(i);
+}
+
+static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 events;
+	int i;
+
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
+
+	for (i=0; ; i++) {
+		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
+		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
+		pci_push(base);
+		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
+		if (!(events & np->irqmask))
+			break;
+
+		nv_rx_process(dev);
+		if (nv_alloc_rx(dev)) {
+			spin_lock(&np->lock);
+			if (!np->in_shutdown)
+				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+			spin_unlock(&np->lock);
+		}
+
+		if (i > max_interrupt_work) {
+			spin_lock(&np->lock);
+			/* disable interrupts on the nic */
+			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+			pci_push(base);
+
+			if (!np->in_shutdown) {
+				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
+				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
+			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
+			spin_unlock(&np->lock);
+			break;
+		}
+
+	}
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
+
+	return IRQ_RETVAL(i);
+}
+
+static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 events;
+	int i;
+
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
+
+	for (i=0; ; i++) {
+		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
+		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
+		pci_push(base);
+		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
+		if (!(events & np->irqmask))
+			break;
+
+		if (events & NVREG_IRQ_LINK) {
+			spin_lock(&np->lock);
+			nv_link_irq(dev);
+			spin_unlock(&np->lock);
+		}
+		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
+			spin_lock(&np->lock);
+			nv_linkchange(dev);
+			spin_unlock(&np->lock);
+			np->link_timeout = jiffies + LINK_TIMEOUT;
+		}
+		if (events & (NVREG_IRQ_UNKNOWN)) {
+			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
+						dev->name, events);
+		}
+		if (i > max_interrupt_work) {
+			spin_lock(&np->lock);
+			/* disable interrupts on the nic */
+			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
+			pci_push(base);
+
+			if (!np->in_shutdown) {
+				np->nic_poll_irq |= NVREG_IRQ_OTHER;
+				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
+			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
+			spin_unlock(&np->lock);
+			break;
+		}
+
+	}
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
+
+	return IRQ_RETVAL(i);
+}
+
 static void nv_do_nic_poll(unsigned long data)
 {
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
+	u32 mask = 0;
 
-	disable_irq(dev->irq);
-	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
 	/*
+	 * First disable irq(s) and then
 	 * reenable interrupts on the nic, we have to do this before calling
 	 * nv_nic_irq because that may decide to do otherwise
 	 */
-	writel(np->irqmask, base + NvRegIrqMask);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+		mask = np->irqmask;
+	} else {
+		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+			mask |= NVREG_IRQ_RX_ALL;
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+			mask |= NVREG_IRQ_TX_ALL;
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+			mask |= NVREG_IRQ_OTHER;
+		}
+	}
+	np->nic_poll_irq = 0;
+
+	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
+
+	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
-	nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
-	enable_irq(dev->irq);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
+		enable_irq(dev->irq);
+	} else {
+		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
+			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
+			nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
+			nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+		}
+	}
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
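
The new per-vector handlers above pair with the reworked nv_do_nic_poll(): when a handler exceeds max_interrupt_work, it writes its interrupt group to NvRegIrqMask (commented "disable interrupts on the nic"), records the group in np->nic_poll_irq, and arms the nic_poll timer; the timer callback then disables the corresponding vector(s), writes the accumulated mask back, and calls the matching handler directly. A condensed outline of that round trip, comments only and not verbatim driver code:

	/* In nv_nic_irq_rx(), after too many iterations:
	 *	writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);	// mask the group
	 *	np->nic_poll_irq |= NVREG_IRQ_RX_ALL;		// remember it
	 *	mod_timer(&np->nic_poll, jiffies + POLL_WAIT);	// service later
	 *
	 * In nv_do_nic_poll(), when the timer fires:
	 *	disable_irq(rx vector);				// quiesce the line
	 *	writel(mask, base + NvRegIrqMask);		// re-enable on the nic
	 *	nv_nic_irq_rx(0, dev, NULL);			// drain pending work
	 *	enable_irq(rx vector);
	 */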
@@ -2297,11 +2571,38 @@ static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	/* nothing to do */
 };
 
+static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
+{
+	u8 __iomem *base = get_hwbase(dev);
+	int i;
+	u32 msixmap = 0;
+
+	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
+	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
+	 * the remaining 8 interrupts.
+	 */
+	for (i = 0; i < 8; i++) {
+		if ((irqmask >> i) & 0x1) {
+			msixmap |= vector << (i << 2);
+		}
+	}
+	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
+
+	msixmap = 0;
+	for (i = 0; i < 8; i++) {
+		if ((irqmask >> (i + 8)) & 0x1) {
+			msixmap |= vector << (i << 2);
+		}
+	}
+	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
+}
+
 static int nv_open(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-	int ret, oom, i;
+	int ret = 1;
+	int oom, i;
 
 	dprintk(KERN_DEBUG "nv_open: begin\n");
 
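
To make the nibble packing in set_msix_vector_map() concrete: each of the 16 interrupt status bits owns a 4-bit field, bits 0-7 in NvRegMSIXMap0 and bits 8-15 in NvRegMSIXMap1, and the field holds the vector number that should fire for that bit. A worked example for the TX vector, assuming NVREG_IRQ_TX_ERR is 0x0008 (its definition sits in an unchanged part of the file and is not visible in this diff):

	/* set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL)
	 * with NV_MSI_X_VECTOR_TX == 0x1:
	 *
	 *	NVREG_IRQ_TX_ALL = 0x0008 | 0x0010 | 0x0100 = 0x0118
	 *
	 * Low half (bits 3 and 4 set) -> NvRegMSIXMap0:
	 *	msixmap = (1 << (3 << 2)) | (1 << (4 << 2)) = 0x00011000
	 *
	 * High half (bit 8, i.e. TX_FORCED) -> NvRegMSIXMap1:
	 *	msixmap = 1 << (0 << 2) = 0x00000001
	 */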
@@ -2392,9 +2693,77 @@ static int nv_open(struct net_device *dev)
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	pci_push(base);
 
-	ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev);
-	if (ret)
-		goto out_drain;
+	if (np->msi_flags & NV_MSI_X_CAPABLE) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			np->msi_x_entry[i].entry = i;
+		}
+		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
+			np->msi_flags |= NV_MSI_X_ENABLED;
+			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
+				/* Request irq for rx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_drain;
+				}
+				/* Request irq for tx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_drain;
+				}
+				/* Request irq for link and timer handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_drain;
+				}
+
+				/* map interrupts to their respective vector */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
+			} else {
+				/* Request irq for all interrupts */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_drain;
+				}
+
+				/* map interrupts to vector 0 */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+			}
+		}
+	}
+	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+			np->msi_flags |= NV_MSI_ENABLED;
+			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+				pci_disable_msi(np->pci_dev);
+				np->msi_flags &= ~NV_MSI_ENABLED;
+				goto out_drain;
+			}
+
+			/* map interrupts to vector 0 */
+			writel(0, base + NvRegMSIMap0);
+			writel(0, base + NvRegMSIMap1);
+			/* enable msi vector 0 */
+			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+		}
+	}
+	if (ret != 0) {
+		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
+			goto out_drain;
+	}
 
 	/* ask for interrupts */
 	writel(np->irqmask, base + NvRegIrqMask);
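
The setup above is a three-step fallback ladder: ret starts at 1 ("nothing enabled yet"), MSI-X is tried first (three vectors in throughput mode, one otherwise), then plain MSI, and finally the legacy INTx line via np->pci_dev->irq. Stripped of error handling, the control flow is roughly the following sketch, not the patch text:

	int ret = 1;
	if (np->msi_flags & NV_MSI_X_CAPABLE) {
		/* on success: one handler per vector (or one shared
		 * handler), then program NvRegMSIXMap0/1 */
		ret = pci_enable_msix(np->pci_dev, np->msi_x_entry,
				      np->msi_flags & NV_MSI_X_VECTORS_MASK);
	}
	if (ret != 0 && (np->msi_flags & NV_MSI_CAPABLE)) {
		/* on success: request_irq() on the single MSI vector */
		ret = pci_enable_msi(np->pci_dev);
	}
	if (ret != 0) {
		/* legacy shared INTx as the last resort */
		ret = request_irq(np->pci_dev->irq, &nv_nic_irq,
				  SA_SHIRQ, dev->name, dev);
	}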
@@ -2441,6 +2810,7 @@ static int nv_close(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base;
+	int i;
 
 	spin_lock_irq(&np->lock);
 	np->in_shutdown = 1;
@@ -2458,13 +2828,31 @@ static int nv_close(struct net_device *dev)
 
 	/* disable interrupts on the nic or we will lock up */
 	base = get_hwbase(dev);
-	writel(0, base + NvRegIrqMask);
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		writel(np->irqmask, base + NvRegIrqMask);
+	} else {
+		if (np->msi_flags & NV_MSI_ENABLED)
+			writel(0, base + NvRegMSIIrqMask);
+		writel(0, base + NvRegIrqMask);
+	}
 	pci_push(base);
 	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
 
 	spin_unlock_irq(&np->lock);
 
-	free_irq(dev->irq, dev);
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			free_irq(np->msi_x_entry[i].vector, dev);
+		}
+		pci_disable_msix(np->pci_dev);
+		np->msi_flags &= ~NV_MSI_X_ENABLED;
+	} else {
+		free_irq(np->pci_dev->irq, dev);
+		if (np->msi_flags & NV_MSI_ENABLED) {
+			pci_disable_msi(np->pci_dev);
+			np->msi_flags &= ~NV_MSI_ENABLED;
+		}
+	}
 
 	drain_ring(dev);
 
@@ -2588,6 +2976,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
 	}
 
+	np->msi_flags = 0;
+	if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) {
+		np->msi_flags |= NV_MSI_CAPABLE;
+	}
+	if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) {
+		np->msi_flags |= NV_MSI_X_CAPABLE;
+	}
+
 	err = -ENOMEM;
 	np->base = ioremap(addr, NV_PCI_REGSZ);
 	if (!np->base)
@@ -2670,10 +3066,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	} else {
 		np->tx_flags = NV_TX2_VALID;
 	}
-	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
+	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
 		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
-	else
+		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
+			np->msi_flags |= 0x0003;
+	} else {
 		np->irqmask = NVREG_IRQMASK_CPU;
+		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
+			np->msi_flags |= 0x0001;
+	}
 
 	if (id->driver_data & DEV_NEED_TIMERIRQ)
 		np->irqmask |= NVREG_IRQ_TIMER;
@@ -2829,11 +3230,11 @@ static struct pci_device_id pci_tbl[] = {
 	},
 	{	/* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X,
 	},
 	{	/* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X,
 	},
 	{0,},
 };
@@ -2863,6 +3264,10 @@ module_param(optimization_mode, int, 0);
 MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
 module_param(poll_interval, int, 0);
 MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
+module_param(disable_msi, int, 0);
+MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1.");
+module_param(disable_msix, int, 0);
+MODULE_PARM_DESC(disable_msix, "Disable MSIX interrupts by setting to 1.");
 
 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
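
Both new parameters default to 0, so MSI/MSI-X are used whenever the device advertises them and the driver flags it as capable; either mechanism can be turned off at module load time with standard parameter syntax, e.g. modprobe forcedeth disable_msi=1 disable_msix=1.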