author	Jeff Garzik <jgarzik@pobox.com>	2005-09-01 18:02:27 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-09-01 18:02:27 -0400
commit	ceeec3dc375e3b0618f16b34efc56fe093918f8b (patch)
tree	2293d02721ee05131aaf1c60e4fba7e281585eec /drivers/net/forcedeth.c
parent	fbff868db3a4cc6a89d51da9a6d49b26c29d04fb (diff)
parent	e3ee3b78f83688a0ae4315e8be71b2eac559904a (diff)
Merge /spare/repo/netdev-2.6 branch 'ieee80211'
Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--	drivers/net/forcedeth.c	582
1 file changed, 427 insertions(+), 155 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 64f0f697c958..7d93948aec83 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -85,6 +85,16 @@
  * 0.33: 16 May 2005: Support for MCP51 added.
  * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
  * 0.35: 26 Jun 2005: Support for MCP55 added.
+ * 0.36: 28 Jun 2005: Add jumbo frame support.
+ * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
+ * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
+ *			per-packet flags.
+ * 0.39: 18 Jul 2005: Add 64bit descriptor support.
+ * 0.40: 19 Jul 2005: Add support for mac address change.
+ * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
+ *			of nv_remove
+ * 0.42: 06 Aug 2005: Fix lack of link speed initialization
+ *			in the second (and later) nv_open call
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -96,7 +106,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION		"0.35"
+#define FORCEDETH_VERSION		"0.41"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -131,11 +141,10 @@
  * Hardware access:
  */
 
-#define DEV_NEED_LASTPACKET1	0x0001	/* set LASTPACKET1 in tx flags */
-#define DEV_IRQMASK_1		0x0002	/* use NVREG_IRQMASK_WANTED_1 for irq mask */
-#define DEV_IRQMASK_2		0x0004	/* use NVREG_IRQMASK_WANTED_2 for irq mask */
-#define DEV_NEED_TIMERIRQ	0x0008	/* set the timer irq flag in the irq mask */
-#define DEV_NEED_LINKTIMER	0x0010	/* poll link settings. Relies on the timer irq */
+#define DEV_NEED_TIMERIRQ	0x0001	/* set the timer irq flag in the irq mask */
+#define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
+#define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
+#define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
 
 enum {
 	NvRegIrqStatus = 0x000,
@@ -146,13 +155,16 @@ enum {
 #define NVREG_IRQ_RX			0x0002
 #define NVREG_IRQ_RX_NOBUF		0x0004
 #define NVREG_IRQ_TX_ERR		0x0008
-#define NVREG_IRQ_TX2			0x0010
+#define NVREG_IRQ_TX_OK			0x0010
 #define NVREG_IRQ_TIMER			0x0020
 #define NVREG_IRQ_LINK			0x0040
+#define NVREG_IRQ_TX_ERROR		0x0080
 #define NVREG_IRQ_TX1			0x0100
-#define NVREG_IRQMASK_WANTED_1		0x005f
-#define NVREG_IRQMASK_WANTED_2		0x0147
-#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR|NVREG_IRQ_TX2|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX1))
+#define NVREG_IRQMASK_WANTED		0x00df
+
+#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
+					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \
+					NVREG_IRQ_TX1))
 
 	NvRegUnknownSetupReg6 = 0x008,
 #define NVREG_UNKSETUP6_VAL		3
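The single combined mask replaces the two per-device variants. A quick standalone check (a sketch; NVREG_IRQ_RX_ERROR is assumed to be 0x0001 from the surrounding enum, its #define lies outside this hunk) that 0x00df is exactly the OR of the handled interrupt bits, with NVREG_IRQ_TIMER (0x0020) and NVREG_IRQ_TX1 (0x0100) deliberately left out -- the timer bit is OR-ed in separately for devices flagged DEV_NEED_TIMERIRQ:

```c
/* Sketch: verify the combined irq mask value used by this patch. */
#include <assert.h>
#include <stdio.h>

#define NVREG_IRQ_RX_ERROR 0x0001	/* assumed from context */
#define NVREG_IRQ_RX       0x0002
#define NVREG_IRQ_RX_NOBUF 0x0004
#define NVREG_IRQ_TX_ERR   0x0008
#define NVREG_IRQ_TX_OK    0x0010
#define NVREG_IRQ_LINK     0x0040
#define NVREG_IRQ_TX_ERROR 0x0080

int main(void)
{
	unsigned wanted = NVREG_IRQ_RX_ERROR | NVREG_IRQ_RX | NVREG_IRQ_RX_NOBUF |
			  NVREG_IRQ_TX_ERR | NVREG_IRQ_TX_OK | NVREG_IRQ_LINK |
			  NVREG_IRQ_TX_ERROR;
	assert(wanted == 0x00df);	/* timer (0x20) and TX1 (0x100) excluded */
	printf("NVREG_IRQMASK_WANTED = 0x%04x\n", wanted);
	return 0;
}
```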
@@ -286,6 +298,18 @@ struct ring_desc {
 	u32 FlagLen;
 };
 
+struct ring_desc_ex {
+	u32 PacketBufferHigh;
+	u32 PacketBufferLow;
+	u32 Reserved;
+	u32 FlagLen;
+};
+
+typedef union _ring_type {
+	struct ring_desc* orig;
+	struct ring_desc_ex* ex;
+} ring_type;
+
 #define FLAG_MASK_V1 0xffff0000
 #define FLAG_MASK_V2 0xffffc000
 #define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
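From here on, every ring access in the patch branches on np->desc_ver and goes through either the .orig or the .ex view of the same allocation. A minimal user-space sketch of that dispatch pattern (the type names mirror the driver; the rest is illustrative):

```c
/* Sketch: one allocation viewed through two descriptor layouts,
 * selected by a version tag the way the driver branches on np->desc_ver. */
#include <stdint.h>
#include <stdlib.h>

struct ring_desc { uint32_t PacketBuffer, FlagLen; };
struct ring_desc_ex { uint32_t PacketBufferHigh, PacketBufferLow, Reserved, FlagLen; };

typedef union { struct ring_desc *orig; struct ring_desc_ex *ex; } ring_type;

enum { DESC_VER_1, DESC_VER_2, DESC_VER_3 };

static void clear_ring(ring_type ring, int desc_ver, int entries)
{
	for (int i = 0; i < entries; i++) {
		if (desc_ver == DESC_VER_1 || desc_ver == DESC_VER_2)
			ring.orig[i].FlagLen = 0;	/* 8-byte descriptors */
		else
			ring.ex[i].FlagLen = 0;		/* 16-byte descriptors */
	}
}

int main(void)
{
	ring_type rx;
	int ver = DESC_VER_3, entries = 128;

	rx.ex = calloc(entries, sizeof(*rx.ex));	/* one block, viewed per-version */
	clear_ring(rx, ver, entries);
	free(rx.ex);
	return 0;
}
```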
@@ -293,7 +317,7 @@ struct ring_desc {
 
 #define NV_TX_LASTPACKET	(1<<16)
 #define NV_TX_RETRYERROR	(1<<19)
-#define NV_TX_LASTPACKET1	(1<<24)
+#define NV_TX_FORCED_INTERRUPT	(1<<24)
 #define NV_TX_DEFERRED		(1<<26)
 #define NV_TX_CARRIERLOST	(1<<27)
 #define NV_TX_LATECOLLISION	(1<<28)
@@ -303,7 +327,7 @@ struct ring_desc {
 
 #define NV_TX2_LASTPACKET	(1<<29)
 #define NV_TX2_RETRYERROR	(1<<18)
-#define NV_TX2_LASTPACKET1	(1<<23)
+#define NV_TX2_FORCED_INTERRUPT	(1<<30)
 #define NV_TX2_DEFERRED		(1<<25)
 #define NV_TX2_CARRIERLOST	(1<<26)
 #define NV_TX2_LATECOLLISION	(1<<27)
@@ -379,9 +403,13 @@ struct ring_desc {
 #define TX_LIMIT_START	62
 
 /* rx/tx mac addr + type + vlan + align + slack*/
-#define RX_NIC_BUFSIZE		(ETH_DATA_LEN + 64)
-/* even more slack */
-#define RX_ALLOC_BUFSIZE	(ETH_DATA_LEN + 128)
+#define NV_RX_HEADERS		(64)
+/* even more slack. */
+#define NV_RX_ALLOC_PAD		(64)
+
+/* maximum mtu size */
+#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
+#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */
 
 #define OOM_REFILL	(1+HZ/20)
 #define POLL_WAIT	(1+HZ/100)
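These limits feed the new receive buffer sizing: set_bufsize(), added further down, computes the MTU (or the 1500-byte default) plus NV_RX_HEADERS of slack. A small standalone sketch of that arithmetic:

```c
/* Sketch of the rx buffer sizing this patch introduces; ETH_DATA_LEN
 * is the standard 1500-byte Ethernet payload limit. */
#include <assert.h>

#define ETH_DATA_LEN	1500
#define NV_RX_HEADERS	64

static int rx_buf_sz(int mtu)
{
	return (mtu <= ETH_DATA_LEN ? ETH_DATA_LEN : mtu) + NV_RX_HEADERS;
}

int main(void)
{
	assert(rx_buf_sz(1500) == 1564);
	assert(rx_buf_sz(9000) == 9064);	/* jumbo frame, packet format 2/3 */
	return 0;
}
```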
@@ -396,6 +424,7 @@ struct ring_desc {
  */
 #define DESC_VER_1	0x0
 #define DESC_VER_2	(0x02100|NVREG_TXRXCTL_RXCHECK)
+#define DESC_VER_3	(0x02200|NVREG_TXRXCTL_RXCHECK)
 
 /* PHY defines */
 #define PHY_OUI_MARVELL	0x5043
@@ -468,11 +497,12 @@ struct fe_priv {
 	/* rx specific fields.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
 	 */
-	struct ring_desc *rx_ring;
+	ring_type rx_ring;
 	unsigned int cur_rx, refill_rx;
 	struct sk_buff *rx_skbuff[RX_RING];
 	dma_addr_t rx_dma[RX_RING];
 	unsigned int rx_buf_sz;
+	unsigned int pkt_limit;
 	struct timer_list oom_kick;
 	struct timer_list nic_poll;
 
@@ -484,7 +514,7 @@ struct fe_priv {
 	/*
 	 * tx specific fields.
 	 */
-	struct ring_desc *tx_ring;
+	ring_type tx_ring;
 	unsigned int next_tx, nic_tx;
 	struct sk_buff *tx_skbuff[TX_RING];
 	dma_addr_t tx_dma[TX_RING];
@@ -519,6 +549,11 @@ static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
 		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
 }
 
+static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
+{
+	return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
+}
+
 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
 				int delay, int delaymax, const char *msg)
 {
@@ -792,7 +827,7 @@ static int nv_alloc_rx(struct net_device *dev)
 		nr = refill_rx % RX_RING;
 		if (np->rx_skbuff[nr] == NULL) {
 
-			skb = dev_alloc_skb(RX_ALLOC_BUFSIZE);
+			skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
 			if (!skb)
 				break;
 
@@ -803,9 +838,16 @@ static int nv_alloc_rx(struct net_device *dev)
 		}
 		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
 						PCI_DMA_FROMDEVICE);
-		np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
-		wmb();
-		np->rx_ring[nr].FlagLen = cpu_to_le32(RX_NIC_BUFSIZE | NV_RX_AVAIL);
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+			np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
+			wmb();
+			np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+		} else {
+			np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
+			np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
+			wmb();
+			np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+		}
 		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
 					dev->name, refill_rx);
 		refill_rx++;
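For packet format 3 the buffer address no longer fits one 32-bit descriptor word, so it is split across PacketBufferHigh and PacketBufferLow. A standalone sketch of the split and its reassembly, with uint64_t standing in for dma_addr_t and the byte-order helpers omitted:

```c
/* Sketch of the high/low split used for the 64-bit descriptors. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t dma = 0x0000003fdeadbeefULL;	/* example bus address, made up */
	uint32_t high = dma >> 32;		/* PacketBufferHigh */
	uint32_t low  = dma & 0x0ffffffffULL;	/* PacketBufferLow */

	assert((((uint64_t)high << 32) | low) == dma);	/* lossless round trip */
	return 0;
}
```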
@@ -831,19 +873,37 @@ static void nv_do_rx_refill(unsigned long data)
 	enable_irq(dev->irq);
 }
 
-static int nv_init_ring(struct net_device *dev)
+static void nv_init_rx(struct net_device *dev)
 {
 	struct fe_priv *np = get_nvpriv(dev);
 	int i;
 
-	np->next_tx = np->nic_tx = 0;
-	for (i = 0; i < TX_RING; i++)
-		np->tx_ring[i].FlagLen = 0;
-
 	np->cur_rx = RX_RING;
 	np->refill_rx = 0;
 	for (i = 0; i < RX_RING; i++)
-		np->rx_ring[i].FlagLen = 0;
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			np->rx_ring.orig[i].FlagLen = 0;
+		else
+			np->rx_ring.ex[i].FlagLen = 0;
+}
+
+static void nv_init_tx(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	int i;
+
+	np->next_tx = np->nic_tx = 0;
+	for (i = 0; i < TX_RING; i++)
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			np->tx_ring.orig[i].FlagLen = 0;
+		else
+			np->tx_ring.ex[i].FlagLen = 0;
+}
+
+static int nv_init_ring(struct net_device *dev)
+{
+	nv_init_tx(dev);
+	nv_init_rx(dev);
 	return nv_alloc_rx(dev);
 }
 
@@ -852,7 +912,10 @@ static void nv_drain_tx(struct net_device *dev)
 	struct fe_priv *np = get_nvpriv(dev);
 	int i;
 	for (i = 0; i < TX_RING; i++) {
-		np->tx_ring[i].FlagLen = 0;
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			np->tx_ring.orig[i].FlagLen = 0;
+		else
+			np->tx_ring.ex[i].FlagLen = 0;
 		if (np->tx_skbuff[i]) {
 			pci_unmap_single(np->pci_dev, np->tx_dma[i],
 						np->tx_skbuff[i]->len,
@@ -869,7 +932,10 @@ static void nv_drain_rx(struct net_device *dev)
 	struct fe_priv *np = get_nvpriv(dev);
 	int i;
 	for (i = 0; i < RX_RING; i++) {
-		np->rx_ring[i].FlagLen = 0;
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			np->rx_ring.orig[i].FlagLen = 0;
+		else
+			np->rx_ring.ex[i].FlagLen = 0;
 		wmb();
 		if (np->rx_skbuff[i]) {
 			pci_unmap_single(np->pci_dev, np->rx_dma[i],
@@ -900,11 +966,19 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len,
 					PCI_DMA_TODEVICE);
 
-	np->tx_ring[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+		np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
+	else {
+		np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+		np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+	}
 
 	spin_lock_irq(&np->lock);
 	wmb();
-	np->tx_ring[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+		np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
+	else
+		np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
 	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission.\n",
 				dev->name, np->next_tx);
 	{
@@ -942,7 +1016,10 @@ static void nv_tx_done(struct net_device *dev)
 	while (np->nic_tx != np->next_tx) {
 		i = np->nic_tx % TX_RING;
 
-		Flags = le32_to_cpu(np->tx_ring[i].FlagLen);
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
+		else
+			Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
 
 		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
 					dev->name, np->nic_tx, Flags);
@@ -993,9 +1070,56 @@ static void nv_tx_timeout(struct net_device *dev)
 	struct fe_priv *np = get_nvpriv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 
-	dprintk(KERN_DEBUG "%s: Got tx_timeout. irq: %08x\n", dev->name,
+	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
 			readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
 
+	{
+		int i;
+
+		printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
+				dev->name, (unsigned long)np->ring_addr,
+				np->next_tx, np->nic_tx);
+		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
+		for (i=0;i<0x400;i+= 32) {
+			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+					i,
+					readl(base + i + 0), readl(base + i + 4),
+					readl(base + i + 8), readl(base + i + 12),
+					readl(base + i + 16), readl(base + i + 20),
+					readl(base + i + 24), readl(base + i + 28));
+		}
+		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
+		for (i=0;i<TX_RING;i+= 4) {
+			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
+				       i,
+				       le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
+				       le32_to_cpu(np->tx_ring.orig[i].FlagLen),
+				       le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
+				       le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
+				       le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
+				       le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
+				       le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
+				       le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
+			} else {
+				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
+				       i,
+				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
+				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
+				       le32_to_cpu(np->tx_ring.ex[i].FlagLen),
+				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
+				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
+				       le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
+				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
+				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
+				       le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
+				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
+				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
+				       le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
+			}
+		}
+	}
+
 	spin_lock_irq(&np->lock);
 
 	/* 1) stop tx engine */
@@ -1009,7 +1133,10 @@ static void nv_tx_timeout(struct net_device *dev)
 		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
 		nv_drain_tx(dev);
 		np->next_tx = np->nic_tx = 0;
-		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+		else
+			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
 		netif_wake_queue(dev);
 	}
 
@@ -1084,8 +1211,13 @@ static void nv_rx_process(struct net_device *dev)
 			break;	/* we scanned the whole ring - do not continue */
 
 		i = np->cur_rx % RX_RING;
-		Flags = le32_to_cpu(np->rx_ring[i].FlagLen);
-		len = nv_descr_getlength(&np->rx_ring[i], np->desc_ver);
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+			Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
+			len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
+		} else {
+			Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
+			len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
+		}
 
 		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
 					dev->name, np->cur_rx, Flags);
@@ -1207,15 +1339,133 @@ next_pkt:
 	}
 }
 
+static void set_bufsize(struct net_device *dev)
+{
+	struct fe_priv *np = netdev_priv(dev);
+
+	if (dev->mtu <= ETH_DATA_LEN)
+		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
+	else
+		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
+}
+
 /*
  * nv_change_mtu: dev->change_mtu function
  * Called with dev_base_lock held for read.
  */
 static int nv_change_mtu(struct net_device *dev, int new_mtu)
 {
-	if (new_mtu > ETH_DATA_LEN)
+	struct fe_priv *np = get_nvpriv(dev);
+	int old_mtu;
+
+	if (new_mtu < 64 || new_mtu > np->pkt_limit)
 		return -EINVAL;
+
+	old_mtu = dev->mtu;
 	dev->mtu = new_mtu;
+
+	/* return early if the buffer sizes will not change */
+	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
+		return 0;
+	if (old_mtu == new_mtu)
+		return 0;
+
+	/* synchronized against open : rtnl_lock() held by caller */
+	if (netif_running(dev)) {
+		u8 *base = get_hwbase(dev);
+		/*
+		 * It seems that the nic preloads valid ring entries into an
+		 * internal buffer. The procedure for flushing everything is
+		 * guessed, there is probably a simpler approach.
+		 * Changing the MTU is a rare event, it shouldn't matter.
+		 */
+		disable_irq(dev->irq);
+		spin_lock_bh(&dev->xmit_lock);
+		spin_lock(&np->lock);
+		/* stop engines */
+		nv_stop_rx(dev);
+		nv_stop_tx(dev);
+		nv_txrx_reset(dev);
+		/* drain rx queue */
+		nv_drain_rx(dev);
+		nv_drain_tx(dev);
+		/* reinit driver view of the rx queue */
+		nv_init_rx(dev);
+		nv_init_tx(dev);
+		/* alloc new rx buffers */
+		set_bufsize(dev);
+		if (nv_alloc_rx(dev)) {
+			if (!np->in_shutdown)
+				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+		}
+		/* reinit nic view of the rx queue */
+		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+		writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+		else
+			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+		writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
+			base + NvRegRingSizes);
+		pci_push(base);
+		writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl);
+		pci_push(base);
+
+		/* restart rx engine */
+		nv_start_rx(dev);
+		nv_start_tx(dev);
+		spin_unlock(&np->lock);
+		spin_unlock_bh(&dev->xmit_lock);
+		enable_irq(dev->irq);
+	}
+	return 0;
+}
+
+static void nv_copy_mac_to_hw(struct net_device *dev)
+{
+	u8 *base = get_hwbase(dev);
+	u32 mac[2];
+
+	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
+			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
+	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
+
+	writel(mac[0], base + NvRegMacAddrA);
+	writel(mac[1], base + NvRegMacAddrB);
+}
+
+/*
+ * nv_set_mac_address: dev->set_mac_address function
+ * Called with rtnl_lock() held.
+ */
+static int nv_set_mac_address(struct net_device *dev, void *addr)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	struct sockaddr *macaddr = (struct sockaddr*)addr;
+
+	if(!is_valid_ether_addr(macaddr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	/* synchronized against open : rtnl_lock() held by caller */
+	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
+
+	if (netif_running(dev)) {
+		spin_lock_bh(&dev->xmit_lock);
+		spin_lock_irq(&np->lock);
+
+		/* stop rx engine */
+		nv_stop_rx(dev);
+
+		/* set mac address */
+		nv_copy_mac_to_hw(dev);
+
+		/* restart rx engine */
+		nv_start_rx(dev);
+		spin_unlock_irq(&np->lock);
+		spin_unlock_bh(&dev->xmit_lock);
+	} else {
+		nv_copy_mac_to_hw(dev);
+	}
 	return 0;
 }
 
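nv_copy_mac_to_hw() folds the six address bytes little-endian into the two MAC registers, and nv_set_mac_address() reuses it so a running interface can change its address with only the rx engine paused. A user-space sketch of the packing (the example address is made up):

```c
/* Sketch of the nv_copy_mac_to_hw packing: six MAC bytes folded
 * little-endian into two 32-bit register values. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t a[6] = { 0x00, 0x50, 0x8d, 0x12, 0x34, 0x56 };	/* made-up MAC */
	uint32_t mac0 = a[0] | a[1] << 8 | a[2] << 16 | (uint32_t)a[3] << 24;
	uint32_t mac1 = a[4] | a[5] << 8;

	/* in the driver: writel(mac0, base + NvRegMacAddrA); etc. */
	printf("NvRegMacAddrA = 0x%08x, NvRegMacAddrB = 0x%08x\n", mac0, mac1);
	return 0;
}
```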
@@ -1470,7 +1720,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 		if (!(events & np->irqmask))
 			break;
 
-		if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX2|NVREG_IRQ_TX_ERR)) {
+		if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_ERROR|NVREG_IRQ_TX_ERR)) {
 			spin_lock(&np->lock);
 			nv_tx_done(dev);
 			spin_unlock(&np->lock);
@@ -1761,6 +2011,50 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 	return 0;
 }
 
+#define FORCEDETH_REGS_VER	1
+#define FORCEDETH_REGS_SIZE	0x400 /* 256 32-bit registers */
+
+static int nv_get_regs_len(struct net_device *dev)
+{
+	return FORCEDETH_REGS_SIZE;
+}
+
+static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 *rbuf = buf;
+	int i;
+
+	regs->version = FORCEDETH_REGS_VER;
+	spin_lock_irq(&np->lock);
+	for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++)
+		rbuf[i] = readl(base + i*sizeof(u32));
+	spin_unlock_irq(&np->lock);
+}
+
+static int nv_nway_reset(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	int ret;
+
+	spin_lock_irq(&np->lock);
+	if (np->autoneg) {
+		int bmcr;
+
+		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+
+		ret = 0;
+	} else {
+		ret = -EINVAL;
+	}
+	spin_unlock_irq(&np->lock);
+
+	return ret;
+}
+
 static struct ethtool_ops ops = {
 	.get_drvinfo = nv_get_drvinfo,
 	.get_link = ethtool_op_get_link,
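User-visibly, `ethtool -d eth0` retrieves the 0x400-byte snapshot taken by nv_get_regs, and `ethtool -r eth0` lands in nv_nway_reset. A runnable sketch of the BMCR manipulation at its core, with a stubbed MII register file in place of the driver's mii_rw() helper (the bit values are the standard ones from <linux/mii.h>):

```c
/* Sketch of the autoneg restart done by nv_nway_reset: read BMCR,
 * set enable+restart, write it back. */
#include <assert.h>
#include <stdint.h>

#define MII_BMCR	0x00	/* standard MII register number and bits */
#define BMCR_ANRESTART	0x0200
#define BMCR_ANENABLE	0x1000

static uint16_t phy_regs[32];	/* stub PHY register file */

static uint16_t mii_read(int reg) { return phy_regs[reg]; }
static void mii_write(int reg, uint16_t val) { phy_regs[reg] = val; }

int main(void)
{
	uint16_t bmcr = mii_read(MII_BMCR);

	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	mii_write(MII_BMCR, bmcr);
	assert(phy_regs[MII_BMCR] & BMCR_ANRESTART);
	return 0;
}
```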
@@ -1768,6 +2062,9 @@ static struct ethtool_ops ops = {
 	.set_wol = nv_set_wol,
 	.get_settings = nv_get_settings,
 	.set_settings = nv_set_settings,
+	.get_regs_len = nv_get_regs_len,
+	.get_regs = nv_get_regs,
+	.nway_reset = nv_nway_reset,
 };
 
 static int nv_open(struct net_device *dev)
@@ -1792,6 +2089,7 @@ static int nv_open(struct net_device *dev)
 	writel(0, base + NvRegAdapterControl);
 
 	/* 2) initialize descriptor rings */
+	set_bufsize(dev);
 	oom = nv_init_ring(dev);
 
 	writel(0, base + NvRegLinkSpeed);
@@ -1802,20 +2100,14 @@ static int nv_open(struct net_device *dev)
 	np->in_shutdown = 0;
 
 	/* 3) set mac address */
-	{
-		u32 mac[2];
-
-		mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
-				(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
-		mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
-
-		writel(mac[0], base + NvRegMacAddrA);
-		writel(mac[1], base + NvRegMacAddrB);
-	}
+	nv_copy_mac_to_hw(dev);
 
 	/* 4) give hw rings */
 	writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
-	writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+	else
+		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
 	writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
 		base + NvRegRingSizes);
 
@@ -1837,7 +2129,7 @@ static int nv_open(struct net_device *dev)
 	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
 	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
 	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
-	writel(NVREG_OFFLOAD_NORMAL, base + NvRegOffloadConfig);
+	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 
 	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
 	get_random_bytes(&i, sizeof(i));
@@ -1888,6 +2180,9 @@ static int nv_open(struct net_device *dev)
 		writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
 		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
 	}
+	/* set linkspeed to invalid value, thus force nv_update_linkspeed
+	 * to init hw */
+	np->linkspeed = 0;
 	ret = nv_update_linkspeed(dev);
 	nv_start_rx(dev);
 	nv_start_tx(dev);
@@ -1942,6 +2237,12 @@ static int nv_close(struct net_device *dev)
 	if (np->wolenabled)
 		nv_start_rx(dev);
 
+	/* special op: write back the misordered MAC address - otherwise
+	 * the next nv_probe would see a wrong address.
+	 */
+	writel(np->orig_mac[0], base + NvRegMacAddrA);
+	writel(np->orig_mac[1], base + NvRegMacAddrB);
+
 	/* FIXME: power down nic */
 
 	return 0;
@@ -2006,32 +2307,55 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	}
 
 	/* handle different descriptor versions */
-	if (pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_1 ||
-		pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_2 ||
-		pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_3 ||
-		pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
-		pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_13)
-		np->desc_ver = DESC_VER_1;
-	else
+	if (id->driver_data & DEV_HAS_HIGH_DMA) {
+		/* packet format 3: supports 40-bit addressing */
+		np->desc_ver = DESC_VER_3;
+		if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
+			printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
+					pci_name(pci_dev));
+		}
+	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
+		/* packet format 2: supports jumbo frames */
 		np->desc_ver = DESC_VER_2;
+	} else {
+		/* original packet format */
+		np->desc_ver = DESC_VER_1;
+	}
+
+	np->pkt_limit = NV_PKTLIMIT_1;
+	if (id->driver_data & DEV_HAS_LARGEDESC)
+		np->pkt_limit = NV_PKTLIMIT_2;
 
 	err = -ENOMEM;
 	np->base = ioremap(addr, NV_PCI_REGSZ);
 	if (!np->base)
 		goto out_relreg;
 	dev->base_addr = (unsigned long)np->base;
+
 	dev->irq = pci_dev->irq;
-	np->rx_ring = pci_alloc_consistent(pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
-				&np->ring_addr);
-	if (!np->rx_ring)
-		goto out_unmap;
-	np->tx_ring = &np->rx_ring[RX_RING];
+
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
+					sizeof(struct ring_desc) * (RX_RING + TX_RING),
+					&np->ring_addr);
+		if (!np->rx_ring.orig)
+			goto out_unmap;
+		np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
+	} else {
+		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
+					sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
+					&np->ring_addr);
+		if (!np->rx_ring.ex)
+			goto out_unmap;
+		np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
+	}
 
 	dev->open = nv_open;
 	dev->stop = nv_close;
 	dev->hard_start_xmit = nv_start_xmit;
 	dev->get_stats = nv_get_stats;
 	dev->change_mtu = nv_change_mtu;
+	dev->set_mac_address = nv_set_mac_address;
 	dev->set_multicast_list = nv_set_multicast;
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	dev->poll_controller = nv_poll_controller;
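One nit worth noting: the comment says 40-bit addressing, but 0x0000007fffffffffULL is 2^39 - 1, i.e. a 39-bit mask (mainline later replaced this constant with DMA_39BIT_MASK). A one-line standalone check:

```c
/* Sketch: the DMA mask passed to pci_set_dma_mask() covers 39 address
 * bits, not 40 as the comment suggests. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t mask = 0x0000007fffffffffULL;

	assert(mask == (1ULL << 39) - 1);
	return 0;
}
```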
@@ -2080,17 +2404,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 
 	if (np->desc_ver == DESC_VER_1) {
 		np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID;
-		if (id->driver_data & DEV_NEED_LASTPACKET1)
-			np->tx_flags |= NV_TX_LASTPACKET1;
 	} else {
 		np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID;
-		if (id->driver_data & DEV_NEED_LASTPACKET1)
-			np->tx_flags |= NV_TX2_LASTPACKET1;
 	}
-	if (id->driver_data & DEV_IRQMASK_1)
-		np->irqmask = NVREG_IRQMASK_WANTED_1;
-	if (id->driver_data & DEV_IRQMASK_2)
-		np->irqmask = NVREG_IRQMASK_WANTED_2;
+	np->irqmask = NVREG_IRQMASK_WANTED;
 	if (id->driver_data & DEV_NEED_TIMERIRQ)
 		np->irqmask |= NVREG_IRQ_TIMER;
 	if (id->driver_data & DEV_NEED_LINKTIMER) {
@@ -2155,8 +2472,12 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	return 0;
 
 out_freering:
-	pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
-				np->rx_ring, np->ring_addr);
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
+				    np->rx_ring.orig, np->ring_addr);
+	else
+		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
+				    np->rx_ring.ex, np->ring_addr);
 	pci_set_drvdata(pci_dev, NULL);
 out_unmap:
 	iounmap(get_hwbase(dev));
@@ -2174,18 +2495,14 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
 {
 	struct net_device *dev = pci_get_drvdata(pci_dev);
 	struct fe_priv *np = get_nvpriv(dev);
-	u8 __iomem *base = get_hwbase(dev);
 
 	unregister_netdev(dev);
 
-	/* special op: write back the misordered MAC address - otherwise
-	 * the next nv_probe would see a wrong address.
-	 */
-	writel(np->orig_mac[0], base + NvRegMacAddrA);
-	writel(np->orig_mac[1], base + NvRegMacAddrB);
-
 	/* free all structures */
-	pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring, np->ring_addr);
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
+	else
+		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
 	iounmap(get_hwbase(dev));
 	pci_release_regions(pci_dev);
 	pci_disable_device(pci_dev);
@@ -2195,109 +2512,64 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
 
 static struct pci_device_id pci_tbl[] = {
 	{ /* nForce Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_1,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_IRQMASK_1|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
 	},
 	{ /* nForce2 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_2,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
 	},
 	{ /* nForce3 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_3,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
 	},
 	{ /* nForce3 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_4,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
 	},
 	{ /* nForce3 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_5,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
 	},
 	{ /* nForce3 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_6,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
 	},
 	{ /* nForce3 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_7,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
 	},
 	{ /* CK804 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_8,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{ /* CK804 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_9,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{ /* MCP04 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_10,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{ /* MCP04 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_11,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{ /* MCP51 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_12,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
 	},
 	{ /* MCP51 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_13,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
 	},
 	{ /* MCP55 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_14,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{ /* MCP55 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_15,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{0,},
 };
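The table shrinkage comes from the PCI_DEVICE() helper, which expands to exactly the four fields the old entries spelled out:

```c
/* As defined in <linux/pci.h>: fills vendor/device and wildcards the
 * subsystem IDs, which is all the old open-coded entries did. */
#define PCI_DEVICE(vend, dev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
```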
