Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r--	drivers/net/tg3.c	| 4566
1 file changed, 2636 insertions(+), 1930 deletions(-)
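Most of what follows is a mechanical conversion of the driver's feature tests from open-coded mask checks on tp->tg3_flags / tg3_flags2 / tg3_flags3 to the tg3_flag(), tg3_flag_set() and tg3_flag_clear() accessors introduced near the top of the file, which wrap test_bit()/set_bit()/clear_bit() on a single flag bitmap. A minimal before/after sketch of the idiom (illustrative only, not part of the patch; setup_tagged_status() is a hypothetical helper):

	/* old style: pick the right flag word and test the bit by hand */
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
		setup_tagged_status(tp);	/* hypothetical helper */

	/* new style: one named-flag bitmap, no per-word bookkeeping */
	if (tg3_flag(tp, TAGGED_STATUS))
		setup_tagged_status(tp);	/* hypothetical helper */
	tg3_flag_set(tp, TAGGED_STATUS);
	tg3_flag_clear(tp, TAGGED_STATUS);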
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 1ec4b9e0239a..a1f9f9eef37d 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4,7 +4,7 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2010 Broadcom Corporation.
+ * Copyright (C) 2005-2011 Broadcom Corporation.
  *
  * Firmware is:
  *	Derived from proprietary unpublished source code,
@@ -32,6 +32,7 @@
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 #include <linux/ethtool.h>
+#include <linux/mdio.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
 #include <linux/brcmphy.h>
@@ -47,9 +48,9 @@
 #include <net/ip.h>

 #include <asm/system.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>

 #ifdef CONFIG_SPARC
 #include <asm/idprom.h>
@@ -59,20 +60,38 @@
 #define BAR_0	0
 #define BAR_2	2

-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define TG3_VLAN_TAG_USED 1
-#else
-#define TG3_VLAN_TAG_USED 0
-#endif
-
 #include "tg3.h"

+/* Functions & macros to verify TG3_FLAGS types */
+
+static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
+{
+	return test_bit(flag, bits);
+}
+
+static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
+{
+	set_bit(flag, bits);
+}
+
+static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
+{
+	clear_bit(flag, bits);
+}
+
+#define tg3_flag(tp, flag)	\
+	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
+#define tg3_flag_set(tp, flag)	\
+	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
+#define tg3_flag_clear(tp, flag)	\
+	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
+
 #define DRV_MODULE_NAME		"tg3"
 #define TG3_MAJ_NUM		3
-#define TG3_MIN_NUM		113
+#define TG3_MIN_NUM		119
 #define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE	"August 2, 2010"
+#define DRV_MODULE_RELDATE	"May 18, 2011"

 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -90,20 +109,25 @@
 /* length of time before we decide the hardware is borked,
  * and dev->tx_timeout() should be called to fix the problem
  */
+
 #define TG3_TX_TIMEOUT			(5 * HZ)

 /* hardware minimum and maximum for a single frame's data payload */
 #define TG3_MIN_MTU			60
 #define TG3_MAX_MTU(tp)	\
-	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
+	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

 /* These numbers seem to be hard coded in the NIC firmware somehow.
  * You can't change the ring sizes, but you can change where you place
  * them in the NIC onboard memory.
  */
-#define TG3_RX_RING_SIZE		512
+#define TG3_RX_STD_RING_SIZE(tp) \
+	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
+	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
 #define TG3_DEF_RX_RING_PENDING		200
-#define TG3_RX_JUMBO_RING_SIZE		256
+#define TG3_RX_JMB_RING_SIZE(tp) \
+	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
+	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
 #define TG3_RSS_INDIR_TBL_SIZE		128

@@ -113,26 +137,20 @@
  * hw multiply/modulo instructions. Another solution would be to
  * replace things like '% foo' with '& (foo - 1)'.
  */
-#define TG3_RX_RCB_RING_SIZE(tp)	\
-	(((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
-	  !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)

 #define TG3_TX_RING_SIZE		512
 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

-#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
-				 TG3_RX_RING_SIZE)
-#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
-				 TG3_RX_JUMBO_RING_SIZE)
-#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
-				   TG3_RX_RCB_RING_SIZE(tp))
+#define TG3_RX_STD_RING_BYTES(tp) \
+	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
+#define TG3_RX_JMB_RING_BYTES(tp) \
+	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
+#define TG3_RX_RCB_RING_BYTES(tp) \
+	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
 				 TG3_TX_RING_SIZE)
 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

-#define TG3_RX_DMA_ALIGN		16
-#define TG3_RX_HEADROOM			ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
-
 #define TG3_DMA_BYTE_ENAB		64

 #define TG3_RX_STD_DMA_SZ		1536
@@ -143,11 +161,11 @@
 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

-#define TG3_RX_STD_BUFF_RING_SIZE \
-	(sizeof(struct ring_info) * TG3_RX_RING_SIZE)
+#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
+	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

-#define TG3_RX_JMB_BUFF_RING_SIZE \
-	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
+#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
+	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
  * that are at least dword aligned when used in PCIX mode. The driver
@@ -172,11 +190,6 @@

 #define TG3_RAW_IP_ALIGN 2

-/* number of ETHTOOL_GSTATS u64's */
-#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
-
-#define TG3_NUM_TEST 6
-
 #define TG3_FW_UPDATE_TIMEOUT_SEC	5

 #define FIRMWARE_TG3		"tigon/tg3.bin"
@@ -264,7 +277,6 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
@@ -272,6 +284,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -279,6 +292,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
+	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
 	{}
 };

@@ -286,7 +300,7 @@ MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

 static const struct {
 	const char string[ETH_GSTRING_LEN];
-} ethtool_stats_keys[TG3_NUM_STATS] = {
+} ethtool_stats_keys[] = {
 	{ "rx_octets" },
 	{ "rx_fragments" },
 	{ "rx_ucast_packets" },
@@ -362,12 +376,17 @@ static const struct {
 	{ "ring_status_update" },
 	{ "nic_irqs" },
 	{ "nic_avoided_irqs" },
-	{ "nic_tx_threshold_hit" }
+	{ "nic_tx_threshold_hit" },
+
+	{ "mbuf_lwm_thresh_hit" },
 };

+#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
+
+
 static const struct {
 	const char string[ETH_GSTRING_LEN];
-} ethtool_test_keys[TG3_NUM_TEST] = {
+} ethtool_test_keys[] = {
 	{ "nvram test (online) " },
 	{ "link test (online) " },
 	{ "register test (offline)" },
@@ -376,6 +395,9 @@ static const struct {
 	{ "interrupt test (offline)" },
 };

+#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
+
+
 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
 {
 	writel(val, tp->regs + off);
@@ -473,8 +495,7 @@ static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
  */
 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
 {
-	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
-	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
+	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
 		/* Non-posted methods */
 		tp->write32(tp, off, val);
 	else {
@@ -494,8 +515,7 @@ static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
 {
 	tp->write32_mbox(tp, off, val);
-	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
-	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
+	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
 		tp->read32_mbox(tp, off);
 }

@@ -503,9 +523,9 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
 {
 	void __iomem *mbox = tp->regs + off;
 	writel(val, mbox);
-	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
+	if (tg3_flag(tp, TXD_MBOX_HWBUG))
 		writel(val, mbox);
-	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
+	if (tg3_flag(tp, MBOX_WRITE_REORDER))
 		readl(mbox);
 }

@@ -534,12 +554,12 @@ static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
 {
 	unsigned long flags;

-	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
 		return;

 	spin_lock_irqsave(&tp->indirect_lock, flags);
-	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
+	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

@@ -559,14 +579,14 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
 {
 	unsigned long flags;

-	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
 		*val = 0;
 		return;
 	}

 	spin_lock_irqsave(&tp->indirect_lock, flags);
-	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
+	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

@@ -603,7 +623,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
 	int ret = 0;
 	u32 status, req, gnt;

-	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
+	if (!tg3_flag(tp, ENABLE_APE))
 		return 0;

 	switch (locknum) {
@@ -649,7 +669,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
 {
 	u32 gnt;

-	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
+	if (!tg3_flag(tp, ENABLE_APE))
 		return;

 	switch (locknum) {
@@ -693,14 +713,14 @@ static void tg3_enable_ints(struct tg3 *tp)
 		struct tg3_napi *tnapi = &tp->napi[i];

 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
-		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
+		if (tg3_flag(tp, 1SHOT_MSI))
 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

 		tp->coal_now |= tnapi->coal_now;
 	}

 	/* Force an initial interrupt */
-	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
+	if (!tg3_flag(tp, TAGGED_STATUS) &&
 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
 	else
@@ -716,9 +736,7 @@ static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
 	unsigned int work_exists = 0;

 	/* check for phy events */
-	if (!(tp->tg3_flags &
-	      (TG3_FLAG_USE_LINKCHG_REG |
-	       TG3_FLAG_POLL_SERDES))) {
+	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
 		if (sblk->status & SD_STATUS_LINK_CHG)
 			work_exists = 1;
 	}
@@ -746,55 +764,17 @@ static void tg3_int_reenable(struct tg3_napi *tnapi)
 	 * The last_tag we write above tells the chip which piece of
 	 * work we've completed.
 	 */
-	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
-	    tg3_has_work(tnapi))
+	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
 		tw32(HOSTCC_MODE, tp->coalesce_mode |
 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
 }

-static void tg3_napi_disable(struct tg3 *tp)
-{
-	int i;
-
-	for (i = tp->irq_cnt - 1; i >= 0; i--)
-		napi_disable(&tp->napi[i].napi);
-}
-
-static void tg3_napi_enable(struct tg3 *tp)
-{
-	int i;
-
-	for (i = 0; i < tp->irq_cnt; i++)
-		napi_enable(&tp->napi[i].napi);
-}
-
-static inline void tg3_netif_stop(struct tg3 *tp)
-{
-	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
-	tg3_napi_disable(tp);
-	netif_tx_disable(tp->dev);
-}
-
-static inline void tg3_netif_start(struct tg3 *tp)
-{
-	/* NOTE: unconditional netif_tx_wake_all_queues is only
-	 * appropriate so long as all callers are assured to
-	 * have free tx slots (such as after tg3_init_hw)
-	 */
-	netif_tx_wake_all_queues(tp->dev);
-
-	tg3_napi_enable(tp);
-	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
-	tg3_enable_ints(tp);
-}
-
 static void tg3_switch_clocks(struct tg3 *tp)
 {
 	u32 clock_ctrl;
 	u32 orig_clock_ctrl;

-	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
-	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
+	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
 		return;

 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
@@ -805,7 +785,7 @@ static void tg3_switch_clocks(struct tg3 *tp)
 		       0x1f);
 	tp->pci_clock_ctrl = clock_ctrl;

-	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+	if (tg3_flag(tp, 5705_PLUS)) {
 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
@@ -922,6 +902,104 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
 	return ret;
 }

+static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
+{
+	int err;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
+	if (err)
+		goto done;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
+	if (err)
+		goto done;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
+			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
+	if (err)
+		goto done;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
+
+done:
+	return err;
+}
+
+static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
+{
+	int err;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
+	if (err)
+		goto done;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
+	if (err)
+		goto done;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
+			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
+	if (err)
+		goto done;
+
+	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
+
+done:
+	return err;
+}
+
+static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
+{
+	int err;
+
+	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
+	if (!err)
+		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
+
+	return err;
+}
+
+static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
+{
+	int err;
+
+	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
+	if (!err)
+		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
+
+	return err;
+}
+
+static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
+{
+	int err;
+
+	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
+			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
+			   MII_TG3_AUXCTL_SHDWSEL_MISC);
+	if (!err)
+		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
+
+	return err;
+}
+
+static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
+{
+	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
+		set |= MII_TG3_AUXCTL_MISC_WREN;
+
+	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
+}
+
+#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
+	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
+			     MII_TG3_AUXCTL_ACTL_TX_6DB)
+
+#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
+	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+			     MII_TG3_AUXCTL_ACTL_TX_6DB);
+
 static int tg3_bmcr_reset(struct tg3 *tp)
 {
 	u32 phy_control;
@@ -1024,7 +1102,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
 		return;
 	}

-	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
+	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
 		       MAC_PHYCFG2_FMODE_MASK_MASK |
 		       MAC_PHYCFG2_GMODE_MASK_MASK |
@@ -1037,10 +1115,10 @@
 	val = tr32(MAC_PHYCFG1);
 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
-	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
-		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
+	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
+		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
-		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
+		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
 	}
 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
@@ -1055,13 +1133,13 @@
 			     MAC_RGMII_MODE_TX_ENABLE |
 			     MAC_RGMII_MODE_TX_LOWPWR |
 			     MAC_RGMII_MODE_TX_RESET);
-	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
-		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
+	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
+		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
 			val |= MAC_RGMII_MODE_RX_INT_B |
 			       MAC_RGMII_MODE_RX_QUALITY |
 			       MAC_RGMII_MODE_RX_ACTIVITY |
 			       MAC_RGMII_MODE_RX_ENG_DET;
-		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
+		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
 			val |= MAC_RGMII_MODE_TX_ENABLE |
 			       MAC_RGMII_MODE_TX_LOWPWR |
 			       MAC_RGMII_MODE_TX_RESET;
@@ -1075,7 +1153,7 @@ static void tg3_mdio_start(struct tg3 *tp)
 	tw32_f(MAC_MI_MODE, tp->mi_mode);
 	udelay(80);

-	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
+	if (tg3_flag(tp, MDIOBUS_INITED) &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
 		tg3_mdio_config_5785(tp);
 }
@@ -1086,8 +1164,7 @@ static int tg3_mdio_init(struct tg3 *tp)
 	u32 reg;
 	struct phy_device *phydev;

-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+	if (tg3_flag(tp, 5717_PLUS)) {
 		u32 is_serdes;

 		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
@@ -1104,8 +1181,7 @@ static int tg3_mdio_init(struct tg3 *tp)

 	tg3_mdio_start(tp);

-	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
-	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
+	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
 		return 0;

 	tp->mdio_bus = mdiobus_alloc();
@@ -1161,11 +1237,11 @@ static int tg3_mdio_init(struct tg3 *tp)
 				     PHY_BRCM_RX_REFCLK_UNUSED |
 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
-		if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
+		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
-		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
+		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
-		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
+		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
 		/* fallthru */
 	case PHY_ID_RTL8211C:
@@ -1179,7 +1255,7 @@ static int tg3_mdio_init(struct tg3 *tp)
 		break;
 	}

-	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
+	tg3_flag_set(tp, MDIOBUS_INITED);

 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
 		tg3_mdio_config_5785(tp);
@@ -1189,8 +1265,8 @@ static int tg3_mdio_init(struct tg3 *tp)

 static void tg3_mdio_fini(struct tg3 *tp)
 {
-	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
-		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
+	if (tg3_flag(tp, MDIOBUS_INITED)) {
+		tg3_flag_clear(tp, MDIOBUS_INITED);
 		mdiobus_unregister(tp->mdio_bus);
 		mdiobus_free(tp->mdio_bus);
 	}
@@ -1243,8 +1319,7 @@ static void tg3_ump_link_report(struct tg3 *tp)
 	u32 reg;
 	u32 val;

-	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
-	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
+	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
 		return;

 	tg3_wait_for_event_ack(tp);
@@ -1304,6 +1379,11 @@ static void tg3_link_report(struct tg3 *tp)
 			    "on" : "off",
 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
 			    "on" : "off");
+
+		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
+			netdev_info(tp->dev, "EEE is %s\n",
+				    tp->setlpicnt ? "enabled" : "disabled");
+
 		tg3_ump_link_report(tp);
 	}
 }
@@ -1369,13 +1449,12 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
 	u32 old_rx_mode = tp->rx_mode;
 	u32 old_tx_mode = tp->tx_mode;

-	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
+	if (tg3_flag(tp, USE_PHYLIB))
 		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
 	else
 		autoneg = tp->link_config.autoneg;

-	if (autoneg == AUTONEG_ENABLE &&
-	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
+	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
 		else
@@ -1572,17 +1651,6 @@ static void tg3_phy_fini(struct tg3 *tp)
 	}
 }

-static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
-{
-	int err;
-
-	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
-	if (!err)
-		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
-
-	return err;
-}
-
 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
 {
 	u32 phytest;
@@ -1607,9 +1675,8 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
 {
 	u32 reg;

-	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
-	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
+	if (!tg3_flag(tp, 5705_PLUS) ||
+	    (tg3_flag(tp, 5717_PLUS) &&
 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
 		return;

@@ -1643,7 +1710,7 @@ static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
 {
 	u32 phy;

-	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
+	if (!tg3_flag(tp, 5705_PLUS) ||
 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
 		return;

@@ -1665,31 +1732,33 @@ static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
 		}
 	} else {
-		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
-		      MII_TG3_AUXCTL_SHDWSEL_MISC;
-		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
-		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
+		int ret;
+
+		ret = tg3_phy_auxctl_read(tp,
+					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
+		if (!ret) {
 			if (enable)
 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
 			else
 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
-			phy |= MII_TG3_AUXCTL_MISC_WREN;
-			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
+			tg3_phy_auxctl_write(tp,
+					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
 		}
 	}
 }

 static void tg3_phy_set_wirespeed(struct tg3 *tp)
 {
+	int ret;
 	u32 val;

 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
 		return;

-	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
-	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
-		tg3_writephy(tp, MII_TG3_AUX_CTRL,
-			     (val | (1 << 15) | (1 << 4)));
+	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
+	if (!ret)
+		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
+				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
 }

 static void tg3_phy_apply_otp(struct tg3 *tp)
@@ -1701,11 +1770,8 @@ static void tg3_phy_apply_otp(struct tg3 *tp)

 	otp = tp->phy_otp;

-	/* Enable SM_DSP clock and tx 6dB coding. */
-	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
-	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
-	      MII_TG3_AUXCTL_ACTL_TX_6DB;
-	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
+	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
+		return;

 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
@@ -1729,10 +1795,61 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

-	/* Turn off SM_DSP clock. */
-	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
-	      MII_TG3_AUXCTL_ACTL_TX_6DB;
-	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
+	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+}
+
+static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
+{
+	u32 val;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
+		return;
+
+	tp->setlpicnt = 0;
+
+	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
+	    current_link_up == 1 &&
+	    tp->link_config.active_duplex == DUPLEX_FULL &&
+	    (tp->link_config.active_speed == SPEED_100 ||
+	     tp->link_config.active_speed == SPEED_1000)) {
+		u32 eeectl;
+
+		if (tp->link_config.active_speed == SPEED_1000)
+			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
+		else
+			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
+
+		tw32(TG3_CPMU_EEE_CTRL, eeectl);
+
+		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
+				  TG3_CL45_D7_EEERES_STAT, &val);
+
+		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
+		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
+			tp->setlpicnt = 2;
+	}
+
+	if (!tp->setlpicnt) {
+		val = tr32(TG3_CPMU_EEE_MODE);
+		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
+	}
+}
+
+static void tg3_phy_eee_enable(struct tg3 *tp)
+{
+	u32 val;
+
+	if (tp->link_config.active_speed == SPEED_1000 &&
+	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
+		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+	}
+
+	val = tr32(TG3_CPMU_EEE_MODE);
+	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
 }

 static int tg3_wait_macro_done(struct tg3 *tp)
@@ -1873,8 +1990,9 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
 		     (MII_TG3_CTRL_AS_MASTER |
 		      MII_TG3_CTRL_ENABLE_AS_MASTER));

-	/* Enable SM_DSP_CLOCK and 6dB. */
-	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
+	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+	if (err)
+		return err;

 	/* Block the PHY control access. */
 	tg3_phydsp_write(tp, 0x8005, 0x0800);
@@ -1893,13 +2011,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
-		/* Set Extended packet length bit for jumbo frames */
-		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
-	} else {
-		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
-	}
+	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

 	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

@@ -1917,19 +2029,16 @@
  */
 static int tg3_phy_reset(struct tg3 *tp)
 {
-	u32 cpmuctrl;
-	u32 phy_status;
+	u32 val, cpmuctrl;
 	int err;

 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
-		u32 val;
-
 		val = tr32(GRC_MISC_CFG);
 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
 		udelay(40);
 	}
-	err = tg3_readphy(tp, MII_BMSR, &phy_status);
-	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
+	err = tg3_readphy(tp, MII_BMSR, &val);
+	err |= tg3_readphy(tp, MII_BMSR, &val);
 	if (err != 0)
 		return -EBUSY;

@@ -1961,18 +2070,14 @@ static int tg3_phy_reset(struct tg3 *tp)
 		return err;

 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
-		u32 phy;
-
-		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
-		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
+		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
+		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

 		tw32(TG3_CPMU_CTRL, cpmuctrl);
 	}

 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
-		u32 val;
-
 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
@@ -1982,8 +2087,7 @@
 		}
 	}

-	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
+	if (tg3_flag(tp, 5717_PLUS) &&
 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
 		return 0;

@@ -1995,56 +2099,60 @@ static int tg3_phy_reset(struct tg3 *tp)
 	tg3_phy_toggle_apd(tp, false);

 out:
-	if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) {
-		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
+	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
+	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
 		tg3_phydsp_write(tp, 0x000a, 0x0323);
-		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
+		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 	}
+
 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
 	}
+
 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
-		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
+		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 			tg3_phydsp_write(tp, 0x000a, 0x310b);
 			tg3_phydsp_write(tp, 0x201f, 0x9506);
 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
-			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
+			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+		}
 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
-		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
+		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
 				tg3_writephy(tp, MII_TG3_TEST1,
 					     MII_TG3_TEST1_TRIM_EN | 0x4);
 			} else
 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
-			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
+
+			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+		}
 	}
+
 	/* Set Extended packet length bit (bit 14) on all chips that */
 	/* support jumbo frames */
 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
 		/* Cannot do read-modify-write on 5401 */
-		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
-	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
-		u32 phy_reg;
-
+		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
+	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
 		/* Set bit 14 with read-modify-write to preserve other bits */
-		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
-		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
-			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
+		err = tg3_phy_auxctl_read(tp,
+					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+		if (!err)
+			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
 	}

 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
 	 * jumbo frames transmission.
 	 */
-	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
-		u32 phy_reg;
-
-		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
+	if (tg3_flag(tp, JUMBO_CAPABLE)) {
+		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
-				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
+				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
 	}

 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
@@ -2059,31 +2167,40 @@ out:

 static void tg3_frob_aux_power(struct tg3 *tp)
 {
-	struct tg3 *tp_peer = tp;
+	bool need_vaux = false;

 	/* The GPIOs do something completely different on 57765. */
-	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
+	if (!tg3_flag(tp, IS_NIC) ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
 		return;

-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
+	    tp->pdev_peer != tp->pdev) {
 		struct net_device *dev_peer;

 		dev_peer = pci_get_drvdata(tp->pdev_peer);
+
 		/* remove_one() may have been run on the peer. */
-		if (!dev_peer)
-			tp_peer = tp;
-		else
-			tp_peer = netdev_priv(dev_peer);
+		if (dev_peer) {
+			struct tg3 *tp_peer = netdev_priv(dev_peer);
+
+			if (tg3_flag(tp_peer, INIT_COMPLETE))
+				return;
+
+			if (tg3_flag(tp_peer, WOL_ENABLE) ||
+			    tg3_flag(tp_peer, ENABLE_ASF))
+				need_vaux = true;
+		}
 	}

-	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
-	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
-	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
-	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
+	if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
+		need_vaux = true;
+
+	if (need_vaux) {
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
@@ -2113,10 +2230,6 @@ static void tg3_frob_aux_power(struct tg3 *tp)
 			u32 no_gpio2;
 			u32 grc_local_ctrl = 0;

-			if (tp_peer != tp &&
-			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
-				return;
-
 			/* Workaround to prevent overdrawing Amps. */
 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
 			    ASIC_REV_5714) {
@@ -2155,10 +2268,6 @@ static void tg3_frob_aux_power(struct tg3 *tp)
 	} else {
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
-			if (tp_peer != tp &&
-			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
-				return;
-
 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
 				    (GRC_LCLCTRL_GPIO_OE1 |
 				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
@@ -2242,11 +2351,10 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

-			tg3_writephy(tp, MII_TG3_AUX_CTRL,
-				     MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
-				     MII_TG3_AUXCTL_PCTL_100TX_LPWR |
-				     MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
-				     MII_TG3_AUXCTL_PCTL_VREG_11V);
+			val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
+			      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
+			      MII_TG3_AUXCTL_PCTL_VREG_11V;
+			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
 		}

 		/* The PHY should not be powered down on some chips because
@@ -2272,7 +2380,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 /* tp->lock is held. */
 static int tg3_nvram_lock(struct tg3 *tp)
 {
-	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
+	if (tg3_flag(tp, NVRAM)) {
 		int i;

 		if (tp->nvram_lock_cnt == 0) {
@@ -2295,7 +2403,7 @@ static int tg3_nvram_lock(struct tg3 *tp) | |||
2295 | /* tp->lock is held. */ | 2403 | /* tp->lock is held. */ |
2296 | static void tg3_nvram_unlock(struct tg3 *tp) | 2404 | static void tg3_nvram_unlock(struct tg3 *tp) |
2297 | { | 2405 | { |
2298 | if (tp->tg3_flags & TG3_FLAG_NVRAM) { | 2406 | if (tg3_flag(tp, NVRAM)) { |
2299 | if (tp->nvram_lock_cnt > 0) | 2407 | if (tp->nvram_lock_cnt > 0) |
2300 | tp->nvram_lock_cnt--; | 2408 | tp->nvram_lock_cnt--; |
2301 | if (tp->nvram_lock_cnt == 0) | 2409 | if (tp->nvram_lock_cnt == 0) |
@@ -2306,8 +2414,7 @@ static void tg3_nvram_unlock(struct tg3 *tp) | |||
2306 | /* tp->lock is held. */ | 2414 | /* tp->lock is held. */ |
2307 | static void tg3_enable_nvram_access(struct tg3 *tp) | 2415 | static void tg3_enable_nvram_access(struct tg3 *tp) |
2308 | { | 2416 | { |
2309 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 2417 | if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { |
2310 | !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { | ||
2311 | u32 nvaccess = tr32(NVRAM_ACCESS); | 2418 | u32 nvaccess = tr32(NVRAM_ACCESS); |
2312 | 2419 | ||
2313 | tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); | 2420 | tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); |
@@ -2317,8 +2424,7 @@ static void tg3_enable_nvram_access(struct tg3 *tp) | |||
2317 | /* tp->lock is held. */ | 2424 | /* tp->lock is held. */ |
2318 | static void tg3_disable_nvram_access(struct tg3 *tp) | 2425 | static void tg3_disable_nvram_access(struct tg3 *tp) |
2319 | { | 2426 | { |
2320 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 2427 | if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { |
2321 | !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { | ||
2322 | u32 nvaccess = tr32(NVRAM_ACCESS); | 2428 | u32 nvaccess = tr32(NVRAM_ACCESS); |
2323 | 2429 | ||
2324 | tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); | 2430 | tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); |
@@ -2388,10 +2494,10 @@ static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) | |||
2388 | 2494 | ||
2389 | static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) | 2495 | static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) |
2390 | { | 2496 | { |
2391 | if ((tp->tg3_flags & TG3_FLAG_NVRAM) && | 2497 | if (tg3_flag(tp, NVRAM) && |
2392 | (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && | 2498 | tg3_flag(tp, NVRAM_BUFFERED) && |
2393 | (tp->tg3_flags2 & TG3_FLG2_FLASH) && | 2499 | tg3_flag(tp, FLASH) && |
2394 | !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && | 2500 | !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && |
2395 | (tp->nvram_jedecnum == JEDEC_ATMEL)) | 2501 | (tp->nvram_jedecnum == JEDEC_ATMEL)) |
2396 | 2502 | ||
2397 | addr = ((addr / tp->nvram_pagesize) << | 2503 | addr = ((addr / tp->nvram_pagesize) << |
@@ -2403,10 +2509,10 @@ static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) | |||
2403 | 2509 | ||
2404 | static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) | 2510 | static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) |
2405 | { | 2511 | { |
2406 | if ((tp->tg3_flags & TG3_FLAG_NVRAM) && | 2512 | if (tg3_flag(tp, NVRAM) && |
2407 | (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && | 2513 | tg3_flag(tp, NVRAM_BUFFERED) && |
2408 | (tp->tg3_flags2 & TG3_FLG2_FLASH) && | 2514 | tg3_flag(tp, FLASH) && |
2409 | !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && | 2515 | !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && |
2410 | (tp->nvram_jedecnum == JEDEC_ATMEL)) | 2516 | (tp->nvram_jedecnum == JEDEC_ATMEL)) |
2411 | 2517 | ||
2412 | addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * | 2518 | addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * |
@@ -2426,7 +2532,7 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) | |||
2426 | { | 2532 | { |
2427 | int ret; | 2533 | int ret; |
2428 | 2534 | ||
2429 | if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) | 2535 | if (!tg3_flag(tp, NVRAM)) |
2430 | return tg3_nvram_read_using_eeprom(tp, offset, val); | 2536 | return tg3_nvram_read_using_eeprom(tp, offset, val); |
2431 | 2537 | ||
2432 | offset = tg3_nvram_phys_addr(tp, offset); | 2538 | offset = tg3_nvram_phys_addr(tp, offset); |
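
tg3_nvram_phys_addr() and its inverse tg3_nvram_logical_addr(), both converted in the hunks above, translate between linear offsets and the page/byte-offset addressing that buffered Atmel flash expects. A standalone worked example, assuming the 264-byte page size and 9-bit page position used by the AT45DB011B-style parts this path targets (hedged: other geometries use different values):

	/* Standalone illustration, not driver code. */
	#include <assert.h>
	#include <stdio.h>

	#define AT45_PAGE_SIZE	264u	/* bytes per page (assumed) */
	#define AT45_PAGE_POS	9u	/* page number starts at bit 9 (assumed) */

	static unsigned int linear_to_phys(unsigned int addr)
	{
		return ((addr / AT45_PAGE_SIZE) << AT45_PAGE_POS) +
		       (addr % AT45_PAGE_SIZE);
	}

	static unsigned int phys_to_linear(unsigned int addr)
	{
		return ((addr >> AT45_PAGE_POS) * AT45_PAGE_SIZE) +
		       (addr & ((1u << AT45_PAGE_POS) - 1));
	}

	int main(void)
	{
		unsigned int lin = 1000;			/* page 3, byte 208 */
		unsigned int phys = linear_to_phys(lin);	/* (3 << 9) + 208 = 1744 */

		printf("linear %u -> phys %u -> linear %u\n",
		       lin, phys, phys_to_linear(phys));
		assert(phys_to_linear(phys) == lin);
		return 0;
	}
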
@@ -2501,42 +2607,38 @@ static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) | |||
2501 | tw32(MAC_TX_BACKOFF_SEED, addr_high); | 2607 | tw32(MAC_TX_BACKOFF_SEED, addr_high); |
2502 | } | 2608 | } |
2503 | 2609 | ||
2504 | static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | 2610 | static void tg3_enable_register_access(struct tg3 *tp) |
2505 | { | 2611 | { |
2506 | u32 misc_host_ctrl; | 2612 | /* |
2507 | bool device_should_wake, do_low_power; | 2613 | * Make sure register accesses (indirect or otherwise) will function |
2508 | 2614 | * correctly. | |
2509 | /* Make sure register accesses (indirect or otherwise) | ||
2510 | * will function correctly. | ||
2511 | */ | 2615 | */ |
2512 | pci_write_config_dword(tp->pdev, | 2616 | pci_write_config_dword(tp->pdev, |
2513 | TG3PCI_MISC_HOST_CTRL, | 2617 | TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); |
2514 | tp->misc_host_ctrl); | 2618 | } |
2515 | 2619 | ||
2516 | switch (state) { | 2620 | static int tg3_power_up(struct tg3 *tp) |
2517 | case PCI_D0: | 2621 | { |
2518 | pci_enable_wake(tp->pdev, state, false); | 2622 | tg3_enable_register_access(tp); |
2519 | pci_set_power_state(tp->pdev, PCI_D0); | ||
2520 | 2623 | ||
2521 | /* Switch out of Vaux if it is a NIC */ | 2624 | pci_set_power_state(tp->pdev, PCI_D0); |
2522 | if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) | ||
2523 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100); | ||
2524 | 2625 | ||
2525 | return 0; | 2626 | /* Switch out of Vaux if it is a NIC */ |
2627 | if (tg3_flag(tp, IS_NIC)) | ||
2628 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100); | ||
2526 | 2629 | ||
2527 | case PCI_D1: | 2630 | return 0; |
2528 | case PCI_D2: | 2631 | } |
2529 | case PCI_D3hot: | ||
2530 | break; | ||
2531 | 2632 | ||
2532 | default: | 2633 | static int tg3_power_down_prepare(struct tg3 *tp) |
2533 | netdev_err(tp->dev, "Invalid power state (D%d) requested\n", | 2634 | { |
2534 | state); | 2635 | u32 misc_host_ctrl; |
2535 | return -EINVAL; | 2636 | bool device_should_wake, do_low_power; |
2536 | } | 2637 | |
2638 | tg3_enable_register_access(tp); | ||
2537 | 2639 | ||
2538 | /* Restore the CLKREQ setting. */ | 2640 | /* Restore the CLKREQ setting. */ |
2539 | if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) { | 2641 | if (tg3_flag(tp, CLKREQ_BUG)) { |
2540 | u16 lnkctl; | 2642 | u16 lnkctl; |
2541 | 2643 | ||
2542 | pci_read_config_word(tp->pdev, | 2644 | pci_read_config_word(tp->pdev, |
@@ -2552,11 +2654,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2552 | tw32(TG3PCI_MISC_HOST_CTRL, | 2654 | tw32(TG3PCI_MISC_HOST_CTRL, |
2553 | misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); | 2655 | misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); |
2554 | 2656 | ||
2555 | device_should_wake = pci_pme_capable(tp->pdev, state) && | 2657 | device_should_wake = device_may_wakeup(&tp->pdev->dev) && |
2556 | device_may_wakeup(&tp->pdev->dev) && | 2658 | tg3_flag(tp, WOL_ENABLE); |
2557 | (tp->tg3_flags & TG3_FLAG_WOL_ENABLE); | ||
2558 | 2659 | ||
2559 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 2660 | if (tg3_flag(tp, USE_PHYLIB)) { |
2560 | do_low_power = false; | 2661 | do_low_power = false; |
2561 | if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && | 2662 | if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && |
2562 | !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { | 2663 | !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { |
@@ -2577,9 +2678,8 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2577 | ADVERTISED_Autoneg | | 2678 | ADVERTISED_Autoneg | |
2578 | ADVERTISED_10baseT_Half; | 2679 | ADVERTISED_10baseT_Half; |
2579 | 2680 | ||
2580 | if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || | 2681 | if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { |
2581 | device_should_wake) { | 2682 | if (tg3_flag(tp, WOL_SPEED_100MB)) |
2582 | if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) | ||
2583 | advertising |= | 2683 | advertising |= |
2584 | ADVERTISED_100baseT_Half | | 2684 | ADVERTISED_100baseT_Half | |
2585 | ADVERTISED_100baseT_Full | | 2685 | ADVERTISED_100baseT_Full | |
@@ -2624,7 +2724,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2624 | 2724 | ||
2625 | val = tr32(GRC_VCPU_EXT_CTRL); | 2725 | val = tr32(GRC_VCPU_EXT_CTRL); |
2626 | tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); | 2726 | tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); |
2627 | } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { | 2727 | } else if (!tg3_flag(tp, ENABLE_ASF)) { |
2628 | int i; | 2728 | int i; |
2629 | u32 val; | 2729 | u32 val; |
2630 | 2730 | ||
@@ -2635,7 +2735,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2635 | msleep(1); | 2735 | msleep(1); |
2636 | } | 2736 | } |
2637 | } | 2737 | } |
2638 | if (tp->tg3_flags & TG3_FLAG_WOL_CAP) | 2738 | if (tg3_flag(tp, WOL_CAP)) |
2639 | tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | | 2739 | tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | |
2640 | WOL_DRV_STATE_SHUTDOWN | | 2740 | WOL_DRV_STATE_SHUTDOWN | |
2641 | WOL_DRV_WOL | | 2741 | WOL_DRV_WOL | |
@@ -2645,8 +2745,13 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2645 | u32 mac_mode; | 2745 | u32 mac_mode; |
2646 | 2746 | ||
2647 | if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { | 2747 | if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { |
2648 | if (do_low_power) { | 2748 | if (do_low_power && |
2649 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a); | 2749 | !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { |
2750 | tg3_phy_auxctl_write(tp, | ||
2751 | MII_TG3_AUXCTL_SHDWSEL_PWRCTL, | ||
2752 | MII_TG3_AUXCTL_PCTL_WOL_EN | | ||
2753 | MII_TG3_AUXCTL_PCTL_100TX_LPWR | | ||
2754 | MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); | ||
2650 | udelay(40); | 2755 | udelay(40); |
2651 | } | 2756 | } |
2652 | 2757 | ||
@@ -2658,8 +2763,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2658 | mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; | 2763 | mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; |
2659 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == | 2764 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == |
2660 | ASIC_REV_5700) { | 2765 | ASIC_REV_5700) { |
2661 | u32 speed = (tp->tg3_flags & | 2766 | u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? |
2662 | TG3_FLAG_WOL_SPEED_100MB) ? | ||
2663 | SPEED_100 : SPEED_10; | 2767 | SPEED_100 : SPEED_10; |
2664 | if (tg3_5700_link_polarity(tp, speed)) | 2768 | if (tg3_5700_link_polarity(tp, speed)) |
2665 | mac_mode |= MAC_MODE_LINK_POLARITY; | 2769 | mac_mode |= MAC_MODE_LINK_POLARITY; |
@@ -2670,22 +2774,18 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2670 | mac_mode = MAC_MODE_PORT_MODE_TBI; | 2774 | mac_mode = MAC_MODE_PORT_MODE_TBI; |
2671 | } | 2775 | } |
2672 | 2776 | ||
2673 | if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) | 2777 | if (!tg3_flag(tp, 5750_PLUS)) |
2674 | tw32(MAC_LED_CTRL, tp->led_ctrl); | 2778 | tw32(MAC_LED_CTRL, tp->led_ctrl); |
2675 | 2779 | ||
2676 | mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; | 2780 | mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; |
2677 | if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && | 2781 | if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && |
2678 | !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) && | 2782 | (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) |
2679 | ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || | ||
2680 | (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))) | ||
2681 | mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; | 2783 | mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; |
2682 | 2784 | ||
2683 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { | 2785 | if (tg3_flag(tp, ENABLE_APE)) |
2684 | mac_mode |= tp->mac_mode & | 2786 | mac_mode |= MAC_MODE_APE_TX_EN | |
2685 | (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); | 2787 | MAC_MODE_APE_RX_EN | |
2686 | if (mac_mode & MAC_MODE_APE_TX_EN) | 2788 | MAC_MODE_TDE_ENABLE; |
2687 | mac_mode |= MAC_MODE_TDE_ENABLE; | ||
2688 | } | ||
2689 | 2789 | ||
2690 | tw32_f(MAC_MODE, mac_mode); | 2790 | tw32_f(MAC_MODE, mac_mode); |
2691 | udelay(100); | 2791 | udelay(100); |
@@ -2694,7 +2794,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2694 | udelay(10); | 2794 | udelay(10); |
2695 | } | 2795 | } |
2696 | 2796 | ||
2697 | if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) && | 2797 | if (!tg3_flag(tp, WOL_SPEED_100MB) && |
2698 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 2798 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
2699 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { | 2799 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { |
2700 | u32 base_val; | 2800 | u32 base_val; |
@@ -2705,12 +2805,11 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2705 | 2805 | ||
2706 | tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | | 2806 | tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | |
2707 | CLOCK_CTRL_PWRDOWN_PLL133, 40); | 2807 | CLOCK_CTRL_PWRDOWN_PLL133, 40); |
2708 | } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || | 2808 | } else if (tg3_flag(tp, 5780_CLASS) || |
2709 | (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || | 2809 | tg3_flag(tp, CPMU_PRESENT) || |
2710 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) { | 2810 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
2711 | /* do nothing */ | 2811 | /* do nothing */ |
2712 | } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 2812 | } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { |
2713 | (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) { | ||
2714 | u32 newbits1, newbits2; | 2813 | u32 newbits1, newbits2; |
2715 | 2814 | ||
2716 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 2815 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
@@ -2719,7 +2818,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2719 | CLOCK_CTRL_TXCLK_DISABLE | | 2818 | CLOCK_CTRL_TXCLK_DISABLE | |
2720 | CLOCK_CTRL_ALTCLK); | 2819 | CLOCK_CTRL_ALTCLK); |
2721 | newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; | 2820 | newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; |
2722 | } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 2821 | } else if (tg3_flag(tp, 5705_PLUS)) { |
2723 | newbits1 = CLOCK_CTRL_625_CORE; | 2822 | newbits1 = CLOCK_CTRL_625_CORE; |
2724 | newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; | 2823 | newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; |
2725 | } else { | 2824 | } else { |
@@ -2733,7 +2832,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2733 | tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, | 2832 | tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, |
2734 | 40); | 2833 | 40); |
2735 | 2834 | ||
2736 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 2835 | if (!tg3_flag(tp, 5705_PLUS)) { |
2737 | u32 newbits3; | 2836 | u32 newbits3; |
2738 | 2837 | ||
2739 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 2838 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
@@ -2750,8 +2849,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2750 | } | 2849 | } |
2751 | } | 2850 | } |
2752 | 2851 | ||
2753 | if (!(device_should_wake) && | 2852 | if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF)) |
2754 | !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) | ||
2755 | tg3_power_down_phy(tp, do_low_power); | 2853 | tg3_power_down_phy(tp, do_low_power); |
2756 | 2854 | ||
2757 | tg3_frob_aux_power(tp); | 2855 | tg3_frob_aux_power(tp); |
@@ -2763,7 +2861,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2763 | 2861 | ||
2764 | val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); | 2862 | val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); |
2765 | tw32(0x7d00, val); | 2863 | tw32(0x7d00, val); |
2766 | if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { | 2864 | if (!tg3_flag(tp, ENABLE_ASF)) { |
2767 | int err; | 2865 | int err; |
2768 | 2866 | ||
2769 | err = tg3_nvram_lock(tp); | 2867 | err = tg3_nvram_lock(tp); |
@@ -2775,13 +2873,15 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2775 | 2873 | ||
2776 | tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); | 2874 | tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); |
2777 | 2875 | ||
2778 | if (device_should_wake) | 2876 | return 0; |
2779 | pci_enable_wake(tp->pdev, state, true); | 2877 | } |
2780 | 2878 | ||
2781 | /* Finally, set the new power state. */ | 2879 | static void tg3_power_down(struct tg3 *tp) |
2782 | pci_set_power_state(tp->pdev, state); | 2880 | { |
2881 | tg3_power_down_prepare(tp); | ||
2783 | 2882 | ||
2784 | return 0; | 2883 | pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); |
2884 | pci_set_power_state(tp->pdev, PCI_D3hot); | ||
2785 | } | 2885 | } |
2786 | 2886 | ||
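
The refactor above splits the old all-in-one power-state routine: tg3_power_down_prepare() performs the chip-specific quiescing, and tg3_power_down() finishes with the generic PCI calls. As a minimal sketch of that final step in isolation (the flag name below is a stand-in, not the driver's field):

	#include <linux/pci.h>

	/*
	 * Sketch only: park a PCI function in D3hot, arming wake-from-D3
	 * only when Wake-on-LAN (or similar) was requested. 'wol_enabled'
	 * stands in for whatever per-device flag records that request.
	 */
	static void example_power_down(struct pci_dev *pdev, bool wol_enabled)
	{
		/* Arm (or disarm) PME before changing the power state. */
		pci_wake_from_d3(pdev, wol_enabled);

		/* Finally drop the function into D3hot. */
		pci_set_power_state(pdev, PCI_D3hot);
	}
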
2787 | static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) | 2887 | static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) |
@@ -2831,93 +2931,130 @@ static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 | |||
2831 | } | 2931 | } |
2832 | } | 2932 | } |
2833 | 2933 | ||
2934 | static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) | ||
2935 | { | ||
2936 | int err = 0; | ||
2937 | u32 val, new_adv; | ||
2938 | |||
2939 | new_adv = ADVERTISE_CSMA; | ||
2940 | if (advertise & ADVERTISED_10baseT_Half) | ||
2941 | new_adv |= ADVERTISE_10HALF; | ||
2942 | if (advertise & ADVERTISED_10baseT_Full) | ||
2943 | new_adv |= ADVERTISE_10FULL; | ||
2944 | if (advertise & ADVERTISED_100baseT_Half) | ||
2945 | new_adv |= ADVERTISE_100HALF; | ||
2946 | if (advertise & ADVERTISED_100baseT_Full) | ||
2947 | new_adv |= ADVERTISE_100FULL; | ||
2948 | |||
2949 | new_adv |= tg3_advert_flowctrl_1000T(flowctrl); | ||
2950 | |||
2951 | err = tg3_writephy(tp, MII_ADVERTISE, new_adv); | ||
2952 | if (err) | ||
2953 | goto done; | ||
2954 | |||
2955 | if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) | ||
2956 | goto done; | ||
2957 | |||
2958 | new_adv = 0; | ||
2959 | if (advertise & ADVERTISED_1000baseT_Half) | ||
2960 | new_adv |= MII_TG3_CTRL_ADV_1000_HALF; | ||
2961 | if (advertise & ADVERTISED_1000baseT_Full) | ||
2962 | new_adv |= MII_TG3_CTRL_ADV_1000_FULL; | ||
2963 | |||
2964 | if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || | ||
2965 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) | ||
2966 | new_adv |= (MII_TG3_CTRL_AS_MASTER | | ||
2967 | MII_TG3_CTRL_ENABLE_AS_MASTER); | ||
2968 | |||
2969 | err = tg3_writephy(tp, MII_TG3_CTRL, new_adv); | ||
2970 | if (err) | ||
2971 | goto done; | ||
2972 | |||
2973 | if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) | ||
2974 | goto done; | ||
2975 | |||
2976 | tw32(TG3_CPMU_EEE_MODE, | ||
2977 | tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); | ||
2978 | |||
2979 | err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); | ||
2980 | if (!err) { | ||
2981 | u32 err2; | ||
2982 | |||
2983 | switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { | ||
2984 | case ASIC_REV_5717: | ||
2985 | case ASIC_REV_57765: | ||
2986 | if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) | ||
2987 | tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | | ||
2988 | MII_TG3_DSP_CH34TP2_HIBW01); | ||
2989 | /* Fall through */ | ||
2990 | case ASIC_REV_5719: | ||
2991 | val = MII_TG3_DSP_TAP26_ALNOKO | | ||
2992 | MII_TG3_DSP_TAP26_RMRXSTO | | ||
2993 | MII_TG3_DSP_TAP26_OPCSINPT; | ||
2994 | tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); | ||
2995 | } | ||
2996 | |||
2997 | val = 0; | ||
2998 | /* Advertise 100-BaseTX EEE ability */ | ||
2999 | if (advertise & ADVERTISED_100baseT_Full) | ||
3000 | val |= MDIO_AN_EEE_ADV_100TX; | ||
3001 | /* Advertise 1000-BaseT EEE ability */ | ||
3002 | if (advertise & ADVERTISED_1000baseT_Full) | ||
3003 | val |= MDIO_AN_EEE_ADV_1000T; | ||
3004 | err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); | ||
3005 | |||
3006 | err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); | ||
3007 | if (!err) | ||
3008 | err = err2; | ||
3009 | } | ||
3010 | |||
3011 | done: | ||
3012 | return err; | ||
3013 | } | ||
3014 | |||
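
tg3_phy_autoneg_cfg() above ends by pushing the EEE advertisement through tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val), a helper defined elsewhere in the patch. As background, Clause 45 registers are normally reached from a Clause 22 MDIO bus via the indirect-access registers 13 and 14 (IEEE 802.3 Annex 22D). A rough sketch of that sequence built on tg3_writephy(), using locally defined register numbers rather than assuming any kernel macros; the patch's real helper may differ in details:

	/* Clause 22 indirect-access registers (IEEE 802.3 Annex 22D), defined
	 * locally for this sketch.
	 */
	#define EX_MMD_ACCESS_CTRL	0x0d	/* devad + access function */
	#define EX_MMD_ACCESS_DATA	0x0e	/* address, then data */
	#define EX_MMD_FUNC_ADDR	0x0000	/* next reg-14 write is an address */
	#define EX_MMD_FUNC_DATA	0x4000	/* next reg-14 access is data */

	static int example_cl45_write(struct tg3 *tp, u32 devad, u32 reg, u32 val)
	{
		int err;

		err = tg3_writephy(tp, EX_MMD_ACCESS_CTRL, EX_MMD_FUNC_ADDR | devad);
		if (!err)
			err = tg3_writephy(tp, EX_MMD_ACCESS_DATA, reg);
		if (!err)
			err = tg3_writephy(tp, EX_MMD_ACCESS_CTRL, EX_MMD_FUNC_DATA | devad);
		if (!err)
			err = tg3_writephy(tp, EX_MMD_ACCESS_DATA, val);
		return err;
	}

With that in place, writing MDIO_AN_EEE_ADV in the MDIO_MMD_AN device selects which EEE modes may be negotiated, which is exactly what the 100TX/1000T advertisement bits assembled above control.
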
2834 | static void tg3_phy_copper_begin(struct tg3 *tp) | 3015 | static void tg3_phy_copper_begin(struct tg3 *tp) |
2835 | { | 3016 | { |
2836 | u32 new_adv; | 3017 | u32 new_adv; |
2837 | int i; | 3018 | int i; |
2838 | 3019 | ||
2839 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { | 3020 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { |
2840 | /* Entering low power mode. Disable gigabit and | 3021 | new_adv = ADVERTISED_10baseT_Half | |
2841 | * 100baseT advertisements. | 3022 | ADVERTISED_10baseT_Full; |
2842 | */ | 3023 | if (tg3_flag(tp, WOL_SPEED_100MB)) |
2843 | tg3_writephy(tp, MII_TG3_CTRL, 0); | 3024 | new_adv |= ADVERTISED_100baseT_Half | |
2844 | 3025 | ADVERTISED_100baseT_Full; | |
2845 | new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL | | 3026 | |
2846 | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); | 3027 | tg3_phy_autoneg_cfg(tp, new_adv, |
2847 | if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) | 3028 | FLOW_CTRL_TX | FLOW_CTRL_RX); |
2848 | new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL); | ||
2849 | |||
2850 | tg3_writephy(tp, MII_ADVERTISE, new_adv); | ||
2851 | } else if (tp->link_config.speed == SPEED_INVALID) { | 3029 | } else if (tp->link_config.speed == SPEED_INVALID) { |
2852 | if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) | 3030 | if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) |
2853 | tp->link_config.advertising &= | 3031 | tp->link_config.advertising &= |
2854 | ~(ADVERTISED_1000baseT_Half | | 3032 | ~(ADVERTISED_1000baseT_Half | |
2855 | ADVERTISED_1000baseT_Full); | 3033 | ADVERTISED_1000baseT_Full); |
2856 | 3034 | ||
2857 | new_adv = ADVERTISE_CSMA; | 3035 | tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, |
2858 | if (tp->link_config.advertising & ADVERTISED_10baseT_Half) | 3036 | tp->link_config.flowctrl); |
2859 | new_adv |= ADVERTISE_10HALF; | ||
2860 | if (tp->link_config.advertising & ADVERTISED_10baseT_Full) | ||
2861 | new_adv |= ADVERTISE_10FULL; | ||
2862 | if (tp->link_config.advertising & ADVERTISED_100baseT_Half) | ||
2863 | new_adv |= ADVERTISE_100HALF; | ||
2864 | if (tp->link_config.advertising & ADVERTISED_100baseT_Full) | ||
2865 | new_adv |= ADVERTISE_100FULL; | ||
2866 | |||
2867 | new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); | ||
2868 | |||
2869 | tg3_writephy(tp, MII_ADVERTISE, new_adv); | ||
2870 | |||
2871 | if (tp->link_config.advertising & | ||
2872 | (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) { | ||
2873 | new_adv = 0; | ||
2874 | if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) | ||
2875 | new_adv |= MII_TG3_CTRL_ADV_1000_HALF; | ||
2876 | if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) | ||
2877 | new_adv |= MII_TG3_CTRL_ADV_1000_FULL; | ||
2878 | if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) && | ||
2879 | (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || | ||
2880 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) | ||
2881 | new_adv |= (MII_TG3_CTRL_AS_MASTER | | ||
2882 | MII_TG3_CTRL_ENABLE_AS_MASTER); | ||
2883 | tg3_writephy(tp, MII_TG3_CTRL, new_adv); | ||
2884 | } else { | ||
2885 | tg3_writephy(tp, MII_TG3_CTRL, 0); | ||
2886 | } | ||
2887 | } else { | 3037 | } else { |
2888 | new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); | ||
2889 | new_adv |= ADVERTISE_CSMA; | ||
2890 | |||
2891 | /* Asking for a specific link mode. */ | 3038 | /* Asking for a specific link mode. */ |
2892 | if (tp->link_config.speed == SPEED_1000) { | 3039 | if (tp->link_config.speed == SPEED_1000) { |
2893 | tg3_writephy(tp, MII_ADVERTISE, new_adv); | ||
2894 | |||
2895 | if (tp->link_config.duplex == DUPLEX_FULL) | 3040 | if (tp->link_config.duplex == DUPLEX_FULL) |
2896 | new_adv = MII_TG3_CTRL_ADV_1000_FULL; | 3041 | new_adv = ADVERTISED_1000baseT_Full; |
3042 | else | ||
3043 | new_adv = ADVERTISED_1000baseT_Half; | ||
3044 | } else if (tp->link_config.speed == SPEED_100) { | ||
3045 | if (tp->link_config.duplex == DUPLEX_FULL) | ||
3046 | new_adv = ADVERTISED_100baseT_Full; | ||
2897 | else | 3047 | else |
2898 | new_adv = MII_TG3_CTRL_ADV_1000_HALF; | 3048 | new_adv = ADVERTISED_100baseT_Half; |
2899 | if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || | ||
2900 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) | ||
2901 | new_adv |= (MII_TG3_CTRL_AS_MASTER | | ||
2902 | MII_TG3_CTRL_ENABLE_AS_MASTER); | ||
2903 | } else { | 3049 | } else { |
2904 | if (tp->link_config.speed == SPEED_100) { | 3050 | if (tp->link_config.duplex == DUPLEX_FULL) |
2905 | if (tp->link_config.duplex == DUPLEX_FULL) | 3051 | new_adv = ADVERTISED_10baseT_Full; |
2906 | new_adv |= ADVERTISE_100FULL; | 3052 | else |
2907 | else | 3053 | new_adv = ADVERTISED_10baseT_Half; |
2908 | new_adv |= ADVERTISE_100HALF; | ||
2909 | } else { | ||
2910 | if (tp->link_config.duplex == DUPLEX_FULL) | ||
2911 | new_adv |= ADVERTISE_10FULL; | ||
2912 | else | ||
2913 | new_adv |= ADVERTISE_10HALF; | ||
2914 | } | ||
2915 | tg3_writephy(tp, MII_ADVERTISE, new_adv); | ||
2916 | |||
2917 | new_adv = 0; | ||
2918 | } | 3054 | } |
2919 | 3055 | ||
2920 | tg3_writephy(tp, MII_TG3_CTRL, new_adv); | 3056 | tg3_phy_autoneg_cfg(tp, new_adv, |
3057 | tp->link_config.flowctrl); | ||
2921 | } | 3058 | } |
2922 | 3059 | ||
2923 | if (tp->link_config.autoneg == AUTONEG_DISABLE && | 3060 | if (tp->link_config.autoneg == AUTONEG_DISABLE && |
@@ -2975,7 +3112,7 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp) | |||
2975 | 3112 | ||
2976 | /* Turn off tap power management. */ | 3113 | /* Turn off tap power management. */ |
2977 | /* Set Extended packet length bit */ | 3114 | /* Set Extended packet length bit */ |
2978 | err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); | 3115 | err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); |
2979 | 3116 | ||
2980 | err |= tg3_phydsp_write(tp, 0x0012, 0x1804); | 3117 | err |= tg3_phydsp_write(tp, 0x0012, 0x1804); |
2981 | err |= tg3_phydsp_write(tp, 0x0013, 0x1204); | 3118 | err |= tg3_phydsp_write(tp, 0x0013, 0x1204); |
@@ -3038,7 +3175,7 @@ static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv) | |||
3038 | if (curadv != reqadv) | 3175 | if (curadv != reqadv) |
3039 | return 0; | 3176 | return 0; |
3040 | 3177 | ||
3041 | if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) | 3178 | if (tg3_flag(tp, PAUSE_AUTONEG)) |
3042 | tg3_readphy(tp, MII_LPA, rmtadv); | 3179 | tg3_readphy(tp, MII_LPA, rmtadv); |
3043 | } else { | 3180 | } else { |
3044 | /* Reprogram the advertisement register, even if it | 3181 | /* Reprogram the advertisement register, even if it |
@@ -3060,7 +3197,7 @@ static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv) | |||
3060 | static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) | 3197 | static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) |
3061 | { | 3198 | { |
3062 | int current_link_up; | 3199 | int current_link_up; |
3063 | u32 bmsr, dummy; | 3200 | u32 bmsr, val; |
3064 | u32 lcl_adv, rmt_adv; | 3201 | u32 lcl_adv, rmt_adv; |
3065 | u16 current_speed; | 3202 | u16 current_speed; |
3066 | u8 current_duplex; | 3203 | u8 current_duplex; |
@@ -3081,7 +3218,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) | |||
3081 | udelay(80); | 3218 | udelay(80); |
3082 | } | 3219 | } |
3083 | 3220 | ||
3084 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02); | 3221 | tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); |
3085 | 3222 | ||
3086 | /* Some third-party PHYs need to be reset on link going | 3223 | /* Some third-party PHYs need to be reset on link going |
3087 | * down. | 3224 | * down. |
@@ -3101,7 +3238,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) | |||
3101 | if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { | 3238 | if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { |
3102 | tg3_readphy(tp, MII_BMSR, &bmsr); | 3239 | tg3_readphy(tp, MII_BMSR, &bmsr); |
3103 | if (tg3_readphy(tp, MII_BMSR, &bmsr) || | 3240 | if (tg3_readphy(tp, MII_BMSR, &bmsr) || |
3104 | !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) | 3241 | !tg3_flag(tp, INIT_COMPLETE)) |
3105 | bmsr = 0; | 3242 | bmsr = 0; |
3106 | 3243 | ||
3107 | if (!(bmsr & BMSR_LSTATUS)) { | 3244 | if (!(bmsr & BMSR_LSTATUS)) { |
@@ -3140,8 +3277,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) | |||
3140 | } | 3277 | } |
3141 | 3278 | ||
3142 | /* Clear pending interrupts... */ | 3279 | /* Clear pending interrupts... */ |
3143 | tg3_readphy(tp, MII_TG3_ISTAT, &dummy); | 3280 | tg3_readphy(tp, MII_TG3_ISTAT, &val); |
3144 | tg3_readphy(tp, MII_TG3_ISTAT, &dummy); | 3281 | tg3_readphy(tp, MII_TG3_ISTAT, &val); |
3145 | 3282 | ||
3146 | if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) | 3283 | if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) |
3147 | tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); | 3284 | tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); |
@@ -3162,13 +3299,13 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) | |||
3162 | current_duplex = DUPLEX_INVALID; | 3299 | current_duplex = DUPLEX_INVALID; |
3163 | 3300 | ||
3164 | if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { | 3301 | if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { |
3165 | u32 val; | 3302 | err = tg3_phy_auxctl_read(tp, |
3166 | 3303 | MII_TG3_AUXCTL_SHDWSEL_MISCTEST, | |
3167 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007); | 3304 | &val); |
3168 | tg3_readphy(tp, MII_TG3_AUX_CTRL, &val); | 3305 | if (!err && !(val & (1 << 10))) { |
3169 | if (!(val & (1 << 10))) { | 3306 | tg3_phy_auxctl_write(tp, |
3170 | val |= (1 << 10); | 3307 | MII_TG3_AUXCTL_SHDWSEL_MISCTEST, |
3171 | tg3_writephy(tp, MII_TG3_AUX_CTRL, val); | 3308 | val | (1 << 10)); |
3172 | goto relink; | 3309 | goto relink; |
3173 | } | 3310 | } |
3174 | } | 3311 | } |
@@ -3238,13 +3375,11 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) | |||
3238 | 3375 | ||
3239 | relink: | 3376 | relink: |
3240 | if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { | 3377 | if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { |
3241 | u32 tmp; | ||
3242 | |||
3243 | tg3_phy_copper_begin(tp); | 3378 | tg3_phy_copper_begin(tp); |
3244 | 3379 | ||
3245 | tg3_readphy(tp, MII_BMSR, &tmp); | 3380 | tg3_readphy(tp, MII_BMSR, &bmsr); |
3246 | if (!tg3_readphy(tp, MII_BMSR, &tmp) && | 3381 | if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || |
3247 | (tmp & BMSR_LSTATUS)) | 3382 | (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) |
3248 | current_link_up = 1; | 3383 | current_link_up = 1; |
3249 | } | 3384 | } |
3250 | 3385 | ||
@@ -3285,7 +3420,9 @@ relink: | |||
3285 | tw32_f(MAC_MODE, tp->mac_mode); | 3420 | tw32_f(MAC_MODE, tp->mac_mode); |
3286 | udelay(40); | 3421 | udelay(40); |
3287 | 3422 | ||
3288 | if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { | 3423 | tg3_phy_eee_adjust(tp, current_link_up); |
3424 | |||
3425 | if (tg3_flag(tp, USE_LINKCHG_REG)) { | ||
3289 | /* Polled via timer. */ | 3426 | /* Polled via timer. */ |
3290 | tw32_f(MAC_EVENT, 0); | 3427 | tw32_f(MAC_EVENT, 0); |
3291 | } else { | 3428 | } else { |
@@ -3296,8 +3433,7 @@ relink: | |||
3296 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && | 3433 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && |
3297 | current_link_up == 1 && | 3434 | current_link_up == 1 && |
3298 | tp->link_config.active_speed == SPEED_1000 && | 3435 | tp->link_config.active_speed == SPEED_1000 && |
3299 | ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) || | 3436 | (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { |
3300 | (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) { | ||
3301 | udelay(120); | 3437 | udelay(120); |
3302 | tw32_f(MAC_STATUS, | 3438 | tw32_f(MAC_STATUS, |
3303 | (MAC_STATUS_SYNC_CHANGED | | 3439 | (MAC_STATUS_SYNC_CHANGED | |
@@ -3309,7 +3445,7 @@ relink: | |||
3309 | } | 3445 | } |
3310 | 3446 | ||
3311 | /* Prevent send BD corruption. */ | 3447 | /* Prevent send BD corruption. */ |
3312 | if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) { | 3448 | if (tg3_flag(tp, CLKREQ_BUG)) { |
3313 | u16 oldlnkctl, newlnkctl; | 3449 | u16 oldlnkctl, newlnkctl; |
3314 | 3450 | ||
3315 | pci_read_config_word(tp->pdev, | 3451 | pci_read_config_word(tp->pdev, |
@@ -3704,7 +3840,7 @@ static void tg3_init_bcm8002(struct tg3 *tp) | |||
3704 | int i; | 3840 | int i; |
3705 | 3841 | ||
3706 | /* Reset when initting first time or we have a link. */ | 3842 | /* Reset when initting first time or we have a link. */ |
3707 | if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) && | 3843 | if (tg3_flag(tp, INIT_COMPLETE) && |
3708 | !(mac_status & MAC_STATUS_PCS_SYNCED)) | 3844 | !(mac_status & MAC_STATUS_PCS_SYNCED)) |
3709 | return; | 3845 | return; |
3710 | 3846 | ||
@@ -3965,9 +4101,9 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) | |||
3965 | orig_active_speed = tp->link_config.active_speed; | 4101 | orig_active_speed = tp->link_config.active_speed; |
3966 | orig_active_duplex = tp->link_config.active_duplex; | 4102 | orig_active_duplex = tp->link_config.active_duplex; |
3967 | 4103 | ||
3968 | if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) && | 4104 | if (!tg3_flag(tp, HW_AUTONEG) && |
3969 | netif_carrier_ok(tp->dev) && | 4105 | netif_carrier_ok(tp->dev) && |
3970 | (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) { | 4106 | tg3_flag(tp, INIT_COMPLETE)) { |
3971 | mac_status = tr32(MAC_STATUS); | 4107 | mac_status = tr32(MAC_STATUS); |
3972 | mac_status &= (MAC_STATUS_PCS_SYNCED | | 4108 | mac_status &= (MAC_STATUS_PCS_SYNCED | |
3973 | MAC_STATUS_SIGNAL_DET | | 4109 | MAC_STATUS_SIGNAL_DET | |
@@ -3998,7 +4134,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) | |||
3998 | current_link_up = 0; | 4134 | current_link_up = 0; |
3999 | mac_status = tr32(MAC_STATUS); | 4135 | mac_status = tr32(MAC_STATUS); |
4000 | 4136 | ||
4001 | if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) | 4137 | if (tg3_flag(tp, HW_AUTONEG)) |
4002 | current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); | 4138 | current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); |
4003 | else | 4139 | else |
4004 | current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); | 4140 | current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); |
@@ -4197,7 +4333,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) | |||
4197 | current_duplex = DUPLEX_FULL; | 4333 | current_duplex = DUPLEX_FULL; |
4198 | else | 4334 | else |
4199 | current_duplex = DUPLEX_HALF; | 4335 | current_duplex = DUPLEX_HALF; |
4200 | } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { | 4336 | } else if (!tg3_flag(tp, 5780_CLASS)) { |
4201 | /* Link is up via parallel detect */ | 4337 | /* Link is up via parallel detect */ |
4202 | } else { | 4338 | } else { |
4203 | current_link_up = 0; | 4339 | current_link_up = 0; |
@@ -4294,6 +4430,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp) | |||
4294 | 4430 | ||
4295 | static int tg3_setup_phy(struct tg3 *tp, int force_reset) | 4431 | static int tg3_setup_phy(struct tg3 *tp, int force_reset) |
4296 | { | 4432 | { |
4433 | u32 val; | ||
4297 | int err; | 4434 | int err; |
4298 | 4435 | ||
4299 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) | 4436 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) |
@@ -4304,7 +4441,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset) | |||
4304 | err = tg3_setup_copper_phy(tp, force_reset); | 4441 | err = tg3_setup_copper_phy(tp, force_reset); |
4305 | 4442 | ||
4306 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { | 4443 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { |
4307 | u32 val, scale; | 4444 | u32 scale; |
4308 | 4445 | ||
4309 | val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; | 4446 | val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; |
4310 | if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) | 4447 | if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) |
@@ -4319,19 +4456,22 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset) | |||
4319 | tw32(GRC_MISC_CFG, val); | 4456 | tw32(GRC_MISC_CFG, val); |
4320 | } | 4457 | } |
4321 | 4458 | ||
4459 | val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | | ||
4460 | (6 << TX_LENGTHS_IPG_SHIFT); | ||
4461 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) | ||
4462 | val |= tr32(MAC_TX_LENGTHS) & | ||
4463 | (TX_LENGTHS_JMB_FRM_LEN_MSK | | ||
4464 | TX_LENGTHS_CNT_DWN_VAL_MSK); | ||
4465 | |||
4322 | if (tp->link_config.active_speed == SPEED_1000 && | 4466 | if (tp->link_config.active_speed == SPEED_1000 && |
4323 | tp->link_config.active_duplex == DUPLEX_HALF) | 4467 | tp->link_config.active_duplex == DUPLEX_HALF) |
4324 | tw32(MAC_TX_LENGTHS, | 4468 | tw32(MAC_TX_LENGTHS, val | |
4325 | ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | | 4469 | (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); |
4326 | (6 << TX_LENGTHS_IPG_SHIFT) | | ||
4327 | (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); | ||
4328 | else | 4470 | else |
4329 | tw32(MAC_TX_LENGTHS, | 4471 | tw32(MAC_TX_LENGTHS, val | |
4330 | ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | | 4472 | (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); |
4331 | (6 << TX_LENGTHS_IPG_SHIFT) | | ||
4332 | (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); | ||
4333 | 4473 | ||
4334 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 4474 | if (!tg3_flag(tp, 5705_PLUS)) { |
4335 | if (netif_carrier_ok(tp->dev)) { | 4475 | if (netif_carrier_ok(tp->dev)) { |
4336 | tw32(HOSTCC_STAT_COAL_TICKS, | 4476 | tw32(HOSTCC_STAT_COAL_TICKS, |
4337 | tp->coal.stats_block_coalesce_usecs); | 4477 | tp->coal.stats_block_coalesce_usecs); |
@@ -4340,8 +4480,8 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset) | |||
4340 | } | 4480 | } |
4341 | } | 4481 | } |
4342 | 4482 | ||
4343 | if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) { | 4483 | if (tg3_flag(tp, ASPM_WORKAROUND)) { |
4344 | u32 val = tr32(PCIE_PWR_MGMT_THRESH); | 4484 | val = tr32(PCIE_PWR_MGMT_THRESH); |
4345 | if (!netif_carrier_ok(tp->dev)) | 4485 | if (!netif_carrier_ok(tp->dev)) |
4346 | val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | | 4486 | val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | |
4347 | tp->pwrmgmt_thresh; | 4487 | tp->pwrmgmt_thresh; |
@@ -4353,6 +4493,128 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset) | |||
4353 | return err; | 4493 | return err; |
4354 | } | 4494 | } |
4355 | 4495 | ||
4496 | static inline int tg3_irq_sync(struct tg3 *tp) | ||
4497 | { | ||
4498 | return tp->irq_sync; | ||
4499 | } | ||
4500 | |||
4501 | static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) | ||
4502 | { | ||
4503 | int i; | ||
4504 | |||
4505 | dst = (u32 *)((u8 *)dst + off); | ||
4506 | for (i = 0; i < len; i += sizeof(u32)) | ||
4507 | *dst++ = tr32(off + i); | ||
4508 | } | ||
4509 | |||
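
One detail of tg3_rd32_loop() worth noting: dst is advanced by the register offset before the copy, so entry off/4 + i of the dump buffer holds the register at MMIO offset off + 4*i. The buffer therefore mirrors the register map (untouched gaps stay zero thanks to the caller's kzalloc()) rather than being densely packed. A standalone toy model of that layout, with an array standing in for MMIO:

	/* Toy model only, plain user-space C, no hardware access. */
	#include <stdio.h>
	#include <string.h>

	#define REG_SPACE_WORDS 16u

	static unsigned int fake_mmio[REG_SPACE_WORDS];	/* stands in for tr32() */

	static void rd32_loop(unsigned int *dst, unsigned int off, unsigned int len)
	{
		unsigned int i;

		/* Pre-advance the destination by the register offset. */
		dst = (unsigned int *)((unsigned char *)dst + off);
		for (i = 0; i < len; i += sizeof(unsigned int))
			*dst++ = fake_mmio[(off + i) / sizeof(unsigned int)];
	}

	int main(void)
	{
		unsigned int dump[REG_SPACE_WORDS];
		unsigned int i;

		for (i = 0; i < REG_SPACE_WORDS; i++)
			fake_mmio[i] = 0x1000 + i;

		memset(dump, 0, sizeof(dump));
		rd32_loop(dump, 0x08, 8);	/* copy two words starting at offset 8 */

		/* dump[2] and dump[3] now hold the registers at offsets 8 and 12;
		 * everything else stays zero, mirroring the register map layout.
		 */
		for (i = 0; i < REG_SPACE_WORDS; i++)
			printf("dump[%2u] = 0x%04x\n", i, dump[i]);
		return 0;
	}
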
4510 | static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) | ||
4511 | { | ||
4512 | tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); | ||
4513 | tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); | ||
4514 | tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); | ||
4515 | tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); | ||
4516 | tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); | ||
4517 | tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); | ||
4518 | tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); | ||
4519 | tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); | ||
4520 | tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); | ||
4521 | tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); | ||
4522 | tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); | ||
4523 | tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); | ||
4524 | tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); | ||
4525 | tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); | ||
4526 | tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); | ||
4527 | tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); | ||
4528 | tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); | ||
4529 | tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); | ||
4530 | tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); | ||
4531 | |||
4532 | if (tg3_flag(tp, SUPPORT_MSIX)) | ||
4533 | tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); | ||
4534 | |||
4535 | tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); | ||
4536 | tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); | ||
4537 | tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); | ||
4538 | tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); | ||
4539 | tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); | ||
4540 | tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); | ||
4541 | tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); | ||
4542 | tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); | ||
4543 | |||
4544 | if (!tg3_flag(tp, 5705_PLUS)) { | ||
4545 | tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); | ||
4546 | tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); | ||
4547 | tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); | ||
4548 | } | ||
4549 | |||
4550 | tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); | ||
4551 | tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); | ||
4552 | tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); | ||
4553 | tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); | ||
4554 | tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); | ||
4555 | |||
4556 | if (tg3_flag(tp, NVRAM)) | ||
4557 | tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); | ||
4558 | } | ||
4559 | |||
4560 | static void tg3_dump_state(struct tg3 *tp) | ||
4561 | { | ||
4562 | int i; | ||
4563 | u32 *regs; | ||
4564 | |||
4565 | regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); | ||
4566 | if (!regs) { | ||
4567 | netdev_err(tp->dev, "Failed allocating register dump buffer\n"); | ||
4568 | return; | ||
4569 | } | ||
4570 | |||
4571 | if (tg3_flag(tp, PCI_EXPRESS)) { | ||
4572 | /* Read up to but not including private PCI registers */ | ||
4573 | for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) | ||
4574 | regs[i / sizeof(u32)] = tr32(i); | ||
4575 | } else | ||
4576 | tg3_dump_legacy_regs(tp, regs); | ||
4577 | |||
4578 | for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { | ||
4579 | if (!regs[i + 0] && !regs[i + 1] && | ||
4580 | !regs[i + 2] && !regs[i + 3]) | ||
4581 | continue; | ||
4582 | |||
4583 | netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", | ||
4584 | i * 4, | ||
4585 | regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); | ||
4586 | } | ||
4587 | |||
4588 | kfree(regs); | ||
4589 | |||
4590 | for (i = 0; i < tp->irq_cnt; i++) { | ||
4591 | struct tg3_napi *tnapi = &tp->napi[i]; | ||
4592 | |||
4593 | /* SW status block */ | ||
4594 | netdev_err(tp->dev, | ||
4595 | "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", | ||
4596 | i, | ||
4597 | tnapi->hw_status->status, | ||
4598 | tnapi->hw_status->status_tag, | ||
4599 | tnapi->hw_status->rx_jumbo_consumer, | ||
4600 | tnapi->hw_status->rx_consumer, | ||
4601 | tnapi->hw_status->rx_mini_consumer, | ||
4602 | tnapi->hw_status->idx[0].rx_producer, | ||
4603 | tnapi->hw_status->idx[0].tx_consumer); | ||
4604 | |||
4605 | netdev_err(tp->dev, | ||
4606 | "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", | ||
4607 | i, | ||
4608 | tnapi->last_tag, tnapi->last_irq_tag, | ||
4609 | tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, | ||
4610 | tnapi->rx_rcb_ptr, | ||
4611 | tnapi->prodring.rx_std_prod_idx, | ||
4612 | tnapi->prodring.rx_std_cons_idx, | ||
4613 | tnapi->prodring.rx_jmb_prod_idx, | ||
4614 | tnapi->prodring.rx_jmb_cons_idx); | ||
4615 | } | ||
4616 | } | ||
4617 | |||
4356 | /* This is called whenever we suspect that the system chipset is re- | 4618 | /* This is called whenever we suspect that the system chipset is re- |
4357 | * ordering the sequence of MMIO to the tx send mailbox. The symptom | 4619 | * ordering the sequence of MMIO to the tx send mailbox. The symptom |
4358 | * is bogus tx completions. We try to recover by setting the | 4620 | * is bogus tx completions. We try to recover by setting the |
@@ -4361,7 +4623,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset) | |||
4361 | */ | 4623 | */ |
4362 | static void tg3_tx_recover(struct tg3 *tp) | 4624 | static void tg3_tx_recover(struct tg3 *tp) |
4363 | { | 4625 | { |
4364 | BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) || | 4626 | BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || |
4365 | tp->write32_tx_mbox == tg3_write_indirect_mbox); | 4627 | tp->write32_tx_mbox == tg3_write_indirect_mbox); |
4366 | 4628 | ||
4367 | netdev_warn(tp->dev, | 4629 | netdev_warn(tp->dev, |
@@ -4371,7 +4633,7 @@ static void tg3_tx_recover(struct tg3 *tp) | |||
4371 | "and include system chipset information.\n"); | 4633 | "and include system chipset information.\n"); |
4372 | 4634 | ||
4373 | spin_lock(&tp->lock); | 4635 | spin_lock(&tp->lock); |
4374 | tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING; | 4636 | tg3_flag_set(tp, TX_RECOVERY_PENDING); |
4375 | spin_unlock(&tp->lock); | 4637 | spin_unlock(&tp->lock); |
4376 | } | 4638 | } |
4377 | 4639 | ||
@@ -4395,7 +4657,7 @@ static void tg3_tx(struct tg3_napi *tnapi) | |||
4395 | struct netdev_queue *txq; | 4657 | struct netdev_queue *txq; |
4396 | int index = tnapi - tp->napi; | 4658 | int index = tnapi - tp->napi; |
4397 | 4659 | ||
4398 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) | 4660 | if (tg3_flag(tp, ENABLE_TSS)) |
4399 | index--; | 4661 | index--; |
4400 | 4662 | ||
4401 | txq = netdev_get_tx_queue(tp->dev, index); | 4663 | txq = netdev_get_tx_queue(tp->dev, index); |
@@ -4484,22 +4746,21 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, | |||
4484 | u32 opaque_key, u32 dest_idx_unmasked) | 4746 | u32 opaque_key, u32 dest_idx_unmasked) |
4485 | { | 4747 | { |
4486 | struct tg3_rx_buffer_desc *desc; | 4748 | struct tg3_rx_buffer_desc *desc; |
4487 | struct ring_info *map, *src_map; | 4749 | struct ring_info *map; |
4488 | struct sk_buff *skb; | 4750 | struct sk_buff *skb; |
4489 | dma_addr_t mapping; | 4751 | dma_addr_t mapping; |
4490 | int skb_size, dest_idx; | 4752 | int skb_size, dest_idx; |
4491 | 4753 | ||
4492 | src_map = NULL; | ||
4493 | switch (opaque_key) { | 4754 | switch (opaque_key) { |
4494 | case RXD_OPAQUE_RING_STD: | 4755 | case RXD_OPAQUE_RING_STD: |
4495 | dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; | 4756 | dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; |
4496 | desc = &tpr->rx_std[dest_idx]; | 4757 | desc = &tpr->rx_std[dest_idx]; |
4497 | map = &tpr->rx_std_buffers[dest_idx]; | 4758 | map = &tpr->rx_std_buffers[dest_idx]; |
4498 | skb_size = tp->rx_pkt_map_sz; | 4759 | skb_size = tp->rx_pkt_map_sz; |
4499 | break; | 4760 | break; |
4500 | 4761 | ||
4501 | case RXD_OPAQUE_RING_JUMBO: | 4762 | case RXD_OPAQUE_RING_JUMBO: |
4502 | dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; | 4763 | dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; |
4503 | desc = &tpr->rx_jmb[dest_idx].std; | 4764 | desc = &tpr->rx_jmb[dest_idx].std; |
4504 | map = &tpr->rx_jmb_buffers[dest_idx]; | 4765 | map = &tpr->rx_jmb_buffers[dest_idx]; |
4505 | skb_size = TG3_RX_JMB_MAP_SZ; | 4766 | skb_size = TG3_RX_JMB_MAP_SZ; |
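
The indexing change above (dest_idx_unmasked % TG3_RX_RING_SIZE becoming dest_idx_unmasked & tp->rx_std_ring_mask, and likewise for the jumbo ring) works because the ring sizes, now run-time values, are still powers of two: a cached mask of size - 1 gives the same result as the modulo while avoiding a division and a compile-time constant. A tiny standalone check of that equivalence:

	/* Standalone demonstration; assumes only that the ring size is a power of two. */
	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		const unsigned int ring_size = 512;		/* must be a power of two */
		const unsigned int ring_mask = ring_size - 1;	/* what tp->rx_*_ring_mask caches */
		unsigned int idx;

		for (idx = 0; idx < 4 * ring_size; idx++)
			assert((idx & ring_mask) == (idx % ring_size));

		printf("idx & (size - 1) matches idx %% size for all tested indices\n");
		return 0;
	}
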
@@ -4549,12 +4810,12 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, | |||
4549 | struct tg3 *tp = tnapi->tp; | 4810 | struct tg3 *tp = tnapi->tp; |
4550 | struct tg3_rx_buffer_desc *src_desc, *dest_desc; | 4811 | struct tg3_rx_buffer_desc *src_desc, *dest_desc; |
4551 | struct ring_info *src_map, *dest_map; | 4812 | struct ring_info *src_map, *dest_map; |
4552 | struct tg3_rx_prodring_set *spr = &tp->prodring[0]; | 4813 | struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring; |
4553 | int dest_idx; | 4814 | int dest_idx; |
4554 | 4815 | ||
4555 | switch (opaque_key) { | 4816 | switch (opaque_key) { |
4556 | case RXD_OPAQUE_RING_STD: | 4817 | case RXD_OPAQUE_RING_STD: |
4557 | dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; | 4818 | dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; |
4558 | dest_desc = &dpr->rx_std[dest_idx]; | 4819 | dest_desc = &dpr->rx_std[dest_idx]; |
4559 | dest_map = &dpr->rx_std_buffers[dest_idx]; | 4820 | dest_map = &dpr->rx_std_buffers[dest_idx]; |
4560 | src_desc = &spr->rx_std[src_idx]; | 4821 | src_desc = &spr->rx_std[src_idx]; |
@@ -4562,7 +4823,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, | |||
4562 | break; | 4823 | break; |
4563 | 4824 | ||
4564 | case RXD_OPAQUE_RING_JUMBO: | 4825 | case RXD_OPAQUE_RING_JUMBO: |
4565 | dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; | 4826 | dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; |
4566 | dest_desc = &dpr->rx_jmb[dest_idx].std; | 4827 | dest_desc = &dpr->rx_jmb[dest_idx].std; |
4567 | dest_map = &dpr->rx_jmb_buffers[dest_idx]; | 4828 | dest_map = &dpr->rx_jmb_buffers[dest_idx]; |
4568 | src_desc = &spr->rx_jmb[src_idx].std; | 4829 | src_desc = &spr->rx_jmb[src_idx].std; |
@@ -4619,7 +4880,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4619 | u32 sw_idx = tnapi->rx_rcb_ptr; | 4880 | u32 sw_idx = tnapi->rx_rcb_ptr; |
4620 | u16 hw_idx; | 4881 | u16 hw_idx; |
4621 | int received; | 4882 | int received; |
4622 | struct tg3_rx_prodring_set *tpr = tnapi->prodring; | 4883 | struct tg3_rx_prodring_set *tpr = &tnapi->prodring; |
4623 | 4884 | ||
4624 | hw_idx = *(tnapi->rx_rcb_prod_idx); | 4885 | hw_idx = *(tnapi->rx_rcb_prod_idx); |
4625 | /* | 4886 | /* |
@@ -4638,19 +4899,17 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4638 | struct sk_buff *skb; | 4899 | struct sk_buff *skb; |
4639 | dma_addr_t dma_addr; | 4900 | dma_addr_t dma_addr; |
4640 | u32 opaque_key, desc_idx, *post_ptr; | 4901 | u32 opaque_key, desc_idx, *post_ptr; |
4641 | bool hw_vlan __maybe_unused = false; | ||
4642 | u16 vtag __maybe_unused = 0; | ||
4643 | 4902 | ||
4644 | desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; | 4903 | desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; |
4645 | opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; | 4904 | opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; |
4646 | if (opaque_key == RXD_OPAQUE_RING_STD) { | 4905 | if (opaque_key == RXD_OPAQUE_RING_STD) { |
4647 | ri = &tp->prodring[0].rx_std_buffers[desc_idx]; | 4906 | ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; |
4648 | dma_addr = dma_unmap_addr(ri, mapping); | 4907 | dma_addr = dma_unmap_addr(ri, mapping); |
4649 | skb = ri->skb; | 4908 | skb = ri->skb; |
4650 | post_ptr = &std_prod_idx; | 4909 | post_ptr = &std_prod_idx; |
4651 | rx_std_posted++; | 4910 | rx_std_posted++; |
4652 | } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { | 4911 | } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { |
4653 | ri = &tp->prodring[0].rx_jmb_buffers[desc_idx]; | 4912 | ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; |
4654 | dma_addr = dma_unmap_addr(ri, mapping); | 4913 | dma_addr = dma_unmap_addr(ri, mapping); |
4655 | skb = ri->skb; | 4914 | skb = ri->skb; |
4656 | post_ptr = &jmb_prod_idx; | 4915 | post_ptr = &jmb_prod_idx; |
@@ -4698,12 +4957,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4698 | tg3_recycle_rx(tnapi, tpr, opaque_key, | 4957 | tg3_recycle_rx(tnapi, tpr, opaque_key, |
4699 | desc_idx, *post_ptr); | 4958 | desc_idx, *post_ptr); |
4700 | 4959 | ||
4701 | copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN + | 4960 | copy_skb = netdev_alloc_skb(tp->dev, len + |
4702 | TG3_RAW_IP_ALIGN); | 4961 | TG3_RAW_IP_ALIGN); |
4703 | if (copy_skb == NULL) | 4962 | if (copy_skb == NULL) |
4704 | goto drop_it_no_recycle; | 4963 | goto drop_it_no_recycle; |
4705 | 4964 | ||
4706 | skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN); | 4965 | skb_reserve(copy_skb, TG3_RAW_IP_ALIGN); |
4707 | skb_put(copy_skb, len); | 4966 | skb_put(copy_skb, len); |
4708 | pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); | 4967 | pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); |
4709 | skb_copy_from_linear_data(skb, copy_skb->data, len); | 4968 | skb_copy_from_linear_data(skb, copy_skb->data, len); |
@@ -4713,13 +4972,13 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4713 | skb = copy_skb; | 4972 | skb = copy_skb; |
4714 | } | 4973 | } |
4715 | 4974 | ||
4716 | if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) && | 4975 | if ((tp->dev->features & NETIF_F_RXCSUM) && |
4717 | (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && | 4976 | (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && |
4718 | (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) | 4977 | (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) |
4719 | >> RXD_TCPCSUM_SHIFT) == 0xffff)) | 4978 | >> RXD_TCPCSUM_SHIFT) == 0xffff)) |
4720 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 4979 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
4721 | else | 4980 | else |
4722 | skb->ip_summed = CHECKSUM_NONE; | 4981 | skb_checksum_none_assert(skb); |
4723 | 4982 | ||
4724 | skb->protocol = eth_type_trans(skb, tp->dev); | 4983 | skb->protocol = eth_type_trans(skb, tp->dev); |
4725 | 4984 | ||
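
The checksum test above now keys off the generic NETIF_F_RXCSUM feature bit instead of a private driver flag, with skb_checksum_none_assert() as the debug-friendly way of recording that nothing was verified. The counterpart, not visible in this hunk, is that the driver must advertise the feature at probe time so ethtool can toggle it; roughly, and only as an assumed illustration of the hw_features mechanism this conversion builds on:

	#include <linux/netdevice.h>

	/*
	 * Hedged sketch of the probe-time side: expose RX checksum offload as
	 * a user-toggleable feature and enable it by default. How tg3 actually
	 * wires this up is handled elsewhere in the patch.
	 */
	static void example_setup_rxcsum(struct net_device *dev)
	{
		dev->hw_features |= NETIF_F_RXCSUM;	/* may be toggled via ethtool -K */
		dev->features    |= NETIF_F_RXCSUM;	/* enabled by default */
	}
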
@@ -4730,30 +4989,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4730 | } | 4989 | } |
4731 | 4990 | ||
4732 | if (desc->type_flags & RXD_FLAG_VLAN && | 4991 | if (desc->type_flags & RXD_FLAG_VLAN && |
4733 | !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) { | 4992 | !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) |
4734 | vtag = desc->err_vlan & RXD_VLAN_MASK; | 4993 | __vlan_hwaccel_put_tag(skb, |
4735 | #if TG3_VLAN_TAG_USED | 4994 | desc->err_vlan & RXD_VLAN_MASK); |
4736 | if (tp->vlgrp) | ||
4737 | hw_vlan = true; | ||
4738 | else | ||
4739 | #endif | ||
4740 | { | ||
4741 | struct vlan_ethhdr *ve = (struct vlan_ethhdr *) | ||
4742 | __skb_push(skb, VLAN_HLEN); | ||
4743 | |||
4744 | memmove(ve, skb->data + VLAN_HLEN, | ||
4745 | ETH_ALEN * 2); | ||
4746 | ve->h_vlan_proto = htons(ETH_P_8021Q); | ||
4747 | ve->h_vlan_TCI = htons(vtag); | ||
4748 | } | ||
4749 | } | ||
4750 | 4995 | ||
4751 | #if TG3_VLAN_TAG_USED | 4996 | napi_gro_receive(&tnapi->napi, skb); |
4752 | if (hw_vlan) | ||
4753 | vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb); | ||
4754 | else | ||
4755 | #endif | ||
4756 | napi_gro_receive(&tnapi->napi, skb); | ||
4757 | 4997 | ||
4758 | received++; | 4998 | received++; |
4759 | budget--; | 4999 | budget--; |
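
The removed block used to splice the 802.1Q header back into the packet data whenever no vlan_group was registered. With hardware-accelerated VLAN handling the stripped tag is instead attached to the skb as metadata and the core stack decides whether to untag or re-insert it, which is why the whole #if TG3_VLAN_TAG_USED dance collapses into a single call. A minimal sketch of the receive-side idiom as it looked in this kernel generation (the two-argument __vlan_hwaccel_put_tag(); later kernels add a protocol argument):

	#include <linux/if_vlan.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/*
	 * Sketch only: deliver a received skb whose VLAN tag the NIC already
	 * stripped. 'tag_valid' and 'vlan_tci' stand in for whatever the RX
	 * descriptor reports.
	 */
	static void example_rx_deliver(struct napi_struct *napi, struct sk_buff *skb,
				       bool tag_valid, u16 vlan_tci)
	{
		if (tag_valid)
			__vlan_hwaccel_put_tag(skb, vlan_tci);	/* tag rides as metadata */

		napi_gro_receive(napi, skb);	/* stack untags or re-inserts as needed */
	}
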
@@ -4762,7 +5002,8 @@ next_pkt: | |||
4762 | (*post_ptr)++; | 5002 | (*post_ptr)++; |
4763 | 5003 | ||
4764 | if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { | 5004 | if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { |
4765 | tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; | 5005 | tpr->rx_std_prod_idx = std_prod_idx & |
5006 | tp->rx_std_ring_mask; | ||
4766 | tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, | 5007 | tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, |
4767 | tpr->rx_std_prod_idx); | 5008 | tpr->rx_std_prod_idx); |
4768 | work_mask &= ~RXD_OPAQUE_RING_STD; | 5009 | work_mask &= ~RXD_OPAQUE_RING_STD; |
@@ -4770,7 +5011,7 @@ next_pkt: | |||
4770 | } | 5011 | } |
4771 | next_pkt_nopost: | 5012 | next_pkt_nopost: |
4772 | sw_idx++; | 5013 | sw_idx++; |
4773 | sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1); | 5014 | sw_idx &= tp->rx_ret_ring_mask; |
4774 | 5015 | ||
4775 | /* Refresh hw_idx to see if there is new work */ | 5016 | /* Refresh hw_idx to see if there is new work */ |
4776 | if (sw_idx == hw_idx) { | 5017 | if (sw_idx == hw_idx) { |
@@ -4784,15 +5025,16 @@ next_pkt_nopost: | |||
4784 | tw32_rx_mbox(tnapi->consmbox, sw_idx); | 5025 | tw32_rx_mbox(tnapi->consmbox, sw_idx); |
4785 | 5026 | ||
4786 | /* Refill RX ring(s). */ | 5027 | /* Refill RX ring(s). */ |
4787 | if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) { | 5028 | if (!tg3_flag(tp, ENABLE_RSS)) { |
4788 | if (work_mask & RXD_OPAQUE_RING_STD) { | 5029 | if (work_mask & RXD_OPAQUE_RING_STD) { |
4789 | tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; | 5030 | tpr->rx_std_prod_idx = std_prod_idx & |
5031 | tp->rx_std_ring_mask; | ||
4790 | tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, | 5032 | tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, |
4791 | tpr->rx_std_prod_idx); | 5033 | tpr->rx_std_prod_idx); |
4792 | } | 5034 | } |
4793 | if (work_mask & RXD_OPAQUE_RING_JUMBO) { | 5035 | if (work_mask & RXD_OPAQUE_RING_JUMBO) { |
4794 | tpr->rx_jmb_prod_idx = jmb_prod_idx % | 5036 | tpr->rx_jmb_prod_idx = jmb_prod_idx & |
4795 | TG3_RX_JUMBO_RING_SIZE; | 5037 | tp->rx_jmb_ring_mask; |
4796 | tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, | 5038 | tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, |
4797 | tpr->rx_jmb_prod_idx); | 5039 | tpr->rx_jmb_prod_idx); |
4798 | } | 5040 | } |
@@ -4803,8 +5045,8 @@ next_pkt_nopost: | |||
4803 | */ | 5045 | */ |
4804 | smp_wmb(); | 5046 | smp_wmb(); |
4805 | 5047 | ||
4806 | tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; | 5048 | tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; |
4807 | tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE; | 5049 | tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; |
4808 | 5050 | ||
4809 | if (tnapi != &tp->napi[1]) | 5051 | if (tnapi != &tp->napi[1]) |
4810 | napi_schedule(&tp->napi[1].napi); | 5052 | napi_schedule(&tp->napi[1].napi); |
@@ -4816,16 +5058,14 @@ next_pkt_nopost: | |||
4816 | static void tg3_poll_link(struct tg3 *tp) | 5058 | static void tg3_poll_link(struct tg3 *tp) |
4817 | { | 5059 | { |
4818 | /* handle link change and other phy events */ | 5060 | /* handle link change and other phy events */ |
4819 | if (!(tp->tg3_flags & | 5061 | if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { |
4820 | (TG3_FLAG_USE_LINKCHG_REG | | ||
4821 | TG3_FLAG_POLL_SERDES))) { | ||
4822 | struct tg3_hw_status *sblk = tp->napi[0].hw_status; | 5062 | struct tg3_hw_status *sblk = tp->napi[0].hw_status; |
4823 | 5063 | ||
4824 | if (sblk->status & SD_STATUS_LINK_CHG) { | 5064 | if (sblk->status & SD_STATUS_LINK_CHG) { |
4825 | sblk->status = SD_STATUS_UPDATED | | 5065 | sblk->status = SD_STATUS_UPDATED | |
4826 | (sblk->status & ~SD_STATUS_LINK_CHG); | 5066 | (sblk->status & ~SD_STATUS_LINK_CHG); |
4827 | spin_lock(&tp->lock); | 5067 | spin_lock(&tp->lock); |
4828 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 5068 | if (tg3_flag(tp, USE_PHYLIB)) { |
4829 | tw32_f(MAC_STATUS, | 5069 | tw32_f(MAC_STATUS, |
4830 | (MAC_STATUS_SYNC_CHANGED | | 5070 | (MAC_STATUS_SYNC_CHANGED | |
4831 | MAC_STATUS_CFG_CHANGED | | 5071 | MAC_STATUS_CFG_CHANGED | |
@@ -4860,9 +5100,11 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp, | |||
4860 | if (spr->rx_std_cons_idx < src_prod_idx) | 5100 | if (spr->rx_std_cons_idx < src_prod_idx) |
4861 | cpycnt = src_prod_idx - spr->rx_std_cons_idx; | 5101 | cpycnt = src_prod_idx - spr->rx_std_cons_idx; |
4862 | else | 5102 | else |
4863 | cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx; | 5103 | cpycnt = tp->rx_std_ring_mask + 1 - |
5104 | spr->rx_std_cons_idx; | ||
4864 | 5105 | ||
4865 | cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx); | 5106 | cpycnt = min(cpycnt, |
5107 | tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); | ||
4866 | 5108 | ||
4867 | si = spr->rx_std_cons_idx; | 5109 | si = spr->rx_std_cons_idx; |
4868 | di = dpr->rx_std_prod_idx; | 5110 | di = dpr->rx_std_prod_idx; |
@@ -4896,10 +5138,10 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp, | |||
4896 | dbd->addr_lo = sbd->addr_lo; | 5138 | dbd->addr_lo = sbd->addr_lo; |
4897 | } | 5139 | } |
4898 | 5140 | ||
4899 | spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) % | 5141 | spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & |
4900 | TG3_RX_RING_SIZE; | 5142 | tp->rx_std_ring_mask; |
4901 | dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) % | 5143 | dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & |
4902 | TG3_RX_RING_SIZE; | 5144 | tp->rx_std_ring_mask; |
4903 | } | 5145 | } |
4904 | 5146 | ||
4905 | while (1) { | 5147 | while (1) { |
@@ -4916,10 +5158,11 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp, | |||
4916 | if (spr->rx_jmb_cons_idx < src_prod_idx) | 5158 | if (spr->rx_jmb_cons_idx < src_prod_idx) |
4917 | cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; | 5159 | cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; |
4918 | else | 5160 | else |
4919 | cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx; | 5161 | cpycnt = tp->rx_jmb_ring_mask + 1 - |
5162 | spr->rx_jmb_cons_idx; | ||
4920 | 5163 | ||
4921 | cpycnt = min(cpycnt, | 5164 | cpycnt = min(cpycnt, |
4922 | TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx); | 5165 | tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); |
4923 | 5166 | ||
4924 | si = spr->rx_jmb_cons_idx; | 5167 | si = spr->rx_jmb_cons_idx; |
4925 | di = dpr->rx_jmb_prod_idx; | 5168 | di = dpr->rx_jmb_prod_idx; |
@@ -4953,10 +5196,10 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp, | |||
4953 | dbd->addr_lo = sbd->addr_lo; | 5196 | dbd->addr_lo = sbd->addr_lo; |
4954 | } | 5197 | } |
4955 | 5198 | ||
4956 | spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) % | 5199 | spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & |
4957 | TG3_RX_JUMBO_RING_SIZE; | 5200 | tp->rx_jmb_ring_mask; |
4958 | dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) % | 5201 | dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & |
4959 | TG3_RX_JUMBO_RING_SIZE; | 5202 | tp->rx_jmb_ring_mask; |
4960 | } | 5203 | } |
4961 | 5204 | ||
4962 | return err; | 5205 | return err; |
@@ -4969,7 +5212,7 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) | |||
4969 | /* run TX completion thread */ | 5212 | /* run TX completion thread */ |
4970 | if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { | 5213 | if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { |
4971 | tg3_tx(tnapi); | 5214 | tg3_tx(tnapi); |
4972 | if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) | 5215 | if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) |
4973 | return work_done; | 5216 | return work_done; |
4974 | } | 5217 | } |
4975 | 5218 | ||
@@ -4980,15 +5223,15 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) | |||
4980 | if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) | 5223 | if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) |
4981 | work_done += tg3_rx(tnapi, budget - work_done); | 5224 | work_done += tg3_rx(tnapi, budget - work_done); |
4982 | 5225 | ||
4983 | if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) { | 5226 | if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { |
4984 | struct tg3_rx_prodring_set *dpr = &tp->prodring[0]; | 5227 | struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; |
4985 | int i, err = 0; | 5228 | int i, err = 0; |
4986 | u32 std_prod_idx = dpr->rx_std_prod_idx; | 5229 | u32 std_prod_idx = dpr->rx_std_prod_idx; |
4987 | u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; | 5230 | u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; |
4988 | 5231 | ||
4989 | for (i = 1; i < tp->irq_cnt; i++) | 5232 | for (i = 1; i < tp->irq_cnt; i++) |
4990 | err |= tg3_rx_prodring_xfer(tp, dpr, | 5233 | err |= tg3_rx_prodring_xfer(tp, dpr, |
4991 | tp->napi[i].prodring); | 5234 | &tp->napi[i].prodring); |
4992 | 5235 | ||
4993 | wmb(); | 5236 | wmb(); |
4994 | 5237 | ||
@@ -5019,7 +5262,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget) | |||
5019 | while (1) { | 5262 | while (1) { |
5020 | work_done = tg3_poll_work(tnapi, work_done, budget); | 5263 | work_done = tg3_poll_work(tnapi, work_done, budget); |
5021 | 5264 | ||
5022 | if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) | 5265 | if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) |
5023 | goto tx_recovery; | 5266 | goto tx_recovery; |
5024 | 5267 | ||
5025 | if (unlikely(work_done >= budget)) | 5268 | if (unlikely(work_done >= budget)) |
@@ -5053,6 +5296,40 @@ tx_recovery: | |||
5053 | return work_done; | 5296 | return work_done; |
5054 | } | 5297 | } |
5055 | 5298 | ||
5299 | static void tg3_process_error(struct tg3 *tp) | ||
5300 | { | ||
5301 | u32 val; | ||
5302 | bool real_error = false; | ||
5303 | |||
5304 | if (tg3_flag(tp, ERROR_PROCESSED)) | ||
5305 | return; | ||
5306 | |||
5307 | /* Check Flow Attention register */ | ||
5308 | val = tr32(HOSTCC_FLOW_ATTN); | ||
5309 | if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { | ||
5310 | netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n"); | ||
5311 | real_error = true; | ||
5312 | } | ||
5313 | |||
5314 | if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { | ||
5315 | netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); | ||
5316 | real_error = true; | ||
5317 | } | ||
5318 | |||
5319 | if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { | ||
5320 | netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); | ||
5321 | real_error = true; | ||
5322 | } | ||
5323 | |||
5324 | if (!real_error) | ||
5325 | return; | ||
5326 | |||
5327 | tg3_dump_state(tp); | ||
5328 | |||
5329 | tg3_flag_set(tp, ERROR_PROCESSED); | ||
5330 | schedule_work(&tp->reset_task); | ||
5331 | } | ||
5332 | |||
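tg3_process_error() above inspects the flow-attention, MSI and DMA status registers, dumps state, and defers the actual recovery to the existing reset_task. The ERROR_PROCESSED flag is what keeps a persistent error condition from re-scheduling the work on every NAPI pass. A driver-agnostic sketch of the same latch-then-defer idiom (names invented for illustration; tg3 itself goes through its tg3_flag() helpers rather than a bare test_and_set_bit()):

/* Hypothetical example of latching an error before deferring recovery. */
#include <linux/workqueue.h>
#include <linux/bitops.h>

struct mydev {
        unsigned long           flags;
#define MYDEV_ERR_PENDING       0
        struct work_struct      reset_work;
};

static void mydev_handle_error(struct mydev *md)
{
        /* The first detection schedules the reset; repeats are no-ops
         * until the reset handler clears the bit again.
         */
        if (test_and_set_bit(MYDEV_ERR_PENDING, &md->flags))
                return;

        schedule_work(&md->reset_work);
}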
5056 | static int tg3_poll(struct napi_struct *napi, int budget) | 5333 | static int tg3_poll(struct napi_struct *napi, int budget) |
5057 | { | 5334 | { |
5058 | struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); | 5335 | struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); |
@@ -5061,17 +5338,20 @@ static int tg3_poll(struct napi_struct *napi, int budget) | |||
5061 | struct tg3_hw_status *sblk = tnapi->hw_status; | 5338 | struct tg3_hw_status *sblk = tnapi->hw_status; |
5062 | 5339 | ||
5063 | while (1) { | 5340 | while (1) { |
5341 | if (sblk->status & SD_STATUS_ERROR) | ||
5342 | tg3_process_error(tp); | ||
5343 | |||
5064 | tg3_poll_link(tp); | 5344 | tg3_poll_link(tp); |
5065 | 5345 | ||
5066 | work_done = tg3_poll_work(tnapi, work_done, budget); | 5346 | work_done = tg3_poll_work(tnapi, work_done, budget); |
5067 | 5347 | ||
5068 | if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) | 5348 | if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) |
5069 | goto tx_recovery; | 5349 | goto tx_recovery; |
5070 | 5350 | ||
5071 | if (unlikely(work_done >= budget)) | 5351 | if (unlikely(work_done >= budget)) |
5072 | break; | 5352 | break; |
5073 | 5353 | ||
5074 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { | 5354 | if (tg3_flag(tp, TAGGED_STATUS)) { |
5075 | /* tp->last_tag is used in tg3_int_reenable() below | 5355 | /* tp->last_tag is used in tg3_int_reenable() below |
5076 | * to tell the hw how much work has been processed, | 5356 | * to tell the hw how much work has been processed, |
5077 | * so we must read it before checking for more work. | 5357 | * so we must read it before checking for more work. |
@@ -5098,6 +5378,59 @@ tx_recovery: | |||
5098 | return work_done; | 5378 | return work_done; |
5099 | } | 5379 | } |
5100 | 5380 | ||
5381 | static void tg3_napi_disable(struct tg3 *tp) | ||
5382 | { | ||
5383 | int i; | ||
5384 | |||
5385 | for (i = tp->irq_cnt - 1; i >= 0; i--) | ||
5386 | napi_disable(&tp->napi[i].napi); | ||
5387 | } | ||
5388 | |||
5389 | static void tg3_napi_enable(struct tg3 *tp) | ||
5390 | { | ||
5391 | int i; | ||
5392 | |||
5393 | for (i = 0; i < tp->irq_cnt; i++) | ||
5394 | napi_enable(&tp->napi[i].napi); | ||
5395 | } | ||
5396 | |||
5397 | static void tg3_napi_init(struct tg3 *tp) | ||
5398 | { | ||
5399 | int i; | ||
5400 | |||
5401 | netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64); | ||
5402 | for (i = 1; i < tp->irq_cnt; i++) | ||
5403 | netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64); | ||
5404 | } | ||
5405 | |||
5406 | static void tg3_napi_fini(struct tg3 *tp) | ||
5407 | { | ||
5408 | int i; | ||
5409 | |||
5410 | for (i = 0; i < tp->irq_cnt; i++) | ||
5411 | netif_napi_del(&tp->napi[i].napi); | ||
5412 | } | ||
5413 | |||
5414 | static inline void tg3_netif_stop(struct tg3 *tp) | ||
5415 | { | ||
5416 | tp->dev->trans_start = jiffies; /* prevent tx timeout */ | ||
5417 | tg3_napi_disable(tp); | ||
5418 | netif_tx_disable(tp->dev); | ||
5419 | } | ||
5420 | |||
5421 | static inline void tg3_netif_start(struct tg3 *tp) | ||
5422 | { | ||
5423 | /* NOTE: unconditional netif_tx_wake_all_queues is only | ||
5424 | * appropriate so long as all callers are assured to | ||
5425 | * have free tx slots (such as after tg3_init_hw) | ||
5426 | */ | ||
5427 | netif_tx_wake_all_queues(tp->dev); | ||
5428 | |||
5429 | tg3_napi_enable(tp); | ||
5430 | tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; | ||
5431 | tg3_enable_ints(tp); | ||
5432 | } | ||
5433 | |||
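The tg3_napi_* and tg3_netif_* helpers added above gather the per-vector NAPI bookkeeping in one place; note that tg3_napi_disable() walks the vectors in the reverse of the order tg3_napi_enable() uses. How the stop/start pair is meant to bracket a reset, as a rough sketch (assuming the helpers exactly as defined above, with the hardware work omitted):

/* Rough ordering sketch; chip halt/reinit details omitted. */
static void example_restart(struct tg3 *tp)
{
        tg3_netif_stop(tp);     /* disable NAPI polling and stop the TX queues */

        /* ... halt and reprogram the chip here ... */

        tg3_netif_start(tp);    /* wake TX queues, re-enable NAPI and interrupts */
}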
5101 | static void tg3_irq_quiesce(struct tg3 *tp) | 5434 | static void tg3_irq_quiesce(struct tg3 *tp) |
5102 | { | 5435 | { |
5103 | int i; | 5436 | int i; |
@@ -5111,11 +5444,6 @@ static void tg3_irq_quiesce(struct tg3 *tp) | |||
5111 | synchronize_irq(tp->napi[i].irq_vec); | 5444 | synchronize_irq(tp->napi[i].irq_vec); |
5112 | } | 5445 | } |
5113 | 5446 | ||
5114 | static inline int tg3_irq_sync(struct tg3 *tp) | ||
5115 | { | ||
5116 | return tp->irq_sync; | ||
5117 | } | ||
5118 | |||
5119 | /* Fully shutdown all tg3 driver activity elsewhere in the system. | 5447 | /* Fully shutdown all tg3 driver activity elsewhere in the system. |
5120 | * If irq_sync is non-zero, then the IRQ handler must be synchronized | 5448 | * If irq_sync is non-zero, then the IRQ handler must be synchronized |
5121 | * with as well. Most of the time, this is not necessary except when | 5449 | * with as well. Most of the time, this is not necessary except when |
@@ -5190,7 +5518,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id) | |||
5190 | * interrupt is ours and will flush the status block. | 5518 | * interrupt is ours and will flush the status block. |
5191 | */ | 5519 | */ |
5192 | if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { | 5520 | if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { |
5193 | if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) || | 5521 | if (tg3_flag(tp, CHIP_RESETTING) || |
5194 | (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { | 5522 | (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { |
5195 | handled = 0; | 5523 | handled = 0; |
5196 | goto out; | 5524 | goto out; |
@@ -5239,7 +5567,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) | |||
5239 | * interrupt is ours and will flush the status block. | 5567 | * interrupt is ours and will flush the status block. |
5240 | */ | 5568 | */ |
5241 | if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { | 5569 | if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { |
5242 | if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) || | 5570 | if (tg3_flag(tp, CHIP_RESETTING) || |
5243 | (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { | 5571 | (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { |
5244 | handled = 0; | 5572 | handled = 0; |
5245 | goto out; | 5573 | goto out; |
@@ -5352,14 +5680,14 @@ static void tg3_reset_task(struct work_struct *work) | |||
5352 | 5680 | ||
5353 | tg3_full_lock(tp, 1); | 5681 | tg3_full_lock(tp, 1); |
5354 | 5682 | ||
5355 | restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER; | 5683 | restart_timer = tg3_flag(tp, RESTART_TIMER); |
5356 | tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; | 5684 | tg3_flag_clear(tp, RESTART_TIMER); |
5357 | 5685 | ||
5358 | if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) { | 5686 | if (tg3_flag(tp, TX_RECOVERY_PENDING)) { |
5359 | tp->write32_tx_mbox = tg3_write32_tx_mbox; | 5687 | tp->write32_tx_mbox = tg3_write32_tx_mbox; |
5360 | tp->write32_rx_mbox = tg3_write_flush_reg32; | 5688 | tp->write32_rx_mbox = tg3_write_flush_reg32; |
5361 | tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; | 5689 | tg3_flag_set(tp, MBOX_WRITE_REORDER); |
5362 | tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING; | 5690 | tg3_flag_clear(tp, TX_RECOVERY_PENDING); |
5363 | } | 5691 | } |
5364 | 5692 | ||
5365 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); | 5693 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); |
@@ -5379,21 +5707,13 @@ out: | |||
5379 | tg3_phy_start(tp); | 5707 | tg3_phy_start(tp); |
5380 | } | 5708 | } |
5381 | 5709 | ||
5382 | static void tg3_dump_short_state(struct tg3 *tp) | ||
5383 | { | ||
5384 | netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n", | ||
5385 | tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS)); | ||
5386 | netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n", | ||
5387 | tr32(RDMAC_STATUS), tr32(WDMAC_STATUS)); | ||
5388 | } | ||
5389 | |||
5390 | static void tg3_tx_timeout(struct net_device *dev) | 5710 | static void tg3_tx_timeout(struct net_device *dev) |
5391 | { | 5711 | { |
5392 | struct tg3 *tp = netdev_priv(dev); | 5712 | struct tg3 *tp = netdev_priv(dev); |
5393 | 5713 | ||
5394 | if (netif_msg_tx_err(tp)) { | 5714 | if (netif_msg_tx_err(tp)) { |
5395 | netdev_err(dev, "transmit timed out, resetting\n"); | 5715 | netdev_err(dev, "transmit timed out, resetting\n"); |
5396 | tg3_dump_short_state(tp); | 5716 | tg3_dump_state(tp); |
5397 | } | 5717 | } |
5398 | 5718 | ||
5399 | schedule_work(&tp->reset_task); | 5719 | schedule_work(&tp->reset_task); |
@@ -5404,8 +5724,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) | |||
5404 | { | 5724 | { |
5405 | u32 base = (u32) mapping & 0xffffffff; | 5725 | u32 base = (u32) mapping & 0xffffffff; |
5406 | 5726 | ||
5407 | return ((base > 0xffffdcc0) && | 5727 | return (base > 0xffffdcc0) && (base + len + 8 < base); |
5408 | (base + len + 8 < base)); | ||
5409 | } | 5728 | } |
5410 | 5729 | ||
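tg3_4g_overflow_test() above keeps its old meaning in a more compact form: base is the low 32 bits of the DMA address, so `base + len + 8 < base` is true exactly when the buffer (plus 8 bytes of slack) wraps past a 4 GB boundary in 32-bit arithmetic, while `base > 0xffffdcc0` restricts the check to buffers starting in the last few kilobytes below that boundary. A worked example with made-up values:

/* Worked example of the wrap test above (illustrative values only). */
#include <stdint.h>
#include <stdio.h>

static int crosses_4g(uint32_t base, int len)
{
        return (base > 0xffffdcc0u) && ((uint32_t)(base + len + 8) < base);
}

int main(void)
{
        /* 0xffffff00 + 0x200 + 8 wraps to 0x108 in 32 bits: straddles 4 GB. */
        printf("%d\n", crosses_4g(0xffffff00u, 0x200));  /* 1 */
        /* A buffer far below the boundary never trips the test. */
        printf("%d\n", crosses_4g(0x10000000u, 0x200));  /* 0 */
        return 0;
}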
5411 | /* Test for DMA addresses > 40-bit */ | 5730 | /* Test for DMA addresses > 40-bit */ |
@@ -5413,26 +5732,70 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, | |||
5413 | int len) | 5732 | int len) |
5414 | { | 5733 | { |
5415 | #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) | 5734 | #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) |
5416 | if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) | 5735 | if (tg3_flag(tp, 40BIT_DMA_BUG)) |
5417 | return (((u64) mapping + len) > DMA_BIT_MASK(40)); | 5736 | return ((u64) mapping + len) > DMA_BIT_MASK(40); |
5418 | return 0; | 5737 | return 0; |
5419 | #else | 5738 | #else |
5420 | return 0; | 5739 | return 0; |
5421 | #endif | 5740 | #endif |
5422 | } | 5741 | } |
5423 | 5742 | ||
5424 | static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32); | 5743 | static void tg3_set_txd(struct tg3_napi *tnapi, int entry, |
5744 | dma_addr_t mapping, int len, u32 flags, | ||
5745 | u32 mss_and_is_end) | ||
5746 | { | ||
5747 | struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry]; | ||
5748 | int is_end = (mss_and_is_end & 0x1); | ||
5749 | u32 mss = (mss_and_is_end >> 1); | ||
5750 | u32 vlan_tag = 0; | ||
5751 | |||
5752 | if (is_end) | ||
5753 | flags |= TXD_FLAG_END; | ||
5754 | if (flags & TXD_FLAG_VLAN) { | ||
5755 | vlan_tag = flags >> 16; | ||
5756 | flags &= 0xffff; | ||
5757 | } | ||
5758 | vlan_tag |= (mss << TXD_MSS_SHIFT); | ||
5759 | |||
5760 | txd->addr_hi = ((u64) mapping >> 32); | ||
5761 | txd->addr_lo = ((u64) mapping & 0xffffffff); | ||
5762 | txd->len_flags = (len << TXD_LEN_SHIFT) | flags; | ||
5763 | txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; | ||
5764 | } | ||
5765 | |||
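tg3_set_txd() itself is unchanged; the forward declaration is dropped (see the deleted prototype above) and the definition moves ahead of its new caller. Its last argument packs the end-of-fragment flag into bit 0 and the MSS above it, which is why callers pass `1 | (mss << 1)`. A tiny illustration of that packing (plain user-space C, not driver code):

/* How callers encode tg3_set_txd()'s mss_and_is_end argument. */
#include <stdio.h>

int main(void)
{
        unsigned int mss = 1448;
        unsigned int packed = 1u | (mss << 1);  /* bit 0: last fragment */

        printf("is_end=%u mss=%u\n", packed & 0x1u, packed >> 1);
        return 0;
}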
5766 | static void tg3_skb_error_unmap(struct tg3_napi *tnapi, | ||
5767 | struct sk_buff *skb, int last) | ||
5768 | { | ||
5769 | int i; | ||
5770 | u32 entry = tnapi->tx_prod; | ||
5771 | struct ring_info *txb = &tnapi->tx_buffers[entry]; | ||
5772 | |||
5773 | pci_unmap_single(tnapi->tp->pdev, | ||
5774 | dma_unmap_addr(txb, mapping), | ||
5775 | skb_headlen(skb), | ||
5776 | PCI_DMA_TODEVICE); | ||
5777 | for (i = 0; i < last; i++) { | ||
5778 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
5779 | |||
5780 | entry = NEXT_TX(entry); | ||
5781 | txb = &tnapi->tx_buffers[entry]; | ||
5782 | |||
5783 | pci_unmap_page(tnapi->tp->pdev, | ||
5784 | dma_unmap_addr(txb, mapping), | ||
5785 | frag->size, PCI_DMA_TODEVICE); | ||
5786 | } | ||
5787 | } | ||
5425 | 5788 | ||
5426 | /* Workaround 4GB and 40-bit hardware DMA bugs. */ | 5789 | /* Workaround 4GB and 40-bit hardware DMA bugs. */ |
5427 | static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, | 5790 | static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, |
5428 | struct sk_buff *skb, u32 last_plus_one, | 5791 | struct sk_buff *skb, |
5429 | u32 *start, u32 base_flags, u32 mss) | 5792 | u32 base_flags, u32 mss) |
5430 | { | 5793 | { |
5431 | struct tg3 *tp = tnapi->tp; | 5794 | struct tg3 *tp = tnapi->tp; |
5432 | struct sk_buff *new_skb; | 5795 | struct sk_buff *new_skb; |
5433 | dma_addr_t new_addr = 0; | 5796 | dma_addr_t new_addr = 0; |
5434 | u32 entry = *start; | 5797 | u32 entry = tnapi->tx_prod; |
5435 | int i, ret = 0; | 5798 | int ret = 0; |
5436 | 5799 | ||
5437 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) | 5800 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) |
5438 | new_skb = skb_copy(skb, GFP_ATOMIC); | 5801 | new_skb = skb_copy(skb, GFP_ATOMIC); |
@@ -5448,55 +5811,30 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, | |||
5448 | ret = -1; | 5811 | ret = -1; |
5449 | } else { | 5812 | } else { |
5450 | /* New SKB is guaranteed to be linear. */ | 5813 | /* New SKB is guaranteed to be linear. */ |
5451 | entry = *start; | ||
5452 | new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, | 5814 | new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, |
5453 | PCI_DMA_TODEVICE); | 5815 | PCI_DMA_TODEVICE); |
5454 | /* Make sure the mapping succeeded */ | 5816 | /* Make sure the mapping succeeded */ |
5455 | if (pci_dma_mapping_error(tp->pdev, new_addr)) { | 5817 | if (pci_dma_mapping_error(tp->pdev, new_addr)) { |
5456 | ret = -1; | 5818 | ret = -1; |
5457 | dev_kfree_skb(new_skb); | 5819 | dev_kfree_skb(new_skb); |
5458 | new_skb = NULL; | ||
5459 | 5820 | ||
5460 | /* Make sure new skb does not cross any 4G boundaries. | 5821 | /* Make sure new skb does not cross any 4G boundaries. |
5461 | * Drop the packet if it does. | 5822 | * Drop the packet if it does. |
5462 | */ | 5823 | */ |
5463 | } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && | 5824 | } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) && |
5464 | tg3_4g_overflow_test(new_addr, new_skb->len)) { | 5825 | tg3_4g_overflow_test(new_addr, new_skb->len)) { |
5465 | pci_unmap_single(tp->pdev, new_addr, new_skb->len, | 5826 | pci_unmap_single(tp->pdev, new_addr, new_skb->len, |
5466 | PCI_DMA_TODEVICE); | 5827 | PCI_DMA_TODEVICE); |
5467 | ret = -1; | 5828 | ret = -1; |
5468 | dev_kfree_skb(new_skb); | 5829 | dev_kfree_skb(new_skb); |
5469 | new_skb = NULL; | ||
5470 | } else { | 5830 | } else { |
5831 | tnapi->tx_buffers[entry].skb = new_skb; | ||
5832 | dma_unmap_addr_set(&tnapi->tx_buffers[entry], | ||
5833 | mapping, new_addr); | ||
5834 | |||
5471 | tg3_set_txd(tnapi, entry, new_addr, new_skb->len, | 5835 | tg3_set_txd(tnapi, entry, new_addr, new_skb->len, |
5472 | base_flags, 1 | (mss << 1)); | 5836 | base_flags, 1 | (mss << 1)); |
5473 | *start = NEXT_TX(entry); | ||
5474 | } | ||
5475 | } | ||
5476 | |||
5477 | /* Now clean up the sw ring entries. */ | ||
5478 | i = 0; | ||
5479 | while (entry != last_plus_one) { | ||
5480 | int len; | ||
5481 | |||
5482 | if (i == 0) | ||
5483 | len = skb_headlen(skb); | ||
5484 | else | ||
5485 | len = skb_shinfo(skb)->frags[i-1].size; | ||
5486 | |||
5487 | pci_unmap_single(tp->pdev, | ||
5488 | dma_unmap_addr(&tnapi->tx_buffers[entry], | ||
5489 | mapping), | ||
5490 | len, PCI_DMA_TODEVICE); | ||
5491 | if (i == 0) { | ||
5492 | tnapi->tx_buffers[entry].skb = new_skb; | ||
5493 | dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, | ||
5494 | new_addr); | ||
5495 | } else { | ||
5496 | tnapi->tx_buffers[entry].skb = NULL; | ||
5497 | } | 5837 | } |
5498 | entry = NEXT_TX(entry); | ||
5499 | i++; | ||
5500 | } | 5838 | } |
5501 | 5839 | ||
5502 | dev_kfree_skb(skb); | 5840 | dev_kfree_skb(skb); |
@@ -5504,204 +5842,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, | |||
5504 | return ret; | 5842 | return ret; |
5505 | } | 5843 | } |
5506 | 5844 | ||
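The reworked tigon3_dma_hwbug_workaround() no longer walks the ring to clean up partial mappings itself; the caller undoes those with tg3_skb_error_unmap() first, so the workaround only has to produce a linear copy of the packet, map it once, and emit a single descriptor. The core of that fallback, as a hedged sketch (helper name invented; error handling trimmed):

/* Sketch: replace an skb whose mapping hit a DMA quirk with a linear copy. */
#include <linux/skbuff.h>

static struct sk_buff *example_linearize(struct sk_buff *skb)
{
        /* skb_copy() allocates a fresh skb with all data in the linear area. */
        struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);

        if (new_skb)
                dev_kfree_skb(skb);     /* original no longer needed */

        return new_skb;
}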
5507 | static void tg3_set_txd(struct tg3_napi *tnapi, int entry, | 5845 | static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); |
5508 | dma_addr_t mapping, int len, u32 flags, | ||
5509 | u32 mss_and_is_end) | ||
5510 | { | ||
5511 | struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry]; | ||
5512 | int is_end = (mss_and_is_end & 0x1); | ||
5513 | u32 mss = (mss_and_is_end >> 1); | ||
5514 | u32 vlan_tag = 0; | ||
5515 | |||
5516 | if (is_end) | ||
5517 | flags |= TXD_FLAG_END; | ||
5518 | if (flags & TXD_FLAG_VLAN) { | ||
5519 | vlan_tag = flags >> 16; | ||
5520 | flags &= 0xffff; | ||
5521 | } | ||
5522 | vlan_tag |= (mss << TXD_MSS_SHIFT); | ||
5523 | |||
5524 | txd->addr_hi = ((u64) mapping >> 32); | ||
5525 | txd->addr_lo = ((u64) mapping & 0xffffffff); | ||
5526 | txd->len_flags = (len << TXD_LEN_SHIFT) | flags; | ||
5527 | txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; | ||
5528 | } | ||
5529 | |||
5530 | /* hard_start_xmit for devices that don't have any bugs and | ||
5531 | * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only. | ||
5532 | */ | ||
5533 | static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, | ||
5534 | struct net_device *dev) | ||
5535 | { | ||
5536 | struct tg3 *tp = netdev_priv(dev); | ||
5537 | u32 len, entry, base_flags, mss; | ||
5538 | dma_addr_t mapping; | ||
5539 | struct tg3_napi *tnapi; | ||
5540 | struct netdev_queue *txq; | ||
5541 | unsigned int i, last; | ||
5542 | |||
5543 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | ||
5544 | tnapi = &tp->napi[skb_get_queue_mapping(skb)]; | ||
5545 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) | ||
5546 | tnapi++; | ||
5547 | |||
5548 | /* We are running in BH disabled context with netif_tx_lock | ||
5549 | * and TX reclaim runs via tp->napi.poll inside of a software | ||
5550 | * interrupt. Furthermore, IRQ processing runs lockless so we have | ||
5551 | * no IRQ context deadlocks to worry about either. Rejoice! | ||
5552 | */ | ||
5553 | if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { | ||
5554 | if (!netif_tx_queue_stopped(txq)) { | ||
5555 | netif_tx_stop_queue(txq); | ||
5556 | |||
5557 | /* This is a hard error, log it. */ | ||
5558 | netdev_err(dev, | ||
5559 | "BUG! Tx Ring full when queue awake!\n"); | ||
5560 | } | ||
5561 | return NETDEV_TX_BUSY; | ||
5562 | } | ||
5563 | |||
5564 | entry = tnapi->tx_prod; | ||
5565 | base_flags = 0; | ||
5566 | mss = skb_shinfo(skb)->gso_size; | ||
5567 | if (mss) { | ||
5568 | int tcp_opt_len, ip_tcp_len; | ||
5569 | u32 hdrlen; | ||
5570 | |||
5571 | if (skb_header_cloned(skb) && | ||
5572 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { | ||
5573 | dev_kfree_skb(skb); | ||
5574 | goto out_unlock; | ||
5575 | } | ||
5576 | |||
5577 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | ||
5578 | hdrlen = skb_headlen(skb) - ETH_HLEN; | ||
5579 | else { | ||
5580 | struct iphdr *iph = ip_hdr(skb); | ||
5581 | |||
5582 | tcp_opt_len = tcp_optlen(skb); | ||
5583 | ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); | ||
5584 | |||
5585 | iph->check = 0; | ||
5586 | iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); | ||
5587 | hdrlen = ip_tcp_len + tcp_opt_len; | ||
5588 | } | ||
5589 | |||
5590 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { | ||
5591 | mss |= (hdrlen & 0xc) << 12; | ||
5592 | if (hdrlen & 0x10) | ||
5593 | base_flags |= 0x00000010; | ||
5594 | base_flags |= (hdrlen & 0x3e0) << 5; | ||
5595 | } else | ||
5596 | mss |= hdrlen << 9; | ||
5597 | |||
5598 | base_flags |= (TXD_FLAG_CPU_PRE_DMA | | ||
5599 | TXD_FLAG_CPU_POST_DMA); | ||
5600 | |||
5601 | tcp_hdr(skb)->check = 0; | ||
5602 | |||
5603 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
5604 | base_flags |= TXD_FLAG_TCPUDP_CSUM; | ||
5605 | } | ||
5606 | |||
5607 | #if TG3_VLAN_TAG_USED | ||
5608 | if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) | ||
5609 | base_flags |= (TXD_FLAG_VLAN | | ||
5610 | (vlan_tx_tag_get(skb) << 16)); | ||
5611 | #endif | ||
5612 | |||
5613 | len = skb_headlen(skb); | ||
5614 | |||
5615 | /* Queue skb data, a.k.a. the main skb fragment. */ | ||
5616 | mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); | ||
5617 | if (pci_dma_mapping_error(tp->pdev, mapping)) { | ||
5618 | dev_kfree_skb(skb); | ||
5619 | goto out_unlock; | ||
5620 | } | ||
5621 | |||
5622 | tnapi->tx_buffers[entry].skb = skb; | ||
5623 | dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); | ||
5624 | |||
5625 | if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && | ||
5626 | !mss && skb->len > ETH_DATA_LEN) | ||
5627 | base_flags |= TXD_FLAG_JMB_PKT; | ||
5628 | |||
5629 | tg3_set_txd(tnapi, entry, mapping, len, base_flags, | ||
5630 | (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); | ||
5631 | |||
5632 | entry = NEXT_TX(entry); | ||
5633 | |||
5634 | /* Now loop through additional data fragments, and queue them. */ | ||
5635 | if (skb_shinfo(skb)->nr_frags > 0) { | ||
5636 | last = skb_shinfo(skb)->nr_frags - 1; | ||
5637 | for (i = 0; i <= last; i++) { | ||
5638 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
5639 | |||
5640 | len = frag->size; | ||
5641 | mapping = pci_map_page(tp->pdev, | ||
5642 | frag->page, | ||
5643 | frag->page_offset, | ||
5644 | len, PCI_DMA_TODEVICE); | ||
5645 | if (pci_dma_mapping_error(tp->pdev, mapping)) | ||
5646 | goto dma_error; | ||
5647 | |||
5648 | tnapi->tx_buffers[entry].skb = NULL; | ||
5649 | dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, | ||
5650 | mapping); | ||
5651 | |||
5652 | tg3_set_txd(tnapi, entry, mapping, len, | ||
5653 | base_flags, (i == last) | (mss << 1)); | ||
5654 | |||
5655 | entry = NEXT_TX(entry); | ||
5656 | } | ||
5657 | } | ||
5658 | |||
5659 | /* Packets are ready, update Tx producer idx local and on card. */ | ||
5660 | tw32_tx_mbox(tnapi->prodmbox, entry); | ||
5661 | |||
5662 | tnapi->tx_prod = entry; | ||
5663 | if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { | ||
5664 | netif_tx_stop_queue(txq); | ||
5665 | |||
5666 | /* netif_tx_stop_queue() must be done before checking | ||
5667 | * tx index in tg3_tx_avail() below, because in | ||
5668 | * tg3_tx(), we update tx index before checking for | ||
5669 | * netif_tx_queue_stopped(). | ||
5670 | */ | ||
5671 | smp_mb(); | ||
5672 | if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) | ||
5673 | netif_tx_wake_queue(txq); | ||
5674 | } | ||
5675 | |||
5676 | out_unlock: | ||
5677 | mmiowb(); | ||
5678 | |||
5679 | return NETDEV_TX_OK; | ||
5680 | |||
5681 | dma_error: | ||
5682 | last = i; | ||
5683 | entry = tnapi->tx_prod; | ||
5684 | tnapi->tx_buffers[entry].skb = NULL; | ||
5685 | pci_unmap_single(tp->pdev, | ||
5686 | dma_unmap_addr(&tnapi->tx_buffers[entry], mapping), | ||
5687 | skb_headlen(skb), | ||
5688 | PCI_DMA_TODEVICE); | ||
5689 | for (i = 0; i <= last; i++) { | ||
5690 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
5691 | entry = NEXT_TX(entry); | ||
5692 | |||
5693 | pci_unmap_page(tp->pdev, | ||
5694 | dma_unmap_addr(&tnapi->tx_buffers[entry], | ||
5695 | mapping), | ||
5696 | frag->size, PCI_DMA_TODEVICE); | ||
5697 | } | ||
5698 | |||
5699 | dev_kfree_skb(skb); | ||
5700 | return NETDEV_TX_OK; | ||
5701 | } | ||
5702 | |||
5703 | static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *, | ||
5704 | struct net_device *); | ||
5705 | 5846 | ||
5706 | /* Use GSO to workaround a rare TSO bug that may be triggered when the | 5847 | /* Use GSO to workaround a rare TSO bug that may be triggered when the |
5707 | * TSO header is greater than 80 bytes. | 5848 | * TSO header is greater than 80 bytes. |
@@ -5735,7 +5876,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) | |||
5735 | nskb = segs; | 5876 | nskb = segs; |
5736 | segs = segs->next; | 5877 | segs = segs->next; |
5737 | nskb->next = NULL; | 5878 | nskb->next = NULL; |
5738 | tg3_start_xmit_dma_bug(nskb, tp->dev); | 5879 | tg3_start_xmit(nskb, tp->dev); |
5739 | } while (segs); | 5880 | } while (segs); |
5740 | 5881 | ||
5741 | tg3_tso_bug_end: | 5882 | tg3_tso_bug_end: |
@@ -5745,22 +5886,21 @@ tg3_tso_bug_end: | |||
5745 | } | 5886 | } |
5746 | 5887 | ||
5747 | /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and | 5888 | /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and |
5748 | * support TG3_FLG2_HW_TSO_1 or firmware TSO only. | 5889 | * support TG3_FLAG_HW_TSO_1 or firmware TSO only. |
5749 | */ | 5890 | */ |
5750 | static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | 5891 | static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) |
5751 | struct net_device *dev) | ||
5752 | { | 5892 | { |
5753 | struct tg3 *tp = netdev_priv(dev); | 5893 | struct tg3 *tp = netdev_priv(dev); |
5754 | u32 len, entry, base_flags, mss; | 5894 | u32 len, entry, base_flags, mss; |
5755 | int would_hit_hwbug; | 5895 | int i = -1, would_hit_hwbug; |
5756 | dma_addr_t mapping; | 5896 | dma_addr_t mapping; |
5757 | struct tg3_napi *tnapi; | 5897 | struct tg3_napi *tnapi; |
5758 | struct netdev_queue *txq; | 5898 | struct netdev_queue *txq; |
5759 | unsigned int i, last; | 5899 | unsigned int last; |
5760 | 5900 | ||
5761 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | 5901 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); |
5762 | tnapi = &tp->napi[skb_get_queue_mapping(skb)]; | 5902 | tnapi = &tp->napi[skb_get_queue_mapping(skb)]; |
5763 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) | 5903 | if (tg3_flag(tp, ENABLE_TSS)) |
5764 | tnapi++; | 5904 | tnapi++; |
5765 | 5905 | ||
5766 | /* We are running in BH disabled context with netif_tx_lock | 5906 | /* We are running in BH disabled context with netif_tx_lock |
@@ -5798,7 +5938,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5798 | iph = ip_hdr(skb); | 5938 | iph = ip_hdr(skb); |
5799 | tcp_opt_len = tcp_optlen(skb); | 5939 | tcp_opt_len = tcp_optlen(skb); |
5800 | 5940 | ||
5801 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { | 5941 | if (skb_is_gso_v6(skb)) { |
5802 | hdr_len = skb_headlen(skb) - ETH_HLEN; | 5942 | hdr_len = skb_headlen(skb) - ETH_HLEN; |
5803 | } else { | 5943 | } else { |
5804 | u32 ip_tcp_len; | 5944 | u32 ip_tcp_len; |
@@ -5811,13 +5951,15 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5811 | } | 5951 | } |
5812 | 5952 | ||
5813 | if (unlikely((ETH_HLEN + hdr_len) > 80) && | 5953 | if (unlikely((ETH_HLEN + hdr_len) > 80) && |
5814 | (tp->tg3_flags2 & TG3_FLG2_TSO_BUG)) | 5954 | tg3_flag(tp, TSO_BUG)) |
5815 | return tg3_tso_bug(tp, skb); | 5955 | return tg3_tso_bug(tp, skb); |
5816 | 5956 | ||
5817 | base_flags |= (TXD_FLAG_CPU_PRE_DMA | | 5957 | base_flags |= (TXD_FLAG_CPU_PRE_DMA | |
5818 | TXD_FLAG_CPU_POST_DMA); | 5958 | TXD_FLAG_CPU_POST_DMA); |
5819 | 5959 | ||
5820 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { | 5960 | if (tg3_flag(tp, HW_TSO_1) || |
5961 | tg3_flag(tp, HW_TSO_2) || | ||
5962 | tg3_flag(tp, HW_TSO_3)) { | ||
5821 | tcp_hdr(skb)->check = 0; | 5963 | tcp_hdr(skb)->check = 0; |
5822 | base_flags &= ~TXD_FLAG_TCPUDP_CSUM; | 5964 | base_flags &= ~TXD_FLAG_TCPUDP_CSUM; |
5823 | } else | 5965 | } else |
@@ -5826,14 +5968,14 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5826 | IPPROTO_TCP, | 5968 | IPPROTO_TCP, |
5827 | 0); | 5969 | 0); |
5828 | 5970 | ||
5829 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { | 5971 | if (tg3_flag(tp, HW_TSO_3)) { |
5830 | mss |= (hdr_len & 0xc) << 12; | 5972 | mss |= (hdr_len & 0xc) << 12; |
5831 | if (hdr_len & 0x10) | 5973 | if (hdr_len & 0x10) |
5832 | base_flags |= 0x00000010; | 5974 | base_flags |= 0x00000010; |
5833 | base_flags |= (hdr_len & 0x3e0) << 5; | 5975 | base_flags |= (hdr_len & 0x3e0) << 5; |
5834 | } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) | 5976 | } else if (tg3_flag(tp, HW_TSO_2)) |
5835 | mss |= hdr_len << 9; | 5977 | mss |= hdr_len << 9; |
5836 | else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) || | 5978 | else if (tg3_flag(tp, HW_TSO_1) || |
5837 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { | 5979 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { |
5838 | if (tcp_opt_len || iph->ihl > 5) { | 5980 | if (tcp_opt_len || iph->ihl > 5) { |
5839 | int tsflags; | 5981 | int tsflags; |
@@ -5850,14 +5992,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5850 | } | 5992 | } |
5851 | } | 5993 | } |
5852 | } | 5994 | } |
5853 | #if TG3_VLAN_TAG_USED | 5995 | |
5854 | if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) | 5996 | if (vlan_tx_tag_present(skb)) |
5855 | base_flags |= (TXD_FLAG_VLAN | | 5997 | base_flags |= (TXD_FLAG_VLAN | |
5856 | (vlan_tx_tag_get(skb) << 16)); | 5998 | (vlan_tx_tag_get(skb) << 16)); |
5857 | #endif | ||
5858 | 5999 | ||
5859 | if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && | 6000 | if (tg3_flag(tp, USE_JUMBO_BDFLAG) && |
5860 | !mss && skb->len > ETH_DATA_LEN) | 6001 | !mss && skb->len > VLAN_ETH_FRAME_LEN) |
5861 | base_flags |= TXD_FLAG_JMB_PKT; | 6002 | base_flags |= TXD_FLAG_JMB_PKT; |
5862 | 6003 | ||
5863 | len = skb_headlen(skb); | 6004 | len = skb_headlen(skb); |
@@ -5873,18 +6014,18 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5873 | 6014 | ||
5874 | would_hit_hwbug = 0; | 6015 | would_hit_hwbug = 0; |
5875 | 6016 | ||
5876 | if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8) | 6017 | if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) |
5877 | would_hit_hwbug = 1; | 6018 | would_hit_hwbug = 1; |
5878 | 6019 | ||
5879 | if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && | 6020 | if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) && |
5880 | tg3_4g_overflow_test(mapping, len)) | 6021 | tg3_4g_overflow_test(mapping, len)) |
5881 | would_hit_hwbug = 1; | 6022 | would_hit_hwbug = 1; |
5882 | 6023 | ||
5883 | if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && | 6024 | if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) && |
5884 | tg3_40bit_overflow_test(tp, mapping, len)) | 6025 | tg3_40bit_overflow_test(tp, mapping, len)) |
5885 | would_hit_hwbug = 1; | 6026 | would_hit_hwbug = 1; |
5886 | 6027 | ||
5887 | if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG) | 6028 | if (tg3_flag(tp, 5701_DMA_BUG)) |
5888 | would_hit_hwbug = 1; | 6029 | would_hit_hwbug = 1; |
5889 | 6030 | ||
5890 | tg3_set_txd(tnapi, entry, mapping, len, base_flags, | 6031 | tg3_set_txd(tnapi, entry, mapping, len, base_flags, |
@@ -5910,19 +6051,21 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5910 | if (pci_dma_mapping_error(tp->pdev, mapping)) | 6051 | if (pci_dma_mapping_error(tp->pdev, mapping)) |
5911 | goto dma_error; | 6052 | goto dma_error; |
5912 | 6053 | ||
5913 | if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && | 6054 | if (tg3_flag(tp, SHORT_DMA_BUG) && |
5914 | len <= 8) | 6055 | len <= 8) |
5915 | would_hit_hwbug = 1; | 6056 | would_hit_hwbug = 1; |
5916 | 6057 | ||
5917 | if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && | 6058 | if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) && |
5918 | tg3_4g_overflow_test(mapping, len)) | 6059 | tg3_4g_overflow_test(mapping, len)) |
5919 | would_hit_hwbug = 1; | 6060 | would_hit_hwbug = 1; |
5920 | 6061 | ||
5921 | if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && | 6062 | if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) && |
5922 | tg3_40bit_overflow_test(tp, mapping, len)) | 6063 | tg3_40bit_overflow_test(tp, mapping, len)) |
5923 | would_hit_hwbug = 1; | 6064 | would_hit_hwbug = 1; |
5924 | 6065 | ||
5925 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 6066 | if (tg3_flag(tp, HW_TSO_1) || |
6067 | tg3_flag(tp, HW_TSO_2) || | ||
6068 | tg3_flag(tp, HW_TSO_3)) | ||
5926 | tg3_set_txd(tnapi, entry, mapping, len, | 6069 | tg3_set_txd(tnapi, entry, mapping, len, |
5927 | base_flags, (i == last)|(mss << 1)); | 6070 | base_flags, (i == last)|(mss << 1)); |
5928 | else | 6071 | else |
@@ -5934,20 +6077,15 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5934 | } | 6077 | } |
5935 | 6078 | ||
5936 | if (would_hit_hwbug) { | 6079 | if (would_hit_hwbug) { |
5937 | u32 last_plus_one = entry; | 6080 | tg3_skb_error_unmap(tnapi, skb, i); |
5938 | u32 start; | ||
5939 | |||
5940 | start = entry - 1 - skb_shinfo(skb)->nr_frags; | ||
5941 | start &= (TG3_TX_RING_SIZE - 1); | ||
5942 | 6081 | ||
5943 | /* If the workaround fails due to memory/mapping | 6082 | /* If the workaround fails due to memory/mapping |
5944 | * failure, silently drop this packet. | 6083 | * failure, silently drop this packet. |
5945 | */ | 6084 | */ |
5946 | if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one, | 6085 | if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss)) |
5947 | &start, base_flags, mss)) | ||
5948 | goto out_unlock; | 6086 | goto out_unlock; |
5949 | 6087 | ||
5950 | entry = start; | 6088 | entry = NEXT_TX(tnapi->tx_prod); |
5951 | } | 6089 | } |
5952 | 6090 | ||
5953 | /* Packets are ready, update Tx producer idx local and on card. */ | 6091 | /* Packets are ready, update Tx producer idx local and on card. */ |
@@ -5973,25 +6111,66 @@ out_unlock: | |||
5973 | return NETDEV_TX_OK; | 6111 | return NETDEV_TX_OK; |
5974 | 6112 | ||
5975 | dma_error: | 6113 | dma_error: |
5976 | last = i; | 6114 | tg3_skb_error_unmap(tnapi, skb, i); |
5977 | entry = tnapi->tx_prod; | 6115 | dev_kfree_skb(skb); |
5978 | tnapi->tx_buffers[entry].skb = NULL; | 6116 | tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; |
5979 | pci_unmap_single(tp->pdev, | 6117 | return NETDEV_TX_OK; |
5980 | dma_unmap_addr(&tnapi->tx_buffers[entry], mapping), | 6118 | } |
5981 | skb_headlen(skb), | ||
5982 | PCI_DMA_TODEVICE); | ||
5983 | for (i = 0; i <= last; i++) { | ||
5984 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
5985 | entry = NEXT_TX(entry); | ||
5986 | 6119 | ||
5987 | pci_unmap_page(tp->pdev, | 6120 | static void tg3_set_loopback(struct net_device *dev, u32 features) |
5988 | dma_unmap_addr(&tnapi->tx_buffers[entry], | 6121 | { |
5989 | mapping), | 6122 | struct tg3 *tp = netdev_priv(dev); |
5990 | frag->size, PCI_DMA_TODEVICE); | 6123 | |
6124 | if (features & NETIF_F_LOOPBACK) { | ||
6125 | if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) | ||
6126 | return; | ||
6127 | |||
6128 | /* | ||
6129 | * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in | ||
6130 | * loopback mode if Half-Duplex mode was negotiated earlier. | ||
6131 | */ | ||
6132 | tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; | ||
6133 | |||
6134 | /* Enable internal MAC loopback mode */ | ||
6135 | tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; | ||
6136 | spin_lock_bh(&tp->lock); | ||
6137 | tw32(MAC_MODE, tp->mac_mode); | ||
6138 | netif_carrier_on(tp->dev); | ||
6139 | spin_unlock_bh(&tp->lock); | ||
6140 | netdev_info(dev, "Internal MAC loopback mode enabled.\n"); | ||
6141 | } else { | ||
6142 | if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) | ||
6143 | return; | ||
6144 | |||
6145 | /* Disable internal MAC loopback mode */ | ||
6146 | tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; | ||
6147 | spin_lock_bh(&tp->lock); | ||
6148 | tw32(MAC_MODE, tp->mac_mode); | ||
6149 | /* Force link status check */ | ||
6150 | tg3_setup_phy(tp, 1); | ||
6151 | spin_unlock_bh(&tp->lock); | ||
6152 | netdev_info(dev, "Internal MAC loopback mode disabled.\n"); | ||
5991 | } | 6153 | } |
6154 | } | ||
5992 | 6155 | ||
5993 | dev_kfree_skb(skb); | 6156 | static u32 tg3_fix_features(struct net_device *dev, u32 features) |
5994 | return NETDEV_TX_OK; | 6157 | { |
6158 | struct tg3 *tp = netdev_priv(dev); | ||
6159 | |||
6160 | if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) | ||
6161 | features &= ~NETIF_F_ALL_TSO; | ||
6162 | |||
6163 | return features; | ||
6164 | } | ||
6165 | |||
6166 | static int tg3_set_features(struct net_device *dev, u32 features) | ||
6167 | { | ||
6168 | u32 changed = dev->features ^ features; | ||
6169 | |||
6170 | if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) | ||
6171 | tg3_set_loopback(dev, features); | ||
6172 | |||
6173 | return 0; | ||
5995 | } | 6174 | } |
5996 | 6175 | ||
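tg3_set_loopback(), tg3_fix_features() and tg3_set_features() above hook into the then-new netdev features framework: the core calls ndo_fix_features so the driver can veto combinations (here, no TSO once the MTU exceeds ETH_DATA_LEN on 5780-class parts) and ndo_set_features to apply changes such as toggling NETIF_F_LOOPBACK. The tg3_set_mtu() hunk just below leans on the same machinery by calling netdev_update_features() after an MTU change. A minimal wiring sketch, assuming the 2.6.39-era net_device_ops layout where these hooks still take a u32 feature mask (the ops-struct name is illustrative):

/* Illustrative wiring of the feature hooks (not the driver's actual table). */
static const struct net_device_ops example_netdev_ops = {
        .ndo_start_xmit         = tg3_start_xmit,
        .ndo_fix_features       = tg3_fix_features,
        .ndo_set_features       = tg3_set_features,
        /* ... remaining callbacks elided ... */
};

For NETIF_F_LOOPBACK to be user-togglable, the device would also have to advertise it in dev->hw_features; that part of the patch lies outside these hunks.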
5997 | static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, | 6176 | static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, |
@@ -6000,16 +6179,18 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, | |||
6000 | dev->mtu = new_mtu; | 6179 | dev->mtu = new_mtu; |
6001 | 6180 | ||
6002 | if (new_mtu > ETH_DATA_LEN) { | 6181 | if (new_mtu > ETH_DATA_LEN) { |
6003 | if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { | 6182 | if (tg3_flag(tp, 5780_CLASS)) { |
6004 | tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; | 6183 | netdev_update_features(dev); |
6005 | ethtool_op_set_tso(dev, 0); | 6184 | tg3_flag_clear(tp, TSO_CAPABLE); |
6006 | } else { | 6185 | } else { |
6007 | tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; | 6186 | tg3_flag_set(tp, JUMBO_RING_ENABLE); |
6008 | } | 6187 | } |
6009 | } else { | 6188 | } else { |
6010 | if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) | 6189 | if (tg3_flag(tp, 5780_CLASS)) { |
6011 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; | 6190 | tg3_flag_set(tp, TSO_CAPABLE); |
6012 | tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE; | 6191 | netdev_update_features(dev); |
6192 | } | ||
6193 | tg3_flag_clear(tp, JUMBO_RING_ENABLE); | ||
6013 | } | 6194 | } |
6014 | } | 6195 | } |
6015 | 6196 | ||
@@ -6057,16 +6238,16 @@ static void tg3_rx_prodring_free(struct tg3 *tp, | |||
6057 | { | 6238 | { |
6058 | int i; | 6239 | int i; |
6059 | 6240 | ||
6060 | if (tpr != &tp->prodring[0]) { | 6241 | if (tpr != &tp->napi[0].prodring) { |
6061 | for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; | 6242 | for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; |
6062 | i = (i + 1) % TG3_RX_RING_SIZE) | 6243 | i = (i + 1) & tp->rx_std_ring_mask) |
6063 | tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], | 6244 | tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], |
6064 | tp->rx_pkt_map_sz); | 6245 | tp->rx_pkt_map_sz); |
6065 | 6246 | ||
6066 | if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { | 6247 | if (tg3_flag(tp, JUMBO_CAPABLE)) { |
6067 | for (i = tpr->rx_jmb_cons_idx; | 6248 | for (i = tpr->rx_jmb_cons_idx; |
6068 | i != tpr->rx_jmb_prod_idx; | 6249 | i != tpr->rx_jmb_prod_idx; |
6069 | i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) { | 6250 | i = (i + 1) & tp->rx_jmb_ring_mask) { |
6070 | tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], | 6251 | tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], |
6071 | TG3_RX_JMB_MAP_SZ); | 6252 | TG3_RX_JMB_MAP_SZ); |
6072 | } | 6253 | } |
@@ -6075,12 +6256,12 @@ static void tg3_rx_prodring_free(struct tg3 *tp, | |||
6075 | return; | 6256 | return; |
6076 | } | 6257 | } |
6077 | 6258 | ||
6078 | for (i = 0; i < TG3_RX_RING_SIZE; i++) | 6259 | for (i = 0; i <= tp->rx_std_ring_mask; i++) |
6079 | tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], | 6260 | tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], |
6080 | tp->rx_pkt_map_sz); | 6261 | tp->rx_pkt_map_sz); |
6081 | 6262 | ||
6082 | if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { | 6263 | if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { |
6083 | for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) | 6264 | for (i = 0; i <= tp->rx_jmb_ring_mask; i++) |
6084 | tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], | 6265 | tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], |
6085 | TG3_RX_JMB_MAP_SZ); | 6266 | TG3_RX_JMB_MAP_SZ); |
6086 | } | 6267 | } |
@@ -6103,19 +6284,20 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, | |||
6103 | tpr->rx_jmb_cons_idx = 0; | 6284 | tpr->rx_jmb_cons_idx = 0; |
6104 | tpr->rx_jmb_prod_idx = 0; | 6285 | tpr->rx_jmb_prod_idx = 0; |
6105 | 6286 | ||
6106 | if (tpr != &tp->prodring[0]) { | 6287 | if (tpr != &tp->napi[0].prodring) { |
6107 | memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE); | 6288 | memset(&tpr->rx_std_buffers[0], 0, |
6108 | if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) | 6289 | TG3_RX_STD_BUFF_RING_SIZE(tp)); |
6290 | if (tpr->rx_jmb_buffers) | ||
6109 | memset(&tpr->rx_jmb_buffers[0], 0, | 6291 | memset(&tpr->rx_jmb_buffers[0], 0, |
6110 | TG3_RX_JMB_BUFF_RING_SIZE); | 6292 | TG3_RX_JMB_BUFF_RING_SIZE(tp)); |
6111 | goto done; | 6293 | goto done; |
6112 | } | 6294 | } |
6113 | 6295 | ||
6114 | /* Zero out all descriptors. */ | 6296 | /* Zero out all descriptors. */ |
6115 | memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); | 6297 | memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); |
6116 | 6298 | ||
6117 | rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; | 6299 | rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; |
6118 | if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) && | 6300 | if (tg3_flag(tp, 5780_CLASS) && |
6119 | tp->dev->mtu > ETH_DATA_LEN) | 6301 | tp->dev->mtu > ETH_DATA_LEN) |
6120 | rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; | 6302 | rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; |
6121 | tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); | 6303 | tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); |
@@ -6124,7 +6306,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, | |||
6124 | * stuff once. This works because the card does not | 6306 | * stuff once. This works because the card does not |
6125 | * write into the rx buffer posting rings. | 6307 | * write into the rx buffer posting rings. |
6126 | */ | 6308 | */ |
6127 | for (i = 0; i < TG3_RX_RING_SIZE; i++) { | 6309 | for (i = 0; i <= tp->rx_std_ring_mask; i++) { |
6128 | struct tg3_rx_buffer_desc *rxd; | 6310 | struct tg3_rx_buffer_desc *rxd; |
6129 | 6311 | ||
6130 | rxd = &tpr->rx_std[i]; | 6312 | rxd = &tpr->rx_std[i]; |
@@ -6148,15 +6330,15 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, | |||
6148 | } | 6330 | } |
6149 | } | 6331 | } |
6150 | 6332 | ||
6151 | if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)) | 6333 | if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) |
6152 | goto done; | 6334 | goto done; |
6153 | 6335 | ||
6154 | memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES); | 6336 | memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); |
6155 | 6337 | ||
6156 | if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)) | 6338 | if (!tg3_flag(tp, JUMBO_RING_ENABLE)) |
6157 | goto done; | 6339 | goto done; |
6158 | 6340 | ||
6159 | for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { | 6341 | for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { |
6160 | struct tg3_rx_buffer_desc *rxd; | 6342 | struct tg3_rx_buffer_desc *rxd; |
6161 | 6343 | ||
6162 | rxd = &tpr->rx_jmb[i].std; | 6344 | rxd = &tpr->rx_jmb[i].std; |
@@ -6196,13 +6378,13 @@ static void tg3_rx_prodring_fini(struct tg3 *tp, | |||
6196 | kfree(tpr->rx_jmb_buffers); | 6378 | kfree(tpr->rx_jmb_buffers); |
6197 | tpr->rx_jmb_buffers = NULL; | 6379 | tpr->rx_jmb_buffers = NULL; |
6198 | if (tpr->rx_std) { | 6380 | if (tpr->rx_std) { |
6199 | pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES, | 6381 | dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), |
6200 | tpr->rx_std, tpr->rx_std_mapping); | 6382 | tpr->rx_std, tpr->rx_std_mapping); |
6201 | tpr->rx_std = NULL; | 6383 | tpr->rx_std = NULL; |
6202 | } | 6384 | } |
6203 | if (tpr->rx_jmb) { | 6385 | if (tpr->rx_jmb) { |
6204 | pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, | 6386 | dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), |
6205 | tpr->rx_jmb, tpr->rx_jmb_mapping); | 6387 | tpr->rx_jmb, tpr->rx_jmb_mapping); |
6206 | tpr->rx_jmb = NULL; | 6388 | tpr->rx_jmb = NULL; |
6207 | } | 6389 | } |
6208 | } | 6390 | } |
@@ -6210,24 +6392,28 @@ static void tg3_rx_prodring_fini(struct tg3 *tp, | |||
6210 | static int tg3_rx_prodring_init(struct tg3 *tp, | 6392 | static int tg3_rx_prodring_init(struct tg3 *tp, |
6211 | struct tg3_rx_prodring_set *tpr) | 6393 | struct tg3_rx_prodring_set *tpr) |
6212 | { | 6394 | { |
6213 | tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL); | 6395 | tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), |
6396 | GFP_KERNEL); | ||
6214 | if (!tpr->rx_std_buffers) | 6397 | if (!tpr->rx_std_buffers) |
6215 | return -ENOMEM; | 6398 | return -ENOMEM; |
6216 | 6399 | ||
6217 | tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES, | 6400 | tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, |
6218 | &tpr->rx_std_mapping); | 6401 | TG3_RX_STD_RING_BYTES(tp), |
6402 | &tpr->rx_std_mapping, | ||
6403 | GFP_KERNEL); | ||
6219 | if (!tpr->rx_std) | 6404 | if (!tpr->rx_std) |
6220 | goto err_out; | 6405 | goto err_out; |
6221 | 6406 | ||
6222 | if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { | 6407 | if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { |
6223 | tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE, | 6408 | tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), |
6224 | GFP_KERNEL); | 6409 | GFP_KERNEL); |
6225 | if (!tpr->rx_jmb_buffers) | 6410 | if (!tpr->rx_jmb_buffers) |
6226 | goto err_out; | 6411 | goto err_out; |
6227 | 6412 | ||
6228 | tpr->rx_jmb = pci_alloc_consistent(tp->pdev, | 6413 | tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, |
6229 | TG3_RX_JUMBO_RING_BYTES, | 6414 | TG3_RX_JMB_RING_BYTES(tp), |
6230 | &tpr->rx_jmb_mapping); | 6415 | &tpr->rx_jmb_mapping, |
6416 | GFP_KERNEL); | ||
6231 | if (!tpr->rx_jmb) | 6417 | if (!tpr->rx_jmb) |
6232 | goto err_out; | 6418 | goto err_out; |
6233 | } | 6419 | } |
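The coherent allocations in these hunks move from the legacy pci_alloc_consistent()/pci_free_consistent() wrappers to dma_alloc_coherent()/dma_free_coherent() on &pdev->dev. The wrappers hard-code GFP_ATOMIC, so besides using the generic DMA API directly, the conversion lets these sleepable setup paths ask for GFP_KERNEL. For reference, the legacy wrapper expands to roughly this (historically in include/asm-generic/pci-dma-compat.h):

/* Approximate definition of the legacy wrapper being replaced. */
static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
{
        return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
                                  size, dma_handle, GFP_ATOMIC);
}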
@@ -6253,7 +6439,7 @@ static void tg3_free_rings(struct tg3 *tp) | |||
6253 | for (j = 0; j < tp->irq_cnt; j++) { | 6439 | for (j = 0; j < tp->irq_cnt; j++) { |
6254 | struct tg3_napi *tnapi = &tp->napi[j]; | 6440 | struct tg3_napi *tnapi = &tp->napi[j]; |
6255 | 6441 | ||
6256 | tg3_rx_prodring_free(tp, &tp->prodring[j]); | 6442 | tg3_rx_prodring_free(tp, &tnapi->prodring); |
6257 | 6443 | ||
6258 | if (!tnapi->tx_buffers) | 6444 | if (!tnapi->tx_buffers) |
6259 | continue; | 6445 | continue; |
@@ -6325,7 +6511,7 @@ static int tg3_init_rings(struct tg3 *tp) | |||
6325 | if (tnapi->rx_rcb) | 6511 | if (tnapi->rx_rcb) |
6326 | memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); | 6512 | memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); |
6327 | 6513 | ||
6328 | if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) { | 6514 | if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { |
6329 | tg3_free_rings(tp); | 6515 | tg3_free_rings(tp); |
6330 | return -ENOMEM; | 6516 | return -ENOMEM; |
6331 | } | 6517 | } |
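These hunks also retire the tp->prodring[] array: each RX producer ring set is now embedded directly in its tg3_napi, so setup, refill and teardown all address &tnapi->prodring instead of assigning a pointer into a shared array. Roughly what that means for the structure layout (the full definitions live in tg3.h; fields abridged here):

/* Abridged sketch of the per-vector ownership after this patch. */
struct tg3_napi {
        /* ... */
        struct tg3_rx_prodring_set      prodring;  /* was: a pointer into tp->prodring[] */
        /* ... */
};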
@@ -6346,7 +6532,7 @@ static void tg3_free_consistent(struct tg3 *tp) | |||
6346 | struct tg3_napi *tnapi = &tp->napi[i]; | 6532 | struct tg3_napi *tnapi = &tp->napi[i]; |
6347 | 6533 | ||
6348 | if (tnapi->tx_ring) { | 6534 | if (tnapi->tx_ring) { |
6349 | pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES, | 6535 | dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES, |
6350 | tnapi->tx_ring, tnapi->tx_desc_mapping); | 6536 | tnapi->tx_ring, tnapi->tx_desc_mapping); |
6351 | tnapi->tx_ring = NULL; | 6537 | tnapi->tx_ring = NULL; |
6352 | } | 6538 | } |
@@ -6355,28 +6541,28 @@ static void tg3_free_consistent(struct tg3 *tp) | |||
6355 | tnapi->tx_buffers = NULL; | 6541 | tnapi->tx_buffers = NULL; |
6356 | 6542 | ||
6357 | if (tnapi->rx_rcb) { | 6543 | if (tnapi->rx_rcb) { |
6358 | pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), | 6544 | dma_free_coherent(&tp->pdev->dev, |
6359 | tnapi->rx_rcb, | 6545 | TG3_RX_RCB_RING_BYTES(tp), |
6360 | tnapi->rx_rcb_mapping); | 6546 | tnapi->rx_rcb, |
6547 | tnapi->rx_rcb_mapping); | ||
6361 | tnapi->rx_rcb = NULL; | 6548 | tnapi->rx_rcb = NULL; |
6362 | } | 6549 | } |
6363 | 6550 | ||
6551 | tg3_rx_prodring_fini(tp, &tnapi->prodring); | ||
6552 | |||
6364 | if (tnapi->hw_status) { | 6553 | if (tnapi->hw_status) { |
6365 | pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, | 6554 | dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, |
6366 | tnapi->hw_status, | 6555 | tnapi->hw_status, |
6367 | tnapi->status_mapping); | 6556 | tnapi->status_mapping); |
6368 | tnapi->hw_status = NULL; | 6557 | tnapi->hw_status = NULL; |
6369 | } | 6558 | } |
6370 | } | 6559 | } |
6371 | 6560 | ||
6372 | if (tp->hw_stats) { | 6561 | if (tp->hw_stats) { |
6373 | pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats), | 6562 | dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), |
6374 | tp->hw_stats, tp->stats_mapping); | 6563 | tp->hw_stats, tp->stats_mapping); |
6375 | tp->hw_stats = NULL; | 6564 | tp->hw_stats = NULL; |
6376 | } | 6565 | } |
6377 | |||
6378 | for (i = 0; i < tp->irq_cnt; i++) | ||
6379 | tg3_rx_prodring_fini(tp, &tp->prodring[i]); | ||
6380 | } | 6566 | } |
6381 | 6567 | ||
6382 | /* | 6568 | /* |
@@ -6387,14 +6573,10 @@ static int tg3_alloc_consistent(struct tg3 *tp) | |||
6387 | { | 6573 | { |
6388 | int i; | 6574 | int i; |
6389 | 6575 | ||
6390 | for (i = 0; i < tp->irq_cnt; i++) { | 6576 | tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, |
6391 | if (tg3_rx_prodring_init(tp, &tp->prodring[i])) | 6577 | sizeof(struct tg3_hw_stats), |
6392 | goto err_out; | 6578 | &tp->stats_mapping, |
6393 | } | 6579 | GFP_KERNEL); |
6394 | |||
6395 | tp->hw_stats = pci_alloc_consistent(tp->pdev, | ||
6396 | sizeof(struct tg3_hw_stats), | ||
6397 | &tp->stats_mapping); | ||
6398 | if (!tp->hw_stats) | 6580 | if (!tp->hw_stats) |
6399 | goto err_out; | 6581 | goto err_out; |
6400 | 6582 | ||
@@ -6404,29 +6586,34 @@ static int tg3_alloc_consistent(struct tg3 *tp) | |||
6404 | struct tg3_napi *tnapi = &tp->napi[i]; | 6586 | struct tg3_napi *tnapi = &tp->napi[i]; |
6405 | struct tg3_hw_status *sblk; | 6587 | struct tg3_hw_status *sblk; |
6406 | 6588 | ||
6407 | tnapi->hw_status = pci_alloc_consistent(tp->pdev, | 6589 | tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, |
6408 | TG3_HW_STATUS_SIZE, | 6590 | TG3_HW_STATUS_SIZE, |
6409 | &tnapi->status_mapping); | 6591 | &tnapi->status_mapping, |
6592 | GFP_KERNEL); | ||
6410 | if (!tnapi->hw_status) | 6593 | if (!tnapi->hw_status) |
6411 | goto err_out; | 6594 | goto err_out; |
6412 | 6595 | ||
6413 | memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); | 6596 | memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); |
6414 | sblk = tnapi->hw_status; | 6597 | sblk = tnapi->hw_status; |
6415 | 6598 | ||
6599 | if (tg3_rx_prodring_init(tp, &tnapi->prodring)) | ||
6600 | goto err_out; | ||
6601 | |||
6416 | /* If multivector TSS is enabled, vector 0 does not handle | 6602 | /* If multivector TSS is enabled, vector 0 does not handle |
6417 | * tx interrupts. Don't allocate any resources for it. | 6603 | * tx interrupts. Don't allocate any resources for it. |
6418 | */ | 6604 | */ |
6419 | if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) || | 6605 | if ((!i && !tg3_flag(tp, ENABLE_TSS)) || |
6420 | (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) { | 6606 | (i && tg3_flag(tp, ENABLE_TSS))) { |
6421 | tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) * | 6607 | tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) * |
6422 | TG3_TX_RING_SIZE, | 6608 | TG3_TX_RING_SIZE, |
6423 | GFP_KERNEL); | 6609 | GFP_KERNEL); |
6424 | if (!tnapi->tx_buffers) | 6610 | if (!tnapi->tx_buffers) |
6425 | goto err_out; | 6611 | goto err_out; |
6426 | 6612 | ||
6427 | tnapi->tx_ring = pci_alloc_consistent(tp->pdev, | 6613 | tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, |
6428 | TG3_TX_RING_BYTES, | 6614 | TG3_TX_RING_BYTES, |
6429 | &tnapi->tx_desc_mapping); | 6615 | &tnapi->tx_desc_mapping, |
6616 | GFP_KERNEL); | ||
6430 | if (!tnapi->tx_ring) | 6617 | if (!tnapi->tx_ring) |
6431 | goto err_out; | 6618 | goto err_out; |
6432 | } | 6619 | } |
@@ -6452,18 +6639,17 @@ static int tg3_alloc_consistent(struct tg3 *tp) | |||
6452 | break; | 6639 | break; |
6453 | } | 6640 | } |
6454 | 6641 | ||
6455 | tnapi->prodring = &tp->prodring[i]; | ||
6456 | |||
6457 | /* | 6642 | /* |
6458 | * If multivector RSS is enabled, vector 0 does not handle | 6643 | * If multivector RSS is enabled, vector 0 does not handle |
6459 | * rx or tx interrupts. Don't allocate any resources for it. | 6644 | * rx or tx interrupts. Don't allocate any resources for it. |
6460 | */ | 6645 | */ |
6461 | if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) | 6646 | if (!i && tg3_flag(tp, ENABLE_RSS)) |
6462 | continue; | 6647 | continue; |
6463 | 6648 | ||
6464 | tnapi->rx_rcb = pci_alloc_consistent(tp->pdev, | 6649 | tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, |
6465 | TG3_RX_RCB_RING_BYTES(tp), | 6650 | TG3_RX_RCB_RING_BYTES(tp), |
6466 | &tnapi->rx_rcb_mapping); | 6651 | &tnapi->rx_rcb_mapping, |
6652 | GFP_KERNEL); | ||
6467 | if (!tnapi->rx_rcb) | 6653 | if (!tnapi->rx_rcb) |
6468 | goto err_out; | 6654 | goto err_out; |
6469 | 6655 | ||
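The hunks above move the status block, statistics block, and ring allocations from the legacy pci_alloc_consistent()/pci_free_consistent() wrappers to the generic DMA API, which takes the underlying struct device and an explicit GFP flag instead of always implying GFP_ATOMIC. A minimal sketch of the before/after pattern; my_ring_alloc/my_ring_free are hypothetical helper names, the driver itself open-codes these calls per ring:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical helpers for illustration only. */
static void *my_ring_alloc(struct pci_dev *pdev, size_t bytes, dma_addr_t *map)
{
	/* was: pci_alloc_consistent(pdev, bytes, map) -- pci_dev based,
	 * always GFP_ATOMIC */
	return dma_alloc_coherent(&pdev->dev, bytes, map, GFP_KERNEL);
}

static void my_ring_free(struct pci_dev *pdev, size_t bytes, void *addr,
			 dma_addr_t map)
{
	if (addr)
		dma_free_coherent(&pdev->dev, bytes, addr, map);
}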
@@ -6487,7 +6673,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int | |||
6487 | unsigned int i; | 6673 | unsigned int i; |
6488 | u32 val; | 6674 | u32 val; |
6489 | 6675 | ||
6490 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 6676 | if (tg3_flag(tp, 5705_PLUS)) { |
6491 | switch (ofs) { | 6677 | switch (ofs) { |
6492 | case RCVLSC_MODE: | 6678 | case RCVLSC_MODE: |
6493 | case DMAC_MODE: | 6679 | case DMAC_MODE: |
@@ -6596,6 +6782,10 @@ static void tg3_ape_send_event(struct tg3 *tp, u32 event) | |||
6596 | int i; | 6782 | int i; |
6597 | u32 apedata; | 6783 | u32 apedata; |
6598 | 6784 | ||
6785 | /* NCSI does not support APE events */ | ||
6786 | if (tg3_flag(tp, APE_HAS_NCSI)) | ||
6787 | return; | ||
6788 | |||
6599 | apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); | 6789 | apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); |
6600 | if (apedata != APE_SEG_SIG_MAGIC) | 6790 | if (apedata != APE_SEG_SIG_MAGIC) |
6601 | return; | 6791 | return; |
@@ -6632,7 +6822,7 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) | |||
6632 | u32 event; | 6822 | u32 event; |
6633 | u32 apedata; | 6823 | u32 apedata; |
6634 | 6824 | ||
6635 | if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) | 6825 | if (!tg3_flag(tp, ENABLE_APE)) |
6636 | return; | 6826 | return; |
6637 | 6827 | ||
6638 | switch (kind) { | 6828 | switch (kind) { |
@@ -6647,6 +6837,8 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) | |||
6647 | APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM)); | 6837 | APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM)); |
6648 | tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, | 6838 | tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, |
6649 | APE_HOST_BEHAV_NO_PHYLOCK); | 6839 | APE_HOST_BEHAV_NO_PHYLOCK); |
6840 | tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, | ||
6841 | TG3_APE_HOST_DRVR_STATE_START); | ||
6650 | 6842 | ||
6651 | event = APE_EVENT_STATUS_STATE_START; | 6843 | event = APE_EVENT_STATUS_STATE_START; |
6652 | break; | 6844 | break; |
@@ -6658,6 +6850,16 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) | |||
6658 | */ | 6850 | */ |
6659 | tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); | 6851 | tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); |
6660 | 6852 | ||
6853 | if (device_may_wakeup(&tp->pdev->dev) && | ||
6854 | tg3_flag(tp, WOL_ENABLE)) { | ||
6855 | tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, | ||
6856 | TG3_APE_HOST_WOL_SPEED_AUTO); | ||
6857 | apedata = TG3_APE_HOST_DRVR_STATE_WOL; | ||
6858 | } else | ||
6859 | apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD; | ||
6860 | |||
6861 | tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata); | ||
6862 | |||
6661 | event = APE_EVENT_STATUS_STATE_UNLOAD; | 6863 | event = APE_EVENT_STATUS_STATE_UNLOAD; |
6662 | break; | 6864 | break; |
6663 | case RESET_KIND_SUSPEND: | 6865 | case RESET_KIND_SUSPEND: |
@@ -6678,7 +6880,7 @@ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) | |||
6678 | tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, | 6880 | tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, |
6679 | NIC_SRAM_FIRMWARE_MBOX_MAGIC1); | 6881 | NIC_SRAM_FIRMWARE_MBOX_MAGIC1); |
6680 | 6882 | ||
6681 | if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { | 6883 | if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { |
6682 | switch (kind) { | 6884 | switch (kind) { |
6683 | case RESET_KIND_INIT: | 6885 | case RESET_KIND_INIT: |
6684 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 6886 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, |
@@ -6708,7 +6910,7 @@ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) | |||
6708 | /* tp->lock is held. */ | 6910 | /* tp->lock is held. */ |
6709 | static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) | 6911 | static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) |
6710 | { | 6912 | { |
6711 | if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { | 6913 | if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { |
6712 | switch (kind) { | 6914 | switch (kind) { |
6713 | case RESET_KIND_INIT: | 6915 | case RESET_KIND_INIT: |
6714 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 6916 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, |
@@ -6732,7 +6934,7 @@ static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) | |||
6732 | /* tp->lock is held. */ | 6934 | /* tp->lock is held. */ |
6733 | static void tg3_write_sig_legacy(struct tg3 *tp, int kind) | 6935 | static void tg3_write_sig_legacy(struct tg3 *tp, int kind) |
6734 | { | 6936 | { |
6735 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { | 6937 | if (tg3_flag(tp, ENABLE_ASF)) { |
6736 | switch (kind) { | 6938 | switch (kind) { |
6737 | case RESET_KIND_INIT: | 6939 | case RESET_KIND_INIT: |
6738 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 6940 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, |
@@ -6783,9 +6985,8 @@ static int tg3_poll_fw(struct tg3 *tp) | |||
6783 | * of the above loop as an error, but do report the lack of | 6985 | * of the above loop as an error, but do report the lack of |
6784 | * running firmware once. | 6986 | * running firmware once. |
6785 | */ | 6987 | */ |
6786 | if (i >= 100000 && | 6988 | if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) { |
6787 | !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) { | 6989 | tg3_flag_set(tp, NO_FWARE_REPORTED); |
6788 | tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED; | ||
6789 | 6990 | ||
6790 | netdev_info(tp->dev, "No firmware running\n"); | 6991 | netdev_info(tp->dev, "No firmware running\n"); |
6791 | } | 6992 | } |
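The hunk above converts the one-shot "No firmware running" report to the new flag helpers: the NO_FWARE_REPORTED bit is tested and then set in two calls. A purely illustrative alternative folds the check and the update into a single atomic test_and_set_bit(); the bit index and flags word below are made-up names, not driver fields:

#include <linux/bitops.h>
#include <linux/netdevice.h>

#define MY_NO_FW_REPORTED_BIT	0	/* illustrative bit index */

static void report_missing_fw_once(struct net_device *dev,
				   unsigned long *flags, int polls)
{
	/* Same report-once behaviour as the tg3_flag()/tg3_flag_set()
	 * pair, but the test and the set happen in one atomic step.
	 */
	if (polls >= 100000 && !test_and_set_bit(MY_NO_FW_REPORTED_BIT, flags))
		netdev_info(dev, "No firmware running\n");
}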
@@ -6818,10 +7019,10 @@ static void tg3_restore_pci_state(struct tg3 *tp) | |||
6818 | /* Set MAX PCI retry to zero. */ | 7019 | /* Set MAX PCI retry to zero. */ |
6819 | val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); | 7020 | val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); |
6820 | if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && | 7021 | if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && |
6821 | (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) | 7022 | tg3_flag(tp, PCIX_MODE)) |
6822 | val |= PCISTATE_RETRY_SAME_DMA; | 7023 | val |= PCISTATE_RETRY_SAME_DMA; |
6823 | /* Allow reads and writes to the APE register and memory space. */ | 7024 | /* Allow reads and writes to the APE register and memory space. */ |
6824 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) | 7025 | if (tg3_flag(tp, ENABLE_APE)) |
6825 | val |= PCISTATE_ALLOW_APE_CTLSPC_WR | | 7026 | val |= PCISTATE_ALLOW_APE_CTLSPC_WR | |
6826 | PCISTATE_ALLOW_APE_SHMEM_WR | | 7027 | PCISTATE_ALLOW_APE_SHMEM_WR | |
6827 | PCISTATE_ALLOW_APE_PSPACE_WR; | 7028 | PCISTATE_ALLOW_APE_PSPACE_WR; |
@@ -6830,8 +7031,8 @@ static void tg3_restore_pci_state(struct tg3 *tp) | |||
6830 | pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); | 7031 | pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); |
6831 | 7032 | ||
6832 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { | 7033 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { |
6833 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) | 7034 | if (tg3_flag(tp, PCI_EXPRESS)) |
6834 | pcie_set_readrq(tp->pdev, 4096); | 7035 | pcie_set_readrq(tp->pdev, tp->pcie_readrq); |
6835 | else { | 7036 | else { |
6836 | pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, | 7037 | pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, |
6837 | tp->pci_cacheline_sz); | 7038 | tp->pci_cacheline_sz); |
@@ -6841,7 +7042,7 @@ static void tg3_restore_pci_state(struct tg3 *tp) | |||
6841 | } | 7042 | } |
6842 | 7043 | ||
6843 | /* Make sure PCI-X relaxed ordering bit is clear. */ | 7044 | /* Make sure PCI-X relaxed ordering bit is clear. */ |
6844 | if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { | 7045 | if (tg3_flag(tp, PCIX_MODE)) { |
6845 | u16 pcix_cmd; | 7046 | u16 pcix_cmd; |
6846 | 7047 | ||
6847 | pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, | 7048 | pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, |
@@ -6851,12 +7052,12 @@ static void tg3_restore_pci_state(struct tg3 *tp) | |||
6851 | pcix_cmd); | 7052 | pcix_cmd); |
6852 | } | 7053 | } |
6853 | 7054 | ||
6854 | if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { | 7055 | if (tg3_flag(tp, 5780_CLASS)) { |
6855 | 7056 | ||
6856 | /* Chip reset on 5780 will reset MSI enable bit, | 7057 | /* Chip reset on 5780 will reset MSI enable bit, |
6857 | * so need to restore it. | 7058 | * so need to restore it. |
6858 | */ | 7059 | */ |
6859 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { | 7060 | if (tg3_flag(tp, USING_MSI)) { |
6860 | u16 ctrl; | 7061 | u16 ctrl; |
6861 | 7062 | ||
6862 | pci_read_config_word(tp->pdev, | 7063 | pci_read_config_word(tp->pdev, |
@@ -6896,7 +7097,7 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
6896 | tg3_save_pci_state(tp); | 7097 | tg3_save_pci_state(tp); |
6897 | 7098 | ||
6898 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || | 7099 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || |
6899 | (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) | 7100 | tg3_flag(tp, 5755_PLUS)) |
6900 | tw32(GRC_FASTBOOT_PC, 0); | 7101 | tw32(GRC_FASTBOOT_PC, 0); |
6901 | 7102 | ||
6902 | /* | 7103 | /* |
@@ -6915,7 +7116,7 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
6915 | * at this time, but the irq handler may still be called due to irq | 7116 | * at this time, but the irq handler may still be called due to irq |
6916 | * sharing or irqpoll. | 7117 | * sharing or irqpoll. |
6917 | */ | 7118 | */ |
6918 | tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING; | 7119 | tg3_flag_set(tp, CHIP_RESETTING); |
6919 | for (i = 0; i < tp->irq_cnt; i++) { | 7120 | for (i = 0; i < tp->irq_cnt; i++) { |
6920 | struct tg3_napi *tnapi = &tp->napi[i]; | 7121 | struct tg3_napi *tnapi = &tp->napi[i]; |
6921 | if (tnapi->hw_status) { | 7122 | if (tnapi->hw_status) { |
@@ -6938,10 +7139,10 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
6938 | /* do the reset */ | 7139 | /* do the reset */ |
6939 | val = GRC_MISC_CFG_CORECLK_RESET; | 7140 | val = GRC_MISC_CFG_CORECLK_RESET; |
6940 | 7141 | ||
6941 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { | 7142 | if (tg3_flag(tp, PCI_EXPRESS)) { |
6942 | /* Force PCIe 1.0a mode */ | 7143 | /* Force PCIe 1.0a mode */ |
6943 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && | 7144 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && |
6944 | !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && | 7145 | !tg3_flag(tp, 57765_PLUS) && |
6945 | tr32(TG3_PCIE_PHY_TSTCTL) == | 7146 | tr32(TG3_PCIE_PHY_TSTCTL) == |
6946 | (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) | 7147 | (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) |
6947 | tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); | 7148 | tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); |
@@ -6959,8 +7160,7 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
6959 | } | 7160 | } |
6960 | 7161 | ||
6961 | /* Manage gphy power for all CPMU absent PCIe devices. */ | 7162 | /* Manage gphy power for all CPMU absent PCIe devices. */ |
6962 | if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && | 7163 | if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT)) |
6963 | !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) | ||
6964 | val |= GRC_MISC_CFG_KEEP_GPHY_POWER; | 7164 | val |= GRC_MISC_CFG_KEEP_GPHY_POWER; |
6965 | 7165 | ||
6966 | tw32(GRC_MISC_CFG, val); | 7166 | tw32(GRC_MISC_CFG, val); |
@@ -6993,7 +7193,7 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
6993 | 7193 | ||
6994 | udelay(120); | 7194 | udelay(120); |
6995 | 7195 | ||
6996 | if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) { | 7196 | if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) { |
6997 | u16 val16; | 7197 | u16 val16; |
6998 | 7198 | ||
6999 | if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { | 7199 | if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { |
@@ -7019,13 +7219,13 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
7019 | * Older PCIe devices only support the 128 byte | 7219 | * Older PCIe devices only support the 128 byte |
7020 | * MPS setting. Enforce the restriction. | 7220 | * MPS setting. Enforce the restriction. |
7021 | */ | 7221 | */ |
7022 | if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) | 7222 | if (!tg3_flag(tp, CPMU_PRESENT)) |
7023 | val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; | 7223 | val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; |
7024 | pci_write_config_word(tp->pdev, | 7224 | pci_write_config_word(tp->pdev, |
7025 | tp->pcie_cap + PCI_EXP_DEVCTL, | 7225 | tp->pcie_cap + PCI_EXP_DEVCTL, |
7026 | val16); | 7226 | val16); |
7027 | 7227 | ||
7028 | pcie_set_readrq(tp->pdev, 4096); | 7228 | pcie_set_readrq(tp->pdev, tp->pcie_readrq); |
7029 | 7229 | ||
7030 | /* Clear error status */ | 7230 | /* Clear error status */ |
7031 | pci_write_config_word(tp->pdev, | 7231 | pci_write_config_word(tp->pdev, |
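Both read-request-size writes in the reset path now restore tp->pcie_readrq, a per-device value captured elsewhere in the driver, instead of hard-coding 4096. pcie_set_readrq() programs the PCIe Max Read Request Size in the Device Control register and rejects values that are not a power of two between 128 and 4096 bytes; a minimal defensive-caller sketch (my_restore_mrrs is an illustrative name, not a tg3 function):

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/pci.h>

static void my_restore_mrrs(struct pci_dev *pdev, int readrq)
{
	/* Keep the request inside the 128..4096 range and on a power of
	 * two so pcie_set_readrq() does not return -EINVAL.
	 */
	readrq = clamp(readrq, 128, 4096);
	readrq = rounddown_pow_of_two(readrq);

	pcie_set_readrq(pdev, readrq);
}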
@@ -7038,10 +7238,11 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
7038 | 7238 | ||
7039 | tg3_restore_pci_state(tp); | 7239 | tg3_restore_pci_state(tp); |
7040 | 7240 | ||
7041 | tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING; | 7241 | tg3_flag_clear(tp, CHIP_RESETTING); |
7242 | tg3_flag_clear(tp, ERROR_PROCESSED); | ||
7042 | 7243 | ||
7043 | val = 0; | 7244 | val = 0; |
7044 | if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) | 7245 | if (tg3_flag(tp, 5780_CLASS)) |
7045 | val = tr32(MEMARB_MODE); | 7246 | val = tr32(MEMARB_MODE); |
7046 | tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); | 7247 | tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); |
7047 | 7248 | ||
@@ -7066,19 +7267,21 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
7066 | tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); | 7267 | tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); |
7067 | } | 7268 | } |
7068 | 7269 | ||
7270 | if (tg3_flag(tp, ENABLE_APE)) | ||
7271 | tp->mac_mode = MAC_MODE_APE_TX_EN | | ||
7272 | MAC_MODE_APE_RX_EN | | ||
7273 | MAC_MODE_TDE_ENABLE; | ||
7274 | |||
7069 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { | 7275 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { |
7070 | tp->mac_mode = MAC_MODE_PORT_MODE_TBI; | 7276 | tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; |
7071 | tw32_f(MAC_MODE, tp->mac_mode); | 7277 | val = tp->mac_mode; |
7072 | } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { | 7278 | } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { |
7073 | tp->mac_mode = MAC_MODE_PORT_MODE_GMII; | 7279 | tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; |
7074 | tw32_f(MAC_MODE, tp->mac_mode); | 7280 | val = tp->mac_mode; |
7075 | } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { | ||
7076 | tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); | ||
7077 | if (tp->mac_mode & MAC_MODE_APE_TX_EN) | ||
7078 | tp->mac_mode |= MAC_MODE_TDE_ENABLE; | ||
7079 | tw32_f(MAC_MODE, tp->mac_mode); | ||
7080 | } else | 7281 | } else |
7081 | tw32_f(MAC_MODE, 0); | 7282 | val = 0; |
7283 | |||
7284 | tw32_f(MAC_MODE, val); | ||
7082 | udelay(40); | 7285 | udelay(40); |
7083 | 7286 | ||
7084 | tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); | 7287 | tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); |
@@ -7089,28 +7292,33 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
7089 | 7292 | ||
7090 | tg3_mdio_start(tp); | 7293 | tg3_mdio_start(tp); |
7091 | 7294 | ||
7092 | if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && | 7295 | if (tg3_flag(tp, PCI_EXPRESS) && |
7093 | tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && | 7296 | tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && |
7094 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && | 7297 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && |
7095 | !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { | 7298 | !tg3_flag(tp, 57765_PLUS)) { |
7096 | val = tr32(0x7c00); | 7299 | val = tr32(0x7c00); |
7097 | 7300 | ||
7098 | tw32(0x7c00, val | (1 << 25)); | 7301 | tw32(0x7c00, val | (1 << 25)); |
7099 | } | 7302 | } |
7100 | 7303 | ||
7304 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { | ||
7305 | val = tr32(TG3_CPMU_CLCK_ORIDE); | ||
7306 | tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); | ||
7307 | } | ||
7308 | |||
7101 | /* Reprobe ASF enable state. */ | 7309 | /* Reprobe ASF enable state. */ |
7102 | tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF; | 7310 | tg3_flag_clear(tp, ENABLE_ASF); |
7103 | tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE; | 7311 | tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); |
7104 | tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); | 7312 | tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); |
7105 | if (val == NIC_SRAM_DATA_SIG_MAGIC) { | 7313 | if (val == NIC_SRAM_DATA_SIG_MAGIC) { |
7106 | u32 nic_cfg; | 7314 | u32 nic_cfg; |
7107 | 7315 | ||
7108 | tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); | 7316 | tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); |
7109 | if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { | 7317 | if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { |
7110 | tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; | 7318 | tg3_flag_set(tp, ENABLE_ASF); |
7111 | tp->last_event_jiffies = jiffies; | 7319 | tp->last_event_jiffies = jiffies; |
7112 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) | 7320 | if (tg3_flag(tp, 5750_PLUS)) |
7113 | tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; | 7321 | tg3_flag_set(tp, ASF_NEW_HANDSHAKE); |
7114 | } | 7322 | } |
7115 | } | 7323 | } |
7116 | 7324 | ||
@@ -7120,8 +7328,7 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
7120 | /* tp->lock is held. */ | 7328 | /* tp->lock is held. */ |
7121 | static void tg3_stop_fw(struct tg3 *tp) | 7329 | static void tg3_stop_fw(struct tg3 *tp) |
7122 | { | 7330 | { |
7123 | if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && | 7331 | if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { |
7124 | !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { | ||
7125 | /* Wait for RX cpu to ACK the previous event. */ | 7332 | /* Wait for RX cpu to ACK the previous event. */ |
7126 | tg3_wait_for_event_ack(tp); | 7333 | tg3_wait_for_event_ack(tp); |
7127 | 7334 | ||
@@ -7167,8 +7374,7 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset) | |||
7167 | { | 7374 | { |
7168 | int i; | 7375 | int i; |
7169 | 7376 | ||
7170 | BUG_ON(offset == TX_CPU_BASE && | 7377 | BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); |
7171 | (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)); | ||
7172 | 7378 | ||
7173 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 7379 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
7174 | u32 val = tr32(GRC_VCPU_EXT_CTRL); | 7380 | u32 val = tr32(GRC_VCPU_EXT_CTRL); |
@@ -7203,7 +7409,7 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset) | |||
7203 | } | 7409 | } |
7204 | 7410 | ||
7205 | /* Clear firmware's nvram arbitration. */ | 7411 | /* Clear firmware's nvram arbitration. */ |
7206 | if (tp->tg3_flags & TG3_FLAG_NVRAM) | 7412 | if (tg3_flag(tp, NVRAM)) |
7207 | tw32(NVRAM_SWARB, SWARB_REQ_CLR0); | 7413 | tw32(NVRAM_SWARB, SWARB_REQ_CLR0); |
7208 | return 0; | 7414 | return 0; |
7209 | } | 7415 | } |
@@ -7221,15 +7427,14 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b | |||
7221 | int err, lock_err, i; | 7427 | int err, lock_err, i; |
7222 | void (*write_op)(struct tg3 *, u32, u32); | 7428 | void (*write_op)(struct tg3 *, u32, u32); |
7223 | 7429 | ||
7224 | if (cpu_base == TX_CPU_BASE && | 7430 | if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { |
7225 | (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | ||
7226 | netdev_err(tp->dev, | 7431 | netdev_err(tp->dev, |
7227 | "%s: Trying to load TX cpu firmware which is 5705\n", | 7432 | "%s: Trying to load TX cpu firmware which is 5705\n", |
7228 | __func__); | 7433 | __func__); |
7229 | return -EINVAL; | 7434 | return -EINVAL; |
7230 | } | 7435 | } |
7231 | 7436 | ||
7232 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) | 7437 | if (tg3_flag(tp, 5705_PLUS)) |
7233 | write_op = tg3_write_mem; | 7438 | write_op = tg3_write_mem; |
7234 | else | 7439 | else |
7235 | write_op = tg3_write_indirect_reg32; | 7440 | write_op = tg3_write_indirect_reg32; |
@@ -7315,8 +7520,6 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) | |||
7315 | return 0; | 7520 | return 0; |
7316 | } | 7521 | } |
7317 | 7522 | ||
7318 | /* 5705 needs a special version of the TSO firmware. */ | ||
7319 | |||
7320 | /* tp->lock is held. */ | 7523 | /* tp->lock is held. */ |
7321 | static int tg3_load_tso_firmware(struct tg3 *tp) | 7524 | static int tg3_load_tso_firmware(struct tg3 *tp) |
7322 | { | 7525 | { |
@@ -7325,7 +7528,9 @@ static int tg3_load_tso_firmware(struct tg3 *tp) | |||
7325 | unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; | 7528 | unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; |
7326 | int err, i; | 7529 | int err, i; |
7327 | 7530 | ||
7328 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 7531 | if (tg3_flag(tp, HW_TSO_1) || |
7532 | tg3_flag(tp, HW_TSO_2) || | ||
7533 | tg3_flag(tp, HW_TSO_3)) | ||
7329 | return 0; | 7534 | return 0; |
7330 | 7535 | ||
7331 | fw_data = (void *)tp->fw->data; | 7536 | fw_data = (void *)tp->fw->data; |
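With the flag storage now a bitmap, the old composite TG3_FLG2_HW_TSO test can no longer be expressed as a single mask, so the check is spelled out as three tg3_flag() calls here and again in the tg3_reset_hw() hunks below. A hypothetical convenience wrapper (not part of the driver) that restores the one-line test, assuming the driver's struct tg3 and tg3_flag() accessor:

static inline bool tg3_flag_hw_tso_any(struct tg3 *tp)
{
	/* True if the chip has any of the hardware TSO engines. */
	return tg3_flag(tp, HW_TSO_1) ||
	       tg3_flag(tp, HW_TSO_2) ||
	       tg3_flag(tp, HW_TSO_3);
}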
@@ -7394,7 +7599,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) | |||
7394 | if (!netif_running(dev)) | 7599 | if (!netif_running(dev)) |
7395 | return 0; | 7600 | return 0; |
7396 | 7601 | ||
7397 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { | 7602 | if (tg3_flag(tp, ENABLE_ASF)) { |
7398 | u32 addr0_high, addr0_low, addr1_high, addr1_low; | 7603 | u32 addr0_high, addr0_low, addr1_high, addr1_low; |
7399 | 7604 | ||
7400 | addr0_high = tr32(MAC_ADDR_0_HIGH); | 7605 | addr0_high = tr32(MAC_ADDR_0_HIGH); |
@@ -7429,7 +7634,7 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, | |||
7429 | (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), | 7634 | (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), |
7430 | maxlen_flags); | 7635 | maxlen_flags); |
7431 | 7636 | ||
7432 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 7637 | if (!tg3_flag(tp, 5705_PLUS)) |
7433 | tg3_write_mem(tp, | 7638 | tg3_write_mem(tp, |
7434 | (bdinfo_addr + TG3_BDINFO_NIC_ADDR), | 7639 | (bdinfo_addr + TG3_BDINFO_NIC_ADDR), |
7435 | nic_addr); | 7640 | nic_addr); |
@@ -7440,7 +7645,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) | |||
7440 | { | 7645 | { |
7441 | int i; | 7646 | int i; |
7442 | 7647 | ||
7443 | if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) { | 7648 | if (!tg3_flag(tp, ENABLE_TSS)) { |
7444 | tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); | 7649 | tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); |
7445 | tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); | 7650 | tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); |
7446 | tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); | 7651 | tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); |
@@ -7450,7 +7655,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) | |||
7450 | tw32(HOSTCC_TXCOAL_MAXF_INT, 0); | 7655 | tw32(HOSTCC_TXCOAL_MAXF_INT, 0); |
7451 | } | 7656 | } |
7452 | 7657 | ||
7453 | if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) { | 7658 | if (!tg3_flag(tp, ENABLE_RSS)) { |
7454 | tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); | 7659 | tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); |
7455 | tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); | 7660 | tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); |
7456 | tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); | 7661 | tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); |
@@ -7460,7 +7665,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) | |||
7460 | tw32(HOSTCC_RXCOAL_MAXF_INT, 0); | 7665 | tw32(HOSTCC_RXCOAL_MAXF_INT, 0); |
7461 | } | 7666 | } |
7462 | 7667 | ||
7463 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 7668 | if (!tg3_flag(tp, 5705_PLUS)) { |
7464 | u32 val = ec->stats_block_coalesce_usecs; | 7669 | u32 val = ec->stats_block_coalesce_usecs; |
7465 | 7670 | ||
7466 | tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); | 7671 | tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); |
@@ -7482,7 +7687,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) | |||
7482 | reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; | 7687 | reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; |
7483 | tw32(reg, ec->rx_max_coalesced_frames_irq); | 7688 | tw32(reg, ec->rx_max_coalesced_frames_irq); |
7484 | 7689 | ||
7485 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { | 7690 | if (tg3_flag(tp, ENABLE_TSS)) { |
7486 | reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; | 7691 | reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; |
7487 | tw32(reg, ec->tx_coalesce_usecs); | 7692 | tw32(reg, ec->tx_coalesce_usecs); |
7488 | reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; | 7693 | reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; |
@@ -7497,7 +7702,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) | |||
7497 | tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); | 7702 | tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); |
7498 | tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); | 7703 | tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); |
7499 | 7704 | ||
7500 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { | 7705 | if (tg3_flag(tp, ENABLE_TSS)) { |
7501 | tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); | 7706 | tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); |
7502 | tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); | 7707 | tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); |
7503 | tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); | 7708 | tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); |
@@ -7513,8 +7718,10 @@ static void tg3_rings_reset(struct tg3 *tp) | |||
7513 | struct tg3_napi *tnapi = &tp->napi[0]; | 7718 | struct tg3_napi *tnapi = &tp->napi[0]; |
7514 | 7719 | ||
7515 | /* Disable all transmit rings but the first. */ | 7720 | /* Disable all transmit rings but the first. */ |
7516 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 7721 | if (!tg3_flag(tp, 5705_PLUS)) |
7517 | limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; | 7722 | limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; |
7723 | else if (tg3_flag(tp, 5717_PLUS)) | ||
7724 | limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; | ||
7518 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | 7725 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) |
7519 | limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; | 7726 | limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; |
7520 | else | 7727 | else |
@@ -7527,10 +7734,9 @@ static void tg3_rings_reset(struct tg3 *tp) | |||
7527 | 7734 | ||
7528 | 7735 | ||
7529 | /* Disable all receive return rings but the first. */ | 7736 | /* Disable all receive return rings but the first. */ |
7530 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 7737 | if (tg3_flag(tp, 5717_PLUS)) |
7531 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) | ||
7532 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; | 7738 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; |
7533 | else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 7739 | else if (!tg3_flag(tp, 5705_PLUS)) |
7534 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; | 7740 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; |
7535 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 7741 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
7536 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | 7742 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) |
@@ -7547,16 +7753,16 @@ static void tg3_rings_reset(struct tg3 *tp) | |||
7547 | tw32_mailbox_f(tp->napi[0].int_mbox, 1); | 7753 | tw32_mailbox_f(tp->napi[0].int_mbox, 1); |
7548 | 7754 | ||
7549 | /* Zero mailbox registers. */ | 7755 | /* Zero mailbox registers. */ |
7550 | if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) { | 7756 | if (tg3_flag(tp, SUPPORT_MSIX)) { |
7551 | for (i = 1; i < TG3_IRQ_MAX_VECS; i++) { | 7757 | for (i = 1; i < tp->irq_max; i++) { |
7552 | tp->napi[i].tx_prod = 0; | 7758 | tp->napi[i].tx_prod = 0; |
7553 | tp->napi[i].tx_cons = 0; | 7759 | tp->napi[i].tx_cons = 0; |
7554 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) | 7760 | if (tg3_flag(tp, ENABLE_TSS)) |
7555 | tw32_mailbox(tp->napi[i].prodmbox, 0); | 7761 | tw32_mailbox(tp->napi[i].prodmbox, 0); |
7556 | tw32_rx_mbox(tp->napi[i].consmbox, 0); | 7762 | tw32_rx_mbox(tp->napi[i].consmbox, 0); |
7557 | tw32_mailbox_f(tp->napi[i].int_mbox, 1); | 7763 | tw32_mailbox_f(tp->napi[i].int_mbox, 1); |
7558 | } | 7764 | } |
7559 | if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) | 7765 | if (!tg3_flag(tp, ENABLE_TSS)) |
7560 | tw32_mailbox(tp->napi[0].prodmbox, 0); | 7766 | tw32_mailbox(tp->napi[0].prodmbox, 0); |
7561 | } else { | 7767 | } else { |
7562 | tp->napi[0].tx_prod = 0; | 7768 | tp->napi[0].tx_prod = 0; |
@@ -7566,7 +7772,7 @@ static void tg3_rings_reset(struct tg3 *tp) | |||
7566 | } | 7772 | } |
7567 | 7773 | ||
7568 | /* Make sure the NIC-based send BD rings are disabled. */ | 7774 | /* Make sure the NIC-based send BD rings are disabled. */ |
7569 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 7775 | if (!tg3_flag(tp, 5705_PLUS)) { |
7570 | u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; | 7776 | u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; |
7571 | for (i = 0; i < 16; i++) | 7777 | for (i = 0; i < 16; i++) |
7572 | tw32_tx_mbox(mbox + i * 8, 0); | 7778 | tw32_tx_mbox(mbox + i * 8, 0); |
@@ -7594,8 +7800,8 @@ static void tg3_rings_reset(struct tg3 *tp) | |||
7594 | 7800 | ||
7595 | if (tnapi->rx_rcb) { | 7801 | if (tnapi->rx_rcb) { |
7596 | tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, | 7802 | tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, |
7597 | (TG3_RX_RCB_RING_SIZE(tp) << | 7803 | (tp->rx_ret_ring_mask + 1) << |
7598 | BDINFO_FLAGS_MAXLEN_SHIFT), 0); | 7804 | BDINFO_FLAGS_MAXLEN_SHIFT, 0); |
7599 | rxrcb += TG3_BDINFO_SIZE; | 7805 | rxrcb += TG3_BDINFO_SIZE; |
7600 | } | 7806 | } |
7601 | 7807 | ||
@@ -7618,7 +7824,7 @@ static void tg3_rings_reset(struct tg3 *tp) | |||
7618 | } | 7824 | } |
7619 | 7825 | ||
7620 | tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, | 7826 | tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, |
7621 | (TG3_RX_RCB_RING_SIZE(tp) << | 7827 | ((tp->rx_ret_ring_mask + 1) << |
7622 | BDINFO_FLAGS_MAXLEN_SHIFT), 0); | 7828 | BDINFO_FLAGS_MAXLEN_SHIFT), 0); |
7623 | 7829 | ||
7624 | stblk += 8; | 7830 | stblk += 8; |
@@ -7626,12 +7832,53 @@ static void tg3_rings_reset(struct tg3 *tp) | |||
7626 | } | 7832 | } |
7627 | } | 7833 | } |
7628 | 7834 | ||
7835 | static void tg3_setup_rxbd_thresholds(struct tg3 *tp) | ||
7836 | { | ||
7837 | u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh; | ||
7838 | |||
7839 | if (!tg3_flag(tp, 5750_PLUS) || | ||
7840 | tg3_flag(tp, 5780_CLASS) || | ||
7841 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || | ||
7842 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) | ||
7843 | bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; | ||
7844 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | ||
7845 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) | ||
7846 | bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; | ||
7847 | else | ||
7848 | bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; | ||
7849 | |||
7850 | nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); | ||
7851 | host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); | ||
7852 | |||
7853 | val = min(nic_rep_thresh, host_rep_thresh); | ||
7854 | tw32(RCVBDI_STD_THRESH, val); | ||
7855 | |||
7856 | if (tg3_flag(tp, 57765_PLUS)) | ||
7857 | tw32(STD_REPLENISH_LWM, bdcache_maxcnt); | ||
7858 | |||
7859 | if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) | ||
7860 | return; | ||
7861 | |||
7862 | if (!tg3_flag(tp, 5705_PLUS)) | ||
7863 | bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; | ||
7864 | else | ||
7865 | bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717; | ||
7866 | |||
7867 | host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); | ||
7868 | |||
7869 | val = min(bdcache_maxcnt / 2, host_rep_thresh); | ||
7870 | tw32(RCVBDI_JUMBO_THRESH, val); | ||
7871 | |||
7872 | if (tg3_flag(tp, 57765_PLUS)) | ||
7873 | tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); | ||
7874 | } | ||
7875 | |||
7629 | /* tp->lock is held. */ | 7876 | /* tp->lock is held. */ |
7630 | static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | 7877 | static int tg3_reset_hw(struct tg3 *tp, int reset_phy) |
7631 | { | 7878 | { |
7632 | u32 val, rdmac_mode; | 7879 | u32 val, rdmac_mode; |
7633 | int i, err, limit; | 7880 | int i, err, limit; |
7634 | struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; | 7881 | struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; |
7635 | 7882 | ||
7636 | tg3_disable_ints(tp); | 7883 | tg3_disable_ints(tp); |
7637 | 7884 | ||
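The hunk above adds tg3_setup_rxbd_thresholds(), which replaces the open-coded replenish-threshold math later in tg3_reset_hw(): the standard-ring threshold is the smaller of half the on-chip BD cache and one eighth of the configured rx_pending (floored at 1), and on 57765-plus parts the low-water replenish marks are set to the full cache size. A standalone sketch of the standard-ring computation; the numbers in the comment are assumed for illustration, not taken from tg3.h:

#include <linux/kernel.h>
#include <linux/types.h>

static u32 std_replenish_thresh(u32 bdcache_maxcnt, u32 rx_pending,
				u32 rx_std_max_post)
{
	u32 nic_rep_thresh = min(bdcache_maxcnt / 2, rx_std_max_post);
	u32 host_rep_thresh = max_t(u32, rx_pending / 8, 1);

	/* Assumed example: bdcache_maxcnt = 64, rx_pending = 200,
	 * rx_std_max_post = 700 -> min(min(32, 700), max(25, 1)) = 25.
	 */
	return min(nic_rep_thresh, host_rep_thresh);
}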
@@ -7639,9 +7886,40 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7639 | 7886 | ||
7640 | tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); | 7887 | tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); |
7641 | 7888 | ||
7642 | if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) | 7889 | if (tg3_flag(tp, INIT_COMPLETE)) |
7643 | tg3_abort_hw(tp, 1); | 7890 | tg3_abort_hw(tp, 1); |
7644 | 7891 | ||
7892 | /* Enable MAC control of LPI */ | ||
7893 | if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { | ||
7894 | tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, | ||
7895 | TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | | ||
7896 | TG3_CPMU_EEE_LNKIDL_UART_IDL); | ||
7897 | |||
7898 | tw32_f(TG3_CPMU_EEE_CTRL, | ||
7899 | TG3_CPMU_EEE_CTRL_EXIT_20_1_US); | ||
7900 | |||
7901 | val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | | ||
7902 | TG3_CPMU_EEEMD_LPI_IN_TX | | ||
7903 | TG3_CPMU_EEEMD_LPI_IN_RX | | ||
7904 | TG3_CPMU_EEEMD_EEE_ENABLE; | ||
7905 | |||
7906 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) | ||
7907 | val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; | ||
7908 | |||
7909 | if (tg3_flag(tp, ENABLE_APE)) | ||
7910 | val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; | ||
7911 | |||
7912 | tw32_f(TG3_CPMU_EEE_MODE, val); | ||
7913 | |||
7914 | tw32_f(TG3_CPMU_EEE_DBTMR1, | ||
7915 | TG3_CPMU_DBTMR1_PCIEXIT_2047US | | ||
7916 | TG3_CPMU_DBTMR1_LNKIDLE_2047US); | ||
7917 | |||
7918 | tw32_f(TG3_CPMU_EEE_DBTMR2, | ||
7919 | TG3_CPMU_DBTMR2_APE_TX_2047US | | ||
7920 | TG3_CPMU_DBTMR2_TXIDXEQ_2047US); | ||
7921 | } | ||
7922 | |||
7645 | if (reset_phy) | 7923 | if (reset_phy) |
7646 | tg3_phy_reset(tp); | 7924 | tg3_phy_reset(tp); |
7647 | 7925 | ||
@@ -7687,7 +7965,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7687 | tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); | 7965 | tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); |
7688 | } | 7966 | } |
7689 | 7967 | ||
7690 | if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) { | 7968 | if (tg3_flag(tp, L1PLLPD_EN)) { |
7691 | u32 grc_mode = tr32(GRC_MODE); | 7969 | u32 grc_mode = tr32(GRC_MODE); |
7692 | 7970 | ||
7693 | /* Access the lower 1K of PL PCIE block registers. */ | 7971 | /* Access the lower 1K of PL PCIE block registers. */ |
@@ -7701,18 +7979,37 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7701 | tw32(GRC_MODE, grc_mode); | 7979 | tw32(GRC_MODE, grc_mode); |
7702 | } | 7980 | } |
7703 | 7981 | ||
7704 | if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { | 7982 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { |
7705 | u32 grc_mode = tr32(GRC_MODE); | 7983 | if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { |
7984 | u32 grc_mode = tr32(GRC_MODE); | ||
7706 | 7985 | ||
7707 | /* Access the lower 1K of PL PCIE block registers. */ | 7986 | /* Access the lower 1K of PL PCIE block registers. */ |
7708 | val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; | 7987 | val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; |
7709 | tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); | 7988 | tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); |
7710 | 7989 | ||
7711 | val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5); | 7990 | val = tr32(TG3_PCIE_TLDLPL_PORT + |
7712 | tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, | 7991 | TG3_PCIE_PL_LO_PHYCTL5); |
7713 | val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); | 7992 | tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, |
7993 | val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); | ||
7714 | 7994 | ||
7715 | tw32(GRC_MODE, grc_mode); | 7995 | tw32(GRC_MODE, grc_mode); |
7996 | } | ||
7997 | |||
7998 | if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) { | ||
7999 | u32 grc_mode = tr32(GRC_MODE); | ||
8000 | |||
8001 | /* Access the lower 1K of DL PCIE block registers. */ | ||
8002 | val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; | ||
8003 | tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL); | ||
8004 | |||
8005 | val = tr32(TG3_PCIE_TLDLPL_PORT + | ||
8006 | TG3_PCIE_DL_LO_FTSMAX); | ||
8007 | val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK; | ||
8008 | tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX, | ||
8009 | val | TG3_PCIE_DL_LO_FTSMAX_VAL); | ||
8010 | |||
8011 | tw32(GRC_MODE, grc_mode); | ||
8012 | } | ||
7716 | 8013 | ||
7717 | val = tr32(TG3_CPMU_LSPD_10MB_CLK); | 8014 | val = tr32(TG3_CPMU_LSPD_10MB_CLK); |
7718 | val &= ~CPMU_LSPD_10MB_MACCLK_MASK; | 8015 | val &= ~CPMU_LSPD_10MB_MACCLK_MASK; |
@@ -7725,20 +8022,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7725 | * other revision. But do not set this on PCI Express | 8022 | * other revision. But do not set this on PCI Express |
7726 | * chips and don't even touch the clocks if the CPMU is present. | 8023 | * chips and don't even touch the clocks if the CPMU is present. |
7727 | */ | 8024 | */ |
7728 | if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) { | 8025 | if (!tg3_flag(tp, CPMU_PRESENT)) { |
7729 | if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) | 8026 | if (!tg3_flag(tp, PCI_EXPRESS)) |
7730 | tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; | 8027 | tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; |
7731 | tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); | 8028 | tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); |
7732 | } | 8029 | } |
7733 | 8030 | ||
7734 | if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && | 8031 | if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && |
7735 | (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { | 8032 | tg3_flag(tp, PCIX_MODE)) { |
7736 | val = tr32(TG3PCI_PCISTATE); | 8033 | val = tr32(TG3PCI_PCISTATE); |
7737 | val |= PCISTATE_RETRY_SAME_DMA; | 8034 | val |= PCISTATE_RETRY_SAME_DMA; |
7738 | tw32(TG3PCI_PCISTATE, val); | 8035 | tw32(TG3PCI_PCISTATE, val); |
7739 | } | 8036 | } |
7740 | 8037 | ||
7741 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { | 8038 | if (tg3_flag(tp, ENABLE_APE)) { |
7742 | /* Allow reads and writes to the | 8039 | /* Allow reads and writes to the |
7743 | * APE register and memory space. | 8040 | * APE register and memory space. |
7744 | */ | 8041 | */ |
@@ -7765,11 +8062,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7765 | if (err) | 8062 | if (err) |
7766 | return err; | 8063 | return err; |
7767 | 8064 | ||
7768 | if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { | 8065 | if (tg3_flag(tp, 57765_PLUS)) { |
7769 | val = tr32(TG3PCI_DMA_RW_CTRL) & | 8066 | val = tr32(TG3PCI_DMA_RW_CTRL) & |
7770 | ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; | 8067 | ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; |
7771 | if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) | 8068 | if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) |
7772 | val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; | 8069 | val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; |
8070 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 && | ||
8071 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) | ||
8072 | val |= DMA_RWCTRL_TAGGED_STAT_WA; | ||
7773 | tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); | 8073 | tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); |
7774 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && | 8074 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && |
7775 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { | 8075 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { |
@@ -7804,7 +8104,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7804 | tw32(GRC_MISC_CFG, val); | 8104 | tw32(GRC_MISC_CFG, val); |
7805 | 8105 | ||
7806 | /* Initialize MBUF/DESC pool. */ | 8106 | /* Initialize MBUF/DESC pool. */ |
7807 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { | 8107 | if (tg3_flag(tp, 5750_PLUS)) { |
7808 | /* Do nothing. */ | 8108 | /* Do nothing. */ |
7809 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) { | 8109 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) { |
7810 | tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); | 8110 | tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); |
@@ -7814,7 +8114,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7814 | tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); | 8114 | tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); |
7815 | tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); | 8115 | tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); |
7816 | tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); | 8116 | tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); |
7817 | } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { | 8117 | } else if (tg3_flag(tp, TSO_CAPABLE)) { |
7818 | int fw_len; | 8118 | int fw_len; |
7819 | 8119 | ||
7820 | fw_len = tp->fw_len; | 8120 | fw_len = tp->fw_len; |
@@ -7845,7 +8145,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7845 | tw32(BUFMGR_DMA_HIGH_WATER, | 8145 | tw32(BUFMGR_DMA_HIGH_WATER, |
7846 | tp->bufmgr_config.dma_high_water); | 8146 | tp->bufmgr_config.dma_high_water); |
7847 | 8147 | ||
7848 | tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE); | 8148 | val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE; |
8149 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) | ||
8150 | val |= BUFMGR_MODE_NO_TX_UNDERRUN; | ||
8151 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | ||
8152 | tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || | ||
8153 | tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) | ||
8154 | val |= BUFMGR_MODE_MBLOW_ATTN_ENAB; | ||
8155 | tw32(BUFMGR_MODE, val); | ||
7849 | for (i = 0; i < 2000; i++) { | 8156 | for (i = 0; i < 2000; i++) { |
7850 | if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) | 8157 | if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) |
7851 | break; | 8158 | break; |
@@ -7856,21 +8163,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7856 | return -ENODEV; | 8163 | return -ENODEV; |
7857 | } | 8164 | } |
7858 | 8165 | ||
7859 | /* Setup replenish threshold. */ | 8166 | if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1) |
7860 | val = tp->rx_pending / 8; | 8167 | tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); |
7861 | if (val == 0) | ||
7862 | val = 1; | ||
7863 | else if (val > tp->rx_std_max_post) | ||
7864 | val = tp->rx_std_max_post; | ||
7865 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | ||
7866 | if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1) | ||
7867 | tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); | ||
7868 | 8168 | ||
7869 | if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2)) | 8169 | tg3_setup_rxbd_thresholds(tp); |
7870 | val = TG3_RX_INTERNAL_RING_SZ_5906 / 2; | ||
7871 | } | ||
7872 | |||
7873 | tw32(RCVBDI_STD_THRESH, val); | ||
7874 | 8170 | ||
7875 | /* Initialize TG3_BDINFO's at: | 8171 | /* Initialize TG3_BDINFO's at: |
7876 | * RCVDBDI_STD_BD: standard eth size rx ring | 8172 | * RCVDBDI_STD_BD: standard eth size rx ring |
@@ -7893,33 +8189,31 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7893 | ((u64) tpr->rx_std_mapping >> 32)); | 8189 | ((u64) tpr->rx_std_mapping >> 32)); |
7894 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, | 8190 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, |
7895 | ((u64) tpr->rx_std_mapping & 0xffffffff)); | 8191 | ((u64) tpr->rx_std_mapping & 0xffffffff)); |
7896 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && | 8192 | if (!tg3_flag(tp, 5717_PLUS)) |
7897 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719) | ||
7898 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, | 8193 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, |
7899 | NIC_SRAM_RX_BUFFER_DESC); | 8194 | NIC_SRAM_RX_BUFFER_DESC); |
7900 | 8195 | ||
7901 | /* Disable the mini ring */ | 8196 | /* Disable the mini ring */ |
7902 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 8197 | if (!tg3_flag(tp, 5705_PLUS)) |
7903 | tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, | 8198 | tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, |
7904 | BDINFO_FLAGS_DISABLED); | 8199 | BDINFO_FLAGS_DISABLED); |
7905 | 8200 | ||
7906 | /* Program the jumbo buffer descriptor ring control | 8201 | /* Program the jumbo buffer descriptor ring control |
7907 | * blocks on those devices that have them. | 8202 | * blocks on those devices that have them. |
7908 | */ | 8203 | */ |
7909 | if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && | 8204 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || |
7910 | !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { | 8205 | (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { |
7911 | /* Setup replenish threshold. */ | ||
7912 | tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8); | ||
7913 | 8206 | ||
7914 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { | 8207 | if (tg3_flag(tp, JUMBO_RING_ENABLE)) { |
7915 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, | 8208 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, |
7916 | ((u64) tpr->rx_jmb_mapping >> 32)); | 8209 | ((u64) tpr->rx_jmb_mapping >> 32)); |
7917 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, | 8210 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, |
7918 | ((u64) tpr->rx_jmb_mapping & 0xffffffff)); | 8211 | ((u64) tpr->rx_jmb_mapping & 0xffffffff)); |
8212 | val = TG3_RX_JMB_RING_SIZE(tp) << | ||
8213 | BDINFO_FLAGS_MAXLEN_SHIFT; | ||
7919 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, | 8214 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, |
7920 | (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | | 8215 | val | BDINFO_FLAGS_USE_EXT_RECV); |
7921 | BDINFO_FLAGS_USE_EXT_RECV); | 8216 | if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || |
7922 | if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) || | ||
7923 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | 8217 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) |
7924 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, | 8218 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, |
7925 | NIC_SRAM_RX_JUMBO_BUFFER_DESC); | 8219 | NIC_SRAM_RX_JUMBO_BUFFER_DESC); |
@@ -7928,28 +8222,27 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7928 | BDINFO_FLAGS_DISABLED); | 8222 | BDINFO_FLAGS_DISABLED); |
7929 | } | 8223 | } |
7930 | 8224 | ||
7931 | if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) | 8225 | if (tg3_flag(tp, 57765_PLUS)) { |
7932 | val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) | | 8226 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) |
7933 | (TG3_RX_STD_DMA_SZ << 2); | 8227 | val = TG3_RX_STD_MAX_SIZE_5700; |
7934 | else | 8228 | else |
8229 | val = TG3_RX_STD_MAX_SIZE_5717; | ||
8230 | val <<= BDINFO_FLAGS_MAXLEN_SHIFT; | ||
8231 | val |= (TG3_RX_STD_DMA_SZ << 2); | ||
8232 | } else | ||
7935 | val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; | 8233 | val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; |
7936 | } else | 8234 | } else |
7937 | val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT; | 8235 | val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; |
7938 | 8236 | ||
7939 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); | 8237 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); |
7940 | 8238 | ||
7941 | tpr->rx_std_prod_idx = tp->rx_pending; | 8239 | tpr->rx_std_prod_idx = tp->rx_pending; |
7942 | tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); | 8240 | tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); |
7943 | 8241 | ||
7944 | tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? | 8242 | tpr->rx_jmb_prod_idx = |
7945 | tp->rx_jumbo_pending : 0; | 8243 | tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; |
7946 | tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); | 8244 | tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); |
7947 | 8245 | ||
7948 | if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { | ||
7949 | tw32(STD_REPLENISH_LWM, 32); | ||
7950 | tw32(JMB_REPLENISH_LWM, 16); | ||
7951 | } | ||
7952 | |||
7953 | tg3_rings_reset(tp); | 8246 | tg3_rings_reset(tp); |
7954 | 8247 | ||
7955 | /* Initialize MAC address and backoff seed. */ | 8248 | /* Initialize MAC address and backoff seed. */ |
@@ -7962,10 +8255,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7962 | /* The slot time is changed by tg3_setup_phy if we | 8255 | /* The slot time is changed by tg3_setup_phy if we |
7963 | * run at gigabit with half duplex. | 8256 | * run at gigabit with half duplex. |
7964 | */ | 8257 | */ |
7965 | tw32(MAC_TX_LENGTHS, | 8258 | val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | |
7966 | (2 << TX_LENGTHS_IPG_CRS_SHIFT) | | 8259 | (6 << TX_LENGTHS_IPG_SHIFT) | |
7967 | (6 << TX_LENGTHS_IPG_SHIFT) | | 8260 | (32 << TX_LENGTHS_SLOT_TIME_SHIFT); |
7968 | (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); | 8261 | |
8262 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) | ||
8263 | val |= tr32(MAC_TX_LENGTHS) & | ||
8264 | (TX_LENGTHS_JMB_FRM_LEN_MSK | | ||
8265 | TX_LENGTHS_CNT_DWN_VAL_MSK); | ||
8266 | |||
8267 | tw32(MAC_TX_LENGTHS, val); | ||
7969 | 8268 | ||
7970 | /* Receive rules. */ | 8269 | /* Receive rules. */ |
7971 | tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); | 8270 | tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); |
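On the 5720 the MAC_TX_LENGTHS register also carries jumbo-frame-length and count-down fields that boot code may have programmed, so the rewrite above preserves them with a read-modify-write instead of overwriting the whole register. A generic sketch of that preserve-and-rewrite pattern, assuming the driver's struct tg3 and its tr32()/tw32() register accessors:

/* Sketch only: keep the fields selected by keep_mask, replace the rest. */
static void rmw_preserving(struct tg3 *tp, u32 reg, u32 new_bits, u32 keep_mask)
{
	u32 val = new_bits | (tr32(reg) & keep_mask);

	tw32(reg, val);
}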
@@ -7980,8 +8279,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7980 | RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | | 8279 | RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | |
7981 | RDMAC_MODE_LNGREAD_ENAB); | 8280 | RDMAC_MODE_LNGREAD_ENAB); |
7982 | 8281 | ||
7983 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 8282 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) |
7984 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) | ||
7985 | rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; | 8283 | rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; |
7986 | 8284 | ||
7987 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || | 8285 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || |
@@ -7991,37 +8289,67 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7991 | RDMAC_MODE_MBUF_RBD_CRPT_ENAB | | 8289 | RDMAC_MODE_MBUF_RBD_CRPT_ENAB | |
7992 | RDMAC_MODE_MBUF_SBD_CRPT_ENAB; | 8290 | RDMAC_MODE_MBUF_SBD_CRPT_ENAB; |
7993 | 8291 | ||
7994 | /* If statement applies to 5705 and 5750 PCI devices only */ | 8292 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && |
7995 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && | 8293 | tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { |
7996 | tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || | 8294 | if (tg3_flag(tp, TSO_CAPABLE) && |
7997 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) { | ||
7998 | if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE && | ||
7999 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { | 8295 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { |
8000 | rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; | 8296 | rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; |
8001 | } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && | 8297 | } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && |
8002 | !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) { | 8298 | !tg3_flag(tp, IS_5788)) { |
8003 | rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; | 8299 | rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; |
8004 | } | 8300 | } |
8005 | } | 8301 | } |
8006 | 8302 | ||
8007 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) | 8303 | if (tg3_flag(tp, PCI_EXPRESS)) |
8008 | rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; | 8304 | rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; |
8009 | 8305 | ||
8010 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 8306 | if (tg3_flag(tp, HW_TSO_1) || |
8307 | tg3_flag(tp, HW_TSO_2) || | ||
8308 | tg3_flag(tp, HW_TSO_3)) | ||
8011 | rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; | 8309 | rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; |
8012 | 8310 | ||
8013 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || | 8311 | if (tg3_flag(tp, 57765_PLUS) || |
8014 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 8312 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || |
8015 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) | 8313 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) |
8016 | rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; | 8314 | rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; |
8017 | 8315 | ||
8316 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) | ||
8317 | rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; | ||
8318 | |||
8319 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | ||
8320 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || | ||
8321 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | ||
8322 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || | ||
8323 | tg3_flag(tp, 57765_PLUS)) { | ||
8324 | val = tr32(TG3_RDMA_RSRVCTRL_REG); | ||
8325 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || | ||
8326 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { | ||
8327 | val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | | ||
8328 | TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | | ||
8329 | TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); | ||
8330 | val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | | ||
8331 | TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | | ||
8332 | TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; | ||
8333 | } | ||
8334 | tw32(TG3_RDMA_RSRVCTRL_REG, | ||
8335 | val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); | ||
8336 | } | ||
8337 | |||
8338 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || | ||
8339 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { | ||
8340 | val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); | ||
8341 | tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val | | ||
8342 | TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | | ||
8343 | TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); | ||
8344 | } | ||
8345 | |||
8018 | /* Receive/send statistics. */ | 8346 | /* Receive/send statistics. */ |
8019 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { | 8347 | if (tg3_flag(tp, 5750_PLUS)) { |
8020 | val = tr32(RCVLPC_STATS_ENABLE); | 8348 | val = tr32(RCVLPC_STATS_ENABLE); |
8021 | val &= ~RCVLPC_STATSENAB_DACK_FIX; | 8349 | val &= ~RCVLPC_STATSENAB_DACK_FIX; |
8022 | tw32(RCVLPC_STATS_ENABLE, val); | 8350 | tw32(RCVLPC_STATS_ENABLE, val); |
8023 | } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && | 8351 | } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && |
8024 | (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { | 8352 | tg3_flag(tp, TSO_CAPABLE)) { |
8025 | val = tr32(RCVLPC_STATS_ENABLE); | 8353 | val = tr32(RCVLPC_STATS_ENABLE); |
8026 | val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; | 8354 | val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; |
8027 | tw32(RCVLPC_STATS_ENABLE, val); | 8355 | tw32(RCVLPC_STATS_ENABLE, val); |
@@ -8044,7 +8372,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
8044 | 8372 | ||
8045 | __tg3_set_coalesce(tp, &tp->coal); | 8373 | __tg3_set_coalesce(tp, &tp->coal); |
8046 | 8374 | ||
8047 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 8375 | if (!tg3_flag(tp, 5705_PLUS)) { |
8048 | /* Status/statistics block address. See tg3_timer, | 8376 | /* Status/statistics block address. See tg3_timer, |
8049 | * the tg3_periodic_fetch_stats call there, and | 8377 | * the tg3_periodic_fetch_stats call there, and |
8050 | * tg3_get_stats to see how this works for 5705/5750 chips. | 8378 | * tg3_get_stats to see how this works for 5705/5750 chips. |
@@ -8070,7 +8398,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
8070 | 8398 | ||
8071 | tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); | 8399 | tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); |
8072 | tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); | 8400 | tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); |
8073 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 8401 | if (!tg3_flag(tp, 5705_PLUS)) |
8074 | tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); | 8402 | tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); |
8075 | 8403 | ||
8076 | if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { | 8404 | if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { |
@@ -8080,13 +8408,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
8080 | udelay(10); | 8408 | udelay(10); |
8081 | } | 8409 | } |
8082 | 8410 | ||
8083 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) | 8411 | if (tg3_flag(tp, ENABLE_APE)) |
8084 | tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; | 8412 | tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; |
8085 | else | 8413 | else |
8086 | tp->mac_mode = 0; | 8414 | tp->mac_mode = 0; |
8087 | tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | | 8415 | tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | |
8088 | MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; | 8416 | MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; |
8089 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && | 8417 | if (!tg3_flag(tp, 5705_PLUS) && |
8090 | !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && | 8418 | !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && |
8091 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) | 8419 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) |
8092 | tp->mac_mode |= MAC_MODE_LINK_POLARITY; | 8420 | tp->mac_mode |= MAC_MODE_LINK_POLARITY; |
@@ -8094,12 +8422,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
8094 | udelay(40); | 8422 | udelay(40); |
8095 | 8423 | ||
8096 | /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). | 8424 | /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). |
8097 | * If TG3_FLG2_IS_NIC is zero, we should read the | 8425 | * If TG3_FLAG_IS_NIC is zero, we should read the |
8098 | * register to preserve the GPIO settings for LOMs. The GPIOs, | 8426 | * register to preserve the GPIO settings for LOMs. The GPIOs, |
8099 | * whether used as inputs or outputs, are set by boot code after | 8427 | * whether used as inputs or outputs, are set by boot code after |
8100 | * reset. | 8428 | * reset. |
8101 | */ | 8429 | */ |
8102 | if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) { | 8430 | if (!tg3_flag(tp, IS_NIC)) { |
8103 | u32 gpio_mask; | 8431 | u32 gpio_mask; |
8104 | 8432 | ||
8105 | gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | | 8433 | gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | |
@@ -8117,20 +8445,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
8117 | tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; | 8445 | tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; |
8118 | 8446 | ||
8119 | /* GPIO1 must be driven high for eeprom write protect */ | 8447 | /* GPIO1 must be driven high for eeprom write protect */ |
8120 | if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) | 8448 | if (tg3_flag(tp, EEPROM_WRITE_PROT)) |
8121 | tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | | 8449 | tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | |
8122 | GRC_LCLCTRL_GPIO_OUTPUT1); | 8450 | GRC_LCLCTRL_GPIO_OUTPUT1); |
8123 | } | 8451 | } |
8124 | tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); | 8452 | tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); |
8125 | udelay(100); | 8453 | udelay(100); |
8126 | 8454 | ||
8127 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) { | 8455 | if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) { |
8128 | val = tr32(MSGINT_MODE); | 8456 | val = tr32(MSGINT_MODE); |
8129 | val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; | 8457 | val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; |
8130 | tw32(MSGINT_MODE, val); | 8458 | tw32(MSGINT_MODE, val); |
8131 | } | 8459 | } |
8132 | 8460 | ||
8133 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 8461 | if (!tg3_flag(tp, 5705_PLUS)) { |
8134 | tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); | 8462 | tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); |
8135 | udelay(40); | 8463 | udelay(40); |
8136 | } | 8464 | } |
@@ -8141,23 +8469,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
8141 | WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | | 8469 | WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | |
8142 | WDMAC_MODE_LNGREAD_ENAB); | 8470 | WDMAC_MODE_LNGREAD_ENAB); |
8143 | 8471 | ||
8144 | /* If statement applies to 5705 and 5750 PCI devices only */ | 8472 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && |
8145 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && | 8473 | tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { |
8146 | tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || | 8474 | if (tg3_flag(tp, TSO_CAPABLE) && |
8147 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) { | ||
8148 | if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && | ||
8149 | (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || | 8475 | (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || |
8150 | tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { | 8476 | tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { |
8151 | /* nothing */ | 8477 | /* nothing */ |
8152 | } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && | 8478 | } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && |
8153 | !(tp->tg3_flags2 & TG3_FLG2_IS_5788) && | 8479 | !tg3_flag(tp, IS_5788)) { |
8154 | !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { | ||
8155 | val |= WDMAC_MODE_RX_ACCEL; | 8480 | val |= WDMAC_MODE_RX_ACCEL; |
8156 | } | 8481 | } |
8157 | } | 8482 | } |
8158 | 8483 | ||
8159 | /* Enable host coalescing bug fix */ | 8484 | /* Enable host coalescing bug fix */ |
8160 | if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) | 8485 | if (tg3_flag(tp, 5755_PLUS)) |
8161 | val |= WDMAC_MODE_STATUS_TAG_FIX; | 8486 | val |= WDMAC_MODE_STATUS_TAG_FIX; |
8162 | 8487 | ||
8163 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) | 8488 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) |
@@ -8166,7 +8491,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
8166 | tw32_f(WDMAC_MODE, val); | 8491 | tw32_f(WDMAC_MODE, val); |
8167 | udelay(40); | 8492 | udelay(40); |
8168 | 8493 | ||
8169 | if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { | 8494 | if (tg3_flag(tp, PCIX_MODE)) { |
8170 | u16 pcix_cmd; | 8495 | u16 pcix_cmd; |
8171 | 8496 | ||
8172 | pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, | 8497 | pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, |
@@ -8186,7 +8511,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
8186 | udelay(40); | 8511 | udelay(40); |
8187 | 8512 | ||
8188 | tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); | 8513 | tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); |
8189 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 8514 | if (!tg3_flag(tp, 5705_PLUS)) |
8190 | tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); | 8515 | tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); |
8191 | 8516 | ||
8192 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) | 8517 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) |
@@ -8197,12 +8522,17 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
8197 | 8522 | ||
8198 | tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); | 8523 | tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); |
8199 | tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); | 8524 | tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); |
8200 | tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ); | 8525 | val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; |
8526 | if (tg3_flag(tp, LRG_PROD_RING_CAP)) | ||
8527 | val |= RCVDBDI_MODE_LRG_RING_SZ; | ||
8528 | tw32(RCVDBDI_MODE, val); | ||
8201 | tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); | 8529 | tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); |
8202 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 8530 | if (tg3_flag(tp, HW_TSO_1) || |
8531 | tg3_flag(tp, HW_TSO_2) || | ||
8532 | tg3_flag(tp, HW_TSO_3)) | ||
8203 | tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); | 8533 | tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); |
8204 | val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; | 8534 | val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; |
8205 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) | 8535 | if (tg3_flag(tp, ENABLE_TSS)) |
8206 | val |= SNDBDI_MODE_MULTI_TXQ_EN; | 8536 | val |= SNDBDI_MODE_MULTI_TXQ_EN; |
8207 | tw32(SNDBDI_MODE, val); | 8537 | tw32(SNDBDI_MODE, val); |
8208 | tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); | 8538 | tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); |
@@ -8213,20 +8543,28 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
8213 | return err; | 8543 | return err; |
8214 | } | 8544 | } |
8215 | 8545 | ||
8216 | if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { | 8546 | if (tg3_flag(tp, TSO_CAPABLE)) { |
8217 | err = tg3_load_tso_firmware(tp); | 8547 | err = tg3_load_tso_firmware(tp); |
8218 | if (err) | 8548 | if (err) |
8219 | return err; | 8549 | return err; |
8220 | } | 8550 | } |
8221 | 8551 | ||
8222 | tp->tx_mode = TX_MODE_ENABLE; | 8552 | tp->tx_mode = TX_MODE_ENABLE; |
8223 | if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || | 8553 | |
8554 | if (tg3_flag(tp, 5755_PLUS) || | ||
8224 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 8555 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) |
8225 | tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; | 8556 | tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; |
8557 | |||
8558 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { | ||
8559 | val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; | ||
8560 | tp->tx_mode &= ~val; | ||
8561 | tp->tx_mode |= tr32(MAC_TX_MODE) & val; | ||
8562 | } | ||
8563 | |||
8226 | tw32_f(MAC_TX_MODE, tp->tx_mode); | 8564 | tw32_f(MAC_TX_MODE, tp->tx_mode); |
8227 | udelay(100); | 8565 | udelay(100); |
8228 | 8566 | ||
8229 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) { | 8567 | if (tg3_flag(tp, ENABLE_RSS)) { |
8230 | u32 reg = MAC_RSS_INDIR_TBL_0; | 8568 | u32 reg = MAC_RSS_INDIR_TBL_0; |
8231 | u8 *ent = (u8 *)&val; | 8569 | u8 *ent = (u8 *)&val; |
8232 | 8570 | ||
@@ -8255,10 +8593,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
8255 | } | 8593 | } |
8256 | 8594 | ||
8257 | tp->rx_mode = RX_MODE_ENABLE; | 8595 | tp->rx_mode = RX_MODE_ENABLE; |
8258 | if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) | 8596 | if (tg3_flag(tp, 5755_PLUS)) |
8259 | tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; | 8597 | tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; |
8260 | 8598 | ||
8261 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) | 8599 | if (tg3_flag(tp, ENABLE_RSS)) |
8262 | tp->rx_mode |= RX_MODE_RSS_ENABLE | | 8600 | tp->rx_mode |= RX_MODE_RSS_ENABLE | |
8263 | RX_MODE_RSS_ITBL_HASH_BITS_7 | | 8601 | RX_MODE_RSS_ITBL_HASH_BITS_7 | |
8264 | RX_MODE_RSS_IPV6_HASH_EN | | 8602 | RX_MODE_RSS_IPV6_HASH_EN | |
@@ -8305,11 +8643,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
8305 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && | 8643 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && |
8306 | (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { | 8644 | (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { |
8307 | /* Use hardware link auto-negotiation */ | 8645 | /* Use hardware link auto-negotiation */ |
8308 | tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG; | 8646 | tg3_flag_set(tp, HW_AUTONEG); |
8309 | } | 8647 | } |
8310 | 8648 | ||
8311 | if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && | 8649 | if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && |
8312 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) { | 8650 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { |
8313 | u32 tmp; | 8651 | u32 tmp; |
8314 | 8652 | ||
8315 | tmp = tr32(SERDES_RX_CTRL); | 8653 | tmp = tr32(SERDES_RX_CTRL); |
@@ -8319,7 +8657,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
8319 | tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); | 8657 | tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); |
8320 | } | 8658 | } |
8321 | 8659 | ||
8322 | if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { | 8660 | if (!tg3_flag(tp, USE_PHYLIB)) { |
8323 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { | 8661 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { |
8324 | tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; | 8662 | tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; |
8325 | tp->link_config.speed = tp->link_config.orig_speed; | 8663 | tp->link_config.speed = tp->link_config.orig_speed; |
@@ -8352,12 +8690,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
8352 | tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); | 8690 | tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); |
8353 | tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); | 8691 | tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); |
8354 | 8692 | ||
8355 | if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && | 8693 | if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) |
8356 | !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) | ||
8357 | limit = 8; | 8694 | limit = 8; |
8358 | else | 8695 | else |
8359 | limit = 16; | 8696 | limit = 16; |
8360 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) | 8697 | if (tg3_flag(tp, ENABLE_ASF)) |
8361 | limit -= 4; | 8698 | limit -= 4; |
8362 | switch (limit) { | 8699 | switch (limit) { |
8363 | case 16: | 8700 | case 16: |
@@ -8395,7 +8732,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
8395 | break; | 8732 | break; |
8396 | } | 8733 | } |
8397 | 8734 | ||
8398 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) | 8735 | if (tg3_flag(tp, ENABLE_APE)) |
8399 | /* Write our heartbeat update interval to APE. */ | 8736 | /* Write our heartbeat update interval to APE. */ |
8400 | tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, | 8737 | tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, |
8401 | APE_HOST_HEARTBEAT_INT_DISABLE); | 8738 | APE_HOST_HEARTBEAT_INT_DISABLE); |
@@ -8461,7 +8798,21 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp) | |||
8461 | TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); | 8798 | TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); |
8462 | 8799 | ||
8463 | TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); | 8800 | TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); |
8464 | TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); | 8801 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && |
8802 | tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 && | ||
8803 | tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) { | ||
8804 | TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); | ||
8805 | } else { | ||
8806 | u32 val = tr32(HOSTCC_FLOW_ATTN); | ||
8807 | val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0; | ||
8808 | if (val) { | ||
8809 | tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); | ||
8810 | sp->rx_discards.low += val; | ||
8811 | if (sp->rx_discards.low < val) | ||
8812 | sp->rx_discards.high += 1; | ||
8813 | } | ||
8814 | sp->mbuf_lwm_thresh_hit = sp->rx_discards; | ||
8815 | } | ||
8465 | TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); | 8816 | TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); |
8466 | } | 8817 | } |
8467 | 8818 | ||
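The rx_discards path above folds a 32-bit, clear-on-read hardware count into a 64-bit counter kept as separate low/high words, carrying into the high word when the low word wraps. A standalone sketch of that carry pattern (the struct and the sample values are illustrative, not the driver's own types):

#include <stdint.h>
#include <stdio.h>

/* Illustrative 64-bit counter kept as two 32-bit words, as the driver does. */
struct hw_stat64 {
	uint32_t low;
	uint32_t high;
};

/* Fold a 32-bit delta read from hardware into the 64-bit counter.
 * If the low word wraps past 2^32, the sum ends up smaller than the
 * delta, which is the cue to carry into the high word.
 */
static void stat_add32(struct hw_stat64 *st, uint32_t delta)
{
	st->low += delta;
	if (st->low < delta)
		st->high += 1;
}

int main(void)
{
	struct hw_stat64 discards = { .low = 0xfffffff0u, .high = 0 };

	stat_add32(&discards, 0x20);	/* wraps: high word picks up the carry */
	printf("high=%u low=%u\n", (unsigned)discards.high, (unsigned)discards.low);
	return 0;
}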
@@ -8474,7 +8825,7 @@ static void tg3_timer(unsigned long __opaque) | |||
8474 | 8825 | ||
8475 | spin_lock(&tp->lock); | 8826 | spin_lock(&tp->lock); |
8476 | 8827 | ||
8477 | if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { | 8828 | if (!tg3_flag(tp, TAGGED_STATUS)) { |
8478 | /* All of this garbage is because when using non-tagged | 8829 | /* All of this garbage is because when using non-tagged |
8479 | * IRQ status the mailbox/status_block protocol the chip | 8830 | * IRQ status the mailbox/status_block protocol the chip |
8480 | * uses with the cpu is race prone. | 8831 | * uses with the cpu is race prone. |
@@ -8488,7 +8839,7 @@ static void tg3_timer(unsigned long __opaque) | |||
8488 | } | 8839 | } |
8489 | 8840 | ||
8490 | if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { | 8841 | if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { |
8491 | tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER; | 8842 | tg3_flag_set(tp, RESTART_TIMER); |
8492 | spin_unlock(&tp->lock); | 8843 | spin_unlock(&tp->lock); |
8493 | schedule_work(&tp->reset_task); | 8844 | schedule_work(&tp->reset_task); |
8494 | return; | 8845 | return; |
@@ -8497,10 +8848,13 @@ static void tg3_timer(unsigned long __opaque) | |||
8497 | 8848 | ||
8498 | /* This part only runs once per second. */ | 8849 | /* This part only runs once per second. */ |
8499 | if (!--tp->timer_counter) { | 8850 | if (!--tp->timer_counter) { |
8500 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) | 8851 | if (tg3_flag(tp, 5705_PLUS)) |
8501 | tg3_periodic_fetch_stats(tp); | 8852 | tg3_periodic_fetch_stats(tp); |
8502 | 8853 | ||
8503 | if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { | 8854 | if (tp->setlpicnt && !--tp->setlpicnt) |
8855 | tg3_phy_eee_enable(tp); | ||
8856 | |||
8857 | if (tg3_flag(tp, USE_LINKCHG_REG)) { | ||
8504 | u32 mac_stat; | 8858 | u32 mac_stat; |
8505 | int phy_event; | 8859 | int phy_event; |
8506 | 8860 | ||
@@ -8515,7 +8869,7 @@ static void tg3_timer(unsigned long __opaque) | |||
8515 | 8869 | ||
8516 | if (phy_event) | 8870 | if (phy_event) |
8517 | tg3_setup_phy(tp, 0); | 8871 | tg3_setup_phy(tp, 0); |
8518 | } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) { | 8872 | } else if (tg3_flag(tp, POLL_SERDES)) { |
8519 | u32 mac_stat = tr32(MAC_STATUS); | 8873 | u32 mac_stat = tr32(MAC_STATUS); |
8520 | int need_setup = 0; | 8874 | int need_setup = 0; |
8521 | 8875 | ||
@@ -8540,7 +8894,7 @@ static void tg3_timer(unsigned long __opaque) | |||
8540 | tg3_setup_phy(tp, 0); | 8894 | tg3_setup_phy(tp, 0); |
8541 | } | 8895 | } |
8542 | } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && | 8896 | } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && |
8543 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { | 8897 | tg3_flag(tp, 5780_CLASS)) { |
8544 | tg3_serdes_parallel_detect(tp); | 8898 | tg3_serdes_parallel_detect(tp); |
8545 | } | 8899 | } |
8546 | 8900 | ||
@@ -8565,8 +8919,7 @@ static void tg3_timer(unsigned long __opaque) | |||
8565 | * resets. | 8919 | * resets. |
8566 | */ | 8920 | */ |
8567 | if (!--tp->asf_counter) { | 8921 | if (!--tp->asf_counter) { |
8568 | if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && | 8922 | if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { |
8569 | !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { | ||
8570 | tg3_wait_for_event_ack(tp); | 8923 | tg3_wait_for_event_ack(tp); |
8571 | 8924 | ||
8572 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, | 8925 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, |
@@ -8602,16 +8955,16 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num) | |||
8602 | name[IFNAMSIZ-1] = 0; | 8955 | name[IFNAMSIZ-1] = 0; |
8603 | } | 8956 | } |
8604 | 8957 | ||
8605 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { | 8958 | if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { |
8606 | fn = tg3_msi; | 8959 | fn = tg3_msi; |
8607 | if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) | 8960 | if (tg3_flag(tp, 1SHOT_MSI)) |
8608 | fn = tg3_msi_1shot; | 8961 | fn = tg3_msi_1shot; |
8609 | flags = IRQF_SAMPLE_RANDOM; | 8962 | flags = 0; |
8610 | } else { | 8963 | } else { |
8611 | fn = tg3_interrupt; | 8964 | fn = tg3_interrupt; |
8612 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) | 8965 | if (tg3_flag(tp, TAGGED_STATUS)) |
8613 | fn = tg3_interrupt_tagged; | 8966 | fn = tg3_interrupt_tagged; |
8614 | flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM; | 8967 | flags = IRQF_SHARED; |
8615 | } | 8968 | } |
8616 | 8969 | ||
8617 | return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); | 8970 | return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); |
@@ -8635,8 +8988,7 @@ static int tg3_test_interrupt(struct tg3 *tp) | |||
8635 | * Turn off MSI one shot mode. Otherwise this test has no | 8988 | * Turn off MSI one shot mode. Otherwise this test has no |
8636 | * observable way to know whether the interrupt was delivered. | 8989 | * observable way to know whether the interrupt was delivered. |
8637 | */ | 8990 | */ |
8638 | if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && | 8991 | if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { |
8639 | (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { | ||
8640 | val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; | 8992 | val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; |
8641 | tw32(MSGINT_MODE, val); | 8993 | tw32(MSGINT_MODE, val); |
8642 | } | 8994 | } |
@@ -8678,8 +9030,7 @@ static int tg3_test_interrupt(struct tg3 *tp) | |||
8678 | 9030 | ||
8679 | if (intr_ok) { | 9031 | if (intr_ok) { |
8680 | /* Reenable MSI one shot mode. */ | 9032 | /* Reenable MSI one shot mode. */ |
8681 | if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && | 9033 | if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { |
8682 | (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { | ||
8683 | val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; | 9034 | val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; |
8684 | tw32(MSGINT_MODE, val); | 9035 | tw32(MSGINT_MODE, val); |
8685 | } | 9036 | } |
@@ -8697,7 +9048,7 @@ static int tg3_test_msi(struct tg3 *tp) | |||
8697 | int err; | 9048 | int err; |
8698 | u16 pci_cmd; | 9049 | u16 pci_cmd; |
8699 | 9050 | ||
8700 | if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI)) | 9051 | if (!tg3_flag(tp, USING_MSI)) |
8701 | return 0; | 9052 | return 0; |
8702 | 9053 | ||
8703 | /* Turn off SERR reporting in case MSI terminates with Master | 9054 | /* Turn off SERR reporting in case MSI terminates with Master |
@@ -8727,7 +9078,7 @@ static int tg3_test_msi(struct tg3 *tp) | |||
8727 | 9078 | ||
8728 | pci_disable_msi(tp->pdev); | 9079 | pci_disable_msi(tp->pdev); |
8729 | 9080 | ||
8730 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; | 9081 | tg3_flag_clear(tp, USING_MSI); |
8731 | tp->napi[0].irq_vec = tp->pdev->irq; | 9082 | tp->napi[0].irq_vec = tp->pdev->irq; |
8732 | 9083 | ||
8733 | err = tg3_request_irq(tp, 0); | 9084 | err = tg3_request_irq(tp, 0); |
@@ -8816,14 +9167,20 @@ static bool tg3_enable_msix(struct tg3 *tp) | |||
8816 | for (i = 0; i < tp->irq_max; i++) | 9167 | for (i = 0; i < tp->irq_max; i++) |
8817 | tp->napi[i].irq_vec = msix_ent[i].vector; | 9168 | tp->napi[i].irq_vec = msix_ent[i].vector; |
8818 | 9169 | ||
8819 | tp->dev->real_num_tx_queues = 1; | 9170 | netif_set_real_num_tx_queues(tp->dev, 1); |
9171 | rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1; | ||
9172 | if (netif_set_real_num_rx_queues(tp->dev, rc)) { | ||
9173 | pci_disable_msix(tp->pdev); | ||
9174 | return false; | ||
9175 | } | ||
9176 | |||
8820 | if (tp->irq_cnt > 1) { | 9177 | if (tp->irq_cnt > 1) { |
8821 | tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS; | 9178 | tg3_flag_set(tp, ENABLE_RSS); |
8822 | 9179 | ||
8823 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 9180 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || |
8824 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { | 9181 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { |
8825 | tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS; | 9182 | tg3_flag_set(tp, ENABLE_TSS); |
8826 | tp->dev->real_num_tx_queues = tp->irq_cnt - 1; | 9183 | netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1); |
8827 | } | 9184 | } |
8828 | } | 9185 | } |
8829 | 9186 | ||
@@ -8832,8 +9189,8 @@ static bool tg3_enable_msix(struct tg3 *tp) | |||
8832 | 9189 | ||
8833 | static void tg3_ints_init(struct tg3 *tp) | 9190 | static void tg3_ints_init(struct tg3 *tp) |
8834 | { | 9191 | { |
8835 | if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) && | 9192 | if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && |
8836 | !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { | 9193 | !tg3_flag(tp, TAGGED_STATUS)) { |
8837 | /* All MSI supporting chips should support tagged | 9194 | /* All MSI supporting chips should support tagged |
8838 | * status. Assert that this is the case. | 9195 | * status. Assert that this is the case. |
8839 | */ | 9196 | */ |
@@ -8842,34 +9199,36 @@ static void tg3_ints_init(struct tg3 *tp) | |||
8842 | goto defcfg; | 9199 | goto defcfg; |
8843 | } | 9200 | } |
8844 | 9201 | ||
8845 | if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp)) | 9202 | if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) |
8846 | tp->tg3_flags2 |= TG3_FLG2_USING_MSIX; | 9203 | tg3_flag_set(tp, USING_MSIX); |
8847 | else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) && | 9204 | else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) |
8848 | pci_enable_msi(tp->pdev) == 0) | 9205 | tg3_flag_set(tp, USING_MSI); |
8849 | tp->tg3_flags2 |= TG3_FLG2_USING_MSI; | ||
8850 | 9206 | ||
8851 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { | 9207 | if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { |
8852 | u32 msi_mode = tr32(MSGINT_MODE); | 9208 | u32 msi_mode = tr32(MSGINT_MODE); |
8853 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) | 9209 | if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) |
8854 | msi_mode |= MSGINT_MODE_MULTIVEC_EN; | 9210 | msi_mode |= MSGINT_MODE_MULTIVEC_EN; |
8855 | tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); | 9211 | tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); |
8856 | } | 9212 | } |
8857 | defcfg: | 9213 | defcfg: |
8858 | if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { | 9214 | if (!tg3_flag(tp, USING_MSIX)) { |
8859 | tp->irq_cnt = 1; | 9215 | tp->irq_cnt = 1; |
8860 | tp->napi[0].irq_vec = tp->pdev->irq; | 9216 | tp->napi[0].irq_vec = tp->pdev->irq; |
8861 | tp->dev->real_num_tx_queues = 1; | 9217 | netif_set_real_num_tx_queues(tp->dev, 1); |
9218 | netif_set_real_num_rx_queues(tp->dev, 1); | ||
8862 | } | 9219 | } |
8863 | } | 9220 | } |
8864 | 9221 | ||
8865 | static void tg3_ints_fini(struct tg3 *tp) | 9222 | static void tg3_ints_fini(struct tg3 *tp) |
8866 | { | 9223 | { |
8867 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) | 9224 | if (tg3_flag(tp, USING_MSIX)) |
8868 | pci_disable_msix(tp->pdev); | 9225 | pci_disable_msix(tp->pdev); |
8869 | else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) | 9226 | else if (tg3_flag(tp, USING_MSI)) |
8870 | pci_disable_msi(tp->pdev); | 9227 | pci_disable_msi(tp->pdev); |
8871 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX; | 9228 | tg3_flag_clear(tp, USING_MSI); |
8872 | tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS); | 9229 | tg3_flag_clear(tp, USING_MSIX); |
9230 | tg3_flag_clear(tp, ENABLE_RSS); | ||
9231 | tg3_flag_clear(tp, ENABLE_TSS); | ||
8873 | } | 9232 | } |
8874 | 9233 | ||
8875 | static int tg3_open(struct net_device *dev) | 9234 | static int tg3_open(struct net_device *dev) |
@@ -8884,23 +9243,23 @@ static int tg3_open(struct net_device *dev) | |||
8884 | return err; | 9243 | return err; |
8885 | } else if (err) { | 9244 | } else if (err) { |
8886 | netdev_warn(tp->dev, "TSO capability disabled\n"); | 9245 | netdev_warn(tp->dev, "TSO capability disabled\n"); |
8887 | tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; | 9246 | tg3_flag_clear(tp, TSO_CAPABLE); |
8888 | } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { | 9247 | } else if (!tg3_flag(tp, TSO_CAPABLE)) { |
8889 | netdev_notice(tp->dev, "TSO capability restored\n"); | 9248 | netdev_notice(tp->dev, "TSO capability restored\n"); |
8890 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; | 9249 | tg3_flag_set(tp, TSO_CAPABLE); |
8891 | } | 9250 | } |
8892 | } | 9251 | } |
8893 | 9252 | ||
8894 | netif_carrier_off(tp->dev); | 9253 | netif_carrier_off(tp->dev); |
8895 | 9254 | ||
8896 | err = tg3_set_power_state(tp, PCI_D0); | 9255 | err = tg3_power_up(tp); |
8897 | if (err) | 9256 | if (err) |
8898 | return err; | 9257 | return err; |
8899 | 9258 | ||
8900 | tg3_full_lock(tp, 0); | 9259 | tg3_full_lock(tp, 0); |
8901 | 9260 | ||
8902 | tg3_disable_ints(tp); | 9261 | tg3_disable_ints(tp); |
8903 | tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; | 9262 | tg3_flag_clear(tp, INIT_COMPLETE); |
8904 | 9263 | ||
8905 | tg3_full_unlock(tp); | 9264 | tg3_full_unlock(tp); |
8906 | 9265 | ||
@@ -8917,6 +9276,8 @@ static int tg3_open(struct net_device *dev) | |||
8917 | if (err) | 9276 | if (err) |
8918 | goto err_out1; | 9277 | goto err_out1; |
8919 | 9278 | ||
9279 | tg3_napi_init(tp); | ||
9280 | |||
8920 | tg3_napi_enable(tp); | 9281 | tg3_napi_enable(tp); |
8921 | 9282 | ||
8922 | for (i = 0; i < tp->irq_cnt; i++) { | 9283 | for (i = 0; i < tp->irq_cnt; i++) { |
@@ -8939,7 +9300,7 @@ static int tg3_open(struct net_device *dev) | |||
8939 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 9300 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
8940 | tg3_free_rings(tp); | 9301 | tg3_free_rings(tp); |
8941 | } else { | 9302 | } else { |
8942 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) | 9303 | if (tg3_flag(tp, TAGGED_STATUS)) |
8943 | tp->timer_offset = HZ; | 9304 | tp->timer_offset = HZ; |
8944 | else | 9305 | else |
8945 | tp->timer_offset = HZ / 10; | 9306 | tp->timer_offset = HZ / 10; |
@@ -8961,7 +9322,7 @@ static int tg3_open(struct net_device *dev) | |||
8961 | if (err) | 9322 | if (err) |
8962 | goto err_out3; | 9323 | goto err_out3; |
8963 | 9324 | ||
8964 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { | 9325 | if (tg3_flag(tp, USING_MSI)) { |
8965 | err = tg3_test_msi(tp); | 9326 | err = tg3_test_msi(tp); |
8966 | 9327 | ||
8967 | if (err) { | 9328 | if (err) { |
@@ -8973,8 +9334,7 @@ static int tg3_open(struct net_device *dev) | |||
8973 | goto err_out2; | 9334 | goto err_out2; |
8974 | } | 9335 | } |
8975 | 9336 | ||
8976 | if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && | 9337 | if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { |
8977 | (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { | ||
8978 | u32 val = tr32(PCIE_TRANSACTION_CFG); | 9338 | u32 val = tr32(PCIE_TRANSACTION_CFG); |
8979 | 9339 | ||
8980 | tw32(PCIE_TRANSACTION_CFG, | 9340 | tw32(PCIE_TRANSACTION_CFG, |
@@ -8987,13 +9347,20 @@ static int tg3_open(struct net_device *dev) | |||
8987 | tg3_full_lock(tp, 0); | 9347 | tg3_full_lock(tp, 0); |
8988 | 9348 | ||
8989 | add_timer(&tp->timer); | 9349 | add_timer(&tp->timer); |
8990 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 9350 | tg3_flag_set(tp, INIT_COMPLETE); |
8991 | tg3_enable_ints(tp); | 9351 | tg3_enable_ints(tp); |
8992 | 9352 | ||
8993 | tg3_full_unlock(tp); | 9353 | tg3_full_unlock(tp); |
8994 | 9354 | ||
8995 | netif_tx_start_all_queues(dev); | 9355 | netif_tx_start_all_queues(dev); |
8996 | 9356 | ||
9357 | /* | ||
9359 | * Reset loopback feature if it was turned on while the device was down; | ||
9359 | * make sure that it's installed properly now. | ||
9360 | */ | ||
9361 | if (dev->features & NETIF_F_LOOPBACK) | ||
9362 | tg3_set_loopback(dev, dev->features); | ||
9363 | |||
8997 | return 0; | 9364 | return 0; |
8998 | 9365 | ||
8999 | err_out3: | 9366 | err_out3: |
@@ -9004,6 +9371,7 @@ err_out3: | |||
9004 | 9371 | ||
9005 | err_out2: | 9372 | err_out2: |
9006 | tg3_napi_disable(tp); | 9373 | tg3_napi_disable(tp); |
9374 | tg3_napi_fini(tp); | ||
9007 | tg3_free_consistent(tp); | 9375 | tg3_free_consistent(tp); |
9008 | 9376 | ||
9009 | err_out1: | 9377 | err_out1: |
@@ -9035,7 +9403,7 @@ static int tg3_close(struct net_device *dev) | |||
9035 | 9403 | ||
9036 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 9404 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
9037 | tg3_free_rings(tp); | 9405 | tg3_free_rings(tp); |
9038 | tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; | 9406 | tg3_flag_clear(tp, INIT_COMPLETE); |
9039 | 9407 | ||
9040 | tg3_full_unlock(tp); | 9408 | tg3_full_unlock(tp); |
9041 | 9409 | ||
@@ -9051,9 +9419,11 @@ static int tg3_close(struct net_device *dev) | |||
9051 | memcpy(&tp->estats_prev, tg3_get_estats(tp), | 9419 | memcpy(&tp->estats_prev, tg3_get_estats(tp), |
9052 | sizeof(tp->estats_prev)); | 9420 | sizeof(tp->estats_prev)); |
9053 | 9421 | ||
9422 | tg3_napi_fini(tp); | ||
9423 | |||
9054 | tg3_free_consistent(tp); | 9424 | tg3_free_consistent(tp); |
9055 | 9425 | ||
9056 | tg3_set_power_state(tp, PCI_D3hot); | 9426 | tg3_power_down(tp); |
9057 | 9427 | ||
9058 | netif_carrier_off(tp->dev); | 9428 | netif_carrier_off(tp->dev); |
9059 | 9429 | ||
@@ -9180,6 +9550,8 @@ static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) | |||
9180 | ESTAT_ADD(nic_avoided_irqs); | 9550 | ESTAT_ADD(nic_avoided_irqs); |
9181 | ESTAT_ADD(nic_tx_threshold_hit); | 9551 | ESTAT_ADD(nic_tx_threshold_hit); |
9182 | 9552 | ||
9553 | ESTAT_ADD(mbuf_lwm_thresh_hit); | ||
9554 | |||
9183 | return estats; | 9555 | return estats; |
9184 | } | 9556 | } |
9185 | 9557 | ||
@@ -9286,18 +9658,11 @@ static void __tg3_set_rx_mode(struct net_device *dev) | |||
9286 | rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | | 9658 | rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | |
9287 | RX_MODE_KEEP_VLAN_TAG); | 9659 | RX_MODE_KEEP_VLAN_TAG); |
9288 | 9660 | ||
9661 | #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE) | ||
9289 | /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG | 9662 | /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG |
9290 | * flag clear. | 9663 | * flag clear. |
9291 | */ | 9664 | */ |
9292 | #if TG3_VLAN_TAG_USED | 9665 | if (!tg3_flag(tp, ENABLE_ASF)) |
9293 | if (!tp->vlgrp && | ||
9294 | !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) | ||
9295 | rx_mode |= RX_MODE_KEEP_VLAN_TAG; | ||
9296 | #else | ||
9297 | /* By definition, VLAN is disabled always in this | ||
9298 | * case. | ||
9299 | */ | ||
9300 | if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) | ||
9301 | rx_mode |= RX_MODE_KEEP_VLAN_TAG; | 9666 | rx_mode |= RX_MODE_KEEP_VLAN_TAG; |
9302 | #endif | 9667 | #endif |
9303 | 9668 | ||
@@ -9351,82 +9716,26 @@ static void tg3_set_rx_mode(struct net_device *dev) | |||
9351 | tg3_full_unlock(tp); | 9716 | tg3_full_unlock(tp); |
9352 | } | 9717 | } |
9353 | 9718 | ||
9354 | #define TG3_REGDUMP_LEN (32 * 1024) | ||
9355 | |||
9356 | static int tg3_get_regs_len(struct net_device *dev) | 9719 | static int tg3_get_regs_len(struct net_device *dev) |
9357 | { | 9720 | { |
9358 | return TG3_REGDUMP_LEN; | 9721 | return TG3_REG_BLK_SIZE; |
9359 | } | 9722 | } |
9360 | 9723 | ||
9361 | static void tg3_get_regs(struct net_device *dev, | 9724 | static void tg3_get_regs(struct net_device *dev, |
9362 | struct ethtool_regs *regs, void *_p) | 9725 | struct ethtool_regs *regs, void *_p) |
9363 | { | 9726 | { |
9364 | u32 *p = _p; | ||
9365 | struct tg3 *tp = netdev_priv(dev); | 9727 | struct tg3 *tp = netdev_priv(dev); |
9366 | u8 *orig_p = _p; | ||
9367 | int i; | ||
9368 | 9728 | ||
9369 | regs->version = 0; | 9729 | regs->version = 0; |
9370 | 9730 | ||
9371 | memset(p, 0, TG3_REGDUMP_LEN); | 9731 | memset(_p, 0, TG3_REG_BLK_SIZE); |
9372 | 9732 | ||
9373 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) | 9733 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) |
9374 | return; | 9734 | return; |
9375 | 9735 | ||
9376 | tg3_full_lock(tp, 0); | 9736 | tg3_full_lock(tp, 0); |
9377 | 9737 | ||
9378 | #define __GET_REG32(reg) (*(p)++ = tr32(reg)) | 9738 | tg3_dump_legacy_regs(tp, (u32 *)_p); |
9379 | #define GET_REG32_LOOP(base, len) \ | ||
9380 | do { p = (u32 *)(orig_p + (base)); \ | ||
9381 | for (i = 0; i < len; i += 4) \ | ||
9382 | __GET_REG32((base) + i); \ | ||
9383 | } while (0) | ||
9384 | #define GET_REG32_1(reg) \ | ||
9385 | do { p = (u32 *)(orig_p + (reg)); \ | ||
9386 | __GET_REG32((reg)); \ | ||
9387 | } while (0) | ||
9388 | |||
9389 | GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0); | ||
9390 | GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200); | ||
9391 | GET_REG32_LOOP(MAC_MODE, 0x4f0); | ||
9392 | GET_REG32_LOOP(SNDDATAI_MODE, 0xe0); | ||
9393 | GET_REG32_1(SNDDATAC_MODE); | ||
9394 | GET_REG32_LOOP(SNDBDS_MODE, 0x80); | ||
9395 | GET_REG32_LOOP(SNDBDI_MODE, 0x48); | ||
9396 | GET_REG32_1(SNDBDC_MODE); | ||
9397 | GET_REG32_LOOP(RCVLPC_MODE, 0x20); | ||
9398 | GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c); | ||
9399 | GET_REG32_LOOP(RCVDBDI_MODE, 0x0c); | ||
9400 | GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c); | ||
9401 | GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44); | ||
9402 | GET_REG32_1(RCVDCC_MODE); | ||
9403 | GET_REG32_LOOP(RCVBDI_MODE, 0x20); | ||
9404 | GET_REG32_LOOP(RCVCC_MODE, 0x14); | ||
9405 | GET_REG32_LOOP(RCVLSC_MODE, 0x08); | ||
9406 | GET_REG32_1(MBFREE_MODE); | ||
9407 | GET_REG32_LOOP(HOSTCC_MODE, 0x100); | ||
9408 | GET_REG32_LOOP(MEMARB_MODE, 0x10); | ||
9409 | GET_REG32_LOOP(BUFMGR_MODE, 0x58); | ||
9410 | GET_REG32_LOOP(RDMAC_MODE, 0x08); | ||
9411 | GET_REG32_LOOP(WDMAC_MODE, 0x08); | ||
9412 | GET_REG32_1(RX_CPU_MODE); | ||
9413 | GET_REG32_1(RX_CPU_STATE); | ||
9414 | GET_REG32_1(RX_CPU_PGMCTR); | ||
9415 | GET_REG32_1(RX_CPU_HWBKPT); | ||
9416 | GET_REG32_1(TX_CPU_MODE); | ||
9417 | GET_REG32_1(TX_CPU_STATE); | ||
9418 | GET_REG32_1(TX_CPU_PGMCTR); | ||
9419 | GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110); | ||
9420 | GET_REG32_LOOP(FTQ_RESET, 0x120); | ||
9421 | GET_REG32_LOOP(MSGINT_MODE, 0x0c); | ||
9422 | GET_REG32_1(DMAC_MODE); | ||
9423 | GET_REG32_LOOP(GRC_MODE, 0x4c); | ||
9424 | if (tp->tg3_flags & TG3_FLAG_NVRAM) | ||
9425 | GET_REG32_LOOP(NVRAM_CMD, 0x24); | ||
9426 | |||
9427 | #undef __GET_REG32 | ||
9428 | #undef GET_REG32_LOOP | ||
9429 | #undef GET_REG32_1 | ||
9430 | 9739 | ||
9431 | tg3_full_unlock(tp); | 9740 | tg3_full_unlock(tp); |
9432 | } | 9741 | } |
@@ -9446,7 +9755,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | |||
9446 | u32 i, offset, len, b_offset, b_count; | 9755 | u32 i, offset, len, b_offset, b_count; |
9447 | __be32 val; | 9756 | __be32 val; |
9448 | 9757 | ||
9449 | if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) | 9758 | if (tg3_flag(tp, NO_NVRAM)) |
9450 | return -EINVAL; | 9759 | return -EINVAL; |
9451 | 9760 | ||
9452 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) | 9761 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) |
@@ -9475,7 +9784,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | |||
9475 | eeprom->len += b_count; | 9784 | eeprom->len += b_count; |
9476 | } | 9785 | } |
9477 | 9786 | ||
9478 | /* read bytes upto the last 4 byte boundary */ | 9787 | /* read bytes up to the last 4 byte boundary */ |
9479 | pd = &data[eeprom->len]; | 9788 | pd = &data[eeprom->len]; |
9480 | for (i = 0; i < (len - (len & 3)); i += 4) { | 9789 | for (i = 0; i < (len - (len & 3)); i += 4) { |
9481 | ret = tg3_nvram_read_be32(tp, offset + i, &val); | 9790 | ret = tg3_nvram_read_be32(tp, offset + i, &val); |
@@ -9514,7 +9823,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | |||
9514 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) | 9823 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) |
9515 | return -EAGAIN; | 9824 | return -EAGAIN; |
9516 | 9825 | ||
9517 | if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || | 9826 | if (tg3_flag(tp, NO_NVRAM) || |
9518 | eeprom->magic != TG3_EEPROM_MAGIC) | 9827 | eeprom->magic != TG3_EEPROM_MAGIC) |
9519 | return -EINVAL; | 9828 | return -EINVAL; |
9520 | 9829 | ||
@@ -9566,7 +9875,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
9566 | { | 9875 | { |
9567 | struct tg3 *tp = netdev_priv(dev); | 9876 | struct tg3 *tp = netdev_priv(dev); |
9568 | 9877 | ||
9569 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 9878 | if (tg3_flag(tp, USE_PHYLIB)) { |
9570 | struct phy_device *phydev; | 9879 | struct phy_device *phydev; |
9571 | if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) | 9880 | if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) |
9572 | return -EAGAIN; | 9881 | return -EAGAIN; |
@@ -9594,8 +9903,11 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
9594 | 9903 | ||
9595 | cmd->advertising = tp->link_config.advertising; | 9904 | cmd->advertising = tp->link_config.advertising; |
9596 | if (netif_running(dev)) { | 9905 | if (netif_running(dev)) { |
9597 | cmd->speed = tp->link_config.active_speed; | 9906 | ethtool_cmd_speed_set(cmd, tp->link_config.active_speed); |
9598 | cmd->duplex = tp->link_config.active_duplex; | 9907 | cmd->duplex = tp->link_config.active_duplex; |
9908 | } else { | ||
9909 | ethtool_cmd_speed_set(cmd, SPEED_INVALID); | ||
9910 | cmd->duplex = DUPLEX_INVALID; | ||
9599 | } | 9911 | } |
9600 | cmd->phy_address = tp->phy_addr; | 9912 | cmd->phy_address = tp->phy_addr; |
9601 | cmd->transceiver = XCVR_INTERNAL; | 9913 | cmd->transceiver = XCVR_INTERNAL; |
@@ -9608,8 +9920,9 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
9608 | static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 9920 | static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
9609 | { | 9921 | { |
9610 | struct tg3 *tp = netdev_priv(dev); | 9922 | struct tg3 *tp = netdev_priv(dev); |
9923 | u32 speed = ethtool_cmd_speed(cmd); | ||
9611 | 9924 | ||
9612 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 9925 | if (tg3_flag(tp, USE_PHYLIB)) { |
9613 | struct phy_device *phydev; | 9926 | struct phy_device *phydev; |
9614 | if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) | 9927 | if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) |
9615 | return -EAGAIN; | 9928 | return -EAGAIN; |
@@ -9657,14 +9970,14 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
9657 | cmd->advertising &= mask; | 9970 | cmd->advertising &= mask; |
9658 | } else { | 9971 | } else { |
9659 | if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { | 9972 | if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { |
9660 | if (cmd->speed != SPEED_1000) | 9973 | if (speed != SPEED_1000) |
9661 | return -EINVAL; | 9974 | return -EINVAL; |
9662 | 9975 | ||
9663 | if (cmd->duplex != DUPLEX_FULL) | 9976 | if (cmd->duplex != DUPLEX_FULL) |
9664 | return -EINVAL; | 9977 | return -EINVAL; |
9665 | } else { | 9978 | } else { |
9666 | if (cmd->speed != SPEED_100 && | 9979 | if (speed != SPEED_100 && |
9667 | cmd->speed != SPEED_10) | 9980 | speed != SPEED_10) |
9668 | return -EINVAL; | 9981 | return -EINVAL; |
9669 | } | 9982 | } |
9670 | } | 9983 | } |
@@ -9679,7 +9992,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
9679 | tp->link_config.duplex = DUPLEX_INVALID; | 9992 | tp->link_config.duplex = DUPLEX_INVALID; |
9680 | } else { | 9993 | } else { |
9681 | tp->link_config.advertising = 0; | 9994 | tp->link_config.advertising = 0; |
9682 | tp->link_config.speed = cmd->speed; | 9995 | tp->link_config.speed = speed; |
9683 | tp->link_config.duplex = cmd->duplex; | 9996 | tp->link_config.duplex = cmd->duplex; |
9684 | } | 9997 | } |
9685 | 9998 | ||
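The get/set settings hunks above switch from touching cmd->speed directly to the ethtool_cmd_speed_set()/ethtool_cmd_speed() helpers, because struct ethtool_cmd splits speeds that do not fit in 16 bits across its speed and speed_hi fields. A standalone sketch of the packing those helpers perform, using a reduced stand-in for the real structure:

#include <stdint.h>
#include <stdio.h>

/* Reduced stand-in for the speed fields of struct ethtool_cmd. */
struct cmd_speed {
	uint16_t speed;		/* low 16 bits of the link speed in Mb/s */
	uint16_t speed_hi;	/* high 16 bits of the link speed in Mb/s */
};

static void cmd_speed_set(struct cmd_speed *cmd, uint32_t speed)
{
	cmd->speed = (uint16_t)speed;
	cmd->speed_hi = (uint16_t)(speed >> 16);
}

static uint32_t cmd_speed_get(const struct cmd_speed *cmd)
{
	return ((uint32_t)cmd->speed_hi << 16) | cmd->speed;
}

int main(void)
{
	struct cmd_speed cmd;

	cmd_speed_set(&cmd, 100000);	/* 100 Gb/s does not fit in 16 bits */
	printf("round trip: %u Mb/s\n", (unsigned)cmd_speed_get(&cmd));
	return 0;
}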
@@ -9709,14 +10022,12 @@ static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
9709 | { | 10022 | { |
9710 | struct tg3 *tp = netdev_priv(dev); | 10023 | struct tg3 *tp = netdev_priv(dev); |
9711 | 10024 | ||
9712 | if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && | 10025 | if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) |
9713 | device_can_wakeup(&tp->pdev->dev)) | ||
9714 | wol->supported = WAKE_MAGIC; | 10026 | wol->supported = WAKE_MAGIC; |
9715 | else | 10027 | else |
9716 | wol->supported = 0; | 10028 | wol->supported = 0; |
9717 | wol->wolopts = 0; | 10029 | wol->wolopts = 0; |
9718 | if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) && | 10030 | if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) |
9719 | device_can_wakeup(&tp->pdev->dev)) | ||
9720 | wol->wolopts = WAKE_MAGIC; | 10031 | wol->wolopts = WAKE_MAGIC; |
9721 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | 10032 | memset(&wol->sopass, 0, sizeof(wol->sopass)); |
9722 | } | 10033 | } |
@@ -9729,17 +10040,16 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
9729 | if (wol->wolopts & ~WAKE_MAGIC) | 10040 | if (wol->wolopts & ~WAKE_MAGIC) |
9730 | return -EINVAL; | 10041 | return -EINVAL; |
9731 | if ((wol->wolopts & WAKE_MAGIC) && | 10042 | if ((wol->wolopts & WAKE_MAGIC) && |
9732 | !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp))) | 10043 | !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) |
9733 | return -EINVAL; | 10044 | return -EINVAL; |
9734 | 10045 | ||
10046 | device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); | ||
10047 | |||
9735 | spin_lock_bh(&tp->lock); | 10048 | spin_lock_bh(&tp->lock); |
9736 | if (wol->wolopts & WAKE_MAGIC) { | 10049 | if (device_may_wakeup(dp)) |
9737 | tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; | 10050 | tg3_flag_set(tp, WOL_ENABLE); |
9738 | device_set_wakeup_enable(dp, true); | 10051 | else |
9739 | } else { | 10052 | tg3_flag_clear(tp, WOL_ENABLE); |
9740 | tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; | ||
9741 | device_set_wakeup_enable(dp, false); | ||
9742 | } | ||
9743 | spin_unlock_bh(&tp->lock); | 10053 | spin_unlock_bh(&tp->lock); |
9744 | 10054 | ||
9745 | return 0; | 10055 | return 0; |
@@ -9757,33 +10067,6 @@ static void tg3_set_msglevel(struct net_device *dev, u32 value) | |||
9757 | tp->msg_enable = value; | 10067 | tp->msg_enable = value; |
9758 | } | 10068 | } |
9759 | 10069 | ||
9760 | static int tg3_set_tso(struct net_device *dev, u32 value) | ||
9761 | { | ||
9762 | struct tg3 *tp = netdev_priv(dev); | ||
9763 | |||
9764 | if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { | ||
9765 | if (value) | ||
9766 | return -EINVAL; | ||
9767 | return 0; | ||
9768 | } | ||
9769 | if ((dev->features & NETIF_F_IPV6_CSUM) && | ||
9770 | ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || | ||
9771 | (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) { | ||
9772 | if (value) { | ||
9773 | dev->features |= NETIF_F_TSO6; | ||
9774 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || | ||
9775 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | ||
9776 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && | ||
9777 | GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || | ||
9778 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | ||
9779 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) | ||
9780 | dev->features |= NETIF_F_TSO_ECN; | ||
9781 | } else | ||
9782 | dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); | ||
9783 | } | ||
9784 | return ethtool_op_set_tso(dev, value); | ||
9785 | } | ||
9786 | |||
9787 | static int tg3_nway_reset(struct net_device *dev) | 10070 | static int tg3_nway_reset(struct net_device *dev) |
9788 | { | 10071 | { |
9789 | struct tg3 *tp = netdev_priv(dev); | 10072 | struct tg3 *tp = netdev_priv(dev); |
@@ -9795,7 +10078,7 @@ static int tg3_nway_reset(struct net_device *dev) | |||
9795 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) | 10078 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) |
9796 | return -EINVAL; | 10079 | return -EINVAL; |
9797 | 10080 | ||
9798 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 10081 | if (tg3_flag(tp, USE_PHYLIB)) { |
9799 | if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) | 10082 | if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) |
9800 | return -EAGAIN; | 10083 | return -EAGAIN; |
9801 | r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); | 10084 | r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); |
@@ -9822,10 +10105,10 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam * | |||
9822 | { | 10105 | { |
9823 | struct tg3 *tp = netdev_priv(dev); | 10106 | struct tg3 *tp = netdev_priv(dev); |
9824 | 10107 | ||
9825 | ering->rx_max_pending = TG3_RX_RING_SIZE - 1; | 10108 | ering->rx_max_pending = tp->rx_std_ring_mask; |
9826 | ering->rx_mini_max_pending = 0; | 10109 | ering->rx_mini_max_pending = 0; |
9827 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) | 10110 | if (tg3_flag(tp, JUMBO_RING_ENABLE)) |
9828 | ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1; | 10111 | ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; |
9829 | else | 10112 | else |
9830 | ering->rx_jumbo_max_pending = 0; | 10113 | ering->rx_jumbo_max_pending = 0; |
9831 | 10114 | ||
@@ -9833,7 +10116,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam * | |||
9833 | 10116 | ||
9834 | ering->rx_pending = tp->rx_pending; | 10117 | ering->rx_pending = tp->rx_pending; |
9835 | ering->rx_mini_pending = 0; | 10118 | ering->rx_mini_pending = 0; |
9836 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) | 10119 | if (tg3_flag(tp, JUMBO_RING_ENABLE)) |
9837 | ering->rx_jumbo_pending = tp->rx_jumbo_pending; | 10120 | ering->rx_jumbo_pending = tp->rx_jumbo_pending; |
9838 | else | 10121 | else |
9839 | ering->rx_jumbo_pending = 0; | 10122 | ering->rx_jumbo_pending = 0; |
@@ -9846,11 +10129,11 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e | |||
9846 | struct tg3 *tp = netdev_priv(dev); | 10129 | struct tg3 *tp = netdev_priv(dev); |
9847 | int i, irq_sync = 0, err = 0; | 10130 | int i, irq_sync = 0, err = 0; |
9848 | 10131 | ||
9849 | if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || | 10132 | if ((ering->rx_pending > tp->rx_std_ring_mask) || |
9850 | (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || | 10133 | (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || |
9851 | (ering->tx_pending > TG3_TX_RING_SIZE - 1) || | 10134 | (ering->tx_pending > TG3_TX_RING_SIZE - 1) || |
9852 | (ering->tx_pending <= MAX_SKB_FRAGS) || | 10135 | (ering->tx_pending <= MAX_SKB_FRAGS) || |
9853 | ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) && | 10136 | (tg3_flag(tp, TSO_BUG) && |
9854 | (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) | 10137 | (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) |
9855 | return -EINVAL; | 10138 | return -EINVAL; |
9856 | 10139 | ||
@@ -9864,12 +10147,12 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e | |||
9864 | 10147 | ||
9865 | tp->rx_pending = ering->rx_pending; | 10148 | tp->rx_pending = ering->rx_pending; |
9866 | 10149 | ||
9867 | if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) && | 10150 | if (tg3_flag(tp, MAX_RXPEND_64) && |
9868 | tp->rx_pending > 63) | 10151 | tp->rx_pending > 63) |
9869 | tp->rx_pending = 63; | 10152 | tp->rx_pending = 63; |
9870 | tp->rx_jumbo_pending = ering->rx_jumbo_pending; | 10153 | tp->rx_jumbo_pending = ering->rx_jumbo_pending; |
9871 | 10154 | ||
9872 | for (i = 0; i < TG3_IRQ_MAX_VECS; i++) | 10155 | for (i = 0; i < tp->irq_max; i++) |
9873 | tp->napi[i].tx_pending = ering->tx_pending; | 10156 | tp->napi[i].tx_pending = ering->tx_pending; |
9874 | 10157 | ||
9875 | if (netif_running(dev)) { | 10158 | if (netif_running(dev)) { |
@@ -9891,7 +10174,7 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam | |||
9891 | { | 10174 | { |
9892 | struct tg3 *tp = netdev_priv(dev); | 10175 | struct tg3 *tp = netdev_priv(dev); |
9893 | 10176 | ||
9894 | epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0; | 10177 | epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); |
9895 | 10178 | ||
9896 | if (tp->link_config.active_flowctrl & FLOW_CTRL_RX) | 10179 | if (tp->link_config.active_flowctrl & FLOW_CTRL_RX) |
9897 | epause->rx_pause = 1; | 10180 | epause->rx_pause = 1; |
@@ -9909,7 +10192,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam | |||
9909 | struct tg3 *tp = netdev_priv(dev); | 10192 | struct tg3 *tp = netdev_priv(dev); |
9910 | int err = 0; | 10193 | int err = 0; |
9911 | 10194 | ||
9912 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 10195 | if (tg3_flag(tp, USE_PHYLIB)) { |
9913 | u32 newadv; | 10196 | u32 newadv; |
9914 | struct phy_device *phydev; | 10197 | struct phy_device *phydev; |
9915 | 10198 | ||
@@ -9917,8 +10200,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam | |||
9917 | 10200 | ||
9918 | if (!(phydev->supported & SUPPORTED_Pause) || | 10201 | if (!(phydev->supported & SUPPORTED_Pause) || |
9919 | (!(phydev->supported & SUPPORTED_Asym_Pause) && | 10202 | (!(phydev->supported & SUPPORTED_Asym_Pause) && |
9920 | ((epause->rx_pause && !epause->tx_pause) || | 10203 | (epause->rx_pause != epause->tx_pause))) |
9921 | (!epause->rx_pause && epause->tx_pause)))) | ||
9922 | return -EINVAL; | 10204 | return -EINVAL; |
9923 | 10205 | ||
9924 | tp->link_config.flowctrl = 0; | 10206 | tp->link_config.flowctrl = 0; |
@@ -9938,9 +10220,9 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam | |||
9938 | newadv = 0; | 10220 | newadv = 0; |
9939 | 10221 | ||
9940 | if (epause->autoneg) | 10222 | if (epause->autoneg) |
9941 | tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; | 10223 | tg3_flag_set(tp, PAUSE_AUTONEG); |
9942 | else | 10224 | else |
9943 | tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; | 10225 | tg3_flag_clear(tp, PAUSE_AUTONEG); |
9944 | 10226 | ||
9945 | if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { | 10227 | if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { |
9946 | u32 oldadv = phydev->advertising & | 10228 | u32 oldadv = phydev->advertising & |
@@ -9982,9 +10264,9 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam | |||
9982 | tg3_full_lock(tp, irq_sync); | 10264 | tg3_full_lock(tp, irq_sync); |
9983 | 10265 | ||
9984 | if (epause->autoneg) | 10266 | if (epause->autoneg) |
9985 | tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; | 10267 | tg3_flag_set(tp, PAUSE_AUTONEG); |
9986 | else | 10268 | else |
9987 | tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; | 10269 | tg3_flag_clear(tp, PAUSE_AUTONEG); |
9988 | if (epause->rx_pause) | 10270 | if (epause->rx_pause) |
9989 | tp->link_config.flowctrl |= FLOW_CTRL_RX; | 10271 | tp->link_config.flowctrl |= FLOW_CTRL_RX; |
9990 | else | 10272 | else |
@@ -10007,50 +10289,6 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam | |||
10007 | return err; | 10289 | return err; |
10008 | } | 10290 | } |
10009 | 10291 | ||
10010 | static u32 tg3_get_rx_csum(struct net_device *dev) | ||
10011 | { | ||
10012 | struct tg3 *tp = netdev_priv(dev); | ||
10013 | return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0; | ||
10014 | } | ||
10015 | |||
10016 | static int tg3_set_rx_csum(struct net_device *dev, u32 data) | ||
10017 | { | ||
10018 | struct tg3 *tp = netdev_priv(dev); | ||
10019 | |||
10020 | if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { | ||
10021 | if (data != 0) | ||
10022 | return -EINVAL; | ||
10023 | return 0; | ||
10024 | } | ||
10025 | |||
10026 | spin_lock_bh(&tp->lock); | ||
10027 | if (data) | ||
10028 | tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; | ||
10029 | else | ||
10030 | tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; | ||
10031 | spin_unlock_bh(&tp->lock); | ||
10032 | |||
10033 | return 0; | ||
10034 | } | ||
10035 | |||
10036 | static int tg3_set_tx_csum(struct net_device *dev, u32 data) | ||
10037 | { | ||
10038 | struct tg3 *tp = netdev_priv(dev); | ||
10039 | |||
10040 | if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { | ||
10041 | if (data != 0) | ||
10042 | return -EINVAL; | ||
10043 | return 0; | ||
10044 | } | ||
10045 | |||
10046 | if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) | ||
10047 | ethtool_op_set_tx_ipv6_csum(dev, data); | ||
10048 | else | ||
10049 | ethtool_op_set_tx_csum(dev, data); | ||
10050 | |||
10051 | return 0; | ||
10052 | } | ||
10053 | |||
10054 | static int tg3_get_sset_count(struct net_device *dev, int sset) | 10292 | static int tg3_get_sset_count(struct net_device *dev, int sset) |
10055 | { | 10293 | { |
10056 | switch (sset) { | 10294 | switch (sset) { |
@@ -10078,35 +10316,38 @@ static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) | |||
10078 | } | 10316 | } |
10079 | } | 10317 | } |
10080 | 10318 | ||
10081 | static int tg3_phys_id(struct net_device *dev, u32 data) | 10319 | static int tg3_set_phys_id(struct net_device *dev, |
10320 | enum ethtool_phys_id_state state) | ||
10082 | { | 10321 | { |
10083 | struct tg3 *tp = netdev_priv(dev); | 10322 | struct tg3 *tp = netdev_priv(dev); |
10084 | int i; | ||
10085 | 10323 | ||
10086 | if (!netif_running(tp->dev)) | 10324 | if (!netif_running(tp->dev)) |
10087 | return -EAGAIN; | 10325 | return -EAGAIN; |
10088 | 10326 | ||
10089 | if (data == 0) | 10327 | switch (state) { |
10090 | data = UINT_MAX / 2; | 10328 | case ETHTOOL_ID_ACTIVE: |
10091 | 10329 | return 1; /* cycle on/off once per second */ | |
10092 | for (i = 0; i < (data * 2); i++) { | 10330 | |
10093 | if ((i % 2) == 0) | 10331 | case ETHTOOL_ID_ON: |
10094 | tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | | 10332 | tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | |
10095 | LED_CTRL_1000MBPS_ON | | 10333 | LED_CTRL_1000MBPS_ON | |
10096 | LED_CTRL_100MBPS_ON | | 10334 | LED_CTRL_100MBPS_ON | |
10097 | LED_CTRL_10MBPS_ON | | 10335 | LED_CTRL_10MBPS_ON | |
10098 | LED_CTRL_TRAFFIC_OVERRIDE | | 10336 | LED_CTRL_TRAFFIC_OVERRIDE | |
10099 | LED_CTRL_TRAFFIC_BLINK | | 10337 | LED_CTRL_TRAFFIC_BLINK | |
10100 | LED_CTRL_TRAFFIC_LED); | 10338 | LED_CTRL_TRAFFIC_LED); |
10339 | break; | ||
10101 | 10340 | ||
10102 | else | 10341 | case ETHTOOL_ID_OFF: |
10103 | tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | | 10342 | tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | |
10104 | LED_CTRL_TRAFFIC_OVERRIDE); | 10343 | LED_CTRL_TRAFFIC_OVERRIDE); |
10344 | break; | ||
10105 | 10345 | ||
10106 | if (msleep_interruptible(500)) | 10346 | case ETHTOOL_ID_INACTIVE: |
10107 | break; | 10347 | tw32(MAC_LED_CTRL, tp->led_ctrl); |
10348 | break; | ||
10108 | } | 10349 | } |
10109 | tw32(MAC_LED_CTRL, tp->led_ctrl); | 10350 | |
10110 | return 0; | 10351 | return 0; |
10111 | } | 10352 | } |
10112 | 10353 | ||
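The hunk above replaces the old tg3_phys_id() blink loop with the newer ethtool set_phys_id callback: the ethtool core now owns the timing and calls back with ETHTOOL_ID_ACTIVE / ON / OFF / INACTIVE, and returning 1 from the ACTIVE case asks for one on/off cycle per second. A minimal sketch of what such a callback looks like in a hypothetical driver (the foo_* names and LED helpers are illustrative, not tg3's):

    static int foo_set_phys_id(struct net_device *dev,
                               enum ethtool_phys_id_state state)
    {
            struct foo_priv *priv = netdev_priv(dev);

            switch (state) {
            case ETHTOOL_ID_ACTIVE:
                    return 1;                               /* blink once per second */
            case ETHTOOL_ID_ON:
                    foo_led_write(priv, FOO_LED_ON);        /* hypothetical helper */
                    break;
            case ETHTOOL_ID_OFF:
                    foo_led_write(priv, FOO_LED_OFF);       /* hypothetical helper */
                    break;
            case ETHTOOL_ID_INACTIVE:
                    foo_led_write(priv, priv->default_led); /* restore normal LED mode */
                    break;
            }
            return 0;
    }

The handler is wired up through .set_phys_id in struct ethtool_ops, which is exactly the ops-table change made further down in this diff.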
@@ -10117,6 +10358,80 @@ static void tg3_get_ethtool_stats(struct net_device *dev, | |||
10117 | memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats)); | 10358 | memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats)); |
10118 | } | 10359 | } |
10119 | 10360 | ||
10361 | static __be32 * tg3_vpd_readblock(struct tg3 *tp) | ||
10362 | { | ||
10363 | int i; | ||
10364 | __be32 *buf; | ||
10365 | u32 offset = 0, len = 0; | ||
10366 | u32 magic, val; | ||
10367 | |||
10368 | if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) | ||
10369 | return NULL; | ||
10370 | |||
10371 | if (magic == TG3_EEPROM_MAGIC) { | ||
10372 | for (offset = TG3_NVM_DIR_START; | ||
10373 | offset < TG3_NVM_DIR_END; | ||
10374 | offset += TG3_NVM_DIRENT_SIZE) { | ||
10375 | if (tg3_nvram_read(tp, offset, &val)) | ||
10376 | return NULL; | ||
10377 | |||
10378 | if ((val >> TG3_NVM_DIRTYPE_SHIFT) == | ||
10379 | TG3_NVM_DIRTYPE_EXTVPD) | ||
10380 | break; | ||
10381 | } | ||
10382 | |||
10383 | if (offset != TG3_NVM_DIR_END) { | ||
10384 | len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; | ||
10385 | if (tg3_nvram_read(tp, offset + 4, &offset)) | ||
10386 | return NULL; | ||
10387 | |||
10388 | offset = tg3_nvram_logical_addr(tp, offset); | ||
10389 | } | ||
10390 | } | ||
10391 | |||
10392 | if (!offset || !len) { | ||
10393 | offset = TG3_NVM_VPD_OFF; | ||
10394 | len = TG3_NVM_VPD_LEN; | ||
10395 | } | ||
10396 | |||
10397 | buf = kmalloc(len, GFP_KERNEL); | ||
10398 | if (buf == NULL) | ||
10399 | return NULL; | ||
10400 | |||
10401 | if (magic == TG3_EEPROM_MAGIC) { | ||
10402 | for (i = 0; i < len; i += 4) { | ||
10403 | /* The data is in little-endian format in NVRAM. | ||
10404 | * Use the big-endian read routines to preserve | ||
10405 | * the byte order as it exists in NVRAM. | ||
10406 | */ | ||
10407 | if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) | ||
10408 | goto error; | ||
10409 | } | ||
10410 | } else { | ||
10411 | u8 *ptr; | ||
10412 | ssize_t cnt; | ||
10413 | unsigned int pos = 0; | ||
10414 | |||
10415 | ptr = (u8 *)&buf[0]; | ||
10416 | for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) { | ||
10417 | cnt = pci_read_vpd(tp->pdev, pos, | ||
10418 | len - pos, ptr); | ||
10419 | if (cnt == -ETIMEDOUT || cnt == -EINTR) | ||
10420 | cnt = 0; | ||
10421 | else if (cnt < 0) | ||
10422 | goto error; | ||
10423 | } | ||
10424 | if (pos != len) | ||
10425 | goto error; | ||
10426 | } | ||
10427 | |||
10428 | return buf; | ||
10429 | |||
10430 | error: | ||
10431 | kfree(buf); | ||
10432 | return NULL; | ||
10433 | } | ||
10434 | |||
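tg3_vpd_readblock(), added above, fetches the VPD image one of two ways: from NVRAM with big-endian word reads (preserving the little-endian byte order stored there), or, when no extended-VPD directory entry is found, from the PCI VPD capability via pci_read_vpd(), retrying up to three times and treating -ETIMEDOUT / -EINTR as "nothing read, try again". A stripped-down sketch of that fallback loop, assuming the caller supplies pdev and a vpd_len it has already decided on:

    u8 *vpd = kmalloc(vpd_len, GFP_KERNEL);
    unsigned int pos = 0;
    int tries;

    if (!vpd)
            return NULL;

    for (tries = 0; pos < vpd_len && tries < 3; tries++) {
            ssize_t cnt = pci_read_vpd(pdev, pos, vpd_len - pos, vpd + pos);

            if (cnt == -ETIMEDOUT || cnt == -EINTR)
                    cnt = 0;        /* transient error: retry this offset */
            else if (cnt < 0) {
                    kfree(vpd);     /* hard error: give up */
                    return NULL;
            }
            pos += cnt;
    }
    if (pos != vpd_len) {           /* still short after three attempts */
            kfree(vpd);
            return NULL;
    }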
10120 | #define NVRAM_TEST_SIZE 0x100 | 10435 | #define NVRAM_TEST_SIZE 0x100 |
10121 | #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 | 10436 | #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 |
10122 | #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 | 10437 | #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 |
@@ -10130,7 +10445,7 @@ static int tg3_test_nvram(struct tg3 *tp) | |||
10130 | __be32 *buf; | 10445 | __be32 *buf; |
10131 | int i, j, k, err = 0, size; | 10446 | int i, j, k, err = 0, size; |
10132 | 10447 | ||
10133 | if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) | 10448 | if (tg3_flag(tp, NO_NVRAM)) |
10134 | return 0; | 10449 | return 0; |
10135 | 10450 | ||
10136 | if (tg3_nvram_read(tp, 0, &magic) != 0) | 10451 | if (tg3_nvram_read(tp, 0, &magic) != 0) |
@@ -10244,16 +10559,50 @@ static int tg3_test_nvram(struct tg3 *tp) | |||
10244 | goto out; | 10559 | goto out; |
10245 | } | 10560 | } |
10246 | 10561 | ||
10562 | err = -EIO; | ||
10563 | |||
10247 | /* Bootstrap checksum at offset 0x10 */ | 10564 | /* Bootstrap checksum at offset 0x10 */ |
10248 | csum = calc_crc((unsigned char *) buf, 0x10); | 10565 | csum = calc_crc((unsigned char *) buf, 0x10); |
10249 | if (csum != be32_to_cpu(buf[0x10/4])) | 10566 | if (csum != le32_to_cpu(buf[0x10/4])) |
10250 | goto out; | 10567 | goto out; |
10251 | 10568 | ||
10252 | /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ | 10569 | /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ |
10253 | csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); | 10570 | csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); |
10254 | if (csum != be32_to_cpu(buf[0xfc/4])) | 10571 | if (csum != le32_to_cpu(buf[0xfc/4])) |
10255 | goto out; | 10572 | goto out; |
10256 | 10573 | ||
10574 | kfree(buf); | ||
10575 | |||
10576 | buf = tg3_vpd_readblock(tp); | ||
10577 | if (!buf) | ||
10578 | return -ENOMEM; | ||
10579 | |||
10580 | i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN, | ||
10581 | PCI_VPD_LRDT_RO_DATA); | ||
10582 | if (i > 0) { | ||
10583 | j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); | ||
10584 | if (j < 0) | ||
10585 | goto out; | ||
10586 | |||
10587 | if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN) | ||
10588 | goto out; | ||
10589 | |||
10590 | i += PCI_VPD_LRDT_TAG_SIZE; | ||
10591 | j = pci_vpd_find_info_keyword((u8 *)buf, i, j, | ||
10592 | PCI_VPD_RO_KEYWORD_CHKSUM); | ||
10593 | if (j > 0) { | ||
10594 | u8 csum8 = 0; | ||
10595 | |||
10596 | j += PCI_VPD_INFO_FLD_HDR_SIZE; | ||
10597 | |||
10598 | for (i = 0; i <= j; i++) | ||
10599 | csum8 += ((u8 *)buf)[i]; | ||
10600 | |||
10601 | if (csum8) | ||
10602 | goto out; | ||
10603 | } | ||
10604 | } | ||
10605 | |||
10257 | err = 0; | 10606 | err = 0; |
10258 | 10607 | ||
10259 | out: | 10608 | out: |
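The block just added to tg3_test_nvram() also validates the VPD read-only section: pci_vpd_find_tag() locates the large-resource RO data tag, pci_vpd_find_info_keyword() finds the RV (checksum) keyword inside it, and all bytes from the start of the VPD image up to and including the stored checksum byte must sum to zero modulo 256. A small sketch of that final step, assuming vpd is the image and rv_off is the keyword offset returned by the lookup:

    /* The checksum byte sits right after the 3-byte info-field header
     * (2 keyword bytes + 1 length byte), so include it in the sum.
     */
    unsigned int end = rv_off + PCI_VPD_INFO_FLD_HDR_SIZE;
    unsigned int k;
    u8 csum8 = 0;

    for (k = 0; k <= end; k++)
            csum8 += vpd[k];

    if (csum8 != 0)
            return -EIO;            /* corrupt VPD read-only section */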
@@ -10438,9 +10787,9 @@ static int tg3_test_registers(struct tg3 *tp) | |||
10438 | }; | 10787 | }; |
10439 | 10788 | ||
10440 | is_5705 = is_5750 = 0; | 10789 | is_5705 = is_5750 = 0; |
10441 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 10790 | if (tg3_flag(tp, 5705_PLUS)) { |
10442 | is_5705 = 1; | 10791 | is_5705 = 1; |
10443 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) | 10792 | if (tg3_flag(tp, 5750_PLUS)) |
10444 | is_5750 = 1; | 10793 | is_5750 = 1; |
10445 | } | 10794 | } |
10446 | 10795 | ||
@@ -10451,7 +10800,7 @@ static int tg3_test_registers(struct tg3 *tp) | |||
10451 | if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) | 10800 | if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) |
10452 | continue; | 10801 | continue; |
10453 | 10802 | ||
10454 | if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) && | 10803 | if (tg3_flag(tp, IS_5788) && |
10455 | (reg_tbl[i].flags & TG3_FL_NOT_5788)) | 10804 | (reg_tbl[i].flags & TG3_FL_NOT_5788)) |
10456 | continue; | 10805 | continue; |
10457 | 10806 | ||
@@ -10574,16 +10923,15 @@ static int tg3_test_memory(struct tg3 *tp) | |||
10574 | int err = 0; | 10923 | int err = 0; |
10575 | int i; | 10924 | int i; |
10576 | 10925 | ||
10577 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 10926 | if (tg3_flag(tp, 5717_PLUS)) |
10578 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) | ||
10579 | mem_tbl = mem_tbl_5717; | 10927 | mem_tbl = mem_tbl_5717; |
10580 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | 10928 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) |
10581 | mem_tbl = mem_tbl_57765; | 10929 | mem_tbl = mem_tbl_57765; |
10582 | else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) | 10930 | else if (tg3_flag(tp, 5755_PLUS)) |
10583 | mem_tbl = mem_tbl_5755; | 10931 | mem_tbl = mem_tbl_5755; |
10584 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 10932 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) |
10585 | mem_tbl = mem_tbl_5906; | 10933 | mem_tbl = mem_tbl_5906; |
10586 | else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) | 10934 | else if (tg3_flag(tp, 5705_PLUS)) |
10587 | mem_tbl = mem_tbl_5705; | 10935 | mem_tbl = mem_tbl_5705; |
10588 | else | 10936 | else |
10589 | mem_tbl = mem_tbl_570x; | 10937 | mem_tbl = mem_tbl_570x; |
@@ -10599,24 +10947,49 @@ static int tg3_test_memory(struct tg3 *tp) | |||
10599 | 10947 | ||
10600 | #define TG3_MAC_LOOPBACK 0 | 10948 | #define TG3_MAC_LOOPBACK 0 |
10601 | #define TG3_PHY_LOOPBACK 1 | 10949 | #define TG3_PHY_LOOPBACK 1 |
10950 | #define TG3_TSO_LOOPBACK 2 | ||
10951 | |||
10952 | #define TG3_TSO_MSS 500 | ||
10953 | |||
10954 | #define TG3_TSO_IP_HDR_LEN 20 | ||
10955 | #define TG3_TSO_TCP_HDR_LEN 20 | ||
10956 | #define TG3_TSO_TCP_OPT_LEN 12 | ||
10957 | |||
10958 | static const u8 tg3_tso_header[] = { | ||
10959 | 0x08, 0x00, | ||
10960 | 0x45, 0x00, 0x00, 0x00, | ||
10961 | 0x00, 0x00, 0x40, 0x00, | ||
10962 | 0x40, 0x06, 0x00, 0x00, | ||
10963 | 0x0a, 0x00, 0x00, 0x01, | ||
10964 | 0x0a, 0x00, 0x00, 0x02, | ||
10965 | 0x0d, 0x00, 0xe0, 0x00, | ||
10966 | 0x00, 0x00, 0x01, 0x00, | ||
10967 | 0x00, 0x00, 0x02, 0x00, | ||
10968 | 0x80, 0x10, 0x10, 0x00, | ||
10969 | 0x14, 0x09, 0x00, 0x00, | ||
10970 | 0x01, 0x01, 0x08, 0x0a, | ||
10971 | 0x11, 0x11, 0x11, 0x11, | ||
10972 | 0x11, 0x11, 0x11, 0x11, | ||
10973 | }; | ||
10602 | 10974 | ||
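The new TG3_TSO_LOOPBACK mode sends a single oversized frame built from the tg3_tso_header template above: the 0x0800 ethertype, a 20-byte IPv4 header, and a 20-byte TCP header carrying 12 bytes of options, so hdr_len = 52; with TG3_TSO_MSS = 500 the IP total length patched into the template is mss + hdr_len = 552. A hedged sketch of that patch-up step, assuming tx_data points at the start of the test frame (destination MAC at offset 0) as in the function below:

    #include <linux/ip.h>
    #include <linux/if_ether.h>

    /* The canned header is copied in at tx_data + 2 * ETH_ALEN, so the
     * IPv4 header begins at offset ETH_HLEN (14).
     */
    struct iphdr *iph = (struct iphdr *)(tx_data + ETH_HLEN);
    u32 hdr_len = 20 + 20 + 12;                     /* IP + TCP + TCP options */
    u32 mss = 500;                                  /* TG3_TSO_MSS */

    iph->tot_len = htons((u16)(mss + hdr_len));     /* 552 */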
10603 | static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | 10975 | static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode) |
10604 | { | 10976 | { |
10605 | u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key; | 10977 | u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key; |
10606 | u32 desc_idx, coal_now; | 10978 | u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val; |
10607 | struct sk_buff *skb, *rx_skb; | 10979 | struct sk_buff *skb, *rx_skb; |
10608 | u8 *tx_data; | 10980 | u8 *tx_data; |
10609 | dma_addr_t map; | 10981 | dma_addr_t map; |
10610 | int num_pkts, tx_len, rx_len, i, err; | 10982 | int num_pkts, tx_len, rx_len, i, err; |
10611 | struct tg3_rx_buffer_desc *desc; | 10983 | struct tg3_rx_buffer_desc *desc; |
10612 | struct tg3_napi *tnapi, *rnapi; | 10984 | struct tg3_napi *tnapi, *rnapi; |
10613 | struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; | 10985 | struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; |
10614 | 10986 | ||
10615 | tnapi = &tp->napi[0]; | 10987 | tnapi = &tp->napi[0]; |
10616 | rnapi = &tp->napi[0]; | 10988 | rnapi = &tp->napi[0]; |
10617 | if (tp->irq_cnt > 1) { | 10989 | if (tp->irq_cnt > 1) { |
10618 | rnapi = &tp->napi[1]; | 10990 | if (tg3_flag(tp, ENABLE_RSS)) |
10619 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) | 10991 | rnapi = &tp->napi[1]; |
10992 | if (tg3_flag(tp, ENABLE_TSS)) | ||
10620 | tnapi = &tp->napi[1]; | 10993 | tnapi = &tp->napi[1]; |
10621 | } | 10994 | } |
10622 | coal_now = tnapi->coal_now | rnapi->coal_now; | 10995 | coal_now = tnapi->coal_now | rnapi->coal_now; |
@@ -10624,23 +10997,24 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | |||
10624 | if (loopback_mode == TG3_MAC_LOOPBACK) { | 10997 | if (loopback_mode == TG3_MAC_LOOPBACK) { |
10625 | /* HW errata - mac loopback fails in some cases on 5780. | 10998 | /* HW errata - mac loopback fails in some cases on 5780. |
10626 | * Normal traffic and PHY loopback are not affected by | 10999 | * Normal traffic and PHY loopback are not affected by |
10627 | * errata. | 11000 | * errata. Also, the MAC loopback test is deprecated for |
11001 | * all newer ASIC revisions. | ||
10628 | */ | 11002 | */ |
10629 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) | 11003 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || |
11004 | tg3_flag(tp, CPMU_PRESENT)) | ||
10630 | return 0; | 11005 | return 0; |
10631 | 11006 | ||
10632 | mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | | 11007 | mac_mode = tp->mac_mode & |
10633 | MAC_MODE_PORT_INT_LPBACK; | 11008 | ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); |
10634 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 11009 | mac_mode |= MAC_MODE_PORT_INT_LPBACK; |
11010 | if (!tg3_flag(tp, 5705_PLUS)) | ||
10635 | mac_mode |= MAC_MODE_LINK_POLARITY; | 11011 | mac_mode |= MAC_MODE_LINK_POLARITY; |
10636 | if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) | 11012 | if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) |
10637 | mac_mode |= MAC_MODE_PORT_MODE_MII; | 11013 | mac_mode |= MAC_MODE_PORT_MODE_MII; |
10638 | else | 11014 | else |
10639 | mac_mode |= MAC_MODE_PORT_MODE_GMII; | 11015 | mac_mode |= MAC_MODE_PORT_MODE_GMII; |
10640 | tw32(MAC_MODE, mac_mode); | 11016 | tw32(MAC_MODE, mac_mode); |
10641 | } else if (loopback_mode == TG3_PHY_LOOPBACK) { | 11017 | } else { |
10642 | u32 val; | ||
10643 | |||
10644 | if (tp->phy_flags & TG3_PHYFLG_IS_FET) { | 11018 | if (tp->phy_flags & TG3_PHYFLG_IS_FET) { |
10645 | tg3_phy_fet_toggle_apd(tp, false); | 11019 | tg3_phy_fet_toggle_apd(tp, false); |
10646 | val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100; | 11020 | val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100; |
@@ -10652,7 +11026,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | |||
10652 | tg3_writephy(tp, MII_BMCR, val); | 11026 | tg3_writephy(tp, MII_BMCR, val); |
10653 | udelay(40); | 11027 | udelay(40); |
10654 | 11028 | ||
10655 | mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; | 11029 | mac_mode = tp->mac_mode & |
11030 | ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); | ||
10656 | if (tp->phy_flags & TG3_PHYFLG_IS_FET) { | 11031 | if (tp->phy_flags & TG3_PHYFLG_IS_FET) { |
10657 | tg3_writephy(tp, MII_TG3_FET_PTEST, | 11032 | tg3_writephy(tp, MII_TG3_FET_PTEST, |
10658 | MII_TG3_FET_PTEST_FRC_TX_LINK | | 11033 | MII_TG3_FET_PTEST_FRC_TX_LINK | |
@@ -10680,13 +11055,18 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | |||
10680 | MII_TG3_EXT_CTRL_LNK3_LED_MODE); | 11055 | MII_TG3_EXT_CTRL_LNK3_LED_MODE); |
10681 | } | 11056 | } |
10682 | tw32(MAC_MODE, mac_mode); | 11057 | tw32(MAC_MODE, mac_mode); |
10683 | } else { | 11058 | |
10684 | return -EINVAL; | 11059 | /* Wait for link */ |
11060 | for (i = 0; i < 100; i++) { | ||
11061 | if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) | ||
11062 | break; | ||
11063 | mdelay(1); | ||
11064 | } | ||
10685 | } | 11065 | } |
10686 | 11066 | ||
10687 | err = -EIO; | 11067 | err = -EIO; |
10688 | 11068 | ||
10689 | tx_len = 1514; | 11069 | tx_len = pktsz; |
10690 | skb = netdev_alloc_skb(tp->dev, tx_len); | 11070 | skb = netdev_alloc_skb(tp->dev, tx_len); |
10691 | if (!skb) | 11071 | if (!skb) |
10692 | return -ENOMEM; | 11072 | return -ENOMEM; |
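Before injecting the test frame, the PHY-loopback path above now waits for the MAC to report link, polling MAC_TX_STATUS for TX_STATUS_LINK_UP for up to 100 ms in 1 ms steps. tg3 open-codes that loop; the same bounded-poll idiom written as a stand-alone helper would look roughly like this (a sketch only, the helper name is made up, and tr32()/mdelay() are the accessors seen throughout this diff):

    /* Return 0 once (reg & bit) is set, -ETIMEDOUT after 'ms' milliseconds. */
    static int tg3_poll_reg_bit(struct tg3 *tp, u32 reg, u32 bit, int ms)
    {
            int i;

            for (i = 0; i < ms; i++) {
                    if (tr32(reg) & bit)
                            return 0;
                    mdelay(1);
            }
            return -ETIMEDOUT;
    }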
@@ -10695,9 +11075,58 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | |||
10695 | memcpy(tx_data, tp->dev->dev_addr, 6); | 11075 | memcpy(tx_data, tp->dev->dev_addr, 6); |
10696 | memset(tx_data + 6, 0x0, 8); | 11076 | memset(tx_data + 6, 0x0, 8); |
10697 | 11077 | ||
10698 | tw32(MAC_RX_MTU_SIZE, tx_len + 4); | 11078 | tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); |
11079 | |||
11080 | if (loopback_mode == TG3_TSO_LOOPBACK) { | ||
11081 | struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; | ||
11082 | |||
11083 | u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + | ||
11084 | TG3_TSO_TCP_OPT_LEN; | ||
11085 | |||
11086 | memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, | ||
11087 | sizeof(tg3_tso_header)); | ||
11088 | mss = TG3_TSO_MSS; | ||
11089 | |||
11090 | val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); | ||
11091 | num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); | ||
11092 | |||
11093 | /* Set the total length field in the IP header */ | ||
11094 | iph->tot_len = htons((u16)(mss + hdr_len)); | ||
11095 | |||
11096 | base_flags = (TXD_FLAG_CPU_PRE_DMA | | ||
11097 | TXD_FLAG_CPU_POST_DMA); | ||
11098 | |||
11099 | if (tg3_flag(tp, HW_TSO_1) || | ||
11100 | tg3_flag(tp, HW_TSO_2) || | ||
11101 | tg3_flag(tp, HW_TSO_3)) { | ||
11102 | struct tcphdr *th; | ||
11103 | val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; | ||
11104 | th = (struct tcphdr *)&tx_data[val]; | ||
11105 | th->check = 0; | ||
11106 | } else | ||
11107 | base_flags |= TXD_FLAG_TCPUDP_CSUM; | ||
10699 | 11108 | ||
10700 | for (i = 14; i < tx_len; i++) | 11109 | if (tg3_flag(tp, HW_TSO_3)) { |
11110 | mss |= (hdr_len & 0xc) << 12; | ||
11111 | if (hdr_len & 0x10) | ||
11112 | base_flags |= 0x00000010; | ||
11113 | base_flags |= (hdr_len & 0x3e0) << 5; | ||
11114 | } else if (tg3_flag(tp, HW_TSO_2)) | ||
11115 | mss |= hdr_len << 9; | ||
11116 | else if (tg3_flag(tp, HW_TSO_1) || | ||
11117 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { | ||
11118 | mss |= (TG3_TSO_TCP_OPT_LEN << 9); | ||
11119 | } else { | ||
11120 | base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); | ||
11121 | } | ||
11122 | |||
11123 | data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); | ||
11124 | } else { | ||
11125 | num_pkts = 1; | ||
11126 | data_off = ETH_HLEN; | ||
11127 | } | ||
11128 | |||
11129 | for (i = data_off; i < tx_len; i++) | ||
10701 | tx_data[i] = (u8) (i & 0xff); | 11130 | tx_data[i] = (u8) (i & 0xff); |
10702 | 11131 | ||
10703 | map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); | 11132 | map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); |
@@ -10713,12 +11142,10 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | |||
10713 | 11142 | ||
10714 | rx_start_idx = rnapi->hw_status->idx[0].rx_producer; | 11143 | rx_start_idx = rnapi->hw_status->idx[0].rx_producer; |
10715 | 11144 | ||
10716 | num_pkts = 0; | 11145 | tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, |
10717 | 11146 | base_flags, (mss << 1) | 1); | |
10718 | tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1); | ||
10719 | 11147 | ||
10720 | tnapi->tx_prod++; | 11148 | tnapi->tx_prod++; |
10721 | num_pkts++; | ||
10722 | 11149 | ||
10723 | tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); | 11150 | tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); |
10724 | tr32_mailbox(tnapi->prodmbox); | 11151 | tr32_mailbox(tnapi->prodmbox); |
@@ -10748,29 +11175,56 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | |||
10748 | if (rx_idx != rx_start_idx + num_pkts) | 11175 | if (rx_idx != rx_start_idx + num_pkts) |
10749 | goto out; | 11176 | goto out; |
10750 | 11177 | ||
10751 | desc = &rnapi->rx_rcb[rx_start_idx]; | 11178 | val = data_off; |
10752 | desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; | 11179 | while (rx_idx != rx_start_idx) { |
10753 | opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; | 11180 | desc = &rnapi->rx_rcb[rx_start_idx++]; |
10754 | if (opaque_key != RXD_OPAQUE_RING_STD) | 11181 | desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; |
10755 | goto out; | 11182 | opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; |
10756 | 11183 | ||
10757 | if ((desc->err_vlan & RXD_ERR_MASK) != 0 && | 11184 | if ((desc->err_vlan & RXD_ERR_MASK) != 0 && |
10758 | (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) | 11185 | (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) |
10759 | goto out; | 11186 | goto out; |
10760 | 11187 | ||
10761 | rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; | 11188 | rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) |
10762 | if (rx_len != tx_len) | 11189 | - ETH_FCS_LEN; |
10763 | goto out; | ||
10764 | 11190 | ||
10765 | rx_skb = tpr->rx_std_buffers[desc_idx].skb; | 11191 | if (loopback_mode != TG3_TSO_LOOPBACK) { |
11192 | if (rx_len != tx_len) | ||
11193 | goto out; | ||
10766 | 11194 | ||
10767 | map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping); | 11195 | if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { |
10768 | pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE); | 11196 | if (opaque_key != RXD_OPAQUE_RING_STD) |
11197 | goto out; | ||
11198 | } else { | ||
11199 | if (opaque_key != RXD_OPAQUE_RING_JUMBO) | ||
11200 | goto out; | ||
11201 | } | ||
11202 | } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && | ||
11203 | (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) | ||
11204 | >> RXD_TCPCSUM_SHIFT != 0xffff) { | ||
11205 | goto out; | ||
11206 | } | ||
10769 | 11207 | ||
10770 | for (i = 14; i < tx_len; i++) { | 11208 | if (opaque_key == RXD_OPAQUE_RING_STD) { |
10771 | if (*(rx_skb->data + i) != (u8) (i & 0xff)) | 11209 | rx_skb = tpr->rx_std_buffers[desc_idx].skb; |
11210 | map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], | ||
11211 | mapping); | ||
11212 | } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { | ||
11213 | rx_skb = tpr->rx_jmb_buffers[desc_idx].skb; | ||
11214 | map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], | ||
11215 | mapping); | ||
11216 | } else | ||
10772 | goto out; | 11217 | goto out; |
11218 | |||
11219 | pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, | ||
11220 | PCI_DMA_FROMDEVICE); | ||
11221 | |||
11222 | for (i = data_off; i < rx_len; i++, val++) { | ||
11223 | if (*(rx_skb->data + i) != (u8) (val & 0xff)) | ||
11224 | goto out; | ||
11225 | } | ||
10773 | } | 11226 | } |
11227 | |||
10774 | err = 0; | 11228 | err = 0; |
10775 | 11229 | ||
10776 | /* tg3_free_rings will unmap and free the rx_skb */ | 11230 | /* tg3_free_rings will unmap and free the rx_skb */ |
@@ -10778,28 +11232,45 @@ out: | |||
10778 | return err; | 11232 | return err; |
10779 | } | 11233 | } |
10780 | 11234 | ||
10781 | #define TG3_MAC_LOOPBACK_FAILED 1 | 11235 | #define TG3_STD_LOOPBACK_FAILED 1 |
10782 | #define TG3_PHY_LOOPBACK_FAILED 2 | 11236 | #define TG3_JMB_LOOPBACK_FAILED 2 |
10783 | #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \ | 11237 | #define TG3_TSO_LOOPBACK_FAILED 4 |
10784 | TG3_PHY_LOOPBACK_FAILED) | 11238 | |
11239 | #define TG3_MAC_LOOPBACK_SHIFT 0 | ||
11240 | #define TG3_PHY_LOOPBACK_SHIFT 4 | ||
11241 | #define TG3_LOOPBACK_FAILED 0x00000077 | ||
10785 | 11242 | ||
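The loopback result codes change from two flat failure values to a small bitmap: per-test bits (standard frame = 1, jumbo = 2, TSO = 4) shifted into either a MAC nibble (shift 0) or a PHY nibble (shift 4), so TG3_LOOPBACK_FAILED = 0x00000077 marks every sub-test in both modes as failed. For example, a jumbo-frame failure seen only in PHY loopback is reported as

    err |= TG3_JMB_LOOPBACK_FAILED << TG3_PHY_LOOPBACK_SHIFT;   /* 2 << 4 = 0x20 */

while a TSO failure in the same mode contributes 4 << 4 = 0x40, keeping ethtool's single per-test result value able to say exactly which frame size or offload path broke.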
10786 | static int tg3_test_loopback(struct tg3 *tp) | 11243 | static int tg3_test_loopback(struct tg3 *tp) |
10787 | { | 11244 | { |
10788 | int err = 0; | 11245 | int err = 0; |
10789 | u32 cpmuctrl = 0; | 11246 | u32 eee_cap, cpmuctrl = 0; |
10790 | 11247 | ||
10791 | if (!netif_running(tp->dev)) | 11248 | if (!netif_running(tp->dev)) |
10792 | return TG3_LOOPBACK_FAILED; | 11249 | return TG3_LOOPBACK_FAILED; |
10793 | 11250 | ||
11251 | eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; | ||
11252 | tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; | ||
11253 | |||
10794 | err = tg3_reset_hw(tp, 1); | 11254 | err = tg3_reset_hw(tp, 1); |
10795 | if (err) | 11255 | if (err) { |
10796 | return TG3_LOOPBACK_FAILED; | 11256 | err = TG3_LOOPBACK_FAILED; |
11257 | goto done; | ||
11258 | } | ||
11259 | |||
11260 | if (tg3_flag(tp, ENABLE_RSS)) { | ||
11261 | int i; | ||
11262 | |||
11263 | /* Reroute all rx packets to the 1st queue */ | ||
11264 | for (i = MAC_RSS_INDIR_TBL_0; | ||
11265 | i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) | ||
11266 | tw32(i, 0x0); | ||
11267 | } | ||
10797 | 11268 | ||
10798 | /* Turn off gphy autopowerdown. */ | 11269 | /* Turn off gphy autopowerdown. */ |
10799 | if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) | 11270 | if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) |
10800 | tg3_phy_toggle_apd(tp, false); | 11271 | tg3_phy_toggle_apd(tp, false); |
10801 | 11272 | ||
10802 | if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { | 11273 | if (tg3_flag(tp, CPMU_PRESENT)) { |
10803 | int i; | 11274 | int i; |
10804 | u32 status; | 11275 | u32 status; |
10805 | 11276 | ||
@@ -10813,8 +11284,10 @@ static int tg3_test_loopback(struct tg3 *tp) | |||
10813 | udelay(10); | 11284 | udelay(10); |
10814 | } | 11285 | } |
10815 | 11286 | ||
10816 | if (status != CPMU_MUTEX_GNT_DRIVER) | 11287 | if (status != CPMU_MUTEX_GNT_DRIVER) { |
10817 | return TG3_LOOPBACK_FAILED; | 11288 | err = TG3_LOOPBACK_FAILED; |
11289 | goto done; | ||
11290 | } | ||
10818 | 11291 | ||
10819 | /* Turn off link-based power management. */ | 11292 | /* Turn off link-based power management. */ |
10820 | cpmuctrl = tr32(TG3_CPMU_CTRL); | 11293 | cpmuctrl = tr32(TG3_CPMU_CTRL); |
@@ -10823,10 +11296,14 @@ static int tg3_test_loopback(struct tg3 *tp) | |||
10823 | CPMU_CTRL_LINK_AWARE_MODE)); | 11296 | CPMU_CTRL_LINK_AWARE_MODE)); |
10824 | } | 11297 | } |
10825 | 11298 | ||
10826 | if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) | 11299 | if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK)) |
10827 | err |= TG3_MAC_LOOPBACK_FAILED; | 11300 | err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT; |
10828 | 11301 | ||
10829 | if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { | 11302 | if (tg3_flag(tp, JUMBO_RING_ENABLE) && |
11303 | tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK)) | ||
11304 | err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT; | ||
11305 | |||
11306 | if (tg3_flag(tp, CPMU_PRESENT)) { | ||
10830 | tw32(TG3_CPMU_CTRL, cpmuctrl); | 11307 | tw32(TG3_CPMU_CTRL, cpmuctrl); |
10831 | 11308 | ||
10832 | /* Release the mutex */ | 11309 | /* Release the mutex */ |
@@ -10834,15 +11311,27 @@ static int tg3_test_loopback(struct tg3 *tp) | |||
10834 | } | 11311 | } |
10835 | 11312 | ||
10836 | if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && | 11313 | if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && |
10837 | !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { | 11314 | !tg3_flag(tp, USE_PHYLIB)) { |
10838 | if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK)) | 11315 | if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK)) |
10839 | err |= TG3_PHY_LOOPBACK_FAILED; | 11316 | err |= TG3_STD_LOOPBACK_FAILED << |
11317 | TG3_PHY_LOOPBACK_SHIFT; | ||
11318 | if (tg3_flag(tp, TSO_CAPABLE) && | ||
11319 | tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK)) | ||
11320 | err |= TG3_TSO_LOOPBACK_FAILED << | ||
11321 | TG3_PHY_LOOPBACK_SHIFT; | ||
11322 | if (tg3_flag(tp, JUMBO_RING_ENABLE) && | ||
11323 | tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK)) | ||
11324 | err |= TG3_JMB_LOOPBACK_FAILED << | ||
11325 | TG3_PHY_LOOPBACK_SHIFT; | ||
10840 | } | 11326 | } |
10841 | 11327 | ||
10842 | /* Re-enable gphy autopowerdown. */ | 11328 | /* Re-enable gphy autopowerdown. */ |
10843 | if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) | 11329 | if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) |
10844 | tg3_phy_toggle_apd(tp, true); | 11330 | tg3_phy_toggle_apd(tp, true); |
10845 | 11331 | ||
11332 | done: | ||
11333 | tp->phy_flags |= eee_cap; | ||
11334 | |||
10846 | return err; | 11335 | return err; |
10847 | } | 11336 | } |
10848 | 11337 | ||
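tg3_test_loopback() now also masks the EEE capability bit for the duration of the test (restored at the done: label) and, when RSS is enabled, zeroes the RSS indirection table so every looped-back packet is hashed to queue 0, the only return ring the test polls. That rerouting step is just a register-block clear; a sketch, with the register bounds and the tw32() accessor taken from the hunk above:

    u32 reg;

    for (reg = MAC_RSS_INDIR_TBL_0;
         reg < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE;
         reg += 4)
            tw32(reg, 0);           /* every hash bucket -> RX queue 0 */

The table is not restored here; the hardware is reinitialized by the tg3_restart_hw() call later in tg3_self_test() once the tests finish.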
@@ -10852,7 +11341,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, | |||
10852 | struct tg3 *tp = netdev_priv(dev); | 11341 | struct tg3 *tp = netdev_priv(dev); |
10853 | 11342 | ||
10854 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) | 11343 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) |
10855 | tg3_set_power_state(tp, PCI_D0); | 11344 | tg3_power_up(tp); |
10856 | 11345 | ||
10857 | memset(data, 0, sizeof(u64) * TG3_NUM_TEST); | 11346 | memset(data, 0, sizeof(u64) * TG3_NUM_TEST); |
10858 | 11347 | ||
@@ -10878,7 +11367,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, | |||
10878 | tg3_halt(tp, RESET_KIND_SUSPEND, 1); | 11367 | tg3_halt(tp, RESET_KIND_SUSPEND, 1); |
10879 | err = tg3_nvram_lock(tp); | 11368 | err = tg3_nvram_lock(tp); |
10880 | tg3_halt_cpu(tp, RX_CPU_BASE); | 11369 | tg3_halt_cpu(tp, RX_CPU_BASE); |
10881 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 11370 | if (!tg3_flag(tp, 5705_PLUS)) |
10882 | tg3_halt_cpu(tp, TX_CPU_BASE); | 11371 | tg3_halt_cpu(tp, TX_CPU_BASE); |
10883 | if (!err) | 11372 | if (!err) |
10884 | tg3_nvram_unlock(tp); | 11373 | tg3_nvram_unlock(tp); |
@@ -10908,7 +11397,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, | |||
10908 | 11397 | ||
10909 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 11398 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
10910 | if (netif_running(dev)) { | 11399 | if (netif_running(dev)) { |
10911 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 11400 | tg3_flag_set(tp, INIT_COMPLETE); |
10912 | err2 = tg3_restart_hw(tp, 1); | 11401 | err2 = tg3_restart_hw(tp, 1); |
10913 | if (!err2) | 11402 | if (!err2) |
10914 | tg3_netif_start(tp); | 11403 | tg3_netif_start(tp); |
@@ -10920,7 +11409,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, | |||
10920 | tg3_phy_start(tp); | 11409 | tg3_phy_start(tp); |
10921 | } | 11410 | } |
10922 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) | 11411 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) |
10923 | tg3_set_power_state(tp, PCI_D3hot); | 11412 | tg3_power_down(tp); |
10924 | 11413 | ||
10925 | } | 11414 | } |
10926 | 11415 | ||
@@ -10930,7 +11419,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
10930 | struct tg3 *tp = netdev_priv(dev); | 11419 | struct tg3 *tp = netdev_priv(dev); |
10931 | int err; | 11420 | int err; |
10932 | 11421 | ||
10933 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 11422 | if (tg3_flag(tp, USE_PHYLIB)) { |
10934 | struct phy_device *phydev; | 11423 | struct phy_device *phydev; |
10935 | if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) | 11424 | if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) |
10936 | return -EAGAIN; | 11425 | return -EAGAIN; |
@@ -10949,7 +11438,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
10949 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) | 11438 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) |
10950 | break; /* We have no PHY */ | 11439 | break; /* We have no PHY */ |
10951 | 11440 | ||
10952 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) | 11441 | if (!netif_running(dev)) |
10953 | return -EAGAIN; | 11442 | return -EAGAIN; |
10954 | 11443 | ||
10955 | spin_lock_bh(&tp->lock); | 11444 | spin_lock_bh(&tp->lock); |
@@ -10965,7 +11454,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
10965 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) | 11454 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) |
10966 | break; /* We have no PHY */ | 11455 | break; /* We have no PHY */ |
10967 | 11456 | ||
10968 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) | 11457 | if (!netif_running(dev)) |
10969 | return -EAGAIN; | 11458 | return -EAGAIN; |
10970 | 11459 | ||
10971 | spin_lock_bh(&tp->lock); | 11460 | spin_lock_bh(&tp->lock); |
@@ -10981,31 +11470,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
10981 | return -EOPNOTSUPP; | 11470 | return -EOPNOTSUPP; |
10982 | } | 11471 | } |
10983 | 11472 | ||
10984 | #if TG3_VLAN_TAG_USED | ||
10985 | static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | ||
10986 | { | ||
10987 | struct tg3 *tp = netdev_priv(dev); | ||
10988 | |||
10989 | if (!netif_running(dev)) { | ||
10990 | tp->vlgrp = grp; | ||
10991 | return; | ||
10992 | } | ||
10993 | |||
10994 | tg3_netif_stop(tp); | ||
10995 | |||
10996 | tg3_full_lock(tp, 0); | ||
10997 | |||
10998 | tp->vlgrp = grp; | ||
10999 | |||
11000 | /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */ | ||
11001 | __tg3_set_rx_mode(dev); | ||
11002 | |||
11003 | tg3_netif_start(tp); | ||
11004 | |||
11005 | tg3_full_unlock(tp); | ||
11006 | } | ||
11007 | #endif | ||
11008 | |||
11009 | static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) | 11473 | static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) |
11010 | { | 11474 | { |
11011 | struct tg3 *tp = netdev_priv(dev); | 11475 | struct tg3 *tp = netdev_priv(dev); |
@@ -11020,7 +11484,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) | |||
11020 | u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; | 11484 | u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; |
11021 | u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; | 11485 | u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; |
11022 | 11486 | ||
11023 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 11487 | if (!tg3_flag(tp, 5705_PLUS)) { |
11024 | max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; | 11488 | max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; |
11025 | max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; | 11489 | max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; |
11026 | max_stat_coal_ticks = MAX_STAT_COAL_TICKS; | 11490 | max_stat_coal_ticks = MAX_STAT_COAL_TICKS; |
@@ -11087,14 +11551,9 @@ static const struct ethtool_ops tg3_ethtool_ops = { | |||
11087 | .set_ringparam = tg3_set_ringparam, | 11551 | .set_ringparam = tg3_set_ringparam, |
11088 | .get_pauseparam = tg3_get_pauseparam, | 11552 | .get_pauseparam = tg3_get_pauseparam, |
11089 | .set_pauseparam = tg3_set_pauseparam, | 11553 | .set_pauseparam = tg3_set_pauseparam, |
11090 | .get_rx_csum = tg3_get_rx_csum, | ||
11091 | .set_rx_csum = tg3_set_rx_csum, | ||
11092 | .set_tx_csum = tg3_set_tx_csum, | ||
11093 | .set_sg = ethtool_op_set_sg, | ||
11094 | .set_tso = tg3_set_tso, | ||
11095 | .self_test = tg3_self_test, | 11554 | .self_test = tg3_self_test, |
11096 | .get_strings = tg3_get_strings, | 11555 | .get_strings = tg3_get_strings, |
11097 | .phys_id = tg3_phys_id, | 11556 | .set_phys_id = tg3_set_phys_id, |
11098 | .get_ethtool_stats = tg3_get_ethtool_stats, | 11557 | .get_ethtool_stats = tg3_get_ethtool_stats, |
11099 | .get_coalesce = tg3_get_coalesce, | 11558 | .get_coalesce = tg3_get_coalesce, |
11100 | .set_coalesce = tg3_set_coalesce, | 11559 | .set_coalesce = tg3_set_coalesce, |
@@ -11139,8 +11598,7 @@ static void __devinit tg3_get_nvram_size(struct tg3 *tp) | |||
11139 | { | 11598 | { |
11140 | u32 val; | 11599 | u32 val; |
11141 | 11600 | ||
11142 | if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || | 11601 | if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0) |
11143 | tg3_nvram_read(tp, 0, &val) != 0) | ||
11144 | return; | 11602 | return; |
11145 | 11603 | ||
11146 | /* Selfboot format */ | 11604 | /* Selfboot format */ |
@@ -11175,19 +11633,19 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp) | |||
11175 | 11633 | ||
11176 | nvcfg1 = tr32(NVRAM_CFG1); | 11634 | nvcfg1 = tr32(NVRAM_CFG1); |
11177 | if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { | 11635 | if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { |
11178 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 11636 | tg3_flag_set(tp, FLASH); |
11179 | } else { | 11637 | } else { |
11180 | nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; | 11638 | nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; |
11181 | tw32(NVRAM_CFG1, nvcfg1); | 11639 | tw32(NVRAM_CFG1, nvcfg1); |
11182 | } | 11640 | } |
11183 | 11641 | ||
11184 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) || | 11642 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || |
11185 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { | 11643 | tg3_flag(tp, 5780_CLASS)) { |
11186 | switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { | 11644 | switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { |
11187 | case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: | 11645 | case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: |
11188 | tp->nvram_jedecnum = JEDEC_ATMEL; | 11646 | tp->nvram_jedecnum = JEDEC_ATMEL; |
11189 | tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; | 11647 | tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; |
11190 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11648 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11191 | break; | 11649 | break; |
11192 | case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: | 11650 | case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: |
11193 | tp->nvram_jedecnum = JEDEC_ATMEL; | 11651 | tp->nvram_jedecnum = JEDEC_ATMEL; |
@@ -11196,12 +11654,12 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp) | |||
11196 | case FLASH_VENDOR_ATMEL_EEPROM: | 11654 | case FLASH_VENDOR_ATMEL_EEPROM: |
11197 | tp->nvram_jedecnum = JEDEC_ATMEL; | 11655 | tp->nvram_jedecnum = JEDEC_ATMEL; |
11198 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; | 11656 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; |
11199 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11657 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11200 | break; | 11658 | break; |
11201 | case FLASH_VENDOR_ST: | 11659 | case FLASH_VENDOR_ST: |
11202 | tp->nvram_jedecnum = JEDEC_ST; | 11660 | tp->nvram_jedecnum = JEDEC_ST; |
11203 | tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; | 11661 | tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; |
11204 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11662 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11205 | break; | 11663 | break; |
11206 | case FLASH_VENDOR_SAIFUN: | 11664 | case FLASH_VENDOR_SAIFUN: |
11207 | tp->nvram_jedecnum = JEDEC_SAIFUN; | 11665 | tp->nvram_jedecnum = JEDEC_SAIFUN; |
@@ -11216,7 +11674,7 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp) | |||
11216 | } else { | 11674 | } else { |
11217 | tp->nvram_jedecnum = JEDEC_ATMEL; | 11675 | tp->nvram_jedecnum = JEDEC_ATMEL; |
11218 | tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; | 11676 | tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; |
11219 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11677 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11220 | } | 11678 | } |
11221 | } | 11679 | } |
11222 | 11680 | ||
@@ -11255,29 +11713,29 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) | |||
11255 | 11713 | ||
11256 | /* NVRAM protection for TPM */ | 11714 | /* NVRAM protection for TPM */ |
11257 | if (nvcfg1 & (1 << 27)) | 11715 | if (nvcfg1 & (1 << 27)) |
11258 | tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; | 11716 | tg3_flag_set(tp, PROTECTED_NVRAM); |
11259 | 11717 | ||
11260 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | 11718 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { |
11261 | case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: | 11719 | case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: |
11262 | case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: | 11720 | case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: |
11263 | tp->nvram_jedecnum = JEDEC_ATMEL; | 11721 | tp->nvram_jedecnum = JEDEC_ATMEL; |
11264 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11722 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11265 | break; | 11723 | break; |
11266 | case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: | 11724 | case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: |
11267 | tp->nvram_jedecnum = JEDEC_ATMEL; | 11725 | tp->nvram_jedecnum = JEDEC_ATMEL; |
11268 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11726 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11269 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 11727 | tg3_flag_set(tp, FLASH); |
11270 | break; | 11728 | break; |
11271 | case FLASH_5752VENDOR_ST_M45PE10: | 11729 | case FLASH_5752VENDOR_ST_M45PE10: |
11272 | case FLASH_5752VENDOR_ST_M45PE20: | 11730 | case FLASH_5752VENDOR_ST_M45PE20: |
11273 | case FLASH_5752VENDOR_ST_M45PE40: | 11731 | case FLASH_5752VENDOR_ST_M45PE40: |
11274 | tp->nvram_jedecnum = JEDEC_ST; | 11732 | tp->nvram_jedecnum = JEDEC_ST; |
11275 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11733 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11276 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 11734 | tg3_flag_set(tp, FLASH); |
11277 | break; | 11735 | break; |
11278 | } | 11736 | } |
11279 | 11737 | ||
11280 | if (tp->tg3_flags2 & TG3_FLG2_FLASH) { | 11738 | if (tg3_flag(tp, FLASH)) { |
11281 | tg3_nvram_get_pagesize(tp, nvcfg1); | 11739 | tg3_nvram_get_pagesize(tp, nvcfg1); |
11282 | } else { | 11740 | } else { |
11283 | /* For eeprom, set pagesize to maximum eeprom size */ | 11741 | /* For eeprom, set pagesize to maximum eeprom size */ |
@@ -11296,7 +11754,7 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) | |||
11296 | 11754 | ||
11297 | /* NVRAM protection for TPM */ | 11755 | /* NVRAM protection for TPM */ |
11298 | if (nvcfg1 & (1 << 27)) { | 11756 | if (nvcfg1 & (1 << 27)) { |
11299 | tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; | 11757 | tg3_flag_set(tp, PROTECTED_NVRAM); |
11300 | protect = 1; | 11758 | protect = 1; |
11301 | } | 11759 | } |
11302 | 11760 | ||
@@ -11307,8 +11765,8 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) | |||
11307 | case FLASH_5755VENDOR_ATMEL_FLASH_3: | 11765 | case FLASH_5755VENDOR_ATMEL_FLASH_3: |
11308 | case FLASH_5755VENDOR_ATMEL_FLASH_5: | 11766 | case FLASH_5755VENDOR_ATMEL_FLASH_5: |
11309 | tp->nvram_jedecnum = JEDEC_ATMEL; | 11767 | tp->nvram_jedecnum = JEDEC_ATMEL; |
11310 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11768 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11311 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 11769 | tg3_flag_set(tp, FLASH); |
11312 | tp->nvram_pagesize = 264; | 11770 | tp->nvram_pagesize = 264; |
11313 | if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || | 11771 | if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || |
11314 | nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) | 11772 | nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) |
@@ -11325,8 +11783,8 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) | |||
11325 | case FLASH_5752VENDOR_ST_M45PE20: | 11783 | case FLASH_5752VENDOR_ST_M45PE20: |
11326 | case FLASH_5752VENDOR_ST_M45PE40: | 11784 | case FLASH_5752VENDOR_ST_M45PE40: |
11327 | tp->nvram_jedecnum = JEDEC_ST; | 11785 | tp->nvram_jedecnum = JEDEC_ST; |
11328 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11786 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11329 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 11787 | tg3_flag_set(tp, FLASH); |
11330 | tp->nvram_pagesize = 256; | 11788 | tp->nvram_pagesize = 256; |
11331 | if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) | 11789 | if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) |
11332 | tp->nvram_size = (protect ? | 11790 | tp->nvram_size = (protect ? |
@@ -11356,7 +11814,7 @@ static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) | |||
11356 | case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: | 11814 | case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: |
11357 | case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: | 11815 | case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: |
11358 | tp->nvram_jedecnum = JEDEC_ATMEL; | 11816 | tp->nvram_jedecnum = JEDEC_ATMEL; |
11359 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11817 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11360 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; | 11818 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; |
11361 | 11819 | ||
11362 | nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; | 11820 | nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; |
@@ -11367,16 +11825,16 @@ static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) | |||
11367 | case FLASH_5755VENDOR_ATMEL_FLASH_2: | 11825 | case FLASH_5755VENDOR_ATMEL_FLASH_2: |
11368 | case FLASH_5755VENDOR_ATMEL_FLASH_3: | 11826 | case FLASH_5755VENDOR_ATMEL_FLASH_3: |
11369 | tp->nvram_jedecnum = JEDEC_ATMEL; | 11827 | tp->nvram_jedecnum = JEDEC_ATMEL; |
11370 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11828 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11371 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 11829 | tg3_flag_set(tp, FLASH); |
11372 | tp->nvram_pagesize = 264; | 11830 | tp->nvram_pagesize = 264; |
11373 | break; | 11831 | break; |
11374 | case FLASH_5752VENDOR_ST_M45PE10: | 11832 | case FLASH_5752VENDOR_ST_M45PE10: |
11375 | case FLASH_5752VENDOR_ST_M45PE20: | 11833 | case FLASH_5752VENDOR_ST_M45PE20: |
11376 | case FLASH_5752VENDOR_ST_M45PE40: | 11834 | case FLASH_5752VENDOR_ST_M45PE40: |
11377 | tp->nvram_jedecnum = JEDEC_ST; | 11835 | tp->nvram_jedecnum = JEDEC_ST; |
11378 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11836 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11379 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 11837 | tg3_flag_set(tp, FLASH); |
11380 | tp->nvram_pagesize = 256; | 11838 | tp->nvram_pagesize = 256; |
11381 | break; | 11839 | break; |
11382 | } | 11840 | } |
@@ -11390,7 +11848,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) | |||
11390 | 11848 | ||
11391 | /* NVRAM protection for TPM */ | 11849 | /* NVRAM protection for TPM */ |
11392 | if (nvcfg1 & (1 << 27)) { | 11850 | if (nvcfg1 & (1 << 27)) { |
11393 | tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; | 11851 | tg3_flag_set(tp, PROTECTED_NVRAM); |
11394 | protect = 1; | 11852 | protect = 1; |
11395 | } | 11853 | } |
11396 | 11854 | ||
@@ -11405,9 +11863,9 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) | |||
11405 | case FLASH_5761VENDOR_ATMEL_MDB081D: | 11863 | case FLASH_5761VENDOR_ATMEL_MDB081D: |
11406 | case FLASH_5761VENDOR_ATMEL_MDB161D: | 11864 | case FLASH_5761VENDOR_ATMEL_MDB161D: |
11407 | tp->nvram_jedecnum = JEDEC_ATMEL; | 11865 | tp->nvram_jedecnum = JEDEC_ATMEL; |
11408 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11866 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11409 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 11867 | tg3_flag_set(tp, FLASH); |
11410 | tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; | 11868 | tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); |
11411 | tp->nvram_pagesize = 256; | 11869 | tp->nvram_pagesize = 256; |
11412 | break; | 11870 | break; |
11413 | case FLASH_5761VENDOR_ST_A_M45PE20: | 11871 | case FLASH_5761VENDOR_ST_A_M45PE20: |
@@ -11419,8 +11877,8 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) | |||
11419 | case FLASH_5761VENDOR_ST_M_M45PE80: | 11877 | case FLASH_5761VENDOR_ST_M_M45PE80: |
11420 | case FLASH_5761VENDOR_ST_M_M45PE16: | 11878 | case FLASH_5761VENDOR_ST_M_M45PE16: |
11421 | tp->nvram_jedecnum = JEDEC_ST; | 11879 | tp->nvram_jedecnum = JEDEC_ST; |
11422 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11880 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11423 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 11881 | tg3_flag_set(tp, FLASH); |
11424 | tp->nvram_pagesize = 256; | 11882 | tp->nvram_pagesize = 256; |
11425 | break; | 11883 | break; |
11426 | } | 11884 | } |
@@ -11460,7 +11918,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) | |||
11460 | static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) | 11918 | static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) |
11461 | { | 11919 | { |
11462 | tp->nvram_jedecnum = JEDEC_ATMEL; | 11920 | tp->nvram_jedecnum = JEDEC_ATMEL; |
11463 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11921 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11464 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; | 11922 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; |
11465 | } | 11923 | } |
11466 | 11924 | ||
@@ -11474,7 +11932,7 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) | |||
11474 | case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: | 11932 | case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: |
11475 | case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: | 11933 | case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: |
11476 | tp->nvram_jedecnum = JEDEC_ATMEL; | 11934 | tp->nvram_jedecnum = JEDEC_ATMEL; |
11477 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11935 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11478 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; | 11936 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; |
11479 | 11937 | ||
11480 | nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; | 11938 | nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; |
@@ -11488,8 +11946,8 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) | |||
11488 | case FLASH_57780VENDOR_ATMEL_AT45DB041D: | 11946 | case FLASH_57780VENDOR_ATMEL_AT45DB041D: |
11489 | case FLASH_57780VENDOR_ATMEL_AT45DB041B: | 11947 | case FLASH_57780VENDOR_ATMEL_AT45DB041B: |
11490 | tp->nvram_jedecnum = JEDEC_ATMEL; | 11948 | tp->nvram_jedecnum = JEDEC_ATMEL; |
11491 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11949 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11492 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 11950 | tg3_flag_set(tp, FLASH); |
11493 | 11951 | ||
11494 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | 11952 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { |
11495 | case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: | 11953 | case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: |
@@ -11511,8 +11969,8 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) | |||
11511 | case FLASH_5752VENDOR_ST_M45PE20: | 11969 | case FLASH_5752VENDOR_ST_M45PE20: |
11512 | case FLASH_5752VENDOR_ST_M45PE40: | 11970 | case FLASH_5752VENDOR_ST_M45PE40: |
11513 | tp->nvram_jedecnum = JEDEC_ST; | 11971 | tp->nvram_jedecnum = JEDEC_ST; |
11514 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 11972 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11515 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 11973 | tg3_flag_set(tp, FLASH); |
11516 | 11974 | ||
11517 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | 11975 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { |
11518 | case FLASH_5752VENDOR_ST_M45PE10: | 11976 | case FLASH_5752VENDOR_ST_M45PE10: |
@@ -11527,13 +11985,13 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) | |||
11527 | } | 11985 | } |
11528 | break; | 11986 | break; |
11529 | default: | 11987 | default: |
11530 | tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM; | 11988 | tg3_flag_set(tp, NO_NVRAM); |
11531 | return; | 11989 | return; |
11532 | } | 11990 | } |
11533 | 11991 | ||
11534 | tg3_nvram_get_pagesize(tp, nvcfg1); | 11992 | tg3_nvram_get_pagesize(tp, nvcfg1); |
11535 | if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) | 11993 | if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) |
11536 | tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; | 11994 | tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); |
11537 | } | 11995 | } |
11538 | 11996 | ||
11539 | 11997 | ||
@@ -11547,7 +12005,7 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) | |||
11547 | case FLASH_5717VENDOR_ATMEL_EEPROM: | 12005 | case FLASH_5717VENDOR_ATMEL_EEPROM: |
11548 | case FLASH_5717VENDOR_MICRO_EEPROM: | 12006 | case FLASH_5717VENDOR_MICRO_EEPROM: |
11549 | tp->nvram_jedecnum = JEDEC_ATMEL; | 12007 | tp->nvram_jedecnum = JEDEC_ATMEL; |
11550 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 12008 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11551 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; | 12009 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; |
11552 | 12010 | ||
11553 | nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; | 12011 | nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; |
@@ -11561,11 +12019,13 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) | |||
11561 | case FLASH_5717VENDOR_ATMEL_ADB021D: | 12019 | case FLASH_5717VENDOR_ATMEL_ADB021D: |
11562 | case FLASH_5717VENDOR_ATMEL_45USPT: | 12020 | case FLASH_5717VENDOR_ATMEL_45USPT: |
11563 | tp->nvram_jedecnum = JEDEC_ATMEL; | 12021 | tp->nvram_jedecnum = JEDEC_ATMEL; |
11564 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 12022 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11565 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 12023 | tg3_flag_set(tp, FLASH); |
11566 | 12024 | ||
11567 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | 12025 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { |
11568 | case FLASH_5717VENDOR_ATMEL_MDB021D: | 12026 | case FLASH_5717VENDOR_ATMEL_MDB021D: |
12027 | /* Detect size with tg3_nvram_get_size() */ | ||
12028 | break; | ||
11569 | case FLASH_5717VENDOR_ATMEL_ADB021B: | 12029 | case FLASH_5717VENDOR_ATMEL_ADB021B: |
11570 | case FLASH_5717VENDOR_ATMEL_ADB021D: | 12030 | case FLASH_5717VENDOR_ATMEL_ADB021D: |
11571 | tp->nvram_size = TG3_NVRAM_SIZE_256KB; | 12031 | tp->nvram_size = TG3_NVRAM_SIZE_256KB; |
@@ -11586,13 +12046,15 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) | |||
11586 | case FLASH_5717VENDOR_ST_25USPT: | 12046 | case FLASH_5717VENDOR_ST_25USPT: |
11587 | case FLASH_5717VENDOR_ST_45USPT: | 12047 | case FLASH_5717VENDOR_ST_45USPT: |
11588 | tp->nvram_jedecnum = JEDEC_ST; | 12048 | tp->nvram_jedecnum = JEDEC_ST; |
11589 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 12049 | tg3_flag_set(tp, NVRAM_BUFFERED); |
11590 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 12050 | tg3_flag_set(tp, FLASH); |
11591 | 12051 | ||
11592 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | 12052 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { |
11593 | case FLASH_5717VENDOR_ST_M_M25PE20: | 12053 | case FLASH_5717VENDOR_ST_M_M25PE20: |
11594 | case FLASH_5717VENDOR_ST_A_M25PE20: | ||
11595 | case FLASH_5717VENDOR_ST_M_M45PE20: | 12054 | case FLASH_5717VENDOR_ST_M_M45PE20: |
12055 | /* Detect size with tg3_nvram_get_size() */ | ||
12056 | break; | ||
12057 | case FLASH_5717VENDOR_ST_A_M25PE20: | ||
11596 | case FLASH_5717VENDOR_ST_A_M45PE20: | 12058 | case FLASH_5717VENDOR_ST_A_M45PE20: |
11597 | tp->nvram_size = TG3_NVRAM_SIZE_256KB; | 12059 | tp->nvram_size = TG3_NVRAM_SIZE_256KB; |
11598 | break; | 12060 | break; |
@@ -11602,13 +12064,125 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) | |||
11602 | } | 12064 | } |
11603 | break; | 12065 | break; |
11604 | default: | 12066 | default: |
11605 | tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM; | 12067 | tg3_flag_set(tp, NO_NVRAM); |
12068 | return; | ||
12069 | } | ||
12070 | |||
12071 | tg3_nvram_get_pagesize(tp, nvcfg1); | ||
12072 | if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) | ||
12073 | tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); | ||
12074 | } | ||
12075 | |||
12076 | static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp) | ||
12077 | { | ||
12078 | u32 nvcfg1, nvmpinstrp; | ||
12079 | |||
12080 | nvcfg1 = tr32(NVRAM_CFG1); | ||
12081 | nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; | ||
12082 | |||
12083 | switch (nvmpinstrp) { | ||
12084 | case FLASH_5720_EEPROM_HD: | ||
12085 | case FLASH_5720_EEPROM_LD: | ||
12086 | tp->nvram_jedecnum = JEDEC_ATMEL; | ||
12087 | tg3_flag_set(tp, NVRAM_BUFFERED); | ||
12088 | |||
12089 | nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; | ||
12090 | tw32(NVRAM_CFG1, nvcfg1); | ||
12091 | if (nvmpinstrp == FLASH_5720_EEPROM_HD) | ||
12092 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; | ||
12093 | else | ||
12094 | tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; | ||
12095 | return; | ||
12096 | case FLASH_5720VENDOR_M_ATMEL_DB011D: | ||
12097 | case FLASH_5720VENDOR_A_ATMEL_DB011B: | ||
12098 | case FLASH_5720VENDOR_A_ATMEL_DB011D: | ||
12099 | case FLASH_5720VENDOR_M_ATMEL_DB021D: | ||
12100 | case FLASH_5720VENDOR_A_ATMEL_DB021B: | ||
12101 | case FLASH_5720VENDOR_A_ATMEL_DB021D: | ||
12102 | case FLASH_5720VENDOR_M_ATMEL_DB041D: | ||
12103 | case FLASH_5720VENDOR_A_ATMEL_DB041B: | ||
12104 | case FLASH_5720VENDOR_A_ATMEL_DB041D: | ||
12105 | case FLASH_5720VENDOR_M_ATMEL_DB081D: | ||
12106 | case FLASH_5720VENDOR_A_ATMEL_DB081D: | ||
12107 | case FLASH_5720VENDOR_ATMEL_45USPT: | ||
12108 | tp->nvram_jedecnum = JEDEC_ATMEL; | ||
12109 | tg3_flag_set(tp, NVRAM_BUFFERED); | ||
12110 | tg3_flag_set(tp, FLASH); | ||
12111 | |||
12112 | switch (nvmpinstrp) { | ||
12113 | case FLASH_5720VENDOR_M_ATMEL_DB021D: | ||
12114 | case FLASH_5720VENDOR_A_ATMEL_DB021B: | ||
12115 | case FLASH_5720VENDOR_A_ATMEL_DB021D: | ||
12116 | tp->nvram_size = TG3_NVRAM_SIZE_256KB; | ||
12117 | break; | ||
12118 | case FLASH_5720VENDOR_M_ATMEL_DB041D: | ||
12119 | case FLASH_5720VENDOR_A_ATMEL_DB041B: | ||
12120 | case FLASH_5720VENDOR_A_ATMEL_DB041D: | ||
12121 | tp->nvram_size = TG3_NVRAM_SIZE_512KB; | ||
12122 | break; | ||
12123 | case FLASH_5720VENDOR_M_ATMEL_DB081D: | ||
12124 | case FLASH_5720VENDOR_A_ATMEL_DB081D: | ||
12125 | tp->nvram_size = TG3_NVRAM_SIZE_1MB; | ||
12126 | break; | ||
12127 | default: | ||
12128 | tp->nvram_size = TG3_NVRAM_SIZE_128KB; | ||
12129 | break; | ||
12130 | } | ||
12131 | break; | ||
12132 | case FLASH_5720VENDOR_M_ST_M25PE10: | ||
12133 | case FLASH_5720VENDOR_M_ST_M45PE10: | ||
12134 | case FLASH_5720VENDOR_A_ST_M25PE10: | ||
12135 | case FLASH_5720VENDOR_A_ST_M45PE10: | ||
12136 | case FLASH_5720VENDOR_M_ST_M25PE20: | ||
12137 | case FLASH_5720VENDOR_M_ST_M45PE20: | ||
12138 | case FLASH_5720VENDOR_A_ST_M25PE20: | ||
12139 | case FLASH_5720VENDOR_A_ST_M45PE20: | ||
12140 | case FLASH_5720VENDOR_M_ST_M25PE40: | ||
12141 | case FLASH_5720VENDOR_M_ST_M45PE40: | ||
12142 | case FLASH_5720VENDOR_A_ST_M25PE40: | ||
12143 | case FLASH_5720VENDOR_A_ST_M45PE40: | ||
12144 | case FLASH_5720VENDOR_M_ST_M25PE80: | ||
12145 | case FLASH_5720VENDOR_M_ST_M45PE80: | ||
12146 | case FLASH_5720VENDOR_A_ST_M25PE80: | ||
12147 | case FLASH_5720VENDOR_A_ST_M45PE80: | ||
12148 | case FLASH_5720VENDOR_ST_25USPT: | ||
12149 | case FLASH_5720VENDOR_ST_45USPT: | ||
12150 | tp->nvram_jedecnum = JEDEC_ST; | ||
12151 | tg3_flag_set(tp, NVRAM_BUFFERED); | ||
12152 | tg3_flag_set(tp, FLASH); | ||
12153 | |||
12154 | switch (nvmpinstrp) { | ||
12155 | case FLASH_5720VENDOR_M_ST_M25PE20: | ||
12156 | case FLASH_5720VENDOR_M_ST_M45PE20: | ||
12157 | case FLASH_5720VENDOR_A_ST_M25PE20: | ||
12158 | case FLASH_5720VENDOR_A_ST_M45PE20: | ||
12159 | tp->nvram_size = TG3_NVRAM_SIZE_256KB; | ||
12160 | break; | ||
12161 | case FLASH_5720VENDOR_M_ST_M25PE40: | ||
12162 | case FLASH_5720VENDOR_M_ST_M45PE40: | ||
12163 | case FLASH_5720VENDOR_A_ST_M25PE40: | ||
12164 | case FLASH_5720VENDOR_A_ST_M45PE40: | ||
12165 | tp->nvram_size = TG3_NVRAM_SIZE_512KB; | ||
12166 | break; | ||
12167 | case FLASH_5720VENDOR_M_ST_M25PE80: | ||
12168 | case FLASH_5720VENDOR_M_ST_M45PE80: | ||
12169 | case FLASH_5720VENDOR_A_ST_M25PE80: | ||
12170 | case FLASH_5720VENDOR_A_ST_M45PE80: | ||
12171 | tp->nvram_size = TG3_NVRAM_SIZE_1MB; | ||
12172 | break; | ||
12173 | default: | ||
12174 | tp->nvram_size = TG3_NVRAM_SIZE_128KB; | ||
12175 | break; | ||
12176 | } | ||
12177 | break; | ||
12178 | default: | ||
12179 | tg3_flag_set(tp, NO_NVRAM); | ||
11606 | return; | 12180 | return; |
11607 | } | 12181 | } |
11608 | 12182 | ||
11609 | tg3_nvram_get_pagesize(tp, nvcfg1); | 12183 | tg3_nvram_get_pagesize(tp, nvcfg1); |
11610 | if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) | 12184 | if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) |
11611 | tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; | 12185 | tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); |
11612 | } | 12186 | } |
11613 | 12187 | ||
11614 | /* Chips other than 5700/5701 use the NVRAM for fetching info. */ | 12188 | /* Chips other than 5700/5701 use the NVRAM for fetching info. */ |
@@ -11628,7 +12202,7 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) | |||
11628 | 12202 | ||
11629 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && | 12203 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && |
11630 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { | 12204 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { |
11631 | tp->tg3_flags |= TG3_FLAG_NVRAM; | 12205 | tg3_flag_set(tp, NVRAM); |
11632 | 12206 | ||
11633 | if (tg3_nvram_lock(tp)) { | 12207 | if (tg3_nvram_lock(tp)) { |
11634 | netdev_warn(tp->dev, | 12208 | netdev_warn(tp->dev, |
@@ -11658,6 +12232,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) | |||
11658 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 12232 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
11659 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) | 12233 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) |
11660 | tg3_get_5717_nvram_info(tp); | 12234 | tg3_get_5717_nvram_info(tp); |
12235 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) | ||
12236 | tg3_get_5720_nvram_info(tp); | ||
11661 | else | 12237 | else |
11662 | tg3_get_nvram_info(tp); | 12238 | tg3_get_nvram_info(tp); |
11663 | 12239 | ||
@@ -11668,7 +12244,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) | |||
11668 | tg3_nvram_unlock(tp); | 12244 | tg3_nvram_unlock(tp); |
11669 | 12245 | ||
11670 | } else { | 12246 | } else { |
11671 | tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED); | 12247 | tg3_flag_clear(tp, NVRAM); |
12248 | tg3_flag_clear(tp, NVRAM_BUFFERED); | ||
11672 | 12249 | ||
11673 | tg3_get_eeprom_size(tp); | 12250 | tg3_get_eeprom_size(tp); |
11674 | } | 12251 | } |
@@ -11851,7 +12428,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, | |||
11851 | nvram_cmd |= NVRAM_CMD_LAST; | 12428 | nvram_cmd |= NVRAM_CMD_LAST; |
11852 | 12429 | ||
11853 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && | 12430 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && |
11854 | !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && | 12431 | !tg3_flag(tp, 5755_PLUS) && |
11855 | (tp->nvram_jedecnum == JEDEC_ST) && | 12432 | (tp->nvram_jedecnum == JEDEC_ST) && |
11856 | (nvram_cmd & NVRAM_CMD_FIRST)) { | 12433 | (nvram_cmd & NVRAM_CMD_FIRST)) { |
11857 | 12434 | ||
@@ -11861,7 +12438,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, | |||
11861 | 12438 | ||
11862 | break; | 12439 | break; |
11863 | } | 12440 | } |
11864 | if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) { | 12441 | if (!tg3_flag(tp, FLASH)) { |
11865 | /* We always do complete word writes to eeprom. */ | 12442 | /* We always do complete word writes to eeprom. */ |
11866 | nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); | 12443 | nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); |
11867 | } | 12444 | } |
@@ -11877,13 +12454,13 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) | |||
11877 | { | 12454 | { |
11878 | int ret; | 12455 | int ret; |
11879 | 12456 | ||
11880 | if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { | 12457 | if (tg3_flag(tp, EEPROM_WRITE_PROT)) { |
11881 | tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & | 12458 | tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & |
11882 | ~GRC_LCLCTRL_GPIO_OUTPUT1); | 12459 | ~GRC_LCLCTRL_GPIO_OUTPUT1); |
11883 | udelay(40); | 12460 | udelay(40); |
11884 | } | 12461 | } |
11885 | 12462 | ||
11886 | if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) { | 12463 | if (!tg3_flag(tp, NVRAM)) { |
11887 | ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); | 12464 | ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); |
11888 | } else { | 12465 | } else { |
11889 | u32 grc_mode; | 12466 | u32 grc_mode; |
@@ -11893,16 +12470,13 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) | |||
11893 | return ret; | 12470 | return ret; |
11894 | 12471 | ||
11895 | tg3_enable_nvram_access(tp); | 12472 | tg3_enable_nvram_access(tp); |
11896 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 12473 | if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) |
11897 | !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) | ||
11898 | tw32(NVRAM_WRITE1, 0x406); | 12474 | tw32(NVRAM_WRITE1, 0x406); |
11899 | 12475 | ||
11900 | grc_mode = tr32(GRC_MODE); | 12476 | grc_mode = tr32(GRC_MODE); |
11901 | tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); | 12477 | tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); |
11902 | 12478 | ||
11903 | if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) || | 12479 | if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { |
11904 | !(tp->tg3_flags2 & TG3_FLG2_FLASH)) { | ||
11905 | |||
11906 | ret = tg3_nvram_write_block_buffered(tp, offset, len, | 12480 | ret = tg3_nvram_write_block_buffered(tp, offset, len, |
11907 | buf); | 12481 | buf); |
11908 | } else { | 12482 | } else { |
@@ -11917,7 +12491,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) | |||
11917 | tg3_nvram_unlock(tp); | 12491 | tg3_nvram_unlock(tp); |
11918 | } | 12492 | } |
11919 | 12493 | ||
11920 | if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { | 12494 | if (tg3_flag(tp, EEPROM_WRITE_PROT)) { |
11921 | tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); | 12495 | tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); |
11922 | udelay(40); | 12496 | udelay(40); |
11923 | } | 12497 | } |
@@ -12039,19 +12613,22 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) | |||
12039 | tp->led_ctrl = LED_CTRL_MODE_PHY_1; | 12613 | tp->led_ctrl = LED_CTRL_MODE_PHY_1; |
12040 | 12614 | ||
12041 | /* Assume an onboard device and WOL capable by default. */ | 12615 | /* Assume an onboard device and WOL capable by default. */ |
12042 | tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP; | 12616 | tg3_flag_set(tp, EEPROM_WRITE_PROT); |
12617 | tg3_flag_set(tp, WOL_CAP); | ||
12043 | 12618 | ||
12044 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 12619 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
12045 | if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { | 12620 | if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { |
12046 | tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; | 12621 | tg3_flag_clear(tp, EEPROM_WRITE_PROT); |
12047 | tp->tg3_flags2 |= TG3_FLG2_IS_NIC; | 12622 | tg3_flag_set(tp, IS_NIC); |
12048 | } | 12623 | } |
12049 | val = tr32(VCPU_CFGSHDW); | 12624 | val = tr32(VCPU_CFGSHDW); |
12050 | if (val & VCPU_CFGSHDW_ASPM_DBNC) | 12625 | if (val & VCPU_CFGSHDW_ASPM_DBNC) |
12051 | tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; | 12626 | tg3_flag_set(tp, ASPM_WORKAROUND); |
12052 | if ((val & VCPU_CFGSHDW_WOL_ENABLE) && | 12627 | if ((val & VCPU_CFGSHDW_WOL_ENABLE) && |
12053 | (val & VCPU_CFGSHDW_WOL_MAGPKT)) | 12628 | (val & VCPU_CFGSHDW_WOL_MAGPKT)) { |
12054 | tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; | 12629 | tg3_flag_set(tp, WOL_ENABLE); |
12630 | device_set_wakeup_enable(&tp->pdev->dev, true); | ||
12631 | } | ||
12055 | goto done; | 12632 | goto done; |
12056 | } | 12633 | } |
12057 | 12634 | ||
@@ -12066,9 +12643,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) | |||
12066 | 12643 | ||
12067 | tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); | 12644 | tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); |
12068 | ver >>= NIC_SRAM_DATA_VER_SHIFT; | 12645 | ver >>= NIC_SRAM_DATA_VER_SHIFT; |
12069 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) && | 12646 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && |
12070 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) && | 12647 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && |
12071 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) && | 12648 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 && |
12072 | (ver > 0) && (ver < 0x100)) | 12649 | (ver > 0) && (ver < 0x100)) |
12073 | tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); | 12650 | tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); |
12074 | 12651 | ||
@@ -12092,13 +12669,13 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) | |||
12092 | 12669 | ||
12093 | tp->phy_id = eeprom_phy_id; | 12670 | tp->phy_id = eeprom_phy_id; |
12094 | if (eeprom_phy_serdes) { | 12671 | if (eeprom_phy_serdes) { |
12095 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 12672 | if (!tg3_flag(tp, 5705_PLUS)) |
12096 | tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; | 12673 | tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; |
12097 | else | 12674 | else |
12098 | tp->phy_flags |= TG3_PHYFLG_MII_SERDES; | 12675 | tp->phy_flags |= TG3_PHYFLG_MII_SERDES; |
12099 | } | 12676 | } |
12100 | 12677 | ||
12101 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) | 12678 | if (tg3_flag(tp, 5750_PLUS)) |
12102 | led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | | 12679 | led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | |
12103 | SHASTA_EXT_LED_MODE_MASK); | 12680 | SHASTA_EXT_LED_MODE_MASK); |
12104 | else | 12681 | else |
@@ -12158,34 +12735,36 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) | |||
12158 | tp->led_ctrl = LED_CTRL_MODE_PHY_1; | 12735 | tp->led_ctrl = LED_CTRL_MODE_PHY_1; |
12159 | 12736 | ||
12160 | if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { | 12737 | if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { |
12161 | tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT; | 12738 | tg3_flag_set(tp, EEPROM_WRITE_PROT); |
12162 | if ((tp->pdev->subsystem_vendor == | 12739 | if ((tp->pdev->subsystem_vendor == |
12163 | PCI_VENDOR_ID_ARIMA) && | 12740 | PCI_VENDOR_ID_ARIMA) && |
12164 | (tp->pdev->subsystem_device == 0x205a || | 12741 | (tp->pdev->subsystem_device == 0x205a || |
12165 | tp->pdev->subsystem_device == 0x2063)) | 12742 | tp->pdev->subsystem_device == 0x2063)) |
12166 | tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; | 12743 | tg3_flag_clear(tp, EEPROM_WRITE_PROT); |
12167 | } else { | 12744 | } else { |
12168 | tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; | 12745 | tg3_flag_clear(tp, EEPROM_WRITE_PROT); |
12169 | tp->tg3_flags2 |= TG3_FLG2_IS_NIC; | 12746 | tg3_flag_set(tp, IS_NIC); |
12170 | } | 12747 | } |
12171 | 12748 | ||
12172 | if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { | 12749 | if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { |
12173 | tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; | 12750 | tg3_flag_set(tp, ENABLE_ASF); |
12174 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) | 12751 | if (tg3_flag(tp, 5750_PLUS)) |
12175 | tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; | 12752 | tg3_flag_set(tp, ASF_NEW_HANDSHAKE); |
12176 | } | 12753 | } |
12177 | 12754 | ||
12178 | if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && | 12755 | if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && |
12179 | (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) | 12756 | tg3_flag(tp, 5750_PLUS)) |
12180 | tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE; | 12757 | tg3_flag_set(tp, ENABLE_APE); |
12181 | 12758 | ||
12182 | if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && | 12759 | if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && |
12183 | !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) | 12760 | !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) |
12184 | tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; | 12761 | tg3_flag_clear(tp, WOL_CAP); |
12185 | 12762 | ||
12186 | if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && | 12763 | if (tg3_flag(tp, WOL_CAP) && |
12187 | (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) | 12764 | (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) { |
12188 | tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; | 12765 | tg3_flag_set(tp, WOL_ENABLE); |
12766 | device_set_wakeup_enable(&tp->pdev->dev, true); | ||
12767 | } | ||
12189 | 12768 | ||
12190 | if (cfg2 & (1 << 17)) | 12769 | if (cfg2 & (1 << 17)) |
12191 | tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; | 12770 | tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; |
@@ -12195,32 +12774,35 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) | |||
12195 | if (cfg2 & (1 << 18)) | 12774 | if (cfg2 & (1 << 18)) |
12196 | tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; | 12775 | tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; |
12197 | 12776 | ||
12198 | if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && | 12777 | if ((tg3_flag(tp, 57765_PLUS) || |
12778 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && | ||
12199 | GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && | 12779 | GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && |
12200 | (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) | 12780 | (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) |
12201 | tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; | 12781 | tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; |
12202 | 12782 | ||
12203 | if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && | 12783 | if (tg3_flag(tp, PCI_EXPRESS) && |
12204 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && | 12784 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && |
12205 | !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { | 12785 | !tg3_flag(tp, 57765_PLUS)) { |
12206 | u32 cfg3; | 12786 | u32 cfg3; |
12207 | 12787 | ||
12208 | tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); | 12788 | tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); |
12209 | if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) | 12789 | if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) |
12210 | tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; | 12790 | tg3_flag_set(tp, ASPM_WORKAROUND); |
12211 | } | 12791 | } |
12212 | 12792 | ||
12213 | if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) | 12793 | if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) |
12214 | tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE; | 12794 | tg3_flag_set(tp, RGMII_INBAND_DISABLE); |
12215 | if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) | 12795 | if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) |
12216 | tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN; | 12796 | tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN); |
12217 | if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) | 12797 | if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) |
12218 | tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN; | 12798 | tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN); |
12219 | } | 12799 | } |
12220 | done: | 12800 | done: |
12221 | device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP); | 12801 | if (tg3_flag(tp, WOL_CAP)) |
12222 | device_set_wakeup_enable(&tp->pdev->dev, | 12802 | device_set_wakeup_enable(&tp->pdev->dev, |
12223 | tp->tg3_flags & TG3_FLAG_WOL_ENABLE); | 12803 | tg3_flag(tp, WOL_ENABLE)); |
12804 | else | ||
12805 | device_set_wakeup_capable(&tp->pdev->dev, false); | ||
12224 | } | 12806 | } |
12225 | 12807 | ||
12226 | static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) | 12808 | static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) |
@@ -12272,21 +12854,53 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp) | |||
12272 | return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); | 12854 | return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); |
12273 | } | 12855 | } |
12274 | 12856 | ||
12857 | static void __devinit tg3_phy_init_link_config(struct tg3 *tp) | ||
12858 | { | ||
12859 | u32 adv = ADVERTISED_Autoneg | | ||
12860 | ADVERTISED_Pause; | ||
12861 | |||
12862 | if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) | ||
12863 | adv |= ADVERTISED_1000baseT_Half | | ||
12864 | ADVERTISED_1000baseT_Full; | ||
12865 | |||
12866 | if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) | ||
12867 | adv |= ADVERTISED_100baseT_Half | | ||
12868 | ADVERTISED_100baseT_Full | | ||
12869 | ADVERTISED_10baseT_Half | | ||
12870 | ADVERTISED_10baseT_Full | | ||
12871 | ADVERTISED_TP; | ||
12872 | else | ||
12873 | adv |= ADVERTISED_FIBRE; | ||
12874 | |||
12875 | tp->link_config.advertising = adv; | ||
12876 | tp->link_config.speed = SPEED_INVALID; | ||
12877 | tp->link_config.duplex = DUPLEX_INVALID; | ||
12878 | tp->link_config.autoneg = AUTONEG_ENABLE; | ||
12879 | tp->link_config.active_speed = SPEED_INVALID; | ||
12880 | tp->link_config.active_duplex = DUPLEX_INVALID; | ||
12881 | tp->link_config.orig_speed = SPEED_INVALID; | ||
12882 | tp->link_config.orig_duplex = DUPLEX_INVALID; | ||
12883 | tp->link_config.orig_autoneg = AUTONEG_INVALID; | ||
12884 | } | ||
12885 | |||
12275 | static int __devinit tg3_phy_probe(struct tg3 *tp) | 12886 | static int __devinit tg3_phy_probe(struct tg3 *tp) |
12276 | { | 12887 | { |
12277 | u32 hw_phy_id_1, hw_phy_id_2; | 12888 | u32 hw_phy_id_1, hw_phy_id_2; |
12278 | u32 hw_phy_id, hw_phy_id_masked; | 12889 | u32 hw_phy_id, hw_phy_id_masked; |
12279 | int err; | 12890 | int err; |
12280 | 12891 | ||
12281 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) | 12892 | /* flow control autonegotiation is default behavior */ |
12893 | tg3_flag_set(tp, PAUSE_AUTONEG); | ||
12894 | tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; | ||
12895 | |||
12896 | if (tg3_flag(tp, USE_PHYLIB)) | ||
12282 | return tg3_phy_init(tp); | 12897 | return tg3_phy_init(tp); |
12283 | 12898 | ||
12284 | /* Reading the PHY ID register can conflict with ASF | 12899 | /* Reading the PHY ID register can conflict with ASF |
12285 | * firmware access to the PHY hardware. | 12900 | * firmware access to the PHY hardware. |
12286 | */ | 12901 | */ |
12287 | err = 0; | 12902 | err = 0; |
12288 | if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || | 12903 | if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) { |
12289 | (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { | ||
12290 | hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID; | 12904 | hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID; |
12291 | } else { | 12905 | } else { |
12292 | /* Now read the physical PHY_ID from the chip and verify | 12906 | /* Now read the physical PHY_ID from the chip and verify |
@@ -12333,9 +12947,18 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) | |||
12333 | } | 12947 | } |
12334 | 12948 | ||
12335 | if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && | 12949 | if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && |
12336 | !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) && | 12950 | ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 && |
12337 | !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { | 12951 | tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) || |
12338 | u32 bmsr, adv_reg, tg3_ctrl, mask; | 12952 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && |
12953 | tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))) | ||
12954 | tp->phy_flags |= TG3_PHYFLG_EEE_CAP; | ||
12955 | |||
12956 | tg3_phy_init_link_config(tp); | ||
12957 | |||
12958 | if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && | ||
12959 | !tg3_flag(tp, ENABLE_APE) && | ||
12960 | !tg3_flag(tp, ENABLE_ASF)) { | ||
12961 | u32 bmsr, mask; | ||
12339 | 12962 | ||
12340 | tg3_readphy(tp, MII_BMSR, &bmsr); | 12963 | tg3_readphy(tp, MII_BMSR, &bmsr); |
12341 | if (!tg3_readphy(tp, MII_BMSR, &bmsr) && | 12964 | if (!tg3_readphy(tp, MII_BMSR, &bmsr) && |
@@ -12346,36 +12969,18 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) | |||
12346 | if (err) | 12969 | if (err) |
12347 | return err; | 12970 | return err; |
12348 | 12971 | ||
12349 | adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL | | 12972 | tg3_phy_set_wirespeed(tp); |
12350 | ADVERTISE_100HALF | ADVERTISE_100FULL | | ||
12351 | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); | ||
12352 | tg3_ctrl = 0; | ||
12353 | if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { | ||
12354 | tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF | | ||
12355 | MII_TG3_CTRL_ADV_1000_FULL); | ||
12356 | if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || | ||
12357 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) | ||
12358 | tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER | | ||
12359 | MII_TG3_CTRL_ENABLE_AS_MASTER); | ||
12360 | } | ||
12361 | 12973 | ||
12362 | mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | | 12974 | mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | |
12363 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | | 12975 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | |
12364 | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); | 12976 | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); |
12365 | if (!tg3_copper_is_advertising_all(tp, mask)) { | 12977 | if (!tg3_copper_is_advertising_all(tp, mask)) { |
12366 | tg3_writephy(tp, MII_ADVERTISE, adv_reg); | 12978 | tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, |
12367 | 12979 | tp->link_config.flowctrl); | |
12368 | if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) | ||
12369 | tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); | ||
12370 | 12980 | ||
12371 | tg3_writephy(tp, MII_BMCR, | 12981 | tg3_writephy(tp, MII_BMCR, |
12372 | BMCR_ANENABLE | BMCR_ANRESTART); | 12982 | BMCR_ANENABLE | BMCR_ANRESTART); |
12373 | } | 12983 | } |
12374 | tg3_phy_set_wirespeed(tp); | ||
12375 | |||
12376 | tg3_writephy(tp, MII_ADVERTISE, adv_reg); | ||
12377 | if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) | ||
12378 | tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); | ||
12379 | } | 12984 | } |
12380 | 12985 | ||
12381 | skip_phy_reset: | 12986 | skip_phy_reset: |
@@ -12387,60 +12992,18 @@ skip_phy_reset: | |||
12387 | err = tg3_init_5401phy_dsp(tp); | 12992 | err = tg3_init_5401phy_dsp(tp); |
12388 | } | 12993 | } |
12389 | 12994 | ||
12390 | if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) | ||
12391 | tp->link_config.advertising = | ||
12392 | (ADVERTISED_1000baseT_Half | | ||
12393 | ADVERTISED_1000baseT_Full | | ||
12394 | ADVERTISED_Autoneg | | ||
12395 | ADVERTISED_FIBRE); | ||
12396 | if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) | ||
12397 | tp->link_config.advertising &= | ||
12398 | ~(ADVERTISED_1000baseT_Half | | ||
12399 | ADVERTISED_1000baseT_Full); | ||
12400 | |||
12401 | return err; | 12995 | return err; |
12402 | } | 12996 | } |
12403 | 12997 | ||
12404 | static void __devinit tg3_read_vpd(struct tg3 *tp) | 12998 | static void __devinit tg3_read_vpd(struct tg3 *tp) |
12405 | { | 12999 | { |
12406 | u8 vpd_data[TG3_NVM_VPD_LEN]; | 13000 | u8 *vpd_data; |
12407 | unsigned int block_end, rosize, len; | 13001 | unsigned int block_end, rosize, len; |
12408 | int j, i = 0; | 13002 | int j, i = 0; |
12409 | u32 magic; | ||
12410 | |||
12411 | if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || | ||
12412 | tg3_nvram_read(tp, 0x0, &magic)) | ||
12413 | goto out_not_found; | ||
12414 | 13003 | ||
12415 | if (magic == TG3_EEPROM_MAGIC) { | 13004 | vpd_data = (u8 *)tg3_vpd_readblock(tp); |
12416 | for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) { | 13005 | if (!vpd_data) |
12417 | u32 tmp; | 13006 | goto out_no_vpd; |
12418 | |||
12419 | /* The data is in little-endian format in NVRAM. | ||
12420 | * Use the big-endian read routines to preserve | ||
12421 | * the byte order as it exists in NVRAM. | ||
12422 | */ | ||
12423 | if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp)) | ||
12424 | goto out_not_found; | ||
12425 | |||
12426 | memcpy(&vpd_data[i], &tmp, sizeof(tmp)); | ||
12427 | } | ||
12428 | } else { | ||
12429 | ssize_t cnt; | ||
12430 | unsigned int pos = 0; | ||
12431 | |||
12432 | for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) { | ||
12433 | cnt = pci_read_vpd(tp->pdev, pos, | ||
12434 | TG3_NVM_VPD_LEN - pos, | ||
12435 | &vpd_data[pos]); | ||
12436 | if (cnt == -ETIMEDOUT || -EINTR) | ||
12437 | cnt = 0; | ||
12438 | else if (cnt < 0) | ||
12439 | goto out_not_found; | ||
12440 | } | ||
12441 | if (pos != TG3_NVM_VPD_LEN) | ||
12442 | goto out_not_found; | ||
12443 | } | ||
12444 | 13007 | ||
12445 | i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN, | 13008 | i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN, |
12446 | PCI_VPD_LRDT_RO_DATA); | 13009 | PCI_VPD_LRDT_RO_DATA); |
@@ -12494,43 +13057,51 @@ partno: | |||
12494 | 13057 | ||
12495 | memcpy(tp->board_part_number, &vpd_data[i], len); | 13058 | memcpy(tp->board_part_number, &vpd_data[i], len); |
12496 | 13059 | ||
12497 | return; | ||
12498 | |||
12499 | out_not_found: | 13060 | out_not_found: |
12500 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 13061 | kfree(vpd_data); |
13062 | if (tp->board_part_number[0]) | ||
13063 | return; | ||
13064 | |||
13065 | out_no_vpd: | ||
13066 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { | ||
13067 | if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717) | ||
13068 | strcpy(tp->board_part_number, "BCM5717"); | ||
13069 | else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) | ||
13070 | strcpy(tp->board_part_number, "BCM5718"); | ||
13071 | else | ||
13072 | goto nomatch; | ||
13073 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { | ||
13074 | if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) | ||
13075 | strcpy(tp->board_part_number, "BCM57780"); | ||
13076 | else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) | ||
13077 | strcpy(tp->board_part_number, "BCM57760"); | ||
13078 | else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) | ||
13079 | strcpy(tp->board_part_number, "BCM57790"); | ||
13080 | else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) | ||
13081 | strcpy(tp->board_part_number, "BCM57788"); | ||
13082 | else | ||
13083 | goto nomatch; | ||
13084 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { | ||
13085 | if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) | ||
13086 | strcpy(tp->board_part_number, "BCM57761"); | ||
13087 | else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) | ||
13088 | strcpy(tp->board_part_number, "BCM57765"); | ||
13089 | else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) | ||
13090 | strcpy(tp->board_part_number, "BCM57781"); | ||
13091 | else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) | ||
13092 | strcpy(tp->board_part_number, "BCM57785"); | ||
13093 | else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) | ||
13094 | strcpy(tp->board_part_number, "BCM57791"); | ||
13095 | else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) | ||
13096 | strcpy(tp->board_part_number, "BCM57795"); | ||
13097 | else | ||
13098 | goto nomatch; | ||
13099 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | ||
12501 | strcpy(tp->board_part_number, "BCM95906"); | 13100 | strcpy(tp->board_part_number, "BCM95906"); |
12502 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && | 13101 | } else { |
12503 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) | 13102 | nomatch: |
12504 | strcpy(tp->board_part_number, "BCM57780"); | ||
12505 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && | ||
12506 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) | ||
12507 | strcpy(tp->board_part_number, "BCM57760"); | ||
12508 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && | ||
12509 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) | ||
12510 | strcpy(tp->board_part_number, "BCM57790"); | ||
12511 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && | ||
12512 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) | ||
12513 | strcpy(tp->board_part_number, "BCM57788"); | ||
12514 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && | ||
12515 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) | ||
12516 | strcpy(tp->board_part_number, "BCM57761"); | ||
12517 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && | ||
12518 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) | ||
12519 | strcpy(tp->board_part_number, "BCM57765"); | ||
12520 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && | ||
12521 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) | ||
12522 | strcpy(tp->board_part_number, "BCM57781"); | ||
12523 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && | ||
12524 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) | ||
12525 | strcpy(tp->board_part_number, "BCM57785"); | ||
12526 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && | ||
12527 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) | ||
12528 | strcpy(tp->board_part_number, "BCM57791"); | ||
12529 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && | ||
12530 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) | ||
12531 | strcpy(tp->board_part_number, "BCM57795"); | ||
12532 | else | ||
12533 | strcpy(tp->board_part_number, "none"); | 13103 | strcpy(tp->board_part_number, "none"); |
13104 | } | ||
12534 | } | 13105 | } |
12535 | 13106 | ||
12536 | static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) | 13107 | static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) |
@@ -12639,6 +13210,9 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val) | |||
12639 | case TG3_EEPROM_SB_REVISION_5: | 13210 | case TG3_EEPROM_SB_REVISION_5: |
12640 | offset = TG3_EEPROM_SB_F1R5_EDH_OFF; | 13211 | offset = TG3_EEPROM_SB_F1R5_EDH_OFF; |
12641 | break; | 13212 | break; |
13213 | case TG3_EEPROM_SB_REVISION_6: | ||
13214 | offset = TG3_EEPROM_SB_F1R6_EDH_OFF; | ||
13215 | break; | ||
12642 | default: | 13216 | default: |
12643 | return; | 13217 | return; |
12644 | } | 13218 | } |
@@ -12684,7 +13258,7 @@ static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp) | |||
12684 | if (offset == TG3_NVM_DIR_END) | 13258 | if (offset == TG3_NVM_DIR_END) |
12685 | return; | 13259 | return; |
12686 | 13260 | ||
12687 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 13261 | if (!tg3_flag(tp, 5705_PLUS)) |
12688 | start = 0x08000000; | 13262 | start = 0x08000000; |
12689 | else if (tg3_nvram_read(tp, offset - 4, &start)) | 13263 | else if (tg3_nvram_read(tp, offset - 4, &start)) |
12690 | return; | 13264 | return; |
@@ -12724,8 +13298,7 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp) | |||
12724 | u32 apedata; | 13298 | u32 apedata; |
12725 | char *fwtype; | 13299 | char *fwtype; |
12726 | 13300 | ||
12727 | if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || | 13301 | if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF)) |
12728 | !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) | ||
12729 | return; | 13302 | return; |
12730 | 13303 | ||
12731 | apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); | 13304 | apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); |
@@ -12738,10 +13311,12 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp) | |||
12738 | 13311 | ||
12739 | apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); | 13312 | apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); |
12740 | 13313 | ||
12741 | if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) | 13314 | if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) { |
13315 | tg3_flag_set(tp, APE_HAS_NCSI); | ||
12742 | fwtype = "NCSI"; | 13316 | fwtype = "NCSI"; |
12743 | else | 13317 | } else { |
12744 | fwtype = "DASH"; | 13318 | fwtype = "DASH"; |
13319 | } | ||
12745 | 13320 | ||
12746 | vlen = strlen(tp->fw_ver); | 13321 | vlen = strlen(tp->fw_ver); |
12747 | 13322 | ||
@@ -12761,7 +13336,7 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp) | |||
12761 | if (tp->fw_ver[0] != 0) | 13336 | if (tp->fw_ver[0] != 0) |
12762 | vpd_vers = true; | 13337 | vpd_vers = true; |
12763 | 13338 | ||
12764 | if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) { | 13339 | if (tg3_flag(tp, NO_NVRAM)) { |
12765 | strcat(tp->fw_ver, "sb"); | 13340 | strcat(tp->fw_ver, "sb"); |
12766 | return; | 13341 | return; |
12767 | } | 13342 | } |
@@ -12778,8 +13353,7 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp) | |||
12778 | else | 13353 | else |
12779 | return; | 13354 | return; |
12780 | 13355 | ||
12781 | if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || | 13356 | if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers) |
12782 | (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers) | ||
12783 | goto done; | 13357 | goto done; |
12784 | 13358 | ||
12785 | tg3_read_mgmtfw_ver(tp); | 13359 | tg3_read_mgmtfw_ver(tp); |
@@ -12790,24 +13364,25 @@ done: | |||
12790 | 13364 | ||
12791 | static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); | 13365 | static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); |
12792 | 13366 | ||
12793 | static void inline vlan_features_add(struct net_device *dev, unsigned long flags) | 13367 | static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) |
12794 | { | 13368 | { |
12795 | #if TG3_VLAN_TAG_USED | 13369 | if (tg3_flag(tp, LRG_PROD_RING_CAP)) |
12796 | dev->vlan_features |= flags; | 13370 | return TG3_RX_RET_MAX_SIZE_5717; |
12797 | #endif | 13371 | else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) |
13372 | return TG3_RX_RET_MAX_SIZE_5700; | ||
13373 | else | ||
13374 | return TG3_RX_RET_MAX_SIZE_5705; | ||
12798 | } | 13375 | } |
12799 | 13376 | ||
13377 | static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = { | ||
13378 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, | ||
13379 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, | ||
13380 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, | ||
13381 | { }, | ||
13382 | }; | ||
13383 | |||
12800 | static int __devinit tg3_get_invariants(struct tg3 *tp) | 13384 | static int __devinit tg3_get_invariants(struct tg3 *tp) |
12801 | { | 13385 | { |
12802 | static struct pci_device_id write_reorder_chipsets[] = { | ||
12803 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, | ||
12804 | PCI_DEVICE_ID_AMD_FE_GATE_700C) }, | ||
12805 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, | ||
12806 | PCI_DEVICE_ID_AMD_8131_BRIDGE) }, | ||
12807 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, | ||
12808 | PCI_DEVICE_ID_VIA_8385_0) }, | ||
12809 | { }, | ||
12810 | }; | ||
12811 | u32 misc_ctrl_reg; | 13386 | u32 misc_ctrl_reg; |
12812 | u32 pci_state_reg, grc_misc_cfg; | 13387 | u32 pci_state_reg, grc_misc_cfg; |
12813 | u32 val; | 13388 | u32 val; |
@@ -12841,8 +13416,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12841 | 13416 | ||
12842 | if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || | 13417 | if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || |
12843 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || | 13418 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || |
12844 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724 || | 13419 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || |
12845 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719) | 13420 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) |
12846 | pci_read_config_dword(tp->pdev, | 13421 | pci_read_config_dword(tp->pdev, |
12847 | TG3PCI_GEN2_PRODID_ASICREV, | 13422 | TG3PCI_GEN2_PRODID_ASICREV, |
12848 | &prod_id_asic_rev); | 13423 | &prod_id_asic_rev); |
@@ -12919,15 +13494,14 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12919 | if (bridge->subordinate && | 13494 | if (bridge->subordinate && |
12920 | (bridge->subordinate->number == | 13495 | (bridge->subordinate->number == |
12921 | tp->pdev->bus->number)) { | 13496 | tp->pdev->bus->number)) { |
12922 | 13497 | tg3_flag_set(tp, ICH_WORKAROUND); | |
12923 | tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND; | ||
12924 | pci_dev_put(bridge); | 13498 | pci_dev_put(bridge); |
12925 | break; | 13499 | break; |
12926 | } | 13500 | } |
12927 | } | 13501 | } |
12928 | } | 13502 | } |
12929 | 13503 | ||
12930 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { | 13504 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { |
12931 | static struct tg3_dev_id { | 13505 | static struct tg3_dev_id { |
12932 | u32 vendor; | 13506 | u32 vendor; |
12933 | u32 device; | 13507 | u32 device; |
@@ -12952,7 +13526,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12952 | tp->pdev->bus->number) && | 13526 | tp->pdev->bus->number) && |
12953 | (bridge->subordinate->subordinate >= | 13527 | (bridge->subordinate->subordinate >= |
12954 | tp->pdev->bus->number)) { | 13528 | tp->pdev->bus->number)) { |
12955 | tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG; | 13529 | tg3_flag_set(tp, 5701_DMA_BUG); |
12956 | pci_dev_put(bridge); | 13530 | pci_dev_put(bridge); |
12957 | break; | 13531 | break; |
12958 | } | 13532 | } |
@@ -12967,8 +13541,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12967 | */ | 13541 | */ |
12968 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || | 13542 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || |
12969 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { | 13543 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { |
12970 | tp->tg3_flags2 |= TG3_FLG2_5780_CLASS; | 13544 | tg3_flag_set(tp, 5780_CLASS); |
12971 | tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; | 13545 | tg3_flag_set(tp, 40BIT_DMA_BUG); |
12972 | tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); | 13546 | tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); |
12973 | } else { | 13547 | } else { |
12974 | struct pci_dev *bridge = NULL; | 13548 | struct pci_dev *bridge = NULL; |
@@ -12982,7 +13556,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12982 | tp->pdev->bus->number) && | 13556 | tp->pdev->bus->number) && |
12983 | (bridge->subordinate->subordinate >= | 13557 | (bridge->subordinate->subordinate >= |
12984 | tp->pdev->bus->number)) { | 13558 | tp->pdev->bus->number)) { |
12985 | tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; | 13559 | tg3_flag_set(tp, 40BIT_DMA_BUG); |
12986 | pci_dev_put(bridge); | 13560 | pci_dev_put(bridge); |
12987 | break; | 13561 | break; |
12988 | } | 13562 | } |
@@ -12997,13 +13571,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12997 | 13571 | ||
12998 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || | 13572 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || |
12999 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || | 13573 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || |
13000 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | 13574 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
13575 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) | ||
13001 | tp->pdev_peer = tg3_find_peer(tp); | 13576 | tp->pdev_peer = tg3_find_peer(tp); |
13002 | 13577 | ||
13003 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 13578 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
13004 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || | 13579 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || |
13005 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | 13580 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) |
13006 | tp->tg3_flags3 |= TG3_FLG3_5717_PLUS; | 13581 | tg3_flag_set(tp, 5717_PLUS); |
13582 | |||
13583 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 || | ||
13584 | tg3_flag(tp, 5717_PLUS)) | ||
13585 | tg3_flag_set(tp, 57765_PLUS); | ||
13007 | 13586 | ||
13008 | /* Intentionally exclude ASIC_REV_5906 */ | 13587 | /* Intentionally exclude ASIC_REV_5906 */ |
13009 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 13588 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
@@ -13012,94 +13591,102 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13012 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | 13591 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || |
13013 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 13592 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || |
13014 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || | 13593 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || |
13015 | (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) | 13594 | tg3_flag(tp, 57765_PLUS)) |
13016 | tp->tg3_flags3 |= TG3_FLG3_5755_PLUS; | 13595 | tg3_flag_set(tp, 5755_PLUS); |
13017 | 13596 | ||
13018 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || | 13597 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || |
13019 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || | 13598 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || |
13020 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || | 13599 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || |
13021 | (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || | 13600 | tg3_flag(tp, 5755_PLUS) || |
13022 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) | 13601 | tg3_flag(tp, 5780_CLASS)) |
13023 | tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; | 13602 | tg3_flag_set(tp, 5750_PLUS); |
13024 | 13603 | ||
13025 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) || | 13604 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || |
13026 | (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) | 13605 | tg3_flag(tp, 5750_PLUS)) |
13027 | tp->tg3_flags2 |= TG3_FLG2_5705_PLUS; | 13606 | tg3_flag_set(tp, 5705_PLUS); |
13028 | |||
13029 | /* 5700 B0 chips do not support checksumming correctly due | ||
13030 | * to hardware bugs. | ||
13031 | */ | ||
13032 | if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) | ||
13033 | tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS; | ||
13034 | else { | ||
13035 | unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO; | ||
13036 | |||
13037 | tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; | ||
13038 | if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) | ||
13039 | features |= NETIF_F_IPV6_CSUM; | ||
13040 | tp->dev->features |= features; | ||
13041 | vlan_features_add(tp->dev, features); | ||
13042 | } | ||
13043 | 13607 | ||
13044 | /* Determine TSO capabilities */ | 13608 | /* Determine TSO capabilities */ |
13045 | if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) | 13609 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) |
13046 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; | 13610 | ; /* Do nothing. HW bug. */ |
13047 | else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || | 13611 | else if (tg3_flag(tp, 57765_PLUS)) |
13612 | tg3_flag_set(tp, HW_TSO_3); | ||
13613 | else if (tg3_flag(tp, 5755_PLUS) || | ||
13048 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 13614 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) |
13049 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; | 13615 | tg3_flag_set(tp, HW_TSO_2); |
13050 | else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { | 13616 | else if (tg3_flag(tp, 5750_PLUS)) { |
13051 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG; | 13617 | tg3_flag_set(tp, HW_TSO_1); |
13618 | tg3_flag_set(tp, TSO_BUG); | ||
13052 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 && | 13619 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 && |
13053 | tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) | 13620 | tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) |
13054 | tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG; | 13621 | tg3_flag_clear(tp, TSO_BUG); |
13055 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && | 13622 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && |
13056 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && | 13623 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && |
13057 | tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { | 13624 | tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { |
13058 | tp->tg3_flags2 |= TG3_FLG2_TSO_BUG; | 13625 | tg3_flag_set(tp, TSO_BUG); |
13059 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) | 13626 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) |
13060 | tp->fw_needed = FIRMWARE_TG3TSO5; | 13627 | tp->fw_needed = FIRMWARE_TG3TSO5; |
13061 | else | 13628 | else |
13062 | tp->fw_needed = FIRMWARE_TG3TSO; | 13629 | tp->fw_needed = FIRMWARE_TG3TSO; |
13063 | } | 13630 | } |
13064 | 13631 | ||
13632 | /* Selectively allow TSO based on operating conditions */ | ||
13633 | if (tg3_flag(tp, HW_TSO_1) || | ||
13634 | tg3_flag(tp, HW_TSO_2) || | ||
13635 | tg3_flag(tp, HW_TSO_3) || | ||
13636 | (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF))) | ||
13637 | tg3_flag_set(tp, TSO_CAPABLE); | ||
13638 | else { | ||
13639 | tg3_flag_clear(tp, TSO_CAPABLE); | ||
13640 | tg3_flag_clear(tp, TSO_BUG); | ||
13641 | tp->fw_needed = NULL; | ||
13642 | } | ||
13643 | |||
13644 | if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) | ||
13645 | tp->fw_needed = FIRMWARE_TG3; | ||
13646 | |||
13065 | tp->irq_max = 1; | 13647 | tp->irq_max = 1; |
13066 | 13648 | ||
13067 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { | 13649 | if (tg3_flag(tp, 5750_PLUS)) { |
13068 | tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; | 13650 | tg3_flag_set(tp, SUPPORT_MSI); |
13069 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || | 13651 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || |
13070 | GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX || | 13652 | GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX || |
13071 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 && | 13653 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 && |
13072 | tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 && | 13654 | tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 && |
13073 | tp->pdev_peer == tp->pdev)) | 13655 | tp->pdev_peer == tp->pdev)) |
13074 | tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI; | 13656 | tg3_flag_clear(tp, SUPPORT_MSI); |
13075 | 13657 | ||
13076 | if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || | 13658 | if (tg3_flag(tp, 5755_PLUS) || |
13077 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 13659 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
13078 | tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; | 13660 | tg3_flag_set(tp, 1SHOT_MSI); |
13079 | } | 13661 | } |
13080 | 13662 | ||
13081 | if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { | 13663 | if (tg3_flag(tp, 57765_PLUS)) { |
13082 | tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; | 13664 | tg3_flag_set(tp, SUPPORT_MSIX); |
13083 | tp->irq_max = TG3_IRQ_MAX_VECS; | 13665 | tp->irq_max = TG3_IRQ_MAX_VECS; |
13084 | } | 13666 | } |
13085 | } | 13667 | } |
13086 | 13668 | ||
13087 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 13669 | /* All chips can get confused if TX buffers |
13088 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || | 13670 | * straddle the 4GB address boundary. |
13089 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 13671 | */ |
13090 | tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG; | 13672 | tg3_flag_set(tp, 4G_DMA_BNDRY_BUG); |
13091 | else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) { | 13673 | |
13092 | tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG; | 13674 | if (tg3_flag(tp, 5755_PLUS)) |
13093 | tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; | 13675 | tg3_flag_set(tp, SHORT_DMA_BUG); |
13094 | } | 13676 | else |
13677 | tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG); | ||
13095 | 13678 | ||
13096 | if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) | 13679 | if (tg3_flag(tp, 5717_PLUS)) |
13097 | tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; | 13680 | tg3_flag_set(tp, LRG_PROD_RING_CAP); |
13098 | 13681 | ||
13099 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || | 13682 | if (tg3_flag(tp, 57765_PLUS) && |
13100 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || | 13683 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719) |
13101 | (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG)) | 13684 | tg3_flag_set(tp, USE_JUMBO_BDFLAG); |
13102 | tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE; | 13685 | |
13686 | if (!tg3_flag(tp, 5705_PLUS) || | ||
13687 | tg3_flag(tp, 5780_CLASS) || | ||
13688 | tg3_flag(tp, USE_JUMBO_BDFLAG)) | ||
13689 | tg3_flag_set(tp, JUMBO_CAPABLE); | ||
13103 | 13690 | ||
13104 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, | 13691 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, |
13105 | &pci_state_reg); | 13692 | &pci_state_reg); |
@@ -13108,28 +13695,36 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13108 | if (tp->pcie_cap != 0) { | 13695 | if (tp->pcie_cap != 0) { |
13109 | u16 lnkctl; | 13696 | u16 lnkctl; |
13110 | 13697 | ||
13111 | tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; | 13698 | tg3_flag_set(tp, PCI_EXPRESS); |
13699 | |||
13700 | tp->pcie_readrq = 4096; | ||
13701 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || | ||
13702 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) | ||
13703 | tp->pcie_readrq = 2048; | ||
13112 | 13704 | ||
13113 | pcie_set_readrq(tp->pdev, 4096); | 13705 | pcie_set_readrq(tp->pdev, tp->pcie_readrq); |
13114 | 13706 | ||
13115 | pci_read_config_word(tp->pdev, | 13707 | pci_read_config_word(tp->pdev, |
13116 | tp->pcie_cap + PCI_EXP_LNKCTL, | 13708 | tp->pcie_cap + PCI_EXP_LNKCTL, |
13117 | &lnkctl); | 13709 | &lnkctl); |
13118 | if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { | 13710 | if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { |
13119 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 13711 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == |
13120 | tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2; | 13712 | ASIC_REV_5906) { |
13713 | tg3_flag_clear(tp, HW_TSO_2); | ||
13714 | tg3_flag_clear(tp, TSO_CAPABLE); | ||
13715 | } | ||
13121 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || | 13716 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || |
13122 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | 13717 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || |
13123 | tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || | 13718 | tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || |
13124 | tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) | 13719 | tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) |
13125 | tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG; | 13720 | tg3_flag_set(tp, CLKREQ_BUG); |
13126 | } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) { | 13721 | } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) { |
13127 | tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN; | 13722 | tg3_flag_set(tp, L1PLLPD_EN); |
13128 | } | 13723 | } |
13129 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { | 13724 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { |
13130 | tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; | 13725 | tg3_flag_set(tp, PCI_EXPRESS); |
13131 | } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || | 13726 | } else if (!tg3_flag(tp, 5705_PLUS) || |
13132 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { | 13727 | tg3_flag(tp, 5780_CLASS)) { |
13133 | tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); | 13728 | tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); |
13134 | if (!tp->pcix_cap) { | 13729 | if (!tp->pcix_cap) { |
13135 | dev_err(&tp->pdev->dev, | 13730 | dev_err(&tp->pdev->dev, |
@@ -13138,7 +13733,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13138 | } | 13733 | } |
13139 | 13734 | ||
13140 | if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE)) | 13735 | if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE)) |
13141 | tp->tg3_flags |= TG3_FLAG_PCIX_MODE; | 13736 | tg3_flag_set(tp, PCIX_MODE); |
13142 | } | 13737 | } |
13143 | 13738 | ||
13144 | /* If we have an AMD 762 or VIA K8T800 chipset, write | 13739 | /* If we have an AMD 762 or VIA K8T800 chipset, write |
@@ -13147,9 +13742,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13147 | * every mailbox register write to force the writes to be | 13742 | * every mailbox register write to force the writes to be |
13148 | * posted to the chip in order. | 13743 | * posted to the chip in order. |
13149 | */ | 13744 | */ |
13150 | if (pci_dev_present(write_reorder_chipsets) && | 13745 | if (pci_dev_present(tg3_write_reorder_chipsets) && |
13151 | !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) | 13746 | !tg3_flag(tp, PCI_EXPRESS)) |
13152 | tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; | 13747 | tg3_flag_set(tp, MBOX_WRITE_REORDER); |
13153 | 13748 | ||
13154 | pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, | 13749 | pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, |
13155 | &tp->pci_cacheline_sz); | 13750 | &tp->pci_cacheline_sz); |
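The comment in the hunk above covers the AMD 762 / VIA K8T800 case: when one of those host bridges is present and the device is not PCI Express, MBOX_WRITE_REORDER is set so mailbox writes are forced to reach the chip in order (a later hunk routes the RX mailbox writes through tg3_write_flush_reg32). Below is a toy user-space model of that write-then-read-back pattern; fake_bar and reg_write_flush() are illustrative stand-ins, not the driver's accessors.

#include <stdint.h>

/* Toy register window standing in for the chip's mapped BAR. */
static volatile uint32_t fake_bar[256];

/* Flush-style accessor: post the write, then read the same offset back.
 * On real MMIO, a read from the device cannot return until earlier posted
 * writes to it have completed, so a later write cannot pass this one. */
static void reg_write_flush(volatile uint32_t *bar, uint32_t off, uint32_t val)
{
	bar[off / 4] = val;
	(void)bar[off / 4];
}

int main(void)
{
	reg_write_flush(fake_bar, 0x200, 0x1);	/* hypothetical mailbox offset */
	return fake_bar[0x200 / 4] == 0x1 ? 0 : 1;
}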
@@ -13166,17 +13761,17 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13166 | /* 5700 BX chips need to have their TX producer index | 13761 | /* 5700 BX chips need to have their TX producer index |
13167 | * mailboxes written twice to work around a bug. | 13762 | * mailboxes written twice to work around a bug. |
13168 | */ | 13763 | */ |
13169 | tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG; | 13764 | tg3_flag_set(tp, TXD_MBOX_HWBUG); |
13170 | 13765 | ||
13171 | /* If we are in PCI-X mode, enable register write workaround. | 13766 | /* If we are in PCI-X mode, enable register write workaround. |
13172 | * | 13767 | * |
13173 | * The workaround is to use indirect register accesses | 13768 | * The workaround is to use indirect register accesses |
13174 | * for all chip writes not to mailbox registers. | 13769 | * for all chip writes not to mailbox registers. |
13175 | */ | 13770 | */ |
13176 | if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { | 13771 | if (tg3_flag(tp, PCIX_MODE)) { |
13177 | u32 pm_reg; | 13772 | u32 pm_reg; |
13178 | 13773 | ||
13179 | tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; | 13774 | tg3_flag_set(tp, PCIX_TARGET_HWBUG); |
13180 | 13775 | ||
13181 | /* The chip can have its power management PCI config | 13776 | /* The chip can have its power management PCI config |
13182 | * space registers clobbered due to this bug. | 13777 | * space registers clobbered due to this bug. |
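This hunk also keeps the 5700 BX erratum handling: TXD_MBOX_HWBUG is set so the TX producer index mailbox gets written twice (a later hunk selects tg3_write32_tx_mbox for that case). The sketch below only models the repeated-write idea; tx_mbox_write_twice() is a hypothetical helper, not the driver's implementation.

#include <stdint.h>

static volatile uint32_t fake_bar[256];

/* Hypothetical helper modelling the erratum workaround: the producer
 * index value is simply stored to the mailbox offset two times. */
static void tx_mbox_write_twice(volatile uint32_t *bar, uint32_t off, uint32_t val)
{
	bar[off / 4] = val;
	bar[off / 4] = val;
}

int main(void)
{
	tx_mbox_write_twice(fake_bar, 0x304, 42);	/* hypothetical offset */
	return fake_bar[0x304 / 4] == 42 ? 0 : 1;
}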
@@ -13199,9 +13794,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13199 | } | 13794 | } |
13200 | 13795 | ||
13201 | if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) | 13796 | if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) |
13202 | tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED; | 13797 | tg3_flag_set(tp, PCI_HIGH_SPEED); |
13203 | if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) | 13798 | if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) |
13204 | tp->tg3_flags |= TG3_FLAG_PCI_32BIT; | 13799 | tg3_flag_set(tp, PCI_32BIT); |
13205 | 13800 | ||
13206 | /* Chip-specific fixup from Broadcom driver */ | 13801 | /* Chip-specific fixup from Broadcom driver */ |
13207 | if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) && | 13802 | if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) && |
@@ -13219,10 +13814,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13219 | tp->write32_rx_mbox = tg3_write32; | 13814 | tp->write32_rx_mbox = tg3_write32; |
13220 | 13815 | ||
13221 | /* Various workaround register access methods */ | 13816 | /* Various workaround register access methods */ |
13222 | if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) | 13817 | if (tg3_flag(tp, PCIX_TARGET_HWBUG)) |
13223 | tp->write32 = tg3_write_indirect_reg32; | 13818 | tp->write32 = tg3_write_indirect_reg32; |
13224 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || | 13819 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || |
13225 | ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && | 13820 | (tg3_flag(tp, PCI_EXPRESS) && |
13226 | tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) { | 13821 | tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) { |
13227 | /* | 13822 | /* |
13228 | * Back to back register writes can cause problems on these | 13823 | * Back to back register writes can cause problems on these |
@@ -13234,14 +13829,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13234 | tp->write32 = tg3_write_flush_reg32; | 13829 | tp->write32 = tg3_write_flush_reg32; |
13235 | } | 13830 | } |
13236 | 13831 | ||
13237 | if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) || | 13832 | if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { |
13238 | (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) { | ||
13239 | tp->write32_tx_mbox = tg3_write32_tx_mbox; | 13833 | tp->write32_tx_mbox = tg3_write32_tx_mbox; |
13240 | if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) | 13834 | if (tg3_flag(tp, MBOX_WRITE_REORDER)) |
13241 | tp->write32_rx_mbox = tg3_write_flush_reg32; | 13835 | tp->write32_rx_mbox = tg3_write_flush_reg32; |
13242 | } | 13836 | } |
13243 | 13837 | ||
13244 | if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) { | 13838 | if (tg3_flag(tp, ICH_WORKAROUND)) { |
13245 | tp->read32 = tg3_read_indirect_reg32; | 13839 | tp->read32 = tg3_read_indirect_reg32; |
13246 | tp->write32 = tg3_write_indirect_reg32; | 13840 | tp->write32 = tg3_write_indirect_reg32; |
13247 | tp->read32_mbox = tg3_read_indirect_mbox; | 13841 | tp->read32_mbox = tg3_read_indirect_mbox; |
@@ -13264,13 +13858,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13264 | } | 13858 | } |
13265 | 13859 | ||
13266 | if (tp->write32 == tg3_write_indirect_reg32 || | 13860 | if (tp->write32 == tg3_write_indirect_reg32 || |
13267 | ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && | 13861 | (tg3_flag(tp, PCIX_MODE) && |
13268 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 13862 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
13269 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) | 13863 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) |
13270 | tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG; | 13864 | tg3_flag_set(tp, SRAM_USE_CONFIG); |
13271 | 13865 | ||
13272 | /* Get eeprom hw config before calling tg3_set_power_state(). | 13866 | /* Get eeprom hw config before calling tg3_set_power_state(). |
13273 | * In particular, the TG3_FLG2_IS_NIC flag must be | 13867 | * In particular, the TG3_FLAG_IS_NIC flag must be |
13274 | * determined before calling tg3_set_power_state() so that | 13868 | * determined before calling tg3_set_power_state() so that |
13275 | * we know whether or not to switch out of Vaux power. | 13869 | * we know whether or not to switch out of Vaux power. |
13276 | * When the flag is set, it means that GPIO1 is used for eeprom | 13870 | * When the flag is set, it means that GPIO1 is used for eeprom |
@@ -13279,7 +13873,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13279 | */ | 13873 | */ |
13280 | tg3_get_eeprom_hw_cfg(tp); | 13874 | tg3_get_eeprom_hw_cfg(tp); |
13281 | 13875 | ||
13282 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { | 13876 | if (tg3_flag(tp, ENABLE_APE)) { |
13283 | /* Allow reads and writes to the | 13877 | /* Allow reads and writes to the |
13284 | * APE register and memory space. | 13878 | * APE register and memory space. |
13285 | */ | 13879 | */ |
@@ -13294,16 +13888,16 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13294 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | 13888 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || |
13295 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 13889 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || |
13296 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || | 13890 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || |
13297 | (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) | 13891 | tg3_flag(tp, 57765_PLUS)) |
13298 | tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; | 13892 | tg3_flag_set(tp, CPMU_PRESENT); |
13299 | 13893 | ||
13300 | /* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). | 13894 | /* Set up tp->grc_local_ctrl before calling tg3_power_up(). |
13301 | * GPIO1 driven high will bring 5700's external PHY out of reset. | 13895 | * GPIO1 driven high will bring 5700's external PHY out of reset. |
13302 | * It is also used as eeprom write protect on LOMs. | 13896 | * It is also used as eeprom write protect on LOMs. |
13303 | */ | 13897 | */ |
13304 | tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; | 13898 | tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; |
13305 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || | 13899 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
13306 | (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) | 13900 | tg3_flag(tp, EEPROM_WRITE_PROT)) |
13307 | tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | | 13901 | tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | |
13308 | GRC_LCLCTRL_GPIO_OUTPUT1); | 13902 | GRC_LCLCTRL_GPIO_OUTPUT1); |
13309 | /* Unused GPIO3 must be driven as output on 5752 because there | 13903 | /* Unused GPIO3 must be driven as output on 5752 because there |
@@ -13321,14 +13915,14 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13321 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { | 13915 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { |
13322 | /* Turn off the debug UART. */ | 13916 | /* Turn off the debug UART. */ |
13323 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; | 13917 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; |
13324 | if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) | 13918 | if (tg3_flag(tp, IS_NIC)) |
13325 | /* Keep VMain power. */ | 13919 | /* Keep VMain power. */ |
13326 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | | 13920 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | |
13327 | GRC_LCLCTRL_GPIO_OUTPUT0; | 13921 | GRC_LCLCTRL_GPIO_OUTPUT0; |
13328 | } | 13922 | } |
13329 | 13923 | ||
13330 | /* Force the chip into D0. */ | 13924 | /* Force the chip into D0. */ |
13331 | err = tg3_set_power_state(tp, PCI_D0); | 13925 | err = tg3_power_up(tp); |
13332 | if (err) { | 13926 | if (err) { |
13333 | dev_err(&tp->pdev->dev, "Transition to D0 failed\n"); | 13927 | dev_err(&tp->pdev->dev, "Transition to D0 failed\n"); |
13334 | return err; | 13928 | return err; |
@@ -13337,26 +13931,25 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13337 | /* Derive initial jumbo mode from MTU assigned in | 13931 | /* Derive initial jumbo mode from MTU assigned in |
13338 | * ether_setup() via the alloc_etherdev() call | 13932 | * ether_setup() via the alloc_etherdev() call |
13339 | */ | 13933 | */ |
13340 | if (tp->dev->mtu > ETH_DATA_LEN && | 13934 | if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) |
13341 | !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) | 13935 | tg3_flag_set(tp, JUMBO_RING_ENABLE); |
13342 | tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; | ||
13343 | 13936 | ||
13344 | /* Determine WakeOnLan speed to use. */ | 13937 | /* Determine WakeOnLan speed to use. */ |
13345 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 13938 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
13346 | tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || | 13939 | tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || |
13347 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 || | 13940 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 || |
13348 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) { | 13941 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) { |
13349 | tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB); | 13942 | tg3_flag_clear(tp, WOL_SPEED_100MB); |
13350 | } else { | 13943 | } else { |
13351 | tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB; | 13944 | tg3_flag_set(tp, WOL_SPEED_100MB); |
13352 | } | 13945 | } |
13353 | 13946 | ||
13354 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 13947 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) |
13355 | tp->phy_flags |= TG3_PHYFLG_IS_FET; | 13948 | tp->phy_flags |= TG3_PHYFLG_IS_FET; |
13356 | 13949 | ||
13357 | /* A few boards don't want Ethernet@WireSpeed phy feature */ | 13950 | /* A few boards don't want Ethernet@WireSpeed phy feature */ |
13358 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || | 13951 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
13359 | ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && | 13952 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && |
13360 | (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && | 13953 | (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && |
13361 | (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || | 13954 | (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || |
13362 | (tp->phy_flags & TG3_PHYFLG_IS_FET) || | 13955 | (tp->phy_flags & TG3_PHYFLG_IS_FET) || |
@@ -13369,11 +13962,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13369 | if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) | 13962 | if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) |
13370 | tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; | 13963 | tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; |
13371 | 13964 | ||
13372 | if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && | 13965 | if (tg3_flag(tp, 5705_PLUS) && |
13373 | !(tp->phy_flags & TG3_PHYFLG_IS_FET) && | 13966 | !(tp->phy_flags & TG3_PHYFLG_IS_FET) && |
13374 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && | 13967 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && |
13375 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && | 13968 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && |
13376 | !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { | 13969 | !tg3_flag(tp, 57765_PLUS)) { |
13377 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 13970 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
13378 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || | 13971 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || |
13379 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || | 13972 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || |
@@ -13394,7 +13987,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13394 | tp->phy_otp = TG3_OTP_DEFAULT; | 13987 | tp->phy_otp = TG3_OTP_DEFAULT; |
13395 | } | 13988 | } |
13396 | 13989 | ||
13397 | if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) | 13990 | if (tg3_flag(tp, CPMU_PRESENT)) |
13398 | tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; | 13991 | tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; |
13399 | else | 13992 | else |
13400 | tp->mi_mode = MAC_MI_MODE_BASE; | 13993 | tp->mi_mode = MAC_MI_MODE_BASE; |
@@ -13404,21 +13997,33 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13404 | GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) | 13997 | GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) |
13405 | tp->coalesce_mode |= HOSTCC_MODE_32BYTE; | 13998 | tp->coalesce_mode |= HOSTCC_MODE_32BYTE; |
13406 | 13999 | ||
14000 | /* Set these bits to enable statistics workaround. */ | ||
14001 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | ||
14002 | tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || | ||
14003 | tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) { | ||
14004 | tp->coalesce_mode |= HOSTCC_MODE_ATTN; | ||
14005 | tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; | ||
14006 | } | ||
14007 | |||
13407 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 14008 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || |
13408 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) | 14009 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) |
13409 | tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB; | 14010 | tg3_flag_set(tp, USE_PHYLIB); |
13410 | 14011 | ||
13411 | err = tg3_mdio_init(tp); | 14012 | err = tg3_mdio_init(tp); |
13412 | if (err) | 14013 | if (err) |
13413 | return err; | 14014 | return err; |
13414 | 14015 | ||
13415 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && | ||
13416 | tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) | ||
13417 | return -ENOTSUPP; | ||
13418 | |||
13419 | /* Initialize data/descriptor byte/word swapping. */ | 14016 | /* Initialize data/descriptor byte/word swapping. */ |
13420 | val = tr32(GRC_MODE); | 14017 | val = tr32(GRC_MODE); |
13421 | val &= GRC_MODE_HOST_STACKUP; | 14018 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) |
14019 | val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | | ||
14020 | GRC_MODE_WORD_SWAP_B2HRX_DATA | | ||
14021 | GRC_MODE_B2HRX_ENABLE | | ||
14022 | GRC_MODE_HTX2B_ENABLE | | ||
14023 | GRC_MODE_HOST_STACKUP); | ||
14024 | else | ||
14025 | val &= GRC_MODE_HOST_STACKUP; | ||
14026 | |||
13422 | tw32(GRC_MODE, val | tp->grc_mode); | 14027 | tw32(GRC_MODE, val | tp->grc_mode); |
13423 | 14028 | ||
13424 | tg3_switch_clocks(tp); | 14029 | tg3_switch_clocks(tp); |
@@ -13429,7 +14034,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13429 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, | 14034 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, |
13430 | &pci_state_reg); | 14035 | &pci_state_reg); |
13431 | if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && | 14036 | if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && |
13432 | (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) { | 14037 | !tg3_flag(tp, PCIX_TARGET_HWBUG)) { |
13433 | u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl); | 14038 | u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl); |
13434 | 14039 | ||
13435 | if (chiprevid == CHIPREV_ID_5701_A0 || | 14040 | if (chiprevid == CHIPREV_ID_5701_A0 || |
@@ -13448,7 +14053,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13448 | writel(0x00000000, sram_base + 4); | 14053 | writel(0x00000000, sram_base + 4); |
13449 | writel(0xffffffff, sram_base + 4); | 14054 | writel(0xffffffff, sram_base + 4); |
13450 | if (readl(sram_base) != 0x00000000) | 14055 | if (readl(sram_base) != 0x00000000) |
13451 | tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; | 14056 | tg3_flag_set(tp, PCIX_TARGET_HWBUG); |
13452 | } | 14057 | } |
13453 | } | 14058 | } |
13454 | 14059 | ||
@@ -13461,12 +14066,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13461 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && | 14066 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && |
13462 | (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || | 14067 | (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || |
13463 | grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) | 14068 | grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) |
13464 | tp->tg3_flags2 |= TG3_FLG2_IS_5788; | 14069 | tg3_flag_set(tp, IS_5788); |
13465 | 14070 | ||
13466 | if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) && | 14071 | if (!tg3_flag(tp, IS_5788) && |
13467 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)) | 14072 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) |
13468 | tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS; | 14073 | tg3_flag_set(tp, TAGGED_STATUS); |
13469 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { | 14074 | if (tg3_flag(tp, TAGGED_STATUS)) { |
13470 | tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | | 14075 | tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | |
13471 | HOSTCC_MODE_CLRTICK_TXBD); | 14076 | HOSTCC_MODE_CLRTICK_TXBD); |
13472 | 14077 | ||
@@ -13476,9 +14081,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13476 | } | 14081 | } |
13477 | 14082 | ||
13478 | /* Preserve the APE MAC_MODE bits */ | 14083 | /* Preserve the APE MAC_MODE bits */ |
13479 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) | 14084 | if (tg3_flag(tp, ENABLE_APE)) |
13480 | tp->mac_mode = tr32(MAC_MODE) | | 14085 | tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; |
13481 | MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; | ||
13482 | else | 14086 | else |
13483 | tp->mac_mode = TG3_DEF_MAC_MODE; | 14087 | tp->mac_mode = TG3_DEF_MAC_MODE; |
13484 | 14088 | ||
@@ -13524,9 +14128,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13524 | * status register in those cases. | 14128 | * status register in those cases. |
13525 | */ | 14129 | */ |
13526 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) | 14130 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) |
13527 | tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG; | 14131 | tg3_flag_set(tp, USE_LINKCHG_REG); |
13528 | else | 14132 | else |
13529 | tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG; | 14133 | tg3_flag_clear(tp, USE_LINKCHG_REG); |
13530 | 14134 | ||
13531 | /* The led_ctrl is set during tg3_phy_probe, here we might | 14135 | /* The led_ctrl is set during tg3_phy_probe, here we might |
13532 | * have to force the link status polling mechanism based | 14136 | * have to force the link status polling mechanism based |
@@ -13536,26 +14140,30 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13536 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && | 14140 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && |
13537 | !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { | 14141 | !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { |
13538 | tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; | 14142 | tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; |
13539 | tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG; | 14143 | tg3_flag_set(tp, USE_LINKCHG_REG); |
13540 | } | 14144 | } |
13541 | 14145 | ||
13542 | /* For all SERDES we poll the MAC status register. */ | 14146 | /* For all SERDES we poll the MAC status register. */ |
13543 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) | 14147 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) |
13544 | tp->tg3_flags |= TG3_FLAG_POLL_SERDES; | 14148 | tg3_flag_set(tp, POLL_SERDES); |
13545 | else | 14149 | else |
13546 | tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; | 14150 | tg3_flag_clear(tp, POLL_SERDES); |
13547 | 14151 | ||
13548 | tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM; | 14152 | tp->rx_offset = NET_IP_ALIGN; |
13549 | tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; | 14153 | tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; |
13550 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && | 14154 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && |
13551 | (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { | 14155 | tg3_flag(tp, PCIX_MODE)) { |
13552 | tp->rx_offset -= NET_IP_ALIGN; | 14156 | tp->rx_offset = 0; |
13553 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | 14157 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS |
13554 | tp->rx_copy_thresh = ~(u16)0; | 14158 | tp->rx_copy_thresh = ~(u16)0; |
13555 | #endif | 14159 | #endif |
13556 | } | 14160 | } |
13557 | 14161 | ||
13558 | tp->rx_std_max_post = TG3_RX_RING_SIZE; | 14162 | tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1; |
14163 | tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1; | ||
14164 | tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1; | ||
14165 | |||
14166 | tp->rx_std_max_post = tp->rx_std_ring_mask + 1; | ||
13559 | 14167 | ||
13560 | /* Increment the rx prod index on the rx std ring by at most | 14168 | /* Increment the rx prod index on the rx std ring by at most |
13561 | * 8 for these chips to workaround hw errata. | 14169 | * 8 for these chips to workaround hw errata. |
@@ -13565,7 +14173,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13565 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) | 14173 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) |
13566 | tp->rx_std_max_post = 8; | 14174 | tp->rx_std_max_post = 8; |
13567 | 14175 | ||
13568 | if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) | 14176 | if (tg3_flag(tp, ASPM_WORKAROUND)) |
13569 | tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & | 14177 | tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & |
13570 | PCIE_PWR_MGMT_L1_THRESH_MSK; | 14178 | PCIE_PWR_MGMT_L1_THRESH_MSK; |
13571 | 14179 | ||
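[Editor's note] The ring-mask assignments a few lines up (rx_std_ring_mask, rx_jmb_ring_mask, rx_ret_ring_mask) depend on the ring sizes being powers of two, so a producer/consumer index can be wrapped with a bitwise AND instead of a modulo. A minimal standalone sketch of that pattern, using made-up names (ring_size, prod_idx) rather than the driver's fields:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint32_t ring_size = 512;          /* must be a power of two */
		const uint32_t ring_mask = ring_size - 1;
		uint32_t prod_idx = 0;
		int i;

		assert((ring_size & ring_mask) == 0);    /* power-of-two check */

		/* Advance the producer index; the AND wraps it at ring_size. */
		for (i = 0; i < 1030; i++)
			prod_idx = (prod_idx + 1) & ring_mask;

		printf("producer index after 1030 increments: %u\n", prod_idx); /* 6 */
		return 0;
	}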
@@ -13612,16 +14220,15 @@ static int __devinit tg3_get_device_address(struct tg3 *tp) | |||
13612 | #endif | 14220 | #endif |
13613 | 14221 | ||
13614 | mac_offset = 0x7c; | 14222 | mac_offset = 0x7c; |
13615 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) || | 14223 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || |
13616 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { | 14224 | tg3_flag(tp, 5780_CLASS)) { |
13617 | if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) | 14225 | if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) |
13618 | mac_offset = 0xcc; | 14226 | mac_offset = 0xcc; |
13619 | if (tg3_nvram_lock(tp)) | 14227 | if (tg3_nvram_lock(tp)) |
13620 | tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); | 14228 | tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); |
13621 | else | 14229 | else |
13622 | tg3_nvram_unlock(tp); | 14230 | tg3_nvram_unlock(tp); |
13623 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 14231 | } else if (tg3_flag(tp, 5717_PLUS)) { |
13624 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { | ||
13625 | if (PCI_FUNC(tp->pdev->devfn) & 1) | 14232 | if (PCI_FUNC(tp->pdev->devfn) & 1) |
13626 | mac_offset = 0xcc; | 14233 | mac_offset = 0xcc; |
13627 | if (PCI_FUNC(tp->pdev->devfn) > 1) | 14234 | if (PCI_FUNC(tp->pdev->devfn) > 1) |
@@ -13646,7 +14253,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp) | |||
13646 | } | 14253 | } |
13647 | if (!addr_ok) { | 14254 | if (!addr_ok) { |
13648 | /* Next, try NVRAM. */ | 14255 | /* Next, try NVRAM. */ |
13649 | if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) && | 14256 | if (!tg3_flag(tp, NO_NVRAM) && |
13650 | !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && | 14257 | !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && |
13651 | !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { | 14258 | !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { |
13652 | memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); | 14259 | memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); |
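[Editor's note] The NVRAM fallback above assembles the 6-byte station address from two 32-bit big-endian words: the low 16 bits of hi supply dev_addr[0..1] and all of lo supplies dev_addr[2..5]. The same byte mapping written with shifts on host-order values instead of the driver's memcpy on big-endian data; the hi/lo values below are made up purely for illustration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t hi = 0x0000001b;   /* only bits 15..0 are used */
		uint32_t lo = 0x2c3d4e5f;
		uint8_t mac[6];

		mac[0] = (hi >> 8) & 0xff;
		mac[1] = hi & 0xff;
		mac[2] = (lo >> 24) & 0xff;
		mac[3] = (lo >> 16) & 0xff;
		mac[4] = (lo >> 8) & 0xff;
		mac[5] = lo & 0xff;

		/* prints 00:1b:2c:3d:4e:5f */
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		return 0;
	}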
@@ -13697,7 +14304,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) | |||
13697 | */ | 14304 | */ |
13698 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && | 14305 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && |
13699 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && | 14306 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && |
13700 | !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) | 14307 | !tg3_flag(tp, PCI_EXPRESS)) |
13701 | goto out; | 14308 | goto out; |
13702 | 14309 | ||
13703 | #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) | 14310 | #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) |
@@ -13710,7 +14317,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) | |||
13710 | #endif | 14317 | #endif |
13711 | #endif | 14318 | #endif |
13712 | 14319 | ||
13713 | if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { | 14320 | if (tg3_flag(tp, 57765_PLUS)) { |
13714 | val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; | 14321 | val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; |
13715 | goto out; | 14322 | goto out; |
13716 | } | 14323 | } |
@@ -13729,8 +14336,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) | |||
13729 | * other than 5700 and 5701 which do not implement the | 14336 | * other than 5700 and 5701 which do not implement the |
13730 | * boundary bits. | 14337 | * boundary bits. |
13731 | */ | 14338 | */ |
13732 | if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && | 14339 | if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) { |
13733 | !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { | ||
13734 | switch (cacheline_size) { | 14340 | switch (cacheline_size) { |
13735 | case 16: | 14341 | case 16: |
13736 | case 32: | 14342 | case 32: |
@@ -13755,7 +14361,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) | |||
13755 | DMA_RWCTRL_WRITE_BNDRY_384_PCIX); | 14361 | DMA_RWCTRL_WRITE_BNDRY_384_PCIX); |
13756 | break; | 14362 | break; |
13757 | } | 14363 | } |
13758 | } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { | 14364 | } else if (tg3_flag(tp, PCI_EXPRESS)) { |
13759 | switch (cacheline_size) { | 14365 | switch (cacheline_size) { |
13760 | case 16: | 14366 | case 16: |
13761 | case 32: | 14367 | case 32: |
@@ -13904,13 +14510,19 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm | |||
13904 | 14510 | ||
13905 | #define TEST_BUFFER_SIZE 0x2000 | 14511 | #define TEST_BUFFER_SIZE 0x2000 |
13906 | 14512 | ||
14513 | static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = { | ||
14514 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, | ||
14515 | { }, | ||
14516 | }; | ||
14517 | |||
13907 | static int __devinit tg3_test_dma(struct tg3 *tp) | 14518 | static int __devinit tg3_test_dma(struct tg3 *tp) |
13908 | { | 14519 | { |
13909 | dma_addr_t buf_dma; | 14520 | dma_addr_t buf_dma; |
13910 | u32 *buf, saved_dma_rwctrl; | 14521 | u32 *buf, saved_dma_rwctrl; |
13911 | int ret = 0; | 14522 | int ret = 0; |
13912 | 14523 | ||
13913 | buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); | 14524 | buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, |
14525 | &buf_dma, GFP_KERNEL); | ||
13914 | if (!buf) { | 14526 | if (!buf) { |
13915 | ret = -ENOMEM; | 14527 | ret = -ENOMEM; |
13916 | goto out_nofree; | 14528 | goto out_nofree; |
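[Editor's note] The allocation above is part of the tree-wide move from the PCI-specific pci_alloc_consistent()/pci_free_consistent() wrappers to the generic DMA API. A minimal sketch of the new pattern with a hypothetical buffer size and error path, not the driver's exact code:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/pci.h>

	static int example_run_dma_test(struct pci_dev *pdev)
	{
		dma_addr_t buf_dma;
		void *buf;

		/* GFP_KERNEL is now passed explicitly; the old wrapper implied it. */
		buf = dma_alloc_coherent(&pdev->dev, 0x2000, &buf_dma, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* ... exercise the DMA engine against buf / buf_dma ... */

		dma_free_coherent(&pdev->dev, 0x2000, buf, buf_dma);
		return 0;
	}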
@@ -13921,13 +14533,13 @@ static int __devinit tg3_test_dma(struct tg3 *tp) | |||
13921 | 14533 | ||
13922 | tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); | 14534 | tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); |
13923 | 14535 | ||
13924 | if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) | 14536 | if (tg3_flag(tp, 57765_PLUS)) |
13925 | goto out; | 14537 | goto out; |
13926 | 14538 | ||
13927 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { | 14539 | if (tg3_flag(tp, PCI_EXPRESS)) { |
13928 | /* DMA read watermark not used on PCIE */ | 14540 | /* DMA read watermark not used on PCIE */ |
13929 | tp->dma_rwctrl |= 0x00180000; | 14541 | tp->dma_rwctrl |= 0x00180000; |
13930 | } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { | 14542 | } else if (!tg3_flag(tp, PCIX_MODE)) { |
13931 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || | 14543 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || |
13932 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) | 14544 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) |
13933 | tp->dma_rwctrl |= 0x003f0000; | 14545 | tp->dma_rwctrl |= 0x003f0000; |
@@ -13943,7 +14555,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp) | |||
13943 | * do the less restrictive ONE_DMA workaround for | 14555 | * do the less restrictive ONE_DMA workaround for |
13944 | * better performance. | 14556 | * better performance. |
13945 | */ | 14557 | */ |
13946 | if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) && | 14558 | if (tg3_flag(tp, 40BIT_DMA_BUG) && |
13947 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) | 14559 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) |
13948 | tp->dma_rwctrl |= 0x8000; | 14560 | tp->dma_rwctrl |= 0x8000; |
13949 | else if (ccval == 0x6 || ccval == 0x7) | 14561 | else if (ccval == 0x6 || ccval == 0x7) |
@@ -14072,17 +14684,11 @@ static int __devinit tg3_test_dma(struct tg3 *tp) | |||
14072 | } | 14684 | } |
14073 | if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != | 14685 | if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != |
14074 | DMA_RWCTRL_WRITE_BNDRY_16) { | 14686 | DMA_RWCTRL_WRITE_BNDRY_16) { |
14075 | static struct pci_device_id dma_wait_state_chipsets[] = { | ||
14076 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, | ||
14077 | PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, | ||
14078 | { }, | ||
14079 | }; | ||
14080 | |||
14081 | /* DMA test passed without adjusting DMA boundary, | 14687 | /* DMA test passed without adjusting DMA boundary, |
14082 | * now look for chipsets that are known to expose the | 14688 | * now look for chipsets that are known to expose the |
14083 | * DMA bug without failing the test. | 14689 | * DMA bug without failing the test. |
14084 | */ | 14690 | */ |
14085 | if (pci_dev_present(dma_wait_state_chipsets)) { | 14691 | if (pci_dev_present(tg3_dma_wait_state_chipsets)) { |
14086 | tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; | 14692 | tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; |
14087 | tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; | 14693 | tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; |
14088 | } else { | 14694 | } else { |
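[Editor's note] Moving the Apple UniNorth table to file scope and checking it with pci_dev_present() is the usual idiom for detecting a known-problem host bridge anywhere in the system. A stripped-down sketch of that idiom; the vendor/device pair is the one the driver already checks, while the surrounding helper is hypothetical:

	#include <linux/pci.h>
	#include <linux/pci_ids.h>
	#include <linux/types.h>

	static const struct pci_device_id wait_state_chipsets[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
		{ }
	};

	static bool example_needs_strict_dma_boundary(void)
	{
		/* pci_dev_present() scans all PCI devices in the system and
		 * returns nonzero if any entry in the table matches.
		 */
		return pci_dev_present(wait_state_chipsets);
	}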
@@ -14094,31 +14700,14 @@ static int __devinit tg3_test_dma(struct tg3 *tp) | |||
14094 | } | 14700 | } |
14095 | 14701 | ||
14096 | out: | 14702 | out: |
14097 | pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma); | 14703 | dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); |
14098 | out_nofree: | 14704 | out_nofree: |
14099 | return ret; | 14705 | return ret; |
14100 | } | 14706 | } |
14101 | 14707 | ||
14102 | static void __devinit tg3_init_link_config(struct tg3 *tp) | ||
14103 | { | ||
14104 | tp->link_config.advertising = | ||
14105 | (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | | ||
14106 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | | ||
14107 | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | | ||
14108 | ADVERTISED_Autoneg | ADVERTISED_MII); | ||
14109 | tp->link_config.speed = SPEED_INVALID; | ||
14110 | tp->link_config.duplex = DUPLEX_INVALID; | ||
14111 | tp->link_config.autoneg = AUTONEG_ENABLE; | ||
14112 | tp->link_config.active_speed = SPEED_INVALID; | ||
14113 | tp->link_config.active_duplex = DUPLEX_INVALID; | ||
14114 | tp->link_config.orig_speed = SPEED_INVALID; | ||
14115 | tp->link_config.orig_duplex = DUPLEX_INVALID; | ||
14116 | tp->link_config.orig_autoneg = AUTONEG_INVALID; | ||
14117 | } | ||
14118 | |||
14119 | static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) | 14708 | static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) |
14120 | { | 14709 | { |
14121 | if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { | 14710 | if (tg3_flag(tp, 57765_PLUS)) { |
14122 | tp->bufmgr_config.mbuf_read_dma_low_water = | 14711 | tp->bufmgr_config.mbuf_read_dma_low_water = |
14123 | DEFAULT_MB_RDMA_LOW_WATER_5705; | 14712 | DEFAULT_MB_RDMA_LOW_WATER_5705; |
14124 | tp->bufmgr_config.mbuf_mac_rx_low_water = | 14713 | tp->bufmgr_config.mbuf_mac_rx_low_water = |
@@ -14132,7 +14721,7 @@ static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) | |||
14132 | DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; | 14721 | DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; |
14133 | tp->bufmgr_config.mbuf_high_water_jumbo = | 14722 | tp->bufmgr_config.mbuf_high_water_jumbo = |
14134 | DEFAULT_MB_HIGH_WATER_JUMBO_57765; | 14723 | DEFAULT_MB_HIGH_WATER_JUMBO_57765; |
14135 | } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 14724 | } else if (tg3_flag(tp, 5705_PLUS)) { |
14136 | tp->bufmgr_config.mbuf_read_dma_low_water = | 14725 | tp->bufmgr_config.mbuf_read_dma_low_water = |
14137 | DEFAULT_MB_RDMA_LOW_WATER_5705; | 14726 | DEFAULT_MB_RDMA_LOW_WATER_5705; |
14138 | tp->bufmgr_config.mbuf_mac_rx_low_water = | 14727 | tp->bufmgr_config.mbuf_mac_rx_low_water = |
@@ -14196,6 +14785,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp) | |||
14196 | case TG3_PHY_ID_BCM5718S: return "5718S"; | 14785 | case TG3_PHY_ID_BCM5718S: return "5718S"; |
14197 | case TG3_PHY_ID_BCM57765: return "57765"; | 14786 | case TG3_PHY_ID_BCM57765: return "57765"; |
14198 | case TG3_PHY_ID_BCM5719C: return "5719C"; | 14787 | case TG3_PHY_ID_BCM5719C: return "5719C"; |
14788 | case TG3_PHY_ID_BCM5720C: return "5720C"; | ||
14199 | case TG3_PHY_ID_BCM8002: return "8002/serdes"; | 14789 | case TG3_PHY_ID_BCM8002: return "8002/serdes"; |
14200 | case 0: return "serdes"; | 14790 | case 0: return "serdes"; |
14201 | default: return "unknown"; | 14791 | default: return "unknown"; |
@@ -14204,10 +14794,10 @@ static char * __devinit tg3_phy_string(struct tg3 *tp) | |||
14204 | 14794 | ||
14205 | static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) | 14795 | static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) |
14206 | { | 14796 | { |
14207 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { | 14797 | if (tg3_flag(tp, PCI_EXPRESS)) { |
14208 | strcpy(str, "PCI Express"); | 14798 | strcpy(str, "PCI Express"); |
14209 | return str; | 14799 | return str; |
14210 | } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { | 14800 | } else if (tg3_flag(tp, PCIX_MODE)) { |
14211 | u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; | 14801 | u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; |
14212 | 14802 | ||
14213 | strcpy(str, "PCIX:"); | 14803 | strcpy(str, "PCIX:"); |
@@ -14226,12 +14816,12 @@ static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) | |||
14226 | strcat(str, "100MHz"); | 14816 | strcat(str, "100MHz"); |
14227 | } else { | 14817 | } else { |
14228 | strcpy(str, "PCI:"); | 14818 | strcpy(str, "PCI:"); |
14229 | if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) | 14819 | if (tg3_flag(tp, PCI_HIGH_SPEED)) |
14230 | strcat(str, "66MHz"); | 14820 | strcat(str, "66MHz"); |
14231 | else | 14821 | else |
14232 | strcat(str, "33MHz"); | 14822 | strcat(str, "33MHz"); |
14233 | } | 14823 | } |
14234 | if (tp->tg3_flags & TG3_FLAG_PCI_32BIT) | 14824 | if (tg3_flag(tp, PCI_32BIT)) |
14235 | strcat(str, ":32-bit"); | 14825 | strcat(str, ":32-bit"); |
14236 | else | 14826 | else |
14237 | strcat(str, ":64-bit"); | 14827 | strcat(str, ":64-bit"); |
@@ -14290,7 +14880,7 @@ static void __devinit tg3_init_coal(struct tg3 *tp) | |||
14290 | ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; | 14880 | ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; |
14291 | } | 14881 | } |
14292 | 14882 | ||
14293 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 14883 | if (tg3_flag(tp, 5705_PLUS)) { |
14294 | ec->rx_coalesce_usecs_irq = 0; | 14884 | ec->rx_coalesce_usecs_irq = 0; |
14295 | ec->tx_coalesce_usecs_irq = 0; | 14885 | ec->tx_coalesce_usecs_irq = 0; |
14296 | ec->stats_block_coalesce_usecs = 0; | 14886 | ec->stats_block_coalesce_usecs = 0; |
@@ -14308,28 +14898,8 @@ static const struct net_device_ops tg3_netdev_ops = { | |||
14308 | .ndo_do_ioctl = tg3_ioctl, | 14898 | .ndo_do_ioctl = tg3_ioctl, |
14309 | .ndo_tx_timeout = tg3_tx_timeout, | 14899 | .ndo_tx_timeout = tg3_tx_timeout, |
14310 | .ndo_change_mtu = tg3_change_mtu, | 14900 | .ndo_change_mtu = tg3_change_mtu, |
14311 | #if TG3_VLAN_TAG_USED | 14901 | .ndo_fix_features = tg3_fix_features, |
14312 | .ndo_vlan_rx_register = tg3_vlan_rx_register, | 14902 | .ndo_set_features = tg3_set_features, |
14313 | #endif | ||
14314 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
14315 | .ndo_poll_controller = tg3_poll_controller, | ||
14316 | #endif | ||
14317 | }; | ||
14318 | |||
14319 | static const struct net_device_ops tg3_netdev_ops_dma_bug = { | ||
14320 | .ndo_open = tg3_open, | ||
14321 | .ndo_stop = tg3_close, | ||
14322 | .ndo_start_xmit = tg3_start_xmit_dma_bug, | ||
14323 | .ndo_get_stats64 = tg3_get_stats64, | ||
14324 | .ndo_validate_addr = eth_validate_addr, | ||
14325 | .ndo_set_multicast_list = tg3_set_rx_mode, | ||
14326 | .ndo_set_mac_address = tg3_set_mac_addr, | ||
14327 | .ndo_do_ioctl = tg3_ioctl, | ||
14328 | .ndo_tx_timeout = tg3_tx_timeout, | ||
14329 | .ndo_change_mtu = tg3_change_mtu, | ||
14330 | #if TG3_VLAN_TAG_USED | ||
14331 | .ndo_vlan_rx_register = tg3_vlan_rx_register, | ||
14332 | #endif | ||
14333 | #ifdef CONFIG_NET_POLL_CONTROLLER | 14903 | #ifdef CONFIG_NET_POLL_CONTROLLER |
14334 | .ndo_poll_controller = tg3_poll_controller, | 14904 | .ndo_poll_controller = tg3_poll_controller, |
14335 | #endif | 14905 | #endif |
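[Editor's note] With the per-VLAN hook and the separate dma-bug netdev_ops table gone, runtime feature toggling is routed through the new ndo_fix_features/ndo_set_features callbacks. The sketch below shows the general shape of that pair in the u32-features era for a hypothetical device that cannot do TSO above the standard MTU; it is only loosely modeled on, and not identical to, tg3_fix_features()/tg3_set_features():

	#include <linux/if_ether.h>
	#include <linux/netdevice.h>

	/* Called by the core before a feature change is committed; may clear
	 * bits the hardware cannot honor in the current configuration.
	 */
	static u32 example_fix_features(struct net_device *dev, u32 features)
	{
		if (dev->mtu > ETH_DATA_LEN)
			features &= ~NETIF_F_ALL_TSO;

		return features;
	}

	/* Apply whatever the (already fixed-up) feature set requires in hardware. */
	static int example_set_features(struct net_device *dev, u32 features)
	{
		u32 changed = dev->features ^ features;

		if (changed & NETIF_F_RXCSUM) {
			/* reprogram receive checksum offload in hardware here */
		}

		return 0;
	}

	static const struct net_device_ops example_netdev_ops = {
		.ndo_fix_features	= example_fix_features,
		.ndo_set_features	= example_set_features,
	};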
@@ -14344,6 +14914,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14344 | u32 sndmbx, rcvmbx, intmbx; | 14914 | u32 sndmbx, rcvmbx, intmbx; |
14345 | char str[40]; | 14915 | char str[40]; |
14346 | u64 dma_mask, persist_dma_mask; | 14916 | u64 dma_mask, persist_dma_mask; |
14917 | u32 features = 0; | ||
14347 | 14918 | ||
14348 | printk_once(KERN_INFO "%s\n", version); | 14919 | printk_once(KERN_INFO "%s\n", version); |
14349 | 14920 | ||
@@ -14379,10 +14950,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14379 | 14950 | ||
14380 | SET_NETDEV_DEV(dev, &pdev->dev); | 14951 | SET_NETDEV_DEV(dev, &pdev->dev); |
14381 | 14952 | ||
14382 | #if TG3_VLAN_TAG_USED | ||
14383 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | ||
14384 | #endif | ||
14385 | |||
14386 | tp = netdev_priv(dev); | 14953 | tp = netdev_priv(dev); |
14387 | tp->pdev = pdev; | 14954 | tp->pdev = pdev; |
14388 | tp->dev = dev; | 14955 | tp->dev = dev; |
@@ -14427,13 +14994,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14427 | goto err_out_free_dev; | 14994 | goto err_out_free_dev; |
14428 | } | 14995 | } |
14429 | 14996 | ||
14430 | tg3_init_link_config(tp); | ||
14431 | |||
14432 | tp->rx_pending = TG3_DEF_RX_RING_PENDING; | 14997 | tp->rx_pending = TG3_DEF_RX_RING_PENDING; |
14433 | tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; | 14998 | tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; |
14434 | 14999 | ||
14435 | dev->ethtool_ops = &tg3_ethtool_ops; | 15000 | dev->ethtool_ops = &tg3_ethtool_ops; |
14436 | dev->watchdog_timeo = TG3_TX_TIMEOUT; | 15001 | dev->watchdog_timeo = TG3_TX_TIMEOUT; |
15002 | dev->netdev_ops = &tg3_netdev_ops; | ||
14437 | dev->irq = pdev->irq; | 15003 | dev->irq = pdev->irq; |
14438 | 15004 | ||
14439 | err = tg3_get_invariants(tp); | 15005 | err = tg3_get_invariants(tp); |
@@ -14443,23 +15009,15 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14443 | goto err_out_iounmap; | 15009 | goto err_out_iounmap; |
14444 | } | 15010 | } |
14445 | 15011 | ||
14446 | if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && | ||
14447 | tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 && | ||
14448 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719) | ||
14449 | dev->netdev_ops = &tg3_netdev_ops; | ||
14450 | else | ||
14451 | dev->netdev_ops = &tg3_netdev_ops_dma_bug; | ||
14452 | |||
14453 | |||
14454 | /* The EPB bridge inside 5714, 5715, and 5780 and any | 15012 | /* The EPB bridge inside 5714, 5715, and 5780 and any |
14455 | * device behind the EPB cannot support DMA addresses > 40-bit. | 15013 | * device behind the EPB cannot support DMA addresses > 40-bit. |
14456 | * On 64-bit systems with IOMMU, use 40-bit dma_mask. | 15014 | * On 64-bit systems with IOMMU, use 40-bit dma_mask. |
14457 | * On 64-bit systems without IOMMU, use 64-bit dma_mask and | 15015 | * On 64-bit systems without IOMMU, use 64-bit dma_mask and |
14458 | * do DMA address check in tg3_start_xmit(). | 15016 | * do DMA address check in tg3_start_xmit(). |
14459 | */ | 15017 | */ |
14460 | if (tp->tg3_flags2 & TG3_FLG2_IS_5788) | 15018 | if (tg3_flag(tp, IS_5788)) |
14461 | persist_dma_mask = dma_mask = DMA_BIT_MASK(32); | 15019 | persist_dma_mask = dma_mask = DMA_BIT_MASK(32); |
14462 | else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) { | 15020 | else if (tg3_flag(tp, 40BIT_DMA_BUG)) { |
14463 | persist_dma_mask = dma_mask = DMA_BIT_MASK(40); | 15021 | persist_dma_mask = dma_mask = DMA_BIT_MASK(40); |
14464 | #ifdef CONFIG_HIGHMEM | 15022 | #ifdef CONFIG_HIGHMEM |
14465 | dma_mask = DMA_BIT_MASK(64); | 15023 | dma_mask = DMA_BIT_MASK(64); |
@@ -14471,7 +15029,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14471 | if (dma_mask > DMA_BIT_MASK(32)) { | 15029 | if (dma_mask > DMA_BIT_MASK(32)) { |
14472 | err = pci_set_dma_mask(pdev, dma_mask); | 15030 | err = pci_set_dma_mask(pdev, dma_mask); |
14473 | if (!err) { | 15031 | if (!err) { |
14474 | dev->features |= NETIF_F_HIGHDMA; | 15032 | features |= NETIF_F_HIGHDMA; |
14475 | err = pci_set_consistent_dma_mask(pdev, | 15033 | err = pci_set_consistent_dma_mask(pdev, |
14476 | persist_dma_mask); | 15034 | persist_dma_mask); |
14477 | if (err < 0) { | 15035 | if (err < 0) { |
@@ -14492,48 +15050,58 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14492 | 15050 | ||
14493 | tg3_init_bufmgr_config(tp); | 15051 | tg3_init_bufmgr_config(tp); |
14494 | 15052 | ||
14495 | /* Selectively allow TSO based on operating conditions */ | 15053 | features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; |
14496 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || | ||
14497 | (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) | ||
14498 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; | ||
14499 | else { | ||
14500 | tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG); | ||
14501 | tp->fw_needed = NULL; | ||
14502 | } | ||
14503 | 15054 | ||
14504 | if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) | 15055 | /* 5700 B0 chips do not support checksumming correctly due |
14505 | tp->fw_needed = FIRMWARE_TG3; | 15056 | * to hardware bugs. |
15057 | */ | ||
15058 | if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) { | ||
15059 | features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; | ||
15060 | |||
15061 | if (tg3_flag(tp, 5755_PLUS)) | ||
15062 | features |= NETIF_F_IPV6_CSUM; | ||
15063 | } | ||
14506 | 15064 | ||
14507 | /* TSO is on by default on chips that support hardware TSO. | 15065 | /* TSO is on by default on chips that support hardware TSO. |
14508 | * Firmware TSO on older chips gives lower performance, so it | 15066 | * Firmware TSO on older chips gives lower performance, so it |
14509 | * is off by default, but can be enabled using ethtool. | 15067 | * is off by default, but can be enabled using ethtool. |
14510 | */ | 15068 | */ |
14511 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) && | 15069 | if ((tg3_flag(tp, HW_TSO_1) || |
14512 | (dev->features & NETIF_F_IP_CSUM)) { | 15070 | tg3_flag(tp, HW_TSO_2) || |
14513 | dev->features |= NETIF_F_TSO; | 15071 | tg3_flag(tp, HW_TSO_3)) && |
14514 | vlan_features_add(dev, NETIF_F_TSO); | 15072 | (features & NETIF_F_IP_CSUM)) |
14515 | } | 15073 | features |= NETIF_F_TSO; |
14516 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || | 15074 | if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) { |
14517 | (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) { | 15075 | if (features & NETIF_F_IPV6_CSUM) |
14518 | if (dev->features & NETIF_F_IPV6_CSUM) { | 15076 | features |= NETIF_F_TSO6; |
14519 | dev->features |= NETIF_F_TSO6; | 15077 | if (tg3_flag(tp, HW_TSO_3) || |
14520 | vlan_features_add(dev, NETIF_F_TSO6); | ||
14521 | } | ||
14522 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || | ||
14523 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | 15078 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || |
14524 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && | 15079 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && |
14525 | GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || | 15080 | GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || |
14526 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 15081 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || |
14527 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { | 15082 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) |
14528 | dev->features |= NETIF_F_TSO_ECN; | 15083 | features |= NETIF_F_TSO_ECN; |
14529 | vlan_features_add(dev, NETIF_F_TSO_ECN); | ||
14530 | } | ||
14531 | } | 15084 | } |
14532 | 15085 | ||
15086 | dev->features |= features; | ||
15087 | dev->vlan_features |= features; | ||
15088 | |||
15089 | /* | ||
15090 | * Add loopback capability only for a subset of devices that support | ||
15091 | * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY | ||
15092 | * loopback for the remaining devices. | ||
15093 | */ | ||
15094 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 && | ||
15095 | !tg3_flag(tp, CPMU_PRESENT)) | ||
15096 | /* Add the loopback capability */ | ||
15097 | features |= NETIF_F_LOOPBACK; | ||
15098 | |||
15099 | dev->hw_features |= features; | ||
15100 | |||
14533 | if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && | 15101 | if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && |
14534 | !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && | 15102 | !tg3_flag(tp, TSO_CAPABLE) && |
14535 | !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { | 15103 | !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { |
14536 | tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64; | 15104 | tg3_flag_set(tp, MAX_RXPEND_64); |
14537 | tp->rx_pending = 63; | 15105 | tp->rx_pending = 63; |
14538 | } | 15106 | } |
14539 | 15107 | ||
@@ -14544,7 +15112,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14544 | goto err_out_iounmap; | 15112 | goto err_out_iounmap; |
14545 | } | 15113 | } |
14546 | 15114 | ||
14547 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { | 15115 | if (tg3_flag(tp, ENABLE_APE)) { |
14548 | tp->aperegs = pci_ioremap_bar(pdev, BAR_2); | 15116 | tp->aperegs = pci_ioremap_bar(pdev, BAR_2); |
14549 | if (!tp->aperegs) { | 15117 | if (!tp->aperegs) { |
14550 | dev_err(&pdev->dev, | 15118 | dev_err(&pdev->dev, |
@@ -14555,7 +15123,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14555 | 15123 | ||
14556 | tg3_ape_lock_init(tp); | 15124 | tg3_ape_lock_init(tp); |
14557 | 15125 | ||
14558 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) | 15126 | if (tg3_flag(tp, ENABLE_ASF)) |
14559 | tg3_read_dash_ver(tp); | 15127 | tg3_read_dash_ver(tp); |
14560 | } | 15128 | } |
14561 | 15129 | ||
@@ -14576,14 +15144,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14576 | goto err_out_apeunmap; | 15144 | goto err_out_apeunmap; |
14577 | } | 15145 | } |
14578 | 15146 | ||
14579 | /* flow control autonegotiation is default behavior */ | ||
14580 | tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; | ||
14581 | tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; | ||
14582 | |||
14583 | intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; | 15147 | intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; |
14584 | rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; | 15148 | rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; |
14585 | sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; | 15149 | sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; |
14586 | for (i = 0; i < TG3_IRQ_MAX_VECS; i++) { | 15150 | for (i = 0; i < tp->irq_max; i++) { |
14587 | struct tg3_napi *tnapi = &tp->napi[i]; | 15151 | struct tg3_napi *tnapi = &tp->napi[i]; |
14588 | 15152 | ||
14589 | tnapi->tp = tp; | 15153 | tnapi->tp = tp; |
@@ -14598,15 +15162,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14598 | tnapi->consmbox = rcvmbx; | 15162 | tnapi->consmbox = rcvmbx; |
14599 | tnapi->prodmbox = sndmbx; | 15163 | tnapi->prodmbox = sndmbx; |
14600 | 15164 | ||
14601 | if (i) { | 15165 | if (i) |
14602 | tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); | 15166 | tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); |
14603 | netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64); | 15167 | else |
14604 | } else { | ||
14605 | tnapi->coal_now = HOSTCC_MODE_NOW; | 15168 | tnapi->coal_now = HOSTCC_MODE_NOW; |
14606 | netif_napi_add(dev, &tnapi->napi, tg3_poll, 64); | ||
14607 | } | ||
14608 | 15169 | ||
14609 | if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX)) | 15170 | if (!tg3_flag(tp, SUPPORT_MSIX)) |
14610 | break; | 15171 | break; |
14611 | 15172 | ||
14612 | /* | 15173 | /* |
@@ -14660,21 +15221,25 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14660 | ethtype = "10/100/1000Base-T"; | 15221 | ethtype = "10/100/1000Base-T"; |
14661 | 15222 | ||
14662 | netdev_info(dev, "attached PHY is %s (%s Ethernet) " | 15223 | netdev_info(dev, "attached PHY is %s (%s Ethernet) " |
14663 | "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype, | 15224 | "(WireSpeed[%d], EEE[%d])\n", |
14664 | (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0); | 15225 | tg3_phy_string(tp), ethtype, |
15226 | (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0, | ||
15227 | (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0); | ||
14665 | } | 15228 | } |
14666 | 15229 | ||
14667 | netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", | 15230 | netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", |
14668 | (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0, | 15231 | (dev->features & NETIF_F_RXCSUM) != 0, |
14669 | (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0, | 15232 | tg3_flag(tp, USE_LINKCHG_REG) != 0, |
14670 | (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0, | 15233 | (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0, |
14671 | (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0, | 15234 | tg3_flag(tp, ENABLE_ASF) != 0, |
14672 | (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); | 15235 | tg3_flag(tp, TSO_CAPABLE) != 0); |
14673 | netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n", | 15236 | netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n", |
14674 | tp->dma_rwctrl, | 15237 | tp->dma_rwctrl, |
14675 | pdev->dma_mask == DMA_BIT_MASK(32) ? 32 : | 15238 | pdev->dma_mask == DMA_BIT_MASK(32) ? 32 : |
14676 | ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64); | 15239 | ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64); |
14677 | 15240 | ||
15241 | pci_save_state(pdev); | ||
15242 | |||
14678 | return 0; | 15243 | return 0; |
14679 | 15244 | ||
14680 | err_out_apeunmap: | 15245 | err_out_apeunmap: |
@@ -14711,9 +15276,9 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev) | |||
14711 | if (tp->fw) | 15276 | if (tp->fw) |
14712 | release_firmware(tp->fw); | 15277 | release_firmware(tp->fw); |
14713 | 15278 | ||
14714 | flush_scheduled_work(); | 15279 | cancel_work_sync(&tp->reset_task); |
14715 | 15280 | ||
14716 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 15281 | if (!tg3_flag(tp, USE_PHYLIB)) { |
14717 | tg3_phy_fini(tp); | 15282 | tg3_phy_fini(tp); |
14718 | tg3_mdio_fini(tp); | 15283 | tg3_mdio_fini(tp); |
14719 | } | 15284 | } |
@@ -14734,23 +15299,18 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev) | |||
14734 | } | 15299 | } |
14735 | } | 15300 | } |
14736 | 15301 | ||
14737 | static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) | 15302 | #ifdef CONFIG_PM_SLEEP |
15303 | static int tg3_suspend(struct device *device) | ||
14738 | { | 15304 | { |
15305 | struct pci_dev *pdev = to_pci_dev(device); | ||
14739 | struct net_device *dev = pci_get_drvdata(pdev); | 15306 | struct net_device *dev = pci_get_drvdata(pdev); |
14740 | struct tg3 *tp = netdev_priv(dev); | 15307 | struct tg3 *tp = netdev_priv(dev); |
14741 | pci_power_t target_state; | ||
14742 | int err; | 15308 | int err; |
14743 | 15309 | ||
14744 | /* PCI register 4 needs to be saved whether netif_running() or not. | ||
14745 | * MSI address and data need to be saved if using MSI and | ||
14746 | * netif_running(). | ||
14747 | */ | ||
14748 | pci_save_state(pdev); | ||
14749 | |||
14750 | if (!netif_running(dev)) | 15310 | if (!netif_running(dev)) |
14751 | return 0; | 15311 | return 0; |
14752 | 15312 | ||
14753 | flush_scheduled_work(); | 15313 | flush_work_sync(&tp->reset_task); |
14754 | tg3_phy_stop(tp); | 15314 | tg3_phy_stop(tp); |
14755 | tg3_netif_stop(tp); | 15315 | tg3_netif_stop(tp); |
14756 | 15316 | ||
@@ -14764,18 +15324,16 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) | |||
14764 | 15324 | ||
14765 | tg3_full_lock(tp, 0); | 15325 | tg3_full_lock(tp, 0); |
14766 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 15326 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
14767 | tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; | 15327 | tg3_flag_clear(tp, INIT_COMPLETE); |
14768 | tg3_full_unlock(tp); | 15328 | tg3_full_unlock(tp); |
14769 | 15329 | ||
14770 | target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot; | 15330 | err = tg3_power_down_prepare(tp); |
14771 | |||
14772 | err = tg3_set_power_state(tp, target_state); | ||
14773 | if (err) { | 15331 | if (err) { |
14774 | int err2; | 15332 | int err2; |
14775 | 15333 | ||
14776 | tg3_full_lock(tp, 0); | 15334 | tg3_full_lock(tp, 0); |
14777 | 15335 | ||
14778 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 15336 | tg3_flag_set(tp, INIT_COMPLETE); |
14779 | err2 = tg3_restart_hw(tp, 1); | 15337 | err2 = tg3_restart_hw(tp, 1); |
14780 | if (err2) | 15338 | if (err2) |
14781 | goto out; | 15339 | goto out; |
@@ -14796,26 +15354,21 @@ out: | |||
14796 | return err; | 15354 | return err; |
14797 | } | 15355 | } |
14798 | 15356 | ||
14799 | static int tg3_resume(struct pci_dev *pdev) | 15357 | static int tg3_resume(struct device *device) |
14800 | { | 15358 | { |
15359 | struct pci_dev *pdev = to_pci_dev(device); | ||
14801 | struct net_device *dev = pci_get_drvdata(pdev); | 15360 | struct net_device *dev = pci_get_drvdata(pdev); |
14802 | struct tg3 *tp = netdev_priv(dev); | 15361 | struct tg3 *tp = netdev_priv(dev); |
14803 | int err; | 15362 | int err; |
14804 | 15363 | ||
14805 | pci_restore_state(tp->pdev); | ||
14806 | |||
14807 | if (!netif_running(dev)) | 15364 | if (!netif_running(dev)) |
14808 | return 0; | 15365 | return 0; |
14809 | 15366 | ||
14810 | err = tg3_set_power_state(tp, PCI_D0); | ||
14811 | if (err) | ||
14812 | return err; | ||
14813 | |||
14814 | netif_device_attach(dev); | 15367 | netif_device_attach(dev); |
14815 | 15368 | ||
14816 | tg3_full_lock(tp, 0); | 15369 | tg3_full_lock(tp, 0); |
14817 | 15370 | ||
14818 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 15371 | tg3_flag_set(tp, INIT_COMPLETE); |
14819 | err = tg3_restart_hw(tp, 1); | 15372 | err = tg3_restart_hw(tp, 1); |
14820 | if (err) | 15373 | if (err) |
14821 | goto out; | 15374 | goto out; |
@@ -14834,13 +15387,166 @@ out: | |||
14834 | return err; | 15387 | return err; |
14835 | } | 15388 | } |
14836 | 15389 | ||
15390 | static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); | ||
15391 | #define TG3_PM_OPS (&tg3_pm_ops) | ||
15392 | |||
15393 | #else | ||
15394 | |||
15395 | #define TG3_PM_OPS NULL | ||
15396 | |||
15397 | #endif /* CONFIG_PM_SLEEP */ | ||
15398 | |||
15399 | /** | ||
15400 | * tg3_io_error_detected - called when PCI error is detected | ||
15401 | * @pdev: Pointer to PCI device | ||
15402 | * @state: The current pci connection state | ||
15403 | * | ||
15404 | * This function is called after a PCI bus error affecting | ||
15405 | * this device has been detected. | ||
15406 | */ | ||
15407 | static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, | ||
15408 | pci_channel_state_t state) | ||
15409 | { | ||
15410 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
15411 | struct tg3 *tp = netdev_priv(netdev); | ||
15412 | pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET; | ||
15413 | |||
15414 | netdev_info(netdev, "PCI I/O error detected\n"); | ||
15415 | |||
15416 | rtnl_lock(); | ||
15417 | |||
15418 | if (!netif_running(netdev)) | ||
15419 | goto done; | ||
15420 | |||
15421 | tg3_phy_stop(tp); | ||
15422 | |||
15423 | tg3_netif_stop(tp); | ||
15424 | |||
15425 | del_timer_sync(&tp->timer); | ||
15426 | tg3_flag_clear(tp, RESTART_TIMER); | ||
15427 | |||
15428 | /* Want to make sure that the reset task doesn't run */ | ||
15429 | cancel_work_sync(&tp->reset_task); | ||
15430 | tg3_flag_clear(tp, TX_RECOVERY_PENDING); | ||
15431 | tg3_flag_clear(tp, RESTART_TIMER); | ||
15432 | |||
15433 | netif_device_detach(netdev); | ||
15434 | |||
15435 | /* Clean up software state, even if MMIO is blocked */ | ||
15436 | tg3_full_lock(tp, 0); | ||
15437 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); | ||
15438 | tg3_full_unlock(tp); | ||
15439 | |||
15440 | done: | ||
15441 | if (state == pci_channel_io_perm_failure) | ||
15442 | err = PCI_ERS_RESULT_DISCONNECT; | ||
15443 | else | ||
15444 | pci_disable_device(pdev); | ||
15445 | |||
15446 | rtnl_unlock(); | ||
15447 | |||
15448 | return err; | ||
15449 | } | ||
15450 | |||
15451 | /** | ||
15452 | * tg3_io_slot_reset - called after the pci bus has been reset. | ||
15453 | * @pdev: Pointer to PCI device | ||
15454 | * | ||
15455 | * Restart the card from scratch, as if from a cold-boot. | ||
15456 | * At this point, the card has experienced a hard reset, | ||
15457 | * followed by fixups by BIOS, and has its config space | ||
15458 | * set up identically to what it was at cold boot. | ||
15459 | */ | ||
15460 | static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) | ||
15461 | { | ||
15462 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
15463 | struct tg3 *tp = netdev_priv(netdev); | ||
15464 | pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; | ||
15465 | int err; | ||
15466 | |||
15467 | rtnl_lock(); | ||
15468 | |||
15469 | if (pci_enable_device(pdev)) { | ||
15470 | netdev_err(netdev, "Cannot re-enable PCI device after reset.\n"); | ||
15471 | goto done; | ||
15472 | } | ||
15473 | |||
15474 | pci_set_master(pdev); | ||
15475 | pci_restore_state(pdev); | ||
15476 | pci_save_state(pdev); | ||
15477 | |||
15478 | if (!netif_running(netdev)) { | ||
15479 | rc = PCI_ERS_RESULT_RECOVERED; | ||
15480 | goto done; | ||
15481 | } | ||
15482 | |||
15483 | err = tg3_power_up(tp); | ||
15484 | if (err) { | ||
15485 | netdev_err(netdev, "Failed to restore register access.\n"); | ||
15486 | goto done; | ||
15487 | } | ||
15488 | |||
15489 | rc = PCI_ERS_RESULT_RECOVERED; | ||
15490 | |||
15491 | done: | ||
15492 | rtnl_unlock(); | ||
15493 | |||
15494 | return rc; | ||
15495 | } | ||
15496 | |||
15497 | /** | ||
15498 | * tg3_io_resume - called when traffic can start flowing again. | ||
15499 | * @pdev: Pointer to PCI device | ||
15500 | * | ||
15501 | * This callback is called when the error recovery driver tells | ||
15502 | * us that it's OK to resume normal operation. | ||
15503 | */ | ||
15504 | static void tg3_io_resume(struct pci_dev *pdev) | ||
15505 | { | ||
15506 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
15507 | struct tg3 *tp = netdev_priv(netdev); | ||
15508 | int err; | ||
15509 | |||
15510 | rtnl_lock(); | ||
15511 | |||
15512 | if (!netif_running(netdev)) | ||
15513 | goto done; | ||
15514 | |||
15515 | tg3_full_lock(tp, 0); | ||
15516 | tg3_flag_set(tp, INIT_COMPLETE); | ||
15517 | err = tg3_restart_hw(tp, 1); | ||
15518 | tg3_full_unlock(tp); | ||
15519 | if (err) { | ||
15520 | netdev_err(netdev, "Cannot restart hardware after reset.\n"); | ||
15521 | goto done; | ||
15522 | } | ||
15523 | |||
15524 | netif_device_attach(netdev); | ||
15525 | |||
15526 | tp->timer.expires = jiffies + tp->timer_offset; | ||
15527 | add_timer(&tp->timer); | ||
15528 | |||
15529 | tg3_netif_start(tp); | ||
15530 | |||
15531 | tg3_phy_start(tp); | ||
15532 | |||
15533 | done: | ||
15534 | rtnl_unlock(); | ||
15535 | } | ||
15536 | |||
15537 | static struct pci_error_handlers tg3_err_handler = { | ||
15538 | .error_detected = tg3_io_error_detected, | ||
15539 | .slot_reset = tg3_io_slot_reset, | ||
15540 | .resume = tg3_io_resume | ||
15541 | }; | ||
15542 | |||
14837 | static struct pci_driver tg3_driver = { | 15543 | static struct pci_driver tg3_driver = { |
14838 | .name = DRV_MODULE_NAME, | 15544 | .name = DRV_MODULE_NAME, |
14839 | .id_table = tg3_pci_tbl, | 15545 | .id_table = tg3_pci_tbl, |
14840 | .probe = tg3_init_one, | 15546 | .probe = tg3_init_one, |
14841 | .remove = __devexit_p(tg3_remove_one), | 15547 | .remove = __devexit_p(tg3_remove_one), |
14842 | .suspend = tg3_suspend, | 15548 | .err_handler = &tg3_err_handler, |
14843 | .resume = tg3_resume | 15549 | .driver.pm = TG3_PM_OPS, |
14844 | }; | 15550 | }; |
14845 | 15551 | ||
14846 | static int __init tg3_init(void) | 15552 | static int __init tg3_init(void) |