author     Stephen Hemminger <shemminger@osdl.org>    2005-09-14 19:06:14 -0400
committer  Jeff Garzik <jgarzik@pobox.com>            2005-09-16 02:48:03 -0400
commit     793b883ed12a6ae6e2901ddb5e038b77d6f0c0ac (patch)
tree       d485606a0a7f2b70c9ee0d118fbdedf589f89a2c /drivers
parent     d7f6884ae0ae6e406ec3500fcde16e8f51642460 (diff)
[PATCH] sky2: driver update.

Here is a revised patch against the netdev sky2 branch. It includes whitespace
fixes, all the changes from the previous review, as well as some optimizations
and timing fixes to solve some of the hangs.

The stall problem is better but not perfect. It appears that under stress the
chip can't keep up with the bus and sends a pause frame, then hangs. This
version is for testing, and hopefully other eyes might see the root cause of
the problem. I don't want to reinvent the ugly watchdog code in the SysKonnect
version of sk98lin; if you read it you will see that the original driver writer
and the hardware developer obviously didn't understand each other.

Dual-port support is included, but not tested yet. It did require a small
change to NAPI since both ports share the same IRQ.

Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
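
The NAPI remark refers to both MACs funnelling completions through one shared interrupt and status unit. Below is a minimal sketch of one way to structure that, using today's napi_struct API rather than the 2005-era dev->poll interface this patch actually modifies; all *_sketch names are made up for illustration.

/*
 * Sketch only: modern NAPI API, hypothetical names, not the code in this patch.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct sky2_hw_sketch {
	struct napi_struct napi;        /* one poll context shared by both ports */
	struct net_device *dev[2];
};

static irqreturn_t sky2_isr_sketch(int irq, void *dev_id)
{
	struct sky2_hw_sketch *hw = dev_id;

	/* mask chip interrupts here, then defer the work to softirq context */
	napi_schedule(&hw->napi);
	return IRQ_HANDLED;
}

static int sky2_poll_sketch(struct napi_struct *napi, int budget)
{
	struct sky2_hw_sketch *hw = container_of(napi, struct sky2_hw_sketch, napi);
	int work_done = 0;

	/* Real code would walk the shared status ring here, crediting work
	 * to hw->dev[0] or hw->dev[1] according to the port in each entry. */
	(void)hw;

	if (work_done < budget)
		napi_complete_done(napi, work_done);    /* and re-enable chip IRQs */
	return work_done;
}
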
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/sky2.c | 1311
-rw-r--r--  drivers/net/sky2.h |  112
2 files changed, 803 insertions, 620 deletions
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index cfb2b41bc6b2..bc95aacab20f 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -15,7 +15,7 @@
15 * 15 *
16 * This program is distributed in the hope that it will be useful, 16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 * 20 *
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
@@ -26,14 +26,17 @@
26/* 26/*
27 * TODO 27 * TODO
28 * - coalescing setting? 28 * - coalescing setting?
29 * - variable ring size? 29 * - vlan support
30 * 30 *
31 * TOTEST 31 * TOTEST
32 * - variable ring size
32 * - speed setting 33 * - speed setting
33 * - power management 34 * - power management
35 * - netpoll
34 */ 36 */
35 37
36#include <linux/config.h> 38#include <linux/config.h>
39#include <linux/crc32.h>
37#include <linux/kernel.h> 40#include <linux/kernel.h>
38#include <linux/version.h> 41#include <linux/version.h>
39#include <linux/module.h> 42#include <linux/module.h>
@@ -45,14 +48,13 @@
45#include <linux/tcp.h> 48#include <linux/tcp.h>
46#include <linux/in.h> 49#include <linux/in.h>
47#include <linux/delay.h> 50#include <linux/delay.h>
48#include <linux/crc32.h>
49 51
50#include <asm/irq.h> 52#include <asm/irq.h>
51 53
52#include "sky2.h" 54#include "sky2.h"
53 55
54#define DRV_NAME "sky2" 56#define DRV_NAME "sky2"
55#define DRV_VERSION "0.2" 57#define DRV_VERSION "0.4"
56#define PFX DRV_NAME " " 58#define PFX DRV_NAME " "
57 59
58/* 60/*
@@ -70,13 +72,18 @@
70#define is_ec_a1(hw) 0 72#define is_ec_a1(hw) 0
71#endif 73#endif
72 74
73#define RX_LE_SIZE 256 75#define RX_LE_SIZE 256
74#define MIN_RX_BUFFERS 8
75#define MAX_RX_BUFFERS 124
76#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le)) 76#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
77#define RX_MAX_PENDING (RX_LE_SIZE/2 - 1)
78#define RX_DEF_PENDING 128
79#define RX_COPY_THRESHOLD 128
80
81#define TX_RING_SIZE 512
82#define TX_DEF_PENDING (TX_RING_SIZE - 1)
83#define TX_MIN_PENDING 64
84#define MAX_SKB_TX_LE (4 + 2*MAX_SKB_FRAGS)
77 85
78#define TX_RING_SIZE 256 // min 64 max 4096 86#define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */
79#define STATUS_RING_SIZE 1024 // pow2 > (2*Rx + Tx)
80#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le)) 87#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
81#define ETH_JUMBO_MTU 9000 88#define ETH_JUMBO_MTU 9000
82#define TX_WATCHDOG (5 * HZ) 89#define TX_WATCHDOG (5 * HZ)
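
The new MAX_SKB_TX_LE define bounds how many list elements one packet can consume, which the transmit path checks before queueing. A worked version of that bound follows, assuming MAX_SKB_FRAGS is 18 (the usual value with 4 KiB pages); the assumption is mine, not stated in the patch.

/* Per packet the driver may emit at most one list element each for the
 * 64-bit address prefix, the TSO setup, the checksum setup and the head
 * buffer, plus up to two (address prefix + buffer) per page fragment. */
#include <stdio.h>

int main(void)
{
	unsigned max_skb_frags = 18;                  /* assumption, not probed */
	unsigned max_skb_tx_le = 4 + 2 * max_skb_frags;

	printf("worst-case list elements per packet: %u\n", max_skb_tx_le); /* 40 */
	return 0;
}
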
@@ -84,15 +91,16 @@
84#define PHY_RETRIES 1000 91#define PHY_RETRIES 1000
85 92
86static const u32 default_msg = 93static const u32 default_msg =
87 NETIF_MSG_DRV| NETIF_MSG_PROBE| NETIF_MSG_LINK 94 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
88 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR 95 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
89 | NETIF_MSG_IFUP| NETIF_MSG_IFDOWN; 96 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_INTR;
90 97
91static int debug = -1; /* defaults above */ 98static int debug = -1; /* defaults above */
92module_param(debug, int, 0); 99module_param(debug, int, 0);
93MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 100MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
94 101
95static const struct pci_device_id sky2_id_table[] = { 102static const struct pci_device_id sky2_id_table[] = {
103 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
96 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, 104 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
97 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, 105 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },
98 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, 106 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },
@@ -111,34 +119,24 @@ static const struct pci_device_id sky2_id_table[] = {
111 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, 119 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) },
112 { 0 } 120 { 0 }
113}; 121};
122
114MODULE_DEVICE_TABLE(pci, sky2_id_table); 123MODULE_DEVICE_TABLE(pci, sky2_id_table);
115 124
116/* Avoid conditionals by using array */ 125/* Avoid conditionals by using array */
117static const unsigned txqaddr[] = { Q_XA1, Q_XA2 }; 126static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
118static const unsigned rxqaddr[] = { Q_R1, Q_R2 }; 127static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
119 128
120static inline const char *chip_name(u8 chip_id) 129static const char *yukon_name[] = {
121{ 130 [CHIP_ID_YUKON_LITE - CHIP_ID_YUKON] = "Lite", /* 0xb0 */
122 switch (chip_id) { 131 [CHIP_ID_YUKON_LP - CHIP_ID_YUKON] = "LP", /* 0xb2 */
123 case CHIP_ID_GENESIS: 132 [CHIP_ID_YUKON_XL - CHIP_ID_YUKON] = "XL", /* 0xb3 */
124 return "Genesis";
125 case CHIP_ID_YUKON:
126 return "Yukon";
127 case CHIP_ID_YUKON_LITE:
128 return "Yukon-Lite";
129 case CHIP_ID_YUKON_LP:
130 return "Yukon-LP";
131 case CHIP_ID_YUKON_XL:
132 return "Yukon-XL";
133 case CHIP_ID_YUKON_EC:
134 return "Yukon-EC";
135 case CHIP_ID_YUKON_FE:
136 return "Yukon-FE";
137 default:
138 return "???";
139 }
140}
141 133
134 [CHIP_ID_YUKON_EC - CHIP_ID_YUKON] = "EC", /* 0xb6 */
135 [CHIP_ID_YUKON_FE - CHIP_ID_YUKON] = "FE", /* 0xb7 */
136};
137
138
139/* Access to external PHY */
142static void gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val) 140static void gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
143{ 141{
144 int i; 142 int i;
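
The yukon_name[] table above replaces the old chip_name() switch with designated initializers indexed by the chip id's offset from CHIP_ID_YUKON. A standalone sketch of that lookup pattern, including the bounds check a caller needs; the enum values and the helper name are illustrative, not copied from sky2.h.

#include <stdio.h>

enum {
	CHIP_ID_YUKON    = 0xb0,        /* illustrative values only */
	CHIP_ID_YUKON_XL = 0xb3,
	CHIP_ID_YUKON_EC = 0xb6,
};

static const char *yukon_name[] = {
	[CHIP_ID_YUKON_XL - CHIP_ID_YUKON] = "XL",
	[CHIP_ID_YUKON_EC - CHIP_ID_YUKON] = "EC",
};

static const char *chip_name(unsigned chip_id)
{
	unsigned idx = chip_id - CHIP_ID_YUKON;

	if (idx < sizeof(yukon_name) / sizeof(yukon_name[0]) && yukon_name[idx])
		return yukon_name[idx];
	return "(unknown)";
}

int main(void)
{
	printf("0xb6 -> Yukon-%s\n", chip_name(0xb6));   /* EC        */
	printf("0xb1 -> Yukon-%s\n", chip_name(0xb1));   /* (unknown) */
	return 0;
}
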
@@ -148,30 +146,28 @@ static void gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
148 GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg)); 146 GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));
149 147
150 for (i = 0; i < PHY_RETRIES; i++) { 148 for (i = 0; i < PHY_RETRIES; i++) {
151 udelay(1);
152
153 if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) 149 if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
154 break; 150 return;
151 udelay(1);
155 } 152 }
153 printk(KERN_WARNING PFX "%s: phy write timeout\n", hw->dev[port]->name);
156} 154}
157 155
158static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg) 156static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
159{ 157{
160 int i; 158 int i;
161 159
162 gma_write16(hw, port, GM_SMI_CTRL, 160 gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
163 GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
164 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); 161 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
165 162
166 for (i = 0; i < PHY_RETRIES; i++) { 163 for (i = 0; i < PHY_RETRIES; i++) {
167 udelay(1);
168 if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) 164 if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
169 goto ready; 165 goto ready;
166 udelay(1);
170 } 167 }
171 168
172 printk(KERN_WARNING PFX "%s: phy read timeout\n", 169 printk(KERN_WARNING PFX "%s: phy read timeout\n", hw->dev[port]->name);
173 hw->dev[port]->name); 170ready:
174 ready:
175 return gma_read16(hw, port, GM_SMI_DATA); 171 return gma_read16(hw, port, GM_SMI_DATA);
176} 172}
177 173
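
Both PHY accessors above follow the same shape: issue the SMI command, then poll a busy/ready bit for at most PHY_RETRIES iterations with a 1 us pause between reads, and warn if the hardware never answers. A self-contained sketch of that pattern with stand-in register and delay helpers (the bit value is a placeholder, not GM_SMI_CT_BUSY).

#include <stdint.h>
#include <stdio.h>

#define PHY_RETRIES 1000
#define SMI_BUSY    0x0008              /* placeholder busy bit */

static uint16_t fake_smi_ctrl;          /* stands in for the SMI control register */

static uint16_t smi_read_ctrl(void)
{
	return fake_smi_ctrl;
}

static void udelay(unsigned usec)       /* the kernel helper busy-waits; no-op here */
{
	(void)usec;
}

static int smi_wait_idle(void)
{
	int i;

	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(smi_read_ctrl() & SMI_BUSY))
			return 0;               /* command accepted */
		udelay(1);
	}
	fprintf(stderr, "phy access timeout\n");
	return -1;
}

int main(void)
{
	fake_smi_ctrl = 0;                      /* controller idle from the start */
	printf("wait_idle -> %d\n", smi_wait_idle());

	fake_smi_ctrl = SMI_BUSY;               /* stuck busy: hits the retry limit */
	printf("wait_idle -> %d\n", smi_wait_idle());
	return 0;
}
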
@@ -183,6 +179,7 @@ static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
183 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); 179 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
184 /* disable PHY IRQs */ 180 /* disable PHY IRQs */
185 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0); 181 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
182
186 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ 183 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
187 gma_write16(hw, port, GM_MC_ADDR_H2, 0); 184 gma_write16(hw, port, GM_MC_ADDR_H2, 0);
188 gma_write16(hw, port, GM_MC_ADDR_H3, 0); 185 gma_write16(hw, port, GM_MC_ADDR_H3, 0);
@@ -196,21 +193,13 @@ static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
196static void sky2_phy_init(struct sky2_hw *hw, unsigned port) 193static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
197{ 194{
198 struct sky2_port *sky2 = netdev_priv(hw->dev[port]); 195 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
199 u16 ctrl, ct1000, adv; 196 u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
200 u16 ledctrl, ledover;
201
202 pr_debug("phy reset autoneg=%s advertising=0x%x pause rx=%s tx=%s\n",
203 sky2->autoneg == AUTONEG_ENABLE ? "enable" : "disable",
204 sky2->advertising,
205 sky2->rx_pause ? "on" : "off",
206 sky2->tx_pause ? "on" : "off");
207 197
208 if (sky2->autoneg == AUTONEG_ENABLE && 198 if (sky2->autoneg == AUTONEG_ENABLE && hw->chip_id != CHIP_ID_YUKON_XL) {
209 hw->chip_id != CHIP_ID_YUKON_XL) {
210 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); 199 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
211 200
212 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | 201 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
213 PHY_M_EC_MAC_S_MSK); 202 PHY_M_EC_MAC_S_MSK);
214 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); 203 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
215 204
216 if (hw->chip_id == CHIP_ID_YUKON_EC) 205 if (hw->chip_id == CHIP_ID_YUKON_EC)
@@ -258,9 +247,6 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
258 /* select page 1 to access Fiber registers */ 247 /* select page 1 to access Fiber registers */
259 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1); 248 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
260 } 249 }
261
262 ctrl &= ~(PHY_M_PC_MDIX_MSK | PHY_M_MAC_MD_MSK);
263 ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
264 } 250 }
265 251
266 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); 252 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
@@ -290,14 +276,14 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
290 adv |= PHY_M_AN_10_FD; 276 adv |= PHY_M_AN_10_FD;
291 if (sky2->advertising & ADVERTISED_10baseT_Half) 277 if (sky2->advertising & ADVERTISED_10baseT_Half)
292 adv |= PHY_M_AN_10_HD; 278 adv |= PHY_M_AN_10_HD;
293 } else /* special defines for FIBER (88E1011S only) */ 279 } else /* special defines for FIBER (88E1011S only) */
294 adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD; 280 adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
295 281
296 /* Set Flow-control capabilities */ 282 /* Set Flow-control capabilities */
297 if (sky2->tx_pause && sky2->rx_pause) 283 if (sky2->tx_pause && sky2->rx_pause)
298 adv |= PHY_AN_PAUSE_CAP; /* symmetric */ 284 adv |= PHY_AN_PAUSE_CAP; /* symmetric */
299 else if (sky2->rx_pause && !sky2->tx_pause) 285 else if (sky2->rx_pause && !sky2->tx_pause)
300 adv |= PHY_AN_PAUSE_ASYM|PHY_AN_PAUSE_CAP; 286 adv |= PHY_AN_PAUSE_ASYM | PHY_AN_PAUSE_CAP;
301 else if (!sky2->rx_pause && sky2->tx_pause) 287 else if (!sky2->rx_pause && sky2->tx_pause)
302 adv |= PHY_AN_PAUSE_ASYM; /* local */ 288 adv |= PHY_AN_PAUSE_ASYM; /* local */
303 289
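
The flow-control block above advertises the 802.3 pause capabilities according to the requested rx/tx pause settings. A standalone sketch of just that mapping; the bit values are placeholders, only the (rx_pause, tx_pause) combinations come from the hunk.

#include <stdio.h>

#define AN_PAUSE_CAP  0x0400    /* placeholder for PHY_AN_PAUSE_CAP  */
#define AN_PAUSE_ASYM 0x0800    /* placeholder for PHY_AN_PAUSE_ASYM */

static unsigned pause_adv(int rx_pause, int tx_pause)
{
	if (rx_pause && tx_pause)
		return AN_PAUSE_CAP;                  /* symmetric pause */
	if (rx_pause)
		return AN_PAUSE_CAP | AN_PAUSE_ASYM;  /* receive only    */
	if (tx_pause)
		return AN_PAUSE_ASYM;                 /* transmit only (local) */
	return 0;                                     /* no flow control */
}

int main(void)
{
	printf("rx+tx   -> 0x%x\n", pause_adv(1, 1));
	printf("rx only -> 0x%x\n", pause_adv(1, 0));
	printf("tx only -> 0x%x\n", pause_adv(0, 1));
	return 0;
}
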
@@ -347,26 +333,28 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
347 break; 333 break;
348 334
349 case CHIP_ID_YUKON_XL: 335 case CHIP_ID_YUKON_XL:
350 ctrl = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); 336 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
351 337
352 /* select page 3 to access LED control register */ 338 /* select page 3 to access LED control register */
353 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); 339 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
354 340
355 /* set LED Function Control register */ 341 /* set LED Function Control register */
356 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, 342 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
357 (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ 343 PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
358 PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */ 344 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
359 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ 345 PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
360 PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
361 346
362 /* set Polarity Control register */ 347 /* set Polarity Control register */
363 gm_phy_write(hw, port, PHY_MARV_PHY_STAT, 348 gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
364 (PHY_M_POLC_LS1_P_MIX(4) | PHY_M_POLC_IS0_P_MIX(4) | 349 (PHY_M_POLC_LS1_P_MIX(4) |
365 PHY_M_POLC_LOS_CTRL(2) | PHY_M_POLC_INIT_CTRL(2) | 350 PHY_M_POLC_IS0_P_MIX(4) |
366 PHY_M_POLC_STA1_CTRL(2) | PHY_M_POLC_STA0_CTRL(2))); 351 PHY_M_POLC_LOS_CTRL(2) |
352 PHY_M_POLC_INIT_CTRL(2) |
353 PHY_M_POLC_STA1_CTRL(2) |
354 PHY_M_POLC_STA0_CTRL(2)));
367 355
368 /* restore page register */ 356 /* restore page register */
369 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, ctrl); 357 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
370 break; 358 break;
371 359
372 default: 360 default:
@@ -405,8 +393,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
405 393
406 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); 394 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
407 395
408 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 396 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
409 && port == 1) {
410 /* WA DEV_472 -- looks like crossed wires on port 2 */ 397 /* WA DEV_472 -- looks like crossed wires on port 2 */
411 /* clear GMAC 1 Control reset */ 398 /* clear GMAC 1 Control reset */
412 sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR); 399 sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
@@ -418,14 +405,12 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
418 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0); 405 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
419 } 406 }
420 407
421
422 if (sky2->autoneg == AUTONEG_DISABLE) { 408 if (sky2->autoneg == AUTONEG_DISABLE) {
423 reg = gma_read16(hw, port, GM_GP_CTRL); 409 reg = gma_read16(hw, port, GM_GP_CTRL);
424 reg |= GM_GPCR_AU_ALL_DIS; 410 reg |= GM_GPCR_AU_ALL_DIS;
425 gma_write16(hw, port, GM_GP_CTRL, reg); 411 gma_write16(hw, port, GM_GP_CTRL, reg);
426 gma_read16(hw, port, GM_GP_CTRL); 412 gma_read16(hw, port, GM_GP_CTRL);
427 413
428
429 switch (sky2->speed) { 414 switch (sky2->speed) {
430 case SPEED_1000: 415 case SPEED_1000:
431 reg |= GM_GPCR_SPEED_1000; 416 reg |= GM_GPCR_SPEED_1000;
@@ -441,15 +426,16 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
441 426
442 if (!sky2->tx_pause && !sky2->rx_pause) { 427 if (!sky2->tx_pause && !sky2->rx_pause) {
443 sky2_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 428 sky2_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
444 reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; 429 reg |=
445 } else if (sky2->tx_pause &&!sky2->rx_pause) { 430 GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
431 } else if (sky2->tx_pause && !sky2->rx_pause) {
446 /* disable Rx flow-control */ 432 /* disable Rx flow-control */
447 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; 433 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
448 } 434 }
449 435
450 gma_write16(hw, port, GM_GP_CTRL, reg); 436 gma_write16(hw, port, GM_GP_CTRL, reg);
451 437
452 sky2_read16(hw, GMAC_IRQ_SRC); 438 sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
453 439
454 spin_lock_bh(&hw->phy_lock); 440 spin_lock_bh(&hw->phy_lock);
455 sky2_phy_init(hw, port); 441 sky2_phy_init(hw, port);
@@ -460,7 +446,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
460 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); 446 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
461 447
462 for (i = 0; i < GM_MIB_CNT_SIZE; i++) 448 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
463 gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i); 449 gma_read16(hw, port, GM_MIB_CNT_BASE + 8 * i);
464 gma_write16(hw, port, GM_PHY_ADDR, reg); 450 gma_write16(hw, port, GM_PHY_ADDR, reg);
465 451
466 /* transmit control */ 452 /* transmit control */
@@ -468,7 +454,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
468 454
469 /* receive control reg: unicast + multicast + no FCS */ 455 /* receive control reg: unicast + multicast + no FCS */
470 gma_write16(hw, port, GM_RX_CTRL, 456 gma_write16(hw, port, GM_RX_CTRL,
471 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); 457 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
472 458
473 /* transmit flow control */ 459 /* transmit flow control */
474 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); 460 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
@@ -482,44 +468,43 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
482 468
483 /* serial mode register */ 469 /* serial mode register */
484 reg = DATA_BLIND_VAL(DATA_BLIND_DEF) | 470 reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
485 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); 471 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
486 472
487 if (hw->dev[port]->mtu > 1500) 473 if (hw->dev[port]->mtu > 1500)
488 reg |= GM_SMOD_JUMBO_ENA; 474 reg |= GM_SMOD_JUMBO_ENA;
489 475
490 gma_write16(hw, port, GM_SERIAL_MODE, reg); 476 gma_write16(hw, port, GM_SERIAL_MODE, reg);
491 477
492 /* physical address: used for pause frames */
493 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
494 /* virtual address for data */ 478 /* virtual address for data */
495 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr); 479 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
496 480
497 /* enable interrupt mask for counter overflows */ 481 /* physical address: used for pause frames */
482 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
483
484 /* ignore counter overflows */
498 gma_write16(hw, port, GM_TX_IRQ_MSK, 0); 485 gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
499 gma_write16(hw, port, GM_RX_IRQ_MSK, 0); 486 gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
500 gma_write16(hw, port, GM_TR_IRQ_MSK, 0); 487 gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
501 488
502 /* Configure Rx MAC FIFO */ 489 /* Configure Rx MAC FIFO */
503 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); 490 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
504 sky2_write16(hw, SK_REG(port, RX_GMF_CTRL_T), 491 sky2_write16(hw, SK_REG(port, RX_GMF_CTRL_T),
505 GMF_OPER_ON | GMF_RX_F_FL_ON); 492 GMF_OPER_ON | GMF_RX_F_FL_ON);
506 493
507 reg = RX_FF_FL_DEF_MSK; 494 /* Flush Rx MAC FIFO on any flowcontrol or error */
495 reg = GMR_FS_ANY_ERR;
508 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev <= 1) 496 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev <= 1)
509 reg = 0; /* WA Dev #4115 */ 497 reg = 0; /* WA Dev #4115 */
510 498
511 sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), reg); 499 sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), reg);
512 /* Set threshold to 0xa (64 bytes) 500 /* Set threshold to 0xa (64 bytes)
513 * ASF disabled so no need to do WA dev #4.30 501 * ASF disabled so no need to do WA dev #4.30
514 */ 502 */
515 sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF); 503 sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);
516 504
517 /* Configure Tx MAC FIFO */ 505 /* Configure Tx MAC FIFO */
518 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); 506 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
519 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); 507 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
520
521 /* Turn off Rx fifo flush (per sk98lin) */
522 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RX_F_FL_OFF);
523} 508}
524 509
525static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len) 510static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
@@ -529,7 +514,8 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
529 start /= 8; 514 start /= 8;
530 len /= 8; 515 len /= 8;
531 end = start + len - 1; 516 end = start + len - 1;
532 pr_debug("ramset q=%d start=0x%x end=0x%x\n", q, start, end); 517
518 pr_debug("sky2_ramset start=%d end=%d\n", start, end);
533 519
534 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); 520 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
535 sky2_write32(hw, RB_ADDR(q, RB_START), start); 521 sky2_write32(hw, RB_ADDR(q, RB_START), start);
@@ -538,11 +524,15 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
538 sky2_write32(hw, RB_ADDR(q, RB_RP), start); 524 sky2_write32(hw, RB_ADDR(q, RB_RP), start);
539 525
540 if (q == Q_R1 || q == Q_R2) { 526 if (q == Q_R1 || q == Q_R2) {
527 u32 rxup, rxlo;
528
529 rxlo = len/2;
530 rxup = rxlo + len/4;
531 pr_debug(" utpp=%d ltpp=%d\n", rxup, rxlo);
532
541 /* Set thresholds on receive queue's */ 533 /* Set thresholds on receive queue's */
542 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), 534 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), rxup);
543 start + (2*len)/3); 535 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), rxlo);
544 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP),
545 start + (len/3));
546 } else { 536 } else {
547 /* Enable store & forward on Tx queue's because 537 /* Enable store & forward on Tx queue's because
548 * Tx FIFO is only 1K on Yukon 538 * Tx FIFO is only 1K on Yukon
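
sky2_ramset() works in 8-byte units and, for receive queues, now programs the lower and upper thresholds at one half and three quarters of the queue length. A standalone sketch of that arithmetic with a hypothetical 64 KiB RX partition; the hunk shows only the numbers, not the flow-control semantics of UTPP/LTPP.

#include <stdio.h>

int main(void)
{
	unsigned start = 0, len = 64 * 1024;   /* example bytes given to the RX queue */
	unsigned end, rxlo, rxup;

	start /= 8;                /* hardware addresses 8-byte words */
	len /= 8;
	end = start + len - 1;

	rxlo = len / 2;            /* lower threshold (RB_RX_LTPP) */
	rxup = rxlo + len / 4;     /* upper threshold (RB_RX_UTPP) */

	printf("start=%u end=%u ltpp=%u utpp=%u\n", start, end, rxlo, rxup);
	return 0;
}
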
@@ -551,9 +541,9 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
551 } 541 }
552 542
553 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); 543 sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
544 sky2_read8(hw, RB_ADDR(q, RB_CTRL));
554} 545}
555 546
556
557/* Setup Bus Memory Interface */ 547/* Setup Bus Memory Interface */
558static void sky2_qset(struct sky2_hw *hw, u16 q, u32 wm) 548static void sky2_qset(struct sky2_hw *hw, u16 q, u32 wm)
559{ 549{
@@ -563,61 +553,63 @@ static void sky2_qset(struct sky2_hw *hw, u16 q, u32 wm)
563 sky2_write32(hw, Q_ADDR(q, Q_WM), wm); 553 sky2_write32(hw, Q_ADDR(q, Q_WM), wm);
564} 554}
565 555
566
567/* Setup prefetch unit registers. This is the interface between 556/* Setup prefetch unit registers. This is the interface between
568 * hardware and driver list elements 557 * hardware and driver list elements
569 */ 558 */
570static inline void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr, 559static inline void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
571 u64 addr, u32 last) 560 u64 addr, u32 last)
572{ 561{
573 pr_debug("sky2 prefetch init q=%x addr=%llx last=%x\n",
574 Y2_QADDR(qaddr, 0), addr, last);
575
576 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); 562 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
577 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR); 563 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
578 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), addr >> 32); 564 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), addr >> 32);
579 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), (u32) addr); 565 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), (u32) addr);
580 sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last); 566 sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
581 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON); 567 sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);
568
569 sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
582} 570}
583 571
572static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
573{
574 struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
575
576 sky2->tx_prod = (sky2->tx_prod + 1) % TX_RING_SIZE;
577 return le;
578}
584 579
585/* 580/*
586 * This is a workaround code taken from syskonnect sk98lin driver 581 * This is a workaround code taken from syskonnect sk98lin driver
587 * to deal with chip bug in the wraparound case. 582 * to deal with chip bug on Yukon EC rev 0 in the wraparound case.
588 */ 583 */
589static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, 584static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q,
590 u16 idx, u16 *last, u16 size) 585 u16 idx, u16 *last, u16 size)
591
592{ 586{
593 BUG_ON(idx >= size);
594
595 wmb();
596 if (is_ec_a1(hw) && idx < *last) { 587 if (is_ec_a1(hw) && idx < *last) {
597 u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX)); 588 u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
598 589
599 if (hwget == 0) { 590 if (hwget == 0) {
600 /* Start prefetching again */ 591 /* Start prefetching again */
601 sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 592 sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 0xe0);
602 0xe0);
603 goto setnew; 593 goto setnew;
604 } 594 }
605 595
606 if (hwget == size-1) { 596 if (hwget == size - 1) {
607 /* set watermark to one list element */ 597 /* set watermark to one list element */
608 sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 8); 598 sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 8);
609 599
610 /* set put index to first list element */ 600 /* set put index to first list element */
611 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), 0); 601 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), 0);
612 } else /* have hardware go to end of list */ 602 } else /* have hardware go to end of list */
613 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), size-1); 603 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX),
604 size - 1);
614 } else { 605 } else {
615 setnew: 606setnew:
616 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); 607 sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
617 *last = idx;
618 } 608 }
609 *last = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX));
619} 610}
620 611
612
621static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2) 613static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
622{ 614{
623 struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put; 615 struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
@@ -625,61 +617,74 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
625 return le; 617 return le;
626} 618}
627 619
628static inline void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map, u16 len) 620/* Build description to hardware about buffer */
621static inline void sky2_rx_add(struct sky2_port *sky2, struct ring_info *re)
629{ 622{
630 struct sky2_rx_le *le; 623 struct sky2_rx_le *le;
624 u32 hi = (re->mapaddr >> 16) >> 16;
631 625
632 if (sizeof(map) > sizeof(u32)) { 626 re->idx = sky2->rx_put;
627 if (sky2->rx_addr64 != hi) {
633 le = sky2_next_rx(sky2); 628 le = sky2_next_rx(sky2);
634 le->rx.addr = cpu_to_le32((u64) map >> 32); 629 le->addr = cpu_to_le32(hi);
635 le->ctrl = 0; 630 le->ctrl = 0;
636 le->opcode = OP_ADDR64 | HW_OWNER; 631 le->opcode = OP_ADDR64 | HW_OWNER;
632 sky2->rx_addr64 = hi;
637 } 633 }
638 634
639 le = sky2_next_rx(sky2); 635 le = sky2_next_rx(sky2);
640 le->rx.addr = cpu_to_le32((u32) map); 636 le->addr = cpu_to_le32((u32) re->mapaddr);
641 le->length = cpu_to_le16(len); 637 le->length = cpu_to_le16(re->maplen);
642 le->ctrl = 0; 638 le->ctrl = 0;
643 le->opcode = OP_PACKET | HW_OWNER; 639 le->opcode = OP_PACKET | HW_OWNER;
644} 640}
645 641
642/* Tell receiver about new buffers. */
643static inline void rx_set_put(struct net_device *dev)
644{
645 struct sky2_port *sky2 = netdev_priv(dev);
646
647 if (sky2->rx_last_put != sky2->rx_put)
648 sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put,
649 &sky2->rx_last_put, RX_LE_SIZE);
650}
651
646/* Tell chip where to start receive checksum. 652/* Tell chip where to start receive checksum.
647 * Actually has two checksums, but set both same to avoid possible byte 653 * Actually has two checksums, but set both same to avoid possible byte
648 * order problems. 654 * order problems.
649 */ 655 */
650static void sky2_rx_set_offset(struct sky2_port *sky2) 656static void rx_set_checksum(struct sky2_port *sky2)
651{ 657{
652 struct sky2_rx_le *le; 658 struct sky2_rx_le *le;
653 659
654 sky2_write32(sky2->hw,
655 Q_ADDR(rxqaddr[sky2->port], Q_CSR),
656 sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
657
658 le = sky2_next_rx(sky2); 660 le = sky2_next_rx(sky2);
659 le->rx.csum.start1 = ETH_HLEN; 661 le->addr = (ETH_HLEN << 16) | ETH_HLEN;
660 le->rx.csum.start2 = ETH_HLEN;
661 le->ctrl = 0; 662 le->ctrl = 0;
662 le->opcode = OP_TCPSTART | HW_OWNER; 663 le->opcode = OP_TCPSTART | HW_OWNER;
663 wmb(); 664
664 sky2_write16(sky2->hw, 665 sky2_write16(sky2->hw, Y2_QADDR(rxqaddr[sky2->port],
665 Y2_QADDR(rxqaddr[sky2->port], PREF_UNIT_PUT_IDX), 666 PREF_UNIT_PUT_IDX), sky2->rx_put);
666 sky2->rx_put); 667 sky2_read16(sky2->hw, Y2_QADDR(rxqaddr[sky2->port], PREF_UNIT_PUT_IDX));
668 mdelay(1);
669 sky2_write32(sky2->hw,
670 Q_ADDR(rxqaddr[sky2->port], Q_CSR),
671 sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
667 672
668} 673}
669 674
675
670/* Cleanout receive buffer area, assumes receiver hardware stopped */ 676/* Cleanout receive buffer area, assumes receiver hardware stopped */
671static void sky2_rx_clean(struct sky2_port *sky2) 677static void sky2_rx_clean(struct sky2_port *sky2)
672{ 678{
673 unsigned i; 679 unsigned i;
674 680
675 memset(sky2->rx_le, 0, RX_LE_BYTES); 681 memset(sky2->rx_le, 0, RX_LE_BYTES);
676 for (i = 0; i < sky2->rx_ring_size; i++) { 682 for (i = 0; i < sky2->rx_pending; i++) {
677 struct ring_info *re = sky2->rx_ring + i; 683 struct ring_info *re = sky2->rx_ring + i;
678 684
679 if (re->skb) { 685 if (re->skb) {
680 pci_unmap_single(sky2->hw->pdev, 686 pci_unmap_single(sky2->hw->pdev,
681 pci_unmap_addr(re, mapaddr), 687 re->mapaddr, re->maplen,
682 pci_unmap_len(re, maplen),
683 PCI_DMA_FROMDEVICE); 688 PCI_DMA_FROMDEVICE);
684 kfree_skb(re->skb); 689 kfree_skb(re->skb);
685 re->skb = NULL; 690 re->skb = NULL;
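
sky2_rx_add() now caches the upper 32 bits of the last DMA address (rx_addr64) and only emits an OP_ADDR64 element when they change; the (addr >> 16) >> 16 form avoids an undefined 32-bit shift when dma_addr_t is only 32 bits wide. A standalone sketch of that caching.

#include <stdint.h>
#include <stdio.h>

static uint32_t cached_hi;      /* stands in for sky2->rx_addr64 */
static unsigned addr64_les;     /* OP_ADDR64 elements emitted so far */

static void post_buffer(uint64_t mapaddr)
{
	uint32_t hi = (uint32_t)((mapaddr >> 16) >> 16);

	if (hi != cached_hi) {      /* high half changed: emit an OP_ADDR64 element */
		cached_hi = hi;
		addr64_les++;
	}
	/* ...then emit the OP_PACKET element carrying the low 32 bits... */
}

int main(void)
{
	post_buffer(0x00000001a0000000ULL);   /* first buffer above 4 GiB        */
	post_buffer(0x00000001a0001000ULL);   /* same 4 GiB window: no extra LE  */
	post_buffer(0x0000000230000000ULL);   /* new window: one more LE         */
	printf("OP_ADDR64 elements emitted: %u\n", addr64_les);   /* 2 */
	return 0;
}
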
@@ -687,12 +692,13 @@ static void sky2_rx_clean(struct sky2_port *sky2)
687 } 692 }
688} 693}
689 694
690static inline struct sk_buff *sky2_rx_alloc_skb(struct sky2_port *sky2, 695static inline struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2,
691 unsigned int size, int gfp_mask) 696 unsigned int size,
697 unsigned int gfp_mask)
692{ 698{
693 struct sk_buff *skb; 699 struct sk_buff *skb;
694 700
695 skb = alloc_skb(size, gfp_mask); 701 skb = alloc_skb(size + NET_IP_ALIGN, gfp_mask);
696 if (likely(skb)) { 702 if (likely(skb)) {
697 skb->dev = sky2->netdev; 703 skb->dev = sky2->netdev;
698 skb_reserve(skb, NET_IP_ALIGN); 704 skb_reserve(skb, NET_IP_ALIGN);
@@ -709,29 +715,21 @@ static inline struct sk_buff *sky2_rx_alloc_skb(struct sky2_port *sky2,
709static int sky2_rx_fill(struct sky2_port *sky2) 715static int sky2_rx_fill(struct sky2_port *sky2)
710{ 716{
711 unsigned i; 717 unsigned i;
712 unsigned int rx_buf_size = sky2->netdev->mtu + ETH_HLEN + 8; 718 const unsigned rx_buf_size = sky2->netdev->mtu + ETH_HLEN + 8;
713 719
714 pr_debug("sky2_rx_fill %d\n", sky2->rx_ring_size); 720 for (i = 0; i < sky2->rx_pending; i++) {
715 for (i = 0; i < sky2->rx_ring_size; i++) {
716 struct ring_info *re = sky2->rx_ring + i; 721 struct ring_info *re = sky2->rx_ring + i;
717 dma_addr_t paddr;
718 722
719 re->skb = sky2_rx_alloc_skb(sky2, rx_buf_size, GFP_KERNEL); 723 re->skb = sky2_rx_alloc(sky2, rx_buf_size, GFP_KERNEL);
720 if (!re->skb) 724 if (!re->skb)
721 goto nomem; 725 goto nomem;
722 726
723 paddr = pci_map_single(sky2->hw->pdev, re->skb->data, 727 re->mapaddr = pci_map_single(sky2->hw->pdev, re->skb->data,
724 rx_buf_size, PCI_DMA_FROMDEVICE); 728 rx_buf_size, PCI_DMA_FROMDEVICE);
725 729 re->maplen = rx_buf_size;
726 pci_unmap_len_set(re, maplen, rx_buf_size); 730 sky2_rx_add(sky2, re);
727 pci_unmap_addr_set(re, mapaddr, paddr);
728 sky2_rx_add(sky2, paddr, rx_buf_size);
729 } 731 }
730 732
731 sky2_write16(sky2->hw,
732 Y2_QADDR(rxqaddr[sky2->port], PREF_UNIT_PUT_IDX),
733 sky2->rx_put);
734
735 return 0; 733 return 0;
736nomem: 734nomem:
737 sky2_rx_clean(sky2); 735 sky2_rx_clean(sky2);
@@ -752,7 +750,8 @@ static int sky2_up(struct net_device *dev)
752 750
753 /* must be power of 2 */ 751 /* must be power of 2 */
754 sky2->tx_le = pci_alloc_consistent(hw->pdev, 752 sky2->tx_le = pci_alloc_consistent(hw->pdev,
755 TX_RING_SIZE * sizeof(struct sky2_tx_le), 753 TX_RING_SIZE *
754 sizeof(struct sky2_tx_le),
756 &sky2->tx_le_map); 755 &sky2->tx_le_map);
757 if (!sky2->tx_le) 756 if (!sky2->tx_le)
758 goto err_out; 757 goto err_out;
@@ -770,7 +769,7 @@ static int sky2_up(struct net_device *dev)
770 goto err_out; 769 goto err_out;
771 memset(sky2->rx_le, 0, RX_LE_BYTES); 770 memset(sky2->rx_le, 0, RX_LE_BYTES);
772 771
773 sky2->rx_ring = kmalloc(sky2->rx_ring_size * sizeof(struct ring_info), 772 sky2->rx_ring = kmalloc(sky2->rx_pending * sizeof(struct ring_info),
774 GFP_KERNEL); 773 GFP_KERNEL);
775 if (!sky2->rx_ring) 774 if (!sky2->rx_ring)
776 goto err_out; 775 goto err_out;
@@ -782,8 +781,8 @@ static int sky2_up(struct net_device *dev)
782 (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == 2)) 781 (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == 2))
783 ramsize = 4096; 782 ramsize = 4096;
784 else { 783 else {
785 u8 e0 = sky2_read8(hw, B2_E_0); 784 u8 e0 = sky2_read8(hw, B2_E_0);
786 ramsize = (e0 == 0) ? (128*1024) : (e0 * 4096); 785 ramsize = (e0 == 0) ? (128 * 1024) : (e0 * 4096);
787 } 786 }
788 787
789 /* 2/3 for Rx */ 788 /* 2/3 for Rx */
@@ -791,18 +790,29 @@ static int sky2_up(struct net_device *dev)
791 sky2_ramset(hw, rxqaddr[port], 0, rxspace); 790 sky2_ramset(hw, rxqaddr[port], 0, rxspace);
792 sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace); 791 sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
793 792
793 /* Make sure SyncQ is disabled */
794 sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
795 RB_RST_SET);
796
794 sky2_qset(hw, rxqaddr[port], is_pciex(hw) ? 0x80 : 0x600); 797 sky2_qset(hw, rxqaddr[port], is_pciex(hw) ? 0x80 : 0x600);
795 sky2_qset(hw, txqaddr[port], 0x600); 798 sky2_qset(hw, txqaddr[port], 0x600);
796 799
797 sky2->rx_put = sky2->rx_next = 0; 800 sky2->rx_put = sky2->rx_next = 0;
798 sky2_prefetch_init(hw, rxqaddr[port], sky2->rx_le_map, RX_LE_SIZE-1); 801 sky2_prefetch_init(hw, rxqaddr[port], sky2->rx_le_map, RX_LE_SIZE - 1);
799 802
800 sky2_rx_set_offset(sky2); 803 rx_set_checksum(sky2);
801 804
802 err = sky2_rx_fill(sky2); 805 err = sky2_rx_fill(sky2);
803 if (err) 806 if (err)
804 goto err_out; 807 goto err_out;
805 808
809 /* Give buffers to receiver */
810 sky2_write16(sky2->hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX),
811 sky2->rx_put);
812 sky2->rx_last_put = sky2_read16(sky2->hw,
813 Y2_QADDR(rxqaddr[port],
814 PREF_UNIT_PUT_IDX));
815
806 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, 816 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
807 TX_RING_SIZE - 1); 817 TX_RING_SIZE - 1);
808 818
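
sky2_up() splits the chip's packet RAM between the two queues, giving two thirds to receive per the "2/3 for Rx" comment above. A standalone sketch of that split, assuming rxspace really is two thirds of ramsize (the computation itself is outside the hunk) and using an example size.

#include <stdio.h>

int main(void)
{
	unsigned ramsize = 96 * 1024;          /* example value, not probed      */
	unsigned rxspace = (2 * ramsize) / 3;  /* assumed form of "2/3 for Rx"   */

	printf("rx: %u bytes at 0, tx: %u bytes at %u\n",
	       rxspace, ramsize - rxspace, rxspace);
	return 0;
}
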
@@ -827,96 +837,117 @@ err_out:
827 return err; 837 return err;
828} 838}
829 839
830/* 840/* Modular subtraction in ring */
831 * Worst case number of list elements is 36 841static inline int tx_dist(unsigned tail, unsigned head)
832 * TSO + CHKSUM + ADDR64 + BUFFER + (ADDR+BUFFER)*MAXFRAGS 842{
833 */ 843 return (head >= tail ? head : head + TX_RING_SIZE) - tail;
834#define MAX_SKB_TX_LE (4 + 2*MAX_SKB_FRAGS) 844}
835 845
836static inline int sky2_xmit_avail(const struct sky2_port *sky2) 846/* Number of list elements available for next tx */
847static inline int tx_avail(const struct sky2_port *sky2)
837{ 848{
838 return (sky2->tx_cons > sky2->tx_prod ? 0 : TX_RING_SIZE) 849 return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod);
839 + sky2->tx_cons - sky2->tx_prod - 1;
840} 850}
841 851
842static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2) 852/* Estimate of number of transmit list elements required */
853static inline unsigned tx_le_req(const struct sk_buff *skb)
843{ 854{
844 struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod; 855 unsigned count;
845 sky2->tx_prod = (sky2->tx_prod + 1) % TX_RING_SIZE; 856
846 return le; 857 count = sizeof(dma_addr_t) / sizeof(u32);
858 count += skb_shinfo(skb)->nr_frags * count;
859
860 if (skb_shinfo(skb)->tso_size)
861 ++count;
862
863 if (skb->ip_summed)
864 ++count;
865
866 return count;
847} 867}
848 868
849/* Put one frame in ring for transmit. */ 869/*
870 * Put one packet in ring for transmit.
871 * A single packet can generate multiple list elements, and
872 * the number of ring elements will probably be less than the number
873 * of list elements used.
874 */
850static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) 875static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
851{ 876{
852 struct sky2_port *sky2 = netdev_priv(dev); 877 struct sky2_port *sky2 = netdev_priv(dev);
853 struct sky2_hw *hw = sky2->hw; 878 struct sky2_hw *hw = sky2->hw;
854 struct sky2_tx_le *le; 879 struct sky2_tx_le *le;
855 struct ring_info *re; 880 struct ring_info *re;
881 unsigned long flags;
856 unsigned i, len; 882 unsigned i, len;
857 dma_addr_t mapping; 883 dma_addr_t mapping;
858 u32 addr64; 884 u32 addr64;
859 u16 mss; 885 u16 mss;
860 u8 ctrl; 886 u8 ctrl;
861 887
862 skb = skb_padto(skb, ETH_ZLEN); 888 local_irq_save(flags);
863 if (!skb) 889 if (!spin_trylock(&sky2->tx_lock)) {
864 return NETDEV_TX_OK; 890 local_irq_restore(flags);
865
866 if (!spin_trylock(&sky2->tx_lock))
867 return NETDEV_TX_LOCKED; 891 return NETDEV_TX_LOCKED;
892 }
868 893
869 if (unlikely(sky2_xmit_avail(sky2) < MAX_SKB_TX_LE)) { 894 if (unlikely(tx_avail(sky2) < tx_le_req(skb))) {
870 netif_stop_queue(dev); 895 netif_stop_queue(dev);
871 spin_unlock(&sky2->tx_lock); 896 spin_unlock_irqrestore(&sky2->tx_lock, flags);
872 897
873 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n", 898 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
874 dev->name); 899 dev->name);
875 return NETDEV_TX_BUSY; 900 return NETDEV_TX_BUSY;
876 } 901 }
877 902
878 if (netif_msg_tx_queued(sky2)) 903 if (unlikely(netif_msg_tx_queued(sky2)))
879 printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n", 904 printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
880 dev->name, sky2->tx_prod, skb->len); 905 dev->name, sky2->tx_prod, skb->len);
881 906
882
883 len = skb_headlen(skb); 907 len = skb_headlen(skb);
884 mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); 908 mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
909 addr64 = (mapping >> 16) >> 16;
910
911 re = sky2->tx_ring + sky2->tx_prod;
912
913 /* Send high bits if changed */
914 if (addr64 != sky2->tx_addr64) {
915 le = get_tx_le(sky2);
916 le->tx.addr = cpu_to_le32(addr64);
917 le->ctrl = 0;
918 le->opcode = OP_ADDR64 | HW_OWNER;
919 sky2->tx_addr64 = addr64;
920 }
885 921
886 /* Check for TCP Segmentation Offload */ 922 /* Check for TCP Segmentation Offload */
887 mss = skb_shinfo(skb)->tso_size; 923 mss = skb_shinfo(skb)->tso_size;
888 if (mss) { 924 if (mss != 0) {
889 /* just drop the packet if non-linear expansion fails */ 925 /* just drop the packet if non-linear expansion fails */
890 if (skb_header_cloned(skb) && 926 if (skb_header_cloned(skb) &&
891 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { 927 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
892 dev_kfree_skb(skb); 928 dev_kfree_skb_any(skb);
893 return NETDEV_TX_OK; 929 goto out_unlock;
894 } 930 }
895 931
896 mss += ((skb->h.th->doff - 5) * 4); /* TCP options */ 932 mss += ((skb->h.th->doff - 5) * 4); /* TCP options */
897 mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr); 933 mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
898 mss += ETH_HLEN; 934 mss += ETH_HLEN;
935 }
899 936
937 if (mss != sky2->tx_last_mss) {
900 le = get_tx_le(sky2); 938 le = get_tx_le(sky2);
901 le->tx.tso.size = cpu_to_le16(mss); 939 le->tx.tso.size = cpu_to_le16(mss);
902 le->ctrl = 0; 940 le->tx.tso.rsvd = 0;
903 le->opcode = OP_LRGLEN | HW_OWNER; 941 le->opcode = OP_LRGLEN | HW_OWNER;
904 }
905
906 /* Handle Hi DMA */
907 if (sizeof(mapping) > sizeof(u32)) {
908 addr64 = (u64)mapping >> 32;
909
910 le = get_tx_le(sky2);
911 le->tx.addr = cpu_to_le32(addr64);
912 le->ctrl = 0; 942 le->ctrl = 0;
913 le->opcode = OP_ADDR64 | HW_OWNER; 943 sky2->tx_last_mss = mss;
914 } 944 }
915 945
916 /* Handle TCP checksum offload */ 946 /* Handle TCP checksum offload */
917 ctrl = 0; 947 ctrl = 0;
918 if (skb->ip_summed == CHECKSUM_HW) { 948 if (skb->ip_summed == CHECKSUM_HW) {
919 ptrdiff_t hdr = skb->h.raw - skb->data; 949 u16 hdr = skb->h.raw - skb->data;
950 u16 offset = hdr + skb->csum;
920 951
921 ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; 952 ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
922 if (skb->nh.iph->protocol == IPPROTO_UDP) 953 if (skb->nh.iph->protocol == IPPROTO_UDP)
@@ -924,119 +955,112 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
924 955
925 le = get_tx_le(sky2); 956 le = get_tx_le(sky2);
926 le->tx.csum.start = cpu_to_le16(hdr); 957 le->tx.csum.start = cpu_to_le16(hdr);
927 le->tx.csum.offset = cpu_to_le16(hdr + skb->csum); 958 le->tx.csum.offset = cpu_to_le16(offset);
928 le->length = 0; 959 le->length = 0; /* initial checksum value */
929 le->ctrl = 1; /* one packet */ 960 le->ctrl = 1; /* one packet */
930 le->opcode = OP_TCPLISW|HW_OWNER; 961 le->opcode = OP_TCPLISW | HW_OWNER;
931 } 962 }
932 963
933 le = get_tx_le(sky2); 964 le = get_tx_le(sky2);
934 le->tx.addr = cpu_to_le32((u32) mapping); 965 le->tx.addr = cpu_to_le32((u32) mapping);
935 le->length = cpu_to_le16(len); 966 le->length = cpu_to_le16(len);
936 le->ctrl = ctrl; 967 le->ctrl = ctrl;
937 le->opcode = (mss ? OP_LARGESEND : OP_PACKET) |HW_OWNER; 968 le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
938 969
939 re = &sky2->tx_ring[le - sky2->tx_le]; 970 /* Record the transmit mapping info */
940 re->skb = skb; 971 re->skb = skb;
941 pci_unmap_addr_set(re, mapaddr, mapping); 972 re->mapaddr = mapping;
942 pci_unmap_len_set(re, maplen, len); 973 re->maplen = len;
943 974
944 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 975 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
945 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 976 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
977 struct ring_info *fre;
946 978
947 mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset, 979 mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
948 frag->size, PCI_DMA_TODEVICE); 980 frag->size, PCI_DMA_TODEVICE);
949 981 addr64 = (mapping >> 16) >> 16;
950 if (sizeof(mapping) > sizeof(u32)) { 982 if (addr64 != sky2->tx_addr64) {
951 u32 hi = (u64) mapping >> 32; 983 le = get_tx_le(sky2);
952 if (hi != addr64) { 984 le->tx.addr = cpu_to_le32(addr64);
953 le = get_tx_le(sky2); 985 le->ctrl = 0;
954 le->tx.addr = cpu_to_le32(hi); 986 le->opcode = OP_ADDR64 | HW_OWNER;
955 le->ctrl = 0; 987 sky2->tx_addr64 = addr64;
956 le->opcode = OP_ADDR64|HW_OWNER;
957 addr64 = hi;
958 }
959 } 988 }
960 989
961 le = get_tx_le(sky2); 990 le = get_tx_le(sky2);
962 le->tx.addr = cpu_to_le32((u32) mapping); 991 le->tx.addr = cpu_to_le32((u32) mapping);
963 le->length = cpu_to_le16(frag->size); 992 le->length = cpu_to_le16(frag->size);
964 le->ctrl = ctrl; 993 le->ctrl = ctrl;
965 le->opcode = OP_BUFFER|HW_OWNER; 994 le->opcode = OP_BUFFER | HW_OWNER;
966 995
967 re = &sky2->tx_ring[le - sky2->tx_le]; 996 fre = sky2->tx_ring
968 pci_unmap_addr_set(re, mapaddr, mapping); 997 + ((re - sky2->tx_ring) + i + 1) % TX_RING_SIZE;
969 pci_unmap_len_set(re, maplen, frag->size); 998 fre->skb = NULL;
999 fre->mapaddr = mapping;
1000 fre->maplen = frag->size;
970 } 1001 }
971 1002 re->idx = sky2->tx_prod;
972 le->ctrl |= EOP; 1003 le->ctrl |= EOP;
973 1004
974 sky2_put_idx(sky2->hw, txqaddr[sky2->port], sky2->tx_prod, 1005 sky2_put_idx(sky2->hw, txqaddr[sky2->port], sky2->tx_prod,
975 &sky2->tx_last_put, TX_RING_SIZE); 1006 &sky2->tx_last_put, TX_RING_SIZE);
976 1007
977 if (sky2_xmit_avail(sky2) < MAX_SKB_TX_LE) { 1008 if (tx_avail(sky2) < MAX_SKB_TX_LE + 1)
978 pr_debug("%s: transmit queue full\n", dev->name);
979 netif_stop_queue(dev); 1009 netif_stop_queue(dev);
980 } 1010
981 spin_unlock(&sky2->tx_lock); 1011out_unlock:
1012 mmiowb();
1013 spin_unlock_irqrestore(&sky2->tx_lock, flags);
982 1014
983 dev->trans_start = jiffies; 1015 dev->trans_start = jiffies;
984 return NETDEV_TX_OK; 1016 return NETDEV_TX_OK;
985} 1017}
986 1018
987
988/* 1019/*
989 * Free ring elements from starting at tx_cons until done 1020 * Free ring elements from starting at tx_cons until "done"
990 * This unwinds the elements based on the usage assigned 1021 *
991 * xmit routine. 1022 * NB: the hardware will tell us about partial completion of multi-part
1023 * buffers; these are defered until completion.
992 */ 1024 */
993static void sky2_tx_complete(struct net_device *dev, u16 done) 1025static void sky2_tx_complete(struct net_device *dev, u16 done)
994{ 1026{
995 struct sky2_port *sky2 = netdev_priv(dev); 1027 struct sky2_port *sky2 = netdev_priv(dev);
996 unsigned idx = sky2->tx_cons; 1028 unsigned i;
997 struct sk_buff *skb = NULL;
998 1029
999 BUG_ON(done >= TX_RING_SIZE); 1030 if (netif_msg_tx_done(sky2))
1031 printk(KERN_DEBUG "%s: tx done, upto %u\n", dev->name, done);
1000 1032
1001 spin_lock(&sky2->tx_lock); 1033 spin_lock(&sky2->tx_lock);
1002 while (idx != done) {
1003 struct ring_info *re = sky2->tx_ring + idx;
1004 struct sky2_tx_le *le = sky2->tx_le + idx;
1005
1006 BUG_ON(le->opcode == 0);
1007
1008 switch(le->opcode & ~HW_OWNER) {
1009 case OP_LARGESEND:
1010 case OP_PACKET:
1011 if (skb)
1012 dev_kfree_skb_any(skb);
1013 skb = re->skb;
1014 BUG_ON(!skb);
1015 re->skb = NULL;
1016 1034
1017 pci_unmap_single(sky2->hw->pdev, 1035 while (sky2->tx_cons != done) {
1018 pci_unmap_addr(re, mapaddr), 1036 struct ring_info *re = sky2->tx_ring + sky2->tx_cons;
1019 pci_unmap_len(re, maplen), 1037 struct sk_buff *skb;
1020 PCI_DMA_TODEVICE);
1021 break;
1022 1038
1023 case OP_BUFFER: 1039 /* Check for partial status */
1024 pci_unmap_page(sky2->hw->pdev, 1040 if (tx_dist(sky2->tx_cons, done)
1025 pci_unmap_addr(re, mapaddr), 1041 < tx_dist(sky2->tx_cons, re->idx))
1026 pci_unmap_len(re, maplen), 1042 goto out;
1027 PCI_DMA_TODEVICE); 1043
1028 break; 1044 skb = re->skb;
1045 pci_unmap_single(sky2->hw->pdev,
1046 re->mapaddr, re->maplen, PCI_DMA_TODEVICE);
1047
1048 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1049 struct ring_info *fre;
1050 fre =
1051 sky2->tx_ring + (sky2->tx_cons + i +
1052 1) % TX_RING_SIZE;
1053 pci_unmap_page(sky2->hw->pdev, fre->mapaddr,
1054 fre->maplen, PCI_DMA_TODEVICE);
1029 } 1055 }
1030 1056
1031 le->opcode = 0;
1032 idx = (idx + 1) % TX_RING_SIZE;
1033 }
1034
1035 if (skb)
1036 dev_kfree_skb_any(skb); 1057 dev_kfree_skb_any(skb);
1037 sky2->tx_cons = idx;
1038 1058
1039 if (sky2_xmit_avail(sky2) > MAX_SKB_TX_LE) 1059 sky2->tx_cons = re->idx;
1060 }
1061out:
1062
1063 if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE)
1040 netif_wake_queue(dev); 1064 netif_wake_queue(dev);
1041 spin_unlock(&sky2->tx_lock); 1065 spin_unlock(&sky2->tx_lock);
1042} 1066}
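
tx_dist() and tx_avail() are the heart of the new transmit accounting: the distance from consumer to producer modulo the ring size, subtracted from the configured pending limit. A standalone copy of that arithmetic with example indices where the producer has wrapped.

#include <stdio.h>

#define TX_RING_SIZE 512

static unsigned tx_dist(unsigned tail, unsigned head)
{
	return (head >= tail ? head : head + TX_RING_SIZE) - tail;
}

static unsigned tx_avail(unsigned tx_pending, unsigned cons, unsigned prod)
{
	return tx_pending - tx_dist(cons, prod);
}

int main(void)
{
	/* producer wrapped past the end while the consumer has not */
	unsigned cons = 500, prod = 10, pending = 511;

	printf("in flight: %u\n", tx_dist(cons, prod));            /* 22  */
	printf("available: %u\n", tx_avail(pending, cons, prod));  /* 489 */
	return 0;
}
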
@@ -1061,22 +1085,24 @@ static int sky2_down(struct net_device *dev)
1061 1085
1062 netif_stop_queue(dev); 1086 netif_stop_queue(dev);
1063 1087
1088 sky2_phy_reset(hw, port);
1089
1064 /* Stop transmitter */ 1090 /* Stop transmitter */
1065 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP); 1091 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
1066 sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR)); 1092 sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));
1067 1093
1068 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), 1094 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
1069 RB_RST_SET|RB_DIS_OP_MD); 1095 RB_RST_SET | RB_DIS_OP_MD);
1070 1096
1071 ctrl = gma_read16(hw, port, GM_GP_CTRL); 1097 ctrl = gma_read16(hw, port, GM_GP_CTRL);
1072 ctrl &= ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA); 1098 ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
1073 gma_write16(hw, port, GM_GP_CTRL, ctrl); 1099 gma_write16(hw, port, GM_GP_CTRL, ctrl);
1074 1100
1075 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); 1101 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1076 1102
1077 /* Workaround shared GMAC reset */ 1103 /* Workaround shared GMAC reset */
1078 if (! (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 1104 if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
1079 && port == 0 && hw->dev[1] && netif_running(hw->dev[1]))) 1105 && port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
1080 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); 1106 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1081 1107
1082 /* Disable Force Sync bit and Enable Alloc bit */ 1108 /* Disable Force Sync bit and Enable Alloc bit */
@@ -1088,7 +1114,8 @@ static int sky2_down(struct net_device *dev)
1088 sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L); 1114 sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
1089 1115
1090 /* Reset the PCI FIFO of the async Tx queue */ 1116 /* Reset the PCI FIFO of the async Tx queue */
1091 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_RST_SET | BMU_FIFO_RST); 1117 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
1118 BMU_RST_SET | BMU_FIFO_RST);
1092 1119
1093 /* Reset the Tx prefetch units */ 1120 /* Reset the Tx prefetch units */
1094 sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL), 1121 sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
@@ -1144,6 +1171,9 @@ static int sky2_down(struct net_device *dev)
1144 1171
1145static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux) 1172static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
1146{ 1173{
1174 if (!hw->copper)
1175 return SPEED_1000;
1176
1147 if (hw->chip_id == CHIP_ID_YUKON_FE) 1177 if (hw->chip_id == CHIP_ID_YUKON_FE)
1148 return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10; 1178 return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10;
1149 1179
@@ -1163,14 +1193,16 @@ static void sky2_link_up(struct sky2_port *sky2)
1163 unsigned port = sky2->port; 1193 unsigned port = sky2->port;
1164 u16 reg; 1194 u16 reg;
1165 1195
1196 /* disable Rx GMAC FIFO flush mode */
1197 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RX_F_FL_OFF);
1198
1166 /* Enable Transmit FIFO Underrun */ 1199 /* Enable Transmit FIFO Underrun */
1167 sky2_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK); 1200 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
1168 1201
1169 reg = gma_read16(hw, port, GM_GP_CTRL); 1202 reg = gma_read16(hw, port, GM_GP_CTRL);
1170 if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE) 1203 if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE)
1171 reg |= GM_GPCR_DUP_FULL; 1204 reg |= GM_GPCR_DUP_FULL;
1172 1205
1173
1174 /* enable Rx/Tx */ 1206 /* enable Rx/Tx */
1175 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; 1207 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
1176 gma_write16(hw, port, GM_GP_CTRL, reg); 1208 gma_write16(hw, port, GM_GP_CTRL, reg);
@@ -1182,17 +1214,30 @@ static void sky2_link_up(struct sky2_port *sky2)
1182 netif_wake_queue(sky2->netdev); 1214 netif_wake_queue(sky2->netdev);
1183 1215
1184 /* Turn on link LED */ 1216 /* Turn on link LED */
1185 sky2_write8(hw, SK_REG(port, LNK_LED_REG), 1217 sky2_write8(hw, SK_REG(port, LNK_LED_REG),
1186 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); 1218 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
1187 1219
1220 if (hw->chip_id == CHIP_ID_YUKON_XL) {
1221 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
1222
1223 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
1224 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
1225 PHY_M_LEDC_INIT_CTRL(sky2->speed ==
1226 SPEED_10 ? 7 : 0) |
1227 PHY_M_LEDC_STA1_CTRL(sky2->speed ==
1228 SPEED_100 ? 7 : 0) |
1229 PHY_M_LEDC_STA0_CTRL(sky2->speed ==
1230 SPEED_1000 ? 7 : 0));
1231 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
1232 }
1233
1188 if (netif_msg_link(sky2)) 1234 if (netif_msg_link(sky2))
1189 printk(KERN_INFO PFX 1235 printk(KERN_INFO PFX
1190 "%s: Link is up at %d Mbps, %s duplex, flowcontrol %s\n", 1236 "%s: Link is up at %d Mbps, %s duplex, flowcontrol %s\n",
1191 sky2->netdev->name, sky2->speed, 1237 sky2->netdev->name, sky2->speed,
1192 sky2->duplex == DUPLEX_FULL ? "full" : "half", 1238 sky2->duplex == DUPLEX_FULL ? "full" : "half",
1193 (sky2->tx_pause && sky2->rx_pause) ? "both" : 1239 (sky2->tx_pause && sky2->rx_pause) ? "both" :
1194 sky2->tx_pause ? "tx" : 1240 sky2->tx_pause ? "tx" : sky2->rx_pause ? "rx" : "none");
1195 sky2->rx_pause ? "rx" : "none");
1196} 1241}
1197 1242
1198static void sky2_link_down(struct sky2_port *sky2) 1243static void sky2_link_down(struct sky2_port *sky2)
@@ -1211,9 +1256,8 @@ static void sky2_link_down(struct sky2_port *sky2)
1211 if (sky2->rx_pause && !sky2->tx_pause) { 1256 if (sky2->rx_pause && !sky2->tx_pause) {
1212 /* restore Asymmetric Pause bit */ 1257 /* restore Asymmetric Pause bit */
1213 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, 1258 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
1214 gm_phy_read(hw, port, 1259 gm_phy_read(hw, port, PHY_MARV_AUNE_ADV)
1215 PHY_MARV_AUNE_ADV) 1260 | PHY_M_AN_ASP);
1216 | PHY_M_AN_ASP);
1217 } 1261 }
1218 1262
1219 sky2_phy_reset(hw, port); 1263 sky2_phy_reset(hw, port);
@@ -1229,6 +1273,51 @@ static void sky2_link_down(struct sky2_port *sky2)
1229 sky2_phy_init(hw, port); 1273 sky2_phy_init(hw, port);
1230} 1274}
1231 1275
1276static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
1277{
1278 struct sky2_hw *hw = sky2->hw;
1279 unsigned port = sky2->port;
1280 u16 lpa;
1281
1282 lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
1283
1284 if (lpa & PHY_M_AN_RF) {
1285 printk(KERN_ERR PFX "%s: remote fault", sky2->netdev->name);
1286 return -1;
1287 }
1288
1289 if (hw->chip_id != CHIP_ID_YUKON_FE &&
1290 gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
1291 printk(KERN_ERR PFX "%s: master/slave fault",
1292 sky2->netdev->name);
1293 return -1;
1294 }
1295
1296 if (!(aux & PHY_M_PS_SPDUP_RES)) {
1297 printk(KERN_ERR PFX "%s: speed/duplex mismatch",
1298 sky2->netdev->name);
1299 return -1;
1300 }
1301
1302 sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1303
1304 sky2->speed = sky2_phy_speed(hw, aux);
1305
1306 /* Pause bits are offset (9..8) */
1307 if (hw->chip_id == CHIP_ID_YUKON_XL)
1308 aux >>= 6;
1309
1310 sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0;
1311 sky2->tx_pause = (aux & PHY_M_PS_TX_P_EN) != 0;
1312
1313 if ((sky2->tx_pause || sky2->rx_pause)
1314 && !(sky2->speed < SPEED_1000 && sky2->duplex == DUPLEX_HALF))
1315 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
1316 else
1317 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1318
1319 return 0;
1320}
1232 1321
1233/* 1322/*
1234 * Interrrupt from PHY are handled in tasklet (soft irq) 1323 * Interrrupt from PHY are handled in tasklet (soft irq)
@@ -1237,69 +1326,42 @@ static void sky2_link_down(struct sky2_port *sky2)
1237 */ 1326 */
1238static void sky2_phy_task(unsigned long data) 1327static void sky2_phy_task(unsigned long data)
1239{ 1328{
1240 struct sky2_port *sky2 = (struct sky2_port *) data; 1329 struct sky2_port *sky2 = (struct sky2_port *)data;
1241 struct sky2_hw *hw = sky2->hw; 1330 struct sky2_hw *hw = sky2->hw;
1242 unsigned port = sky2->port;
1243 u16 istatus, phystat; 1331 u16 istatus, phystat;
1244 1332
1245 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); 1333 spin_lock(&hw->phy_lock);
1246 1334 istatus = gm_phy_read(hw, sky2->port, PHY_MARV_INT_STAT);
1247 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); 1335 phystat = gm_phy_read(hw, sky2->port, PHY_MARV_PHY_STAT);
1248 1336
1249 if (netif_msg_intr(sky2)) 1337 if (netif_msg_intr(sky2))
1250 printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n", 1338 printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
1251 sky2->netdev->name, istatus, phystat); 1339 sky2->netdev->name, istatus, phystat);
1252 1340
1253 if (istatus & PHY_M_IS_AN_COMPL) { 1341 if (istatus & PHY_M_IS_AN_COMPL) {
1254 u16 lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP); 1342 if (sky2_autoneg_done(sky2, phystat) == 0)
1343 sky2_link_up(sky2);
1344 goto out;
1345 }
1255 1346
1256 if (lpa & PHY_M_AN_RF) { 1347 if (istatus & PHY_M_IS_LSP_CHANGE)
1257 printk(KERN_ERR PFX "%s: remote fault", 1348 sky2->speed = sky2_phy_speed(hw, phystat);
1258 sky2->netdev->name);
1259 }
1260 else if (hw->chip_id != CHIP_ID_YUKON_FE
1261 && gm_phy_read(hw, port, PHY_MARV_1000T_STAT)
1262 & PHY_B_1000S_MSF) {
1263 printk(KERN_ERR PFX "%s: master/slave fault",
1264 sky2->netdev->name);
1265 }
1266 else if (!(phystat & PHY_M_PS_SPDUP_RES)) {
1267 printk(KERN_ERR PFX "%s: speed/duplex mismatch",
1268 sky2->netdev->name);
1269 }
1270 else {
1271 sky2->duplex = (phystat & PHY_M_PS_FULL_DUP)
1272 ? DUPLEX_FULL : DUPLEX_HALF;
1273 1349
1274 sky2->speed = sky2_phy_speed(hw, phystat); 1350 if (istatus & PHY_M_IS_DUP_CHANGE)
1275 1351 sky2->duplex =
1276 sky2->tx_pause = (phystat & PHY_M_PS_TX_P_EN) != 0; 1352 (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1277 sky2->rx_pause = (phystat & PHY_M_PS_RX_P_EN) != 0;
1278 1353
1279 if ((!sky2->tx_pause && !sky2->rx_pause) || 1354 if (istatus & PHY_M_IS_LST_CHANGE) {
1280 (sky2->speed < SPEED_1000 && sky2->duplex == DUPLEX_HALF)) 1355 if (phystat & PHY_M_PS_LINK_UP)
1281 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1282 else
1283 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
1284 sky2_link_up(sky2); 1356 sky2_link_up(sky2);
1285 } 1357 else
1286 } else { 1358 sky2_link_down(sky2);
1287
1288 if (istatus & PHY_M_IS_LSP_CHANGE)
1289 sky2->speed = sky2_phy_speed(hw, phystat);
1290
1291 if (istatus & PHY_M_IS_DUP_CHANGE)
1292 sky2->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1293 if (istatus & PHY_M_IS_LST_CHANGE) {
1294 if (phystat & PHY_M_PS_LINK_UP)
1295 sky2_link_up(sky2);
1296 else
1297 sky2_link_down(sky2);
1298 }
1299 } 1359 }
1360out:
1361 spin_unlock(&hw->phy_lock);
1300 1362
1301 local_irq_disable(); 1363 local_irq_disable();
1302 hw->intr_mask |= (port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2; 1364 hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2;
1303 sky2_write32(hw, B0_IMSK, hw->intr_mask); 1365 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1304 local_irq_enable(); 1366 local_irq_enable();
1305} 1367}
@@ -1346,63 +1408,78 @@ static struct sk_buff *sky2_receive(struct sky2_hw *hw, unsigned port,
1346 struct net_device *dev = hw->dev[port]; 1408 struct net_device *dev = hw->dev[port];
1347 struct sky2_port *sky2 = netdev_priv(dev); 1409 struct sky2_port *sky2 = netdev_priv(dev);
1348 struct ring_info *re = sky2->rx_ring + sky2->rx_next; 1410 struct ring_info *re = sky2->rx_ring + sky2->rx_next;
1349 struct sk_buff *skb = re->skb; 1411 struct sk_buff *skb, *nskb;
1350 dma_addr_t mapping;
1351 const unsigned int rx_buf_size = dev->mtu + ETH_HLEN + 8; 1412 const unsigned int rx_buf_size = dev->mtu + ETH_HLEN + 8;
1352 1413
1353 if (unlikely(netif_msg_rx_status(sky2))) 1414 if (unlikely(netif_msg_rx_status(sky2)))
1354 printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n", 1415 printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
1355 dev->name, sky2->rx_next, status, length); 1416 dev->name, sky2->rx_next, status, length);
1356 1417
1357 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_ring_size; 1418 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
1358
1359 pci_unmap_single(sky2->hw->pdev,
1360 pci_unmap_addr(re, mapaddr),
1361 pci_unmap_len(re, maplen),
1362 PCI_DMA_FROMDEVICE);
1363 prefetch(skb->data);
1364 1419
1365 if (!(status & GMR_FS_RX_OK) 1420 skb = NULL;
1366 || (status & GMR_FS_ANY_ERR) 1421 if (!(status & GMR_FS_RX_OK)
1367 || (length << 16) != (status & GMR_FS_LEN) 1422 || (status & GMR_FS_ANY_ERR)
1368 || length > rx_buf_size) 1423 || (length << 16) != (status & GMR_FS_LEN)
1424 || length > rx_buf_size)
1369 goto error; 1425 goto error;
1370 1426
1371 re->skb = sky2_rx_alloc_skb(sky2, rx_buf_size, GFP_ATOMIC); 1427 if (length < RX_COPY_THRESHOLD) {
1372 if (!re->skb) 1428 nskb = sky2_rx_alloc(sky2, length, GFP_ATOMIC);
1373 goto reuse; 1429 if (!nskb)
1374 1430 goto resubmit;
1375submit: 1431
1376 mapping = pci_map_single(sky2->hw->pdev, re->skb->data, 1432 pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->mapaddr,
1377 rx_buf_size, PCI_DMA_FROMDEVICE); 1433 length, PCI_DMA_FROMDEVICE);
1434 memcpy(nskb->data, re->skb->data, length);
1435 pci_dma_sync_single_for_device(sky2->hw->pdev, re->mapaddr,
1436 length, PCI_DMA_FROMDEVICE);
1437 skb = nskb;
1438 } else {
1439 nskb = sky2_rx_alloc(sky2, rx_buf_size, GFP_ATOMIC);
1440 if (!nskb)
1441 goto resubmit;
1378 1442
1379 pci_unmap_len_set(re, maplen, rx_buf_size); 1443 skb = re->skb;
1380 pci_unmap_addr_set(re, mapaddr, mapping); 1444 pci_unmap_single(sky2->hw->pdev, re->mapaddr,
1445 re->maplen, PCI_DMA_FROMDEVICE);
1446 prefetch(skb->data);
1381 1447
1382 sky2_rx_add(sky2, mapping, rx_buf_size); 1448 re->skb = nskb;
1383 sky2_put_idx(sky2->hw, rxqaddr[sky2->port], 1449 re->mapaddr = pci_map_single(sky2->hw->pdev, nskb->data,
1384 sky2->rx_put, &sky2->rx_last_put, RX_LE_SIZE); 1450 rx_buf_size, PCI_DMA_FROMDEVICE);
1451 re->maplen = rx_buf_size;
1452 }
1385 1453
1454resubmit:
1455 BUG_ON(re->skb == skb);
1456 sky2_rx_add(sky2, re);
1386 return skb; 1457 return skb;
1387 1458
1388error: 1459error:
1460 if (status & GMR_FS_GOOD_FC)
1461 goto resubmit;
1462
1389 if (netif_msg_rx_err(sky2)) 1463 if (netif_msg_rx_err(sky2))
1390 printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n", 1464 printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
1391 sky2->netdev->name, status, length); 1465 sky2->netdev->name, status, length);
1392 1466
1393 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) 1467 if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
1394 sky2->net_stats.rx_length_errors++; 1468 sky2->net_stats.rx_length_errors++;
1395 if (status & GMR_FS_FRAGMENT) 1469 if (status & GMR_FS_FRAGMENT)
1396 sky2->net_stats.rx_frame_errors++; 1470 sky2->net_stats.rx_frame_errors++;
1397 if (status & GMR_FS_CRC_ERR) 1471 if (status & GMR_FS_CRC_ERR)
1398 sky2->net_stats.rx_crc_errors++; 1472 sky2->net_stats.rx_crc_errors++;
1399reuse: 1473 if (status & GMR_FS_RX_FF_OV)
1400 re->skb = skb; 1474 sky2->net_stats.rx_fifo_errors++;
1401 skb = NULL; 1475 goto resubmit;
1402 goto submit;
1403} 1476}
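
For reference, a minimal sketch of the copy-break scheme sky2_receive() uses above (illustrative only, not part of the patch; the 128-byte cut-off is an assumption, since the driver compares against RX_COPY_THRESHOLD, whose value is not shown in this hunk):

#include <stdbool.h>

/* Assumed cut-off; the driver uses RX_COPY_THRESHOLD. */
#define COPY_BREAK 128

/* Short frames are copied into a small, freshly allocated skb so the
 * original DMA-mapped receive buffer stays posted to the hardware; longer
 * frames hand the filled buffer to the stack and a new buffer is mapped
 * into the ring slot.  Either way the slot is resubmitted via sky2_rx_add(). */
bool rx_should_copy(unsigned int length)
{
        return length < COPY_BREAK;
}
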
1404 1477
1405static u16 get_tx_index(u8 port, u32 status, u16 len) 1478/* Transmit ring index in reported status block is encoded as:
1479 *
1480 * | TXS2 | TXA2 | TXS1 | TXA1
1481 */
1482static inline u16 tx_index(u8 port, u32 status, u16 len)
1406{ 1483{
1407 if (port == 0) 1484 if (port == 0)
1408 return status & 0xfff; 1485 return status & 0xfff;
@@ -1411,10 +1488,8 @@ static u16 get_tx_index(u8 port, u32 status, u16 len)
1411} 1488}
1412 1489
1413/* 1490/*
1414 * NAPI poll routine.
1415 * Both ports share the same status interrupt, therefore there is only 1491 * Both ports share the same status interrupt, therefore there is only
1416 * one poll routine. 1492 * one poll routine.
1417 *
1418 */ 1493 */
1419static int sky2_poll(struct net_device *dev, int *budget) 1494static int sky2_poll(struct net_device *dev, int *budget)
1420{ 1495{
@@ -1422,31 +1497,27 @@ static int sky2_poll(struct net_device *dev, int *budget)
1422 struct sky2_hw *hw = sky2->hw; 1497 struct sky2_hw *hw = sky2->hw;
1423 unsigned int to_do = min(dev->quota, *budget); 1498 unsigned int to_do = min(dev->quota, *budget);
1424 unsigned int work_done = 0; 1499 unsigned int work_done = 0;
1500 u16 hwidx;
1425 unsigned char summed[2] = { CHECKSUM_NONE, CHECKSUM_NONE }; 1501 unsigned char summed[2] = { CHECKSUM_NONE, CHECKSUM_NONE };
1426 unsigned int csum[2] = { 0 }; 1502 unsigned int csum[2];
1427 unsigned int rx_handled[2] = { 0, 0};
1428 u16 last;
1429
1430 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
1431 last = sky2_read16(hw, STAT_PUT_IDX);
1432 1503
1433 while (hw->st_idx != last && work_done < to_do) { 1504 hwidx = sky2_read16(hw, STAT_PUT_IDX);
1505 rmb();
1506 while (hw->st_idx != hwidx && work_done < to_do) {
1434 struct sky2_status_le *le = hw->st_le + hw->st_idx; 1507 struct sky2_status_le *le = hw->st_le + hw->st_idx;
1435 struct sk_buff *skb; 1508 struct sk_buff *skb;
1436 u8 port; 1509 u8 port;
1437 u32 status; 1510 u32 status;
1438 u16 length; 1511 u16 length;
1439 1512
1440 rmb();
1441 status = le32_to_cpu(le->status); 1513 status = le32_to_cpu(le->status);
1442 length = le16_to_cpu(le->length); 1514 length = le16_to_cpu(le->length);
1443 port = le->link; 1515 port = le->link;
1444 1516
1445 BUG_ON(port >= hw->ports); 1517 BUG_ON(port >= hw->ports || hw->dev[port] == NULL);
1446 1518
1447 switch(le->opcode & ~HW_OWNER) { 1519 switch (le->opcode & ~HW_OWNER) {
1448 case OP_RXSTAT: 1520 case OP_RXSTAT:
1449 ++rx_handled[port];
1450 skb = sky2_receive(hw, port, length, status); 1521 skb = sky2_receive(hw, port, length, status);
1451 if (likely(skb)) { 1522 if (likely(skb)) {
1452 __skb_put(skb, length); 1523 __skb_put(skb, length);
@@ -1475,7 +1546,7 @@ static int sky2_poll(struct net_device *dev, int *budget)
1475 1546
1476 case OP_TXINDEXLE: 1547 case OP_TXINDEXLE:
1477 sky2_tx_complete(hw->dev[port], 1548 sky2_tx_complete(hw->dev[port],
1478 get_tx_index(port, status, length)); 1549 tx_index(port, status, length));
1479 break; 1550 break;
1480 1551
1481 case OP_RXTIMESTAMP: 1552 case OP_RXTIMESTAMP:
@@ -1483,14 +1554,27 @@ static int sky2_poll(struct net_device *dev, int *budget)
1483 1554
1484 default: 1555 default:
1485 if (net_ratelimit()) 1556 if (net_ratelimit())
1486 printk(KERN_WARNING PFX "unknown status opcode 0x%x\n", 1557 printk(KERN_WARNING PFX
1558 "unknown status opcode 0x%x\n",
1487 le->opcode); 1559 le->opcode);
1488 break; 1560 break;
1489 } 1561 }
1490 1562
1491 hw->st_idx = (hw->st_idx + 1) & (STATUS_RING_SIZE -1); 1563 hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
1564 if (hw->st_idx == hwidx) {
1565 hwidx = sky2_read16(hw, STAT_PUT_IDX);
1566 rmb();
1567 }
1492 } 1568 }
1493 1569
1570 mmiowb();
1571
1572 if (hw->dev[0])
1573 rx_set_put(hw->dev[0]);
1574
1575 if (hw->dev[1])
1576 rx_set_put(hw->dev[1]);
1577
1494 *budget -= work_done; 1578 *budget -= work_done;
1495 dev->quota -= work_done; 1579 dev->quota -= work_done;
1496 if (work_done < to_do) { 1580 if (work_done < to_do) {
@@ -1505,6 +1589,7 @@ static int sky2_poll(struct net_device *dev, int *budget)
1505 1589
1506 hw->intr_mask |= Y2_IS_STAT_BMU; 1590 hw->intr_mask |= Y2_IS_STAT_BMU;
1507 sky2_write32(hw, B0_IMSK, hw->intr_mask); 1591 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1592 sky2_read32(hw, B0_IMSK);
1508 netif_rx_complete(dev); 1593 netif_rx_complete(dev);
1509 } 1594 }
1510 1595
@@ -1553,40 +1638,35 @@ static void sky2_hw_intr(struct sky2_hw *hw)
1553{ 1638{
1554 u32 status = sky2_read32(hw, B0_HWE_ISRC); 1639 u32 status = sky2_read32(hw, B0_HWE_ISRC);
1555 1640
1556 if (status & Y2_IS_TIST_OV) { 1641 if (status & Y2_IS_TIST_OV)
1557 pr_debug (PFX "%s: unused timer overflow??\n",
1558 pci_name(hw->pdev));
1559 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 1642 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1560 }
1561 1643
1562 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { 1644 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
1563 u16 pci_err = sky2_read16(hw, PCI_C(PCI_STATUS)); 1645 u16 pci_err;
1646
1647 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err);
1564 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n", 1648 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
1565 pci_name(hw->pdev), pci_err); 1649 pci_name(hw->pdev), pci_err);
1566 1650
1567 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1651 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1568 sky2_write16(hw, PCI_C(PCI_STATUS), 1652 pci_write_config_word(hw->pdev, PCI_STATUS,
1569 pci_err | PCI_STATUS_ERROR_BITS); 1653 pci_err | PCI_STATUS_ERROR_BITS);
1570 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 1654 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1571 } 1655 }
1572 1656
1573 if (status & Y2_IS_PCI_EXP) { 1657 if (status & Y2_IS_PCI_EXP) {
1574 /* PCI-Express uncorrectable Error occurred */ 1658 /* PCI-Express uncorrectable Error occurred */
1575 u32 pex_err = sky2_read32(hw, PCI_C(PEX_UNC_ERR_STAT)); 1659 u32 pex_err;
1660
1661 pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err);
1576 1662
1577 /*
1578 * On PCI-Express bus bridges are called root complexes.
1579 * PCI-Express errors are recognized by the root complex too,
1580 * which requests the system to handle the problem. After error
1581 * occurence it may be that no access to the adapter may be performed
1582 * any longer.
1583 */
1584 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n", 1663 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
1585 pci_name(hw->pdev), pex_err); 1664 pci_name(hw->pdev), pex_err);
1586 1665
1587 /* clear the interrupt */ 1666 /* clear the interrupt */
1588 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1667 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1589 sky2_write32(hw, PCI_C(PEX_UNC_ERR_STAT), 0xffffffffUL); 1668 pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
1669 0xffffffffUL);
1590 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 1670 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1591 1671
1592 if (pex_err & PEX_FATAL_ERRORS) { 1672 if (pex_err & PEX_FATAL_ERRORS) {
@@ -1622,7 +1702,6 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
1622 ++sky2->net_stats.tx_fifo_errors; 1702 ++sky2->net_stats.tx_fifo_errors;
1623 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); 1703 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
1624 } 1704 }
1625
1626} 1705}
1627 1706
1628static void sky2_phy_intr(struct sky2_hw *hw, unsigned port) 1707static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
@@ -1641,19 +1720,22 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
1641 u32 status; 1720 u32 status;
1642 1721
1643 status = sky2_read32(hw, B0_Y2_SP_ISRC2); 1722 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
1644 if (status == 0 || status == ~0) /* hotplug or shared irq */ 1723 if (status == 0 || status == ~0)
1645 return IRQ_NONE; 1724 return IRQ_NONE;
1646 1725
1647 if (status & Y2_IS_HW_ERR) 1726 if (status & Y2_IS_HW_ERR)
1648 sky2_hw_intr(hw); 1727 sky2_hw_intr(hw);
1649 1728
1650 if ((status & Y2_IS_STAT_BMU) && netif_rx_schedule_prep(hw->dev[0])) { 1729 /* Do NAPI for Rx and Tx status */
1730 if ((status & Y2_IS_STAT_BMU) && netif_rx_schedule_test(hw->dev[0])) {
1731 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
1732
1651 hw->intr_mask &= ~Y2_IS_STAT_BMU; 1733 hw->intr_mask &= ~Y2_IS_STAT_BMU;
1652 sky2_write32(hw, B0_IMSK, hw->intr_mask); 1734 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1653 __netif_rx_schedule(hw->dev[0]); 1735 __netif_rx_schedule(hw->dev[0]);
1654 } 1736 }
1655 1737
1656 if (status & Y2_IS_IRQ_PHY1) 1738 if (status & Y2_IS_IRQ_PHY1)
1657 sky2_phy_intr(hw, 0); 1739 sky2_phy_intr(hw, 0);
1658 1740
1659 if (status & Y2_IS_IRQ_PHY2) 1741 if (status & Y2_IS_IRQ_PHY2)
@@ -1665,8 +1747,10 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
1665 if (status & Y2_IS_IRQ_MAC2) 1747 if (status & Y2_IS_IRQ_MAC2)
1666 sky2_mac_intr(hw, 1); 1748 sky2_mac_intr(hw, 1);
1667 1749
1668
1669 sky2_write32(hw, B0_Y2_SP_ICR, 2); 1750 sky2_write32(hw, B0_Y2_SP_ICR, 2);
1751
1752 sky2_read32(hw, B0_IMSK);
1753
1670 return IRQ_HANDLED; 1754 return IRQ_HANDLED;
1671} 1755}
1672 1756
@@ -1675,21 +1759,19 @@ static void sky2_netpoll(struct net_device *dev)
1675{ 1759{
1676 struct sky2_port *sky2 = netdev_priv(dev); 1760 struct sky2_port *sky2 = netdev_priv(dev);
1677 1761
1678 disable_irq(dev->irq); 1762 sky2_intr(sky2->hw->pdev->irq, sky2->hw, NULL);
1679 sky2_intr(dev->irq, sky2->hw, NULL);
1680 enable_irq(dev->irq);
1681} 1763}
1682#endif 1764#endif
1683 1765
1684/* Chip internal frequency for clock calculations */ 1766/* Chip internal frequency for clock calculations */
1685static inline u32 sky2_khz(const struct sky2_hw *hw) 1767static inline u32 sky2_khz(const struct sky2_hw *hw)
1686{ 1768{
1687 switch(hw->chip_id) { 1769 switch (hw->chip_id) {
1688 case CHIP_ID_YUKON_EC: 1770 case CHIP_ID_YUKON_EC:
1689 return 125000; /* 125 Mhz */ 1771 return 125000; /* 125 Mhz */
1690 case CHIP_ID_YUKON_FE: 1772 case CHIP_ID_YUKON_FE:
1691 return 100000; /* 100 Mhz */ 1773 return 100000; /* 100 Mhz */
1692 default: /* YUKON_XL */ 1774 default: /* YUKON_XL */
1693 return 156000; /* 156 Mhz */ 1775 return 156000; /* 156 Mhz */
1694 } 1776 }
1695} 1777}
@@ -1701,7 +1783,7 @@ static inline u32 sky2_ms2clk(const struct sky2_hw *hw, u32 ms)
1701 1783
1702static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us) 1784static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
1703{ 1785{
1704 return (sky2_khz(hw) * 75) / 1000; 1786 return (sky2_khz(hw) * us) / 1000;
1705} 1787}
1706 1788
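
For reference, the hunk above fixes sky2_us2clk() to scale by its us argument instead of a hard-coded 75.  A standalone worked example (illustrative only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Mirrors sky2_us2clk(): microseconds to chip clock ticks, with the chip
 * clock in kHz (125000 for Yukon-EC, 100000 for FE, 156000 for XL, per
 * sky2_khz() above). */
uint32_t us2clk(uint32_t khz, uint32_t us)
{
        return (khz * us) / 1000;
}

int main(void)
{
        /* On a 125 MHz Yukon-EC the 75 us descriptor-poll interval used in
         * sky2_reset() becomes 125000 * 75 / 1000 = 9375 clock ticks. */
        printf("%u\n", us2clk(125000, 75));
        return 0;
}
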
1707static int sky2_reset(struct sky2_hw *hw) 1789static int sky2_reset(struct sky2_hw *hw)
@@ -1721,6 +1803,12 @@ static int sky2_reset(struct sky2_hw *hw)
1721 return -EOPNOTSUPP; 1803 return -EOPNOTSUPP;
1722 } 1804 }
1723 1805
1806 /* ring for status responses */
1807 hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES,
1808 &hw->st_dma);
1809 if (!hw->st_le)
1810 return -ENOMEM;
1811
1724 /* disable ASF */ 1812 /* disable ASF */
1725 if (hw->chip_id <= CHIP_ID_YUKON_EC) { 1813 if (hw->chip_id <= CHIP_ID_YUKON_EC) {
1726 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 1814 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
@@ -1732,17 +1820,19 @@ static int sky2_reset(struct sky2_hw *hw)
1732 sky2_write8(hw, B0_CTST, CS_RST_CLR); 1820 sky2_write8(hw, B0_CTST, CS_RST_CLR);
1733 1821
1734 /* clear PCI errors, if any */ 1822 /* clear PCI errors, if any */
1735 status = sky2_read16(hw, PCI_C(PCI_STATUS)); 1823 pci_read_config_word(hw->pdev, PCI_STATUS, &status);
1736 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1824 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1737 sky2_write16(hw, PCI_C(PCI_STATUS), 1825 pci_write_config_word(hw->pdev, PCI_STATUS,
1738 status | PCI_STATUS_ERROR_BITS); 1826 status | PCI_STATUS_ERROR_BITS);
1739 1827
1740 sky2_write8(hw, B0_CTST, CS_MRST_CLR); 1828 sky2_write8(hw, B0_CTST, CS_MRST_CLR);
1741 1829
1742 /* clear any PEX errors */ 1830 /* clear any PEX errors */
1743 if (is_pciex(hw)) { 1831 if (is_pciex(hw)) {
1744 sky2_write32(hw, PCI_C(PEX_UNC_ERR_STAT), 0xffffffffUL); 1832 u16 lstat;
1745 sky2_read16(hw, PCI_C(PEX_LNK_STAT)); 1833 pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
1834 0xffffffffUL);
1835 pci_read_config_word(hw->pdev, PEX_LNK_STAT, &lstat);
1746 } 1836 }
1747 1837
1748 pmd_type = sky2_read8(hw, B2_PMD_TYP); 1838 pmd_type = sky2_read8(hw, B2_PMD_TYP);
@@ -1769,20 +1859,20 @@ static int sky2_reset(struct sky2_hw *hw)
1769 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 1859 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1770 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 1860 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1771 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS); 1861 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
1772 else 1862 else
1773 sky2_write8(hw, B2_Y2_CLK_GATE, 0); 1863 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
1774 1864
1775 /* Turn off phy power saving */ 1865 /* Turn off phy power saving */
1776 power = sky2_read32(hw, PCI_C(PCI_DEV_REG1)); 1866 pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &power);
1777 power &= ~(PCI_Y2_PHY1_POWD|PCI_Y2_PHY2_POWD); 1867 power &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
1778 1868
1779 /* back asswards .. */ 1869 /* looks like this xl is back asswards .. */
1780 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) { 1870 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) {
1781 power |= PCI_Y2_PHY1_COMA; 1871 power |= PCI_Y2_PHY1_COMA;
1782 if (hw->ports > 1) 1872 if (hw->ports > 1)
1783 power |= PCI_Y2_PHY2_COMA; 1873 power |= PCI_Y2_PHY2_COMA;
1784 } 1874 }
1785 sky2_write32(hw, PCI_C(PCI_DEV_REG1), power); 1875 pci_write_config_dword(hw->pdev, PCI_DEV_REG1, power);
1786 1876
1787 for (i = 0; i < hw->ports; i++) { 1877 for (i = 0; i < hw->ports; i++) {
1788 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); 1878 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
@@ -1791,20 +1881,22 @@ static int sky2_reset(struct sky2_hw *hw)
1791 1881
1792 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 1882 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1793 1883
1794 sky2_write32(hw, B2_I2C_IRQ, 1); /* Clear I2C IRQ noise */ 1884 /* Clear I2C IRQ noise */
1885 sky2_write32(hw, B2_I2C_IRQ, 1);
1795 1886
1796 /* turn off hardware timer (unused) */ 1887 /* turn off hardware timer (unused) */
1797 sky2_write8(hw, B2_TI_CTRL, TIM_STOP); 1888 sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
1798 sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ); 1889 sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
1799 1890
1800 sky2_write8(hw, B0_Y2LED, LED_STAT_ON); 1891 sky2_write8(hw, B0_Y2LED, LED_STAT_ON);
1801 1892
1802 /* Turn on descriptor polling -- is this necessary? */ 1893 /* Turn on descriptor polling (every 75us) */
1803 sky2_write32(hw, B28_DPT_INI, sky2_us2clk(hw, 75)); 1894 sky2_write32(hw, B28_DPT_INI, sky2_us2clk(hw, 75));
1804 sky2_write8(hw, B28_DPT_CTRL, DPT_START); 1895 sky2_write8(hw, B28_DPT_CTRL, DPT_START);
1805 1896
1806 /* Turn off receive timestamp */ 1897 /* Turn off receive timestamp */
1807 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP); 1898 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
1899 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1808 1900
1809 /* enable the Tx Arbiters */ 1901 /* enable the Tx Arbiters */
1810 for (i = 0; i < hw->ports; i++) 1902 for (i = 0; i < hw->ports; i++)
@@ -1812,7 +1904,7 @@ static int sky2_reset(struct sky2_hw *hw)
1812 1904
1813 /* Initialize ram interface */ 1905 /* Initialize ram interface */
1814 for (i = 0; i < hw->ports; i++) { 1906 for (i = 0; i < hw->ports; i++) {
1815 sky2_write16(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR); 1907 sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
1816 1908
1817 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53); 1909 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
1818 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53); 1910 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
@@ -1828,35 +1920,27 @@ static int sky2_reset(struct sky2_hw *hw)
1828 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53); 1920 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
1829 } 1921 }
1830 1922
1831 /* Optimize PCI Express access */
1832 if (is_pciex(hw)) { 1923 if (is_pciex(hw)) {
1833 u16 ctrl = sky2_read32(hw, PCI_C(PEX_DEV_CTRL)); 1924 u16 pctrl;
1834 ctrl &= ~PEX_DC_MAX_RRS_MSK; 1925
1835 ctrl |= PEX_DC_MAX_RD_RQ_SIZE(4); 1926 /* change Max. Read Request Size to 2048 bytes */
1927 pci_read_config_word(hw->pdev, PEX_DEV_CTRL, &pctrl);
1928 pctrl &= ~PEX_DC_MAX_RRS_MSK;
1929 pctrl |= PEX_DC_MAX_RD_RQ_SIZE(4);
1930
1931
1836 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1932 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1837 sky2_write16(hw, PCI_C(PEX_DEV_CTRL), ctrl); 1933 pci_write_config_word(hw->pdev, PEX_DEV_CTRL, pctrl);
1838 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 1934 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1839 } 1935 }
1840 1936
1841 sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK); 1937 sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
1842 1938
1843 hw->intr_mask = Y2_IS_BASE;
1844 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1845
1846 /* disable all GMAC IRQ's */
1847 sky2_write8(hw, GMAC_IRQ_MSK, 0);
1848
1849 spin_lock_bh(&hw->phy_lock); 1939 spin_lock_bh(&hw->phy_lock);
1850 for (i = 0; i < hw->ports; i++) 1940 for (i = 0; i < hw->ports; i++)
1851 sky2_phy_reset(hw, i); 1941 sky2_phy_reset(hw, i);
1852 spin_unlock_bh(&hw->phy_lock); 1942 spin_unlock_bh(&hw->phy_lock);
1853 1943
1854 /* Setup ring for status responses */
1855 hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES,
1856 &hw->st_dma);
1857 if (!hw->st_le)
1858 return -ENOMEM;
1859
1860 memset(hw->st_le, 0, STATUS_LE_BYTES); 1944 memset(hw->st_le, 0, STATUS_LE_BYTES);
1861 hw->st_idx = 0; 1945 hw->st_idx = 0;
1862 1946
@@ -1864,30 +1948,25 @@ static int sky2_reset(struct sky2_hw *hw)
1864 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR); 1948 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);
1865 1949
1866 sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma); 1950 sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
1867 sky2_write32(hw, STAT_LIST_ADDR_HI, (u64)hw->st_dma >> 32); 1951 sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
1868 1952
1869 /* Set the list last index */ 1953 /* Set the list last index */
1870 sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE-1); 1954 sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
1871 1955
1956 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_ms2clk(hw, 10));
1957
1958 /* These status setup values are copied from SysKonnect's driver */
1872 if (is_ec_a1(hw)) { 1959 if (is_ec_a1(hw)) {
1873 /* WA for dev. #4.3 */ 1960 /* WA for dev. #4.3 */
1874 sky2_write16(hw, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK); 1961 sky2_write16(hw, STAT_TX_IDX_TH, 0xfff); /* Tx Threshold */
1875 1962
1876 /* set Status-FIFO watermark */ 1963 /* set Status-FIFO watermark */
1877 sky2_write8(hw, STAT_FIFO_WM, 0x21); /* WA for dev. #4.18 */ 1964 sky2_write8(hw, STAT_FIFO_WM, 0x21); /* WA for dev. #4.18 */
1878 1965
1879 /* set Status-FIFO ISR watermark */ 1966 /* set Status-FIFO ISR watermark */
1880 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x07);/* WA for dev. #4.18 */ 1967 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x07); /* WA for dev. #4.18 */
1881 1968
1882 /* WA for dev. #4.3 and #4.18 */
1883 /* set Status-FIFO Tx timer init value */
1884 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_ms2clk(hw, 10));
1885 } else { 1969 } else {
1886 /*
1887 * Theses settings should avoid the
1888 * temporary hanging of the status BMU.
1889 * May be not all required... still under investigation...
1890 */
1891 sky2_write16(hw, STAT_TX_IDX_TH, 0x000a); 1970 sky2_write16(hw, STAT_TX_IDX_TH, 0x000a);
1892 1971
1893 /* set Status-FIFO watermark */ 1972 /* set Status-FIFO watermark */
@@ -1897,14 +1976,13 @@ static int sky2_reset(struct sky2_hw *hw)
1897 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0) 1976 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
1898 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x10); 1977 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x10);
1899 1978
1900 else /* WA 4109 */ 1979 else /* WA 4109 */
1901 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x04); 1980 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x04);
1902 1981
1903 sky2_write32(hw, STAT_ISR_TIMER_INI, 0x0190); 1982 sky2_write32(hw, STAT_ISR_TIMER_INI, 0x0190);
1904 } 1983 }
1905 1984
1906 /* enable the prefetch unit */ 1985 /* enable status unit */
1907 /* operational bit not functional for Yukon-EC, but fixed in Yukon-2? */
1908 sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON); 1986 sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
1909 1987
1910 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); 1988 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
@@ -1918,23 +1996,22 @@ static inline u32 sky2_supported_modes(const struct sky2_hw *hw)
1918{ 1996{
1919 u32 modes; 1997 u32 modes;
1920 if (hw->copper) { 1998 if (hw->copper) {
1921 modes = SUPPORTED_10baseT_Half 1999 modes = SUPPORTED_10baseT_Half
1922 | SUPPORTED_10baseT_Full 2000 | SUPPORTED_10baseT_Full
1923 | SUPPORTED_100baseT_Half 2001 | SUPPORTED_100baseT_Half
1924 | SUPPORTED_100baseT_Full 2002 | SUPPORTED_100baseT_Full
1925 | SUPPORTED_Autoneg| SUPPORTED_TP; 2003 | SUPPORTED_Autoneg | SUPPORTED_TP;
1926 2004
1927 if (hw->chip_id != CHIP_ID_YUKON_FE) 2005 if (hw->chip_id != CHIP_ID_YUKON_FE)
1928 modes |= SUPPORTED_1000baseT_Half 2006 modes |= SUPPORTED_1000baseT_Half
1929 | SUPPORTED_1000baseT_Full; 2007 | SUPPORTED_1000baseT_Full;
1930 } else 2008 } else
1931 modes = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE 2009 modes = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
1932 | SUPPORTED_Autoneg; 2010 | SUPPORTED_Autoneg;
1933 return modes; 2011 return modes;
1934} 2012}
1935 2013
1936static int sky2_get_settings(struct net_device *dev, 2014static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1937 struct ethtool_cmd *ecmd)
1938{ 2015{
1939 struct sky2_port *sky2 = netdev_priv(dev); 2016 struct sky2_port *sky2 = netdev_priv(dev);
1940 struct sky2_hw *hw = sky2->hw; 2017 struct sky2_hw *hw = sky2->hw;
@@ -1944,13 +2021,12 @@ static int sky2_get_settings(struct net_device *dev,
1944 ecmd->phy_address = PHY_ADDR_MARV; 2021 ecmd->phy_address = PHY_ADDR_MARV;
1945 if (hw->copper) { 2022 if (hw->copper) {
1946 ecmd->supported = SUPPORTED_10baseT_Half 2023 ecmd->supported = SUPPORTED_10baseT_Half
1947 2024 | SUPPORTED_10baseT_Full
1948 | SUPPORTED_10baseT_Full 2025 | SUPPORTED_100baseT_Half
1949 | SUPPORTED_100baseT_Half 2026 | SUPPORTED_100baseT_Full
1950 | SUPPORTED_100baseT_Full 2027 | SUPPORTED_1000baseT_Half
1951 | SUPPORTED_1000baseT_Half 2028 | SUPPORTED_1000baseT_Full
1952 | SUPPORTED_1000baseT_Full 2029 | SUPPORTED_Autoneg | SUPPORTED_TP;
1953 | SUPPORTED_Autoneg| SUPPORTED_TP;
1954 ecmd->port = PORT_TP; 2030 ecmd->port = PORT_TP;
1955 } else 2031 } else
1956 ecmd->port = PORT_FIBRE; 2032 ecmd->port = PORT_FIBRE;
@@ -1975,7 +2051,7 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1975 } else { 2051 } else {
1976 u32 setting; 2052 u32 setting;
1977 2053
1978 switch(ecmd->speed) { 2054 switch (ecmd->speed) {
1979 case SPEED_1000: 2055 case SPEED_1000:
1980 if (ecmd->duplex == DUPLEX_FULL) 2056 if (ecmd->duplex == DUPLEX_FULL)
1981 setting = SUPPORTED_1000baseT_Full; 2057 setting = SUPPORTED_1000baseT_Full;
@@ -2035,8 +2111,8 @@ static void sky2_get_drvinfo(struct net_device *dev,
2035} 2111}
2036 2112
2037static const struct sky2_stat { 2113static const struct sky2_stat {
2038 char name[ETH_GSTRING_LEN]; 2114 char name[ETH_GSTRING_LEN];
2039 u16 offset; 2115 u16 offset;
2040} sky2_stats[] = { 2116} sky2_stats[] = {
2041 { "tx_bytes", GM_TXO_OK_HI }, 2117 { "tx_bytes", GM_TXO_OK_HI },
2042 { "rx_bytes", GM_RXO_OK_HI }, 2118 { "rx_bytes", GM_RXO_OK_HI },
@@ -2061,7 +2137,6 @@ static const struct sky2_stat {
2061 { "rx_fcs_error", GM_RXF_FCS_ERR }, 2137 { "rx_fcs_error", GM_RXF_FCS_ERR },
2062}; 2138};
2063 2139
2064
2065static u32 sky2_get_rx_csum(struct net_device *dev) 2140static u32 sky2_get_rx_csum(struct net_device *dev)
2066{ 2141{
2067 struct sky2_port *sky2 = netdev_priv(dev); 2142 struct sky2_port *sky2 = netdev_priv(dev);
@@ -2074,6 +2149,7 @@ static int sky2_set_rx_csum(struct net_device *dev, u32 data)
2074 struct sky2_port *sky2 = netdev_priv(dev); 2149 struct sky2_port *sky2 = netdev_priv(dev);
2075 2150
2076 sky2->rx_csum = data; 2151 sky2->rx_csum = data;
2152
2077 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), 2153 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
2078 data ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); 2154 data ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
2079 2155
@@ -2086,22 +2162,21 @@ static u32 sky2_get_msglevel(struct net_device *netdev)
2086 return sky2->msg_enable; 2162 return sky2->msg_enable;
2087} 2163}
2088 2164
2089static void sky2_phy_stats(struct sky2_port *sky2, u64 *data) 2165static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count)
2090{ 2166{
2091 struct sky2_hw *hw = sky2->hw; 2167 struct sky2_hw *hw = sky2->hw;
2092 unsigned port = sky2->port; 2168 unsigned port = sky2->port;
2093 int i; 2169 int i;
2094 2170
2095 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32 2171 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
2096 | (u64) gma_read32(hw, port, GM_TXO_OK_LO); 2172 | (u64) gma_read32(hw, port, GM_TXO_OK_LO);
2097 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32 2173 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
2098 | (u64) gma_read32(hw, port, GM_RXO_OK_LO); 2174 | (u64) gma_read32(hw, port, GM_RXO_OK_LO);
2099 2175
2100 for (i = 2; i < ARRAY_SIZE(sky2_stats); i++) 2176 for (i = 2; i < count; i++)
2101 data[i] = (u64) gma_read32(hw, port, sky2_stats[i].offset); 2177 data[i] = (u64) gma_read32(hw, port, sky2_stats[i].offset);
2102} 2178}
2103 2179
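
For reference, a standalone sketch of how sky2_phy_stats() above widens the two 32-bit GMAC MIB halves (e.g. GM_TXO_OK_HI / GM_TXO_OK_LO) into one 64-bit counter (illustrative only, not part of the patch):

#include <stdint.h>

/* The octets-OK counters are exposed as separate high and low 32-bit
 * registers; the driver combines each pair like this before reporting
 * tx_bytes/rx_bytes through ethtool and get_stats. */
uint64_t mib_counter64(uint32_t hi, uint32_t lo)
{
        return ((uint64_t)hi << 32) | (uint64_t)lo;
}
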
2104
2105static void sky2_set_msglevel(struct net_device *netdev, u32 value) 2180static void sky2_set_msglevel(struct net_device *netdev, u32 value)
2106{ 2181{
2107 struct sky2_port *sky2 = netdev_priv(netdev); 2182 struct sky2_port *sky2 = netdev_priv(netdev);
@@ -2114,14 +2189,14 @@ static int sky2_get_stats_count(struct net_device *dev)
2114} 2189}
2115 2190
2116static void sky2_get_ethtool_stats(struct net_device *dev, 2191static void sky2_get_ethtool_stats(struct net_device *dev,
2117 struct ethtool_stats *stats, u64 *data) 2192 struct ethtool_stats *stats, u64 * data)
2118{ 2193{
2119 struct sky2_port *sky2 = netdev_priv(dev); 2194 struct sky2_port *sky2 = netdev_priv(dev);
2120 2195
2121 sky2_phy_stats(sky2, data); 2196 sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats));
2122} 2197}
2123 2198
2124static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 *data) 2199static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2125{ 2200{
2126 int i; 2201 int i;
2127 2202
@@ -2141,9 +2216,9 @@ static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2141static struct net_device_stats *sky2_get_stats(struct net_device *dev) 2216static struct net_device_stats *sky2_get_stats(struct net_device *dev)
2142{ 2217{
2143 struct sky2_port *sky2 = netdev_priv(dev); 2218 struct sky2_port *sky2 = netdev_priv(dev);
2144 u64 data[ARRAY_SIZE(sky2_stats)]; 2219 u64 data[13];
2145 2220
2146 sky2_phy_stats(sky2, data); 2221 sky2_phy_stats(sky2, data, ARRAY_SIZE(data));
2147 2222
2148 sky2->net_stats.tx_bytes = data[0]; 2223 sky2->net_stats.tx_bytes = data[0];
2149 sky2->net_stats.rx_bytes = data[1]; 2224 sky2->net_stats.rx_bytes = data[1];
@@ -2167,9 +2242,9 @@ static int sky2_set_mac_address(struct net_device *dev, void *p)
2167 2242
2168 sky2_down(dev); 2243 sky2_down(dev);
2169 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 2244 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
2170 memcpy_toio(sky2->hw->regs + B2_MAC_1 + sky2->port*8, 2245 memcpy_toio(sky2->hw->regs + B2_MAC_1 + sky2->port * 8,
2171 dev->dev_addr, ETH_ALEN); 2246 dev->dev_addr, ETH_ALEN);
2172 memcpy_toio(sky2->hw->regs + B2_MAC_2 + sky2->port*8, 2247 memcpy_toio(sky2->hw->regs + B2_MAC_2 + sky2->port * 8,
2173 dev->dev_addr, ETH_ALEN); 2248 dev->dev_addr, ETH_ALEN);
2174 if (dev->flags & IFF_UP) 2249 if (dev->flags & IFF_UP)
2175 err = sky2_up(dev); 2250 err = sky2_up(dev);
@@ -2190,11 +2265,11 @@ static void sky2_set_multicast(struct net_device *dev)
2190 reg = gma_read16(hw, port, GM_RX_CTRL); 2265 reg = gma_read16(hw, port, GM_RX_CTRL);
2191 reg |= GM_RXCR_UCF_ENA; 2266 reg |= GM_RXCR_UCF_ENA;
2192 2267
2193 if (dev->flags & IFF_PROMISC) /* promiscuous */ 2268 if (dev->flags & IFF_PROMISC) /* promiscuous */
2194 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 2269 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
2195 else if (dev->flags & IFF_ALLMULTI) /* all multicast */ 2270 else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 16) /* all multicast */
2196 memset(filter, 0xff, sizeof(filter)); 2271 memset(filter, 0xff, sizeof(filter));
2197 else if (dev->mc_count == 0) /* no multicast */ 2272 else if (dev->mc_count == 0) /* no multicast */
2198 reg &= ~GM_RXCR_MCF_ENA; 2273 reg &= ~GM_RXCR_MCF_ENA;
2199 else { 2274 else {
2200 int i; 2275 int i;
@@ -2202,19 +2277,18 @@ static void sky2_set_multicast(struct net_device *dev)
2202 2277
2203 for (i = 0; list && i < dev->mc_count; i++, list = list->next) { 2278 for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
2204 u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f; 2279 u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
2205 filter[bit/8] |= 1 << (bit%8); 2280 filter[bit / 8] |= 1 << (bit % 8);
2206 } 2281 }
2207 } 2282 }
2208 2283
2209
2210 gma_write16(hw, port, GM_MC_ADDR_H1, 2284 gma_write16(hw, port, GM_MC_ADDR_H1,
2211 (u16)filter[0] | ((u16)filter[1] << 8)); 2285 (u16) filter[0] | ((u16) filter[1] << 8));
2212 gma_write16(hw, port, GM_MC_ADDR_H2, 2286 gma_write16(hw, port, GM_MC_ADDR_H2,
2213 (u16)filter[2] | ((u16)filter[3] << 8)); 2287 (u16) filter[2] | ((u16) filter[3] << 8));
2214 gma_write16(hw, port, GM_MC_ADDR_H3, 2288 gma_write16(hw, port, GM_MC_ADDR_H3,
2215 (u16)filter[4] | ((u16)filter[5] << 8)); 2289 (u16) filter[4] | ((u16) filter[5] << 8));
2216 gma_write16(hw, port, GM_MC_ADDR_H4, 2290 gma_write16(hw, port, GM_MC_ADDR_H4,
2217 (u16)filter[6] | ((u16)filter[7] << 8)); 2291 (u16) filter[6] | ((u16) filter[7] << 8));
2218 2292
2219 gma_write16(hw, port, GM_RX_CTRL, reg); 2293 gma_write16(hw, port, GM_RX_CTRL, reg);
2220} 2294}
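
For reference, a standalone sketch of the 64-bit multicast hash filter built in sky2_set_multicast() above (illustrative only, not part of the patch; in the driver the CRC comes from the kernel's ether_crc()):

#include <stdint.h>

/* The low 6 bits of the Ethernet CRC of each multicast address select one
 * of 64 filter bits; the resulting 8-byte filter is then loaded into
 * GM_MC_ADDR_H1..H4 sixteen bits at a time, as the hunk above shows. */
void set_mc_filter_bit(uint8_t filter[8], uint32_t crc_of_addr)
{
        uint32_t bit = crc_of_addr & 0x3f;

        filter[bit / 8] |= 1u << (bit % 8);
}
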
@@ -2224,24 +2298,38 @@ static void sky2_set_multicast(struct net_device *dev)
2224 */ 2298 */
2225static inline void sky2_led(struct sky2_hw *hw, unsigned port, int on) 2299static inline void sky2_led(struct sky2_hw *hw, unsigned port, int on)
2226{ 2300{
2301 u16 pg;
2302
2227 spin_lock_bh(&hw->phy_lock); 2303 spin_lock_bh(&hw->phy_lock);
2228 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); 2304 switch (hw->chip_id) {
2229 if (on) 2305 case CHIP_ID_YUKON_XL:
2306 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
2307 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
2308 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
2309 on ? (PHY_M_LEDC_LOS_CTRL(1) |
2310 PHY_M_LEDC_INIT_CTRL(7) |
2311 PHY_M_LEDC_STA1_CTRL(7) |
2312 PHY_M_LEDC_STA0_CTRL(7))
2313 : 0);
2314
2315 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
2316 break;
2317
2318 default:
2319 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
2230 gm_phy_write(hw, port, PHY_MARV_LED_OVER, 2320 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
2231 PHY_M_LED_MO_DUP(MO_LED_ON) | 2321 on ? PHY_M_LED_MO_DUP(MO_LED_ON) |
2232 PHY_M_LED_MO_10(MO_LED_ON) | 2322 PHY_M_LED_MO_10(MO_LED_ON) |
2233 PHY_M_LED_MO_100(MO_LED_ON) | 2323 PHY_M_LED_MO_100(MO_LED_ON) |
2234 PHY_M_LED_MO_1000(MO_LED_ON) | 2324 PHY_M_LED_MO_1000(MO_LED_ON) |
2235 PHY_M_LED_MO_RX(MO_LED_ON)); 2325 PHY_M_LED_MO_RX(MO_LED_ON)
2236 else 2326 : PHY_M_LED_MO_DUP(MO_LED_OFF) |
2237 gm_phy_write(hw, port, PHY_MARV_LED_OVER, 2327 PHY_M_LED_MO_10(MO_LED_OFF) |
2238 2328 PHY_M_LED_MO_100(MO_LED_OFF) |
2239 PHY_M_LED_MO_DUP(MO_LED_OFF) |
2240 PHY_M_LED_MO_10(MO_LED_OFF) |
2241 PHY_M_LED_MO_100(MO_LED_OFF) |
2242 PHY_M_LED_MO_1000(MO_LED_OFF) | 2329 PHY_M_LED_MO_1000(MO_LED_OFF) |
2243 PHY_M_LED_MO_RX(MO_LED_OFF)); 2330 PHY_M_LED_MO_RX(MO_LED_OFF));
2244 2331
2332 }
2245 spin_unlock_bh(&hw->phy_lock); 2333 spin_unlock_bh(&hw->phy_lock);
2246} 2334}
2247 2335
@@ -2251,19 +2339,26 @@ static int sky2_phys_id(struct net_device *dev, u32 data)
2251 struct sky2_port *sky2 = netdev_priv(dev); 2339 struct sky2_port *sky2 = netdev_priv(dev);
2252 struct sky2_hw *hw = sky2->hw; 2340 struct sky2_hw *hw = sky2->hw;
2253 unsigned port = sky2->port; 2341 unsigned port = sky2->port;
2254 u16 ledctrl, ledover; 2342 u16 ledctrl, ledover = 0;
2255 long ms; 2343 long ms;
2256 int onoff = 1; 2344 int onoff = 1;
2257 2345
2258 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) 2346 if (!data || data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ))
2259 ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT); 2347 ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT);
2260 else 2348 else
2261 ms = data * 1000; 2349 ms = data * 1000;
2262 2350
2263 /* save initial values */ 2351 /* save initial values */
2264 spin_lock_bh(&hw->phy_lock); 2352 spin_lock_bh(&hw->phy_lock);
2265 ledctrl = gm_phy_read(hw, port, PHY_MARV_LED_CTRL); 2353 if (hw->chip_id == CHIP_ID_YUKON_XL) {
2266 ledover = gm_phy_read(hw, port, PHY_MARV_LED_OVER); 2354 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
2355 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
2356 ledctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
2357 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
2358 } else {
2359 ledctrl = gm_phy_read(hw, port, PHY_MARV_LED_CTRL);
2360 ledover = gm_phy_read(hw, port, PHY_MARV_LED_OVER);
2361 }
2267 spin_unlock_bh(&hw->phy_lock); 2362 spin_unlock_bh(&hw->phy_lock);
2268 2363
2269 while (ms > 0) { 2364 while (ms > 0) {
@@ -2277,8 +2372,15 @@ static int sky2_phys_id(struct net_device *dev, u32 data)
2277 2372
2278 /* resume regularly scheduled programming */ 2373 /* resume regularly scheduled programming */
2279 spin_lock_bh(&hw->phy_lock); 2374 spin_lock_bh(&hw->phy_lock);
2280 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); 2375 if (hw->chip_id == CHIP_ID_YUKON_XL) {
2281 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); 2376 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
2377 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
2378 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ledctrl);
2379 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
2380 } else {
2381 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
2382 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
2383 }
2282 spin_unlock_bh(&hw->phy_lock); 2384 spin_unlock_bh(&hw->phy_lock);
2283 2385
2284 return 0; 2386 return 0;
@@ -2344,30 +2446,102 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2344} 2446}
2345#endif 2447#endif
2346 2448
2449static void sky2_get_ringparam(struct net_device *dev,
2450 struct ethtool_ringparam *ering)
2451{
2452 struct sky2_port *sky2 = netdev_priv(dev);
2453
2454 ering->rx_max_pending = RX_MAX_PENDING;
2455 ering->rx_mini_max_pending = 0;
2456 ering->rx_jumbo_max_pending = 0;
2457 ering->tx_max_pending = TX_RING_SIZE - 1;
2458
2459 ering->rx_pending = sky2->rx_pending;
2460 ering->rx_mini_pending = 0;
2461 ering->rx_jumbo_pending = 0;
2462 ering->tx_pending = sky2->tx_pending;
2463}
2464
2465static int sky2_set_ringparam(struct net_device *dev,
2466 struct ethtool_ringparam *ering)
2467{
2468 struct sky2_port *sky2 = netdev_priv(dev);
2469 int err = 0;
2470
2471 if (ering->rx_pending > RX_MAX_PENDING ||
2472 ering->rx_pending < 8 ||
2473 ering->tx_pending < MAX_SKB_TX_LE ||
2474 ering->tx_pending > TX_RING_SIZE - 1)
2475 return -EINVAL;
2476
2477 if (netif_running(dev))
2478 sky2_down(dev);
2479
2480 sky2->rx_pending = ering->rx_pending;
2481 sky2->tx_pending = ering->tx_pending;
2482
2483 if (netif_running(dev))
2484 err = sky2_up(dev);
2485
2486 return err;
2487}
2488
2489#define SKY2_REGS_LEN 0x1000
2490static int sky2_get_regs_len(struct net_device *dev)
2491{
2492 return SKY2_REGS_LEN;
2493}
2494
2495/*
2496 * Returns copy of control register region
2497 * I/O region is divided into banks and certain regions are unreadable
2498 */
2499static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2500 void *p)
2501{
2502 const struct sky2_port *sky2 = netdev_priv(dev);
2503 unsigned long offs;
2504 const void __iomem *io = sky2->hw->regs;
2505 static const unsigned long bankmap = 0xfff3f305;
2506
2507 regs->version = 1;
2508 for (offs = 0; offs < regs->len; offs += 128) {
2509 u32 len = min_t(u32, 128, regs->len - offs);
2510
2511 if (bankmap & (1 << (offs / 128)))
2512 memcpy_fromio(p + offs, io + offs, len);
2513 else
2514 memset(p + offs, 0, len);
2515 }
2516}
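
For reference, a standalone sketch of the bank test sky2_get_regs() uses above (illustrative only, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* The 0x1000-byte register window is treated as 32 banks of 128 bytes,
 * one bit of the map per bank; banks whose bit is clear are returned as
 * zeroes in the ethtool register dump. */
bool bank_is_readable(unsigned long offset)
{
        static const uint32_t bankmap = 0xfff3f305;

        return bankmap & (1u << (offset / 128));
}
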
2347 2517
2348static struct ethtool_ops sky2_ethtool_ops = { 2518static struct ethtool_ops sky2_ethtool_ops = {
2349 .get_settings = sky2_get_settings, 2519 .get_settings = sky2_get_settings,
2350 .set_settings = sky2_set_settings, 2520 .set_settings = sky2_set_settings,
2351 .get_drvinfo = sky2_get_drvinfo, 2521 .get_drvinfo = sky2_get_drvinfo,
2352 .get_msglevel = sky2_get_msglevel, 2522 .get_msglevel = sky2_get_msglevel,
2353 .set_msglevel = sky2_set_msglevel, 2523 .set_msglevel = sky2_set_msglevel,
2354 .get_link = ethtool_op_get_link, 2524 .get_regs_len = sky2_get_regs_len,
2355 .get_sg = ethtool_op_get_sg, 2525 .get_regs = sky2_get_regs,
2356 .set_sg = ethtool_op_set_sg, 2526 .get_link = ethtool_op_get_link,
2357 .get_tx_csum = ethtool_op_get_tx_csum, 2527 .get_sg = ethtool_op_get_sg,
2358 .set_tx_csum = ethtool_op_set_tx_csum, 2528 .set_sg = ethtool_op_set_sg,
2359 .get_tso = ethtool_op_get_tso, 2529 .get_tx_csum = ethtool_op_get_tx_csum,
2360 .set_tso = ethtool_op_set_tso, 2530 .set_tx_csum = ethtool_op_set_tx_csum,
2361 .get_rx_csum = sky2_get_rx_csum, 2531 .get_tso = ethtool_op_get_tso,
2362 .set_rx_csum = sky2_set_rx_csum, 2532 .set_tso = ethtool_op_set_tso,
2363 .get_strings = sky2_get_strings, 2533 .get_rx_csum = sky2_get_rx_csum,
2534 .set_rx_csum = sky2_set_rx_csum,
2535 .get_strings = sky2_get_strings,
2536 .get_ringparam = sky2_get_ringparam,
2537 .set_ringparam = sky2_set_ringparam,
2364 .get_pauseparam = sky2_get_pauseparam, 2538 .get_pauseparam = sky2_get_pauseparam,
2365 .set_pauseparam = sky2_set_pauseparam, 2539 .set_pauseparam = sky2_set_pauseparam,
2366#ifdef CONFIG_PM 2540#ifdef CONFIG_PM
2367 .get_wol = sky2_get_wol, 2541 .get_wol = sky2_get_wol,
2368 .set_wol = sky2_set_wol, 2542 .set_wol = sky2_set_wol,
2369#endif 2543#endif
2370 .phys_id = sky2_phys_id, 2544 .phys_id = sky2_phys_id,
2371 .get_stats_count = sky2_get_stats_count, 2545 .get_stats_count = sky2_get_stats_count,
2372 .get_ethtool_stats = sky2_get_ethtool_stats, 2546 .get_ethtool_stats = sky2_get_ethtool_stats,
2373}; 2547};
@@ -2402,7 +2576,6 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
2402#ifdef CONFIG_NET_POLL_CONTROLLER 2576#ifdef CONFIG_NET_POLL_CONTROLLER
2403 dev->poll_controller = sky2_netpoll; 2577 dev->poll_controller = sky2_netpoll;
2404#endif 2578#endif
2405 dev->irq = hw->pdev->irq;
2406 2579
2407 sky2 = netdev_priv(dev); 2580 sky2 = netdev_priv(dev);
2408 sky2->netdev = dev; 2581 sky2->netdev = dev;
@@ -2418,20 +2591,21 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
2418 sky2->speed = -1; 2591 sky2->speed = -1;
2419 sky2->advertising = sky2_supported_modes(hw); 2592 sky2->advertising = sky2_supported_modes(hw);
2420 sky2->rx_csum = 1; 2593 sky2->rx_csum = 1;
2421 sky2->rx_ring_size = is_ec_a1(hw) ? MIN_RX_BUFFERS : MAX_RX_BUFFERS; 2594 tasklet_init(&sky2->phy_task, sky2_phy_task, (unsigned long)sky2);
2422 tasklet_init(&sky2->phy_task, sky2_phy_task, (unsigned long) sky2); 2595 sky2->tx_pending = TX_DEF_PENDING;
2596 sky2->rx_pending = is_ec_a1(hw) ? 8 : RX_DEF_PENDING;
2423 2597
2424 hw->dev[port] = dev; 2598 hw->dev[port] = dev;
2425 2599
2426 sky2->port = port; 2600 sky2->port = port;
2427 2601
2428 dev->features |= NETIF_F_LLTX; 2602 dev->features |= NETIF_F_LLTX | NETIF_F_TSO;
2429 if (highmem) 2603 if (highmem)
2430 dev->features |= NETIF_F_HIGHDMA; 2604 dev->features |= NETIF_F_HIGHDMA;
2431 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO; 2605 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2432 2606
2433 /* read the mac address */ 2607 /* read the mac address */
2434 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); 2608 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
2435 2609
2436 /* device is off until link detection */ 2610 /* device is off until link detection */
2437 netif_carrier_off(dev); 2611 netif_carrier_off(dev);
@@ -2454,20 +2628,22 @@ static inline void sky2_show_addr(struct net_device *dev)
2454static int __devinit sky2_probe(struct pci_dev *pdev, 2628static int __devinit sky2_probe(struct pci_dev *pdev,
2455 const struct pci_device_id *ent) 2629 const struct pci_device_id *ent)
2456{ 2630{
2457 struct net_device *dev, *dev1; 2631 struct net_device *dev, *dev1 = NULL;
2458 struct sky2_hw *hw; 2632 struct sky2_hw *hw;
2459 int err, using_dac = 0; 2633 int err, using_dac = 0;
2460 2634
2461 if ((err = pci_enable_device(pdev))) { 2635 err = pci_enable_device(pdev);
2636 if (err) {
2462 printk(KERN_ERR PFX "%s cannot enable PCI device\n", 2637 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
2463 pci_name(pdev)); 2638 pci_name(pdev));
2464 goto err_out; 2639 goto err_out;
2465 } 2640 }
2466 2641
2467 if ((err = pci_request_regions(pdev, DRV_NAME))) { 2642 err = pci_request_regions(pdev, DRV_NAME);
2643 if (err) {
2468 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", 2644 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
2469 pci_name(pdev)); 2645 pci_name(pdev));
2470 goto err_out_disable_pdev; 2646 goto err_out;
2471 } 2647 }
2472 2648
2473 pci_set_master(pdev); 2649 pci_set_master(pdev);
@@ -2486,7 +2662,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
2486 goto err_out_free_regions; 2662 goto err_out_free_regions;
2487 } 2663 }
2488 } 2664 }
2489
2490#ifdef __BIG_ENDIAN 2665#ifdef __BIG_ENDIAN
2491 /* byte swap descriptors in hardware */ 2666 /* byte swap descriptors in hardware */
2492 { 2667 {
@@ -2517,26 +2692,21 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
2517 goto err_out_free_hw; 2692 goto err_out_free_hw;
2518 } 2693 }
2519 2694
2520 err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
2521 if (err) {
2522 printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
2523 pci_name(pdev), pdev->irq);
2524 goto err_out_iounmap;
2525 }
2526 pci_set_drvdata(pdev, hw);
2527
2528 err = sky2_reset(hw); 2695 err = sky2_reset(hw);
2529 if (err) 2696 if (err)
2530 goto err_out_free_irq; 2697 goto err_out_iounmap;
2531 2698
2532 printk(KERN_INFO PFX "addr 0x%lx irq %d chip 0x%x (%s) rev %d\n", 2699 printk(KERN_INFO PFX "addr 0x%lx irq %d Yukon-%s (0x%x) rev %d\n",
2533 pci_resource_start(pdev, 0), pdev->irq, 2700 pci_resource_start(pdev, 0), pdev->irq,
2534 hw->chip_id, chip_name(hw->chip_id), hw->chip_rev); 2701 yukon_name[hw->chip_id - CHIP_ID_YUKON],
2702 hw->chip_id, hw->chip_rev);
2535 2703
2536 if ((dev = sky2_init_netdev(hw, 0, using_dac)) == NULL) 2704 dev = sky2_init_netdev(hw, 0, using_dac);
2705 if (!dev)
2537 goto err_out_free_pci; 2706 goto err_out_free_pci;
2538 2707
2539 if ((err = register_netdev(dev))) { 2708 err = register_netdev(dev);
2709 if (err) {
2540 printk(KERN_ERR PFX "%s: cannot register net device\n", 2710 printk(KERN_ERR PFX "%s: cannot register net device\n",
2541 pci_name(pdev)); 2711 pci_name(pdev));
2542 goto err_out_free_netdev; 2712 goto err_out_free_netdev;
@@ -2549,20 +2719,37 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
2549 sky2_show_addr(dev1); 2719 sky2_show_addr(dev1);
2550 else { 2720 else {
2551 /* Failure to register second port need not be fatal */ 2721 /* Failure to register second port need not be fatal */
2552 printk(KERN_WARNING PFX "register of second port failed\n"); 2722 printk(KERN_WARNING PFX
2723 "register of second port failed\n");
2553 hw->dev[1] = NULL; 2724 hw->dev[1] = NULL;
2554 free_netdev(dev1); 2725 free_netdev(dev1);
2555 } 2726 }
2556 } 2727 }
2557 2728
2729 err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
2730 if (err) {
2731 printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
2732 pci_name(pdev), pdev->irq);
2733 goto err_out_unregister;
2734 }
2735
2736 hw->intr_mask = Y2_IS_BASE;
2737 sky2_write32(hw, B0_IMSK, hw->intr_mask);
2738
2739 pci_set_drvdata(pdev, hw);
2740
2558 return 0; 2741 return 0;
2559 2742
2743err_out_unregister:
2744 if (dev1) {
2745 unregister_netdev(dev1);
2746 free_netdev(dev1);
2747 }
2748 unregister_netdev(dev);
2560err_out_free_netdev: 2749err_out_free_netdev:
2561 free_netdev(dev); 2750 free_netdev(dev);
2562
2563err_out_free_irq:
2564 free_irq(pdev->irq, hw);
2565err_out_free_pci: 2751err_out_free_pci:
2752 sky2_write8(hw, B0_CTST, CS_RST_SET);
2566 pci_free_consistent(hw->pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); 2753 pci_free_consistent(hw->pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
2567err_out_iounmap: 2754err_out_iounmap:
2568 iounmap(hw->regs); 2755 iounmap(hw->regs);
@@ -2570,33 +2757,34 @@ err_out_free_hw:
2570 kfree(hw); 2757 kfree(hw);
2571err_out_free_regions: 2758err_out_free_regions:
2572 pci_release_regions(pdev); 2759 pci_release_regions(pdev);
2573err_out_disable_pdev:
2574 pci_disable_device(pdev); 2760 pci_disable_device(pdev);
2575 pci_set_drvdata(pdev, NULL);
2576err_out: 2761err_out:
2577 return err; 2762 return err;
2578} 2763}
2579 2764
2580static void __devexit sky2_remove(struct pci_dev *pdev) 2765static void __devexit sky2_remove(struct pci_dev *pdev)
2581{ 2766{
2582 struct sky2_hw *hw = pci_get_drvdata(pdev); 2767 struct sky2_hw *hw = pci_get_drvdata(pdev);
2583 struct net_device *dev0, *dev1; 2768 struct net_device *dev0, *dev1;
2584 2769
2585 if(!hw) 2770 if (!hw)
2586 return; 2771 return;
2587 2772
2588 if ((dev1 = hw->dev[1]))
2589 unregister_netdev(dev1);
2590 dev0 = hw->dev[0]; 2773 dev0 = hw->dev[0];
2774 dev1 = hw->dev[1];
2775 if (dev1)
2776 unregister_netdev(dev1);
2591 unregister_netdev(dev0); 2777 unregister_netdev(dev0);
2592 2778
2779 sky2_write32(hw, B0_IMSK, 0);
2593 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF); 2780 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
2781 sky2_write8(hw, B0_CTST, CS_RST_SET);
2594 2782
2595 free_irq(pdev->irq, hw); 2783 free_irq(pdev->irq, hw);
2596 pci_free_consistent(pdev, STATUS_LE_BYTES, 2784 pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
2597 hw->st_le, hw->st_dma);
2598 pci_release_regions(pdev); 2785 pci_release_regions(pdev);
2599 pci_disable_device(pdev); 2786 pci_disable_device(pdev);
2787
2600 if (dev1) 2788 if (dev1)
2601 free_netdev(dev1); 2789 free_netdev(dev1);
2602 free_netdev(dev0); 2790 free_netdev(dev0);
@@ -2608,7 +2796,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
2608#ifdef CONFIG_PM 2796#ifdef CONFIG_PM
2609static int sky2_suspend(struct pci_dev *pdev, pm_message_t state) 2797static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
2610{ 2798{
2611 struct sky2_hw *hw = pci_get_drvdata(pdev); 2799 struct sky2_hw *hw = pci_get_drvdata(pdev);
2612 int i, wol = 0; 2800 int i, wol = 0;
2613 2801
2614 for (i = 0; i < 2; i++) { 2802 for (i = 0; i < 2; i++) {
@@ -2635,7 +2823,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
2635 2823
2636static int sky2_resume(struct pci_dev *pdev) 2824static int sky2_resume(struct pci_dev *pdev)
2637{ 2825{
2638 struct sky2_hw *hw = pci_get_drvdata(pdev); 2826 struct sky2_hw *hw = pci_get_drvdata(pdev);
2639 int i; 2827 int i;
2640 2828
2641 pci_set_power_state(pdev, PCI_D0); 2829 pci_set_power_state(pdev, PCI_D0);
@@ -2657,19 +2845,18 @@ static int sky2_resume(struct pci_dev *pdev)
2657#endif 2845#endif
2658 2846
2659static struct pci_driver sky2_driver = { 2847static struct pci_driver sky2_driver = {
2660 .name = DRV_NAME, 2848 .name = DRV_NAME,
2661 .id_table = sky2_id_table, 2849 .id_table = sky2_id_table,
2662 .probe = sky2_probe, 2850 .probe = sky2_probe,
2663 .remove = __devexit_p(sky2_remove), 2851 .remove = __devexit_p(sky2_remove),
2664#ifdef CONFIG_PM 2852#ifdef CONFIG_PM
2665 .suspend = sky2_suspend, 2853 .suspend = sky2_suspend,
2666 .resume = sky2_resume, 2854 .resume = sky2_resume,
2667#endif 2855#endif
2668}; 2856};
2669 2857
2670static int __init sky2_init_module(void) 2858static int __init sky2_init_module(void)
2671{ 2859{
2672
2673 return pci_module_init(&sky2_driver); 2860 return pci_module_init(&sky2_driver);
2674} 2861}
2675 2862
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index d2a0ac2c53e7..9256303acf76 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -209,13 +209,9 @@ enum csr_regs {
209 Y2_CFG_SPC = 0x1c00, 209 Y2_CFG_SPC = 0x1c00,
210}; 210};
211 211
212/* Access pci config through board I/O */
213#define PCI_C(x) (Y2_CFG_SPC + (x))
214
215
216/* B0_CTST 16 bit Control/Status register */ 212/* B0_CTST 16 bit Control/Status register */
217enum { 213enum {
218 Y2_VMAIN_AVAIL = 1<<17, /* VMAIN available (YUKON-2 only) */ 214 Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */
219 Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */ 215 Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */
220 Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */ 216 Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */
221 Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */ 217 Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */
@@ -234,13 +230,17 @@ enum {
234 CS_MRST_SET = 1<<2, /* Set Master reset */ 230 CS_MRST_SET = 1<<2, /* Set Master reset */
235 CS_RST_CLR = 1<<1, /* Clear Software reset */ 231 CS_RST_CLR = 1<<1, /* Clear Software reset */
236 CS_RST_SET = 1, /* Set Software reset */ 232 CS_RST_SET = 1, /* Set Software reset */
233};
237 234
238/* B0_LED 8 Bit LED register */ 235/* B0_LED 8 Bit LED register */
236enum {
239/* Bit 7.. 2: reserved */ 237/* Bit 7.. 2: reserved */
240 LED_STAT_ON = 1<<1, /* Status LED on */ 238 LED_STAT_ON = 1<<1, /* Status LED on */
241 LED_STAT_OFF = 1, /* Status LED off */ 239 LED_STAT_OFF = 1, /* Status LED off */
240};
242 241
243/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */ 242/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */
243enum {
244 PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */ 244 PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */
245 PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */ 245 PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */
246 PC_VCC_ENA = 1<<5, /* Switch VCC Enable */ 246 PC_VCC_ENA = 1<<5, /* Switch VCC Enable */
@@ -336,7 +336,7 @@ enum {
336 Y2_HWE_L2_MASK = Y2_IS_PAR_RD2 | Y2_IS_PAR_WR2 | Y2_IS_PAR_MAC2 | 336 Y2_HWE_L2_MASK = Y2_IS_PAR_RD2 | Y2_IS_PAR_WR2 | Y2_IS_PAR_MAC2 |
337 Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2| Y2_IS_TCP_TXA2, 337 Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2| Y2_IS_TCP_TXA2,
338 338
339 Y2_HWE_ALL_MASK = Y2_IS_SENSOR | Y2_IS_MST_ERR | Y2_IS_IRQ_STAT | 339 Y2_HWE_ALL_MASK = Y2_IS_TIST_OV | Y2_IS_MST_ERR | Y2_IS_IRQ_STAT |
340 Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP | 340 Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP |
341 Y2_HWE_L1_MASK | Y2_HWE_L2_MASK, 341 Y2_HWE_L1_MASK | Y2_HWE_L2_MASK,
342}; 342};
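For context, a combined mask like Y2_HWE_ALL_MASK is normally programmed into the hardware-error interrupt mask register with a single write. The snippet below is an illustrative sketch, not a hunk from this patch; B0_HWE_IMSK and sky2_write32() are taken from elsewhere in sky2.h/sky2.c.

/* Illustrative sketch: enable every hardware-error interrupt source
 * covered by the combined mask defined above.
 */
static void example_enable_hw_errors(struct sky2_hw *hw)
{
        sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
}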
@@ -793,11 +793,6 @@ enum {
793 STAT_ISR_TIMER_CNT = 0x0ed4,/* 32 bit ISR Timer Counter Reg */ 793 STAT_ISR_TIMER_CNT = 0x0ed4,/* 32 bit ISR Timer Counter Reg */
794 STAT_ISR_TIMER_CTRL= 0x0ed8,/* 8 bit ISR Timer Control Reg */ 794 STAT_ISR_TIMER_CTRL= 0x0ed8,/* 8 bit ISR Timer Control Reg */
795 STAT_ISR_TIMER_TEST= 0x0ed9,/* 8 bit ISR Timer Test Reg */ 795 STAT_ISR_TIMER_TEST= 0x0ed9,/* 8 bit ISR Timer Test Reg */
796
797 ST_LAST_IDX_MASK = 0x007f,/* Last Index Mask */
798 ST_TXRP_IDX_MASK = 0x0fff,/* Tx Report Index Mask */
799 ST_TXTH_IDX_MASK = 0x0fff,/* Tx Threshold Index Mask */
800 ST_WM_IDX_MASK = 0x3f,/* FIFO Watermark Index Mask */
801}; 796};
802 797
803enum { 798enum {
@@ -836,6 +831,7 @@ enum {
836 831
837/* WOL Pattern Counter Registers (YUKON only) */ 832/* WOL Pattern Counter Registers (YUKON only) */
838 833
834
839 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */ 835 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
840 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */ 836 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
841}; 837};
@@ -1536,34 +1532,34 @@ enum {
1536/* Receive Frame Status Encoding */ 1532/* Receive Frame Status Encoding */
1537enum { 1533enum {
1538 GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */ 1534 GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */
1539 GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */ 1535 GMR_FS_VLAN = 1<<13, /* VLAN Packet */
1540 GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */ 1536 GMR_FS_JABBER = 1<<12, /* Jabber Packet */
1541 GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */ 1537 GMR_FS_UN_SIZE = 1<<11, /* Undersize Packet */
1542 GMR_FS_MC = 1<<10, /* Bit 10: Multicast Packet */ 1538 GMR_FS_MC = 1<<10, /* Multicast Packet */
1543 GMR_FS_BC = 1<<9, /* Bit 9: Broadcast Packet */ 1539 GMR_FS_BC = 1<<9, /* Broadcast Packet */
1544 GMR_FS_RX_OK = 1<<8, /* Bit 8: Receive OK (Good Packet) */ 1540 GMR_FS_RX_OK = 1<<8, /* Receive OK (Good Packet) */
1545 GMR_FS_GOOD_FC = 1<<7, /* Bit 7: Good Flow-Control Packet */ 1541 GMR_FS_GOOD_FC = 1<<7, /* Good Flow-Control Packet */
1546 GMR_FS_BAD_FC = 1<<6, /* Bit 6: Bad Flow-Control Packet */ 1542 GMR_FS_BAD_FC = 1<<6, /* Bad Flow-Control Packet */
1547 GMR_FS_MII_ERR = 1<<5, /* Bit 5: MII Error */ 1543 GMR_FS_MII_ERR = 1<<5, /* MII Error */
1548 GMR_FS_LONG_ERR = 1<<4, /* Bit 4: Too Long Packet */ 1544 GMR_FS_LONG_ERR = 1<<4, /* Too Long Packet */
1549 GMR_FS_FRAGMENT = 1<<3, /* Bit 3: Fragment */ 1545 GMR_FS_FRAGMENT = 1<<3, /* Fragment */
1550 1546
1551 GMR_FS_CRC_ERR = 1<<1, /* Bit 1: CRC Error */ 1547 GMR_FS_CRC_ERR = 1<<1, /* CRC Error */
1552 GMR_FS_RX_FF_OV = 1<<0, /* Bit 0: Rx FIFO Overflow */ 1548 GMR_FS_RX_FF_OV = 1<<0, /* Rx FIFO Overflow */
1553 1549
1554/*
1555 * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR)
1556 */
1557 GMR_FS_ANY_ERR = GMR_FS_RX_FF_OV | GMR_FS_CRC_ERR | 1550 GMR_FS_ANY_ERR = GMR_FS_RX_FF_OV | GMR_FS_CRC_ERR |
1558 GMR_FS_FRAGMENT | GMR_FS_LONG_ERR | 1551 GMR_FS_FRAGMENT | GMR_FS_LONG_ERR |
1559 GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC | 1552 GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC |
1560 GMR_FS_UN_SIZE | GMR_FS_JABBER, 1553 GMR_FS_UN_SIZE | GMR_FS_JABBER,
1561/* Rx GMAC FIFO Flush Mask (default) */
1562 RX_FF_FL_DEF_MSK = GMR_FS_ANY_ERR,
1563}; 1554};
1564 1555
1565/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ 1556/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
1566enum { 1557enum {
1558 RX_TRUNC_ON = 1<<27, /* enable packet truncation */
1559 RX_TRUNC_OFF = 1<<26, /* disable packet truncation */
1560 RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */
1561 RX_VLAN_STRIP_OFF = 1<<24, /* disable VLAN stripping */
1562
1567 GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */ 1563 GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */
1568 GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */ 1564 GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */
1569 GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */ 1565 GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */
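With the RX_FF_FL_DEF_MSK alias removed, callers use GMR_FS_ANY_ERR directly. As a hypothetical illustration of how a receive handler typically consumes the status word encoded above (sky2's real receive path is not part of this hunk):

/* Illustrative only.  Accept a frame when the length reported in the
 * upper 16 bits matches, the good-packet bit is set, and none of the
 * combined error bits are raised.
 */
static int example_rx_frame_ok(u32 status, unsigned int length)
{
        if ((status >> 16) != length)           /* GMR_FS_LEN is bits 31..16 */
                return 0;
        if (status & GMR_FS_ANY_ERR)            /* combined error mask above */
                return 0;
        return (status & GMR_FS_RX_OK) != 0;
}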
@@ -1574,7 +1570,8 @@ enum {
1574 GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */ 1570 GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */
1575 GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */ 1571 GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */
1576 GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */ 1572 GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */
1577 GMF_CLI_RX_FC = 1<<4, /* Clear IRQ Rx Frame Complete */ 1573 GMF_CLI_RX_C = 1<<4, /* Clear IRQ Rx Frame Complete */
1574
1578 GMF_OPER_ON = 1<<3, /* Operational Mode On */ 1575 GMF_OPER_ON = 1<<3, /* Operational Mode On */
1579 GMF_OPER_OFF = 1<<2, /* Operational Mode Off */ 1576 GMF_OPER_OFF = 1<<2, /* Operational Mode Off */
1580 GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */ 1577 GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */
@@ -1586,6 +1583,9 @@ enum {
1586 1583
1587/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */ 1584/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
1588enum { 1585enum {
1586 TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */
1587 TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */
1588
1589 GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */ 1589 GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */
1590 GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */ 1590 GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
1591 GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */ 1591 GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */
@@ -1679,8 +1679,7 @@ enum {
1679 GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ 1679 GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
1680 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ 1680 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
1681 1681
1682#define GMAC_DEF_MSK (GM_IS_TX_CO_OV | GM_IS_RX_CO_OV |\ 1682#define GMAC_DEF_MSK (GM_IS_TX_FF_UR|GM_IS_RX_FF_OR)
1683 GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)
1684 1683
1685/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ 1684/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
1686 /* Bits 15.. 2: reserved */ 1685 /* Bits 15.. 2: reserved */
@@ -1761,9 +1760,6 @@ enum {
1761 OP_RXTIMEVLAN = OP_RXTIMESTAMP | OP_RXVLAN, 1760 OP_RXTIMEVLAN = OP_RXTIMESTAMP | OP_RXVLAN,
1762 OP_RSS_HASH = 0x65, 1761 OP_RSS_HASH = 0x65,
1763 OP_TXINDEXLE = 0x68, 1762 OP_TXINDEXLE = 0x68,
1764
1765/* YUKON-2 SPECIAL opcodes defines */
1766 OP_PUTIDX = 0x70,
1767}; 1763};
1768 1764
1769/* Yukon 2 hardware interface 1765/* Yukon 2 hardware interface
@@ -1775,62 +1771,60 @@ struct sky2_tx_le {
1775 struct { 1771 struct {
1776 u16 offset; 1772 u16 offset;
1777 u16 start; 1773 u16 start;
1778 } csum; 1774 } csum __attribute((packed));
1779 struct { 1775 struct {
1780 u16 size; 1776 u16 size;
1781 u16 rsvd; 1777 u16 rsvd;
1782 } tso; 1778 } tso __attribute((packed));
1783 } tx; 1779 } tx;
1784 u16 length; /* also vlan tag or checksum start */ 1780 u16 length; /* also vlan tag or checksum start */
1785 u8 ctrl; 1781 u8 ctrl;
1786 u8 opcode; 1782 u8 opcode;
1787}; 1783} __attribute((packed));
1788 1784
1789struct sky2_rx_le { 1785struct sky2_rx_le {
1790 union { 1786 u32 addr;
1791 u32 addr;
1792 struct {
1793 u16 start1;
1794 u16 start2;
1795 } csum;
1796 } rx;
1797 u16 length; 1787 u16 length;
1798 u8 ctrl; 1788 u8 ctrl;
1799 u8 opcode; 1789 u8 opcode;
1800}; 1790} __attribute((packed));
1801 1791
1802struct sky2_status_le { 1792struct sky2_status_le {
1803 u32 status; /* also checksum */ 1793 u32 status; /* also checksum */
1804 u16 length; /* also vlan tag */ 1794 u16 length; /* also vlan tag */
1805 u8 link; 1795 u8 link;
1806 u8 opcode; 1796 u8 opcode;
1807}; 1797} __attribute((packed));
1808
1809 1798
1810struct ring_info { 1799struct ring_info {
1811 struct sk_buff *skb; 1800 struct sk_buff *skb;
1812 DECLARE_PCI_UNMAP_ADDR(mapaddr); 1801 dma_addr_t mapaddr;
1813 DECLARE_PCI_UNMAP_LEN(maplen); 1802 u16 maplen;
1803 u16 idx;
1814}; 1804};
1815 1805
1816struct sky2_port { 1806struct sky2_port {
1817 struct sky2_hw *hw ____cacheline_aligned; 1807 struct sky2_hw *hw;
1818 struct net_device *netdev; 1808 struct net_device *netdev;
1819 unsigned port; 1809 unsigned port;
1820 u32 msg_enable; 1810 u32 msg_enable;
1821 1811
1822 struct ring_info *tx_ring ____cacheline_aligned; 1812 struct ring_info *tx_ring;
1823 struct sky2_tx_le *tx_le; 1813 struct sky2_tx_le *tx_le;
1824 spinlock_t tx_lock; 1814 spinlock_t tx_lock;
1815 u32 tx_addr64;
1825 u16 tx_cons; /* next le to check */ 1816 u16 tx_cons; /* next le to check */
1826 u16 tx_prod; /* next le to use */ 1817 u16 tx_prod; /* next le to use */
1818 u16 tx_pending;
1827 u16 tx_last_put; 1819 u16 tx_last_put;
1820 u16 tx_last_mss;
1828 1821
1829 struct ring_info *rx_ring ____cacheline_aligned; 1822 struct ring_info *rx_ring;
1830 struct sky2_rx_le *rx_le; 1823 struct sky2_rx_le *rx_le;
1831 u16 rx_ring_size; 1824 u32 rx_addr64;
1832 u16 rx_next; /* next re to check */ 1825 u16 rx_next; /* next re to check */
1833 u16 rx_put; /* next le index to use */ 1826 u16 rx_put; /* next le index to use */
1827 u16 rx_pending;
1834 u16 rx_last_put; 1828 u16 rx_last_put;
1835 1829
1836 dma_addr_t rx_le_map; 1830 dma_addr_t rx_le_map;
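The list elements above are now explicitly packed, and the receive element is reduced to a flat addr/length/ctrl/opcode layout. A purely illustrative sketch of filling one such descriptor follows; the helper name is invented, and the little-endian conversion policy is an assumption rather than something stated by this patch.

/* Hypothetical helper, not part of this patch: fill one receive list
 * element of the layout shown above with the low 32 bits of a DMA
 * mapping.  Endianness of the on-wire descriptor is assumed here.
 */
static void example_rx_le_set(struct sky2_rx_le *le, dma_addr_t map,
                              u16 size, u8 opcode)
{
        le->addr   = cpu_to_le32((u32) map);
        le->length = cpu_to_le16(size);
        le->ctrl   = 0;
        le->opcode = opcode;    /* one of the OP_* opcodes listed earlier */
}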
@@ -1882,12 +1876,14 @@ static inline u8 sky2_read8(const struct sky2_hw *hw, unsigned reg)
1882 return readb(hw->regs + reg); 1876 return readb(hw->regs + reg);
1883} 1877}
1884 1878
1879/* This should probably go away, bus-based tweaks suck */
1885static inline int is_pciex(const struct sky2_hw *hw) 1880static inline int is_pciex(const struct sky2_hw *hw)
1886{ 1881{
1887 return (sky2_read32(hw, PCI_C(PCI_DEV_STATUS)) & PCI_OS_PCI_X) == 0; 1882 u32 status;
1883 pci_read_config_dword(hw->pdev, PCI_DEV_STATUS, &status);
1884 return (status & PCI_OS_PCI_X) == 0;
1888} 1885}
1889 1886
1890
1891static inline void sky2_write32(const struct sky2_hw *hw, unsigned reg, u32 val) 1887static inline void sky2_write32(const struct sky2_hw *hw, unsigned reg, u32 val)
1892{ 1888{
1893 writel(val, hw->regs + reg); 1889 writel(val, hw->regs + reg);