path: root/drivers/net/sky2.c
author    Stephen Hemminger <shemminger@osdl.org>  2005-08-16 19:36:49 -0400
committer Jeff Garzik <jgarzik@pobox.com>  2005-08-17 00:53:12 -0400
commit    cd28ab6a4e50a7601d22752aa7ce0c8197b10bdf (patch)
tree      7030ba1742db21caebe491c9052f312e30c33628 /drivers/net/sky2.c
parent    2ba84684e8cf6f980e4e95a2300f53a505eb794e (diff)
[PATCH] sky2: new experimental Marvell Yukon2 driver
New driver for the Marvell Yukon2 Gigabit Ethernet chipset. This driver is
based on the skge driver, but uses the logic from the SysKonnect version of
the sk98lin driver. It should support all the Yukon2 chipsets found in many
current Intel and AMD motherboards. The driver supports ethtool, tx and rx
checksum, and tcp segmentation offload. But it has only been tested for a
short while and is known to stop receiving under heavy load.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/net/sky2.c')
-rw-r--r--  drivers/net/sky2.c  2686
1 file changed, 2686 insertions(+), 0 deletions(-)
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
new file mode 100644
index 000000000000..cfb2b41bc6b2
--- /dev/null
+++ b/drivers/net/sky2.c
@@ -0,0 +1,2686 @@
/*
 * New driver for Marvell Yukon 2 chipset.
 * Based on earlier sk98lin, and skge driver.
 *
 * This driver intentionally does not support all the features
 * of the original driver such as link fail-over and link management because
 * those should be done at higher levels.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * TODO
 *	- coalescing setting?
 *	- variable ring size?
 *
 * TOTEST
 *	- speed setting
 *	- power management
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/crc32.h>

#include <asm/irq.h>

#include "sky2.h"

#define DRV_NAME	"sky2"
#define DRV_VERSION	"0.2"
#define PFX		DRV_NAME " "

/*
 * The Yukon II chipset takes 64 bit command blocks (called list elements)
 * that are organized into three (receive, transmit, status) different rings
 * similar to Tigon3. A transmit can require several elements;
 * a receive requires one (or two if using 64 bit dma).
 */
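
/*
 * Illustrative sketch (added; not part of the original comment): a single
 * TSO frame with two page fragments on a 64 bit dma system could use a
 * chain of list elements like this, matching the opcodes emitted by
 * sky2_xmit_frame() below:
 *
 *	OP_LRGLEN  - MSS for large send
 *	OP_ADDR64  - high 32 bits of the DMA address
 *	OP_TCPLISW - checksum start/offset
 *	OP_PACKET  - first buffer (linear header part)
 *	OP_BUFFER  - fragment 1
 *	OP_BUFFER  - fragment 2
 */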

#ifdef CONFIG_SKY2_EC_A1
#define is_ec_a1(hw) \
	((hw)->chip_id == CHIP_ID_YUKON_EC && \
	 (hw)->chip_rev == CHIP_REV_YU_EC_A1)
#else
#define is_ec_a1(hw)	0
#endif

#define RX_LE_SIZE	256
#define MIN_RX_BUFFERS	8
#define MAX_RX_BUFFERS	124
#define RX_LE_BYTES	(RX_LE_SIZE*sizeof(struct sky2_rx_le))

#define TX_RING_SIZE	256	/* min 64, max 4096 */
#define STATUS_RING_SIZE	1024	/* pow2 > (2*Rx + Tx) */
#define STATUS_LE_BYTES	(STATUS_RING_SIZE*sizeof(struct sky2_status_le))
#define ETH_JUMBO_MTU	9000
#define TX_WATCHDOG	(5 * HZ)
#define NAPI_WEIGHT	64
#define PHY_RETRIES	1000

static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
	| NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
	| NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static const struct pci_device_id sky2_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, sky2_id_table);

/* Avoid conditionals by using array */
static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
static const unsigned rxqaddr[] = { Q_R1, Q_R2 };

static inline const char *chip_name(u8 chip_id)
{
	switch (chip_id) {
	case CHIP_ID_GENESIS:
		return "Genesis";
	case CHIP_ID_YUKON:
		return "Yukon";
	case CHIP_ID_YUKON_LITE:
		return "Yukon-Lite";
	case CHIP_ID_YUKON_LP:
		return "Yukon-LP";
	case CHIP_ID_YUKON_XL:
		return "Yukon-XL";
	case CHIP_ID_YUKON_EC:
		return "Yukon-EC";
	case CHIP_ID_YUKON_FE:
		return "Yukon-FE";
	default:
		return "???";
	}
}

static void gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
{
	int i;

	gma_write16(hw, port, GM_SMI_DATA, val);
	gma_write16(hw, port, GM_SMI_CTRL,
		    GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));

	for (i = 0; i < PHY_RETRIES; i++) {
		udelay(1);

		if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
			break;
	}
}

static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
{
	int i;

	gma_write16(hw, port, GM_SMI_CTRL,
		    GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
		    | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < PHY_RETRIES; i++) {
		udelay(1);
		if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
			goto ready;
	}

	printk(KERN_WARNING PFX "%s: phy read timeout\n",
	       hw->dev[port]->name);
ready:
	return gma_read16(hw, port, GM_SMI_DATA);
}

static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
{
	u16 reg;

	/* disable all GMAC IRQ's */
	sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
	/* disable PHY IRQs */
	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
	gma_write16(hw, port, GM_MC_ADDR_H1, 0);	/* clear MC hash */
	gma_write16(hw, port, GM_MC_ADDR_H2, 0);
	gma_write16(hw, port, GM_MC_ADDR_H3, 0);
	gma_write16(hw, port, GM_MC_ADDR_H4, 0);

	reg = gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
	gma_write16(hw, port, GM_RX_CTRL, reg);
}

static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
{
	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
	u16 ctrl, ct1000, adv;
	u16 ledctrl, ledover;

	pr_debug("phy reset autoneg=%s advertising=0x%x pause rx=%s tx=%s\n",
		 sky2->autoneg == AUTONEG_ENABLE ? "enable" : "disable",
		 sky2->advertising,
		 sky2->rx_pause ? "on" : "off",
		 sky2->tx_pause ? "on" : "off");

	if (sky2->autoneg == AUTONEG_ENABLE &&
	    hw->chip_id != CHIP_ID_YUKON_XL) {
		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);

		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
			   PHY_M_EC_MAC_S_MSK);
		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);

		if (hw->chip_id == CHIP_ID_YUKON_EC)
			ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
		else
			ectrl |= PHY_M_EC_M_DSC(2) | PHY_M_EC_S_DSC(3);

		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
	}

	ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
	if (hw->copper) {
		if (hw->chip_id == CHIP_ID_YUKON_FE) {
			/* enable automatic crossover */
			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
		} else {
			/* disable energy detect */
			ctrl &= ~PHY_M_PC_EN_DET_MSK;

			/* enable automatic crossover */
			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);

			if (sky2->autoneg == AUTONEG_ENABLE &&
			    hw->chip_id == CHIP_ID_YUKON_XL) {
				ctrl &= ~PHY_M_PC_DSC_MSK;
				ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
			}
		}
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
	} else {
		/* workaround for deviation #4.88 (CRC errors) */
		/* disable Automatic Crossover */

		ctrl &= ~PHY_M_PC_MDIX_MSK;
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

		if (hw->chip_id == CHIP_ID_YUKON_XL) {
			/* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
			ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
			ctrl &= ~PHY_M_MAC_MD_MSK;
			ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

			/* select page 1 to access Fiber registers */
			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
		}

		ctrl &= ~(PHY_M_PC_MDIX_MSK | PHY_M_MAC_MD_MSK);
		ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
	}

	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	if (sky2->autoneg == AUTONEG_DISABLE)
		ctrl &= ~PHY_CT_ANE;
	else
		ctrl |= PHY_CT_ANE;

	ctrl |= PHY_CT_RESET;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	ctrl = 0;
	ct1000 = 0;
	adv = PHY_AN_CSMA;

	if (sky2->autoneg == AUTONEG_ENABLE) {
		if (hw->copper) {
			if (sky2->advertising & ADVERTISED_1000baseT_Full)
				ct1000 |= PHY_M_1000C_AFD;
			if (sky2->advertising & ADVERTISED_1000baseT_Half)
				ct1000 |= PHY_M_1000C_AHD;
			if (sky2->advertising & ADVERTISED_100baseT_Full)
				adv |= PHY_M_AN_100_FD;
			if (sky2->advertising & ADVERTISED_100baseT_Half)
				adv |= PHY_M_AN_100_HD;
			if (sky2->advertising & ADVERTISED_10baseT_Full)
				adv |= PHY_M_AN_10_FD;
			if (sky2->advertising & ADVERTISED_10baseT_Half)
				adv |= PHY_M_AN_10_HD;
		} else		/* special defines for FIBER (88E1011S only) */
			adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;

		/* Set Flow-control capabilities */
		if (sky2->tx_pause && sky2->rx_pause)
			adv |= PHY_AN_PAUSE_CAP;	/* symmetric */
		else if (sky2->rx_pause && !sky2->tx_pause)
			adv |= PHY_AN_PAUSE_ASYM | PHY_AN_PAUSE_CAP;
		else if (!sky2->rx_pause && sky2->tx_pause)
			adv |= PHY_AN_PAUSE_ASYM;	/* local */

		/* Restart Auto-negotiation */
		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		/* forced speed/duplex settings */
		ct1000 = PHY_M_1000C_MSE;

		if (sky2->duplex == DUPLEX_FULL)
			ctrl |= PHY_CT_DUP_MD;

		switch (sky2->speed) {
		case SPEED_1000:
			ctrl |= PHY_CT_SP1000;
			break;
		case SPEED_100:
			ctrl |= PHY_CT_SP100;
			break;
		}

		ctrl |= PHY_CT_RESET;
	}

	if (hw->chip_id != CHIP_ID_YUKON_FE)
		gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);

	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* Setup Phy LED's */
	ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
	ledover = 0;

	switch (hw->chip_id) {
	case CHIP_ID_YUKON_FE:
		/* on 88E3082 these bits are at 11..9 (shifted left) */
		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;

		ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);

		/* delete ACT LED control bits */
		ctrl &= ~PHY_M_FELP_LED1_MSK;
		/* change ACT LED control to blink mode */
		ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
		gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
		break;

	case CHIP_ID_YUKON_XL:
		ctrl = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);

		/* select page 3 to access LED control register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);

		/* set LED Function Control register */
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
			     (PHY_M_LEDC_LOS_CTRL(1) |	/* LINK/ACT */
			      PHY_M_LEDC_INIT_CTRL(7) |	/* 10 Mbps */
			      PHY_M_LEDC_STA1_CTRL(7) |	/* 100 Mbps */
			      PHY_M_LEDC_STA0_CTRL(7)));	/* 1000 Mbps */

		/* set Polarity Control register */
		gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
			     (PHY_M_POLC_LS1_P_MIX(4) | PHY_M_POLC_IS0_P_MIX(4) |
			      PHY_M_POLC_LOS_CTRL(2) | PHY_M_POLC_INIT_CTRL(2) |
			      PHY_M_POLC_STA1_CTRL(2) | PHY_M_POLC_STA0_CTRL(2)));

		/* restore page register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, ctrl);
		break;

	default:
		/* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
		/* turn off the Rx LED (LED_RX) */
		ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
	}

	gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);

	if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
		/* turn on 100 Mbps LED (LED_LINK100) */
		ledover |= PHY_M_LED_MO_100(MO_LED_ON);
	}

	if (ledover)
		gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);

	/* Enable phy interrupt on autonegotiation complete (or link up) */
	if (sky2->autoneg == AUTONEG_ENABLE)
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
	else
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
}

static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
{
	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
	u16 reg;
	int i;
	const u8 *addr = hw->dev[port]->dev_addr;

	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);

	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);

	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
	    && port == 1) {
		/* WA DEV_472 -- looks like crossed wires on port 2 */
		/* clear GMAC 1 Control reset */
		sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
		do {
			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
		} while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
			 gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
			 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
	}

	if (sky2->autoneg == AUTONEG_DISABLE) {
		reg = gma_read16(hw, port, GM_GP_CTRL);
		reg |= GM_GPCR_AU_ALL_DIS;
		gma_write16(hw, port, GM_GP_CTRL, reg);
		gma_read16(hw, port, GM_GP_CTRL);

		switch (sky2->speed) {
		case SPEED_1000:
			reg |= GM_GPCR_SPEED_1000;
			/* fallthru */
		case SPEED_100:
			reg |= GM_GPCR_SPEED_100;
		}

		if (sky2->duplex == DUPLEX_FULL)
			reg |= GM_GPCR_DUP_FULL;
	} else
		reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;

	if (!sky2->tx_pause && !sky2->rx_pause) {
		sky2_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
	} else if (sky2->tx_pause && !sky2->rx_pause) {
		/* disable Rx flow-control */
		reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
	}

	gma_write16(hw, port, GM_GP_CTRL, reg);

	sky2_read16(hw, GMAC_IRQ_SRC);

	spin_lock_bh(&hw->phy_lock);
	sky2_phy_init(hw, port);
	spin_unlock_bh(&hw->phy_lock);

	/* MIB clear */
	reg = gma_read16(hw, port, GM_PHY_ADDR);
	gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);

	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
		gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
	gma_write16(hw, port, GM_PHY_ADDR, reg);

	/* transmit control */
	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* receive control reg: unicast + multicast + no FCS */
	gma_write16(hw, port, GM_RX_CTRL,
		    GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);

	/* transmit flow control */
	gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);

	/* transmit parameter */
	gma_write16(hw, port, GM_TX_PARAM,
		    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
		    TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
		    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
		    TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));

	/* serial mode register */
	reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
	      GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);

	if (hw->dev[port]->mtu > 1500)
		reg |= GM_SMOD_JUMBO_ENA;

	gma_write16(hw, port, GM_SERIAL_MODE, reg);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);

	/* enable interrupt mask for counter overflows */
	gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_TR_IRQ_MSK, 0);

	/* Configure Rx MAC FIFO */
	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
	sky2_write16(hw, SK_REG(port, RX_GMF_CTRL_T),
		     GMF_OPER_ON | GMF_RX_F_FL_ON);

	reg = RX_FF_FL_DEF_MSK;
	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev <= 1)
		reg = 0;	/* WA Dev #4115 */

	sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), reg);
	/* Set threshold to 0xa (64 bytes)
	 * ASF disabled so no need to do WA dev #4.30
	 */
	sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);

	/* Configure Tx MAC FIFO */
	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
	sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);

	/* Turn off Rx fifo flush (per sk98lin) */
	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RX_F_FL_OFF);
}

static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
{
	u32 end;

	start /= 8;
	len /= 8;
	end = start + len - 1;
	pr_debug("ramset q=%d start=0x%x end=0x%x\n", q, start, end);

	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
	sky2_write32(hw, RB_ADDR(q, RB_START), start);
	sky2_write32(hw, RB_ADDR(q, RB_END), end);
	sky2_write32(hw, RB_ADDR(q, RB_WP), start);
	sky2_write32(hw, RB_ADDR(q, RB_RP), start);

	if (q == Q_R1 || q == Q_R2) {
		/* Set thresholds on receive queue's */
		sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP),
			     start + (2*len)/3);
		sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP),
			     start + (len/3));
	} else {
		/* Enable store & forward on Tx queue's because
		 * Tx FIFO is only 1K on Yukon
		 */
		sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
	}

	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
}

/* Setup Bus Memory Interface */
static void sky2_qset(struct sky2_hw *hw, u16 q, u32 wm)
{
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
	sky2_write32(hw, Q_ADDR(q, Q_WM), wm);
}

/* Setup prefetch unit registers. This is the interface between
 * hardware and driver list elements
 */
static inline void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
				      u64 addr, u32 last)
{
	pr_debug("sky2 prefetch init q=%x addr=%llx last=%x\n",
		 Y2_QADDR(qaddr, 0), addr, last);

	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), addr >> 32);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), (u32) addr);
	sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);
}

/*
 * This is workaround code taken from the SysKonnect sk98lin driver
 * to deal with a chip bug in the wraparound case.
 */
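/*
 * Illustrative scenario (added for clarity): on Yukon EC rev A1, if the
 * new put index is numerically below the last one, the ring has wrapped.
 * Rather than writing the wrapped index directly, the code below either
 * restarts prefetching from element 0 or parks the hardware at the last
 * element (size - 1), depending on where the hardware get index sits.
 */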
static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q,
				u16 idx, u16 *last, u16 size)
{
	BUG_ON(idx >= size);

	wmb();
	if (is_ec_a1(hw) && idx < *last) {
		u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));

		if (hwget == 0) {
			/* Start prefetching again */
			sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM),
				    0xe0);
			goto setnew;
		}

		if (hwget == size-1) {
			/* set watermark to one list element */
			sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 8);

			/* set put index to first list element */
			sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), 0);
		} else	/* have hardware go to end of list */
			sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), size-1);
	} else {
setnew:
		sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
		*last = idx;
	}
}

static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
{
	struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
	sky2->rx_put = (sky2->rx_put + 1) % RX_LE_SIZE;
	return le;
}

static inline void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map, u16 len)
{
	struct sky2_rx_le *le;

	if (sizeof(map) > sizeof(u32)) {
		le = sky2_next_rx(sky2);
		le->rx.addr = cpu_to_le32((u64) map >> 32);
		le->ctrl = 0;
		le->opcode = OP_ADDR64 | HW_OWNER;
	}

	le = sky2_next_rx(sky2);
	le->rx.addr = cpu_to_le32((u32) map);
	le->length = cpu_to_le16(len);
	le->ctrl = 0;
	le->opcode = OP_PACKET | HW_OWNER;
}

/* Tell chip where to start receive checksum.
 * Actually has two checksums, but set both same to avoid possible byte
 * order problems.
 */
static void sky2_rx_set_offset(struct sky2_port *sky2)
{
	struct sky2_rx_le *le;

	sky2_write32(sky2->hw,
		     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
		     sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);

	le = sky2_next_rx(sky2);
	le->rx.csum.start1 = ETH_HLEN;
	le->rx.csum.start2 = ETH_HLEN;
	le->ctrl = 0;
	le->opcode = OP_TCPSTART | HW_OWNER;
	wmb();
	sky2_write16(sky2->hw,
		     Y2_QADDR(rxqaddr[sky2->port], PREF_UNIT_PUT_IDX),
		     sky2->rx_put);
}

/* Cleanout receive buffer area, assumes receiver hardware stopped */
static void sky2_rx_clean(struct sky2_port *sky2)
{
	unsigned i;

	memset(sky2->rx_le, 0, RX_LE_BYTES);
	for (i = 0; i < sky2->rx_ring_size; i++) {
		struct ring_info *re = sky2->rx_ring + i;

		if (re->skb) {
			pci_unmap_single(sky2->hw->pdev,
					 pci_unmap_addr(re, mapaddr),
					 pci_unmap_len(re, maplen),
					 PCI_DMA_FROMDEVICE);
			kfree_skb(re->skb);
			re->skb = NULL;
		}
	}
}

static inline struct sk_buff *sky2_rx_alloc_skb(struct sky2_port *sky2,
						unsigned int size, int gfp_mask)
{
	struct sk_buff *skb;

	skb = alloc_skb(size, gfp_mask);
	if (likely(skb)) {
		skb->dev = sky2->netdev;
		skb_reserve(skb, NET_IP_ALIGN);
	}
	return skb;
}

/*
 * Allocate and setup receiver buffer pool.
 * In the case of 64 bit dma, there are twice as many list elements
 * available as ring entries, and one list element must be reserved
 * so the ring does not wrap around.
 */
static int sky2_rx_fill(struct sky2_port *sky2)
{
	unsigned i;
	unsigned int rx_buf_size = sky2->netdev->mtu + ETH_HLEN + 8;
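	/* Note (assumption, not stated in the original source): the extra
	 * 8 bytes beyond MTU + Ethernet header presumably leave room for
	 * a VLAN tag and the frame checksum. */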

	pr_debug("sky2_rx_fill %d\n", sky2->rx_ring_size);
	for (i = 0; i < sky2->rx_ring_size; i++) {
		struct ring_info *re = sky2->rx_ring + i;
		dma_addr_t paddr;

		re->skb = sky2_rx_alloc_skb(sky2, rx_buf_size, GFP_KERNEL);
		if (!re->skb)
			goto nomem;

		paddr = pci_map_single(sky2->hw->pdev, re->skb->data,
				       rx_buf_size, PCI_DMA_FROMDEVICE);

		pci_unmap_len_set(re, maplen, rx_buf_size);
		pci_unmap_addr_set(re, mapaddr, paddr);
		sky2_rx_add(sky2, paddr, rx_buf_size);
	}

	sky2_write16(sky2->hw,
		     Y2_QADDR(rxqaddr[sky2->port], PREF_UNIT_PUT_IDX),
		     sky2->rx_put);

	return 0;
nomem:
	sky2_rx_clean(sky2);
	return -ENOMEM;
}

/* Bring up network interface. */
static int sky2_up(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u32 ramsize, rxspace;
	int err = -ENOMEM;

	if (netif_msg_ifup(sky2))
		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);

	/* must be power of 2 */
	sky2->tx_le = pci_alloc_consistent(hw->pdev,
					   TX_RING_SIZE * sizeof(struct sky2_tx_le),
					   &sky2->tx_le_map);
	if (!sky2->tx_le)
		goto err_out;

	sky2->tx_ring = kmalloc(TX_RING_SIZE * sizeof(struct ring_info),
				GFP_KERNEL);
	if (!sky2->tx_ring)
		goto err_out;
	sky2->tx_prod = sky2->tx_cons = 0;
	memset(sky2->tx_ring, 0, TX_RING_SIZE * sizeof(struct ring_info));

	sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
					   &sky2->rx_le_map);
	if (!sky2->rx_le)
		goto err_out;
	memset(sky2->rx_le, 0, RX_LE_BYTES);

	sky2->rx_ring = kmalloc(sky2->rx_ring_size * sizeof(struct ring_info),
				GFP_KERNEL);
	if (!sky2->rx_ring)
		goto err_out;

	sky2_mac_init(hw, port);

	/* Configure RAM buffers */
	if (hw->chip_id == CHIP_ID_YUKON_FE ||
	    (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == 2))
		ramsize = 4096;
	else {
		u8 e0 = sky2_read8(hw, B2_E_0);
		ramsize = (e0 == 0) ? (128*1024) : (e0 * 4096);
	}

	/* 2/3 for Rx */
	rxspace = (2 * ramsize) / 3;
	sky2_ramset(hw, rxqaddr[port], 0, rxspace);
	sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);

	sky2_qset(hw, rxqaddr[port], is_pciex(hw) ? 0x80 : 0x600);
	sky2_qset(hw, txqaddr[port], 0x600);

	sky2->rx_put = sky2->rx_next = 0;
	sky2_prefetch_init(hw, rxqaddr[port], sky2->rx_le_map, RX_LE_SIZE-1);

	sky2_rx_set_offset(sky2);

	err = sky2_rx_fill(sky2);
	if (err)
		goto err_out;

	sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
			   TX_RING_SIZE - 1);

	/* Enable interrupts from phy/mac for port */
	hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
	sky2_write32(hw, B0_IMSK, hw->intr_mask);
	return 0;

err_out:
	if (sky2->rx_le)
		pci_free_consistent(hw->pdev, RX_LE_BYTES,
				    sky2->rx_le, sky2->rx_le_map);
	if (sky2->tx_le)
		pci_free_consistent(hw->pdev,
				    TX_RING_SIZE * sizeof(struct sky2_tx_le),
				    sky2->tx_le, sky2->tx_le_map);
	if (sky2->tx_ring)
		kfree(sky2->tx_ring);
	if (sky2->rx_ring)
		kfree(sky2->rx_ring);

	return err;
}

/*
 * Worst case number of list elements is 36
 * TSO + CHKSUM + ADDR64 + BUFFER + (ADDR+BUFFER)*MAXFRAGS
 */
#define MAX_SKB_TX_LE	(4 + 2*MAX_SKB_FRAGS)

static inline int sky2_xmit_avail(const struct sky2_port *sky2)
{
	return (sky2->tx_cons > sky2->tx_prod ? 0 : TX_RING_SIZE)
		+ sky2->tx_cons - sky2->tx_prod - 1;
}
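
/*
 * Worked example (added for clarity): with TX_RING_SIZE = 256,
 * tx_prod = 10 and tx_cons = 5 the expression above yields
 * 256 + 5 - 10 - 1 = 250 free slots; one slot is always left unused so
 * that a full ring can be distinguished from an empty one.
 */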

static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
{
	struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
	sky2->tx_prod = (sky2->tx_prod + 1) % TX_RING_SIZE;
	return le;
}

/* Put one frame in ring for transmit. */
static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	struct sky2_tx_le *le;
	struct ring_info *re;
	unsigned i, len;
	dma_addr_t mapping;
	u32 addr64;
	u16 mss;
	u8 ctrl;

	skb = skb_padto(skb, ETH_ZLEN);
	if (!skb)
		return NETDEV_TX_OK;

	if (!spin_trylock(&sky2->tx_lock))
		return NETDEV_TX_LOCKED;

	if (unlikely(sky2_xmit_avail(sky2) < MAX_SKB_TX_LE)) {
		netif_stop_queue(dev);
		spin_unlock(&sky2->tx_lock);

		printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	if (netif_msg_tx_queued(sky2))
		printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
		       dev->name, sky2->tx_prod, skb->len);

	len = skb_headlen(skb);
	mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* Check for TCP Segmentation Offload */
	mss = skb_shinfo(skb)->tso_size;
	if (mss) {
		/* just drop the packet if non-linear expansion fails */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			spin_unlock(&sky2->tx_lock);	/* fix: release tx lock before dropping */
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		mss += ((skb->h.th->doff - 5) * 4);	/* TCP options */
		mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
		mss += ETH_HLEN;

		le = get_tx_le(sky2);
		le->tx.tso.size = cpu_to_le16(mss);
		le->ctrl = 0;
		le->opcode = OP_LRGLEN | HW_OWNER;
	}

	/* Handle Hi DMA */
	if (sizeof(mapping) > sizeof(u32)) {
		addr64 = (u64) mapping >> 32;

		le = get_tx_le(sky2);
		le->tx.addr = cpu_to_le32(addr64);
		le->ctrl = 0;
		le->opcode = OP_ADDR64 | HW_OWNER;
	}

	/* Handle TCP checksum offload */
	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_HW) {
		ptrdiff_t hdr = skb->h.raw - skb->data;

		ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
		if (skb->nh.iph->protocol == IPPROTO_UDP)
			ctrl |= UDPTCP;

		le = get_tx_le(sky2);
		le->tx.csum.start = cpu_to_le16(hdr);
		le->tx.csum.offset = cpu_to_le16(hdr + skb->csum);
		le->length = 0;
		le->ctrl = 1;	/* one packet */
		le->opcode = OP_TCPLISW | HW_OWNER;
	}

	le = get_tx_le(sky2);
	le->tx.addr = cpu_to_le32((u32) mapping);
	le->length = cpu_to_le16(len);
	le->ctrl = ctrl;
	le->opcode = (mss ? OP_LARGESEND : OP_PACKET) | HW_OWNER;

	re = &sky2->tx_ring[le - sky2->tx_le];
	re->skb = skb;
	pci_unmap_addr_set(re, mapaddr, mapping);
	pci_unmap_len_set(re, maplen, len);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		if (sizeof(mapping) > sizeof(u32)) {
			u32 hi = (u64) mapping >> 32;
			if (hi != addr64) {
				le = get_tx_le(sky2);
				le->tx.addr = cpu_to_le32(hi);
				le->ctrl = 0;
				le->opcode = OP_ADDR64 | HW_OWNER;
				addr64 = hi;
			}
		}

		le = get_tx_le(sky2);
		le->tx.addr = cpu_to_le32((u32) mapping);
		le->length = cpu_to_le16(frag->size);
		le->ctrl = ctrl;
		le->opcode = OP_BUFFER | HW_OWNER;

		re = &sky2->tx_ring[le - sky2->tx_le];
		pci_unmap_addr_set(re, mapaddr, mapping);
		pci_unmap_len_set(re, maplen, frag->size);
	}

	le->ctrl |= EOP;

	sky2_put_idx(sky2->hw, txqaddr[sky2->port], sky2->tx_prod,
		     &sky2->tx_last_put, TX_RING_SIZE);

	if (sky2_xmit_avail(sky2) < MAX_SKB_TX_LE) {
		pr_debug("%s: transmit queue full\n", dev->name);
		netif_stop_queue(dev);
	}
	spin_unlock(&sky2->tx_lock);

	dev->trans_start = jiffies;
	return NETDEV_TX_OK;
}

/*
 * Free ring elements starting at tx_cons until "done".
 * This unwinds the elements based on the usage assigned by the
 * xmit routine.
 */
static void sky2_tx_complete(struct net_device *dev, u16 done)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	unsigned idx = sky2->tx_cons;
	struct sk_buff *skb = NULL;

	BUG_ON(done >= TX_RING_SIZE);

	spin_lock(&sky2->tx_lock);
	while (idx != done) {
		struct ring_info *re = sky2->tx_ring + idx;
		struct sky2_tx_le *le = sky2->tx_le + idx;

		BUG_ON(le->opcode == 0);

		switch (le->opcode & ~HW_OWNER) {
		case OP_LARGESEND:
		case OP_PACKET:
			if (skb)
				dev_kfree_skb_any(skb);
			skb = re->skb;
			BUG_ON(!skb);
			re->skb = NULL;

			pci_unmap_single(sky2->hw->pdev,
					 pci_unmap_addr(re, mapaddr),
					 pci_unmap_len(re, maplen),
					 PCI_DMA_TODEVICE);
			break;

		case OP_BUFFER:
			pci_unmap_page(sky2->hw->pdev,
				       pci_unmap_addr(re, mapaddr),
				       pci_unmap_len(re, maplen),
				       PCI_DMA_TODEVICE);
			break;
		}

		le->opcode = 0;
		idx = (idx + 1) % TX_RING_SIZE;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	sky2->tx_cons = idx;

	if (sky2_xmit_avail(sky2) > MAX_SKB_TX_LE)
		netif_wake_queue(dev);
	spin_unlock(&sky2->tx_lock);
}

/* Cleanup all untransmitted buffers, assume transmitter not running */
static inline void sky2_tx_clean(struct sky2_port *sky2)
{
	sky2_tx_complete(sky2->netdev, sky2->tx_prod);
}

/* Network shutdown */
static int sky2_down(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 ctrl;
	int i;

	if (netif_msg_ifdown(sky2))
		printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);

	netif_stop_queue(dev);

	/* Stop transmitter */
	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
	sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));

	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
		     RB_RST_SET | RB_DIS_OP_MD);

	ctrl = gma_read16(hw, port, GM_GP_CTRL);
	ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, ctrl);

	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);

	/* Workaround shared GMAC reset */
	if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
	      && port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);

	/* Disable Force Sync bit and Enable Alloc bit */
	sky2_write8(hw, SK_REG(port, TXA_CTRL),
		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);

	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
	sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
	sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);

	/* Reset the PCI FIFO of the async Tx queue */
	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch units */
	sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
		     PREF_UNIT_RST_SET);

	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);

	/*
	 * The RX Stop command will not work for Yukon-2 if the BMU does not
	 * reach the end of packet and since we can't make sure that we have
	 * incoming data, we must reset the BMU while it is not doing a DMA
	 * transfer. Since it is possible that the RX path is still active,
	 * the RX RAM buffer will be stopped first, so any possible incoming
	 * data will not trigger a DMA. After the RAM buffer is stopped, the
	 * BMU is polled until any DMA in progress is ended and only then it
	 * will be reset.
	 */

	/* disable the RAM Buffer receive queue */
	sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_DIS_OP_MD);

	for (i = 0; i < 0xffff; i++)
		if (sky2_read8(hw, RB_ADDR(rxqaddr[port], Q_RSL))
		    == sky2_read8(hw, RB_ADDR(rxqaddr[port], Q_RL)))
			break;

	sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR),
		     BMU_RST_SET | BMU_FIFO_RST);
	/* reset the Rx prefetch unit */
	sky2_write32(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_CTRL),
		     PREF_UNIT_RST_SET);

	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);

	/* turn off led's */
	sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);

	sky2_tx_clean(sky2);
	sky2_rx_clean(sky2);

	pci_free_consistent(hw->pdev, RX_LE_BYTES,
			    sky2->rx_le, sky2->rx_le_map);
	kfree(sky2->rx_ring);

	pci_free_consistent(hw->pdev,
			    TX_RING_SIZE * sizeof(struct sky2_tx_le),
			    sky2->tx_le, sky2->tx_le_map);
	kfree(sky2->tx_ring);

	return 0;
}

static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
{
	if (hw->chip_id == CHIP_ID_YUKON_FE)
		return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10;

	switch (aux & PHY_M_PS_SPEED_MSK) {
	case PHY_M_PS_SPEED_1000:
		return SPEED_1000;
	case PHY_M_PS_SPEED_100:
		return SPEED_100;
	default:
		return SPEED_10;
	}
}

static void sky2_link_up(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 reg;

	/* Enable Transmit FIFO Underrun */
	sky2_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE)
		reg |= GM_GPCR_DUP_FULL;

	/* enable Rx/Tx */
	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
	gma_write16(hw, port, GM_GP_CTRL, reg);
	gma_read16(hw, port, GM_GP_CTRL);

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);

	netif_carrier_on(sky2->netdev);
	netif_wake_queue(sky2->netdev);

	/* Turn on link LED */
	sky2_write8(hw, SK_REG(port, LNK_LED_REG),
		    LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);

	if (netif_msg_link(sky2))
		printk(KERN_INFO PFX
		       "%s: Link is up at %d Mbps, %s duplex, flowcontrol %s\n",
		       sky2->netdev->name, sky2->speed,
		       sky2->duplex == DUPLEX_FULL ? "full" : "half",
		       (sky2->tx_pause && sky2->rx_pause) ? "both" :
		       sky2->tx_pause ? "tx" :
		       sky2->rx_pause ? "rx" : "none");
}

static void sky2_link_down(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 reg;

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, reg);
	gma_read16(hw, port, GM_GP_CTRL);	/* PCI post */

	if (sky2->rx_pause && !sky2->tx_pause) {
		/* restore Asymmetric Pause bit */
		gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
			     gm_phy_read(hw, port,
					 PHY_MARV_AUNE_ADV)
			     | PHY_M_AN_ASP);
	}

	sky2_phy_reset(hw, port);

	netif_carrier_off(sky2->netdev);
	netif_stop_queue(sky2->netdev);

	/* Turn off link LED */
	sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);

	if (netif_msg_link(sky2))
		printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);
	sky2_phy_init(hw, port);
}

/*
 * Interrupts from the PHY are handled in a tasklet (soft irq)
 * because accessing phy registers requires spin wait, which might
 * cause excess interrupt latency.
 */
static void sky2_phy_task(unsigned long data)
{
	struct sky2_port *sky2 = (struct sky2_port *) data;
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 istatus, phystat;

	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
	phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);

	if (netif_msg_intr(sky2))
		printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
		       sky2->netdev->name, istatus, phystat);

	if (istatus & PHY_M_IS_AN_COMPL) {
		u16 lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);

		if (lpa & PHY_M_AN_RF) {
			printk(KERN_ERR PFX "%s: remote fault",
			       sky2->netdev->name);
		} else if (hw->chip_id != CHIP_ID_YUKON_FE
			   && gm_phy_read(hw, port, PHY_MARV_1000T_STAT)
			   & PHY_B_1000S_MSF) {
			printk(KERN_ERR PFX "%s: master/slave fault",
			       sky2->netdev->name);
		} else if (!(phystat & PHY_M_PS_SPDUP_RES)) {
			printk(KERN_ERR PFX "%s: speed/duplex mismatch",
			       sky2->netdev->name);
		} else {
			sky2->duplex = (phystat & PHY_M_PS_FULL_DUP)
				? DUPLEX_FULL : DUPLEX_HALF;

			sky2->speed = sky2_phy_speed(hw, phystat);

			sky2->tx_pause = (phystat & PHY_M_PS_TX_P_EN) != 0;
			sky2->rx_pause = (phystat & PHY_M_PS_RX_P_EN) != 0;

			if ((!sky2->tx_pause && !sky2->rx_pause) ||
			    (sky2->speed < SPEED_1000 && sky2->duplex == DUPLEX_HALF))
				sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
			else
				sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
			sky2_link_up(sky2);
		}
	} else {
		if (istatus & PHY_M_IS_LSP_CHANGE)
			sky2->speed = sky2_phy_speed(hw, phystat);

		if (istatus & PHY_M_IS_DUP_CHANGE)
			sky2->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
		if (istatus & PHY_M_IS_LST_CHANGE) {
			if (phystat & PHY_M_PS_LINK_UP)
				sky2_link_up(sky2);
			else
				sky2_link_down(sky2);
		}
	}

	local_irq_disable();
	hw->intr_mask |= (port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2;
	sky2_write32(hw, B0_IMSK, hw->intr_mask);
	local_irq_enable();
}

static void sky2_tx_timeout(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	if (netif_msg_timer(sky2))
		printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);

	sky2_write32(sky2->hw, Q_ADDR(txqaddr[sky2->port], Q_CSR), BMU_STOP);
	sky2_read32(sky2->hw, Q_ADDR(txqaddr[sky2->port], Q_CSR));

	sky2_tx_clean(sky2);
}

static int sky2_change_mtu(struct net_device *dev, int new_mtu)
{
	int err = 0;

	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
		return -EINVAL;

	if (netif_running(dev))
		sky2_down(dev);

	dev->mtu = new_mtu;

	if (netif_running(dev))
		err = sky2_up(dev);

	return err;
}

/*
 * Receive one packet.
 * For small packets or errors, just reuse the existing skb.
 * For larger packets, get a new buffer.
 */
static struct sk_buff *sky2_receive(struct sky2_hw *hw, unsigned port,
				    u16 length, u32 status)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);
	struct ring_info *re = sky2->rx_ring + sky2->rx_next;
	struct sk_buff *skb = re->skb;
	dma_addr_t mapping;
	const unsigned int rx_buf_size = dev->mtu + ETH_HLEN + 8;

	if (unlikely(netif_msg_rx_status(sky2)))
		printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
		       dev->name, sky2->rx_next, status, length);

	sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_ring_size;

	pci_unmap_single(sky2->hw->pdev,
			 pci_unmap_addr(re, mapaddr),
			 pci_unmap_len(re, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);

	if (!(status & GMR_FS_RX_OK)
	    || (status & GMR_FS_ANY_ERR)
	    || (length << 16) != (status & GMR_FS_LEN)
	    || length > rx_buf_size)
		goto error;

	re->skb = sky2_rx_alloc_skb(sky2, rx_buf_size, GFP_ATOMIC);
	if (!re->skb)
		goto reuse;

submit:
	mapping = pci_map_single(sky2->hw->pdev, re->skb->data,
				 rx_buf_size, PCI_DMA_FROMDEVICE);

	pci_unmap_len_set(re, maplen, rx_buf_size);
	pci_unmap_addr_set(re, mapaddr, mapping);

	sky2_rx_add(sky2, mapping, rx_buf_size);
	sky2_put_idx(sky2->hw, rxqaddr[sky2->port],
		     sky2->rx_put, &sky2->rx_last_put, RX_LE_SIZE);

	return skb;

error:
	if (netif_msg_rx_err(sky2))
		printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
		       sky2->netdev->name, status, length);

	if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
		sky2->net_stats.rx_length_errors++;
	if (status & GMR_FS_FRAGMENT)
		sky2->net_stats.rx_frame_errors++;
	if (status & GMR_FS_CRC_ERR)
		sky2->net_stats.rx_crc_errors++;
reuse:
	re->skb = skb;
	skb = NULL;
	goto submit;
}

static u16 get_tx_index(u8 port, u32 status, u16 len)
{
	if (port == 0)
		return status & 0xfff;
	else
		return ((status >> 24) & 0xff) | (len & 0xf) << 8;
}
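
/*
 * Worked example (added for clarity): tx indexes for port 1 are packed
 * differently in the status LE.  With status = 0x05000000 and
 * len = 0x0003, the result is ((0x05000000 >> 24) & 0xff)
 * | ((0x0003 & 0xf) << 8) = 0x05 | 0x300 = 0x305.
 */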

/*
 * NAPI poll routine.
 * Both ports share the same status interrupt, therefore there is only
 * one poll routine.
 */
static int sky2_poll(struct net_device *dev, int *budget)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned int to_do = min(dev->quota, *budget);
	unsigned int work_done = 0;
	unsigned char summed[2] = { CHECKSUM_NONE, CHECKSUM_NONE };
	unsigned int csum[2] = { 0 };
	unsigned int rx_handled[2] = { 0, 0 };
	u16 last;

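	/* Note (added for clarity): the hardware reports an OP_RXCHKS
	 * status element ahead of the matching OP_RXSTAT one, so the
	 * per-port summed[]/csum[] state carries the computed checksum
	 * forward to the packet status that follows. */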
	sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
	last = sky2_read16(hw, STAT_PUT_IDX);

	while (hw->st_idx != last && work_done < to_do) {
		struct sky2_status_le *le = hw->st_le + hw->st_idx;
		struct sk_buff *skb;
		u8 port;
		u32 status;
		u16 length;

		rmb();
		status = le32_to_cpu(le->status);
		length = le16_to_cpu(le->length);
		port = le->link;

		BUG_ON(port >= hw->ports);

		switch (le->opcode & ~HW_OWNER) {
		case OP_RXSTAT:
			++rx_handled[port];
			skb = sky2_receive(hw, port, length, status);
			if (likely(skb)) {
				__skb_put(skb, length);
				skb->protocol = eth_type_trans(skb, dev);

				/* Add hw checksum if available */
				skb->ip_summed = summed[port];
				skb->csum = csum[port];

				/* Clear for next packet */
				csum[port] = 0;
				summed[port] = CHECKSUM_NONE;

				netif_receive_skb(skb);

				dev->last_rx = jiffies;
				++work_done;
			}
			break;

		case OP_RXCHKS:
			/* Save computed checksum for next rx */
			csum[port] = status & 0xffff;	/* status already converted to host order above */
			summed[port] = CHECKSUM_HW;
			break;

		case OP_TXINDEXLE:
			sky2_tx_complete(hw->dev[port],
					 get_tx_index(port, status, length));
			break;

		case OP_RXTIMESTAMP:
			break;

		default:
			if (net_ratelimit())
				printk(KERN_WARNING PFX "unknown status opcode 0x%x\n",
				       le->opcode);
			break;
		}

		hw->st_idx = (hw->st_idx + 1) & (STATUS_RING_SIZE - 1);
	}

	*budget -= work_done;
	dev->quota -= work_done;
	if (work_done < to_do) {
		/*
		 * Another chip workaround, need to restart TX timer if status
		 * LE was handled. WA_DEV_43_418
		 */
		if (is_ec_a1(hw)) {
			sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
			sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
		}

		hw->intr_mask |= Y2_IS_STAT_BMU;
		sky2_write32(hw, B0_IMSK, hw->intr_mask);
		netif_rx_complete(dev);
	}

	return work_done >= to_do;
}

static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
{
	struct net_device *dev = hw->dev[port];

	printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
	       dev->name, status);

	if (status & Y2_IS_PAR_RD1) {
		printk(KERN_ERR PFX "%s: ram data read parity error\n",
		       dev->name);
		/* Clear IRQ */
		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
	}

	if (status & Y2_IS_PAR_WR1) {
		printk(KERN_ERR PFX "%s: ram data write parity error\n",
		       dev->name);

		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
	}

	if (status & Y2_IS_PAR_MAC1) {
		printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
	}

	if (status & Y2_IS_PAR_RX1) {
		printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
		sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
	}

	if (status & Y2_IS_TCP_TXA1) {
		printk(KERN_ERR PFX "%s: TCP segmentation error\n", dev->name);
		sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
	}
}

static void sky2_hw_intr(struct sky2_hw *hw)
{
	u32 status = sky2_read32(hw, B0_HWE_ISRC);

	if (status & Y2_IS_TIST_OV) {
		pr_debug(PFX "%s: unused timer overflow??\n",
			 pci_name(hw->pdev));
		sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	}

	if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
		u16 pci_err = sky2_read16(hw, PCI_C(PCI_STATUS));
		printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
		       pci_name(hw->pdev), pci_err);

		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		sky2_write16(hw, PCI_C(PCI_STATUS),
			     pci_err | PCI_STATUS_ERROR_BITS);
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	if (status & Y2_IS_PCI_EXP) {
		/* PCI-Express uncorrectable error occurred */
		u32 pex_err = sky2_read32(hw, PCI_C(PEX_UNC_ERR_STAT));

		/*
		 * On a PCI-Express bus, bridges are called root complexes.
		 * PCI-Express errors are recognized by the root complex too,
		 * which requests the system to handle the problem.  After the
		 * error occurs, it may be that the adapter can no longer be
		 * accessed.
		 */
		printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
		       pci_name(hw->pdev), pex_err);

		/* clear the interrupt */
		sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		sky2_write32(hw, PCI_C(PEX_UNC_ERR_STAT), 0xffffffffUL);
		sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

		if (pex_err & PEX_FATAL_ERRORS) {
			u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
			hwmsk &= ~Y2_IS_PCI_EXP;
			sky2_write32(hw, B0_HWE_IMSK, hwmsk);
		}
	}

	if (status & Y2_HWE_L1_MASK)
		sky2_hw_error(hw, 0, status);
	status >>= 8;
	if (status & Y2_HWE_L1_MASK)
		sky2_hw_error(hw, 1, status);
}

static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);
	u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));

	if (netif_msg_intr(sky2))
		printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
		       dev->name, status);

	if (status & GM_IS_RX_FF_OR) {
		++sky2->net_stats.rx_fifo_errors;
		sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
	}

	if (status & GM_IS_TX_FF_UR) {
		++sky2->net_stats.tx_fifo_errors;
		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
	}
}

static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);

	hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
	sky2_write32(hw, B0_IMSK, hw->intr_mask);
	tasklet_schedule(&sky2->phy_task);
}

static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sky2_hw *hw = dev_id;
	u32 status;

	status = sky2_read32(hw, B0_Y2_SP_ISRC2);
	if (status == 0 || status == ~0)	/* hotplug or shared irq */
		return IRQ_NONE;

	if (status & Y2_IS_HW_ERR)
		sky2_hw_intr(hw);

	if ((status & Y2_IS_STAT_BMU) && netif_rx_schedule_prep(hw->dev[0])) {
		hw->intr_mask &= ~Y2_IS_STAT_BMU;
		sky2_write32(hw, B0_IMSK, hw->intr_mask);
		__netif_rx_schedule(hw->dev[0]);
	}

	if (status & Y2_IS_IRQ_PHY1)
		sky2_phy_intr(hw, 0);

	if (status & Y2_IS_IRQ_PHY2)
		sky2_phy_intr(hw, 1);

	if (status & Y2_IS_IRQ_MAC1)
		sky2_mac_intr(hw, 0);

	if (status & Y2_IS_IRQ_MAC2)
		sky2_mac_intr(hw, 1);

	sky2_write32(hw, B0_Y2_SP_ICR, 2);
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void sky2_netpoll(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	disable_irq(dev->irq);
	sky2_intr(dev->irq, sky2->hw, NULL);
	enable_irq(dev->irq);
}
#endif

/* Chip internal frequency for clock calculations */
static inline u32 sky2_khz(const struct sky2_hw *hw)
{
	switch (hw->chip_id) {
	case CHIP_ID_YUKON_EC:
		return 125000;	/* 125 Mhz */
	case CHIP_ID_YUKON_FE:
		return 100000;	/* 100 Mhz */
	default:		/* YUKON_XL */
		return 156000;	/* 156 Mhz */
	}
}

static inline u32 sky2_ms2clk(const struct sky2_hw *hw, u32 ms)
{
	return sky2_khz(hw) * ms;
}

static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
{
	return (sky2_khz(hw) * us) / 1000;	/* fix: use the us argument, not a hardcoded 75 */
}
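
/*
 * Worked example (added for clarity): on Yukon EC the core clock is
 * 125000 kHz, so sky2_ms2clk(hw, 10) = 125000 * 10 = 1250000 ticks and
 * sky2_us2clk(hw, 75) = (125000 * 75) / 1000 = 9375 ticks.
 */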
1706
1707static int sky2_reset(struct sky2_hw *hw)
1708{
1709 u32 ctst, power;
1710 u16 status;
1711 u8 t8, pmd_type;
1712 int i;
1713
1714 ctst = sky2_read32(hw, B0_CTST);
1715
1716 sky2_write8(hw, B0_CTST, CS_RST_CLR);
1717 hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
1718 if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) {
1719 printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
1720 pci_name(hw->pdev), hw->chip_id);
1721 return -EOPNOTSUPP;
1722 }
1723
1724 /* disable ASF */
1725 if (hw->chip_id <= CHIP_ID_YUKON_EC) {
1726 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
1727 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
1728 }
1729
1730 /* do a SW reset */
1731 sky2_write8(hw, B0_CTST, CS_RST_SET);
1732 sky2_write8(hw, B0_CTST, CS_RST_CLR);
1733
1734 /* clear PCI errors, if any */
1735 status = sky2_read16(hw, PCI_C(PCI_STATUS));
1736 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1737 sky2_write16(hw, PCI_C(PCI_STATUS),
1738 status | PCI_STATUS_ERROR_BITS);
1739
1740 sky2_write8(hw, B0_CTST, CS_MRST_CLR);
1741
1742 /* clear any PEX errors */
1743 if (is_pciex(hw)) {
1744 sky2_write32(hw, PCI_C(PEX_UNC_ERR_STAT), 0xffffffffUL);
1745 sky2_read16(hw, PCI_C(PEX_LNK_STAT));
1746 }
1747
1748 pmd_type = sky2_read8(hw, B2_PMD_TYP);
1749 hw->copper = !(pmd_type == 'L' || pmd_type == 'S');
1750
1751 hw->ports = 1;
1752 t8 = sky2_read8(hw, B2_Y2_HW_RES);
1753 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
1754 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1755 ++hw->ports;
1756 }
1757 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
1758
1759 /* switch power to VCC (WA for VAUX problem) */
1760 sky2_write8(hw, B0_POWER_CTRL,
1761 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
1762
1763 /* disable Core Clock Division, */
1764 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
1765
1766 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
1767 /* enable bits are inverted */
1768 sky2_write8(hw, B2_Y2_CLK_GATE,
1769 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1770 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1771 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
1772 else
1773 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
1774
1775 /* Turn off phy power saving */
1776 power = sky2_read32(hw, PCI_C(PCI_DEV_REG1));
1777 power &= ~(PCI_Y2_PHY1_POWD|PCI_Y2_PHY2_POWD);
1778
1779 /* back asswards .. */
1780 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) {
1781 power |= PCI_Y2_PHY1_COMA;
1782 if (hw->ports > 1)
1783 power |= PCI_Y2_PHY2_COMA;
1784 }
1785 sky2_write32(hw, PCI_C(PCI_DEV_REG1), power);
1786
1787 for (i = 0; i < hw->ports; i++) {
1788 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
1789 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
1790 }
1791
1792 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1793
1794 sky2_write32(hw, B2_I2C_IRQ, 1); /* Clear I2C IRQ noise */
1795
1796 /* turn off hardware timer (unused) */
1797 sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
1798 sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
1799
1800 sky2_write8(hw, B0_Y2LED, LED_STAT_ON);
1801
1802 /* Turn on descriptor polling -- is this necessary? */
1803 sky2_write32(hw, B28_DPT_INI, sky2_us2clk(hw, 75));
1804 sky2_write8(hw, B28_DPT_CTRL, DPT_START);
1805
1806 /* Turn off receive timestamp */
1807 sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
1808
1809 /* enable the Tx Arbiters */
1810 for (i = 0; i < hw->ports; i++)
1811 sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
1812
1813 /* Initialize ram interface */
1814 for (i = 0; i < hw->ports; i++) {
1815 sky2_write16(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
1816
1817 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
1818 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
1819 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
1820 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
1821 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
1822 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
1823 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
1824 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
1825 sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
1826 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
1827 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
1828 sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
1829 }
1830
1831 /* Optimize PCI Express access */
1832 if (is_pciex(hw)) {
1833 u16 ctrl = sky2_read32(hw, PCI_C(PEX_DEV_CTRL));
1834 ctrl &= ~PEX_DC_MAX_RRS_MSK;
1835 ctrl |= PEX_DC_MAX_RD_RQ_SIZE(4);
1836 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1837 sky2_write16(hw, PCI_C(PEX_DEV_CTRL), ctrl);
1838 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1839 }
1840
1841 sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
1842
1843 hw->intr_mask = Y2_IS_BASE;
1844 sky2_write32(hw, B0_IMSK, hw->intr_mask);
1845
1846 	/* disable all GMAC IRQs */
1847 sky2_write8(hw, GMAC_IRQ_MSK, 0);
1848
1849 spin_lock_bh(&hw->phy_lock);
1850 for (i = 0; i < hw->ports; i++)
1851 sky2_phy_reset(hw, i);
1852 spin_unlock_bh(&hw->phy_lock);
1853
1854 /* Setup ring for status responses */
1855 hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES,
1856 &hw->st_dma);
1857 if (!hw->st_le)
1858 return -ENOMEM;
1859
1860 memset(hw->st_le, 0, STATUS_LE_BYTES);
1861 hw->st_idx = 0;
1862
1863 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
1864 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);
1865
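	/* Tell the chip where the status ring lives; the high word of the
	 * bus address is zero when dma_addr_t is only 32 bits wide.
	 */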
1866 sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
1867 sky2_write32(hw, STAT_LIST_ADDR_HI, (u64)hw->st_dma >> 32);
1868
1869 /* Set the list last index */
1870 sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE-1);
1871
1872 if (is_ec_a1(hw)) {
1873 /* WA for dev. #4.3 */
1874 sky2_write16(hw, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
1875
1876 /* set Status-FIFO watermark */
1877 sky2_write8(hw, STAT_FIFO_WM, 0x21); /* WA for dev. #4.18 */
1878
1879 /* set Status-FIFO ISR watermark */
1880 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x07);/* WA for dev. #4.18 */
1881
1882 /* WA for dev. #4.3 and #4.18 */
1883 /* set Status-FIFO Tx timer init value */
1884 sky2_write32(hw, STAT_TX_TIMER_INI, sky2_ms2clk(hw, 10));
1885 } else {
1886 /*
1887 		 * These settings should avoid the temporary hanging
1888 		 * of the status BMU.  Maybe not all of them are
1889 		 * required... still under investigation...
1890 */
1891 sky2_write16(hw, STAT_TX_IDX_TH, 0x000a);
1892
1893 /* set Status-FIFO watermark */
1894 sky2_write8(hw, STAT_FIFO_WM, 0x10);
1895
1896 /* set Status-FIFO ISR watermark */
1897 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
1898 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x10);
1900 else /* WA 4109 */
1901 sky2_write8(hw, STAT_FIFO_ISR_WM, 0x04);
1902
1903 sky2_write32(hw, STAT_ISR_TIMER_INI, 0x0190);
1904 }
1905
1906 /* enable the prefetch unit */
1907 /* the operational bit is not functional for Yukon-EC, but apparently fixed in Yukon-2? */
1908 sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
1909
1910 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
1911 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
1912 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
1913
1914 return 0;
1915}
1916
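/* Map the hardware type to an ethtool SUPPORTED_* mask: copper ports
 * do 10/100 (plus gigabit on everything except Yukon-FE), fiber ports
 * are gigabit full duplex only.
 */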
1917static inline u32 sky2_supported_modes(const struct sky2_hw *hw)
1918{
1919 u32 modes;
1920 if (hw->copper) {
1921 modes = SUPPORTED_10baseT_Half
1922 | SUPPORTED_10baseT_Full
1923 | SUPPORTED_100baseT_Half
1924 | SUPPORTED_100baseT_Full
1925 			| SUPPORTED_Autoneg | SUPPORTED_TP;
1926
1927 if (hw->chip_id != CHIP_ID_YUKON_FE)
1928 modes |= SUPPORTED_1000baseT_Half
1929 | SUPPORTED_1000baseT_Full;
1930 } else
1931 modes = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
1932 | SUPPORTED_Autoneg;
1933 return modes;
1934}
1935
1936static int sky2_get_settings(struct net_device *dev,
1937 struct ethtool_cmd *ecmd)
1938{
1939 struct sky2_port *sky2 = netdev_priv(dev);
1940 struct sky2_hw *hw = sky2->hw;
1941
1942 ecmd->transceiver = XCVR_INTERNAL;
1943 ecmd->supported = sky2_supported_modes(hw);
1944 ecmd->phy_address = PHY_ADDR_MARV;
1945 	if (hw->copper)
1946 		ecmd->port = PORT_TP;
1947 	else
1948 		ecmd->port = PORT_FIBRE;
1957
1958 ecmd->advertising = sky2->advertising;
1959 ecmd->autoneg = sky2->autoneg;
1960 ecmd->speed = sky2->speed;
1961 ecmd->duplex = sky2->duplex;
1962 return 0;
1963}
1964
1965static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1966{
1967 struct sky2_port *sky2 = netdev_priv(dev);
1968 const struct sky2_hw *hw = sky2->hw;
1969 u32 supported = sky2_supported_modes(hw);
1970
1971 if (ecmd->autoneg == AUTONEG_ENABLE) {
1972 ecmd->advertising = supported;
1973 sky2->duplex = -1;
1974 sky2->speed = -1;
1975 } else {
1976 u32 setting;
1977
1978 		switch (ecmd->speed) {
1979 case SPEED_1000:
1980 if (ecmd->duplex == DUPLEX_FULL)
1981 setting = SUPPORTED_1000baseT_Full;
1982 else if (ecmd->duplex == DUPLEX_HALF)
1983 setting = SUPPORTED_1000baseT_Half;
1984 else
1985 return -EINVAL;
1986 break;
1987 case SPEED_100:
1988 if (ecmd->duplex == DUPLEX_FULL)
1989 setting = SUPPORTED_100baseT_Full;
1990 else if (ecmd->duplex == DUPLEX_HALF)
1991 setting = SUPPORTED_100baseT_Half;
1992 else
1993 return -EINVAL;
1994 break;
1995
1996 case SPEED_10:
1997 if (ecmd->duplex == DUPLEX_FULL)
1998 setting = SUPPORTED_10baseT_Full;
1999 else if (ecmd->duplex == DUPLEX_HALF)
2000 setting = SUPPORTED_10baseT_Half;
2001 else
2002 return -EINVAL;
2003 break;
2004 default:
2005 return -EINVAL;
2006 }
2007
2008 if ((setting & supported) == 0)
2009 return -EINVAL;
2010
2011 sky2->speed = ecmd->speed;
2012 sky2->duplex = ecmd->duplex;
2013 }
2014
2015 sky2->autoneg = ecmd->autoneg;
2016 sky2->advertising = ecmd->advertising;
2017
2018 if (netif_running(dev)) {
2019 sky2_down(dev);
2020 sky2_up(dev);
2021 }
2022
2023 return 0;
2024}
2025
2026static void sky2_get_drvinfo(struct net_device *dev,
2027 struct ethtool_drvinfo *info)
2028{
2029 struct sky2_port *sky2 = netdev_priv(dev);
2030
2031 strcpy(info->driver, DRV_NAME);
2032 strcpy(info->version, DRV_VERSION);
2033 strcpy(info->fw_version, "N/A");
2034 strcpy(info->bus_info, pci_name(sky2->hw->pdev));
2035}
2036
2037static const struct sky2_stat {
2038 char name[ETH_GSTRING_LEN];
2039 u16 offset;
2040} sky2_stats[] = {
2041 { "tx_bytes", GM_TXO_OK_HI },
2042 { "rx_bytes", GM_RXO_OK_HI },
2043 { "tx_broadcast", GM_TXF_BC_OK },
2044 { "rx_broadcast", GM_RXF_BC_OK },
2045 { "tx_multicast", GM_TXF_MC_OK },
2046 { "rx_multicast", GM_RXF_MC_OK },
2047 { "tx_unicast", GM_TXF_UC_OK },
2048 { "rx_unicast", GM_RXF_UC_OK },
2049 { "tx_mac_pause", GM_TXF_MPAUSE },
2050 { "rx_mac_pause", GM_RXF_MPAUSE },
2051 { "collisions", GM_TXF_SNG_COL },
2052 	{ "late_collision", GM_TXF_LAT_COL },
2053 { "aborted", GM_TXF_ABO_COL },
2054 { "multi_collisions", GM_TXF_MUL_COL },
2055 { "fifo_underrun", GM_TXE_FIFO_UR },
2056 { "fifo_overflow", GM_RXE_FIFO_OV },
2058 { "rx_jabber", GM_RXF_JAB_PKT },
2059 { "rx_runt", GM_RXE_FRAG },
2060 { "rx_too_long", GM_RXF_LNG_ERR },
2061 { "rx_fcs_error", GM_RXF_FCS_ERR },
2062};
2063
2065static u32 sky2_get_rx_csum(struct net_device *dev)
2066{
2067 struct sky2_port *sky2 = netdev_priv(dev);
2068
2069 return sky2->rx_csum;
2070}
2071
2072static int sky2_set_rx_csum(struct net_device *dev, u32 data)
2073{
2074 struct sky2_port *sky2 = netdev_priv(dev);
2075
2076 sky2->rx_csum = data;
2077 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
2078 data ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
2079
2080 return 0;
2081}
2082
2083static u32 sky2_get_msglevel(struct net_device *netdev)
2084{
2085 struct sky2_port *sky2 = netdev_priv(netdev);
2086 return sky2->msg_enable;
2087}
2088
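/* Read the GMAC MIB counters for one port.  The octet counters are
 * 64 bits wide (hi/lo register pair); all other entries are single
 * 32 bit counters at the offsets listed in sky2_stats[].  Despite
 * the name, these are MAC counters, not PHY registers.
 */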
2089static void sky2_phy_stats(struct sky2_port *sky2, u64 *data)
2090{
2091 struct sky2_hw *hw = sky2->hw;
2092 unsigned port = sky2->port;
2093 int i;
2094
2095 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
2096 | (u64) gma_read32(hw, port, GM_TXO_OK_LO);
2097 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
2098 | (u64) gma_read32(hw, port, GM_RXO_OK_LO);
2099
2100 for (i = 2; i < ARRAY_SIZE(sky2_stats); i++)
2101 data[i] = (u64) gma_read32(hw, port, sky2_stats[i].offset);
2102}
2103
2105static void sky2_set_msglevel(struct net_device *netdev, u32 value)
2106{
2107 struct sky2_port *sky2 = netdev_priv(netdev);
2108 sky2->msg_enable = value;
2109}
2110
2111static int sky2_get_stats_count(struct net_device *dev)
2112{
2113 return ARRAY_SIZE(sky2_stats);
2114}
2115
2116static void sky2_get_ethtool_stats(struct net_device *dev,
2117 struct ethtool_stats *stats, u64 *data)
2118{
2119 struct sky2_port *sky2 = netdev_priv(dev);
2120
2121 sky2_phy_stats(sky2, data);
2122}
2123
2124static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2125{
2126 int i;
2127
2128 switch (stringset) {
2129 case ETH_SS_STATS:
2130 for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
2131 memcpy(data + i * ETH_GSTRING_LEN,
2132 sky2_stats[i].name, ETH_GSTRING_LEN);
2133 break;
2134 }
2135}
2136
2137 /* Use the hardware MIB counters for critical-path statistics and for
2138  * transmit feedback that is not reported at interrupt time.
2139  * Other errors are accounted for in the interrupt handler.
2140 */
2141static struct net_device_stats *sky2_get_stats(struct net_device *dev)
2142{
2143 struct sky2_port *sky2 = netdev_priv(dev);
2144 u64 data[ARRAY_SIZE(sky2_stats)];
2145
2146 sky2_phy_stats(sky2, data);
2147
2148 sky2->net_stats.tx_bytes = data[0];
2149 sky2->net_stats.rx_bytes = data[1];
2150 sky2->net_stats.tx_packets = data[2] + data[4] + data[6];
2151 sky2->net_stats.rx_packets = data[3] + data[5] + data[7];
2152 	sky2->net_stats.multicast = data[3] + data[5];	/* rx broadcast + multicast */
2153 sky2->net_stats.collisions = data[10];
2154 sky2->net_stats.tx_aborted_errors = data[12];
2155
2156 return &sky2->net_stats;
2157}
2158
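/* Change the station address.  The chip keeps the address in two
 * per-port register blocks (B2_MAC_1 and B2_MAC_2), so both copies are
 * rewritten and the port is restarted to pick them up.
 */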
2159static int sky2_set_mac_address(struct net_device *dev, void *p)
2160{
2161 struct sky2_port *sky2 = netdev_priv(dev);
2162 struct sockaddr *addr = p;
2163 int err = 0;
2164
2165 if (!is_valid_ether_addr(addr->sa_data))
2166 return -EADDRNOTAVAIL;
2167 	if (netif_running(dev))
2168 		sky2_down(dev);
2169 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
2170 	memcpy_toio(sky2->hw->regs + B2_MAC_1 + sky2->port*8,
2171 		dev->dev_addr, ETH_ALEN);
2172 	memcpy_toio(sky2->hw->regs + B2_MAC_2 + sky2->port*8,
2173 		dev->dev_addr, ETH_ALEN);
2174 	if (netif_running(dev))
2175 		err = sky2_up(dev);
2176 return err;
2177}
2178
2179static void sky2_set_multicast(struct net_device *dev)
2180{
2181 struct sky2_port *sky2 = netdev_priv(dev);
2182 struct sky2_hw *hw = sky2->hw;
2183 unsigned port = sky2->port;
2184 struct dev_mc_list *list = dev->mc_list;
2185 u16 reg;
2186 u8 filter[8];
2187
2188 memset(filter, 0, sizeof(filter));
2189
2190 reg = gma_read16(hw, port, GM_RX_CTRL);
2191 reg |= GM_RXCR_UCF_ENA;
2192
2193 	if (dev->flags & IFF_PROMISC)		/* promiscuous */
2194 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
2195 else if (dev->flags & IFF_ALLMULTI) /* all multicast */
2196 memset(filter, 0xff, sizeof(filter));
2197 else if (dev->mc_count == 0) /* no multicast */
2198 reg &= ~GM_RXCR_MCF_ENA;
2199 else {
2200 int i;
2201 reg |= GM_RXCR_MCF_ENA;
2202
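		/* Hash filter: the low six bits of the CRC-32 of each
		 * multicast address select one of the 64 bits in the
		 * 8-byte filter table.
		 */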
2203 for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
2204 u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
2205 filter[bit/8] |= 1 << (bit%8);
2206 }
2207 }
2208
2210 gma_write16(hw, port, GM_MC_ADDR_H1,
2211 (u16)filter[0] | ((u16)filter[1] << 8));
2212 gma_write16(hw, port, GM_MC_ADDR_H2,
2213 (u16)filter[2] | ((u16)filter[3] << 8));
2214 gma_write16(hw, port, GM_MC_ADDR_H3,
2215 (u16)filter[4] | ((u16)filter[5] << 8));
2216 gma_write16(hw, port, GM_MC_ADDR_H4,
2217 (u16)filter[6] | ((u16)filter[7] << 8));
2218
2219 gma_write16(hw, port, GM_RX_CTRL, reg);
2220}
2221
2222 /* No locking of blink state is needed: blinking is driven through
2223  * ethtool, which always runs under the RTNL mutex
2224 */
2225static inline void sky2_led(struct sky2_hw *hw, unsigned port, int on)
2226{
2227 spin_lock_bh(&hw->phy_lock);
2228 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
2229 if (on)
2230 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
2231 PHY_M_LED_MO_DUP(MO_LED_ON) |
2232 PHY_M_LED_MO_10(MO_LED_ON) |
2233 PHY_M_LED_MO_100(MO_LED_ON) |
2234 PHY_M_LED_MO_1000(MO_LED_ON) |
2235 PHY_M_LED_MO_RX(MO_LED_ON));
2236 else
2237 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
2239 PHY_M_LED_MO_DUP(MO_LED_OFF) |
2240 PHY_M_LED_MO_10(MO_LED_OFF) |
2241 PHY_M_LED_MO_100(MO_LED_OFF) |
2242 PHY_M_LED_MO_1000(MO_LED_OFF) |
2243 PHY_M_LED_MO_RX(MO_LED_OFF));
2244
2245 spin_unlock_bh(&hw->phy_lock);
2246}
2247
2248 /* blink the LEDs to help locate a board */
2249static int sky2_phys_id(struct net_device *dev, u32 data)
2250{
2251 struct sky2_port *sky2 = netdev_priv(dev);
2252 struct sky2_hw *hw = sky2->hw;
2253 unsigned port = sky2->port;
2254 u16 ledctrl, ledover;
2255 long ms;
2256 int onoff = 1;
2257
2258 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
2259 ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT);
2260 else
2261 ms = data * 1000;
2262
2263 /* save initial values */
2264 spin_lock_bh(&hw->phy_lock);
2265 ledctrl = gm_phy_read(hw, port, PHY_MARV_LED_CTRL);
2266 ledover = gm_phy_read(hw, port, PHY_MARV_LED_OVER);
2267 spin_unlock_bh(&hw->phy_lock);
2268
2269 while (ms > 0) {
2270 sky2_led(hw, port, onoff);
2271 onoff = !onoff;
2272
2273 if (msleep_interruptible(250))
2274 break; /* interrupted */
2275 ms -= 250;
2276 }
2277
2278 	/* restore the saved LED register values */
2279 spin_lock_bh(&hw->phy_lock);
2280 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
2281 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
2282 spin_unlock_bh(&hw->phy_lock);
2283
2284 return 0;
2285}
2286
2287static void sky2_get_pauseparam(struct net_device *dev,
2288 struct ethtool_pauseparam *ecmd)
2289{
2290 struct sky2_port *sky2 = netdev_priv(dev);
2291
2292 ecmd->tx_pause = sky2->tx_pause;
2293 ecmd->rx_pause = sky2->rx_pause;
2294 ecmd->autoneg = sky2->autoneg;
2295}
2296
2297static int sky2_set_pauseparam(struct net_device *dev,
2298 struct ethtool_pauseparam *ecmd)
2299{
2300 struct sky2_port *sky2 = netdev_priv(dev);
2301 int err = 0;
2302
2303 sky2->autoneg = ecmd->autoneg;
2304 sky2->tx_pause = ecmd->tx_pause != 0;
2305 sky2->rx_pause = ecmd->rx_pause != 0;
2306
2307 if (netif_running(dev)) {
2308 sky2_down(dev);
2309 err = sky2_up(dev);
2310 }
2311
2312 return err;
2313}
2314
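/* Wake-on-LAN: only magic-packet wake is supported.  Enabling it also
 * programs the station address into the WOL unit so the magic packet
 * match still works after the host sleeps.
 */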
2315#ifdef CONFIG_PM
2316static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2317{
2318 struct sky2_port *sky2 = netdev_priv(dev);
2319
2320 wol->supported = WAKE_MAGIC;
2321 wol->wolopts = sky2->wol ? WAKE_MAGIC : 0;
2322}
2323
2324static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2325{
2326 struct sky2_port *sky2 = netdev_priv(dev);
2327 struct sky2_hw *hw = sky2->hw;
2328
2329 if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
2330 return -EOPNOTSUPP;
2331
2332 sky2->wol = wol->wolopts == WAKE_MAGIC;
2333
2334 if (sky2->wol) {
2335 memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);
2336
2337 sky2_write16(hw, WOL_CTRL_STAT,
2338 WOL_CTL_ENA_PME_ON_MAGIC_PKT |
2339 WOL_CTL_ENA_MAGIC_PKT_UNIT);
2340 } else
2341 sky2_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);
2342
2343 return 0;
2344}
2345#endif
2346
2348static struct ethtool_ops sky2_ethtool_ops = {
2349 .get_settings = sky2_get_settings,
2350 .set_settings = sky2_set_settings,
2351 .get_drvinfo = sky2_get_drvinfo,
2352 .get_msglevel = sky2_get_msglevel,
2353 .set_msglevel = sky2_set_msglevel,
2354 .get_link = ethtool_op_get_link,
2355 .get_sg = ethtool_op_get_sg,
2356 .set_sg = ethtool_op_set_sg,
2357 .get_tx_csum = ethtool_op_get_tx_csum,
2358 .set_tx_csum = ethtool_op_set_tx_csum,
2359 .get_tso = ethtool_op_get_tso,
2360 .set_tso = ethtool_op_set_tso,
2361 .get_rx_csum = sky2_get_rx_csum,
2362 .set_rx_csum = sky2_set_rx_csum,
2363 .get_strings = sky2_get_strings,
2364 .get_pauseparam = sky2_get_pauseparam,
2365 .set_pauseparam = sky2_set_pauseparam,
2366#ifdef CONFIG_PM
2367 .get_wol = sky2_get_wol,
2368 .set_wol = sky2_set_wol,
2369#endif
2370 .phys_id = sky2_phys_id,
2371 .get_stats_count = sky2_get_stats_count,
2372 .get_ethtool_stats = sky2_get_ethtool_stats,
2373};
2374
2375/* Initialize network device */
2376static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
2377 unsigned port, int highmem)
2378{
2379 struct sky2_port *sky2;
2380 struct net_device *dev = alloc_etherdev(sizeof(*sky2));
2381
2382 if (!dev) {
2383 		printk(KERN_ERR PFX "etherdev alloc failed\n");
2384 return NULL;
2385 }
2386
2387 SET_MODULE_OWNER(dev);
2388 SET_NETDEV_DEV(dev, &hw->pdev->dev);
2389 dev->open = sky2_up;
2390 dev->stop = sky2_down;
2391 dev->hard_start_xmit = sky2_xmit_frame;
2392 dev->get_stats = sky2_get_stats;
2393 dev->set_multicast_list = sky2_set_multicast;
2394 dev->set_mac_address = sky2_set_mac_address;
2395 dev->change_mtu = sky2_change_mtu;
2396 SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
2397 dev->tx_timeout = sky2_tx_timeout;
2398 dev->watchdog_timeo = TX_WATCHDOG;
2399 if (port == 0)
2400 dev->poll = sky2_poll;
2401 dev->weight = NAPI_WEIGHT;
2402#ifdef CONFIG_NET_POLL_CONTROLLER
2403 dev->poll_controller = sky2_netpoll;
2404#endif
2405 dev->irq = hw->pdev->irq;
2406
2407 sky2 = netdev_priv(dev);
2408 sky2->netdev = dev;
2409 sky2->hw = hw;
2410 sky2->msg_enable = netif_msg_init(debug, default_msg);
2411
2412 spin_lock_init(&sky2->tx_lock);
2413 /* Auto speed and flow control */
2414 sky2->autoneg = AUTONEG_ENABLE;
2415 sky2->tx_pause = 0;
2416 sky2->rx_pause = 1;
2417 sky2->duplex = -1;
2418 sky2->speed = -1;
2419 sky2->advertising = sky2_supported_modes(hw);
2420 sky2->rx_csum = 1;
2421 sky2->rx_ring_size = is_ec_a1(hw) ? MIN_RX_BUFFERS : MAX_RX_BUFFERS;
2422 tasklet_init(&sky2->phy_task, sky2_phy_task, (unsigned long) sky2);
2423
2424 hw->dev[port] = dev;
2425
2426 sky2->port = port;
2427
2428 dev->features |= NETIF_F_LLTX;
2429 if (highmem)
2430 dev->features |= NETIF_F_HIGHDMA;
2431 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
2432
2433 /* read the mac address */
2434 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
2435
2436 /* device is off until link detection */
2437 netif_carrier_off(dev);
2438 netif_stop_queue(dev);
2439
2440 return dev;
2441}
2442
2443static inline void sky2_show_addr(struct net_device *dev)
2444{
2445 const struct sky2_port *sky2 = netdev_priv(dev);
2446
2447 if (netif_msg_probe(sky2))
2448 printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n",
2449 dev->name,
2450 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
2451 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
2452}
2453
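/* PCI probe: claim the device, pick a DMA mask (64 bit when dma_addr_t
 * is wide enough, else 32 bit), map BAR 0, reset the chip and register
 * one net_device per detected port.
 */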
2454static int __devinit sky2_probe(struct pci_dev *pdev,
2455 const struct pci_device_id *ent)
2456{
2457 struct net_device *dev, *dev1;
2458 struct sky2_hw *hw;
2459 int err, using_dac = 0;
2460
2461 if ((err = pci_enable_device(pdev))) {
2462 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
2463 pci_name(pdev));
2464 goto err_out;
2465 }
2466
2467 if ((err = pci_request_regions(pdev, DRV_NAME))) {
2468 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
2469 pci_name(pdev));
2470 goto err_out_disable_pdev;
2471 }
2472
2473 pci_set_master(pdev);
2474
2475 if (sizeof(dma_addr_t) > sizeof(u32)) {
2476 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
2477 if (!err)
2478 using_dac = 1;
2479 }
2480
2481 if (!using_dac) {
2482 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2483 if (err) {
2484 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
2485 pci_name(pdev));
2486 goto err_out_free_regions;
2487 }
2488 }
2489
2490#ifdef __BIG_ENDIAN
2491 	/* byte swap descriptors in hardware */
2492 {
2493 u32 reg;
2494
2495 pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
2496 reg |= PCI_REV_DESC;
2497 pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
2498 }
2499#endif
2500
2501 err = -ENOMEM;
2502 hw = kmalloc(sizeof(*hw), GFP_KERNEL);
2503 if (!hw) {
2504 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
2505 pci_name(pdev));
2506 goto err_out_free_regions;
2507 }
2508
2509 memset(hw, 0, sizeof(*hw));
2510 hw->pdev = pdev;
2511 spin_lock_init(&hw->phy_lock);
2512
2513 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
2514 if (!hw->regs) {
2515 printk(KERN_ERR PFX "%s: cannot map device registers\n",
2516 pci_name(pdev));
2517 goto err_out_free_hw;
2518 }
2519
2520 err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
2521 if (err) {
2522 printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
2523 pci_name(pdev), pdev->irq);
2524 goto err_out_iounmap;
2525 }
2526 pci_set_drvdata(pdev, hw);
2527
2528 err = sky2_reset(hw);
2529 if (err)
2530 goto err_out_free_irq;
2531
2532 printk(KERN_INFO PFX "addr 0x%lx irq %d chip 0x%x (%s) rev %d\n",
2533 pci_resource_start(pdev, 0), pdev->irq,
2534 hw->chip_id, chip_name(hw->chip_id), hw->chip_rev);
2535 	err = -ENOMEM;
2536 	if ((dev = sky2_init_netdev(hw, 0, using_dac)) == NULL)
2537 		goto err_out_free_pci;
2538
2539 if ((err = register_netdev(dev))) {
2540 printk(KERN_ERR PFX "%s: cannot register net device\n",
2541 pci_name(pdev));
2542 goto err_out_free_netdev;
2543 }
2544
2545 sky2_show_addr(dev);
2546
2547 if (hw->ports > 1 && (dev1 = sky2_init_netdev(hw, 1, using_dac))) {
2548 if (register_netdev(dev1) == 0)
2549 sky2_show_addr(dev1);
2550 else {
2551 /* Failure to register second port need not be fatal */
2552 printk(KERN_WARNING PFX "register of second port failed\n");
2553 hw->dev[1] = NULL;
2554 free_netdev(dev1);
2555 }
2556 }
2557
2558 return 0;
2559
2560err_out_free_netdev:
2561 free_netdev(dev);
2562
2563err_out_free_irq:
2564 free_irq(pdev->irq, hw);
2565err_out_free_pci:
2566 pci_free_consistent(hw->pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
2567err_out_iounmap:
2568 iounmap(hw->regs);
2569err_out_free_hw:
2570 kfree(hw);
2571err_out_free_regions:
2572 pci_release_regions(pdev);
2573err_out_disable_pdev:
2574 pci_disable_device(pdev);
2575 pci_set_drvdata(pdev, NULL);
2576err_out:
2577 return err;
2578}
2579
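/* Undo everything sky2_probe set up, releasing resources in roughly
 * the reverse order of allocation.  The second port may not exist,
 * so check it before unregistering.
 */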
2580static void __devexit sky2_remove(struct pci_dev *pdev)
2581{
2582 struct sky2_hw *hw = pci_get_drvdata(pdev);
2583 struct net_device *dev0, *dev1;
2584
2585 	if (!hw)
2586 return;
2587
2588 if ((dev1 = hw->dev[1]))
2589 unregister_netdev(dev1);
2590 dev0 = hw->dev[0];
2591 unregister_netdev(dev0);
2592
2593 	sky2_write8(hw, B0_Y2LED, LED_STAT_OFF);
2594
2595 free_irq(pdev->irq, hw);
2596 pci_free_consistent(pdev, STATUS_LE_BYTES,
2597 hw->st_le, hw->st_dma);
2598 pci_release_regions(pdev);
2599 pci_disable_device(pdev);
2600 if (dev1)
2601 free_netdev(dev1);
2602 free_netdev(dev0);
2603 iounmap(hw->regs);
2604 kfree(hw);
2605 pci_set_drvdata(pdev, NULL);
2606}
2607
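/* Power management: suspend brings any running ports down and lets the
 * PCI core arm wake-on-LAN if a port requested it; resume re-runs the
 * full chip reset before bringing the ports back up.
 */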
2608#ifdef CONFIG_PM
2609static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
2610{
2611 struct sky2_hw *hw = pci_get_drvdata(pdev);
2612 int i, wol = 0;
2613
2614 for (i = 0; i < 2; i++) {
2615 struct net_device *dev = hw->dev[i];
2616
2617 if (dev) {
2618 struct sky2_port *sky2 = netdev_priv(dev);
2619 if (netif_running(dev)) {
2620 netif_carrier_off(dev);
2621 sky2_down(dev);
2622 }
2623 netif_device_detach(dev);
2624 wol |= sky2->wol;
2625 }
2626 }
2627
2628 pci_save_state(pdev);
2629 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
2630 pci_disable_device(pdev);
2631 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2632
2633 return 0;
2634}
2635
2636static int sky2_resume(struct pci_dev *pdev)
2637{
2638 struct sky2_hw *hw = pci_get_drvdata(pdev);
2639 int i;
2640
2641 pci_set_power_state(pdev, PCI_D0);
2642 pci_restore_state(pdev);
2643 pci_enable_wake(pdev, PCI_D0, 0);
2644
2645 sky2_reset(hw);
2646
2647 for (i = 0; i < 2; i++) {
2648 struct net_device *dev = hw->dev[i];
2649 if (dev) {
2650 netif_device_attach(dev);
2651 if (netif_running(dev))
2652 sky2_up(dev);
2653 }
2654 }
2655 return 0;
2656}
2657#endif
2658
2659static struct pci_driver sky2_driver = {
2660 .name = DRV_NAME,
2661 .id_table = sky2_id_table,
2662 .probe = sky2_probe,
2663 .remove = __devexit_p(sky2_remove),
2664#ifdef CONFIG_PM
2665 .suspend = sky2_suspend,
2666 .resume = sky2_resume,
2667#endif
2668};
2669
2670static int __init sky2_init_module(void)
2671{
2673 return pci_module_init(&sky2_driver);
2674}
2675
2676static void __exit sky2_cleanup_module(void)
2677{
2678 pci_unregister_driver(&sky2_driver);
2679}
2680
2681module_init(sky2_init_module);
2682module_exit(sky2_cleanup_module);
2683
2684MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
2685MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
2686MODULE_LICENSE("GPL");