author     Jeff Garzik <jgarzik@pobox.com>  2005-06-27 22:49:47 -0400
committer  Jeff Garzik <jgarzik@pobox.com>  2005-06-27 22:49:47 -0400
commit     245ac8738b0b840552d56b842e70e750d65911cc (patch)
tree       2609d6b0a8c603804d71aed65d7f74097ebe0e58
parent     716b43303df605510399d6da0d0dd4e2ea376e7c (diff)
parent     a5fe736eaf9bae1b45317313de04b564441b94f2 (diff)
Merge upstream net/ieee80211.h changes into 'ieee80211' branch.
-rw-r--r--   drivers/net/skge.c            | 1710
-rw-r--r--   drivers/net/skge.h            |  586
-rw-r--r--   include/linux/etherdevice.h   |    2
-rw-r--r--   include/net/ieee80211.h       |   11
4 files changed, 934 insertions, 1375 deletions
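Note: much of the skge.c churn in the diff below is mechanical cleanup; one recurring pattern is converting the PCI ID table to the PCI_DEVICE() helper, which fills in the wildcard subvendor/subdevice fields. A minimal before/after sketch of that pattern only (the "_old"/"_new" table names are illustrative and not in the patch; the helper is assumed to be the standard macro from <linux/pci.h>):

/*
 * Sketch, not part of the patch. PCI_DEVICE(vend, dev) expands to
 *   .vendor = (vend), .device = (dev),
 *   .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
 * so each table entry collapses to a single line.
 */
#include <linux/pci.h>

static const struct pci_device_id skge_id_table_old[] = {
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940,
	  PCI_ANY_ID, PCI_ANY_ID },		/* explicit wildcard sub-IDs */
	{ 0 }
};

static const struct pci_device_id skge_id_table_new[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
	{ 0 }
};

This is why the hunk touching skge_id_table removes the explicit PCI_ANY_ID pairs: the macro supplies them.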
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 30e8d589d167..3dbb1cb09ed8 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -7,7 +7,7 @@
7 * of the original driver such as link fail-over and link management because 7 * of the original driver such as link fail-over and link management because
8 * those should be done at higher levels. 8 * those should be done at higher levels.
9 * 9 *
10 * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org> 10 * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
@@ -42,19 +42,20 @@
42#include "skge.h" 42#include "skge.h"
43 43
44#define DRV_NAME "skge" 44#define DRV_NAME "skge"
45#define DRV_VERSION "0.6" 45#define DRV_VERSION "0.7"
46#define PFX DRV_NAME " " 46#define PFX DRV_NAME " "
47 47
48#define DEFAULT_TX_RING_SIZE 128 48#define DEFAULT_TX_RING_SIZE 128
49#define DEFAULT_RX_RING_SIZE 512 49#define DEFAULT_RX_RING_SIZE 512
50#define MAX_TX_RING_SIZE 1024 50#define MAX_TX_RING_SIZE 1024
51#define MAX_RX_RING_SIZE 4096 51#define MAX_RX_RING_SIZE 4096
52#define RX_COPY_THRESHOLD 128
53#define RX_BUF_SIZE 1536
52#define PHY_RETRIES 1000 54#define PHY_RETRIES 1000
53#define ETH_JUMBO_MTU 9000 55#define ETH_JUMBO_MTU 9000
54#define TX_WATCHDOG (5 * HZ) 56#define TX_WATCHDOG (5 * HZ)
55#define NAPI_WEIGHT 64 57#define NAPI_WEIGHT 64
56#define BLINK_HZ (HZ/4) 58#define BLINK_HZ (HZ/4)
57#define LINK_POLL_HZ (HZ/10)
58 59
59MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver"); 60MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
60MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>"); 61MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
@@ -70,28 +71,17 @@ module_param(debug, int, 0);
70MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 71MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
71 72
72static const struct pci_device_id skge_id_table[] = { 73static const struct pci_device_id skge_id_table[] = {
73 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940, 74 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
74 PCI_ANY_ID, PCI_ANY_ID }, 75 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
75 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B, 76 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
76 PCI_ANY_ID, PCI_ANY_ID }, 77 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
77 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE, 78 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
78 PCI_ANY_ID, PCI_ANY_ID }, 79 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
79 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU, 80 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
80 PCI_ANY_ID, PCI_ANY_ID }, 81 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
81 { PCI_VENDOR_ID_SYSKONNECT, 0x9E00, /* SK-9Exx */ 82 { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
82 PCI_ANY_ID, PCI_ANY_ID }, 83 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1032) },
83 { PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T, 84 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) },
84 PCI_ANY_ID, PCI_ANY_ID },
85 { PCI_VENDOR_ID_MARVELL, 0x4320, /* Gigabit Ethernet Controller */
86 PCI_ANY_ID, PCI_ANY_ID },
87 { PCI_VENDOR_ID_MARVELL, 0x5005, /* Marvell (11ab), Belkin */
88 PCI_ANY_ID, PCI_ANY_ID },
89 { PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD,
90 PCI_ANY_ID, PCI_ANY_ID },
91 { PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1032,
92 PCI_ANY_ID, PCI_ANY_ID },
93 { PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064,
94 PCI_ANY_ID, PCI_ANY_ID },
95 { 0 } 85 { 0 }
96}; 86};
97MODULE_DEVICE_TABLE(pci, skge_id_table); 87MODULE_DEVICE_TABLE(pci, skge_id_table);
@@ -99,19 +89,22 @@ MODULE_DEVICE_TABLE(pci, skge_id_table);
99static int skge_up(struct net_device *dev); 89static int skge_up(struct net_device *dev);
100static int skge_down(struct net_device *dev); 90static int skge_down(struct net_device *dev);
101static void skge_tx_clean(struct skge_port *skge); 91static void skge_tx_clean(struct skge_port *skge);
102static void skge_xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); 92static void xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
103static void skge_gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); 93static void gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
104static void genesis_get_stats(struct skge_port *skge, u64 *data); 94static void genesis_get_stats(struct skge_port *skge, u64 *data);
105static void yukon_get_stats(struct skge_port *skge, u64 *data); 95static void yukon_get_stats(struct skge_port *skge, u64 *data);
106static void yukon_init(struct skge_hw *hw, int port); 96static void yukon_init(struct skge_hw *hw, int port);
107static void yukon_reset(struct skge_hw *hw, int port); 97static void yukon_reset(struct skge_hw *hw, int port);
108static void genesis_mac_init(struct skge_hw *hw, int port); 98static void genesis_mac_init(struct skge_hw *hw, int port);
109static void genesis_reset(struct skge_hw *hw, int port); 99static void genesis_reset(struct skge_hw *hw, int port);
100static void genesis_link_up(struct skge_port *skge);
110 101
102/* Avoid conditionals by using array */
111static const int txqaddr[] = { Q_XA1, Q_XA2 }; 103static const int txqaddr[] = { Q_XA1, Q_XA2 };
112static const int rxqaddr[] = { Q_R1, Q_R2 }; 104static const int rxqaddr[] = { Q_R1, Q_R2 };
113static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; 105static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
114static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; 106static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
107static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
115 108
116/* Don't need to look at whole 16K. 109/* Don't need to look at whole 16K.
117 * last interesting register is descriptor poll timer. 110 * last interesting register is descriptor poll timer.
@@ -154,7 +147,7 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
154static int wol_supported(const struct skge_hw *hw) 147static int wol_supported(const struct skge_hw *hw)
155{ 148{
156 return !((hw->chip_id == CHIP_ID_GENESIS || 149 return !((hw->chip_id == CHIP_ID_GENESIS ||
157 (hw->chip_id == CHIP_ID_YUKON && chip_rev(hw) == 0))); 150 (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)));
158} 151}
159 152
160static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 153static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -170,7 +163,7 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
170 struct skge_port *skge = netdev_priv(dev); 163 struct skge_port *skge = netdev_priv(dev);
171 struct skge_hw *hw = skge->hw; 164 struct skge_hw *hw = skge->hw;
172 165
173 if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0) 166 if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
174 return -EOPNOTSUPP; 167 return -EOPNOTSUPP;
175 168
176 if (wol->wolopts == WAKE_MAGIC && !wol_supported(hw)) 169 if (wol->wolopts == WAKE_MAGIC && !wol_supported(hw))
@@ -190,6 +183,36 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
190 return 0; 183 return 0;
191} 184}
192 185
186/* Determine supported/adverised modes based on hardware.
187 * Note: ethtoool ADVERTISED_xxx == SUPPORTED_xxx
188 */
189static u32 skge_supported_modes(const struct skge_hw *hw)
190{
191 u32 supported;
192
193 if (iscopper(hw)) {
194 supported = SUPPORTED_10baseT_Half
195 | SUPPORTED_10baseT_Full
196 | SUPPORTED_100baseT_Half
197 | SUPPORTED_100baseT_Full
198 | SUPPORTED_1000baseT_Half
199 | SUPPORTED_1000baseT_Full
200 | SUPPORTED_Autoneg| SUPPORTED_TP;
201
202 if (hw->chip_id == CHIP_ID_GENESIS)
203 supported &= ~(SUPPORTED_10baseT_Half
204 | SUPPORTED_10baseT_Full
205 | SUPPORTED_100baseT_Half
206 | SUPPORTED_100baseT_Full);
207
208 else if (hw->chip_id == CHIP_ID_YUKON)
209 supported &= ~SUPPORTED_1000baseT_Half;
210 } else
211 supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
212 | SUPPORTED_Autoneg;
213
214 return supported;
215}
193 216
194static int skge_get_settings(struct net_device *dev, 217static int skge_get_settings(struct net_device *dev,
195 struct ethtool_cmd *ecmd) 218 struct ethtool_cmd *ecmd)
@@ -198,38 +221,13 @@ static int skge_get_settings(struct net_device *dev,
198 struct skge_hw *hw = skge->hw; 221 struct skge_hw *hw = skge->hw;
199 222
200 ecmd->transceiver = XCVR_INTERNAL; 223 ecmd->transceiver = XCVR_INTERNAL;
224 ecmd->supported = skge_supported_modes(hw);
201 225
202 if (iscopper(hw)) { 226 if (iscopper(hw)) {
203 if (hw->chip_id == CHIP_ID_GENESIS)
204 ecmd->supported = SUPPORTED_1000baseT_Full
205 | SUPPORTED_1000baseT_Half
206 | SUPPORTED_Autoneg | SUPPORTED_TP;
207 else {
208 ecmd->supported = SUPPORTED_10baseT_Half
209 | SUPPORTED_10baseT_Full
210 | SUPPORTED_100baseT_Half
211 | SUPPORTED_100baseT_Full
212 | SUPPORTED_1000baseT_Half
213 | SUPPORTED_1000baseT_Full
214 | SUPPORTED_Autoneg| SUPPORTED_TP;
215
216 if (hw->chip_id == CHIP_ID_YUKON)
217 ecmd->supported &= ~SUPPORTED_1000baseT_Half;
218
219 else if (hw->chip_id == CHIP_ID_YUKON_FE)
220 ecmd->supported &= ~(SUPPORTED_1000baseT_Half
221 | SUPPORTED_1000baseT_Full);
222 }
223
224 ecmd->port = PORT_TP; 227 ecmd->port = PORT_TP;
225 ecmd->phy_address = hw->phy_addr; 228 ecmd->phy_address = hw->phy_addr;
226 } else { 229 } else
227 ecmd->supported = SUPPORTED_1000baseT_Full
228 | SUPPORTED_FIBRE
229 | SUPPORTED_Autoneg;
230
231 ecmd->port = PORT_FIBRE; 230 ecmd->port = PORT_FIBRE;
232 }
233 231
234 ecmd->advertising = skge->advertising; 232 ecmd->advertising = skge->advertising;
235 ecmd->autoneg = skge->autoneg; 233 ecmd->autoneg = skge->autoneg;
@@ -238,65 +236,57 @@ static int skge_get_settings(struct net_device *dev,
238 return 0; 236 return 0;
239} 237}
240 238
241static u32 skge_modes(const struct skge_hw *hw)
242{
243 u32 modes = ADVERTISED_Autoneg
244 | ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half
245 | ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half
246 | ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half;
247
248 if (iscopper(hw)) {
249 modes |= ADVERTISED_TP;
250 switch(hw->chip_id) {
251 case CHIP_ID_GENESIS:
252 modes &= ~(ADVERTISED_100baseT_Full
253 | ADVERTISED_100baseT_Half
254 | ADVERTISED_10baseT_Full
255 | ADVERTISED_10baseT_Half);
256 break;
257
258 case CHIP_ID_YUKON:
259 modes &= ~ADVERTISED_1000baseT_Half;
260 break;
261
262 case CHIP_ID_YUKON_FE:
263 modes &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
264 break;
265 }
266 } else {
267 modes |= ADVERTISED_FIBRE;
268 modes &= ~ADVERTISED_1000baseT_Half;
269 }
270 return modes;
271}
272
273static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 239static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
274{ 240{
275 struct skge_port *skge = netdev_priv(dev); 241 struct skge_port *skge = netdev_priv(dev);
276 const struct skge_hw *hw = skge->hw; 242 const struct skge_hw *hw = skge->hw;
243 u32 supported = skge_supported_modes(hw);
277 244
278 if (ecmd->autoneg == AUTONEG_ENABLE) { 245 if (ecmd->autoneg == AUTONEG_ENABLE) {
279 if (ecmd->advertising & skge_modes(hw)) 246 ecmd->advertising = supported;
280 return -EINVAL; 247 skge->duplex = -1;
248 skge->speed = -1;
281 } else { 249 } else {
250 u32 setting;
251
282 switch(ecmd->speed) { 252 switch(ecmd->speed) {
283 case SPEED_1000: 253 case SPEED_1000:
284 if (hw->chip_id == CHIP_ID_YUKON_FE) 254 if (ecmd->duplex == DUPLEX_FULL)
255 setting = SUPPORTED_1000baseT_Full;
256 else if (ecmd->duplex == DUPLEX_HALF)
257 setting = SUPPORTED_1000baseT_Half;
258 else
285 return -EINVAL; 259 return -EINVAL;
286 break; 260 break;
287 case SPEED_100: 261 case SPEED_100:
262 if (ecmd->duplex == DUPLEX_FULL)
263 setting = SUPPORTED_100baseT_Full;
264 else if (ecmd->duplex == DUPLEX_HALF)
265 setting = SUPPORTED_100baseT_Half;
266 else
267 return -EINVAL;
268 break;
269
288 case SPEED_10: 270 case SPEED_10:
289 if (iscopper(hw) || hw->chip_id == CHIP_ID_GENESIS) 271 if (ecmd->duplex == DUPLEX_FULL)
272 setting = SUPPORTED_10baseT_Full;
273 else if (ecmd->duplex == DUPLEX_HALF)
274 setting = SUPPORTED_10baseT_Half;
275 else
290 return -EINVAL; 276 return -EINVAL;
291 break; 277 break;
292 default: 278 default:
293 return -EINVAL; 279 return -EINVAL;
294 } 280 }
281
282 if ((setting & supported) == 0)
283 return -EINVAL;
284
285 skge->speed = ecmd->speed;
286 skge->duplex = ecmd->duplex;
295 } 287 }
296 288
297 skge->autoneg = ecmd->autoneg; 289 skge->autoneg = ecmd->autoneg;
298 skge->speed = ecmd->speed;
299 skge->duplex = ecmd->duplex;
300 skge->advertising = ecmd->advertising; 290 skge->advertising = ecmd->advertising;
301 291
302 if (netif_running(dev)) { 292 if (netif_running(dev)) {
@@ -393,7 +383,7 @@ static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
393{ 383{
394 int i; 384 int i;
395 385
396 switch(stringset) { 386 switch (stringset) {
397 case ETH_SS_STATS: 387 case ETH_SS_STATS:
398 for (i = 0; i < ARRAY_SIZE(skge_stats); i++) 388 for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
399 memcpy(data + i * ETH_GSTRING_LEN, 389 memcpy(data + i * ETH_GSTRING_LEN,
@@ -511,14 +501,6 @@ static int skge_set_rx_csum(struct net_device *dev, u32 data)
511 return 0; 501 return 0;
512} 502}
513 503
514/* Only Yukon II supports TSO (not implemented yet) */
515static int skge_set_tso(struct net_device *dev, u32 data)
516{
517 if (data)
518 return -EOPNOTSUPP;
519 return 0;
520}
521
522static void skge_get_pauseparam(struct net_device *dev, 504static void skge_get_pauseparam(struct net_device *dev,
523 struct ethtool_pauseparam *ecmd) 505 struct ethtool_pauseparam *ecmd)
524{ 506{
@@ -540,9 +522,9 @@ static int skge_set_pauseparam(struct net_device *dev,
540 skge->autoneg = ecmd->autoneg; 522 skge->autoneg = ecmd->autoneg;
541 if (ecmd->rx_pause && ecmd->tx_pause) 523 if (ecmd->rx_pause && ecmd->tx_pause)
542 skge->flow_control = FLOW_MODE_SYMMETRIC; 524 skge->flow_control = FLOW_MODE_SYMMETRIC;
543 else if(ecmd->rx_pause && !ecmd->tx_pause) 525 else if (ecmd->rx_pause && !ecmd->tx_pause)
544 skge->flow_control = FLOW_MODE_REM_SEND; 526 skge->flow_control = FLOW_MODE_REM_SEND;
545 else if(!ecmd->rx_pause && ecmd->tx_pause) 527 else if (!ecmd->rx_pause && ecmd->tx_pause)
546 skge->flow_control = FLOW_MODE_LOC_SEND; 528 skge->flow_control = FLOW_MODE_LOC_SEND;
547 else 529 else
548 skge->flow_control = FLOW_MODE_NONE; 530 skge->flow_control = FLOW_MODE_NONE;
@@ -559,8 +541,6 @@ static inline u32 hwkhz(const struct skge_hw *hw)
559{ 541{
560 if (hw->chip_id == CHIP_ID_GENESIS) 542 if (hw->chip_id == CHIP_ID_GENESIS)
561 return 53215; /* or: 53.125 MHz */ 543 return 53215; /* or: 53.125 MHz */
562 else if (hw->chip_id == CHIP_ID_YUKON_EC)
563 return 125000; /* or: 125.000 MHz */
564 else 544 else
565 return 78215; /* or: 78.125 MHz */ 545 return 78215; /* or: 78.125 MHz */
566} 546}
@@ -643,30 +623,18 @@ static int skge_set_coalesce(struct net_device *dev,
643static void skge_led_on(struct skge_hw *hw, int port) 623static void skge_led_on(struct skge_hw *hw, int port)
644{ 624{
645 if (hw->chip_id == CHIP_ID_GENESIS) { 625 if (hw->chip_id == CHIP_ID_GENESIS) {
646 skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_ON); 626 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
647 skge_write8(hw, B0_LED, LED_STAT_ON); 627 skge_write8(hw, B0_LED, LED_STAT_ON);
648 628
649 skge_write8(hw, SKGEMAC_REG(port, RX_LED_TST), LED_T_ON); 629 skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
650 skge_write32(hw, SKGEMAC_REG(port, RX_LED_VAL), 100); 630 skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
651 skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_START); 631 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
652 632
653 switch (hw->phy_type) { 633 /* For Broadcom Phy only */
654 case SK_PHY_BCOM: 634 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
655 skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL,
656 PHY_B_PEC_LED_ON);
657 break;
658 case SK_PHY_LONE:
659 skge_xm_phy_write(hw, port, PHY_LONE_LED_CFG,
660 0x0800);
661 break;
662 default:
663 skge_write8(hw, SKGEMAC_REG(port, TX_LED_TST), LED_T_ON);
664 skge_write32(hw, SKGEMAC_REG(port, TX_LED_VAL), 100);
665 skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_START);
666 }
667 } else { 635 } else {
668 skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); 636 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
669 skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER, 637 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
670 PHY_M_LED_MO_DUP(MO_LED_ON) | 638 PHY_M_LED_MO_DUP(MO_LED_ON) |
671 PHY_M_LED_MO_10(MO_LED_ON) | 639 PHY_M_LED_MO_10(MO_LED_ON) |
672 PHY_M_LED_MO_100(MO_LED_ON) | 640 PHY_M_LED_MO_100(MO_LED_ON) |
@@ -678,28 +646,17 @@ static void skge_led_on(struct skge_hw *hw, int port)
678static void skge_led_off(struct skge_hw *hw, int port) 646static void skge_led_off(struct skge_hw *hw, int port)
679{ 647{
680 if (hw->chip_id == CHIP_ID_GENESIS) { 648 if (hw->chip_id == CHIP_ID_GENESIS) {
681 skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_OFF); 649 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
682 skge_write8(hw, B0_LED, LED_STAT_OFF); 650 skge_write8(hw, B0_LED, LED_STAT_OFF);
683 651
684 skge_write32(hw, SKGEMAC_REG(port, RX_LED_VAL), 0); 652 skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
685 skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_T_OFF); 653 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
686 654
687 switch (hw->phy_type) { 655 /* Broadcom only */
688 case SK_PHY_BCOM: 656 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
689 skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL,
690 PHY_B_PEC_LED_OFF);
691 break;
692 case SK_PHY_LONE:
693 skge_xm_phy_write(hw, port, PHY_LONE_LED_CFG,
694 PHY_L_LC_LEDT);
695 break;
696 default:
697 skge_write32(hw, SKGEMAC_REG(port, TX_LED_VAL), 0);
698 skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_T_OFF);
699 }
700 } else { 657 } else {
701 skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); 658 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
702 skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER, 659 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
703 PHY_M_LED_MO_DUP(MO_LED_OFF) | 660 PHY_M_LED_MO_DUP(MO_LED_OFF) |
704 PHY_M_LED_MO_10(MO_LED_OFF) | 661 PHY_M_LED_MO_10(MO_LED_OFF) |
705 PHY_M_LED_MO_100(MO_LED_OFF) | 662 PHY_M_LED_MO_100(MO_LED_OFF) |
@@ -730,7 +687,7 @@ static int skge_phys_id(struct net_device *dev, u32 data)
730{ 687{
731 struct skge_port *skge = netdev_priv(dev); 688 struct skge_port *skge = netdev_priv(dev);
732 689
733 if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) 690 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
734 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); 691 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
735 692
736 /* start blinking */ 693 /* start blinking */
@@ -763,8 +720,6 @@ static struct ethtool_ops skge_ethtool_ops = {
763 .set_pauseparam = skge_set_pauseparam, 720 .set_pauseparam = skge_set_pauseparam,
764 .get_coalesce = skge_get_coalesce, 721 .get_coalesce = skge_get_coalesce,
765 .set_coalesce = skge_set_coalesce, 722 .set_coalesce = skge_set_coalesce,
766 .get_tso = ethtool_op_get_tso,
767 .set_tso = skge_set_tso,
768 .get_sg = ethtool_op_get_sg, 723 .get_sg = ethtool_op_get_sg,
769 .set_sg = skge_set_sg, 724 .set_sg = skge_set_sg,
770 .get_tx_csum = ethtool_op_get_tx_csum, 725 .get_tx_csum = ethtool_op_get_tx_csum,
@@ -793,6 +748,7 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
793 748
794 for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { 749 for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
795 e->desc = d; 750 e->desc = d;
751 e->skb = NULL;
796 if (i == ring->count - 1) { 752 if (i == ring->count - 1) {
797 e->next = ring->start; 753 e->next = ring->start;
798 d->next_offset = base; 754 d->next_offset = base;
@@ -806,24 +762,23 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
806 return 0; 762 return 0;
807} 763}
808 764
809/* Setup buffer for receiving */ 765static struct sk_buff *skge_rx_alloc(struct net_device *dev, unsigned int size)
810static inline int skge_rx_alloc(struct skge_port *skge,
811 struct skge_element *e)
812{ 766{
813 unsigned long bufsize = skge->netdev->mtu + ETH_HLEN; /* VLAN? */ 767 struct sk_buff *skb = dev_alloc_skb(size);
814 struct skge_rx_desc *rd = e->desc;
815 struct sk_buff *skb;
816 u64 map;
817 768
818 skb = dev_alloc_skb(bufsize + NET_IP_ALIGN); 769 if (likely(skb)) {
819 if (unlikely(!skb)) { 770 skb->dev = dev;
820 printk(KERN_DEBUG PFX "%s: out of memory for receive\n", 771 skb_reserve(skb, NET_IP_ALIGN);
821 skge->netdev->name);
822 return -ENOMEM;
823 } 772 }
773 return skb;
774}
824 775
825 skb->dev = skge->netdev; 776/* Allocate and setup a new buffer for receiving */
826 skb_reserve(skb, NET_IP_ALIGN); 777static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
778 struct sk_buff *skb, unsigned int bufsize)
779{
780 struct skge_rx_desc *rd = e->desc;
781 u64 map;
827 782
828 map = pci_map_single(skge->hw->pdev, skb->data, bufsize, 783 map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
829 PCI_DMA_FROMDEVICE); 784 PCI_DMA_FROMDEVICE);
@@ -841,55 +796,69 @@ static inline int skge_rx_alloc(struct skge_port *skge,
841 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; 796 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
842 pci_unmap_addr_set(e, mapaddr, map); 797 pci_unmap_addr_set(e, mapaddr, map);
843 pci_unmap_len_set(e, maplen, bufsize); 798 pci_unmap_len_set(e, maplen, bufsize);
844 return 0;
845} 799}
846 800
847/* Free all unused buffers in receive ring, assumes receiver stopped */ 801/* Resume receiving using existing skb,
802 * Note: DMA address is not changed by chip.
803 * MTU not changed while receiver active.
804 */
805static void skge_rx_reuse(struct skge_element *e, unsigned int size)
806{
807 struct skge_rx_desc *rd = e->desc;
808
809 rd->csum2 = 0;
810 rd->csum2_start = ETH_HLEN;
811
812 wmb();
813
814 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
815}
816
817
818/* Free all buffers in receive ring, assumes receiver stopped */
848static void skge_rx_clean(struct skge_port *skge) 819static void skge_rx_clean(struct skge_port *skge)
849{ 820{
850 struct skge_hw *hw = skge->hw; 821 struct skge_hw *hw = skge->hw;
851 struct skge_ring *ring = &skge->rx_ring; 822 struct skge_ring *ring = &skge->rx_ring;
852 struct skge_element *e; 823 struct skge_element *e;
853 824
854 for (e = ring->to_clean; e != ring->to_use; e = e->next) { 825 e = ring->start;
826 do {
855 struct skge_rx_desc *rd = e->desc; 827 struct skge_rx_desc *rd = e->desc;
856 rd->control = 0; 828 rd->control = 0;
857 829 if (e->skb) {
858 pci_unmap_single(hw->pdev, 830 pci_unmap_single(hw->pdev,
859 pci_unmap_addr(e, mapaddr), 831 pci_unmap_addr(e, mapaddr),
860 pci_unmap_len(e, maplen), 832 pci_unmap_len(e, maplen),
861 PCI_DMA_FROMDEVICE); 833 PCI_DMA_FROMDEVICE);
862 dev_kfree_skb(e->skb); 834 dev_kfree_skb(e->skb);
863 e->skb = NULL; 835 e->skb = NULL;
864 } 836 }
865 ring->to_clean = e; 837 } while ((e = e->next) != ring->start);
866} 838}
867 839
840
868/* Allocate buffers for receive ring 841/* Allocate buffers for receive ring
869 * For receive: to_use is refill location 842 * For receive: to_clean is next received frame.
870 * to_clean is next received frame.
871 *
872 * if (to_use == to_clean)
873 * then ring all frames in ring need buffers
874 * if (to_use->next == to_clean)
875 * then ring all frames in ring have buffers
876 */ 843 */
877static int skge_rx_fill(struct skge_port *skge) 844static int skge_rx_fill(struct skge_port *skge)
878{ 845{
879 struct skge_ring *ring = &skge->rx_ring; 846 struct skge_ring *ring = &skge->rx_ring;
880 struct skge_element *e; 847 struct skge_element *e;
881 int ret = 0; 848 unsigned int bufsize = skge->rx_buf_size;
882 849
883 for (e = ring->to_use; e->next != ring->to_clean; e = e->next) { 850 e = ring->start;
884 if (skge_rx_alloc(skge, e)) { 851 do {
885 ret = 1; 852 struct sk_buff *skb = skge_rx_alloc(skge->netdev, bufsize);
886 break;
887 }
888 853
889 } 854 if (!skb)
890 ring->to_use = e; 855 return -ENOMEM;
856
857 skge_rx_setup(skge, e, skb, bufsize);
858 } while ( (e = e->next) != ring->start);
891 859
892 return ret; 860 ring->to_clean = ring->start;
861 return 0;
893} 862}
894 863
895static void skge_link_up(struct skge_port *skge) 864static void skge_link_up(struct skge_port *skge)
@@ -919,50 +888,50 @@ static void skge_link_down(struct skge_port *skge)
919 printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name); 888 printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
920} 889}
921 890
922static u16 skge_xm_phy_read(struct skge_hw *hw, int port, u16 reg) 891static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
923{ 892{
924 int i; 893 int i;
925 u16 v; 894 u16 v;
926 895
927 skge_xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); 896 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
928 v = skge_xm_read16(hw, port, XM_PHY_DATA); 897 v = xm_read16(hw, port, XM_PHY_DATA);
929 if (hw->phy_type != SK_PHY_XMAC) {
930 for (i = 0; i < PHY_RETRIES; i++) {
931 udelay(1);
932 if (skge_xm_read16(hw, port, XM_MMU_CMD)
933 & XM_MMU_PHY_RDY)
934 goto ready;
935 }
936 898
937 printk(KERN_WARNING PFX "%s: phy read timed out\n", 899 /* Need to wait for external PHY */
938 hw->dev[port]->name); 900 for (i = 0; i < PHY_RETRIES; i++) {
939 return 0; 901 udelay(1);
940 ready: 902 if (xm_read16(hw, port, XM_MMU_CMD)
941 v = skge_xm_read16(hw, port, XM_PHY_DATA); 903 & XM_MMU_PHY_RDY)
904 goto ready;
942 } 905 }
943 906
907 printk(KERN_WARNING PFX "%s: phy read timed out\n",
908 hw->dev[port]->name);
909 return 0;
910 ready:
911 v = xm_read16(hw, port, XM_PHY_DATA);
912
944 return v; 913 return v;
945} 914}
946 915
947static void skge_xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) 916static void xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
948{ 917{
949 int i; 918 int i;
950 919
951 skge_xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); 920 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
952 for (i = 0; i < PHY_RETRIES; i++) { 921 for (i = 0; i < PHY_RETRIES; i++) {
953 if (!(skge_xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) 922 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
954 goto ready; 923 goto ready;
955 cpu_relax(); 924 udelay(1);
956 } 925 }
957 printk(KERN_WARNING PFX "%s: phy write failed to come ready\n", 926 printk(KERN_WARNING PFX "%s: phy write failed to come ready\n",
958 hw->dev[port]->name); 927 hw->dev[port]->name);
959 928
960 929
961 ready: 930 ready:
962 skge_xm_write16(hw, port, XM_PHY_DATA, val); 931 xm_write16(hw, port, XM_PHY_DATA, val);
963 for (i = 0; i < PHY_RETRIES; i++) { 932 for (i = 0; i < PHY_RETRIES; i++) {
964 udelay(1); 933 udelay(1);
965 if (!(skge_xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) 934 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
966 return; 935 return;
967 } 936 }
968 printk(KERN_WARNING PFX "%s: phy write timed out\n", 937 printk(KERN_WARNING PFX "%s: phy write timed out\n",
@@ -999,34 +968,112 @@ static void genesis_init(struct skge_hw *hw)
999 968
1000static void genesis_reset(struct skge_hw *hw, int port) 969static void genesis_reset(struct skge_hw *hw, int port)
1001{ 970{
1002 int i; 971 const u8 zero[8] = { 0 };
1003 u64 zero = 0;
1004 972
1005 /* reset the statistics module */ 973 /* reset the statistics module */
1006 skge_xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); 974 xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
1007 skge_xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */ 975 xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */
1008 skge_xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */ 976 xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */
1009 skge_xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */ 977 xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */
1010 skge_xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */ 978 xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */
1011 979
1012 /* disable all PHY IRQs */ 980 /* disable Broadcom PHY IRQ */
1013 if (hw->phy_type == SK_PHY_BCOM) 981 xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);
1014 skge_xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);
1015 982
1016 skge_xm_outhash(hw, port, XM_HSM, (u8 *) &zero); 983 xm_outhash(hw, port, XM_HSM, zero);
1017 for (i = 0; i < 15; i++)
1018 skge_xm_outaddr(hw, port, XM_EXM(i), (u8 *) &zero);
1019 skge_xm_outhash(hw, port, XM_SRC_CHK, (u8 *) &zero);
1020} 984}
1021 985
1022 986
1023static void genesis_mac_init(struct skge_hw *hw, int port) 987/* Convert mode to MII values */
988static const u16 phy_pause_map[] = {
989 [FLOW_MODE_NONE] = 0,
990 [FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM,
991 [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
992 [FLOW_MODE_REM_SEND] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
993};
994
995
996/* Check status of Broadcom phy link */
997static void bcom_check_link(struct skge_hw *hw, int port)
1024{ 998{
1025 struct skge_port *skge = netdev_priv(hw->dev[port]); 999 struct net_device *dev = hw->dev[port];
1000 struct skge_port *skge = netdev_priv(dev);
1001 u16 status;
1002
1003 /* read twice because of latch */
1004 (void) xm_phy_read(hw, port, PHY_BCOM_STAT);
1005 status = xm_phy_read(hw, port, PHY_BCOM_STAT);
1006
1007 pr_debug("bcom_check_link status=0x%x\n", status);
1008
1009 if ((status & PHY_ST_LSYNC) == 0) {
1010 u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
1011 cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
1012 xm_write16(hw, port, XM_MMU_CMD, cmd);
1013 /* dummy read to ensure writing */
1014 (void) xm_read16(hw, port, XM_MMU_CMD);
1015
1016 if (netif_carrier_ok(dev))
1017 skge_link_down(skge);
1018 } else {
1019 if (skge->autoneg == AUTONEG_ENABLE &&
1020 (status & PHY_ST_AN_OVER)) {
1021 u16 lpa = xm_phy_read(hw, port, PHY_BCOM_AUNE_LP);
1022 u16 aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);
1023
1024 if (lpa & PHY_B_AN_RF) {
1025 printk(KERN_NOTICE PFX "%s: remote fault\n",
1026 dev->name);
1027 return;
1028 }
1029
1030 /* Check Duplex mismatch */
1031 switch(aux & PHY_B_AS_AN_RES_MSK) {
1032 case PHY_B_RES_1000FD:
1033 skge->duplex = DUPLEX_FULL;
1034 break;
1035 case PHY_B_RES_1000HD:
1036 skge->duplex = DUPLEX_HALF;
1037 break;
1038 default:
1039 printk(KERN_NOTICE PFX "%s: duplex mismatch\n",
1040 dev->name);
1041 return;
1042 }
1043
1044
1045 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
1046 switch (aux & PHY_B_AS_PAUSE_MSK) {
1047 case PHY_B_AS_PAUSE_MSK:
1048 skge->flow_control = FLOW_MODE_SYMMETRIC;
1049 break;
1050 case PHY_B_AS_PRR:
1051 skge->flow_control = FLOW_MODE_REM_SEND;
1052 break;
1053 case PHY_B_AS_PRT:
1054 skge->flow_control = FLOW_MODE_LOC_SEND;
1055 break;
1056 default:
1057 skge->flow_control = FLOW_MODE_NONE;
1058 }
1059
1060 skge->speed = SPEED_1000;
1061 }
1062
1063 if (!netif_carrier_ok(dev))
1064 genesis_link_up(skge);
1065 }
1066}
1067
1068/* Broadcom 5400 only supports giagabit! SysKonnect did not put an additional
1069 * Phy on for 100 or 10Mbit operation
1070 */
1071static void bcom_phy_init(struct skge_port *skge, int jumbo)
1072{
1073 struct skge_hw *hw = skge->hw;
1074 int port = skge->port;
1026 int i; 1075 int i;
1027 u32 r; 1076 u16 id1, r, ext, ctl;
1028 u16 id1;
1029 u16 ctrl1, ctrl2, ctrl3, ctrl4, ctrl5;
1030 1077
1031 /* magic workaround patterns for Broadcom */ 1078 /* magic workaround patterns for Broadcom */
1032 static const struct { 1079 static const struct {
@@ -1042,16 +1089,120 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1042 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 }, 1089 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
1043 }; 1090 };
1044 1091
1092 pr_debug("bcom_phy_init\n");
1093
1094 /* read Id from external PHY (all have the same address) */
1095 id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);
1096
1097 /* Optimize MDIO transfer by suppressing preamble. */
1098 r = xm_read16(hw, port, XM_MMU_CMD);
1099 r |= XM_MMU_NO_PRE;
1100 xm_write16(hw, port, XM_MMU_CMD,r);
1101
1102 switch(id1) {
1103 case PHY_BCOM_ID1_C0:
1104 /*
1105 * Workaround BCOM Errata for the C0 type.
1106 * Write magic patterns to reserved registers.
1107 */
1108 for (i = 0; i < ARRAY_SIZE(C0hack); i++)
1109 xm_phy_write(hw, port,
1110 C0hack[i].reg, C0hack[i].val);
1111
1112 break;
1113 case PHY_BCOM_ID1_A1:
1114 /*
1115 * Workaround BCOM Errata for the A1 type.
1116 * Write magic patterns to reserved registers.
1117 */
1118 for (i = 0; i < ARRAY_SIZE(A1hack); i++)
1119 xm_phy_write(hw, port,
1120 A1hack[i].reg, A1hack[i].val);
1121 break;
1122 }
1123
1124 /*
1125 * Workaround BCOM Errata (#10523) for all BCom PHYs.
1126 * Disable Power Management after reset.
1127 */
1128 r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
1129 r |= PHY_B_AC_DIS_PM;
1130 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);
1131
1132 /* Dummy read */
1133 xm_read16(hw, port, XM_ISRC);
1134
1135 ext = PHY_B_PEC_EN_LTR; /* enable tx led */
1136 ctl = PHY_CT_SP1000; /* always 1000mbit */
1137
1138 if (skge->autoneg == AUTONEG_ENABLE) {
1139 /*
1140 * Workaround BCOM Errata #1 for the C5 type.
1141 * 1000Base-T Link Acquisition Failure in Slave Mode
1142 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
1143 */
1144 u16 adv = PHY_B_1000C_RD;
1145 if (skge->advertising & ADVERTISED_1000baseT_Half)
1146 adv |= PHY_B_1000C_AHD;
1147 if (skge->advertising & ADVERTISED_1000baseT_Full)
1148 adv |= PHY_B_1000C_AFD;
1149 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);
1150
1151 ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
1152 } else {
1153 if (skge->duplex == DUPLEX_FULL)
1154 ctl |= PHY_CT_DUP_MD;
1155 /* Force to slave */
1156 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
1157 }
1158
1159 /* Set autonegotiation pause parameters */
1160 xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
1161 phy_pause_map[skge->flow_control] | PHY_AN_CSMA);
1162
1163 /* Handle Jumbo frames */
1164 if (jumbo) {
1165 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
1166 PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK);
1167
1168 ext |= PHY_B_PEC_HIGH_LA;
1169
1170 }
1171
1172 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
1173 xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);
1174
1175 /* Use link status change interrrupt */
1176 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
1177
1178 bcom_check_link(hw, port);
1179}
1180
1181static void genesis_mac_init(struct skge_hw *hw, int port)
1182{
1183 struct net_device *dev = hw->dev[port];
1184 struct skge_port *skge = netdev_priv(dev);
1185 int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
1186 int i;
1187 u32 r;
1188 const u8 zero[6] = { 0 };
1189
1190 /* Clear MIB counters */
1191 xm_write16(hw, port, XM_STAT_CMD,
1192 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1193 /* Clear two times according to Errata #3 */
1194 xm_write16(hw, port, XM_STAT_CMD,
1195 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1045 1196
1046 /* initialize Rx, Tx and Link LED */ 1197 /* initialize Rx, Tx and Link LED */
1047 skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_ON); 1198 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
1048 skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON); 1199 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);
1049 1200
1050 skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_START); 1201 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
1051 skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_START); 1202 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
1052 1203
1053 /* Unreset the XMAC. */ 1204 /* Unreset the XMAC. */
1054 skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); 1205 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
1055 1206
1056 /* 1207 /*
1057 * Perform additional initialization for external PHYs, 1208 * Perform additional initialization for external PHYs,
@@ -1059,67 +1210,56 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1059 * GMII mode. 1210 * GMII mode.
1060 */ 1211 */
1061 spin_lock_bh(&hw->phy_lock); 1212 spin_lock_bh(&hw->phy_lock);
1062 if (hw->phy_type != SK_PHY_XMAC) { 1213 /* Take external Phy out of reset */
1063 /* Take PHY out of reset. */ 1214 r = skge_read32(hw, B2_GP_IO);
1064 r = skge_read32(hw, B2_GP_IO); 1215 if (port == 0)
1065 if (port == 0) 1216 r |= GP_DIR_0|GP_IO_0;
1066 r |= GP_DIR_0|GP_IO_0; 1217 else
1067 else 1218 r |= GP_DIR_2|GP_IO_2;
1068 r |= GP_DIR_2|GP_IO_2;
1069
1070 skge_write32(hw, B2_GP_IO, r);
1071 skge_read32(hw, B2_GP_IO);
1072
1073 /* Enable GMII mode on the XMAC. */
1074 skge_xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
1075
1076 id1 = skge_xm_phy_read(hw, port, PHY_XMAC_ID1);
1077
1078 /* Optimize MDIO transfer by suppressing preamble. */
1079 skge_xm_write16(hw, port, XM_MMU_CMD,
1080 skge_xm_read16(hw, port, XM_MMU_CMD)
1081 | XM_MMU_NO_PRE);
1082
1083 if (id1 == PHY_BCOM_ID1_C0) {
1084 /*
1085 * Workaround BCOM Errata for the C0 type.
1086 * Write magic patterns to reserved registers.
1087 */
1088 for (i = 0; i < ARRAY_SIZE(C0hack); i++)
1089 skge_xm_phy_write(hw, port,
1090 C0hack[i].reg, C0hack[i].val);
1091
1092 } else if (id1 == PHY_BCOM_ID1_A1) {
1093 /*
1094 * Workaround BCOM Errata for the A1 type.
1095 * Write magic patterns to reserved registers.
1096 */
1097 for (i = 0; i < ARRAY_SIZE(A1hack); i++)
1098 skge_xm_phy_write(hw, port,
1099 A1hack[i].reg, A1hack[i].val);
1100 }
1101 1219
1102 /* 1220 skge_write32(hw, B2_GP_IO, r);
1103 * Workaround BCOM Errata (#10523) for all BCom PHYs. 1221 skge_read32(hw, B2_GP_IO);
1104 * Disable Power Management after reset. 1222 spin_unlock_bh(&hw->phy_lock);
1105 */
1106 r = skge_xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
1107 skge_xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r | PHY_B_AC_DIS_PM);
1108 }
1109 1223
1110 /* Dummy read */ 1224 /* Enable GMII interfac */
1111 skge_xm_read16(hw, port, XM_ISRC); 1225 xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
1226
1227 bcom_phy_init(skge, jumbo);
1228
1229 /* Set Station Address */
1230 xm_outaddr(hw, port, XM_SA, dev->dev_addr);
1231
1232 /* We don't use match addresses so clear */
1233 for (i = 1; i < 16; i++)
1234 xm_outaddr(hw, port, XM_EXM(i), zero);
1112 1235
1113 r = skge_xm_read32(hw, port, XM_MODE); 1236 /* configure Rx High Water Mark (XM_RX_HI_WM) */
1114 skge_xm_write32(hw, port, XM_MODE, r|XM_MD_CSA); 1237 xm_write16(hw, port, XM_RX_HI_WM, 1450);
1115 1238
1116 /* We don't need the FCS appended to the packet. */ 1239 /* We don't need the FCS appended to the packet. */
1117 r = skge_xm_read16(hw, port, XM_RX_CMD); 1240 r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;
1118 skge_xm_write16(hw, port, XM_RX_CMD, r | XM_RX_STRIP_FCS); 1241 if (jumbo)
1242 r |= XM_RX_BIG_PK_OK;
1243
1244 if (skge->duplex == DUPLEX_HALF) {
1245 /*
1246 * If in manual half duplex mode the other side might be in
1247 * full duplex mode, so ignore if a carrier extension is not seen
1248 * on frames received
1249 */
1250 r |= XM_RX_DIS_CEXT;
1251 }
1252 xm_write16(hw, port, XM_RX_CMD, r);
1253
1119 1254
1120 /* We want short frames padded to 60 bytes. */ 1255 /* We want short frames padded to 60 bytes. */
1121 r = skge_xm_read16(hw, port, XM_TX_CMD); 1256 xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);
1122 skge_xm_write16(hw, port, XM_TX_CMD, r | XM_TX_AUTO_PAD); 1257
1258 /*
1259 * Bump up the transmit threshold. This helps hold off transmit
1260 * underruns when we're blasting traffic from both ports at once.
1261 */
1262 xm_write16(hw, port, XM_TX_THR, 512);
1123 1263
1124 /* 1264 /*
1125 * Enable the reception of all error frames. This is is 1265 * Enable the reception of all error frames. This is is
@@ -1135,19 +1275,22 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1135 * case the XMAC will start transfering frames out of the 1275 * case the XMAC will start transfering frames out of the
1136 * RX FIFO as soon as the FIFO threshold is reached. 1276 * RX FIFO as soon as the FIFO threshold is reached.
1137 */ 1277 */
1138 r = skge_xm_read32(hw, port, XM_MODE); 1278 xm_write32(hw, port, XM_MODE, XM_DEF_MODE);
1139 skge_xm_write32(hw, port, XM_MODE,
1140 XM_MD_RX_CRCE|XM_MD_RX_LONG|XM_MD_RX_RUNT|
1141 XM_MD_RX_ERR|XM_MD_RX_IRLE);
1142 1279
1143 skge_xm_outaddr(hw, port, XM_SA, hw->dev[port]->dev_addr);
1144 skge_xm_outaddr(hw, port, XM_EXM(0), hw->dev[port]->dev_addr);
1145 1280
1146 /* 1281 /*
1147 * Bump up the transmit threshold. This helps hold off transmit 1282 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
1148 * underruns when we're blasting traffic from both ports at once. 1283 * - Enable all bits excepting 'Octets Rx OK Low CntOv'
1284 * and 'Octets Rx OK Hi Cnt Ov'.
1149 */ 1285 */
1150 skge_xm_write16(hw, port, XM_TX_THR, 512); 1286 xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);
1287
1288 /*
1289 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
1290 * - Enable all bits excepting 'Octets Tx OK Low CntOv'
1291 * and 'Octets Tx OK Hi Cnt Ov'.
1292 */
1293 xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);
1151 1294
1152 /* Configure MAC arbiter */ 1295 /* Configure MAC arbiter */
1153 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); 1296 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
@@ -1164,137 +1307,30 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1164 skge_write8(hw, B3_MA_RCINI_TX2, 0); 1307 skge_write8(hw, B3_MA_RCINI_TX2, 0);
1165 1308
1166 /* Configure Rx MAC FIFO */ 1309 /* Configure Rx MAC FIFO */
1167 skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_RST_CLR); 1310 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
1168 skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT); 1311 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
1169 skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD); 1312 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);
1170 1313
1171 /* Configure Tx MAC FIFO */ 1314 /* Configure Tx MAC FIFO */
1172 skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_RST_CLR); 1315 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
1173 skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF); 1316 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
1174 skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD); 1317 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);
1175 1318
1176 if (hw->dev[port]->mtu > ETH_DATA_LEN) { 1319 if (jumbo) {
1177 /* Enable frame flushing if jumbo frames used */ 1320 /* Enable frame flushing if jumbo frames used */
1178 skge_write16(hw, SKGEMAC_REG(port,RX_MFF_CTRL1), MFF_ENA_FLUSH); 1321 skge_write16(hw, SK_REG(port,RX_MFF_CTRL1), MFF_ENA_FLUSH);
1179 } else { 1322 } else {
1180 /* enable timeout timers if normal frames */ 1323 /* enable timeout timers if normal frames */
1181 skge_write16(hw, B3_PA_CTRL, 1324 skge_write16(hw, B3_PA_CTRL,
1182 port == 0 ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2); 1325 (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
1183 } 1326 }
1184
1185
1186 r = skge_xm_read16(hw, port, XM_RX_CMD);
1187 if (hw->dev[port]->mtu > ETH_DATA_LEN)
1188 skge_xm_write16(hw, port, XM_RX_CMD, r | XM_RX_BIG_PK_OK);
1189 else
1190 skge_xm_write16(hw, port, XM_RX_CMD, r & ~(XM_RX_BIG_PK_OK));
1191
1192 switch (hw->phy_type) {
1193 case SK_PHY_XMAC:
1194 if (skge->autoneg == AUTONEG_ENABLE) {
1195 ctrl1 = PHY_X_AN_FD | PHY_X_AN_HD;
1196
1197 switch (skge->flow_control) {
1198 case FLOW_MODE_NONE:
1199 ctrl1 |= PHY_X_P_NO_PAUSE;
1200 break;
1201 case FLOW_MODE_LOC_SEND:
1202 ctrl1 |= PHY_X_P_ASYM_MD;
1203 break;
1204 case FLOW_MODE_SYMMETRIC:
1205 ctrl1 |= PHY_X_P_SYM_MD;
1206 break;
1207 case FLOW_MODE_REM_SEND:
1208 ctrl1 |= PHY_X_P_BOTH_MD;
1209 break;
1210 }
1211
1212 skge_xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl1);
1213 ctrl2 = PHY_CT_ANE | PHY_CT_RE_CFG;
1214 } else {
1215 ctrl2 = 0;
1216 if (skge->duplex == DUPLEX_FULL)
1217 ctrl2 |= PHY_CT_DUP_MD;
1218 }
1219
1220 skge_xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl2);
1221 break;
1222
1223 case SK_PHY_BCOM:
1224 ctrl1 = PHY_CT_SP1000;
1225 ctrl2 = 0;
1226 ctrl3 = PHY_SEL_TYPE;
1227 ctrl4 = PHY_B_PEC_EN_LTR;
1228 ctrl5 = PHY_B_AC_TX_TST;
1229
1230 if (skge->autoneg == AUTONEG_ENABLE) {
1231 /*
1232 * Workaround BCOM Errata #1 for the C5 type.
1233 * 1000Base-T Link Acquisition Failure in Slave Mode
1234 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
1235 */
1236 ctrl2 |= PHY_B_1000C_RD;
1237 if (skge->advertising & ADVERTISED_1000baseT_Half)
1238 ctrl2 |= PHY_B_1000C_AHD;
1239 if (skge->advertising & ADVERTISED_1000baseT_Full)
1240 ctrl2 |= PHY_B_1000C_AFD;
1241
1242 /* Set Flow-control capabilities */
1243 switch (skge->flow_control) {
1244 case FLOW_MODE_NONE:
1245 ctrl3 |= PHY_B_P_NO_PAUSE;
1246 break;
1247 case FLOW_MODE_LOC_SEND:
1248 ctrl3 |= PHY_B_P_ASYM_MD;
1249 break;
1250 case FLOW_MODE_SYMMETRIC:
1251 ctrl3 |= PHY_B_P_SYM_MD;
1252 break;
1253 case FLOW_MODE_REM_SEND:
1254 ctrl3 |= PHY_B_P_BOTH_MD;
1255 break;
1256 }
1257
1258 /* Restart Auto-negotiation */
1259 ctrl1 |= PHY_CT_ANE | PHY_CT_RE_CFG;
1260 } else {
1261 if (skge->duplex == DUPLEX_FULL)
1262 ctrl1 |= PHY_CT_DUP_MD;
1263
1264 ctrl2 |= PHY_B_1000C_MSE; /* set it to Slave */
1265 }
1266
1267 skge_xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, ctrl2);
1268 skge_xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV, ctrl3);
1269
1270 if (skge->netdev->mtu > ETH_DATA_LEN) {
1271 ctrl4 |= PHY_B_PEC_HIGH_LA;
1272 ctrl5 |= PHY_B_AC_LONG_PACK;
1273
1274 skge_xm_phy_write(hw, port,PHY_BCOM_AUX_CTRL, ctrl5);
1275 }
1276
1277 skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ctrl4);
1278 skge_xm_phy_write(hw, port, PHY_BCOM_CTRL, ctrl1);
1279 break;
1280 }
1281 spin_unlock_bh(&hw->phy_lock);
1282
1283 /* Clear MIB counters */
1284 skge_xm_write16(hw, port, XM_STAT_CMD,
1285 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1286 /* Clear two times according to Errata #3 */
1287 skge_xm_write16(hw, port, XM_STAT_CMD,
1288 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1289
1290 /* Start polling for link status */
1291 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1292} 1327}
1293 1328
1294static void genesis_stop(struct skge_port *skge) 1329static void genesis_stop(struct skge_port *skge)
1295{ 1330{
1296 struct skge_hw *hw = skge->hw; 1331 struct skge_hw *hw = skge->hw;
1297 int port = skge->port; 1332 int port = skge->port;
1333 u32 reg;
1298 1334
1299 /* Clear Tx packet arbiter timeout IRQ */ 1335 /* Clear Tx packet arbiter timeout IRQ */
1300 skge_write16(hw, B3_PA_CTRL, 1336 skge_write16(hw, B3_PA_CTRL,
@@ -1304,33 +1340,30 @@ static void genesis_stop(struct skge_port *skge)
1304 * If the transfer stucks at the MAC the STOP command will not 1340 * If the transfer stucks at the MAC the STOP command will not
1305 * terminate if we don't flush the XMAC's transmit FIFO ! 1341 * terminate if we don't flush the XMAC's transmit FIFO !
1306 */ 1342 */
1307 skge_xm_write32(hw, port, XM_MODE, 1343 xm_write32(hw, port, XM_MODE,
1308 skge_xm_read32(hw, port, XM_MODE)|XM_MD_FTF); 1344 xm_read32(hw, port, XM_MODE)|XM_MD_FTF);
1309 1345
1310 1346
1311 /* Reset the MAC */ 1347 /* Reset the MAC */
1312 skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); 1348 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
1313 1349
1314 /* For external PHYs there must be special handling */ 1350 /* For external PHYs there must be special handling */
1315 if (hw->phy_type != SK_PHY_XMAC) { 1351 reg = skge_read32(hw, B2_GP_IO);
1316 u32 reg = skge_read32(hw, B2_GP_IO); 1352 if (port == 0) {
1317 1353 reg |= GP_DIR_0;
1318 if (port == 0) { 1354 reg &= ~GP_IO_0;
1319 reg |= GP_DIR_0; 1355 } else {
1320 reg &= ~GP_IO_0; 1356 reg |= GP_DIR_2;
1321 } else { 1357 reg &= ~GP_IO_2;
1322 reg |= GP_DIR_2;
1323 reg &= ~GP_IO_2;
1324 }
1325 skge_write32(hw, B2_GP_IO, reg);
1326 skge_read32(hw, B2_GP_IO);
1327 } 1358 }
1359 skge_write32(hw, B2_GP_IO, reg);
1360 skge_read32(hw, B2_GP_IO);
1328 1361
1329 skge_xm_write16(hw, port, XM_MMU_CMD, 1362 xm_write16(hw, port, XM_MMU_CMD,
1330 skge_xm_read16(hw, port, XM_MMU_CMD) 1363 xm_read16(hw, port, XM_MMU_CMD)
1331 & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); 1364 & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
1332 1365
1333 skge_xm_read16(hw, port, XM_MMU_CMD); 1366 xm_read16(hw, port, XM_MMU_CMD);
1334} 1367}
1335 1368
1336 1369
@@ -1341,11 +1374,11 @@ static void genesis_get_stats(struct skge_port *skge, u64 *data)
1341 int i; 1374 int i;
1342 unsigned long timeout = jiffies + HZ; 1375 unsigned long timeout = jiffies + HZ;
1343 1376
1344 skge_xm_write16(hw, port, 1377 xm_write16(hw, port,
1345 XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC); 1378 XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);
1346 1379
1347 /* wait for update to complete */ 1380 /* wait for update to complete */
1348 while (skge_xm_read16(hw, port, XM_STAT_CMD) 1381 while (xm_read16(hw, port, XM_STAT_CMD)
1349 & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) { 1382 & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) {
1350 if (time_after(jiffies, timeout)) 1383 if (time_after(jiffies, timeout))
1351 break; 1384 break;
@@ -1353,68 +1386,60 @@ static void genesis_get_stats(struct skge_port *skge, u64 *data)
1353 } 1386 }
1354 1387
1355 /* special case for 64 bit octet counter */ 1388 /* special case for 64 bit octet counter */
1356 data[0] = (u64) skge_xm_read32(hw, port, XM_TXO_OK_HI) << 32 1389 data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32
1357 | skge_xm_read32(hw, port, XM_TXO_OK_LO); 1390 | xm_read32(hw, port, XM_TXO_OK_LO);
1358 data[1] = (u64) skge_xm_read32(hw, port, XM_RXO_OK_HI) << 32 1391 data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32
1359 | skge_xm_read32(hw, port, XM_RXO_OK_LO); 1392 | xm_read32(hw, port, XM_RXO_OK_LO);
1360 1393
1361 for (i = 2; i < ARRAY_SIZE(skge_stats); i++) 1394 for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
1362 data[i] = skge_xm_read32(hw, port, skge_stats[i].xmac_offset); 1395 data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset);
1363} 1396}
1364 1397
1365static void genesis_mac_intr(struct skge_hw *hw, int port) 1398static void genesis_mac_intr(struct skge_hw *hw, int port)
1366{ 1399{
1367 struct skge_port *skge = netdev_priv(hw->dev[port]); 1400 struct skge_port *skge = netdev_priv(hw->dev[port]);
1368 u16 status = skge_xm_read16(hw, port, XM_ISRC); 1401 u16 status = xm_read16(hw, port, XM_ISRC);
1369 1402
1370 pr_debug("genesis_intr status %x\n", status); 1403 if (netif_msg_intr(skge))
1371 if (hw->phy_type == SK_PHY_XMAC) { 1404 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
1372 /* LInk down, start polling for state change */ 1405 skge->netdev->name, status);
1373 if (status & XM_IS_INP_ASS) {
1374 skge_xm_write16(hw, port, XM_IMSK,
1375 skge_xm_read16(hw, port, XM_IMSK) | XM_IS_INP_ASS);
1376 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1377 }
1378 else if (status & XM_IS_AND)
1379 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1380 }
1381 1406
1382 if (status & XM_IS_TXF_UR) { 1407 if (status & XM_IS_TXF_UR) {
1383 skge_xm_write32(hw, port, XM_MODE, XM_MD_FTF); 1408 xm_write32(hw, port, XM_MODE, XM_MD_FTF);
1384 ++skge->net_stats.tx_fifo_errors; 1409 ++skge->net_stats.tx_fifo_errors;
1385 } 1410 }
1386 if (status & XM_IS_RXF_OV) { 1411 if (status & XM_IS_RXF_OV) {
1387 skge_xm_write32(hw, port, XM_MODE, XM_MD_FRF); 1412 xm_write32(hw, port, XM_MODE, XM_MD_FRF);
1388 ++skge->net_stats.rx_fifo_errors; 1413 ++skge->net_stats.rx_fifo_errors;
1389 } 1414 }
1390} 1415}
1391 1416
1392static void skge_gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) 1417static void gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
1393{ 1418{
1394 int i; 1419 int i;
1395 1420
1396 skge_gma_write16(hw, port, GM_SMI_DATA, val); 1421 gma_write16(hw, port, GM_SMI_DATA, val);
1397 skge_gma_write16(hw, port, GM_SMI_CTRL, 1422 gma_write16(hw, port, GM_SMI_CTRL,
1398 GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg)); 1423 GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
1399 for (i = 0; i < PHY_RETRIES; i++) { 1424 for (i = 0; i < PHY_RETRIES; i++) {
1400 udelay(1); 1425 udelay(1);
1401 1426
1402 if (!(skge_gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) 1427 if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
1403 break; 1428 break;
1404 } 1429 }
1405} 1430}
1406 1431
1407static u16 skge_gm_phy_read(struct skge_hw *hw, int port, u16 reg) 1432static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
1408{ 1433{
1409 int i; 1434 int i;
1410 1435
1411 skge_gma_write16(hw, port, GM_SMI_CTRL, 1436 gma_write16(hw, port, GM_SMI_CTRL,
1412 GM_SMI_CT_PHY_AD(hw->phy_addr) 1437 GM_SMI_CT_PHY_AD(hw->phy_addr)
1413 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); 1438 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
1414 1439
1415 for (i = 0; i < PHY_RETRIES; i++) { 1440 for (i = 0; i < PHY_RETRIES; i++) {
1416 udelay(1); 1441 udelay(1);
1417 if (skge_gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) 1442 if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
1418 goto ready; 1443 goto ready;
1419 } 1444 }
1420 1445
@@ -1422,24 +1447,7 @@ static u16 skge_gm_phy_read(struct skge_hw *hw, int port, u16 reg)
1422 hw->dev[port]->name); 1447 hw->dev[port]->name);
1423 return 0; 1448 return 0;
1424 ready: 1449 ready:
1425 return skge_gma_read16(hw, port, GM_SMI_DATA); 1450 return gma_read16(hw, port, GM_SMI_DATA);
1426}
1427
1428static void genesis_link_down(struct skge_port *skge)
1429{
1430 struct skge_hw *hw = skge->hw;
1431 int port = skge->port;
1432
1433 pr_debug("genesis_link_down\n");
1434
1435 skge_xm_write16(hw, port, XM_MMU_CMD,
1436 skge_xm_read16(hw, port, XM_MMU_CMD)
1437 & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
1438
1439 /* dummy read to ensure writing */
1440 (void) skge_xm_read16(hw, port, XM_MMU_CMD);
1441
1442 skge_link_down(skge);
1443} 1451}
1444 1452
1445static void genesis_link_up(struct skge_port *skge) 1453static void genesis_link_up(struct skge_port *skge)
@@ -1450,7 +1458,7 @@ static void genesis_link_up(struct skge_port *skge)
1450 u32 mode, msk; 1458 u32 mode, msk;
1451 1459
1452 pr_debug("genesis_link_up\n"); 1460 pr_debug("genesis_link_up\n");
1453 cmd = skge_xm_read16(hw, port, XM_MMU_CMD); 1461 cmd = xm_read16(hw, port, XM_MMU_CMD);
1454 1462
1455 /* 1463 /*
1456 * enabling pause frame reception is required for 1000BT 1464 * enabling pause frame reception is required for 1000BT
@@ -1458,14 +1466,15 @@ static void genesis_link_up(struct skge_port *skge)
1458 */ 1466 */
1459 if (skge->flow_control == FLOW_MODE_NONE || 1467 if (skge->flow_control == FLOW_MODE_NONE ||
1460 skge->flow_control == FLOW_MODE_LOC_SEND) 1468 skge->flow_control == FLOW_MODE_LOC_SEND)
1469 /* Disable Pause Frame Reception */
1461 cmd |= XM_MMU_IGN_PF; 1470 cmd |= XM_MMU_IGN_PF;
1462 else 1471 else
1463 /* Enable Pause Frame Reception */ 1472 /* Enable Pause Frame Reception */
1464 cmd &= ~XM_MMU_IGN_PF; 1473 cmd &= ~XM_MMU_IGN_PF;
1465 1474
1466 skge_xm_write16(hw, port, XM_MMU_CMD, cmd); 1475 xm_write16(hw, port, XM_MMU_CMD, cmd);
1467 1476
1468 mode = skge_xm_read32(hw, port, XM_MODE); 1477 mode = xm_read32(hw, port, XM_MODE);
1469 if (skge->flow_control == FLOW_MODE_SYMMETRIC || 1478 if (skge->flow_control == FLOW_MODE_SYMMETRIC ||
1470 skge->flow_control == FLOW_MODE_LOC_SEND) { 1479 skge->flow_control == FLOW_MODE_LOC_SEND) {
1471 /* 1480 /*
@@ -1479,10 +1488,10 @@ static void genesis_link_up(struct skge_port *skge)
1479 /* XM_PAUSE_DA = '010000C28001' (default) */ 1488 /* XM_PAUSE_DA = '010000C28001' (default) */
1480 /* XM_MAC_PTIME = 0xffff (maximum) */ 1489 /* XM_MAC_PTIME = 0xffff (maximum) */
1481 /* remember this value is defined in big endian (!) */ 1490 /* remember this value is defined in big endian (!) */
1482 skge_xm_write16(hw, port, XM_MAC_PTIME, 0xffff); 1491 xm_write16(hw, port, XM_MAC_PTIME, 0xffff);
1483 1492
1484 mode |= XM_PAUSE_MODE; 1493 mode |= XM_PAUSE_MODE;
1485 skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE); 1494 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
1486 } else { 1495 } else {
1487 /* 1496 /*
1488 * disable pause frame generation is required for 1000BT 1497 * disable pause frame generation is required for 1000BT
@@ -1491,125 +1500,68 @@ static void genesis_link_up(struct skge_port *skge)
1491 /* Disable Pause Mode in Mode Register */ 1500 /* Disable Pause Mode in Mode Register */
1492 mode &= ~XM_PAUSE_MODE; 1501 mode &= ~XM_PAUSE_MODE;
1493 1502
1494 skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE); 1503 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
1495 } 1504 }
1496 1505
1497 skge_xm_write32(hw, port, XM_MODE, mode); 1506 xm_write32(hw, port, XM_MODE, mode);
1498 1507
1499 msk = XM_DEF_MSK; 1508 msk = XM_DEF_MSK;
1500 if (hw->phy_type != SK_PHY_XMAC) 1509 /* disable GP0 interrupt bit for external Phy */
1501 msk |= XM_IS_INP_ASS; /* disable GP0 interrupt bit */ 1510 msk |= XM_IS_INP_ASS;
1502 1511
1503 skge_xm_write16(hw, port, XM_IMSK, msk); 1512 xm_write16(hw, port, XM_IMSK, msk);
1504 skge_xm_read16(hw, port, XM_ISRC); 1513 xm_read16(hw, port, XM_ISRC);
1505 1514
1506 /* get MMU Command Reg. */ 1515 /* get MMU Command Reg. */
1507 cmd = skge_xm_read16(hw, port, XM_MMU_CMD); 1516 cmd = xm_read16(hw, port, XM_MMU_CMD);
1508 if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL) 1517 if (skge->duplex == DUPLEX_FULL)
1509 cmd |= XM_MMU_GMII_FD; 1518 cmd |= XM_MMU_GMII_FD;
1510 1519
1511 if (hw->phy_type == SK_PHY_BCOM) { 1520 /*
1512 /* 1521 * Workaround BCOM Errata (#10523) for all BCom Phys
1513 * Workaround BCOM Errata (#10523) for all BCom Phys 1522 * Enable Power Management after link up
1514 * Enable Power Management after link up 1523 */
1515 */ 1524 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
1516 skge_xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, 1525 xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
1517 skge_xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL) 1526 & ~PHY_B_AC_DIS_PM);
1518 & ~PHY_B_AC_DIS_PM); 1527 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
1519 skge_xm_phy_write(hw, port, PHY_BCOM_INT_MASK,
1520 PHY_B_DEF_MSK);
1521 }
1522 1528
1523 /* enable Rx/Tx */ 1529 /* enable Rx/Tx */
1524 skge_xm_write16(hw, port, XM_MMU_CMD, 1530 xm_write16(hw, port, XM_MMU_CMD,
1525 cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX); 1531 cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
1526 skge_link_up(skge); 1532 skge_link_up(skge);
1527} 1533}
1528 1534
1529 1535
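The hunk above trims genesis_link_up() down to the external BCom PHY case, but the pause-frame policy is unchanged: received pause frames are ignored (XM_MMU_IGN_PF) unless the negotiated mode allows the link partner to send them, and pause generation (XM_PAUSE_MODE plus MFF_ENA_PAUSE on the Rx FIFO) is enabled only for FLOW_MODE_SYMMETRIC and FLOW_MODE_LOC_SEND. A minimal sketch of that decision, with stand-in flag values since the real bit definitions live in skge.h:

#include <stdio.h>

enum flow_mode { FLOW_MODE_NONE, FLOW_MODE_LOC_SEND, FLOW_MODE_SYMMETRIC, FLOW_MODE_REM_SEND };

#define XM_MMU_IGN_PF  0x0001   /* stand-in value: ignore received pause frames */
#define XM_PAUSE_MODE  0x0002   /* stand-in value: generate pause frames on Rx pressure */

static unsigned int pause_flags(enum flow_mode fc)
{
	unsigned int flags = 0;

	/* accept pause frames only if the link partner may send them */
	if (fc == FLOW_MODE_NONE || fc == FLOW_MODE_LOC_SEND)
		flags |= XM_MMU_IGN_PF;

	/* generate pause frames only if we advertised that we would */
	if (fc == FLOW_MODE_SYMMETRIC || fc == FLOW_MODE_LOC_SEND)
		flags |= XM_PAUSE_MODE;

	return flags;
}

int main(void)
{
	printf("symmetric -> %#x\n", pause_flags(FLOW_MODE_SYMMETRIC)); /* pause generation only */
	printf("none      -> %#x\n", pause_flags(FLOW_MODE_NONE));      /* ignore pause only */
	return 0;
}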
1530static void genesis_bcom_intr(struct skge_port *skge) 1536static inline void bcom_phy_intr(struct skge_port *skge)
1531{ 1537{
1532 struct skge_hw *hw = skge->hw; 1538 struct skge_hw *hw = skge->hw;
1533 int port = skge->port; 1539 int port = skge->port;
1534 u16 stat = skge_xm_phy_read(hw, port, PHY_BCOM_INT_STAT); 1540 u16 isrc;
1541
1542 isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
1543 if (netif_msg_intr(skge))
1544 printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x\n",
1545 skge->netdev->name, isrc);
1535 1546
1536 pr_debug("genesis_bcom intr stat=%x\n", stat); 1547 if (isrc & PHY_B_IS_PSE)
1548 printk(KERN_ERR PFX "%s: uncorrectable pair swap error\n",
1549 hw->dev[port]->name);
1537 1550
1538 /* Workaround BCom Errata: 1551 /* Workaround BCom Errata:
1539 * enable and disable loopback mode if "NO HCD" occurs. 1552 * enable and disable loopback mode if "NO HCD" occurs.
1540 */ 1553 */
1541 if (stat & PHY_B_IS_NO_HDCL) { 1554 if (isrc & PHY_B_IS_NO_HDCL) {
1542 u16 ctrl = skge_xm_phy_read(hw, port, PHY_BCOM_CTRL); 1555 u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
1543 skge_xm_phy_write(hw, port, PHY_BCOM_CTRL, 1556 xm_phy_write(hw, port, PHY_BCOM_CTRL,
1544 ctrl | PHY_CT_LOOP); 1557 ctrl | PHY_CT_LOOP);
1545 skge_xm_phy_write(hw, port, PHY_BCOM_CTRL, 1558 xm_phy_write(hw, port, PHY_BCOM_CTRL,
1546 ctrl & ~PHY_CT_LOOP); 1559 ctrl & ~PHY_CT_LOOP);
1547 } 1560 }
1548 1561
1549 stat = skge_xm_phy_read(hw, port, PHY_BCOM_STAT); 1562 if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
1550 if (stat & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) { 1563 bcom_check_link(hw, port);
1551 u16 aux = skge_xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);
1552 if ( !(aux & PHY_B_AS_LS) && netif_carrier_ok(skge->netdev))
1553 genesis_link_down(skge);
1554
1555 else if (stat & PHY_B_IS_LST_CHANGE) {
1556 if (aux & PHY_B_AS_AN_C) {
1557 switch (aux & PHY_B_AS_AN_RES_MSK) {
1558 case PHY_B_RES_1000FD:
1559 skge->duplex = DUPLEX_FULL;
1560 break;
1561 case PHY_B_RES_1000HD:
1562 skge->duplex = DUPLEX_HALF;
1563 break;
1564 }
1565
1566 switch (aux & PHY_B_AS_PAUSE_MSK) {
1567 case PHY_B_AS_PAUSE_MSK:
1568 skge->flow_control = FLOW_MODE_SYMMETRIC;
1569 break;
1570 case PHY_B_AS_PRR:
1571 skge->flow_control = FLOW_MODE_REM_SEND;
1572 break;
1573 case PHY_B_AS_PRT:
1574 skge->flow_control = FLOW_MODE_LOC_SEND;
1575 break;
1576 default:
1577 skge->flow_control = FLOW_MODE_NONE;
1578 }
1579 skge->speed = SPEED_1000;
1580 }
1581 genesis_link_up(skge);
1582 }
1583 else
1584 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1585 }
1586}
1587 1564
1588/* Periodic poll of phy status to check for link transition */
1589static void skge_link_timer(unsigned long __arg)
1590{
1591 struct skge_port *skge = (struct skge_port *) __arg;
1592 struct skge_hw *hw = skge->hw;
1593 int port = skge->port;
1594
1595 if (hw->chip_id != CHIP_ID_GENESIS || !netif_running(skge->netdev))
1596 return;
1597
1598 spin_lock_bh(&hw->phy_lock);
1599 if (hw->phy_type == SK_PHY_BCOM)
1600 genesis_bcom_intr(skge);
1601 else {
1602 int i;
1603 for (i = 0; i < 3; i++)
1604 if (skge_xm_read16(hw, port, XM_ISRC) & XM_IS_INP_ASS)
1605 break;
1606
1607 if (i == 3)
1608 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1609 else
1610 genesis_link_up(skge);
1611 }
1612 spin_unlock_bh(&hw->phy_lock);
1613} 1565}
1614 1566
1615/* Marvell Phy Initialization */ 1567/* Marvell Phy Initialization */
@@ -1621,31 +1573,27 @@ static void yukon_init(struct skge_hw *hw, int port)
1621 1573
1622 pr_debug("yukon_init\n"); 1574 pr_debug("yukon_init\n");
1623 if (skge->autoneg == AUTONEG_ENABLE) { 1575 if (skge->autoneg == AUTONEG_ENABLE) {
1624 u16 ectrl = skge_gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); 1576 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
1625 1577
1626 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | 1578 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
1627 PHY_M_EC_MAC_S_MSK); 1579 PHY_M_EC_MAC_S_MSK);
1628 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); 1580 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
1629 1581
1630 /* on PHY 88E1111 there is a change for downshift control */ 1582 ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
1631 if (hw->chip_id == CHIP_ID_YUKON_EC)
1632 ectrl |= PHY_M_EC_M_DSC_2(0) | PHY_M_EC_DOWN_S_ENA;
1633 else
1634 ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
1635 1583
1636 skge_gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); 1584 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
1637 } 1585 }
1638 1586
1639 ctrl = skge_gm_phy_read(hw, port, PHY_MARV_CTRL); 1587 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
1640 if (skge->autoneg == AUTONEG_DISABLE) 1588 if (skge->autoneg == AUTONEG_DISABLE)
1641 ctrl &= ~PHY_CT_ANE; 1589 ctrl &= ~PHY_CT_ANE;
1642 1590
1643 ctrl |= PHY_CT_RESET; 1591 ctrl |= PHY_CT_RESET;
1644 skge_gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); 1592 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
1645 1593
1646 ctrl = 0; 1594 ctrl = 0;
1647 ct1000 = 0; 1595 ct1000 = 0;
1648 adv = PHY_SEL_TYPE; 1596 adv = PHY_AN_CSMA;
1649 1597
1650 if (skge->autoneg == AUTONEG_ENABLE) { 1598 if (skge->autoneg == AUTONEG_ENABLE) {
1651 if (iscopper(hw)) { 1599 if (iscopper(hw)) {
@@ -1661,41 +1609,12 @@ static void yukon_init(struct skge_hw *hw, int port)
1661 adv |= PHY_M_AN_10_FD; 1609 adv |= PHY_M_AN_10_FD;
1662 if (skge->advertising & ADVERTISED_10baseT_Half) 1610 if (skge->advertising & ADVERTISED_10baseT_Half)
1663 adv |= PHY_M_AN_10_HD; 1611 adv |= PHY_M_AN_10_HD;
1664 1612 } else /* special defines for FIBER (88E1011S only) */
1665 /* Set Flow-control capabilities */
1666 switch (skge->flow_control) {
1667 case FLOW_MODE_NONE:
1668 adv |= PHY_B_P_NO_PAUSE;
1669 break;
1670 case FLOW_MODE_LOC_SEND:
1671 adv |= PHY_B_P_ASYM_MD;
1672 break;
1673 case FLOW_MODE_SYMMETRIC:
1674 adv |= PHY_B_P_SYM_MD;
1675 break;
1676 case FLOW_MODE_REM_SEND:
1677 adv |= PHY_B_P_BOTH_MD;
1678 break;
1679 }
1680 } else { /* special defines for FIBER (88E1011S only) */
1681 adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD; 1613 adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
1682 1614
1683 /* Set Flow-control capabilities */ 1615 /* Set Flow-control capabilities */
1684 switch (skge->flow_control) { 1616 adv |= phy_pause_map[skge->flow_control];
1685 case FLOW_MODE_NONE: 1617
1686 adv |= PHY_M_P_NO_PAUSE_X;
1687 break;
1688 case FLOW_MODE_LOC_SEND:
1689 adv |= PHY_M_P_ASYM_MD_X;
1690 break;
1691 case FLOW_MODE_SYMMETRIC:
1692 adv |= PHY_M_P_SYM_MD_X;
1693 break;
1694 case FLOW_MODE_REM_SEND:
1695 adv |= PHY_M_P_BOTH_MD_X;
1696 break;
1697 }
1698 }
1699 /* Restart Auto-negotiation */ 1618 /* Restart Auto-negotiation */
1700 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; 1619 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
1701 } else { 1620 } else {
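yukon_init() now folds the removed flow-control switch into a single lookup, adv |= phy_pause_map[skge->flow_control]. The table itself is defined elsewhere in skge.c and is not part of this hunk; the sketch below is only a plausible reconstruction of its shape, indexed by the FLOW_MODE_* values, with the two standard IEEE pause-advertisement bits standing in for the driver's PHY_* symbols:

#include <stdio.h>

enum { FLOW_MODE_NONE, FLOW_MODE_LOC_SEND, FLOW_MODE_SYMMETRIC, FLOW_MODE_REM_SEND };

#define PAUSE_CAP   0x0400   /* stand-in: symmetric-pause advertisement bit */
#define PAUSE_ASYM  0x0800   /* stand-in: asymmetric-pause advertisement bit */

/* Hypothetical shape of phy_pause_map[]; the real table in skge.c may use
 * different symbols, but it replaces the per-mode switch removed above. */
static const unsigned short phy_pause_map[] = {
	[FLOW_MODE_NONE]      = 0,
	[FLOW_MODE_LOC_SEND]  = PAUSE_ASYM,
	[FLOW_MODE_SYMMETRIC] = PAUSE_CAP,
	[FLOW_MODE_REM_SEND]  = PAUSE_CAP | PAUSE_ASYM,
};

int main(void)
{
	printf("symmetric mode advertises %#x\n", phy_pause_map[FLOW_MODE_SYMMETRIC]);
	return 0;
}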
@@ -1717,36 +1636,23 @@ static void yukon_init(struct skge_hw *hw, int port)
1717 ctrl |= PHY_CT_RESET; 1636 ctrl |= PHY_CT_RESET;
1718 } 1637 }
1719 1638
1720 if (hw->chip_id != CHIP_ID_YUKON_FE) 1639 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
1721 skge_gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
1722 1640
1723 skge_gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); 1641 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
1724 skge_gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); 1642 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
1725 1643
1726 /* Setup Phy LED's */ 1644 /* Setup Phy LED's */
1727 ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS); 1645 ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
1728 ledover = 0; 1646 ledover = 0;
1729 1647
1730 if (hw->chip_id == CHIP_ID_YUKON_FE) { 1648 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
1731 /* on 88E3082 these bits are at 11..9 (shifted left) */
1732 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
1733
1734 skge_gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR,
1735 ((skge_gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR)
1736
1737 & ~PHY_M_FELP_LED1_MSK)
1738 | PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL)));
1739 } else {
1740 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
1741 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
1742 1649
1743 /* turn off the Rx LED (LED_RX) */ 1650 /* turn off the Rx LED (LED_RX) */
1744 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); 1651 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
1745 }
1746 1652
1747 /* disable blink mode (LED_DUPLEX) on collisions */ 1653 /* disable blink mode (LED_DUPLEX) on collisions */
1748 ctrl |= PHY_M_LEDC_DP_CTRL; 1654 ctrl |= PHY_M_LEDC_DP_CTRL;
1749 skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); 1655 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
1750 1656
1751 if (skge->autoneg == AUTONEG_DISABLE || skge->speed == SPEED_100) { 1657 if (skge->autoneg == AUTONEG_DISABLE || skge->speed == SPEED_100) {
1752 /* turn on 100 Mbps LED (LED_LINK100) */ 1658 /* turn on 100 Mbps LED (LED_LINK100) */
@@ -1754,25 +1660,25 @@ static void yukon_init(struct skge_hw *hw, int port)
1754 } 1660 }
1755 1661
1756 if (ledover) 1662 if (ledover)
1757 skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); 1663 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
1758 1664
1759 /* Enable phy interrupt on autonegotiation complete (or link up) */ 1665 /* Enable phy interrupt on autonegotiation complete (or link up) */
1760 if (skge->autoneg == AUTONEG_ENABLE) 1666 if (skge->autoneg == AUTONEG_ENABLE)
1761 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL); 1667 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
1762 else 1668 else
1763 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); 1669 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
1764} 1670}
1765 1671
1766static void yukon_reset(struct skge_hw *hw, int port) 1672static void yukon_reset(struct skge_hw *hw, int port)
1767{ 1673{
1768 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */ 1674 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
1769 skge_gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ 1675 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
1770 skge_gma_write16(hw, port, GM_MC_ADDR_H2, 0); 1676 gma_write16(hw, port, GM_MC_ADDR_H2, 0);
1771 skge_gma_write16(hw, port, GM_MC_ADDR_H3, 0); 1677 gma_write16(hw, port, GM_MC_ADDR_H3, 0);
1772 skge_gma_write16(hw, port, GM_MC_ADDR_H4, 0); 1678 gma_write16(hw, port, GM_MC_ADDR_H4, 0);
1773 1679
1774 skge_gma_write16(hw, port, GM_RX_CTRL, 1680 gma_write16(hw, port, GM_RX_CTRL,
1775 skge_gma_read16(hw, port, GM_RX_CTRL) 1681 gma_read16(hw, port, GM_RX_CTRL)
1776 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 1682 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
1777} 1683}
1778 1684
@@ -1785,17 +1691,17 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1785 1691
1786 /* WA code for COMA mode -- set PHY reset */ 1692 /* WA code for COMA mode -- set PHY reset */
1787 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1693 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1788 chip_rev(hw) == CHIP_REV_YU_LITE_A3) 1694 hw->chip_rev == CHIP_REV_YU_LITE_A3)
1789 skge_write32(hw, B2_GP_IO, 1695 skge_write32(hw, B2_GP_IO,
1790 (skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9)); 1696 (skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9));
1791 1697
1792 /* hard reset */ 1698 /* hard reset */
1793 skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), GPC_RST_SET); 1699 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1794 skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_RST_SET); 1700 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1795 1701
1796 /* WA code for COMA mode -- clear PHY reset */ 1702 /* WA code for COMA mode -- clear PHY reset */
1797 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1703 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1798 chip_rev(hw) == CHIP_REV_YU_LITE_A3) 1704 hw->chip_rev == CHIP_REV_YU_LITE_A3)
1799 skge_write32(hw, B2_GP_IO, 1705 skge_write32(hw, B2_GP_IO,
1800 (skge_read32(hw, B2_GP_IO) | GP_DIR_9) 1706 (skge_read32(hw, B2_GP_IO) | GP_DIR_9)
1801 & ~GP_IO_9); 1707 & ~GP_IO_9);
@@ -1806,13 +1712,13 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1806 reg |= iscopper(hw) ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB; 1712 reg |= iscopper(hw) ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
1807 1713
1808 /* Clear GMC reset */ 1714 /* Clear GMC reset */
1809 skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), reg | GPC_RST_SET); 1715 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
1810 skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); 1716 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
1811 skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); 1717 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
1812 if (skge->autoneg == AUTONEG_DISABLE) { 1718 if (skge->autoneg == AUTONEG_DISABLE) {
1813 reg = GM_GPCR_AU_ALL_DIS; 1719 reg = GM_GPCR_AU_ALL_DIS;
1814 skge_gma_write16(hw, port, GM_GP_CTRL, 1720 gma_write16(hw, port, GM_GP_CTRL,
1815 skge_gma_read16(hw, port, GM_GP_CTRL) | reg); 1721 gma_read16(hw, port, GM_GP_CTRL) | reg);
1816 1722
1817 switch (skge->speed) { 1723 switch (skge->speed) {
1818 case SPEED_1000: 1724 case SPEED_1000:
@@ -1828,7 +1734,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1828 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; 1734 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
1829 switch (skge->flow_control) { 1735 switch (skge->flow_control) {
1830 case FLOW_MODE_NONE: 1736 case FLOW_MODE_NONE:
1831 skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 1737 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1832 reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; 1738 reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
1833 break; 1739 break;
1834 case FLOW_MODE_LOC_SEND: 1740 case FLOW_MODE_LOC_SEND:
@@ -1836,7 +1742,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1836 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; 1742 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
1837 } 1743 }
1838 1744
1839 skge_gma_write16(hw, port, GM_GP_CTRL, reg); 1745 gma_write16(hw, port, GM_GP_CTRL, reg);
1840 skge_read16(hw, GMAC_IRQ_SRC); 1746 skge_read16(hw, GMAC_IRQ_SRC);
1841 1747
1842 spin_lock_bh(&hw->phy_lock); 1748 spin_lock_bh(&hw->phy_lock);
@@ -1844,25 +1750,25 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1844 spin_unlock_bh(&hw->phy_lock); 1750 spin_unlock_bh(&hw->phy_lock);
1845 1751
1846 /* MIB clear */ 1752 /* MIB clear */
1847 reg = skge_gma_read16(hw, port, GM_PHY_ADDR); 1753 reg = gma_read16(hw, port, GM_PHY_ADDR);
1848 skge_gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); 1754 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
1849 1755
1850 for (i = 0; i < GM_MIB_CNT_SIZE; i++) 1756 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
1851 skge_gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i); 1757 gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
1852 skge_gma_write16(hw, port, GM_PHY_ADDR, reg); 1758 gma_write16(hw, port, GM_PHY_ADDR, reg);
1853 1759
1854 /* transmit control */ 1760 /* transmit control */
1855 skge_gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); 1761 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
1856 1762
1857 /* receive control reg: unicast + multicast + no FCS */ 1763 /* receive control reg: unicast + multicast + no FCS */
1858 skge_gma_write16(hw, port, GM_RX_CTRL, 1764 gma_write16(hw, port, GM_RX_CTRL,
1859 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); 1765 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
1860 1766
1861 /* transmit flow control */ 1767 /* transmit flow control */
1862 skge_gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); 1768 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
1863 1769
1864 /* transmit parameter */ 1770 /* transmit parameter */
1865 skge_gma_write16(hw, port, GM_TX_PARAM, 1771 gma_write16(hw, port, GM_TX_PARAM,
1866 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | 1772 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
1867 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | 1773 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
1868 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)); 1774 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));
@@ -1872,33 +1778,33 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1872 if (hw->dev[port]->mtu > 1500) 1778 if (hw->dev[port]->mtu > 1500)
1873 reg |= GM_SMOD_JUMBO_ENA; 1779 reg |= GM_SMOD_JUMBO_ENA;
1874 1780
1875 skge_gma_write16(hw, port, GM_SERIAL_MODE, reg); 1781 gma_write16(hw, port, GM_SERIAL_MODE, reg);
1876 1782
1877 /* physical address: used for pause frames */ 1783 /* physical address: used for pause frames */
1878 skge_gm_set_addr(hw, port, GM_SRC_ADDR_1L, addr); 1784 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
1879 /* virtual address for data */ 1785 /* virtual address for data */
1880 skge_gm_set_addr(hw, port, GM_SRC_ADDR_2L, addr); 1786 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
1881 1787
1882 /* enable interrupt mask for counter overflows */ 1788 /* enable interrupt mask for counter overflows */
1883 skge_gma_write16(hw, port, GM_TX_IRQ_MSK, 0); 1789 gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
1884 skge_gma_write16(hw, port, GM_RX_IRQ_MSK, 0); 1790 gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
1885 skge_gma_write16(hw, port, GM_TR_IRQ_MSK, 0); 1791 gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
1886 1792
1887 /* Initialize Mac Fifo */ 1793 /* Initialize Mac Fifo */
1888 1794
1889 /* Configure Rx MAC FIFO */ 1795 /* Configure Rx MAC FIFO */
1890 skge_write16(hw, SKGEMAC_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); 1796 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
1891 reg = GMF_OPER_ON | GMF_RX_F_FL_ON; 1797 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
1892 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1798 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1893 chip_rev(hw) == CHIP_REV_YU_LITE_A3) 1799 hw->chip_rev == CHIP_REV_YU_LITE_A3)
1894 reg &= ~GMF_RX_F_FL_ON; 1800 reg &= ~GMF_RX_F_FL_ON;
1895 skge_write8(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); 1801 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
1896 skge_write16(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), reg); 1802 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
1897 skge_write16(hw, SKGEMAC_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF); 1803 skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);
1898 1804
1899 /* Configure Tx MAC FIFO */ 1805 /* Configure Tx MAC FIFO */
1900 skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); 1806 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
1901 skge_write16(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); 1807 skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
1902} 1808}
1903 1809
1904static void yukon_stop(struct skge_port *skge) 1810static void yukon_stop(struct skge_port *skge)
@@ -1907,19 +1813,19 @@ static void yukon_stop(struct skge_port *skge)
1907 int port = skge->port; 1813 int port = skge->port;
1908 1814
1909 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1815 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1910 chip_rev(hw) == CHIP_REV_YU_LITE_A3) { 1816 hw->chip_rev == CHIP_REV_YU_LITE_A3) {
1911 skge_write32(hw, B2_GP_IO, 1817 skge_write32(hw, B2_GP_IO,
1912 skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9); 1818 skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9);
1913 } 1819 }
1914 1820
1915 skge_gma_write16(hw, port, GM_GP_CTRL, 1821 gma_write16(hw, port, GM_GP_CTRL,
1916 skge_gma_read16(hw, port, GM_GP_CTRL) 1822 gma_read16(hw, port, GM_GP_CTRL)
1917 & ~(GM_GPCR_RX_ENA|GM_GPCR_RX_ENA)); 1823 & ~(GM_GPCR_RX_ENA|GM_GPCR_RX_ENA));
1918 skge_gma_read16(hw, port, GM_GP_CTRL); 1824 gma_read16(hw, port, GM_GP_CTRL);
1919 1825
1920 /* set GPHY Control reset */ 1826 /* set GPHY Control reset */
1921 skge_gma_write32(hw, port, GPHY_CTRL, GPC_RST_SET); 1827 gma_write32(hw, port, GPHY_CTRL, GPC_RST_SET);
1922 skge_gma_write32(hw, port, GMAC_CTRL, GMC_RST_SET); 1828 gma_write32(hw, port, GMAC_CTRL, GMC_RST_SET);
1923} 1829}
1924 1830
1925static void yukon_get_stats(struct skge_port *skge, u64 *data) 1831static void yukon_get_stats(struct skge_port *skge, u64 *data)
@@ -1928,39 +1834,40 @@ static void yukon_get_stats(struct skge_port *skge, u64 *data)
1928 int port = skge->port; 1834 int port = skge->port;
1929 int i; 1835 int i;
1930 1836
1931 data[0] = (u64) skge_gma_read32(hw, port, GM_TXO_OK_HI) << 32 1837 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
1932 | skge_gma_read32(hw, port, GM_TXO_OK_LO); 1838 | gma_read32(hw, port, GM_TXO_OK_LO);
1933 data[1] = (u64) skge_gma_read32(hw, port, GM_RXO_OK_HI) << 32 1839 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
1934 | skge_gma_read32(hw, port, GM_RXO_OK_LO); 1840 | gma_read32(hw, port, GM_RXO_OK_LO);
1935 1841
1936 for (i = 2; i < ARRAY_SIZE(skge_stats); i++) 1842 for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
1937 data[i] = skge_gma_read32(hw, port, 1843 data[i] = gma_read32(hw, port,
1938 skge_stats[i].gma_offset); 1844 skge_stats[i].gma_offset);
1939} 1845}
1940 1846
1941static void yukon_mac_intr(struct skge_hw *hw, int port) 1847static void yukon_mac_intr(struct skge_hw *hw, int port)
1942{ 1848{
1943 struct skge_port *skge = netdev_priv(hw->dev[port]); 1849 struct net_device *dev = hw->dev[port];
1944 u8 status = skge_read8(hw, SKGEMAC_REG(port, GMAC_IRQ_SRC)); 1850 struct skge_port *skge = netdev_priv(dev);
1851 u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
1852
1853 if (netif_msg_intr(skge))
1854 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
1855 dev->name, status);
1945 1856
1946 pr_debug("yukon_intr status %x\n", status);
1947 if (status & GM_IS_RX_FF_OR) { 1857 if (status & GM_IS_RX_FF_OR) {
1948 ++skge->net_stats.rx_fifo_errors; 1858 ++skge->net_stats.rx_fifo_errors;
1949 skge_gma_write8(hw, port, RX_GMF_CTRL_T, GMF_CLI_RX_FO); 1859 gma_write8(hw, port, RX_GMF_CTRL_T, GMF_CLI_RX_FO);
1950 } 1860 }
1951 if (status & GM_IS_TX_FF_UR) { 1861 if (status & GM_IS_TX_FF_UR) {
1952 ++skge->net_stats.tx_fifo_errors; 1862 ++skge->net_stats.tx_fifo_errors;
1953 skge_gma_write8(hw, port, TX_GMF_CTRL_T, GMF_CLI_TX_FU); 1863 gma_write8(hw, port, TX_GMF_CTRL_T, GMF_CLI_TX_FU);
1954 } 1864 }
1955 1865
1956} 1866}
1957 1867
1958static u16 yukon_speed(const struct skge_hw *hw, u16 aux) 1868static u16 yukon_speed(const struct skge_hw *hw, u16 aux)
1959{ 1869{
1960 if (hw->chip_id == CHIP_ID_YUKON_FE) 1870 switch (aux & PHY_M_PS_SPEED_MSK) {
1961 return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10;
1962
1963 switch(aux & PHY_M_PS_SPEED_MSK) {
1964 case PHY_M_PS_SPEED_1000: 1871 case PHY_M_PS_SPEED_1000:
1965 return SPEED_1000; 1872 return SPEED_1000;
1966 case PHY_M_PS_SPEED_100: 1873 case PHY_M_PS_SPEED_100:
@@ -1981,15 +1888,15 @@ static void yukon_link_up(struct skge_port *skge)
1981 /* Enable Transmit FIFO Underrun */ 1888 /* Enable Transmit FIFO Underrun */
1982 skge_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK); 1889 skge_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK);
1983 1890
1984 reg = skge_gma_read16(hw, port, GM_GP_CTRL); 1891 reg = gma_read16(hw, port, GM_GP_CTRL);
1985 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE) 1892 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
1986 reg |= GM_GPCR_DUP_FULL; 1893 reg |= GM_GPCR_DUP_FULL;
1987 1894
1988 /* enable Rx/Tx */ 1895 /* enable Rx/Tx */
1989 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; 1896 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
1990 skge_gma_write16(hw, port, GM_GP_CTRL, reg); 1897 gma_write16(hw, port, GM_GP_CTRL, reg);
1991 1898
1992 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); 1899 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
1993 skge_link_up(skge); 1900 skge_link_up(skge);
1994} 1901}
1995 1902
@@ -1999,16 +1906,15 @@ static void yukon_link_down(struct skge_port *skge)
1999 int port = skge->port; 1906 int port = skge->port;
2000 1907
2001 pr_debug("yukon_link_down\n"); 1908 pr_debug("yukon_link_down\n");
2002 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0); 1909 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
2003 skge_gm_phy_write(hw, port, GM_GP_CTRL, 1910 gm_phy_write(hw, port, GM_GP_CTRL,
2004 skge_gm_phy_read(hw, port, GM_GP_CTRL) 1911 gm_phy_read(hw, port, GM_GP_CTRL)
2005 & ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)); 1912 & ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA));
2006 1913
2007 if (hw->chip_id != CHIP_ID_YUKON_FE && 1914 if (skge->flow_control == FLOW_MODE_REM_SEND) {
2008 skge->flow_control == FLOW_MODE_REM_SEND) {
2009 /* restore Asymmetric Pause bit */ 1915 /* restore Asymmetric Pause bit */
2010 skge_gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, 1916 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
2011 skge_gm_phy_read(hw, port, 1917 gm_phy_read(hw, port,
2012 PHY_MARV_AUNE_ADV) 1918 PHY_MARV_AUNE_ADV)
2013 | PHY_M_AN_ASP); 1919 | PHY_M_AN_ASP);
2014 1920
@@ -2027,20 +1933,21 @@ static void yukon_phy_intr(struct skge_port *skge)
2027 const char *reason = NULL; 1933 const char *reason = NULL;
2028 u16 istatus, phystat; 1934 u16 istatus, phystat;
2029 1935
2030 istatus = skge_gm_phy_read(hw, port, PHY_MARV_INT_STAT); 1936 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
2031 phystat = skge_gm_phy_read(hw, port, PHY_MARV_PHY_STAT); 1937 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
2032 pr_debug("yukon phy intr istat=%x phy_stat=%x\n", istatus, phystat); 1938
1939 if (netif_msg_intr(skge))
1940 printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x 0x%x\n",
1941 skge->netdev->name, istatus, phystat);
2033 1942
2034 if (istatus & PHY_M_IS_AN_COMPL) { 1943 if (istatus & PHY_M_IS_AN_COMPL) {
2035 if (skge_gm_phy_read(hw, port, PHY_MARV_AUNE_LP) 1944 if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
2036 & PHY_M_AN_RF) { 1945 & PHY_M_AN_RF) {
2037 reason = "remote fault"; 1946 reason = "remote fault";
2038 goto failed; 1947 goto failed;
2039 } 1948 }
2040 1949
2041 if (!(hw->chip_id == CHIP_ID_YUKON_FE || hw->chip_id == CHIP_ID_YUKON_EC) 1950 if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
2042 && (skge_gm_phy_read(hw, port, PHY_MARV_1000T_STAT)
2043 & PHY_B_1000S_MSF)) {
2044 reason = "master/slave fault"; 1951 reason = "master/slave fault";
2045 goto failed; 1952 goto failed;
2046 } 1953 }
@@ -2054,10 +1961,6 @@ static void yukon_phy_intr(struct skge_port *skge)
2054 ? DUPLEX_FULL : DUPLEX_HALF; 1961 ? DUPLEX_FULL : DUPLEX_HALF;
2055 skge->speed = yukon_speed(hw, phystat); 1962 skge->speed = yukon_speed(hw, phystat);
2056 1963
2057 /* Tx & Rx Pause Enabled bits are at 9..8 */
2058 if (hw->chip_id == CHIP_ID_YUKON_XL)
2059 phystat >>= 6;
2060
2061 /* We are using IEEE 802.3z/D5.0 Table 37-4 */ 1964 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
2062 switch (phystat & PHY_M_PS_PAUSE_MSK) { 1965 switch (phystat & PHY_M_PS_PAUSE_MSK) {
2063 case PHY_M_PS_PAUSE_MSK: 1966 case PHY_M_PS_PAUSE_MSK:
@@ -2075,9 +1978,9 @@ static void yukon_phy_intr(struct skge_port *skge)
2075 1978
2076 if (skge->flow_control == FLOW_MODE_NONE || 1979 if (skge->flow_control == FLOW_MODE_NONE ||
2077 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF)) 1980 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
2078 skge_write8(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 1981 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
2079 else 1982 else
2080 skge_write8(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_ON); 1983 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
2081 yukon_link_up(skge); 1984 yukon_link_up(skge);
2082 return; 1985 return;
2083 } 1986 }
@@ -2161,6 +2064,12 @@ static int skge_up(struct net_device *dev)
2161 if (netif_msg_ifup(skge)) 2064 if (netif_msg_ifup(skge))
2162 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); 2065 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
2163 2066
2067 if (dev->mtu > RX_BUF_SIZE)
2068 skge->rx_buf_size = dev->mtu + ETH_HLEN + NET_IP_ALIGN;
2069 else
2070 skge->rx_buf_size = RX_BUF_SIZE;
2071
2072
2164 rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc); 2073 rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
2165 tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc); 2074 tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
2166 skge->mem_size = tx_size + rx_size; 2075 skge->mem_size = tx_size + rx_size;
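skge_up() now sizes receive buffers from the MTU instead of using one fixed length: the 1536-byte RX_BUF_SIZE default is kept unless the MTU exceeds it, in which case the buffer grows to MTU + Ethernet header + the 2-byte IP alignment pad. A quick standalone check of that arithmetic (ETH_HLEN is 14; NET_IP_ALIGN is 2 on most architectures):

#include <stdio.h>

#define RX_BUF_SIZE  1536   /* default receive buffer, from skge.c */
#define ETH_HLEN     14     /* Ethernet header */
#define NET_IP_ALIGN 2      /* pad so the IP header lands on a 4-byte boundary */

static unsigned int rx_buf_size(unsigned int mtu)
{
	return mtu > RX_BUF_SIZE ? mtu + ETH_HLEN + NET_IP_ALIGN : RX_BUF_SIZE;
}

int main(void)
{
	printf("mtu 1500 -> %u bytes\n", rx_buf_size(1500)); /* 1536 */
	printf("mtu 9000 -> %u bytes\n", rx_buf_size(9000)); /* 9016 */
	return 0;
}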
@@ -2173,7 +2082,8 @@ static int skge_up(struct net_device *dev)
2173 if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma))) 2082 if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma)))
2174 goto free_pci_mem; 2083 goto free_pci_mem;
2175 2084
2176 if (skge_rx_fill(skge)) 2085 err = skge_rx_fill(skge);
2086 if (err)
2177 goto free_rx_ring; 2087 goto free_rx_ring;
2178 2088
2179 if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, 2089 if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
@@ -2182,6 +2092,10 @@ static int skge_up(struct net_device *dev)
2182 2092
2183 skge->tx_avail = skge->tx_ring.count - 1; 2093 skge->tx_avail = skge->tx_ring.count - 1;
2184 2094
2095 /* Enable IRQ from port */
2096 hw->intr_mask |= portirqmask[port];
2097 skge_write32(hw, B0_IMSK, hw->intr_mask);
2098
2185 /* Initialize MAC */ 2099 /* Initialize MAC */
2186 if (hw->chip_id == CHIP_ID_GENESIS) 2100 if (hw->chip_id == CHIP_ID_GENESIS)
2187 genesis_mac_init(hw, port); 2101 genesis_mac_init(hw, port);
@@ -2189,7 +2103,7 @@ static int skge_up(struct net_device *dev)
2189 yukon_mac_init(hw, port); 2103 yukon_mac_init(hw, port);
2190 2104
2191 /* Configure RAMbuffers */ 2105 /* Configure RAMbuffers */
2192 chunk = hw->ram_size / (isdualport(hw) ? 4 : 2); 2106 chunk = hw->ram_size / ((hw->ports + 1)*2);
2193 ram_addr = hw->ram_offset + 2 * chunk * port; 2107 ram_addr = hw->ram_offset + 2 * chunk * port;
2194 2108
2195 skge_ramset(hw, rxqaddr[port], ram_addr, chunk); 2109 skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
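The RAM buffer split is now computed from hw->ports (read out of B2_MAC_CFG during reset) rather than the removed isdualport() helper. The snippet below only evaluates the new arithmetic for one- and two-port boards; the 512 KB figure is an arbitrary example, real boards report their buffer size during skge_reset():

#include <stdio.h>

int main(void)
{
	unsigned int ram_size = 512 * 1024, ram_offset = 0;

	for (int ports = 1; ports <= 2; ports++) {
		/* chunk = ram_size / ((ports + 1) * 2); rx area starts at 2 * chunk * port */
		unsigned int chunk = ram_size / ((ports + 1) * 2);
		for (int port = 0; port < ports; port++)
			printf("%d port(s): port %d rx area at %#x, %u bytes\n",
			       ports, port, ram_offset + 2 * chunk * port, chunk);
	}
	return 0;
}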
@@ -2227,7 +2141,6 @@ static int skge_down(struct net_device *dev)
2227 netif_stop_queue(dev); 2141 netif_stop_queue(dev);
2228 2142
2229 del_timer_sync(&skge->led_blink); 2143 del_timer_sync(&skge->led_blink);
2230 del_timer_sync(&skge->link_check);
2231 2144
2232 /* Stop transmitter */ 2145 /* Stop transmitter */
2233 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); 2146 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
@@ -2240,12 +2153,12 @@ static int skge_down(struct net_device *dev)
2240 yukon_stop(skge); 2153 yukon_stop(skge);
2241 2154
2242 /* Disable Force Sync bit and Enable Alloc bit */ 2155 /* Disable Force Sync bit and Enable Alloc bit */
2243 skge_write8(hw, SKGEMAC_REG(port, TXA_CTRL), 2156 skge_write8(hw, SK_REG(port, TXA_CTRL),
2244 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); 2157 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
2245 2158
2246 /* Stop Interval Timer and Limit Counter of Tx Arbiter */ 2159 /* Stop Interval Timer and Limit Counter of Tx Arbiter */
2247 skge_write32(hw, SKGEMAC_REG(port, TXA_ITI_INI), 0L); 2160 skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
2248 skge_write32(hw, SKGEMAC_REG(port, TXA_LIM_INI), 0L); 2161 skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
2249 2162
2250 /* Reset PCI FIFO */ 2163 /* Reset PCI FIFO */
2251 skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET); 2164 skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
@@ -2260,13 +2173,13 @@ static int skge_down(struct net_device *dev)
2260 skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); 2173 skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
2261 2174
2262 if (hw->chip_id == CHIP_ID_GENESIS) { 2175 if (hw->chip_id == CHIP_ID_GENESIS) {
2263 skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_RST_SET); 2176 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
2264 skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_RST_SET); 2177 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
2265 skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_STOP); 2178 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_STOP);
2266 skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_STOP); 2179 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_STOP);
2267 } else { 2180 } else {
2268 skge_write8(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 2181 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
2269 skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); 2182 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
2270 } 2183 }
2271 2184
2272 /* turn off led's */ 2185 /* turn off led's */
@@ -2299,10 +2212,10 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2299 2212
2300 local_irq_save(flags); 2213 local_irq_save(flags);
2301 if (!spin_trylock(&skge->tx_lock)) { 2214 if (!spin_trylock(&skge->tx_lock)) {
2302 /* Collision - tell upper layer to requeue */ 2215 /* Collision - tell upper layer to requeue */
2303 local_irq_restore(flags); 2216 local_irq_restore(flags);
2304 return NETDEV_TX_LOCKED; 2217 return NETDEV_TX_LOCKED;
2305 } 2218 }
2306 2219
2307 if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) { 2220 if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) {
2308 netif_stop_queue(dev); 2221 netif_stop_queue(dev);
@@ -2333,7 +2246,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2333 * does. Looks like hardware is wrong? 2246 * does. Looks like hardware is wrong?
2334 */ 2247 */
2335 if (ip->protocol == IPPROTO_UDP 2248 if (ip->protocol == IPPROTO_UDP
2336 && chip_rev(hw) == 0 && hw->chip_id == CHIP_ID_YUKON) 2249 && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
2337 control = BMU_TCP_CHECK; 2250 control = BMU_TCP_CHECK;
2338 else 2251 else
2339 control = BMU_UDP_CHECK; 2252 control = BMU_UDP_CHECK;
@@ -2394,6 +2307,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2394 2307
2395static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e) 2308static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e)
2396{ 2309{
2310 /* This ring element can be skb or fragment */
2397 if (e->skb) { 2311 if (e->skb) {
2398 pci_unmap_single(hw->pdev, 2312 pci_unmap_single(hw->pdev,
2399 pci_unmap_addr(e, mapaddr), 2313 pci_unmap_addr(e, mapaddr),
@@ -2438,16 +2352,17 @@ static void skge_tx_timeout(struct net_device *dev)
2438static int skge_change_mtu(struct net_device *dev, int new_mtu) 2352static int skge_change_mtu(struct net_device *dev, int new_mtu)
2439{ 2353{
2440 int err = 0; 2354 int err = 0;
2355 int running = netif_running(dev);
2441 2356
2442 if(new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) 2357 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
2443 return -EINVAL; 2358 return -EINVAL;
2444 2359
2445 dev->mtu = new_mtu;
2446 2360
2447 if (netif_running(dev)) { 2361 if (running)
2448 skge_down(dev); 2362 skge_down(dev);
2363 dev->mtu = new_mtu;
2364 if (running)
2449 skge_up(dev); 2365 skge_up(dev);
2450 }
2451 2366
2452 return err; 2367 return err;
2453} 2368}
@@ -2462,7 +2377,9 @@ static void genesis_set_multicast(struct net_device *dev)
2462 u32 mode; 2377 u32 mode;
2463 u8 filter[8]; 2378 u8 filter[8];
2464 2379
2465 mode = skge_xm_read32(hw, port, XM_MODE); 2380 pr_debug("genesis_set_multicast flags=%x count=%d\n", dev->flags, dev->mc_count);
2381
2382 mode = xm_read32(hw, port, XM_MODE);
2466 mode |= XM_MD_ENA_HASH; 2383 mode |= XM_MD_ENA_HASH;
2467 if (dev->flags & IFF_PROMISC) 2384 if (dev->flags & IFF_PROMISC)
2468 mode |= XM_MD_ENA_PROM; 2385 mode |= XM_MD_ENA_PROM;
@@ -2473,17 +2390,16 @@ static void genesis_set_multicast(struct net_device *dev)
2473 memset(filter, 0xff, sizeof(filter)); 2390 memset(filter, 0xff, sizeof(filter));
2474 else { 2391 else {
2475 memset(filter, 0, sizeof(filter)); 2392 memset(filter, 0, sizeof(filter));
2476 for(i = 0; list && i < count; i++, list = list->next) { 2393 for (i = 0; list && i < count; i++, list = list->next) {
2477 u32 crc = crc32_le(~0, list->dmi_addr, ETH_ALEN); 2394 u32 crc, bit;
2478 u8 bit = 63 - (crc & 63); 2395 crc = ether_crc_le(ETH_ALEN, list->dmi_addr);
2479 2396 bit = ~crc & 0x3f;
2480 filter[bit/8] |= 1 << (bit%8); 2397 filter[bit/8] |= 1 << (bit%8);
2481 } 2398 }
2482 } 2399 }
2483 2400
2484 skge_xm_outhash(hw, port, XM_HSM, filter); 2401 xm_write32(hw, port, XM_MODE, mode);
2485 2402 xm_outhash(hw, port, XM_HSM, filter);
2486 skge_xm_write32(hw, port, XM_MODE, mode);
2487} 2403}
2488 2404
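The XMAC multicast filter above still picks one of 64 hash bits from a little-endian CRC-32 of each address: ether_crc_le(ETH_ALEN, addr) is the etherdevice.h helper form of the crc32_le(~0, addr, ETH_ALEN) call it replaces, and for a 6-bit value ~crc & 0x3f equals 63 - (crc & 63), so the selected bit should be unchanged. A standalone sketch of the computation, with the reflected CRC-32 open-coded since the kernel helpers are not available in userspace:

#include <stdio.h>
#include <stdint.h>

/* Reflected CRC-32 (poly 0xEDB88320), no final inversion: the same value the
 * kernel's ether_crc_le()/crc32_le(~0, ...) helpers return. */
static uint32_t crc32_le_sketch(uint32_t crc, const unsigned char *p, int len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}

int main(void)
{
	const unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned char filter[8] = { 0 };
	uint32_t crc = crc32_le_sketch(0xffffffffu, mcast, 6);
	unsigned int bit = ~crc & 0x3f;         /* identical to 63 - (crc & 63) */

	filter[bit / 8] |= 1 << (bit % 8);      /* same byte/bit the adapter will test */
	printf("crc %#010x -> hash bit %u\n", (unsigned) crc, bit);
	return 0;
}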
2489static void yukon_set_multicast(struct net_device *dev) 2405static void yukon_set_multicast(struct net_device *dev)
@@ -2497,7 +2413,7 @@ static void yukon_set_multicast(struct net_device *dev)
2497 2413
2498 memset(filter, 0, sizeof(filter)); 2414 memset(filter, 0, sizeof(filter));
2499 2415
2500 reg = skge_gma_read16(hw, port, GM_RX_CTRL); 2416 reg = gma_read16(hw, port, GM_RX_CTRL);
2501 reg |= GM_RXCR_UCF_ENA; 2417 reg |= GM_RXCR_UCF_ENA;
2502 2418
2503 if (dev->flags & IFF_PROMISC) /* promiscuous */ 2419 if (dev->flags & IFF_PROMISC) /* promiscuous */
@@ -2510,23 +2426,23 @@ static void yukon_set_multicast(struct net_device *dev)
2510 int i; 2426 int i;
2511 reg |= GM_RXCR_MCF_ENA; 2427 reg |= GM_RXCR_MCF_ENA;
2512 2428
2513 for(i = 0; list && i < dev->mc_count; i++, list = list->next) { 2429 for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
2514 u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f; 2430 u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
2515 filter[bit/8] |= 1 << (bit%8); 2431 filter[bit/8] |= 1 << (bit%8);
2516 } 2432 }
2517 } 2433 }
2518 2434
2519 2435
2520 skge_gma_write16(hw, port, GM_MC_ADDR_H1, 2436 gma_write16(hw, port, GM_MC_ADDR_H1,
2521 (u16)filter[0] | ((u16)filter[1] << 8)); 2437 (u16)filter[0] | ((u16)filter[1] << 8));
2522 skge_gma_write16(hw, port, GM_MC_ADDR_H2, 2438 gma_write16(hw, port, GM_MC_ADDR_H2,
2523 (u16)filter[2] | ((u16)filter[3] << 8)); 2439 (u16)filter[2] | ((u16)filter[3] << 8));
2524 skge_gma_write16(hw, port, GM_MC_ADDR_H3, 2440 gma_write16(hw, port, GM_MC_ADDR_H3,
2525 (u16)filter[4] | ((u16)filter[5] << 8)); 2441 (u16)filter[4] | ((u16)filter[5] << 8));
2526 skge_gma_write16(hw, port, GM_MC_ADDR_H4, 2442 gma_write16(hw, port, GM_MC_ADDR_H4,
2527 (u16)filter[6] | ((u16)filter[7] << 8)); 2443 (u16)filter[6] | ((u16)filter[7] << 8));
2528 2444
2529 skge_gma_write16(hw, port, GM_RX_CTRL, reg); 2445 gma_write16(hw, port, GM_RX_CTRL, reg);
2530} 2446}
2531 2447
2532static inline int bad_phy_status(const struct skge_hw *hw, u32 status) 2448static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
@@ -2545,28 +2461,76 @@ static void skge_rx_error(struct skge_port *skge, int slot,
2545 printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n", 2461 printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
2546 skge->netdev->name, slot, control, status); 2462 skge->netdev->name, slot, control, status);
2547 2463
2548 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF) 2464 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
2549 || (control & BMU_BBC) > skge->netdev->mtu + VLAN_ETH_HLEN)
2550 skge->net_stats.rx_length_errors++; 2465 skge->net_stats.rx_length_errors++;
2551 else { 2466 else if (skge->hw->chip_id == CHIP_ID_GENESIS) {
2552 if (skge->hw->chip_id == CHIP_ID_GENESIS) { 2467 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
2553 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) 2468 skge->net_stats.rx_length_errors++;
2554 skge->net_stats.rx_length_errors++; 2469 if (status & XMR_FS_FRA_ERR)
2555 if (status & XMR_FS_FRA_ERR) 2470 skge->net_stats.rx_frame_errors++;
2556 skge->net_stats.rx_frame_errors++; 2471 if (status & XMR_FS_FCS_ERR)
2557 if (status & XMR_FS_FCS_ERR) 2472 skge->net_stats.rx_crc_errors++;
2558 skge->net_stats.rx_crc_errors++; 2473 } else {
2559 } else { 2474 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
2560 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) 2475 skge->net_stats.rx_length_errors++;
2561 skge->net_stats.rx_length_errors++; 2476 if (status & GMR_FS_FRAGMENT)
2562 if (status & GMR_FS_FRAGMENT) 2477 skge->net_stats.rx_frame_errors++;
2563 skge->net_stats.rx_frame_errors++; 2478 if (status & GMR_FS_CRC_ERR)
2564 if (status & GMR_FS_CRC_ERR) 2479 skge->net_stats.rx_crc_errors++;
2565 skge->net_stats.rx_crc_errors++; 2480 }
2481}
2482
2483/* Get receive buffer from descriptor.
2484 * Handles copy of small buffers and reallocation failures
2485 */
2486static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
2487 struct skge_element *e,
2488 unsigned int len)
2489{
2490 struct sk_buff *nskb, *skb;
2491
2492 if (len < RX_COPY_THRESHOLD) {
2493 nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN);
2494 if (unlikely(!nskb))
2495 return NULL;
2496
2497 pci_dma_sync_single_for_cpu(skge->hw->pdev,
2498 pci_unmap_addr(e, mapaddr),
2499 len, PCI_DMA_FROMDEVICE);
2500 memcpy(nskb->data, e->skb->data, len);
2501 pci_dma_sync_single_for_device(skge->hw->pdev,
2502 pci_unmap_addr(e, mapaddr),
2503 len, PCI_DMA_FROMDEVICE);
2504
2505 if (skge->rx_csum) {
2506 struct skge_rx_desc *rd = e->desc;
2507 nskb->csum = le16_to_cpu(rd->csum2);
2508 nskb->ip_summed = CHECKSUM_HW;
2566 } 2509 }
2510 skge_rx_reuse(e, skge->rx_buf_size);
2511 return nskb;
2512 } else {
2513 nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size);
2514 if (unlikely(!nskb))
2515 return NULL;
2516
2517 pci_unmap_single(skge->hw->pdev,
2518 pci_unmap_addr(e, mapaddr),
2519 pci_unmap_len(e, maplen),
2520 PCI_DMA_FROMDEVICE);
2521 skb = e->skb;
2522 if (skge->rx_csum) {
2523 struct skge_rx_desc *rd = e->desc;
2524 skb->csum = le16_to_cpu(rd->csum2);
2525 skb->ip_summed = CHECKSUM_HW;
2526 }
2527
2528 skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
2529 return skb;
2567 } 2530 }
2568} 2531}
2569 2532
2533
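skge_rx_get() above is a copy-break receive path: frames shorter than RX_COPY_THRESHOLD (128 bytes) are copied into a freshly allocated small skb and the original DMA-mapped buffer goes straight back onto the ring, while larger frames are handed up whole and a replacement buffer is mapped in their place; either way an allocation failure leaves the ring entry intact and the frame is simply dropped. A simplified userspace sketch of the same policy, with plain malloc/memcpy standing in for the skb and DMA handling:

#include <stdlib.h>
#include <string.h>

#define RX_COPY_THRESHOLD 128   /* from skge.c */

struct ring_entry { unsigned char *buf; size_t buf_size; };

/* Return a buffer owning 'len' received bytes, or NULL to drop the frame.
 * Mirrors the structure of skge_rx_get(): copy small frames, swap out the
 * ring buffer for large ones, and never leave the ring entry empty. */
static unsigned char *rx_get(struct ring_entry *e, size_t len, size_t ring_buf_size)
{
	if (len < RX_COPY_THRESHOLD) {
		unsigned char *copy = malloc(len);
		if (!copy)
			return NULL;            /* drop; ring buffer is reused as-is */
		memcpy(copy, e->buf, len);
		return copy;                    /* e->buf stays on the ring */
	} else {
		unsigned char *replacement = malloc(ring_buf_size);
		unsigned char *frame = e->buf;
		if (!replacement)
			return NULL;            /* drop; keep the old buffer on the ring */
		e->buf = replacement;           /* hand the full buffer up the stack */
		e->buf_size = ring_buf_size;
		return frame;
	}
}

int main(void)
{
	struct ring_entry e = { malloc(1536), 1536 };
	unsigned char *small = rx_get(&e, 60, 1536);    /* copied */
	unsigned char *large = rx_get(&e, 1400, 1536);  /* buffer swapped */
	free(small);
	free(large);
	free(e.buf);
	return 0;
}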
2570static int skge_poll(struct net_device *dev, int *budget) 2534static int skge_poll(struct net_device *dev, int *budget)
2571{ 2535{
2572 struct skge_port *skge = netdev_priv(dev); 2536 struct skge_port *skge = netdev_priv(dev);
@@ -2575,13 +2539,12 @@ static int skge_poll(struct net_device *dev, int *budget)
2575 struct skge_element *e; 2539 struct skge_element *e;
2576 unsigned int to_do = min(dev->quota, *budget); 2540 unsigned int to_do = min(dev->quota, *budget);
2577 unsigned int work_done = 0; 2541 unsigned int work_done = 0;
2578 int done;
2579 static const u32 irqmask[] = { IS_PORT_1, IS_PORT_2 };
2580 2542
2581 for (e = ring->to_clean; e != ring->to_use && work_done < to_do; 2543 pr_debug("skge_poll\n");
2582 e = e->next) { 2544
2545 for (e = ring->to_clean; work_done < to_do; e = e->next) {
2583 struct skge_rx_desc *rd = e->desc; 2546 struct skge_rx_desc *rd = e->desc;
2584 struct sk_buff *skb = e->skb; 2547 struct sk_buff *skb;
2585 u32 control, len, status; 2548 u32 control, len, status;
2586 2549
2587 rmb(); 2550 rmb();
@@ -2590,19 +2553,12 @@ static int skge_poll(struct net_device *dev, int *budget)
2590 break; 2553 break;
2591 2554
2592 len = control & BMU_BBC; 2555 len = control & BMU_BBC;
2593 e->skb = NULL;
2594
2595 pci_unmap_single(hw->pdev,
2596 pci_unmap_addr(e, mapaddr),
2597 pci_unmap_len(e, maplen),
2598 PCI_DMA_FROMDEVICE);
2599
2600 status = rd->status; 2556 status = rd->status;
2601 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF) 2557
2602 || len > dev->mtu + VLAN_ETH_HLEN 2558 if (unlikely((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
2603 || bad_phy_status(hw, status)) { 2559 || bad_phy_status(hw, status))) {
2604 skge_rx_error(skge, e - ring->start, control, status); 2560 skge_rx_error(skge, e - ring->start, control, status);
2605 dev_kfree_skb(skb); 2561 skge_rx_reuse(e, skge->rx_buf_size);
2606 continue; 2562 continue;
2607 } 2563 }
2608 2564
@@ -2610,43 +2566,37 @@ static int skge_poll(struct net_device *dev, int *budget)
2610 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n", 2566 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
2611 dev->name, e - ring->start, rd->status, len); 2567 dev->name, e - ring->start, rd->status, len);
2612 2568
2613 skb_put(skb, len); 2569 skb = skge_rx_get(skge, e, len);
2614 skb->protocol = eth_type_trans(skb, dev); 2570 if (likely(skb)) {
2615 2571 skb_put(skb, len);
2616 if (skge->rx_csum) { 2572 skb->protocol = eth_type_trans(skb, dev);
2617 skb->csum = le16_to_cpu(rd->csum2);
2618 skb->ip_summed = CHECKSUM_HW;
2619 }
2620 2573
2621 dev->last_rx = jiffies; 2574 dev->last_rx = jiffies;
2622 netif_receive_skb(skb); 2575 netif_receive_skb(skb);
2623 2576
2624 ++work_done; 2577 ++work_done;
2578 } else
2579 skge_rx_reuse(e, skge->rx_buf_size);
2625 } 2580 }
2626 ring->to_clean = e; 2581 ring->to_clean = e;
2627 2582
2628 *budget -= work_done;
2629 dev->quota -= work_done;
2630 done = work_done < to_do;
2631
2632 if (skge_rx_fill(skge))
2633 done = 0;
2634
2635 /* restart receiver */ 2583 /* restart receiver */
2636 wmb(); 2584 wmb();
2637 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), 2585 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR),
2638 CSR_START | CSR_IRQ_CL_F); 2586 CSR_START | CSR_IRQ_CL_F);
2639 2587
2640 if (done) { 2588 *budget -= work_done;
2641 local_irq_disable(); 2589 dev->quota -= work_done;
2642 hw->intr_mask |= irqmask[skge->port];
2643 /* Order is important since data can get interrupted */
2644 skge_write32(hw, B0_IMSK, hw->intr_mask);
2645 __netif_rx_complete(dev);
2646 local_irq_enable();
2647 }
2648 2590
2649 return !done; 2591 if (work_done >= to_do)
2592 return 1; /* not done */
2593
2594 local_irq_disable();
2595 __netif_rx_complete(dev);
2596 hw->intr_mask |= portirqmask[skge->port];
2597 skge_write32(hw, B0_IMSK, hw->intr_mask);
2598 local_irq_enable();
2599 return 0;
2650} 2600}
2651 2601
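The rewritten skge_poll() above follows the 2.6-era ->poll(dev, *budget) contract: process at most min(dev->quota, *budget) frames, subtract the work done from both counters, return 1 when the budget was exhausted (stay on the poll list), and otherwise complete the poll and unmask the port's receive interrupt with local interrupts disabled so the two steps cannot race with a new IRQ. A condensed sketch of that control flow with the device specifics stubbed out:

#include <stdio.h>

/* process_one_rx(), rx_complete() and reenable_rx_irq() are stubs standing in
 * for the ring walk, __netif_rx_complete() and the B0_IMSK write. */
struct dev { int quota; };

static int rx_ready(void)         { static int pending = 5; return pending-- > 0; }
static void process_one_rx(void)  { }
static void rx_complete(void)     { puts("polling finished, back to interrupts"); }
static void reenable_rx_irq(void) { }

static int poll(struct dev *dev, int *budget)
{
	int to_do = *budget < dev->quota ? *budget : dev->quota;
	int work_done = 0;

	while (work_done < to_do && rx_ready()) {
		process_one_rx();
		work_done++;
	}

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done >= to_do)
		return 1;               /* budget exhausted: stay on the poll list */

	/* in the driver this section runs with local interrupts disabled */
	rx_complete();
	reenable_rx_irq();
	return 0;                       /* done: receive interrupt re-enabled */
}

int main(void)
{
	struct dev d = { .quota = 16 };
	int budget = 8;

	printf("poll returned %d, budget left %d\n", poll(&d, &budget), budget);
	return 0;
}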
2652static inline void skge_tx_intr(struct net_device *dev) 2602static inline void skge_tx_intr(struct net_device *dev)
@@ -2657,7 +2607,7 @@ static inline void skge_tx_intr(struct net_device *dev)
2657 struct skge_element *e; 2607 struct skge_element *e;
2658 2608
2659 spin_lock(&skge->tx_lock); 2609 spin_lock(&skge->tx_lock);
2660 for(e = ring->to_clean; e != ring->to_use; e = e->next) { 2610 for (e = ring->to_clean; e != ring->to_use; e = e->next) {
2661 struct skge_tx_desc *td = e->desc; 2611 struct skge_tx_desc *td = e->desc;
2662 u32 control; 2612 u32 control;
2663 2613
@@ -2690,12 +2640,12 @@ static void skge_mac_parity(struct skge_hw *hw, int port)
2690 : (port == 0 ? "(port A)": "(port B)")); 2640 : (port == 0 ? "(port A)": "(port B)"));
2691 2641
2692 if (hw->chip_id == CHIP_ID_GENESIS) 2642 if (hw->chip_id == CHIP_ID_GENESIS)
2693 skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), 2643 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
2694 MFF_CLR_PERR); 2644 MFF_CLR_PERR);
2695 else 2645 else
2696 /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ 2646 /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
2697 skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), 2647 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T),
2698 (hw->chip_id == CHIP_ID_YUKON && chip_rev(hw) == 0) 2648 (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
2699 ? GMF_CLI_TX_FC : GMF_CLI_TX_PE); 2649 ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
2700} 2650}
2701 2651
@@ -2703,16 +2653,16 @@ static void skge_pci_clear(struct skge_hw *hw)
2703{ 2653{
2704 u16 status; 2654 u16 status;
2705 2655
2706 status = skge_read16(hw, SKGEPCI_REG(PCI_STATUS)); 2656 pci_read_config_word(hw->pdev, PCI_STATUS, &status);
2707 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2657 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2708 skge_write16(hw, SKGEPCI_REG(PCI_STATUS), 2658 pci_write_config_word(hw->pdev, PCI_STATUS,
2709 status | PCI_STATUS_ERROR_BITS); 2659 status | PCI_STATUS_ERROR_BITS);
2710 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 2660 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2711} 2661}
2712 2662
2713static void skge_mac_intr(struct skge_hw *hw, int port) 2663static void skge_mac_intr(struct skge_hw *hw, int port)
2714{ 2664{
2715 if (hw->chip_id == CHIP_ID_GENESIS) 2665 if (hw->chip_id == CHIP_ID_GENESIS)
2716 genesis_mac_intr(hw, port); 2666 genesis_mac_intr(hw, port);
2717 else 2667 else
2718 yukon_mac_intr(hw, port); 2668 yukon_mac_intr(hw, port);
@@ -2726,9 +2676,9 @@ static void skge_error_irq(struct skge_hw *hw)
2726 if (hw->chip_id == CHIP_ID_GENESIS) { 2676 if (hw->chip_id == CHIP_ID_GENESIS) {
2727 /* clear xmac errors */ 2677 /* clear xmac errors */
2728 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1)) 2678 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
2729 skge_write16(hw, SKGEMAC_REG(0, RX_MFF_CTRL1), MFF_CLR_INSTAT); 2679 skge_write16(hw, SK_REG(0, RX_MFF_CTRL1), MFF_CLR_INSTAT);
2730 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2)) 2680 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
2731 skge_write16(hw, SKGEMAC_REG(0, RX_MFF_CTRL2), MFF_CLR_INSTAT); 2681 skge_write16(hw, SK_REG(0, RX_MFF_CTRL2), MFF_CLR_INSTAT);
2732 } else { 2682 } else {
2733 /* Timestamp (unused) overflow */ 2683 /* Timestamp (unused) overflow */
2734 if (hwstatus & IS_IRQ_TIST_OV) 2684 if (hwstatus & IS_IRQ_TIST_OV)
@@ -2803,8 +2753,8 @@ static void skge_extirq(unsigned long data)
2803 2753
2804 if (hw->chip_id != CHIP_ID_GENESIS) 2754 if (hw->chip_id != CHIP_ID_GENESIS)
2805 yukon_phy_intr(skge); 2755 yukon_phy_intr(skge);
2806 else if (hw->phy_type == SK_PHY_BCOM) 2756 else
2807 genesis_bcom_intr(skge); 2757 bcom_phy_intr(skge);
2808 } 2758 }
2809 } 2759 }
2810 spin_unlock(&hw->phy_lock); 2760 spin_unlock(&hw->phy_lock);
@@ -2824,19 +2774,14 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2824 return IRQ_NONE; 2774 return IRQ_NONE;
2825 2775
2826 status &= hw->intr_mask; 2776 status &= hw->intr_mask;
2827 2777 if (status & IS_R1_F) {
2828 if ((status & IS_R1_F) && netif_rx_schedule_prep(hw->dev[0])) {
2829 status &= ~IS_R1_F;
2830 hw->intr_mask &= ~IS_R1_F; 2778 hw->intr_mask &= ~IS_R1_F;
2831 skge_write32(hw, B0_IMSK, hw->intr_mask); 2779 netif_rx_schedule(hw->dev[0]);
2832 __netif_rx_schedule(hw->dev[0]);
2833 } 2780 }
2834 2781
2835 if ((status & IS_R2_F) && netif_rx_schedule_prep(hw->dev[1])) { 2782 if (status & IS_R2_F) {
2836 status &= ~IS_R2_F;
2837 hw->intr_mask &= ~IS_R2_F; 2783 hw->intr_mask &= ~IS_R2_F;
2838 skge_write32(hw, B0_IMSK, hw->intr_mask); 2784 netif_rx_schedule(hw->dev[1]);
2839 __netif_rx_schedule(hw->dev[1]);
2840 } 2785 }
2841 2786
2842 if (status & IS_XA1_F) 2787 if (status & IS_XA1_F)
@@ -2845,9 +2790,27 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2845 if (status & IS_XA2_F) 2790 if (status & IS_XA2_F)
2846 skge_tx_intr(hw->dev[1]); 2791 skge_tx_intr(hw->dev[1]);
2847 2792
2793 if (status & IS_PA_TO_RX1) {
2794 struct skge_port *skge = netdev_priv(hw->dev[0]);
2795 ++skge->net_stats.rx_over_errors;
2796 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
2797 }
2798
2799 if (status & IS_PA_TO_RX2) {
2800 struct skge_port *skge = netdev_priv(hw->dev[1]);
2801 ++skge->net_stats.rx_over_errors;
2802 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
2803 }
2804
2805 if (status & IS_PA_TO_TX1)
2806 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
2807
2808 if (status & IS_PA_TO_TX2)
2809 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
2810
2848 if (status & IS_MAC1) 2811 if (status & IS_MAC1)
2849 skge_mac_intr(hw, 0); 2812 skge_mac_intr(hw, 0);
2850 2813
2851 if (status & IS_MAC2) 2814 if (status & IS_MAC2)
2852 skge_mac_intr(hw, 1); 2815 skge_mac_intr(hw, 1);
2853 2816
@@ -2859,8 +2822,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2859 tasklet_schedule(&hw->ext_tasklet); 2822 tasklet_schedule(&hw->ext_tasklet);
2860 } 2823 }
2861 2824
2862 if (status) 2825 skge_write32(hw, B0_IMSK, hw->intr_mask);
2863 skge_write32(hw, B0_IMSK, hw->intr_mask);
2864 2826
2865 return IRQ_HANDLED; 2827 return IRQ_HANDLED;
2866} 2828}
@@ -2904,9 +2866,6 @@ static const struct {
2904 { CHIP_ID_YUKON, "Yukon" }, 2866 { CHIP_ID_YUKON, "Yukon" },
2905 { CHIP_ID_YUKON_LITE, "Yukon-Lite"}, 2867 { CHIP_ID_YUKON_LITE, "Yukon-Lite"},
2906 { CHIP_ID_YUKON_LP, "Yukon-LP"}, 2868 { CHIP_ID_YUKON_LP, "Yukon-LP"},
2907 { CHIP_ID_YUKON_XL, "Yukon-2 XL"},
2908 { CHIP_ID_YUKON_EC, "YUKON-2 EC"},
2909 { CHIP_ID_YUKON_FE, "YUKON-2 FE"},
2910}; 2869};
2911 2870
2912static const char *skge_board_name(const struct skge_hw *hw) 2871static const char *skge_board_name(const struct skge_hw *hw)
@@ -2930,8 +2889,8 @@ static const char *skge_board_name(const struct skge_hw *hw)
2930static int skge_reset(struct skge_hw *hw) 2889static int skge_reset(struct skge_hw *hw)
2931{ 2890{
2932 u16 ctst; 2891 u16 ctst;
2933 u8 t8; 2892 u8 t8, mac_cfg;
2934 int i, ports; 2893 int i;
2935 2894
2936 ctst = skge_read16(hw, B0_CTST); 2895 ctst = skge_read16(hw, B0_CTST);
2937 2896
@@ -2952,12 +2911,9 @@ static int skge_reset(struct skge_hw *hw)
2952 hw->phy_type = skge_read8(hw, B2_E_1) & 0xf; 2911 hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
2953 hw->pmd_type = skge_read8(hw, B2_PMD_TYP); 2912 hw->pmd_type = skge_read8(hw, B2_PMD_TYP);
2954 2913
2955 switch(hw->chip_id) { 2914 switch (hw->chip_id) {
2956 case CHIP_ID_GENESIS: 2915 case CHIP_ID_GENESIS:
2957 switch (hw->phy_type) { 2916 switch (hw->phy_type) {
2958 case SK_PHY_XMAC:
2959 hw->phy_addr = PHY_ADDR_XMAC;
2960 break;
2961 case SK_PHY_BCOM: 2917 case SK_PHY_BCOM:
2962 hw->phy_addr = PHY_ADDR_BCOM; 2918 hw->phy_addr = PHY_ADDR_BCOM;
2963 break; 2919 break;
@@ -2986,8 +2942,9 @@ static int skge_reset(struct skge_hw *hw)
2986 return -EOPNOTSUPP; 2942 return -EOPNOTSUPP;
2987 } 2943 }
2988 2944
2989 hw->mac_cfg = skge_read8(hw, B2_MAC_CFG); 2945 mac_cfg = skge_read8(hw, B2_MAC_CFG);
2990 ports = isdualport(hw) ? 2 : 1; 2946 hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
2947 hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;
2991 2948
2992 /* read the adapters RAM size */ 2949 /* read the adapters RAM size */
2993 t8 = skge_read8(hw, B2_E_0); 2950 t8 = skge_read8(hw, B2_E_0);
@@ -3010,9 +2967,9 @@ static int skge_reset(struct skge_hw *hw)
3010 /* switch power to VCC (WA for VAUX problem) */ 2967 /* switch power to VCC (WA for VAUX problem) */
3011 skge_write8(hw, B0_POWER_CTRL, 2968 skge_write8(hw, B0_POWER_CTRL,
3012 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 2969 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
3013 for (i = 0; i < ports; i++) { 2970 for (i = 0; i < hw->ports; i++) {
3014 skge_write16(hw, SKGEMAC_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); 2971 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
3015 skge_write16(hw, SKGEMAC_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); 2972 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
3016 } 2973 }
3017 } 2974 }
3018 2975
@@ -3022,8 +2979,8 @@ static int skge_reset(struct skge_hw *hw)
3022 skge_write8(hw, B0_LED, LED_STAT_ON); 2979 skge_write8(hw, B0_LED, LED_STAT_ON);
3023 2980
3024 /* enable the Tx Arbiters */ 2981 /* enable the Tx Arbiters */
3025 for (i = 0; i < ports; i++) 2982 for (i = 0; i < hw->ports; i++)
3026 skge_write8(hw, SKGEMAC_REG(i, TXA_CTRL), TXA_ENA_ARB); 2983 skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
3027 2984
3028 /* Initialize ram interface */ 2985 /* Initialize ram interface */
3029 skge_write16(hw, B3_RI_CTRL, RI_RST_CLR); 2986 skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);
@@ -3050,16 +3007,14 @@ static int skge_reset(struct skge_hw *hw)
3050 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100)); 3007 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
3051 skge_write32(hw, B2_IRQM_CTRL, TIM_START); 3008 skge_write32(hw, B2_IRQM_CTRL, TIM_START);
3052 3009
3053 hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1; 3010 hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
3054 if (isdualport(hw))
3055 hw->intr_mask |= IS_PORT_2;
3056 skge_write32(hw, B0_IMSK, hw->intr_mask); 3011 skge_write32(hw, B0_IMSK, hw->intr_mask);
3057 3012
3058 if (hw->chip_id != CHIP_ID_GENESIS) 3013 if (hw->chip_id != CHIP_ID_GENESIS)
3059 skge_write8(hw, GMAC_IRQ_MSK, 0); 3014 skge_write8(hw, GMAC_IRQ_MSK, 0);
3060 3015
3061 spin_lock_bh(&hw->phy_lock); 3016 spin_lock_bh(&hw->phy_lock);
3062 for (i = 0; i < ports; i++) { 3017 for (i = 0; i < hw->ports; i++) {
3063 if (hw->chip_id == CHIP_ID_GENESIS) 3018 if (hw->chip_id == CHIP_ID_GENESIS)
3064 genesis_reset(hw, i); 3019 genesis_reset(hw, i);
3065 else 3020 else
@@ -3071,7 +3026,8 @@ static int skge_reset(struct skge_hw *hw)
3071} 3026}
3072 3027
3073/* Initialize network device */ 3028/* Initialize network device */
3074static struct net_device *skge_devinit(struct skge_hw *hw, int port) 3029static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3030 int highmem)
3075{ 3031{
3076 struct skge_port *skge; 3032 struct skge_port *skge;
3077 struct net_device *dev = alloc_etherdev(sizeof(*skge)); 3033 struct net_device *dev = alloc_etherdev(sizeof(*skge));
@@ -3104,6 +3060,8 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port)
3104#endif 3060#endif
3105 dev->irq = hw->pdev->irq; 3061 dev->irq = hw->pdev->irq;
3106 dev->features = NETIF_F_LLTX; 3062 dev->features = NETIF_F_LLTX;
3063 if (highmem)
3064 dev->features |= NETIF_F_HIGHDMA;
3107 3065
3108 skge = netdev_priv(dev); 3066 skge = netdev_priv(dev);
3109 skge->netdev = dev; 3067 skge->netdev = dev;
@@ -3117,7 +3075,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port)
3117 skge->flow_control = FLOW_MODE_SYMMETRIC; 3075 skge->flow_control = FLOW_MODE_SYMMETRIC;
3118 skge->duplex = -1; 3076 skge->duplex = -1;
3119 skge->speed = -1; 3077 skge->speed = -1;
3120 skge->advertising = skge_modes(hw); 3078 skge->advertising = skge_supported_modes(hw);
3121 3079
3122 hw->dev[port] = dev; 3080 hw->dev[port] = dev;
3123 3081
@@ -3125,10 +3083,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port)
3125 3083
3126 spin_lock_init(&skge->tx_lock); 3084 spin_lock_init(&skge->tx_lock);
3127 3085
3128 init_timer(&skge->link_check);
3129 skge->link_check.function = skge_link_timer;
3130 skge->link_check.data = (unsigned long) skge;
3131
3132 init_timer(&skge->led_blink); 3086 init_timer(&skge->led_blink);
3133 skge->led_blink.function = skge_blink_timer; 3087 skge->led_blink.function = skge_blink_timer;
3134 skge->led_blink.data = (unsigned long) skge; 3088 skge->led_blink.data = (unsigned long) skge;
@@ -3232,14 +3186,11 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3232 3186
3233 printk(KERN_INFO PFX "addr 0x%lx irq %d chip %s rev %d\n", 3187 printk(KERN_INFO PFX "addr 0x%lx irq %d chip %s rev %d\n",
3234 pci_resource_start(pdev, 0), pdev->irq, 3188 pci_resource_start(pdev, 0), pdev->irq,
3235 skge_board_name(hw), chip_rev(hw)); 3189 skge_board_name(hw), hw->chip_rev);
3236 3190
3237 if ((dev = skge_devinit(hw, 0)) == NULL) 3191 if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
3238 goto err_out_led_off; 3192 goto err_out_led_off;
3239 3193
3240 if (using_dac)
3241 dev->features |= NETIF_F_HIGHDMA;
3242
3243 if ((err = register_netdev(dev))) { 3194 if ((err = register_netdev(dev))) {
3244 printk(KERN_ERR PFX "%s: cannot register net device\n", 3195 printk(KERN_ERR PFX "%s: cannot register net device\n",
3245 pci_name(pdev)); 3196 pci_name(pdev));
@@ -3248,10 +3199,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3248 3199
3249 skge_show_addr(dev); 3200 skge_show_addr(dev);
3250 3201
3251 if (isdualport(hw) && (dev1 = skge_devinit(hw, 1))) { 3202 if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
3252 if (using_dac)
3253 dev1->features |= NETIF_F_HIGHDMA;
3254
3255 if (register_netdev(dev1) == 0) 3203 if (register_netdev(dev1) == 0)
3256 skge_show_addr(dev1); 3204 skge_show_addr(dev1);
3257 else { 3205 else {
@@ -3288,7 +3236,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
3288 struct skge_hw *hw = pci_get_drvdata(pdev); 3236 struct skge_hw *hw = pci_get_drvdata(pdev);
3289 struct net_device *dev0, *dev1; 3237 struct net_device *dev0, *dev1;
3290 3238
3291 if(!hw) 3239 if (!hw)
3292 return; 3240 return;
3293 3241
3294 if ((dev1 = hw->dev[1])) 3242 if ((dev1 = hw->dev[1]))
@@ -3316,7 +3264,7 @@ static int skge_suspend(struct pci_dev *pdev, u32 state)
3316 struct skge_hw *hw = pci_get_drvdata(pdev); 3264 struct skge_hw *hw = pci_get_drvdata(pdev);
3317 int i, wol = 0; 3265 int i, wol = 0;
3318 3266
3319 for(i = 0; i < 2; i++) { 3267 for (i = 0; i < 2; i++) {
3320 struct net_device *dev = hw->dev[i]; 3268 struct net_device *dev = hw->dev[i];
3321 3269
3322 if (dev) { 3270 if (dev) {
@@ -3349,11 +3297,11 @@ static int skge_resume(struct pci_dev *pdev)
3349 3297
3350 skge_reset(hw); 3298 skge_reset(hw);
3351 3299
3352 for(i = 0; i < 2; i++) { 3300 for (i = 0; i < 2; i++) {
3353 struct net_device *dev = hw->dev[i]; 3301 struct net_device *dev = hw->dev[i];
3354 if (dev) { 3302 if (dev) {
3355 netif_device_attach(dev); 3303 netif_device_attach(dev);
3356 if(netif_running(dev)) 3304 if (netif_running(dev))
3357 skge_up(dev); 3305 skge_up(dev);
3358 } 3306 }
3359 } 3307 }
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 36c62b68fab4..14d0cc01fb9a 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -7,31 +7,6 @@
7/* PCI config registers */ 7/* PCI config registers */
8#define PCI_DEV_REG1 0x40 8#define PCI_DEV_REG1 0x40
9#define PCI_DEV_REG2 0x44 9#define PCI_DEV_REG2 0x44
10#ifndef PCI_VPD
11#define PCI_VPD 0x50
12#endif
13
14/* PCI_OUR_REG_2 32 bit Our Register 2 */
15enum {
16 PCI_VPD_WR_THR = 0xff<<24, /* Bit 31..24: VPD Write Threshold */
17 PCI_DEV_SEL = 0x7f<<17, /* Bit 23..17: EEPROM Device Select */
18 PCI_VPD_ROM_SZ = 7 <<14, /* Bit 16..14: VPD ROM Size */
19 /* Bit 13..12: reserved */
20 PCI_EN_DUMMY_RD = 1<<3, /* Enable Dummy Read */
21 PCI_REV_DESC = 1<<2, /* Reverse Desc. Bytes */
22 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
23};
24
25/* PCI_VPD_ADR_REG 16 bit VPD Address Register */
26enum {
27 PCI_VPD_FLAG = 1<<15, /* starts VPD rd/wr cycle */
28 PCI_VPD_ADR_MSK =0x7fffL, /* Bit 14.. 0: VPD Address Mask */
29 VPD_RES_ID = 0x82,
30 VPD_RES_READ = 0x90,
31 VPD_RES_WRITE = 0x81,
32 VPD_RES_END = 0x78,
33};
34
35 10
36#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ 11#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
37 PCI_STATUS_SIG_SYSTEM_ERROR | \ 12 PCI_STATUS_SIG_SYSTEM_ERROR | \
@@ -39,7 +14,6 @@ enum {
39 PCI_STATUS_REC_TARGET_ABORT | \ 14 PCI_STATUS_REC_TARGET_ABORT | \
40 PCI_STATUS_PARITY) 15 PCI_STATUS_PARITY)
41 16
42
43enum csr_regs { 17enum csr_regs {
44 B0_RAP = 0x0000, 18 B0_RAP = 0x0000,
45 B0_CTST = 0x0004, 19 B0_CTST = 0x0004,
@@ -229,8 +203,11 @@ enum {
229 IS_XA2_F = 1<<1, /* Q_XA2 End of Frame */ 203 IS_XA2_F = 1<<1, /* Q_XA2 End of Frame */
230 IS_XA2_C = 1<<0, /* Q_XA2 Encoding Error */ 204 IS_XA2_C = 1<<0, /* Q_XA2 Encoding Error */
231 205
232 IS_PORT_1 = IS_XA1_F| IS_R1_F| IS_MAC1, 206 IS_TO_PORT1 = IS_PA_TO_RX1 | IS_PA_TO_TX1,
233 IS_PORT_2 = IS_XA2_F| IS_R2_F| IS_MAC2, 207 IS_TO_PORT2 = IS_PA_TO_RX2 | IS_PA_TO_TX2,
208
209 IS_PORT_1 = IS_XA1_F| IS_R1_F | IS_TO_PORT1 | IS_MAC1,
210 IS_PORT_2 = IS_XA2_F| IS_R2_F | IS_TO_PORT2 | IS_MAC2,
234}; 211};
235 212
236 213
@@ -288,14 +265,6 @@ enum {
288 CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. for YUKON-Lite A3 */ 265 CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. for YUKON-Lite A3 */
289}; 266};
290 267
291/* B2_LD_TEST 8 bit EPROM loader test register */
292enum {
293 LD_T_ON = 1<<3, /* Loader Test mode on */
294 LD_T_OFF = 1<<2, /* Loader Test mode off */
295 LD_T_STEP = 1<<1, /* Decrement FPROM addr. Counter */
296 LD_START = 1<<0, /* Start loading FPROM */
297};
298
299/* B2_TI_CTRL 8 bit Timer control */ 268/* B2_TI_CTRL 8 bit Timer control */
300/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */ 269/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */
301enum { 270enum {
@@ -313,16 +282,6 @@ enum {
313 TIM_T_STEP = 1<<0, /* Test step */ 282 TIM_T_STEP = 1<<0, /* Test step */
314}; 283};
315 284
316/* B28_DPT_INI 32 bit Descriptor Poll Timer Init Val */
317/* B28_DPT_VAL 32 bit Descriptor Poll Timer Curr Val */
318/* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */
319enum {
320 DPT_MSK = 0x00ffffffL, /* Bit 23.. 0: Desc Poll Timer Bits */
321
322 DPT_START = 1<<1, /* Start Descriptor Poll Timer */
323 DPT_STOP = 1<<0, /* Stop Descriptor Poll Timer */
324};
325
326/* B2_GP_IO 32 bit General Purpose I/O Register */ 285/* B2_GP_IO 32 bit General Purpose I/O Register */
327enum { 286enum {
328 GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */ 287 GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */
@@ -348,30 +307,6 @@ enum {
348 GP_IO_0 = 1<<0, /* IO_0 pin */ 307 GP_IO_0 = 1<<0, /* IO_0 pin */
349}; 308};
350 309
351/* Rx/Tx Path related Arbiter Test Registers */
352/* B3_MA_TO_TEST 16 bit MAC Arbiter Timeout Test Reg */
353/* B3_MA_RC_TEST 16 bit MAC Arbiter Recovery Test Reg */
354/* B3_PA_TEST 16 bit Packet Arbiter Test Register */
355/* Bit 15, 11, 7, and 3 are reserved in B3_PA_TEST */
356enum {
357 TX2_T_EV = 1<<15,/* TX2 Timeout/Recv Event occured */
358 TX2_T_ON = 1<<14,/* TX2 Timeout/Recv Timer Test On */
359 TX2_T_OFF = 1<<13,/* TX2 Timeout/Recv Timer Tst Off */
360 TX2_T_STEP = 1<<12,/* TX2 Timeout/Recv Timer Step */
361 TX1_T_EV = 1<<11,/* TX1 Timeout/Recv Event occured */
362 TX1_T_ON = 1<<10,/* TX1 Timeout/Recv Timer Test On */
363 TX1_T_OFF = 1<<9, /* TX1 Timeout/Recv Timer Tst Off */
364 TX1_T_STEP = 1<<8, /* TX1 Timeout/Recv Timer Step */
365 RX2_T_EV = 1<<7, /* RX2 Timeout/Recv Event occured */
366 RX2_T_ON = 1<<6, /* RX2 Timeout/Recv Timer Test On */
367 RX2_T_OFF = 1<<5, /* RX2 Timeout/Recv Timer Tst Off */
368 RX2_T_STEP = 1<<4, /* RX2 Timeout/Recv Timer Step */
369 RX1_T_EV = 1<<3, /* RX1 Timeout/Recv Event occured */
370 RX1_T_ON = 1<<2, /* RX1 Timeout/Recv Timer Test On */
371 RX1_T_OFF = 1<<1, /* RX1 Timeout/Recv Timer Tst Off */
372 RX1_T_STEP = 1<<0, /* RX1 Timeout/Recv Timer Step */
373};
374
375/* Descriptor Bit Definition */ 310/* Descriptor Bit Definition */
376/* TxCtrl Transmit Buffer Control Field */ 311/* TxCtrl Transmit Buffer Control Field */
377/* RxCtrl Receive Buffer Control Field */ 312/* RxCtrl Receive Buffer Control Field */
@@ -428,14 +363,6 @@ enum {
428 RI_RST_SET = 1<<0, /* Set RAM Interface Reset */ 363 RI_RST_SET = 1<<0, /* Set RAM Interface Reset */
429}; 364};
430 365
431/* B3_RI_TEST 8 bit RAM Iface Test Register */
432enum {
433 RI_T_EV = 1<<3, /* Timeout Event occured */
434 RI_T_ON = 1<<2, /* Timeout Timer Test On */
435 RI_T_OFF = 1<<1, /* Timeout Timer Test Off */
436 RI_T_STEP = 1<<0, /* Timeout Timer Step */
437};
438
439/* MAC Arbiter Registers */ 366/* MAC Arbiter Registers */
440/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */ 367/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */
441enum { 368enum {
@@ -452,19 +379,6 @@ enum {
452#define SK_PKT_TO_MAX 0xffff /* Maximum value */ 379#define SK_PKT_TO_MAX 0xffff /* Maximum value */
453#define SK_RI_TO_53 36 /* RAM interface timeout */ 380#define SK_RI_TO_53 36 /* RAM interface timeout */
454 381
455
456/* B3_MA_RC_CTRL 16 bit MAC Arbiter Recovery Ctrl Reg */
457enum {
458 MA_ENA_REC_TX2 = 1<<7, /* Enable Recovery Timer TX2 */
459 MA_DIS_REC_TX2 = 1<<6, /* Disable Recovery Timer TX2 */
460 MA_ENA_REC_TX1 = 1<<5, /* Enable Recovery Timer TX1 */
461 MA_DIS_REC_TX1 = 1<<4, /* Disable Recovery Timer TX1 */
462 MA_ENA_REC_RX2 = 1<<3, /* Enable Recovery Timer RX2 */
463 MA_DIS_REC_RX2 = 1<<2, /* Disable Recovery Timer RX2 */
464 MA_ENA_REC_RX1 = 1<<1, /* Enable Recovery Timer RX1 */
465 MA_DIS_REC_RX1 = 1<<0, /* Disable Recovery Timer RX1 */
466};
467
468/* Packet Arbiter Registers */ 382/* Packet Arbiter Registers */
469/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */ 383/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */
470enum { 384enum {
@@ -488,7 +402,7 @@ enum {
488 PA_ENA_TO_TX1 | PA_ENA_TO_TX2) 402 PA_ENA_TO_TX1 | PA_ENA_TO_TX2)
489 403
490 404
491/* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */ 405/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
492/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */ 406/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
493/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */ 407/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
494/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */ 408/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
@@ -511,7 +425,7 @@ enum {
511/* 425/*
512 * Bank 4 - 5 426 * Bank 4 - 5
513 */ 427 */
514/* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */ 428/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
515enum { 429enum {
516 TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/ 430 TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/
517 TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */ 431 TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */
@@ -537,7 +451,7 @@ enum {
537 451
538/* Queue Register Offsets, use Q_ADDR() to access */ 452/* Queue Register Offsets, use Q_ADDR() to access */
539enum { 453enum {
540 B8_Q_REGS = 0x0400, /* base of Queue registers */ 454 B8_Q_REGS = 0x0400, /* base of Queue registers */
541 Q_D = 0x00, /* 8*32 bit Current Descriptor */ 455 Q_D = 0x00, /* 8*32 bit Current Descriptor */
542 Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */ 456 Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */
543 Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */ 457 Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */
@@ -618,8 +532,7 @@ enum {
618enum { 532enum {
619 PHY_ADDR_XMAC = 0<<8, 533 PHY_ADDR_XMAC = 0<<8,
620 PHY_ADDR_BCOM = 1<<8, 534 PHY_ADDR_BCOM = 1<<8,
621 PHY_ADDR_LONE = 3<<8, 535
622 PHY_ADDR_NAT = 0<<8,
623/* GPHY address (bits 15..11 of SMI control reg) */ 536/* GPHY address (bits 15..11 of SMI control reg) */
624 PHY_ADDR_MARV = 0, 537 PHY_ADDR_MARV = 0,
625}; 538};
@@ -986,7 +899,7 @@ enum {
986 LINKLED_BLINK_OFF = 0x10, 899 LINKLED_BLINK_OFF = 0x10,
987 LINKLED_BLINK_ON = 0x20, 900 LINKLED_BLINK_ON = 0x20,
988}; 901};
989 902
990/* GMAC and GPHY Control Registers (YUKON only) */ 903/* GMAC and GPHY Control Registers (YUKON only) */
991enum { 904enum {
992 GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */ 905 GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */
@@ -1151,54 +1064,6 @@ enum {
1151 PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */ 1064 PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */
1152}; 1065};
1153 1066
1154/* Level One-PHY Registers, indirect addressed over XMAC */
1155enum {
1156 PHY_LONE_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
1157 PHY_LONE_STAT = 0x01,/* 16 bit r/o PHY Status Register */
1158 PHY_LONE_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
1159 PHY_LONE_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
1160 PHY_LONE_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
1161 PHY_LONE_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
1162 PHY_LONE_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
1163 PHY_LONE_NEPG = 0x07,/* 16 bit r/w Next Page Register */
1164 PHY_LONE_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
1165 /* Level One-specific registers */
1166 PHY_LONE_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
1167 PHY_LONE_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
1168 PHY_LONE_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
1169 PHY_LONE_PORT_CFG = 0x10,/* 16 bit r/w Port Configuration Reg*/
1170 PHY_LONE_Q_STAT = 0x11,/* 16 bit r/o Quick Status Reg */
1171 PHY_LONE_INT_ENAB = 0x12,/* 16 bit r/w Interrupt Enable Reg */
1172 PHY_LONE_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
1173 PHY_LONE_LED_CFG = 0x14,/* 16 bit r/w LED Configuration Reg */
1174 PHY_LONE_PORT_CTRL = 0x15,/* 16 bit r/w Port Control Reg */
1175 PHY_LONE_CIM = 0x16,/* 16 bit r/o CIM Reg */
1176};
1177
1178/* National-PHY Registers, indirect addressed over XMAC */
1179enum {
1180 PHY_NAT_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
1181 PHY_NAT_STAT = 0x01,/* 16 bit r/w PHY Status Register */
1182 PHY_NAT_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
1183 PHY_NAT_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
1184 PHY_NAT_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
1185 PHY_NAT_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Ability Reg */
1186 PHY_NAT_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
1187 PHY_NAT_NEPG = 0x07,/* 16 bit r/w Next Page Register */
1188 PHY_NAT_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner Reg */
1189 /* National-specific registers */
1190 PHY_NAT_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
1191 PHY_NAT_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
1192 PHY_NAT_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Register */
1193 PHY_NAT_EXT_CTRL1 = 0x10,/* 16 bit r/o Extended Control Reg1 */
1194 PHY_NAT_Q_STAT1 = 0x11,/* 16 bit r/o Quick Status Reg1 */
1195 PHY_NAT_10B_OP = 0x12,/* 16 bit r/o 10Base-T Operations Reg */
1196 PHY_NAT_EXT_CTRL2 = 0x13,/* 16 bit r/o Extended Control Reg1 */
1197 PHY_NAT_Q_STAT2 = 0x14,/* 16 bit r/o Quick Status Reg2 */
1198
1199 PHY_NAT_PHY_ADDR = 0x19,/* 16 bit r/o PHY Address Register */
1200};
1201
1202enum { 1067enum {
1203 PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */ 1068 PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */
1204 PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */ 1069 PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */
@@ -1253,8 +1118,29 @@ enum {
1253 PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ 1118 PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
1254}; 1119};
1255 1120
1121/* Advertisement register bits */
1256enum { 1122enum {
1257 PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */ 1123 PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
1124 PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
1125 PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */
1126
1127 PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */
1128 PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */
1129 PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100mbps 4k packets */
1130 PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */
1131 PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */
1132 PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */
1133 PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */
1134 PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */
1135 PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1136 PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
1137 PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL |
1138 PHY_AN_100HALF | PHY_AN_100FULL,
1139};
1140
1141/* Xmac Specific */
1142enum {
1143 PHY_X_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
1258 PHY_X_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */ 1144 PHY_X_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
1259 PHY_X_AN_RFB = 3<<12,/* Bit 13..12: Remote Fault Bits */ 1145 PHY_X_AN_RFB = 3<<12,/* Bit 13..12: Remote Fault Bits */
1260 1146
@@ -1263,82 +1149,6 @@ enum {
1263 PHY_X_AN_FD = 1<<5, /* Bit 5: Full Duplex */ 1149 PHY_X_AN_FD = 1<<5, /* Bit 5: Full Duplex */
1264}; 1150};
1265 1151
1266enum {
1267 PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */
1268
1269 PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */
1270 PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */
1271 PHY_B_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1272};
1273
1274enum {
1275 PHY_L_AN_RF = 1<<13, /* Bit 13: Remote Fault */
1276 /* Bit 12: reserved */
1277 PHY_L_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */
1278 PHY_L_AN_PC = 1<<10, /* Bit 10: Pause Capable */
1279
1280 PHY_L_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1281};
1282
1283/* PHY_NAT_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement */
1284/* PHY_NAT_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
1285/* PHY_AN_NXT_PG (see XMAC) Bit 15: Request Next Page */
1286enum {
1287 PHY_N_AN_RF = 1<<13, /* Bit 13: Remote Fault */
1288
1289 PHY_N_AN_100F = 1<<11, /* Bit 11: 100Base-T2 FD Support */
1290 PHY_N_AN_100H = 1<<10, /* Bit 10: 100Base-T2 HD Support */
1291
1292 PHY_N_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1293};
1294
1295/* field type definition for PHY_x_AN_SEL */
1296enum {
1297 PHY_SEL_TYPE = 1, /* 00001 = Ethernet */
1298};
1299
1300enum {
1301 PHY_ANE_LP_NP = 1<<3, /* Bit 3: Link Partner can Next Page */
1302 PHY_ANE_LOC_NP = 1<<2, /* Bit 2: Local PHY can Next Page */
1303 PHY_ANE_RX_PG = 1<<1, /* Bit 1: Page Received */
1304};
1305
1306enum {
1307 PHY_ANE_PAR_DF = 1<<4, /* Bit 4: Parallel Detection Fault */
1308
1309 PHY_ANE_LP_CAP = 1<<0, /* Bit 0: Link Partner Auto-Neg. Cap. */
1310};
1311
1312enum {
1313 PHY_NP_MORE = 1<<15, /* Bit 15: More, Next Pages to follow */
1314 PHY_NP_ACK1 = 1<<14, /* Bit 14: (ro) Ack1, for receiving a message */
1315 PHY_NP_MSG_VAL = 1<<13, /* Bit 13: Message Page valid */
1316 PHY_NP_ACK2 = 1<<12, /* Bit 12: Ack2, comply with msg content */
1317 PHY_NP_TOG = 1<<11, /* Bit 11: Toggle Bit, ensure sync */
1318 PHY_NP_MSG = 0x07ff, /* Bit 10..0: Message from/to Link Partner */
1319};
1320
1321enum {
1322 PHY_X_EX_FD = 1<<15, /* Bit 15: Device Supports Full Duplex */
1323 PHY_X_EX_HD = 1<<14, /* Bit 14: Device Supports Half Duplex */
1324};
1325
1326enum {
1327 PHY_X_RS_PAUSE = 3<<7,/* Bit 8..7: selected Pause Mode */
1328 PHY_X_RS_HD = 1<<6, /* Bit 6: Half Duplex Mode selected */
1329 PHY_X_RS_FD = 1<<5, /* Bit 5: Full Duplex Mode selected */
1330 PHY_X_RS_ABLMIS = 1<<4, /* Bit 4: duplex or pause cap mismatch */
1331 PHY_X_RS_PAUMIS = 1<<3, /* Bit 3: pause capability mismatch */
1332};
1333
1334/** Remote Fault Bits (PHY_X_AN_RFB) encoding */
1335enum {
1336 X_RFB_OK = 0<<12,/* Bit 13..12 No errors, Link OK */
1337 X_RFB_LF = 1<<12, /* Bit 13..12 Link Failure */
1338 X_RFB_OFF = 2<<12,/* Bit 13..12 Offline */
1339 X_RFB_AN_ERR = 3<<12,/* Bit 13..12 Auto-Negotiation Error */
1340};
1341
1342/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */ 1152/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */
1343enum { 1153enum {
1344 PHY_X_P_NO_PAUSE = 0<<7,/* Bit 8..7: no Pause Mode */ 1154 PHY_X_P_NO_PAUSE = 0<<7,/* Bit 8..7: no Pause Mode */
@@ -1418,6 +1228,16 @@ enum {
1418 PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */ 1228 PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */
1419}; 1229};
1420 1230
1231/* PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
1232/* PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
1233enum {
1234 PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */
1235
1236 PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */
1237 PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */
1238};
1239
1240
1421/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/ 1241/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/
1422enum { 1242enum {
1423 PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */ 1243 PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */
@@ -1478,7 +1298,9 @@ enum {
1478 PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */ 1298 PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */
1479 PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */ 1299 PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */
1480}; 1300};
1481#define PHY_B_DEF_MSK (~(PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) 1301#define PHY_B_DEF_MSK \
1302 (~(PHY_B_IS_PSE | PHY_B_IS_AN_PR | PHY_B_IS_DUP_CHANGE | \
1303 PHY_B_IS_LSP_CHANGE | PHY_B_IS_LST_CHANGE))
1482 1304
1483/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */ 1305/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */
1484enum { 1306enum {
@@ -1495,166 +1317,6 @@ enum {
1495 PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. */ 1317 PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. */
1496}; 1318};
1497 1319
1498/*
1499 * Level One-Specific
1500 */
1501/***** PHY_LONE_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
1502enum {
1503 PHY_L_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
1504 PHY_L_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */
1505 PHY_L_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */
1506 PHY_L_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */
1507 PHY_L_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */
1508 PHY_L_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */
1509};
1510
1511/***** PHY_LONE_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
1512enum {
1513 PHY_L_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
1514 PHY_L_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
1515 PHY_L_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
1516 PHY_L_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */
1517 PHY_L_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */
1518 PHY_L_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */
1519
1520 PHY_L_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
1521
1522/***** PHY_LONE_EXT_STAT 16 bit r/o Extended Status Register *****/
1523 PHY_L_ES_X_FD_CAP = 1<<15, /* Bit 15: 1000Base-X FD capable */
1524 PHY_L_ES_X_HD_CAP = 1<<14, /* Bit 14: 1000Base-X HD capable */
1525 PHY_L_ES_T_FD_CAP = 1<<13, /* Bit 13: 1000Base-T FD capable */
1526 PHY_L_ES_T_HD_CAP = 1<<12, /* Bit 12: 1000Base-T HD capable */
1527};
1528
1529/***** PHY_LONE_PORT_CFG 16 bit r/w Port Configuration Reg *****/
1530enum {
1531 PHY_L_PC_REP_MODE = 1<<15, /* Bit 15: Repeater Mode */
1532
1533 PHY_L_PC_TX_DIS = 1<<13, /* Bit 13: Tx output Disabled */
1534 PHY_L_PC_BY_SCR = 1<<12, /* Bit 12: Bypass Scrambler */
1535 PHY_L_PC_BY_45 = 1<<11, /* Bit 11: Bypass 4B5B-Decoder */
1536 PHY_L_PC_JAB_DIS = 1<<10, /* Bit 10: Jabber Disabled */
1537 PHY_L_PC_SQE = 1<<9, /* Bit 9: Enable Heartbeat */
1538 PHY_L_PC_TP_LOOP = 1<<8, /* Bit 8: TP Loopback */
1539 PHY_L_PC_SSS = 1<<7, /* Bit 7: Smart Speed Selection */
1540 PHY_L_PC_FIFO_SIZE = 1<<6, /* Bit 6: FIFO Size */
1541 PHY_L_PC_PRE_EN = 1<<5, /* Bit 5: Preamble Enable */
1542 PHY_L_PC_CIM = 1<<4, /* Bit 4: Carrier Integrity Mon */
1543 PHY_L_PC_10_SER = 1<<3, /* Bit 3: Use Serial Output */
1544 PHY_L_PC_ANISOL = 1<<2, /* Bit 2: Unisolate Port */
1545 PHY_L_PC_TEN_BIT = 1<<1, /* Bit 1: 10bit iface mode on */
1546 PHY_L_PC_ALTCLOCK = 1<<0, /* Bit 0: (ro) ALTCLOCK Mode on */
1547};
1548
1549/***** PHY_LONE_Q_STAT 16 bit r/o Quick Status Reg *****/
1550enum {
1551 PHY_L_QS_D_RATE = 3<<14,/* Bit 15..14: Data Rate */
1552 PHY_L_QS_TX_STAT = 1<<13, /* Bit 13: Transmitting */
1553 PHY_L_QS_RX_STAT = 1<<12, /* Bit 12: Receiving */
1554 PHY_L_QS_COL_STAT = 1<<11, /* Bit 11: Collision */
1555 PHY_L_QS_L_STAT = 1<<10, /* Bit 10: Link is up */
1556 PHY_L_QS_DUP_MOD = 1<<9, /* Bit 9: Full/Half Duplex */
1557 PHY_L_QS_AN = 1<<8, /* Bit 8: AutoNeg is On */
1558 PHY_L_QS_AN_C = 1<<7, /* Bit 7: AN is Complete */
1559 PHY_L_QS_LLE = 7<<4,/* Bit 6..4: Line Length Estim. */
1560 PHY_L_QS_PAUSE = 1<<3, /* Bit 3: LP advertised Pause */
1561 PHY_L_QS_AS_PAUSE = 1<<2, /* Bit 2: LP adv. asym. Pause */
1562 PHY_L_QS_ISOLATE = 1<<1, /* Bit 1: CIM Isolated */
1563 PHY_L_QS_EVENT = 1<<0, /* Bit 0: Event has occurred */
1564};
1565
1566/***** PHY_LONE_INT_ENAB 16 bit r/w Interrupt Enable Reg *****/
1567/***** PHY_LONE_INT_STAT 16 bit r/o Interrupt Status Reg *****/
1568enum {
1569 PHY_L_IS_AN_F = 1<<13, /* Bit 13: Auto-Negotiation fault */
1570 PHY_L_IS_CROSS = 1<<11, /* Bit 11: Crossover used */
1571 PHY_L_IS_POL = 1<<10, /* Bit 10: Polarity correct. used */
1572 PHY_L_IS_SS = 1<<9, /* Bit 9: Smart Speed Downgrade */
1573 PHY_L_IS_CFULL = 1<<8, /* Bit 8: Counter Full */
1574 PHY_L_IS_AN_C = 1<<7, /* Bit 7: AutoNeg Complete */
1575 PHY_L_IS_SPEED = 1<<6, /* Bit 6: Speed Changed */
1576 PHY_L_IS_DUP = 1<<5, /* Bit 5: Duplex Changed */
1577 PHY_L_IS_LS = 1<<4, /* Bit 4: Link Status Changed */
1578 PHY_L_IS_ISOL = 1<<3, /* Bit 3: Isolate Occured */
1579 PHY_L_IS_MDINT = 1<<2, /* Bit 2: (ro) STAT: MII Int Pending */
1580 PHY_L_IS_INTEN = 1<<1, /* Bit 1: ENAB: Enable IRQs */
1581 PHY_L_IS_FORCE = 1<<0, /* Bit 0: ENAB: Force Interrupt */
1582};
1583
1584/* int. mask */
1585#define PHY_L_DEF_MSK (PHY_L_IS_LS | PHY_L_IS_ISOL | PHY_L_IS_INTEN)
1586
1587/***** PHY_LONE_LED_CFG 16 bit r/w LED Configuration Reg *****/
1588enum {
1589 PHY_L_LC_LEDC = 3<<14,/* Bit 15..14: Col/Blink/On/Off */
1590 PHY_L_LC_LEDR = 3<<12,/* Bit 13..12: Rx/Blink/On/Off */
1591 PHY_L_LC_LEDT = 3<<10,/* Bit 11..10: Tx/Blink/On/Off */
1592 PHY_L_LC_LEDG = 3<<8,/* Bit 9..8: Giga/Blink/On/Off */
1593 PHY_L_LC_LEDS = 3<<6,/* Bit 7..6: 10-100/Blink/On/Off */
1594 PHY_L_LC_LEDL = 3<<4,/* Bit 5..4: Link/Blink/On/Off */
1595 PHY_L_LC_LEDF = 3<<2,/* Bit 3..2: Duplex/Blink/On/Off */
1596 PHY_L_LC_PSTRECH= 1<<1, /* Bit 1: Strech LED Pulses */
1597 PHY_L_LC_FREQ = 1<<0, /* Bit 0: 30/100 ms */
1598};
1599
1600/***** PHY_LONE_PORT_CTRL 16 bit r/w Port Control Reg *****/
1601enum {
1602 PHY_L_PC_TX_TCLK = 1<<15, /* Bit 15: Enable TX_TCLK */
1603 PHY_L_PC_ALT_NP = 1<<13, /* Bit 14: Alternate Next Page */
1604 PHY_L_PC_GMII_ALT= 1<<12, /* Bit 13: Alternate GMII driver */
1605 PHY_L_PC_TEN_CRS = 1<<10, /* Bit 10: Extend CRS*/
1606};
1607
1608/***** PHY_LONE_CIM 16 bit r/o CIM Reg *****/
1609enum {
1610 PHY_L_CIM_ISOL = 0xff<<8,/* Bit 15..8: Isolate Count */
1611 PHY_L_CIM_FALSE_CAR = 0xff, /* Bit 7..0: False Carrier Count */
1612};
1613
1614/*
1615 * Pause Bits (PHY_L_AN_ASP and PHY_L_AN_PC) encoding
1616 */
1617enum {
1618 PHY_L_P_NO_PAUSE= 0<<10,/* Bit 11..10: no Pause Mode */
1619 PHY_L_P_SYM_MD = 1<<10, /* Bit 11..10: symmetric Pause Mode */
1620 PHY_L_P_ASYM_MD = 2<<10,/* Bit 11..10: asymmetric Pause Mode */
1621 PHY_L_P_BOTH_MD = 3<<10,/* Bit 11..10: both Pause Mode */
1622};
1623
1624/*
1625 * National-Specific
1626 */
1627/***** PHY_NAT_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
1628enum {
1629 PHY_N_1000C_TEST= 7<<13,/* Bit 15..13: Test Modes */
1630 PHY_N_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */
1631 PHY_N_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */
1632 PHY_N_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */
1633 PHY_N_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */
1634 PHY_N_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */
1635 PHY_N_1000C_APC = 1<<7, /* Bit 7: Asymmetric Pause Cap. */};
1636
1637
1638/***** PHY_NAT_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
1639enum {
1640 PHY_N_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
1641 PHY_N_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
1642 PHY_N_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
1643 PHY_N_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status*/
1644 PHY_N_1000S_LP_FD= 1<<11, /* Bit 11: Link Partner can FD */
1645 PHY_N_1000S_LP_HD= 1<<10, /* Bit 10: Link Partner can HD */
1646 PHY_N_1000C_LP_APC= 1<<9, /* Bit 9: LP Asym. Pause Cap. */
1647 PHY_N_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
1648};
1649
1650/***** PHY_NAT_EXT_STAT 16 bit r/o Extended Status Register *****/
1651enum {
1652 PHY_N_ES_X_FD_CAP= 1<<15, /* Bit 15: 1000Base-X FD capable */
1653 PHY_N_ES_X_HD_CAP= 1<<14, /* Bit 14: 1000Base-X HD capable */
1654 PHY_N_ES_T_FD_CAP= 1<<13, /* Bit 13: 1000Base-T FD capable */
1655 PHY_N_ES_T_HD_CAP= 1<<12, /* Bit 12: 1000Base-T HD capable */
1656};
1657
1658/** Marvell-Specific */ 1320/** Marvell-Specific */
1659enum { 1321enum {
1660 PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */ 1322 PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */
@@ -1718,7 +1380,7 @@ enum {
1718 PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */ 1380 PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */
1719}; 1381};
1720 1382
1721#define PHY_M_PC_MDI_XMODE(x) (((x)<<5) & PHY_M_PC_MDIX_MSK) 1383#define PHY_M_PC_MDI_XMODE(x) (((x)<<5) & PHY_M_PC_MDIX_MSK)
1722 1384
1723enum { 1385enum {
1724 PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */ 1386 PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */
@@ -2105,7 +1767,7 @@ enum {
2105 GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */ 1767 GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */
2106 GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */ 1768 GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */
2107}; 1769};
2108 1770
2109/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */ 1771/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */
2110enum { 1772enum {
2111 GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */ 1773 GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */
@@ -2127,7 +1789,7 @@ enum {
2127 1789
2128#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100) 1790#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
2129#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS) 1791#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS)
2130 1792
2131/* GM_TX_CTRL 16 bit r/w Transmit Control Register */ 1793/* GM_TX_CTRL 16 bit r/w Transmit Control Register */
2132enum { 1794enum {
2133 GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ 1795 GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
@@ -2138,7 +1800,7 @@ enum {
2138 1800
2139#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) 1801#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
2140#define TX_COL_DEF 0x04 1802#define TX_COL_DEF 0x04
2141 1803
2142/* GM_RX_CTRL 16 bit r/w Receive Control Register */ 1804/* GM_RX_CTRL 16 bit r/w Receive Control Register */
2143enum { 1805enum {
2144 GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */ 1806 GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */
@@ -2146,7 +1808,7 @@ enum {
2146 GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */ 1808 GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */
2147 GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */ 1809 GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */
2148}; 1810};
2149 1811
2150/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */ 1812/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */
2151enum { 1813enum {
2152 GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */ 1814 GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */
@@ -2171,7 +1833,7 @@ enum {
2171 GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */ 1833 GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */
2172 GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ 1834 GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
2173}; 1835};
2174 1836
2175#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK) 1837#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
2176#define DATA_BLIND_DEF 0x04 1838#define DATA_BLIND_DEF 0x04
2177 1839
@@ -2186,7 +1848,7 @@ enum {
2186 GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */ 1848 GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */
2187 GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */ 1849 GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */
2188}; 1850};
2189 1851
2190#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK) 1852#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK)
2191#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK) 1853#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK)
2192 1854
@@ -2195,7 +1857,7 @@ enum {
2195 GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */ 1857 GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */
2196 GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */ 1858 GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */
2197}; 1859};
2198 1860
2199/* Receive Frame Status Encoding */ 1861/* Receive Frame Status Encoding */
2200enum { 1862enum {
2201 GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */ 1863 GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */
@@ -2217,12 +1879,12 @@ enum {
2217/* 1879/*
2218 * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR) 1880 * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR)
2219 */ 1881 */
2220 GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR | 1882 GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR |
2221 GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC | 1883 GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC |
2222 GMR_FS_JABBER, 1884 GMR_FS_JABBER,
2223/* Rx GMAC FIFO Flush Mask (default) */ 1885/* Rx GMAC FIFO Flush Mask (default) */
2224 RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR | 1886 RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR |
2225 GMR_FS_BAD_FC | GMR_FS_GOOD_FC | GMR_FS_UN_SIZE | 1887 GMR_FS_BAD_FC | GMR_FS_GOOD_FC | GMR_FS_UN_SIZE |
2226 GMR_FS_JABBER, 1888 GMR_FS_JABBER,
2227}; 1889};
2228 1890
@@ -2540,10 +2202,6 @@ enum {
2540}; 2202};
2541 2203
2542 2204
2543/* XM_PHY_ADDR 16 bit r/w PHY Address Register */
2544#define XM_PHY_ADDR_SZ 0x1f /* Bit 4..0: PHY Address bits */
2545
2546
2547/* XM_GP_PORT 32 bit r/w General Purpose Port Register */ 2205/* XM_GP_PORT 32 bit r/w General Purpose Port Register */
2548enum { 2206enum {
2549 XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. in progress */ 2207 XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. in progress */
@@ -2662,8 +2320,8 @@ enum {
2662}; 2320};
2663 2321
2664#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I) 2322#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I)
2665#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\ 2323#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\
2666 XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA | XM_MD_CAA) 2324 XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA)
2667 2325
2668/* XM_STAT_CMD 16 bit r/w Statistics Command Register */ 2326/* XM_STAT_CMD 16 bit r/w Statistics Command Register */
2669enum { 2327enum {
@@ -2793,28 +2451,20 @@ struct skge_hw {
2793 u32 intr_mask; 2451 u32 intr_mask;
2794 struct net_device *dev[2]; 2452 struct net_device *dev[2];
2795 2453
2796 u8 mac_cfg;
2797 u8 chip_id; 2454 u8 chip_id;
2455 u8 chip_rev;
2798 u8 phy_type; 2456 u8 phy_type;
2799 u8 pmd_type; 2457 u8 pmd_type;
2800 u16 phy_addr; 2458 u16 phy_addr;
2459 u8 ports;
2801 2460
2802 u32 ram_size; 2461 u32 ram_size;
2803 u32 ram_offset; 2462 u32 ram_offset;
2804 2463
2805 struct tasklet_struct ext_tasklet; 2464 struct tasklet_struct ext_tasklet;
2806 spinlock_t phy_lock; 2465 spinlock_t phy_lock;
2807}; 2466};
2808 2467
2809static inline int isdualport(const struct skge_hw *hw)
2810{
2811 return !(hw->mac_cfg & CFG_SNG_MAC);
2812}
2813
2814static inline u8 chip_rev(const struct skge_hw *hw)
2815{
2816 return (hw->mac_cfg & CFG_CHIP_R_MSK) >> 4;
2817}
2818 2468
2819static inline int iscopper(const struct skge_hw *hw) 2469static inline int iscopper(const struct skge_hw *hw)
2820{ 2470{
@@ -2827,7 +2477,7 @@ enum {
2827 FLOW_MODE_REM_SEND = 2, /* Symmetric or just remote */ 2477 FLOW_MODE_REM_SEND = 2, /* Symmetric or just remote */
2828 FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */ 2478 FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */
2829}; 2479};
2830 2480
2831struct skge_port { 2481struct skge_port {
2832 u32 msg_enable; 2482 u32 msg_enable;
2833 struct skge_hw *hw; 2483 struct skge_hw *hw;
@@ -2853,8 +2503,8 @@ struct skge_port {
2853 void *mem; /* PCI memory for rings */ 2503 void *mem; /* PCI memory for rings */
2854 dma_addr_t dma; 2504 dma_addr_t dma;
2855 unsigned long mem_size; 2505 unsigned long mem_size;
2506 unsigned int rx_buf_size;
2856 2507
2857 struct timer_list link_check;
2858 struct timer_list led_blink; 2508 struct timer_list led_blink;
2859}; 2509};
2860 2510
@@ -2863,7 +2513,6 @@ struct skge_port {
2863static inline u32 skge_read32(const struct skge_hw *hw, int reg) 2513static inline u32 skge_read32(const struct skge_hw *hw, int reg)
2864{ 2514{
2865 return readl(hw->regs + reg); 2515 return readl(hw->regs + reg);
2866
2867} 2516}
2868 2517
2869static inline u16 skge_read16(const struct skge_hw *hw, int reg) 2518static inline u16 skge_read16(const struct skge_hw *hw, int reg)
@@ -2892,114 +2541,87 @@ static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val)
2892} 2541}
2893 2542
2894/* MAC Related Registers inside the device. */ 2543/* MAC Related Registers inside the device. */
2895#define SKGEMAC_REG(port,reg) (((port)<<7)+(reg)) 2544#define SK_REG(port,reg) (((port)<<7)+(reg))
2896 2545#define SK_XMAC_REG(port, reg) \
2897/* PCI config space can be accessed via memory mapped space */
2898#define SKGEPCI_REG(reg) ((reg)+ 0x380)
2899
2900#define SKGEXM_REG(port, reg) \
2901 ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1) 2546 ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1)
2902 2547
2903static inline u32 skge_xm_read32(const struct skge_hw *hw, int port, int reg) 2548static inline u32 xm_read32(const struct skge_hw *hw, int port, int reg)
2904{
2905 return skge_read32(hw, SKGEXM_REG(port,reg));
2906}
2907
2908static inline u16 skge_xm_read16(const struct skge_hw *hw, int port, int reg)
2909{ 2549{
2910 return skge_read16(hw, SKGEXM_REG(port,reg)); 2550 u32 v;
2551 v = skge_read16(hw, SK_XMAC_REG(port, reg));
2552 v |= (u32)skge_read16(hw, SK_XMAC_REG(port, reg+2)) << 16;
2553 return v;
2911} 2554}
2912 2555
2913static inline u8 skge_xm_read8(const struct skge_hw *hw, int port, int reg) 2556static inline u16 xm_read16(const struct skge_hw *hw, int port, int reg)
2914{ 2557{
2915 return skge_read8(hw, SKGEXM_REG(port,reg)); 2558 return skge_read16(hw, SK_XMAC_REG(port,reg));
2916} 2559}
2917 2560
2918static inline void skge_xm_write32(const struct skge_hw *hw, int port, int r, u32 v) 2561static inline void xm_write32(const struct skge_hw *hw, int port, int r, u32 v)
2919{ 2562{
2920 skge_write32(hw, SKGEXM_REG(port,r), v); 2563 skge_write16(hw, SK_XMAC_REG(port,r), v & 0xffff);
2564 skge_write16(hw, SK_XMAC_REG(port,r+2), v >> 16);
2921} 2565}
2922 2566
2923static inline void skge_xm_write16(const struct skge_hw *hw, int port, int r, u16 v) 2567static inline void xm_write16(const struct skge_hw *hw, int port, int r, u16 v)
2924{ 2568{
2925 skge_write16(hw, SKGEXM_REG(port,r), v); 2569 skge_write16(hw, SK_XMAC_REG(port,r), v);
2926} 2570}
2927 2571
2928static inline void skge_xm_write8(const struct skge_hw *hw, int port, int r, u8 v) 2572static inline void xm_outhash(const struct skge_hw *hw, int port, int reg,
2929{
2930 skge_write8(hw, SKGEXM_REG(port,r), v);
2931}
2932
2933static inline void skge_xm_outhash(const struct skge_hw *hw, int port, int reg,
2934 const u8 *hash) 2573 const u8 *hash)
2935{ 2574{
2936 skge_xm_write16(hw, port, reg, 2575 xm_write16(hw, port, reg, (u16)hash[0] | ((u16)hash[1] << 8));
2937 (u16)hash[0] | ((u16)hash[1] << 8)); 2576 xm_write16(hw, port, reg+2, (u16)hash[2] | ((u16)hash[3] << 8));
2938 skge_xm_write16(hw, port, reg+2, 2577 xm_write16(hw, port, reg+4, (u16)hash[4] | ((u16)hash[5] << 8));
2939 (u16)hash[2] | ((u16)hash[3] << 8)); 2578 xm_write16(hw, port, reg+6, (u16)hash[6] | ((u16)hash[7] << 8));
2940 skge_xm_write16(hw, port, reg+4,
2941 (u16)hash[4] | ((u16)hash[5] << 8));
2942 skge_xm_write16(hw, port, reg+6,
2943 (u16)hash[6] | ((u16)hash[7] << 8));
2944} 2579}
2945 2580
2946static inline void skge_xm_outaddr(const struct skge_hw *hw, int port, int reg, 2581static inline void xm_outaddr(const struct skge_hw *hw, int port, int reg,
2947 const u8 *addr) 2582 const u8 *addr)
2948{ 2583{
2949 skge_xm_write16(hw, port, reg, 2584 xm_write16(hw, port, reg, (u16)addr[0] | ((u16)addr[1] << 8));
2950 (u16)addr[0] | ((u16)addr[1] << 8)); 2585 xm_write16(hw, port, reg+2, (u16)addr[2] | ((u16)addr[3] << 8));
2951 skge_xm_write16(hw, port, reg, 2586 xm_write16(hw, port, reg+4, (u16)addr[4] | ((u16)addr[5] << 8));
2952 (u16)addr[2] | ((u16)addr[3] << 8));
2953 skge_xm_write16(hw, port, reg,
2954 (u16)addr[4] | ((u16)addr[5] << 8));
2955} 2587}
2956 2588
2589#define SK_GMAC_REG(port,reg) \
2590 (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
2957 2591
2958#define SKGEGMA_REG(port,reg) \ 2592static inline u16 gma_read16(const struct skge_hw *hw, int port, int reg)
2959 ((reg) + BASE_GMAC_1 + \
2960 (port) * (BASE_GMAC_2-BASE_GMAC_1))
2961
2962static inline u16 skge_gma_read16(const struct skge_hw *hw, int port, int reg)
2963{ 2593{
2964 return skge_read16(hw, SKGEGMA_REG(port,reg)); 2594 return skge_read16(hw, SK_GMAC_REG(port,reg));
2965} 2595}
2966 2596
2967static inline u32 skge_gma_read32(const struct skge_hw *hw, int port, int reg) 2597static inline u32 gma_read32(const struct skge_hw *hw, int port, int reg)
2968{ 2598{
2969 return (u32) skge_read16(hw, SKGEGMA_REG(port,reg)) 2599 return (u32) skge_read16(hw, SK_GMAC_REG(port,reg))
2970 | ((u32)skge_read16(hw, SKGEGMA_REG(port,reg+4)) << 16); 2600 | ((u32)skge_read16(hw, SK_GMAC_REG(port,reg+4)) << 16);
2971} 2601}
2972 2602
2973static inline u8 skge_gma_read8(const struct skge_hw *hw, int port, int reg) 2603static inline void gma_write16(const struct skge_hw *hw, int port, int r, u16 v)
2974{ 2604{
2975 return skge_read8(hw, SKGEGMA_REG(port,reg)); 2605 skge_write16(hw, SK_GMAC_REG(port,r), v);
2976} 2606}
2977 2607
2978static inline void skge_gma_write16(const struct skge_hw *hw, int port, int r, u16 v) 2608static inline void gma_write32(const struct skge_hw *hw, int port, int r, u32 v)
2979{ 2609{
2980 skge_write16(hw, SKGEGMA_REG(port,r), v); 2610 skge_write16(hw, SK_GMAC_REG(port, r), (u16) v);
2611 skge_write32(hw, SK_GMAC_REG(port, r+4), (u16)(v >> 16));
2981} 2612}
2982 2613
2983static inline void skge_gma_write32(const struct skge_hw *hw, int port, int r, u32 v) 2614static inline void gma_write8(const struct skge_hw *hw, int port, int r, u8 v)
2984{ 2615{
2985 skge_write16(hw, SKGEGMA_REG(port, r), (u16) v); 2616 skge_write8(hw, SK_GMAC_REG(port,r), v);
2986 skge_write32(hw, SKGEGMA_REG(port, r+4), (u16)(v >> 16));
2987} 2617}
2988 2618
2989static inline void skge_gma_write8(const struct skge_hw *hw, int port, int r, u8 v) 2619static inline void gma_set_addr(struct skge_hw *hw, int port, int reg,
2990{
2991 skge_write8(hw, SKGEGMA_REG(port,r), v);
2992}
2993
2994static inline void skge_gm_set_addr(struct skge_hw *hw, int port, int reg,
2995 const u8 *addr) 2620 const u8 *addr)
2996{ 2621{
2997 skge_gma_write16(hw, port, reg, 2622 gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8));
2998 (u16) addr[0] | ((u16) addr[1] << 8)); 2623 gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
2999 skge_gma_write16(hw, port, reg+4, 2624 gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
3000 (u16) addr[2] | ((u16) addr[3] << 8));
3001 skge_gma_write16(hw, port, reg+8,
3002 (u16) addr[4] | ((u16) addr[5] << 8));
3003} 2625}
3004 2626
3005#endif 2627#endif
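The skge.h hunks above replace the per-width SKGEXM_REG/SKGEGMA_REG helpers with the shorter SK_REG/SK_XMAC_REG/SK_GMAC_REG macros, and xm_read32()/xm_write32() now assemble a 32-bit XMAC value from two 16-bit accesses at reg and reg+2. The following is a minimal standalone sketch of that address arithmetic only; it is not driver code, and the BASE_XMAC_1/BASE_XMAC_2 values plus the mock register array are assumptions made solely so the model compiles and runs on its own.

/*
 * Standalone model (not driver code) of the accessor pattern added in
 * skge.h: a 32-bit XMAC register is read as two 16-bit halves, low half
 * at reg, high half at reg+2.
 */
#include <stdint.h>
#include <stdio.h>

#define BASE_XMAC_1 0x2000          /* assumed base of MAC 1 window */
#define BASE_XMAC_2 0x3000          /* assumed base of MAC 2 window */
#define SK_XMAC_REG(port, reg) \
        ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1)

static uint8_t mock_regs[0x4000];   /* stands in for the mapped BAR */

static uint16_t mock_read16(unsigned off)
{
        return (uint16_t)(mock_regs[off] | mock_regs[off + 1] << 8);
}

/* mirrors xm_read32(): low 16 bits at reg, high 16 bits at reg+2 */
static uint32_t model_xm_read32(int port, int reg)
{
        uint32_t v = mock_read16(SK_XMAC_REG(port, reg));
        v |= (uint32_t)mock_read16(SK_XMAC_REG(port, reg + 2)) << 16;
        return v;
}

int main(void)
{
        /* seed a fake 32-bit counter value and read it back */
        mock_regs[SK_XMAC_REG(0, 0x10)]     = 0x78;
        mock_regs[SK_XMAC_REG(0, 0x10) + 1] = 0x56;
        mock_regs[SK_XMAC_REG(0, 0x12)]     = 0x34;
        mock_regs[SK_XMAC_REG(0, 0x12) + 1] = 0x12;
        printf("0x%08x\n", (unsigned)model_xm_read32(0, 0x10)); /* 0x12345678 */
        return 0;
}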
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index a1478258d002..8a2df4dfbc59 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -65,7 +65,7 @@ static inline int is_zero_ether_addr(const u8 *addr)
65 */ 65 */
66static inline int is_multicast_ether_addr(const u8 *addr) 66static inline int is_multicast_ether_addr(const u8 *addr)
67{ 67{
68 return addr[0] & 0x01; 68 return ((addr[0] != 0xff) && (0x01 & addr[0]));
69} 69}
70 70
71/** 71/**
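The etherdevice.h change above narrows is_multicast_ether_addr() so that the all-ones broadcast address no longer reports as multicast. A small user-space sketch contrasting the old and new expressions from the hunk (the old_/new_ helper names are hypothetical, used only for this comparison):

#include <stdio.h>

typedef unsigned char u8;

static int old_is_multicast(const u8 *addr)
{
        return addr[0] & 0x01;                        /* broadcast counts too */
}

static int new_is_multicast(const u8 *addr)
{
        return (addr[0] != 0xff) && (0x01 & addr[0]); /* broadcast excluded */
}

int main(void)
{
        const u8 bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        const u8 mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        const u8 ucast[6] = { 0x00, 0x0c, 0x29, 0x12, 0x34, 0x56 };

        printf("broadcast: old=%d new=%d\n", old_is_multicast(bcast), new_is_multicast(bcast));
        printf("multicast: old=%d new=%d\n", old_is_multicast(mcast), new_is_multicast(mcast));
        printf("unicast:   old=%d new=%d\n", old_is_multicast(ucast), new_is_multicast(ucast));
        return 0;
}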
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
index f1d9b9e9dec7..065b702df563 100644
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -625,17 +625,6 @@ enum ieee80211_state {
625#define MAC_ARG(x) ((u8*)(x))[0],((u8*)(x))[1],((u8*)(x))[2],((u8*)(x))[3],((u8*)(x))[4],((u8*)(x))[5] 625#define MAC_ARG(x) ((u8*)(x))[0],((u8*)(x))[1],((u8*)(x))[2],((u8*)(x))[3],((u8*)(x))[4],((u8*)(x))[5]
626 626
627 627
628extern inline int is_multicast_ether_addr(const u8 *addr)
629{
630 return ((addr[0] != 0xff) && (0x01 & addr[0]));
631}
632
633extern inline int is_broadcast_ether_addr(const u8 *addr)
634{
635 return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \
636 (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
637}
638
639#define CFG_IEEE80211_RESERVE_FCS (1<<0) 628#define CFG_IEEE80211_RESERVE_FCS (1<<0)
640#define CFG_IEEE80211_COMPUTE_FCS (1<<1) 629#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
641 630