-rw-r--r--  drivers/net/3c59x.c             |   8
-rw-r--r--  drivers/net/Kconfig             |  10
-rw-r--r--  drivers/net/bonding/bond_main.c |  11
-rw-r--r--  drivers/net/bonding/bonding.h   |   4
-rw-r--r--  drivers/net/e1000e/ethtool.c    |  35
-rw-r--r--  drivers/net/e1000e/hw.h         |   2
-rw-r--r--  drivers/net/forcedeth.c         | 168
-rw-r--r--  drivers/net/gianfar.c           |   7
-rw-r--r--  drivers/net/ibm_newemac/mal.c   |  25
-rw-r--r--  drivers/net/skge.c              | 485
-rw-r--r--  drivers/net/skge.h              |  17
-rw-r--r--  drivers/net/tokenring/3c359.c   |   2
12 files changed, 558 insertions(+), 216 deletions(-)
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 8d3893da06f5..862f47223fdc 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -3118,7 +3118,13 @@ static void acpi_set_WOL(struct net_device *dev) | |||
3118 | iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD); | 3118 | iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD); |
3119 | iowrite16(RxEnable, ioaddr + EL3_CMD); | 3119 | iowrite16(RxEnable, ioaddr + EL3_CMD); |
3120 | 3120 | ||
3121 | pci_enable_wake(VORTEX_PCI(vp), 0, 1); | 3121 | if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) { |
3122 | printk(KERN_INFO "%s: WOL not supported.\n", | ||
3123 | pci_name(VORTEX_PCI(vp))); | ||
3124 | |||
3125 | vp->enable_wol = 0; | ||
3126 | return; | ||
3127 | } | ||
3122 | 3128 | ||
3123 | /* Change the power state to D3; RxEnable doesn't take effect. */ | 3129 | /* Change the power state to D3; RxEnable doesn't take effect. */ |
3124 | pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot); | 3130 | pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot); |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8f99a0626616..83d52c8acab0 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2173,6 +2173,16 @@ config SKGE | |||
2173 | To compile this driver as a module, choose M here: the module | 2173 | To compile this driver as a module, choose M here: the module |
2174 | will be called skge. This is recommended. | 2174 | will be called skge. This is recommended. |
2175 | 2175 | ||
2176 | config SKGE_DEBUG | ||
2177 | bool "Debugging interface" | ||
2178 | depends on SKGE && DEBUG_FS | ||
2179 | help | ||
2180 | This option adds the ability to dump driver state for debugging. | ||
2181 | The file debugfs/skge/ethX displays the state of the internal | ||
2182 | transmit and receive rings. | ||
2183 | |||
2184 | If unsure, say N. | ||
2185 | |||
2176 | config SKY2 | 2186 | config SKY2 |
2177 | tristate "SysKonnect Yukon2 support" | 2187 | tristate "SysKonnect Yukon2 support" |
2178 | depends on PCI | 2188 | depends on PCI |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index db80f243dd37..6f85cc31f8a2 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1263,6 +1263,7 @@ static void bond_setup_by_slave(struct net_device *bond_dev, | |||
1263 | struct bonding *bond = bond_dev->priv; | 1263 | struct bonding *bond = bond_dev->priv; |
1264 | 1264 | ||
1265 | bond_dev->neigh_setup = slave_dev->neigh_setup; | 1265 | bond_dev->neigh_setup = slave_dev->neigh_setup; |
1266 | bond_dev->header_ops = slave_dev->header_ops; | ||
1266 | 1267 | ||
1267 | bond_dev->type = slave_dev->type; | 1268 | bond_dev->type = slave_dev->type; |
1268 | bond_dev->hard_header_len = slave_dev->hard_header_len; | 1269 | bond_dev->hard_header_len = slave_dev->hard_header_len; |
@@ -3351,7 +3352,10 @@ static int bond_slave_netdev_event(unsigned long event, struct net_device *slave | |||
3351 | switch (event) { | 3352 | switch (event) { |
3352 | case NETDEV_UNREGISTER: | 3353 | case NETDEV_UNREGISTER: |
3353 | if (bond_dev) { | 3354 | if (bond_dev) { |
3354 | bond_release(bond_dev, slave_dev); | 3355 | if (bond->setup_by_slave) |
3356 | bond_release_and_destroy(bond_dev, slave_dev); | ||
3357 | else | ||
3358 | bond_release(bond_dev, slave_dev); | ||
3355 | } | 3359 | } |
3356 | break; | 3360 | break; |
3357 | case NETDEV_CHANGE: | 3361 | case NETDEV_CHANGE: |
@@ -3366,11 +3370,6 @@ static int bond_slave_netdev_event(unsigned long event, struct net_device *slave | |||
3366 | * ... Or is it this? | 3370 | * ... Or is it this? |
3367 | */ | 3371 | */ |
3368 | break; | 3372 | break; |
3369 | case NETDEV_GOING_DOWN: | ||
3370 | dprintk("slave %s is going down\n", slave_dev->name); | ||
3371 | if (bond->setup_by_slave) | ||
3372 | bond_release_and_destroy(bond_dev, slave_dev); | ||
3373 | break; | ||
3374 | case NETDEV_CHANGEMTU: | 3373 | case NETDEV_CHANGEMTU: |
3375 | /* | 3374 | /* |
3376 | * TODO: Should slaves be allowed to | 3375 | * TODO: Should slaves be allowed to |
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index a8bbd563265c..b8180600a309 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -22,8 +22,8 @@ | |||
22 | #include "bond_3ad.h" | 22 | #include "bond_3ad.h" |
23 | #include "bond_alb.h" | 23 | #include "bond_alb.h" |
24 | 24 | ||
25 | #define DRV_VERSION "3.2.0" | 25 | #define DRV_VERSION "3.2.1" |
26 | #define DRV_RELDATE "September 13, 2007" | 26 | #define DRV_RELDATE "October 15, 2007" |
27 | #define DRV_NAME "bonding" | 27 | #define DRV_NAME "bonding" |
28 | #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" | 28 | #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" |
29 | 29 | ||
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index b7a7e2ae5e13..0666e62e9ad2 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -110,6 +110,7 @@ static int e1000_get_settings(struct net_device *netdev, | |||
110 | { | 110 | { |
111 | struct e1000_adapter *adapter = netdev_priv(netdev); | 111 | struct e1000_adapter *adapter = netdev_priv(netdev); |
112 | struct e1000_hw *hw = &adapter->hw; | 112 | struct e1000_hw *hw = &adapter->hw; |
113 | u32 status; | ||
113 | 114 | ||
114 | if (hw->media_type == e1000_media_type_copper) { | 115 | if (hw->media_type == e1000_media_type_copper) { |
115 | 116 | ||
@@ -147,16 +148,16 @@ static int e1000_get_settings(struct net_device *netdev, | |||
147 | ecmd->transceiver = XCVR_EXTERNAL; | 148 | ecmd->transceiver = XCVR_EXTERNAL; |
148 | } | 149 | } |
149 | 150 | ||
150 | if (er32(STATUS) & E1000_STATUS_LU) { | 151 | status = er32(STATUS); |
151 | 152 | if (status & E1000_STATUS_LU) { | |
152 | adapter->hw.mac.ops.get_link_up_info(hw, &adapter->link_speed, | 153 | if (status & E1000_STATUS_SPEED_1000) |
153 | &adapter->link_duplex); | 154 | ecmd->speed = 1000; |
154 | ecmd->speed = adapter->link_speed; | 155 | else if (status & E1000_STATUS_SPEED_100) |
155 | 156 | ecmd->speed = 100; | |
156 | /* unfortunately FULL_DUPLEX != DUPLEX_FULL | 157 | else |
157 | * and HALF_DUPLEX != DUPLEX_HALF */ | 158 | ecmd->speed = 10; |
158 | 159 | ||
159 | if (adapter->link_duplex == FULL_DUPLEX) | 160 | if (status & E1000_STATUS_FD) |
160 | ecmd->duplex = DUPLEX_FULL; | 161 | ecmd->duplex = DUPLEX_FULL; |
161 | else | 162 | else |
162 | ecmd->duplex = DUPLEX_HALF; | 163 | ecmd->duplex = DUPLEX_HALF; |
@@ -170,6 +171,16 @@ static int e1000_get_settings(struct net_device *netdev, | |||
170 | return 0; | 171 | return 0; |
171 | } | 172 | } |
172 | 173 | ||
174 | static u32 e1000_get_link(struct net_device *netdev) | ||
175 | { | ||
176 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
177 | struct e1000_hw *hw = &adapter->hw; | ||
178 | u32 status; | ||
179 | |||
180 | status = er32(STATUS); | ||
181 | return (status & E1000_STATUS_LU); | ||
182 | } | ||
183 | |||
173 | static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) | 184 | static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) |
174 | { | 185 | { |
175 | struct e1000_mac_info *mac = &adapter->hw.mac; | 186 | struct e1000_mac_info *mac = &adapter->hw.mac; |
@@ -1451,11 +1462,11 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) | |||
1451 | } | 1462 | } |
1452 | 1463 | ||
1453 | *data = e1000_setup_desc_rings(adapter); | 1464 | *data = e1000_setup_desc_rings(adapter); |
1454 | if (data) | 1465 | if (*data) |
1455 | goto out; | 1466 | goto out; |
1456 | 1467 | ||
1457 | *data = e1000_setup_loopback_test(adapter); | 1468 | *data = e1000_setup_loopback_test(adapter); |
1458 | if (data) | 1469 | if (*data) |
1459 | goto err_loopback; | 1470 | goto err_loopback; |
1460 | 1471 | ||
1461 | *data = e1000_run_loopback_test(adapter); | 1472 | *data = e1000_run_loopback_test(adapter); |
@@ -1751,7 +1762,7 @@ static const struct ethtool_ops e1000_ethtool_ops = { | |||
1751 | .get_msglevel = e1000_get_msglevel, | 1762 | .get_msglevel = e1000_get_msglevel, |
1752 | .set_msglevel = e1000_set_msglevel, | 1763 | .set_msglevel = e1000_set_msglevel, |
1753 | .nway_reset = e1000_nway_reset, | 1764 | .nway_reset = e1000_nway_reset, |
1754 | .get_link = ethtool_op_get_link, | 1765 | .get_link = e1000_get_link, |
1755 | .get_eeprom_len = e1000_get_eeprom_len, | 1766 | .get_eeprom_len = e1000_get_eeprom_len, |
1756 | .get_eeprom = e1000_get_eeprom, | 1767 | .get_eeprom = e1000_get_eeprom, |
1757 | .set_eeprom = e1000_set_eeprom, | 1768 | .set_eeprom = e1000_set_eeprom, |
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index aa82f1afb7fb..64515789fd4d 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -852,7 +852,7 @@ struct e1000_hw { | |||
852 | 852 | ||
853 | #ifdef DEBUG | 853 | #ifdef DEBUG |
854 | #define hw_dbg(hw, format, arg...) \ | 854 | #define hw_dbg(hw, format, arg...) \ |
855 | printk(KERN_DEBUG, "%s: " format, e1000e_get_hw_dev_name(hw), ##arg); | 855 | printk(KERN_DEBUG "%s: " format, e1000e_get_hw_dev_name(hw), ##arg) |
856 | #else | 856 | #else |
857 | static inline int __attribute__ ((format (printf, 2, 3))) | 857 | static inline int __attribute__ ((format (printf, 2, 3))) |
858 | hw_dbg(struct e1000_hw *hw, const char *format, ...) | 858 | hw_dbg(struct e1000_hw *hw, const char *format, ...) |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index dae30b731342..cfbb7aacfe94 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -128,7 +128,7 @@ | |||
128 | #else | 128 | #else |
129 | #define DRIVERNAPI | 129 | #define DRIVERNAPI |
130 | #endif | 130 | #endif |
131 | #define FORCEDETH_VERSION "0.60" | 131 | #define FORCEDETH_VERSION "0.61" |
132 | #define DRV_NAME "forcedeth" | 132 | #define DRV_NAME "forcedeth" |
133 | 133 | ||
134 | #include <linux/module.h> | 134 | #include <linux/module.h> |
@@ -752,7 +752,6 @@ struct fe_priv { | |||
752 | 752 | ||
753 | /* General data: | 753 | /* General data: |
754 | * Locking: spin_lock(&np->lock); */ | 754 | * Locking: spin_lock(&np->lock); */ |
755 | struct net_device_stats stats; | ||
756 | struct nv_ethtool_stats estats; | 755 | struct nv_ethtool_stats estats; |
757 | int in_shutdown; | 756 | int in_shutdown; |
758 | u32 linkspeed; | 757 | u32 linkspeed; |
@@ -1505,15 +1504,16 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev) | |||
1505 | nv_get_hw_stats(dev); | 1504 | nv_get_hw_stats(dev); |
1506 | 1505 | ||
1507 | /* copy to net_device stats */ | 1506 | /* copy to net_device stats */ |
1508 | np->stats.tx_bytes = np->estats.tx_bytes; | 1507 | dev->stats.tx_bytes = np->estats.tx_bytes; |
1509 | np->stats.tx_fifo_errors = np->estats.tx_fifo_errors; | 1508 | dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; |
1510 | np->stats.tx_carrier_errors = np->estats.tx_carrier_errors; | 1509 | dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; |
1511 | np->stats.rx_crc_errors = np->estats.rx_crc_errors; | 1510 | dev->stats.rx_crc_errors = np->estats.rx_crc_errors; |
1512 | np->stats.rx_over_errors = np->estats.rx_over_errors; | 1511 | dev->stats.rx_over_errors = np->estats.rx_over_errors; |
1513 | np->stats.rx_errors = np->estats.rx_errors_total; | 1512 | dev->stats.rx_errors = np->estats.rx_errors_total; |
1514 | np->stats.tx_errors = np->estats.tx_errors_total; | 1513 | dev->stats.tx_errors = np->estats.tx_errors_total; |
1515 | } | 1514 | } |
1516 | return &np->stats; | 1515 | |
1516 | return &dev->stats; | ||
1517 | } | 1517 | } |
1518 | 1518 | ||
1519 | /* | 1519 | /* |
@@ -1733,7 +1733,7 @@ static void nv_drain_tx(struct net_device *dev) | |||
1733 | np->tx_ring.ex[i].buflow = 0; | 1733 | np->tx_ring.ex[i].buflow = 0; |
1734 | } | 1734 | } |
1735 | if (nv_release_txskb(dev, &np->tx_skb[i])) | 1735 | if (nv_release_txskb(dev, &np->tx_skb[i])) |
1736 | np->stats.tx_dropped++; | 1736 | dev->stats.tx_dropped++; |
1737 | } | 1737 | } |
1738 | } | 1738 | } |
1739 | 1739 | ||
@@ -2049,13 +2049,13 @@ static void nv_tx_done(struct net_device *dev) | |||
2049 | if (flags & NV_TX_LASTPACKET) { | 2049 | if (flags & NV_TX_LASTPACKET) { |
2050 | if (flags & NV_TX_ERROR) { | 2050 | if (flags & NV_TX_ERROR) { |
2051 | if (flags & NV_TX_UNDERFLOW) | 2051 | if (flags & NV_TX_UNDERFLOW) |
2052 | np->stats.tx_fifo_errors++; | 2052 | dev->stats.tx_fifo_errors++; |
2053 | if (flags & NV_TX_CARRIERLOST) | 2053 | if (flags & NV_TX_CARRIERLOST) |
2054 | np->stats.tx_carrier_errors++; | 2054 | dev->stats.tx_carrier_errors++; |
2055 | np->stats.tx_errors++; | 2055 | dev->stats.tx_errors++; |
2056 | } else { | 2056 | } else { |
2057 | np->stats.tx_packets++; | 2057 | dev->stats.tx_packets++; |
2058 | np->stats.tx_bytes += np->get_tx_ctx->skb->len; | 2058 | dev->stats.tx_bytes += np->get_tx_ctx->skb->len; |
2059 | } | 2059 | } |
2060 | dev_kfree_skb_any(np->get_tx_ctx->skb); | 2060 | dev_kfree_skb_any(np->get_tx_ctx->skb); |
2061 | np->get_tx_ctx->skb = NULL; | 2061 | np->get_tx_ctx->skb = NULL; |
@@ -2064,13 +2064,13 @@ static void nv_tx_done(struct net_device *dev) | |||
2064 | if (flags & NV_TX2_LASTPACKET) { | 2064 | if (flags & NV_TX2_LASTPACKET) { |
2065 | if (flags & NV_TX2_ERROR) { | 2065 | if (flags & NV_TX2_ERROR) { |
2066 | if (flags & NV_TX2_UNDERFLOW) | 2066 | if (flags & NV_TX2_UNDERFLOW) |
2067 | np->stats.tx_fifo_errors++; | 2067 | dev->stats.tx_fifo_errors++; |
2068 | if (flags & NV_TX2_CARRIERLOST) | 2068 | if (flags & NV_TX2_CARRIERLOST) |
2069 | np->stats.tx_carrier_errors++; | 2069 | dev->stats.tx_carrier_errors++; |
2070 | np->stats.tx_errors++; | 2070 | dev->stats.tx_errors++; |
2071 | } else { | 2071 | } else { |
2072 | np->stats.tx_packets++; | 2072 | dev->stats.tx_packets++; |
2073 | np->stats.tx_bytes += np->get_tx_ctx->skb->len; | 2073 | dev->stats.tx_bytes += np->get_tx_ctx->skb->len; |
2074 | } | 2074 | } |
2075 | dev_kfree_skb_any(np->get_tx_ctx->skb); | 2075 | dev_kfree_skb_any(np->get_tx_ctx->skb); |
2076 | np->get_tx_ctx->skb = NULL; | 2076 | np->get_tx_ctx->skb = NULL; |
@@ -2107,7 +2107,7 @@ static void nv_tx_done_optimized(struct net_device *dev, int limit) | |||
2107 | 2107 | ||
2108 | if (flags & NV_TX2_LASTPACKET) { | 2108 | if (flags & NV_TX2_LASTPACKET) { |
2109 | if (!(flags & NV_TX2_ERROR)) | 2109 | if (!(flags & NV_TX2_ERROR)) |
2110 | np->stats.tx_packets++; | 2110 | dev->stats.tx_packets++; |
2111 | dev_kfree_skb_any(np->get_tx_ctx->skb); | 2111 | dev_kfree_skb_any(np->get_tx_ctx->skb); |
2112 | np->get_tx_ctx->skb = NULL; | 2112 | np->get_tx_ctx->skb = NULL; |
2113 | } | 2113 | } |
@@ -2268,13 +2268,13 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2268 | { | 2268 | { |
2269 | struct fe_priv *np = netdev_priv(dev); | 2269 | struct fe_priv *np = netdev_priv(dev); |
2270 | u32 flags; | 2270 | u32 flags; |
2271 | u32 rx_processed_cnt = 0; | 2271 | int rx_work = 0; |
2272 | struct sk_buff *skb; | 2272 | struct sk_buff *skb; |
2273 | int len; | 2273 | int len; |
2274 | 2274 | ||
2275 | while((np->get_rx.orig != np->put_rx.orig) && | 2275 | while((np->get_rx.orig != np->put_rx.orig) && |
2276 | !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && | 2276 | !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && |
2277 | (rx_processed_cnt++ < limit)) { | 2277 | (rx_work < limit)) { |
2278 | 2278 | ||
2279 | dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n", | 2279 | dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n", |
2280 | dev->name, flags); | 2280 | dev->name, flags); |
@@ -2308,7 +2308,7 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2308 | if (flags & NV_RX_ERROR4) { | 2308 | if (flags & NV_RX_ERROR4) { |
2309 | len = nv_getlen(dev, skb->data, len); | 2309 | len = nv_getlen(dev, skb->data, len); |
2310 | if (len < 0) { | 2310 | if (len < 0) { |
2311 | np->stats.rx_errors++; | 2311 | dev->stats.rx_errors++; |
2312 | dev_kfree_skb(skb); | 2312 | dev_kfree_skb(skb); |
2313 | goto next_pkt; | 2313 | goto next_pkt; |
2314 | } | 2314 | } |
@@ -2322,12 +2322,12 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2322 | /* the rest are hard errors */ | 2322 | /* the rest are hard errors */ |
2323 | else { | 2323 | else { |
2324 | if (flags & NV_RX_MISSEDFRAME) | 2324 | if (flags & NV_RX_MISSEDFRAME) |
2325 | np->stats.rx_missed_errors++; | 2325 | dev->stats.rx_missed_errors++; |
2326 | if (flags & NV_RX_CRCERR) | 2326 | if (flags & NV_RX_CRCERR) |
2327 | np->stats.rx_crc_errors++; | 2327 | dev->stats.rx_crc_errors++; |
2328 | if (flags & NV_RX_OVERFLOW) | 2328 | if (flags & NV_RX_OVERFLOW) |
2329 | np->stats.rx_over_errors++; | 2329 | dev->stats.rx_over_errors++; |
2330 | np->stats.rx_errors++; | 2330 | dev->stats.rx_errors++; |
2331 | dev_kfree_skb(skb); | 2331 | dev_kfree_skb(skb); |
2332 | goto next_pkt; | 2332 | goto next_pkt; |
2333 | } | 2333 | } |
@@ -2343,7 +2343,7 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2343 | if (flags & NV_RX2_ERROR4) { | 2343 | if (flags & NV_RX2_ERROR4) { |
2344 | len = nv_getlen(dev, skb->data, len); | 2344 | len = nv_getlen(dev, skb->data, len); |
2345 | if (len < 0) { | 2345 | if (len < 0) { |
2346 | np->stats.rx_errors++; | 2346 | dev->stats.rx_errors++; |
2347 | dev_kfree_skb(skb); | 2347 | dev_kfree_skb(skb); |
2348 | goto next_pkt; | 2348 | goto next_pkt; |
2349 | } | 2349 | } |
@@ -2357,10 +2357,10 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2357 | /* the rest are hard errors */ | 2357 | /* the rest are hard errors */ |
2358 | else { | 2358 | else { |
2359 | if (flags & NV_RX2_CRCERR) | 2359 | if (flags & NV_RX2_CRCERR) |
2360 | np->stats.rx_crc_errors++; | 2360 | dev->stats.rx_crc_errors++; |
2361 | if (flags & NV_RX2_OVERFLOW) | 2361 | if (flags & NV_RX2_OVERFLOW) |
2362 | np->stats.rx_over_errors++; | 2362 | dev->stats.rx_over_errors++; |
2363 | np->stats.rx_errors++; | 2363 | dev->stats.rx_errors++; |
2364 | dev_kfree_skb(skb); | 2364 | dev_kfree_skb(skb); |
2365 | goto next_pkt; | 2365 | goto next_pkt; |
2366 | } | 2366 | } |
@@ -2389,16 +2389,18 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2389 | netif_rx(skb); | 2389 | netif_rx(skb); |
2390 | #endif | 2390 | #endif |
2391 | dev->last_rx = jiffies; | 2391 | dev->last_rx = jiffies; |
2392 | np->stats.rx_packets++; | 2392 | dev->stats.rx_packets++; |
2393 | np->stats.rx_bytes += len; | 2393 | dev->stats.rx_bytes += len; |
2394 | next_pkt: | 2394 | next_pkt: |
2395 | if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) | 2395 | if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) |
2396 | np->get_rx.orig = np->first_rx.orig; | 2396 | np->get_rx.orig = np->first_rx.orig; |
2397 | if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) | 2397 | if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) |
2398 | np->get_rx_ctx = np->first_rx_ctx; | 2398 | np->get_rx_ctx = np->first_rx_ctx; |
2399 | |||
2400 | rx_work++; | ||
2399 | } | 2401 | } |
2400 | 2402 | ||
2401 | return rx_processed_cnt; | 2403 | return rx_work; |
2402 | } | 2404 | } |
2403 | 2405 | ||
2404 | static int nv_rx_process_optimized(struct net_device *dev, int limit) | 2406 | static int nv_rx_process_optimized(struct net_device *dev, int limit) |
@@ -2505,8 +2507,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) | |||
2505 | } | 2507 | } |
2506 | 2508 | ||
2507 | dev->last_rx = jiffies; | 2509 | dev->last_rx = jiffies; |
2508 | np->stats.rx_packets++; | 2510 | dev->stats.rx_packets++; |
2509 | np->stats.rx_bytes += len; | 2511 | dev->stats.rx_bytes += len; |
2510 | } else { | 2512 | } else { |
2511 | dev_kfree_skb(skb); | 2513 | dev_kfree_skb(skb); |
2512 | } | 2514 | } |
@@ -3727,7 +3729,7 @@ static void nv_do_stats_poll(unsigned long data) | |||
3727 | static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | 3729 | static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
3728 | { | 3730 | { |
3729 | struct fe_priv *np = netdev_priv(dev); | 3731 | struct fe_priv *np = netdev_priv(dev); |
3730 | strcpy(info->driver, "forcedeth"); | 3732 | strcpy(info->driver, DRV_NAME); |
3731 | strcpy(info->version, FORCEDETH_VERSION); | 3733 | strcpy(info->version, FORCEDETH_VERSION); |
3732 | strcpy(info->bus_info, pci_name(np->pci_dev)); | 3734 | strcpy(info->bus_info, pci_name(np->pci_dev)); |
3733 | } | 3735 | } |
@@ -4991,6 +4993,11 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4991 | u32 phystate_orig = 0, phystate; | 4993 | u32 phystate_orig = 0, phystate; |
4992 | int phyinitialized = 0; | 4994 | int phyinitialized = 0; |
4993 | DECLARE_MAC_BUF(mac); | 4995 | DECLARE_MAC_BUF(mac); |
4996 | static int printed_version; | ||
4997 | |||
4998 | if (!printed_version++) | ||
4999 | printk(KERN_INFO "%s: Reverse Engineered nForce ethernet" | ||
5000 | " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION); | ||
4994 | 5001 | ||
4995 | dev = alloc_etherdev(sizeof(struct fe_priv)); | 5002 | dev = alloc_etherdev(sizeof(struct fe_priv)); |
4996 | err = -ENOMEM; | 5003 | err = -ENOMEM; |
@@ -5014,11 +5021,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5014 | np->stats_poll.function = &nv_do_stats_poll; /* timer handler */ | 5021 | np->stats_poll.function = &nv_do_stats_poll; /* timer handler */ |
5015 | 5022 | ||
5016 | err = pci_enable_device(pci_dev); | 5023 | err = pci_enable_device(pci_dev); |
5017 | if (err) { | 5024 | if (err) |
5018 | printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n", | ||
5019 | err, pci_name(pci_dev)); | ||
5020 | goto out_free; | 5025 | goto out_free; |
5021 | } | ||
5022 | 5026 | ||
5023 | pci_set_master(pci_dev); | 5027 | pci_set_master(pci_dev); |
5024 | 5028 | ||
@@ -5047,8 +5051,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5047 | } | 5051 | } |
5048 | } | 5052 | } |
5049 | if (i == DEVICE_COUNT_RESOURCE) { | 5053 | if (i == DEVICE_COUNT_RESOURCE) { |
5050 | printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n", | 5054 | dev_printk(KERN_INFO, &pci_dev->dev, |
5051 | pci_name(pci_dev)); | 5055 | "Couldn't find register window\n"); |
5052 | goto out_relreg; | 5056 | goto out_relreg; |
5053 | } | 5057 | } |
5054 | 5058 | ||
@@ -5061,16 +5065,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5061 | np->desc_ver = DESC_VER_3; | 5065 | np->desc_ver = DESC_VER_3; |
5062 | np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; | 5066 | np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; |
5063 | if (dma_64bit) { | 5067 | if (dma_64bit) { |
5064 | if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) { | 5068 | if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) |
5065 | printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", | 5069 | dev_printk(KERN_INFO, &pci_dev->dev, |
5066 | pci_name(pci_dev)); | 5070 | "64-bit DMA failed, using 32-bit addressing\n"); |
5067 | } else { | 5071 | else |
5068 | dev->features |= NETIF_F_HIGHDMA; | 5072 | dev->features |= NETIF_F_HIGHDMA; |
5069 | printk(KERN_INFO "forcedeth: using HIGHDMA\n"); | ||
5070 | } | ||
5071 | if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) { | 5073 | if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) { |
5072 | printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n", | 5074 | dev_printk(KERN_INFO, &pci_dev->dev, |
5073 | pci_name(pci_dev)); | 5075 | "64-bit DMA (consistent) failed, using 32-bit ring buffers\n"); |
5074 | } | 5076 | } |
5075 | } | 5077 | } |
5076 | } else if (id->driver_data & DEV_HAS_LARGEDESC) { | 5078 | } else if (id->driver_data & DEV_HAS_LARGEDESC) { |
@@ -5205,9 +5207,11 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5205 | * Bad mac address. At least one bios sets the mac address | 5207 | * Bad mac address. At least one bios sets the mac address |
5206 | * to 01:23:45:67:89:ab | 5208 | * to 01:23:45:67:89:ab |
5207 | */ | 5209 | */ |
5208 | printk(KERN_ERR "%s: Invalid Mac address detected: %s\n", | 5210 | dev_printk(KERN_ERR, &pci_dev->dev, |
5209 | pci_name(pci_dev), print_mac(mac, dev->dev_addr)); | 5211 | "Invalid Mac address detected: %s\n", |
5210 | printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n"); | 5212 | print_mac(mac, dev->dev_addr)); |
5213 | dev_printk(KERN_ERR, &pci_dev->dev, | ||
5214 | "Please complain to your hardware vendor. Switching to a random MAC.\n"); | ||
5211 | dev->dev_addr[0] = 0x00; | 5215 | dev->dev_addr[0] = 0x00; |
5212 | dev->dev_addr[1] = 0x00; | 5216 | dev->dev_addr[1] = 0x00; |
5213 | dev->dev_addr[2] = 0x6c; | 5217 | dev->dev_addr[2] = 0x6c; |
@@ -5321,8 +5325,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5321 | break; | 5325 | break; |
5322 | } | 5326 | } |
5323 | if (i == 33) { | 5327 | if (i == 33) { |
5324 | printk(KERN_INFO "%s: open: Could not find a valid PHY.\n", | 5328 | dev_printk(KERN_INFO, &pci_dev->dev, |
5325 | pci_name(pci_dev)); | 5329 | "open: Could not find a valid PHY.\n"); |
5326 | goto out_error; | 5330 | goto out_error; |
5327 | } | 5331 | } |
5328 | 5332 | ||
@@ -5344,12 +5348,37 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5344 | 5348 | ||
5345 | err = register_netdev(dev); | 5349 | err = register_netdev(dev); |
5346 | if (err) { | 5350 | if (err) { |
5347 | printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err); | 5351 | dev_printk(KERN_INFO, &pci_dev->dev, |
5352 | "unable to register netdev: %d\n", err); | ||
5348 | goto out_error; | 5353 | goto out_error; |
5349 | } | 5354 | } |
5350 | printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n", | 5355 | |
5351 | dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device, | 5356 | dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, " |
5352 | pci_name(pci_dev)); | 5357 | "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", |
5358 | dev->name, | ||
5359 | np->phy_oui, | ||
5360 | np->phyaddr, | ||
5361 | dev->dev_addr[0], | ||
5362 | dev->dev_addr[1], | ||
5363 | dev->dev_addr[2], | ||
5364 | dev->dev_addr[3], | ||
5365 | dev->dev_addr[4], | ||
5366 | dev->dev_addr[5]); | ||
5367 | |||
5368 | dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", | ||
5369 | dev->features & NETIF_F_HIGHDMA ? "highdma " : "", | ||
5370 | dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ? | ||
5371 | "csum " : "", | ||
5372 | dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? | ||
5373 | "vlan " : "", | ||
5374 | id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "", | ||
5375 | id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "", | ||
5376 | id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "", | ||
5377 | np->gigabit == PHY_GIGABIT ? "gbit " : "", | ||
5378 | np->need_linktimer ? "lnktim " : "", | ||
5379 | np->msi_flags & NV_MSI_CAPABLE ? "msi " : "", | ||
5380 | np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "", | ||
5381 | np->desc_ver); | ||
5353 | 5382 | ||
5354 | return 0; | 5383 | return 0; |
5355 | 5384 | ||
@@ -5567,17 +5596,16 @@ static struct pci_device_id pci_tbl[] = { | |||
5567 | }; | 5596 | }; |
5568 | 5597 | ||
5569 | static struct pci_driver driver = { | 5598 | static struct pci_driver driver = { |
5570 | .name = "forcedeth", | 5599 | .name = DRV_NAME, |
5571 | .id_table = pci_tbl, | 5600 | .id_table = pci_tbl, |
5572 | .probe = nv_probe, | 5601 | .probe = nv_probe, |
5573 | .remove = __devexit_p(nv_remove), | 5602 | .remove = __devexit_p(nv_remove), |
5574 | .suspend = nv_suspend, | 5603 | .suspend = nv_suspend, |
5575 | .resume = nv_resume, | 5604 | .resume = nv_resume, |
5576 | }; | 5605 | }; |
5577 | 5606 | ||
5578 | static int __init init_nic(void) | 5607 | static int __init init_nic(void) |
5579 | { | 5608 | { |
5580 | printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION); | ||
5581 | return pci_register_driver(&driver); | 5609 | return pci_register_driver(&driver); |
5582 | } | 5610 | } |
5583 | 5611 | ||
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 558440c15b6c..cc288d8f6a53 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1237,8 +1237,6 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu) | |||
1237 | * starting over will fix the problem. */ | 1237 | * starting over will fix the problem. */ |
1238 | static void gfar_timeout(struct net_device *dev) | 1238 | static void gfar_timeout(struct net_device *dev) |
1239 | { | 1239 | { |
1240 | struct gfar_private *priv = netdev_priv(dev); | ||
1241 | |||
1242 | dev->stats.tx_errors++; | 1240 | dev->stats.tx_errors++; |
1243 | 1241 | ||
1244 | if (dev->flags & IFF_UP) { | 1242 | if (dev->flags & IFF_UP) { |
@@ -1344,8 +1342,9 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) | |||
1344 | return skb; | 1342 | return skb; |
1345 | } | 1343 | } |
1346 | 1344 | ||
1347 | static inline void count_errors(unsigned short status, struct gfar_private *priv) | 1345 | static inline void count_errors(unsigned short status, struct net_device *dev) |
1348 | { | 1346 | { |
1347 | struct gfar_private *priv = netdev_priv(dev); | ||
1349 | struct net_device_stats *stats = &dev->stats; | 1348 | struct net_device_stats *stats = &dev->stats; |
1350 | struct gfar_extra_stats *estats = &priv->extra_stats; | 1349 | struct gfar_extra_stats *estats = &priv->extra_stats; |
1351 | 1350 | ||
@@ -1539,7 +1538,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) | |||
1539 | 1538 | ||
1540 | dev->stats.rx_bytes += pkt_len; | 1539 | dev->stats.rx_bytes += pkt_len; |
1541 | } else { | 1540 | } else { |
1542 | count_errors(bdp->status, priv); | 1541 | count_errors(bdp->status, dev); |
1543 | 1542 | ||
1544 | if (skb) | 1543 | if (skb) |
1545 | dev_kfree_skb_any(skb); | 1544 | dev_kfree_skb_any(skb); |
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index 39f4cb6b0cf3..a680eb05ba60 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -45,6 +45,8 @@ int __devinit mal_register_commac(struct mal_instance *mal, | |||
45 | return -EBUSY; | 45 | return -EBUSY; |
46 | } | 46 | } |
47 | 47 | ||
48 | if (list_empty(&mal->list)) | ||
49 | napi_enable(&mal->napi); | ||
48 | mal->tx_chan_mask |= commac->tx_chan_mask; | 50 | mal->tx_chan_mask |= commac->tx_chan_mask; |
49 | mal->rx_chan_mask |= commac->rx_chan_mask; | 51 | mal->rx_chan_mask |= commac->rx_chan_mask; |
50 | list_add(&commac->list, &mal->list); | 52 | list_add(&commac->list, &mal->list); |
@@ -67,6 +69,8 @@ void __devexit mal_unregister_commac(struct mal_instance *mal, | |||
67 | mal->tx_chan_mask &= ~commac->tx_chan_mask; | 69 | mal->tx_chan_mask &= ~commac->tx_chan_mask; |
68 | mal->rx_chan_mask &= ~commac->rx_chan_mask; | 70 | mal->rx_chan_mask &= ~commac->rx_chan_mask; |
69 | list_del_init(&commac->list); | 71 | list_del_init(&commac->list); |
72 | if (list_empty(&mal->list)) | ||
73 | napi_disable(&mal->napi); | ||
70 | 74 | ||
71 | spin_unlock_irqrestore(&mal->lock, flags); | 75 | spin_unlock_irqrestore(&mal->lock, flags); |
72 | } | 76 | } |
@@ -182,7 +186,7 @@ static inline void mal_enable_eob_irq(struct mal_instance *mal) | |||
182 | set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE); | 186 | set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE); |
183 | } | 187 | } |
184 | 188 | ||
185 | /* synchronized by __LINK_STATE_RX_SCHED bit in ndev->state */ | 189 | /* synchronized by NAPI state */ |
186 | static inline void mal_disable_eob_irq(struct mal_instance *mal) | 190 | static inline void mal_disable_eob_irq(struct mal_instance *mal) |
187 | { | 191 | { |
188 | // XXX might want to cache MAL_CFG as the DCR read can be slooooow | 192 | // XXX might want to cache MAL_CFG as the DCR read can be slooooow |
@@ -317,8 +321,8 @@ void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac) | |||
317 | while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags)) | 321 | while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags)) |
318 | msleep(1); | 322 | msleep(1); |
319 | 323 | ||
320 | /* Synchronize with the MAL NAPI poller. */ | 324 | /* Synchronize with the MAL NAPI poller */ |
321 | napi_disable(&mal->napi); | 325 | __napi_synchronize(&mal->napi); |
322 | } | 326 | } |
323 | 327 | ||
324 | void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac) | 328 | void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac) |
@@ -326,7 +330,12 @@ void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac) | |||
326 | smp_wmb(); | 330 | smp_wmb(); |
327 | clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags); | 331 | clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags); |
328 | 332 | ||
329 | // XXX might want to kick a poll now... | 333 | /* Feels better to trigger a poll here to catch up with events that |
334 | * may have happened on this channel while disabled. It will most | ||
335 | * probably be delayed until the next interrupt but that's mostly a | ||
336 | * non-issue in the context where this is called. | ||
337 | */ | ||
338 | napi_schedule(&mal->napi); | ||
330 | } | 339 | } |
331 | 340 | ||
332 | static int mal_poll(struct napi_struct *napi, int budget) | 341 | static int mal_poll(struct napi_struct *napi, int budget) |
@@ -336,8 +345,7 @@ static int mal_poll(struct napi_struct *napi, int budget) | |||
336 | int received = 0; | 345 | int received = 0; |
337 | unsigned long flags; | 346 | unsigned long flags; |
338 | 347 | ||
339 | MAL_DBG2(mal, "poll(%d) %d ->" NL, *budget, | 348 | MAL_DBG2(mal, "poll(%d)" NL, budget); |
340 | rx_work_limit); | ||
341 | again: | 349 | again: |
342 | /* Process TX skbs */ | 350 | /* Process TX skbs */ |
343 | list_for_each(l, &mal->poll_list) { | 351 | list_for_each(l, &mal->poll_list) { |
@@ -528,11 +536,12 @@ static int __devinit mal_probe(struct of_device *ofdev, | |||
528 | } | 536 | } |
529 | 537 | ||
530 | INIT_LIST_HEAD(&mal->poll_list); | 538 | INIT_LIST_HEAD(&mal->poll_list); |
531 | mal->napi.weight = CONFIG_IBM_NEW_EMAC_POLL_WEIGHT; | ||
532 | mal->napi.poll = mal_poll; | ||
533 | INIT_LIST_HEAD(&mal->list); | 539 | INIT_LIST_HEAD(&mal->list); |
534 | spin_lock_init(&mal->lock); | 540 | spin_lock_init(&mal->lock); |
535 | 541 | ||
542 | netif_napi_add(NULL, &mal->napi, mal_poll, | ||
543 | CONFIG_IBM_NEW_EMAC_POLL_WEIGHT); | ||
544 | |||
536 | /* Load power-on reset defaults */ | 545 | /* Load power-on reset defaults */ |
537 | mal_reset(mal); | 546 | mal_reset(mal); |
538 | 547 | ||
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 2aae9fe38c5a..b9961dc47606 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -36,13 +36,15 @@ | |||
36 | #include <linux/delay.h> | 36 | #include <linux/delay.h> |
37 | #include <linux/crc32.h> | 37 | #include <linux/crc32.h> |
38 | #include <linux/dma-mapping.h> | 38 | #include <linux/dma-mapping.h> |
39 | #include <linux/debugfs.h> | ||
40 | #include <linux/seq_file.h> | ||
39 | #include <linux/mii.h> | 41 | #include <linux/mii.h> |
40 | #include <asm/irq.h> | 42 | #include <asm/irq.h> |
41 | 43 | ||
42 | #include "skge.h" | 44 | #include "skge.h" |
43 | 45 | ||
44 | #define DRV_NAME "skge" | 46 | #define DRV_NAME "skge" |
45 | #define DRV_VERSION "1.11" | 47 | #define DRV_VERSION "1.12" |
46 | #define PFX DRV_NAME " " | 48 | #define PFX DRV_NAME " " |
47 | 49 | ||
48 | #define DEFAULT_TX_RING_SIZE 128 | 50 | #define DEFAULT_TX_RING_SIZE 128 |
@@ -57,7 +59,10 @@ | |||
57 | #define TX_WATCHDOG (5 * HZ) | 59 | #define TX_WATCHDOG (5 * HZ) |
58 | #define NAPI_WEIGHT 64 | 60 | #define NAPI_WEIGHT 64 |
59 | #define BLINK_MS 250 | 61 | #define BLINK_MS 250 |
60 | #define LINK_HZ (HZ/2) | 62 | #define LINK_HZ HZ |
63 | |||
64 | #define SKGE_EEPROM_MAGIC 0x9933aabb | ||
65 | |||
61 | 66 | ||
62 | MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver"); | 67 | MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver"); |
63 | MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>"); | 68 | MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>"); |
@@ -445,15 +450,15 @@ static struct net_device_stats *skge_get_stats(struct net_device *dev) | |||
445 | else | 450 | else |
446 | yukon_get_stats(skge, data); | 451 | yukon_get_stats(skge, data); |
447 | 452 | ||
448 | skge->net_stats.tx_bytes = data[0]; | 453 | dev->stats.tx_bytes = data[0]; |
449 | skge->net_stats.rx_bytes = data[1]; | 454 | dev->stats.rx_bytes = data[1]; |
450 | skge->net_stats.tx_packets = data[2] + data[4] + data[6]; | 455 | dev->stats.tx_packets = data[2] + data[4] + data[6]; |
451 | skge->net_stats.rx_packets = data[3] + data[5] + data[7]; | 456 | dev->stats.rx_packets = data[3] + data[5] + data[7]; |
452 | skge->net_stats.multicast = data[3] + data[5]; | 457 | dev->stats.multicast = data[3] + data[5]; |
453 | skge->net_stats.collisions = data[10]; | 458 | dev->stats.collisions = data[10]; |
454 | skge->net_stats.tx_aborted_errors = data[12]; | 459 | dev->stats.tx_aborted_errors = data[12]; |
455 | 460 | ||
456 | return &skge->net_stats; | 461 | return &dev->stats; |
457 | } | 462 | } |
458 | 463 | ||
459 | static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data) | 464 | static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data) |
@@ -798,6 +803,98 @@ static int skge_phys_id(struct net_device *dev, u32 data) | |||
798 | return 0; | 803 | return 0; |
799 | } | 804 | } |
800 | 805 | ||
806 | static int skge_get_eeprom_len(struct net_device *dev) | ||
807 | { | ||
808 | struct skge_port *skge = netdev_priv(dev); | ||
809 | u32 reg2; | ||
810 | |||
811 | pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, ®2); | ||
812 | return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); | ||
813 | } | ||
814 | |||
815 | static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset) | ||
816 | { | ||
817 | u32 val; | ||
818 | |||
819 | pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset); | ||
820 | |||
821 | do { | ||
822 | pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); | ||
823 | } while (!(offset & PCI_VPD_ADDR_F)); | ||
824 | |||
825 | pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val); | ||
826 | return val; | ||
827 | } | ||
828 | |||
829 | static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val) | ||
830 | { | ||
831 | pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val); | ||
832 | pci_write_config_word(pdev, cap + PCI_VPD_ADDR, | ||
833 | offset | PCI_VPD_ADDR_F); | ||
834 | |||
835 | do { | ||
836 | pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); | ||
837 | } while (offset & PCI_VPD_ADDR_F); | ||
838 | } | ||
839 | |||
840 | static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | ||
841 | u8 *data) | ||
842 | { | ||
843 | struct skge_port *skge = netdev_priv(dev); | ||
844 | struct pci_dev *pdev = skge->hw->pdev; | ||
845 | int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); | ||
846 | int length = eeprom->len; | ||
847 | u16 offset = eeprom->offset; | ||
848 | |||
849 | if (!cap) | ||
850 | return -EINVAL; | ||
851 | |||
852 | eeprom->magic = SKGE_EEPROM_MAGIC; | ||
853 | |||
854 | while (length > 0) { | ||
855 | u32 val = skge_vpd_read(pdev, cap, offset); | ||
856 | int n = min_t(int, length, sizeof(val)); | ||
857 | |||
858 | memcpy(data, &val, n); | ||
859 | length -= n; | ||
860 | data += n; | ||
861 | offset += n; | ||
862 | } | ||
863 | return 0; | ||
864 | } | ||
865 | |||
866 | static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | ||
867 | u8 *data) | ||
868 | { | ||
869 | struct skge_port *skge = netdev_priv(dev); | ||
870 | struct pci_dev *pdev = skge->hw->pdev; | ||
871 | int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); | ||
872 | int length = eeprom->len; | ||
873 | u16 offset = eeprom->offset; | ||
874 | |||
875 | if (!cap) | ||
876 | return -EINVAL; | ||
877 | |||
878 | if (eeprom->magic != SKGE_EEPROM_MAGIC) | ||
879 | return -EINVAL; | ||
880 | |||
881 | while (length > 0) { | ||
882 | u32 val; | ||
883 | int n = min_t(int, length, sizeof(val)); | ||
884 | |||
885 | if (n < sizeof(val)) | ||
886 | val = skge_vpd_read(pdev, cap, offset); | ||
887 | memcpy(&val, data, n); | ||
888 | |||
889 | skge_vpd_write(pdev, cap, offset, val); | ||
890 | |||
891 | length -= n; | ||
892 | data += n; | ||
893 | offset += n; | ||
894 | } | ||
895 | return 0; | ||
896 | } | ||
897 | |||
801 | static const struct ethtool_ops skge_ethtool_ops = { | 898 | static const struct ethtool_ops skge_ethtool_ops = { |
802 | .get_settings = skge_get_settings, | 899 | .get_settings = skge_get_settings, |
803 | .set_settings = skge_set_settings, | 900 | .set_settings = skge_set_settings, |
@@ -810,6 +907,9 @@ static const struct ethtool_ops skge_ethtool_ops = { | |||
810 | .set_msglevel = skge_set_msglevel, | 907 | .set_msglevel = skge_set_msglevel, |
811 | .nway_reset = skge_nway_reset, | 908 | .nway_reset = skge_nway_reset, |
812 | .get_link = ethtool_op_get_link, | 909 | .get_link = ethtool_op_get_link, |
910 | .get_eeprom_len = skge_get_eeprom_len, | ||
911 | .get_eeprom = skge_get_eeprom, | ||
912 | .set_eeprom = skge_set_eeprom, | ||
813 | .get_ringparam = skge_get_ring_param, | 913 | .get_ringparam = skge_get_ring_param, |
814 | .set_ringparam = skge_set_ring_param, | 914 | .set_ringparam = skge_set_ring_param, |
815 | .get_pauseparam = skge_get_pauseparam, | 915 | .get_pauseparam = skge_get_pauseparam, |
@@ -995,19 +1095,15 @@ static void xm_link_down(struct skge_hw *hw, int port) | |||
995 | { | 1095 | { |
996 | struct net_device *dev = hw->dev[port]; | 1096 | struct net_device *dev = hw->dev[port]; |
997 | struct skge_port *skge = netdev_priv(dev); | 1097 | struct skge_port *skge = netdev_priv(dev); |
998 | u16 cmd, msk; | 1098 | u16 cmd = xm_read16(hw, port, XM_MMU_CMD); |
999 | 1099 | ||
1000 | if (hw->phy_type == SK_PHY_XMAC) { | 1100 | xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); |
1001 | msk = xm_read16(hw, port, XM_IMSK); | ||
1002 | msk |= XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE | XM_IS_AND; | ||
1003 | xm_write16(hw, port, XM_IMSK, msk); | ||
1004 | } | ||
1005 | 1101 | ||
1006 | cmd = xm_read16(hw, port, XM_MMU_CMD); | ||
1007 | cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); | 1102 | cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); |
1008 | xm_write16(hw, port, XM_MMU_CMD, cmd); | 1103 | xm_write16(hw, port, XM_MMU_CMD, cmd); |
1104 | |||
1009 | /* dummy read to ensure writing */ | 1105 | /* dummy read to ensure writing */ |
1010 | (void) xm_read16(hw, port, XM_MMU_CMD); | 1106 | xm_read16(hw, port, XM_MMU_CMD); |
1011 | 1107 | ||
1012 | if (netif_carrier_ok(dev)) | 1108 | if (netif_carrier_ok(dev)) |
1013 | skge_link_down(skge); | 1109 | skge_link_down(skge); |
@@ -1103,7 +1199,7 @@ static void genesis_reset(struct skge_hw *hw, int port) | |||
1103 | 1199 | ||
1104 | /* reset the statistics module */ | 1200 | /* reset the statistics module */ |
1105 | xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); | 1201 | xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); |
1106 | xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */ | 1202 | xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); |
1107 | xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */ | 1203 | xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */ |
1108 | xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */ | 1204 | xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */ |
1109 | xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */ | 1205 | xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */ |
@@ -1141,7 +1237,7 @@ static void bcom_check_link(struct skge_hw *hw, int port) | |||
1141 | u16 status; | 1237 | u16 status; |
1142 | 1238 | ||
1143 | /* read twice because of latch */ | 1239 | /* read twice because of latch */ |
1144 | (void) xm_phy_read(hw, port, PHY_BCOM_STAT); | 1240 | xm_phy_read(hw, port, PHY_BCOM_STAT); |
1145 | status = xm_phy_read(hw, port, PHY_BCOM_STAT); | 1241 | status = xm_phy_read(hw, port, PHY_BCOM_STAT); |
1146 | 1242 | ||
1147 | if ((status & PHY_ST_LSYNC) == 0) { | 1243 | if ((status & PHY_ST_LSYNC) == 0) { |
@@ -1342,7 +1438,7 @@ static void xm_phy_init(struct skge_port *skge) | |||
1342 | mod_timer(&skge->link_timer, jiffies + LINK_HZ); | 1438 | mod_timer(&skge->link_timer, jiffies + LINK_HZ); |
1343 | } | 1439 | } |
1344 | 1440 | ||
1345 | static void xm_check_link(struct net_device *dev) | 1441 | static int xm_check_link(struct net_device *dev) |
1346 | { | 1442 | { |
1347 | struct skge_port *skge = netdev_priv(dev); | 1443 | struct skge_port *skge = netdev_priv(dev); |
1348 | struct skge_hw *hw = skge->hw; | 1444 | struct skge_hw *hw = skge->hw; |
@@ -1350,25 +1446,25 @@ static void xm_check_link(struct net_device *dev) | |||
1350 | u16 status; | 1446 | u16 status; |
1351 | 1447 | ||
1352 | /* read twice because of latch */ | 1448 | /* read twice because of latch */ |
1353 | (void) xm_phy_read(hw, port, PHY_XMAC_STAT); | 1449 | xm_phy_read(hw, port, PHY_XMAC_STAT); |
1354 | status = xm_phy_read(hw, port, PHY_XMAC_STAT); | 1450 | status = xm_phy_read(hw, port, PHY_XMAC_STAT); |
1355 | 1451 | ||
1356 | if ((status & PHY_ST_LSYNC) == 0) { | 1452 | if ((status & PHY_ST_LSYNC) == 0) { |
1357 | xm_link_down(hw, port); | 1453 | xm_link_down(hw, port); |
1358 | return; | 1454 | return 0; |
1359 | } | 1455 | } |
1360 | 1456 | ||
1361 | if (skge->autoneg == AUTONEG_ENABLE) { | 1457 | if (skge->autoneg == AUTONEG_ENABLE) { |
1362 | u16 lpa, res; | 1458 | u16 lpa, res; |
1363 | 1459 | ||
1364 | if (!(status & PHY_ST_AN_OVER)) | 1460 | if (!(status & PHY_ST_AN_OVER)) |
1365 | return; | 1461 | return 0; |
1366 | 1462 | ||
1367 | lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); | 1463 | lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); |
1368 | if (lpa & PHY_B_AN_RF) { | 1464 | if (lpa & PHY_B_AN_RF) { |
1369 | printk(KERN_NOTICE PFX "%s: remote fault\n", | 1465 | printk(KERN_NOTICE PFX "%s: remote fault\n", |
1370 | dev->name); | 1466 | dev->name); |
1371 | return; | 1467 | return 0; |
1372 | } | 1468 | } |
1373 | 1469 | ||
1374 | res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI); | 1470 | res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI); |
@@ -1384,7 +1480,7 @@ static void xm_check_link(struct net_device *dev) | |||
1384 | default: | 1480 | default: |
1385 | printk(KERN_NOTICE PFX "%s: duplex mismatch\n", | 1481 | printk(KERN_NOTICE PFX "%s: duplex mismatch\n", |
1386 | dev->name); | 1482 | dev->name); |
1387 | return; | 1483 | return 0; |
1388 | } | 1484 | } |
1389 | 1485 | ||
1390 | /* We are using IEEE 802.3z/D5.0 Table 37-4 */ | 1486 | /* We are using IEEE 802.3z/D5.0 Table 37-4 */ |
@@ -1408,11 +1504,14 @@ static void xm_check_link(struct net_device *dev) | |||
1408 | 1504 | ||
1409 | if (!netif_carrier_ok(dev)) | 1505 | if (!netif_carrier_ok(dev)) |
1410 | genesis_link_up(skge); | 1506 | genesis_link_up(skge); |
1507 | return 1; | ||
1411 | } | 1508 | } |
1412 | 1509 | ||
1413 | /* Poll to check for link coming up. | 1510 | /* Poll to check for link coming up. |
1511 | * | ||
1414 | * Since internal PHY is wired to a level triggered pin, can't | 1512 | * Since internal PHY is wired to a level triggered pin, can't |
1415 | * get an interrupt when carrier is detected. | 1513 | * get an interrupt when carrier is detected, need to poll for |
1514 | * link coming up. | ||
1416 | */ | 1515 | */ |
1417 | static void xm_link_timer(unsigned long arg) | 1516 | static void xm_link_timer(unsigned long arg) |
1418 | { | 1517 | { |
@@ -1420,29 +1519,35 @@ static void xm_link_timer(unsigned long arg) | |||
1420 | struct net_device *dev = skge->netdev; | 1519 | struct net_device *dev = skge->netdev; |
1421 | struct skge_hw *hw = skge->hw; | 1520 | struct skge_hw *hw = skge->hw; |
1422 | int port = skge->port; | 1521 | int port = skge->port; |
1522 | int i; | ||
1523 | unsigned long flags; | ||
1423 | 1524 | ||
1424 | if (!netif_running(dev)) | 1525 | if (!netif_running(dev)) |
1425 | return; | 1526 | return; |
1426 | 1527 | ||
1427 | if (netif_carrier_ok(dev)) { | 1528 | spin_lock_irqsave(&hw->phy_lock, flags); |
1529 | |||
1530 | /* | ||
1531 | * Verify that the link by checking GPIO register three times. | ||
1532 | * This pin has the signal from the link_sync pin connected to it. | ||
1533 | */ | ||
1534 | for (i = 0; i < 3; i++) { | ||
1535 | if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS) | ||
1536 | goto link_down; | ||
1537 | } | ||
1538 | |||
1539 | /* Re-enable interrupt to detect link down */ | ||
1540 | if (xm_check_link(dev)) { | ||
1541 | u16 msk = xm_read16(hw, port, XM_IMSK); | ||
1542 | msk &= ~XM_IS_INP_ASS; | ||
1543 | xm_write16(hw, port, XM_IMSK, msk); | ||
1428 | xm_read16(hw, port, XM_ISRC); | 1544 | xm_read16(hw, port, XM_ISRC); |
1429 | if (!(xm_read16(hw, port, XM_ISRC) & XM_IS_INP_ASS)) | ||
1430 | goto nochange; | ||
1431 | } else { | 1545 | } else { |
1432 | if (xm_read32(hw, port, XM_GP_PORT) & XM_GP_INP_ASS) | 1546 | link_down: |
1433 | goto nochange; | 1547 | mod_timer(&skge->link_timer, |
1434 | xm_read16(hw, port, XM_ISRC); | 1548 | round_jiffies(jiffies + LINK_HZ)); |
1435 | if (xm_read16(hw, port, XM_ISRC) & XM_IS_INP_ASS) | ||
1436 | goto nochange; | ||
1437 | } | 1549 | } |
1438 | 1550 | spin_unlock_irqrestore(&hw->phy_lock, flags); | |
1439 | spin_lock(&hw->phy_lock); | ||
1440 | xm_check_link(dev); | ||
1441 | spin_unlock(&hw->phy_lock); | ||
1442 | |||
1443 | nochange: | ||
1444 | if (netif_running(dev)) | ||
1445 | mod_timer(&skge->link_timer, jiffies + LINK_HZ); | ||
1446 | } | 1551 | } |
1447 | 1552 | ||
1448 | static void genesis_mac_init(struct skge_hw *hw, int port) | 1553 | static void genesis_mac_init(struct skge_hw *hw, int port) |
@@ -1679,24 +1784,27 @@ static void genesis_get_stats(struct skge_port *skge, u64 *data) | |||
1679 | 1784 | ||
1680 | static void genesis_mac_intr(struct skge_hw *hw, int port) | 1785 | static void genesis_mac_intr(struct skge_hw *hw, int port) |
1681 | { | 1786 | { |
1682 | struct skge_port *skge = netdev_priv(hw->dev[port]); | 1787 | struct net_device *dev = hw->dev[port]; |
1788 | struct skge_port *skge = netdev_priv(dev); | ||
1683 | u16 status = xm_read16(hw, port, XM_ISRC); | 1789 | u16 status = xm_read16(hw, port, XM_ISRC); |
1684 | 1790 | ||
1685 | if (netif_msg_intr(skge)) | 1791 | if (netif_msg_intr(skge)) |
1686 | printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n", | 1792 | printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n", |
1687 | skge->netdev->name, status); | 1793 | dev->name, status); |
1688 | 1794 | ||
1689 | if (hw->phy_type == SK_PHY_XMAC && | 1795 | if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) { |
1690 | (status & (XM_IS_INP_ASS | XM_IS_LIPA_RC))) | 1796 | xm_link_down(hw, port); |
1691 | xm_link_down(hw, port); | 1797 | mod_timer(&skge->link_timer, jiffies + 1); |
1798 | } | ||
1692 | 1799 | ||
1693 | if (status & XM_IS_TXF_UR) { | 1800 | if (status & XM_IS_TXF_UR) { |
1694 | xm_write32(hw, port, XM_MODE, XM_MD_FTF); | 1801 | xm_write32(hw, port, XM_MODE, XM_MD_FTF); |
1695 | ++skge->net_stats.tx_fifo_errors; | 1802 | ++dev->stats.tx_fifo_errors; |
1696 | } | 1803 | } |
1804 | |||
1697 | if (status & XM_IS_RXF_OV) { | 1805 | if (status & XM_IS_RXF_OV) { |
1698 | xm_write32(hw, port, XM_MODE, XM_MD_FRF); | 1806 | xm_write32(hw, port, XM_MODE, XM_MD_FRF); |
1699 | ++skge->net_stats.rx_fifo_errors; | 1807 | ++dev->stats.rx_fifo_errors; |
1700 | } | 1808 | } |
1701 | } | 1809 | } |
1702 | 1810 | ||
@@ -1753,11 +1861,12 @@ static void genesis_link_up(struct skge_port *skge) | |||
1753 | } | 1861 | } |
1754 | 1862 | ||
1755 | xm_write32(hw, port, XM_MODE, mode); | 1863 | xm_write32(hw, port, XM_MODE, mode); |
1756 | msk = XM_DEF_MSK; | ||
1757 | if (hw->phy_type != SK_PHY_XMAC) | ||
1758 | msk |= XM_IS_INP_ASS; /* disable GP0 interrupt bit */ | ||
1759 | 1864 | ||
1865 | /* Turn on detection of Tx underrun, Rx overrun */ | ||
1866 | msk = xm_read16(hw, port, XM_IMSK); | ||
1867 | msk &= ~(XM_IS_RXF_OV | XM_IS_TXF_UR); | ||
1760 | xm_write16(hw, port, XM_IMSK, msk); | 1868 | xm_write16(hw, port, XM_IMSK, msk); |
1869 | |||
1761 | xm_read16(hw, port, XM_ISRC); | 1870 | xm_read16(hw, port, XM_ISRC); |
1762 | 1871 | ||
1763 | /* get MMU Command Reg. */ | 1872 | /* get MMU Command Reg. */ |
@@ -2192,12 +2301,12 @@ static void yukon_mac_intr(struct skge_hw *hw, int port) | |||
2192 | dev->name, status); | 2301 | dev->name, status); |
2193 | 2302 | ||
2194 | if (status & GM_IS_RX_FF_OR) { | 2303 | if (status & GM_IS_RX_FF_OR) { |
2195 | ++skge->net_stats.rx_fifo_errors; | 2304 | ++dev->stats.rx_fifo_errors; |
2196 | skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); | 2305 | skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); |
2197 | } | 2306 | } |
2198 | 2307 | ||
2199 | if (status & GM_IS_TX_FF_UR) { | 2308 | if (status & GM_IS_TX_FF_UR) { |
2200 | ++skge->net_stats.tx_fifo_errors; | 2309 | ++dev->stats.tx_fifo_errors; |
2201 | skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); | 2310 | skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); |
2202 | } | 2311 | } |
2203 | 2312 | ||
@@ -2403,32 +2512,31 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
2403 | return err; | 2512 | return err; |
2404 | } | 2513 | } |
2405 | 2514 | ||
2406 | static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len) | 2515 | /* Assign Ram Buffer allocation to queue */ |
2516 | static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, u32 space) | ||
2407 | { | 2517 | { |
2408 | u32 end; | 2518 | u32 end; |
2409 | 2519 | ||
2410 | start /= 8; | 2520 | /* convert from K bytes to qwords used for hw register */ |
2411 | len /= 8; | 2521 | start *= 1024/8; |
2412 | end = start + len - 1; | 2522 | space *= 1024/8; |
2523 | end = start + space - 1; | ||
2413 | 2524 | ||
2414 | skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); | 2525 | skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); |
2415 | skge_write32(hw, RB_ADDR(q, RB_START), start); | 2526 | skge_write32(hw, RB_ADDR(q, RB_START), start); |
2527 | skge_write32(hw, RB_ADDR(q, RB_END), end); | ||
2416 | skge_write32(hw, RB_ADDR(q, RB_WP), start); | 2528 | skge_write32(hw, RB_ADDR(q, RB_WP), start); |
2417 | skge_write32(hw, RB_ADDR(q, RB_RP), start); | 2529 | skge_write32(hw, RB_ADDR(q, RB_RP), start); |
2418 | skge_write32(hw, RB_ADDR(q, RB_END), end); | ||
2419 | 2530 | ||
2420 | if (q == Q_R1 || q == Q_R2) { | 2531 | if (q == Q_R1 || q == Q_R2) { |
2532 | u32 tp = space - space/4; | ||
2533 | |||
2421 | /* Set thresholds on receive queue's */ | 2534 | /* Set thresholds on receive queue's */ |
2422 | skge_write32(hw, RB_ADDR(q, RB_RX_UTPP), | 2535 | skge_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); |
2423 | start + (2*len)/3); | 2536 | skge_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); |
2424 | skge_write32(hw, RB_ADDR(q, RB_RX_LTPP), | 2537 | } else if (hw->chip_id != CHIP_ID_GENESIS) |
2425 | start + (len/3)); | 2538 | /* Genesis Tx Fifo is too small for normal store/forward */ |
2426 | } else { | ||
2427 | /* Enable store & forward on Tx queue's because | ||
2428 | * Tx FIFO is only 4K on Genesis and 1K on Yukon | ||
2429 | */ | ||
2430 | skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); | 2539 | skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); |
2431 | } | ||
2432 | 2540 | ||
2433 | skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); | 2541 | skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); |
2434 | } | 2542 | } |
@@ -2456,7 +2564,7 @@ static int skge_up(struct net_device *dev) | |||
2456 | struct skge_port *skge = netdev_priv(dev); | 2564 | struct skge_port *skge = netdev_priv(dev); |
2457 | struct skge_hw *hw = skge->hw; | 2565 | struct skge_hw *hw = skge->hw; |
2458 | int port = skge->port; | 2566 | int port = skge->port; |
2459 | u32 chunk, ram_addr; | 2567 | u32 ramaddr, ramsize, rxspace; |
2460 | size_t rx_size, tx_size; | 2568 | size_t rx_size, tx_size; |
2461 | int err; | 2569 | int err; |
2462 | 2570 | ||
@@ -2511,14 +2619,15 @@ static int skge_up(struct net_device *dev) | |||
2511 | spin_unlock_bh(&hw->phy_lock); | 2619 | spin_unlock_bh(&hw->phy_lock); |
2512 | 2620 | ||
2513 | /* Configure RAMbuffers */ | 2621 | /* Configure RAMbuffers */ |
2514 | chunk = hw->ram_size / ((hw->ports + 1)*2); | 2622 | ramsize = (hw->ram_size - hw->ram_offset) / hw->ports; |
2515 | ram_addr = hw->ram_offset + 2 * chunk * port; | 2623 | ramaddr = hw->ram_offset + port * ramsize; |
2624 | rxspace = 8 + (2*(ramsize - 16))/3; | ||
2516 | 2625 | ||
2517 | skge_ramset(hw, rxqaddr[port], ram_addr, chunk); | 2626 | skge_ramset(hw, rxqaddr[port], ramaddr, rxspace); |
2518 | skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean); | 2627 | skge_ramset(hw, txqaddr[port], ramaddr + rxspace, ramsize - rxspace); |
2519 | 2628 | ||
2629 | skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean); | ||
2520 | BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean); | 2630 | BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean); |
2521 | skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk); | ||
2522 | skge_qset(skge, txqaddr[port], skge->tx_ring.to_use); | 2631 | skge_qset(skge, txqaddr[port], skge->tx_ring.to_use); |
2523 | 2632 | ||
2524 | /* Start receiver BMU */ | 2633 | /* Start receiver BMU */ |
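The RAM allocation in skge_up() changes as well: instead of four equal chunks, each port now gets (ram_size - ram_offset) / ports kilobytes, of which the receive queue is given 8 + 2*(ramsize - 16)/3 KB and the transmit queue the remainder. A standalone sketch with assumed sizes (128 KB total, no offset, two ports):

	#include <stdio.h>

	int main(void)
	{
		unsigned int ram_size = 128, ram_offset = 0, ports = 2;	/* KB, assumed */
		unsigned int ramsize = (ram_size - ram_offset) / ports;	/* 64 KB per port */
		unsigned int rxspace = 8 + (2 * (ramsize - 16)) / 3;	/* 40 KB for rx */
		unsigned int txspace = ramsize - rxspace;		/* 24 KB for tx */

		printf("per-port=%uK rx=%uK tx=%uK\n", ramsize, rxspace, txspace);
		return 0;
	}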
@@ -2544,6 +2653,15 @@ static int skge_up(struct net_device *dev) | |||
2544 | return err; | 2653 | return err; |
2545 | } | 2654 | } |
2546 | 2655 | ||
2656 | /* stop receiver */ | ||
2657 | static void skge_rx_stop(struct skge_hw *hw, int port) | ||
2658 | { | ||
2659 | skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP); | ||
2660 | skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL), | ||
2661 | RB_RST_SET|RB_DIS_OP_MD); | ||
2662 | skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); | ||
2663 | } | ||
2664 | |||
2547 | static int skge_down(struct net_device *dev) | 2665 | static int skge_down(struct net_device *dev) |
2548 | { | 2666 | { |
2549 | struct skge_port *skge = netdev_priv(dev); | 2667 | struct skge_port *skge = netdev_priv(dev); |
@@ -2595,11 +2713,8 @@ static int skge_down(struct net_device *dev) | |||
2595 | 2713 | ||
2596 | /* Reset the RAM Buffer async Tx queue */ | 2714 | /* Reset the RAM Buffer async Tx queue */ |
2597 | skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET); | 2715 | skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET); |
2598 | /* stop receiver */ | 2716 | |
2599 | skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP); | 2717 | skge_rx_stop(hw, port); |
2600 | skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL), | ||
2601 | RB_RST_SET|RB_DIS_OP_MD); | ||
2602 | skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); | ||
2603 | 2718 | ||
2604 | if (hw->chip_id == CHIP_ID_GENESIS) { | 2719 | if (hw->chip_id == CHIP_ID_GENESIS) { |
2605 | skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET); | 2720 | skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET); |
@@ -2782,7 +2897,11 @@ static void skge_tx_timeout(struct net_device *dev) | |||
2782 | 2897 | ||
2783 | static int skge_change_mtu(struct net_device *dev, int new_mtu) | 2898 | static int skge_change_mtu(struct net_device *dev, int new_mtu) |
2784 | { | 2899 | { |
2900 | struct skge_port *skge = netdev_priv(dev); | ||
2901 | struct skge_hw *hw = skge->hw; | ||
2902 | int port = skge->port; | ||
2785 | int err; | 2903 | int err; |
2904 | u16 ctl, reg; | ||
2786 | 2905 | ||
2787 | if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) | 2906 | if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) |
2788 | return -EINVAL; | 2907 | return -EINVAL; |
@@ -2792,13 +2911,40 @@ static int skge_change_mtu(struct net_device *dev, int new_mtu) | |||
2792 | return 0; | 2911 | return 0; |
2793 | } | 2912 | } |
2794 | 2913 | ||
2795 | skge_down(dev); | 2914 | skge_write32(hw, B0_IMSK, 0); |
2915 | dev->trans_start = jiffies; /* prevent tx timeout */ | ||
2916 | netif_stop_queue(dev); | ||
2917 | napi_disable(&skge->napi); | ||
2918 | |||
2919 | ctl = gma_read16(hw, port, GM_GP_CTRL); | ||
2920 | gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); | ||
2921 | |||
2922 | skge_rx_clean(skge); | ||
2923 | skge_rx_stop(hw, port); | ||
2796 | 2924 | ||
2797 | dev->mtu = new_mtu; | 2925 | dev->mtu = new_mtu; |
2798 | 2926 | ||
2799 | err = skge_up(dev); | 2927 | reg = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); |
2928 | if (new_mtu > 1500) | ||
2929 | reg |= GM_SMOD_JUMBO_ENA; | ||
2930 | gma_write16(hw, port, GM_SERIAL_MODE, reg); | ||
2931 | |||
2932 | skge_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD); | ||
2933 | |||
2934 | err = skge_rx_fill(dev); | ||
2935 | wmb(); | ||
2936 | if (!err) | ||
2937 | skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); | ||
2938 | skge_write32(hw, B0_IMSK, hw->intr_mask); | ||
2939 | |||
2800 | if (err) | 2940 | if (err) |
2801 | dev_close(dev); | 2941 | dev_close(dev); |
2942 | else { | ||
2943 | gma_write16(hw, port, GM_GP_CTRL, ctl); | ||
2944 | |||
2945 | napi_enable(&skge->napi); | ||
2946 | netif_wake_queue(dev); | ||
2947 | } | ||
2802 | 2948 | ||
2803 | return err; | 2949 | return err; |
2804 | } | 2950 | } |
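skge_change_mtu() no longer bounces the port through skge_down()/skge_up(). It masks interrupts, quiesces the transmit queue and NAPI, disables the GMAC receiver, flushes and stops the receive queue, reprograms GM_SERIAL_MODE (setting the jumbo bit for MTUs above 1500), then refills and restarts the receiver before re-enabling everything. The practical effect is that an MTU change, e.g. "ip link set dev eth0 mtu 9000" (example command and interface name), should take effect without a full restart of the port or a PHY renegotiation.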
@@ -2994,18 +3140,18 @@ error: | |||
2994 | 3140 | ||
2995 | if (skge->hw->chip_id == CHIP_ID_GENESIS) { | 3141 | if (skge->hw->chip_id == CHIP_ID_GENESIS) { |
2996 | if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) | 3142 | if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) |
2997 | skge->net_stats.rx_length_errors++; | 3143 | dev->stats.rx_length_errors++; |
2998 | if (status & XMR_FS_FRA_ERR) | 3144 | if (status & XMR_FS_FRA_ERR) |
2999 | skge->net_stats.rx_frame_errors++; | 3145 | dev->stats.rx_frame_errors++; |
3000 | if (status & XMR_FS_FCS_ERR) | 3146 | if (status & XMR_FS_FCS_ERR) |
3001 | skge->net_stats.rx_crc_errors++; | 3147 | dev->stats.rx_crc_errors++; |
3002 | } else { | 3148 | } else { |
3003 | if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) | 3149 | if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) |
3004 | skge->net_stats.rx_length_errors++; | 3150 | dev->stats.rx_length_errors++; |
3005 | if (status & GMR_FS_FRAGMENT) | 3151 | if (status & GMR_FS_FRAGMENT) |
3006 | skge->net_stats.rx_frame_errors++; | 3152 | dev->stats.rx_frame_errors++; |
3007 | if (status & GMR_FS_CRC_ERR) | 3153 | if (status & GMR_FS_CRC_ERR) |
3008 | skge->net_stats.rx_crc_errors++; | 3154 | dev->stats.rx_crc_errors++; |
3009 | } | 3155 | } |
3010 | 3156 | ||
3011 | resubmit: | 3157 | resubmit: |
@@ -3103,10 +3249,7 @@ static void skge_mac_parity(struct skge_hw *hw, int port) | |||
3103 | { | 3249 | { |
3104 | struct net_device *dev = hw->dev[port]; | 3250 | struct net_device *dev = hw->dev[port]; |
3105 | 3251 | ||
3106 | if (dev) { | 3252 | ++dev->stats.tx_heartbeat_errors; |
3107 | struct skge_port *skge = netdev_priv(dev); | ||
3108 | ++skge->net_stats.tx_heartbeat_errors; | ||
3109 | } | ||
3110 | 3253 | ||
3111 | if (hw->chip_id == CHIP_ID_GENESIS) | 3254 | if (hw->chip_id == CHIP_ID_GENESIS) |
3112 | skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), | 3255 | skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), |
@@ -3259,9 +3402,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id) | |||
3259 | skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1); | 3402 | skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1); |
3260 | 3403 | ||
3261 | if (status & IS_PA_TO_RX1) { | 3404 | if (status & IS_PA_TO_RX1) { |
3262 | struct skge_port *skge = netdev_priv(hw->dev[0]); | 3405 | ++hw->dev[0]->stats.rx_over_errors; |
3263 | |||
3264 | ++skge->net_stats.rx_over_errors; | ||
3265 | skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1); | 3406 | skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1); |
3266 | } | 3407 | } |
3267 | 3408 | ||
@@ -3278,7 +3419,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id) | |||
3278 | } | 3419 | } |
3279 | 3420 | ||
3280 | if (status & IS_PA_TO_RX2) { | 3421 | if (status & IS_PA_TO_RX2) { |
3281 | ++skge->net_stats.rx_over_errors; | 3422 | ++hw->dev[1]->stats.rx_over_errors; |
3282 | skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); | 3423 | skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); |
3283 | } | 3424 | } |
3284 | 3425 | ||
@@ -3450,15 +3591,12 @@ static int skge_reset(struct skge_hw *hw) | |||
3450 | if (hw->chip_id == CHIP_ID_GENESIS) { | 3591 | if (hw->chip_id == CHIP_ID_GENESIS) { |
3451 | if (t8 == 3) { | 3592 | if (t8 == 3) { |
3452 | /* special case: 4 x 64k x 36, offset = 0x80000 */ | 3593 | /* special case: 4 x 64k x 36, offset = 0x80000 */ |
3453 | hw->ram_size = 0x100000; | 3594 | hw->ram_size = 1024; |
3454 | hw->ram_offset = 0x80000; | 3595 | hw->ram_offset = 512; |
3455 | } else | 3596 | } else |
3456 | hw->ram_size = t8 * 512; | 3597 | hw->ram_size = t8 * 512; |
3457 | } | 3598 | } else /* Yukon */ |
3458 | else if (t8 == 0) | 3599 | hw->ram_size = t8 ? t8 * 4 : 128; |
3459 | hw->ram_size = 0x20000; | ||
3460 | else | ||
3461 | hw->ram_size = t8 * 4096; | ||
3462 | 3600 | ||
3463 | hw->intr_mask = IS_HW_ERR; | 3601 | hw->intr_mask = IS_HW_ERR; |
3464 | 3602 | ||
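hw->ram_size and hw->ram_offset are now tracked in kilobytes rather than bytes, matching the units the reworked skge_ramset() expects. The decode of the RAM size value t8 (read earlier in skge_reset()) keeps the same substance: the Genesis 4 x 64k x 36 special case becomes 1024 KB with a 512 KB offset, other Genesis parts get t8 * 512 KB, and Yukon parts get t8 * 4 KB, with 0 meaning 128 KB. A small standalone sketch of the decode (inputs are examples):

	#include <stdio.h>

	/* Decode the RAM size value the way the reworked skge_reset() does;
	 * the result is in KB (offset handling omitted). */
	static unsigned int ram_kb(int genesis, unsigned int t8)
	{
		if (genesis)
			return (t8 == 3) ? 1024 : t8 * 512;
		return t8 ? t8 * 4 : 128;
	}

	int main(void)
	{
		printf("genesis t8=3 -> %uK\n", ram_kb(1, 3));
		printf("yukon   t8=0 -> %uK\n", ram_kb(0, 0));
		return 0;
	}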
@@ -3540,6 +3678,145 @@ static int skge_reset(struct skge_hw *hw) | |||
3540 | return 0; | 3678 | return 0; |
3541 | } | 3679 | } |
3542 | 3680 | ||
3681 | |||
3682 | #ifdef CONFIG_SKGE_DEBUG | ||
3683 | |||
3684 | static struct dentry *skge_debug; | ||
3685 | |||
3686 | static int skge_debug_show(struct seq_file *seq, void *v) | ||
3687 | { | ||
3688 | struct net_device *dev = seq->private; | ||
3689 | const struct skge_port *skge = netdev_priv(dev); | ||
3690 | const struct skge_hw *hw = skge->hw; | ||
3691 | const struct skge_element *e; | ||
3692 | |||
3693 | if (!netif_running(dev)) | ||
3694 | return -ENETDOWN; | ||
3695 | |||
3696 | seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC), | ||
3697 | skge_read32(hw, B0_IMSK)); | ||
3698 | |||
3699 | seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring)); | ||
3700 | for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { | ||
3701 | const struct skge_tx_desc *t = e->desc; | ||
3702 | seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n", | ||
3703 | t->control, t->dma_hi, t->dma_lo, t->status, | ||
3704 | t->csum_offs, t->csum_write, t->csum_start); | ||
3705 | } | ||
3706 | |||
3707 | seq_printf(seq, "\nRx Ring: \n"); | ||
3708 | for (e = skge->rx_ring.to_clean; ; e = e->next) { | ||
3709 | const struct skge_rx_desc *r = e->desc; | ||
3710 | |||
3711 | if (r->control & BMU_OWN) | ||
3712 | break; | ||
3713 | |||
3714 | seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n", | ||
3715 | r->control, r->dma_hi, r->dma_lo, r->status, | ||
3716 | r->timestamp, r->csum1, r->csum1_start); | ||
3717 | } | ||
3718 | |||
3719 | return 0; | ||
3720 | } | ||
3721 | |||
3722 | static int skge_debug_open(struct inode *inode, struct file *file) | ||
3723 | { | ||
3724 | return single_open(file, skge_debug_show, inode->i_private); | ||
3725 | } | ||
3726 | |||
3727 | static const struct file_operations skge_debug_fops = { | ||
3728 | .owner = THIS_MODULE, | ||
3729 | .open = skge_debug_open, | ||
3730 | .read = seq_read, | ||
3731 | .llseek = seq_lseek, | ||
3732 | .release = single_release, | ||
3733 | }; | ||
3734 | |||
3735 | /* | ||
3736 | * Use network device events to create/remove/rename | ||
3737 | * debugfs file entries | ||
3738 | */ | ||
3739 | static int skge_device_event(struct notifier_block *unused, | ||
3740 | unsigned long event, void *ptr) | ||
3741 | { | ||
3742 | struct net_device *dev = ptr; | ||
3743 | struct skge_port *skge; | ||
3744 | struct dentry *d; | ||
3745 | |||
3746 | if (dev->open != &skge_up || !skge_debug) | ||
3747 | goto done; | ||
3748 | |||
3749 | skge = netdev_priv(dev); | ||
3750 | switch(event) { | ||
3751 | case NETDEV_CHANGENAME: | ||
3752 | if (skge->debugfs) { | ||
3753 | d = debugfs_rename(skge_debug, skge->debugfs, | ||
3754 | skge_debug, dev->name); | ||
3755 | if (d) | ||
3756 | skge->debugfs = d; | ||
3757 | else { | ||
3758 | pr_info(PFX "%s: rename failed\n", dev->name); | ||
3759 | debugfs_remove(skge->debugfs); | ||
3760 | } | ||
3761 | } | ||
3762 | break; | ||
3763 | |||
3764 | case NETDEV_GOING_DOWN: | ||
3765 | if (skge->debugfs) { | ||
3766 | debugfs_remove(skge->debugfs); | ||
3767 | skge->debugfs = NULL; | ||
3768 | } | ||
3769 | break; | ||
3770 | |||
3771 | case NETDEV_UP: | ||
3772 | d = debugfs_create_file(dev->name, S_IRUGO, | ||
3773 | skge_debug, dev, | ||
3774 | &skge_debug_fops); | ||
3775 | if (!d || IS_ERR(d)) | ||
3776 | pr_info(PFX "%s: debugfs create failed\n", | ||
3777 | dev->name); | ||
3778 | else | ||
3779 | skge->debugfs = d; | ||
3780 | break; | ||
3781 | } | ||
3782 | |||
3783 | done: | ||
3784 | return NOTIFY_DONE; | ||
3785 | } | ||
3786 | |||
3787 | static struct notifier_block skge_notifier = { | ||
3788 | .notifier_call = skge_device_event, | ||
3789 | }; | ||
3790 | |||
3791 | |||
3792 | static __init void skge_debug_init(void) | ||
3793 | { | ||
3794 | struct dentry *ent; | ||
3795 | |||
3796 | ent = debugfs_create_dir("skge", NULL); | ||
3797 | if (!ent || IS_ERR(ent)) { | ||
3798 | pr_info(PFX "debugfs create directory failed\n"); | ||
3799 | return; | ||
3800 | } | ||
3801 | |||
3802 | skge_debug = ent; | ||
3803 | register_netdevice_notifier(&skge_notifier); | ||
3804 | } | ||
3805 | |||
3806 | static __exit void skge_debug_cleanup(void) | ||
3807 | { | ||
3808 | if (skge_debug) { | ||
3809 | unregister_netdevice_notifier(&skge_notifier); | ||
3810 | debugfs_remove(skge_debug); | ||
3811 | skge_debug = NULL; | ||
3812 | } | ||
3813 | } | ||
3814 | |||
3815 | #else | ||
3816 | #define skge_debug_init() | ||
3817 | #define skge_debug_cleanup() | ||
3818 | #endif | ||
3819 | |||
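With CONFIG_SKGE_DEBUG enabled, each running skge interface gets a read-only debugfs file that dumps the interrupt source/mask registers and the transmit and receive descriptor rings; the netdevice notifier creates the file on NETDEV_UP, renames it on NETDEV_CHANGENAME and removes it when the interface goes down. Assuming debugfs is mounted at the usual /sys/kernel/debug, the dump can be read with, for example, cat /sys/kernel/debug/skge/eth0.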
3543 | /* Initialize network device */ | 3820 | /* Initialize network device */ |
3544 | static struct net_device *skge_devinit(struct skge_hw *hw, int port, | 3821 | static struct net_device *skge_devinit(struct skge_hw *hw, int port, |
3545 | int highmem) | 3822 | int highmem) |
@@ -3904,12 +4181,14 @@ static struct pci_driver skge_driver = { | |||
3904 | 4181 | ||
3905 | static int __init skge_init_module(void) | 4182 | static int __init skge_init_module(void) |
3906 | { | 4183 | { |
4184 | skge_debug_init(); | ||
3907 | return pci_register_driver(&skge_driver); | 4185 | return pci_register_driver(&skge_driver); |
3908 | } | 4186 | } |
3909 | 4187 | ||
3910 | static void __exit skge_cleanup_module(void) | 4188 | static void __exit skge_cleanup_module(void) |
3911 | { | 4189 | { |
3912 | pci_unregister_driver(&skge_driver); | 4190 | pci_unregister_driver(&skge_driver); |
4191 | skge_debug_cleanup(); | ||
3913 | } | 4192 | } |
3914 | 4193 | ||
3915 | module_init(skge_init_module); | 4194 | module_init(skge_init_module); |
diff --git a/drivers/net/skge.h b/drivers/net/skge.h index 1a57bdd1ddf1..17caccbb7685 100644 --- a/drivers/net/skge.h +++ b/drivers/net/skge.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Definitions for the new Marvell Yukon / SysKonenct driver. | 2 | * Definitions for the new Marvell Yukon / SysKonnect driver. |
3 | */ | 3 | */ |
4 | #ifndef _SKGE_H | 4 | #ifndef _SKGE_H |
5 | #define _SKGE_H | 5 | #define _SKGE_H |
@@ -8,8 +8,10 @@ | |||
8 | #define PCI_DEV_REG1 0x40 | 8 | #define PCI_DEV_REG1 0x40 |
9 | #define PCI_PHY_COMA 0x8000000 | 9 | #define PCI_PHY_COMA 0x8000000 |
10 | #define PCI_VIO 0x2000000 | 10 | #define PCI_VIO 0x2000000 |
11 | |||
11 | #define PCI_DEV_REG2 0x44 | 12 | #define PCI_DEV_REG2 0x44 |
12 | #define PCI_REV_DESC 0x4 | 13 | #define PCI_VPD_ROM_SZ 7L<<14 /* VPD ROM size 0=256, 1=512, ... */ |
14 | #define PCI_REV_DESC 1<<2 /* Reverse Descriptor bytes */ | ||
13 | 15 | ||
14 | #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ | 16 | #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ |
15 | PCI_STATUS_SIG_SYSTEM_ERROR | \ | 17 | PCI_STATUS_SIG_SYSTEM_ERROR | \ |
@@ -2191,11 +2193,9 @@ enum { | |||
2191 | XM_IS_TXF_UR = 1<<2, /* Bit 2: Transmit FIFO Underrun */ | 2193 | XM_IS_TXF_UR = 1<<2, /* Bit 2: Transmit FIFO Underrun */ |
2192 | XM_IS_TX_COMP = 1<<1, /* Bit 1: Frame Tx Complete */ | 2194 | XM_IS_TX_COMP = 1<<1, /* Bit 1: Frame Tx Complete */ |
2193 | XM_IS_RX_COMP = 1<<0, /* Bit 0: Frame Rx Complete */ | 2195 | XM_IS_RX_COMP = 1<<0, /* Bit 0: Frame Rx Complete */ |
2194 | }; | ||
2195 | |||
2196 | #define XM_DEF_MSK (~(XM_IS_INP_ASS | XM_IS_LIPA_RC | \ | ||
2197 | XM_IS_RXF_OV | XM_IS_TXF_UR)) | ||
2198 | 2196 | ||
2197 | XM_IMSK_DISABLE = 0xffff, | ||
2198 | }; | ||
2199 | 2199 | ||
2200 | /* XM_HW_CFG 16 bit r/w Hardware Config Register */ | 2200 | /* XM_HW_CFG 16 bit r/w Hardware Config Register */ |
2201 | enum { | 2201 | enum { |
@@ -2469,8 +2469,9 @@ struct skge_port { | |||
2469 | void *mem; /* PCI memory for rings */ | 2469 | void *mem; /* PCI memory for rings */ |
2470 | dma_addr_t dma; | 2470 | dma_addr_t dma; |
2471 | unsigned long mem_size; | 2471 | unsigned long mem_size; |
2472 | 2472 | #ifdef CONFIG_SKGE_DEBUG | |
2473 | struct net_device_stats net_stats; | 2473 | struct dentry *debugfs; |
2474 | #endif | ||
2474 | }; | 2475 | }; |
2475 | 2476 | ||
2476 | 2477 | ||
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c index 7224d368b2a7..5d31519a6c67 100644 --- a/drivers/net/tokenring/3c359.c +++ b/drivers/net/tokenring/3c359.c | |||
@@ -760,7 +760,7 @@ static int xl_open_hw(struct net_device *dev) | |||
760 | if (xl_priv->xl_laa[0]) { /* If using a LAA address */ | 760 | if (xl_priv->xl_laa[0]) { /* If using a LAA address */ |
761 | for (i=10;i<16;i++) { | 761 | for (i=10;i<16;i++) { |
762 | writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; | 762 | writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; |
763 | writeb(xl_priv->xl_laa[i],xl_mmio + MMIO_MACDATA) ; | 763 | writeb(xl_priv->xl_laa[i-10],xl_mmio + MMIO_MACDATA) ; |
764 | } | 764 | } |
765 | memcpy(dev->dev_addr,xl_priv->xl_laa,dev->addr_len) ; | 765 | memcpy(dev->dev_addr,xl_priv->xl_laa,dev->addr_len) ; |
766 | } else { /* Regular hardware address */ | 766 | } else { /* Regular hardware address */ |
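The 3c359 hunk fixes an off-by-ten index when a locally administered address is programmed: the MAC register window is walked with i = 10..15, but the LAA itself is a 6-byte array indexed from 0, so the source byte has to be xl_laa[i - 10]. A standalone illustration of the corrected indexing (the address bytes are examples):

	#include <stdio.h>

	int main(void)
	{
		unsigned char xl_laa[6] = { 0x40, 0x00, 0x11, 0x22, 0x33, 0x44 };
		int i;

		for (i = 10; i < 16; i++)	/* window offsets 10..15 */
			printf("offset %d <- %02x\n", i, xl_laa[i - 10]);
		return 0;
	}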