author     David S. Miller <davem@davemloft.net>  2008-04-17 17:13:13 -0400
committer  David S. Miller <davem@davemloft.net>  2008-04-17 17:13:13 -0400
commit     2e5a3eaca386ce026f240c7b21e5c4958fcea946 (patch)
tree       191cf2b340d008b711137ce8c40b27a3dadff8d5 /drivers/net
parent     8c95b4773dd8d0415269ffad7301ef96d75be8ee (diff)
parent     36b30ea940bb88d88c90698e0e3d97a805ab5856 (diff)
Merge branch 'upstream-net26' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/au1000_eth.c | 6
-rw-r--r--  drivers/net/bfin_mac.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/cpmac.c | 5
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 4
-rw-r--r--  drivers/net/e1000/e1000.h | 86
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 160
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 1324
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 512
-rw-r--r--  drivers/net/e1000/e1000_main.c | 174
-rw-r--r--  drivers/net/e1000e/82571.c | 36
-rw-r--r--  drivers/net/e1000e/e1000.h | 4
-rw-r--r--  drivers/net/e1000e/es2lan.c | 32
-rw-r--r--  drivers/net/e1000e/ethtool.c | 123
-rw-r--r--  drivers/net/e1000e/hw.h | 10
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 30
-rw-r--r--  drivers/net/e1000e/lib.c | 6
-rw-r--r--  drivers/net/e1000e/netdev.c | 24
-rw-r--r--  drivers/net/fec_mpc52xx.c | 2
-rw-r--r--  drivers/net/fec_mpc52xx_phy.c | 2
-rw-r--r--  drivers/net/forcedeth.c | 234
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 4
-rw-r--r--  drivers/net/fs_enet/mii-bitbang.c | 4
-rw-r--r--  drivers/net/fs_enet/mii-fec.c | 4
-rw-r--r--  drivers/net/gianfar.c | 56
-rw-r--r--  drivers/net/gianfar.h | 13
-rw-r--r--  drivers/net/gianfar_mii.c | 2
-rw-r--r--  drivers/net/ixgb/ixgb.h | 32
-rw-r--r--  drivers/net/ixgb/ixgb_ee.c | 74
-rw-r--r--  drivers/net/ixgb/ixgb_ee.h | 10
-rw-r--r--  drivers/net/ixgb/ixgb_ethtool.c | 50
-rw-r--r--  drivers/net/ixgb/ixgb_hw.c | 144
-rw-r--r--  drivers/net/ixgb/ixgb_hw.h | 234
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 60
-rw-r--r--  drivers/net/macb.c | 2
-rw-r--r--  drivers/net/mv643xx_eth.c | 473
-rw-r--r--  drivers/net/natsemi.c | 10
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 18
-rw-r--r--  drivers/net/netxen/netxen_nic_isr.c | 2
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 19
-rw-r--r--  drivers/net/pasemi_mac.c | 2
-rw-r--r--  drivers/net/phy/broadcom.c | 20
-rw-r--r--  drivers/net/phy/fixed.c | 2
-rw-r--r--  drivers/net/phy/phy_device.c | 38
-rw-r--r--  drivers/net/sb1000.c | 101
-rw-r--r--  drivers/net/sb1250-mac.c | 2
-rw-r--r--  drivers/net/sc92031.c | 86
-rw-r--r--  drivers/net/spider_net.c | 36
-rw-r--r--  drivers/net/spider_net.h | 7
-rw-r--r--  drivers/net/tc35815.c | 1701
-rw-r--r--  drivers/net/tulip/tulip.h | 7
-rw-r--r--  drivers/net/tulip/tulip_core.c | 19
-rw-r--r--  drivers/net/tulip/winbond-840.c | 5
-rw-r--r--  drivers/net/ucc_geth.c | 2
-rw-r--r--  drivers/net/ucc_geth.h | 2
-rw-r--r--  drivers/net/ucc_geth_mii.c | 2
57 files changed, 2759 insertions, 3264 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index da30a31e66f9..45c3a208d93f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1438,7 +1438,7 @@ config CS89x0
 config TC35815
 	tristate "TOSHIBA TC35815 Ethernet support"
 	depends on NET_PCI && PCI && MIPS
-	select MII
+	select PHYLIB
 
 config EEPRO100
 	tristate "EtherExpressPro/100 support (eepro100, original Becker driver)"
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 504b7ce2747d..3634b5fd7919 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -701,7 +701,7 @@ static struct net_device * au1000_probe(int port_num)
 	aup->mii_bus.write = mdiobus_write;
 	aup->mii_bus.reset = mdiobus_reset;
 	aup->mii_bus.name = "au1000_eth_mii";
-	aup->mii_bus.id = aup->mac_id;
+	snprintf(aup->mii_bus.id, MII_BUS_ID_SIZE, "%x", aup->mac_id);
 	aup->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
 	for(i = 0; i < PHY_MAX_ADDR; ++i)
 		aup->mii_bus.irq[i] = PHY_POLL;
@@ -709,11 +709,11 @@ static struct net_device * au1000_probe(int port_num)
 	/* if known, set corresponding PHY IRQs */
 #if defined(AU1XXX_PHY_STATIC_CONFIG)
 # if defined(AU1XXX_PHY0_IRQ)
-	if (AU1XXX_PHY0_BUSID == aup->mii_bus.id)
+	if (AU1XXX_PHY0_BUSID == aup->mac_id)
 		aup->mii_bus.irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ;
 # endif
 # if defined(AU1XXX_PHY1_IRQ)
-	if (AU1XXX_PHY1_BUSID == aup->mii_bus.id)
+	if (AU1XXX_PHY1_BUSID == aup->mac_id)
 		aup->mii_bus.irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ;
 # endif
 #endif
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 26b2dd5016cd..717dcc1aa1e9 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -969,7 +969,7 @@ static int __init bf537mac_probe(struct net_device *dev)
 	lp->mii_bus.write = mdiobus_write;
 	lp->mii_bus.reset = mdiobus_reset;
 	lp->mii_bus.name = "bfin_mac_mdio";
-	lp->mii_bus.id = 0;
+	snprintf(lp->mii_bus.id, MII_BUS_ID_SIZE, "0");
 	lp->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
 	for (i = 0; i < PHY_MAX_ADDR; ++i)
 		lp->mii_bus.irq[i] = PHY_POLL;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ecfaf1460b1a..6e91b4b7aabb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3065,8 +3065,6 @@ out:
 
 #ifdef CONFIG_PROC_FS
 
-#define SEQ_START_TOKEN ((void *)1)
-
 static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	struct bonding *bond = seq->private;
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index c85194f2cd2d..9da7ff437031 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -987,7 +987,7 @@ static int external_switch;
 static int __devinit cpmac_probe(struct platform_device *pdev)
 {
 	int rc, phy_id, i;
-	int mdio_bus_id = cpmac_mii.id;
+	char *mdio_bus_id = "0";
 	struct resource *mem;
 	struct cpmac_priv *priv;
 	struct net_device *dev;
@@ -1008,8 +1008,6 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
 	if (external_switch || dumb_switch) {
 		struct fixed_phy_status status = {};
 
-		mdio_bus_id = 0;
-
 		/*
 		 * FIXME: this should be in the platform code!
 		 * Since there is not platform code at all (that is,
@@ -1143,6 +1141,7 @@ int __devinit cpmac_init(void)
 	}
 
 	cpmac_mii.phy_mask = ~(mask | 0x80000000);
+	snprintf(cpmac_mii.id, MII_BUS_ID_SIZE, "0");
 
 	res = mdiobus_register(&cpmac_mii);
 	if (res)
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index fd2e05bbb903..05e5f59e87fa 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1014,8 +1014,8 @@ static int offload_open(struct net_device *dev)
 			     adapter->port[0]->mtu : 0xffff);
 	init_smt(adapter);
 
-	/* Never mind if the next step fails */
-	sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
+	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
+		dev_dbg(&dev->dev, "cannot create sysfs group\n");
 
 	/* Call back all registered clients */
 	cxgb3_add_clients(tdev);
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index a05aa51ecfa6..31feae1ea390 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -161,13 +161,13 @@ struct e1000_buffer {
 	struct sk_buff *skb;
 	dma_addr_t dma;
 	unsigned long time_stamp;
-	uint16_t length;
-	uint16_t next_to_watch;
+	u16 length;
+	u16 next_to_watch;
 };
 
 
 struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
-struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
+struct e1000_ps_page_dma { u64 ps_page_dma[PS_PAGE_BUFFERS]; };
 
 struct e1000_tx_ring {
 	/* pointer to the descriptor ring memory */
@@ -186,8 +186,8 @@ struct e1000_tx_ring {
 	struct e1000_buffer *buffer_info;
 
 	spinlock_t tx_lock;
-	uint16_t tdh;
-	uint16_t tdt;
+	u16 tdh;
+	u16 tdt;
 	bool last_tx_tso;
 };
 
@@ -213,8 +213,8 @@ struct e1000_rx_ring {
 	/* cpu for rx queue */
 	int cpu;
 
-	uint16_t rdh;
-	uint16_t rdt;
+	u16 rdh;
+	u16 rdt;
 };
 
 #define E1000_DESC_UNUSED(R) \
@@ -237,14 +237,14 @@ struct e1000_adapter {
 	struct timer_list watchdog_timer;
 	struct timer_list phy_info_timer;
 	struct vlan_group *vlgrp;
-	uint16_t mng_vlan_id;
-	uint32_t bd_number;
-	uint32_t rx_buffer_len;
-	uint32_t wol;
-	uint32_t smartspeed;
-	uint32_t en_mng_pt;
-	uint16_t link_speed;
-	uint16_t link_duplex;
+	u16 mng_vlan_id;
+	u32 bd_number;
+	u32 rx_buffer_len;
+	u32 wol;
+	u32 smartspeed;
+	u32 en_mng_pt;
+	u16 link_speed;
+	u16 link_duplex;
 	spinlock_t stats_lock;
 #ifdef CONFIG_E1000_NAPI
 	spinlock_t tx_queue_lock;
@@ -254,13 +254,13 @@ struct e1000_adapter {
 	unsigned int total_rx_bytes;
 	unsigned int total_rx_packets;
 	/* Interrupt Throttle Rate */
-	uint32_t itr;
-	uint32_t itr_setting;
-	uint16_t tx_itr;
-	uint16_t rx_itr;
+	u32 itr;
+	u32 itr_setting;
+	u16 tx_itr;
+	u16 rx_itr;
 
 	struct work_struct reset_task;
-	uint8_t fc_autoneg;
+	u8 fc_autoneg;
 
 	struct timer_list blink_timer;
 	unsigned long led_status;
@@ -269,18 +269,18 @@ struct e1000_adapter {
 	struct e1000_tx_ring *tx_ring;      /* One per active queue */
 	unsigned int restart_queue;
 	unsigned long tx_queue_len;
-	uint32_t txd_cmd;
-	uint32_t tx_int_delay;
-	uint32_t tx_abs_int_delay;
-	uint32_t gotcl;
-	uint64_t gotcl_old;
-	uint64_t tpt_old;
-	uint64_t colc_old;
-	uint32_t tx_timeout_count;
-	uint32_t tx_fifo_head;
-	uint32_t tx_head_addr;
-	uint32_t tx_fifo_size;
-	uint8_t tx_timeout_factor;
+	u32 txd_cmd;
+	u32 tx_int_delay;
+	u32 tx_abs_int_delay;
+	u32 gotcl;
+	u64 gotcl_old;
+	u64 tpt_old;
+	u64 colc_old;
+	u32 tx_timeout_count;
+	u32 tx_fifo_head;
+	u32 tx_head_addr;
+	u32 tx_fifo_size;
+	u8 tx_timeout_factor;
 	atomic_t tx_fifo_stall;
 	bool pcix_82544;
 	bool detect_tx_hung;
@@ -305,17 +305,17 @@ struct e1000_adapter {
 	int num_tx_queues;
 	int num_rx_queues;
 
-	uint64_t hw_csum_err;
-	uint64_t hw_csum_good;
-	uint64_t rx_hdr_split;
-	uint32_t alloc_rx_buff_failed;
-	uint32_t rx_int_delay;
-	uint32_t rx_abs_int_delay;
+	u64 hw_csum_err;
+	u64 hw_csum_good;
+	u64 rx_hdr_split;
+	u32 alloc_rx_buff_failed;
+	u32 rx_int_delay;
+	u32 rx_abs_int_delay;
 	bool rx_csum;
 	unsigned int rx_ps_pages;
-	uint32_t gorcl;
-	uint64_t gorcl_old;
-	uint16_t rx_ps_bsize0;
+	u32 gorcl;
+	u64 gorcl_old;
+	u16 rx_ps_bsize0;
 
 
 	/* OS defined structs */
@@ -329,7 +329,7 @@ struct e1000_adapter {
 	struct e1000_phy_info phy_info;
 	struct e1000_phy_stats phy_stats;
 
-	uint32_t test_icr;
+	u32 test_icr;
 	struct e1000_tx_ring test_tx_ring;
 	struct e1000_rx_ring test_rx_ring;
 
@@ -341,7 +341,7 @@ struct e1000_adapter {
 	bool smart_power_down;	/* phy smart power down */
 	bool quad_port_a;
 	unsigned long flags;
-	uint32_t eeprom_wol;
+	u32 eeprom_wol;
 };
 
 enum e1000_state_t {
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 05e1fb3cf49f..701531e72e7b 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -36,7 +36,7 @@ extern int e1000_up(struct e1000_adapter *adapter);
 extern void e1000_down(struct e1000_adapter *adapter);
 extern void e1000_reinit_locked(struct e1000_adapter *adapter);
 extern void e1000_reset(struct e1000_adapter *adapter);
-extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
+extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
 extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
 extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
 extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
@@ -289,7 +289,7 @@ e1000_set_pauseparam(struct net_device *netdev,
 	return retval;
 }
 
-static uint32_t
+static u32
 e1000_get_rx_csum(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -297,7 +297,7 @@ e1000_get_rx_csum(struct net_device *netdev)
 }
 
 static int
-e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
+e1000_set_rx_csum(struct net_device *netdev, u32 data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	adapter->rx_csum = data;
@@ -309,14 +309,14 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
 	return 0;
 }
 
-static uint32_t
+static u32
 e1000_get_tx_csum(struct net_device *netdev)
 {
 	return (netdev->features & NETIF_F_HW_CSUM) != 0;
 }
 
 static int
-e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
+e1000_set_tx_csum(struct net_device *netdev, u32 data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
@@ -335,7 +335,7 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
 }
 
 static int
-e1000_set_tso(struct net_device *netdev, uint32_t data)
+e1000_set_tso(struct net_device *netdev, u32 data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	if ((adapter->hw.mac_type < e1000_82544) ||
@@ -357,7 +357,7 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
 	return 0;
 }
 
-static uint32_t
+static u32
 e1000_get_msglevel(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -365,7 +365,7 @@ e1000_get_msglevel(struct net_device *netdev)
 }
 
 static void
-e1000_set_msglevel(struct net_device *netdev, uint32_t data)
+e1000_set_msglevel(struct net_device *netdev, u32 data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	adapter->msg_enable = data;
@@ -375,7 +375,7 @@ static int
 e1000_get_regs_len(struct net_device *netdev)
 {
 #define E1000_REGS_LEN 32
-	return E1000_REGS_LEN * sizeof(uint32_t);
+	return E1000_REGS_LEN * sizeof(u32);
 }
 
 static void
@@ -384,10 +384,10 @@ e1000_get_regs(struct net_device *netdev,
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	uint32_t *regs_buff = p;
-	uint16_t phy_data;
+	u32 *regs_buff = p;
+	u16 phy_data;
 
-	memset(p, 0, E1000_REGS_LEN * sizeof(uint32_t));
+	memset(p, 0, E1000_REGS_LEN * sizeof(u32));
 
 	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
 
@@ -412,44 +412,44 @@ e1000_get_regs(struct net_device *netdev,
 				    IGP01E1000_PHY_AGC_A);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[13] = (uint32_t)phy_data; /* cable length */
+		regs_buff[13] = (u32)phy_data; /* cable length */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
 				    IGP01E1000_PHY_AGC_B);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[14] = (uint32_t)phy_data; /* cable length */
+		regs_buff[14] = (u32)phy_data; /* cable length */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
 				    IGP01E1000_PHY_AGC_C);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[15] = (uint32_t)phy_data; /* cable length */
+		regs_buff[15] = (u32)phy_data; /* cable length */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
 				    IGP01E1000_PHY_AGC_D);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[16] = (uint32_t)phy_data; /* cable length */
+		regs_buff[16] = (u32)phy_data; /* cable length */
 		regs_buff[17] = 0; /* extended 10bt distance (not needed) */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[18] = (uint32_t)phy_data; /* cable polarity */
+		regs_buff[18] = (u32)phy_data; /* cable polarity */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
 				    IGP01E1000_PHY_PCS_INIT_REG);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[19] = (uint32_t)phy_data; /* cable polarity */
+		regs_buff[19] = (u32)phy_data; /* cable polarity */
 		regs_buff[20] = 0; /* polarity correction enabled (always) */
 		regs_buff[22] = 0; /* phy receive errors (unavailable) */
 		regs_buff[23] = regs_buff[18]; /* mdix mode */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
 	} else {
 		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
-		regs_buff[13] = (uint32_t)phy_data; /* cable length */
+		regs_buff[13] = (u32)phy_data; /* cable length */
 		regs_buff[14] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
 		regs_buff[15] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
 		regs_buff[16] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
 		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-		regs_buff[17] = (uint32_t)phy_data; /* extended 10bt distance */
+		regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
 		regs_buff[18] = regs_buff[13]; /* cable polarity */
 		regs_buff[19] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
 		regs_buff[20] = regs_buff[17]; /* polarity correction */
@@ -459,7 +459,7 @@ e1000_get_regs(struct net_device *netdev,
 	}
 	regs_buff[21] = adapter->phy_stats.idle_errors;  /* phy idle errors */
 	e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
-	regs_buff[24] = (uint32_t)phy_data;  /* phy local receiver status */
+	regs_buff[24] = (u32)phy_data;  /* phy local receiver status */
 	regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
 	if (hw->mac_type >= e1000_82540 &&
 	    hw->mac_type < e1000_82571 &&
@@ -477,14 +477,14 @@ e1000_get_eeprom_len(struct net_device *netdev)
 
 static int
 e1000_get_eeprom(struct net_device *netdev,
-		 struct ethtool_eeprom *eeprom, uint8_t *bytes)
+		 struct ethtool_eeprom *eeprom, u8 *bytes)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	uint16_t *eeprom_buff;
+	u16 *eeprom_buff;
 	int first_word, last_word;
 	int ret_val = 0;
-	uint16_t i;
+	u16 i;
 
 	if (eeprom->len == 0)
 		return -EINVAL;
@@ -494,7 +494,7 @@ e1000_get_eeprom(struct net_device *netdev,
 	first_word = eeprom->offset >> 1;
 	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
 
-	eeprom_buff = kmalloc(sizeof(uint16_t) *
+	eeprom_buff = kmalloc(sizeof(u16) *
 			(last_word - first_word + 1), GFP_KERNEL);
 	if (!eeprom_buff)
 		return -ENOMEM;
@@ -514,7 +514,7 @@ e1000_get_eeprom(struct net_device *netdev,
 	for (i = 0; i < last_word - first_word + 1; i++)
 		le16_to_cpus(&eeprom_buff[i]);
 
-	memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1),
+	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
 			eeprom->len);
 	kfree(eeprom_buff);
 
@@ -523,14 +523,14 @@ e1000_get_eeprom(struct net_device *netdev,
 
 static int
 e1000_set_eeprom(struct net_device *netdev,
-		 struct ethtool_eeprom *eeprom, uint8_t *bytes)
+		 struct ethtool_eeprom *eeprom, u8 *bytes)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	uint16_t *eeprom_buff;
+	u16 *eeprom_buff;
 	void *ptr;
 	int max_len, first_word, last_word, ret_val = 0;
-	uint16_t i;
+	u16 i;
 
 	if (eeprom->len == 0)
 		return -EOPNOTSUPP;
@@ -590,7 +590,7 @@ e1000_get_drvinfo(struct net_device *netdev,
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	char firmware_version[32];
-	uint16_t eeprom_data;
+	u16 eeprom_data;
 
 	strncpy(drvinfo->driver,  e1000_driver_name, 32);
 	strncpy(drvinfo->version, e1000_driver_version, 32);
@@ -674,13 +674,13 @@ e1000_set_ringparam(struct net_device *netdev,
 	adapter->tx_ring = txdr;
 	adapter->rx_ring = rxdr;
 
-	rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
-	rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
+	rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
+	rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
 		E1000_MAX_RXD : E1000_MAX_82544_RXD));
 	rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
 
-	txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD);
-	txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ?
+	txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
+	txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
 		E1000_MAX_TXD : E1000_MAX_82544_TXD));
 	txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
@@ -728,13 +728,13 @@ err_setup:
 	return err;
 }
 
-static bool reg_pattern_test(struct e1000_adapter *adapter, uint64_t *data,
-			     int reg, uint32_t mask, uint32_t write)
+static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
+			     int reg, u32 mask, u32 write)
 {
-	static const uint32_t test[] =
+	static const u32 test[] =
 		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
-	uint8_t __iomem *address = adapter->hw.hw_addr + reg;
-	uint32_t read;
+	u8 __iomem *address = adapter->hw.hw_addr + reg;
+	u32 read;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(test); i++) {
@@ -751,11 +751,11 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, uint64_t *data,
 	return false;
 }
 
-static bool reg_set_and_check(struct e1000_adapter *adapter, uint64_t *data,
-			      int reg, uint32_t mask, uint32_t write)
+static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
+			      int reg, u32 mask, u32 write)
 {
-	uint8_t __iomem *address = adapter->hw.hw_addr + reg;
-	uint32_t read;
+	u8 __iomem *address = adapter->hw.hw_addr + reg;
+	u32 read;
 
 	writel(write & mask, address);
 	read = readl(address);
@@ -788,10 +788,10 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, uint64_t *data,
 	} while (0)
 
 static int
-e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
+e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 {
-	uint32_t value, before, after;
-	uint32_t i, toggle;
+	u32 value, before, after;
+	u32 i, toggle;
 
 	/* The status register is Read Only, so a write should fail.
 	 * Some bits that get toggled are ignored.
@@ -884,11 +884,11 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 }
 
 static int
-e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
+e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
 {
-	uint16_t temp;
-	uint16_t checksum = 0;
-	uint16_t i;
+	u16 temp;
+	u16 checksum = 0;
+	u16 i;
 
 	*data = 0;
 	/* Read and add up the contents of the EEPROM */
@@ -901,7 +901,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
 	}
 
 	/* If Checksum is not Correct return error else test passed */
-	if ((checksum != (uint16_t) EEPROM_SUM) && !(*data))
+	if ((checksum != (u16) EEPROM_SUM) && !(*data))
 		*data = 2;
 
 	return *data;
@@ -919,12 +919,12 @@ e1000_test_intr(int irq, void *data)
 }
 
 static int
-e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
+e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
 {
 	struct net_device *netdev = adapter->netdev;
-	uint32_t mask, i = 0;
+	u32 mask, i = 0;
 	bool shared_int = true;
-	uint32_t irq = adapter->pdev->irq;
+	u32 irq = adapter->pdev->irq;
 
 	*data = 0;
 
@@ -1070,7 +1070,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
 	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
 	struct pci_dev *pdev = adapter->pdev;
-	uint32_t rctl;
+	u32 rctl;
 	int i, ret_val;
 
 	/* Setup Tx descriptor ring and Tx buffers */
@@ -1096,8 +1096,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	txdr->next_to_use = txdr->next_to_clean = 0;
 
 	E1000_WRITE_REG(&adapter->hw, TDBAL,
-			((uint64_t) txdr->dma & 0x00000000FFFFFFFF));
-	E1000_WRITE_REG(&adapter->hw, TDBAH, ((uint64_t) txdr->dma >> 32));
+			((u64) txdr->dma & 0x00000000FFFFFFFF));
+	E1000_WRITE_REG(&adapter->hw, TDBAH, ((u64) txdr->dma >> 32));
 	E1000_WRITE_REG(&adapter->hw, TDLEN,
 			txdr->count * sizeof(struct e1000_tx_desc));
 	E1000_WRITE_REG(&adapter->hw, TDH, 0);
@@ -1153,8 +1153,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	rctl = E1000_READ_REG(&adapter->hw, RCTL);
 	E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
 	E1000_WRITE_REG(&adapter->hw, RDBAL,
-			((uint64_t) rxdr->dma & 0xFFFFFFFF));
-	E1000_WRITE_REG(&adapter->hw, RDBAH, ((uint64_t) rxdr->dma >> 32));
+			((u64) rxdr->dma & 0xFFFFFFFF));
+	E1000_WRITE_REG(&adapter->hw, RDBAH, ((u64) rxdr->dma >> 32));
 	E1000_WRITE_REG(&adapter->hw, RDLEN, rxdr->size);
 	E1000_WRITE_REG(&adapter->hw, RDH, 0);
 	E1000_WRITE_REG(&adapter->hw, RDT, 0);
@@ -1202,7 +1202,7 @@ e1000_phy_disable_receiver(struct e1000_adapter *adapter)
 static void
 e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
 {
-	uint16_t phy_reg;
+	u16 phy_reg;
 
 	/* Because we reset the PHY above, we need to re-force TX_CLK in the
 	 * Extended PHY Specific Control Register to 25MHz clock.  This
@@ -1226,8 +1226,8 @@ e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
 static int
 e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
 {
-	uint32_t ctrl_reg;
-	uint16_t phy_reg;
+	u32 ctrl_reg;
+	u16 phy_reg;
 
 	/* Setup the Device Control Register for PHY loopback test. */
 
@@ -1293,8 +1293,8 @@ e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
 static int
 e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 {
-	uint32_t ctrl_reg = 0;
-	uint32_t stat_reg = 0;
+	u32 ctrl_reg = 0;
+	u32 stat_reg = 0;
 
 	adapter->hw.autoneg = false;
 
@@ -1363,8 +1363,8 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 static int
 e1000_set_phy_loopback(struct e1000_adapter *adapter)
 {
-	uint16_t phy_reg = 0;
-	uint16_t count = 0;
+	u16 phy_reg = 0;
+	u16 count = 0;
 
 	switch (adapter->hw.mac_type) {
 	case e1000_82543:
@@ -1416,7 +1416,7 @@ static int
 e1000_setup_loopback_test(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	uint32_t rctl;
+	u32 rctl;
 
 	if (hw->media_type == e1000_media_type_fiber ||
 	    hw->media_type == e1000_media_type_internal_serdes) {
@@ -1451,8 +1451,8 @@ static void
 e1000_loopback_cleanup(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	uint32_t rctl;
-	uint16_t phy_reg;
+	u32 rctl;
+	u16 phy_reg;
 
 	rctl = E1000_READ_REG(hw, RCTL);
 	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
@@ -1578,7 +1578,7 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
 }
 
 static int
-e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
+e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
 {
 	/* PHY loopback cannot be performed if SoL/IDER
 	 * sessions are active */
@@ -1603,7 +1603,7 @@ out:
 }
 
 static int
-e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
+e1000_link_test(struct e1000_adapter *adapter, u64 *data)
 {
 	*data = 0;
 	if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
@@ -1647,7 +1647,7 @@ e1000_get_sset_count(struct net_device *netdev, int sset)
 
 static void
 e1000_diag_test(struct net_device *netdev,
-		struct ethtool_test *eth_test, uint64_t *data)
+		struct ethtool_test *eth_test, u64 *data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	bool if_running = netif_running(netdev);
@@ -1657,9 +1657,9 @@ e1000_diag_test(struct net_device *netdev,
 		/* Offline tests */
 
 		/* save speed, duplex, autoneg settings */
-		uint16_t autoneg_advertised = adapter->hw.autoneg_advertised;
-		uint8_t forced_speed_duplex = adapter->hw.forced_speed_duplex;
-		uint8_t autoneg = adapter->hw.autoneg;
+		u16 autoneg_advertised = adapter->hw.autoneg_advertised;
+		u8 forced_speed_duplex = adapter->hw.forced_speed_duplex;
+		u8 autoneg = adapter->hw.autoneg;
 
 		DPRINTK(HW, INFO, "offline testing starting\n");
 
@@ -1877,7 +1877,7 @@ e1000_led_blink_callback(unsigned long data)
 }
 
 static int
-e1000_phys_id(struct net_device *netdev, uint32_t data)
+e1000_phys_id(struct net_device *netdev, u32 data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
@@ -1927,7 +1927,7 @@ e1000_nway_reset(struct net_device *netdev)
 
 static void
 e1000_get_ethtool_stats(struct net_device *netdev,
-		struct ethtool_stats *stats, uint64_t *data)
+		struct ethtool_stats *stats, u64 *data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	int i;
@@ -1936,15 +1936,15 @@ e1000_get_ethtool_stats(struct net_device *netdev,
 	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
 		char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
 		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
-			sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
+			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 /*	BUG_ON(i != E1000_STATS_LEN); */
 }
 
 static void
-e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
+e1000_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 {
-	uint8_t *p = data;
+	u8 *p = data;
 	int i;
 
 	switch (stringset) {
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index b64203458e9a..9a4b6cbddf2c 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -33,107 +33,107 @@
 
 #include "e1000_hw.h"
 
-static int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
-static void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
-static int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data);
-static int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
-static int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
+static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask);
+static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask);
+static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data);
+static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
+static s32 e1000_get_software_semaphore(struct e1000_hw *hw);
 static void e1000_release_software_semaphore(struct e1000_hw *hw);
 
-static uint8_t e1000_arc_subsystem_valid(struct e1000_hw *hw);
-static int32_t e1000_check_downshift(struct e1000_hw *hw);
-static int32_t e1000_check_polarity(struct e1000_hw *hw, e1000_rev_polarity *polarity);
+static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw);
+static s32 e1000_check_downshift(struct e1000_hw *hw);
+static s32 e1000_check_polarity(struct e1000_hw *hw, e1000_rev_polarity *polarity);
 static void e1000_clear_hw_cntrs(struct e1000_hw *hw);
 static void e1000_clear_vfta(struct e1000_hw *hw);
-static int32_t e1000_commit_shadow_ram(struct e1000_hw *hw);
-static int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw,
+static s32 e1000_commit_shadow_ram(struct e1000_hw *hw);
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw,
                                                   bool link_up);
-static int32_t e1000_config_fc_after_link_up(struct e1000_hw *hw);
-static int32_t e1000_detect_gig_phy(struct e1000_hw *hw);
-static int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank);
-static int32_t e1000_get_auto_rd_done(struct e1000_hw *hw);
-static int32_t e1000_get_cable_length(struct e1000_hw *hw, uint16_t *min_length, uint16_t *max_length);
-static int32_t e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
-static int32_t e1000_get_phy_cfg_done(struct e1000_hw *hw);
-static int32_t e1000_get_software_flag(struct e1000_hw *hw);
-static int32_t e1000_ich8_cycle_init(struct e1000_hw *hw);
-static int32_t e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout);
-static int32_t e1000_id_led_init(struct e1000_hw *hw);
-static int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size);
-static int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw);
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw);
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw);
+static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank);
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw);
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, u16 *max_length);
+static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
+static s32 e1000_get_software_flag(struct e1000_hw *hw);
+static s32 e1000_ich8_cycle_init(struct e1000_hw *hw);
+static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout);
+static s32 e1000_id_led_init(struct e1000_hw *hw);
+static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, u32 cnf_base_addr, u32 cnf_size);
+static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw);
 static void e1000_init_rx_addrs(struct e1000_hw *hw);
 static void e1000_initialize_hardware_bits(struct e1000_hw *hw);
 static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
-static int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
-static int32_t e1000_mng_enable_host_if(struct e1000_hw *hw);
-static int32_t e1000_mng_host_if_write(struct e1000_hw *hw, uint8_t *buffer, uint16_t length, uint16_t offset, uint8_t *sum);
-static int32_t e1000_mng_write_cmd_header(struct e1000_hw* hw, struct e1000_host_mng_command_header* hdr);
-static int32_t e1000_mng_write_commit(struct e1000_hw *hw);
-static int32_t e1000_phy_ife_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
-static int32_t e1000_phy_igp_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
-static int32_t e1000_read_eeprom_eerd(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
-static int32_t e1000_write_eeprom_eewr(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
-static int32_t e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
-static int32_t e1000_phy_m88_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
+static s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, u16 offset, u8 *sum);
+static s32 e1000_mng_write_cmd_header(struct e1000_hw* hw, struct e1000_host_mng_command_header* hdr);
+static s32 e1000_mng_write_commit(struct e1000_hw *hw);
+static s32 e1000_phy_ife_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
 static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
-static int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t *data);
-static int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte);
-static int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte);
-static int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data);
-static int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, uint16_t *data);
-static int32_t e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, uint16_t data);
-static int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
-static int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
+static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data);
+static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
+static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
+static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data);
+static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size, u16 *data);
+static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size, u16 data);
+static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 static void e1000_release_software_flag(struct e1000_hw *hw);
-static int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
-static int32_t e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
-static int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop);
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop);
 static void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
-static int32_t e1000_wait_autoneg(struct e1000_hw *hw);
-static void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, uint32_t value);
-static int32_t e1000_set_phy_type(struct e1000_hw *hw);
+static s32 e1000_wait_autoneg(struct e1000_hw *hw);
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value);
+static s32 e1000_set_phy_type(struct e1000_hw *hw);
 static void e1000_phy_init_script(struct e1000_hw *hw);
-static int32_t e1000_setup_copper_link(struct e1000_hw *hw);
-static int32_t e1000_setup_fiber_serdes_link(struct e1000_hw *hw);
-static int32_t e1000_adjust_serdes_amplitude(struct e1000_hw *hw);
-static int32_t e1000_phy_force_speed_duplex(struct e1000_hw *hw);
-static int32_t e1000_config_mac_to_phy(struct e1000_hw *hw);
-static void e1000_raise_mdi_clk(struct e1000_hw *hw, uint32_t *ctrl);
-static void e1000_lower_mdi_clk(struct e1000_hw *hw, uint32_t *ctrl);
-static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, uint32_t data,
-                                     uint16_t count);
-static uint16_t e1000_shift_in_mdi_bits(struct e1000_hw *hw);
-static int32_t e1000_phy_reset_dsp(struct e1000_hw *hw);
-static int32_t e1000_write_eeprom_spi(struct e1000_hw *hw, uint16_t offset,
-                                      uint16_t words, uint16_t *data);
-static int32_t e1000_write_eeprom_microwire(struct e1000_hw *hw,
-                                            uint16_t offset, uint16_t words,
-                                            uint16_t *data);
-static int32_t e1000_spi_eeprom_ready(struct e1000_hw *hw);
-static void e1000_raise_ee_clk(struct e1000_hw *hw, uint32_t *eecd);
-static void e1000_lower_ee_clk(struct e1000_hw *hw, uint32_t *eecd);
-static void e1000_shift_out_ee_bits(struct e1000_hw *hw, uint16_t data,
-                                    uint16_t count);
-static int32_t e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
-                                      uint16_t phy_data);
-static int32_t e1000_read_phy_reg_ex(struct e1000_hw *hw,uint32_t reg_addr,
-                                     uint16_t *phy_data);
-static uint16_t e1000_shift_in_ee_bits(struct e1000_hw *hw, uint16_t count);
-static int32_t e1000_acquire_eeprom(struct e1000_hw *hw);
+static s32 e1000_setup_copper_link(struct e1000_hw *hw);
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw);
+static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw);
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+static s32 e1000_config_mac_to_phy(struct e1000_hw *hw);
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data,
+                                     u16 count);
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw);
+static s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
+static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset,
+                                  u16 words, u16 *data);
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw,
+                                        u16 offset, u16 words,
+                                        u16 *data);
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw);
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data,
+                                    u16 count);
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+                                  u16 phy_data);
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw,u32 reg_addr,
+                                 u16 *phy_data);
+static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count);
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw);
 static void e1000_release_eeprom(struct e1000_hw *hw);
 static void e1000_standby_eeprom(struct e1000_hw *hw);
-static int32_t e1000_set_vco_speed(struct e1000_hw *hw);
-static int32_t e1000_polarity_reversal_workaround(struct e1000_hw *hw);
-static int32_t e1000_set_phy_mode(struct e1000_hw *hw);
-static int32_t e1000_host_if_read_cookie(struct e1000_hw *hw, uint8_t *buffer);
-static uint8_t e1000_calculate_mng_checksum(char *buffer, uint32_t length);
-static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw,
-                                               uint16_t duplex);
-static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
+static s32 e1000_set_vco_speed(struct e1000_hw *hw);
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw);
+static s32 e1000_set_phy_mode(struct e1000_hw *hw);
+static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer);
+static u8 e1000_calculate_mng_checksum(char *buffer, u32 length);
+static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw,
+                                           u16 duplex);
+static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
 
 /* IGP cable length table */
 static const
-uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
+u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
     { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
       5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
       25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
@@ -144,7 +144,7 @@ uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
       110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120};
 
 static const
-uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
+u16 e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
     { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
       0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
       6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
@@ -159,7 +159,7 @@ uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-static int32_t
+static s32
 e1000_set_phy_type(struct e1000_hw *hw)
 {
     DEBUGFUNC("e1000_set_phy_type");
@@ -213,8 +213,8 @@ e1000_set_phy_type(struct e1000_hw *hw)
213static void 213static void
214e1000_phy_init_script(struct e1000_hw *hw) 214e1000_phy_init_script(struct e1000_hw *hw)
215{ 215{
216 uint32_t ret_val; 216 u32 ret_val;
217 uint16_t phy_saved_data; 217 u16 phy_saved_data;
218 218
219 DEBUGFUNC("e1000_phy_init_script"); 219 DEBUGFUNC("e1000_phy_init_script");
220 220
@@ -272,7 +272,7 @@ e1000_phy_init_script(struct e1000_hw *hw)
272 e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); 272 e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
273 273
274 if (hw->mac_type == e1000_82547) { 274 if (hw->mac_type == e1000_82547) {
275 uint16_t fused, fine, coarse; 275 u16 fused, fine, coarse;
276 276
277 /* Move to analog registers page */ 277 /* Move to analog registers page */
278 e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused); 278 e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused);
@@ -306,7 +306,7 @@ e1000_phy_init_script(struct e1000_hw *hw)
306 * 306 *
307 * hw - Struct containing variables accessed by shared code 307 * hw - Struct containing variables accessed by shared code
308 *****************************************************************************/ 308 *****************************************************************************/
309int32_t 309s32
310e1000_set_mac_type(struct e1000_hw *hw) 310e1000_set_mac_type(struct e1000_hw *hw)
311{ 311{
312 DEBUGFUNC("e1000_set_mac_type"); 312 DEBUGFUNC("e1000_set_mac_type");
@@ -477,7 +477,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
477void 477void
478e1000_set_media_type(struct e1000_hw *hw) 478e1000_set_media_type(struct e1000_hw *hw)
479{ 479{
480 uint32_t status; 480 u32 status;
481 481
482 DEBUGFUNC("e1000_set_media_type"); 482 DEBUGFUNC("e1000_set_media_type");
483 483
@@ -528,17 +528,17 @@ e1000_set_media_type(struct e1000_hw *hw)
528 * 528 *
529 * hw - Struct containing variables accessed by shared code 529 * hw - Struct containing variables accessed by shared code
530 *****************************************************************************/ 530 *****************************************************************************/
531int32_t 531s32
532e1000_reset_hw(struct e1000_hw *hw) 532e1000_reset_hw(struct e1000_hw *hw)
533{ 533{
534 uint32_t ctrl; 534 u32 ctrl;
535 uint32_t ctrl_ext; 535 u32 ctrl_ext;
536 uint32_t icr; 536 u32 icr;
537 uint32_t manc; 537 u32 manc;
538 uint32_t led_ctrl; 538 u32 led_ctrl;
539 uint32_t timeout; 539 u32 timeout;
540 uint32_t extcnf_ctrl; 540 u32 extcnf_ctrl;
541 int32_t ret_val; 541 s32 ret_val;
542 542
543 DEBUGFUNC("e1000_reset_hw"); 543 DEBUGFUNC("e1000_reset_hw");
544 544
@@ -730,7 +730,7 @@ e1000_reset_hw(struct e1000_hw *hw)
730 } 730 }
731 731
732 if (hw->mac_type == e1000_ich8lan) { 732 if (hw->mac_type == e1000_ich8lan) {
733 uint32_t kab = E1000_READ_REG(hw, KABGTXD); 733 u32 kab = E1000_READ_REG(hw, KABGTXD);
734 kab |= E1000_KABGTXD_BGSQLBIAS; 734 kab |= E1000_KABGTXD_BGSQLBIAS;
735 E1000_WRITE_REG(hw, KABGTXD, kab); 735 E1000_WRITE_REG(hw, KABGTXD, kab);
736 } 736 }
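
The KABGTXD update above is the usual read-modify-write pattern for OR-ing a bit into a memory-mapped register without disturbing its other fields. A standalone sketch, with hypothetical mmio_read32/mmio_write32 helpers standing in for the E1000_READ_REG/E1000_WRITE_REG macros:

#include <stdint.h>

/* Hypothetical MMIO accessors; the driver uses its own register macros. */
static inline uint32_t mmio_read32(volatile uint32_t *reg)
{
        return *reg;
}

static inline void mmio_write32(volatile uint32_t *reg, uint32_t val)
{
        *reg = val;
}

/* Set the given bits while leaving the rest of the register unchanged. */
static void reg_set_bits(volatile uint32_t *reg, uint32_t bits)
{
        uint32_t val = mmio_read32(reg);

        val |= bits;
        mmio_write32(reg, val);
}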
@@ -752,10 +752,10 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw)
752{ 752{
753 if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) { 753 if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) {
754 /* Settings common to all PCI-express silicon */ 754 /* Settings common to all PCI-express silicon */
755 uint32_t reg_ctrl, reg_ctrl_ext; 755 u32 reg_ctrl, reg_ctrl_ext;
756 uint32_t reg_tarc0, reg_tarc1; 756 u32 reg_tarc0, reg_tarc1;
757 uint32_t reg_tctl; 757 u32 reg_tctl;
758 uint32_t reg_txdctl, reg_txdctl1; 758 u32 reg_txdctl, reg_txdctl1;
759 759
760 /* link autonegotiation/sync workarounds */ 760 /* link autonegotiation/sync workarounds */
761 reg_tarc0 = E1000_READ_REG(hw, TARC0); 761 reg_tarc0 = E1000_READ_REG(hw, TARC0);
@@ -866,15 +866,15 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw)
866 * configuration and flow control settings. Clears all on-chip counters. Leaves 866 * configuration and flow control settings. Clears all on-chip counters. Leaves
867 * the transmit and receive units disabled and uninitialized. 867 * the transmit and receive units disabled and uninitialized.
868 *****************************************************************************/ 868 *****************************************************************************/
869int32_t 869s32
870e1000_init_hw(struct e1000_hw *hw) 870e1000_init_hw(struct e1000_hw *hw)
871{ 871{
872 uint32_t ctrl; 872 u32 ctrl;
873 uint32_t i; 873 u32 i;
874 int32_t ret_val; 874 s32 ret_val;
875 uint32_t mta_size; 875 u32 mta_size;
876 uint32_t reg_data; 876 u32 reg_data;
877 uint32_t ctrl_ext; 877 u32 ctrl_ext;
878 878
879 DEBUGFUNC("e1000_init_hw"); 879 DEBUGFUNC("e1000_init_hw");
880 880
@@ -1020,7 +1020,7 @@ e1000_init_hw(struct e1000_hw *hw)
1020 1020
1021 1021
1022 if (hw->mac_type == e1000_82573) { 1022 if (hw->mac_type == e1000_82573) {
1023 uint32_t gcr = E1000_READ_REG(hw, GCR); 1023 u32 gcr = E1000_READ_REG(hw, GCR);
1024 gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; 1024 gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
1025 E1000_WRITE_REG(hw, GCR, gcr); 1025 E1000_WRITE_REG(hw, GCR, gcr);
1026 } 1026 }
@@ -1054,11 +1054,11 @@ e1000_init_hw(struct e1000_hw *hw)
1054 * 1054 *
1055 * hw - Struct containing variables accessed by shared code. 1055 * hw - Struct containing variables accessed by shared code.
1056 *****************************************************************************/ 1056 *****************************************************************************/
1057static int32_t 1057static s32
1058e1000_adjust_serdes_amplitude(struct e1000_hw *hw) 1058e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
1059{ 1059{
1060 uint16_t eeprom_data; 1060 u16 eeprom_data;
1061 int32_t ret_val; 1061 s32 ret_val;
1062 1062
1063 DEBUGFUNC("e1000_adjust_serdes_amplitude"); 1063 DEBUGFUNC("e1000_adjust_serdes_amplitude");
1064 1064
@@ -1100,12 +1100,12 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
1100 * established. Assumes the hardware has previously been reset and the 1100 * established. Assumes the hardware has previously been reset and the
1101 * transmitter and receiver are not enabled. 1101 * transmitter and receiver are not enabled.
1102 *****************************************************************************/ 1102 *****************************************************************************/
1103int32_t 1103s32
1104e1000_setup_link(struct e1000_hw *hw) 1104e1000_setup_link(struct e1000_hw *hw)
1105{ 1105{
1106 uint32_t ctrl_ext; 1106 u32 ctrl_ext;
1107 int32_t ret_val; 1107 s32 ret_val;
1108 uint16_t eeprom_data; 1108 u16 eeprom_data;
1109 1109
1110 DEBUGFUNC("e1000_setup_link"); 1110 DEBUGFUNC("e1000_setup_link");
1111 1111
@@ -1233,15 +1233,15 @@ e1000_setup_link(struct e1000_hw *hw)
1233 * link. Assumes the hardware has been previously reset and the transmitter 1233 * link. Assumes the hardware has been previously reset and the transmitter
1234 * and receiver are not enabled. 1234 * and receiver are not enabled.
1235 *****************************************************************************/ 1235 *****************************************************************************/
1236static int32_t 1236static s32
1237e1000_setup_fiber_serdes_link(struct e1000_hw *hw) 1237e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
1238{ 1238{
1239 uint32_t ctrl; 1239 u32 ctrl;
1240 uint32_t status; 1240 u32 status;
1241 uint32_t txcw = 0; 1241 u32 txcw = 0;
1242 uint32_t i; 1242 u32 i;
1243 uint32_t signal = 0; 1243 u32 signal = 0;
1244 int32_t ret_val; 1244 s32 ret_val;
1245 1245
1246 DEBUGFUNC("e1000_setup_fiber_serdes_link"); 1246 DEBUGFUNC("e1000_setup_fiber_serdes_link");
1247 1247
@@ -1380,12 +1380,12 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
1380* 1380*
1381* hw - Struct containing variables accessed by shared code 1381* hw - Struct containing variables accessed by shared code
1382******************************************************************************/ 1382******************************************************************************/
1383static int32_t 1383static s32
1384e1000_copper_link_preconfig(struct e1000_hw *hw) 1384e1000_copper_link_preconfig(struct e1000_hw *hw)
1385{ 1385{
1386 uint32_t ctrl; 1386 u32 ctrl;
1387 int32_t ret_val; 1387 s32 ret_val;
1388 uint16_t phy_data; 1388 u16 phy_data;
1389 1389
1390 DEBUGFUNC("e1000_copper_link_preconfig"); 1390 DEBUGFUNC("e1000_copper_link_preconfig");
1391 1391
@@ -1440,12 +1440,12 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
1440* 1440*
1441* hw - Struct containing variables accessed by shared code 1441* hw - Struct containing variables accessed by shared code
1442*********************************************************************/ 1442*********************************************************************/
1443static int32_t 1443static s32
1444e1000_copper_link_igp_setup(struct e1000_hw *hw) 1444e1000_copper_link_igp_setup(struct e1000_hw *hw)
1445{ 1445{
1446 uint32_t led_ctrl; 1446 u32 led_ctrl;
1447 int32_t ret_val; 1447 s32 ret_val;
1448 uint16_t phy_data; 1448 u16 phy_data;
1449 1449
1450 DEBUGFUNC("e1000_copper_link_igp_setup"); 1450 DEBUGFUNC("e1000_copper_link_igp_setup");
1451 1451
@@ -1587,12 +1587,12 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
1587* 1587*
1588* hw - Struct containing variables accessed by shared code 1588* hw - Struct containing variables accessed by shared code
1589*********************************************************************/ 1589*********************************************************************/
1590static int32_t 1590static s32
1591e1000_copper_link_ggp_setup(struct e1000_hw *hw) 1591e1000_copper_link_ggp_setup(struct e1000_hw *hw)
1592{ 1592{
1593 int32_t ret_val; 1593 s32 ret_val;
1594 uint16_t phy_data; 1594 u16 phy_data;
1595 uint32_t reg_data; 1595 u32 reg_data;
1596 1596
1597 DEBUGFUNC("e1000_copper_link_ggp_setup"); 1597 DEBUGFUNC("e1000_copper_link_ggp_setup");
1598 1598
@@ -1735,11 +1735,11 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
1735* 1735*
1736* hw - Struct containing variables accessed by shared code 1736* hw - Struct containing variables accessed by shared code
1737*********************************************************************/ 1737*********************************************************************/
1738static int32_t 1738static s32
1739e1000_copper_link_mgp_setup(struct e1000_hw *hw) 1739e1000_copper_link_mgp_setup(struct e1000_hw *hw)
1740{ 1740{
1741 int32_t ret_val; 1741 s32 ret_val;
1742 uint16_t phy_data; 1742 u16 phy_data;
1743 1743
1744 DEBUGFUNC("e1000_copper_link_mgp_setup"); 1744 DEBUGFUNC("e1000_copper_link_mgp_setup");
1745 1745
@@ -1839,11 +1839,11 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
1839* 1839*
1840* hw - Struct containing variables accessed by shared code 1840* hw - Struct containing variables accessed by shared code
1841*********************************************************************/ 1841*********************************************************************/
1842static int32_t 1842static s32
1843e1000_copper_link_autoneg(struct e1000_hw *hw) 1843e1000_copper_link_autoneg(struct e1000_hw *hw)
1844{ 1844{
1845 int32_t ret_val; 1845 s32 ret_val;
1846 uint16_t phy_data; 1846 u16 phy_data;
1847 1847
1848 DEBUGFUNC("e1000_copper_link_autoneg"); 1848 DEBUGFUNC("e1000_copper_link_autoneg");
1849 1849
@@ -1910,10 +1910,10 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
1910* 1910*
1911* hw - Struct containing variables accessed by shared code 1911* hw - Struct containing variables accessed by shared code
1912******************************************************************************/ 1912******************************************************************************/
1913static int32_t 1913static s32
1914e1000_copper_link_postconfig(struct e1000_hw *hw) 1914e1000_copper_link_postconfig(struct e1000_hw *hw)
1915{ 1915{
1916 int32_t ret_val; 1916 s32 ret_val;
1917 DEBUGFUNC("e1000_copper_link_postconfig"); 1917 DEBUGFUNC("e1000_copper_link_postconfig");
1918 1918
1919 if (hw->mac_type >= e1000_82544) { 1919 if (hw->mac_type >= e1000_82544) {
@@ -1948,13 +1948,13 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
1948* 1948*
1949* hw - Struct containing variables accessed by shared code 1949* hw - Struct containing variables accessed by shared code
1950******************************************************************************/ 1950******************************************************************************/
1951static int32_t 1951static s32
1952e1000_setup_copper_link(struct e1000_hw *hw) 1952e1000_setup_copper_link(struct e1000_hw *hw)
1953{ 1953{
1954 int32_t ret_val; 1954 s32 ret_val;
1955 uint16_t i; 1955 u16 i;
1956 uint16_t phy_data; 1956 u16 phy_data;
1957 uint16_t reg_data; 1957 u16 reg_data;
1958 1958
1959 DEBUGFUNC("e1000_setup_copper_link"); 1959 DEBUGFUNC("e1000_setup_copper_link");
1960 1960
@@ -2062,12 +2062,12 @@ e1000_setup_copper_link(struct e1000_hw *hw)
2062* 2062*
2063* hw - Struct containing variables accessed by shared code 2063* hw - Struct containing variables accessed by shared code
2064******************************************************************************/ 2064******************************************************************************/
2065static int32_t 2065static s32
2066e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex) 2066e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex)
2067{ 2067{
2068 int32_t ret_val = E1000_SUCCESS; 2068 s32 ret_val = E1000_SUCCESS;
2069 uint32_t tipg; 2069 u32 tipg;
2070 uint16_t reg_data; 2070 u16 reg_data;
2071 2071
2072 DEBUGFUNC("e1000_configure_kmrn_for_10_100"); 2072 DEBUGFUNC("e1000_configure_kmrn_for_10_100");
2073 2073
@@ -2098,12 +2098,12 @@ e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex)
2098 return ret_val; 2098 return ret_val;
2099} 2099}
2100 2100
2101static int32_t 2101static s32
2102e1000_configure_kmrn_for_1000(struct e1000_hw *hw) 2102e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
2103{ 2103{
2104 int32_t ret_val = E1000_SUCCESS; 2104 s32 ret_val = E1000_SUCCESS;
2105 uint16_t reg_data; 2105 u16 reg_data;
2106 uint32_t tipg; 2106 u32 tipg;
2107 2107
2108 DEBUGFUNC("e1000_configure_kmrn_for_1000"); 2108 DEBUGFUNC("e1000_configure_kmrn_for_1000");
2109 2109
@@ -2135,12 +2135,12 @@ e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
2135* 2135*
2136* hw - Struct containing variables accessed by shared code 2136* hw - Struct containing variables accessed by shared code
2137******************************************************************************/ 2137******************************************************************************/
2138int32_t 2138s32
2139e1000_phy_setup_autoneg(struct e1000_hw *hw) 2139e1000_phy_setup_autoneg(struct e1000_hw *hw)
2140{ 2140{
2141 int32_t ret_val; 2141 s32 ret_val;
2142 uint16_t mii_autoneg_adv_reg; 2142 u16 mii_autoneg_adv_reg;
2143 uint16_t mii_1000t_ctrl_reg; 2143 u16 mii_1000t_ctrl_reg;
2144 2144
2145 DEBUGFUNC("e1000_phy_setup_autoneg"); 2145 DEBUGFUNC("e1000_phy_setup_autoneg");
2146 2146
@@ -2284,15 +2284,15 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
2284* 2284*
2285* hw - Struct containing variables accessed by shared code 2285* hw - Struct containing variables accessed by shared code
2286******************************************************************************/ 2286******************************************************************************/
2287static int32_t 2287static s32
2288e1000_phy_force_speed_duplex(struct e1000_hw *hw) 2288e1000_phy_force_speed_duplex(struct e1000_hw *hw)
2289{ 2289{
2290 uint32_t ctrl; 2290 u32 ctrl;
2291 int32_t ret_val; 2291 s32 ret_val;
2292 uint16_t mii_ctrl_reg; 2292 u16 mii_ctrl_reg;
2293 uint16_t mii_status_reg; 2293 u16 mii_status_reg;
2294 uint16_t phy_data; 2294 u16 phy_data;
2295 uint16_t i; 2295 u16 i;
2296 2296
2297 DEBUGFUNC("e1000_phy_force_speed_duplex"); 2297 DEBUGFUNC("e1000_phy_force_speed_duplex");
2298 2298
@@ -2538,7 +2538,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
2538void 2538void
2539e1000_config_collision_dist(struct e1000_hw *hw) 2539e1000_config_collision_dist(struct e1000_hw *hw)
2540{ 2540{
2541 uint32_t tctl, coll_dist; 2541 u32 tctl, coll_dist;
2542 2542
2543 DEBUGFUNC("e1000_config_collision_dist"); 2543 DEBUGFUNC("e1000_config_collision_dist");
2544 2544
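
e1000_config_collision_dist() rewrites only the collision-distance field of TCTL, a masked field insert rather than a full register write. A sketch of that update with illustrative mask and shift values (not the driver's constants):

#include <stdint.h>

#define TCTL_COLD_MASK   0x003ff000u   /* illustrative collision-distance field */
#define TCTL_COLD_SHIFT  12            /* illustrative field offset */

/* Replace the collision-distance field, leaving all other TCTL bits alone. */
static uint32_t tctl_set_coll_dist(uint32_t tctl, uint32_t coll_dist)
{
        tctl &= ~TCTL_COLD_MASK;
        tctl |= (coll_dist << TCTL_COLD_SHIFT) & TCTL_COLD_MASK;
        return tctl;
}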
@@ -2565,12 +2565,12 @@ e1000_config_collision_dist(struct e1000_hw *hw)
2565* The contents of the PHY register containing the needed information need to 2565* The contents of the PHY register containing the needed information need to
2566* be passed in. 2566* be passed in.
2567******************************************************************************/ 2567******************************************************************************/
2568static int32_t 2568static s32
2569e1000_config_mac_to_phy(struct e1000_hw *hw) 2569e1000_config_mac_to_phy(struct e1000_hw *hw)
2570{ 2570{
2571 uint32_t ctrl; 2571 u32 ctrl;
2572 int32_t ret_val; 2572 s32 ret_val;
2573 uint16_t phy_data; 2573 u16 phy_data;
2574 2574
2575 DEBUGFUNC("e1000_config_mac_to_phy"); 2575 DEBUGFUNC("e1000_config_mac_to_phy");
2576 2576
@@ -2624,10 +2624,10 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
2624 * by the PHY rather than the MAC. Software must also configure these 2624 * by the PHY rather than the MAC. Software must also configure these
2625 * bits when link is forced on a fiber connection. 2625 * bits when link is forced on a fiber connection.
2626 *****************************************************************************/ 2626 *****************************************************************************/
2627int32_t 2627s32
2628e1000_force_mac_fc(struct e1000_hw *hw) 2628e1000_force_mac_fc(struct e1000_hw *hw)
2629{ 2629{
2630 uint32_t ctrl; 2630 u32 ctrl;
2631 2631
2632 DEBUGFUNC("e1000_force_mac_fc"); 2632 DEBUGFUNC("e1000_force_mac_fc");
2633 2633
@@ -2691,15 +2691,15 @@ e1000_force_mac_fc(struct e1000_hw *hw)
2691 * based on the flow control negotiated by the PHY. In TBI mode, the TFCE 2691 * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
2692 * and RFCE bits will be automatically set to the negotiated flow control mode. 2692 * and RFCE bits will be automatically set to the negotiated flow control mode.
2693 *****************************************************************************/ 2693 *****************************************************************************/
2694static int32_t 2694static s32
2695e1000_config_fc_after_link_up(struct e1000_hw *hw) 2695e1000_config_fc_after_link_up(struct e1000_hw *hw)
2696{ 2696{
2697 int32_t ret_val; 2697 s32 ret_val;
2698 uint16_t mii_status_reg; 2698 u16 mii_status_reg;
2699 uint16_t mii_nway_adv_reg; 2699 u16 mii_nway_adv_reg;
2700 uint16_t mii_nway_lp_ability_reg; 2700 u16 mii_nway_lp_ability_reg;
2701 uint16_t speed; 2701 u16 speed;
2702 uint16_t duplex; 2702 u16 duplex;
2703 2703
2704 DEBUGFUNC("e1000_config_fc_after_link_up"); 2704 DEBUGFUNC("e1000_config_fc_after_link_up");
2705 2705
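
e1000_config_fc_after_link_up() resolves the final flow-control mode from the local advertisement and the link partner's ability word, following the usual IEEE 802.3 pause/asymmetric-pause resolution table. A simplified sketch of that resolution; the bit names and values here are the common MII advertisement positions, used as an assumption rather than the driver's own constants:

#include <stdint.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

#define ADV_PAUSE   0x0400u   /* PAUSE capability bit (assumed position) */
#define ADV_ASM_DIR 0x0800u   /* asymmetric-pause direction bit (assumed) */

/* Resolve flow control from our advertisement and the partner's ability word. */
static enum fc_mode resolve_fc(uint16_t local_adv, uint16_t partner_adv)
{
        /* Both sides advertise symmetric pause: enable it in both directions. */
        if ((local_adv & ADV_PAUSE) && (partner_adv & ADV_PAUSE))
                return FC_FULL;

        /* We advertise asymmetric only, partner advertises both:
         * we transmit pause frames but do not honour received ones. */
        if (!(local_adv & ADV_PAUSE) && (local_adv & ADV_ASM_DIR) &&
            (partner_adv & ADV_PAUSE) && (partner_adv & ADV_ASM_DIR))
                return FC_TX_PAUSE;

        /* We advertise both, partner advertises asymmetric only:
         * we honour received pause frames but do not send any. */
        if ((local_adv & ADV_PAUSE) && (local_adv & ADV_ASM_DIR) &&
            !(partner_adv & ADV_PAUSE) && (partner_adv & ADV_ASM_DIR))
                return FC_RX_PAUSE;

        return FC_NONE;
}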
@@ -2896,17 +2896,17 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
2896 * 2896 *
2897 * Called by any function that needs to check the link status of the adapter. 2897 * Called by any function that needs to check the link status of the adapter.
2898 *****************************************************************************/ 2898 *****************************************************************************/
2899int32_t 2899s32
2900e1000_check_for_link(struct e1000_hw *hw) 2900e1000_check_for_link(struct e1000_hw *hw)
2901{ 2901{
2902 uint32_t rxcw = 0; 2902 u32 rxcw = 0;
2903 uint32_t ctrl; 2903 u32 ctrl;
2904 uint32_t status; 2904 u32 status;
2905 uint32_t rctl; 2905 u32 rctl;
2906 uint32_t icr; 2906 u32 icr;
2907 uint32_t signal = 0; 2907 u32 signal = 0;
2908 int32_t ret_val; 2908 s32 ret_val;
2909 uint16_t phy_data; 2909 u16 phy_data;
2910 2910
2911 DEBUGFUNC("e1000_check_for_link"); 2911 DEBUGFUNC("e1000_check_for_link");
2912 2912
@@ -3022,7 +3022,7 @@ e1000_check_for_link(struct e1000_hw *hw)
3022 * at gigabit speed, we turn on TBI compatibility. 3022 * at gigabit speed, we turn on TBI compatibility.
3023 */ 3023 */
3024 if (hw->tbi_compatibility_en) { 3024 if (hw->tbi_compatibility_en) {
3025 uint16_t speed, duplex; 3025 u16 speed, duplex;
3026 ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); 3026 ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
3027 if (ret_val) { 3027 if (ret_val) {
3028 DEBUGOUT("Error getting link speed and duplex\n"); 3028 DEBUGOUT("Error getting link speed and duplex\n");
@@ -3132,14 +3132,14 @@ e1000_check_for_link(struct e1000_hw *hw)
3132 * speed - Speed of the connection 3132 * speed - Speed of the connection
3133 * duplex - Duplex setting of the connection 3133 * duplex - Duplex setting of the connection
3134 *****************************************************************************/ 3134 *****************************************************************************/
3135int32_t 3135s32
3136e1000_get_speed_and_duplex(struct e1000_hw *hw, 3136e1000_get_speed_and_duplex(struct e1000_hw *hw,
3137 uint16_t *speed, 3137 u16 *speed,
3138 uint16_t *duplex) 3138 u16 *duplex)
3139{ 3139{
3140 uint32_t status; 3140 u32 status;
3141 int32_t ret_val; 3141 s32 ret_val;
3142 uint16_t phy_data; 3142 u16 phy_data;
3143 3143
3144 DEBUGFUNC("e1000_get_speed_and_duplex"); 3144 DEBUGFUNC("e1000_get_speed_and_duplex");
3145 3145
@@ -3214,12 +3214,12 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
3214* 3214*
3215* hw - Struct containing variables accessed by shared code 3215* hw - Struct containing variables accessed by shared code
3216******************************************************************************/ 3216******************************************************************************/
3217static int32_t 3217static s32
3218e1000_wait_autoneg(struct e1000_hw *hw) 3218e1000_wait_autoneg(struct e1000_hw *hw)
3219{ 3219{
3220 int32_t ret_val; 3220 s32 ret_val;
3221 uint16_t i; 3221 u16 i;
3222 uint16_t phy_data; 3222 u16 phy_data;
3223 3223
3224 DEBUGFUNC("e1000_wait_autoneg"); 3224 DEBUGFUNC("e1000_wait_autoneg");
3225 DEBUGOUT("Waiting for Auto-Neg to complete.\n"); 3225 DEBUGOUT("Waiting for Auto-Neg to complete.\n");
@@ -3251,7 +3251,7 @@ e1000_wait_autoneg(struct e1000_hw *hw)
3251******************************************************************************/ 3251******************************************************************************/
3252static void 3252static void
3253e1000_raise_mdi_clk(struct e1000_hw *hw, 3253e1000_raise_mdi_clk(struct e1000_hw *hw,
3254 uint32_t *ctrl) 3254 u32 *ctrl)
3255{ 3255{
3256 /* Raise the clock input to the Management Data Clock (by setting the MDC 3256 /* Raise the clock input to the Management Data Clock (by setting the MDC
3257 * bit), and then delay 10 microseconds. 3257 * bit), and then delay 10 microseconds.
@@ -3269,7 +3269,7 @@ e1000_raise_mdi_clk(struct e1000_hw *hw,
3269******************************************************************************/ 3269******************************************************************************/
3270static void 3270static void
3271e1000_lower_mdi_clk(struct e1000_hw *hw, 3271e1000_lower_mdi_clk(struct e1000_hw *hw,
3272 uint32_t *ctrl) 3272 u32 *ctrl)
3273{ 3273{
3274 /* Lower the clock input to the Management Data Clock (by clearing the MDC 3274 /* Lower the clock input to the Management Data Clock (by clearing the MDC
3275 * bit), and then delay 10 microseconds. 3275 * bit), and then delay 10 microseconds.
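
The raise/lower MDC helpers above, together with the shift-out and shift-in routines in the hunks that follow, bit-bang MII management frames on the MDC/MDIO pins when the MAC cannot issue the access through the MDIC register. A sketch of the shift-out side, most significant bit first, one data bit per clock pulse; the pin callbacks are placeholders for the CTRL register manipulation the driver actually performs:

#include <stdint.h>

struct mdio_bb_ops {
        void (*set_mdio)(int level);   /* drive the MDIO data pin high or low */
        void (*pulse_mdc)(void);       /* raise then lower MDC with the required delays */
};

/* Shift the top 'count' bits of 'data' out on MDIO, MSB first. */
static void mdio_shift_out(const struct mdio_bb_ops *ops, uint32_t data, uint16_t count)
{
        uint32_t mask = 1u << (count - 1);

        while (mask) {
                ops->set_mdio((data & mask) != 0);
                ops->pulse_mdc();
                mask >>= 1;
        }
}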
@@ -3290,11 +3290,11 @@ e1000_lower_mdi_clk(struct e1000_hw *hw,
3290******************************************************************************/ 3290******************************************************************************/
3291static void 3291static void
3292e1000_shift_out_mdi_bits(struct e1000_hw *hw, 3292e1000_shift_out_mdi_bits(struct e1000_hw *hw,
3293 uint32_t data, 3293 u32 data,
3294 uint16_t count) 3294 u16 count)
3295{ 3295{
3296 uint32_t ctrl; 3296 u32 ctrl;
3297 uint32_t mask; 3297 u32 mask;
3298 3298
3299 /* We need to shift "count" number of bits out to the PHY. So, the value 3299 /* We need to shift "count" number of bits out to the PHY. So, the value
3300 * in the "data" parameter will be shifted out to the PHY one bit at a 3300 * in the "data" parameter will be shifted out to the PHY one bit at a
@@ -3338,12 +3338,12 @@ e1000_shift_out_mdi_bits(struct e1000_hw *hw,
3338* 3338*
3339* Bits are shifted in in MSB to LSB order. 3339* Bits are shifted in in MSB to LSB order.
3340******************************************************************************/ 3340******************************************************************************/
3341static uint16_t 3341static u16
3342e1000_shift_in_mdi_bits(struct e1000_hw *hw) 3342e1000_shift_in_mdi_bits(struct e1000_hw *hw)
3343{ 3343{
3344 uint32_t ctrl; 3344 u32 ctrl;
3345 uint16_t data = 0; 3345 u16 data = 0;
3346 uint8_t i; 3346 u8 i;
3347 3347
3348 /* In order to read a register from the PHY, we need to shift in a total 3348 /* In order to read a register from the PHY, we need to shift in a total
3349 * of 18 bits from the PHY. The first two bit (turnaround) times are used 3349 * of 18 bits from the PHY. The first two bit (turnaround) times are used
@@ -3384,13 +3384,13 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw)
3384 return data; 3384 return data;
3385} 3385}
3386 3386
3387static int32_t 3387static s32
3388e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) 3388e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask)
3389{ 3389{
3390 uint32_t swfw_sync = 0; 3390 u32 swfw_sync = 0;
3391 uint32_t swmask = mask; 3391 u32 swmask = mask;
3392 uint32_t fwmask = mask << 16; 3392 u32 fwmask = mask << 16;
3393 int32_t timeout = 200; 3393 s32 timeout = 200;
3394 3394
3395 DEBUGFUNC("e1000_swfw_sync_acquire"); 3395 DEBUGFUNC("e1000_swfw_sync_acquire");
3396 3396
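
e1000_swfw_sync_acquire() claims a resource shared with firmware by spinning on a sync register in which the low 16 bits are software ownership flags and the same flags shifted left by 16 belong to firmware. A sketch of that acquire loop; the register is modelled here as a plain variable, whereas the driver guards the read-modify-write with a hardware semaphore and delays between retries:

#include <stdint.h>
#include <stdbool.h>

/* Stand-in for the SW_FW_SYNC device register (assumption for the sketch). */
static uint32_t swfw_sync_reg;

static bool swfw_acquire(uint16_t mask)
{
        uint32_t swmask = mask;
        uint32_t fwmask = (uint32_t)mask << 16;
        int timeout = 200;

        while (timeout--) {
                uint32_t sync = swfw_sync_reg;

                /* The resource is free only if neither software nor firmware
                 * currently has its ownership bit set. */
                if (!(sync & (swmask | fwmask))) {
                        swfw_sync_reg = sync | swmask;
                        return true;
                }
                /* the driver sleeps briefly here before retrying */
        }
        return false;
}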
@@ -3429,10 +3429,10 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
3429} 3429}
3430 3430
3431static void 3431static void
3432e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask) 3432e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask)
3433{ 3433{
3434 uint32_t swfw_sync; 3434 u32 swfw_sync;
3435 uint32_t swmask = mask; 3435 u32 swmask = mask;
3436 3436
3437 DEBUGFUNC("e1000_swfw_sync_release"); 3437 DEBUGFUNC("e1000_swfw_sync_release");
3438 3438
@@ -3464,13 +3464,13 @@ e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask)
3464* hw - Struct containing variables accessed by shared code 3464* hw - Struct containing variables accessed by shared code
3465* reg_addr - address of the PHY register to read 3465* reg_addr - address of the PHY register to read
3466******************************************************************************/ 3466******************************************************************************/
3467int32_t 3467s32
3468e1000_read_phy_reg(struct e1000_hw *hw, 3468e1000_read_phy_reg(struct e1000_hw *hw,
3469 uint32_t reg_addr, 3469 u32 reg_addr,
3470 uint16_t *phy_data) 3470 u16 *phy_data)
3471{ 3471{
3472 uint32_t ret_val; 3472 u32 ret_val;
3473 uint16_t swfw; 3473 u16 swfw;
3474 3474
3475 DEBUGFUNC("e1000_read_phy_reg"); 3475 DEBUGFUNC("e1000_read_phy_reg");
3476 3476
@@ -3488,7 +3488,7 @@ e1000_read_phy_reg(struct e1000_hw *hw,
3488 hw->phy_type == e1000_phy_igp_2) && 3488 hw->phy_type == e1000_phy_igp_2) &&
3489 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 3489 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
3490 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 3490 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
3491 (uint16_t)reg_addr); 3491 (u16)reg_addr);
3492 if (ret_val) { 3492 if (ret_val) {
3493 e1000_swfw_sync_release(hw, swfw); 3493 e1000_swfw_sync_release(hw, swfw);
3494 return ret_val; 3494 return ret_val;
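
For IGP-family PHYs, registers above MAX_PHY_MULTI_PAGE_REG live on other pages, so the driver first writes the page-select register and then performs the access using only the register's low address bits. A sketch of that two-step access; register numbers and the raw accessors are placeholders, not the driver's values:

#include <stdint.h>

#define PHY_PAGE_SELECT      0x1F    /* illustrative page-select register */
#define PHY_MULTI_PAGE_LIMIT 0x0F    /* registers above this need a page select */
#define PHY_REG_ADDR_MASK    0x1F    /* low address bits actually sent on the wire */

/* Placeholder raw accessors; e1000_write_phy_reg_ex/e1000_read_phy_reg_ex
 * do the real MDIC or bit-banged work in the driver. */
static int phy_write_raw(uint32_t reg, uint16_t val) { (void)reg; (void)val; return 0; }
static int phy_read_raw(uint32_t reg, uint16_t *val) { (void)reg; *val = 0; return 0; }

/* Read a possibly paged PHY register: select the page first, then read
 * using only the low address bits. */
static int phy_read_paged(uint32_t reg, uint16_t *val)
{
        if (reg > PHY_MULTI_PAGE_LIMIT) {
                int ret = phy_write_raw(PHY_PAGE_SELECT, (uint16_t)reg);
                if (ret)
                        return ret;
        }
        return phy_read_raw(reg & PHY_REG_ADDR_MASK, val);
}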
@@ -3499,14 +3499,14 @@ e1000_read_phy_reg(struct e1000_hw *hw,
3499 /* Select Configuration Page */ 3499 /* Select Configuration Page */
3500 if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { 3500 if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
3501 ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT, 3501 ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
3502 (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); 3502 (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
3503 } else { 3503 } else {
3504 /* Use Alternative Page Select register to access 3504 /* Use Alternative Page Select register to access
3505 * registers 30 and 31 3505 * registers 30 and 31
3506 */ 3506 */
3507 ret_val = e1000_write_phy_reg_ex(hw, 3507 ret_val = e1000_write_phy_reg_ex(hw,
3508 GG82563_PHY_PAGE_SELECT_ALT, 3508 GG82563_PHY_PAGE_SELECT_ALT,
3509 (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); 3509 (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
3510 } 3510 }
3511 3511
3512 if (ret_val) { 3512 if (ret_val) {
@@ -3523,13 +3523,13 @@ e1000_read_phy_reg(struct e1000_hw *hw,
3523 return ret_val; 3523 return ret_val;
3524} 3524}
3525 3525
3526static int32_t 3526static s32
3527e1000_read_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr, 3527e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
3528 uint16_t *phy_data) 3528 u16 *phy_data)
3529{ 3529{
3530 uint32_t i; 3530 u32 i;
3531 uint32_t mdic = 0; 3531 u32 mdic = 0;
3532 const uint32_t phy_addr = 1; 3532 const u32 phy_addr = 1;
3533 3533
3534 DEBUGFUNC("e1000_read_phy_reg_ex"); 3534 DEBUGFUNC("e1000_read_phy_reg_ex");
3535 3535
@@ -3563,7 +3563,7 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
3563 DEBUGOUT("MDI Error\n"); 3563 DEBUGOUT("MDI Error\n");
3564 return -E1000_ERR_PHY; 3564 return -E1000_ERR_PHY;
3565 } 3565 }
3566 *phy_data = (uint16_t) mdic; 3566 *phy_data = (u16) mdic;
3567 } else { 3567 } else {
3568 /* We must first send a preamble through the MDIO pin to signal the 3568 /* We must first send a preamble through the MDIO pin to signal the
3569 * beginning of an MII instruction. This is done by sending 32 3569 * beginning of an MII instruction. This is done by sending 32
@@ -3603,12 +3603,12 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
3603* reg_addr - address of the PHY register to write 3603* reg_addr - address of the PHY register to write
3604* data - data to write to the PHY 3604* data - data to write to the PHY
3605******************************************************************************/ 3605******************************************************************************/
3606int32_t 3606s32
3607e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, 3607e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr,
3608 uint16_t phy_data) 3608 u16 phy_data)
3609{ 3609{
3610 uint32_t ret_val; 3610 u32 ret_val;
3611 uint16_t swfw; 3611 u16 swfw;
3612 3612
3613 DEBUGFUNC("e1000_write_phy_reg"); 3613 DEBUGFUNC("e1000_write_phy_reg");
3614 3614
@@ -3626,7 +3626,7 @@ e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr,
3626 hw->phy_type == e1000_phy_igp_2) && 3626 hw->phy_type == e1000_phy_igp_2) &&
3627 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 3627 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
3628 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 3628 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
3629 (uint16_t)reg_addr); 3629 (u16)reg_addr);
3630 if (ret_val) { 3630 if (ret_val) {
3631 e1000_swfw_sync_release(hw, swfw); 3631 e1000_swfw_sync_release(hw, swfw);
3632 return ret_val; 3632 return ret_val;
@@ -3637,14 +3637,14 @@ e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr,
3637 /* Select Configuration Page */ 3637 /* Select Configuration Page */
3638 if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { 3638 if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
3639 ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT, 3639 ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
3640 (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); 3640 (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
3641 } else { 3641 } else {
3642 /* Use Alternative Page Select register to access 3642 /* Use Alternative Page Select register to access
3643 * registers 30 and 31 3643 * registers 30 and 31
3644 */ 3644 */
3645 ret_val = e1000_write_phy_reg_ex(hw, 3645 ret_val = e1000_write_phy_reg_ex(hw,
3646 GG82563_PHY_PAGE_SELECT_ALT, 3646 GG82563_PHY_PAGE_SELECT_ALT,
3647 (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); 3647 (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
3648 } 3648 }
3649 3649
3650 if (ret_val) { 3650 if (ret_val) {
@@ -3661,13 +3661,13 @@ e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr,
3661 return ret_val; 3661 return ret_val;
3662} 3662}
3663 3663
3664static int32_t 3664static s32
3665e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr, 3665e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
3666 uint16_t phy_data) 3666 u16 phy_data)
3667{ 3667{
3668 uint32_t i; 3668 u32 i;
3669 uint32_t mdic = 0; 3669 u32 mdic = 0;
3670 const uint32_t phy_addr = 1; 3670 const u32 phy_addr = 1;
3671 3671
3672 DEBUGFUNC("e1000_write_phy_reg_ex"); 3672 DEBUGFUNC("e1000_write_phy_reg_ex");
3673 3673
@@ -3681,7 +3681,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
3681 * for the PHY register in the MDI Control register. The MAC will take 3681 * for the PHY register in the MDI Control register. The MAC will take
3682 * care of interfacing with the PHY to send the desired data. 3682 * care of interfacing with the PHY to send the desired data.
3683 */ 3683 */
3684 mdic = (((uint32_t) phy_data) | 3684 mdic = (((u32) phy_data) |
3685 (reg_addr << E1000_MDIC_REG_SHIFT) | 3685 (reg_addr << E1000_MDIC_REG_SHIFT) |
3686 (phy_addr << E1000_MDIC_PHY_SHIFT) | 3686 (phy_addr << E1000_MDIC_PHY_SHIFT) |
3687 (E1000_MDIC_OP_WRITE)); 3687 (E1000_MDIC_OP_WRITE));
@@ -3715,7 +3715,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
3715 mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | 3715 mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
3716 (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); 3716 (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
3717 mdic <<= 16; 3717 mdic <<= 16;
3718 mdic |= (uint32_t) phy_data; 3718 mdic |= (u32) phy_data;
3719 3719
3720 e1000_shift_out_mdi_bits(hw, mdic, 32); 3720 e1000_shift_out_mdi_bits(hw, mdic, 32);
3721 } 3721 }
@@ -3723,13 +3723,13 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
3723 return E1000_SUCCESS; 3723 return E1000_SUCCESS;
3724} 3724}
3725 3725
3726static int32_t 3726static s32
3727e1000_read_kmrn_reg(struct e1000_hw *hw, 3727e1000_read_kmrn_reg(struct e1000_hw *hw,
3728 uint32_t reg_addr, 3728 u32 reg_addr,
3729 uint16_t *data) 3729 u16 *data)
3730{ 3730{
3731 uint32_t reg_val; 3731 u32 reg_val;
3732 uint16_t swfw; 3732 u16 swfw;
3733 DEBUGFUNC("e1000_read_kmrn_reg"); 3733 DEBUGFUNC("e1000_read_kmrn_reg");
3734 3734
3735 if ((hw->mac_type == e1000_80003es2lan) && 3735 if ((hw->mac_type == e1000_80003es2lan) &&
@@ -3750,19 +3750,19 @@ e1000_read_kmrn_reg(struct e1000_hw *hw,
3750 3750
3751 /* Read the data returned */ 3751 /* Read the data returned */
3752 reg_val = E1000_READ_REG(hw, KUMCTRLSTA); 3752 reg_val = E1000_READ_REG(hw, KUMCTRLSTA);
3753 *data = (uint16_t)reg_val; 3753 *data = (u16)reg_val;
3754 3754
3755 e1000_swfw_sync_release(hw, swfw); 3755 e1000_swfw_sync_release(hw, swfw);
3756 return E1000_SUCCESS; 3756 return E1000_SUCCESS;
3757} 3757}
3758 3758
3759static int32_t 3759static s32
3760e1000_write_kmrn_reg(struct e1000_hw *hw, 3760e1000_write_kmrn_reg(struct e1000_hw *hw,
3761 uint32_t reg_addr, 3761 u32 reg_addr,
3762 uint16_t data) 3762 u16 data)
3763{ 3763{
3764 uint32_t reg_val; 3764 u32 reg_val;
3765 uint16_t swfw; 3765 u16 swfw;
3766 DEBUGFUNC("e1000_write_kmrn_reg"); 3766 DEBUGFUNC("e1000_write_kmrn_reg");
3767 3767
3768 if ((hw->mac_type == e1000_80003es2lan) && 3768 if ((hw->mac_type == e1000_80003es2lan) &&
@@ -3788,13 +3788,13 @@ e1000_write_kmrn_reg(struct e1000_hw *hw,
3788* 3788*
3789* hw - Struct containing variables accessed by shared code 3789* hw - Struct containing variables accessed by shared code
3790******************************************************************************/ 3790******************************************************************************/
3791int32_t 3791s32
3792e1000_phy_hw_reset(struct e1000_hw *hw) 3792e1000_phy_hw_reset(struct e1000_hw *hw)
3793{ 3793{
3794 uint32_t ctrl, ctrl_ext; 3794 u32 ctrl, ctrl_ext;
3795 uint32_t led_ctrl; 3795 u32 led_ctrl;
3796 int32_t ret_val; 3796 s32 ret_val;
3797 uint16_t swfw; 3797 u16 swfw;
3798 3798
3799 DEBUGFUNC("e1000_phy_hw_reset"); 3799 DEBUGFUNC("e1000_phy_hw_reset");
3800 3800
@@ -3882,11 +3882,11 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
3882* 3882*
3883* Sets bit 15 of the MII Control register 3883* Sets bit 15 of the MII Control register
3884******************************************************************************/ 3884******************************************************************************/
3885int32_t 3885s32
3886e1000_phy_reset(struct e1000_hw *hw) 3886e1000_phy_reset(struct e1000_hw *hw)
3887{ 3887{
3888 int32_t ret_val; 3888 s32 ret_val;
3889 uint16_t phy_data; 3889 u16 phy_data;
3890 3890
3891 DEBUGFUNC("e1000_phy_reset"); 3891 DEBUGFUNC("e1000_phy_reset");
3892 3892
@@ -3937,9 +3937,9 @@ e1000_phy_reset(struct e1000_hw *hw)
3937void 3937void
3938e1000_phy_powerdown_workaround(struct e1000_hw *hw) 3938e1000_phy_powerdown_workaround(struct e1000_hw *hw)
3939{ 3939{
3940 int32_t reg; 3940 s32 reg;
3941 uint16_t phy_data; 3941 u16 phy_data;
3942 int32_t retry = 0; 3942 s32 retry = 0;
3943 3943
3944 DEBUGFUNC("e1000_phy_powerdown_workaround"); 3944 DEBUGFUNC("e1000_phy_powerdown_workaround");
3945 3945
@@ -3987,13 +3987,13 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw)
3987* 3987*
3988* hw - struct containing variables accessed by shared code 3988* hw - struct containing variables accessed by shared code
3989******************************************************************************/ 3989******************************************************************************/
3990static int32_t 3990static s32
3991e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) 3991e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
3992{ 3992{
3993 int32_t ret_val; 3993 s32 ret_val;
3994 int32_t reg; 3994 s32 reg;
3995 int32_t cnt; 3995 s32 cnt;
3996 uint16_t phy_data; 3996 u16 phy_data;
3997 3997
3998 if (hw->kmrn_lock_loss_workaround_disabled) 3998 if (hw->kmrn_lock_loss_workaround_disabled)
3999 return E1000_SUCCESS; 3999 return E1000_SUCCESS;
@@ -4040,11 +4040,11 @@ e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
4040* 4040*
4041* hw - Struct containing variables accessed by shared code 4041* hw - Struct containing variables accessed by shared code
4042******************************************************************************/ 4042******************************************************************************/
4043static int32_t 4043static s32
4044e1000_detect_gig_phy(struct e1000_hw *hw) 4044e1000_detect_gig_phy(struct e1000_hw *hw)
4045{ 4045{
4046 int32_t phy_init_status, ret_val; 4046 s32 phy_init_status, ret_val;
4047 uint16_t phy_id_high, phy_id_low; 4047 u16 phy_id_high, phy_id_low;
4048 bool match = false; 4048 bool match = false;
4049 4049
4050 DEBUGFUNC("e1000_detect_gig_phy"); 4050 DEBUGFUNC("e1000_detect_gig_phy");
@@ -4076,14 +4076,14 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
4076 if (ret_val) 4076 if (ret_val)
4077 return ret_val; 4077 return ret_val;
4078 4078
4079 hw->phy_id = (uint32_t) (phy_id_high << 16); 4079 hw->phy_id = (u32) (phy_id_high << 16);
4080 udelay(20); 4080 udelay(20);
4081 ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); 4081 ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
4082 if (ret_val) 4082 if (ret_val)
4083 return ret_val; 4083 return ret_val;
4084 4084
4085 hw->phy_id |= (uint32_t) (phy_id_low & PHY_REVISION_MASK); 4085 hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK);
4086 hw->phy_revision = (uint32_t) phy_id_low & ~PHY_REVISION_MASK; 4086 hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK;
4087 4087
4088 switch (hw->mac_type) { 4088 switch (hw->mac_type) {
4089 case e1000_82543: 4089 case e1000_82543:
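
e1000_detect_gig_phy() assembles the 32-bit PHY identifier from the two 16-bit MII ID registers: the first supplies the high word, the second supplies the low word, whose lowest bits (typically the low nibble) carry the silicon revision. A standalone sketch of that composition; the revision mask here is local to the sketch rather than the driver's PHY_REVISION_MASK convention:

#include <stdint.h>

#define REV_BITS 0x000Fu   /* assumed: low nibble of ID2 is the revision */

struct phy_ident {
        uint32_t id;         /* OUI + model, with the revision bits cleared */
        uint32_t revision;
};

static struct phy_ident phy_ident_from_regs(uint16_t id1, uint16_t id2)
{
        struct phy_ident p;

        p.id = (uint32_t)id1 << 16;
        p.id |= (uint32_t)(id2 & ~REV_BITS);
        p.revision = id2 & REV_BITS;
        return p;
}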
@@ -4136,10 +4136,10 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
4136* 4136*
4137* hw - Struct containing variables accessed by shared code 4137* hw - Struct containing variables accessed by shared code
4138******************************************************************************/ 4138******************************************************************************/
4139static int32_t 4139static s32
4140e1000_phy_reset_dsp(struct e1000_hw *hw) 4140e1000_phy_reset_dsp(struct e1000_hw *hw)
4141{ 4141{
4142 int32_t ret_val; 4142 s32 ret_val;
4143 DEBUGFUNC("e1000_phy_reset_dsp"); 4143 DEBUGFUNC("e1000_phy_reset_dsp");
4144 4144
4145 do { 4145 do {
@@ -4163,12 +4163,12 @@ e1000_phy_reset_dsp(struct e1000_hw *hw)
4163* hw - Struct containing variables accessed by shared code 4163* hw - Struct containing variables accessed by shared code
4164* phy_info - PHY information structure 4164* phy_info - PHY information structure
4165******************************************************************************/ 4165******************************************************************************/
4166static int32_t 4166static s32
4167e1000_phy_igp_get_info(struct e1000_hw *hw, 4167e1000_phy_igp_get_info(struct e1000_hw *hw,
4168 struct e1000_phy_info *phy_info) 4168 struct e1000_phy_info *phy_info)
4169{ 4169{
4170 int32_t ret_val; 4170 s32 ret_val;
4171 uint16_t phy_data, min_length, max_length, average; 4171 u16 phy_data, min_length, max_length, average;
4172 e1000_rev_polarity polarity; 4172 e1000_rev_polarity polarity;
4173 4173
4174 DEBUGFUNC("e1000_phy_igp_get_info"); 4174 DEBUGFUNC("e1000_phy_igp_get_info");
@@ -4240,12 +4240,12 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
4240* hw - Struct containing variables accessed by shared code 4240* hw - Struct containing variables accessed by shared code
4241* phy_info - PHY information structure 4241* phy_info - PHY information structure
4242******************************************************************************/ 4242******************************************************************************/
4243static int32_t 4243static s32
4244e1000_phy_ife_get_info(struct e1000_hw *hw, 4244e1000_phy_ife_get_info(struct e1000_hw *hw,
4245 struct e1000_phy_info *phy_info) 4245 struct e1000_phy_info *phy_info)
4246{ 4246{
4247 int32_t ret_val; 4247 s32 ret_val;
4248 uint16_t phy_data; 4248 u16 phy_data;
4249 e1000_rev_polarity polarity; 4249 e1000_rev_polarity polarity;
4250 4250
4251 DEBUGFUNC("e1000_phy_ife_get_info"); 4251 DEBUGFUNC("e1000_phy_ife_get_info");
@@ -4290,12 +4290,12 @@ e1000_phy_ife_get_info(struct e1000_hw *hw,
4290* hw - Struct containing variables accessed by shared code 4290* hw - Struct containing variables accessed by shared code
4291* phy_info - PHY information structure 4291* phy_info - PHY information structure
4292******************************************************************************/ 4292******************************************************************************/
4293static int32_t 4293static s32
4294e1000_phy_m88_get_info(struct e1000_hw *hw, 4294e1000_phy_m88_get_info(struct e1000_hw *hw,
4295 struct e1000_phy_info *phy_info) 4295 struct e1000_phy_info *phy_info)
4296{ 4296{
4297 int32_t ret_val; 4297 s32 ret_val;
4298 uint16_t phy_data; 4298 u16 phy_data;
4299 e1000_rev_polarity polarity; 4299 e1000_rev_polarity polarity;
4300 4300
4301 DEBUGFUNC("e1000_phy_m88_get_info"); 4301 DEBUGFUNC("e1000_phy_m88_get_info");
@@ -4369,12 +4369,12 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
4369* hw - Struct containing variables accessed by shared code 4369* hw - Struct containing variables accessed by shared code
4370* phy_info - PHY information structure 4370* phy_info - PHY information structure
4371******************************************************************************/ 4371******************************************************************************/
4372int32_t 4372s32
4373e1000_phy_get_info(struct e1000_hw *hw, 4373e1000_phy_get_info(struct e1000_hw *hw,
4374 struct e1000_phy_info *phy_info) 4374 struct e1000_phy_info *phy_info)
4375{ 4375{
4376 int32_t ret_val; 4376 s32 ret_val;
4377 uint16_t phy_data; 4377 u16 phy_data;
4378 4378
4379 DEBUGFUNC("e1000_phy_get_info"); 4379 DEBUGFUNC("e1000_phy_get_info");
4380 4380
@@ -4415,7 +4415,7 @@ e1000_phy_get_info(struct e1000_hw *hw,
4415 return e1000_phy_m88_get_info(hw, phy_info); 4415 return e1000_phy_m88_get_info(hw, phy_info);
4416} 4416}
4417 4417
4418int32_t 4418s32
4419e1000_validate_mdi_setting(struct e1000_hw *hw) 4419e1000_validate_mdi_setting(struct e1000_hw *hw)
4420{ 4420{
4421 DEBUGFUNC("e1000_validate_mdi_settings"); 4421 DEBUGFUNC("e1000_validate_mdi_settings");
@@ -4436,13 +4436,13 @@ e1000_validate_mdi_setting(struct e1000_hw *hw)
4436 * 4436 *
4437 * hw - Struct containing variables accessed by shared code 4437 * hw - Struct containing variables accessed by shared code
4438 *****************************************************************************/ 4438 *****************************************************************************/
4439int32_t 4439s32
4440e1000_init_eeprom_params(struct e1000_hw *hw) 4440e1000_init_eeprom_params(struct e1000_hw *hw)
4441{ 4441{
4442 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4442 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4443 uint32_t eecd = E1000_READ_REG(hw, EECD); 4443 u32 eecd = E1000_READ_REG(hw, EECD);
4444 int32_t ret_val = E1000_SUCCESS; 4444 s32 ret_val = E1000_SUCCESS;
4445 uint16_t eeprom_size; 4445 u16 eeprom_size;
4446 4446
4447 DEBUGFUNC("e1000_init_eeprom_params"); 4447 DEBUGFUNC("e1000_init_eeprom_params");
4448 4448
@@ -4561,8 +4561,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4561 break; 4561 break;
4562 case e1000_ich8lan: 4562 case e1000_ich8lan:
4563 { 4563 {
4564 int32_t i = 0; 4564 s32 i = 0;
4565 uint32_t flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG); 4565 u32 flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG);
4566 4566
4567 eeprom->type = e1000_eeprom_ich8; 4567 eeprom->type = e1000_eeprom_ich8;
4568 eeprom->use_eerd = false; 4568 eeprom->use_eerd = false;
@@ -4586,7 +4586,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4586 4586
4587 hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE; 4587 hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
4588 4588
4589 hw->flash_bank_size /= 2 * sizeof(uint16_t); 4589 hw->flash_bank_size /= 2 * sizeof(u16);
4590 4590
4591 break; 4591 break;
4592 } 4592 }
@@ -4611,7 +4611,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4611 if (eeprom_size) 4611 if (eeprom_size)
4612 eeprom_size++; 4612 eeprom_size++;
4613 } else { 4613 } else {
4614 eeprom_size = (uint16_t)((eecd & E1000_EECD_SIZE_EX_MASK) >> 4614 eeprom_size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
4615 E1000_EECD_SIZE_EX_SHIFT); 4615 E1000_EECD_SIZE_EX_SHIFT);
4616 } 4616 }
4617 4617
@@ -4628,7 +4628,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4628 *****************************************************************************/ 4628 *****************************************************************************/
4629static void 4629static void
4630e1000_raise_ee_clk(struct e1000_hw *hw, 4630e1000_raise_ee_clk(struct e1000_hw *hw,
4631 uint32_t *eecd) 4631 u32 *eecd)
4632{ 4632{
4633 /* Raise the clock input to the EEPROM (by setting the SK bit), and then 4633 /* Raise the clock input to the EEPROM (by setting the SK bit), and then
4634 * wait <delay> microseconds. 4634 * wait <delay> microseconds.
@@ -4647,7 +4647,7 @@ e1000_raise_ee_clk(struct e1000_hw *hw,
4647 *****************************************************************************/ 4647 *****************************************************************************/
4648static void 4648static void
4649e1000_lower_ee_clk(struct e1000_hw *hw, 4649e1000_lower_ee_clk(struct e1000_hw *hw,
4650 uint32_t *eecd) 4650 u32 *eecd)
4651{ 4651{
4652 /* Lower the clock input to the EEPROM (by clearing the SK bit), and then 4652 /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
4653 * wait 50 microseconds. 4653 * wait 50 microseconds.
@@ -4667,12 +4667,12 @@ e1000_lower_ee_clk(struct e1000_hw *hw,
4667 *****************************************************************************/ 4667 *****************************************************************************/
4668static void 4668static void
4669e1000_shift_out_ee_bits(struct e1000_hw *hw, 4669e1000_shift_out_ee_bits(struct e1000_hw *hw,
4670 uint16_t data, 4670 u16 data,
4671 uint16_t count) 4671 u16 count)
4672{ 4672{
4673 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4673 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4674 uint32_t eecd; 4674 u32 eecd;
4675 uint32_t mask; 4675 u32 mask;
4676 4676
4677 /* We need to shift "count" bits out to the EEPROM. So, value in the 4677 /* We need to shift "count" bits out to the EEPROM. So, value in the
4678 * "data" parameter will be shifted out to the EEPROM one bit at a time. 4678 * "data" parameter will be shifted out to the EEPROM one bit at a time.
@@ -4718,13 +4718,13 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw,
4718 * 4718 *
4719 * hw - Struct containing variables accessed by shared code 4719 * hw - Struct containing variables accessed by shared code
4720 *****************************************************************************/ 4720 *****************************************************************************/
4721static uint16_t 4721static u16
4722e1000_shift_in_ee_bits(struct e1000_hw *hw, 4722e1000_shift_in_ee_bits(struct e1000_hw *hw,
4723 uint16_t count) 4723 u16 count)
4724{ 4724{
4725 uint32_t eecd; 4725 u32 eecd;
4726 uint32_t i; 4726 u32 i;
4727 uint16_t data; 4727 u16 data;
4728 4728
4729 /* In order to read a register from the EEPROM, we need to shift 'count' 4729 /* In order to read a register from the EEPROM, we need to shift 'count'
4730 * bits in from the EEPROM. Bits are "shifted in" by raising the clock 4730 * bits in from the EEPROM. Bits are "shifted in" by raising the clock
@@ -4762,11 +4762,11 @@ e1000_shift_in_ee_bits(struct e1000_hw *hw,
4762 * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This 4762 * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
4763 * function should be called before issuing a command to the EEPROM. 4763 * function should be called before issuing a command to the EEPROM.
4764 *****************************************************************************/ 4764 *****************************************************************************/
4765static int32_t 4765static s32
4766e1000_acquire_eeprom(struct e1000_hw *hw) 4766e1000_acquire_eeprom(struct e1000_hw *hw)
4767{ 4767{
4768 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4768 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4769 uint32_t eecd, i=0; 4769 u32 eecd, i=0;
4770 4770
4771 DEBUGFUNC("e1000_acquire_eeprom"); 4771 DEBUGFUNC("e1000_acquire_eeprom");
4772 4772
@@ -4825,7 +4825,7 @@ static void
4825e1000_standby_eeprom(struct e1000_hw *hw) 4825e1000_standby_eeprom(struct e1000_hw *hw)
4826{ 4826{
4827 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4827 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4828 uint32_t eecd; 4828 u32 eecd;
4829 4829
4830 eecd = E1000_READ_REG(hw, EECD); 4830 eecd = E1000_READ_REG(hw, EECD);
4831 4831
@@ -4873,7 +4873,7 @@ e1000_standby_eeprom(struct e1000_hw *hw)
4873static void 4873static void
4874e1000_release_eeprom(struct e1000_hw *hw) 4874e1000_release_eeprom(struct e1000_hw *hw)
4875{ 4875{
4876 uint32_t eecd; 4876 u32 eecd;
4877 4877
4878 DEBUGFUNC("e1000_release_eeprom"); 4878 DEBUGFUNC("e1000_release_eeprom");
4879 4879
@@ -4921,11 +4921,11 @@ e1000_release_eeprom(struct e1000_hw *hw)
4921 * 4921 *
4922 * hw - Struct containing variables accessed by shared code 4922 * hw - Struct containing variables accessed by shared code
4923 *****************************************************************************/ 4923 *****************************************************************************/
4924static int32_t 4924static s32
4925e1000_spi_eeprom_ready(struct e1000_hw *hw) 4925e1000_spi_eeprom_ready(struct e1000_hw *hw)
4926{ 4926{
4927 uint16_t retry_count = 0; 4927 u16 retry_count = 0;
4928 uint8_t spi_stat_reg; 4928 u8 spi_stat_reg;
4929 4929
4930 DEBUGFUNC("e1000_spi_eeprom_ready"); 4930 DEBUGFUNC("e1000_spi_eeprom_ready");
4931 4931
@@ -4938,7 +4938,7 @@ e1000_spi_eeprom_ready(struct e1000_hw *hw)
4938 do { 4938 do {
4939 e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, 4939 e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
4940 hw->eeprom.opcode_bits); 4940 hw->eeprom.opcode_bits);
4941 spi_stat_reg = (uint8_t)e1000_shift_in_ee_bits(hw, 8); 4941 spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8);
4942 if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) 4942 if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
4943 break; 4943 break;
4944 4944
@@ -4967,14 +4967,14 @@ e1000_spi_eeprom_ready(struct e1000_hw *hw)
4967 * data - word read from the EEPROM 4967 * data - word read from the EEPROM
4968 * words - number of words to read 4968 * words - number of words to read
4969 *****************************************************************************/ 4969 *****************************************************************************/
4970int32_t 4970s32
4971e1000_read_eeprom(struct e1000_hw *hw, 4971e1000_read_eeprom(struct e1000_hw *hw,
4972 uint16_t offset, 4972 u16 offset,
4973 uint16_t words, 4973 u16 words,
4974 uint16_t *data) 4974 u16 *data)
4975{ 4975{
4976 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4976 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4977 uint32_t i = 0; 4977 u32 i = 0;
4978 4978
4979 DEBUGFUNC("e1000_read_eeprom"); 4979 DEBUGFUNC("e1000_read_eeprom");
4980 4980
@@ -5012,8 +5012,8 @@ e1000_read_eeprom(struct e1000_hw *hw,
5012 /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have 5012 /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
5013 * acquired the EEPROM at this point, so any returns should release it */ 5013 * acquired the EEPROM at this point, so any returns should release it */
5014 if (eeprom->type == e1000_eeprom_spi) { 5014 if (eeprom->type == e1000_eeprom_spi) {
5015 uint16_t word_in; 5015 u16 word_in;
5016 uint8_t read_opcode = EEPROM_READ_OPCODE_SPI; 5016 u8 read_opcode = EEPROM_READ_OPCODE_SPI;
5017 5017
5018 if (e1000_spi_eeprom_ready(hw)) { 5018 if (e1000_spi_eeprom_ready(hw)) {
5019 e1000_release_eeprom(hw); 5019 e1000_release_eeprom(hw);
@@ -5028,7 +5028,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
5028 5028
5029 /* Send the READ command (opcode + addr) */ 5029 /* Send the READ command (opcode + addr) */
5030 e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); 5030 e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
5031 e1000_shift_out_ee_bits(hw, (uint16_t)(offset*2), eeprom->address_bits); 5031 e1000_shift_out_ee_bits(hw, (u16)(offset*2), eeprom->address_bits);
5032 5032
5033 /* Read the data. The address of the eeprom internally increments with 5033 /* Read the data. The address of the eeprom internally increments with
5034 * each byte (spi) being read, saving on the overhead of eeprom setup 5034 * each byte (spi) being read, saving on the overhead of eeprom setup
@@ -5044,7 +5044,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
5044 /* Send the READ command (opcode + addr) */ 5044 /* Send the READ command (opcode + addr) */
5045 e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE, 5045 e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
5046 eeprom->opcode_bits); 5046 eeprom->opcode_bits);
5047 e1000_shift_out_ee_bits(hw, (uint16_t)(offset + i), 5047 e1000_shift_out_ee_bits(hw, (u16)(offset + i),
5048 eeprom->address_bits); 5048 eeprom->address_bits);
5049 5049
5050 /* Read the data. For microwire, each word requires the overhead 5050 /* Read the data. For microwire, each word requires the overhead
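
The bit-banged read above sends an opcode followed by an address and then clocks the data back in; for SPI parts the address is a byte offset (offset*2) and auto-increments so the command is issued once, while Microwire takes a word address per access. A condensed sketch of the SPI word read; the opcode, the fixed 8/16-bit field widths, and the byte swap are assumptions standing in for the driver's per-part opcode_bits/address_bits handling:

#include <stdint.h>

#define SPI_READ_OPCODE 0x03u   /* typical SPI EEPROM READ opcode */

/* Placeholder bit-bang helpers; e1000_shift_out_ee_bits/e1000_shift_in_ee_bits
 * toggle the EECD pins for real in the driver. */
static void ee_shift_out(uint16_t data, uint16_t count) { (void)data; (void)count; }
static uint16_t ee_shift_in(uint16_t count) { (void)count; return 0; }

/* Read 'words' 16-bit words starting at word offset 'offset'. The SPI part
 * auto-increments its address, so the command is only sent once. */
static void spi_eeprom_read(uint16_t offset, uint16_t words, uint16_t *data)
{
        uint16_t i;

        ee_shift_out(SPI_READ_OPCODE, 8);           /* opcode */
        ee_shift_out((uint16_t)(offset * 2), 16);   /* byte address */

        for (i = 0; i < words; i++) {
                uint16_t word_in = ee_shift_in(16);

                /* assumed byte order: the part returns the high byte first */
                data[i] = (uint16_t)((word_in >> 8) | (word_in << 8));
        }
}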
@@ -5068,14 +5068,14 @@ e1000_read_eeprom(struct e1000_hw *hw,
5068 * data - word read from the EEPROM 5068 * data - word read from the EEPROM
5069 * words - number of words to read 5069 * words - number of words to read
5070 *****************************************************************************/ 5070 *****************************************************************************/
5071static int32_t 5071static s32
5072e1000_read_eeprom_eerd(struct e1000_hw *hw, 5072e1000_read_eeprom_eerd(struct e1000_hw *hw,
5073 uint16_t offset, 5073 u16 offset,
5074 uint16_t words, 5074 u16 words,
5075 uint16_t *data) 5075 u16 *data)
5076{ 5076{
5077 uint32_t i, eerd = 0; 5077 u32 i, eerd = 0;
5078 int32_t error = 0; 5078 s32 error = 0;
5079 5079
5080 for (i = 0; i < words; i++) { 5080 for (i = 0; i < words; i++) {
5081 eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) + 5081 eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) +
@@ -5102,15 +5102,15 @@ e1000_read_eeprom_eerd(struct e1000_hw *hw,
5102 * data - word read from the EEPROM 5102 * data - word read from the EEPROM
5103 * words - number of words to read 5103 * words - number of words to read
5104 *****************************************************************************/ 5104 *****************************************************************************/
5105static int32_t 5105static s32
5106e1000_write_eeprom_eewr(struct e1000_hw *hw, 5106e1000_write_eeprom_eewr(struct e1000_hw *hw,
5107 uint16_t offset, 5107 u16 offset,
5108 uint16_t words, 5108 u16 words,
5109 uint16_t *data) 5109 u16 *data)
5110{ 5110{
5111 uint32_t register_value = 0; 5111 u32 register_value = 0;
5112 uint32_t i = 0; 5112 u32 i = 0;
5113 int32_t error = 0; 5113 s32 error = 0;
5114 5114
5115 if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM)) 5115 if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
5116 return -E1000_ERR_SWFW_SYNC; 5116 return -E1000_ERR_SWFW_SYNC;
@@ -5143,12 +5143,12 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw,
5143 * 5143 *
5144 * hw - Struct containing variables accessed by shared code 5144 * hw - Struct containing variables accessed by shared code
5145 *****************************************************************************/ 5145 *****************************************************************************/
5146static int32_t 5146static s32
5147e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd) 5147e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
5148{ 5148{
5149 uint32_t attempts = 100000; 5149 u32 attempts = 100000;
5150 uint32_t i, reg = 0; 5150 u32 i, reg = 0;
5151 int32_t done = E1000_ERR_EEPROM; 5151 s32 done = E1000_ERR_EEPROM;
5152 5152
5153 for (i = 0; i < attempts; i++) { 5153 for (i = 0; i < attempts; i++) {
5154 if (eerd == E1000_EEPROM_POLL_READ) 5154 if (eerd == E1000_EEPROM_POLL_READ)
@@ -5174,7 +5174,7 @@ e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
5174static bool 5174static bool
5175e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) 5175e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
5176{ 5176{
5177 uint32_t eecd = 0; 5177 u32 eecd = 0;
5178 5178
5179 DEBUGFUNC("e1000_is_onboard_nvm_eeprom"); 5179 DEBUGFUNC("e1000_is_onboard_nvm_eeprom");
5180 5180
@@ -5204,11 +5204,11 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
5204 * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is 5204 * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
5205 * valid. 5205 * valid.
5206 *****************************************************************************/ 5206 *****************************************************************************/
5207int32_t 5207s32
5208e1000_validate_eeprom_checksum(struct e1000_hw *hw) 5208e1000_validate_eeprom_checksum(struct e1000_hw *hw)
5209{ 5209{
5210 uint16_t checksum = 0; 5210 u16 checksum = 0;
5211 uint16_t i, eeprom_data; 5211 u16 i, eeprom_data;
5212 5212
5213 DEBUGFUNC("e1000_validate_eeprom_checksum"); 5213 DEBUGFUNC("e1000_validate_eeprom_checksum");
5214 5214
@@ -5252,7 +5252,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
5252 checksum += eeprom_data; 5252 checksum += eeprom_data;
5253 } 5253 }
5254 5254
5255 if (checksum == (uint16_t) EEPROM_SUM) 5255 if (checksum == (u16) EEPROM_SUM)
5256 return E1000_SUCCESS; 5256 return E1000_SUCCESS;
5257 else { 5257 else {
5258 DEBUGOUT("EEPROM Checksum Invalid\n"); 5258 DEBUGOUT("EEPROM Checksum Invalid\n");
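The rule stated above is a plain additive checksum over the first 64 words. A self-contained sketch, reading from an in-memory array instead of through e1000_read_eeprom() (helper name illustrative):

    #include <stdint.h>

    #define EEPROM_SUM 0xBABA   /* expected 16-bit sum of words 0..63 */

    /* Returns 1 when the 64-word image carries a valid additive checksum. */
    static int eeprom_checksum_ok(const uint16_t words[64])
    {
            uint16_t sum = 0;
            int i;

            for (i = 0; i < 64; i++)
                    sum += words[i];        /* wraps mod 2^16, as in the driver loop */
            return sum == EEPROM_SUM;
    }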
@@ -5268,12 +5268,12 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
5268 * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. 5268 * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
5269 * Writes the difference to word offset 63 of the EEPROM. 5269 * Writes the difference to word offset 63 of the EEPROM.
5270 *****************************************************************************/ 5270 *****************************************************************************/
5271int32_t 5271s32
5272e1000_update_eeprom_checksum(struct e1000_hw *hw) 5272e1000_update_eeprom_checksum(struct e1000_hw *hw)
5273{ 5273{
5274 uint32_t ctrl_ext; 5274 u32 ctrl_ext;
5275 uint16_t checksum = 0; 5275 u16 checksum = 0;
5276 uint16_t i, eeprom_data; 5276 u16 i, eeprom_data;
5277 5277
5278 DEBUGFUNC("e1000_update_eeprom_checksum"); 5278 DEBUGFUNC("e1000_update_eeprom_checksum");
5279 5279
@@ -5284,7 +5284,7 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw)
5284 } 5284 }
5285 checksum += eeprom_data; 5285 checksum += eeprom_data;
5286 } 5286 }
5287 checksum = (uint16_t) EEPROM_SUM - checksum; 5287 checksum = (u16) EEPROM_SUM - checksum;
5288 if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { 5288 if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
5289 DEBUGOUT("EEPROM Write Error\n"); 5289 DEBUGOUT("EEPROM Write Error\n");
5290 return -E1000_ERR_EEPROM; 5290 return -E1000_ERR_EEPROM;
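Updating the stored word follows from the same rule: word 63 is whatever makes the 64-word sum equal 0xBABA. A sketch under the same assumptions as the snippet above:

    #include <stdint.h>

    /* Compute the checksum word to store at offset 63 from words 0..62. */
    static uint16_t eeprom_checksum_word(const uint16_t words[63])
    {
            uint16_t sum = 0;
            int i;

            for (i = 0; i < 63; i++)
                    sum += words[i];
            return (uint16_t)(0xBABA - sum);   /* EEPROM_SUM minus the partial sum */
    }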
@@ -5313,14 +5313,14 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw)
5313 * If e1000_update_eeprom_checksum is not called after this function, the 5313 * If e1000_update_eeprom_checksum is not called after this function, the
5314 * EEPROM will most likely contain an invalid checksum. 5314 * EEPROM will most likely contain an invalid checksum.
5315 *****************************************************************************/ 5315 *****************************************************************************/
5316int32_t 5316s32
5317e1000_write_eeprom(struct e1000_hw *hw, 5317e1000_write_eeprom(struct e1000_hw *hw,
5318 uint16_t offset, 5318 u16 offset,
5319 uint16_t words, 5319 u16 words,
5320 uint16_t *data) 5320 u16 *data)
5321{ 5321{
5322 struct e1000_eeprom_info *eeprom = &hw->eeprom; 5322 struct e1000_eeprom_info *eeprom = &hw->eeprom;
5323 int32_t status = 0; 5323 s32 status = 0;
5324 5324
5325 DEBUGFUNC("e1000_write_eeprom"); 5325 DEBUGFUNC("e1000_write_eeprom");
5326 5326
@@ -5370,19 +5370,19 @@ e1000_write_eeprom(struct e1000_hw *hw,
5370 * data - pointer to array of 8 bit words to be written to the EEPROM 5370 * data - pointer to array of 8 bit words to be written to the EEPROM
5371 * 5371 *
5372 *****************************************************************************/ 5372 *****************************************************************************/
5373static int32_t 5373static s32
5374e1000_write_eeprom_spi(struct e1000_hw *hw, 5374e1000_write_eeprom_spi(struct e1000_hw *hw,
5375 uint16_t offset, 5375 u16 offset,
5376 uint16_t words, 5376 u16 words,
5377 uint16_t *data) 5377 u16 *data)
5378{ 5378{
5379 struct e1000_eeprom_info *eeprom = &hw->eeprom; 5379 struct e1000_eeprom_info *eeprom = &hw->eeprom;
5380 uint16_t widx = 0; 5380 u16 widx = 0;
5381 5381
5382 DEBUGFUNC("e1000_write_eeprom_spi"); 5382 DEBUGFUNC("e1000_write_eeprom_spi");
5383 5383
5384 while (widx < words) { 5384 while (widx < words) {
5385 uint8_t write_opcode = EEPROM_WRITE_OPCODE_SPI; 5385 u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
5386 5386
5387 if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM; 5387 if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
5388 5388
@@ -5401,14 +5401,14 @@ e1000_write_eeprom_spi(struct e1000_hw *hw,
5401 /* Send the Write command (8-bit opcode + addr) */ 5401 /* Send the Write command (8-bit opcode + addr) */
5402 e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); 5402 e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
5403 5403
5404 e1000_shift_out_ee_bits(hw, (uint16_t)((offset + widx)*2), 5404 e1000_shift_out_ee_bits(hw, (u16)((offset + widx)*2),
5405 eeprom->address_bits); 5405 eeprom->address_bits);
5406 5406
5407 /* Send the data */ 5407 /* Send the data */
5408 5408
5409 /* Loop to allow for up to whole page write (32 bytes) of eeprom */ 5409 /* Loop to allow for up to whole page write (32 bytes) of eeprom */
5410 while (widx < words) { 5410 while (widx < words) {
5411 uint16_t word_out = data[widx]; 5411 u16 word_out = data[widx];
5412 word_out = (word_out >> 8) | (word_out << 8); 5412 word_out = (word_out >> 8) | (word_out << 8);
5413 e1000_shift_out_ee_bits(hw, word_out, 16); 5413 e1000_shift_out_ee_bits(hw, word_out, 16);
5414 widx++; 5414 widx++;
@@ -5436,16 +5436,16 @@ e1000_write_eeprom_spi(struct e1000_hw *hw,
5436 * data - pointer to array of 16 bit words to be written to the EEPROM 5436 * data - pointer to array of 16 bit words to be written to the EEPROM
5437 * 5437 *
5438 *****************************************************************************/ 5438 *****************************************************************************/
5439static int32_t 5439static s32
5440e1000_write_eeprom_microwire(struct e1000_hw *hw, 5440e1000_write_eeprom_microwire(struct e1000_hw *hw,
5441 uint16_t offset, 5441 u16 offset,
5442 uint16_t words, 5442 u16 words,
5443 uint16_t *data) 5443 u16 *data)
5444{ 5444{
5445 struct e1000_eeprom_info *eeprom = &hw->eeprom; 5445 struct e1000_eeprom_info *eeprom = &hw->eeprom;
5446 uint32_t eecd; 5446 u32 eecd;
5447 uint16_t words_written = 0; 5447 u16 words_written = 0;
5448 uint16_t i = 0; 5448 u16 i = 0;
5449 5449
5450 DEBUGFUNC("e1000_write_eeprom_microwire"); 5450 DEBUGFUNC("e1000_write_eeprom_microwire");
5451 5451
@@ -5456,9 +5456,9 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
5456 * EEPROM into write/erase mode. 5456 * EEPROM into write/erase mode.
5457 */ 5457 */
5458 e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, 5458 e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
5459 (uint16_t)(eeprom->opcode_bits + 2)); 5459 (u16)(eeprom->opcode_bits + 2));
5460 5460
5461 e1000_shift_out_ee_bits(hw, 0, (uint16_t)(eeprom->address_bits - 2)); 5461 e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
5462 5462
5463 /* Prepare the EEPROM */ 5463 /* Prepare the EEPROM */
5464 e1000_standby_eeprom(hw); 5464 e1000_standby_eeprom(hw);
@@ -5468,7 +5468,7 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
5468 e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, 5468 e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
5469 eeprom->opcode_bits); 5469 eeprom->opcode_bits);
5470 5470
5471 e1000_shift_out_ee_bits(hw, (uint16_t)(offset + words_written), 5471 e1000_shift_out_ee_bits(hw, (u16)(offset + words_written),
5472 eeprom->address_bits); 5472 eeprom->address_bits);
5473 5473
5474 /* Send the data */ 5474 /* Send the data */
@@ -5506,9 +5506,9 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
5506 * EEPROM out of write/erase mode. 5506 * EEPROM out of write/erase mode.
5507 */ 5507 */
5508 e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, 5508 e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
5509 (uint16_t)(eeprom->opcode_bits + 2)); 5509 (u16)(eeprom->opcode_bits + 2));
5510 5510
5511 e1000_shift_out_ee_bits(hw, 0, (uint16_t)(eeprom->address_bits - 2)); 5511 e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
5512 5512
5513 return E1000_SUCCESS; 5513 return E1000_SUCCESS;
5514} 5514}
@@ -5523,18 +5523,18 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
5523 * data - word read from the EEPROM 5523 * data - word read from the EEPROM
5524 * words - number of words to read 5524 * words - number of words to read
5525 *****************************************************************************/ 5525 *****************************************************************************/
5526static int32_t 5526static s32
5527e1000_commit_shadow_ram(struct e1000_hw *hw) 5527e1000_commit_shadow_ram(struct e1000_hw *hw)
5528{ 5528{
5529 uint32_t attempts = 100000; 5529 u32 attempts = 100000;
5530 uint32_t eecd = 0; 5530 u32 eecd = 0;
5531 uint32_t flop = 0; 5531 u32 flop = 0;
5532 uint32_t i = 0; 5532 u32 i = 0;
5533 int32_t error = E1000_SUCCESS; 5533 s32 error = E1000_SUCCESS;
5534 uint32_t old_bank_offset = 0; 5534 u32 old_bank_offset = 0;
5535 uint32_t new_bank_offset = 0; 5535 u32 new_bank_offset = 0;
5536 uint8_t low_byte = 0; 5536 u8 low_byte = 0;
5537 uint8_t high_byte = 0; 5537 u8 high_byte = 0;
5538 bool sector_write_failed = false; 5538 bool sector_write_failed = false;
5539 5539
5540 if (hw->mac_type == e1000_82573) { 5540 if (hw->mac_type == e1000_82573) {
@@ -5595,7 +5595,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
5595 * in the other NVM bank or a modified value stored 5595 * in the other NVM bank or a modified value stored
5596 * in the shadow RAM */ 5596 * in the shadow RAM */
5597 if (hw->eeprom_shadow_ram[i].modified) { 5597 if (hw->eeprom_shadow_ram[i].modified) {
5598 low_byte = (uint8_t)hw->eeprom_shadow_ram[i].eeprom_word; 5598 low_byte = (u8)hw->eeprom_shadow_ram[i].eeprom_word;
5599 udelay(100); 5599 udelay(100);
5600 error = e1000_verify_write_ich8_byte(hw, 5600 error = e1000_verify_write_ich8_byte(hw,
5601 (i << 1) + new_bank_offset, low_byte); 5601 (i << 1) + new_bank_offset, low_byte);
@@ -5604,7 +5604,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
5604 sector_write_failed = true; 5604 sector_write_failed = true;
5605 else { 5605 else {
5606 high_byte = 5606 high_byte =
5607 (uint8_t)(hw->eeprom_shadow_ram[i].eeprom_word >> 8); 5607 (u8)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
5608 udelay(100); 5608 udelay(100);
5609 } 5609 }
5610 } else { 5610 } else {
@@ -5687,11 +5687,11 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
5687 * 5687 *
5688 * hw - Struct containing variables accessed by shared code 5688 * hw - Struct containing variables accessed by shared code
5689 *****************************************************************************/ 5689 *****************************************************************************/
5690int32_t 5690s32
5691e1000_read_mac_addr(struct e1000_hw * hw) 5691e1000_read_mac_addr(struct e1000_hw * hw)
5692{ 5692{
5693 uint16_t offset; 5693 u16 offset;
5694 uint16_t eeprom_data, i; 5694 u16 eeprom_data, i;
5695 5695
5696 DEBUGFUNC("e1000_read_mac_addr"); 5696 DEBUGFUNC("e1000_read_mac_addr");
5697 5697
@@ -5701,8 +5701,8 @@ e1000_read_mac_addr(struct e1000_hw * hw)
5701 DEBUGOUT("EEPROM Read Error\n"); 5701 DEBUGOUT("EEPROM Read Error\n");
5702 return -E1000_ERR_EEPROM; 5702 return -E1000_ERR_EEPROM;
5703 } 5703 }
5704 hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF); 5704 hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF);
5705 hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8); 5705 hw->perm_mac_addr[i+1] = (u8) (eeprom_data >> 8);
5706 } 5706 }
5707 5707
5708 switch (hw->mac_type) { 5708 switch (hw->mac_type) {
@@ -5734,8 +5734,8 @@ e1000_read_mac_addr(struct e1000_hw * hw)
5734static void 5734static void
5735e1000_init_rx_addrs(struct e1000_hw *hw) 5735e1000_init_rx_addrs(struct e1000_hw *hw)
5736{ 5736{
5737 uint32_t i; 5737 u32 i;
5738 uint32_t rar_num; 5738 u32 rar_num;
5739 5739
5740 DEBUGFUNC("e1000_init_rx_addrs"); 5740 DEBUGFUNC("e1000_init_rx_addrs");
5741 5741
@@ -5770,11 +5770,11 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
5770 * hw - Struct containing variables accessed by shared code 5770 * hw - Struct containing variables accessed by shared code
5771 * mc_addr - the multicast address to hash 5771 * mc_addr - the multicast address to hash
5772 *****************************************************************************/ 5772 *****************************************************************************/
5773uint32_t 5773u32
5774e1000_hash_mc_addr(struct e1000_hw *hw, 5774e1000_hash_mc_addr(struct e1000_hw *hw,
5775 uint8_t *mc_addr) 5775 u8 *mc_addr)
5776{ 5776{
5777 uint32_t hash_value = 0; 5777 u32 hash_value = 0;
5778 5778
5779 /* The portion of the address that is used for the hash table is 5779 /* The portion of the address that is used for the hash table is
5780 * determined by the mc_filter_type setting. 5780 * determined by the mc_filter_type setting.
@@ -5787,37 +5787,37 @@ e1000_hash_mc_addr(struct e1000_hw *hw,
5787 case 0: 5787 case 0:
5788 if (hw->mac_type == e1000_ich8lan) { 5788 if (hw->mac_type == e1000_ich8lan) {
5789 /* [47:38] i.e. 0x158 for above example address */ 5789 /* [47:38] i.e. 0x158 for above example address */
5790 hash_value = ((mc_addr[4] >> 6) | (((uint16_t) mc_addr[5]) << 2)); 5790 hash_value = ((mc_addr[4] >> 6) | (((u16) mc_addr[5]) << 2));
5791 } else { 5791 } else {
5792 /* [47:36] i.e. 0x563 for above example address */ 5792 /* [47:36] i.e. 0x563 for above example address */
5793 hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); 5793 hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
5794 } 5794 }
5795 break; 5795 break;
5796 case 1: 5796 case 1:
5797 if (hw->mac_type == e1000_ich8lan) { 5797 if (hw->mac_type == e1000_ich8lan) {
5798 /* [46:37] i.e. 0x2B1 for above example address */ 5798 /* [46:37] i.e. 0x2B1 for above example address */
5799 hash_value = ((mc_addr[4] >> 5) | (((uint16_t) mc_addr[5]) << 3)); 5799 hash_value = ((mc_addr[4] >> 5) | (((u16) mc_addr[5]) << 3));
5800 } else { 5800 } else {
5801 /* [46:35] i.e. 0xAC6 for above example address */ 5801 /* [46:35] i.e. 0xAC6 for above example address */
5802 hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5)); 5802 hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
5803 } 5803 }
5804 break; 5804 break;
5805 case 2: 5805 case 2:
5806 if (hw->mac_type == e1000_ich8lan) { 5806 if (hw->mac_type == e1000_ich8lan) {
5807 /*[45:36] i.e. 0x163 for above example address */ 5807 /*[45:36] i.e. 0x163 for above example address */
5808 hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); 5808 hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
5809 } else { 5809 } else {
5810 /* [45:34] i.e. 0x5D8 for above example address */ 5810 /* [45:34] i.e. 0x5D8 for above example address */
5811 hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); 5811 hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
5812 } 5812 }
5813 break; 5813 break;
5814 case 3: 5814 case 3:
5815 if (hw->mac_type == e1000_ich8lan) { 5815 if (hw->mac_type == e1000_ich8lan) {
5816 /* [43:34] i.e. 0x18D for above example address */ 5816 /* [43:34] i.e. 0x18D for above example address */
5817 hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); 5817 hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
5818 } else { 5818 } else {
5819 /* [43:32] i.e. 0x634 for above example address */ 5819 /* [43:32] i.e. 0x634 for above example address */
5820 hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8)); 5820 hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
5821 } 5821 }
5822 break; 5822 break;
5823 } 5823 }
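Each case above selects a different window from the top bits of the destination address (12 bits on most parts, 10 bits on ICH8). A minimal sketch of the non-ICH8, filter-type-0 case, with the helper name being illustrative:

    #include <stdint.h>

    /* mc_filter_type 0 on non-ICH8 parts: bits [47:36] of the address,
     * assembled from the two most significant bytes; the address is in
     * network byte order, so mc_addr[5] holds bits 47:40. */
    static uint32_t mc_hash_type0(const uint8_t mc_addr[6])
    {
            return (uint32_t)((mc_addr[4] >> 4) | ((uint16_t)mc_addr[5] << 4));
    }

    /* Example: last two address bytes 0x34, 0x56 ->
     * (0x34 >> 4) | (0x56 << 4) = 0x563, the value quoted in the case 0 comment. */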
@@ -5837,11 +5837,11 @@ e1000_hash_mc_addr(struct e1000_hw *hw,
5837 *****************************************************************************/ 5837 *****************************************************************************/
5838void 5838void
5839e1000_mta_set(struct e1000_hw *hw, 5839e1000_mta_set(struct e1000_hw *hw,
5840 uint32_t hash_value) 5840 u32 hash_value)
5841{ 5841{
5842 uint32_t hash_bit, hash_reg; 5842 u32 hash_bit, hash_reg;
5843 uint32_t mta; 5843 u32 mta;
5844 uint32_t temp; 5844 u32 temp;
5845 5845
5846 /* The MTA is a register array of 128 32-bit registers. 5846 /* The MTA is a register array of 128 32-bit registers.
5847 * It is treated like an array of 4096 bits. We want to set 5847 * It is treated like an array of 4096 bits. We want to set
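Given that layout, turning a hash value into a table location is a simple bit split. A sketch assuming the 128-register (non-ICH8) table; the helper name is illustrative, not the driver's:

    #include <stdint.h>

    /* Split a 12-bit multicast hash into an MTA register index and a bit
     * index within that 32-bit register (128 registers x 32 bits = 4096 bits). */
    static void mta_index(uint32_t hash_value, uint32_t *hash_reg, uint32_t *hash_bit)
    {
            *hash_reg = (hash_value >> 5) & 0x7F;   /* upper bits: which MTA dword */
            *hash_bit = hash_value & 0x1F;          /* lower 5 bits: which bit     */
    }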
@@ -5886,18 +5886,18 @@ e1000_mta_set(struct e1000_hw *hw,
5886 *****************************************************************************/ 5886 *****************************************************************************/
5887void 5887void
5888e1000_rar_set(struct e1000_hw *hw, 5888e1000_rar_set(struct e1000_hw *hw,
5889 uint8_t *addr, 5889 u8 *addr,
5890 uint32_t index) 5890 u32 index)
5891{ 5891{
5892 uint32_t rar_low, rar_high; 5892 u32 rar_low, rar_high;
5893 5893
5894 /* HW expects these in little endian so we reverse the byte order 5894 /* HW expects these in little endian so we reverse the byte order
5895 * from network order (big endian) to little endian 5895 * from network order (big endian) to little endian
5896 */ 5896 */
5897 rar_low = ((uint32_t) addr[0] | 5897 rar_low = ((u32) addr[0] |
5898 ((uint32_t) addr[1] << 8) | 5898 ((u32) addr[1] << 8) |
5899 ((uint32_t) addr[2] << 16) | ((uint32_t) addr[3] << 24)); 5899 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
5900 rar_high = ((uint32_t) addr[4] | ((uint32_t) addr[5] << 8)); 5900 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
5901 5901
5902 /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx 5902 /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
5903 * unit hang. 5903 * unit hang.
@@ -5944,10 +5944,10 @@ e1000_rar_set(struct e1000_hw *hw,
5944 *****************************************************************************/ 5944 *****************************************************************************/
5945void 5945void
5946e1000_write_vfta(struct e1000_hw *hw, 5946e1000_write_vfta(struct e1000_hw *hw,
5947 uint32_t offset, 5947 u32 offset,
5948 uint32_t value) 5948 u32 value)
5949{ 5949{
5950 uint32_t temp; 5950 u32 temp;
5951 5951
5952 if (hw->mac_type == e1000_ich8lan) 5952 if (hw->mac_type == e1000_ich8lan)
5953 return; 5953 return;
@@ -5972,10 +5972,10 @@ e1000_write_vfta(struct e1000_hw *hw,
5972static void 5972static void
5973e1000_clear_vfta(struct e1000_hw *hw) 5973e1000_clear_vfta(struct e1000_hw *hw)
5974{ 5974{
5975 uint32_t offset; 5975 u32 offset;
5976 uint32_t vfta_value = 0; 5976 u32 vfta_value = 0;
5977 uint32_t vfta_offset = 0; 5977 u32 vfta_offset = 0;
5978 uint32_t vfta_bit_in_reg = 0; 5978 u32 vfta_bit_in_reg = 0;
5979 5979
5980 if (hw->mac_type == e1000_ich8lan) 5980 if (hw->mac_type == e1000_ich8lan)
5981 return; 5981 return;
@@ -6003,15 +6003,15 @@ e1000_clear_vfta(struct e1000_hw *hw)
6003 } 6003 }
6004} 6004}
6005 6005
6006static int32_t 6006static s32
6007e1000_id_led_init(struct e1000_hw * hw) 6007e1000_id_led_init(struct e1000_hw * hw)
6008{ 6008{
6009 uint32_t ledctl; 6009 u32 ledctl;
6010 const uint32_t ledctl_mask = 0x000000FF; 6010 const u32 ledctl_mask = 0x000000FF;
6011 const uint32_t ledctl_on = E1000_LEDCTL_MODE_LED_ON; 6011 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
6012 const uint32_t ledctl_off = E1000_LEDCTL_MODE_LED_OFF; 6012 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
6013 uint16_t eeprom_data, i, temp; 6013 u16 eeprom_data, i, temp;
6014 const uint16_t led_mask = 0x0F; 6014 const u16 led_mask = 0x0F;
6015 6015
6016 DEBUGFUNC("e1000_id_led_init"); 6016 DEBUGFUNC("e1000_id_led_init");
6017 6017
@@ -6086,11 +6086,11 @@ e1000_id_led_init(struct e1000_hw * hw)
6086 * 6086 *
6087 * hw - Struct containing variables accessed by shared code 6087 * hw - Struct containing variables accessed by shared code
6088 *****************************************************************************/ 6088 *****************************************************************************/
6089int32_t 6089s32
6090e1000_setup_led(struct e1000_hw *hw) 6090e1000_setup_led(struct e1000_hw *hw)
6091{ 6091{
6092 uint32_t ledctl; 6092 u32 ledctl;
6093 int32_t ret_val = E1000_SUCCESS; 6093 s32 ret_val = E1000_SUCCESS;
6094 6094
6095 DEBUGFUNC("e1000_setup_led"); 6095 DEBUGFUNC("e1000_setup_led");
6096 6096
@@ -6111,7 +6111,7 @@ e1000_setup_led(struct e1000_hw *hw)
6111 if (ret_val) 6111 if (ret_val)
6112 return ret_val; 6112 return ret_val;
6113 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, 6113 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
6114 (uint16_t)(hw->phy_spd_default & 6114 (u16)(hw->phy_spd_default &
6115 ~IGP01E1000_GMII_SPD)); 6115 ~IGP01E1000_GMII_SPD));
6116 if (ret_val) 6116 if (ret_val)
6117 return ret_val; 6117 return ret_val;
@@ -6145,11 +6145,11 @@ e1000_setup_led(struct e1000_hw *hw)
6145 * 6145 *
6146 * hw - Struct containing variables accessed by shared code 6146 * hw - Struct containing variables accessed by shared code
6147 *****************************************************************************/ 6147 *****************************************************************************/
6148int32_t 6148s32
6149e1000_blink_led_start(struct e1000_hw *hw) 6149e1000_blink_led_start(struct e1000_hw *hw)
6150{ 6150{
6151 int16_t i; 6151 s16 i;
6152 uint32_t ledctl_blink = 0; 6152 u32 ledctl_blink = 0;
6153 6153
6154 DEBUGFUNC("e1000_id_led_blink_on"); 6154 DEBUGFUNC("e1000_id_led_blink_on");
6155 6155
@@ -6180,10 +6180,10 @@ e1000_blink_led_start(struct e1000_hw *hw)
6180 * 6180 *
6181 * hw - Struct containing variables accessed by shared code 6181 * hw - Struct containing variables accessed by shared code
6182 *****************************************************************************/ 6182 *****************************************************************************/
6183int32_t 6183s32
6184e1000_cleanup_led(struct e1000_hw *hw) 6184e1000_cleanup_led(struct e1000_hw *hw)
6185{ 6185{
6186 int32_t ret_val = E1000_SUCCESS; 6186 s32 ret_val = E1000_SUCCESS;
6187 6187
6188 DEBUGFUNC("e1000_cleanup_led"); 6188 DEBUGFUNC("e1000_cleanup_led");
6189 6189
@@ -6222,10 +6222,10 @@ e1000_cleanup_led(struct e1000_hw *hw)
6222 * 6222 *
6223 * hw - Struct containing variables accessed by shared code 6223 * hw - Struct containing variables accessed by shared code
6224 *****************************************************************************/ 6224 *****************************************************************************/
6225int32_t 6225s32
6226e1000_led_on(struct e1000_hw *hw) 6226e1000_led_on(struct e1000_hw *hw)
6227{ 6227{
6228 uint32_t ctrl = E1000_READ_REG(hw, CTRL); 6228 u32 ctrl = E1000_READ_REG(hw, CTRL);
6229 6229
6230 DEBUGFUNC("e1000_led_on"); 6230 DEBUGFUNC("e1000_led_on");
6231 6231
@@ -6273,10 +6273,10 @@ e1000_led_on(struct e1000_hw *hw)
6273 * 6273 *
6274 * hw - Struct containing variables accessed by shared code 6274 * hw - Struct containing variables accessed by shared code
6275 *****************************************************************************/ 6275 *****************************************************************************/
6276int32_t 6276s32
6277e1000_led_off(struct e1000_hw *hw) 6277e1000_led_off(struct e1000_hw *hw)
6278{ 6278{
6279 uint32_t ctrl = E1000_READ_REG(hw, CTRL); 6279 u32 ctrl = E1000_READ_REG(hw, CTRL);
6280 6280
6281 DEBUGFUNC("e1000_led_off"); 6281 DEBUGFUNC("e1000_led_off");
6282 6282
@@ -6327,7 +6327,7 @@ e1000_led_off(struct e1000_hw *hw)
6327static void 6327static void
6328e1000_clear_hw_cntrs(struct e1000_hw *hw) 6328e1000_clear_hw_cntrs(struct e1000_hw *hw)
6329{ 6329{
6330 volatile uint32_t temp; 6330 volatile u32 temp;
6331 6331
6332 temp = E1000_READ_REG(hw, CRCERRS); 6332 temp = E1000_READ_REG(hw, CRCERRS);
6333 temp = E1000_READ_REG(hw, SYMERRS); 6333 temp = E1000_READ_REG(hw, SYMERRS);
@@ -6495,10 +6495,10 @@ e1000_update_adaptive(struct e1000_hw *hw)
6495void 6495void
6496e1000_tbi_adjust_stats(struct e1000_hw *hw, 6496e1000_tbi_adjust_stats(struct e1000_hw *hw,
6497 struct e1000_hw_stats *stats, 6497 struct e1000_hw_stats *stats,
6498 uint32_t frame_len, 6498 u32 frame_len,
6499 uint8_t *mac_addr) 6499 u8 *mac_addr)
6500{ 6500{
6501 uint64_t carry_bit; 6501 u64 carry_bit;
6502 6502
6503 /* First adjust the frame length. */ 6503 /* First adjust the frame length. */
6504 frame_len--; 6504 frame_len--;
@@ -6527,7 +6527,7 @@ e1000_tbi_adjust_stats(struct e1000_hw *hw,
6527 * since the test for a multicast frame will test positive on 6527 * since the test for a multicast frame will test positive on
6528 * a broadcast frame. 6528 * a broadcast frame.
6529 */ 6529 */
6530 if ((mac_addr[0] == (uint8_t) 0xff) && (mac_addr[1] == (uint8_t) 0xff)) 6530 if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
6531 /* Broadcast packet */ 6531 /* Broadcast packet */
6532 stats->bprc++; 6532 stats->bprc++;
6533 else if (*mac_addr & 0x01) 6533 else if (*mac_addr & 0x01)
@@ -6573,9 +6573,9 @@ e1000_tbi_adjust_stats(struct e1000_hw *hw,
6573void 6573void
6574e1000_get_bus_info(struct e1000_hw *hw) 6574e1000_get_bus_info(struct e1000_hw *hw)
6575{ 6575{
6576 int32_t ret_val; 6576 s32 ret_val;
6577 uint16_t pci_ex_link_status; 6577 u16 pci_ex_link_status;
6578 uint32_t status; 6578 u32 status;
6579 6579
6580 switch (hw->mac_type) { 6580 switch (hw->mac_type) {
6581 case e1000_82542_rev2_0: 6581 case e1000_82542_rev2_0:
@@ -6647,8 +6647,8 @@ e1000_get_bus_info(struct e1000_hw *hw)
6647 *****************************************************************************/ 6647 *****************************************************************************/
6648static void 6648static void
6649e1000_write_reg_io(struct e1000_hw *hw, 6649e1000_write_reg_io(struct e1000_hw *hw,
6650 uint32_t offset, 6650 u32 offset,
6651 uint32_t value) 6651 u32 value)
6652{ 6652{
6653 unsigned long io_addr = hw->io_base; 6653 unsigned long io_addr = hw->io_base;
6654 unsigned long io_data = hw->io_base + 4; 6654 unsigned long io_data = hw->io_base + 4;
@@ -6672,15 +6672,15 @@ e1000_write_reg_io(struct e1000_hw *hw,
6672 * register to the minimum and maximum range. 6672 * register to the minimum and maximum range.
6673 * For IGP phy's, the function calculates the range by the AGC registers. 6673 * For IGP phy's, the function calculates the range by the AGC registers.
6674 *****************************************************************************/ 6674 *****************************************************************************/
6675static int32_t 6675static s32
6676e1000_get_cable_length(struct e1000_hw *hw, 6676e1000_get_cable_length(struct e1000_hw *hw,
6677 uint16_t *min_length, 6677 u16 *min_length,
6678 uint16_t *max_length) 6678 u16 *max_length)
6679{ 6679{
6680 int32_t ret_val; 6680 s32 ret_val;
6681 uint16_t agc_value = 0; 6681 u16 agc_value = 0;
6682 uint16_t i, phy_data; 6682 u16 i, phy_data;
6683 uint16_t cable_length; 6683 u16 cable_length;
6684 6684
6685 DEBUGFUNC("e1000_get_cable_length"); 6685 DEBUGFUNC("e1000_get_cable_length");
6686 6686
@@ -6751,9 +6751,9 @@ e1000_get_cable_length(struct e1000_hw *hw,
6751 break; 6751 break;
6752 } 6752 }
6753 } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ 6753 } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
6754 uint16_t cur_agc_value; 6754 u16 cur_agc_value;
6755 uint16_t min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; 6755 u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
6756 uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = 6756 u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
6757 {IGP01E1000_PHY_AGC_A, 6757 {IGP01E1000_PHY_AGC_A,
6758 IGP01E1000_PHY_AGC_B, 6758 IGP01E1000_PHY_AGC_B,
6759 IGP01E1000_PHY_AGC_C, 6759 IGP01E1000_PHY_AGC_C,
@@ -6799,9 +6799,9 @@ e1000_get_cable_length(struct e1000_hw *hw,
6799 IGP01E1000_AGC_RANGE; 6799 IGP01E1000_AGC_RANGE;
6800 } else if (hw->phy_type == e1000_phy_igp_2 || 6800 } else if (hw->phy_type == e1000_phy_igp_2 ||
6801 hw->phy_type == e1000_phy_igp_3) { 6801 hw->phy_type == e1000_phy_igp_3) {
6802 uint16_t cur_agc_index, max_agc_index = 0; 6802 u16 cur_agc_index, max_agc_index = 0;
6803 uint16_t min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1; 6803 u16 min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
6804 uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = 6804 u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
6805 {IGP02E1000_PHY_AGC_A, 6805 {IGP02E1000_PHY_AGC_A,
6806 IGP02E1000_PHY_AGC_B, 6806 IGP02E1000_PHY_AGC_B,
6807 IGP02E1000_PHY_AGC_C, 6807 IGP02E1000_PHY_AGC_C,
@@ -6863,12 +6863,12 @@ e1000_get_cable_length(struct e1000_hw *hw,
6863 * return 0. If the link speed is 1000 Mbps the polarity status is in the 6863 * return 0. If the link speed is 1000 Mbps the polarity status is in the
6864 * IGP01E1000_PHY_PCS_INIT_REG. 6864 * IGP01E1000_PHY_PCS_INIT_REG.
6865 *****************************************************************************/ 6865 *****************************************************************************/
6866static int32_t 6866static s32
6867e1000_check_polarity(struct e1000_hw *hw, 6867e1000_check_polarity(struct e1000_hw *hw,
6868 e1000_rev_polarity *polarity) 6868 e1000_rev_polarity *polarity)
6869{ 6869{
6870 int32_t ret_val; 6870 s32 ret_val;
6871 uint16_t phy_data; 6871 u16 phy_data;
6872 6872
6873 DEBUGFUNC("e1000_check_polarity"); 6873 DEBUGFUNC("e1000_check_polarity");
6874 6874
@@ -6939,11 +6939,11 @@ e1000_check_polarity(struct e1000_hw *hw,
6939 * Link Health register. In IGP this bit is latched high, so the driver must 6939 * Link Health register. In IGP this bit is latched high, so the driver must
6940 * read it immediately after link is established. 6940 * read it immediately after link is established.
6941 *****************************************************************************/ 6941 *****************************************************************************/
6942static int32_t 6942static s32
6943e1000_check_downshift(struct e1000_hw *hw) 6943e1000_check_downshift(struct e1000_hw *hw)
6944{ 6944{
6945 int32_t ret_val; 6945 s32 ret_val;
6946 uint16_t phy_data; 6946 u16 phy_data;
6947 6947
6948 DEBUGFUNC("e1000_check_downshift"); 6948 DEBUGFUNC("e1000_check_downshift");
6949 6949
@@ -6985,18 +6985,18 @@ e1000_check_downshift(struct e1000_hw *hw)
6985 * 6985 *
6986 ****************************************************************************/ 6986 ****************************************************************************/
6987 6987
6988static int32_t 6988static s32
6989e1000_config_dsp_after_link_change(struct e1000_hw *hw, 6989e1000_config_dsp_after_link_change(struct e1000_hw *hw,
6990 bool link_up) 6990 bool link_up)
6991{ 6991{
6992 int32_t ret_val; 6992 s32 ret_val;
6993 uint16_t phy_data, phy_saved_data, speed, duplex, i; 6993 u16 phy_data, phy_saved_data, speed, duplex, i;
6994 uint16_t dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = 6994 u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
6995 {IGP01E1000_PHY_AGC_PARAM_A, 6995 {IGP01E1000_PHY_AGC_PARAM_A,
6996 IGP01E1000_PHY_AGC_PARAM_B, 6996 IGP01E1000_PHY_AGC_PARAM_B,
6997 IGP01E1000_PHY_AGC_PARAM_C, 6997 IGP01E1000_PHY_AGC_PARAM_C,
6998 IGP01E1000_PHY_AGC_PARAM_D}; 6998 IGP01E1000_PHY_AGC_PARAM_D};
6999 uint16_t min_length, max_length; 6999 u16 min_length, max_length;
7000 7000
7001 DEBUGFUNC("e1000_config_dsp_after_link_change"); 7001 DEBUGFUNC("e1000_config_dsp_after_link_change");
7002 7002
@@ -7038,8 +7038,8 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
7038 if ((hw->ffe_config_state == e1000_ffe_config_enabled) && 7038 if ((hw->ffe_config_state == e1000_ffe_config_enabled) &&
7039 (min_length < e1000_igp_cable_length_50)) { 7039 (min_length < e1000_igp_cable_length_50)) {
7040 7040
7041 uint16_t ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; 7041 u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
7042 uint32_t idle_errs = 0; 7042 u32 idle_errs = 0;
7043 7043
7044 /* clear previous idle error counts */ 7044 /* clear previous idle error counts */
7045 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, 7045 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
@@ -7173,11 +7173,11 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
7173 * 7173 *
7174 * hw - Struct containing variables accessed by shared code 7174 * hw - Struct containing variables accessed by shared code
7175 ****************************************************************************/ 7175 ****************************************************************************/
7176static int32_t 7176static s32
7177e1000_set_phy_mode(struct e1000_hw *hw) 7177e1000_set_phy_mode(struct e1000_hw *hw)
7178{ 7178{
7179 int32_t ret_val; 7179 s32 ret_val;
7180 uint16_t eeprom_data; 7180 u16 eeprom_data;
7181 7181
7182 DEBUGFUNC("e1000_set_phy_mode"); 7182 DEBUGFUNC("e1000_set_phy_mode");
7183 7183
@@ -7218,13 +7218,13 @@ e1000_set_phy_mode(struct e1000_hw *hw)
7218 * 7218 *
7219 ****************************************************************************/ 7219 ****************************************************************************/
7220 7220
7221static int32_t 7221static s32
7222e1000_set_d3_lplu_state(struct e1000_hw *hw, 7222e1000_set_d3_lplu_state(struct e1000_hw *hw,
7223 bool active) 7223 bool active)
7224{ 7224{
7225 uint32_t phy_ctrl = 0; 7225 u32 phy_ctrl = 0;
7226 int32_t ret_val; 7226 s32 ret_val;
7227 uint16_t phy_data; 7227 u16 phy_data;
7228 DEBUGFUNC("e1000_set_d3_lplu_state"); 7228 DEBUGFUNC("e1000_set_d3_lplu_state");
7229 7229
7230 if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2 7230 if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2
@@ -7348,13 +7348,13 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
7348 * 7348 *
7349 ****************************************************************************/ 7349 ****************************************************************************/
7350 7350
7351static int32_t 7351static s32
7352e1000_set_d0_lplu_state(struct e1000_hw *hw, 7352e1000_set_d0_lplu_state(struct e1000_hw *hw,
7353 bool active) 7353 bool active)
7354{ 7354{
7355 uint32_t phy_ctrl = 0; 7355 u32 phy_ctrl = 0;
7356 int32_t ret_val; 7356 s32 ret_val;
7357 uint16_t phy_data; 7357 u16 phy_data;
7358 DEBUGFUNC("e1000_set_d0_lplu_state"); 7358 DEBUGFUNC("e1000_set_d0_lplu_state");
7359 7359
7360 if (hw->mac_type <= e1000_82547_rev_2) 7360 if (hw->mac_type <= e1000_82547_rev_2)
@@ -7439,12 +7439,12 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
7439 * 7439 *
7440 * hw - Struct containing variables accessed by shared code 7440 * hw - Struct containing variables accessed by shared code
7441 *****************************************************************************/ 7441 *****************************************************************************/
7442static int32_t 7442static s32
7443e1000_set_vco_speed(struct e1000_hw *hw) 7443e1000_set_vco_speed(struct e1000_hw *hw)
7444{ 7444{
7445 int32_t ret_val; 7445 s32 ret_val;
7446 uint16_t default_page = 0; 7446 u16 default_page = 0;
7447 uint16_t phy_data; 7447 u16 phy_data;
7448 7448
7449 DEBUGFUNC("e1000_set_vco_speed"); 7449 DEBUGFUNC("e1000_set_vco_speed");
7450 7450
@@ -7503,18 +7503,18 @@ e1000_set_vco_speed(struct e1000_hw *hw)
7503 * 7503 *
7504 * returns: - E1000_SUCCESS . 7504 * returns: - E1000_SUCCESS .
7505 ****************************************************************************/ 7505 ****************************************************************************/
7506static int32_t 7506static s32
7507e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer) 7507e1000_host_if_read_cookie(struct e1000_hw * hw, u8 *buffer)
7508{ 7508{
7509 uint8_t i; 7509 u8 i;
7510 uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET; 7510 u32 offset = E1000_MNG_DHCP_COOKIE_OFFSET;
7511 uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH; 7511 u8 length = E1000_MNG_DHCP_COOKIE_LENGTH;
7512 7512
7513 length = (length >> 2); 7513 length = (length >> 2);
7514 offset = (offset >> 2); 7514 offset = (offset >> 2);
7515 7515
7516 for (i = 0; i < length; i++) { 7516 for (i = 0; i < length; i++) {
7517 *((uint32_t *) buffer + i) = 7517 *((u32 *) buffer + i) =
7518 E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i); 7518 E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i);
7519 } 7519 }
7520 return E1000_SUCCESS; 7520 return E1000_SUCCESS;
@@ -7530,11 +7530,11 @@ e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer)
7530 * timeout 7530 * timeout
7531 * - E1000_SUCCESS for success. 7531 * - E1000_SUCCESS for success.
7532 ****************************************************************************/ 7532 ****************************************************************************/
7533static int32_t 7533static s32
7534e1000_mng_enable_host_if(struct e1000_hw * hw) 7534e1000_mng_enable_host_if(struct e1000_hw * hw)
7535{ 7535{
7536 uint32_t hicr; 7536 u32 hicr;
7537 uint8_t i; 7537 u8 i;
7538 7538
7539 /* Check that the host interface is enabled. */ 7539 /* Check that the host interface is enabled. */
7540 hicr = E1000_READ_REG(hw, HICR); 7540 hicr = E1000_READ_REG(hw, HICR);
@@ -7564,14 +7564,14 @@ e1000_mng_enable_host_if(struct e1000_hw * hw)
7564 * 7564 *
7565 * returns - E1000_SUCCESS for success. 7565 * returns - E1000_SUCCESS for success.
7566 ****************************************************************************/ 7566 ****************************************************************************/
7567static int32_t 7567static s32
7568e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer, 7568e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer,
7569 uint16_t length, uint16_t offset, uint8_t *sum) 7569 u16 length, u16 offset, u8 *sum)
7570{ 7570{
7571 uint8_t *tmp; 7571 u8 *tmp;
7572 uint8_t *bufptr = buffer; 7572 u8 *bufptr = buffer;
7573 uint32_t data = 0; 7573 u32 data = 0;
7574 uint16_t remaining, i, j, prev_bytes; 7574 u16 remaining, i, j, prev_bytes;
7575 7575
7576 /* sum = only sum of the data and it is not checksum */ 7576 /* sum = only sum of the data and it is not checksum */
7577 7577
@@ -7579,14 +7579,14 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
7579 return -E1000_ERR_PARAM; 7579 return -E1000_ERR_PARAM;
7580 } 7580 }
7581 7581
7582 tmp = (uint8_t *)&data; 7582 tmp = (u8 *)&data;
7583 prev_bytes = offset & 0x3; 7583 prev_bytes = offset & 0x3;
7584 offset &= 0xFFFC; 7584 offset &= 0xFFFC;
7585 offset >>= 2; 7585 offset >>= 2;
7586 7586
7587 if (prev_bytes) { 7587 if (prev_bytes) {
7588 data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset); 7588 data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset);
7589 for (j = prev_bytes; j < sizeof(uint32_t); j++) { 7589 for (j = prev_bytes; j < sizeof(u32); j++) {
7590 *(tmp + j) = *bufptr++; 7590 *(tmp + j) = *bufptr++;
7591 *sum += *(tmp + j); 7591 *sum += *(tmp + j);
7592 } 7592 }
@@ -7604,7 +7604,7 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
7604 /* The device driver writes the relevant command block into the 7604 /* The device driver writes the relevant command block into the
7605 * ram area. */ 7605 * ram area. */
7606 for (i = 0; i < length; i++) { 7606 for (i = 0; i < length; i++) {
7607 for (j = 0; j < sizeof(uint32_t); j++) { 7607 for (j = 0; j < sizeof(u32); j++) {
7608 *(tmp + j) = *bufptr++; 7608 *(tmp + j) = *bufptr++;
7609 *sum += *(tmp + j); 7609 *sum += *(tmp + j);
7610 } 7610 }
@@ -7612,7 +7612,7 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
7612 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data); 7612 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
7613 } 7613 }
7614 if (remaining) { 7614 if (remaining) {
7615 for (j = 0; j < sizeof(uint32_t); j++) { 7615 for (j = 0; j < sizeof(u32); j++) {
7616 if (j < remaining) 7616 if (j < remaining)
7617 *(tmp + j) = *bufptr++; 7617 *(tmp + j) = *bufptr++;
7618 else 7618 else
@@ -7632,23 +7632,23 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
7632 * 7632 *
7633 * returns - E1000_SUCCESS for success. 7633 * returns - E1000_SUCCESS for success.
7634 ****************************************************************************/ 7634 ****************************************************************************/
7635static int32_t 7635static s32
7636e1000_mng_write_cmd_header(struct e1000_hw * hw, 7636e1000_mng_write_cmd_header(struct e1000_hw * hw,
7637 struct e1000_host_mng_command_header * hdr) 7637 struct e1000_host_mng_command_header * hdr)
7638{ 7638{
7639 uint16_t i; 7639 u16 i;
7640 uint8_t sum; 7640 u8 sum;
7641 uint8_t *buffer; 7641 u8 *buffer;
7642 7642
7643 /* Write the whole command header structure which includes sum of 7643 /* Write the whole command header structure which includes sum of
7644 * the buffer */ 7644 * the buffer */
7645 7645
7646 uint16_t length = sizeof(struct e1000_host_mng_command_header); 7646 u16 length = sizeof(struct e1000_host_mng_command_header);
7647 7647
7648 sum = hdr->checksum; 7648 sum = hdr->checksum;
7649 hdr->checksum = 0; 7649 hdr->checksum = 0;
7650 7650
7651 buffer = (uint8_t *) hdr; 7651 buffer = (u8 *) hdr;
7652 i = length; 7652 i = length;
7653 while (i--) 7653 while (i--)
7654 sum += buffer[i]; 7654 sum += buffer[i];
@@ -7658,7 +7658,7 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
7658 length >>= 2; 7658 length >>= 2;
7659 /* The device driver writes the relevant command block into the ram area. */ 7659 /* The device driver writes the relevant command block into the ram area. */
7660 for (i = 0; i < length; i++) { 7660 for (i = 0; i < length; i++) {
7661 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i)); 7661 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *) hdr + i));
7662 E1000_WRITE_FLUSH(hw); 7662 E1000_WRITE_FLUSH(hw);
7663 } 7663 }
7664 7664
@@ -7672,10 +7672,10 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
7672 * 7672 *
7673 * returns - E1000_SUCCESS for success. 7673 * returns - E1000_SUCCESS for success.
7674 ****************************************************************************/ 7674 ****************************************************************************/
7675static int32_t 7675static s32
7676e1000_mng_write_commit(struct e1000_hw * hw) 7676e1000_mng_write_commit(struct e1000_hw * hw)
7677{ 7677{
7678 uint32_t hicr; 7678 u32 hicr;
7679 7679
7680 hicr = E1000_READ_REG(hw, HICR); 7680 hicr = E1000_READ_REG(hw, HICR);
7681 /* Setting this bit tells the ARC that a new command is pending. */ 7681 /* Setting this bit tells the ARC that a new command is pending. */
@@ -7693,7 +7693,7 @@ e1000_mng_write_commit(struct e1000_hw * hw)
7693bool 7693bool
7694e1000_check_mng_mode(struct e1000_hw *hw) 7694e1000_check_mng_mode(struct e1000_hw *hw)
7695{ 7695{
7696 uint32_t fwsm; 7696 u32 fwsm;
7697 7697
7698 fwsm = E1000_READ_REG(hw, FWSM); 7698 fwsm = E1000_READ_REG(hw, FWSM);
7699 7699
@@ -7712,11 +7712,11 @@ e1000_check_mng_mode(struct e1000_hw *hw)
7712/***************************************************************************** 7712/*****************************************************************************
7713 * This function writes the dhcp info . 7713 * This function writes the dhcp info .
7714 ****************************************************************************/ 7714 ****************************************************************************/
7715int32_t 7715s32
7716e1000_mng_write_dhcp_info(struct e1000_hw * hw, uint8_t *buffer, 7716e1000_mng_write_dhcp_info(struct e1000_hw * hw, u8 *buffer,
7717 uint16_t length) 7717 u16 length)
7718{ 7718{
7719 int32_t ret_val; 7719 s32 ret_val;
7720 struct e1000_host_mng_command_header hdr; 7720 struct e1000_host_mng_command_header hdr;
7721 7721
7722 hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; 7722 hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
@@ -7744,11 +7744,11 @@ e1000_mng_write_dhcp_info(struct e1000_hw * hw, uint8_t *buffer,
7744 * 7744 *
7745 * returns - checksum of buffer contents. 7745 * returns - checksum of buffer contents.
7746 ****************************************************************************/ 7746 ****************************************************************************/
7747static uint8_t 7747static u8
7748e1000_calculate_mng_checksum(char *buffer, uint32_t length) 7748e1000_calculate_mng_checksum(char *buffer, u32 length)
7749{ 7749{
7750 uint8_t sum = 0; 7750 u8 sum = 0;
7751 uint32_t i; 7751 u32 i;
7752 7752
7753 if (!buffer) 7753 if (!buffer)
7754 return 0; 7754 return 0;
@@ -7756,7 +7756,7 @@ e1000_calculate_mng_checksum(char *buffer, uint32_t length)
7756 for (i=0; i < length; i++) 7756 for (i=0; i < length; i++)
7757 sum += buffer[i]; 7757 sum += buffer[i];
7758 7758
7759 return (uint8_t) (0 - sum); 7759 return (u8) (0 - sum);
7760} 7760}
7761 7761
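The value returned above is the additive complement of the byte sum, so a block followed by its checksum byte sums to zero modulo 256. A short sketch of that verification property (helper name illustrative):

    #include <stdint.h>

    /* Returns 1 when data plus its checksum byte sums to 0 mod 256, i.e. when
     * the checksum was produced as (u8)(0 - sum-of-data). */
    static int mng_block_ok(const uint8_t *data, uint32_t len, uint8_t checksum)
    {
            uint8_t sum = checksum;
            uint32_t i;

            for (i = 0; i < len; i++)
                    sum += data[i];
            return sum == 0;
    }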
7762/***************************************************************************** 7762/*****************************************************************************
@@ -7769,10 +7769,10 @@ e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
7769{ 7769{
7770 /* called in init as well as watchdog timer functions */ 7770 /* called in init as well as watchdog timer functions */
7771 7771
7772 int32_t ret_val, checksum; 7772 s32 ret_val, checksum;
7773 bool tx_filter = false; 7773 bool tx_filter = false;
7774 struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie); 7774 struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie);
7775 uint8_t *buffer = (uint8_t *) &(hw->mng_cookie); 7775 u8 *buffer = (u8 *) &(hw->mng_cookie);
7776 7776
7777 if (e1000_check_mng_mode(hw)) { 7777 if (e1000_check_mng_mode(hw)) {
7778 ret_val = e1000_mng_enable_host_if(hw); 7778 ret_val = e1000_mng_enable_host_if(hw);
@@ -7806,11 +7806,11 @@ e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
7806 * returns: - true/false 7806 * returns: - true/false
7807 * 7807 *
7808 *****************************************************************************/ 7808 *****************************************************************************/
7809uint32_t 7809u32
7810e1000_enable_mng_pass_thru(struct e1000_hw *hw) 7810e1000_enable_mng_pass_thru(struct e1000_hw *hw)
7811{ 7811{
7812 uint32_t manc; 7812 u32 manc;
7813 uint32_t fwsm, factps; 7813 u32 fwsm, factps;
7814 7814
7815 if (hw->asf_firmware_present) { 7815 if (hw->asf_firmware_present) {
7816 manc = E1000_READ_REG(hw, MANC); 7816 manc = E1000_READ_REG(hw, MANC);
@@ -7832,12 +7832,12 @@ e1000_enable_mng_pass_thru(struct e1000_hw *hw)
7832 return false; 7832 return false;
7833} 7833}
7834 7834
7835static int32_t 7835static s32
7836e1000_polarity_reversal_workaround(struct e1000_hw *hw) 7836e1000_polarity_reversal_workaround(struct e1000_hw *hw)
7837{ 7837{
7838 int32_t ret_val; 7838 s32 ret_val;
7839 uint16_t mii_status_reg; 7839 u16 mii_status_reg;
7840 uint16_t i; 7840 u16 i;
7841 7841
7842 /* Polarity reversal workaround for forced 10F/10H links. */ 7842 /* Polarity reversal workaround for forced 10F/10H links. */
7843 7843
@@ -7929,7 +7929,7 @@ e1000_polarity_reversal_workaround(struct e1000_hw *hw)
7929static void 7929static void
7930e1000_set_pci_express_master_disable(struct e1000_hw *hw) 7930e1000_set_pci_express_master_disable(struct e1000_hw *hw)
7931{ 7931{
7932 uint32_t ctrl; 7932 u32 ctrl;
7933 7933
7934 DEBUGFUNC("e1000_set_pci_express_master_disable"); 7934 DEBUGFUNC("e1000_set_pci_express_master_disable");
7935 7935
@@ -7952,10 +7952,10 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw)
7952 * E1000_SUCCESS master requests disabled. 7952 * E1000_SUCCESS master requests disabled.
7953 * 7953 *
7954 ******************************************************************************/ 7954 ******************************************************************************/
7955int32_t 7955s32
7956e1000_disable_pciex_master(struct e1000_hw *hw) 7956e1000_disable_pciex_master(struct e1000_hw *hw)
7957{ 7957{
7958 int32_t timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */ 7958 s32 timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */
7959 7959
7960 DEBUGFUNC("e1000_disable_pciex_master"); 7960 DEBUGFUNC("e1000_disable_pciex_master");
7961 7961
@@ -7990,10 +7990,10 @@ e1000_disable_pciex_master(struct e1000_hw *hw)
7990 * E1000_SUCCESS at any other case. 7990 * E1000_SUCCESS at any other case.
7991 * 7991 *
7992 ******************************************************************************/ 7992 ******************************************************************************/
7993static int32_t 7993static s32
7994e1000_get_auto_rd_done(struct e1000_hw *hw) 7994e1000_get_auto_rd_done(struct e1000_hw *hw)
7995{ 7995{
7996 int32_t timeout = AUTO_READ_DONE_TIMEOUT; 7996 s32 timeout = AUTO_READ_DONE_TIMEOUT;
7997 7997
7998 DEBUGFUNC("e1000_get_auto_rd_done"); 7998 DEBUGFUNC("e1000_get_auto_rd_done");
7999 7999
@@ -8038,11 +8038,11 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
8038 * E1000_SUCCESS at any other case. 8038 * E1000_SUCCESS at any other case.
8039 * 8039 *
8040 ***************************************************************************/ 8040 ***************************************************************************/
8041static int32_t 8041static s32
8042e1000_get_phy_cfg_done(struct e1000_hw *hw) 8042e1000_get_phy_cfg_done(struct e1000_hw *hw)
8043{ 8043{
8044 int32_t timeout = PHY_CFG_TIMEOUT; 8044 s32 timeout = PHY_CFG_TIMEOUT;
8045 uint32_t cfg_mask = E1000_EEPROM_CFG_DONE; 8045 u32 cfg_mask = E1000_EEPROM_CFG_DONE;
8046 8046
8047 DEBUGFUNC("e1000_get_phy_cfg_done"); 8047 DEBUGFUNC("e1000_get_phy_cfg_done");
8048 8048
@@ -8085,11 +8085,11 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
8085 * E1000_SUCCESS at any other case. 8085 * E1000_SUCCESS at any other case.
8086 * 8086 *
8087 ***************************************************************************/ 8087 ***************************************************************************/
8088static int32_t 8088static s32
8089e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) 8089e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
8090{ 8090{
8091 int32_t timeout; 8091 s32 timeout;
8092 uint32_t swsm; 8092 u32 swsm;
8093 8093
8094 DEBUGFUNC("e1000_get_hw_eeprom_semaphore"); 8094 DEBUGFUNC("e1000_get_hw_eeprom_semaphore");
8095 8095
@@ -8138,7 +8138,7 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
8138static void 8138static void
8139e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) 8139e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
8140{ 8140{
8141 uint32_t swsm; 8141 u32 swsm;
8142 8142
8143 DEBUGFUNC("e1000_put_hw_eeprom_semaphore"); 8143 DEBUGFUNC("e1000_put_hw_eeprom_semaphore");
8144 8144
@@ -8164,11 +8164,11 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
8164 * E1000_SUCCESS at any other case. 8164 * E1000_SUCCESS at any other case.
8165 * 8165 *
8166 ***************************************************************************/ 8166 ***************************************************************************/
8167static int32_t 8167static s32
8168e1000_get_software_semaphore(struct e1000_hw *hw) 8168e1000_get_software_semaphore(struct e1000_hw *hw)
8169{ 8169{
8170 int32_t timeout = hw->eeprom.word_size + 1; 8170 s32 timeout = hw->eeprom.word_size + 1;
8171 uint32_t swsm; 8171 u32 swsm;
8172 8172
8173 DEBUGFUNC("e1000_get_software_semaphore"); 8173 DEBUGFUNC("e1000_get_software_semaphore");
8174 8174
@@ -8203,7 +8203,7 @@ e1000_get_software_semaphore(struct e1000_hw *hw)
8203static void 8203static void
8204e1000_release_software_semaphore(struct e1000_hw *hw) 8204e1000_release_software_semaphore(struct e1000_hw *hw)
8205{ 8205{
8206 uint32_t swsm; 8206 u32 swsm;
8207 8207
8208 DEBUGFUNC("e1000_release_software_semaphore"); 8208 DEBUGFUNC("e1000_release_software_semaphore");
8209 8209
@@ -8228,11 +8228,11 @@ e1000_release_software_semaphore(struct e1000_hw *hw)
8228 * E1000_SUCCESS 8228 * E1000_SUCCESS
8229 * 8229 *
8230 *****************************************************************************/ 8230 *****************************************************************************/
8231int32_t 8231s32
8232e1000_check_phy_reset_block(struct e1000_hw *hw) 8232e1000_check_phy_reset_block(struct e1000_hw *hw)
8233{ 8233{
8234 uint32_t manc = 0; 8234 u32 manc = 0;
8235 uint32_t fwsm = 0; 8235 u32 fwsm = 0;
8236 8236
8237 if (hw->mac_type == e1000_ich8lan) { 8237 if (hw->mac_type == e1000_ich8lan) {
8238 fwsm = E1000_READ_REG(hw, FWSM); 8238 fwsm = E1000_READ_REG(hw, FWSM);
@@ -8246,10 +8246,10 @@ e1000_check_phy_reset_block(struct e1000_hw *hw)
8246 E1000_BLK_PHY_RESET : E1000_SUCCESS; 8246 E1000_BLK_PHY_RESET : E1000_SUCCESS;
8247} 8247}
8248 8248
8249static uint8_t 8249static u8
8250e1000_arc_subsystem_valid(struct e1000_hw *hw) 8250e1000_arc_subsystem_valid(struct e1000_hw *hw)
8251{ 8251{
8252 uint32_t fwsm; 8252 u32 fwsm;
8253 8253
8254 /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC 8254 /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC
8255 * may not be provided a DMA clock when no manageability features are 8255 * may not be provided a DMA clock when no manageability features are
@@ -8283,10 +8283,10 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
8283 * returns: E1000_SUCCESS 8283 * returns: E1000_SUCCESS
8284 * 8284 *
8285 *****************************************************************************/ 8285 *****************************************************************************/
8286static int32_t 8286static s32
8287e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop) 8287e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop)
8288{ 8288{
8289 uint32_t gcr_reg = 0; 8289 u32 gcr_reg = 0;
8290 8290
8291 DEBUGFUNC("e1000_set_pci_ex_no_snoop"); 8291 DEBUGFUNC("e1000_set_pci_ex_no_snoop");
8292 8292
@@ -8303,7 +8303,7 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
8303 E1000_WRITE_REG(hw, GCR, gcr_reg); 8303 E1000_WRITE_REG(hw, GCR, gcr_reg);
8304 } 8304 }
8305 if (hw->mac_type == e1000_ich8lan) { 8305 if (hw->mac_type == e1000_ich8lan) {
8306 uint32_t ctrl_ext; 8306 u32 ctrl_ext;
8307 8307
8308 E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL); 8308 E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL);
8309 8309
@@ -8324,11 +8324,11 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
8324 * hw: Struct containing variables accessed by shared code 8324 * hw: Struct containing variables accessed by shared code
8325 * 8325 *
8326 ***************************************************************************/ 8326 ***************************************************************************/
8327static int32_t 8327static s32
8328e1000_get_software_flag(struct e1000_hw *hw) 8328e1000_get_software_flag(struct e1000_hw *hw)
8329{ 8329{
8330 int32_t timeout = PHY_CFG_TIMEOUT; 8330 s32 timeout = PHY_CFG_TIMEOUT;
8331 uint32_t extcnf_ctrl; 8331 u32 extcnf_ctrl;
8332 8332
8333 DEBUGFUNC("e1000_get_software_flag"); 8333 DEBUGFUNC("e1000_get_software_flag");
8334 8334
@@ -8366,7 +8366,7 @@ e1000_get_software_flag(struct e1000_hw *hw)
8366static void 8366static void
8367e1000_release_software_flag(struct e1000_hw *hw) 8367e1000_release_software_flag(struct e1000_hw *hw)
8368{ 8368{
8369 uint32_t extcnf_ctrl; 8369 u32 extcnf_ctrl;
8370 8370
8371 DEBUGFUNC("e1000_release_software_flag"); 8371 DEBUGFUNC("e1000_release_software_flag");
8372 8372
@@ -8388,16 +8388,16 @@ e1000_release_software_flag(struct e1000_hw *hw)
8388 * data - word read from the EEPROM 8388 * data - word read from the EEPROM
8389 * words - number of words to read 8389 * words - number of words to read
8390 *****************************************************************************/ 8390 *****************************************************************************/
8391static int32_t 8391static s32
8392e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, 8392e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
8393 uint16_t *data) 8393 u16 *data)
8394{ 8394{
8395 int32_t error = E1000_SUCCESS; 8395 s32 error = E1000_SUCCESS;
8396 uint32_t flash_bank = 0; 8396 u32 flash_bank = 0;
8397 uint32_t act_offset = 0; 8397 u32 act_offset = 0;
8398 uint32_t bank_offset = 0; 8398 u32 bank_offset = 0;
8399 uint16_t word = 0; 8399 u16 word = 0;
8400 uint16_t i = 0; 8400 u16 i = 0;
8401 8401
8402 /* We need to know which is the valid flash bank. In the event 8402 /* We need to know which is the valid flash bank. In the event
8403 * that we didn't allocate eeprom_shadow_ram, we may not be 8403 * that we didn't allocate eeprom_shadow_ram, we may not be
@@ -8444,12 +8444,12 @@ e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8444 * words - number of words to write 8444 * words - number of words to write
8445 * data - words to write to the EEPROM 8445 * data - words to write to the EEPROM
8446 *****************************************************************************/ 8446 *****************************************************************************/
8447static int32_t 8447static s32
8448e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, 8448e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
8449 uint16_t *data) 8449 u16 *data)
8450{ 8450{
8451 uint32_t i = 0; 8451 u32 i = 0;
8452 int32_t error = E1000_SUCCESS; 8452 s32 error = E1000_SUCCESS;
8453 8453
8454 error = e1000_get_software_flag(hw); 8454 error = e1000_get_software_flag(hw);
8455 if (error != E1000_SUCCESS) 8455 if (error != E1000_SUCCESS)
@@ -8491,12 +8491,12 @@ e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8491 * 8491 *
8492 * hw - The pointer to the hw structure 8492 * hw - The pointer to the hw structure
8493 ****************************************************************************/ 8493 ****************************************************************************/
8494static int32_t 8494static s32
8495e1000_ich8_cycle_init(struct e1000_hw *hw) 8495e1000_ich8_cycle_init(struct e1000_hw *hw)
8496{ 8496{
8497 union ich8_hws_flash_status hsfsts; 8497 union ich8_hws_flash_status hsfsts;
8498 int32_t error = E1000_ERR_EEPROM; 8498 s32 error = E1000_ERR_EEPROM;
8499 int32_t i = 0; 8499 s32 i = 0;
8500 8500
8501 DEBUGFUNC("e1000_ich8_cycle_init"); 8501 DEBUGFUNC("e1000_ich8_cycle_init");
8502 8502
@@ -8558,13 +8558,13 @@ e1000_ich8_cycle_init(struct e1000_hw *hw)
8558 * 8558 *
8559 * hw - The pointer to the hw structure 8559 * hw - The pointer to the hw structure
8560 ****************************************************************************/ 8560 ****************************************************************************/
8561static int32_t 8561static s32
8562e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout) 8562e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout)
8563{ 8563{
8564 union ich8_hws_flash_ctrl hsflctl; 8564 union ich8_hws_flash_ctrl hsflctl;
8565 union ich8_hws_flash_status hsfsts; 8565 union ich8_hws_flash_status hsfsts;
8566 int32_t error = E1000_ERR_EEPROM; 8566 s32 error = E1000_ERR_EEPROM;
8567 uint32_t i = 0; 8567 u32 i = 0;
8568 8568
8569 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ 8569 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8570 hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL); 8570 hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
@@ -8593,16 +8593,16 @@ e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
8593 * size - Size of data to read, 1=byte 2=word 8593 * size - Size of data to read, 1=byte 2=word
8594 * data - Pointer to the word to store the value read. 8594 * data - Pointer to the word to store the value read.
8595 *****************************************************************************/ 8595 *****************************************************************************/
8596static int32_t 8596static s32
8597e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, 8597e1000_read_ich8_data(struct e1000_hw *hw, u32 index,
8598 uint32_t size, uint16_t* data) 8598 u32 size, u16* data)
8599{ 8599{
8600 union ich8_hws_flash_status hsfsts; 8600 union ich8_hws_flash_status hsfsts;
8601 union ich8_hws_flash_ctrl hsflctl; 8601 union ich8_hws_flash_ctrl hsflctl;
8602 uint32_t flash_linear_address; 8602 u32 flash_linear_address;
8603 uint32_t flash_data = 0; 8603 u32 flash_data = 0;
8604 int32_t error = -E1000_ERR_EEPROM; 8604 s32 error = -E1000_ERR_EEPROM;
8605 int32_t count = 0; 8605 s32 count = 0;
8606 8606
8607 DEBUGFUNC("e1000_read_ich8_data"); 8607 DEBUGFUNC("e1000_read_ich8_data");
8608 8608
@@ -8640,9 +8640,9 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
8640 if (error == E1000_SUCCESS) { 8640 if (error == E1000_SUCCESS) {
8641 flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0); 8641 flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0);
8642 if (size == 1) { 8642 if (size == 1) {
8643 *data = (uint8_t)(flash_data & 0x000000FF); 8643 *data = (u8)(flash_data & 0x000000FF);
8644 } else if (size == 2) { 8644 } else if (size == 2) {
8645 *data = (uint16_t)(flash_data & 0x0000FFFF); 8645 *data = (u16)(flash_data & 0x0000FFFF);
8646 } 8646 }
8647 break; 8647 break;
8648 } else { 8648 } else {
@@ -8672,16 +8672,16 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
8672 * size - Size of data to read, 1=byte 2=word 8672 * size - Size of data to read, 1=byte 2=word
8673 * data - The byte(s) to write to the NVM. 8673 * data - The byte(s) to write to the NVM.
8674 *****************************************************************************/ 8674 *****************************************************************************/
8675static int32_t 8675static s32
8676e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, 8676e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
8677 uint16_t data) 8677 u16 data)
8678{ 8678{
8679 union ich8_hws_flash_status hsfsts; 8679 union ich8_hws_flash_status hsfsts;
8680 union ich8_hws_flash_ctrl hsflctl; 8680 union ich8_hws_flash_ctrl hsflctl;
8681 uint32_t flash_linear_address; 8681 u32 flash_linear_address;
8682 uint32_t flash_data = 0; 8682 u32 flash_data = 0;
8683 int32_t error = -E1000_ERR_EEPROM; 8683 s32 error = -E1000_ERR_EEPROM;
8684 int32_t count = 0; 8684 s32 count = 0;
8685 8685
8686 DEBUGFUNC("e1000_write_ich8_data"); 8686 DEBUGFUNC("e1000_write_ich8_data");
8687 8687
@@ -8710,9 +8710,9 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
8710 E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address); 8710 E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
8711 8711
8712 if (size == 1) 8712 if (size == 1)
8713 flash_data = (uint32_t)data & 0x00FF; 8713 flash_data = (u32)data & 0x00FF;
8714 else 8714 else
8715 flash_data = (uint32_t)data; 8715 flash_data = (u32)data;
8716 8716
8717 E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data); 8717 E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
8718 8718
@@ -8747,15 +8747,15 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
8747 * index - The index of the byte to read. 8747 * index - The index of the byte to read.
8748 * data - Pointer to a byte to store the value read. 8748 * data - Pointer to a byte to store the value read.
8749 *****************************************************************************/ 8749 *****************************************************************************/
8750static int32_t 8750static s32
8751e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data) 8751e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8* data)
8752{ 8752{
8753 int32_t status = E1000_SUCCESS; 8753 s32 status = E1000_SUCCESS;
8754 uint16_t word = 0; 8754 u16 word = 0;
8755 8755
8756 status = e1000_read_ich8_data(hw, index, 1, &word); 8756 status = e1000_read_ich8_data(hw, index, 1, &word);
8757 if (status == E1000_SUCCESS) { 8757 if (status == E1000_SUCCESS) {
8758 *data = (uint8_t)word; 8758 *data = (u8)word;
8759 } 8759 }
8760 8760
8761 return status; 8761 return status;
@@ -8770,11 +8770,11 @@ e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data)
8770 * index - The index of the byte to write. 8770 * index - The index of the byte to write.
8771 * byte - The byte to write to the NVM. 8771 * byte - The byte to write to the NVM.
8772 *****************************************************************************/ 8772 *****************************************************************************/
8773static int32_t 8773static s32
8774e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte) 8774e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte)
8775{ 8775{
8776 int32_t error = E1000_SUCCESS; 8776 s32 error = E1000_SUCCESS;
8777 int32_t program_retries = 0; 8777 s32 program_retries = 0;
8778 8778
8779 DEBUGOUT2("Byte := %2.2X Offset := %d\n", byte, index); 8779 DEBUGOUT2("Byte := %2.2X Offset := %d\n", byte, index);
8780 8780
@@ -8803,11 +8803,11 @@ e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte)
8803 * index - The index of the byte to read. 8803 * index - The index of the byte to read.
8804 * data - The byte to write to the NVM. 8804 * data - The byte to write to the NVM.
8805 *****************************************************************************/ 8805 *****************************************************************************/
8806static int32_t 8806static s32
8807e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data) 8807e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data)
8808{ 8808{
8809 int32_t status = E1000_SUCCESS; 8809 s32 status = E1000_SUCCESS;
8810 uint16_t word = (uint16_t)data; 8810 u16 word = (u16)data;
8811 8811
8812 status = e1000_write_ich8_data(hw, index, 1, word); 8812 status = e1000_write_ich8_data(hw, index, 1, word);
8813 8813
@@ -8821,10 +8821,10 @@ e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data)
8821 * index - The starting byte index of the word to read. 8821 * index - The starting byte index of the word to read.
8822 * data - Pointer to a word to store the value read. 8822 * data - Pointer to a word to store the value read.
8823 *****************************************************************************/ 8823 *****************************************************************************/
8824static int32_t 8824static s32
8825e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data) 8825e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data)
8826{ 8826{
8827 int32_t status = E1000_SUCCESS; 8827 s32 status = E1000_SUCCESS;
8828 status = e1000_read_ich8_data(hw, index, 2, data); 8828 status = e1000_read_ich8_data(hw, index, 2, data);
8829 return status; 8829 return status;
8830} 8830}
@@ -8840,19 +8840,19 @@ e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data)
8840 * amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the 8840 * amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the
8841 * bank size may be 4, 8 or 64 KBytes 8841 * bank size may be 4, 8 or 64 KBytes
8842 *****************************************************************************/ 8842 *****************************************************************************/
8843static int32_t 8843static s32
8844e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank) 8844e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank)
8845{ 8845{
8846 union ich8_hws_flash_status hsfsts; 8846 union ich8_hws_flash_status hsfsts;
8847 union ich8_hws_flash_ctrl hsflctl; 8847 union ich8_hws_flash_ctrl hsflctl;
8848 uint32_t flash_linear_address; 8848 u32 flash_linear_address;
8849 int32_t count = 0; 8849 s32 count = 0;
8850 int32_t error = E1000_ERR_EEPROM; 8850 s32 error = E1000_ERR_EEPROM;
8851 int32_t iteration; 8851 s32 iteration;
8852 int32_t sub_sector_size = 0; 8852 s32 sub_sector_size = 0;
8853 int32_t bank_size; 8853 s32 bank_size;
8854 int32_t j = 0; 8854 s32 j = 0;
8855 int32_t error_flag = 0; 8855 s32 error_flag = 0;
8856 8856
8857 hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS); 8857 hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
8858 8858
@@ -8930,16 +8930,16 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank)
8930 return error; 8930 return error;
8931} 8931}
8932 8932
8933static int32_t 8933static s32
8934e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, 8934e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
8935 uint32_t cnf_base_addr, uint32_t cnf_size) 8935 u32 cnf_base_addr, u32 cnf_size)
8936{ 8936{
8937 uint32_t ret_val = E1000_SUCCESS; 8937 u32 ret_val = E1000_SUCCESS;
8938 uint16_t word_addr, reg_data, reg_addr; 8938 u16 word_addr, reg_data, reg_addr;
8939 uint16_t i; 8939 u16 i;
8940 8940
8941 /* cnf_base_addr is in DWORD */ 8941 /* cnf_base_addr is in DWORD */
8942 word_addr = (uint16_t)(cnf_base_addr << 1); 8942 word_addr = (u16)(cnf_base_addr << 1);
8943 8943
8944 /* cnf_size is returned in size of dwords */ 8944 /* cnf_size is returned in size of dwords */
8945 for (i = 0; i < cnf_size; i++) { 8945 for (i = 0; i < cnf_size; i++) {
@@ -8955,7 +8955,7 @@ e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
8955 if (ret_val != E1000_SUCCESS) 8955 if (ret_val != E1000_SUCCESS)
8956 return ret_val; 8956 return ret_val;
8957 8957
8958 ret_val = e1000_write_phy_reg_ex(hw, (uint32_t)reg_addr, reg_data); 8958 ret_val = e1000_write_phy_reg_ex(hw, (u32)reg_addr, reg_data);
8959 8959
8960 e1000_release_software_flag(hw); 8960 e1000_release_software_flag(hw);
8961 } 8961 }
@@ -8972,10 +8972,10 @@ e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
8972 * 8972 *
8973 * hw: Struct containing variables accessed by shared code 8973 * hw: Struct containing variables accessed by shared code
8974 *****************************************************************************/ 8974 *****************************************************************************/
8975static int32_t 8975static s32
8976e1000_init_lcd_from_nvm(struct e1000_hw *hw) 8976e1000_init_lcd_from_nvm(struct e1000_hw *hw)
8977{ 8977{
8978 uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop; 8978 u32 reg_data, cnf_base_addr, cnf_size, ret_val, loop;
8979 8979
8980 if (hw->phy_type != e1000_phy_igp_3) 8980 if (hw->phy_type != e1000_phy_igp_3)
8981 return E1000_SUCCESS; 8981 return E1000_SUCCESS;
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 572a7b6dc12e..99fce2c5dd26 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -100,7 +100,7 @@ typedef enum {
100} e1000_fc_type; 100} e1000_fc_type;
101 101
102struct e1000_shadow_ram { 102struct e1000_shadow_ram {
103 uint16_t eeprom_word; 103 u16 eeprom_word;
104 bool modified; 104 bool modified;
105}; 105};
106 106
@@ -263,17 +263,17 @@ struct e1000_phy_info {
263}; 263};
264 264
265struct e1000_phy_stats { 265struct e1000_phy_stats {
266 uint32_t idle_errors; 266 u32 idle_errors;
267 uint32_t receive_errors; 267 u32 receive_errors;
268}; 268};
269 269
270struct e1000_eeprom_info { 270struct e1000_eeprom_info {
271 e1000_eeprom_type type; 271 e1000_eeprom_type type;
272 uint16_t word_size; 272 u16 word_size;
273 uint16_t opcode_bits; 273 u16 opcode_bits;
274 uint16_t address_bits; 274 u16 address_bits;
275 uint16_t delay_usec; 275 u16 delay_usec;
276 uint16_t page_size; 276 u16 page_size;
277 bool use_eerd; 277 bool use_eerd;
278 bool use_eewr; 278 bool use_eewr;
279}; 279};
@@ -308,34 +308,34 @@ typedef enum {
308 308
309/* Function prototypes */ 309/* Function prototypes */
310/* Initialization */ 310/* Initialization */
311int32_t e1000_reset_hw(struct e1000_hw *hw); 311s32 e1000_reset_hw(struct e1000_hw *hw);
312int32_t e1000_init_hw(struct e1000_hw *hw); 312s32 e1000_init_hw(struct e1000_hw *hw);
313int32_t e1000_set_mac_type(struct e1000_hw *hw); 313s32 e1000_set_mac_type(struct e1000_hw *hw);
314void e1000_set_media_type(struct e1000_hw *hw); 314void e1000_set_media_type(struct e1000_hw *hw);
315 315
316/* Link Configuration */ 316/* Link Configuration */
317int32_t e1000_setup_link(struct e1000_hw *hw); 317s32 e1000_setup_link(struct e1000_hw *hw);
318int32_t e1000_phy_setup_autoneg(struct e1000_hw *hw); 318s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
319void e1000_config_collision_dist(struct e1000_hw *hw); 319void e1000_config_collision_dist(struct e1000_hw *hw);
320int32_t e1000_check_for_link(struct e1000_hw *hw); 320s32 e1000_check_for_link(struct e1000_hw *hw);
321int32_t e1000_get_speed_and_duplex(struct e1000_hw *hw, uint16_t *speed, uint16_t *duplex); 321s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
322int32_t e1000_force_mac_fc(struct e1000_hw *hw); 322s32 e1000_force_mac_fc(struct e1000_hw *hw);
323 323
324/* PHY */ 324/* PHY */
325int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data); 325s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data);
326int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); 326s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
327int32_t e1000_phy_hw_reset(struct e1000_hw *hw); 327s32 e1000_phy_hw_reset(struct e1000_hw *hw);
328int32_t e1000_phy_reset(struct e1000_hw *hw); 328s32 e1000_phy_reset(struct e1000_hw *hw);
329int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); 329s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
330int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); 330s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
331 331
332void e1000_phy_powerdown_workaround(struct e1000_hw *hw); 332void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
333 333
334/* EEPROM Functions */ 334/* EEPROM Functions */
335int32_t e1000_init_eeprom_params(struct e1000_hw *hw); 335s32 e1000_init_eeprom_params(struct e1000_hw *hw);
336 336
337/* MNG HOST IF functions */ 337/* MNG HOST IF functions */
338uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw); 338u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw);
339 339
340#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 340#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
341#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ 341#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
@@ -354,80 +354,80 @@ uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
354#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F 354#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
355 355
356struct e1000_host_mng_command_header { 356struct e1000_host_mng_command_header {
357 uint8_t command_id; 357 u8 command_id;
358 uint8_t checksum; 358 u8 checksum;
359 uint16_t reserved1; 359 u16 reserved1;
360 uint16_t reserved2; 360 u16 reserved2;
361 uint16_t command_length; 361 u16 command_length;
362}; 362};
363 363
364struct e1000_host_mng_command_info { 364struct e1000_host_mng_command_info {
365 struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ 365 struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */
366 uint8_t command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658*/ 366 u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658*/
367}; 367};
368#ifdef __BIG_ENDIAN 368#ifdef __BIG_ENDIAN
369struct e1000_host_mng_dhcp_cookie{ 369struct e1000_host_mng_dhcp_cookie{
370 uint32_t signature; 370 u32 signature;
371 uint16_t vlan_id; 371 u16 vlan_id;
372 uint8_t reserved0; 372 u8 reserved0;
373 uint8_t status; 373 u8 status;
374 uint32_t reserved1; 374 u32 reserved1;
375 uint8_t checksum; 375 u8 checksum;
376 uint8_t reserved3; 376 u8 reserved3;
377 uint16_t reserved2; 377 u16 reserved2;
378}; 378};
379#else 379#else
380struct e1000_host_mng_dhcp_cookie{ 380struct e1000_host_mng_dhcp_cookie{
381 uint32_t signature; 381 u32 signature;
382 uint8_t status; 382 u8 status;
383 uint8_t reserved0; 383 u8 reserved0;
384 uint16_t vlan_id; 384 u16 vlan_id;
385 uint32_t reserved1; 385 u32 reserved1;
386 uint16_t reserved2; 386 u16 reserved2;
387 uint8_t reserved3; 387 u8 reserved3;
388 uint8_t checksum; 388 u8 checksum;
389}; 389};
390#endif 390#endif
391 391
392int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer, 392s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer,
393 uint16_t length); 393 u16 length);
394bool e1000_check_mng_mode(struct e1000_hw *hw); 394bool e1000_check_mng_mode(struct e1000_hw *hw);
395bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); 395bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
396int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); 396s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
397int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw); 397s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
398int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw); 398s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
399int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); 399s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
400int32_t e1000_read_mac_addr(struct e1000_hw * hw); 400s32 e1000_read_mac_addr(struct e1000_hw * hw);
401 401
402/* Filters (multicast, vlan, receive) */ 402/* Filters (multicast, vlan, receive) */
403uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr); 403u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr);
404void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value); 404void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
405void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index); 405void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index);
406void e1000_write_vfta(struct e1000_hw *hw, uint32_t offset, uint32_t value); 406void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
407 407
408/* LED functions */ 408/* LED functions */
409int32_t e1000_setup_led(struct e1000_hw *hw); 409s32 e1000_setup_led(struct e1000_hw *hw);
410int32_t e1000_cleanup_led(struct e1000_hw *hw); 410s32 e1000_cleanup_led(struct e1000_hw *hw);
411int32_t e1000_led_on(struct e1000_hw *hw); 411s32 e1000_led_on(struct e1000_hw *hw);
412int32_t e1000_led_off(struct e1000_hw *hw); 412s32 e1000_led_off(struct e1000_hw *hw);
413int32_t e1000_blink_led_start(struct e1000_hw *hw); 413s32 e1000_blink_led_start(struct e1000_hw *hw);
414 414
415/* Adaptive IFS Functions */ 415/* Adaptive IFS Functions */
416 416
417/* Everything else */ 417/* Everything else */
418void e1000_reset_adaptive(struct e1000_hw *hw); 418void e1000_reset_adaptive(struct e1000_hw *hw);
419void e1000_update_adaptive(struct e1000_hw *hw); 419void e1000_update_adaptive(struct e1000_hw *hw);
420void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, uint32_t frame_len, uint8_t * mac_addr); 420void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, u32 frame_len, u8 * mac_addr);
421void e1000_get_bus_info(struct e1000_hw *hw); 421void e1000_get_bus_info(struct e1000_hw *hw);
422void e1000_pci_set_mwi(struct e1000_hw *hw); 422void e1000_pci_set_mwi(struct e1000_hw *hw);
423void e1000_pci_clear_mwi(struct e1000_hw *hw); 423void e1000_pci_clear_mwi(struct e1000_hw *hw);
424int32_t e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value); 424s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
425void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); 425void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc);
426int e1000_pcix_get_mmrbc(struct e1000_hw *hw); 426int e1000_pcix_get_mmrbc(struct e1000_hw *hw);
427/* Port I/O is only supported on 82544 and newer */ 427/* Port I/O is only supported on 82544 and newer */
428void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); 428void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
429int32_t e1000_disable_pciex_master(struct e1000_hw *hw); 429s32 e1000_disable_pciex_master(struct e1000_hw *hw);
430int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); 430s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
431 431
432 432
433#define E1000_READ_REG_IO(a, reg) \ 433#define E1000_READ_REG_IO(a, reg) \
@@ -596,8 +596,8 @@ struct e1000_rx_desc {
596 __le64 buffer_addr; /* Address of the descriptor's data buffer */ 596 __le64 buffer_addr; /* Address of the descriptor's data buffer */
597 __le16 length; /* Length of data DMAed into data buffer */ 597 __le16 length; /* Length of data DMAed into data buffer */
598 __le16 csum; /* Packet checksum */ 598 __le16 csum; /* Packet checksum */
599 uint8_t status; /* Descriptor status */ 599 u8 status; /* Descriptor status */
600 uint8_t errors; /* Descriptor Errors */ 600 u8 errors; /* Descriptor Errors */
601 __le16 special; 601 __le16 special;
602}; 602};
603 603
@@ -718,15 +718,15 @@ struct e1000_tx_desc {
718 __le32 data; 718 __le32 data;
719 struct { 719 struct {
720 __le16 length; /* Data buffer length */ 720 __le16 length; /* Data buffer length */
721 uint8_t cso; /* Checksum offset */ 721 u8 cso; /* Checksum offset */
722 uint8_t cmd; /* Descriptor control */ 722 u8 cmd; /* Descriptor control */
723 } flags; 723 } flags;
724 } lower; 724 } lower;
725 union { 725 union {
726 __le32 data; 726 __le32 data;
727 struct { 727 struct {
728 uint8_t status; /* Descriptor status */ 728 u8 status; /* Descriptor status */
729 uint8_t css; /* Checksum start */ 729 u8 css; /* Checksum start */
730 __le16 special; 730 __le16 special;
731 } fields; 731 } fields;
732 } upper; 732 } upper;
@@ -759,16 +759,16 @@ struct e1000_context_desc {
759 union { 759 union {
760 __le32 ip_config; 760 __le32 ip_config;
761 struct { 761 struct {
762 uint8_t ipcss; /* IP checksum start */ 762 u8 ipcss; /* IP checksum start */
763 uint8_t ipcso; /* IP checksum offset */ 763 u8 ipcso; /* IP checksum offset */
764 __le16 ipcse; /* IP checksum end */ 764 __le16 ipcse; /* IP checksum end */
765 } ip_fields; 765 } ip_fields;
766 } lower_setup; 766 } lower_setup;
767 union { 767 union {
768 __le32 tcp_config; 768 __le32 tcp_config;
769 struct { 769 struct {
770 uint8_t tucss; /* TCP checksum start */ 770 u8 tucss; /* TCP checksum start */
771 uint8_t tucso; /* TCP checksum offset */ 771 u8 tucso; /* TCP checksum offset */
772 __le16 tucse; /* TCP checksum end */ 772 __le16 tucse; /* TCP checksum end */
773 } tcp_fields; 773 } tcp_fields;
774 } upper_setup; 774 } upper_setup;
@@ -776,8 +776,8 @@ struct e1000_context_desc {
776 union { 776 union {
777 __le32 data; 777 __le32 data;
778 struct { 778 struct {
779 uint8_t status; /* Descriptor status */ 779 u8 status; /* Descriptor status */
780 uint8_t hdr_len; /* Header length */ 780 u8 hdr_len; /* Header length */
781 __le16 mss; /* Maximum segment size */ 781 __le16 mss; /* Maximum segment size */
782 } fields; 782 } fields;
783 } tcp_seg_setup; 783 } tcp_seg_setup;
@@ -790,15 +790,15 @@ struct e1000_data_desc {
790 __le32 data; 790 __le32 data;
791 struct { 791 struct {
792 __le16 length; /* Data buffer length */ 792 __le16 length; /* Data buffer length */
793 uint8_t typ_len_ext; /* */ 793 u8 typ_len_ext; /* */
794 uint8_t cmd; /* */ 794 u8 cmd; /* */
795 } flags; 795 } flags;
796 } lower; 796 } lower;
797 union { 797 union {
798 __le32 data; 798 __le32 data;
799 struct { 799 struct {
800 uint8_t status; /* Descriptor status */ 800 u8 status; /* Descriptor status */
801 uint8_t popts; /* Packet Options */ 801 u8 popts; /* Packet Options */
802 __le16 special; /* */ 802 __le16 special; /* */
803 } fields; 803 } fields;
804 } upper; 804 } upper;
@@ -825,8 +825,8 @@ struct e1000_rar {
825 825
826/* IPv4 Address Table Entry */ 826/* IPv4 Address Table Entry */
827struct e1000_ipv4_at_entry { 827struct e1000_ipv4_at_entry {
828 volatile uint32_t ipv4_addr; /* IP Address (RW) */ 828 volatile u32 ipv4_addr; /* IP Address (RW) */
829 volatile uint32_t reserved; 829 volatile u32 reserved;
830}; 830};
831 831
832/* Four wakeup IP addresses are supported */ 832/* Four wakeup IP addresses are supported */
@@ -837,25 +837,25 @@ struct e1000_ipv4_at_entry {
837 837
838/* IPv6 Address Table Entry */ 838/* IPv6 Address Table Entry */
839struct e1000_ipv6_at_entry { 839struct e1000_ipv6_at_entry {
840 volatile uint8_t ipv6_addr[16]; 840 volatile u8 ipv6_addr[16];
841}; 841};
842 842
843/* Flexible Filter Length Table Entry */ 843/* Flexible Filter Length Table Entry */
844struct e1000_fflt_entry { 844struct e1000_fflt_entry {
845 volatile uint32_t length; /* Flexible Filter Length (RW) */ 845 volatile u32 length; /* Flexible Filter Length (RW) */
846 volatile uint32_t reserved; 846 volatile u32 reserved;
847}; 847};
848 848
849/* Flexible Filter Mask Table Entry */ 849/* Flexible Filter Mask Table Entry */
850struct e1000_ffmt_entry { 850struct e1000_ffmt_entry {
851 volatile uint32_t mask; /* Flexible Filter Mask (RW) */ 851 volatile u32 mask; /* Flexible Filter Mask (RW) */
852 volatile uint32_t reserved; 852 volatile u32 reserved;
853}; 853};
854 854
855/* Flexible Filter Value Table Entry */ 855/* Flexible Filter Value Table Entry */
856struct e1000_ffvt_entry { 856struct e1000_ffvt_entry {
857 volatile uint32_t value; /* Flexible Filter Value (RW) */ 857 volatile u32 value; /* Flexible Filter Value (RW) */
858 volatile uint32_t reserved; 858 volatile u32 reserved;
859}; 859};
860 860
861/* Four Flexible Filters are supported */ 861/* Four Flexible Filters are supported */
@@ -1309,89 +1309,89 @@ struct e1000_ffvt_entry {
1309 1309
1310/* Statistics counters collected by the MAC */ 1310/* Statistics counters collected by the MAC */
1311struct e1000_hw_stats { 1311struct e1000_hw_stats {
1312 uint64_t crcerrs; 1312 u64 crcerrs;
1313 uint64_t algnerrc; 1313 u64 algnerrc;
1314 uint64_t symerrs; 1314 u64 symerrs;
1315 uint64_t rxerrc; 1315 u64 rxerrc;
1316 uint64_t txerrc; 1316 u64 txerrc;
1317 uint64_t mpc; 1317 u64 mpc;
1318 uint64_t scc; 1318 u64 scc;
1319 uint64_t ecol; 1319 u64 ecol;
1320 uint64_t mcc; 1320 u64 mcc;
1321 uint64_t latecol; 1321 u64 latecol;
1322 uint64_t colc; 1322 u64 colc;
1323 uint64_t dc; 1323 u64 dc;
1324 uint64_t tncrs; 1324 u64 tncrs;
1325 uint64_t sec; 1325 u64 sec;
1326 uint64_t cexterr; 1326 u64 cexterr;
1327 uint64_t rlec; 1327 u64 rlec;
1328 uint64_t xonrxc; 1328 u64 xonrxc;
1329 uint64_t xontxc; 1329 u64 xontxc;
1330 uint64_t xoffrxc; 1330 u64 xoffrxc;
1331 uint64_t xofftxc; 1331 u64 xofftxc;
1332 uint64_t fcruc; 1332 u64 fcruc;
1333 uint64_t prc64; 1333 u64 prc64;
1334 uint64_t prc127; 1334 u64 prc127;
1335 uint64_t prc255; 1335 u64 prc255;
1336 uint64_t prc511; 1336 u64 prc511;
1337 uint64_t prc1023; 1337 u64 prc1023;
1338 uint64_t prc1522; 1338 u64 prc1522;
1339 uint64_t gprc; 1339 u64 gprc;
1340 uint64_t bprc; 1340 u64 bprc;
1341 uint64_t mprc; 1341 u64 mprc;
1342 uint64_t gptc; 1342 u64 gptc;
1343 uint64_t gorcl; 1343 u64 gorcl;
1344 uint64_t gorch; 1344 u64 gorch;
1345 uint64_t gotcl; 1345 u64 gotcl;
1346 uint64_t gotch; 1346 u64 gotch;
1347 uint64_t rnbc; 1347 u64 rnbc;
1348 uint64_t ruc; 1348 u64 ruc;
1349 uint64_t rfc; 1349 u64 rfc;
1350 uint64_t roc; 1350 u64 roc;
1351 uint64_t rlerrc; 1351 u64 rlerrc;
1352 uint64_t rjc; 1352 u64 rjc;
1353 uint64_t mgprc; 1353 u64 mgprc;
1354 uint64_t mgpdc; 1354 u64 mgpdc;
1355 uint64_t mgptc; 1355 u64 mgptc;
1356 uint64_t torl; 1356 u64 torl;
1357 uint64_t torh; 1357 u64 torh;
1358 uint64_t totl; 1358 u64 totl;
1359 uint64_t toth; 1359 u64 toth;
1360 uint64_t tpr; 1360 u64 tpr;
1361 uint64_t tpt; 1361 u64 tpt;
1362 uint64_t ptc64; 1362 u64 ptc64;
1363 uint64_t ptc127; 1363 u64 ptc127;
1364 uint64_t ptc255; 1364 u64 ptc255;
1365 uint64_t ptc511; 1365 u64 ptc511;
1366 uint64_t ptc1023; 1366 u64 ptc1023;
1367 uint64_t ptc1522; 1367 u64 ptc1522;
1368 uint64_t mptc; 1368 u64 mptc;
1369 uint64_t bptc; 1369 u64 bptc;
1370 uint64_t tsctc; 1370 u64 tsctc;
1371 uint64_t tsctfc; 1371 u64 tsctfc;
1372 uint64_t iac; 1372 u64 iac;
1373 uint64_t icrxptc; 1373 u64 icrxptc;
1374 uint64_t icrxatc; 1374 u64 icrxatc;
1375 uint64_t ictxptc; 1375 u64 ictxptc;
1376 uint64_t ictxatc; 1376 u64 ictxatc;
1377 uint64_t ictxqec; 1377 u64 ictxqec;
1378 uint64_t ictxqmtc; 1378 u64 ictxqmtc;
1379 uint64_t icrxdmtc; 1379 u64 icrxdmtc;
1380 uint64_t icrxoc; 1380 u64 icrxoc;
1381}; 1381};
1382 1382
1383/* Structure containing variables used by the shared code (e1000_hw.c) */ 1383/* Structure containing variables used by the shared code (e1000_hw.c) */
1384struct e1000_hw { 1384struct e1000_hw {
1385 uint8_t __iomem *hw_addr; 1385 u8 __iomem *hw_addr;
1386 uint8_t __iomem *flash_address; 1386 u8 __iomem *flash_address;
1387 e1000_mac_type mac_type; 1387 e1000_mac_type mac_type;
1388 e1000_phy_type phy_type; 1388 e1000_phy_type phy_type;
1389 uint32_t phy_init_script; 1389 u32 phy_init_script;
1390 e1000_media_type media_type; 1390 e1000_media_type media_type;
1391 void *back; 1391 void *back;
1392 struct e1000_shadow_ram *eeprom_shadow_ram; 1392 struct e1000_shadow_ram *eeprom_shadow_ram;
1393 uint32_t flash_bank_size; 1393 u32 flash_bank_size;
1394 uint32_t flash_base_addr; 1394 u32 flash_base_addr;
1395 e1000_fc_type fc; 1395 e1000_fc_type fc;
1396 e1000_bus_speed bus_speed; 1396 e1000_bus_speed bus_speed;
1397 e1000_bus_width bus_width; 1397 e1000_bus_width bus_width;
@@ -1400,51 +1400,51 @@ struct e1000_hw {
1400 e1000_ms_type master_slave; 1400 e1000_ms_type master_slave;
1401 e1000_ms_type original_master_slave; 1401 e1000_ms_type original_master_slave;
1402 e1000_ffe_config ffe_config_state; 1402 e1000_ffe_config ffe_config_state;
1403 uint32_t asf_firmware_present; 1403 u32 asf_firmware_present;
1404 uint32_t eeprom_semaphore_present; 1404 u32 eeprom_semaphore_present;
1405 uint32_t swfw_sync_present; 1405 u32 swfw_sync_present;
1406 uint32_t swfwhw_semaphore_present; 1406 u32 swfwhw_semaphore_present;
1407 unsigned long io_base; 1407 unsigned long io_base;
1408 uint32_t phy_id; 1408 u32 phy_id;
1409 uint32_t phy_revision; 1409 u32 phy_revision;
1410 uint32_t phy_addr; 1410 u32 phy_addr;
1411 uint32_t original_fc; 1411 u32 original_fc;
1412 uint32_t txcw; 1412 u32 txcw;
1413 uint32_t autoneg_failed; 1413 u32 autoneg_failed;
1414 uint32_t max_frame_size; 1414 u32 max_frame_size;
1415 uint32_t min_frame_size; 1415 u32 min_frame_size;
1416 uint32_t mc_filter_type; 1416 u32 mc_filter_type;
1417 uint32_t num_mc_addrs; 1417 u32 num_mc_addrs;
1418 uint32_t collision_delta; 1418 u32 collision_delta;
1419 uint32_t tx_packet_delta; 1419 u32 tx_packet_delta;
1420 uint32_t ledctl_default; 1420 u32 ledctl_default;
1421 uint32_t ledctl_mode1; 1421 u32 ledctl_mode1;
1422 uint32_t ledctl_mode2; 1422 u32 ledctl_mode2;
1423 bool tx_pkt_filtering; 1423 bool tx_pkt_filtering;
1424 struct e1000_host_mng_dhcp_cookie mng_cookie; 1424 struct e1000_host_mng_dhcp_cookie mng_cookie;
1425 uint16_t phy_spd_default; 1425 u16 phy_spd_default;
1426 uint16_t autoneg_advertised; 1426 u16 autoneg_advertised;
1427 uint16_t pci_cmd_word; 1427 u16 pci_cmd_word;
1428 uint16_t fc_high_water; 1428 u16 fc_high_water;
1429 uint16_t fc_low_water; 1429 u16 fc_low_water;
1430 uint16_t fc_pause_time; 1430 u16 fc_pause_time;
1431 uint16_t current_ifs_val; 1431 u16 current_ifs_val;
1432 uint16_t ifs_min_val; 1432 u16 ifs_min_val;
1433 uint16_t ifs_max_val; 1433 u16 ifs_max_val;
1434 uint16_t ifs_step_size; 1434 u16 ifs_step_size;
1435 uint16_t ifs_ratio; 1435 u16 ifs_ratio;
1436 uint16_t device_id; 1436 u16 device_id;
1437 uint16_t vendor_id; 1437 u16 vendor_id;
1438 uint16_t subsystem_id; 1438 u16 subsystem_id;
1439 uint16_t subsystem_vendor_id; 1439 u16 subsystem_vendor_id;
1440 uint8_t revision_id; 1440 u8 revision_id;
1441 uint8_t autoneg; 1441 u8 autoneg;
1442 uint8_t mdix; 1442 u8 mdix;
1443 uint8_t forced_speed_duplex; 1443 u8 forced_speed_duplex;
1444 uint8_t wait_autoneg_complete; 1444 u8 wait_autoneg_complete;
1445 uint8_t dma_fairness; 1445 u8 dma_fairness;
1446 uint8_t mac_addr[NODE_ADDRESS_SIZE]; 1446 u8 mac_addr[NODE_ADDRESS_SIZE];
1447 uint8_t perm_mac_addr[NODE_ADDRESS_SIZE]; 1447 u8 perm_mac_addr[NODE_ADDRESS_SIZE];
1448 bool disable_polarity_correction; 1448 bool disable_polarity_correction;
1449 bool speed_downgraded; 1449 bool speed_downgraded;
1450 e1000_smart_speed smart_speed; 1450 e1000_smart_speed smart_speed;
@@ -2165,14 +2165,14 @@ typedef enum {
2165#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ 2165#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */
2166 2166
2167struct e1000_host_command_header { 2167struct e1000_host_command_header {
2168 uint8_t command_id; 2168 u8 command_id;
2169 uint8_t command_length; 2169 u8 command_length;
2170 uint8_t command_options; /* I/F bits for command, status for return */ 2170 u8 command_options; /* I/F bits for command, status for return */
2171 uint8_t checksum; 2171 u8 checksum;
2172}; 2172};
2173struct e1000_host_command_info { 2173struct e1000_host_command_info {
2174 struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ 2174 struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */
2175 uint8_t command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ 2175 u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */
2176}; 2176};
2177 2177
2178/* Host SMB register #0 */ 2178/* Host SMB register #0 */
@@ -2495,7 +2495,7 @@ struct e1000_host_command_info {
2495/* Number of milliseconds we wait for PHY configuration done after MAC reset */ 2495/* Number of milliseconds we wait for PHY configuration done after MAC reset */
2496#define PHY_CFG_TIMEOUT 100 2496#define PHY_CFG_TIMEOUT 100
2497 2497
2498#define E1000_TX_BUFFER_SIZE ((uint32_t)1514) 2498#define E1000_TX_BUFFER_SIZE ((u32)1514)
2499 2499
2500/* The carrier extension symbol, as received by the NIC. */ 2500/* The carrier extension symbol, as received by the NIC. */
2501#define CARRIER_EXTENSION 0x0F 2501#define CARRIER_EXTENSION 0x0F
@@ -3312,68 +3312,68 @@ struct e1000_host_command_info {
3312/* Offset 04h HSFSTS */ 3312/* Offset 04h HSFSTS */
3313union ich8_hws_flash_status { 3313union ich8_hws_flash_status {
3314 struct ich8_hsfsts { 3314 struct ich8_hsfsts {
3315#ifdef E1000_BIG_ENDIAN 3315#ifdef __BIG_ENDIAN
3316 uint16_t reserved2 :6; 3316 u16 reserved2 :6;
3317 uint16_t fldesvalid :1; 3317 u16 fldesvalid :1;
3318 uint16_t flockdn :1; 3318 u16 flockdn :1;
3319 uint16_t flcdone :1; 3319 u16 flcdone :1;
3320 uint16_t flcerr :1; 3320 u16 flcerr :1;
3321 uint16_t dael :1; 3321 u16 dael :1;
3322 uint16_t berasesz :2; 3322 u16 berasesz :2;
3323 uint16_t flcinprog :1; 3323 u16 flcinprog :1;
3324 uint16_t reserved1 :2; 3324 u16 reserved1 :2;
3325#else 3325#else
3326 uint16_t flcdone :1; /* bit 0 Flash Cycle Done */ 3326 u16 flcdone :1; /* bit 0 Flash Cycle Done */
3327 uint16_t flcerr :1; /* bit 1 Flash Cycle Error */ 3327 u16 flcerr :1; /* bit 1 Flash Cycle Error */
3328 uint16_t dael :1; /* bit 2 Direct Access error Log */ 3328 u16 dael :1; /* bit 2 Direct Access error Log */
3329 uint16_t berasesz :2; /* bit 4:3 Block/Sector Erase Size */ 3329 u16 berasesz :2; /* bit 4:3 Block/Sector Erase Size */
3330 uint16_t flcinprog :1; /* bit 5 flash SPI cycle in Progress */ 3330 u16 flcinprog :1; /* bit 5 flash SPI cycle in Progress */
3331 uint16_t reserved1 :2; /* bit 13:6 Reserved */ 3331 u16 reserved1 :2; /* bit 13:6 Reserved */
3332 uint16_t reserved2 :6; /* bit 13:6 Reserved */ 3332 u16 reserved2 :6; /* bit 13:6 Reserved */
3333 uint16_t fldesvalid :1; /* bit 14 Flash Descriptor Valid */ 3333 u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
3334 uint16_t flockdn :1; /* bit 15 Flash Configuration Lock-Down */ 3334 u16 flockdn :1; /* bit 15 Flash Configuration Lock-Down */
3335#endif 3335#endif
3336 } hsf_status; 3336 } hsf_status;
3337 uint16_t regval; 3337 u16 regval;
3338}; 3338};
3339 3339
3340/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */ 3340/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
3341/* Offset 06h FLCTL */ 3341/* Offset 06h FLCTL */
3342union ich8_hws_flash_ctrl { 3342union ich8_hws_flash_ctrl {
3343 struct ich8_hsflctl { 3343 struct ich8_hsflctl {
3344#ifdef E1000_BIG_ENDIAN 3344#ifdef __BIG_ENDIAN
3345 uint16_t fldbcount :2; 3345 u16 fldbcount :2;
3346 uint16_t flockdn :6; 3346 u16 flockdn :6;
3347 uint16_t flcgo :1; 3347 u16 flcgo :1;
3348 uint16_t flcycle :2; 3348 u16 flcycle :2;
3349 uint16_t reserved :5; 3349 u16 reserved :5;
3350#else 3350#else
3351 uint16_t flcgo :1; /* 0 Flash Cycle Go */ 3351 u16 flcgo :1; /* 0 Flash Cycle Go */
3352 uint16_t flcycle :2; /* 2:1 Flash Cycle */ 3352 u16 flcycle :2; /* 2:1 Flash Cycle */
3353 uint16_t reserved :5; /* 7:3 Reserved */ 3353 u16 reserved :5; /* 7:3 Reserved */
3354 uint16_t fldbcount :2; /* 9:8 Flash Data Byte Count */ 3354 u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
3355 uint16_t flockdn :6; /* 15:10 Reserved */ 3355 u16 flockdn :6; /* 15:10 Reserved */
3356#endif 3356#endif
3357 } hsf_ctrl; 3357 } hsf_ctrl;
3358 uint16_t regval; 3358 u16 regval;
3359}; 3359};
3360 3360
3361/* ICH8 Flash Region Access Permissions */ 3361/* ICH8 Flash Region Access Permissions */
3362union ich8_hws_flash_regacc { 3362union ich8_hws_flash_regacc {
3363 struct ich8_flracc { 3363 struct ich8_flracc {
3364#ifdef E1000_BIG_ENDIAN 3364#ifdef __BIG_ENDIAN
3365 uint32_t gmwag :8; 3365 u32 gmwag :8;
3366 uint32_t gmrag :8; 3366 u32 gmrag :8;
3367 uint32_t grwa :8; 3367 u32 grwa :8;
3368 uint32_t grra :8; 3368 u32 grra :8;
3369#else 3369#else
3370 uint32_t grra :8; /* 0:7 GbE region Read Access */ 3370 u32 grra :8; /* 0:7 GbE region Read Access */
3371 uint32_t grwa :8; /* 8:15 GbE region Write Access */ 3371 u32 grwa :8; /* 8:15 GbE region Write Access */
3372 uint32_t gmrag :8; /* 23:16 GbE Master Read Access Grant */ 3372 u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
3373 uint32_t gmwag :8; /* 31:24 GbE Master Write Access Grant */ 3373 u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
3374#endif 3374#endif
3375 } hsf_flregacc; 3375 } hsf_flregacc;
3376 uint16_t regval; 3376 u16 regval;
3377}; 3377};
3378 3378
3379/* Miscellaneous PHY bit definitions. */ 3379/* Miscellaneous PHY bit definitions. */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 757d02f443a5..59579b1d8843 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -127,7 +127,7 @@ int e1000_up(struct e1000_adapter *adapter);
127void e1000_down(struct e1000_adapter *adapter); 127void e1000_down(struct e1000_adapter *adapter);
128void e1000_reinit_locked(struct e1000_adapter *adapter); 128void e1000_reinit_locked(struct e1000_adapter *adapter);
129void e1000_reset(struct e1000_adapter *adapter); 129void e1000_reset(struct e1000_adapter *adapter);
130int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx); 130int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
131int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); 131int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
132int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); 132int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
133void e1000_free_all_tx_resources(struct e1000_adapter *adapter); 133void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
@@ -203,8 +203,8 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
203 struct sk_buff *skb); 203 struct sk_buff *skb);
204 204
205static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp); 205static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
206static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); 206static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
207static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 207static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
208static void e1000_restore_vlan(struct e1000_adapter *adapter); 208static void e1000_restore_vlan(struct e1000_adapter *adapter);
209 209
210static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); 210static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
@@ -368,8 +368,8 @@ static void
368e1000_update_mng_vlan(struct e1000_adapter *adapter) 368e1000_update_mng_vlan(struct e1000_adapter *adapter)
369{ 369{
370 struct net_device *netdev = adapter->netdev; 370 struct net_device *netdev = adapter->netdev;
371 uint16_t vid = adapter->hw.mng_cookie.vlan_id; 371 u16 vid = adapter->hw.mng_cookie.vlan_id;
372 uint16_t old_vid = adapter->mng_vlan_id; 372 u16 old_vid = adapter->mng_vlan_id;
373 if (adapter->vlgrp) { 373 if (adapter->vlgrp) {
374 if (!vlan_group_get_device(adapter->vlgrp, vid)) { 374 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
375 if (adapter->hw.mng_cookie.status & 375 if (adapter->hw.mng_cookie.status &
@@ -379,7 +379,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
379 } else 379 } else
380 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 380 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
381 381
382 if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && 382 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
383 (vid != old_vid) && 383 (vid != old_vid) &&
384 !vlan_group_get_device(adapter->vlgrp, old_vid)) 384 !vlan_group_get_device(adapter->vlgrp, old_vid))
385 e1000_vlan_rx_kill_vid(netdev, old_vid); 385 e1000_vlan_rx_kill_vid(netdev, old_vid);
@@ -402,8 +402,8 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
402static void 402static void
403e1000_release_hw_control(struct e1000_adapter *adapter) 403e1000_release_hw_control(struct e1000_adapter *adapter)
404{ 404{
405 uint32_t ctrl_ext; 405 u32 ctrl_ext;
406 uint32_t swsm; 406 u32 swsm;
407 407
408 /* Let firmware taken over control of h/w */ 408 /* Let firmware taken over control of h/w */
409 switch (adapter->hw.mac_type) { 409 switch (adapter->hw.mac_type) {
@@ -439,8 +439,8 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
439static void 439static void
440e1000_get_hw_control(struct e1000_adapter *adapter) 440e1000_get_hw_control(struct e1000_adapter *adapter)
441{ 441{
442 uint32_t ctrl_ext; 442 u32 ctrl_ext;
443 uint32_t swsm; 443 u32 swsm;
444 444
445 /* Let firmware know the driver has taken over */ 445 /* Let firmware know the driver has taken over */
446 switch (adapter->hw.mac_type) { 446 switch (adapter->hw.mac_type) {
@@ -466,7 +466,7 @@ static void
466e1000_init_manageability(struct e1000_adapter *adapter) 466e1000_init_manageability(struct e1000_adapter *adapter)
467{ 467{
468 if (adapter->en_mng_pt) { 468 if (adapter->en_mng_pt) {
469 uint32_t manc = E1000_READ_REG(&adapter->hw, MANC); 469 u32 manc = E1000_READ_REG(&adapter->hw, MANC);
470 470
471 /* disable hardware interception of ARP */ 471 /* disable hardware interception of ARP */
472 manc &= ~(E1000_MANC_ARP_EN); 472 manc &= ~(E1000_MANC_ARP_EN);
@@ -475,7 +475,7 @@ e1000_init_manageability(struct e1000_adapter *adapter)
475 /* this will probably generate destination unreachable messages 475 /* this will probably generate destination unreachable messages
476 * from the host OS, but the packets will be handled on SMBUS */ 476 * from the host OS, but the packets will be handled on SMBUS */
477 if (adapter->hw.has_manc2h) { 477 if (adapter->hw.has_manc2h) {
478 uint32_t manc2h = E1000_READ_REG(&adapter->hw, MANC2H); 478 u32 manc2h = E1000_READ_REG(&adapter->hw, MANC2H);
479 479
480 manc |= E1000_MANC_EN_MNG2HOST; 480 manc |= E1000_MANC_EN_MNG2HOST;
481#define E1000_MNG2HOST_PORT_623 (1 << 5) 481#define E1000_MNG2HOST_PORT_623 (1 << 5)
@@ -493,7 +493,7 @@ static void
493e1000_release_manageability(struct e1000_adapter *adapter) 493e1000_release_manageability(struct e1000_adapter *adapter)
494{ 494{
495 if (adapter->en_mng_pt) { 495 if (adapter->en_mng_pt) {
496 uint32_t manc = E1000_READ_REG(&adapter->hw, MANC); 496 u32 manc = E1000_READ_REG(&adapter->hw, MANC);
497 497
498 /* re-enable hardware interception of ARP */ 498 /* re-enable hardware interception of ARP */
499 manc |= E1000_MANC_ARP_EN; 499 manc |= E1000_MANC_ARP_EN;
@@ -566,7 +566,7 @@ int e1000_up(struct e1000_adapter *adapter)
566 566
567void e1000_power_up_phy(struct e1000_adapter *adapter) 567void e1000_power_up_phy(struct e1000_adapter *adapter)
568{ 568{
569 uint16_t mii_reg = 0; 569 u16 mii_reg = 0;
570 570
571 /* Just clear the power down bit to wake the phy back up */ 571 /* Just clear the power down bit to wake the phy back up */
572 if (adapter->hw.media_type == e1000_media_type_copper) { 572 if (adapter->hw.media_type == e1000_media_type_copper) {
@@ -587,7 +587,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
587 * (c) SoL/IDER session is active */ 587 * (c) SoL/IDER session is active */
588 if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 && 588 if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
589 adapter->hw.media_type == e1000_media_type_copper) { 589 adapter->hw.media_type == e1000_media_type_copper) {
590 uint16_t mii_reg = 0; 590 u16 mii_reg = 0;
591 591
592 switch (adapter->hw.mac_type) { 592 switch (adapter->hw.mac_type) {
593 case e1000_82540: 593 case e1000_82540:
@@ -667,8 +667,8 @@ e1000_reinit_locked(struct e1000_adapter *adapter)
667void 667void
668e1000_reset(struct e1000_adapter *adapter) 668e1000_reset(struct e1000_adapter *adapter)
669{ 669{
670 uint32_t pba = 0, tx_space, min_tx_space, min_rx_space; 670 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
671 uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF; 671 u16 fc_high_water_mark = E1000_FC_HIGH_DIFF;
672 bool legacy_pba_adjust = false; 672 bool legacy_pba_adjust = false;
673 673
674 /* Repartition Pba for greater than 9k mtu 674 /* Repartition Pba for greater than 9k mtu
@@ -815,7 +815,7 @@ e1000_reset(struct e1000_adapter *adapter)
815 adapter->hw.mac_type <= e1000_82547_rev_2 && 815 adapter->hw.mac_type <= e1000_82547_rev_2 &&
816 adapter->hw.autoneg == 1 && 816 adapter->hw.autoneg == 1 &&
817 adapter->hw.autoneg_advertised == ADVERTISE_1000_FULL) { 817 adapter->hw.autoneg_advertised == ADVERTISE_1000_FULL) {
818 uint32_t ctrl = E1000_READ_REG(&adapter->hw, CTRL); 818 u32 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
819 /* clear phy power management bit if we are in gig only mode, 819 /* clear phy power management bit if we are in gig only mode,
820 * which if enabled will attempt negotiation to 100Mb, which 820 * which if enabled will attempt negotiation to 100Mb, which
821 * can cause a loss of link at power off or driver unload */ 821 * can cause a loss of link at power off or driver unload */
@@ -832,7 +832,7 @@ e1000_reset(struct e1000_adapter *adapter)
832 if (!adapter->smart_power_down && 832 if (!adapter->smart_power_down &&
833 (adapter->hw.mac_type == e1000_82571 || 833 (adapter->hw.mac_type == e1000_82571 ||
834 adapter->hw.mac_type == e1000_82572)) { 834 adapter->hw.mac_type == e1000_82572)) {
835 uint16_t phy_data = 0; 835 u16 phy_data = 0;
836 /* speed up time to link by disabling smart power down, ignore 836 /* speed up time to link by disabling smart power down, ignore
837 * the return value of this function because there is nothing 837 * the return value of this function because there is nothing
838 * different we would do if it failed */ 838 * different we would do if it failed */
@@ -926,8 +926,8 @@ e1000_probe(struct pci_dev *pdev,
926 static int cards_found = 0; 926 static int cards_found = 0;
927 static int global_quad_port_a = 0; /* global ksp3 port a indication */ 927 static int global_quad_port_a = 0; /* global ksp3 port a indication */
928 int i, err, pci_using_dac; 928 int i, err, pci_using_dac;
929 uint16_t eeprom_data = 0; 929 u16 eeprom_data = 0;
930 uint16_t eeprom_apme_mask = E1000_EEPROM_APME; 930 u16 eeprom_apme_mask = E1000_EEPROM_APME;
931 DECLARE_MAC_BUF(mac); 931 DECLARE_MAC_BUF(mac);
932 932
933 if ((err = pci_enable_device(pdev))) 933 if ((err = pci_enable_device(pdev)))
@@ -1702,10 +1702,10 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1702static void 1702static void
1703e1000_configure_tx(struct e1000_adapter *adapter) 1703e1000_configure_tx(struct e1000_adapter *adapter)
1704{ 1704{
1705 uint64_t tdba; 1705 u64 tdba;
1706 struct e1000_hw *hw = &adapter->hw; 1706 struct e1000_hw *hw = &adapter->hw;
1707 uint32_t tdlen, tctl, tipg, tarc; 1707 u32 tdlen, tctl, tipg, tarc;
1708 uint32_t ipgr1, ipgr2; 1708 u32 ipgr1, ipgr2;
1709 1709
1710 /* Setup the HW Tx Head and Tail descriptor pointers */ 1710 /* Setup the HW Tx Head and Tail descriptor pointers */
1711 1711
@@ -1947,10 +1947,10 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1947static void 1947static void
1948e1000_setup_rctl(struct e1000_adapter *adapter) 1948e1000_setup_rctl(struct e1000_adapter *adapter)
1949{ 1949{
1950 uint32_t rctl, rfctl; 1950 u32 rctl, rfctl;
1951 uint32_t psrctl = 0; 1951 u32 psrctl = 0;
1952#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT 1952#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1953 uint32_t pages = 0; 1953 u32 pages = 0;
1954#endif 1954#endif
1955 1955
1956 rctl = E1000_READ_REG(&adapter->hw, RCTL); 1956 rctl = E1000_READ_REG(&adapter->hw, RCTL);
@@ -2065,9 +2065,9 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
2065static void 2065static void
2066e1000_configure_rx(struct e1000_adapter *adapter) 2066e1000_configure_rx(struct e1000_adapter *adapter)
2067{ 2067{
2068 uint64_t rdba; 2068 u64 rdba;
2069 struct e1000_hw *hw = &adapter->hw; 2069 struct e1000_hw *hw = &adapter->hw;
2070 uint32_t rdlen, rctl, rxcsum, ctrl_ext; 2070 u32 rdlen, rctl, rxcsum, ctrl_ext;
2071 2071
2072 if (adapter->rx_ps_pages) { 2072 if (adapter->rx_ps_pages) {
2073 /* this is a 32 byte descriptor */ 2073 /* this is a 32 byte descriptor */
@@ -2387,7 +2387,7 @@ static void
2387e1000_enter_82542_rst(struct e1000_adapter *adapter) 2387e1000_enter_82542_rst(struct e1000_adapter *adapter)
2388{ 2388{
2389 struct net_device *netdev = adapter->netdev; 2389 struct net_device *netdev = adapter->netdev;
2390 uint32_t rctl; 2390 u32 rctl;
2391 2391
2392 e1000_pci_clear_mwi(&adapter->hw); 2392 e1000_pci_clear_mwi(&adapter->hw);
2393 2393
@@ -2405,7 +2405,7 @@ static void
2405e1000_leave_82542_rst(struct e1000_adapter *adapter) 2405e1000_leave_82542_rst(struct e1000_adapter *adapter)
2406{ 2406{
2407 struct net_device *netdev = adapter->netdev; 2407 struct net_device *netdev = adapter->netdev;
2408 uint32_t rctl; 2408 u32 rctl;
2409 2409
2410 rctl = E1000_READ_REG(&adapter->hw, RCTL); 2410 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2411 rctl &= ~E1000_RCTL_RST; 2411 rctl &= ~E1000_RCTL_RST;
@@ -2490,8 +2490,8 @@ e1000_set_rx_mode(struct net_device *netdev)
2490 struct e1000_hw *hw = &adapter->hw; 2490 struct e1000_hw *hw = &adapter->hw;
2491 struct dev_addr_list *uc_ptr; 2491 struct dev_addr_list *uc_ptr;
2492 struct dev_addr_list *mc_ptr; 2492 struct dev_addr_list *mc_ptr;
2493 uint32_t rctl; 2493 u32 rctl;
2494 uint32_t hash_value; 2494 u32 hash_value;
2495 int i, rar_entries = E1000_RAR_ENTRIES; 2495 int i, rar_entries = E1000_RAR_ENTRIES;
2496 int mta_reg_count = (hw->mac_type == e1000_ich8lan) ? 2496 int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
2497 E1000_NUM_MTA_REGISTERS_ICH8LAN : 2497 E1000_NUM_MTA_REGISTERS_ICH8LAN :
@@ -2595,7 +2595,7 @@ e1000_82547_tx_fifo_stall(unsigned long data)
2595{ 2595{
2596 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 2596 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2597 struct net_device *netdev = adapter->netdev; 2597 struct net_device *netdev = adapter->netdev;
2598 uint32_t tctl; 2598 u32 tctl;
2599 2599
2600 if (atomic_read(&adapter->tx_fifo_stall)) { 2600 if (atomic_read(&adapter->tx_fifo_stall)) {
2601 if ((E1000_READ_REG(&adapter->hw, TDT) == 2601 if ((E1000_READ_REG(&adapter->hw, TDT) ==
@@ -2637,8 +2637,8 @@ e1000_watchdog(unsigned long data)
2637 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 2637 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2638 struct net_device *netdev = adapter->netdev; 2638 struct net_device *netdev = adapter->netdev;
2639 struct e1000_tx_ring *txdr = adapter->tx_ring; 2639 struct e1000_tx_ring *txdr = adapter->tx_ring;
2640 uint32_t link, tctl; 2640 u32 link, tctl;
2641 int32_t ret_val; 2641 s32 ret_val;
2642 2642
2643 ret_val = e1000_check_for_link(&adapter->hw); 2643 ret_val = e1000_check_for_link(&adapter->hw);
2644 if ((ret_val == E1000_ERR_PHY) && 2644 if ((ret_val == E1000_ERR_PHY) &&
@@ -2663,7 +2663,7 @@ e1000_watchdog(unsigned long data)
2663 2663
2664 if (link) { 2664 if (link) {
2665 if (!netif_carrier_ok(netdev)) { 2665 if (!netif_carrier_ok(netdev)) {
2666 uint32_t ctrl; 2666 u32 ctrl;
2667 bool txb2b = true; 2667 bool txb2b = true;
2668 e1000_get_speed_and_duplex(&adapter->hw, 2668 e1000_get_speed_and_duplex(&adapter->hw,
2669 &adapter->link_speed, 2669 &adapter->link_speed,
@@ -2700,7 +2700,7 @@ e1000_watchdog(unsigned long data)
2700 if ((adapter->hw.mac_type == e1000_82571 || 2700 if ((adapter->hw.mac_type == e1000_82571 ||
2701 adapter->hw.mac_type == e1000_82572) && 2701 adapter->hw.mac_type == e1000_82572) &&
2702 !txb2b) { 2702 !txb2b) {
2703 uint32_t tarc0; 2703 u32 tarc0;
2704 tarc0 = E1000_READ_REG(&adapter->hw, TARC0); 2704 tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
2705 tarc0 &= ~(1 << 21); 2705 tarc0 &= ~(1 << 21);
2706 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0); 2706 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
@@ -2742,7 +2742,7 @@ e1000_watchdog(unsigned long data)
2742 /* make sure the receive unit is started */ 2742 /* make sure the receive unit is started */
2743 if (adapter->hw.rx_needs_kicking) { 2743 if (adapter->hw.rx_needs_kicking) {
2744 struct e1000_hw *hw = &adapter->hw; 2744 struct e1000_hw *hw = &adapter->hw;
2745 uint32_t rctl = E1000_READ_REG(hw, RCTL); 2745 u32 rctl = E1000_READ_REG(hw, RCTL);
2746 E1000_WRITE_REG(hw, RCTL, rctl | E1000_RCTL_EN); 2746 E1000_WRITE_REG(hw, RCTL, rctl | E1000_RCTL_EN);
2747 } 2747 }
2748 } 2748 }
@@ -2832,7 +2832,7 @@ enum latency_range {
2832 * @bytes: the number of bytes during this measurement interval 2832 * @bytes: the number of bytes during this measurement interval
2833 **/ 2833 **/
2834static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 2834static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2835 uint16_t itr_setting, 2835 u16 itr_setting,
2836 int packets, 2836 int packets,
2837 int bytes) 2837 int bytes)
2838{ 2838{
@@ -2884,8 +2884,8 @@ update_itr_done:
2884static void e1000_set_itr(struct e1000_adapter *adapter) 2884static void e1000_set_itr(struct e1000_adapter *adapter)
2885{ 2885{
2886 struct e1000_hw *hw = &adapter->hw; 2886 struct e1000_hw *hw = &adapter->hw;
2887 uint16_t current_itr; 2887 u16 current_itr;
2888 uint32_t new_itr = adapter->itr; 2888 u32 new_itr = adapter->itr;
2889 2889
2890 if (unlikely(hw->mac_type < e1000_82540)) 2890 if (unlikely(hw->mac_type < e1000_82540))
2891 return; 2891 return;
@@ -2959,9 +2959,9 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2959 struct e1000_context_desc *context_desc; 2959 struct e1000_context_desc *context_desc;
2960 struct e1000_buffer *buffer_info; 2960 struct e1000_buffer *buffer_info;
2961 unsigned int i; 2961 unsigned int i;
2962 uint32_t cmd_length = 0; 2962 u32 cmd_length = 0;
2963 uint16_t ipcse = 0, tucse, mss; 2963 u16 ipcse = 0, tucse, mss;
2964 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 2964 u8 ipcss, ipcso, tucss, tucso, hdr_len;
2965 int err; 2965 int err;
2966 2966
2967 if (skb_is_gso(skb)) { 2967 if (skb_is_gso(skb)) {
@@ -3032,7 +3032,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3032 struct e1000_context_desc *context_desc; 3032 struct e1000_context_desc *context_desc;
3033 struct e1000_buffer *buffer_info; 3033 struct e1000_buffer *buffer_info;
3034 unsigned int i; 3034 unsigned int i;
3035 uint8_t css; 3035 u8 css;
3036 3036
3037 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 3037 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
3038 css = skb_transport_offset(skb); 3038 css = skb_transport_offset(skb);
@@ -3177,7 +3177,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3177{ 3177{
3178 struct e1000_tx_desc *tx_desc = NULL; 3178 struct e1000_tx_desc *tx_desc = NULL;
3179 struct e1000_buffer *buffer_info; 3179 struct e1000_buffer *buffer_info;
3180 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 3180 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3181 unsigned int i; 3181 unsigned int i;
3182 3182
3183 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { 3183 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
@@ -3241,8 +3241,8 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3241static int 3241static int
3242e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb) 3242e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
3243{ 3243{
3244 uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; 3244 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3245 uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR; 3245 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3246 3246
3247 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); 3247 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3248 3248
@@ -3269,7 +3269,7 @@ static int
3269e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) 3269e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
3270{ 3270{
3271 struct e1000_hw *hw = &adapter->hw; 3271 struct e1000_hw *hw = &adapter->hw;
3272 uint16_t length, offset; 3272 u16 length, offset;
3273 if (vlan_tx_tag_present(skb)) { 3273 if (vlan_tx_tag_present(skb)) {
3274 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 3274 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
3275 ( adapter->hw.mng_cookie.status & 3275 ( adapter->hw.mng_cookie.status &
@@ -3280,17 +3280,17 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
3280 struct ethhdr *eth = (struct ethhdr *) skb->data; 3280 struct ethhdr *eth = (struct ethhdr *) skb->data;
3281 if ((htons(ETH_P_IP) == eth->h_proto)) { 3281 if ((htons(ETH_P_IP) == eth->h_proto)) {
3282 const struct iphdr *ip = 3282 const struct iphdr *ip =
3283 (struct iphdr *)((uint8_t *)skb->data+14); 3283 (struct iphdr *)((u8 *)skb->data+14);
3284 if (IPPROTO_UDP == ip->protocol) { 3284 if (IPPROTO_UDP == ip->protocol) {
3285 struct udphdr *udp = 3285 struct udphdr *udp =
3286 (struct udphdr *)((uint8_t *)ip + 3286 (struct udphdr *)((u8 *)ip +
3287 (ip->ihl << 2)); 3287 (ip->ihl << 2));
3288 if (ntohs(udp->dest) == 67) { 3288 if (ntohs(udp->dest) == 67) {
3289 offset = (uint8_t *)udp + 8 - skb->data; 3289 offset = (u8 *)udp + 8 - skb->data;
3290 length = skb->len - offset; 3290 length = skb->len - offset;
3291 3291
3292 return e1000_mng_write_dhcp_info(hw, 3292 return e1000_mng_write_dhcp_info(hw,
3293 (uint8_t *)udp + 8, 3293 (u8 *)udp + 8,
3294 length); 3294 length);
3295 } 3295 }
3296 } 3296 }
@@ -3370,7 +3370,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3370 * overrun the FIFO, adjust the max buffer len if mss 3370 * overrun the FIFO, adjust the max buffer len if mss
3371 * drops. */ 3371 * drops. */
3372 if (mss) { 3372 if (mss) {
3373 uint8_t hdr_len; 3373 u8 hdr_len;
3374 max_per_txd = min(mss << 2, max_per_txd); 3374 max_per_txd = min(mss << 2, max_per_txd);
3375 max_txd_pwr = fls(max_per_txd) - 1; 3375 max_txd_pwr = fls(max_per_txd) - 1;
3376 3376
@@ -3557,7 +3557,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3557{ 3557{
3558 struct e1000_adapter *adapter = netdev_priv(netdev); 3558 struct e1000_adapter *adapter = netdev_priv(netdev);
3559 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 3559 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3560 uint16_t eeprom_data = 0; 3560 u16 eeprom_data = 0;
3561 3561
3562 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3562 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3563 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3563 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
@@ -3652,7 +3652,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
3652 struct e1000_hw *hw = &adapter->hw; 3652 struct e1000_hw *hw = &adapter->hw;
3653 struct pci_dev *pdev = adapter->pdev; 3653 struct pci_dev *pdev = adapter->pdev;
3654 unsigned long flags; 3654 unsigned long flags;
3655 uint16_t phy_tmp; 3655 u16 phy_tmp;
3656 3656
3657#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3657#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3658 3658
@@ -3829,7 +3829,7 @@ e1000_intr_msi(int irq, void *data)
3829#ifndef CONFIG_E1000_NAPI 3829#ifndef CONFIG_E1000_NAPI
3830 int i; 3830 int i;
3831#endif 3831#endif
3832 uint32_t icr = E1000_READ_REG(hw, ICR); 3832 u32 icr = E1000_READ_REG(hw, ICR);
3833 3833
3834 /* in NAPI mode read ICR disables interrupts using IAM */ 3834 /* in NAPI mode read ICR disables interrupts using IAM */
3835 3835
@@ -3841,7 +3841,7 @@ e1000_intr_msi(int irq, void *data)
3841 if (netif_carrier_ok(netdev) && 3841 if (netif_carrier_ok(netdev) &&
3842 (adapter->hw.mac_type == e1000_80003es2lan)) { 3842 (adapter->hw.mac_type == e1000_80003es2lan)) {
3843 /* disable receives */ 3843 /* disable receives */
3844 uint32_t rctl = E1000_READ_REG(hw, RCTL); 3844 u32 rctl = E1000_READ_REG(hw, RCTL);
3845 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN); 3845 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
3846 } 3846 }
3847 /* guard against interrupt when we're going down */ 3847 /* guard against interrupt when we're going down */
@@ -3888,7 +3888,7 @@ e1000_intr(int irq, void *data)
3888 struct net_device *netdev = data; 3888 struct net_device *netdev = data;
3889 struct e1000_adapter *adapter = netdev_priv(netdev); 3889 struct e1000_adapter *adapter = netdev_priv(netdev);
3890 struct e1000_hw *hw = &adapter->hw; 3890 struct e1000_hw *hw = &adapter->hw;
3891 uint32_t rctl, icr = E1000_READ_REG(hw, ICR); 3891 u32 rctl, icr = E1000_READ_REG(hw, ICR);
3892#ifndef CONFIG_E1000_NAPI 3892#ifndef CONFIG_E1000_NAPI
3893 int i; 3893 int i;
3894#endif 3894#endif
@@ -4139,11 +4139,11 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4139 4139
4140static void 4140static void
4141e1000_rx_checksum(struct e1000_adapter *adapter, 4141e1000_rx_checksum(struct e1000_adapter *adapter,
4142 uint32_t status_err, uint32_t csum, 4142 u32 status_err, u32 csum,
4143 struct sk_buff *skb) 4143 struct sk_buff *skb)
4144{ 4144{
4145 uint16_t status = (uint16_t)status_err; 4145 u16 status = (u16)status_err;
4146 uint8_t errors = (uint8_t)(status_err >> 24); 4146 u8 errors = (u8)(status_err >> 24);
4147 skb->ip_summed = CHECKSUM_NONE; 4147 skb->ip_summed = CHECKSUM_NONE;
4148 4148
4149 /* 82543 or newer only */ 4149 /* 82543 or newer only */
@@ -4200,8 +4200,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
4200 struct e1000_rx_desc *rx_desc, *next_rxd; 4200 struct e1000_rx_desc *rx_desc, *next_rxd;
4201 struct e1000_buffer *buffer_info, *next_buffer; 4201 struct e1000_buffer *buffer_info, *next_buffer;
4202 unsigned long flags; 4202 unsigned long flags;
4203 uint32_t length; 4203 u32 length;
4204 uint8_t last_byte; 4204 u8 last_byte;
4205 unsigned int i; 4205 unsigned int i;
4206 int cleaned_count = 0; 4206 int cleaned_count = 0;
4207 bool cleaned = false; 4207 bool cleaned = false;
@@ -4301,8 +4301,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
4301 4301
4302 /* Receive Checksum Offload */ 4302 /* Receive Checksum Offload */
4303 e1000_rx_checksum(adapter, 4303 e1000_rx_checksum(adapter,
4304 (uint32_t)(status) | 4304 (u32)(status) |
4305 ((uint32_t)(rx_desc->errors) << 24), 4305 ((u32)(rx_desc->errors) << 24),
4306 le16_to_cpu(rx_desc->csum), skb); 4306 le16_to_cpu(rx_desc->csum), skb);
4307 4307
4308 skb->protocol = eth_type_trans(skb, netdev); 4308 skb->protocol = eth_type_trans(skb, netdev);
@@ -4376,7 +4376,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4376 struct e1000_ps_page_dma *ps_page_dma; 4376 struct e1000_ps_page_dma *ps_page_dma;
4377 struct sk_buff *skb; 4377 struct sk_buff *skb;
4378 unsigned int i, j; 4378 unsigned int i, j;
4379 uint32_t length, staterr; 4379 u32 length, staterr;
4380 int cleaned_count = 0; 4380 int cleaned_count = 0;
4381 bool cleaned = false; 4381 bool cleaned = false;
4382 unsigned int total_rx_bytes=0, total_rx_packets=0; 4382 unsigned int total_rx_bytes=0, total_rx_packets=0;
@@ -4759,8 +4759,8 @@ no_buffers:
4759static void 4759static void
4760e1000_smartspeed(struct e1000_adapter *adapter) 4760e1000_smartspeed(struct e1000_adapter *adapter)
4761{ 4761{
4762 uint16_t phy_status; 4762 u16 phy_status;
4763 uint16_t phy_ctrl; 4763 u16 phy_ctrl;
4764 4764
4765 if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg || 4765 if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
4766 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) 4766 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
@@ -4839,8 +4839,8 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4839 struct e1000_adapter *adapter = netdev_priv(netdev); 4839 struct e1000_adapter *adapter = netdev_priv(netdev);
4840 struct mii_ioctl_data *data = if_mii(ifr); 4840 struct mii_ioctl_data *data = if_mii(ifr);
4841 int retval; 4841 int retval;
4842 uint16_t mii_reg; 4842 u16 mii_reg;
4843 uint16_t spddplx; 4843 u16 spddplx;
4844 unsigned long flags; 4844 unsigned long flags;
4845 4845
4846 if (adapter->hw.media_type != e1000_media_type_copper) 4846 if (adapter->hw.media_type != e1000_media_type_copper)
@@ -4959,11 +4959,11 @@ e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4959 pcix_set_mmrbc(adapter->pdev, mmrbc); 4959 pcix_set_mmrbc(adapter->pdev, mmrbc);
4960} 4960}
4961 4961
4962int32_t 4962s32
4963e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) 4963e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
4964{ 4964{
4965 struct e1000_adapter *adapter = hw->back; 4965 struct e1000_adapter *adapter = hw->back;
4966 uint16_t cap_offset; 4966 u16 cap_offset;
4967 4967
4968 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); 4968 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
4969 if (!cap_offset) 4969 if (!cap_offset)
@@ -4975,7 +4975,7 @@ e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
4975} 4975}
4976 4976
4977void 4977void
4978e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value) 4978e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4979{ 4979{
4980 outl(value, port); 4980 outl(value, port);
4981} 4981}
@@ -4984,7 +4984,7 @@ static void
4984e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) 4984e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
4985{ 4985{
4986 struct e1000_adapter *adapter = netdev_priv(netdev); 4986 struct e1000_adapter *adapter = netdev_priv(netdev);
4987 uint32_t ctrl, rctl; 4987 u32 ctrl, rctl;
4988 4988
4989 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4989 if (!test_bit(__E1000_DOWN, &adapter->flags))
4990 e1000_irq_disable(adapter); 4990 e1000_irq_disable(adapter);
@@ -5016,7 +5016,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
5016 rctl &= ~E1000_RCTL_VFE; 5016 rctl &= ~E1000_RCTL_VFE;
5017 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 5017 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
5018 if (adapter->mng_vlan_id != 5018 if (adapter->mng_vlan_id !=
5019 (uint16_t)E1000_MNG_VLAN_NONE) { 5019 (u16)E1000_MNG_VLAN_NONE) {
5020 e1000_vlan_rx_kill_vid(netdev, 5020 e1000_vlan_rx_kill_vid(netdev,
5021 adapter->mng_vlan_id); 5021 adapter->mng_vlan_id);
5022 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 5022 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -5029,10 +5029,10 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
5029} 5029}
5030 5030
5031static void 5031static void
5032e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid) 5032e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
5033{ 5033{
5034 struct e1000_adapter *adapter = netdev_priv(netdev); 5034 struct e1000_adapter *adapter = netdev_priv(netdev);
5035 uint32_t vfta, index; 5035 u32 vfta, index;
5036 5036
5037 if ((adapter->hw.mng_cookie.status & 5037 if ((adapter->hw.mng_cookie.status &
5038 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 5038 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
@@ -5046,10 +5046,10 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
5046} 5046}
5047 5047
5048static void 5048static void
5049e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid) 5049e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
5050{ 5050{
5051 struct e1000_adapter *adapter = netdev_priv(netdev); 5051 struct e1000_adapter *adapter = netdev_priv(netdev);
5052 uint32_t vfta, index; 5052 u32 vfta, index;
5053 5053
5054 if (!test_bit(__E1000_DOWN, &adapter->flags)) 5054 if (!test_bit(__E1000_DOWN, &adapter->flags))
5055 e1000_irq_disable(adapter); 5055 e1000_irq_disable(adapter);
@@ -5078,7 +5078,7 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
5078 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); 5078 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
5079 5079
5080 if (adapter->vlgrp) { 5080 if (adapter->vlgrp) {
5081 uint16_t vid; 5081 u16 vid;
5082 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 5082 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
5083 if (!vlan_group_get_device(adapter->vlgrp, vid)) 5083 if (!vlan_group_get_device(adapter->vlgrp, vid))
5084 continue; 5084 continue;
@@ -5088,7 +5088,7 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
5088} 5088}
5089 5089
5090int 5090int
5091e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx) 5091e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
5092{ 5092{
5093 adapter->hw.autoneg = 0; 5093 adapter->hw.autoneg = 0;
5094 5094
@@ -5129,8 +5129,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5129{ 5129{
5130 struct net_device *netdev = pci_get_drvdata(pdev); 5130 struct net_device *netdev = pci_get_drvdata(pdev);
5131 struct e1000_adapter *adapter = netdev_priv(netdev); 5131 struct e1000_adapter *adapter = netdev_priv(netdev);
5132 uint32_t ctrl, ctrl_ext, rctl, status; 5132 u32 ctrl, ctrl_ext, rctl, status;
5133 uint32_t wufc = adapter->wol; 5133 u32 wufc = adapter->wol;
5134#ifdef CONFIG_PM 5134#ifdef CONFIG_PM
5135 int retval = 0; 5135 int retval = 0;
5136#endif 5136#endif
@@ -5227,7 +5227,7 @@ e1000_resume(struct pci_dev *pdev)
5227{ 5227{
5228 struct net_device *netdev = pci_get_drvdata(pdev); 5228 struct net_device *netdev = pci_get_drvdata(pdev);
5229 struct e1000_adapter *adapter = netdev_priv(netdev); 5229 struct e1000_adapter *adapter = netdev_priv(netdev);
5230 uint32_t err; 5230 u32 err;
5231 5231
5232 pci_set_power_state(pdev, PCI_D0); 5232 pci_set_power_state(pdev, PCI_D0);
5233 pci_restore_state(pdev); 5233 pci_restore_state(pdev);
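
The e1000 hunks above are a mechanical switch from the userspace-style fixed-width
integer names to the kernel-native types from <linux/types.h>. A minimal sketch of
the mapping, using identifiers taken from the hunks above:

	#include <linux/types.h>

	u8  ipcss;	/* was uint8_t  */
	u16 phy_data;	/* was uint16_t */
	u32 rctl;	/* was uint32_t */
	u64 tdba;	/* was uint64_t */
	s32 ret_val;	/* was int32_t  */
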
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index f7e1619b974e..01c88664bad3 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -171,6 +171,10 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
171 * for setting word_size. 171 * for setting word_size.
172 */ 172 */
173 size += NVM_WORD_SIZE_BASE_SHIFT; 173 size += NVM_WORD_SIZE_BASE_SHIFT;
174
175 /* EEPROM access above 16k is unsupported */
176 if (size > 14)
177 size = 14;
174 nvm->word_size = 1 << size; 178 nvm->word_size = 1 << size;
175 break; 179 break;
176 } 180 }
@@ -244,7 +248,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
244 return 0; 248 return 0;
245} 249}
246 250
247static s32 e1000_get_invariants_82571(struct e1000_adapter *adapter) 251static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
248{ 252{
249 struct e1000_hw *hw = &adapter->hw; 253 struct e1000_hw *hw = &adapter->hw;
250 static int global_quad_port_a; /* global port a indication */ 254 static int global_quad_port_a; /* global port a indication */
@@ -832,19 +836,19 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
832 ret_val = e1000_setup_link_82571(hw); 836 ret_val = e1000_setup_link_82571(hw);
833 837
834 /* Set the transmit descriptor write-back policy */ 838 /* Set the transmit descriptor write-back policy */
835 reg_data = er32(TXDCTL); 839 reg_data = er32(TXDCTL(0));
836 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 840 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
837 E1000_TXDCTL_FULL_TX_DESC_WB | 841 E1000_TXDCTL_FULL_TX_DESC_WB |
838 E1000_TXDCTL_COUNT_DESC; 842 E1000_TXDCTL_COUNT_DESC;
839 ew32(TXDCTL, reg_data); 843 ew32(TXDCTL(0), reg_data);
840 844
841 /* ...for both queues. */ 845 /* ...for both queues. */
842 if (mac->type != e1000_82573) { 846 if (mac->type != e1000_82573) {
843 reg_data = er32(TXDCTL1); 847 reg_data = er32(TXDCTL(1));
844 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 848 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
845 E1000_TXDCTL_FULL_TX_DESC_WB | 849 E1000_TXDCTL_FULL_TX_DESC_WB |
846 E1000_TXDCTL_COUNT_DESC; 850 E1000_TXDCTL_COUNT_DESC;
847 ew32(TXDCTL1, reg_data); 851 ew32(TXDCTL(1), reg_data);
848 } else { 852 } else {
849 e1000e_enable_tx_pkt_filtering(hw); 853 e1000e_enable_tx_pkt_filtering(hw);
850 reg_data = er32(GCR); 854 reg_data = er32(GCR);
@@ -874,17 +878,17 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
874 u32 reg; 878 u32 reg;
875 879
876 /* Transmit Descriptor Control 0 */ 880 /* Transmit Descriptor Control 0 */
877 reg = er32(TXDCTL); 881 reg = er32(TXDCTL(0));
878 reg |= (1 << 22); 882 reg |= (1 << 22);
879 ew32(TXDCTL, reg); 883 ew32(TXDCTL(0), reg);
880 884
881 /* Transmit Descriptor Control 1 */ 885 /* Transmit Descriptor Control 1 */
882 reg = er32(TXDCTL1); 886 reg = er32(TXDCTL(1));
883 reg |= (1 << 22); 887 reg |= (1 << 22);
884 ew32(TXDCTL1, reg); 888 ew32(TXDCTL(1), reg);
885 889
886 /* Transmit Arbitration Control 0 */ 890 /* Transmit Arbitration Control 0 */
887 reg = er32(TARC0); 891 reg = er32(TARC(0));
888 reg &= ~(0xF << 27); /* 30:27 */ 892 reg &= ~(0xF << 27); /* 30:27 */
889 switch (hw->mac.type) { 893 switch (hw->mac.type) {
890 case e1000_82571: 894 case e1000_82571:
@@ -894,10 +898,10 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
894 default: 898 default:
895 break; 899 break;
896 } 900 }
897 ew32(TARC0, reg); 901 ew32(TARC(0), reg);
898 902
899 /* Transmit Arbitration Control 1 */ 903 /* Transmit Arbitration Control 1 */
900 reg = er32(TARC1); 904 reg = er32(TARC(1));
901 switch (hw->mac.type) { 905 switch (hw->mac.type) {
902 case e1000_82571: 906 case e1000_82571:
903 case e1000_82572: 907 case e1000_82572:
@@ -907,7 +911,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
907 reg &= ~(1 << 28); 911 reg &= ~(1 << 28);
908 else 912 else
909 reg |= (1 << 28); 913 reg |= (1 << 28);
910 ew32(TARC1, reg); 914 ew32(TARC(1), reg);
911 break; 915 break;
912 default: 916 default:
913 break; 917 break;
@@ -1333,7 +1337,7 @@ struct e1000_info e1000_82571_info = {
1333 | FLAG_TARC_SPEED_MODE_BIT /* errata */ 1337 | FLAG_TARC_SPEED_MODE_BIT /* errata */
1334 | FLAG_APME_CHECK_PORT_B, 1338 | FLAG_APME_CHECK_PORT_B,
1335 .pba = 38, 1339 .pba = 38,
1336 .get_invariants = e1000_get_invariants_82571, 1340 .get_variants = e1000_get_variants_82571,
1337 .mac_ops = &e82571_mac_ops, 1341 .mac_ops = &e82571_mac_ops,
1338 .phy_ops = &e82_phy_ops_igp, 1342 .phy_ops = &e82_phy_ops_igp,
1339 .nvm_ops = &e82571_nvm_ops, 1343 .nvm_ops = &e82571_nvm_ops,
@@ -1351,7 +1355,7 @@ struct e1000_info e1000_82572_info = {
1351 | FLAG_HAS_STATS_ICR_ICT 1355 | FLAG_HAS_STATS_ICR_ICT
1352 | FLAG_TARC_SPEED_MODE_BIT, /* errata */ 1356 | FLAG_TARC_SPEED_MODE_BIT, /* errata */
1353 .pba = 38, 1357 .pba = 38,
1354 .get_invariants = e1000_get_invariants_82571, 1358 .get_variants = e1000_get_variants_82571,
1355 .mac_ops = &e82571_mac_ops, 1359 .mac_ops = &e82571_mac_ops,
1356 .phy_ops = &e82_phy_ops_igp, 1360 .phy_ops = &e82_phy_ops_igp,
1357 .nvm_ops = &e82571_nvm_ops, 1361 .nvm_ops = &e82571_nvm_ops,
@@ -1371,7 +1375,7 @@ struct e1000_info e1000_82573_info = {
1371 | FLAG_HAS_ERT 1375 | FLAG_HAS_ERT
1372 | FLAG_HAS_SWSM_ON_LOAD, 1376 | FLAG_HAS_SWSM_ON_LOAD,
1373 .pba = 20, 1377 .pba = 20,
1374 .get_invariants = e1000_get_invariants_82571, 1378 .get_variants = e1000_get_variants_82571,
1375 .mac_ops = &e82571_mac_ops, 1379 .mac_ops = &e82571_mac_ops,
1376 .phy_ops = &e82_phy_ops_m88, 1380 .phy_ops = &e82_phy_ops_m88,
1377 .nvm_ops = &e82571_nvm_ops, 1381 .nvm_ops = &e82571_nvm_ops,
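
The NVM sizing cap added near the top of this file's diff (and repeated for
es2lan further down) bounds the computed word count; a quick worked check of
the arithmetic, assuming only what the hunk itself shows:

	/* size is capped at 14, so the largest reported EEPROM is   */
	/* nvm->word_size = 1 << 14 = 16384 words, i.e. the 16k limit */
	/* named in the "EEPROM access above 16k is unsupported" note */
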
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index b941a6b509c4..5a89dff52264 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -271,7 +271,7 @@ struct e1000_info {
271 enum e1000_mac_type mac; 271 enum e1000_mac_type mac;
272 unsigned int flags; 272 unsigned int flags;
273 u32 pba; 273 u32 pba;
274 s32 (*get_invariants)(struct e1000_adapter *); 274 s32 (*get_variants)(struct e1000_adapter *);
275 struct e1000_mac_operations *mac_ops; 275 struct e1000_mac_operations *mac_ops;
276 struct e1000_phy_operations *phy_ops; 276 struct e1000_phy_operations *phy_ops;
277 struct e1000_nvm_operations *nvm_ops; 277 struct e1000_nvm_operations *nvm_ops;
@@ -357,7 +357,7 @@ extern struct e1000_info e1000_ich8_info;
357extern struct e1000_info e1000_ich9_info; 357extern struct e1000_info e1000_ich9_info;
358extern struct e1000_info e1000_es2_info; 358extern struct e1000_info e1000_es2_info;
359 359
360extern s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num); 360extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
361 361
362extern s32 e1000e_commit_phy(struct e1000_hw *hw); 362extern s32 e1000e_commit_phy(struct e1000_hw *hw);
363 363
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index e3f4aeefeae2..d59a99ae44be 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -178,6 +178,10 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
178 * for setting word_size. 178 * for setting word_size.
179 */ 179 */
180 size += NVM_WORD_SIZE_BASE_SHIFT; 180 size += NVM_WORD_SIZE_BASE_SHIFT;
181
182 /* EEPROM access above 16k is unsupported */
183 if (size > 14)
184 size = 14;
181 nvm->word_size = 1 << size; 185 nvm->word_size = 1 << size;
182 186
183 return 0; 187 return 0;
@@ -234,7 +238,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
234 return 0; 238 return 0;
235} 239}
236 240
237static s32 e1000_get_invariants_80003es2lan(struct e1000_adapter *adapter) 241static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter)
238{ 242{
239 struct e1000_hw *hw = &adapter->hw; 243 struct e1000_hw *hw = &adapter->hw;
240 s32 rc; 244 s32 rc;
@@ -788,16 +792,16 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
788 ret_val = e1000e_setup_link(hw); 792 ret_val = e1000e_setup_link(hw);
789 793
790 /* Set the transmit descriptor write-back policy */ 794 /* Set the transmit descriptor write-back policy */
791 reg_data = er32(TXDCTL); 795 reg_data = er32(TXDCTL(0));
792 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 796 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
793 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; 797 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
794 ew32(TXDCTL, reg_data); 798 ew32(TXDCTL(0), reg_data);
795 799
796 /* ...for both queues. */ 800 /* ...for both queues. */
797 reg_data = er32(TXDCTL1); 801 reg_data = er32(TXDCTL(1));
798 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 802 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
799 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; 803 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
800 ew32(TXDCTL1, reg_data); 804 ew32(TXDCTL(1), reg_data);
801 805
802 /* Enable retransmit on late collisions */ 806 /* Enable retransmit on late collisions */
803 reg_data = er32(TCTL); 807 reg_data = er32(TCTL);
@@ -842,29 +846,29 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
842 u32 reg; 846 u32 reg;
843 847
844 /* Transmit Descriptor Control 0 */ 848 /* Transmit Descriptor Control 0 */
845 reg = er32(TXDCTL); 849 reg = er32(TXDCTL(0));
846 reg |= (1 << 22); 850 reg |= (1 << 22);
847 ew32(TXDCTL, reg); 851 ew32(TXDCTL(0), reg);
848 852
849 /* Transmit Descriptor Control 1 */ 853 /* Transmit Descriptor Control 1 */
850 reg = er32(TXDCTL1); 854 reg = er32(TXDCTL(1));
851 reg |= (1 << 22); 855 reg |= (1 << 22);
852 ew32(TXDCTL1, reg); 856 ew32(TXDCTL(1), reg);
853 857
854 /* Transmit Arbitration Control 0 */ 858 /* Transmit Arbitration Control 0 */
855 reg = er32(TARC0); 859 reg = er32(TARC(0));
856 reg &= ~(0xF << 27); /* 30:27 */ 860 reg &= ~(0xF << 27); /* 30:27 */
857 if (hw->phy.media_type != e1000_media_type_copper) 861 if (hw->phy.media_type != e1000_media_type_copper)
858 reg &= ~(1 << 20); 862 reg &= ~(1 << 20);
859 ew32(TARC0, reg); 863 ew32(TARC(0), reg);
860 864
861 /* Transmit Arbitration Control 1 */ 865 /* Transmit Arbitration Control 1 */
862 reg = er32(TARC1); 866 reg = er32(TARC(1));
863 if (er32(TCTL) & E1000_TCTL_MULR) 867 if (er32(TCTL) & E1000_TCTL_MULR)
864 reg &= ~(1 << 28); 868 reg &= ~(1 << 28);
865 else 869 else
866 reg |= (1 << 28); 870 reg |= (1 << 28);
867 ew32(TARC1, reg); 871 ew32(TARC(1), reg);
868} 872}
869 873
870/** 874/**
@@ -1239,7 +1243,7 @@ struct e1000_info e1000_es2_info = {
1239 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ 1243 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
1240 | FLAG_TIPG_MEDIUM_FOR_80003ESLAN, 1244 | FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
1241 .pba = 38, 1245 .pba = 38,
1242 .get_invariants = e1000_get_invariants_80003es2lan, 1246 .get_variants = e1000_get_variants_80003es2lan,
1243 .mac_ops = &es2_mac_ops, 1247 .mac_ops = &es2_mac_ops,
1244 .phy_ops = &es2_phy_ops, 1248 .phy_ops = &es2_phy_ops,
1245 .nvm_ops = &es2_nvm_ops, 1249 .nvm_ops = &es2_nvm_ops,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 4ae00567bba6..6d1b257bbda6 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -641,10 +641,17 @@ static int e1000_set_ringparam(struct net_device *netdev,
641 tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 641 tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
642 if (!tx_ring) 642 if (!tx_ring)
643 goto err_alloc_tx; 643 goto err_alloc_tx;
644 /*
645 * use a memcpy to save any previously configured
646 * items like napi structs from having to be
647 * reinitialized
648 */
649 memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));
644 650
645 rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 651 rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
646 if (!rx_ring) 652 if (!rx_ring)
647 goto err_alloc_rx; 653 goto err_alloc_rx;
654 memcpy(rx_ring, rx_old, sizeof(struct e1000_ring));
648 655
649 adapter->tx_ring = tx_ring; 656 adapter->tx_ring = tx_ring;
650 adapter->rx_ring = rx_ring; 657 adapter->rx_ring = rx_ring;
@@ -700,61 +707,55 @@ err_setup:
700 return err; 707 return err;
701} 708}
702 709
703static bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data, 710static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
704 int reg, int offset, u32 mask, u32 write) 711 int reg, int offset, u32 mask, u32 write)
705{ 712{
706 int i; 713 u32 pat, val;
707 u32 read;
708 static const u32 test[] = 714 static const u32 test[] =
709 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 715 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
710 for (i = 0; i < ARRAY_SIZE(test); i++) { 716 for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
711 E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, 717 E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
712 (test[i] & write)); 718 (test[pat] & write));
713 read = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); 719 val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
714 if (read != (test[i] & write & mask)) { 720 if (val != (test[pat] & write & mask)) {
715 ndev_err(adapter->netdev, "pattern test reg %04X " 721 ndev_err(adapter->netdev, "pattern test reg %04X "
716 "failed: got 0x%08X expected 0x%08X\n", 722 "failed: got 0x%08X expected 0x%08X\n",
717 reg + offset, 723 reg + offset,
718 read, (test[i] & write & mask)); 724 val, (test[pat] & write & mask));
719 *data = reg; 725 *data = reg;
720 return true; 726 return 1;
721 } 727 }
722 } 728 }
723 return false; 729 return 0;
724} 730}
725 731
726static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, 732static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
727 int reg, u32 mask, u32 write) 733 int reg, u32 mask, u32 write)
728{ 734{
729 u32 read; 735 u32 val;
730 __ew32(&adapter->hw, reg, write & mask); 736 __ew32(&adapter->hw, reg, write & mask);
731 read = __er32(&adapter->hw, reg); 737 val = __er32(&adapter->hw, reg);
732 if ((write & mask) != (read & mask)) { 738 if ((write & mask) != (val & mask)) {
733 ndev_err(adapter->netdev, "set/check reg %04X test failed: " 739 ndev_err(adapter->netdev, "set/check reg %04X test failed: "
734 "got 0x%08X expected 0x%08X\n", reg, (read & mask), 740 "got 0x%08X expected 0x%08X\n", reg, (val & mask),
735 (write & mask)); 741 (write & mask));
736 *data = reg; 742 *data = reg;
737 return true; 743 return 1;
738 } 744 }
739 return false; 745 return 0;
740} 746}
741 747#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
742#define REG_PATTERN_TEST(R, M, W) \ 748 do { \
743 do { \ 749 if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
744 if (reg_pattern_test_array(adapter, data, R, 0, M, W)) \ 750 return 1; \
745 return 1; \
746 } while (0) 751 } while (0)
752#define REG_PATTERN_TEST(reg, mask, write) \
753 REG_PATTERN_TEST_ARRAY(reg, 0, mask, write)
747 754
748#define REG_PATTERN_TEST_ARRAY(R, offset, M, W) \ 755#define REG_SET_AND_CHECK(reg, mask, write) \
749 do { \ 756 do { \
750 if (reg_pattern_test_array(adapter, data, R, offset, M, W)) \ 757 if (reg_set_and_check(adapter, data, reg, mask, write)) \
751 return 1; \ 758 return 1; \
752 } while (0)
753
754#define REG_SET_AND_CHECK(R, M, W) \
755 do { \
756 if (reg_set_and_check(adapter, data, R, M, W)) \
757 return 1; \
758 } while (0) 759 } while (0)
759 760
760static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) 761static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
@@ -1038,7 +1039,6 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1038 struct pci_dev *pdev = adapter->pdev; 1039 struct pci_dev *pdev = adapter->pdev;
1039 struct e1000_hw *hw = &adapter->hw; 1040 struct e1000_hw *hw = &adapter->hw;
1040 u32 rctl; 1041 u32 rctl;
1041 int size;
1042 int i; 1042 int i;
1043 int ret_val; 1043 int ret_val;
1044 1044
@@ -1047,13 +1047,13 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1047 if (!tx_ring->count) 1047 if (!tx_ring->count)
1048 tx_ring->count = E1000_DEFAULT_TXD; 1048 tx_ring->count = E1000_DEFAULT_TXD;
1049 1049
1050 size = tx_ring->count * sizeof(struct e1000_buffer); 1050 tx_ring->buffer_info = kcalloc(tx_ring->count,
1051 tx_ring->buffer_info = kmalloc(size, GFP_KERNEL); 1051 sizeof(struct e1000_buffer),
1052 if (!tx_ring->buffer_info) { 1052 GFP_KERNEL);
1053 if (!(tx_ring->buffer_info)) {
1053 ret_val = 1; 1054 ret_val = 1;
1054 goto err_nomem; 1055 goto err_nomem;
1055 } 1056 }
1056 memset(tx_ring->buffer_info, 0, size);
1057 1057
1058 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); 1058 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
1059 tx_ring->size = ALIGN(tx_ring->size, 4096); 1059 tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -1063,21 +1063,17 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1063 ret_val = 2; 1063 ret_val = 2;
1064 goto err_nomem; 1064 goto err_nomem;
1065 } 1065 }
1066 memset(tx_ring->desc, 0, tx_ring->size);
1067 tx_ring->next_to_use = 0; 1066 tx_ring->next_to_use = 0;
1068 tx_ring->next_to_clean = 0; 1067 tx_ring->next_to_clean = 0;
1069 1068
1070 ew32(TDBAL, 1069 ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1071 ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1072 ew32(TDBAH, ((u64) tx_ring->dma >> 32)); 1070 ew32(TDBAH, ((u64) tx_ring->dma >> 32));
1073 ew32(TDLEN, 1071 ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc));
1074 tx_ring->count * sizeof(struct e1000_tx_desc));
1075 ew32(TDH, 0); 1072 ew32(TDH, 0);
1076 ew32(TDT, 0); 1073 ew32(TDT, 0);
1077 ew32(TCTL, 1074 ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
1078 E1000_TCTL_PSP | E1000_TCTL_EN | 1075 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
1079 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | 1076 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
1080 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
1081 1077
1082 for (i = 0; i < tx_ring->count; i++) { 1078 for (i = 0; i < tx_ring->count; i++) {
1083 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); 1079 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
@@ -1099,12 +1095,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1099 ret_val = 4; 1095 ret_val = 4;
1100 goto err_nomem; 1096 goto err_nomem;
1101 } 1097 }
1102 tx_desc->buffer_addr = cpu_to_le64( 1098 tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
1103 tx_ring->buffer_info[i].dma);
1104 tx_desc->lower.data = cpu_to_le32(skb->len); 1099 tx_desc->lower.data = cpu_to_le32(skb->len);
1105 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | 1100 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
1106 E1000_TXD_CMD_IFCS | 1101 E1000_TXD_CMD_IFCS |
1107 E1000_TXD_CMD_RPS); 1102 E1000_TXD_CMD_RS);
1108 tx_desc->upper.data = 0; 1103 tx_desc->upper.data = 0;
1109 } 1104 }
1110 1105
@@ -1113,13 +1108,13 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1113 if (!rx_ring->count) 1108 if (!rx_ring->count)
1114 rx_ring->count = E1000_DEFAULT_RXD; 1109 rx_ring->count = E1000_DEFAULT_RXD;
1115 1110
1116 size = rx_ring->count * sizeof(struct e1000_buffer); 1111 rx_ring->buffer_info = kcalloc(rx_ring->count,
1117 rx_ring->buffer_info = kmalloc(size, GFP_KERNEL); 1112 sizeof(struct e1000_buffer),
1118 if (!rx_ring->buffer_info) { 1113 GFP_KERNEL);
1114 if (!(rx_ring->buffer_info)) {
1119 ret_val = 5; 1115 ret_val = 5;
1120 goto err_nomem; 1116 goto err_nomem;
1121 } 1117 }
1122 memset(rx_ring->buffer_info, 0, size);
1123 1118
1124 rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc); 1119 rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
1125 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 1120 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
@@ -1128,7 +1123,6 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1128 ret_val = 6; 1123 ret_val = 6;
1129 goto err_nomem; 1124 goto err_nomem;
1130 } 1125 }
1131 memset(rx_ring->desc, 0, rx_ring->size);
1132 rx_ring->next_to_use = 0; 1126 rx_ring->next_to_use = 0;
1133 rx_ring->next_to_clean = 0; 1127 rx_ring->next_to_clean = 0;
1134 1128
@@ -1140,6 +1134,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1140 ew32(RDH, 0); 1134 ew32(RDH, 0);
1141 ew32(RDT, 0); 1135 ew32(RDT, 0);
1142 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | 1136 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
1137 E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
1138 E1000_RCTL_SBP | E1000_RCTL_SECRC |
1143 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 1139 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1144 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 1140 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1145 ew32(RCTL, rctl); 1141 ew32(RCTL, rctl);
@@ -1203,7 +1199,8 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1203 1199
1204 ctrl_reg = er32(CTRL); 1200 ctrl_reg = er32(CTRL);
1205 1201
1206 if (hw->phy.type == e1000_phy_ife) { 1202 switch (hw->phy.type) {
1203 case e1000_phy_ife:
1207 /* force 100, set loopback */ 1204 /* force 100, set loopback */
1208 e1e_wphy(hw, PHY_CONTROL, 0x6100); 1205 e1e_wphy(hw, PHY_CONTROL, 0x6100);
1209 1206
@@ -1213,9 +1210,11 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1213 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 1210 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1214 E1000_CTRL_SPD_100 |/* Force Speed to 100 */ 1211 E1000_CTRL_SPD_100 |/* Force Speed to 100 */
1215 E1000_CTRL_FD); /* Force Duplex to FULL */ 1212 E1000_CTRL_FD); /* Force Duplex to FULL */
1216 } else { 1213 break;
1214 default:
1217 /* force 1000, set loopback */ 1215 /* force 1000, set loopback */
1218 e1e_wphy(hw, PHY_CONTROL, 0x4140); 1216 e1e_wphy(hw, PHY_CONTROL, 0x4140);
1217 mdelay(250);
1219 1218
1220 /* Now set up the MAC to the same speed/duplex as the PHY. */ 1219 /* Now set up the MAC to the same speed/duplex as the PHY. */
1221 ctrl_reg = er32(CTRL); 1220 ctrl_reg = er32(CTRL);
@@ -1224,6 +1223,10 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1224 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 1223 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1225 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ 1224 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1226 E1000_CTRL_FD); /* Force Duplex to FULL */ 1225 E1000_CTRL_FD); /* Force Duplex to FULL */
1226
1227 if ((adapter->hw.mac.type == e1000_ich8lan) ||
1228 (adapter->hw.mac.type == e1000_ich9lan))
1229 ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */
1227 } 1230 }
1228 1231
1229 if (hw->phy.media_type == e1000_media_type_copper && 1232 if (hw->phy.media_type == e1000_media_type_copper &&
@@ -1325,7 +1328,7 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
1325#define KMRNCTRLSTA_OPMODE (0x1F << 16) 1328#define KMRNCTRLSTA_OPMODE (0x1F << 16)
1326#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582 1329#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582
1327 ew32(KMRNCTRLSTA, 1330 ew32(KMRNCTRLSTA,
1328 (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); 1331 (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII));
1329 1332
1330 return 0; 1333 return 0;
1331} 1334}
@@ -1451,8 +1454,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1451 l = 0; 1454 l = 0;
1452 for (j = 0; j <= lc; j++) { /* loop count loop */ 1455 for (j = 0; j <= lc; j++) { /* loop count loop */
1453 for (i = 0; i < 64; i++) { /* send the packets */ 1456 for (i = 0; i < 64; i++) { /* send the packets */
1454 e1000_create_lbtest_frame( 1457 e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1455 tx_ring->buffer_info[i].skb, 1024); 1458 1024);
1456 pci_dma_sync_single_for_device(pdev, 1459 pci_dma_sync_single_for_device(pdev,
1457 tx_ring->buffer_info[k].dma, 1460 tx_ring->buffer_info[k].dma,
1458 tx_ring->buffer_info[k].length, 1461 tx_ring->buffer_info[k].length,
@@ -1487,7 +1490,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1487 ret_val = 13; /* ret_val is the same as mis-compare */ 1490 ret_val = 13; /* ret_val is the same as mis-compare */
1488 break; 1491 break;
1489 } 1492 }
1490 if (jiffies >= (time + 2)) { 1493 if (jiffies >= (time + 20)) {
1491 ret_val = 14; /* error code for time out error */ 1494 ret_val = 14; /* error code for time out error */
1492 break; 1495 break;
1493 } 1496 }
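
One recurring change in the ethtool diff above replaces an open-coded
kmalloc()+memset() pair with kcalloc(), which returns zeroed memory and checks
the count * size multiplication for overflow. A condensed before/after sketch
(not a verbatim excerpt from the hunks):

	/* before: compute size, allocate, then zero by hand */
	size = tx_ring->count * sizeof(struct e1000_buffer);
	tx_ring->buffer_info = kmalloc(size, GFP_KERNEL);
	if (!tx_ring->buffer_info)
		goto err_nomem;
	memset(tx_ring->buffer_info, 0, size);

	/* after: kcalloc allocates zeroed memory in one call */
	tx_ring->buffer_info = kcalloc(tx_ring->count,
				       sizeof(struct e1000_buffer),
				       GFP_KERNEL);
	if (!tx_ring->buffer_info)
		goto err_nomem;
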
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 0b4145a73229..53f1ac6327fa 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -93,6 +93,8 @@ enum e1e_registers {
93 E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */ 93 E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */
94 E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */ 94 E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */
95 E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ 95 E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
96 E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
97#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
96 E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */ 98 E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */
97 99
98/* Convenience macros 100/* Convenience macros
@@ -111,11 +113,11 @@ enum e1e_registers {
111 E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */ 113 E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */
112 E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */ 114 E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */
113 E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */ 115 E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */
114 E1000_TXDCTL = 0x03828, /* Tx Descriptor Control - RW */ 116 E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
117#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8))
115 E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */ 118 E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */
116 E1000_TARC0 = 0x03840, /* Tx Arbitration Count (0) */ 119 E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */
117 E1000_TXDCTL1 = 0x03928, /* Tx Descriptor Control (1) - RW */ 120#define E1000_TARC(_n) (E1000_TARC_BASE + (_n << 8))
118 E1000_TARC1 = 0x03940, /* Tx Arbitration Count (1) */
119 E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */ 121 E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */
120 E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */ 122 E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */
121 E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */ 123 E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */
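
For reference, the parameterized register macros introduced in this hw.h hunk
expand to the same offsets as the fixed names they replace; a minimal sketch
using only the base addresses shown above:

	#define E1000_TXDCTL_BASE	0x03828
	#define E1000_TXDCTL(_n)	(E1000_TXDCTL_BASE + (_n << 8))
	#define E1000_TARC_BASE		0x03840
	#define E1000_TARC(_n)		(E1000_TARC_BASE + (_n << 8))

	/* E1000_TXDCTL(0) = 0x03828 (old E1000_TXDCTL)  */
	/* E1000_TXDCTL(1) = 0x03928 (old E1000_TXDCTL1) */
	/* E1000_TARC(0)   = 0x03840 (old E1000_TARC0)   */
	/* E1000_TARC(1)   = 0x03940 (old E1000_TARC1)   */
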
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index e358a773e67a..768485dbb2c6 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -316,7 +316,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
316 return 0; 316 return 0;
317} 317}
318 318
319static s32 e1000_get_invariants_ich8lan(struct e1000_adapter *adapter) 319static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
320{ 320{
321 struct e1000_hw *hw = &adapter->hw; 321 struct e1000_hw *hw = &adapter->hw;
322 s32 rc; 322 s32 rc;
@@ -1753,18 +1753,18 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
1753 ret_val = e1000_setup_link_ich8lan(hw); 1753 ret_val = e1000_setup_link_ich8lan(hw);
1754 1754
1755 /* Set the transmit descriptor write-back policy for both queues */ 1755 /* Set the transmit descriptor write-back policy for both queues */
1756 txdctl = er32(TXDCTL); 1756 txdctl = er32(TXDCTL(0));
1757 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | 1757 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
1758 E1000_TXDCTL_FULL_TX_DESC_WB; 1758 E1000_TXDCTL_FULL_TX_DESC_WB;
1759 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | 1759 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
1760 E1000_TXDCTL_MAX_TX_DESC_PREFETCH; 1760 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
1761 ew32(TXDCTL, txdctl); 1761 ew32(TXDCTL(0), txdctl);
1762 txdctl = er32(TXDCTL1); 1762 txdctl = er32(TXDCTL(1));
1763 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | 1763 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
1764 E1000_TXDCTL_FULL_TX_DESC_WB; 1764 E1000_TXDCTL_FULL_TX_DESC_WB;
1765 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | 1765 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
1766 E1000_TXDCTL_MAX_TX_DESC_PREFETCH; 1766 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
1767 ew32(TXDCTL1, txdctl); 1767 ew32(TXDCTL(1), txdctl);
1768 1768
1769 /* 1769 /*
1770 * ICH8 has opposite polarity of no_snoop bits. 1770 * ICH8 has opposite polarity of no_snoop bits.
@@ -1807,30 +1807,30 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
1807 ew32(CTRL_EXT, reg); 1807 ew32(CTRL_EXT, reg);
1808 1808
1809 /* Transmit Descriptor Control 0 */ 1809 /* Transmit Descriptor Control 0 */
1810 reg = er32(TXDCTL); 1810 reg = er32(TXDCTL(0));
1811 reg |= (1 << 22); 1811 reg |= (1 << 22);
1812 ew32(TXDCTL, reg); 1812 ew32(TXDCTL(0), reg);
1813 1813
1814 /* Transmit Descriptor Control 1 */ 1814 /* Transmit Descriptor Control 1 */
1815 reg = er32(TXDCTL1); 1815 reg = er32(TXDCTL(1));
1816 reg |= (1 << 22); 1816 reg |= (1 << 22);
1817 ew32(TXDCTL1, reg); 1817 ew32(TXDCTL(1), reg);
1818 1818
1819 /* Transmit Arbitration Control 0 */ 1819 /* Transmit Arbitration Control 0 */
1820 reg = er32(TARC0); 1820 reg = er32(TARC(0));
1821 if (hw->mac.type == e1000_ich8lan) 1821 if (hw->mac.type == e1000_ich8lan)
1822 reg |= (1 << 28) | (1 << 29); 1822 reg |= (1 << 28) | (1 << 29);
1823 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); 1823 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
1824 ew32(TARC0, reg); 1824 ew32(TARC(0), reg);
1825 1825
1826 /* Transmit Arbitration Control 1 */ 1826 /* Transmit Arbitration Control 1 */
1827 reg = er32(TARC1); 1827 reg = er32(TARC(1));
1828 if (er32(TCTL) & E1000_TCTL_MULR) 1828 if (er32(TCTL) & E1000_TCTL_MULR)
1829 reg &= ~(1 << 28); 1829 reg &= ~(1 << 28);
1830 else 1830 else
1831 reg |= (1 << 28); 1831 reg |= (1 << 28);
1832 reg |= (1 << 24) | (1 << 26) | (1 << 30); 1832 reg |= (1 << 24) | (1 << 26) | (1 << 30);
1833 ew32(TARC1, reg); 1833 ew32(TARC(1), reg);
1834 1834
1835 /* Device Status */ 1835 /* Device Status */
1836 if (hw->mac.type == e1000_ich8lan) { 1836 if (hw->mac.type == e1000_ich8lan) {
@@ -2253,7 +2253,7 @@ struct e1000_info e1000_ich8_info = {
2253 | FLAG_HAS_FLASH 2253 | FLAG_HAS_FLASH
2254 | FLAG_APME_IN_WUC, 2254 | FLAG_APME_IN_WUC,
2255 .pba = 8, 2255 .pba = 8,
2256 .get_invariants = e1000_get_invariants_ich8lan, 2256 .get_variants = e1000_get_variants_ich8lan,
2257 .mac_ops = &ich8_mac_ops, 2257 .mac_ops = &ich8_mac_ops,
2258 .phy_ops = &ich8_phy_ops, 2258 .phy_ops = &ich8_phy_ops,
2259 .nvm_ops = &ich8_nvm_ops, 2259 .nvm_ops = &ich8_nvm_ops,
@@ -2270,7 +2270,7 @@ struct e1000_info e1000_ich9_info = {
2270 | FLAG_HAS_FLASH 2270 | FLAG_HAS_FLASH
2271 | FLAG_APME_IN_WUC, 2271 | FLAG_APME_IN_WUC,
2272 .pba = 10, 2272 .pba = 10,
2273 .get_invariants = e1000_get_invariants_ich8lan, 2273 .get_variants = e1000_get_variants_ich8lan,
2274 .mac_ops = &ich8_mac_ops, 2274 .mac_ops = &ich8_mac_ops,
2275 .phy_ops = &ich8_phy_ops, 2275 .phy_ops = &ich8_phy_ops,
2276 .nvm_ops = &ich8_nvm_ops, 2276 .nvm_ops = &ich8_nvm_ops,
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index ea3ff6369c86..f1f4e9dfd0a0 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -2477,7 +2477,7 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
2477 return ret_val; 2477 return ret_val;
2478} 2478}
2479 2479
2480s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num) 2480s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
2481{ 2481{
2482 s32 ret_val; 2482 s32 ret_val;
2483 u16 nvm_data; 2483 u16 nvm_data;
@@ -2487,14 +2487,14 @@ s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num)
2487 hw_dbg(hw, "NVM Read Error\n"); 2487 hw_dbg(hw, "NVM Read Error\n");
2488 return ret_val; 2488 return ret_val;
2489 } 2489 }
2490 *part_num = (u32)(nvm_data << 16); 2490 *pba_num = (u32)(nvm_data << 16);
2491 2491
2492 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); 2492 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
2493 if (ret_val) { 2493 if (ret_val) {
2494 hw_dbg(hw, "NVM Read Error\n"); 2494 hw_dbg(hw, "NVM Read Error\n");
2495 return ret_val; 2495 return ret_val;
2496 } 2496 }
2497 *part_num |= nvm_data; 2497 *pba_num |= nvm_data;
2498 2498
2499 return 0; 2499 return 0;
2500} 2500}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index d70bde03619e..c8dc47fd132a 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1639,24 +1639,24 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1639 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 1639 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1640 1640
1641 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { 1641 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
1642 tarc = er32(TARC0); 1642 tarc = er32(TARC(0));
1643 /* 1643 /*
1644 * set the speed mode bit, we'll clear it if we're not at 1644 * set the speed mode bit, we'll clear it if we're not at
1645 * gigabit link later 1645 * gigabit link later
1646 */ 1646 */
1647#define SPEED_MODE_BIT (1 << 21) 1647#define SPEED_MODE_BIT (1 << 21)
1648 tarc |= SPEED_MODE_BIT; 1648 tarc |= SPEED_MODE_BIT;
1649 ew32(TARC0, tarc); 1649 ew32(TARC(0), tarc);
1650 } 1650 }
1651 1651
1652 /* errata: program both queues to unweighted RR */ 1652 /* errata: program both queues to unweighted RR */
1653 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { 1653 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
1654 tarc = er32(TARC0); 1654 tarc = er32(TARC(0));
1655 tarc |= 1; 1655 tarc |= 1;
1656 ew32(TARC0, tarc); 1656 ew32(TARC(0), tarc);
1657 tarc = er32(TARC1); 1657 tarc = er32(TARC(1));
1658 tarc |= 1; 1658 tarc |= 1;
1659 ew32(TARC1, tarc); 1659 ew32(TARC(1), tarc);
1660 } 1660 }
1661 1661
1662 e1000e_config_collision_dist(hw); 1662 e1000e_config_collision_dist(hw);
@@ -2775,9 +2775,9 @@ static void e1000_watchdog_task(struct work_struct *work)
2775 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && 2775 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
2776 !txb2b) { 2776 !txb2b) {
2777 u32 tarc0; 2777 u32 tarc0;
2778 tarc0 = er32(TARC0); 2778 tarc0 = er32(TARC(0));
2779 tarc0 &= ~SPEED_MODE_BIT; 2779 tarc0 &= ~SPEED_MODE_BIT;
2780 ew32(TARC0, tarc0); 2780 ew32(TARC(0), tarc0);
2781 } 2781 }
2782 2782
2783 /* 2783 /*
@@ -3824,7 +3824,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
3824{ 3824{
3825 struct e1000_hw *hw = &adapter->hw; 3825 struct e1000_hw *hw = &adapter->hw;
3826 struct net_device *netdev = adapter->netdev; 3826 struct net_device *netdev = adapter->netdev;
3827 u32 part_num; 3827 u32 pba_num;
3828 3828
3829 /* print bus type/speed/width info */ 3829 /* print bus type/speed/width info */
3830 ndev_info(netdev, "(PCI Express:2.5GB/s:%s) " 3830 ndev_info(netdev, "(PCI Express:2.5GB/s:%s) "
@@ -3839,10 +3839,10 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
3839 ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n", 3839 ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n",
3840 (hw->phy.type == e1000_phy_ife) 3840 (hw->phy.type == e1000_phy_ife)
3841 ? "10/100" : "1000"); 3841 ? "10/100" : "1000");
3842 e1000e_read_part_num(hw, &part_num); 3842 e1000e_read_pba_num(hw, &pba_num);
3843 ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 3843 ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
3844 hw->mac.type, hw->phy.type, 3844 hw->mac.type, hw->phy.type,
3845 (part_num >> 8), (part_num & 0xff)); 3845 (pba_num >> 8), (pba_num & 0xff));
3846} 3846}
3847 3847
3848/** 3848/**
@@ -3974,7 +3974,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
3974 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); 3974 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
3975 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 3975 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
3976 3976
3977 err = ei->get_invariants(adapter); 3977 err = ei->get_variants(adapter);
3978 if (err) 3978 if (err)
3979 goto err_hw_init; 3979 goto err_hw_init;
3980 3980
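
The e1000e hunks above replace the flat TARC0/TARC1 register names with an indexed TARC(n) accessor (and rename the part-number helper to e1000e_read_pba_num). A standalone sketch of the indexed-register pattern follows; the 0x03840 base and 0x100 stride are assumptions chosen to resemble the usual TARC layout, not values taken from this patch:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: TARC(n) registers 0x100 apart starting at 0x03840. */
#define E1000_TARC(n)   (0x03840 + ((n) << 8))
#define SPEED_MODE_BIT  (1 << 21)

static uint32_t regs[0x10000 / 4];              /* fake MMIO register file */

static uint32_t er32(uint32_t off)            { return regs[off / 4]; }
static void ew32(uint32_t off, uint32_t val)  { regs[off / 4] = val; }

int main(void)
{
        uint32_t tarc;

        /* same read-modify-write flow as e1000_configure_tx() above */
        tarc = er32(E1000_TARC(0));
        tarc |= SPEED_MODE_BIT;
        ew32(E1000_TARC(0), tarc);

        tarc = er32(E1000_TARC(1));
        tarc |= 1;
        ew32(E1000_TARC(1), tarc);

        printf("TARC(0)=%#x TARC(1)=%#x\n",
               (unsigned int)er32(E1000_TARC(0)),
               (unsigned int)er32(E1000_TARC(1)));
        return 0;
}

Indexing by queue keeps the read-modify-write sequences identical for TARC0 and TARC1 instead of duplicating them per register name.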
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 58b71e60204e..43b5f30743c2 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -198,7 +198,7 @@ static int mpc52xx_fec_init_phy(struct net_device *dev)
198 struct phy_device *phydev; 198 struct phy_device *phydev;
199 char phy_id[BUS_ID_SIZE]; 199 char phy_id[BUS_ID_SIZE];
200 200
201 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, 201 snprintf(phy_id, BUS_ID_SIZE, "%x:%02x",
202 (unsigned int)dev->base_addr, priv->phy_addr); 202 (unsigned int)dev->base_addr, priv->phy_addr);
203 203
204 priv->link = PHY_DOWN; 204 priv->link = PHY_DOWN;
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 6a3ac4ea97e9..956836fc5ec0 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -124,7 +124,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_i
124 goto out_free; 124 goto out_free;
125 } 125 }
126 126
127 bus->id = res.start; 127 snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
128 bus->priv = priv; 128 bus->priv = priv;
129 129
130 bus->dev = dev; 130 bus->dev = dev;
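
The two fec_mpc52xx hunks (and the fs_enet and gianfar_mii hunks further down) track the same phylib interface change: mii_bus::id is now a fixed-size string filled with snprintf(), and PHY device ids are built as "<bus id>:<phy addr>". A userspace sketch of that formatting; the buffer sizes are assumed stand-ins for MII_BUS_ID_SIZE and BUS_ID_SIZE from the kernel headers:

#include <stdio.h>

#define MII_BUS_ID_SIZE 17      /* assumed stand-in for the linux/phy.h constant */
#define BUS_ID_SIZE     20      /* assumed stand-in for the linux/device.h constant */

int main(void)
{
        char bus_id[MII_BUS_ID_SIZE];
        char phy_id[BUS_ID_SIZE];
        unsigned int mmio_base = 0x80003000;    /* stand-in for res.start */
        int phy_addr = 1;                       /* stand-in for priv->phy_addr */

        /* bus id: hex MMIO base, as in mpc52xx_fec_mdio_probe() */
        snprintf(bus_id, sizeof(bus_id), "%x", mmio_base);

        /* phy id: "<bus id>:<two-digit phy address>", as in mpc52xx_fec_init_phy() */
        snprintf(phy_id, sizeof(phy_id), "%x:%02x", mmio_base, phy_addr);

        printf("bus id = %s\nphy id = %s\n", bus_id, phy_id);
        return 0;
}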
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3338b115fa66..8c4214b0ee1f 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -29,90 +29,6 @@
29 * along with this program; if not, write to the Free Software 29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
31 * 31 *
32 * Changelog:
33 * 0.01: 05 Oct 2003: First release that compiles without warnings.
34 * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
35 * Check all PCI BARs for the register window.
36 * udelay added to mii_rw.
37 * 0.03: 06 Oct 2003: Initialize dev->irq.
38 * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
39 * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
40 * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
41 * irq mask updated
42 * 0.07: 14 Oct 2003: Further irq mask updates.
43 * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
44 * added into irq handler, NULL check for drain_ring.
45 * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
46 * requested interrupt sources.
47 * 0.10: 20 Oct 2003: First cleanup for release.
48 * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
49 * MAC Address init fix, set_multicast cleanup.
50 * 0.12: 23 Oct 2003: Cleanups for release.
51 * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
52 * Set link speed correctly. start rx before starting
53 * tx (nv_start_rx sets the link speed).
54 * 0.14: 25 Oct 2003: Nic dependant irq mask.
55 * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
56 * open.
57 * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
58 * increased to 1628 bytes.
59 * 0.17: 16 Nov 2003: undo rx buffer size increase. Substract 1 from
60 * the tx length.
61 * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
62 * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
63 * addresses, really stop rx if already running
64 * in nv_start_rx, clean up a bit.
65 * 0.20: 07 Dec 2003: alloc fixes
66 * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
67 * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
68 * on close.
69 * 0.23: 26 Jan 2004: various small cleanups
70 * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces
71 * 0.25: 09 Mar 2004: wol support
72 * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
73 * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
74 * added CK804/MCP04 device IDs, code fixes
75 * for registers, link status and other minor fixes.
76 * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
77 * 0.29: 31 Aug 2004: Add backup timer for link change notification.
78 * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
79 * into nv_close, otherwise reenabling for wol can
80 * cause DMA to kfree'd memory.
81 * 0.31: 14 Nov 2004: ethtool support for getting/setting link
82 * capabilities.
83 * 0.32: 16 Apr 2005: RX_ERROR4 handling added.
84 * 0.33: 16 May 2005: Support for MCP51 added.
85 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
86 * 0.35: 26 Jun 2005: Support for MCP55 added.
87 * 0.36: 28 Jun 2005: Add jumbo frame support.
88 * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
89 * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
90 * per-packet flags.
91 * 0.39: 18 Jul 2005: Add 64bit descriptor support.
92 * 0.40: 19 Jul 2005: Add support for mac address change.
93 * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
94 * of nv_remove
95 * 0.42: 06 Aug 2005: Fix lack of link speed initialization
96 * in the second (and later) nv_open call
97 * 0.43: 10 Aug 2005: Add support for tx checksum.
98 * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
99 * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
100 * 0.46: 20 Oct 2005: Add irq optimization modes.
101 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
102 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
103 * 0.49: 10 Dec 2005: Fix tso for large buffers.
104 * 0.50: 20 Jan 2006: Add 8021pq tagging support.
105 * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
106 * 0.52: 20 Jan 2006: Add MSI/MSIX support.
107 * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
108 * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
109 * 0.55: 22 Mar 2006: Add flow control (pause frame).
110 * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
111 * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
112 * 0.58: 30 Oct 2006: Added support for sideband management unit.
113 * 0.59: 30 Oct 2006: Added support for recoverable error.
114 * 0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
115 *
116 * Known bugs: 32 * Known bugs:
117 * We suspect that on some hardware no TX done interrupts are generated. 33 * We suspect that on some hardware no TX done interrupts are generated.
118 * This means recovery from netif_stop_queue only happens if the hw timer 34 * This means recovery from netif_stop_queue only happens if the hw timer
@@ -123,11 +39,6 @@
123 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 39 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
124 * superfluous timer interrupts from the nic. 40 * superfluous timer interrupts from the nic.
125 */ 41 */
126#ifdef CONFIG_FORCEDETH_NAPI
127#define DRIVERNAPI "-NAPI"
128#else
129#define DRIVERNAPI
130#endif
131#define FORCEDETH_VERSION "0.61" 42#define FORCEDETH_VERSION "0.61"
132#define DRV_NAME "forcedeth" 43#define DRV_NAME "forcedeth"
133 44
@@ -930,6 +841,13 @@ static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
930 return le32_to_cpu(prd->flaglen) & LEN_MASK_V2; 841 return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
931} 842}
932 843
844static bool nv_optimized(struct fe_priv *np)
845{
846 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
847 return false;
848 return true;
849}
850
933static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, 851static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
934 int delay, int delaymax, const char *msg) 852 int delay, int delaymax, const char *msg)
935{ 853{
@@ -966,7 +884,7 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
966 struct fe_priv *np = get_nvpriv(dev); 884 struct fe_priv *np = get_nvpriv(dev);
967 u8 __iomem *base = get_hwbase(dev); 885 u8 __iomem *base = get_hwbase(dev);
968 886
969 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 887 if (!nv_optimized(np)) {
970 if (rxtx_flags & NV_SETUP_RX_RING) { 888 if (rxtx_flags & NV_SETUP_RX_RING) {
971 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); 889 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
972 } 890 }
@@ -989,7 +907,7 @@ static void free_rings(struct net_device *dev)
989{ 907{
990 struct fe_priv *np = get_nvpriv(dev); 908 struct fe_priv *np = get_nvpriv(dev);
991 909
992 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 910 if (!nv_optimized(np)) {
993 if (np->rx_ring.orig) 911 if (np->rx_ring.orig)
994 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), 912 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
995 np->rx_ring.orig, np->ring_addr); 913 np->rx_ring.orig, np->ring_addr);
@@ -1435,6 +1353,18 @@ static void nv_stop_tx(struct net_device *dev)
1435 base + NvRegTransmitPoll); 1353 base + NvRegTransmitPoll);
1436} 1354}
1437 1355
1356static void nv_start_rxtx(struct net_device *dev)
1357{
1358 nv_start_rx(dev);
1359 nv_start_tx(dev);
1360}
1361
1362static void nv_stop_rxtx(struct net_device *dev)
1363{
1364 nv_stop_rx(dev);
1365 nv_stop_tx(dev);
1366}
1367
1438static void nv_txrx_reset(struct net_device *dev) 1368static void nv_txrx_reset(struct net_device *dev)
1439{ 1369{
1440 struct fe_priv *np = netdev_priv(dev); 1370 struct fe_priv *np = netdev_priv(dev);
@@ -1657,7 +1587,7 @@ static void nv_do_rx_refill(unsigned long data)
1657 } else { 1587 } else {
1658 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1588 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1659 } 1589 }
1660 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1590 if (!nv_optimized(np))
1661 retcode = nv_alloc_rx(dev); 1591 retcode = nv_alloc_rx(dev);
1662 else 1592 else
1663 retcode = nv_alloc_rx_optimized(dev); 1593 retcode = nv_alloc_rx_optimized(dev);
@@ -1682,8 +1612,10 @@ static void nv_init_rx(struct net_device *dev)
1682{ 1612{
1683 struct fe_priv *np = netdev_priv(dev); 1613 struct fe_priv *np = netdev_priv(dev);
1684 int i; 1614 int i;
1615
1685 np->get_rx = np->put_rx = np->first_rx = np->rx_ring; 1616 np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1686 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1617
1618 if (!nv_optimized(np))
1687 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; 1619 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1688 else 1620 else
1689 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; 1621 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
@@ -1691,7 +1623,7 @@ static void nv_init_rx(struct net_device *dev)
1691 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; 1623 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1692 1624
1693 for (i = 0; i < np->rx_ring_size; i++) { 1625 for (i = 0; i < np->rx_ring_size; i++) {
1694 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1626 if (!nv_optimized(np)) {
1695 np->rx_ring.orig[i].flaglen = 0; 1627 np->rx_ring.orig[i].flaglen = 0;
1696 np->rx_ring.orig[i].buf = 0; 1628 np->rx_ring.orig[i].buf = 0;
1697 } else { 1629 } else {
@@ -1709,8 +1641,10 @@ static void nv_init_tx(struct net_device *dev)
1709{ 1641{
1710 struct fe_priv *np = netdev_priv(dev); 1642 struct fe_priv *np = netdev_priv(dev);
1711 int i; 1643 int i;
1644
1712 np->get_tx = np->put_tx = np->first_tx = np->tx_ring; 1645 np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1713 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1646
1647 if (!nv_optimized(np))
1714 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; 1648 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1715 else 1649 else
1716 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; 1650 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
@@ -1721,7 +1655,7 @@ static void nv_init_tx(struct net_device *dev)
1721 np->tx_end_flip = NULL; 1655 np->tx_end_flip = NULL;
1722 1656
1723 for (i = 0; i < np->tx_ring_size; i++) { 1657 for (i = 0; i < np->tx_ring_size; i++) {
1724 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1658 if (!nv_optimized(np)) {
1725 np->tx_ring.orig[i].flaglen = 0; 1659 np->tx_ring.orig[i].flaglen = 0;
1726 np->tx_ring.orig[i].buf = 0; 1660 np->tx_ring.orig[i].buf = 0;
1727 } else { 1661 } else {
@@ -1744,7 +1678,8 @@ static int nv_init_ring(struct net_device *dev)
1744 1678
1745 nv_init_tx(dev); 1679 nv_init_tx(dev);
1746 nv_init_rx(dev); 1680 nv_init_rx(dev);
1747 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1681
1682 if (!nv_optimized(np))
1748 return nv_alloc_rx(dev); 1683 return nv_alloc_rx(dev);
1749 else 1684 else
1750 return nv_alloc_rx_optimized(dev); 1685 return nv_alloc_rx_optimized(dev);
@@ -1775,7 +1710,7 @@ static void nv_drain_tx(struct net_device *dev)
1775 unsigned int i; 1710 unsigned int i;
1776 1711
1777 for (i = 0; i < np->tx_ring_size; i++) { 1712 for (i = 0; i < np->tx_ring_size; i++) {
1778 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1713 if (!nv_optimized(np)) {
1779 np->tx_ring.orig[i].flaglen = 0; 1714 np->tx_ring.orig[i].flaglen = 0;
1780 np->tx_ring.orig[i].buf = 0; 1715 np->tx_ring.orig[i].buf = 0;
1781 } else { 1716 } else {
@@ -1802,7 +1737,7 @@ static void nv_drain_rx(struct net_device *dev)
1802 int i; 1737 int i;
1803 1738
1804 for (i = 0; i < np->rx_ring_size; i++) { 1739 for (i = 0; i < np->rx_ring_size; i++) {
1805 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1740 if (!nv_optimized(np)) {
1806 np->rx_ring.orig[i].flaglen = 0; 1741 np->rx_ring.orig[i].flaglen = 0;
1807 np->rx_ring.orig[i].buf = 0; 1742 np->rx_ring.orig[i].buf = 0;
1808 } else { 1743 } else {
@@ -1823,7 +1758,7 @@ static void nv_drain_rx(struct net_device *dev)
1823 } 1758 }
1824} 1759}
1825 1760
1826static void drain_ring(struct net_device *dev) 1761static void nv_drain_rxtx(struct net_device *dev)
1827{ 1762{
1828 nv_drain_tx(dev); 1763 nv_drain_tx(dev);
1829 nv_drain_rx(dev); 1764 nv_drain_rx(dev);
@@ -2260,7 +2195,7 @@ static void nv_tx_timeout(struct net_device *dev)
2260 } 2195 }
2261 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); 2196 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
2262 for (i=0;i<np->tx_ring_size;i+= 4) { 2197 for (i=0;i<np->tx_ring_size;i+= 4) {
2263 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 2198 if (!nv_optimized(np)) {
2264 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", 2199 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2265 i, 2200 i,
2266 le32_to_cpu(np->tx_ring.orig[i].buf), 2201 le32_to_cpu(np->tx_ring.orig[i].buf),
@@ -2296,7 +2231,7 @@ static void nv_tx_timeout(struct net_device *dev)
2296 nv_stop_tx(dev); 2231 nv_stop_tx(dev);
2297 2232
2298 /* 2) check that the packets were not sent already: */ 2233 /* 2) check that the packets were not sent already: */
2299 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 2234 if (!nv_optimized(np))
2300 nv_tx_done(dev); 2235 nv_tx_done(dev);
2301 else 2236 else
2302 nv_tx_done_optimized(dev, np->tx_ring_size); 2237 nv_tx_done_optimized(dev, np->tx_ring_size);
@@ -2663,12 +2598,10 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
2663 netif_tx_lock_bh(dev); 2598 netif_tx_lock_bh(dev);
2664 spin_lock(&np->lock); 2599 spin_lock(&np->lock);
2665 /* stop engines */ 2600 /* stop engines */
2666 nv_stop_rx(dev); 2601 nv_stop_rxtx(dev);
2667 nv_stop_tx(dev);
2668 nv_txrx_reset(dev); 2602 nv_txrx_reset(dev);
2669 /* drain rx queue */ 2603 /* drain rx queue */
2670 nv_drain_rx(dev); 2604 nv_drain_rxtx(dev);
2671 nv_drain_tx(dev);
2672 /* reinit driver view of the rx queue */ 2605 /* reinit driver view of the rx queue */
2673 set_bufsize(dev); 2606 set_bufsize(dev);
2674 if (nv_init_ring(dev)) { 2607 if (nv_init_ring(dev)) {
@@ -2685,8 +2618,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
2685 pci_push(base); 2618 pci_push(base);
2686 2619
2687 /* restart rx engine */ 2620 /* restart rx engine */
2688 nv_start_rx(dev); 2621 nv_start_rxtx(dev);
2689 nv_start_tx(dev);
2690 spin_unlock(&np->lock); 2622 spin_unlock(&np->lock);
2691 netif_tx_unlock_bh(dev); 2623 netif_tx_unlock_bh(dev);
2692 nv_enable_irq(dev); 2624 nv_enable_irq(dev);
@@ -3393,7 +3325,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
3393 unsigned long flags; 3325 unsigned long flags;
3394 int pkts, retcode; 3326 int pkts, retcode;
3395 3327
3396 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3328 if (!nv_optimized(np)) {
3397 pkts = nv_rx_process(dev, budget); 3329 pkts = nv_rx_process(dev, budget);
3398 retcode = nv_alloc_rx(dev); 3330 retcode = nv_alloc_rx(dev);
3399 } else { 3331 } else {
@@ -3634,7 +3566,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3634 if (intr_test) { 3566 if (intr_test) {
3635 handler = nv_nic_irq_test; 3567 handler = nv_nic_irq_test;
3636 } else { 3568 } else {
3637 if (np->desc_ver == DESC_VER_3) 3569 if (nv_optimized(np))
3638 handler = nv_nic_irq_optimized; 3570 handler = nv_nic_irq_optimized;
3639 else 3571 else
3640 handler = nv_nic_irq; 3572 handler = nv_nic_irq;
@@ -3787,12 +3719,10 @@ static void nv_do_nic_poll(unsigned long data)
3787 netif_tx_lock_bh(dev); 3719 netif_tx_lock_bh(dev);
3788 spin_lock(&np->lock); 3720 spin_lock(&np->lock);
3789 /* stop engines */ 3721 /* stop engines */
3790 nv_stop_rx(dev); 3722 nv_stop_rxtx(dev);
3791 nv_stop_tx(dev);
3792 nv_txrx_reset(dev); 3723 nv_txrx_reset(dev);
3793 /* drain rx queue */ 3724 /* drain rx queue */
3794 nv_drain_rx(dev); 3725 nv_drain_rxtx(dev);
3795 nv_drain_tx(dev);
3796 /* reinit driver view of the rx queue */ 3726 /* reinit driver view of the rx queue */
3797 set_bufsize(dev); 3727 set_bufsize(dev);
3798 if (nv_init_ring(dev)) { 3728 if (nv_init_ring(dev)) {
@@ -3809,8 +3739,7 @@ static void nv_do_nic_poll(unsigned long data)
3809 pci_push(base); 3739 pci_push(base);
3810 3740
3811 /* restart rx engine */ 3741 /* restart rx engine */
3812 nv_start_rx(dev); 3742 nv_start_rxtx(dev);
3813 nv_start_tx(dev);
3814 spin_unlock(&np->lock); 3743 spin_unlock(&np->lock);
3815 netif_tx_unlock_bh(dev); 3744 netif_tx_unlock_bh(dev);
3816 } 3745 }
@@ -3821,7 +3750,7 @@ static void nv_do_nic_poll(unsigned long data)
3821 pci_push(base); 3750 pci_push(base);
3822 3751
3823 if (!using_multi_irqs(dev)) { 3752 if (!using_multi_irqs(dev)) {
3824 if (np->desc_ver == DESC_VER_3) 3753 if (nv_optimized(np))
3825 nv_nic_irq_optimized(0, dev); 3754 nv_nic_irq_optimized(0, dev);
3826 else 3755 else
3827 nv_nic_irq(0, dev); 3756 nv_nic_irq(0, dev);
@@ -4019,8 +3948,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4019 netif_tx_lock_bh(dev); 3948 netif_tx_lock_bh(dev);
4020 spin_lock(&np->lock); 3949 spin_lock(&np->lock);
4021 /* stop engines */ 3950 /* stop engines */
4022 nv_stop_rx(dev); 3951 nv_stop_rxtx(dev);
4023 nv_stop_tx(dev);
4024 spin_unlock(&np->lock); 3952 spin_unlock(&np->lock);
4025 netif_tx_unlock_bh(dev); 3953 netif_tx_unlock_bh(dev);
4026 } 3954 }
@@ -4126,8 +4054,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4126 } 4054 }
4127 4055
4128 if (netif_running(dev)) { 4056 if (netif_running(dev)) {
4129 nv_start_rx(dev); 4057 nv_start_rxtx(dev);
4130 nv_start_tx(dev);
4131 nv_enable_irq(dev); 4058 nv_enable_irq(dev);
4132 } 4059 }
4133 4060
@@ -4170,8 +4097,7 @@ static int nv_nway_reset(struct net_device *dev)
4170 netif_tx_lock_bh(dev); 4097 netif_tx_lock_bh(dev);
4171 spin_lock(&np->lock); 4098 spin_lock(&np->lock);
4172 /* stop engines */ 4099 /* stop engines */
4173 nv_stop_rx(dev); 4100 nv_stop_rxtx(dev);
4174 nv_stop_tx(dev);
4175 spin_unlock(&np->lock); 4101 spin_unlock(&np->lock);
4176 netif_tx_unlock_bh(dev); 4102 netif_tx_unlock_bh(dev);
4177 printk(KERN_INFO "%s: link down.\n", dev->name); 4103 printk(KERN_INFO "%s: link down.\n", dev->name);
@@ -4191,8 +4117,7 @@ static int nv_nway_reset(struct net_device *dev)
4191 } 4117 }
4192 4118
4193 if (netif_running(dev)) { 4119 if (netif_running(dev)) {
4194 nv_start_rx(dev); 4120 nv_start_rxtx(dev);
4195 nv_start_tx(dev);
4196 nv_enable_irq(dev); 4121 nv_enable_irq(dev);
4197 } 4122 }
4198 ret = 0; 4123 ret = 0;
@@ -4249,7 +4174,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4249 } 4174 }
4250 4175
4251 /* allocate new rings */ 4176 /* allocate new rings */
4252 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4177 if (!nv_optimized(np)) {
4253 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4178 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4254 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4179 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4255 &ring_addr); 4180 &ring_addr);
@@ -4262,7 +4187,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4262 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); 4187 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
4263 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { 4188 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4264 /* fall back to old rings */ 4189 /* fall back to old rings */
4265 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4190 if (!nv_optimized(np)) {
4266 if (rxtx_ring) 4191 if (rxtx_ring)
4267 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4192 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4268 rxtx_ring, ring_addr); 4193 rxtx_ring, ring_addr);
@@ -4283,12 +4208,10 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4283 netif_tx_lock_bh(dev); 4208 netif_tx_lock_bh(dev);
4284 spin_lock(&np->lock); 4209 spin_lock(&np->lock);
4285 /* stop engines */ 4210 /* stop engines */
4286 nv_stop_rx(dev); 4211 nv_stop_rxtx(dev);
4287 nv_stop_tx(dev);
4288 nv_txrx_reset(dev); 4212 nv_txrx_reset(dev);
4289 /* drain queues */ 4213 /* drain queues */
4290 nv_drain_rx(dev); 4214 nv_drain_rxtx(dev);
4291 nv_drain_tx(dev);
4292 /* delete queues */ 4215 /* delete queues */
4293 free_rings(dev); 4216 free_rings(dev);
4294 } 4217 }
@@ -4296,7 +4219,8 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4296 /* set new values */ 4219 /* set new values */
4297 np->rx_ring_size = ring->rx_pending; 4220 np->rx_ring_size = ring->rx_pending;
4298 np->tx_ring_size = ring->tx_pending; 4221 np->tx_ring_size = ring->tx_pending;
4299 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4222
4223 if (!nv_optimized(np)) {
4300 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 4224 np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
4301 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4225 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4302 } else { 4226 } else {
@@ -4328,8 +4252,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4328 pci_push(base); 4252 pci_push(base);
4329 4253
4330 /* restart engines */ 4254 /* restart engines */
4331 nv_start_rx(dev); 4255 nv_start_rxtx(dev);
4332 nv_start_tx(dev);
4333 spin_unlock(&np->lock); 4256 spin_unlock(&np->lock);
4334 netif_tx_unlock_bh(dev); 4257 netif_tx_unlock_bh(dev);
4335 nv_enable_irq(dev); 4258 nv_enable_irq(dev);
@@ -4370,8 +4293,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
4370 netif_tx_lock_bh(dev); 4293 netif_tx_lock_bh(dev);
4371 spin_lock(&np->lock); 4294 spin_lock(&np->lock);
4372 /* stop engines */ 4295 /* stop engines */
4373 nv_stop_rx(dev); 4296 nv_stop_rxtx(dev);
4374 nv_stop_tx(dev);
4375 spin_unlock(&np->lock); 4297 spin_unlock(&np->lock);
4376 netif_tx_unlock_bh(dev); 4298 netif_tx_unlock_bh(dev);
4377 } 4299 }
@@ -4412,8 +4334,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
4412 } 4334 }
4413 4335
4414 if (netif_running(dev)) { 4336 if (netif_running(dev)) {
4415 nv_start_rx(dev); 4337 nv_start_rxtx(dev);
4416 nv_start_tx(dev);
4417 nv_enable_irq(dev); 4338 nv_enable_irq(dev);
4418 } 4339 }
4419 return 0; 4340 return 0;
@@ -4649,8 +4570,7 @@ static int nv_loopback_test(struct net_device *dev)
4649 pci_push(base); 4570 pci_push(base);
4650 4571
4651 /* restart rx engine */ 4572 /* restart rx engine */
4652 nv_start_rx(dev); 4573 nv_start_rxtx(dev);
4653 nv_start_tx(dev);
4654 4574
4655 /* setup packet for tx */ 4575 /* setup packet for tx */
4656 pkt_len = ETH_DATA_LEN; 4576 pkt_len = ETH_DATA_LEN;
@@ -4668,7 +4588,7 @@ static int nv_loopback_test(struct net_device *dev)
4668 for (i = 0; i < pkt_len; i++) 4588 for (i = 0; i < pkt_len; i++)
4669 pkt_data[i] = (u8)(i & 0xff); 4589 pkt_data[i] = (u8)(i & 0xff);
4670 4590
4671 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4591 if (!nv_optimized(np)) {
4672 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); 4592 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
4673 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 4593 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4674 } else { 4594 } else {
@@ -4682,7 +4602,7 @@ static int nv_loopback_test(struct net_device *dev)
4682 msleep(500); 4602 msleep(500);
4683 4603
4684 /* check for rx of the packet */ 4604 /* check for rx of the packet */
4685 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4605 if (!nv_optimized(np)) {
4686 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); 4606 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
4687 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); 4607 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
4688 4608
@@ -4728,12 +4648,10 @@ static int nv_loopback_test(struct net_device *dev)
4728 dev_kfree_skb_any(tx_skb); 4648 dev_kfree_skb_any(tx_skb);
4729 out: 4649 out:
4730 /* stop engines */ 4650 /* stop engines */
4731 nv_stop_rx(dev); 4651 nv_stop_rxtx(dev);
4732 nv_stop_tx(dev);
4733 nv_txrx_reset(dev); 4652 nv_txrx_reset(dev);
4734 /* drain rx queue */ 4653 /* drain rx queue */
4735 nv_drain_rx(dev); 4654 nv_drain_rxtx(dev);
4736 nv_drain_tx(dev);
4737 4655
4738 if (netif_running(dev)) { 4656 if (netif_running(dev)) {
4739 writel(misc1_flags, base + NvRegMisc1); 4657 writel(misc1_flags, base + NvRegMisc1);
@@ -4771,12 +4689,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
4771 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4689 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4772 } 4690 }
4773 /* stop engines */ 4691 /* stop engines */
4774 nv_stop_rx(dev); 4692 nv_stop_rxtx(dev);
4775 nv_stop_tx(dev);
4776 nv_txrx_reset(dev); 4693 nv_txrx_reset(dev);
4777 /* drain rx queue */ 4694 /* drain rx queue */
4778 nv_drain_rx(dev); 4695 nv_drain_rxtx(dev);
4779 nv_drain_tx(dev);
4780 spin_unlock_irq(&np->lock); 4696 spin_unlock_irq(&np->lock);
4781 netif_tx_unlock_bh(dev); 4697 netif_tx_unlock_bh(dev);
4782 } 4698 }
@@ -4817,8 +4733,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
4817 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4733 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4818 pci_push(base); 4734 pci_push(base);
4819 /* restart rx engine */ 4735 /* restart rx engine */
4820 nv_start_rx(dev); 4736 nv_start_rxtx(dev);
4821 nv_start_tx(dev);
4822 netif_start_queue(dev); 4737 netif_start_queue(dev);
4823#ifdef CONFIG_FORCEDETH_NAPI 4738#ifdef CONFIG_FORCEDETH_NAPI
4824 napi_enable(&np->napi); 4739 napi_enable(&np->napi);
@@ -5047,8 +4962,7 @@ static int nv_open(struct net_device *dev)
5047 * to init hw */ 4962 * to init hw */
5048 np->linkspeed = 0; 4963 np->linkspeed = 0;
5049 ret = nv_update_linkspeed(dev); 4964 ret = nv_update_linkspeed(dev);
5050 nv_start_rx(dev); 4965 nv_start_rxtx(dev);
5051 nv_start_tx(dev);
5052 netif_start_queue(dev); 4966 netif_start_queue(dev);
5053#ifdef CONFIG_FORCEDETH_NAPI 4967#ifdef CONFIG_FORCEDETH_NAPI
5054 napi_enable(&np->napi); 4968 napi_enable(&np->napi);
@@ -5072,7 +4986,7 @@ static int nv_open(struct net_device *dev)
5072 4986
5073 return 0; 4987 return 0;
5074out_drain: 4988out_drain:
5075 drain_ring(dev); 4989 nv_drain_rxtx(dev);
5076 return ret; 4990 return ret;
5077} 4991}
5078 4992
@@ -5095,8 +5009,7 @@ static int nv_close(struct net_device *dev)
5095 5009
5096 netif_stop_queue(dev); 5010 netif_stop_queue(dev);
5097 spin_lock_irq(&np->lock); 5011 spin_lock_irq(&np->lock);
5098 nv_stop_tx(dev); 5012 nv_stop_rxtx(dev);
5099 nv_stop_rx(dev);
5100 nv_txrx_reset(dev); 5013 nv_txrx_reset(dev);
5101 5014
5102 /* disable interrupts on the nic or we will lock up */ 5015 /* disable interrupts on the nic or we will lock up */
@@ -5109,7 +5022,7 @@ static int nv_close(struct net_device *dev)
5109 5022
5110 nv_free_irq(dev); 5023 nv_free_irq(dev);
5111 5024
5112 drain_ring(dev); 5025 nv_drain_rxtx(dev);
5113 5026
5114 if (np->wolenabled) { 5027 if (np->wolenabled) {
5115 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 5028 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
@@ -5269,7 +5182,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5269 np->rx_ring_size = RX_RING_DEFAULT; 5182 np->rx_ring_size = RX_RING_DEFAULT;
5270 np->tx_ring_size = TX_RING_DEFAULT; 5183 np->tx_ring_size = TX_RING_DEFAULT;
5271 5184
5272 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 5185 if (!nv_optimized(np)) {
5273 np->rx_ring.orig = pci_alloc_consistent(pci_dev, 5186 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
5274 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), 5187 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
5275 &np->ring_addr); 5188 &np->ring_addr);
@@ -5291,7 +5204,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5291 5204
5292 dev->open = nv_open; 5205 dev->open = nv_open;
5293 dev->stop = nv_close; 5206 dev->stop = nv_close;
5294 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 5207
5208 if (!nv_optimized(np))
5295 dev->hard_start_xmit = nv_start_xmit; 5209 dev->hard_start_xmit = nv_start_xmit;
5296 else 5210 else
5297 dev->hard_start_xmit = nv_start_xmit_optimized; 5211 dev->hard_start_xmit = nv_start_xmit_optimized;
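
The forcedeth changes above are largely mechanical: the repeated DESC_VER_1/DESC_VER_2 test collapses into the new nv_optimized() helper, and the paired rx/tx start, stop and drain calls gain *_rxtx wrappers (drain_ring() becoming nv_drain_rxtx()). A reduced, standalone sketch of that consolidation; the real helpers operate on struct net_device and the full fe_priv, which is trimmed here to the one field the test needs:

#include <stdbool.h>
#include <stdio.h>

enum { DESC_VER_1 = 1, DESC_VER_2 = 2, DESC_VER_3 = 3 };

struct fe_priv {
        int desc_ver;
};

/* true only for the DESC_VER_3 "optimized" descriptor format */
static bool nv_optimized(struct fe_priv *np)
{
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
                return false;
        return true;
}

/* stand-ins for the real engine start/stop routines */
static void nv_start_rx(struct fe_priv *np) { (void)np; printf("start rx\n"); }
static void nv_start_tx(struct fe_priv *np) { (void)np; printf("start tx\n"); }
static void nv_stop_rx(struct fe_priv *np)  { (void)np; printf("stop rx\n"); }
static void nv_stop_tx(struct fe_priv *np)  { (void)np; printf("stop tx\n"); }

static void nv_start_rxtx(struct fe_priv *np)
{
        nv_start_rx(np);
        nv_start_tx(np);
}

static void nv_stop_rxtx(struct fe_priv *np)
{
        nv_stop_rx(np);
        nv_stop_tx(np);
}

int main(void)
{
        struct fe_priv np = { .desc_ver = DESC_VER_3 };

        printf("optimized ring format: %s\n", nv_optimized(&np) ? "yes" : "no");
        nv_start_rxtx(&np);
        nv_stop_rxtx(&np);
        return 0;
}

Folding the descriptor-version check into one predicate means a future ring format only has to be classified in a single place.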
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 940e2041ba38..67b4b0728fce 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1178,7 +1178,7 @@ static int __devinit find_phy(struct device_node *np,
1178 1178
1179 data = of_get_property(np, "fixed-link", NULL); 1179 data = of_get_property(np, "fixed-link", NULL);
1180 if (data) { 1180 if (data) {
1181 snprintf(fpi->bus_id, 16, PHY_ID_FMT, 0, *data); 1181 snprintf(fpi->bus_id, 16, "%x:%02x", 0, *data);
1182 return 0; 1182 return 0;
1183 } 1183 }
1184 1184
@@ -1202,7 +1202,7 @@ static int __devinit find_phy(struct device_node *np,
1202 if (!data || len != 4) 1202 if (!data || len != 4)
1203 goto out_put_mdio; 1203 goto out_put_mdio;
1204 1204
1205 snprintf(fpi->bus_id, 16, PHY_ID_FMT, res.start, *data); 1205 snprintf(fpi->bus_id, 16, "%x:%02x", res.start, *data);
1206 1206
1207out_put_mdio: 1207out_put_mdio:
1208 of_node_put(mdionode); 1208 of_node_put(mdionode);
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index b8e4a736a130..1620030cd33c 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -130,7 +130,7 @@ static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
130 * we get is an int, and the odds of multiple bitbang mdio buses 130 * we get is an int, and the odds of multiple bitbang mdio buses
131 * is low enough that it's not worth going too crazy. 131 * is low enough that it's not worth going too crazy.
132 */ 132 */
133 bus->id = res.start; 133 snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
134 134
135 data = of_get_property(np, "fsl,mdio-pin", &len); 135 data = of_get_property(np, "fsl,mdio-pin", &len);
136 if (!data || len != 4) 136 if (!data || len != 4)
@@ -307,7 +307,7 @@ static int __devinit fs_enet_mdio_probe(struct device *dev)
307 return -ENOMEM; 307 return -ENOMEM;
308 308
309 new_bus->name = "BB MII Bus", 309 new_bus->name = "BB MII Bus",
310 new_bus->id = pdev->id; 310 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
311 311
312 new_bus->phy_mask = ~0x9; 312 new_bus->phy_mask = ~0x9;
313 pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data; 313 pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data;
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index a89cf15090b8..ba75efc9f5b5 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -196,7 +196,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
196 if (ret) 196 if (ret)
197 return ret; 197 return ret;
198 198
199 new_bus->id = res.start; 199 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
200 200
201 fec->fecp = ioremap(res.start, res.end - res.start + 1); 201 fec->fecp = ioremap(res.start, res.end - res.start + 1);
202 if (!fec->fecp) 202 if (!fec->fecp)
@@ -309,7 +309,7 @@ static int __devinit fs_enet_fec_mdio_probe(struct device *dev)
309 new_bus->read = &fs_enet_fec_mii_read, 309 new_bus->read = &fs_enet_fec_mii_read,
310 new_bus->write = &fs_enet_fec_mii_write, 310 new_bus->write = &fs_enet_fec_mii_write,
311 new_bus->reset = &fs_enet_fec_mii_reset, 311 new_bus->reset = &fs_enet_fec_mii_reset,
312 new_bus->id = pdev->id; 312 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
313 313
314 pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data; 314 pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data;
315 315
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 601f93e482c6..c8c3df737d73 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1250,17 +1250,12 @@ static void gfar_timeout(struct net_device *dev)
1250} 1250}
1251 1251
1252/* Interrupt Handler for Transmit complete */ 1252/* Interrupt Handler for Transmit complete */
1253static irqreturn_t gfar_transmit(int irq, void *dev_id) 1253int gfar_clean_tx_ring(struct net_device *dev)
1254{ 1254{
1255 struct net_device *dev = (struct net_device *) dev_id;
1256 struct gfar_private *priv = netdev_priv(dev);
1257 struct txbd8 *bdp; 1255 struct txbd8 *bdp;
1256 struct gfar_private *priv = netdev_priv(dev);
1257 int howmany = 0;
1258 1258
1259 /* Clear IEVENT */
1260 gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
1261
1262 /* Lock priv */
1263 spin_lock(&priv->txlock);
1264 bdp = priv->dirty_tx; 1259 bdp = priv->dirty_tx;
1265 while ((bdp->status & TXBD_READY) == 0) { 1260 while ((bdp->status & TXBD_READY) == 0) {
1266 /* If dirty_tx and cur_tx are the same, then either the */ 1261 /* If dirty_tx and cur_tx are the same, then either the */
@@ -1269,7 +1264,7 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
1269 if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0)) 1264 if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
1270 break; 1265 break;
1271 1266
1272 dev->stats.tx_packets++; 1267 howmany++;
1273 1268
1274 /* Deferred means some collisions occurred during transmit, */ 1269 /* Deferred means some collisions occurred during transmit, */
1275 /* but we eventually sent the packet. */ 1270 /* but we eventually sent the packet. */
@@ -1278,11 +1273,15 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
1278 1273
1279 /* Free the sk buffer associated with this TxBD */ 1274 /* Free the sk buffer associated with this TxBD */
1280 dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); 1275 dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
1276
1281 priv->tx_skbuff[priv->skb_dirtytx] = NULL; 1277 priv->tx_skbuff[priv->skb_dirtytx] = NULL;
1282 priv->skb_dirtytx = 1278 priv->skb_dirtytx =
1283 (priv->skb_dirtytx + 1279 (priv->skb_dirtytx +
1284 1) & TX_RING_MOD_MASK(priv->tx_ring_size); 1280 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
1285 1281
1282 /* Clean BD length for empty detection */
1283 bdp->length = 0;
1284
1286 /* update bdp to point at next bd in the ring (wrapping if necessary) */ 1285 /* update bdp to point at next bd in the ring (wrapping if necessary) */
1287 if (bdp->status & TXBD_WRAP) 1286 if (bdp->status & TXBD_WRAP)
1288 bdp = priv->tx_bd_base; 1287 bdp = priv->tx_bd_base;
@@ -1297,6 +1296,25 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
1297 netif_wake_queue(dev); 1296 netif_wake_queue(dev);
1298 } /* while ((bdp->status & TXBD_READY) == 0) */ 1297 } /* while ((bdp->status & TXBD_READY) == 0) */
1299 1298
1299 dev->stats.tx_packets += howmany;
1300
1301 return howmany;
1302}
1303
1304/* Interrupt Handler for Transmit complete */
1305static irqreturn_t gfar_transmit(int irq, void *dev_id)
1306{
1307 struct net_device *dev = (struct net_device *) dev_id;
1308 struct gfar_private *priv = netdev_priv(dev);
1309
1310 /* Clear IEVENT */
1311 gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
1312
1313 /* Lock priv */
1314 spin_lock(&priv->txlock);
1315
1316 gfar_clean_tx_ring(dev);
1317
1300 /* If we are coalescing the interrupts, reset the timer */ 1318 /* If we are coalescing the interrupts, reset the timer */
1301 /* Otherwise, clear it */ 1319 /* Otherwise, clear it */
1302 if (likely(priv->txcoalescing)) { 1320 if (likely(priv->txcoalescing)) {
@@ -1392,15 +1410,15 @@ irqreturn_t gfar_receive(int irq, void *dev_id)
1392 unsigned long flags; 1410 unsigned long flags;
1393#endif 1411#endif
1394 1412
1395 /* Clear IEVENT, so rx interrupt isn't called again
1396 * because of this interrupt */
1397 gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
1398
1399 /* support NAPI */ 1413 /* support NAPI */
1400#ifdef CONFIG_GFAR_NAPI 1414#ifdef CONFIG_GFAR_NAPI
1415 /* Clear IEVENT, so interrupts aren't called again
1416 * because of the packets that have already arrived */
1417 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
1418
1401 if (netif_rx_schedule_prep(dev, &priv->napi)) { 1419 if (netif_rx_schedule_prep(dev, &priv->napi)) {
1402 tempval = gfar_read(&priv->regs->imask); 1420 tempval = gfar_read(&priv->regs->imask);
1403 tempval &= IMASK_RX_DISABLED; 1421 tempval &= IMASK_RTX_DISABLED;
1404 gfar_write(&priv->regs->imask, tempval); 1422 gfar_write(&priv->regs->imask, tempval);
1405 1423
1406 __netif_rx_schedule(dev, &priv->napi); 1424 __netif_rx_schedule(dev, &priv->napi);
@@ -1411,6 +1429,9 @@ irqreturn_t gfar_receive(int irq, void *dev_id)
1411 gfar_read(&priv->regs->imask)); 1429 gfar_read(&priv->regs->imask));
1412 } 1430 }
1413#else 1431#else
1432 /* Clear IEVENT, so rx interrupt isn't called again
1433 * because of this interrupt */
1434 gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
1414 1435
1415 spin_lock_irqsave(&priv->rxlock, flags); 1436 spin_lock_irqsave(&priv->rxlock, flags);
1416 gfar_clean_rx_ring(dev, priv->rx_ring_size); 1437 gfar_clean_rx_ring(dev, priv->rx_ring_size);
@@ -1580,6 +1601,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
1580 struct gfar_private *priv = container_of(napi, struct gfar_private, napi); 1601 struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
1581 struct net_device *dev = priv->dev; 1602 struct net_device *dev = priv->dev;
1582 int howmany; 1603 int howmany;
1604 unsigned long flags;
1605
1606 /* If we fail to get the lock, don't bother with the TX BDs */
1607 if (spin_trylock_irqsave(&priv->txlock, flags)) {
1608 gfar_clean_tx_ring(dev);
1609 spin_unlock_irqrestore(&priv->txlock, flags);
1610 }
1583 1611
1584 howmany = gfar_clean_rx_ring(dev, budget); 1612 howmany = gfar_clean_rx_ring(dev, budget);
1585 1613
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index ea8671f87bce..0d0883609469 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -126,9 +126,16 @@ extern const char gfar_driver_version[];
126#define DEFAULT_TXCOUNT 16 126#define DEFAULT_TXCOUNT 16
127#define DEFAULT_TXTIME 21 127#define DEFAULT_TXTIME 21
128 128
129#define DEFAULT_RXTIME 21
130
131/* Non NAPI Case */
132#ifndef CONFIG_GFAR_NAPI
129#define DEFAULT_RX_COALESCE 1 133#define DEFAULT_RX_COALESCE 1
130#define DEFAULT_RXCOUNT 16 134#define DEFAULT_RXCOUNT 16
131#define DEFAULT_RXTIME 21 135#else
136#define DEFAULT_RX_COALESCE 0
137#define DEFAULT_RXCOUNT 0
138#endif /* CONFIG_GFAR_NAPI */
132 139
133#define TBIPA_VALUE 0x1f 140#define TBIPA_VALUE 0x1f
134#define MIIMCFG_INIT_VALUE 0x00000007 141#define MIIMCFG_INIT_VALUE 0x00000007
@@ -242,6 +249,7 @@ extern const char gfar_driver_version[];
242#define IEVENT_PERR 0x00000001 249#define IEVENT_PERR 0x00000001
243#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0) 250#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0)
244#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF) 251#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF)
252#define IEVENT_RTX_MASK (IEVENT_RX_MASK | IEVENT_TX_MASK)
245#define IEVENT_ERR_MASK \ 253#define IEVENT_ERR_MASK \
246(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \ 254(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \
247 IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \ 255 IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \
@@ -269,11 +277,12 @@ extern const char gfar_driver_version[];
269#define IMASK_FIQ 0x00000004 277#define IMASK_FIQ 0x00000004
270#define IMASK_DPE 0x00000002 278#define IMASK_DPE 0x00000002
271#define IMASK_PERR 0x00000001 279#define IMASK_PERR 0x00000001
272#define IMASK_RX_DISABLED ~(IMASK_RXFEN0 | IMASK_BSY)
273#define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \ 280#define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \
274 IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \ 281 IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
275 IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \ 282 IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
276 | IMASK_PERR) 283 | IMASK_PERR)
284#define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \
285 & IMASK_DEFAULT)
277 286
278/* Fifo management */ 287/* Fifo management */
279#define FIFO_TX_THR_MASK 0x01ff 288#define FIFO_TX_THR_MASK 0x01ff
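
gianfar.h pairs the new IEVENT_RTX_MASK with IMASK_RTX_DISABLED, which is IMASK_DEFAULT with the RX-frame, TX-frame and busy enables cleared, so gfar_receive() can silence both datapath interrupts while NAPI polling runs. A small check of that mask arithmetic; the bit positions below are placeholders for illustration, not the driver's real IMASK_* values:

#include <stdio.h>

/* Placeholder bit assignments, for illustration only. */
#define IMASK_RXFEN0    (1u << 7)
#define IMASK_TXFEN     (1u << 6)
#define IMASK_BSY       (1u << 5)
#define IMASK_TXEEN     (1u << 4)
#define IMASK_EBERR     (1u << 3)

#define IMASK_DEFAULT   (IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY | \
                         IMASK_TXEEN | IMASK_EBERR)
#define IMASK_RTX_DISABLED      ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \
                                 & IMASK_DEFAULT)

int main(void)
{
        /* frame and busy enables drop out; error enables stay set */
        printf("IMASK_DEFAULT      = %#x\n", IMASK_DEFAULT);
        printf("IMASK_RTX_DISABLED = %#x\n", IMASK_RTX_DISABLED);
        return 0;
}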
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index 24327629bf03..b8898927236a 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -173,7 +173,7 @@ int gfar_mdio_probe(struct device *dev)
173 new_bus->read = &gfar_mdio_read, 173 new_bus->read = &gfar_mdio_read,
174 new_bus->write = &gfar_mdio_write, 174 new_bus->write = &gfar_mdio_write,
175 new_bus->reset = &gfar_mdio_reset, 175 new_bus->reset = &gfar_mdio_reset,
176 new_bus->id = pdev->id; 176 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
177 177
178 pdata = (struct gianfar_mdio_data *)pdev->dev.platform_data; 178 pdata = (struct gianfar_mdio_data *)pdev->dev.platform_data;
179 179
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index f2fff90d2c9d..16f9c756aa46 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -117,8 +117,8 @@ struct ixgb_buffer {
117 struct sk_buff *skb; 117 struct sk_buff *skb;
118 dma_addr_t dma; 118 dma_addr_t dma;
119 unsigned long time_stamp; 119 unsigned long time_stamp;
120 uint16_t length; 120 u16 length;
121 uint16_t next_to_watch; 121 u16 next_to_watch;
122}; 122};
123 123
124struct ixgb_desc_ring { 124struct ixgb_desc_ring {
@@ -152,11 +152,11 @@ struct ixgb_desc_ring {
152struct ixgb_adapter { 152struct ixgb_adapter {
153 struct timer_list watchdog_timer; 153 struct timer_list watchdog_timer;
154 struct vlan_group *vlgrp; 154 struct vlan_group *vlgrp;
155 uint32_t bd_number; 155 u32 bd_number;
156 uint32_t rx_buffer_len; 156 u32 rx_buffer_len;
157 uint32_t part_num; 157 u32 part_num;
158 uint16_t link_speed; 158 u16 link_speed;
159 uint16_t link_duplex; 159 u16 link_duplex;
160 spinlock_t tx_lock; 160 spinlock_t tx_lock;
161 struct work_struct tx_timeout_task; 161 struct work_struct tx_timeout_task;
162 162
@@ -167,19 +167,19 @@ struct ixgb_adapter {
167 struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp; 167 struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
168 unsigned int restart_queue; 168 unsigned int restart_queue;
169 unsigned long timeo_start; 169 unsigned long timeo_start;
170 uint32_t tx_cmd_type; 170 u32 tx_cmd_type;
171 uint64_t hw_csum_tx_good; 171 u64 hw_csum_tx_good;
172 uint64_t hw_csum_tx_error; 172 u64 hw_csum_tx_error;
173 uint32_t tx_int_delay; 173 u32 tx_int_delay;
174 uint32_t tx_timeout_count; 174 u32 tx_timeout_count;
175 bool tx_int_delay_enable; 175 bool tx_int_delay_enable;
176 bool detect_tx_hung; 176 bool detect_tx_hung;
177 177
178 /* RX */ 178 /* RX */
179 struct ixgb_desc_ring rx_ring; 179 struct ixgb_desc_ring rx_ring;
180 uint64_t hw_csum_rx_error; 180 u64 hw_csum_rx_error;
181 uint64_t hw_csum_rx_good; 181 u64 hw_csum_rx_good;
182 uint32_t rx_int_delay; 182 u32 rx_int_delay;
183 bool rx_csum; 183 bool rx_csum;
184 184
185 /* OS defined structs */ 185 /* OS defined structs */
@@ -192,7 +192,7 @@ struct ixgb_adapter {
192 struct ixgb_hw hw; 192 struct ixgb_hw hw;
193 u16 msg_enable; 193 u16 msg_enable;
194 struct ixgb_hw_stats stats; 194 struct ixgb_hw_stats stats;
195 uint32_t alloc_rx_buff_failed; 195 u32 alloc_rx_buff_failed;
196 bool have_msi; 196 bool have_msi;
197 unsigned long flags; 197 unsigned long flags;
198}; 198};
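
The ixgb hunks that follow are a mechanical conversion from userspace-style uintNN_t names to the kernel's native u8/u16/u32/u64 types. In-tree those come from <linux/types.h>; a standalone sketch only needs equivalent typedefs to mirror the converted fields:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins; the kernel gets these from <linux/types.h>. */
typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

/* Same field shapes as the converted struct ixgb_buffer above. */
struct buffer_example {
        u16 length;
        u16 next_to_watch;
};

int main(void)
{
        printf("sizeof(u16)=%zu, struct=%zu bytes\n",
               sizeof(u16), sizeof(struct buffer_example));
        return 0;
}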
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 8e9302fc8865..2f7ed52c7502 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -29,11 +29,11 @@
29#include "ixgb_hw.h" 29#include "ixgb_hw.h"
30#include "ixgb_ee.h" 30#include "ixgb_ee.h"
31/* Local prototypes */ 31/* Local prototypes */
32static uint16_t ixgb_shift_in_bits(struct ixgb_hw *hw); 32static u16 ixgb_shift_in_bits(struct ixgb_hw *hw);
33 33
34static void ixgb_shift_out_bits(struct ixgb_hw *hw, 34static void ixgb_shift_out_bits(struct ixgb_hw *hw,
35 uint16_t data, 35 u16 data,
36 uint16_t count); 36 u16 count);
37static void ixgb_standby_eeprom(struct ixgb_hw *hw); 37static void ixgb_standby_eeprom(struct ixgb_hw *hw);
38 38
39static bool ixgb_wait_eeprom_command(struct ixgb_hw *hw); 39static bool ixgb_wait_eeprom_command(struct ixgb_hw *hw);
@@ -48,7 +48,7 @@ static void ixgb_cleanup_eeprom(struct ixgb_hw *hw);
48 *****************************************************************************/ 48 *****************************************************************************/
49static void 49static void
50ixgb_raise_clock(struct ixgb_hw *hw, 50ixgb_raise_clock(struct ixgb_hw *hw,
51 uint32_t *eecd_reg) 51 u32 *eecd_reg)
52{ 52{
53 /* Raise the clock input to the EEPROM (by setting the SK bit), and then 53 /* Raise the clock input to the EEPROM (by setting the SK bit), and then
54 * wait 50 microseconds. 54 * wait 50 microseconds.
@@ -67,7 +67,7 @@ ixgb_raise_clock(struct ixgb_hw *hw,
67 *****************************************************************************/ 67 *****************************************************************************/
68static void 68static void
69ixgb_lower_clock(struct ixgb_hw *hw, 69ixgb_lower_clock(struct ixgb_hw *hw,
70 uint32_t *eecd_reg) 70 u32 *eecd_reg)
71{ 71{
72 /* Lower the clock input to the EEPROM (by clearing the SK bit), and then 72 /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
73 * wait 50 microseconds. 73 * wait 50 microseconds.
@@ -87,11 +87,11 @@ ixgb_lower_clock(struct ixgb_hw *hw,
87 *****************************************************************************/ 87 *****************************************************************************/
88static void 88static void
89ixgb_shift_out_bits(struct ixgb_hw *hw, 89ixgb_shift_out_bits(struct ixgb_hw *hw,
90 uint16_t data, 90 u16 data,
91 uint16_t count) 91 u16 count)
92{ 92{
93 uint32_t eecd_reg; 93 u32 eecd_reg;
94 uint32_t mask; 94 u32 mask;
95 95
96 /* We need to shift "count" bits out to the EEPROM. So, value in the 96 /* We need to shift "count" bits out to the EEPROM. So, value in the
97 * "data" parameter will be shifted out to the EEPROM one bit at a time. 97 * "data" parameter will be shifted out to the EEPROM one bit at a time.
@@ -133,12 +133,12 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
133 * 133 *
134 * hw - Struct containing variables accessed by shared code 134 * hw - Struct containing variables accessed by shared code
135 *****************************************************************************/ 135 *****************************************************************************/
136static uint16_t 136static u16
137ixgb_shift_in_bits(struct ixgb_hw *hw) 137ixgb_shift_in_bits(struct ixgb_hw *hw)
138{ 138{
139 uint32_t eecd_reg; 139 u32 eecd_reg;
140 uint32_t i; 140 u32 i;
141 uint16_t data; 141 u16 data;
142 142
143 /* In order to read a register from the EEPROM, we need to shift 16 bits 143 /* In order to read a register from the EEPROM, we need to shift 16 bits
144 * in from the EEPROM. Bits are "shifted in" by raising the clock input to 144 * in from the EEPROM. Bits are "shifted in" by raising the clock input to
@@ -179,7 +179,7 @@ ixgb_shift_in_bits(struct ixgb_hw *hw)
179static void 179static void
180ixgb_setup_eeprom(struct ixgb_hw *hw) 180ixgb_setup_eeprom(struct ixgb_hw *hw)
181{ 181{
182 uint32_t eecd_reg; 182 u32 eecd_reg;
183 183
184 eecd_reg = IXGB_READ_REG(hw, EECD); 184 eecd_reg = IXGB_READ_REG(hw, EECD);
185 185
@@ -201,7 +201,7 @@ ixgb_setup_eeprom(struct ixgb_hw *hw)
201static void 201static void
202ixgb_standby_eeprom(struct ixgb_hw *hw) 202ixgb_standby_eeprom(struct ixgb_hw *hw)
203{ 203{
204 uint32_t eecd_reg; 204 u32 eecd_reg;
205 205
206 eecd_reg = IXGB_READ_REG(hw, EECD); 206 eecd_reg = IXGB_READ_REG(hw, EECD);
207 207
@@ -235,7 +235,7 @@ ixgb_standby_eeprom(struct ixgb_hw *hw)
235static void 235static void
236ixgb_clock_eeprom(struct ixgb_hw *hw) 236ixgb_clock_eeprom(struct ixgb_hw *hw)
237{ 237{
238 uint32_t eecd_reg; 238 u32 eecd_reg;
239 239
240 eecd_reg = IXGB_READ_REG(hw, EECD); 240 eecd_reg = IXGB_READ_REG(hw, EECD);
241 241
@@ -259,7 +259,7 @@ ixgb_clock_eeprom(struct ixgb_hw *hw)
259static void 259static void
260ixgb_cleanup_eeprom(struct ixgb_hw *hw) 260ixgb_cleanup_eeprom(struct ixgb_hw *hw)
261{ 261{
262 uint32_t eecd_reg; 262 u32 eecd_reg;
263 263
264 eecd_reg = IXGB_READ_REG(hw, EECD); 264 eecd_reg = IXGB_READ_REG(hw, EECD);
265 265
@@ -285,8 +285,8 @@ ixgb_cleanup_eeprom(struct ixgb_hw *hw)
285static bool 285static bool
286ixgb_wait_eeprom_command(struct ixgb_hw *hw) 286ixgb_wait_eeprom_command(struct ixgb_hw *hw)
287{ 287{
288 uint32_t eecd_reg; 288 u32 eecd_reg;
289 uint32_t i; 289 u32 i;
290 290
291 /* Toggle the CS line. This in effect tells to EEPROM to actually execute 291 /* Toggle the CS line. This in effect tells to EEPROM to actually execute
292 * the command in question. 292 * the command in question.
@@ -325,13 +325,13 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
325bool 325bool
326ixgb_validate_eeprom_checksum(struct ixgb_hw *hw) 326ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
327{ 327{
328 uint16_t checksum = 0; 328 u16 checksum = 0;
329 uint16_t i; 329 u16 i;
330 330
331 for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) 331 for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
332 checksum += ixgb_read_eeprom(hw, i); 332 checksum += ixgb_read_eeprom(hw, i);
333 333
334 if(checksum == (uint16_t) EEPROM_SUM) 334 if(checksum == (u16) EEPROM_SUM)
335 return (true); 335 return (true);
336 else 336 else
337 return (false); 337 return (false);
@@ -348,13 +348,13 @@ ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
348void 348void
349ixgb_update_eeprom_checksum(struct ixgb_hw *hw) 349ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
350{ 350{
351 uint16_t checksum = 0; 351 u16 checksum = 0;
352 uint16_t i; 352 u16 i;
353 353
354 for(i = 0; i < EEPROM_CHECKSUM_REG; i++) 354 for(i = 0; i < EEPROM_CHECKSUM_REG; i++)
355 checksum += ixgb_read_eeprom(hw, i); 355 checksum += ixgb_read_eeprom(hw, i);
356 356
357 checksum = (uint16_t) EEPROM_SUM - checksum; 357 checksum = (u16) EEPROM_SUM - checksum;
358 358
359 ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum); 359 ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum);
360 return; 360 return;
@@ -372,7 +372,7 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
372 * 372 *
373 *****************************************************************************/ 373 *****************************************************************************/
374void 374void
375ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data) 375ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data)
376{ 376{
377 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 377 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
378 378
@@ -425,11 +425,11 @@ ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data)
425 * Returns: 425 * Returns:
426 * The 16-bit value read from the eeprom 426 * The 16-bit value read from the eeprom
427 *****************************************************************************/ 427 *****************************************************************************/
428uint16_t 428u16
429ixgb_read_eeprom(struct ixgb_hw *hw, 429ixgb_read_eeprom(struct ixgb_hw *hw,
430 uint16_t offset) 430 u16 offset)
431{ 431{
432 uint16_t data; 432 u16 data;
433 433
434 /* Prepare the EEPROM for reading */ 434 /* Prepare the EEPROM for reading */
435 ixgb_setup_eeprom(hw); 435 ixgb_setup_eeprom(hw);
@@ -463,8 +463,8 @@ ixgb_read_eeprom(struct ixgb_hw *hw,
463bool 463bool
464ixgb_get_eeprom_data(struct ixgb_hw *hw) 464ixgb_get_eeprom_data(struct ixgb_hw *hw)
465{ 465{
466 uint16_t i; 466 u16 i;
467 uint16_t checksum = 0; 467 u16 checksum = 0;
468 struct ixgb_ee_map_type *ee_map; 468 struct ixgb_ee_map_type *ee_map;
469 469
470 DEBUGFUNC("ixgb_get_eeprom_data"); 470 DEBUGFUNC("ixgb_get_eeprom_data");
@@ -473,13 +473,13 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
473 473
474 DEBUGOUT("ixgb_ee: Reading eeprom data\n"); 474 DEBUGOUT("ixgb_ee: Reading eeprom data\n");
475 for(i = 0; i < IXGB_EEPROM_SIZE ; i++) { 475 for(i = 0; i < IXGB_EEPROM_SIZE ; i++) {
476 uint16_t ee_data; 476 u16 ee_data;
477 ee_data = ixgb_read_eeprom(hw, i); 477 ee_data = ixgb_read_eeprom(hw, i);
478 checksum += ee_data; 478 checksum += ee_data;
479 hw->eeprom[i] = cpu_to_le16(ee_data); 479 hw->eeprom[i] = cpu_to_le16(ee_data);
480 } 480 }
481 481
482 if (checksum != (uint16_t) EEPROM_SUM) { 482 if (checksum != (u16) EEPROM_SUM) {
483 DEBUGOUT("ixgb_ee: Checksum invalid.\n"); 483 DEBUGOUT("ixgb_ee: Checksum invalid.\n");
484 /* clear the init_ctrl_reg_1 to signify that the cache is 484 /* clear the init_ctrl_reg_1 to signify that the cache is
485 * invalidated */ 485 * invalidated */
@@ -529,7 +529,7 @@ ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw)
529 * Word at indexed offset in eeprom, if valid, 0 otherwise. 529 * Word at indexed offset in eeprom, if valid, 0 otherwise.
530 ******************************************************************************/ 530 ******************************************************************************/
531__le16 531__le16
532ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index) 532ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index)
533{ 533{
534 534
535 if ((index < IXGB_EEPROM_SIZE) && 535 if ((index < IXGB_EEPROM_SIZE) &&
@@ -550,7 +550,7 @@ ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index)
550 ******************************************************************************/ 550 ******************************************************************************/
551void 551void
552ixgb_get_ee_mac_addr(struct ixgb_hw *hw, 552ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
553 uint8_t *mac_addr) 553 u8 *mac_addr)
554{ 554{
555 int i; 555 int i;
556 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 556 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
@@ -574,7 +574,7 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
574 * Returns: 574 * Returns:
575 * PBA number if EEPROM contents are valid, 0 otherwise 575 * PBA number if EEPROM contents are valid, 0 otherwise
576 ******************************************************************************/ 576 ******************************************************************************/
577uint32_t 577u32
578ixgb_get_ee_pba_number(struct ixgb_hw *hw) 578ixgb_get_ee_pba_number(struct ixgb_hw *hw)
579{ 579{
580 if (ixgb_check_and_get_eeprom_data(hw) == true) 580 if (ixgb_check_and_get_eeprom_data(hw) == true)
@@ -593,7 +593,7 @@ ixgb_get_ee_pba_number(struct ixgb_hw *hw)
593 * Returns: 593 * Returns:
594 * Device Id if EEPROM contents are valid, 0 otherwise 594 * Device Id if EEPROM contents are valid, 0 otherwise
595 ******************************************************************************/ 595 ******************************************************************************/
596uint16_t 596u16
597ixgb_get_ee_device_id(struct ixgb_hw *hw) 597ixgb_get_ee_device_id(struct ixgb_hw *hw)
598{ 598{
599 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 599 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
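The ixgb_ee.c hunks above only swap integer typedefs, but the checksum logic they pass through is worth restating: ixgb_get_eeprom_data sums every 16-bit EEPROM word and compares the truncated sum against EEPROM_SUM. A minimal standalone sketch of that scheme follows; the function name and the signature value are illustrative stand-ins, not driver definitions.

    /*
     * Sketch of the EEPROM checksum check: the last word of the image is
     * chosen so that the 16-bit sum of all words equals a fixed signature.
     * EE_SIGNATURE is an assumed value for illustration.
     */
    #include <stdint.h>
    #include <stdbool.h>

    #define EE_SIGNATURE 0xBABA    /* assumed signature constant */

    static bool eeprom_checksum_ok(const uint16_t *words, unsigned int nwords)
    {
        uint16_t sum = 0;
        unsigned int i;

        for (i = 0; i < nwords; i++)
            sum += words[i];       /* wraps modulo 2^16, as in the driver loop */

        return sum == EE_SIGNATURE;
    }

When the sum does not match, the driver above invalidates its cached copy (clearing init_ctrl_reg_1) so later accessors return zeroed data instead of garbage.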
diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h
index da62f58276fa..4b7bd0d4a8a9 100644
--- a/drivers/net/ixgb/ixgb_ee.h
+++ b/drivers/net/ixgb/ixgb_ee.h
@@ -75,7 +75,7 @@
75 75
76/* EEPROM structure */ 76/* EEPROM structure */
77struct ixgb_ee_map_type { 77struct ixgb_ee_map_type {
78 uint8_t mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; 78 u8 mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS];
79 __le16 compatibility; 79 __le16 compatibility;
80 __le16 reserved1[4]; 80 __le16 reserved1[4];
81 __le32 pba_number; 81 __le32 pba_number;
@@ -88,19 +88,19 @@ struct ixgb_ee_map_type {
88 __le16 oem_reserved[16]; 88 __le16 oem_reserved[16];
89 __le16 swdpins_reg; 89 __le16 swdpins_reg;
90 __le16 circuit_ctrl_reg; 90 __le16 circuit_ctrl_reg;
91 uint8_t d3_power; 91 u8 d3_power;
92 uint8_t d0_power; 92 u8 d0_power;
93 __le16 reserved2[28]; 93 __le16 reserved2[28];
94 __le16 checksum; 94 __le16 checksum;
95}; 95};
96 96
97/* EEPROM Functions */ 97/* EEPROM Functions */
98uint16_t ixgb_read_eeprom(struct ixgb_hw *hw, uint16_t reg); 98u16 ixgb_read_eeprom(struct ixgb_hw *hw, u16 reg);
99 99
100bool ixgb_validate_eeprom_checksum(struct ixgb_hw *hw); 100bool ixgb_validate_eeprom_checksum(struct ixgb_hw *hw);
101 101
102void ixgb_update_eeprom_checksum(struct ixgb_hw *hw); 102void ixgb_update_eeprom_checksum(struct ixgb_hw *hw);
103 103
104void ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t reg, uint16_t data); 104void ixgb_write_eeprom(struct ixgb_hw *hw, u16 reg, u16 data);
105 105
106#endif /* IXGB_EE_H */ 106#endif /* IXGB_EE_H */
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 45ddf804fe5e..8464d8a013b0 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -185,7 +185,7 @@ ixgb_set_pauseparam(struct net_device *netdev,
185 return 0; 185 return 0;
186} 186}
187 187
188static uint32_t 188static u32
189ixgb_get_rx_csum(struct net_device *netdev) 189ixgb_get_rx_csum(struct net_device *netdev)
190{ 190{
191 struct ixgb_adapter *adapter = netdev_priv(netdev); 191 struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -194,7 +194,7 @@ ixgb_get_rx_csum(struct net_device *netdev)
194} 194}
195 195
196static int 196static int
197ixgb_set_rx_csum(struct net_device *netdev, uint32_t data) 197ixgb_set_rx_csum(struct net_device *netdev, u32 data)
198{ 198{
199 struct ixgb_adapter *adapter = netdev_priv(netdev); 199 struct ixgb_adapter *adapter = netdev_priv(netdev);
200 200
@@ -209,14 +209,14 @@ ixgb_set_rx_csum(struct net_device *netdev, uint32_t data)
209 return 0; 209 return 0;
210} 210}
211 211
212static uint32_t 212static u32
213ixgb_get_tx_csum(struct net_device *netdev) 213ixgb_get_tx_csum(struct net_device *netdev)
214{ 214{
215 return (netdev->features & NETIF_F_HW_CSUM) != 0; 215 return (netdev->features & NETIF_F_HW_CSUM) != 0;
216} 216}
217 217
218static int 218static int
219ixgb_set_tx_csum(struct net_device *netdev, uint32_t data) 219ixgb_set_tx_csum(struct net_device *netdev, u32 data)
220{ 220{
221 if (data) 221 if (data)
222 netdev->features |= NETIF_F_HW_CSUM; 222 netdev->features |= NETIF_F_HW_CSUM;
@@ -227,7 +227,7 @@ ixgb_set_tx_csum(struct net_device *netdev, uint32_t data)
227} 227}
228 228
229static int 229static int
230ixgb_set_tso(struct net_device *netdev, uint32_t data) 230ixgb_set_tso(struct net_device *netdev, u32 data)
231{ 231{
232 if(data) 232 if(data)
233 netdev->features |= NETIF_F_TSO; 233 netdev->features |= NETIF_F_TSO;
@@ -236,7 +236,7 @@ ixgb_set_tso(struct net_device *netdev, uint32_t data)
236 return 0; 236 return 0;
237} 237}
238 238
239static uint32_t 239static u32
240ixgb_get_msglevel(struct net_device *netdev) 240ixgb_get_msglevel(struct net_device *netdev)
241{ 241{
242 struct ixgb_adapter *adapter = netdev_priv(netdev); 242 struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -244,7 +244,7 @@ ixgb_get_msglevel(struct net_device *netdev)
244} 244}
245 245
246static void 246static void
247ixgb_set_msglevel(struct net_device *netdev, uint32_t data) 247ixgb_set_msglevel(struct net_device *netdev, u32 data)
248{ 248{
249 struct ixgb_adapter *adapter = netdev_priv(netdev); 249 struct ixgb_adapter *adapter = netdev_priv(netdev);
250 adapter->msg_enable = data; 250 adapter->msg_enable = data;
@@ -254,7 +254,7 @@ ixgb_set_msglevel(struct net_device *netdev, uint32_t data)
254static int 254static int
255ixgb_get_regs_len(struct net_device *netdev) 255ixgb_get_regs_len(struct net_device *netdev)
256{ 256{
257#define IXGB_REG_DUMP_LEN 136*sizeof(uint32_t) 257#define IXGB_REG_DUMP_LEN 136*sizeof(u32)
258 return IXGB_REG_DUMP_LEN; 258 return IXGB_REG_DUMP_LEN;
259} 259}
260 260
@@ -264,9 +264,9 @@ ixgb_get_regs(struct net_device *netdev,
264{ 264{
265 struct ixgb_adapter *adapter = netdev_priv(netdev); 265 struct ixgb_adapter *adapter = netdev_priv(netdev);
266 struct ixgb_hw *hw = &adapter->hw; 266 struct ixgb_hw *hw = &adapter->hw;
267 uint32_t *reg = p; 267 u32 *reg = p;
268 uint32_t *reg_start = reg; 268 u32 *reg_start = reg;
269 uint8_t i; 269 u8 i;
270 270
271 /* the 1 (one) below indicates an attempt at versioning, if the 271 /* the 1 (one) below indicates an attempt at versioning, if the
272 * interface in ethtool or the driver changes, this 1 should be 272 * interface in ethtool or the driver changes, this 1 should be
@@ -395,7 +395,7 @@ ixgb_get_regs(struct net_device *netdev,
395 *reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */ 395 *reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */
396 *reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */ 396 *reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */
397 397
398 regs->len = (reg - reg_start) * sizeof(uint32_t); 398 regs->len = (reg - reg_start) * sizeof(u32);
399} 399}
400 400
401static int 401static int
@@ -407,7 +407,7 @@ ixgb_get_eeprom_len(struct net_device *netdev)
407 407
408static int 408static int
409ixgb_get_eeprom(struct net_device *netdev, 409ixgb_get_eeprom(struct net_device *netdev,
410 struct ethtool_eeprom *eeprom, uint8_t *bytes) 410 struct ethtool_eeprom *eeprom, u8 *bytes)
411{ 411{
412 struct ixgb_adapter *adapter = netdev_priv(netdev); 412 struct ixgb_adapter *adapter = netdev_priv(netdev);
413 struct ixgb_hw *hw = &adapter->hw; 413 struct ixgb_hw *hw = &adapter->hw;
@@ -445,7 +445,7 @@ ixgb_get_eeprom(struct net_device *netdev,
445 eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i)); 445 eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i));
446 } 446 }
447 447
448 memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1), 448 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
449 eeprom->len); 449 eeprom->len);
450 kfree(eeprom_buff); 450 kfree(eeprom_buff);
451 451
@@ -455,14 +455,14 @@ geeprom_error:
455 455
456static int 456static int
457ixgb_set_eeprom(struct net_device *netdev, 457ixgb_set_eeprom(struct net_device *netdev,
458 struct ethtool_eeprom *eeprom, uint8_t *bytes) 458 struct ethtool_eeprom *eeprom, u8 *bytes)
459{ 459{
460 struct ixgb_adapter *adapter = netdev_priv(netdev); 460 struct ixgb_adapter *adapter = netdev_priv(netdev);
461 struct ixgb_hw *hw = &adapter->hw; 461 struct ixgb_hw *hw = &adapter->hw;
462 uint16_t *eeprom_buff; 462 u16 *eeprom_buff;
463 void *ptr; 463 void *ptr;
464 int max_len, first_word, last_word; 464 int max_len, first_word, last_word;
465 uint16_t i; 465 u16 i;
466 466
467 if(eeprom->len == 0) 467 if(eeprom->len == 0)
468 return -EINVAL; 468 return -EINVAL;
@@ -563,12 +563,12 @@ ixgb_set_ringparam(struct net_device *netdev,
563 if(netif_running(adapter->netdev)) 563 if(netif_running(adapter->netdev))
564 ixgb_down(adapter, true); 564 ixgb_down(adapter, true);
565 565
566 rxdr->count = max(ring->rx_pending,(uint32_t)MIN_RXD); 566 rxdr->count = max(ring->rx_pending,(u32)MIN_RXD);
567 rxdr->count = min(rxdr->count,(uint32_t)MAX_RXD); 567 rxdr->count = min(rxdr->count,(u32)MAX_RXD);
568 rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE); 568 rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
569 569
570 txdr->count = max(ring->tx_pending,(uint32_t)MIN_TXD); 570 txdr->count = max(ring->tx_pending,(u32)MIN_TXD);
571 txdr->count = min(txdr->count,(uint32_t)MAX_TXD); 571 txdr->count = min(txdr->count,(u32)MAX_TXD);
572 txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE); 572 txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
573 573
574 if(netif_running(adapter->netdev)) { 574 if(netif_running(adapter->netdev)) {
@@ -624,7 +624,7 @@ ixgb_led_blink_callback(unsigned long data)
624} 624}
625 625
626static int 626static int
627ixgb_phys_id(struct net_device *netdev, uint32_t data) 627ixgb_phys_id(struct net_device *netdev, u32 data)
628{ 628{
629 struct ixgb_adapter *adapter = netdev_priv(netdev); 629 struct ixgb_adapter *adapter = netdev_priv(netdev);
630 630
@@ -660,7 +660,7 @@ ixgb_get_sset_count(struct net_device *netdev, int sset)
660 660
661static void 661static void
662ixgb_get_ethtool_stats(struct net_device *netdev, 662ixgb_get_ethtool_stats(struct net_device *netdev,
663 struct ethtool_stats *stats, uint64_t *data) 663 struct ethtool_stats *stats, u64 *data)
664{ 664{
665 struct ixgb_adapter *adapter = netdev_priv(netdev); 665 struct ixgb_adapter *adapter = netdev_priv(netdev);
666 int i; 666 int i;
@@ -669,12 +669,12 @@ ixgb_get_ethtool_stats(struct net_device *netdev,
669 for(i = 0; i < IXGB_STATS_LEN; i++) { 669 for(i = 0; i < IXGB_STATS_LEN; i++) {
670 char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset; 670 char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset;
671 data[i] = (ixgb_gstrings_stats[i].sizeof_stat == 671 data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
672 sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p; 672 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
673 } 673 }
674} 674}
675 675
676static void 676static void
677ixgb_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) 677ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
678{ 678{
679 int i; 679 int i;
680 680
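The ixgb_get_ethtool_stats loop above is a table-driven copy: each ixgb_gstrings_stats entry records an offset into the adapter struct and the field size, so one loop exports both 32-bit and 64-bit counters. A self-contained sketch of the pattern, with hypothetical struct and table names, not the driver's own:

    /* Table-driven stats export: offset + size per counter. */
    #include <stdint.h>
    #include <stddef.h>

    struct adapter_counters {
        uint64_t rx_packets;
        uint32_t restart_queue;
    };

    struct stat_desc {
        size_t offset;
        size_t size;
    };

    static const struct stat_desc my_stats[] = {
        { offsetof(struct adapter_counters, rx_packets),    sizeof(uint64_t) },
        { offsetof(struct adapter_counters, restart_queue), sizeof(uint32_t) },
    };

    static void fill_stats(const struct adapter_counters *c, uint64_t *out)
    {
        size_t i;

        for (i = 0; i < sizeof(my_stats) / sizeof(my_stats[0]); i++) {
            const char *p = (const char *)c + my_stats[i].offset;

            /* widen 32-bit counters into the 64-bit ethtool output array */
            out[i] = (my_stats[i].size == sizeof(uint64_t)) ?
                     *(const uint64_t *)p : *(const uint32_t *)p;
        }
    }

The design keeps the string table, the offsets, and the copy loop in lockstep, so adding a counter only means adding one table entry.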
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 8a04bbd258a6..04d2003e24e1 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -35,9 +35,9 @@
35 35
36/* Local function prototypes */ 36/* Local function prototypes */
37 37
38static uint32_t ixgb_hash_mc_addr(struct ixgb_hw *hw, uint8_t * mc_addr); 38static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr);
39 39
40static void ixgb_mta_set(struct ixgb_hw *hw, uint32_t hash_value); 40static void ixgb_mta_set(struct ixgb_hw *hw, u32 hash_value);
41 41
42static void ixgb_get_bus_info(struct ixgb_hw *hw); 42static void ixgb_get_bus_info(struct ixgb_hw *hw);
43 43
@@ -55,18 +55,18 @@ static void ixgb_clear_vfta(struct ixgb_hw *hw);
55 55
56static void ixgb_init_rx_addrs(struct ixgb_hw *hw); 56static void ixgb_init_rx_addrs(struct ixgb_hw *hw);
57 57
58static uint16_t ixgb_read_phy_reg(struct ixgb_hw *hw, 58static u16 ixgb_read_phy_reg(struct ixgb_hw *hw,
59 uint32_t reg_address, 59 u32 reg_address,
60 uint32_t phy_address, 60 u32 phy_address,
61 uint32_t device_type); 61 u32 device_type);
62 62
63static bool ixgb_setup_fc(struct ixgb_hw *hw); 63static bool ixgb_setup_fc(struct ixgb_hw *hw);
64 64
65static bool mac_addr_valid(uint8_t *mac_addr); 65static bool mac_addr_valid(u8 *mac_addr);
66 66
67static uint32_t ixgb_mac_reset(struct ixgb_hw *hw) 67static u32 ixgb_mac_reset(struct ixgb_hw *hw)
68{ 68{
69 uint32_t ctrl_reg; 69 u32 ctrl_reg;
70 70
71 ctrl_reg = IXGB_CTRL0_RST | 71 ctrl_reg = IXGB_CTRL0_RST |
72 IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */ 72 IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */
@@ -117,8 +117,8 @@ static uint32_t ixgb_mac_reset(struct ixgb_hw *hw)
117bool 117bool
118ixgb_adapter_stop(struct ixgb_hw *hw) 118ixgb_adapter_stop(struct ixgb_hw *hw)
119{ 119{
120 uint32_t ctrl_reg; 120 u32 ctrl_reg;
121 uint32_t icr_reg; 121 u32 icr_reg;
122 122
123 DEBUGFUNC("ixgb_adapter_stop"); 123 DEBUGFUNC("ixgb_adapter_stop");
124 124
@@ -179,8 +179,8 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
179static ixgb_xpak_vendor 179static ixgb_xpak_vendor
180ixgb_identify_xpak_vendor(struct ixgb_hw *hw) 180ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
181{ 181{
182 uint32_t i; 182 u32 i;
183 uint16_t vendor_name[5]; 183 u16 vendor_name[5];
184 ixgb_xpak_vendor xpak_vendor; 184 ixgb_xpak_vendor xpak_vendor;
185 185
186 DEBUGFUNC("ixgb_identify_xpak_vendor"); 186 DEBUGFUNC("ixgb_identify_xpak_vendor");
@@ -292,8 +292,8 @@ ixgb_identify_phy(struct ixgb_hw *hw)
292bool 292bool
293ixgb_init_hw(struct ixgb_hw *hw) 293ixgb_init_hw(struct ixgb_hw *hw)
294{ 294{
295 uint32_t i; 295 u32 i;
296 uint32_t ctrl_reg; 296 u32 ctrl_reg;
297 bool status; 297 bool status;
298 298
299 DEBUGFUNC("ixgb_init_hw"); 299 DEBUGFUNC("ixgb_init_hw");
@@ -377,7 +377,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
377static void 377static void
378ixgb_init_rx_addrs(struct ixgb_hw *hw) 378ixgb_init_rx_addrs(struct ixgb_hw *hw)
379{ 379{
380 uint32_t i; 380 u32 i;
381 381
382 DEBUGFUNC("ixgb_init_rx_addrs"); 382 DEBUGFUNC("ixgb_init_rx_addrs");
383 383
@@ -437,13 +437,13 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
437 *****************************************************************************/ 437 *****************************************************************************/
438void 438void
439ixgb_mc_addr_list_update(struct ixgb_hw *hw, 439ixgb_mc_addr_list_update(struct ixgb_hw *hw,
440 uint8_t *mc_addr_list, 440 u8 *mc_addr_list,
441 uint32_t mc_addr_count, 441 u32 mc_addr_count,
442 uint32_t pad) 442 u32 pad)
443{ 443{
444 uint32_t hash_value; 444 u32 hash_value;
445 uint32_t i; 445 u32 i;
446 uint32_t rar_used_count = 1; /* RAR[0] is used for our MAC address */ 446 u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */
447 447
448 DEBUGFUNC("ixgb_mc_addr_list_update"); 448 DEBUGFUNC("ixgb_mc_addr_list_update");
449 449
@@ -515,11 +515,11 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
515 * Returns: 515 * Returns:
516 * The hash value 516 * The hash value
517 *****************************************************************************/ 517 *****************************************************************************/
518static uint32_t 518static u32
519ixgb_hash_mc_addr(struct ixgb_hw *hw, 519ixgb_hash_mc_addr(struct ixgb_hw *hw,
520 uint8_t *mc_addr) 520 u8 *mc_addr)
521{ 521{
522 uint32_t hash_value = 0; 522 u32 hash_value = 0;
523 523
524 DEBUGFUNC("ixgb_hash_mc_addr"); 524 DEBUGFUNC("ixgb_hash_mc_addr");
525 525
@@ -533,18 +533,18 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
533 case 0: 533 case 0:
534 /* [47:36] i.e. 0x563 for above example address */ 534 /* [47:36] i.e. 0x563 for above example address */
535 hash_value = 535 hash_value =
536 ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); 536 ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
537 break; 537 break;
538 case 1: /* [46:35] i.e. 0xAC6 for above example address */ 538 case 1: /* [46:35] i.e. 0xAC6 for above example address */
539 hash_value = 539 hash_value =
540 ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5)); 540 ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
541 break; 541 break;
542 case 2: /* [45:34] i.e. 0x5D8 for above example address */ 542 case 2: /* [45:34] i.e. 0x5D8 for above example address */
543 hash_value = 543 hash_value =
544 ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); 544 ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
545 break; 545 break;
546 case 3: /* [43:32] i.e. 0x634 for above example address */ 546 case 3: /* [43:32] i.e. 0x634 for above example address */
547 hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8)); 547 hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
548 break; 548 break;
549 default: 549 default:
550 /* Invalid mc_filter_type, what should we do? */ 550 /* Invalid mc_filter_type, what should we do? */
@@ -565,10 +565,10 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
565 *****************************************************************************/ 565 *****************************************************************************/
566static void 566static void
567ixgb_mta_set(struct ixgb_hw *hw, 567ixgb_mta_set(struct ixgb_hw *hw,
568 uint32_t hash_value) 568 u32 hash_value)
569{ 569{
570 uint32_t hash_bit, hash_reg; 570 u32 hash_bit, hash_reg;
571 uint32_t mta_reg; 571 u32 mta_reg;
572 572
573 /* The MTA is a register array of 128 32-bit registers. 573 /* The MTA is a register array of 128 32-bit registers.
574 * It is treated like an array of 4096 bits. We want to set 574 * It is treated like an array of 4096 bits. We want to set
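As the comment above says, the MTA is 128 32-bit registers treated as a 4096-bit table, indexed by the 12-bit hash that ixgb_hash_mc_addr derives from the top bits of the multicast address. A sketch of that addressing, assuming the conventional split (the driver's exact open-coded arithmetic may differ in detail):

    #include <stdint.h>

    /* Map a 12-bit multicast hash onto (register, bit) in a 128 x 32-bit table. */
    static void mta_locate(uint32_t hash_value, uint32_t *reg, uint32_t *bit)
    {
        *reg = (hash_value >> 5) & 0x7f;   /* hash / 32: which of 128 registers */
        *bit = hash_value & 0x1f;          /* hash % 32: which of its 32 bits   */
    }

    /* usage sketch:  mta_locate(hash, &r, &b);  mta[r] |= 1u << b; */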
@@ -599,23 +599,23 @@ ixgb_mta_set(struct ixgb_hw *hw,
599 *****************************************************************************/ 599 *****************************************************************************/
600void 600void
601ixgb_rar_set(struct ixgb_hw *hw, 601ixgb_rar_set(struct ixgb_hw *hw,
602 uint8_t *addr, 602 u8 *addr,
603 uint32_t index) 603 u32 index)
604{ 604{
605 uint32_t rar_low, rar_high; 605 u32 rar_low, rar_high;
606 606
607 DEBUGFUNC("ixgb_rar_set"); 607 DEBUGFUNC("ixgb_rar_set");
608 608
609 /* HW expects these in little endian so we reverse the byte order 609 /* HW expects these in little endian so we reverse the byte order
610 * from network order (big endian) to little endian 610 * from network order (big endian) to little endian
611 */ 611 */
612 rar_low = ((uint32_t) addr[0] | 612 rar_low = ((u32) addr[0] |
613 ((uint32_t)addr[1] << 8) | 613 ((u32)addr[1] << 8) |
614 ((uint32_t)addr[2] << 16) | 614 ((u32)addr[2] << 16) |
615 ((uint32_t)addr[3] << 24)); 615 ((u32)addr[3] << 24));
616 616
617 rar_high = ((uint32_t) addr[4] | 617 rar_high = ((u32) addr[4] |
618 ((uint32_t)addr[5] << 8) | 618 ((u32)addr[5] << 8) |
619 IXGB_RAH_AV); 619 IXGB_RAH_AV);
620 620
621 IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); 621 IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
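The ixgb_rar_set hunk above packs the six address bytes little-endian into a low/high register pair, with the address-valid flag set in the high word. A standalone sketch of that packing; the position of the valid bit is an assumption here, not taken from the header:

    #include <stdint.h>

    #define RAH_AV 0x80000000u    /* assumed "address valid" bit */

    static void pack_rar(const uint8_t addr[6], uint32_t *lo, uint32_t *hi)
    {
        *lo = (uint32_t)addr[0] |
              ((uint32_t)addr[1] << 8) |
              ((uint32_t)addr[2] << 16) |
              ((uint32_t)addr[3] << 24);
        *hi = (uint32_t)addr[4] |
              ((uint32_t)addr[5] << 8) |
              RAH_AV;
    }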
@@ -632,8 +632,8 @@ ixgb_rar_set(struct ixgb_hw *hw,
632 *****************************************************************************/ 632 *****************************************************************************/
633void 633void
634ixgb_write_vfta(struct ixgb_hw *hw, 634ixgb_write_vfta(struct ixgb_hw *hw,
635 uint32_t offset, 635 u32 offset,
636 uint32_t value) 636 u32 value)
637{ 637{
638 IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value); 638 IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
639 return; 639 return;
@@ -647,7 +647,7 @@ ixgb_write_vfta(struct ixgb_hw *hw,
647static void 647static void
648ixgb_clear_vfta(struct ixgb_hw *hw) 648ixgb_clear_vfta(struct ixgb_hw *hw)
649{ 649{
650 uint32_t offset; 650 u32 offset;
651 651
652 for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++) 652 for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
653 IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0); 653 IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
@@ -663,8 +663,8 @@ ixgb_clear_vfta(struct ixgb_hw *hw)
663static bool 663static bool
664ixgb_setup_fc(struct ixgb_hw *hw) 664ixgb_setup_fc(struct ixgb_hw *hw)
665{ 665{
666 uint32_t ctrl_reg; 666 u32 ctrl_reg;
667 uint32_t pap_reg = 0; /* by default, assume no pause time */ 667 u32 pap_reg = 0; /* by default, assume no pause time */
668 bool status = true; 668 bool status = true;
669 669
670 DEBUGFUNC("ixgb_setup_fc"); 670 DEBUGFUNC("ixgb_setup_fc");
@@ -762,15 +762,15 @@ ixgb_setup_fc(struct ixgb_hw *hw)
762 * This requires that first an address cycle command is sent, followed by a 762 * This requires that first an address cycle command is sent, followed by a
763 * read command. 763 * read command.
764 *****************************************************************************/ 764 *****************************************************************************/
765static uint16_t 765static u16
766ixgb_read_phy_reg(struct ixgb_hw *hw, 766ixgb_read_phy_reg(struct ixgb_hw *hw,
767 uint32_t reg_address, 767 u32 reg_address,
768 uint32_t phy_address, 768 u32 phy_address,
769 uint32_t device_type) 769 u32 device_type)
770{ 770{
771 uint32_t i; 771 u32 i;
772 uint32_t data; 772 u32 data;
773 uint32_t command = 0; 773 u32 command = 0;
774 774
775 ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS); 775 ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
776 ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS); 776 ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
@@ -835,7 +835,7 @@ ixgb_read_phy_reg(struct ixgb_hw *hw,
835 */ 835 */
836 data = IXGB_READ_REG(hw, MSRWD); 836 data = IXGB_READ_REG(hw, MSRWD);
837 data >>= IXGB_MSRWD_READ_DATA_SHIFT; 837 data >>= IXGB_MSRWD_READ_DATA_SHIFT;
838 return((uint16_t) data); 838 return((u16) data);
839} 839}
840 840
841/****************************************************************************** 841/******************************************************************************
@@ -857,20 +857,20 @@ ixgb_read_phy_reg(struct ixgb_hw *hw,
857 *****************************************************************************/ 857 *****************************************************************************/
858static void 858static void
859ixgb_write_phy_reg(struct ixgb_hw *hw, 859ixgb_write_phy_reg(struct ixgb_hw *hw,
860 uint32_t reg_address, 860 u32 reg_address,
861 uint32_t phy_address, 861 u32 phy_address,
862 uint32_t device_type, 862 u32 device_type,
863 uint16_t data) 863 u16 data)
864{ 864{
865 uint32_t i; 865 u32 i;
866 uint32_t command = 0; 866 u32 command = 0;
867 867
868 ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS); 868 ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
869 ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS); 869 ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
870 ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE); 870 ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
871 871
872 /* Put the data in the MDIO Read/Write Data register */ 872 /* Put the data in the MDIO Read/Write Data register */
873 IXGB_WRITE_REG(hw, MSRWD, (uint32_t)data); 873 IXGB_WRITE_REG(hw, MSRWD, (u32)data);
874 874
875 /* Setup and write the address cycle command */ 875 /* Setup and write the address cycle command */
876 command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | 876 command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
@@ -939,8 +939,8 @@ ixgb_write_phy_reg(struct ixgb_hw *hw,
939void 939void
940ixgb_check_for_link(struct ixgb_hw *hw) 940ixgb_check_for_link(struct ixgb_hw *hw)
941{ 941{
942 uint32_t status_reg; 942 u32 status_reg;
943 uint32_t xpcss_reg; 943 u32 xpcss_reg;
944 944
945 DEBUGFUNC("ixgb_check_for_link"); 945 DEBUGFUNC("ixgb_check_for_link");
946 946
@@ -975,7 +975,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
975 *****************************************************************************/ 975 *****************************************************************************/
976bool ixgb_check_for_bad_link(struct ixgb_hw *hw) 976bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
977{ 977{
978 uint32_t newLFC, newRFC; 978 u32 newLFC, newRFC;
979 bool bad_link_returncode = false; 979 bool bad_link_returncode = false;
980 980
981 if (hw->phy_type == ixgb_phy_type_txn17401) { 981 if (hw->phy_type == ixgb_phy_type_txn17401) {
@@ -1002,7 +1002,7 @@ bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
1002static void 1002static void
1003ixgb_clear_hw_cntrs(struct ixgb_hw *hw) 1003ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
1004{ 1004{
1005 volatile uint32_t temp_reg; 1005 volatile u32 temp_reg;
1006 1006
1007 DEBUGFUNC("ixgb_clear_hw_cntrs"); 1007 DEBUGFUNC("ixgb_clear_hw_cntrs");
1008 1008
@@ -1083,7 +1083,7 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
1083void 1083void
1084ixgb_led_on(struct ixgb_hw *hw) 1084ixgb_led_on(struct ixgb_hw *hw)
1085{ 1085{
1086 uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0); 1086 u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
1087 1087
1088 /* To turn on the LED, clear software-definable pin 0 (SDP0). */ 1088 /* To turn on the LED, clear software-definable pin 0 (SDP0). */
1089 ctrl0_reg &= ~IXGB_CTRL0_SDP0; 1089 ctrl0_reg &= ~IXGB_CTRL0_SDP0;
@@ -1099,7 +1099,7 @@ ixgb_led_on(struct ixgb_hw *hw)
1099void 1099void
1100ixgb_led_off(struct ixgb_hw *hw) 1100ixgb_led_off(struct ixgb_hw *hw)
1101{ 1101{
1102 uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0); 1102 u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
1103 1103
1104 /* To turn off the LED, set software-definable pin 0 (SDP0). */ 1104 /* To turn off the LED, set software-definable pin 0 (SDP0). */
1105 ctrl0_reg |= IXGB_CTRL0_SDP0; 1105 ctrl0_reg |= IXGB_CTRL0_SDP0;
@@ -1115,7 +1115,7 @@ ixgb_led_off(struct ixgb_hw *hw)
1115static void 1115static void
1116ixgb_get_bus_info(struct ixgb_hw *hw) 1116ixgb_get_bus_info(struct ixgb_hw *hw)
1117{ 1117{
1118 uint32_t status_reg; 1118 u32 status_reg;
1119 1119
1120 status_reg = IXGB_READ_REG(hw, STATUS); 1120 status_reg = IXGB_READ_REG(hw, STATUS);
1121 1121
@@ -1155,7 +1155,7 @@ ixgb_get_bus_info(struct ixgb_hw *hw)
1155 * 1155 *
1156 *****************************************************************************/ 1156 *****************************************************************************/
1157static bool 1157static bool
1158mac_addr_valid(uint8_t *mac_addr) 1158mac_addr_valid(u8 *mac_addr)
1159{ 1159{
1160 bool is_valid = true; 1160 bool is_valid = true;
1161 DEBUGFUNC("mac_addr_valid"); 1161 DEBUGFUNC("mac_addr_valid");
@@ -1193,8 +1193,8 @@ static bool
1193ixgb_link_reset(struct ixgb_hw *hw) 1193ixgb_link_reset(struct ixgb_hw *hw)
1194{ 1194{
1195 bool link_status = false; 1195 bool link_status = false;
1196 uint8_t wait_retries = MAX_RESET_ITERATIONS; 1196 u8 wait_retries = MAX_RESET_ITERATIONS;
1197 uint8_t lrst_retries = MAX_RESET_ITERATIONS; 1197 u8 lrst_retries = MAX_RESET_ITERATIONS;
1198 1198
1199 do { 1199 do {
1200 /* Reset the link */ 1200 /* Reset the link */
@@ -1224,7 +1224,7 @@ static void
1224ixgb_optics_reset(struct ixgb_hw *hw) 1224ixgb_optics_reset(struct ixgb_hw *hw)
1225{ 1225{
1226 if (hw->phy_type == ixgb_phy_type_txn17401) { 1226 if (hw->phy_type == ixgb_phy_type_txn17401) {
1227 uint16_t mdio_reg; 1227 u16 mdio_reg;
1228 1228
1229 ixgb_write_phy_reg(hw, 1229 ixgb_write_phy_reg(hw,
1230 MDIO_PMA_PMD_CR1, 1230 MDIO_PMA_PMD_CR1,
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index d4e95665ce9e..39cfa47bea69 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -538,8 +538,8 @@ struct ixgb_rx_desc {
538 __le64 buff_addr; 538 __le64 buff_addr;
539 __le16 length; 539 __le16 length;
540 __le16 reserved; 540 __le16 reserved;
541 uint8_t status; 541 u8 status;
542 uint8_t errors; 542 u8 errors;
543 __le16 special; 543 __le16 special;
544}; 544};
545 545
@@ -570,8 +570,8 @@ struct ixgb_rx_desc {
570struct ixgb_tx_desc { 570struct ixgb_tx_desc {
571 __le64 buff_addr; 571 __le64 buff_addr;
572 __le32 cmd_type_len; 572 __le32 cmd_type_len;
573 uint8_t status; 573 u8 status;
574 uint8_t popts; 574 u8 popts;
575 __le16 vlan; 575 __le16 vlan;
576}; 576};
577 577
@@ -595,15 +595,15 @@ struct ixgb_tx_desc {
595#define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */ 595#define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */
596 596
597struct ixgb_context_desc { 597struct ixgb_context_desc {
598 uint8_t ipcss; 598 u8 ipcss;
599 uint8_t ipcso; 599 u8 ipcso;
600 __le16 ipcse; 600 __le16 ipcse;
601 uint8_t tucss; 601 u8 tucss;
602 uint8_t tucso; 602 u8 tucso;
603 __le16 tucse; 603 __le16 tucse;
604 __le32 cmd_type_len; 604 __le32 cmd_type_len;
605 uint8_t status; 605 u8 status;
606 uint8_t hdr_len; 606 u8 hdr_len;
607 __le16 mss; 607 __le16 mss;
608}; 608};
609 609
@@ -637,32 +637,32 @@ struct ixgb_context_desc {
637 637
638/* This structure takes a 64k flash and maps it for identification commands */ 638/* This structure takes a 64k flash and maps it for identification commands */
639struct ixgb_flash_buffer { 639struct ixgb_flash_buffer {
640 uint8_t manufacturer_id; 640 u8 manufacturer_id;
641 uint8_t device_id; 641 u8 device_id;
642 uint8_t filler1[0x2AA8]; 642 u8 filler1[0x2AA8];
643 uint8_t cmd2; 643 u8 cmd2;
644 uint8_t filler2[0x2AAA]; 644 u8 filler2[0x2AAA];
645 uint8_t cmd1; 645 u8 cmd1;
646 uint8_t filler3[0xAAAA]; 646 u8 filler3[0xAAAA];
647}; 647};
648 648
649/* 649/*
650 * This is a little-endian specific check. 650 * This is a little-endian specific check.
651 */ 651 */
652#define IS_MULTICAST(Address) \ 652#define IS_MULTICAST(Address) \
653 (bool)(((uint8_t *)(Address))[0] & ((uint8_t)0x01)) 653 (bool)(((u8 *)(Address))[0] & ((u8)0x01))
654 654
655/* 655/*
656 * Check whether an address is broadcast. 656 * Check whether an address is broadcast.
657 */ 657 */
658#define IS_BROADCAST(Address) \ 658#define IS_BROADCAST(Address) \
659 ((((uint8_t *)(Address))[0] == ((uint8_t)0xff)) && (((uint8_t *)(Address))[1] == ((uint8_t)0xff))) 659 ((((u8 *)(Address))[0] == ((u8)0xff)) && (((u8 *)(Address))[1] == ((u8)0xff)))
660 660
661/* Flow control parameters */ 661/* Flow control parameters */
662struct ixgb_fc { 662struct ixgb_fc {
663 uint32_t high_water; /* Flow Control High-water */ 663 u32 high_water; /* Flow Control High-water */
664 uint32_t low_water; /* Flow Control Low-water */ 664 u32 low_water; /* Flow Control Low-water */
665 uint16_t pause_time; /* Flow Control Pause timer */ 665 u16 pause_time; /* Flow Control Pause timer */
666 bool send_xon; /* Flow control send XON */ 666 bool send_xon; /* Flow control send XON */
667 ixgb_fc_type type; /* Type of flow control */ 667 ixgb_fc_type type; /* Type of flow control */
668}; 668};
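The IS_MULTICAST and IS_BROADCAST macros above test the low bit of the first octet and the first two octets respectively. A typed sketch of the same tests (illustrative, not driver code); note the broadcast macro only samples the first two octets as a shortcut for the all-ones address:

    #include <stdint.h>
    #include <stdbool.h>

    static bool eth_is_multicast(const uint8_t *addr)
    {
        /* group bit: least-significant bit of the first octet */
        return addr[0] & 0x01;
    }

    static bool eth_is_broadcast_prefix(const uint8_t *addr)
    {
        /* same shortcut as the macro: check only the first two octets */
        return addr[0] == 0xff && addr[1] == 0xff;
    }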
@@ -685,101 +685,101 @@ struct ixgb_bus {
685}; 685};
686 686
687struct ixgb_hw { 687struct ixgb_hw {
688 uint8_t __iomem *hw_addr;/* Base Address of the hardware */ 688 u8 __iomem *hw_addr;/* Base Address of the hardware */
689 void *back; /* Pointer to OS-dependent struct */ 689 void *back; /* Pointer to OS-dependent struct */
690 struct ixgb_fc fc; /* Flow control parameters */ 690 struct ixgb_fc fc; /* Flow control parameters */
691 struct ixgb_bus bus; /* Bus parameters */ 691 struct ixgb_bus bus; /* Bus parameters */
692 uint32_t phy_id; /* Phy Identifier */ 692 u32 phy_id; /* Phy Identifier */
693 uint32_t phy_addr; /* XGMII address of Phy */ 693 u32 phy_addr; /* XGMII address of Phy */
694 ixgb_mac_type mac_type; /* Identifier for MAC controller */ 694 ixgb_mac_type mac_type; /* Identifier for MAC controller */
695 ixgb_phy_type phy_type; /* Transceiver/phy identifier */ 695 ixgb_phy_type phy_type; /* Transceiver/phy identifier */
696 uint32_t max_frame_size; /* Maximum frame size supported */ 696 u32 max_frame_size; /* Maximum frame size supported */
697 uint32_t mc_filter_type; /* Multicast filter hash type */ 697 u32 mc_filter_type; /* Multicast filter hash type */
698 uint32_t num_mc_addrs; /* Number of current Multicast addrs */ 698 u32 num_mc_addrs; /* Number of current Multicast addrs */
699 uint8_t curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; /* Individual address currently programmed in MAC */ 699 u8 curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; /* Individual address currently programmed in MAC */
700 uint32_t num_tx_desc; /* Number of Transmit descriptors */ 700 u32 num_tx_desc; /* Number of Transmit descriptors */
701 uint32_t num_rx_desc; /* Number of Receive descriptors */ 701 u32 num_rx_desc; /* Number of Receive descriptors */
702 uint32_t rx_buffer_size; /* Size of Receive buffer */ 702 u32 rx_buffer_size; /* Size of Receive buffer */
703 bool link_up; /* true if link is valid */ 703 bool link_up; /* true if link is valid */
704 bool adapter_stopped; /* State of adapter */ 704 bool adapter_stopped; /* State of adapter */
705 uint16_t device_id; /* device id from PCI configuration space */ 705 u16 device_id; /* device id from PCI configuration space */
706 uint16_t vendor_id; /* vendor id from PCI configuration space */ 706 u16 vendor_id; /* vendor id from PCI configuration space */
707 uint8_t revision_id; /* revision id from PCI configuration space */ 707 u8 revision_id; /* revision id from PCI configuration space */
708 uint16_t subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */ 708 u16 subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */
709 uint16_t subsystem_id; /* subsystem id from PCI configuration space */ 709 u16 subsystem_id; /* subsystem id from PCI configuration space */
710 uint32_t bar0; /* Base Address registers */ 710 u32 bar0; /* Base Address registers */
711 uint32_t bar1; 711 u32 bar1;
712 uint32_t bar2; 712 u32 bar2;
713 uint32_t bar3; 713 u32 bar3;
714 uint16_t pci_cmd_word; /* PCI command register id from PCI configuration space */ 714 u16 pci_cmd_word; /* PCI command register id from PCI configuration space */
715 __le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */ 715 __le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */
716 unsigned long io_base; /* Our I/O mapped location */ 716 unsigned long io_base; /* Our I/O mapped location */
717 uint32_t lastLFC; 717 u32 lastLFC;
718 uint32_t lastRFC; 718 u32 lastRFC;
719}; 719};
720 720
721/* Statistics reported by the hardware */ 721/* Statistics reported by the hardware */
722struct ixgb_hw_stats { 722struct ixgb_hw_stats {
723 uint64_t tprl; 723 u64 tprl;
724 uint64_t tprh; 724 u64 tprh;
725 uint64_t gprcl; 725 u64 gprcl;
726 uint64_t gprch; 726 u64 gprch;
727 uint64_t bprcl; 727 u64 bprcl;
728 uint64_t bprch; 728 u64 bprch;
729 uint64_t mprcl; 729 u64 mprcl;
730 uint64_t mprch; 730 u64 mprch;
731 uint64_t uprcl; 731 u64 uprcl;
732 uint64_t uprch; 732 u64 uprch;
733 uint64_t vprcl; 733 u64 vprcl;
734 uint64_t vprch; 734 u64 vprch;
735 uint64_t jprcl; 735 u64 jprcl;
736 uint64_t jprch; 736 u64 jprch;
737 uint64_t gorcl; 737 u64 gorcl;
738 uint64_t gorch; 738 u64 gorch;
739 uint64_t torl; 739 u64 torl;
740 uint64_t torh; 740 u64 torh;
741 uint64_t rnbc; 741 u64 rnbc;
742 uint64_t ruc; 742 u64 ruc;
743 uint64_t roc; 743 u64 roc;
744 uint64_t rlec; 744 u64 rlec;
745 uint64_t crcerrs; 745 u64 crcerrs;
746 uint64_t icbc; 746 u64 icbc;
747 uint64_t ecbc; 747 u64 ecbc;
748 uint64_t mpc; 748 u64 mpc;
749 uint64_t tptl; 749 u64 tptl;
750 uint64_t tpth; 750 u64 tpth;
751 uint64_t gptcl; 751 u64 gptcl;
752 uint64_t gptch; 752 u64 gptch;
753 uint64_t bptcl; 753 u64 bptcl;
754 uint64_t bptch; 754 u64 bptch;
755 uint64_t mptcl; 755 u64 mptcl;
756 uint64_t mptch; 756 u64 mptch;
757 uint64_t uptcl; 757 u64 uptcl;
758 uint64_t uptch; 758 u64 uptch;
759 uint64_t vptcl; 759 u64 vptcl;
760 uint64_t vptch; 760 u64 vptch;
761 uint64_t jptcl; 761 u64 jptcl;
762 uint64_t jptch; 762 u64 jptch;
763 uint64_t gotcl; 763 u64 gotcl;
764 uint64_t gotch; 764 u64 gotch;
765 uint64_t totl; 765 u64 totl;
766 uint64_t toth; 766 u64 toth;
767 uint64_t dc; 767 u64 dc;
768 uint64_t plt64c; 768 u64 plt64c;
769 uint64_t tsctc; 769 u64 tsctc;
770 uint64_t tsctfc; 770 u64 tsctfc;
771 uint64_t ibic; 771 u64 ibic;
772 uint64_t rfc; 772 u64 rfc;
773 uint64_t lfc; 773 u64 lfc;
774 uint64_t pfrc; 774 u64 pfrc;
775 uint64_t pftc; 775 u64 pftc;
776 uint64_t mcfrc; 776 u64 mcfrc;
777 uint64_t mcftc; 777 u64 mcftc;
778 uint64_t xonrxc; 778 u64 xonrxc;
779 uint64_t xontxc; 779 u64 xontxc;
780 uint64_t xoffrxc; 780 u64 xoffrxc;
781 uint64_t xofftxc; 781 u64 xofftxc;
782 uint64_t rjc; 782 u64 rjc;
783}; 783};
784 784
785/* Function Prototypes */ 785/* Function Prototypes */
@@ -790,34 +790,34 @@ extern void ixgb_check_for_link(struct ixgb_hw *hw);
790extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw); 790extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
791 791
792extern void ixgb_rar_set(struct ixgb_hw *hw, 792extern void ixgb_rar_set(struct ixgb_hw *hw,
793 uint8_t *addr, 793 u8 *addr,
794 uint32_t index); 794 u32 index);
795 795
796 796
797/* Filters (multicast, vlan, receive) */ 797/* Filters (multicast, vlan, receive) */
798extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw, 798extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
799 uint8_t *mc_addr_list, 799 u8 *mc_addr_list,
800 uint32_t mc_addr_count, 800 u32 mc_addr_count,
801 uint32_t pad); 801 u32 pad);
802 802
803/* Vfta functions */ 803/* Vfta functions */
804extern void ixgb_write_vfta(struct ixgb_hw *hw, 804extern void ixgb_write_vfta(struct ixgb_hw *hw,
805 uint32_t offset, 805 u32 offset,
806 uint32_t value); 806 u32 value);
807 807
808/* Access functions to eeprom data */ 808/* Access functions to eeprom data */
809void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr); 809void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr);
810uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw); 810u32 ixgb_get_ee_pba_number(struct ixgb_hw *hw);
811uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw); 811u16 ixgb_get_ee_device_id(struct ixgb_hw *hw);
812bool ixgb_get_eeprom_data(struct ixgb_hw *hw); 812bool ixgb_get_eeprom_data(struct ixgb_hw *hw);
813__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index); 813__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index);
814 814
815/* Everything else */ 815/* Everything else */
816void ixgb_led_on(struct ixgb_hw *hw); 816void ixgb_led_on(struct ixgb_hw *hw);
817void ixgb_led_off(struct ixgb_hw *hw); 817void ixgb_led_off(struct ixgb_hw *hw);
818void ixgb_write_pci_cfg(struct ixgb_hw *hw, 818void ixgb_write_pci_cfg(struct ixgb_hw *hw,
819 uint32_t reg, 819 u32 reg,
820 uint16_t * value); 820 u16 * value);
821 821
822 822
823#endif /* _IXGB_HW_H_ */ 823#endif /* _IXGB_HW_H_ */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index c68b182af008..cb8daddafa29 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -108,8 +108,8 @@ static void ixgb_tx_timeout(struct net_device *dev);
108static void ixgb_tx_timeout_task(struct work_struct *work); 108static void ixgb_tx_timeout_task(struct work_struct *work);
109static void ixgb_vlan_rx_register(struct net_device *netdev, 109static void ixgb_vlan_rx_register(struct net_device *netdev,
110 struct vlan_group *grp); 110 struct vlan_group *grp);
111static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); 111static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
112static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 112static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
113static void ixgb_restore_vlan(struct ixgb_adapter *adapter); 113static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
114 114
115#ifdef CONFIG_NET_POLL_CONTROLLER 115#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -271,7 +271,7 @@ ixgb_up(struct ixgb_adapter *adapter)
271 271
272 if(hw->max_frame_size > 272 if(hw->max_frame_size >
273 IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) { 273 IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
274 uint32_t ctrl0 = IXGB_READ_REG(hw, CTRL0); 274 u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
275 275
276 if(!(ctrl0 & IXGB_CTRL0_JFE)) { 276 if(!(ctrl0 & IXGB_CTRL0_JFE)) {
277 ctrl0 |= IXGB_CTRL0_JFE; 277 ctrl0 |= IXGB_CTRL0_JFE;
@@ -718,9 +718,9 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
718static void 718static void
719ixgb_configure_tx(struct ixgb_adapter *adapter) 719ixgb_configure_tx(struct ixgb_adapter *adapter)
720{ 720{
721 uint64_t tdba = adapter->tx_ring.dma; 721 u64 tdba = adapter->tx_ring.dma;
722 uint32_t tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc); 722 u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
723 uint32_t tctl; 723 u32 tctl;
724 struct ixgb_hw *hw = &adapter->hw; 724 struct ixgb_hw *hw = &adapter->hw;
725 725
726 /* Setup the Base and Length of the Tx Descriptor Ring 726 /* Setup the Base and Length of the Tx Descriptor Ring
@@ -806,7 +806,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
806static void 806static void
807ixgb_setup_rctl(struct ixgb_adapter *adapter) 807ixgb_setup_rctl(struct ixgb_adapter *adapter)
808{ 808{
809 uint32_t rctl; 809 u32 rctl;
810 810
811 rctl = IXGB_READ_REG(&adapter->hw, RCTL); 811 rctl = IXGB_READ_REG(&adapter->hw, RCTL);
812 812
@@ -841,12 +841,12 @@ ixgb_setup_rctl(struct ixgb_adapter *adapter)
841static void 841static void
842ixgb_configure_rx(struct ixgb_adapter *adapter) 842ixgb_configure_rx(struct ixgb_adapter *adapter)
843{ 843{
844 uint64_t rdba = adapter->rx_ring.dma; 844 u64 rdba = adapter->rx_ring.dma;
845 uint32_t rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc); 845 u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
846 struct ixgb_hw *hw = &adapter->hw; 846 struct ixgb_hw *hw = &adapter->hw;
847 uint32_t rctl; 847 u32 rctl;
848 uint32_t rxcsum; 848 u32 rxcsum;
849 uint32_t rxdctl; 849 u32 rxdctl;
850 850
851 /* make sure receives are disabled while setting up the descriptors */ 851 /* make sure receives are disabled while setting up the descriptors */
852 852
@@ -1079,7 +1079,7 @@ ixgb_set_multi(struct net_device *netdev)
1079 struct ixgb_adapter *adapter = netdev_priv(netdev); 1079 struct ixgb_adapter *adapter = netdev_priv(netdev);
1080 struct ixgb_hw *hw = &adapter->hw; 1080 struct ixgb_hw *hw = &adapter->hw;
1081 struct dev_mc_list *mc_ptr; 1081 struct dev_mc_list *mc_ptr;
1082 uint32_t rctl; 1082 u32 rctl;
1083 int i; 1083 int i;
1084 1084
1085 /* Check for Promiscuous and All Multicast modes */ 1085 /* Check for Promiscuous and All Multicast modes */
@@ -1099,7 +1099,7 @@ ixgb_set_multi(struct net_device *netdev)
1099 rctl |= IXGB_RCTL_MPE; 1099 rctl |= IXGB_RCTL_MPE;
1100 IXGB_WRITE_REG(hw, RCTL, rctl); 1100 IXGB_WRITE_REG(hw, RCTL, rctl);
1101 } else { 1101 } else {
1102 uint8_t mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES * 1102 u8 mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES *
1103 IXGB_ETH_LENGTH_OF_ADDRESS]; 1103 IXGB_ETH_LENGTH_OF_ADDRESS];
1104 1104
1105 IXGB_WRITE_REG(hw, RCTL, rctl); 1105 IXGB_WRITE_REG(hw, RCTL, rctl);
@@ -1183,8 +1183,8 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1183{ 1183{
1184 struct ixgb_context_desc *context_desc; 1184 struct ixgb_context_desc *context_desc;
1185 unsigned int i; 1185 unsigned int i;
1186 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 1186 u8 ipcss, ipcso, tucss, tucso, hdr_len;
1187 uint16_t ipcse, tucse, mss; 1187 u16 ipcse, tucse, mss;
1188 int err; 1188 int err;
1189 1189
1190 if (likely(skb_is_gso(skb))) { 1190 if (likely(skb_is_gso(skb))) {
@@ -1249,7 +1249,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1249{ 1249{
1250 struct ixgb_context_desc *context_desc; 1250 struct ixgb_context_desc *context_desc;
1251 unsigned int i; 1251 unsigned int i;
1252 uint8_t css, cso; 1252 u8 css, cso;
1253 1253
1254 if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 1254 if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1255 struct ixgb_buffer *buffer_info; 1255 struct ixgb_buffer *buffer_info;
@@ -1265,7 +1265,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1265 context_desc->tucso = cso; 1265 context_desc->tucso = cso;
1266 context_desc->tucse = 0; 1266 context_desc->tucse = 0;
1267 /* zero out any previously existing data in one instruction */ 1267 /* zero out any previously existing data in one instruction */
1268 *(uint32_t *)&(context_desc->ipcss) = 0; 1268 *(u32 *)&(context_desc->ipcss) = 0;
1269 context_desc->status = 0; 1269 context_desc->status = 0;
1270 context_desc->hdr_len = 0; 1270 context_desc->hdr_len = 0;
1271 context_desc->mss = 0; 1271 context_desc->mss = 0;
@@ -1372,9 +1372,9 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1372 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; 1372 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1373 struct ixgb_tx_desc *tx_desc = NULL; 1373 struct ixgb_tx_desc *tx_desc = NULL;
1374 struct ixgb_buffer *buffer_info; 1374 struct ixgb_buffer *buffer_info;
1375 uint32_t cmd_type_len = adapter->tx_cmd_type; 1375 u32 cmd_type_len = adapter->tx_cmd_type;
1376 uint8_t status = 0; 1376 u8 status = 0;
1377 uint8_t popts = 0; 1377 u8 popts = 0;
1378 unsigned int i; 1378 unsigned int i;
1379 1379
1380 if(tx_flags & IXGB_TX_FLAGS_TSO) { 1380 if(tx_flags & IXGB_TX_FLAGS_TSO) {
@@ -1750,7 +1750,7 @@ ixgb_intr(int irq, void *data)
1750 struct net_device *netdev = data; 1750 struct net_device *netdev = data;
1751 struct ixgb_adapter *adapter = netdev_priv(netdev); 1751 struct ixgb_adapter *adapter = netdev_priv(netdev);
1752 struct ixgb_hw *hw = &adapter->hw; 1752 struct ixgb_hw *hw = &adapter->hw;
1753 uint32_t icr = IXGB_READ_REG(hw, ICR); 1753 u32 icr = IXGB_READ_REG(hw, ICR);
1754#ifndef CONFIG_IXGB_NAPI 1754#ifndef CONFIG_IXGB_NAPI
1755 unsigned int i; 1755 unsigned int i;
1756#endif 1756#endif
@@ -1843,7 +1843,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1843 1843
1844 ixgb_unmap_and_free_tx_resource(adapter, buffer_info); 1844 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1845 1845
1846 *(uint32_t *)&(tx_desc->status) = 0; 1846 *(u32 *)&(tx_desc->status) = 0;
1847 1847
1848 cleaned = (i == eop); 1848 cleaned = (i == eop);
1849 if(++i == tx_ring->count) i = 0; 1849 if(++i == tx_ring->count) i = 0;
@@ -1948,7 +1948,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1948 struct pci_dev *pdev = adapter->pdev; 1948 struct pci_dev *pdev = adapter->pdev;
1949 struct ixgb_rx_desc *rx_desc, *next_rxd; 1949 struct ixgb_rx_desc *rx_desc, *next_rxd;
1950 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer; 1950 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1951 uint32_t length; 1951 u32 length;
1952 unsigned int i, j; 1952 unsigned int i, j;
1953 bool cleaned = false; 1953 bool cleaned = false;
1954 1954
@@ -2166,7 +2166,7 @@ static void
2166ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) 2166ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2167{ 2167{
2168 struct ixgb_adapter *adapter = netdev_priv(netdev); 2168 struct ixgb_adapter *adapter = netdev_priv(netdev);
2169 uint32_t ctrl, rctl; 2169 u32 ctrl, rctl;
2170 2170
2171 ixgb_irq_disable(adapter); 2171 ixgb_irq_disable(adapter);
2172 adapter->vlgrp = grp; 2172 adapter->vlgrp = grp;
@@ -2203,10 +2203,10 @@ ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2203} 2203}
2204 2204
2205static void 2205static void
2206ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid) 2206ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2207{ 2207{
2208 struct ixgb_adapter *adapter = netdev_priv(netdev); 2208 struct ixgb_adapter *adapter = netdev_priv(netdev);
2209 uint32_t vfta, index; 2209 u32 vfta, index;
2210 2210
2211 /* add VID to filter table */ 2211 /* add VID to filter table */
2212 2212
@@ -2217,10 +2217,10 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2217} 2217}
2218 2218
2219static void 2219static void
2220ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid) 2220ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2221{ 2221{
2222 struct ixgb_adapter *adapter = netdev_priv(netdev); 2222 struct ixgb_adapter *adapter = netdev_priv(netdev);
2223 uint32_t vfta, index; 2223 u32 vfta, index;
2224 2224
2225 ixgb_irq_disable(adapter); 2225 ixgb_irq_disable(adapter);
2226 2226
@@ -2244,7 +2244,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
2244 ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp); 2244 ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2245 2245
2246 if(adapter->vlgrp) { 2246 if(adapter->vlgrp) {
2247 uint16_t vid; 2247 u16 vid;
2248 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 2248 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2249 if(!vlan_group_get_device(adapter->vlgrp, vid)) 2249 if(!vlan_group_get_device(adapter->vlgrp, vid))
2250 continue; 2250 continue;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 489c7c3b90d9..d513bb8a4902 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -246,7 +246,7 @@ static int macb_mii_init(struct macb *bp)
246 bp->mii_bus.read = &macb_mdio_read; 246 bp->mii_bus.read = &macb_mdio_read;
247 bp->mii_bus.write = &macb_mdio_write; 247 bp->mii_bus.write = &macb_mdio_write;
248 bp->mii_bus.reset = &macb_mdio_reset; 248 bp->mii_bus.reset = &macb_mdio_reset;
249 bp->mii_bus.id = bp->pdev->id; 249 snprintf(bp->mii_bus.id, MII_BUS_ID_SIZE, "%x", bp->pdev->id);
250 bp->mii_bus.priv = bp; 250 bp->mii_bus.priv = bp;
251 bp->mii_bus.dev = &bp->dev->dev; 251 bp->mii_bus.dev = &bp->dev->dev;
252 pdata = bp->pdev->dev.platform_data; 252 pdata = bp->pdev->dev.platform_data;
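The macb hunk above switches mii_bus.id from an integer assignment to snprintf into a fixed-size string, matching the phylib change that made the bus id a character array. A small sketch of bounded id formatting under that assumption; ID_SIZE stands in for MII_BUS_ID_SIZE, whose actual value is not shown here:

    #include <stdio.h>

    #define ID_SIZE 17    /* illustrative buffer size */

    static void format_bus_id(char id[ID_SIZE], int pdev_id)
    {
        /* hex-format the platform device id; always NUL-terminated */
        snprintf(id, ID_SIZE, "%x", pdev_id);
    }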
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index d65cadef4d22..601ffd69ebc8 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -3,7 +3,8 @@
3 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com> 3 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
4 * 4 *
5 * Based on the 64360 driver from: 5 * Based on the 64360 driver from:
6 * Copyright (C) 2002 rabeeh@galileo.co.il 6 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
7 * Rabeeh Khoury <rabeeh@marvell.com>
7 * 8 *
8 * Copyright (C) 2003 PMC-Sierra, Inc., 9 * Copyright (C) 2003 PMC-Sierra, Inc.,
9 * written by Manish Lachwani 10 * written by Manish Lachwani
@@ -16,6 +17,9 @@
16 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> 17 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
17 * <sjhill@realitydiluted.com> 18 * <sjhill@realitydiluted.com>
18 * 19 *
20 * Copyright (C) 2007-2008 Marvell Semiconductor
21 * Lennert Buytenhek <buytenh@marvell.com>
22 *
19 * This program is free software; you can redistribute it and/or 23 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License 24 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version 2 25 * as published by the Free Software Foundation; either version 2
@@ -63,20 +67,6 @@
63#define MV643XX_TX_FAST_REFILL 67#define MV643XX_TX_FAST_REFILL
64#undef MV643XX_COAL 68#undef MV643XX_COAL
65 69
66/*
67 * Number of RX / TX descriptors on RX / TX rings.
68 * Note that allocating RX descriptors is done by allocating the RX
69 * ring AND a preallocated RX buffers (skb's) for each descriptor.
70 * The TX descriptors only allocates the TX descriptors ring,
71 * with no pre allocated TX buffers (skb's are allocated by higher layers.
72 */
73
74/* Default TX ring size is 1000 descriptors */
75#define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000
76
77/* Default RX ring size is 400 descriptors */
78#define MV643XX_DEFAULT_RX_QUEUE_SIZE 400
79
80#define MV643XX_TX_COAL 100 70#define MV643XX_TX_COAL 100
81#ifdef MV643XX_COAL 71#ifdef MV643XX_COAL
82#define MV643XX_RX_COAL 100 72#define MV643XX_RX_COAL 100
@@ -434,14 +424,6 @@ typedef enum _eth_func_ret_status {
434 ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust. */ 424 ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust. */
435} ETH_FUNC_RET_STATUS; 425} ETH_FUNC_RET_STATUS;
436 426
437typedef enum _eth_target {
438 ETH_TARGET_DRAM,
439 ETH_TARGET_DEVICE,
440 ETH_TARGET_CBS,
441 ETH_TARGET_PCI0,
442 ETH_TARGET_PCI1
443} ETH_TARGET;
444
445/* These are for big-endian machines. Little endian needs different 427/* These are for big-endian machines. Little endian needs different
446 * definitions. 428 * definitions.
447 */ 429 */
@@ -586,43 +568,44 @@ struct mv643xx_private {
586 568
587/* Static function declarations */ 569/* Static function declarations */
588static void eth_port_init(struct mv643xx_private *mp); 570static void eth_port_init(struct mv643xx_private *mp);
589static void eth_port_reset(unsigned int eth_port_num); 571static void eth_port_reset(struct mv643xx_private *mp);
590static void eth_port_start(struct net_device *dev); 572static void eth_port_start(struct net_device *dev);
591 573
592static void ethernet_phy_reset(unsigned int eth_port_num); 574static void ethernet_phy_reset(struct mv643xx_private *mp);
593 575
594static void eth_port_write_smi_reg(unsigned int eth_port_num, 576static void eth_port_write_smi_reg(struct mv643xx_private *mp,
595 unsigned int phy_reg, unsigned int value); 577 unsigned int phy_reg, unsigned int value);
596 578
597static void eth_port_read_smi_reg(unsigned int eth_port_num, 579static void eth_port_read_smi_reg(struct mv643xx_private *mp,
598 unsigned int phy_reg, unsigned int *value); 580 unsigned int phy_reg, unsigned int *value);
599 581
600static void eth_clear_mib_counters(unsigned int eth_port_num); 582static void eth_clear_mib_counters(struct mv643xx_private *mp);
601 583
602static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, 584static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
603 struct pkt_info *p_pkt_info); 585 struct pkt_info *p_pkt_info);
604static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, 586static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
605 struct pkt_info *p_pkt_info); 587 struct pkt_info *p_pkt_info);
606 588
607static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr); 589static void eth_port_uc_addr_get(struct mv643xx_private *mp,
608static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr); 590 unsigned char *p_addr);
591static void eth_port_uc_addr_set(struct mv643xx_private *mp,
592 unsigned char *p_addr);
609static void eth_port_set_multicast_list(struct net_device *); 593static void eth_port_set_multicast_list(struct net_device *);
610static void mv643xx_eth_port_enable_tx(unsigned int port_num, 594static void mv643xx_eth_port_enable_tx(struct mv643xx_private *mp,
611 unsigned int queues); 595 unsigned int queues);
612static void mv643xx_eth_port_enable_rx(unsigned int port_num, 596static void mv643xx_eth_port_enable_rx(struct mv643xx_private *mp,
613 unsigned int queues); 597 unsigned int queues);
614static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num); 598static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp);
615static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num); 599static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp);
616static int mv643xx_eth_open(struct net_device *); 600static int mv643xx_eth_open(struct net_device *);
617static int mv643xx_eth_stop(struct net_device *); 601static int mv643xx_eth_stop(struct net_device *);
618static int mv643xx_eth_change_mtu(struct net_device *, int); 602static void eth_port_init_mac_tables(struct mv643xx_private *mp);
619static void eth_port_init_mac_tables(unsigned int eth_port_num);
620#ifdef MV643XX_NAPI 603#ifdef MV643XX_NAPI
621static int mv643xx_poll(struct napi_struct *napi, int budget); 604static int mv643xx_poll(struct napi_struct *napi, int budget);
622#endif 605#endif
623static int ethernet_phy_get(unsigned int eth_port_num); 606static int ethernet_phy_get(struct mv643xx_private *mp);
624static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); 607static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr);
625static int ethernet_phy_detect(unsigned int eth_port_num); 608static int ethernet_phy_detect(struct mv643xx_private *mp);
626static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location); 609static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location);
627static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val); 610static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val);
628static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 611static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
@@ -636,12 +619,12 @@ static void __iomem *mv643xx_eth_base;
636/* used to protect SMI_REG, which is shared across ports */ 619/* used to protect SMI_REG, which is shared across ports */
637static DEFINE_SPINLOCK(mv643xx_eth_phy_lock); 620static DEFINE_SPINLOCK(mv643xx_eth_phy_lock);
638 621
639static inline u32 mv_read(int offset) 622static inline u32 rdl(struct mv643xx_private *mp, int offset)
640{ 623{
641 return readl(mv643xx_eth_base + offset); 624 return readl(mv643xx_eth_base + offset);
642} 625}
643 626
644static inline void mv_write(int offset, u32 data) 627static inline void wrl(struct mv643xx_private *mp, int offset, u32 data)
645{ 628{
646 writel(data, mv643xx_eth_base + offset); 629 writel(data, mv643xx_eth_base + offset);
647} 630}
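The hunk above only renames mv_read()/mv_write() to rdl()/wrl() and threads the per-port private struct through; both helpers still dereference the single global mv643xx_eth_base. The extra argument is what lets a later change give each port its own mapped register window without touching every call site again. A minimal, runnable userspace sketch of the same accessor shape (the plain array standing in for the ioremapped window is purely an assumption for illustration; the real helpers use readl()/writel()):

#include <stdint.h>
#include <stdio.h>

struct mv643xx_private {		/* only the field this sketch needs */
	uint32_t *base;			/* per-port register window */
};

static inline uint32_t rdl(struct mv643xx_private *mp, int offset)
{
	return mp->base[offset / 4];	/* real driver: readl(base + offset) */
}

static inline void wrl(struct mv643xx_private *mp, int offset, uint32_t data)
{
	mp->base[offset / 4] = data;	/* real driver: writel(data, base + offset) */
}

int main(void)
{
	uint32_t regs[16] = { 0 };
	struct mv643xx_private port = { .base = regs };

	wrl(&port, 0x8, 0xdeadbeef);
	printf("0x%08x\n", (unsigned int)rdl(&port, 0x8));	/* prints 0xdeadbeef */
	return 0;
}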
@@ -659,18 +642,19 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
659 return -EINVAL; 642 return -EINVAL;
660 643
661 dev->mtu = new_mtu; 644 dev->mtu = new_mtu;
645 if (!netif_running(dev))
646 return 0;
647
662 /* 648 /*
663 * Stop then re-open the interface. This will allocate RX skb's with 649 * Stop and then re-open the interface. This will allocate RX
664 * the new MTU. 650 * skbs of the new MTU.
665 * There is a possible danger that the open will not successed, due 651 * There is a possible danger that the open will not succeed,
666 * to memory is full, which might fail the open function. 652 * due to memory being full, which might fail the open function.
667 */ 653 */
668 if (netif_running(dev)) { 654 mv643xx_eth_stop(dev);
669 mv643xx_eth_stop(dev); 655 if (mv643xx_eth_open(dev)) {
670 if (mv643xx_eth_open(dev)) 656 printk(KERN_ERR "%s: Fatal error on opening device\n",
671 printk(KERN_ERR 657 dev->name);
672 "%s: Fatal error on opening device\n",
673 dev->name);
674 } 658 }
675 659
676 return 0; 660 return 0;
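Because the rewrite is split across the two columns above, here is roughly how mv643xx_eth_change_mtu() reads after the patch: a down interface just records the new MTU, while a running one is stopped and reopened so the RX ring is refilled with skbs of the new size. This is a paraphrase assembled from the hunk, not a verbatim copy; the MTU range check is summarized in a comment rather than quoted.

static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	/* ... reject out-of-range MTUs with -EINVAL, as above ... */

	dev->mtu = new_mtu;
	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface.  Reopening allocates the
	 * RX skbs with the new MTU; it can fail if memory is tight, in
	 * which case the printk below is all the driver reports.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev))
		printk(KERN_ERR "%s: Fatal error on opening device\n",
		       dev->name);

	return 0;
}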
@@ -748,10 +732,9 @@ static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
748static void mv643xx_eth_update_mac_address(struct net_device *dev) 732static void mv643xx_eth_update_mac_address(struct net_device *dev)
749{ 733{
750 struct mv643xx_private *mp = netdev_priv(dev); 734 struct mv643xx_private *mp = netdev_priv(dev);
751 unsigned int port_num = mp->port_num;
752 735
753 eth_port_init_mac_tables(port_num); 736 eth_port_init_mac_tables(mp);
754 eth_port_uc_addr_set(port_num, dev->dev_addr); 737 eth_port_uc_addr_set(mp, dev->dev_addr);
755} 738}
756 739
757/* 740/*
@@ -767,12 +750,12 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
767 struct mv643xx_private *mp = netdev_priv(dev); 750 struct mv643xx_private *mp = netdev_priv(dev);
768 u32 config_reg; 751 u32 config_reg;
769 752
770 config_reg = mv_read(PORT_CONFIG_REG(mp->port_num)); 753 config_reg = rdl(mp, PORT_CONFIG_REG(mp->port_num));
771 if (dev->flags & IFF_PROMISC) 754 if (dev->flags & IFF_PROMISC)
772 config_reg |= (u32) UNICAST_PROMISCUOUS_MODE; 755 config_reg |= (u32) UNICAST_PROMISCUOUS_MODE;
773 else 756 else
774 config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE; 757 config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE;
775 mv_write(PORT_CONFIG_REG(mp->port_num), config_reg); 758 wrl(mp, PORT_CONFIG_REG(mp->port_num), config_reg);
776 759
777 eth_port_set_multicast_list(dev); 760 eth_port_set_multicast_list(dev);
778} 761}
@@ -826,14 +809,14 @@ static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
826{ 809{
827 struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private, 810 struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private,
828 tx_timeout_task); 811 tx_timeout_task);
829 struct net_device *dev = mp->mii.dev; /* yuck */ 812 struct net_device *dev = mp->dev;
830 813
831 if (!netif_running(dev)) 814 if (!netif_running(dev))
832 return; 815 return;
833 816
834 netif_stop_queue(dev); 817 netif_stop_queue(dev);
835 818
836 eth_port_reset(mp->port_num); 819 eth_port_reset(mp);
837 eth_port_start(dev); 820 eth_port_start(dev);
838 821
839 if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB) 822 if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
@@ -845,7 +828,7 @@ static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
845 * 828 *
846 * If force is non-zero, frees uncompleted descriptors as well 829 * If force is non-zero, frees uncompleted descriptors as well
847 */ 830 */
848int mv643xx_eth_free_tx_descs(struct net_device *dev, int force) 831static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
849{ 832{
850 struct mv643xx_private *mp = netdev_priv(dev); 833 struct mv643xx_private *mp = netdev_priv(dev);
851 struct eth_tx_desc *desc; 834 struct eth_tx_desc *desc;
@@ -1008,7 +991,7 @@ static void mv643xx_eth_update_pscr(struct net_device *dev,
1008 u32 o_pscr, n_pscr; 991 u32 o_pscr, n_pscr;
1009 unsigned int queues; 992 unsigned int queues;
1010 993
1011 o_pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num)); 994 o_pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
1012 n_pscr = o_pscr; 995 n_pscr = o_pscr;
1013 996
1014 /* clear speed, duplex and rx buffer size fields */ 997 /* clear speed, duplex and rx buffer size fields */
@@ -1031,16 +1014,16 @@ static void mv643xx_eth_update_pscr(struct net_device *dev,
1031 1014
1032 if (n_pscr != o_pscr) { 1015 if (n_pscr != o_pscr) {
1033 if ((o_pscr & SERIAL_PORT_ENABLE) == 0) 1016 if ((o_pscr & SERIAL_PORT_ENABLE) == 0)
1034 mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr); 1017 wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
1035 else { 1018 else {
1036 queues = mv643xx_eth_port_disable_tx(port_num); 1019 queues = mv643xx_eth_port_disable_tx(mp);
1037 1020
1038 o_pscr &= ~SERIAL_PORT_ENABLE; 1021 o_pscr &= ~SERIAL_PORT_ENABLE;
1039 mv_write(PORT_SERIAL_CONTROL_REG(port_num), o_pscr); 1022 wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), o_pscr);
1040 mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr); 1023 wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
1041 mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr); 1024 wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
1042 if (queues) 1025 if (queues)
1043 mv643xx_eth_port_enable_tx(port_num, queues); 1026 mv643xx_eth_port_enable_tx(mp, queues);
1044 } 1027 }
1045 } 1028 }
1046} 1029}
@@ -1064,13 +1047,13 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
1064 unsigned int port_num = mp->port_num; 1047 unsigned int port_num = mp->port_num;
1065 1048
1066 /* Read interrupt cause registers */ 1049 /* Read interrupt cause registers */
1067 eth_int_cause = mv_read(INTERRUPT_CAUSE_REG(port_num)) & 1050 eth_int_cause = rdl(mp, INTERRUPT_CAUSE_REG(port_num)) &
1068 ETH_INT_UNMASK_ALL; 1051 ETH_INT_UNMASK_ALL;
1069 if (eth_int_cause & ETH_INT_CAUSE_EXT) { 1052 if (eth_int_cause & ETH_INT_CAUSE_EXT) {
1070 eth_int_cause_ext = mv_read( 1053 eth_int_cause_ext = rdl(mp,
1071 INTERRUPT_CAUSE_EXTEND_REG(port_num)) & 1054 INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
1072 ETH_INT_UNMASK_ALL_EXT; 1055 ETH_INT_UNMASK_ALL_EXT;
1073 mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 1056 wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num),
1074 ~eth_int_cause_ext); 1057 ~eth_int_cause_ext);
1075 } 1058 }
1076 1059
@@ -1081,8 +1064,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
1081 if (mii_link_ok(&mp->mii)) { 1064 if (mii_link_ok(&mp->mii)) {
1082 mii_ethtool_gset(&mp->mii, &cmd); 1065 mii_ethtool_gset(&mp->mii, &cmd);
1083 mv643xx_eth_update_pscr(dev, &cmd); 1066 mv643xx_eth_update_pscr(dev, &cmd);
1084 mv643xx_eth_port_enable_tx(port_num, 1067 mv643xx_eth_port_enable_tx(mp, ETH_TX_QUEUES_ENABLED);
1085 ETH_TX_QUEUES_ENABLED);
1086 if (!netif_carrier_ok(dev)) { 1068 if (!netif_carrier_ok(dev)) {
1087 netif_carrier_on(dev); 1069 netif_carrier_on(dev);
1088 if (mp->tx_ring_size - mp->tx_desc_count >= 1070 if (mp->tx_ring_size - mp->tx_desc_count >=
@@ -1098,10 +1080,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
1098#ifdef MV643XX_NAPI 1080#ifdef MV643XX_NAPI
1099 if (eth_int_cause & ETH_INT_CAUSE_RX) { 1081 if (eth_int_cause & ETH_INT_CAUSE_RX) {
1100 /* schedule the NAPI poll routine to maintain port */ 1082 /* schedule the NAPI poll routine to maintain port */
1101 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); 1083 wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
1102 1084
1103 /* wait for previous write to complete */ 1085 /* wait for previous write to complete */
1104 mv_read(INTERRUPT_MASK_REG(port_num)); 1086 rdl(mp, INTERRUPT_MASK_REG(port_num));
1105 1087
1106 netif_rx_schedule(dev, &mp->napi); 1088 netif_rx_schedule(dev, &mp->napi);
1107 } 1089 }
@@ -1136,7 +1118,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
1136 * , and the required delay of the interrupt in usec. 1118 * , and the required delay of the interrupt in usec.
1137 * 1119 *
1138 * INPUT: 1120 * INPUT:
1139 * unsigned int eth_port_num Ethernet port number 1121 * struct mv643xx_private *mp Ethernet port
1140 * unsigned int t_clk t_clk of the MV-643xx chip in HZ units 1122 * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
1141 * unsigned int delay Delay in usec 1123 * unsigned int delay Delay in usec
1142 * 1124 *
@@ -1147,15 +1129,16 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
1147 * The interrupt coalescing value set in the gigE port. 1129 * The interrupt coalescing value set in the gigE port.
1148 * 1130 *
1149 */ 1131 */
1150static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num, 1132static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp,
1151 unsigned int t_clk, unsigned int delay) 1133 unsigned int t_clk, unsigned int delay)
1152{ 1134{
1135 unsigned int port_num = mp->port_num;
1153 unsigned int coal = ((t_clk / 1000000) * delay) / 64; 1136 unsigned int coal = ((t_clk / 1000000) * delay) / 64;
1154 1137
1155 /* Set RX Coalescing mechanism */ 1138 /* Set RX Coalescing mechanism */
1156 mv_write(SDMA_CONFIG_REG(eth_port_num), 1139 wrl(mp, SDMA_CONFIG_REG(port_num),
1157 ((coal & 0x3fff) << 8) | 1140 ((coal & 0x3fff) << 8) |
1158 (mv_read(SDMA_CONFIG_REG(eth_port_num)) 1141 (rdl(mp, SDMA_CONFIG_REG(port_num))
1159 & 0xffc000ff)); 1142 & 0xffc000ff));
1160 1143
1161 return coal; 1144 return coal;
@@ -1174,7 +1157,7 @@ static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
1174 * MV-643xx chip and the required delay in the interrupt in uSec 1157 * MV-643xx chip and the required delay in the interrupt in uSec
1175 * 1158 *
1176 * INPUT: 1159 * INPUT:
1177 * unsigned int eth_port_num Ethernet port number 1160 * struct mv643xx_private *mp Ethernet port
1178 * unsigned int t_clk t_clk of the MV-643xx chip in HZ units 1161 * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
1179 * unsigned int delay Delay in uSeconds 1162 * unsigned int delay Delay in uSeconds
1180 * 1163 *
@@ -1185,13 +1168,14 @@ static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
1185 * The interrupt coalescing value set in the gigE port. 1168 * The interrupt coalescing value set in the gigE port.
1186 * 1169 *
1187 */ 1170 */
1188static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num, 1171static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp,
1189 unsigned int t_clk, unsigned int delay) 1172 unsigned int t_clk, unsigned int delay)
1190{ 1173{
1191 unsigned int coal; 1174 unsigned int coal = ((t_clk / 1000000) * delay) / 64;
1192 coal = ((t_clk / 1000000) * delay) / 64; 1175
1193 /* Set TX Coalescing mechanism */ 1176 /* Set TX Coalescing mechanism */
1194 mv_write(TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num), coal << 4); 1177 wrl(mp, TX_FIFO_URGENT_THRESHOLD_REG(mp->port_num), coal << 4);
1178
1195 return coal; 1179 return coal;
1196} 1180}
1197 1181
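Both coalescing helpers above turn a delay in microseconds into units of 64 t_clk cycles before programming the hardware: the RX value is masked to 14 bits and placed in bits 21:8 of SDMA_CONFIG_REG, the TX value is shifted left by 4 into TX_FIFO_URGENT_THRESHOLD_REG. The conversion itself is plain integer arithmetic; a runnable check with the 133 MHz t_clk that mv643xx_eth_open() passes and an illustrative 20 usec delay (20 is just an example value, not the driver's MV643XX_RX_COAL/MV643XX_TX_COAL default):

#include <stdio.h>

/* same integer math as eth_port_set_rx_coal()/eth_port_set_tx_coal() */
static unsigned int coal_value(unsigned int t_clk, unsigned int delay_usec)
{
	return ((t_clk / 1000000) * delay_usec) / 64;
}

int main(void)
{
	printf("%u\n", coal_value(133000000, 20));	/* 133 * 20 / 64 = 41 */
	return 0;
}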
@@ -1327,16 +1311,15 @@ static int mv643xx_eth_open(struct net_device *dev)
1327 int err; 1311 int err;
1328 1312
1329 /* Clear any pending ethernet port interrupts */ 1313 /* Clear any pending ethernet port interrupts */
1330 mv_write(INTERRUPT_CAUSE_REG(port_num), 0); 1314 wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0);
1331 mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); 1315 wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
1332 /* wait for previous write to complete */ 1316 /* wait for previous write to complete */
1333 mv_read (INTERRUPT_CAUSE_EXTEND_REG(port_num)); 1317 rdl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num));
1334 1318
1335 err = request_irq(dev->irq, mv643xx_eth_int_handler, 1319 err = request_irq(dev->irq, mv643xx_eth_int_handler,
1336 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); 1320 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
1337 if (err) { 1321 if (err) {
1338 printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n", 1322 printk(KERN_ERR "%s: Can not assign IRQ\n", dev->name);
1339 port_num);
1340 return -EAGAIN; 1323 return -EAGAIN;
1341 } 1324 }
1342 1325
@@ -1430,17 +1413,17 @@ static int mv643xx_eth_open(struct net_device *dev)
1430 1413
1431#ifdef MV643XX_COAL 1414#ifdef MV643XX_COAL
1432 mp->rx_int_coal = 1415 mp->rx_int_coal =
1433 eth_port_set_rx_coal(port_num, 133000000, MV643XX_RX_COAL); 1416 eth_port_set_rx_coal(mp, 133000000, MV643XX_RX_COAL);
1434#endif 1417#endif
1435 1418
1436 mp->tx_int_coal = 1419 mp->tx_int_coal =
1437 eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); 1420 eth_port_set_tx_coal(mp, 133000000, MV643XX_TX_COAL);
1438 1421
1439 /* Unmask phy and link status changes interrupts */ 1422 /* Unmask phy and link status changes interrupts */
1440 mv_write(INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT); 1423 wrl(mp, INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT);
1441 1424
1442 /* Unmask RX buffer and TX end interrupt */ 1425 /* Unmask RX buffer and TX end interrupt */
1443 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); 1426 wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
1444 1427
1445 return 0; 1428 return 0;
1446 1429
@@ -1459,7 +1442,7 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev)
1459 struct mv643xx_private *mp = netdev_priv(dev); 1442 struct mv643xx_private *mp = netdev_priv(dev);
1460 1443
1461 /* Stop Tx Queues */ 1444 /* Stop Tx Queues */
1462 mv643xx_eth_port_disable_tx(mp->port_num); 1445 mv643xx_eth_port_disable_tx(mp);
1463 1446
1464 /* Free outstanding skb's on TX ring */ 1447 /* Free outstanding skb's on TX ring */
1465 mv643xx_eth_free_all_tx_descs(dev); 1448 mv643xx_eth_free_all_tx_descs(dev);
@@ -1477,11 +1460,10 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev)
1477static void mv643xx_eth_free_rx_rings(struct net_device *dev) 1460static void mv643xx_eth_free_rx_rings(struct net_device *dev)
1478{ 1461{
1479 struct mv643xx_private *mp = netdev_priv(dev); 1462 struct mv643xx_private *mp = netdev_priv(dev);
1480 unsigned int port_num = mp->port_num;
1481 int curr; 1463 int curr;
1482 1464
1483 /* Stop RX Queues */ 1465 /* Stop RX Queues */
1484 mv643xx_eth_port_disable_rx(port_num); 1466 mv643xx_eth_port_disable_rx(mp);
1485 1467
1486 /* Free preallocated skb's on RX rings */ 1468 /* Free preallocated skb's on RX rings */
1487 for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) { 1469 for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) {
@@ -1520,9 +1502,9 @@ static int mv643xx_eth_stop(struct net_device *dev)
1520 unsigned int port_num = mp->port_num; 1502 unsigned int port_num = mp->port_num;
1521 1503
1522 /* Mask all interrupts on ethernet port */ 1504 /* Mask all interrupts on ethernet port */
1523 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); 1505 wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
1524 /* wait for previous write to complete */ 1506 /* wait for previous write to complete */
1525 mv_read(INTERRUPT_MASK_REG(port_num)); 1507 rdl(mp, INTERRUPT_MASK_REG(port_num));
1526 1508
1527#ifdef MV643XX_NAPI 1509#ifdef MV643XX_NAPI
1528 napi_disable(&mp->napi); 1510 napi_disable(&mp->napi);
@@ -1530,7 +1512,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
1530 netif_carrier_off(dev); 1512 netif_carrier_off(dev);
1531 netif_stop_queue(dev); 1513 netif_stop_queue(dev);
1532 1514
1533 eth_port_reset(mp->port_num); 1515 eth_port_reset(mp);
1534 1516
1535 mv643xx_eth_free_tx_rings(dev); 1517 mv643xx_eth_free_tx_rings(dev);
1536 mv643xx_eth_free_rx_rings(dev); 1518 mv643xx_eth_free_rx_rings(dev);
@@ -1561,15 +1543,15 @@ static int mv643xx_poll(struct napi_struct *napi, int budget)
1561#endif 1543#endif
1562 1544
1563 work_done = 0; 1545 work_done = 0;
1564 if ((mv_read(RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) 1546 if ((rdl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
1565 != (u32) mp->rx_used_desc_q) 1547 != (u32) mp->rx_used_desc_q)
1566 work_done = mv643xx_eth_receive_queue(dev, budget); 1548 work_done = mv643xx_eth_receive_queue(dev, budget);
1567 1549
1568 if (work_done < budget) { 1550 if (work_done < budget) {
1569 netif_rx_complete(dev, napi); 1551 netif_rx_complete(dev, napi);
1570 mv_write(INTERRUPT_CAUSE_REG(port_num), 0); 1552 wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0);
1571 mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); 1553 wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
1572 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); 1554 wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
1573 } 1555 }
1574 1556
1575 return work_done; 1557 return work_done;
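Read together with the interrupt-handler hunk further up, this is the usual NAPI masking dance: the hard interrupt masks the port's interrupt sources, reads the mask register back so the posted write is known to have reached the chip, and schedules the poll routine; the poll routine acknowledges the causes and unmasks again once it has used less than its budget. A condensed sketch of the two halves (the register macros and ETH_INT_* masks are the driver's own):

/* hard-IRQ side, as in mv643xx_eth_int_handler() */
static void rx_irq_seen(struct mv643xx_private *mp, struct net_device *dev)
{
	wrl(mp, INTERRUPT_MASK_REG(mp->port_num), ETH_INT_MASK_ALL);
	rdl(mp, INTERRUPT_MASK_REG(mp->port_num));	/* flush the posted write */
	netif_rx_schedule(dev, &mp->napi);
}

/* softirq side, as in mv643xx_poll() once work_done < budget */
static void rx_poll_done(struct mv643xx_private *mp, struct net_device *dev)
{
	netif_rx_complete(dev, &mp->napi);
	wrl(mp, INTERRUPT_CAUSE_REG(mp->port_num), 0);
	wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(mp->port_num), 0);
	wrl(mp, INTERRUPT_MASK_REG(mp->port_num), ETH_INT_UNMASK_ALL);
}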
@@ -1723,7 +1705,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
1723 1705
1724 /* ensure all descriptors are written before poking hardware */ 1706 /* ensure all descriptors are written before poking hardware */
1725 wmb(); 1707 wmb();
1726 mv643xx_eth_port_enable_tx(mp->port_num, ETH_TX_QUEUES_ENABLED); 1708 mv643xx_eth_port_enable_tx(mp, ETH_TX_QUEUES_ENABLED);
1727 1709
1728 mp->tx_desc_count += nr_frags + 1; 1710 mp->tx_desc_count += nr_frags + 1;
1729} 1711}
@@ -1739,25 +1721,23 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1739 unsigned long flags; 1721 unsigned long flags;
1740 1722
1741 BUG_ON(netif_queue_stopped(dev)); 1723 BUG_ON(netif_queue_stopped(dev));
1742 BUG_ON(skb == NULL); 1724
1725 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
1726 stats->tx_dropped++;
1727 printk(KERN_DEBUG "%s: failed to linearize tiny "
1728 "unaligned fragment\n", dev->name);
1729 return NETDEV_TX_BUSY;
1730 }
1731
1732 spin_lock_irqsave(&mp->lock, flags);
1743 1733
1744 if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) { 1734 if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) {
1745 printk(KERN_ERR "%s: transmit with queue full\n", dev->name); 1735 printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
1746 netif_stop_queue(dev); 1736 netif_stop_queue(dev);
1747 return 1; 1737 spin_unlock_irqrestore(&mp->lock, flags);
1748 } 1738 return NETDEV_TX_BUSY;
1749
1750 if (has_tiny_unaligned_frags(skb)) {
1751 if (__skb_linearize(skb)) {
1752 stats->tx_dropped++;
1753 printk(KERN_DEBUG "%s: failed to linearize tiny "
1754 "unaligned fragment\n", dev->name);
1755 return 1;
1756 }
1757 } 1739 }
1758 1740
1759 spin_lock_irqsave(&mp->lock, flags);
1760
1761 eth_tx_submit_descs_for_skb(mp, skb); 1741 eth_tx_submit_descs_for_skb(mp, skb);
1762 stats->tx_bytes += skb->len; 1742 stats->tx_bytes += skb->len;
1763 stats->tx_packets++; 1743 stats->tx_packets++;
@@ -1768,7 +1748,7 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1768 1748
1769 spin_unlock_irqrestore(&mp->lock, flags); 1749 spin_unlock_irqrestore(&mp->lock, flags);
1770 1750
1771 return 0; /* success */ 1751 return NETDEV_TX_OK;
1772} 1752}
1773 1753
1774#ifdef CONFIG_NET_POLL_CONTROLLER 1754#ifdef CONFIG_NET_POLL_CONTROLLER
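The transmit rework above does three things at once: the tiny-fragment linearize check moves ahead of taking mp->lock, the lock is now also dropped on the queue-full path (the old code returned with it held), and the bare 0/1 return values become NETDEV_TX_OK/NETDEV_TX_BUSY. Reassembled from the hunk, the new path reads roughly as below; the stats declaration and the post-submit queue-stop step are not visible in the hunk and are summarized from context.

static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;	/* assumed declaration, not in the hunk */
	unsigned long flags;

	BUG_ON(netif_queue_stopped(dev));

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		stats->tx_dropped++;
		printk(KERN_DEBUG "%s: failed to linearize tiny "
				"unaligned fragment\n", dev->name);
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&mp->lock, flags);

	if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) {
		printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&mp->lock, flags);	/* unlock on the busy path too */
		return NETDEV_TX_BUSY;
	}

	eth_tx_submit_descs_for_skb(mp, skb);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	/* ... stop the queue if the ring is now nearly full, update trans_start ... */

	spin_unlock_irqrestore(&mp->lock, flags);
	return NETDEV_TX_OK;
}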
@@ -1777,13 +1757,13 @@ static void mv643xx_netpoll(struct net_device *netdev)
1777 struct mv643xx_private *mp = netdev_priv(netdev); 1757 struct mv643xx_private *mp = netdev_priv(netdev);
1778 int port_num = mp->port_num; 1758 int port_num = mp->port_num;
1779 1759
1780 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); 1760 wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
1781 /* wait for previous write to complete */ 1761 /* wait for previous write to complete */
1782 mv_read(INTERRUPT_MASK_REG(port_num)); 1762 rdl(mp, INTERRUPT_MASK_REG(port_num));
1783 1763
1784 mv643xx_eth_int_handler(netdev->irq, netdev); 1764 mv643xx_eth_int_handler(netdev->irq, netdev);
1785 1765
1786 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); 1766 wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
1787} 1767}
1788#endif 1768#endif
1789 1769
@@ -1900,7 +1880,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1900 port_num = mp->port_num = pd->port_number; 1880 port_num = mp->port_num = pd->port_number;
1901 1881
1902 /* set default config values */ 1882 /* set default config values */
1903 eth_port_uc_addr_get(port_num, dev->dev_addr); 1883 eth_port_uc_addr_get(mp, dev->dev_addr);
1904 mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE; 1884 mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
1905 mp->tx_ring_size = PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; 1885 mp->tx_ring_size = PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
1906 1886
@@ -1908,7 +1888,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1908 memcpy(dev->dev_addr, pd->mac_addr, 6); 1888 memcpy(dev->dev_addr, pd->mac_addr, 6);
1909 1889
1910 if (pd->phy_addr || pd->force_phy_addr) 1890 if (pd->phy_addr || pd->force_phy_addr)
1911 ethernet_phy_set(port_num, pd->phy_addr); 1891 ethernet_phy_set(mp, pd->phy_addr);
1912 1892
1913 if (pd->rx_queue_size) 1893 if (pd->rx_queue_size)
1914 mp->rx_ring_size = pd->rx_queue_size; 1894 mp->rx_ring_size = pd->rx_queue_size;
@@ -1933,19 +1913,18 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1933 mp->mii.dev = dev; 1913 mp->mii.dev = dev;
1934 mp->mii.mdio_read = mv643xx_mdio_read; 1914 mp->mii.mdio_read = mv643xx_mdio_read;
1935 mp->mii.mdio_write = mv643xx_mdio_write; 1915 mp->mii.mdio_write = mv643xx_mdio_write;
1936 mp->mii.phy_id = ethernet_phy_get(port_num); 1916 mp->mii.phy_id = ethernet_phy_get(mp);
1937 mp->mii.phy_id_mask = 0x3f; 1917 mp->mii.phy_id_mask = 0x3f;
1938 mp->mii.reg_num_mask = 0x1f; 1918 mp->mii.reg_num_mask = 0x1f;
1939 1919
1940 err = ethernet_phy_detect(port_num); 1920 err = ethernet_phy_detect(mp);
1941 if (err) { 1921 if (err) {
1942 pr_debug("MV643xx ethernet port %d: " 1922 pr_debug("%s: No PHY detected at addr %d\n",
1943 "No PHY detected at addr %d\n", 1923 dev->name, ethernet_phy_get(mp));
1944 port_num, ethernet_phy_get(port_num));
1945 goto out; 1924 goto out;
1946 } 1925 }
1947 1926
1948 ethernet_phy_reset(port_num); 1927 ethernet_phy_reset(mp);
1949 mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii); 1928 mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
1950 mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd); 1929 mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd);
1951 mv643xx_eth_update_pscr(dev, &cmd); 1930 mv643xx_eth_update_pscr(dev, &cmd);
@@ -2006,9 +1985,11 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
2006 1985
2007static int mv643xx_eth_shared_probe(struct platform_device *pdev) 1986static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2008{ 1987{
1988 static int mv643xx_version_printed = 0;
2009 struct resource *res; 1989 struct resource *res;
2010 1990
2011 printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n"); 1991 if (!mv643xx_version_printed++)
1992 printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
2012 1993
2013 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1994 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2014 if (res == NULL) 1995 if (res == NULL)
@@ -2037,10 +2018,10 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev)
2037 unsigned int port_num = mp->port_num; 2018 unsigned int port_num = mp->port_num;
2038 2019
2039 /* Mask all interrupts on ethernet port */ 2020 /* Mask all interrupts on ethernet port */
2040 mv_write(INTERRUPT_MASK_REG(port_num), 0); 2021 wrl(mp, INTERRUPT_MASK_REG(port_num), 0);
2041 mv_read (INTERRUPT_MASK_REG(port_num)); 2022 rdl(mp, INTERRUPT_MASK_REG(port_num));
2042 2023
2043 eth_port_reset(port_num); 2024 eth_port_reset(mp);
2044} 2025}
2045 2026
2046static struct platform_driver mv643xx_eth_driver = { 2027static struct platform_driver mv643xx_eth_driver = {
@@ -2229,12 +2210,9 @@ MODULE_ALIAS("platform:mv643xx_eth");
2229 * return_info Tx/Rx user resource return information. 2210 * return_info Tx/Rx user resource return information.
2230 */ 2211 */
2231 2212
2232/* PHY routines */
2233static int ethernet_phy_get(unsigned int eth_port_num);
2234static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);
2235
2236/* Ethernet Port routines */ 2213/* Ethernet Port routines */
2237static void eth_port_set_filter_table_entry(int table, unsigned char entry); 2214static void eth_port_set_filter_table_entry(struct mv643xx_private *mp,
2215 int table, unsigned char entry);
2238 2216
2239/* 2217/*
2240 * eth_port_init - Initialize the Ethernet port driver 2218 * eth_port_init - Initialize the Ethernet port driver
@@ -2264,9 +2242,9 @@ static void eth_port_init(struct mv643xx_private *mp)
2264{ 2242{
2265 mp->rx_resource_err = 0; 2243 mp->rx_resource_err = 0;
2266 2244
2267 eth_port_reset(mp->port_num); 2245 eth_port_reset(mp);
2268 2246
2269 eth_port_init_mac_tables(mp->port_num); 2247 eth_port_init_mac_tables(mp);
2270} 2248}
2271 2249
2272/* 2250/*
@@ -2306,28 +2284,28 @@ static void eth_port_start(struct net_device *dev)
2306 2284
2307 /* Assignment of Tx CTRP of given queue */ 2285 /* Assignment of Tx CTRP of given queue */
2308 tx_curr_desc = mp->tx_curr_desc_q; 2286 tx_curr_desc = mp->tx_curr_desc_q;
2309 mv_write(TX_CURRENT_QUEUE_DESC_PTR_0(port_num), 2287 wrl(mp, TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
2310 (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc)); 2288 (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc));
2311 2289
2312 /* Assignment of Rx CRDP of given queue */ 2290 /* Assignment of Rx CRDP of given queue */
2313 rx_curr_desc = mp->rx_curr_desc_q; 2291 rx_curr_desc = mp->rx_curr_desc_q;
2314 mv_write(RX_CURRENT_QUEUE_DESC_PTR_0(port_num), 2292 wrl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
2315 (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); 2293 (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));
2316 2294
2317 /* Add the assigned Ethernet address to the port's address table */ 2295 /* Add the assigned Ethernet address to the port's address table */
2318 eth_port_uc_addr_set(port_num, dev->dev_addr); 2296 eth_port_uc_addr_set(mp, dev->dev_addr);
2319 2297
2320 /* Assign port configuration and command. */ 2298 /* Assign port configuration and command. */
2321 mv_write(PORT_CONFIG_REG(port_num), 2299 wrl(mp, PORT_CONFIG_REG(port_num),
2322 PORT_CONFIG_DEFAULT_VALUE); 2300 PORT_CONFIG_DEFAULT_VALUE);
2323 2301
2324 mv_write(PORT_CONFIG_EXTEND_REG(port_num), 2302 wrl(mp, PORT_CONFIG_EXTEND_REG(port_num),
2325 PORT_CONFIG_EXTEND_DEFAULT_VALUE); 2303 PORT_CONFIG_EXTEND_DEFAULT_VALUE);
2326 2304
2327 pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num)); 2305 pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
2328 2306
2329 pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS); 2307 pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
2330 mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr); 2308 wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
2331 2309
2332 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL | 2310 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
2333 DISABLE_AUTO_NEG_SPEED_GMII | 2311 DISABLE_AUTO_NEG_SPEED_GMII |
@@ -2335,32 +2313,34 @@ static void eth_port_start(struct net_device *dev)
2335 DO_NOT_FORCE_LINK_FAIL | 2313 DO_NOT_FORCE_LINK_FAIL |
2336 SERIAL_PORT_CONTROL_RESERVED; 2314 SERIAL_PORT_CONTROL_RESERVED;
2337 2315
2338 mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr); 2316 wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
2339 2317
2340 pscr |= SERIAL_PORT_ENABLE; 2318 pscr |= SERIAL_PORT_ENABLE;
2341 mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr); 2319 wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
2342 2320
2343 /* Assign port SDMA configuration */ 2321 /* Assign port SDMA configuration */
2344 mv_write(SDMA_CONFIG_REG(port_num), 2322 wrl(mp, SDMA_CONFIG_REG(port_num),
2345 PORT_SDMA_CONFIG_DEFAULT_VALUE); 2323 PORT_SDMA_CONFIG_DEFAULT_VALUE);
2346 2324
2347 /* Enable port Rx. */ 2325 /* Enable port Rx. */
2348 mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED); 2326 mv643xx_eth_port_enable_rx(mp, ETH_RX_QUEUES_ENABLED);
2349 2327
2350 /* Disable port bandwidth limits by clearing MTU register */ 2328 /* Disable port bandwidth limits by clearing MTU register */
2351 mv_write(MAXIMUM_TRANSMIT_UNIT(port_num), 0); 2329 wrl(mp, MAXIMUM_TRANSMIT_UNIT(port_num), 0);
2352 2330
2353 /* save phy settings across reset */ 2331 /* save phy settings across reset */
2354 mv643xx_get_settings(dev, &ethtool_cmd); 2332 mv643xx_get_settings(dev, &ethtool_cmd);
2355 ethernet_phy_reset(mp->port_num); 2333 ethernet_phy_reset(mp);
2356 mv643xx_set_settings(dev, &ethtool_cmd); 2334 mv643xx_set_settings(dev, &ethtool_cmd);
2357} 2335}
2358 2336
2359/* 2337/*
2360 * eth_port_uc_addr_set - Write a MAC address into the port's hw registers 2338 * eth_port_uc_addr_set - Write a MAC address into the port's hw registers
2361 */ 2339 */
2362static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr) 2340static void eth_port_uc_addr_set(struct mv643xx_private *mp,
2341 unsigned char *p_addr)
2363{ 2342{
2343 unsigned int port_num = mp->port_num;
2364 unsigned int mac_h; 2344 unsigned int mac_h;
2365 unsigned int mac_l; 2345 unsigned int mac_l;
2366 int table; 2346 int table;
@@ -2369,24 +2349,26 @@ static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr)
2369 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | 2349 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
2370 (p_addr[3] << 0); 2350 (p_addr[3] << 0);
2371 2351
2372 mv_write(MAC_ADDR_LOW(port_num), mac_l); 2352 wrl(mp, MAC_ADDR_LOW(port_num), mac_l);
2373 mv_write(MAC_ADDR_HIGH(port_num), mac_h); 2353 wrl(mp, MAC_ADDR_HIGH(port_num), mac_h);
2374 2354
2375 /* Accept frames with this address */ 2355 /* Accept frames with this address */
2376 table = DA_FILTER_UNICAST_TABLE_BASE(port_num); 2356 table = DA_FILTER_UNICAST_TABLE_BASE(port_num);
2377 eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f); 2357 eth_port_set_filter_table_entry(mp, table, p_addr[5] & 0x0f);
2378} 2358}
2379 2359
2380/* 2360/*
2381 * eth_port_uc_addr_get - Read the MAC address from the port's hw registers 2361 * eth_port_uc_addr_get - Read the MAC address from the port's hw registers
2382 */ 2362 */
2383static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr) 2363static void eth_port_uc_addr_get(struct mv643xx_private *mp,
2364 unsigned char *p_addr)
2384{ 2365{
2366 unsigned int port_num = mp->port_num;
2385 unsigned int mac_h; 2367 unsigned int mac_h;
2386 unsigned int mac_l; 2368 unsigned int mac_l;
2387 2369
2388 mac_h = mv_read(MAC_ADDR_HIGH(port_num)); 2370 mac_h = rdl(mp, MAC_ADDR_HIGH(port_num));
2389 mac_l = mv_read(MAC_ADDR_LOW(port_num)); 2371 mac_l = rdl(mp, MAC_ADDR_LOW(port_num));
2390 2372
2391 p_addr[0] = (mac_h >> 24) & 0xff; 2373 p_addr[0] = (mac_h >> 24) & 0xff;
2392 p_addr[1] = (mac_h >> 16) & 0xff; 2374 p_addr[1] = (mac_h >> 16) & 0xff;
@@ -2405,7 +2387,8 @@ static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr)
2405 * 3-1 Queue (ETH_Q0=0) 2387 * 3-1 Queue (ETH_Q0=0)
2406 * 7-4 Reserved = 0; 2388 * 7-4 Reserved = 0;
2407 */ 2389 */
2408static void eth_port_set_filter_table_entry(int table, unsigned char entry) 2390static void eth_port_set_filter_table_entry(struct mv643xx_private *mp,
2391 int table, unsigned char entry)
2409{ 2392{
2410 unsigned int table_reg; 2393 unsigned int table_reg;
2411 unsigned int tbl_offset; 2394 unsigned int tbl_offset;
@@ -2415,9 +2398,9 @@ static void eth_port_set_filter_table_entry(int table, unsigned char entry)
2415 reg_offset = entry % 4; /* Entry offset within the register */ 2398 reg_offset = entry % 4; /* Entry offset within the register */
2416 2399
2417 /* Set "accepts frame bit" at specified table entry */ 2400 /* Set "accepts frame bit" at specified table entry */
2418 table_reg = mv_read(table + tbl_offset); 2401 table_reg = rdl(mp, table + tbl_offset);
2419 table_reg |= 0x01 << (8 * reg_offset); 2402 table_reg |= 0x01 << (8 * reg_offset);
2420 mv_write(table + tbl_offset, table_reg); 2403 wrl(mp, table + tbl_offset, table_reg);
2421} 2404}
2422 2405
2423/* 2406/*
@@ -2434,8 +2417,9 @@ static void eth_port_set_filter_table_entry(int table, unsigned char entry)
2434 * In either case, eth_port_set_filter_table_entry() is then called 2417 * In either case, eth_port_set_filter_table_entry() is then called
2435 * to set the actual table entry. 2418
2436 */ 2419 */
2437static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr) 2420static void eth_port_mc_addr(struct mv643xx_private *mp, unsigned char *p_addr)
2438{ 2421{
2422 unsigned int port_num = mp->port_num;
2439 unsigned int mac_h; 2423 unsigned int mac_h;
2440 unsigned int mac_l; 2424 unsigned int mac_l;
2441 unsigned char crc_result = 0; 2425 unsigned char crc_result = 0;
@@ -2446,9 +2430,8 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
2446 2430
2447 if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) && 2431 if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
2448 (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) { 2432 (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
2449 table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2433 table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num);
2450 (eth_port_num); 2434 eth_port_set_filter_table_entry(mp, table, p_addr[5]);
2451 eth_port_set_filter_table_entry(table, p_addr[5]);
2452 return; 2435 return;
2453 } 2436 }
2454 2437
@@ -2520,8 +2503,8 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
2520 for (i = 0; i < 8; i++) 2503 for (i = 0; i < 8; i++)
2521 crc_result = crc_result | (crc[i] << i); 2504 crc_result = crc_result | (crc[i] << i);
2522 2505
2523 table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num); 2506 table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num);
2524 eth_port_set_filter_table_entry(table, crc_result); 2507 eth_port_set_filter_table_entry(mp, table, crc_result);
2525} 2508}
2526 2509
2527/* 2510/*
@@ -2550,7 +2533,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2550 * 3-1 Queue ETH_Q0=0 2533 * 3-1 Queue ETH_Q0=0
2551 * 7-4 Reserved = 0; 2534 * 7-4 Reserved = 0;
2552 */ 2535 */
2553 mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); 2536 wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
2554 2537
2555 /* Set all entries in DA filter other multicast 2538 /* Set all entries in DA filter other multicast
2556 * table (Ex_dFOMT) 2539 * table (Ex_dFOMT)
@@ -2560,7 +2543,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2560 * 3-1 Queue ETH_Q0=0 2543 * 3-1 Queue ETH_Q0=0
2561 * 7-4 Reserved = 0; 2544 * 7-4 Reserved = 0;
2562 */ 2545 */
2563 mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); 2546 wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
2564 } 2547 }
2565 return; 2548 return;
2566 } 2549 }
@@ -2570,11 +2553,11 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2570 */ 2553 */
2571 for (table_index = 0; table_index <= 0xFC; table_index += 4) { 2554 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2572 /* Clear DA filter special multicast table (Ex_dFSMT) */ 2555 /* Clear DA filter special multicast table (Ex_dFSMT) */
2573 mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2556 wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
2574 (eth_port_num) + table_index, 0); 2557 (eth_port_num) + table_index, 0);
2575 2558
2576 /* Clear DA filter other multicast table (Ex_dFOMT) */ 2559 /* Clear DA filter other multicast table (Ex_dFOMT) */
2577 mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE 2560 wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE
2578 (eth_port_num) + table_index, 0); 2561 (eth_port_num) + table_index, 0);
2579 } 2562 }
2580 2563
@@ -2583,7 +2566,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2583 (i < 256) && (mc_list != NULL) && (i < dev->mc_count); 2566 (i < 256) && (mc_list != NULL) && (i < dev->mc_count);
2584 i++, mc_list = mc_list->next) 2567 i++, mc_list = mc_list->next)
2585 if (mc_list->dmi_addrlen == 6) 2568 if (mc_list->dmi_addrlen == 6)
2586 eth_port_mc_addr(eth_port_num, mc_list->dmi_addr); 2569 eth_port_mc_addr(mp, mc_list->dmi_addr);
2587} 2570}
2588 2571
2589/* 2572/*
@@ -2594,7 +2577,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2594 * Other Multicast) and set each entry to 0. 2577 * Other Multicast) and set each entry to 0.
2595 * 2578 *
2596 * INPUT: 2579 * INPUT:
2597 * unsigned int eth_port_num Ethernet Port number. 2580 * struct mv643xx_private *mp Ethernet Port.
2598 * 2581 *
2599 * OUTPUT: 2582 * OUTPUT:
2600 * Multicast and Unicast packets are rejected. 2583 * Multicast and Unicast packets are rejected.
@@ -2602,22 +2585,23 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2602 * RETURN: 2585 * RETURN:
2603 * None. 2586 * None.
2604 */ 2587 */
2605static void eth_port_init_mac_tables(unsigned int eth_port_num) 2588static void eth_port_init_mac_tables(struct mv643xx_private *mp)
2606{ 2589{
2590 unsigned int port_num = mp->port_num;
2607 int table_index; 2591 int table_index;
2608 2592
2609 /* Clear DA filter unicast table (Ex_dFUT) */ 2593 /* Clear DA filter unicast table (Ex_dFUT) */
2610 for (table_index = 0; table_index <= 0xC; table_index += 4) 2594 for (table_index = 0; table_index <= 0xC; table_index += 4)
2611 mv_write(DA_FILTER_UNICAST_TABLE_BASE 2595 wrl(mp, DA_FILTER_UNICAST_TABLE_BASE(port_num) +
2612 (eth_port_num) + table_index, 0); 2596 table_index, 0);
2613 2597
2614 for (table_index = 0; table_index <= 0xFC; table_index += 4) { 2598 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2615 /* Clear DA filter special multicast table (Ex_dFSMT) */ 2599 /* Clear DA filter special multicast table (Ex_dFSMT) */
2616 mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2600 wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num) +
2617 (eth_port_num) + table_index, 0); 2601 table_index, 0);
2618 /* Clear DA filter other multicast table (Ex_dFOMT) */ 2602 /* Clear DA filter other multicast table (Ex_dFOMT) */
2619 mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE 2603 wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num) +
2620 (eth_port_num) + table_index, 0); 2604 table_index, 0);
2621 } 2605 }
2622} 2606}
2623 2607
@@ -2629,7 +2613,7 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
2629 * A read from the MIB counter will reset the counter. 2613 * A read from the MIB counter will reset the counter.
2630 * 2614 *
2631 * INPUT: 2615 * INPUT:
2632 * unsigned int eth_port_num Ethernet Port number. 2616 * struct mv643xx_private *mp Ethernet Port.
2633 * 2617 *
2634 * OUTPUT: 2618 * OUTPUT:
2635 * After reading all MIB counters, the counters resets. 2619 * After reading all MIB counters, the counters resets.
@@ -2638,19 +2622,20 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
2638 * MIB counter value. 2622 * MIB counter value.
2639 * 2623 *
2640 */ 2624 */
2641static void eth_clear_mib_counters(unsigned int eth_port_num) 2625static void eth_clear_mib_counters(struct mv643xx_private *mp)
2642{ 2626{
2627 unsigned int port_num = mp->port_num;
2643 int i; 2628 int i;
2644 2629
2645 /* Perform dummy reads from MIB counters */ 2630 /* Perform dummy reads from MIB counters */
2646 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION; 2631 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
2647 i += 4) 2632 i += 4)
2648 mv_read(MIB_COUNTERS_BASE(eth_port_num) + i); 2633 rdl(mp, MIB_COUNTERS_BASE(port_num) + i);
2649} 2634}
2650 2635
2651static inline u32 read_mib(struct mv643xx_private *mp, int offset) 2636static inline u32 read_mib(struct mv643xx_private *mp, int offset)
2652{ 2637{
2653 return mv_read(MIB_COUNTERS_BASE(mp->port_num) + offset); 2638 return rdl(mp, MIB_COUNTERS_BASE(mp->port_num) + offset);
2654} 2639}
2655 2640
2656static void eth_update_mib_counters(struct mv643xx_private *mp) 2641static void eth_update_mib_counters(struct mv643xx_private *mp)
@@ -2686,7 +2671,7 @@ static void eth_update_mib_counters(struct mv643xx_private *mp)
2686 * the specified port. 2671 * the specified port.
2687 * 2672 *
2688 * INPUT: 2673 * INPUT:
2689 * unsigned int eth_port_num Ethernet Port number. 2674 * struct mv643xx_private *mp Ethernet Port.
2690 * 2675 *
2691 * OUTPUT: 2676 * OUTPUT:
2692 * None 2677 * None
@@ -2696,22 +2681,22 @@ static void eth_update_mib_counters(struct mv643xx_private *mp)
2696 * -ENODEV on failure 2681 * -ENODEV on failure
2697 * 2682 *
2698 */ 2683 */
2699static int ethernet_phy_detect(unsigned int port_num) 2684static int ethernet_phy_detect(struct mv643xx_private *mp)
2700{ 2685{
2701 unsigned int phy_reg_data0; 2686 unsigned int phy_reg_data0;
2702 int auto_neg; 2687 int auto_neg;
2703 2688
2704 eth_port_read_smi_reg(port_num, 0, &phy_reg_data0); 2689 eth_port_read_smi_reg(mp, 0, &phy_reg_data0);
2705 auto_neg = phy_reg_data0 & 0x1000; 2690 auto_neg = phy_reg_data0 & 0x1000;
2706 phy_reg_data0 ^= 0x1000; /* invert auto_neg */ 2691 phy_reg_data0 ^= 0x1000; /* invert auto_neg */
2707 eth_port_write_smi_reg(port_num, 0, phy_reg_data0); 2692 eth_port_write_smi_reg(mp, 0, phy_reg_data0);
2708 2693
2709 eth_port_read_smi_reg(port_num, 0, &phy_reg_data0); 2694 eth_port_read_smi_reg(mp, 0, &phy_reg_data0);
2710 if ((phy_reg_data0 & 0x1000) == auto_neg) 2695 if ((phy_reg_data0 & 0x1000) == auto_neg)
2711 return -ENODEV; /* change didn't take */ 2696 return -ENODEV; /* change didn't take */
2712 2697
2713 phy_reg_data0 ^= 0x1000; 2698 phy_reg_data0 ^= 0x1000;
2714 eth_port_write_smi_reg(port_num, 0, phy_reg_data0); 2699 eth_port_write_smi_reg(mp, 0, phy_reg_data0);
2715 return 0; 2700 return 0;
2716} 2701}
2717 2702
@@ -2722,7 +2707,7 @@ static int ethernet_phy_detect(unsigned int port_num)
2722 * This routine returns the given ethernet port PHY address. 2707 * This routine returns the given ethernet port PHY address.
2723 * 2708 *
2724 * INPUT: 2709 * INPUT:
2725 * unsigned int eth_port_num Ethernet Port number. 2710 * struct mv643xx_private *mp Ethernet Port.
2726 * 2711 *
2727 * OUTPUT: 2712 * OUTPUT:
2728 * None. 2713 * None.
@@ -2731,13 +2716,13 @@ static int ethernet_phy_detect(unsigned int port_num)
2731 * PHY address. 2716 * PHY address.
2732 * 2717 *
2733 */ 2718 */
2734static int ethernet_phy_get(unsigned int eth_port_num) 2719static int ethernet_phy_get(struct mv643xx_private *mp)
2735{ 2720{
2736 unsigned int reg_data; 2721 unsigned int reg_data;
2737 2722
2738 reg_data = mv_read(PHY_ADDR_REG); 2723 reg_data = rdl(mp, PHY_ADDR_REG);
2739 2724
2740 return ((reg_data >> (5 * eth_port_num)) & 0x1f); 2725 return ((reg_data >> (5 * mp->port_num)) & 0x1f);
2741} 2726}
2742 2727
2743/* 2728/*
@@ -2747,7 +2732,7 @@ static int ethernet_phy_get(unsigned int eth_port_num)
2747 * This routine sets the given ethernet port PHY address. 2732 * This routine sets the given ethernet port PHY address.
2748 * 2733 *
2749 * INPUT: 2734 * INPUT:
2750 * unsigned int eth_port_num Ethernet Port number. 2735 * struct mv643xx_private *mp Ethernet Port.
2751 * int phy_addr PHY address. 2736 * int phy_addr PHY address.
2752 * 2737 *
2753 * OUTPUT: 2738 * OUTPUT:
@@ -2757,15 +2742,15 @@ static int ethernet_phy_get(unsigned int eth_port_num)
2757 * None. 2742 * None.
2758 * 2743 *
2759 */ 2744 */
2760static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr) 2745static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr)
2761{ 2746{
2762 u32 reg_data; 2747 u32 reg_data;
2763 int addr_shift = 5 * eth_port_num; 2748 int addr_shift = 5 * mp->port_num;
2764 2749
2765 reg_data = mv_read(PHY_ADDR_REG); 2750 reg_data = rdl(mp, PHY_ADDR_REG);
2766 reg_data &= ~(0x1f << addr_shift); 2751 reg_data &= ~(0x1f << addr_shift);
2767 reg_data |= (phy_addr & 0x1f) << addr_shift; 2752 reg_data |= (phy_addr & 0x1f) << addr_shift;
2768 mv_write(PHY_ADDR_REG, reg_data); 2753 wrl(mp, PHY_ADDR_REG, reg_data);
2769} 2754}
2770 2755
2771/* 2756/*
@@ -2775,7 +2760,7 @@ static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
2775 * This routine utilizes the SMI interface to reset the ethernet port PHY. 2760 * This routine utilizes the SMI interface to reset the ethernet port PHY.
2776 * 2761 *
2777 * INPUT: 2762 * INPUT:
2778 * unsigned int eth_port_num Ethernet Port number. 2763 * struct mv643xx_private *mp Ethernet Port.
2779 * 2764 *
2780 * OUTPUT: 2765 * OUTPUT:
2781 * The PHY is reset. 2766 * The PHY is reset.
@@ -2784,51 +2769,52 @@ static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
2784 * None. 2769 * None.
2785 * 2770 *
2786 */ 2771 */
2787static void ethernet_phy_reset(unsigned int eth_port_num) 2772static void ethernet_phy_reset(struct mv643xx_private *mp)
2788{ 2773{
2789 unsigned int phy_reg_data; 2774 unsigned int phy_reg_data;
2790 2775
2791 /* Reset the PHY */ 2776 /* Reset the PHY */
2792 eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); 2777 eth_port_read_smi_reg(mp, 0, &phy_reg_data);
2793 phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */ 2778 phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */
2794 eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data); 2779 eth_port_write_smi_reg(mp, 0, phy_reg_data);
2795 2780
2796 /* wait for PHY to come out of reset */ 2781 /* wait for PHY to come out of reset */
2797 do { 2782 do {
2798 udelay(1); 2783 udelay(1);
2799 eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); 2784 eth_port_read_smi_reg(mp, 0, &phy_reg_data);
2800 } while (phy_reg_data & 0x8000); 2785 } while (phy_reg_data & 0x8000);
2801} 2786}
2802 2787
2803static void mv643xx_eth_port_enable_tx(unsigned int port_num, 2788static void mv643xx_eth_port_enable_tx(struct mv643xx_private *mp,
2804 unsigned int queues) 2789 unsigned int queues)
2805{ 2790{
2806 mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), queues); 2791 wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(mp->port_num), queues);
2807} 2792}
2808 2793
2809static void mv643xx_eth_port_enable_rx(unsigned int port_num, 2794static void mv643xx_eth_port_enable_rx(struct mv643xx_private *mp,
2810 unsigned int queues) 2795 unsigned int queues)
2811{ 2796{
2812 mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), queues); 2797 wrl(mp, RECEIVE_QUEUE_COMMAND_REG(mp->port_num), queues);
2813} 2798}
2814 2799
2815static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num) 2800static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp)
2816{ 2801{
2802 unsigned int port_num = mp->port_num;
2817 u32 queues; 2803 u32 queues;
2818 2804
2819 /* Stop Tx port activity. Check port Tx activity. */ 2805 /* Stop Tx port activity. Check port Tx activity. */
2820 queues = mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF; 2806 queues = rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF;
2821 if (queues) { 2807 if (queues) {
2822 /* Issue stop command for active queues only */ 2808 /* Issue stop command for active queues only */
2823 mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8)); 2809 wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8));
2824 2810
2825 /* Wait for all Tx activity to terminate. */ 2811 /* Wait for all Tx activity to terminate. */
2826 /* Check port cause register that all Tx queues are stopped */ 2812 /* Check port cause register that all Tx queues are stopped */
2827 while (mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF) 2813 while (rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF)
2828 udelay(PHY_WAIT_MICRO_SECONDS); 2814 udelay(PHY_WAIT_MICRO_SECONDS);
2829 2815
2830 /* Wait for Tx FIFO to empty */ 2816 /* Wait for Tx FIFO to empty */
2831 while (mv_read(PORT_STATUS_REG(port_num)) & 2817 while (rdl(mp, PORT_STATUS_REG(port_num)) &
2832 ETH_PORT_TX_FIFO_EMPTY) 2818 ETH_PORT_TX_FIFO_EMPTY)
2833 udelay(PHY_WAIT_MICRO_SECONDS); 2819 udelay(PHY_WAIT_MICRO_SECONDS);
2834 } 2820 }
@@ -2836,19 +2822,20 @@ static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num)
2836 return queues; 2822 return queues;
2837} 2823}
2838 2824
2839static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num) 2825static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp)
2840{ 2826{
2827 unsigned int port_num = mp->port_num;
2841 u32 queues; 2828 u32 queues;
2842 2829
2843 /* Stop Rx port activity. Check port Rx activity. */ 2830 /* Stop Rx port activity. Check port Rx activity. */
2844 queues = mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF; 2831 queues = rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF;
2845 if (queues) { 2832 if (queues) {
2846 /* Issue stop command for active queues only */ 2833 /* Issue stop command for active queues only */
2847 mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8)); 2834 wrl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8));
2848 2835
2849 /* Wait for all Rx activity to terminate. */ 2836 /* Wait for all Rx activity to terminate. */
2850 /* Check port cause register that all Rx queues are stopped */ 2837 /* Check port cause register that all Rx queues are stopped */
2851 while (mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF) 2838 while (rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF)
2852 udelay(PHY_WAIT_MICRO_SECONDS); 2839 udelay(PHY_WAIT_MICRO_SECONDS);
2853 } 2840 }
2854 2841
@@ -2864,7 +2851,7 @@ static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num)
2864 * idle state after this command is performed and the port is disabled. 2851 * idle state after this command is performed and the port is disabled.
2865 * 2852 *
2866 * INPUT: 2853 * INPUT:
2867 * unsigned int eth_port_num Ethernet Port number. 2854 * struct mv643xx_private *mp Ethernet Port.
2868 * 2855 *
2869 * OUTPUT: 2856 * OUTPUT:
2870 * Channel activity is halted. 2857 * Channel activity is halted.
@@ -2873,22 +2860,23 @@ static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num)
2873 * None. 2860 * None.
2874 * 2861 *
2875 */ 2862 */
2876static void eth_port_reset(unsigned int port_num) 2863static void eth_port_reset(struct mv643xx_private *mp)
2877{ 2864{
2865 unsigned int port_num = mp->port_num;
2878 unsigned int reg_data; 2866 unsigned int reg_data;
2879 2867
2880 mv643xx_eth_port_disable_tx(port_num); 2868 mv643xx_eth_port_disable_tx(mp);
2881 mv643xx_eth_port_disable_rx(port_num); 2869 mv643xx_eth_port_disable_rx(mp);
2882 2870
2883 /* Clear all MIB counters */ 2871 /* Clear all MIB counters */
2884 eth_clear_mib_counters(port_num); 2872 eth_clear_mib_counters(mp);
2885 2873
2886 /* Reset the Enable bit in the Configuration Register */ 2874 /* Reset the Enable bit in the Configuration Register */
2887 reg_data = mv_read(PORT_SERIAL_CONTROL_REG(port_num)); 2875 reg_data = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
2888 reg_data &= ~(SERIAL_PORT_ENABLE | 2876 reg_data &= ~(SERIAL_PORT_ENABLE |
2889 DO_NOT_FORCE_LINK_FAIL | 2877 DO_NOT_FORCE_LINK_FAIL |
2890 FORCE_LINK_PASS); 2878 FORCE_LINK_PASS);
2891 mv_write(PORT_SERIAL_CONTROL_REG(port_num), reg_data); 2879 wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), reg_data);
2892} 2880}
2893 2881
2894 2882
@@ -2900,7 +2888,7 @@ static void eth_port_reset(unsigned int port_num)
2900 * order to perform PHY register read. 2888 * order to perform PHY register read.
2901 * 2889 *
2902 * INPUT: 2890 * INPUT:
2903 * unsigned int port_num Ethernet Port number. 2891 * struct mv643xx_private *mp Ethernet Port.
2904 * unsigned int phy_reg PHY register address offset. 2892 * unsigned int phy_reg PHY register address offset.
2905 * unsigned int *value Register value buffer. 2893 * unsigned int *value Register value buffer.
2906 * 2894 *
@@ -2912,10 +2900,10 @@ static void eth_port_reset(unsigned int port_num)
2912 * true otherwise. 2900 * true otherwise.
2913 * 2901 *
2914 */ 2902 */
2915static void eth_port_read_smi_reg(unsigned int port_num, 2903static void eth_port_read_smi_reg(struct mv643xx_private *mp,
2916 unsigned int phy_reg, unsigned int *value) 2904 unsigned int phy_reg, unsigned int *value)
2917{ 2905{
2918 int phy_addr = ethernet_phy_get(port_num); 2906 int phy_addr = ethernet_phy_get(mp);
2919 unsigned long flags; 2907 unsigned long flags;
2920 int i; 2908 int i;
2921 2909
@@ -2923,27 +2911,27 @@ static void eth_port_read_smi_reg(unsigned int port_num,
2923 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); 2911 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
2924 2912
2925 /* wait for the SMI register to become available */ 2913 /* wait for the SMI register to become available */
2926 for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) { 2914 for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) {
2927 if (i == PHY_WAIT_ITERATIONS) { 2915 if (i == PHY_WAIT_ITERATIONS) {
2928 printk("mv643xx PHY busy timeout, port %d\n", port_num); 2916 printk("%s: PHY busy timeout\n", mp->dev->name);
2929 goto out; 2917 goto out;
2930 } 2918 }
2931 udelay(PHY_WAIT_MICRO_SECONDS); 2919 udelay(PHY_WAIT_MICRO_SECONDS);
2932 } 2920 }
2933 2921
2934 mv_write(SMI_REG, 2922 wrl(mp, SMI_REG,
2935 (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ); 2923 (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
2936 2924
2937 /* now wait for the data to be valid */ 2925 /* now wait for the data to be valid */
2938 for (i = 0; !(mv_read(SMI_REG) & ETH_SMI_READ_VALID); i++) { 2926 for (i = 0; !(rdl(mp, SMI_REG) & ETH_SMI_READ_VALID); i++) {
2939 if (i == PHY_WAIT_ITERATIONS) { 2927 if (i == PHY_WAIT_ITERATIONS) {
2940 printk("mv643xx PHY read timeout, port %d\n", port_num); 2928 printk("%s: PHY read timeout\n", mp->dev->name);
2941 goto out; 2929 goto out;
2942 } 2930 }
2943 udelay(PHY_WAIT_MICRO_SECONDS); 2931 udelay(PHY_WAIT_MICRO_SECONDS);
2944 } 2932 }
2945 2933
2946 *value = mv_read(SMI_REG) & 0xffff; 2934 *value = rdl(mp, SMI_REG) & 0xffff;
2947out: 2935out:
2948 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); 2936 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
2949} 2937}
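The SMI handshake in eth_port_read_smi_reg() is: spin until the BUSY bit clears, write one command word (PHY address in bits 20:16, register number in bits 25:21, the read opcode), spin until READ_VALID is set, then take the low 16 data bits. A compact sketch of that sequence using the driver's own macros; the mv643xx_eth_phy_lock serialization and the timeout printks of the real function are left out for brevity, and timeouts are reported as an error code instead:

static int smi_read(struct mv643xx_private *mp, int phy_addr, int reg,
		    unsigned int *value)
{
	int i;

	/* wait for the shared SMI register to become available */
	for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) {
		if (i == PHY_WAIT_ITERATIONS)
			return -ETIMEDOUT;
		udelay(PHY_WAIT_MICRO_SECONDS);
	}

	wrl(mp, SMI_REG, (phy_addr << 16) | (reg << 21) | ETH_SMI_OPCODE_READ);

	/* wait for the read data to become valid */
	for (i = 0; !(rdl(mp, SMI_REG) & ETH_SMI_READ_VALID); i++) {
		if (i == PHY_WAIT_ITERATIONS)
			return -ETIMEDOUT;
		udelay(PHY_WAIT_MICRO_SECONDS);
	}

	*value = rdl(mp, SMI_REG) & 0xffff;
	return 0;
}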
@@ -2956,7 +2944,7 @@ out:
2956 * order to perform writes to PHY registers. 2944 * order to perform writes to PHY registers.
2957 * 2945 *
2958 * INPUT: 2946 * INPUT:
2959 * unsigned int eth_port_num Ethernet Port number. 2947 * struct mv643xx_private *mp Ethernet Port.
2960 * unsigned int phy_reg PHY register address offset. 2948 * unsigned int phy_reg PHY register address offset.
2961 * unsigned int value Register value. 2949 * unsigned int value Register value.
2962 * 2950 *
@@ -2968,29 +2956,28 @@ out:
2968 * true otherwise. 2956 * true otherwise.
2969 * 2957 *
2970 */ 2958 */
2971static void eth_port_write_smi_reg(unsigned int eth_port_num, 2959static void eth_port_write_smi_reg(struct mv643xx_private *mp,
2972 unsigned int phy_reg, unsigned int value) 2960 unsigned int phy_reg, unsigned int value)
2973{ 2961{
2974 int phy_addr; 2962 int phy_addr;
2975 int i; 2963 int i;
2976 unsigned long flags; 2964 unsigned long flags;
2977 2965
2978 phy_addr = ethernet_phy_get(eth_port_num); 2966 phy_addr = ethernet_phy_get(mp);
2979 2967
2980 /* the SMI register is a shared resource */ 2968 /* the SMI register is a shared resource */
2981 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); 2969 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
2982 2970
2983 /* wait for the SMI register to become available */ 2971 /* wait for the SMI register to become available */
2984 for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) { 2972 for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) {
2985 if (i == PHY_WAIT_ITERATIONS) { 2973 if (i == PHY_WAIT_ITERATIONS) {
2986 printk("mv643xx PHY busy timeout, port %d\n", 2974 printk("%s: PHY busy timeout\n", mp->dev->name);
2987 eth_port_num);
2988 goto out; 2975 goto out;
2989 } 2976 }
2990 udelay(PHY_WAIT_MICRO_SECONDS); 2977 udelay(PHY_WAIT_MICRO_SECONDS);
2991 } 2978 }
2992 2979
2993 mv_write(SMI_REG, (phy_addr << 16) | (phy_reg << 21) | 2980 wrl(mp, SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
2994 ETH_SMI_OPCODE_WRITE | (value & 0xffff)); 2981 ETH_SMI_OPCODE_WRITE | (value & 0xffff));
2995out: 2982out:
2996 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); 2983 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
@@ -3001,17 +2988,17 @@ out:
3001 */ 2988 */
3002static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location) 2989static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location)
3003{ 2990{
3004 int val;
3005 struct mv643xx_private *mp = netdev_priv(dev); 2991 struct mv643xx_private *mp = netdev_priv(dev);
2992 int val;
3006 2993
3007 eth_port_read_smi_reg(mp->port_num, location, &val); 2994 eth_port_read_smi_reg(mp, location, &val);
3008 return val; 2995 return val;
3009} 2996}
3010 2997
3011static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val) 2998static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val)
3012{ 2999{
3013 struct mv643xx_private *mp = netdev_priv(dev); 3000 struct mv643xx_private *mp = netdev_priv(dev);
3014 eth_port_write_smi_reg(mp->port_num, location, val); 3001 eth_port_write_smi_reg(mp, location, val);
3015} 3002}
3016 3003
3017/* 3004/*
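A minimal sketch of the per-port register accessor pattern the mv643xx hunks above convert to (rdl()/wrl() taking the private struct instead of global mv_read()/mv_write()), assuming a hypothetical ioremapped register base kept in the private data; the real driver's field layout may differ.

#include <linux/io.h>
#include <linux/types.h>

struct example_port {
	void __iomem *base;		/* hypothetical ioremapped register window */
};

static inline u32 example_rdl(struct example_port *pp, int offset)
{
	return readl(pp->base + offset);
}

static inline void example_wrl(struct example_port *pp, int offset, u32 data)
{
	writel(data, pp->base + offset);
}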
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 900ab5d2ba70..46119bb3770a 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -786,7 +786,8 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
786 struct netdev_private *np; 786 struct netdev_private *np;
787 int i, option, irq, chip_idx = ent->driver_data; 787 int i, option, irq, chip_idx = ent->driver_data;
788 static int find_cnt = -1; 788 static int find_cnt = -1;
789 unsigned long iostart, iosize; 789 resource_size_t iostart;
790 unsigned long iosize;
790 void __iomem *ioaddr; 791 void __iomem *ioaddr;
791 const int pcibar = 1; /* PCI base address register */ 792 const int pcibar = 1; /* PCI base address register */
792 int prev_eedata; 793 int prev_eedata;
@@ -946,10 +947,11 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
946 goto err_create_file; 947 goto err_create_file;
947 948
948 if (netif_msg_drv(np)) { 949 if (netif_msg_drv(np)) {
949 printk(KERN_INFO "natsemi %s: %s at %#08lx " 950 printk(KERN_INFO "natsemi %s: %s at %#08llx "
950 "(%s), %s, IRQ %d", 951 "(%s), %s, IRQ %d",
951 dev->name, natsemi_pci_info[chip_idx].name, iostart, 952 dev->name, natsemi_pci_info[chip_idx].name,
952 pci_name(np->pci_dev), print_mac(mac, dev->dev_addr), irq); 953 (unsigned long long)iostart, pci_name(np->pci_dev),
954 print_mac(mac, dev->dev_addr), irq);
953 if (dev->if_port == PORT_TP) 955 if (dev->if_port == PORT_TP)
954 printk(", port TP.\n"); 956 printk(", port TP.\n");
955 else if (np->ignore_phy) 957 else if (np->ignore_phy)
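A minimal sketch of the portable-printing idiom the natsemi hunk adopts: resource_size_t may be 32 or 64 bits wide, so the value is cast to unsigned long long and printed with %llx. The function and BAR number here are only for illustration.

#include <linux/kernel.h>
#include <linux/pci.h>

static void example_show_bar(struct pci_dev *pdev, int bar)
{
	resource_size_t iostart = pci_resource_start(pdev, bar);

	printk(KERN_INFO "BAR %d at %#08llx\n",
	       bar, (unsigned long long)iostart);
}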
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 7f20a03623a0..8cb29f5b1038 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -95,23 +95,6 @@
95 95
96#define ADDR_IN_WINDOW1(off) \ 96#define ADDR_IN_WINDOW1(off) \
97 ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0 97 ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
98/*
99 * In netxen_nic_down(), we must wait for any pending callback requests into
100 * netxen_watchdog_task() to complete; eg otherwise the watchdog_timer could be
101 * reenabled right after it is deleted in netxen_nic_down(). FLUSH_SCHEDULED_WORK()
102 * does this synchronization.
103 *
104 * Normally, schedule_work()/flush_scheduled_work() could have worked, but
105 * netxen_nic_close() is invoked with kernel rtnl lock held. netif_carrier_off()
106 * call in netxen_nic_close() triggers a schedule_work(&linkwatch_work), and a
107 * subsequent call to flush_scheduled_work() in netxen_nic_down() would cause
108 * linkwatch_event() to be executed which also attempts to acquire the rtnl
109 * lock thus causing a deadlock.
110 */
111
112#define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
113#define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
114extern struct workqueue_struct *netxen_workq;
115 98
116/* 99/*
117 * normalize a 64MB crb address to 32MB PCI window 100 * normalize a 64MB crb address to 32MB PCI window
@@ -1050,7 +1033,6 @@ void netxen_halt_pegs(struct netxen_adapter *adapter);
1050int netxen_rom_se(struct netxen_adapter *adapter, int addr); 1033int netxen_rom_se(struct netxen_adapter *adapter, int addr);
1051 1034
1052/* Functions from netxen_nic_isr.c */ 1035/* Functions from netxen_nic_isr.c */
1053int netxen_nic_link_ok(struct netxen_adapter *adapter);
1054void netxen_initialize_adapter_sw(struct netxen_adapter *adapter); 1036void netxen_initialize_adapter_sw(struct netxen_adapter *adapter);
1055void netxen_initialize_adapter_hw(struct netxen_adapter *adapter); 1037void netxen_initialize_adapter_hw(struct netxen_adapter *adapter);
1056void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr, 1038void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr,
diff --git a/drivers/net/netxen/netxen_nic_isr.c b/drivers/net/netxen/netxen_nic_isr.c
index c81313b717bd..f487615f4063 100644
--- a/drivers/net/netxen/netxen_nic_isr.c
+++ b/drivers/net/netxen/netxen_nic_isr.c
@@ -172,6 +172,7 @@ void netxen_nic_gbe_handle_phy_intr(struct netxen_adapter *adapter)
172 netxen_nic_isr_other(adapter); 172 netxen_nic_isr_other(adapter);
173} 173}
174 174
175#if 0
175int netxen_nic_link_ok(struct netxen_adapter *adapter) 176int netxen_nic_link_ok(struct netxen_adapter *adapter)
176{ 177{
177 switch (adapter->ahw.board_type) { 178 switch (adapter->ahw.board_type) {
@@ -189,6 +190,7 @@ int netxen_nic_link_ok(struct netxen_adapter *adapter)
189 190
190 return 0; 191 return 0;
191} 192}
193#endif /* 0 */
192 194
193void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter) 195void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter)
194{ 196{
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index a8fb439a4d03..7144c255ce54 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -86,7 +86,24 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
86 86
87MODULE_DEVICE_TABLE(pci, netxen_pci_tbl); 87MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
88 88
89struct workqueue_struct *netxen_workq; 89/*
90 * In netxen_nic_down(), we must wait for any pending callback requests into
91 * netxen_watchdog_task() to complete; eg otherwise the watchdog_timer could be
92 * reenabled right after it is deleted in netxen_nic_down().
93 * FLUSH_SCHEDULED_WORK() does this synchronization.
94 *
95 * Normally, schedule_work()/flush_scheduled_work() could have worked, but
96 * netxen_nic_close() is invoked with kernel rtnl lock held. netif_carrier_off()
97 * call in netxen_nic_close() triggers a schedule_work(&linkwatch_work), and a
98 * subsequent call to flush_scheduled_work() in netxen_nic_down() would cause
99 * linkwatch_event() to be executed which also attempts to acquire the rtnl
100 * lock thus causing a deadlock.
101 */
102
103static struct workqueue_struct *netxen_workq;
104#define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
105#define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
106
90static void netxen_watchdog(unsigned long); 107static void netxen_watchdog(unsigned long);
91 108
92static void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, 109static void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
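A minimal sketch of the driver-private workqueue pattern the relocated comment argues for: work is queued on the driver's own queue, so it can be flushed during shutdown without flushing the global events workqueue (which would deadlock on the rtnl lock via linkwatch). All names below are hypothetical.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_watchdog_work;

static void example_watchdog(struct work_struct *work)
{
	/* periodic link/health check would run here */
}

static void example_timer_fired(void)
{
	queue_work(example_wq, &example_watchdog_work);	/* SCHEDULE_WORK() */
}

static void example_down(void)
{
	/* FLUSH_SCHEDULED_WORK(): wait for a pending watchdog call to finish */
	flush_workqueue(example_wq);
}

static int __init example_init(void)
{
	example_wq = create_singlethread_workqueue("example");
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&example_watchdog_work, example_watchdog);
	return 0;
}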
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 2e39e0285d8f..bcd7f9814ed8 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -1012,7 +1012,7 @@ static int pasemi_mac_phy_init(struct net_device *dev)
1012 goto err; 1012 goto err;
1013 1013
1014 phy_id = *prop; 1014 phy_id = *prop;
1015 snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id); 1015 snprintf(mac->phy_id, BUS_ID_SIZE, "%x:%02x", (int)r.start, phy_id);
1016 1016
1017 of_node_put(phy_dn); 1017 of_node_put(phy_dn);
1018 1018
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f5310ed3760d..60c5cfe96918 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -176,6 +176,20 @@ static struct phy_driver bcm5461_driver = {
176 .driver = { .owner = THIS_MODULE }, 176 .driver = { .owner = THIS_MODULE },
177}; 177};
178 178
179static struct phy_driver bcm5464_driver = {
180 .phy_id = 0x002060b0,
181 .phy_id_mask = 0xfffffff0,
182 .name = "Broadcom BCM5464",
183 .features = PHY_GBIT_FEATURES,
184 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
185 .config_init = bcm54xx_config_init,
186 .config_aneg = genphy_config_aneg,
187 .read_status = genphy_read_status,
188 .ack_interrupt = bcm54xx_ack_interrupt,
189 .config_intr = bcm54xx_config_intr,
190 .driver = { .owner = THIS_MODULE },
191};
192
179static struct phy_driver bcm5481_driver = { 193static struct phy_driver bcm5481_driver = {
180 .phy_id = 0x0143bca0, 194 .phy_id = 0x0143bca0,
181 .phy_id_mask = 0xfffffff0, 195 .phy_id_mask = 0xfffffff0,
@@ -217,6 +231,9 @@ static int __init broadcom_init(void)
217 ret = phy_driver_register(&bcm5461_driver); 231 ret = phy_driver_register(&bcm5461_driver);
218 if (ret) 232 if (ret)
219 goto out_5461; 233 goto out_5461;
234 ret = phy_driver_register(&bcm5464_driver);
235 if (ret)
236 goto out_5464;
220 ret = phy_driver_register(&bcm5481_driver); 237 ret = phy_driver_register(&bcm5481_driver);
221 if (ret) 238 if (ret)
222 goto out_5481; 239 goto out_5481;
@@ -228,6 +245,8 @@ static int __init broadcom_init(void)
228out_5482: 245out_5482:
229 phy_driver_unregister(&bcm5481_driver); 246 phy_driver_unregister(&bcm5481_driver);
230out_5481: 247out_5481:
248 phy_driver_unregister(&bcm5464_driver);
249out_5464:
231 phy_driver_unregister(&bcm5461_driver); 250 phy_driver_unregister(&bcm5461_driver);
232out_5461: 251out_5461:
233 phy_driver_unregister(&bcm5421_driver); 252 phy_driver_unregister(&bcm5421_driver);
@@ -241,6 +260,7 @@ static void __exit broadcom_exit(void)
241{ 260{
242 phy_driver_unregister(&bcm5482_driver); 261 phy_driver_unregister(&bcm5482_driver);
243 phy_driver_unregister(&bcm5481_driver); 262 phy_driver_unregister(&bcm5481_driver);
263 phy_driver_unregister(&bcm5464_driver);
244 phy_driver_unregister(&bcm5461_driver); 264 phy_driver_unregister(&bcm5461_driver);
245 phy_driver_unregister(&bcm5421_driver); 265 phy_driver_unregister(&bcm5421_driver);
246 phy_driver_unregister(&bcm5411_driver); 266 phy_driver_unregister(&bcm5411_driver);
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index ca9b040f9ad9..4e07956a483b 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -213,7 +213,7 @@ static int __init fixed_mdio_bus_init(void)
213 goto err_pdev; 213 goto err_pdev;
214 } 214 }
215 215
216 fmb->mii_bus.id = 0; 216 snprintf(fmb->mii_bus.id, MII_BUS_ID_SIZE, "0");
217 fmb->mii_bus.name = "Fixed MDIO Bus"; 217 fmb->mii_bus.name = "Fixed MDIO Bus";
218 fmb->mii_bus.dev = &pdev->dev; 218 fmb->mii_bus.dev = &pdev->dev;
219 fmb->mii_bus.read = &fixed_mdio_read; 219 fmb->mii_bus.read = &fixed_mdio_read;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index f4c4fd85425f..8b1121b02f98 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -86,35 +86,55 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
86EXPORT_SYMBOL(phy_device_create); 86EXPORT_SYMBOL(phy_device_create);
87 87
88/** 88/**
89 * get_phy_device - reads the specified PHY device and returns its @phy_device struct 89 * get_phy_id - reads the specified addr for its ID.
90 * @bus: the target MII bus 90 * @bus: the target MII bus
91 * @addr: PHY address on the MII bus 91 * @addr: PHY address on the MII bus
92 * @phy_id: where to store the ID retrieved.
92 * 93 *
93 * Description: Reads the ID registers of the PHY at @addr on the 94 * Description: Reads the ID registers of the PHY at @addr on the
94 * @bus, then allocates and returns the phy_device to represent it. 95 * @bus, stores it in @phy_id and returns zero on success.
95 */ 96 */
96struct phy_device * get_phy_device(struct mii_bus *bus, int addr) 97int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
97{ 98{
98 int phy_reg; 99 int phy_reg;
99 u32 phy_id;
100 struct phy_device *dev = NULL;
101 100
102 /* Grab the bits from PHYIR1, and put them 101 /* Grab the bits from PHYIR1, and put them
103 * in the upper half */ 102 * in the upper half */
104 phy_reg = bus->read(bus, addr, MII_PHYSID1); 103 phy_reg = bus->read(bus, addr, MII_PHYSID1);
105 104
106 if (phy_reg < 0) 105 if (phy_reg < 0)
107 return ERR_PTR(phy_reg); 106 return -EIO;
108 107
109 phy_id = (phy_reg & 0xffff) << 16; 108 *phy_id = (phy_reg & 0xffff) << 16;
110 109
111 /* Grab the bits from PHYIR2, and put them in the lower half */ 110 /* Grab the bits from PHYIR2, and put them in the lower half */
112 phy_reg = bus->read(bus, addr, MII_PHYSID2); 111 phy_reg = bus->read(bus, addr, MII_PHYSID2);
113 112
114 if (phy_reg < 0) 113 if (phy_reg < 0)
115 return ERR_PTR(phy_reg); 114 return -EIO;
115
116 *phy_id |= (phy_reg & 0xffff);
117
118 return 0;
119}
120
121/**
122 * get_phy_device - reads the specified PHY device and returns its @phy_device struct
123 * @bus: the target MII bus
124 * @addr: PHY address on the MII bus
125 *
126 * Description: Reads the ID registers of the PHY at @addr on the
127 * @bus, then allocates and returns the phy_device to represent it.
128 */
129struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
130{
131 struct phy_device *dev = NULL;
132 u32 phy_id;
133 int r;
116 134
117 phy_id |= (phy_reg & 0xffff); 135 r = get_phy_id(bus, addr, &phy_id);
136 if (r)
137 return ERR_PTR(r);
118 138
119 /* If the phy_id is all Fs, there is no device there */ 139 /* If the phy_id is all Fs, there is no device there */
120 if (0xffffffff == phy_id) 140 if (0xffffffff == phy_id)
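A minimal sketch of how a bus scan might use the new get_phy_id() helper, assuming the helper is declared where the caller can see it; an ID of all ones means nothing answered at that address.

#include <linux/kernel.h>
#include <linux/phy.h>

static void example_scan_mii_bus(struct mii_bus *bus)
{
	int addr;

	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
		u32 phy_id;

		if (get_phy_id(bus, addr, &phy_id))
			continue;		/* MDIO read failed */
		if (phy_id == 0xffffffff)
			continue;		/* no device at this address */
		printk(KERN_INFO "found PHY 0x%08x at address %d\n",
		       phy_id, addr);
	}
}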
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index 487f9d2ac5b4..5986cec17f19 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -88,31 +88,31 @@ static int sb1000_close(struct net_device *dev);
88 88
89 89
90/* SB1000 hardware routines to be used during open/configuration phases */ 90/* SB1000 hardware routines to be used during open/configuration phases */
91static inline int card_wait_for_busy_clear(const int ioaddr[], 91static int card_wait_for_busy_clear(const int ioaddr[],
92 const char* name); 92 const char* name);
93static inline int card_wait_for_ready(const int ioaddr[], const char* name, 93static int card_wait_for_ready(const int ioaddr[], const char* name,
94 unsigned char in[]); 94 unsigned char in[]);
95static int card_send_command(const int ioaddr[], const char* name, 95static int card_send_command(const int ioaddr[], const char* name,
96 const unsigned char out[], unsigned char in[]); 96 const unsigned char out[], unsigned char in[]);
97 97
98/* SB1000 hardware routines to be used during frame rx interrupt */ 98/* SB1000 hardware routines to be used during frame rx interrupt */
99static inline int sb1000_wait_for_ready(const int ioaddr[], const char* name); 99static int sb1000_wait_for_ready(const int ioaddr[], const char* name);
100static inline int sb1000_wait_for_ready_clear(const int ioaddr[], 100static int sb1000_wait_for_ready_clear(const int ioaddr[],
101 const char* name); 101 const char* name);
102static inline void sb1000_send_command(const int ioaddr[], const char* name, 102static void sb1000_send_command(const int ioaddr[], const char* name,
103 const unsigned char out[]); 103 const unsigned char out[]);
104static inline void sb1000_read_status(const int ioaddr[], unsigned char in[]); 104static void sb1000_read_status(const int ioaddr[], unsigned char in[]);
105static inline void sb1000_issue_read_command(const int ioaddr[], 105static void sb1000_issue_read_command(const int ioaddr[],
106 const char* name); 106 const char* name);
107 107
108/* SB1000 commands for open/configuration */ 108/* SB1000 commands for open/configuration */
109static inline int sb1000_reset(const int ioaddr[], const char* name); 109static int sb1000_reset(const int ioaddr[], const char* name);
110static inline int sb1000_check_CRC(const int ioaddr[], const char* name); 110static int sb1000_check_CRC(const int ioaddr[], const char* name);
111static inline int sb1000_start_get_set_command(const int ioaddr[], 111static inline int sb1000_start_get_set_command(const int ioaddr[],
112 const char* name); 112 const char* name);
113static inline int sb1000_end_get_set_command(const int ioaddr[], 113static int sb1000_end_get_set_command(const int ioaddr[],
114 const char* name); 114 const char* name);
115static inline int sb1000_activate(const int ioaddr[], const char* name); 115static int sb1000_activate(const int ioaddr[], const char* name);
116static int sb1000_get_firmware_version(const int ioaddr[], 116static int sb1000_get_firmware_version(const int ioaddr[],
117 const char* name, unsigned char version[], int do_end); 117 const char* name, unsigned char version[], int do_end);
118static int sb1000_get_frequency(const int ioaddr[], const char* name, 118static int sb1000_get_frequency(const int ioaddr[], const char* name,
@@ -125,8 +125,8 @@ static int sb1000_set_PIDs(const int ioaddr[], const char* name,
125 const short PID[]); 125 const short PID[]);
126 126
127/* SB1000 commands for frame rx interrupt */ 127/* SB1000 commands for frame rx interrupt */
128static inline int sb1000_rx(struct net_device *dev); 128static int sb1000_rx(struct net_device *dev);
129static inline void sb1000_error_dpc(struct net_device *dev); 129static void sb1000_error_dpc(struct net_device *dev);
130 130
131static const struct pnp_device_id sb1000_pnp_ids[] = { 131static const struct pnp_device_id sb1000_pnp_ids[] = {
132 { "GIC1000", 0 }, 132 { "GIC1000", 0 },
@@ -250,7 +250,7 @@ static struct pnp_driver sb1000_driver = {
250static const int TimeOutJiffies = (875 * HZ) / 100; 250static const int TimeOutJiffies = (875 * HZ) / 100;
251 251
252/* Card Wait For Busy Clear (cannot be used during an interrupt) */ 252/* Card Wait For Busy Clear (cannot be used during an interrupt) */
253static inline int 253static int
254card_wait_for_busy_clear(const int ioaddr[], const char* name) 254card_wait_for_busy_clear(const int ioaddr[], const char* name)
255{ 255{
256 unsigned char a; 256 unsigned char a;
@@ -274,7 +274,7 @@ card_wait_for_busy_clear(const int ioaddr[], const char* name)
274} 274}
275 275
276/* Card Wait For Ready (cannot be used during an interrupt) */ 276/* Card Wait For Ready (cannot be used during an interrupt) */
277static inline int 277static int
278card_wait_for_ready(const int ioaddr[], const char* name, unsigned char in[]) 278card_wait_for_ready(const int ioaddr[], const char* name, unsigned char in[])
279{ 279{
280 unsigned char a; 280 unsigned char a;
@@ -354,7 +354,7 @@ card_send_command(const int ioaddr[], const char* name,
354static const int Sb1000TimeOutJiffies = 7 * HZ; 354static const int Sb1000TimeOutJiffies = 7 * HZ;
355 355
356/* Card Wait For Ready (to be used during frame rx) */ 356/* Card Wait For Ready (to be used during frame rx) */
357static inline int 357static int
358sb1000_wait_for_ready(const int ioaddr[], const char* name) 358sb1000_wait_for_ready(const int ioaddr[], const char* name)
359{ 359{
360 unsigned long timeout; 360 unsigned long timeout;
@@ -380,7 +380,7 @@ sb1000_wait_for_ready(const int ioaddr[], const char* name)
380} 380}
381 381
382/* Card Wait For Ready Clear (to be used during frame rx) */ 382/* Card Wait For Ready Clear (to be used during frame rx) */
383static inline int 383static int
384sb1000_wait_for_ready_clear(const int ioaddr[], const char* name) 384sb1000_wait_for_ready_clear(const int ioaddr[], const char* name)
385{ 385{
386 unsigned long timeout; 386 unsigned long timeout;
@@ -405,7 +405,7 @@ sb1000_wait_for_ready_clear(const int ioaddr[], const char* name)
405} 405}
406 406
407/* Card Send Command (to be used during frame rx) */ 407/* Card Send Command (to be used during frame rx) */
408static inline void 408static void
409sb1000_send_command(const int ioaddr[], const char* name, 409sb1000_send_command(const int ioaddr[], const char* name,
410 const unsigned char out[]) 410 const unsigned char out[])
411{ 411{
@@ -422,7 +422,7 @@ sb1000_send_command(const int ioaddr[], const char* name,
422} 422}
423 423
424/* Card Read Status (to be used during frame rx) */ 424/* Card Read Status (to be used during frame rx) */
425static inline void 425static void
426sb1000_read_status(const int ioaddr[], unsigned char in[]) 426sb1000_read_status(const int ioaddr[], unsigned char in[])
427{ 427{
428 in[1] = inb(ioaddr[0] + 1); 428 in[1] = inb(ioaddr[0] + 1);
@@ -434,10 +434,10 @@ sb1000_read_status(const int ioaddr[], unsigned char in[])
434} 434}
435 435
436/* Issue Read Command (to be used during frame rx) */ 436/* Issue Read Command (to be used during frame rx) */
437static inline void 437static void
438sb1000_issue_read_command(const int ioaddr[], const char* name) 438sb1000_issue_read_command(const int ioaddr[], const char* name)
439{ 439{
440 const unsigned char Command0[6] = {0x20, 0x00, 0x00, 0x01, 0x00, 0x00}; 440 static const unsigned char Command0[6] = {0x20, 0x00, 0x00, 0x01, 0x00, 0x00};
441 441
442 sb1000_wait_for_ready_clear(ioaddr, name); 442 sb1000_wait_for_ready_clear(ioaddr, name);
443 outb(0xa0, ioaddr[0] + 6); 443 outb(0xa0, ioaddr[0] + 6);
@@ -450,12 +450,13 @@ sb1000_issue_read_command(const int ioaddr[], const char* name)
450 * SB1000 commands for open/configuration 450 * SB1000 commands for open/configuration
451 */ 451 */
452/* reset SB1000 card */ 452/* reset SB1000 card */
453static inline int 453static int
454sb1000_reset(const int ioaddr[], const char* name) 454sb1000_reset(const int ioaddr[], const char* name)
455{ 455{
456 static const unsigned char Command0[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
457
456 unsigned char st[7]; 458 unsigned char st[7];
457 int port, status; 459 int port, status;
458 const unsigned char Command0[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
459 460
460 port = ioaddr[1] + 6; 461 port = ioaddr[1] + 6;
461 outb(0x4, port); 462 outb(0x4, port);
@@ -479,12 +480,13 @@ sb1000_reset(const int ioaddr[], const char* name)
479} 480}
480 481
481/* check SB1000 firmware CRC */ 482/* check SB1000 firmware CRC */
482static inline int 483static int
483sb1000_check_CRC(const int ioaddr[], const char* name) 484sb1000_check_CRC(const int ioaddr[], const char* name)
484{ 485{
486 static const unsigned char Command0[6] = {0x80, 0x1f, 0x00, 0x00, 0x00, 0x00};
487
485 unsigned char st[7]; 488 unsigned char st[7];
486 int crc, status; 489 int crc, status;
487 const unsigned char Command0[6] = {0x80, 0x1f, 0x00, 0x00, 0x00, 0x00};
488 490
489 /* check CRC */ 491 /* check CRC */
490 if ((status = card_send_command(ioaddr, name, Command0, st))) 492 if ((status = card_send_command(ioaddr, name, Command0, st)))
@@ -498,32 +500,35 @@ sb1000_check_CRC(const int ioaddr[], const char* name)
498static inline int 500static inline int
499sb1000_start_get_set_command(const int ioaddr[], const char* name) 501sb1000_start_get_set_command(const int ioaddr[], const char* name)
500{ 502{
503 static const unsigned char Command0[6] = {0x80, 0x1b, 0x00, 0x00, 0x00, 0x00};
504
501 unsigned char st[7]; 505 unsigned char st[7];
502 const unsigned char Command0[6] = {0x80, 0x1b, 0x00, 0x00, 0x00, 0x00};
503 506
504 return card_send_command(ioaddr, name, Command0, st); 507 return card_send_command(ioaddr, name, Command0, st);
505} 508}
506 509
507static inline int 510static int
508sb1000_end_get_set_command(const int ioaddr[], const char* name) 511sb1000_end_get_set_command(const int ioaddr[], const char* name)
509{ 512{
513 static const unsigned char Command0[6] = {0x80, 0x1b, 0x02, 0x00, 0x00, 0x00};
514 static const unsigned char Command1[6] = {0x20, 0x00, 0x00, 0x00, 0x00, 0x00};
515
510 unsigned char st[7]; 516 unsigned char st[7];
511 int status; 517 int status;
512 const unsigned char Command0[6] = {0x80, 0x1b, 0x02, 0x00, 0x00, 0x00};
513 const unsigned char Command1[6] = {0x20, 0x00, 0x00, 0x00, 0x00, 0x00};
514 518
515 if ((status = card_send_command(ioaddr, name, Command0, st))) 519 if ((status = card_send_command(ioaddr, name, Command0, st)))
516 return status; 520 return status;
517 return card_send_command(ioaddr, name, Command1, st); 521 return card_send_command(ioaddr, name, Command1, st);
518} 522}
519 523
520static inline int 524static int
521sb1000_activate(const int ioaddr[], const char* name) 525sb1000_activate(const int ioaddr[], const char* name)
522{ 526{
527 static const unsigned char Command0[6] = {0x80, 0x11, 0x00, 0x00, 0x00, 0x00};
528 static const unsigned char Command1[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
529
523 unsigned char st[7]; 530 unsigned char st[7];
524 int status; 531 int status;
525 const unsigned char Command0[6] = {0x80, 0x11, 0x00, 0x00, 0x00, 0x00};
526 const unsigned char Command1[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
527 532
528 ssleep(1); 533 ssleep(1);
529 if ((status = card_send_command(ioaddr, name, Command0, st))) 534 if ((status = card_send_command(ioaddr, name, Command0, st)))
@@ -544,9 +549,10 @@ static int
544sb1000_get_firmware_version(const int ioaddr[], const char* name, 549sb1000_get_firmware_version(const int ioaddr[], const char* name,
545 unsigned char version[], int do_end) 550 unsigned char version[], int do_end)
546{ 551{
552 static const unsigned char Command0[6] = {0x80, 0x23, 0x00, 0x00, 0x00, 0x00};
553
547 unsigned char st[7]; 554 unsigned char st[7];
548 int status; 555 int status;
549 const unsigned char Command0[6] = {0x80, 0x23, 0x00, 0x00, 0x00, 0x00};
550 556
551 if ((status = sb1000_start_get_set_command(ioaddr, name))) 557 if ((status = sb1000_start_get_set_command(ioaddr, name)))
552 return status; 558 return status;
@@ -566,9 +572,10 @@ sb1000_get_firmware_version(const int ioaddr[], const char* name,
566static int 572static int
567sb1000_get_frequency(const int ioaddr[], const char* name, int* frequency) 573sb1000_get_frequency(const int ioaddr[], const char* name, int* frequency)
568{ 574{
575 static const unsigned char Command0[6] = {0x80, 0x44, 0x00, 0x00, 0x00, 0x00};
576
569 unsigned char st[7]; 577 unsigned char st[7];
570 int status; 578 int status;
571 const unsigned char Command0[6] = {0x80, 0x44, 0x00, 0x00, 0x00, 0x00};
572 579
573 udelay(1000); 580 udelay(1000);
574 if ((status = sb1000_start_get_set_command(ioaddr, name))) 581 if ((status = sb1000_start_get_set_command(ioaddr, name)))
@@ -613,12 +620,13 @@ sb1000_set_frequency(const int ioaddr[], const char* name, int frequency)
613static int 620static int
614sb1000_get_PIDs(const int ioaddr[], const char* name, short PID[]) 621sb1000_get_PIDs(const int ioaddr[], const char* name, short PID[])
615{ 622{
623 static const unsigned char Command0[6] = {0x80, 0x40, 0x00, 0x00, 0x00, 0x00};
624 static const unsigned char Command1[6] = {0x80, 0x41, 0x00, 0x00, 0x00, 0x00};
625 static const unsigned char Command2[6] = {0x80, 0x42, 0x00, 0x00, 0x00, 0x00};
626 static const unsigned char Command3[6] = {0x80, 0x43, 0x00, 0x00, 0x00, 0x00};
627
616 unsigned char st[7]; 628 unsigned char st[7];
617 int status; 629 int status;
618 const unsigned char Command0[6] = {0x80, 0x40, 0x00, 0x00, 0x00, 0x00};
619 const unsigned char Command1[6] = {0x80, 0x41, 0x00, 0x00, 0x00, 0x00};
620 const unsigned char Command2[6] = {0x80, 0x42, 0x00, 0x00, 0x00, 0x00};
621 const unsigned char Command3[6] = {0x80, 0x43, 0x00, 0x00, 0x00, 0x00};
622 630
623 udelay(1000); 631 udelay(1000);
624 if ((status = sb1000_start_get_set_command(ioaddr, name))) 632 if ((status = sb1000_start_get_set_command(ioaddr, name)))
@@ -647,6 +655,8 @@ sb1000_get_PIDs(const int ioaddr[], const char* name, short PID[])
647static int 655static int
648sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[]) 656sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[])
649{ 657{
658 static const unsigned char Command4[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
659
650 unsigned char st[7]; 660 unsigned char st[7];
651 short p; 661 short p;
652 int status; 662 int status;
@@ -654,7 +664,6 @@ sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[])
654 unsigned char Command1[6] = {0x80, 0x32, 0x00, 0x00, 0x00, 0x00}; 664 unsigned char Command1[6] = {0x80, 0x32, 0x00, 0x00, 0x00, 0x00};
655 unsigned char Command2[6] = {0x80, 0x33, 0x00, 0x00, 0x00, 0x00}; 665 unsigned char Command2[6] = {0x80, 0x33, 0x00, 0x00, 0x00, 0x00};
656 unsigned char Command3[6] = {0x80, 0x34, 0x00, 0x00, 0x00, 0x00}; 666 unsigned char Command3[6] = {0x80, 0x34, 0x00, 0x00, 0x00, 0x00};
657 const unsigned char Command4[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
658 667
659 udelay(1000); 668 udelay(1000);
660 if ((status = sb1000_start_get_set_command(ioaddr, name))) 669 if ((status = sb1000_start_get_set_command(ioaddr, name)))
@@ -694,7 +703,7 @@ sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[])
694} 703}
695 704
696 705
697static inline void 706static void
698sb1000_print_status_buffer(const char* name, unsigned char st[], 707sb1000_print_status_buffer(const char* name, unsigned char st[],
699 unsigned char buffer[], int size) 708 unsigned char buffer[], int size)
700{ 709{
@@ -725,7 +734,7 @@ sb1000_print_status_buffer(const char* name, unsigned char st[],
725/* receive a single frame and assemble datagram 734/* receive a single frame and assemble datagram
726 * (this is the heart of the interrupt routine) 735 * (this is the heart of the interrupt routine)
727 */ 736 */
728static inline int 737static int
729sb1000_rx(struct net_device *dev) 738sb1000_rx(struct net_device *dev)
730{ 739{
731 740
@@ -888,14 +897,15 @@ dropped_frame:
888 return -1; 897 return -1;
889} 898}
890 899
891static inline void 900static void
892sb1000_error_dpc(struct net_device *dev) 901sb1000_error_dpc(struct net_device *dev)
893{ 902{
903 static const unsigned char Command0[6] = {0x80, 0x26, 0x00, 0x00, 0x00, 0x00};
904
894 char *name; 905 char *name;
895 unsigned char st[5]; 906 unsigned char st[5];
896 int ioaddr[2]; 907 int ioaddr[2];
897 struct sb1000_private *lp = netdev_priv(dev); 908 struct sb1000_private *lp = netdev_priv(dev);
898 const unsigned char Command0[6] = {0x80, 0x26, 0x00, 0x00, 0x00, 0x00};
899 const int ErrorDpcCounterInitialize = 200; 909 const int ErrorDpcCounterInitialize = 200;
900 910
901 ioaddr[0] = dev->base_addr; 911 ioaddr[0] = dev->base_addr;
@@ -1077,14 +1087,15 @@ sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
1077/* SB1000 interrupt handler. */ 1087/* SB1000 interrupt handler. */
1078static irqreturn_t sb1000_interrupt(int irq, void *dev_id) 1088static irqreturn_t sb1000_interrupt(int irq, void *dev_id)
1079{ 1089{
1090 static const unsigned char Command0[6] = {0x80, 0x2c, 0x00, 0x00, 0x00, 0x00};
1091 static const unsigned char Command1[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
1092
1080 char *name; 1093 char *name;
1081 unsigned char st; 1094 unsigned char st;
1082 int ioaddr[2]; 1095 int ioaddr[2];
1083 struct net_device *dev = dev_id; 1096 struct net_device *dev = dev_id;
1084 struct sb1000_private *lp = netdev_priv(dev); 1097 struct sb1000_private *lp = netdev_priv(dev);
1085 1098
1086 const unsigned char Command0[6] = {0x80, 0x2c, 0x00, 0x00, 0x00, 0x00};
1087 const unsigned char Command1[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
1088 const int MaxRxErrorCount = 6; 1099 const int MaxRxErrorCount = 6;
1089 1100
1090 ioaddr[0] = dev->base_addr; 1101 ioaddr[0] = dev->base_addr;
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 7b53d658e337..888b7dec9866 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2374,7 +2374,7 @@ static int sbmac_init(struct platform_device *pldev, long long base)
2374 dev->name, base, print_mac(mac, eaddr)); 2374 dev->name, base, print_mac(mac, eaddr));
2375 2375
2376 sc->mii_bus.name = sbmac_mdio_string; 2376 sc->mii_bus.name = sbmac_mdio_string;
2377 sc->mii_bus.id = idx; 2377 snprintf(sc->mii_bus.id, MII_BUS_ID_SIZE, "%x", idx);
2378 sc->mii_bus.priv = sc; 2378 sc->mii_bus.priv = sc;
2379 sc->mii_bus.read = sbmac_mii_read; 2379 sc->mii_bus.read = sbmac_mii_read;
2380 sc->mii_bus.write = sbmac_mii_write; 2380 sc->mii_bus.write = sbmac_mii_write;
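The fixed.c and sb1250-mac.c hunks reflect mii_bus.id changing from an integer to a fixed-size string; a minimal sketch of the new idiom follows (the index value is hypothetical).

#include <linux/kernel.h>
#include <linux/phy.h>

static void example_name_mii_bus(struct mii_bus *bus, int idx)
{
	/* id is now a string buffer of MII_BUS_ID_SIZE bytes */
	snprintf(bus->id, MII_BUS_ID_SIZE, "%x", idx);
}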
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 15fcee55284e..f64a860029b7 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -311,7 +311,6 @@ struct sc92031_priv {
311 311
312 /* for dev->get_stats */ 312 /* for dev->get_stats */
313 long rx_value; 313 long rx_value;
314 struct net_device_stats stats;
315}; 314};
316 315
317/* I don't know which registers can be safely read; however, I can guess 316/* I don't know which registers can be safely read; however, I can guess
@@ -421,7 +420,7 @@ static void _sc92031_tx_clear(struct net_device *dev)
421 420
422 while (priv->tx_head - priv->tx_tail > 0) { 421 while (priv->tx_head - priv->tx_tail > 0) {
423 priv->tx_tail++; 422 priv->tx_tail++;
424 priv->stats.tx_dropped++; 423 dev->stats.tx_dropped++;
425 } 424 }
426 priv->tx_head = priv->tx_tail = 0; 425 priv->tx_head = priv->tx_tail = 0;
427} 426}
@@ -676,27 +675,27 @@ static void _sc92031_tx_tasklet(struct net_device *dev)
676 priv->tx_tail++; 675 priv->tx_tail++;
677 676
678 if (tx_status & TxStatOK) { 677 if (tx_status & TxStatOK) {
679 priv->stats.tx_bytes += tx_status & 0x1fff; 678 dev->stats.tx_bytes += tx_status & 0x1fff;
680 priv->stats.tx_packets++; 679 dev->stats.tx_packets++;
681 /* Note: TxCarrierLost is always asserted at 100mbps. */ 680 /* Note: TxCarrierLost is always asserted at 100mbps. */
682 priv->stats.collisions += (tx_status >> 22) & 0xf; 681 dev->stats.collisions += (tx_status >> 22) & 0xf;
683 } 682 }
684 683
685 if (tx_status & (TxOutOfWindow | TxAborted)) { 684 if (tx_status & (TxOutOfWindow | TxAborted)) {
686 priv->stats.tx_errors++; 685 dev->stats.tx_errors++;
687 686
688 if (tx_status & TxAborted) 687 if (tx_status & TxAborted)
689 priv->stats.tx_aborted_errors++; 688 dev->stats.tx_aborted_errors++;
690 689
691 if (tx_status & TxCarrierLost) 690 if (tx_status & TxCarrierLost)
692 priv->stats.tx_carrier_errors++; 691 dev->stats.tx_carrier_errors++;
693 692
694 if (tx_status & TxOutOfWindow) 693 if (tx_status & TxOutOfWindow)
695 priv->stats.tx_window_errors++; 694 dev->stats.tx_window_errors++;
696 } 695 }
697 696
698 if (tx_status & TxUnderrun) 697 if (tx_status & TxUnderrun)
699 priv->stats.tx_fifo_errors++; 698 dev->stats.tx_fifo_errors++;
700 } 699 }
701 700
702 if (priv->tx_tail != old_tx_tail) 701 if (priv->tx_tail != old_tx_tail)
@@ -704,27 +703,29 @@ static void _sc92031_tx_tasklet(struct net_device *dev)
704 netif_wake_queue(dev); 703 netif_wake_queue(dev);
705} 704}
706 705
707static void _sc92031_rx_tasklet_error(u32 rx_status, 706static void _sc92031_rx_tasklet_error(struct net_device *dev,
708 struct sc92031_priv *priv, unsigned rx_size) 707 u32 rx_status, unsigned rx_size)
709{ 708{
710 if(rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) { 709 if(rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) {
711 priv->stats.rx_errors++; 710 dev->stats.rx_errors++;
712 priv->stats.rx_length_errors++; 711 dev->stats.rx_length_errors++;
713 } 712 }
714 713
715 if (!(rx_status & RxStatesOK)) { 714 if (!(rx_status & RxStatesOK)) {
716 priv->stats.rx_errors++; 715 dev->stats.rx_errors++;
717 716
718 if (rx_status & (RxHugeFrame | RxSmallFrame)) 717 if (rx_status & (RxHugeFrame | RxSmallFrame))
719 priv->stats.rx_length_errors++; 718 dev->stats.rx_length_errors++;
720 719
721 if (rx_status & RxBadAlign) 720 if (rx_status & RxBadAlign)
722 priv->stats.rx_frame_errors++; 721 dev->stats.rx_frame_errors++;
723 722
724 if (!(rx_status & RxCRCOK)) 723 if (!(rx_status & RxCRCOK))
725 priv->stats.rx_crc_errors++; 724 dev->stats.rx_crc_errors++;
726 } else 725 } else {
726 struct sc92031_priv *priv = netdev_priv(dev);
727 priv->rx_loss++; 727 priv->rx_loss++;
728 }
728} 729}
729 730
730static void _sc92031_rx_tasklet(struct net_device *dev) 731static void _sc92031_rx_tasklet(struct net_device *dev)
@@ -783,7 +784,7 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
783 || rx_size > (MAX_ETH_FRAME_SIZE + 4) 784 || rx_size > (MAX_ETH_FRAME_SIZE + 4)
784 || rx_size < 16 785 || rx_size < 16
785 || !(rx_status & RxStatesOK))) { 786 || !(rx_status & RxStatesOK))) {
786 _sc92031_rx_tasklet_error(rx_status, priv, rx_size); 787 _sc92031_rx_tasklet_error(dev, rx_status, rx_size);
787 break; 788 break;
788 } 789 }
789 790
@@ -795,7 +796,7 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
795 796
796 rx_len -= rx_size_align + 4; 797 rx_len -= rx_size_align + 4;
797 798
798 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN); 799 skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN);
799 if (unlikely(!skb)) { 800 if (unlikely(!skb)) {
800 if (printk_ratelimit()) 801 if (printk_ratelimit())
801 printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n", 802 printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
@@ -818,11 +819,11 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
818 dev->last_rx = jiffies; 819 dev->last_rx = jiffies;
819 netif_rx(skb); 820 netif_rx(skb);
820 821
821 priv->stats.rx_bytes += pkt_size; 822 dev->stats.rx_bytes += pkt_size;
822 priv->stats.rx_packets++; 823 dev->stats.rx_packets++;
823 824
824 if (rx_status & Rx_Multicast) 825 if (rx_status & Rx_Multicast)
825 priv->stats.multicast++; 826 dev->stats.multicast++;
826 827
827 next: 828 next:
828 rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN; 829 rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN;
@@ -835,13 +836,11 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
835 836
836static void _sc92031_link_tasklet(struct net_device *dev) 837static void _sc92031_link_tasklet(struct net_device *dev)
837{ 838{
838 struct sc92031_priv *priv = netdev_priv(dev);
839
840 if (_sc92031_check_media(dev)) 839 if (_sc92031_check_media(dev))
841 netif_wake_queue(dev); 840 netif_wake_queue(dev);
842 else { 841 else {
843 netif_stop_queue(dev); 842 netif_stop_queue(dev);
844 priv->stats.tx_carrier_errors++; 843 dev->stats.tx_carrier_errors++;
845 } 844 }
846} 845}
847 846
@@ -866,11 +865,11 @@ static void sc92031_tasklet(unsigned long data)
866 _sc92031_rx_tasklet(dev); 865 _sc92031_rx_tasklet(dev);
867 866
868 if (intr_status & RxOverflow) 867 if (intr_status & RxOverflow)
869 priv->stats.rx_errors++; 868 dev->stats.rx_errors++;
870 869
871 if (intr_status & TimeOut) { 870 if (intr_status & TimeOut) {
872 priv->stats.rx_errors++; 871 dev->stats.rx_errors++;
873 priv->stats.rx_length_errors++; 872 dev->stats.rx_length_errors++;
874 } 873 }
875 874
876 if (intr_status & (LinkFail | LinkOK)) 875 if (intr_status & (LinkFail | LinkOK))
@@ -936,38 +935,36 @@ static struct net_device_stats *sc92031_get_stats(struct net_device *dev)
936 935
937 if (temp == 0xffff) { 936 if (temp == 0xffff) {
938 priv->rx_value += temp; 937 priv->rx_value += temp;
939 priv->stats.rx_fifo_errors = priv->rx_value; 938 dev->stats.rx_fifo_errors = priv->rx_value;
940 } else { 939 } else
941 priv->stats.rx_fifo_errors = temp + priv->rx_value; 940 dev->stats.rx_fifo_errors = temp + priv->rx_value;
942 }
943 941
944 spin_unlock_bh(&priv->lock); 942 spin_unlock_bh(&priv->lock);
945 } 943 }
946 944
947 return &priv->stats; 945 return &dev->stats;
948} 946}
949 947
950static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev) 948static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
951{ 949{
952 int err = 0;
953 struct sc92031_priv *priv = netdev_priv(dev); 950 struct sc92031_priv *priv = netdev_priv(dev);
954 void __iomem *port_base = priv->port_base; 951 void __iomem *port_base = priv->port_base;
955
956 unsigned len; 952 unsigned len;
957 unsigned entry; 953 unsigned entry;
958 u32 tx_status; 954 u32 tx_status;
959 955
956 if (skb_padto(skb, ETH_ZLEN))
957 return NETDEV_TX_OK;
958
960 if (unlikely(skb->len > TX_BUF_SIZE)) { 959 if (unlikely(skb->len > TX_BUF_SIZE)) {
961 err = -EMSGSIZE; 960 dev->stats.tx_dropped++;
962 priv->stats.tx_dropped++;
963 goto out; 961 goto out;
964 } 962 }
965 963
966 spin_lock(&priv->lock); 964 spin_lock(&priv->lock);
967 965
968 if (unlikely(!netif_carrier_ok(dev))) { 966 if (unlikely(!netif_carrier_ok(dev))) {
969 err = -ENOLINK; 967 dev->stats.tx_dropped++;
970 priv->stats.tx_dropped++;
971 goto out_unlock; 968 goto out_unlock;
972 } 969 }
973 970
@@ -978,11 +975,6 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
978 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); 975 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);
979 976
980 len = skb->len; 977 len = skb->len;
981 if (unlikely(len < ETH_ZLEN)) {
982 memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
983 0, ETH_ZLEN - len);
984 len = ETH_ZLEN;
985 }
986 978
987 wmb(); 979 wmb();
988 980
@@ -1009,7 +1001,7 @@ out_unlock:
1009out: 1001out:
1010 dev_kfree_skb(skb); 1002 dev_kfree_skb(skb);
1011 1003
1012 return err; 1004 return NETDEV_TX_OK;
1013} 1005}
1014 1006
1015static int sc92031_open(struct net_device *dev) 1007static int sc92031_open(struct net_device *dev)
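A minimal sketch of the short-frame handling the sc92031 hunks switch to: pad undersized frames with skb_padto() before copying, and count drops in the generic dev->stats rather than a private copy. The buffer-size constant and the transmit path itself are placeholders.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* skb_padto() pads to ETH_ZLEN and frees the skb itself on failure */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(skb->len > 1536)) {	/* 1536: hypothetical TX buffer size */
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* ... copy skb->data into the TX ring and start the hardware ... */

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}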
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index bccae7e5c6ad..477671606273 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1399,6 +1399,8 @@ spider_net_link_reset(struct net_device *netdev)
1399 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0); 1399 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
1400 1400
1401 /* reset phy and setup aneg */ 1401 /* reset phy and setup aneg */
1402 card->aneg_count = 0;
1403 card->medium = BCM54XX_COPPER;
1402 spider_net_setup_aneg(card); 1404 spider_net_setup_aneg(card);
1403 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); 1405 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1404 1406
@@ -1413,18 +1415,12 @@ spider_net_link_reset(struct net_device *netdev)
1413 * found when an interrupt is presented 1415 * found when an interrupt is presented
1414 */ 1416 */
1415static void 1417static void
1416spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) 1418spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
1419 u32 error_reg1, u32 error_reg2)
1417{ 1420{
1418 u32 error_reg1, error_reg2;
1419 u32 i; 1421 u32 i;
1420 int show_error = 1; 1422 int show_error = 1;
1421 1423
1422 error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
1423 error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
1424
1425 error_reg1 &= SPIDER_NET_INT1_MASK_VALUE;
1426 error_reg2 &= SPIDER_NET_INT2_MASK_VALUE;
1427
1428 /* check GHIINT0STS ************************************/ 1424 /* check GHIINT0STS ************************************/
1429 if (status_reg) 1425 if (status_reg)
1430 for (i = 0; i < 32; i++) 1426 for (i = 0; i < 32; i++)
@@ -1654,12 +1650,15 @@ spider_net_interrupt(int irq, void *ptr)
1654{ 1650{
1655 struct net_device *netdev = ptr; 1651 struct net_device *netdev = ptr;
1656 struct spider_net_card *card = netdev_priv(netdev); 1652 struct spider_net_card *card = netdev_priv(netdev);
1657 u32 status_reg; 1653 u32 status_reg, error_reg1, error_reg2;
1658 1654
1659 status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS); 1655 status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
1660 status_reg &= SPIDER_NET_INT0_MASK_VALUE; 1656 error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
1657 error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
1661 1658
1662 if (!status_reg) 1659 if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) &&
1660 !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) &&
1661 !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE))
1663 return IRQ_NONE; 1662 return IRQ_NONE;
1664 1663
1665 if (status_reg & SPIDER_NET_RXINT ) { 1664 if (status_reg & SPIDER_NET_RXINT ) {
@@ -1674,7 +1673,8 @@ spider_net_interrupt(int irq, void *ptr)
1674 spider_net_link_reset(netdev); 1673 spider_net_link_reset(netdev);
1675 1674
1676 if (status_reg & SPIDER_NET_ERRINT ) 1675 if (status_reg & SPIDER_NET_ERRINT )
1677 spider_net_handle_error_irq(card, status_reg); 1676 spider_net_handle_error_irq(card, status_reg,
1677 error_reg1, error_reg2);
1678 1678
1679 /* clear interrupt sources */ 1679 /* clear interrupt sources */
1680 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg); 1680 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
@@ -1982,6 +1982,8 @@ spider_net_open(struct net_device *netdev)
1982 goto init_firmware_failed; 1982 goto init_firmware_failed;
1983 1983
1984 /* start probing with copper */ 1984 /* start probing with copper */
1985 card->aneg_count = 0;
1986 card->medium = BCM54XX_COPPER;
1985 spider_net_setup_aneg(card); 1987 spider_net_setup_aneg(card);
1986 if (card->phy.def->phy_id) 1988 if (card->phy.def->phy_id)
1987 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); 1989 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
@@ -2043,7 +2045,8 @@ static void spider_net_link_phy(unsigned long data)
2043 /* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */ 2045 /* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
2044 if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) { 2046 if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
2045 2047
2046 pr_info("%s: link is down trying to bring it up\n", card->netdev->name); 2048 pr_debug("%s: link is down trying to bring it up\n",
2049 card->netdev->name);
2047 2050
2048 switch (card->medium) { 2051 switch (card->medium) {
2049 case BCM54XX_COPPER: 2052 case BCM54XX_COPPER:
@@ -2094,9 +2097,10 @@ static void spider_net_link_phy(unsigned long data)
2094 2097
2095 card->aneg_count = 0; 2098 card->aneg_count = 0;
2096 2099
2097 pr_debug("Found %s with %i Mbps, %s-duplex %sautoneg.\n", 2100 pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n",
2098 phy->def->name, phy->speed, phy->duplex==1 ? "Full" : "Half", 2101 card->netdev->name, phy->speed,
2099 phy->autoneg==1 ? "" : "no "); 2102 phy->duplex == 1 ? "Full" : "Half",
2103 phy->autoneg == 1 ? "" : "no ");
2100 2104
2101 return; 2105 return;
2102} 2106}
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index e1d05c0f47eb..05f74cbdd617 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -52,7 +52,7 @@ extern char spider_net_driver_name[];
52 52
53#define SPIDER_NET_TX_TIMER (HZ/5) 53#define SPIDER_NET_TX_TIMER (HZ/5)
54#define SPIDER_NET_ANEG_TIMER (HZ) 54#define SPIDER_NET_ANEG_TIMER (HZ)
55#define SPIDER_NET_ANEG_TIMEOUT 2 55#define SPIDER_NET_ANEG_TIMEOUT 5
56 56
57#define SPIDER_NET_RX_CSUM_DEFAULT 1 57#define SPIDER_NET_RX_CSUM_DEFAULT 1
58 58
@@ -159,9 +159,8 @@ extern char spider_net_driver_name[];
159 159
160/** interrupt mask registers */ 160/** interrupt mask registers */
161#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7 161#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7
162#define SPIDER_NET_INT1_MASK_VALUE 0xffff7ff7 162#define SPIDER_NET_INT1_MASK_VALUE 0x0000fff2
163/* no MAC aborts -> auto retransmission */ 163#define SPIDER_NET_INT2_MASK_VALUE 0x000003f1
164#define SPIDER_NET_INT2_MASK_VALUE 0xffef7ff1
165 164
166/* we rely on flagged descriptor interrupts */ 165/* we rely on flagged descriptor interrupts */
167#define SPIDER_NET_FRAMENUM_VALUE 0x00000000 166#define SPIDER_NET_FRAMENUM_VALUE 0x00000000
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 370d329d15d9..10e4e85da3fc 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -23,9 +23,9 @@
23 */ 23 */
24 24
25#ifdef TC35815_NAPI 25#ifdef TC35815_NAPI
26#define DRV_VERSION "1.36-NAPI" 26#define DRV_VERSION "1.37-NAPI"
27#else 27#else
28#define DRV_VERSION "1.36" 28#define DRV_VERSION "1.37"
29#endif 29#endif
30static const char *version = "tc35815.c:v" DRV_VERSION "\n"; 30static const char *version = "tc35815.c:v" DRV_VERSION "\n";
31#define MODNAME "tc35815" 31#define MODNAME "tc35815"
@@ -47,8 +47,8 @@ static const char *version = "tc35815.c:v" DRV_VERSION "\n";
47#include <linux/skbuff.h> 47#include <linux/skbuff.h>
48#include <linux/delay.h> 48#include <linux/delay.h>
49#include <linux/pci.h> 49#include <linux/pci.h>
50#include <linux/mii.h> 50#include <linux/phy.h>
51#include <linux/ethtool.h> 51#include <linux/workqueue.h>
52#include <linux/platform_device.h> 52#include <linux/platform_device.h>
53#include <asm/io.h> 53#include <asm/io.h>
54#include <asm/byteorder.h> 54#include <asm/byteorder.h>
@@ -60,16 +60,16 @@ static const char *version = "tc35815.c:v" DRV_VERSION "\n";
60#define WORKAROUND_100HALF_PROMISC 60#define WORKAROUND_100HALF_PROMISC
61/* #define TC35815_USE_PACKEDBUFFER */ 61/* #define TC35815_USE_PACKEDBUFFER */
62 62
63typedef enum { 63enum tc35815_chiptype {
64 TC35815CF = 0, 64 TC35815CF = 0,
65 TC35815_NWU, 65 TC35815_NWU,
66 TC35815_TX4939, 66 TC35815_TX4939,
67} board_t; 67};
68 68
69/* indexed by board_t, above */ 69/* indexed by tc35815_chiptype, above */
70static const struct { 70static const struct {
71 const char *name; 71 const char *name;
72} board_info[] __devinitdata = { 72} chip_info[] __devinitdata = {
73 { "TOSHIBA TC35815CF 10/100BaseTX" }, 73 { "TOSHIBA TC35815CF 10/100BaseTX" },
74 { "TOSHIBA TC35815 with Wake on LAN" }, 74 { "TOSHIBA TC35815 with Wake on LAN" },
75 { "TOSHIBA TC35815/TX4939" }, 75 { "TOSHIBA TC35815/TX4939" },
@@ -81,209 +81,208 @@ static const struct pci_device_id tc35815_pci_tbl[] = {
81 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 }, 81 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
82 {0,} 82 {0,}
83}; 83};
84MODULE_DEVICE_TABLE (pci, tc35815_pci_tbl); 84MODULE_DEVICE_TABLE(pci, tc35815_pci_tbl);
85 85
86/* see MODULE_PARM_DESC */ 86/* see MODULE_PARM_DESC */
87static struct tc35815_options { 87static struct tc35815_options {
88 int speed; 88 int speed;
89 int duplex; 89 int duplex;
90 int doforce;
91} options; 90} options;
92 91
93/* 92/*
94 * Registers 93 * Registers
95 */ 94 */
96struct tc35815_regs { 95struct tc35815_regs {
97 volatile __u32 DMA_Ctl; /* 0x00 */ 96 __u32 DMA_Ctl; /* 0x00 */
98 volatile __u32 TxFrmPtr; 97 __u32 TxFrmPtr;
99 volatile __u32 TxThrsh; 98 __u32 TxThrsh;
100 volatile __u32 TxPollCtr; 99 __u32 TxPollCtr;
101 volatile __u32 BLFrmPtr; 100 __u32 BLFrmPtr;
102 volatile __u32 RxFragSize; 101 __u32 RxFragSize;
103 volatile __u32 Int_En; 102 __u32 Int_En;
104 volatile __u32 FDA_Bas; 103 __u32 FDA_Bas;
105 volatile __u32 FDA_Lim; /* 0x20 */ 104 __u32 FDA_Lim; /* 0x20 */
106 volatile __u32 Int_Src; 105 __u32 Int_Src;
107 volatile __u32 unused0[2]; 106 __u32 unused0[2];
108 volatile __u32 PauseCnt; 107 __u32 PauseCnt;
109 volatile __u32 RemPauCnt; 108 __u32 RemPauCnt;
110 volatile __u32 TxCtlFrmStat; 109 __u32 TxCtlFrmStat;
111 volatile __u32 unused1; 110 __u32 unused1;
112 volatile __u32 MAC_Ctl; /* 0x40 */ 111 __u32 MAC_Ctl; /* 0x40 */
113 volatile __u32 CAM_Ctl; 112 __u32 CAM_Ctl;
114 volatile __u32 Tx_Ctl; 113 __u32 Tx_Ctl;
115 volatile __u32 Tx_Stat; 114 __u32 Tx_Stat;
116 volatile __u32 Rx_Ctl; 115 __u32 Rx_Ctl;
117 volatile __u32 Rx_Stat; 116 __u32 Rx_Stat;
118 volatile __u32 MD_Data; 117 __u32 MD_Data;
119 volatile __u32 MD_CA; 118 __u32 MD_CA;
120 volatile __u32 CAM_Adr; /* 0x60 */ 119 __u32 CAM_Adr; /* 0x60 */
121 volatile __u32 CAM_Data; 120 __u32 CAM_Data;
122 volatile __u32 CAM_Ena; 121 __u32 CAM_Ena;
123 volatile __u32 PROM_Ctl; 122 __u32 PROM_Ctl;
124 volatile __u32 PROM_Data; 123 __u32 PROM_Data;
125 volatile __u32 Algn_Cnt; 124 __u32 Algn_Cnt;
126 volatile __u32 CRC_Cnt; 125 __u32 CRC_Cnt;
127 volatile __u32 Miss_Cnt; 126 __u32 Miss_Cnt;
128}; 127};
129 128
130/* 129/*
131 * Bit assignments 130 * Bit assignments
132 */ 131 */
133/* DMA_Ctl bit asign ------------------------------------------------------- */ 132/* DMA_Ctl bit asign ------------------------------------------------------- */
134#define DMA_RxAlign 0x00c00000 /* 1:Reception Alignment */ 133#define DMA_RxAlign 0x00c00000 /* 1:Reception Alignment */
135#define DMA_RxAlign_1 0x00400000 134#define DMA_RxAlign_1 0x00400000
136#define DMA_RxAlign_2 0x00800000 135#define DMA_RxAlign_2 0x00800000
137#define DMA_RxAlign_3 0x00c00000 136#define DMA_RxAlign_3 0x00c00000
138#define DMA_M66EnStat 0x00080000 /* 1:66MHz Enable State */ 137#define DMA_M66EnStat 0x00080000 /* 1:66MHz Enable State */
139#define DMA_IntMask 0x00040000 /* 1:Interupt mask */ 138#define DMA_IntMask 0x00040000 /* 1:Interupt mask */
140#define DMA_SWIntReq 0x00020000 /* 1:Software Interrupt request */ 139#define DMA_SWIntReq 0x00020000 /* 1:Software Interrupt request */
141#define DMA_TxWakeUp 0x00010000 /* 1:Transmit Wake Up */ 140#define DMA_TxWakeUp 0x00010000 /* 1:Transmit Wake Up */
142#define DMA_RxBigE 0x00008000 /* 1:Receive Big Endian */ 141#define DMA_RxBigE 0x00008000 /* 1:Receive Big Endian */
143#define DMA_TxBigE 0x00004000 /* 1:Transmit Big Endian */ 142#define DMA_TxBigE 0x00004000 /* 1:Transmit Big Endian */
144#define DMA_TestMode 0x00002000 /* 1:Test Mode */ 143#define DMA_TestMode 0x00002000 /* 1:Test Mode */
145#define DMA_PowrMgmnt 0x00001000 /* 1:Power Management */ 144#define DMA_PowrMgmnt 0x00001000 /* 1:Power Management */
146#define DMA_DmBurst_Mask 0x000001fc /* DMA Burst size */ 145#define DMA_DmBurst_Mask 0x000001fc /* DMA Burst size */
147 146
148/* RxFragSize bit asign ---------------------------------------------------- */ 147/* RxFragSize bit asign ---------------------------------------------------- */
149#define RxFrag_EnPack 0x00008000 /* 1:Enable Packing */ 148#define RxFrag_EnPack 0x00008000 /* 1:Enable Packing */
150#define RxFrag_MinFragMask 0x00000ffc /* Minimum Fragment */ 149#define RxFrag_MinFragMask 0x00000ffc /* Minimum Fragment */
151 150
152/* MAC_Ctl bit asign ------------------------------------------------------- */ 151/* MAC_Ctl bit asign ------------------------------------------------------- */
153#define MAC_Link10 0x00008000 /* 1:Link Status 10Mbits */ 152#define MAC_Link10 0x00008000 /* 1:Link Status 10Mbits */
154#define MAC_EnMissRoll 0x00002000 /* 1:Enable Missed Roll */ 153#define MAC_EnMissRoll 0x00002000 /* 1:Enable Missed Roll */
155#define MAC_MissRoll 0x00000400 /* 1:Missed Roll */ 154#define MAC_MissRoll 0x00000400 /* 1:Missed Roll */
156#define MAC_Loop10 0x00000080 /* 1:Loop 10 Mbps */ 155#define MAC_Loop10 0x00000080 /* 1:Loop 10 Mbps */
157#define MAC_Conn_Auto 0x00000000 /*00:Connection mode (Automatic) */ 156#define MAC_Conn_Auto 0x00000000 /*00:Connection mode (Automatic) */
158#define MAC_Conn_10M 0x00000020 /*01: (10Mbps endec)*/ 157#define MAC_Conn_10M 0x00000020 /*01: (10Mbps endec)*/
159#define MAC_Conn_Mll 0x00000040 /*10: (Mll clock) */ 158#define MAC_Conn_Mll 0x00000040 /*10: (Mll clock) */
160#define MAC_MacLoop 0x00000010 /* 1:MAC Loopback */ 159#define MAC_MacLoop 0x00000010 /* 1:MAC Loopback */
161#define MAC_FullDup 0x00000008 /* 1:Full Duplex 0:Half Duplex */ 160#define MAC_FullDup 0x00000008 /* 1:Full Duplex 0:Half Duplex */
162#define MAC_Reset 0x00000004 /* 1:Software Reset */ 161#define MAC_Reset 0x00000004 /* 1:Software Reset */
163#define MAC_HaltImm 0x00000002 /* 1:Halt Immediate */ 162#define MAC_HaltImm 0x00000002 /* 1:Halt Immediate */
164#define MAC_HaltReq 0x00000001 /* 1:Halt request */ 163#define MAC_HaltReq 0x00000001 /* 1:Halt request */
165 164
166/* PROM_Ctl bit asign ------------------------------------------------------ */ 165/* PROM_Ctl bit asign ------------------------------------------------------ */
167#define PROM_Busy 0x00008000 /* 1:Busy (Start Operation) */ 166#define PROM_Busy 0x00008000 /* 1:Busy (Start Operation) */
168#define PROM_Read 0x00004000 /*10:Read operation */ 167#define PROM_Read 0x00004000 /*10:Read operation */
169#define PROM_Write 0x00002000 /*01:Write operation */ 168#define PROM_Write 0x00002000 /*01:Write operation */
170#define PROM_Erase 0x00006000 /*11:Erase operation */ 169#define PROM_Erase 0x00006000 /*11:Erase operation */
171 /*00:Enable or Disable Writting, */ 170 /*00:Enable or Disable Writting, */
172 /* as specified in PROM_Addr. */ 171 /* as specified in PROM_Addr. */
173#define PROM_Addr_Ena 0x00000030 /*11xxxx:PROM Write enable */ 172#define PROM_Addr_Ena 0x00000030 /*11xxxx:PROM Write enable */
174 /*00xxxx: disable */ 173 /*00xxxx: disable */
175 174
176/* CAM_Ctl bit asign ------------------------------------------------------- */ 175/* CAM_Ctl bit asign ------------------------------------------------------- */
177#define CAM_CompEn 0x00000010 /* 1:CAM Compare Enable */ 176#define CAM_CompEn 0x00000010 /* 1:CAM Compare Enable */
178#define CAM_NegCAM 0x00000008 /* 1:Reject packets CAM recognizes,*/ 177#define CAM_NegCAM 0x00000008 /* 1:Reject packets CAM recognizes,*/
179 /* accept other */ 178 /* accept other */
180#define CAM_BroadAcc 0x00000004 /* 1:Broadcast assept */ 179#define CAM_BroadAcc 0x00000004 /* 1:Broadcast assept */
181#define CAM_GroupAcc 0x00000002 /* 1:Multicast assept */ 180#define CAM_GroupAcc 0x00000002 /* 1:Multicast assept */
182#define CAM_StationAcc 0x00000001 /* 1:unicast accept */ 181#define CAM_StationAcc 0x00000001 /* 1:unicast accept */
183 182
184/* CAM_Ena bit asign ------------------------------------------------------- */ 183/* CAM_Ena bit asign ------------------------------------------------------- */
185#define CAM_ENTRY_MAX 21 /* CAM Data entry max count */ 184#define CAM_ENTRY_MAX 21 /* CAM Data entry max count */
186#define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits) */ 185#define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits) */
187#define CAM_Ena_Bit(index) (1<<(index)) 186#define CAM_Ena_Bit(index) (1 << (index))
188#define CAM_ENTRY_DESTINATION 0 187#define CAM_ENTRY_DESTINATION 0
189#define CAM_ENTRY_SOURCE 1 188#define CAM_ENTRY_SOURCE 1
190#define CAM_ENTRY_MACCTL 20 189#define CAM_ENTRY_MACCTL 20
191 190
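
For illustration only (not part of this commit): a minimal sketch of how the CAM_Ena helpers above are typically combined when enabling one more address entry. The CAM_Ena register field and the tc_readl()/tc_writel() accessors are assumptions borrowed from the surrounding driver code.

static void example_enable_cam_entry(struct tc35815_regs __iomem *tr, int index)
{
	/* read the currently enabled entries, set one more bit, and write
	 * back, clipped to the 21 valid CAM enable bits */
	u32 ena = tc_readl(&tr->CAM_Ena);

	ena |= CAM_Ena_Bit(index);
	tc_writel(ena & CAM_Ena_Mask, &tr->CAM_Ena);
}
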
192/* Tx_Ctl bit assign ------------------------------------------------------- */ 191/* Tx_Ctl bit assign ------------------------------------------------------- */
193#define Tx_En 0x00000001 /* 1:Transmit enable */ 192#define Tx_En 0x00000001 /* 1:Transmit enable */
194#define Tx_TxHalt 0x00000002 /* 1:Transmit Halt Request */ 193#define Tx_TxHalt 0x00000002 /* 1:Transmit Halt Request */
195#define Tx_NoPad 0x00000004 /* 1:Suppress Padding */ 194#define Tx_NoPad 0x00000004 /* 1:Suppress Padding */
196#define Tx_NoCRC 0x00000008 /* 1:Suppress CRC */ 195#define Tx_NoCRC 0x00000008 /* 1:Suppress CRC */
197#define Tx_FBack 0x00000010 /* 1:Fast Back-off */ 196#define Tx_FBack 0x00000010 /* 1:Fast Back-off */
198#define Tx_EnUnder 0x00000100 /* 1:Enable Underrun */ 197#define Tx_EnUnder 0x00000100 /* 1:Enable Underrun */
199#define Tx_EnExDefer 0x00000200 /* 1:Enable Excessive Deferral */ 198#define Tx_EnExDefer 0x00000200 /* 1:Enable Excessive Deferral */
200#define Tx_EnLCarr 0x00000400 /* 1:Enable Lost Carrier */ 199#define Tx_EnLCarr 0x00000400 /* 1:Enable Lost Carrier */
201#define Tx_EnExColl 0x00000800 /* 1:Enable Excessive Collision */ 200#define Tx_EnExColl 0x00000800 /* 1:Enable Excessive Collision */
202#define Tx_EnLateColl 0x00001000 /* 1:Enable Late Collision */ 201#define Tx_EnLateColl 0x00001000 /* 1:Enable Late Collision */
203#define Tx_EnTxPar 0x00002000 /* 1:Enable Transmit Parity */ 202#define Tx_EnTxPar 0x00002000 /* 1:Enable Transmit Parity */
204#define Tx_EnComp 0x00004000 /* 1:Enable Completion */ 203#define Tx_EnComp 0x00004000 /* 1:Enable Completion */
205 204
206/* Tx_Stat bit assign ------------------------------------------------------ */ 205/* Tx_Stat bit assign ------------------------------------------------------ */
207#define Tx_TxColl_MASK 0x0000000F /* Tx Collision Count */ 206#define Tx_TxColl_MASK 0x0000000F /* Tx Collision Count */
208#define Tx_ExColl 0x00000010 /* Excessive Collision */ 207#define Tx_ExColl 0x00000010 /* Excessive Collision */
209#define Tx_TXDefer 0x00000020 /* Transmit Deferred */ 208#define Tx_TXDefer 0x00000020 /* Transmit Deferred */
210#define Tx_Paused 0x00000040 /* Transmit Paused */ 209#define Tx_Paused 0x00000040 /* Transmit Paused */
211#define Tx_IntTx 0x00000080 /* Interrupt on Tx */ 210#define Tx_IntTx 0x00000080 /* Interrupt on Tx */
212#define Tx_Under 0x00000100 /* Underrun */ 211#define Tx_Under 0x00000100 /* Underrun */
213#define Tx_Defer 0x00000200 /* Deferral */ 212#define Tx_Defer 0x00000200 /* Deferral */
214#define Tx_NCarr 0x00000400 /* No Carrier */ 213#define Tx_NCarr 0x00000400 /* No Carrier */
215#define Tx_10Stat 0x00000800 /* 10Mbps Status */ 214#define Tx_10Stat 0x00000800 /* 10Mbps Status */
216#define Tx_LateColl 0x00001000 /* Late Collision */ 215#define Tx_LateColl 0x00001000 /* Late Collision */
217#define Tx_TxPar 0x00002000 /* Tx Parity Error */ 216#define Tx_TxPar 0x00002000 /* Tx Parity Error */
218#define Tx_Comp 0x00004000 /* Completion */ 217#define Tx_Comp 0x00004000 /* Completion */
219#define Tx_Halted 0x00008000 /* Tx Halted */ 218#define Tx_Halted 0x00008000 /* Tx Halted */
220#define Tx_SQErr 0x00010000 /* Signal Quality Error(SQE) */ 219#define Tx_SQErr 0x00010000 /* Signal Quality Error(SQE) */
221 220
222/* Rx_Ctl bit assign ------------------------------------------------------- */ 221/* Rx_Ctl bit assign ------------------------------------------------------- */
223#define Rx_EnGood 0x00004000 /* 1:Enable Good */ 222#define Rx_EnGood 0x00004000 /* 1:Enable Good */
224#define Rx_EnRxPar 0x00002000 /* 1:Enable Receive Parity */ 223#define Rx_EnRxPar 0x00002000 /* 1:Enable Receive Parity */
225#define Rx_EnLongErr 0x00000800 /* 1:Enable Long Error */ 224#define Rx_EnLongErr 0x00000800 /* 1:Enable Long Error */
226#define Rx_EnOver 0x00000400 /* 1:Enable OverFlow */ 225#define Rx_EnOver 0x00000400 /* 1:Enable OverFlow */
227#define Rx_EnCRCErr 0x00000200 /* 1:Enable CRC Error */ 226#define Rx_EnCRCErr 0x00000200 /* 1:Enable CRC Error */
228#define Rx_EnAlign 0x00000100 /* 1:Enable Alignment */ 227#define Rx_EnAlign 0x00000100 /* 1:Enable Alignment */
229#define Rx_IgnoreCRC 0x00000040 /* 1:Ignore CRC Value */ 228#define Rx_IgnoreCRC 0x00000040 /* 1:Ignore CRC Value */
230#define Rx_StripCRC 0x00000010 /* 1:Strip CRC Value */ 229#define Rx_StripCRC 0x00000010 /* 1:Strip CRC Value */
231#define Rx_ShortEn 0x00000008 /* 1:Short Enable */ 230#define Rx_ShortEn 0x00000008 /* 1:Short Enable */
232#define Rx_LongEn 0x00000004 /* 1:Long Enable */ 231#define Rx_LongEn 0x00000004 /* 1:Long Enable */
233#define Rx_RxHalt 0x00000002 /* 1:Receive Halt Request */ 232#define Rx_RxHalt 0x00000002 /* 1:Receive Halt Request */
234#define Rx_RxEn 0x00000001 /* 1:Receive Interrupt Enable */ 233#define Rx_RxEn 0x00000001 /* 1:Receive Interrupt Enable */
235 234
236/* Rx_Stat bit assign ------------------------------------------------------ */ 235/* Rx_Stat bit assign ------------------------------------------------------ */
237#define Rx_Halted 0x00008000 /* Rx Halted */ 236#define Rx_Halted 0x00008000 /* Rx Halted */
238#define Rx_Good 0x00004000 /* Rx Good */ 237#define Rx_Good 0x00004000 /* Rx Good */
239#define Rx_RxPar 0x00002000 /* Rx Parity Error */ 238#define Rx_RxPar 0x00002000 /* Rx Parity Error */
 240 /* 0x00001000 not used */ 239 /* 0x00001000 not used */
241#define Rx_LongErr 0x00000800 /* Rx Long Error */ 240#define Rx_LongErr 0x00000800 /* Rx Long Error */
242#define Rx_Over 0x00000400 /* Rx Overflow */ 241#define Rx_Over 0x00000400 /* Rx Overflow */
243#define Rx_CRCErr 0x00000200 /* Rx CRC Error */ 242#define Rx_CRCErr 0x00000200 /* Rx CRC Error */
244#define Rx_Align 0x00000100 /* Rx Alignment Error */ 243#define Rx_Align 0x00000100 /* Rx Alignment Error */
245#define Rx_10Stat 0x00000080 /* Rx 10Mbps Status */ 244#define Rx_10Stat 0x00000080 /* Rx 10Mbps Status */
246#define Rx_IntRx 0x00000040 /* Rx Interrupt */ 245#define Rx_IntRx 0x00000040 /* Rx Interrupt */
247#define Rx_CtlRecd 0x00000020 /* Rx Control Receive */ 246#define Rx_CtlRecd 0x00000020 /* Rx Control Receive */
248 247
249#define Rx_Stat_Mask 0x0000EFC0 /* Rx All Status Mask */ 248#define Rx_Stat_Mask 0x0000EFC0 /* Rx All Status Mask */
250 249
251/* Int_En bit assign ------------------------------------------------------- */ 250/* Int_En bit assign ------------------------------------------------------- */
252#define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */ 251#define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */
253#define Int_TxCtlCmpEn 0x00000400 /* 1:Transmit Control Complete Enable */ 252#define Int_TxCtlCmpEn 0x00000400 /* 1:Transmit Ctl Complete Enable */
254#define Int_DmParErrEn 0x00000200 /* 1:DMA Parity Error Enable */ 253#define Int_DmParErrEn 0x00000200 /* 1:DMA Parity Error Enable */
255#define Int_DParDEn 0x00000100 /* 1:Data Parity Error Enable */ 254#define Int_DParDEn 0x00000100 /* 1:Data Parity Error Enable */
256#define Int_EarNotEn 0x00000080 /* 1:Early Notify Enable */ 255#define Int_EarNotEn 0x00000080 /* 1:Early Notify Enable */
257#define Int_DParErrEn 0x00000040 /* 1:Detected Parity Error Enable */ 256#define Int_DParErrEn 0x00000040 /* 1:Detected Parity Error Enable */
258#define Int_SSysErrEn 0x00000020 /* 1:Signalled System Error Enable */ 257#define Int_SSysErrEn 0x00000020 /* 1:Signalled System Error Enable */
259#define Int_RMasAbtEn 0x00000010 /* 1:Received Master Abort Enable */ 258#define Int_RMasAbtEn 0x00000010 /* 1:Received Master Abort Enable */
260#define Int_RTargAbtEn 0x00000008 /* 1:Received Target Abort Enable */ 259#define Int_RTargAbtEn 0x00000008 /* 1:Received Target Abort Enable */
261#define Int_STargAbtEn 0x00000004 /* 1:Signalled Target Abort Enable */ 260#define Int_STargAbtEn 0x00000004 /* 1:Signalled Target Abort Enable */
262#define Int_BLExEn 0x00000002 /* 1:Buffer List Exhausted Enable */ 261#define Int_BLExEn 0x00000002 /* 1:Buffer List Exhausted Enable */
263#define Int_FDAExEn 0x00000001 /* 1:Free Descriptor Area */ 262#define Int_FDAExEn 0x00000001 /* 1:Free Descriptor Area */
264 /* Exhausted Enable */ 263 /* Exhausted Enable */
265 264
266/* Int_Src bit assign ------------------------------------------------------ */ 265/* Int_Src bit assign ------------------------------------------------------ */
267#define Int_NRabt 0x00004000 /* 1:Non Recoverable error */ 266#define Int_NRabt 0x00004000 /* 1:Non Recoverable error */
268#define Int_DmParErrStat 0x00002000 /* 1:DMA Parity Error & Clear */ 267#define Int_DmParErrStat 0x00002000 /* 1:DMA Parity Error & Clear */
269#define Int_BLEx 0x00001000 /* 1:Buffer List Empty & Clear */ 268#define Int_BLEx 0x00001000 /* 1:Buffer List Empty & Clear */
270#define Int_FDAEx 0x00000800 /* 1:FDA Empty & Clear */ 269#define Int_FDAEx 0x00000800 /* 1:FDA Empty & Clear */
271#define Int_IntNRAbt 0x00000400 /* 1:Non Recoverable Abort */ 270#define Int_IntNRAbt 0x00000400 /* 1:Non Recoverable Abort */
272#define Int_IntCmp 0x00000200 /* 1:MAC control packet complete */ 271#define Int_IntCmp 0x00000200 /* 1:MAC control packet complete */
273#define Int_IntExBD 0x00000100 /* 1:Interrupt Extra BD & Clear */ 272#define Int_IntExBD 0x00000100 /* 1:Interrupt Extra BD & Clear */
274#define Int_DmParErr 0x00000080 /* 1:DMA Parity Error & Clear */ 273#define Int_DmParErr 0x00000080 /* 1:DMA Parity Error & Clear */
275#define Int_IntEarNot 0x00000040 /* 1:Receive Data write & Clear */ 274#define Int_IntEarNot 0x00000040 /* 1:Receive Data write & Clear */
276#define Int_SWInt 0x00000020 /* 1:Software request & Clear */ 275#define Int_SWInt 0x00000020 /* 1:Software request & Clear */
277#define Int_IntBLEx 0x00000010 /* 1:Buffer List Empty & Clear */ 276#define Int_IntBLEx 0x00000010 /* 1:Buffer List Empty & Clear */
278#define Int_IntFDAEx 0x00000008 /* 1:FDA Empty & Clear */ 277#define Int_IntFDAEx 0x00000008 /* 1:FDA Empty & Clear */
279#define Int_IntPCI 0x00000004 /* 1:PCI controller & Clear */ 278#define Int_IntPCI 0x00000004 /* 1:PCI controller & Clear */
280#define Int_IntMacRx 0x00000002 /* 1:Rx controller & Clear */ 279#define Int_IntMacRx 0x00000002 /* 1:Rx controller & Clear */
281#define Int_IntMacTx 0x00000001 /* 1:Tx controller & Clear */ 280#define Int_IntMacTx 0x00000001 /* 1:Tx controller & Clear */
282 281
283/* MD_CA bit assign -------------------------------------------------------- */ 282/* MD_CA bit assign -------------------------------------------------------- */
284#define MD_CA_PreSup 0x00001000 /* 1:Preamble Suppress */ 283#define MD_CA_PreSup 0x00001000 /* 1:Preamble Suppress */
285#define MD_CA_Busy 0x00000800 /* 1:Busy (Start Operation) */ 284#define MD_CA_Busy 0x00000800 /* 1:Busy (Start Operation) */
286#define MD_CA_Wr 0x00000400 /* 1:Write 0:Read */ 285#define MD_CA_Wr 0x00000400 /* 1:Write 0:Read */
287 286
288 287
289/* 288/*
@@ -307,24 +306,24 @@ struct BDesc {
307#define FD_ALIGN 16 306#define FD_ALIGN 16
308 307
309/* Frame Descriptor bit assign --------------------------------------------- */ 308/* Frame Descriptor bit assign --------------------------------------------- */
310#define FD_FDLength_MASK 0x0000FFFF /* Length MASK */ 309#define FD_FDLength_MASK 0x0000FFFF /* Length MASK */
311#define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */ 310#define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */
312#define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */ 311#define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */
313#define FD_FrmOpt_BigEndian 0x40000000 /* Tx/Rx */ 312#define FD_FrmOpt_BigEndian 0x40000000 /* Tx/Rx */
314#define FD_FrmOpt_IntTx 0x20000000 /* Tx only */ 313#define FD_FrmOpt_IntTx 0x20000000 /* Tx only */
315#define FD_FrmOpt_NoCRC 0x10000000 /* Tx only */ 314#define FD_FrmOpt_NoCRC 0x10000000 /* Tx only */
316#define FD_FrmOpt_NoPadding 0x08000000 /* Tx only */ 315#define FD_FrmOpt_NoPadding 0x08000000 /* Tx only */
317#define FD_FrmOpt_Packing 0x04000000 /* Rx only */ 316#define FD_FrmOpt_Packing 0x04000000 /* Rx only */
318#define FD_CownsFD 0x80000000 /* FD Controller owner bit */ 317#define FD_CownsFD 0x80000000 /* FD Controller owner bit */
319#define FD_Next_EOL 0x00000001 /* FD EOL indicator */ 318#define FD_Next_EOL 0x00000001 /* FD EOL indicator */
320#define FD_BDCnt_SHIFT 16 319#define FD_BDCnt_SHIFT 16
321 320
322/* Buffer Descriptor bit assign -------------------------------------------- */ 321/* Buffer Descriptor bit assign -------------------------------------------- */
323#define BD_BuffLength_MASK 0x0000FFFF /* Receive Data Size */ 322#define BD_BuffLength_MASK 0x0000FFFF /* Receive Data Size */
324#define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */ 323#define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */
325#define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */ 324#define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */
326#define BD_CownsBD 0x80000000 /* BD Controller owner bit */ 325#define BD_CownsBD 0x80000000 /* BD Controller owner bit */
327#define BD_RxBDID_SHIFT 16 326#define BD_RxBDID_SHIFT 16
328#define BD_RxBDSeqN_SHIFT 24 327#define BD_RxBDSeqN_SHIFT 24
329 328
330 329
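
For illustration only (not part of this commit): a minimal sketch of decoding a receive buffer descriptor control word with the masks and shifts defined above. The struct BDesc layout and the le32 field name BDCtl are assumptions taken from this driver's descriptor definitions.

static void example_decode_rx_bd(const struct BDesc *bd)
{
	u32 bdctl = le32_to_cpu(bd->BDCtl);	/* field name assumed from this driver */
	unsigned int buf_len = bdctl & BD_BuffLength_MASK;
	unsigned int bd_id = (bdctl & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
	int hw_owned = !!(bdctl & BD_CownsBD);	/* controller still owns this BD? */

	printk(KERN_DEBUG "BD: len=%u id=%u hw_owned=%d\n",
	       buf_len, bd_id, hw_owned);
}
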
@@ -348,13 +347,15 @@ struct BDesc {
348 Int_STargAbtEn | \ 347 Int_STargAbtEn | \
349 Int_BLExEn | Int_FDAExEn) /* maybe 0xb7f*/ 348 Int_BLExEn | Int_FDAExEn) /* maybe 0xb7f*/
350#define DMA_CTL_CMD DMA_BURST_SIZE 349#define DMA_CTL_CMD DMA_BURST_SIZE
351#define HAVE_DMA_RXALIGN(lp) likely((lp)->boardtype != TC35815CF) 350#define HAVE_DMA_RXALIGN(lp) likely((lp)->chiptype != TC35815CF)
352 351
353/* Tuning parameters */ 352/* Tuning parameters */
354#define DMA_BURST_SIZE 32 353#define DMA_BURST_SIZE 32
355#define TX_THRESHOLD 1024 354#define TX_THRESHOLD 1024
356#define TX_THRESHOLD_MAX 1536 /* threshold used with max-size packets, for low PCI transfer ability. */ 355/* threshold used with max-size packets, for low PCI transfer ability. */
357#define TX_THRESHOLD_KEEP_LIMIT 10 /* set threshold to the max value once Tx underrun errors have occurred this many times. */ 356#define TX_THRESHOLD_MAX 1536
 357/* set threshold to the max value once Tx underrun errors have occurred this many times. */
358#define TX_THRESHOLD_KEEP_LIMIT 10
358 359
359/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */ 360/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */
360#ifdef TC35815_USE_PACKEDBUFFER 361#ifdef TC35815_USE_PACKEDBUFFER
@@ -396,21 +397,12 @@ struct FrFD {
396}; 397};
397 398
398 399
399#define tc_readl(addr) readl(addr) 400#define tc_readl(addr) ioread32(addr)
400#define tc_writel(d, addr) writel(d, addr) 401#define tc_writel(d, addr) iowrite32(d, addr)
401 402
402#define TC35815_TX_TIMEOUT msecs_to_jiffies(400) 403#define TC35815_TX_TIMEOUT msecs_to_jiffies(400)
403 404
404/* Timer state engine. */ 405/* Information that needs to be kept for each controller. */
405enum tc35815_timer_state {
406 arbwait = 0, /* Waiting for auto negotiation to complete. */
407 lupwait = 1, /* Auto-neg complete, awaiting link-up status. */
408 ltrywait = 2, /* Forcing try of all modes, from fastest to slowest. */
409 asleep = 3, /* Time inactive. */
410 lcheck = 4, /* Check link status. */
411};
412
413/* Information that needs to be kept for each board. */
414struct tc35815_local { 406struct tc35815_local {
415 struct pci_dev *pci_dev; 407 struct pci_dev *pci_dev;
416 408
@@ -418,12 +410,11 @@ struct tc35815_local {
418 struct napi_struct napi; 410 struct napi_struct napi;
419 411
420 /* statistics */ 412 /* statistics */
421 struct net_device_stats stats;
422 struct { 413 struct {
423 int max_tx_qlen; 414 int max_tx_qlen;
424 int tx_ints; 415 int tx_ints;
425 int rx_ints; 416 int rx_ints;
426 int tx_underrun; 417 int tx_underrun;
427 } lstats; 418 } lstats;
428 419
429 /* Tx control lock. This protects the transmit buffer ring 420 /* Tx control lock. This protects the transmit buffer ring
@@ -433,12 +424,12 @@ struct tc35815_local {
433 */ 424 */
434 spinlock_t lock; 425 spinlock_t lock;
435 426
436 int phy_addr; 427 struct mii_bus mii_bus;
437 int fullduplex; 428 struct phy_device *phy_dev;
438 unsigned short saved_lpa; 429 int duplex;
439 struct timer_list timer; 430 int speed;
440 enum tc35815_timer_state timer_state; /* State of auto-neg timer. */ 431 int link;
441 unsigned int timer_ticks; /* Number of clicks at each state */ 432 struct work_struct restart_work;
442 433
443 /* 434 /*
444 * Transmitting: Batch Mode. 435 * Transmitting: Batch Mode.
@@ -452,7 +443,7 @@ struct tc35815_local {
452 * RX_BUF_NUM BD in Free Buffer FD. 443 * RX_BUF_NUM BD in Free Buffer FD.
453 * One Free Buffer BD has ETH_FRAME_LEN data buffer. 444 * One Free Buffer BD has ETH_FRAME_LEN data buffer.
454 */ 445 */
455 void * fd_buf; /* for TxFD, RxFD, FrFD */ 446 void *fd_buf; /* for TxFD, RxFD, FrFD */
456 dma_addr_t fd_buf_dma; 447 dma_addr_t fd_buf_dma;
457 struct TxFD *tfd_base; 448 struct TxFD *tfd_base;
458 unsigned int tfd_start; 449 unsigned int tfd_start;
@@ -463,7 +454,7 @@ struct tc35815_local {
463 struct FrFD *fbl_ptr; 454 struct FrFD *fbl_ptr;
464#ifdef TC35815_USE_PACKEDBUFFER 455#ifdef TC35815_USE_PACKEDBUFFER
465 unsigned char fbl_curid; 456 unsigned char fbl_curid;
466 void * data_buf[RX_BUF_NUM]; /* packing */ 457 void *data_buf[RX_BUF_NUM]; /* packing */
467 dma_addr_t data_buf_dma[RX_BUF_NUM]; 458 dma_addr_t data_buf_dma[RX_BUF_NUM];
468 struct { 459 struct {
469 struct sk_buff *skb; 460 struct sk_buff *skb;
@@ -476,10 +467,8 @@ struct tc35815_local {
476 dma_addr_t skb_dma; 467 dma_addr_t skb_dma;
477 } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM]; 468 } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM];
478#endif 469#endif
479 struct mii_if_info mii;
480 unsigned short mii_id[2];
481 u32 msg_enable; 470 u32 msg_enable;
482 board_t boardtype; 471 enum tc35815_chiptype chiptype;
483}; 472};
484 473
485static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt) 474static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt)
@@ -506,13 +495,14 @@ static inline void *rxbuf_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
506} 495}
507 496
508#define TC35815_DMA_SYNC_ONDEMAND 497#define TC35815_DMA_SYNC_ONDEMAND
509static void* alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle) 498static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
510{ 499{
511#ifdef TC35815_DMA_SYNC_ONDEMAND 500#ifdef TC35815_DMA_SYNC_ONDEMAND
512 void *buf; 501 void *buf;
513 /* pci_map + pci_dma_sync will be more effective than 502 /* pci_map + pci_dma_sync will be more effective than
514 * pci_alloc_consistent on some archs. */ 503 * pci_alloc_consistent on some archs. */
515 if ((buf = (void *)__get_free_page(GFP_ATOMIC)) == NULL) 504 buf = (void *)__get_free_page(GFP_ATOMIC);
505 if (!buf)
516 return NULL; 506 return NULL;
517 *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE, 507 *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
518 PCI_DMA_FROMDEVICE); 508 PCI_DMA_FROMDEVICE);
@@ -577,7 +567,7 @@ static void tc35815_txdone(struct net_device *dev);
577static int tc35815_close(struct net_device *dev); 567static int tc35815_close(struct net_device *dev);
578static struct net_device_stats *tc35815_get_stats(struct net_device *dev); 568static struct net_device_stats *tc35815_get_stats(struct net_device *dev);
579static void tc35815_set_multicast_list(struct net_device *dev); 569static void tc35815_set_multicast_list(struct net_device *dev);
580static void tc35815_tx_timeout(struct net_device *dev); 570static void tc35815_tx_timeout(struct net_device *dev);
581static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 571static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
582#ifdef CONFIG_NET_POLL_CONTROLLER 572#ifdef CONFIG_NET_POLL_CONTROLLER
583static void tc35815_poll_controller(struct net_device *dev); 573static void tc35815_poll_controller(struct net_device *dev);
@@ -585,21 +575,225 @@ static void tc35815_poll_controller(struct net_device *dev);
585static const struct ethtool_ops tc35815_ethtool_ops; 575static const struct ethtool_ops tc35815_ethtool_ops;
586 576
587/* Example routines you must write ;->. */ 577/* Example routines you must write ;->. */
588static void tc35815_chip_reset(struct net_device *dev); 578static void tc35815_chip_reset(struct net_device *dev);
589static void tc35815_chip_init(struct net_device *dev); 579static void tc35815_chip_init(struct net_device *dev);
590static void tc35815_find_phy(struct net_device *dev);
591static void tc35815_phy_chip_init(struct net_device *dev);
592 580
593#ifdef DEBUG 581#ifdef DEBUG
594static void panic_queues(struct net_device *dev); 582static void panic_queues(struct net_device *dev);
595#endif 583#endif
596 584
597static void tc35815_timer(unsigned long data); 585static void tc35815_restart_work(struct work_struct *work);
598static void tc35815_start_auto_negotiation(struct net_device *dev, 586
599 struct ethtool_cmd *ep); 587static int tc_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
600static int tc_mdio_read(struct net_device *dev, int phy_id, int location); 588{
601static void tc_mdio_write(struct net_device *dev, int phy_id, int location, 589 struct net_device *dev = bus->priv;
602 int val); 590 struct tc35815_regs __iomem *tr =
591 (struct tc35815_regs __iomem *)dev->base_addr;
592 unsigned long timeout = jiffies + 10;
593
594 tc_writel(MD_CA_Busy | (mii_id << 5) | (regnum & 0x1f), &tr->MD_CA);
595 while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
596 if (time_after(jiffies, timeout))
597 return -EIO;
598 cpu_relax();
599 }
600 return tc_readl(&tr->MD_Data) & 0xffff;
601}
602
603static int tc_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 val)
604{
605 struct net_device *dev = bus->priv;
606 struct tc35815_regs __iomem *tr =
607 (struct tc35815_regs __iomem *)dev->base_addr;
608 unsigned long timeout = jiffies + 10;
609
610 tc_writel(val, &tr->MD_Data);
611 tc_writel(MD_CA_Busy | MD_CA_Wr | (mii_id << 5) | (regnum & 0x1f),
612 &tr->MD_CA);
613 while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
614 if (time_after(jiffies, timeout))
615 return -EIO;
616 cpu_relax();
617 }
618 return 0;
619}
620
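
For illustration only (not part of this commit): once the MDIO bus is registered, phylib and any other caller reach the PHY through the read/write hooks added above. The helper below is hypothetical and only shows the calling convention; MII_PHYSID1/MII_PHYSID2 come from linux/mii.h.

static u32 example_read_phy_id(struct mii_bus *bus, int addr)
{
	/* bus->read returns the 16-bit register value, or a negative
	 * errno such as -EIO when the MD_CA busy-wait above times out */
	int id1 = bus->read(bus, addr, MII_PHYSID1);
	int id2 = bus->read(bus, addr, MII_PHYSID2);

	if (id1 < 0 || id2 < 0)
		return 0xffffffff;	/* treat a bus error as "no PHY here" */
	return ((u32)id1 << 16) | (u32)id2;
}
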
621static void tc_handle_link_change(struct net_device *dev)
622{
623 struct tc35815_local *lp = netdev_priv(dev);
624 struct phy_device *phydev = lp->phy_dev;
625 unsigned long flags;
626 int status_change = 0;
627
628 spin_lock_irqsave(&lp->lock, flags);
629 if (phydev->link &&
630 (lp->speed != phydev->speed || lp->duplex != phydev->duplex)) {
631 struct tc35815_regs __iomem *tr =
632 (struct tc35815_regs __iomem *)dev->base_addr;
633 u32 reg;
634
635 reg = tc_readl(&tr->MAC_Ctl);
636 reg |= MAC_HaltReq;
637 tc_writel(reg, &tr->MAC_Ctl);
638 if (phydev->duplex == DUPLEX_FULL)
639 reg |= MAC_FullDup;
640 else
641 reg &= ~MAC_FullDup;
642 tc_writel(reg, &tr->MAC_Ctl);
643 reg &= ~MAC_HaltReq;
644 tc_writel(reg, &tr->MAC_Ctl);
645
646 /*
647 * TX4939 PCFG.SPEEDn bit will be changed on
648 * NETDEV_CHANGE event.
649 */
650
651#if !defined(NO_CHECK_CARRIER) && defined(WORKAROUND_LOSTCAR)
652 /*
653 * WORKAROUND: enable LostCrS only if half duplex
654 * operation.
655 * (TX4939 does not have EnLCarr)
656 */
657 if (phydev->duplex == DUPLEX_HALF &&
658 lp->chiptype != TC35815_TX4939)
659 tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr,
660 &tr->Tx_Ctl);
661#endif
662
663 lp->speed = phydev->speed;
664 lp->duplex = phydev->duplex;
665 status_change = 1;
666 }
667
668 if (phydev->link != lp->link) {
669 if (phydev->link) {
670#ifdef WORKAROUND_100HALF_PROMISC
671 /* delayed promiscuous enabling */
672 if (dev->flags & IFF_PROMISC)
673 tc35815_set_multicast_list(dev);
674#endif
675 netif_schedule(dev);
676 } else {
677 lp->speed = 0;
678 lp->duplex = -1;
679 }
680 lp->link = phydev->link;
681
682 status_change = 1;
683 }
684 spin_unlock_irqrestore(&lp->lock, flags);
685
686 if (status_change && netif_msg_link(lp)) {
687 phy_print_status(phydev);
688#ifdef DEBUG
689 printk(KERN_DEBUG
690 "%s: MII BMCR %04x BMSR %04x LPA %04x\n",
691 dev->name,
692 phy_read(phydev, MII_BMCR),
693 phy_read(phydev, MII_BMSR),
694 phy_read(phydev, MII_LPA));
695#endif
696 }
697}
698
699static int tc_mii_probe(struct net_device *dev)
700{
701 struct tc35815_local *lp = netdev_priv(dev);
702 struct phy_device *phydev = NULL;
703 int phy_addr;
704 u32 dropmask;
705
706 /* find the first phy */
707 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
708 if (lp->mii_bus.phy_map[phy_addr]) {
709 if (phydev) {
710 printk(KERN_ERR "%s: multiple PHYs found\n",
711 dev->name);
712 return -EINVAL;
713 }
714 phydev = lp->mii_bus.phy_map[phy_addr];
715 break;
716 }
717 }
718
719 if (!phydev) {
720 printk(KERN_ERR "%s: no PHY found\n", dev->name);
721 return -ENODEV;
722 }
723
724 /* attach the mac to the phy */
725 phydev = phy_connect(dev, phydev->dev.bus_id,
726 &tc_handle_link_change, 0,
727 lp->chiptype == TC35815_TX4939 ?
728 PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII);
729 if (IS_ERR(phydev)) {
730 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
731 return PTR_ERR(phydev);
732 }
733 printk(KERN_INFO "%s: attached PHY driver [%s] "
734 "(mii_bus:phy_addr=%s, id=%x)\n",
735 dev->name, phydev->drv->name, phydev->dev.bus_id,
736 phydev->phy_id);
737
738 /* mask with MAC supported features */
739 phydev->supported &= PHY_BASIC_FEATURES;
740 dropmask = 0;
741 if (options.speed == 10)
742 dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
743 else if (options.speed == 100)
744 dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
745 if (options.duplex == 1)
746 dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full;
747 else if (options.duplex == 2)
748 dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half;
749 phydev->supported &= ~dropmask;
750 phydev->advertising = phydev->supported;
751
752 lp->link = 0;
753 lp->speed = 0;
754 lp->duplex = -1;
755 lp->phy_dev = phydev;
756
757 return 0;
758}
759
760static int tc_mii_init(struct net_device *dev)
761{
762 struct tc35815_local *lp = netdev_priv(dev);
763 int err;
764 int i;
765
766 lp->mii_bus.name = "tc35815_mii_bus";
767 lp->mii_bus.read = tc_mdio_read;
768 lp->mii_bus.write = tc_mdio_write;
769 snprintf(lp->mii_bus.id, MII_BUS_ID_SIZE, "%x",
770 (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn);
771 lp->mii_bus.priv = dev;
772 lp->mii_bus.dev = &lp->pci_dev->dev;
773 lp->mii_bus.irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
774 if (!lp->mii_bus.irq) {
775 err = -ENOMEM;
776 goto err_out;
777 }
778
779 for (i = 0; i < PHY_MAX_ADDR; i++)
780 lp->mii_bus.irq[i] = PHY_POLL;
781
782 err = mdiobus_register(&lp->mii_bus);
783 if (err)
784 goto err_out_free_mdio_irq;
785 err = tc_mii_probe(dev);
786 if (err)
787 goto err_out_unregister_bus;
788 return 0;
789
790err_out_unregister_bus:
791 mdiobus_unregister(&lp->mii_bus);
792err_out_free_mdio_irq:
793 kfree(lp->mii_bus.irq);
794err_out:
795 return err;
796}
603 797
604#ifdef CONFIG_CPU_TX49XX 798#ifdef CONFIG_CPU_TX49XX
605/* 799/*
@@ -617,7 +811,7 @@ static int __devinit tc35815_mac_match(struct device *dev, void *data)
617 811
618static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev) 812static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
619{ 813{
620 struct tc35815_local *lp = dev->priv; 814 struct tc35815_local *lp = netdev_priv(dev);
621 struct device *pd = bus_find_device(&platform_bus_type, NULL, 815 struct device *pd = bus_find_device(&platform_bus_type, NULL,
622 lp->pci_dev, tc35815_mac_match); 816 lp->pci_dev, tc35815_mac_match);
623 if (pd) { 817 if (pd) {
@@ -635,7 +829,7 @@ static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
635} 829}
636#endif 830#endif
637 831
638static int __devinit tc35815_init_dev_addr (struct net_device *dev) 832static int __devinit tc35815_init_dev_addr(struct net_device *dev)
639{ 833{
640 struct tc35815_regs __iomem *tr = 834 struct tc35815_regs __iomem *tr =
641 (struct tc35815_regs __iomem *)dev->base_addr; 835 (struct tc35815_regs __iomem *)dev->base_addr;
@@ -657,21 +851,21 @@ static int __devinit tc35815_init_dev_addr (struct net_device *dev)
657 return 0; 851 return 0;
658} 852}
659 853
660static int __devinit tc35815_init_one (struct pci_dev *pdev, 854static int __devinit tc35815_init_one(struct pci_dev *pdev,
661 const struct pci_device_id *ent) 855 const struct pci_device_id *ent)
662{ 856{
663 void __iomem *ioaddr = NULL; 857 void __iomem *ioaddr = NULL;
664 struct net_device *dev; 858 struct net_device *dev;
665 struct tc35815_local *lp; 859 struct tc35815_local *lp;
666 int rc; 860 int rc;
667 unsigned long mmio_start, mmio_end, mmio_flags, mmio_len; 861 DECLARE_MAC_BUF(mac);
668 862
669 static int printed_version; 863 static int printed_version;
670 if (!printed_version++) { 864 if (!printed_version++) {
671 printk(version); 865 printk(version);
672 dev_printk(KERN_DEBUG, &pdev->dev, 866 dev_printk(KERN_DEBUG, &pdev->dev,
673 "speed:%d duplex:%d doforce:%d\n", 867 "speed:%d duplex:%d\n",
674 options.speed, options.duplex, options.doforce); 868 options.speed, options.duplex);
675 } 869 }
676 870
677 if (!pdev->irq) { 871 if (!pdev->irq) {
@@ -680,55 +874,24 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev,
680 } 874 }
681 875
682 /* dev zeroed in alloc_etherdev */ 876 /* dev zeroed in alloc_etherdev */
683 dev = alloc_etherdev (sizeof (*lp)); 877 dev = alloc_etherdev(sizeof(*lp));
684 if (dev == NULL) { 878 if (dev == NULL) {
685 dev_err(&pdev->dev, "unable to alloc new ethernet\n"); 879 dev_err(&pdev->dev, "unable to alloc new ethernet\n");
686 return -ENOMEM; 880 return -ENOMEM;
687 } 881 }
688 SET_NETDEV_DEV(dev, &pdev->dev); 882 SET_NETDEV_DEV(dev, &pdev->dev);
689 lp = dev->priv; 883 lp = netdev_priv(dev);
690 lp->dev = dev; 884 lp->dev = dev;
691 885
692 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 886 /* enable device (incl. PCI PM wakeup), and bus-mastering */
693 rc = pci_enable_device (pdev); 887 rc = pcim_enable_device(pdev);
694 if (rc) 888 if (rc)
695 goto err_out; 889 goto err_out;
696 890 rc = pcim_iomap_regions(pdev, 1 << 1, MODNAME);
697 mmio_start = pci_resource_start (pdev, 1);
698 mmio_end = pci_resource_end (pdev, 1);
699 mmio_flags = pci_resource_flags (pdev, 1);
700 mmio_len = pci_resource_len (pdev, 1);
701
702 /* set this immediately, we need to know before
703 * we talk to the chip directly */
704
705 /* make sure PCI base addr 1 is MMIO */
706 if (!(mmio_flags & IORESOURCE_MEM)) {
707 dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
708 rc = -ENODEV;
709 goto err_out;
710 }
711
712 /* check for weird/broken PCI region reporting */
713 if ((mmio_len < sizeof(struct tc35815_regs))) {
714 dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
715 rc = -ENODEV;
716 goto err_out;
717 }
718
719 rc = pci_request_regions (pdev, MODNAME);
720 if (rc) 891 if (rc)
721 goto err_out; 892 goto err_out;
722 893 pci_set_master(pdev);
723 pci_set_master (pdev); 894 ioaddr = pcim_iomap_table(pdev)[1];
724
725 /* ioremap MMIO region */
726 ioaddr = ioremap (mmio_start, mmio_len);
727 if (ioaddr == NULL) {
728 dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
729 rc = -EIO;
730 goto err_out_free_res;
731 }
732 895
733 /* Initialize the device structure. */ 896 /* Initialize the device structure. */
734 dev->open = tc35815_open; 897 dev->open = tc35815_open;
@@ -748,11 +911,12 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev,
748#endif 911#endif
749 912
750 dev->irq = pdev->irq; 913 dev->irq = pdev->irq;
751 dev->base_addr = (unsigned long) ioaddr; 914 dev->base_addr = (unsigned long)ioaddr;
752 915
916 INIT_WORK(&lp->restart_work, tc35815_restart_work);
753 spin_lock_init(&lp->lock); 917 spin_lock_init(&lp->lock);
754 lp->pci_dev = pdev; 918 lp->pci_dev = pdev;
755 lp->boardtype = ent->driver_data; 919 lp->chiptype = ent->driver_data;
756 920
757 lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK; 921 lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK;
758 pci_set_drvdata(pdev, dev); 922 pci_set_drvdata(pdev, dev);
@@ -766,68 +930,49 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev,
766 random_ether_addr(dev->dev_addr); 930 random_ether_addr(dev->dev_addr);
767 } 931 }
768 932
769 rc = register_netdev (dev); 933 rc = register_netdev(dev);
770 if (rc) 934 if (rc)
771 goto err_out_unmap; 935 goto err_out;
772 936
773 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 937 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
774 printk(KERN_INFO "%s: %s at 0x%lx, " 938 printk(KERN_INFO "%s: %s at 0x%lx, %s, IRQ %d\n",
775 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
776 "IRQ %d\n",
777 dev->name, 939 dev->name,
778 board_info[ent->driver_data].name, 940 chip_info[ent->driver_data].name,
779 dev->base_addr, 941 dev->base_addr,
780 dev->dev_addr[0], dev->dev_addr[1], 942 print_mac(mac, dev->dev_addr),
781 dev->dev_addr[2], dev->dev_addr[3],
782 dev->dev_addr[4], dev->dev_addr[5],
783 dev->irq); 943 dev->irq);
784 944
785 setup_timer(&lp->timer, tc35815_timer, (unsigned long) dev); 945 rc = tc_mii_init(dev);
786 lp->mii.dev = dev; 946 if (rc)
787 lp->mii.mdio_read = tc_mdio_read; 947 goto err_out_unregister;
788 lp->mii.mdio_write = tc_mdio_write;
789 lp->mii.phy_id_mask = 0x1f;
790 lp->mii.reg_num_mask = 0x1f;
791 tc35815_find_phy(dev);
792 lp->mii.phy_id = lp->phy_addr;
793 lp->mii.full_duplex = 0;
794 lp->mii.force_media = 0;
795 948
796 return 0; 949 return 0;
797 950
798err_out_unmap: 951err_out_unregister:
799 iounmap(ioaddr); 952 unregister_netdev(dev);
800err_out_free_res:
801 pci_release_regions (pdev);
802err_out: 953err_out:
803 free_netdev (dev); 954 free_netdev(dev);
804 return rc; 955 return rc;
805} 956}
806 957
807 958
808static void __devexit tc35815_remove_one (struct pci_dev *pdev) 959static void __devexit tc35815_remove_one(struct pci_dev *pdev)
809{ 960{
810 struct net_device *dev = pci_get_drvdata (pdev); 961 struct net_device *dev = pci_get_drvdata(pdev);
811 unsigned long mmio_addr; 962 struct tc35815_local *lp = netdev_priv(dev);
812
813 mmio_addr = dev->base_addr;
814
815 unregister_netdev (dev);
816
817 if (mmio_addr) {
818 iounmap ((void __iomem *)mmio_addr);
819 pci_release_regions (pdev);
820 }
821
822 free_netdev (dev);
823 963
824 pci_set_drvdata (pdev, NULL); 964 phy_disconnect(lp->phy_dev);
965 mdiobus_unregister(&lp->mii_bus);
966 kfree(lp->mii_bus.irq);
967 unregister_netdev(dev);
968 free_netdev(dev);
969 pci_set_drvdata(pdev, NULL);
825} 970}
826 971
827static int 972static int
828tc35815_init_queues(struct net_device *dev) 973tc35815_init_queues(struct net_device *dev)
829{ 974{
830 struct tc35815_local *lp = dev->priv; 975 struct tc35815_local *lp = netdev_priv(dev);
831 int i; 976 int i;
832 unsigned long fd_addr; 977 unsigned long fd_addr;
833 978
@@ -838,11 +983,17 @@ tc35815_init_queues(struct net_device *dev)
838 sizeof(struct TxFD) * TX_FD_NUM > 983 sizeof(struct TxFD) * TX_FD_NUM >
839 PAGE_SIZE * FD_PAGE_NUM); 984 PAGE_SIZE * FD_PAGE_NUM);
840 985
841 if ((lp->fd_buf = pci_alloc_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM, &lp->fd_buf_dma)) == 0) 986 lp->fd_buf = pci_alloc_consistent(lp->pci_dev,
987 PAGE_SIZE * FD_PAGE_NUM,
988 &lp->fd_buf_dma);
989 if (!lp->fd_buf)
842 return -ENOMEM; 990 return -ENOMEM;
843 for (i = 0; i < RX_BUF_NUM; i++) { 991 for (i = 0; i < RX_BUF_NUM; i++) {
844#ifdef TC35815_USE_PACKEDBUFFER 992#ifdef TC35815_USE_PACKEDBUFFER
845 if ((lp->data_buf[i] = alloc_rxbuf_page(lp->pci_dev, &lp->data_buf_dma[i])) == NULL) { 993 lp->data_buf[i] =
994 alloc_rxbuf_page(lp->pci_dev,
995 &lp->data_buf_dma[i]);
996 if (!lp->data_buf[i]) {
846 while (--i >= 0) { 997 while (--i >= 0) {
847 free_rxbuf_page(lp->pci_dev, 998 free_rxbuf_page(lp->pci_dev,
848 lp->data_buf[i], 999 lp->data_buf[i],
@@ -885,18 +1036,17 @@ tc35815_init_queues(struct net_device *dev)
885#endif 1036#endif
886 printk("\n"); 1037 printk("\n");
887 } else { 1038 } else {
888 for (i = 0; i < FD_PAGE_NUM; i++) { 1039 for (i = 0; i < FD_PAGE_NUM; i++)
889 clear_page((void *)((unsigned long)lp->fd_buf + i * PAGE_SIZE)); 1040 clear_page((void *)((unsigned long)lp->fd_buf +
890 } 1041 i * PAGE_SIZE));
891 } 1042 }
892 fd_addr = (unsigned long)lp->fd_buf; 1043 fd_addr = (unsigned long)lp->fd_buf;
893 1044
894 /* Free Descriptors (for Receive) */ 1045 /* Free Descriptors (for Receive) */
895 lp->rfd_base = (struct RxFD *)fd_addr; 1046 lp->rfd_base = (struct RxFD *)fd_addr;
896 fd_addr += sizeof(struct RxFD) * RX_FD_NUM; 1047 fd_addr += sizeof(struct RxFD) * RX_FD_NUM;
897 for (i = 0; i < RX_FD_NUM; i++) { 1048 for (i = 0; i < RX_FD_NUM; i++)
898 lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD); 1049 lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD);
899 }
900 lp->rfd_cur = lp->rfd_base; 1050 lp->rfd_cur = lp->rfd_base;
901 lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1); 1051 lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1);
902 1052
@@ -964,7 +1114,7 @@ tc35815_init_queues(struct net_device *dev)
964static void 1114static void
965tc35815_clear_queues(struct net_device *dev) 1115tc35815_clear_queues(struct net_device *dev)
966{ 1116{
967 struct tc35815_local *lp = dev->priv; 1117 struct tc35815_local *lp = netdev_priv(dev);
968 int i; 1118 int i;
969 1119
970 for (i = 0; i < TX_FD_NUM; i++) { 1120 for (i = 0; i < TX_FD_NUM; i++) {
@@ -995,7 +1145,7 @@ tc35815_clear_queues(struct net_device *dev)
995static void 1145static void
996tc35815_free_queues(struct net_device *dev) 1146tc35815_free_queues(struct net_device *dev)
997{ 1147{
998 struct tc35815_local *lp = dev->priv; 1148 struct tc35815_local *lp = netdev_priv(dev);
999 int i; 1149 int i;
1000 1150
1001 if (lp->tfd_base) { 1151 if (lp->tfd_base) {
@@ -1076,7 +1226,7 @@ dump_rxfd(struct RxFD *fd)
1076 le32_to_cpu(fd->fd.FDStat), 1226 le32_to_cpu(fd->fd.FDStat),
1077 le32_to_cpu(fd->fd.FDCtl)); 1227 le32_to_cpu(fd->fd.FDCtl));
1078 if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD) 1228 if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD)
1079 return 0; 1229 return 0;
1080 printk("BD: "); 1230 printk("BD: ");
1081 for (i = 0; i < bd_count; i++) 1231 for (i = 0; i < bd_count; i++)
1082 printk(" %08x %08x", 1232 printk(" %08x %08x",
@@ -1109,7 +1259,7 @@ dump_frfd(struct FrFD *fd)
1109static void 1259static void
1110panic_queues(struct net_device *dev) 1260panic_queues(struct net_device *dev)
1111{ 1261{
1112 struct tc35815_local *lp = dev->priv; 1262 struct tc35815_local *lp = netdev_priv(dev);
1113 int i; 1263 int i;
1114 1264
1115 printk("TxFD base %p, start %u, end %u\n", 1265 printk("TxFD base %p, start %u, end %u\n",
@@ -1128,42 +1278,33 @@ panic_queues(struct net_device *dev)
1128} 1278}
1129#endif 1279#endif
1130 1280
1131static void print_eth(char *add) 1281static void print_eth(const u8 *add)
1132{ 1282{
1133 int i; 1283 DECLARE_MAC_BUF(mac);
1134 1284
1135 printk("print_eth(%p)\n", add); 1285 printk(KERN_DEBUG "print_eth(%p)\n", add);
1136 for (i = 0; i < 6; i++) 1286 printk(KERN_DEBUG " %s =>", print_mac(mac, add + 6));
1137 printk(" %2.2X", (unsigned char) add[i + 6]); 1287 printk(KERN_CONT " %s : %02x%02x\n",
1138 printk(" =>"); 1288 print_mac(mac, add), add[12], add[13]);
1139 for (i = 0; i < 6; i++)
1140 printk(" %2.2X", (unsigned char) add[i]);
1141 printk(" : %2.2X%2.2X\n", (unsigned char) add[12], (unsigned char) add[13]);
1142} 1289}
1143 1290
1144static int tc35815_tx_full(struct net_device *dev) 1291static int tc35815_tx_full(struct net_device *dev)
1145{ 1292{
1146 struct tc35815_local *lp = dev->priv; 1293 struct tc35815_local *lp = netdev_priv(dev);
1147 return ((lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end); 1294 return ((lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end);
1148} 1295}
1149 1296
1150static void tc35815_restart(struct net_device *dev) 1297static void tc35815_restart(struct net_device *dev)
1151{ 1298{
1152 struct tc35815_local *lp = dev->priv; 1299 struct tc35815_local *lp = netdev_priv(dev);
1153 int pid = lp->phy_addr; 1300
1154 int do_phy_reset = 1; 1301 if (lp->phy_dev) {
1155 del_timer(&lp->timer); /* Kill if running */
1156
1157 if (lp->mii_id[0] == 0x0016 && (lp->mii_id[1] & 0xfc00) == 0xf800) {
1158 /* Resetting PHY cause problem on some chip... (SEEQ 80221) */
1159 do_phy_reset = 0;
1160 }
1161 if (do_phy_reset) {
1162 int timeout; 1302 int timeout;
1163 tc_mdio_write(dev, pid, MII_BMCR, BMCR_RESET); 1303
1304 phy_write(lp->phy_dev, MII_BMCR, BMCR_RESET);
1164 timeout = 100; 1305 timeout = 100;
1165 while (--timeout) { 1306 while (--timeout) {
1166 if (!(tc_mdio_read(dev, pid, MII_BMCR) & BMCR_RESET)) 1307 if (!(phy_read(lp->phy_dev, MII_BMCR) & BMCR_RESET))
1167 break; 1308 break;
1168 udelay(1); 1309 udelay(1);
1169 } 1310 }
@@ -1171,16 +1312,40 @@ static void tc35815_restart(struct net_device *dev)
1171 printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name); 1312 printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name);
1172 } 1313 }
1173 1314
1315 spin_lock_irq(&lp->lock);
1174 tc35815_chip_reset(dev); 1316 tc35815_chip_reset(dev);
1175 tc35815_clear_queues(dev); 1317 tc35815_clear_queues(dev);
1176 tc35815_chip_init(dev); 1318 tc35815_chip_init(dev);
 1177 /* Reconfigure CAM again since tc35815_chip_init() initializes it. */ 1319 /* Reconfigure CAM again since tc35815_chip_init() initializes it. */
1178 tc35815_set_multicast_list(dev); 1320 tc35815_set_multicast_list(dev);
1321 spin_unlock_irq(&lp->lock);
1322
1323 netif_wake_queue(dev);
1324}
1325
1326static void tc35815_restart_work(struct work_struct *work)
1327{
1328 struct tc35815_local *lp =
1329 container_of(work, struct tc35815_local, restart_work);
1330 struct net_device *dev = lp->dev;
1331
1332 tc35815_restart(dev);
1333}
1334
1335static void tc35815_schedule_restart(struct net_device *dev)
1336{
1337 struct tc35815_local *lp = netdev_priv(dev);
1338 struct tc35815_regs __iomem *tr =
1339 (struct tc35815_regs __iomem *)dev->base_addr;
1340
1341 /* disable interrupts */
1342 tc_writel(0, &tr->Int_En);
1343 tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl);
1344 schedule_work(&lp->restart_work);
1179} 1345}
1180 1346
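
For illustration only (not part of this commit): the deferred-restart pattern introduced above. From atomic context (the tx timeout or the fatal-error interrupt path) only the work item is scheduled; the heavy reset then runs in process context and may sleep. The struct and function names below are placeholders, not driver code.

struct example_priv {
	struct net_device *dev;
	struct work_struct restart_work;
};

static void example_full_reset(struct net_device *dev)
{
	/* placeholder: the sleeping chip reset/reinit would go here */
}

static void example_restart_work(struct work_struct *work)
{
	struct example_priv *p =
		container_of(work, struct example_priv, restart_work);

	example_full_reset(p->dev);
}

/* probe: INIT_WORK(&p->restart_work, example_restart_work);
 * irq/timeout: schedule_work(&p->restart_work);
 * close: cancel_work_sync(&p->restart_work);  -- waits for any pending run */
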
1181static void tc35815_tx_timeout(struct net_device *dev) 1347static void tc35815_tx_timeout(struct net_device *dev)
1182{ 1348{
1183 struct tc35815_local *lp = dev->priv;
1184 struct tc35815_regs __iomem *tr = 1349 struct tc35815_regs __iomem *tr =
1185 (struct tc35815_regs __iomem *)dev->base_addr; 1350 (struct tc35815_regs __iomem *)dev->base_addr;
1186 1351
@@ -1188,28 +1353,12 @@ static void tc35815_tx_timeout(struct net_device *dev)
1188 dev->name, tc_readl(&tr->Tx_Stat)); 1353 dev->name, tc_readl(&tr->Tx_Stat));
1189 1354
1190 /* Try to restart the adaptor. */ 1355 /* Try to restart the adaptor. */
1191 spin_lock_irq(&lp->lock); 1356 tc35815_schedule_restart(dev);
1192 tc35815_restart(dev); 1357 dev->stats.tx_errors++;
1193 spin_unlock_irq(&lp->lock);
1194
1195 lp->stats.tx_errors++;
1196
1197 /* If we have space available to accept new transmit
1198 * requests, wake up the queueing layer. This would
1199 * be the case if the chipset_init() call above just
1200 * flushes out the tx queue and empties it.
1201 *
1202 * If instead, the tx queue is retained then the
1203 * netif_wake_queue() call should be placed in the
1204 * TX completion interrupt handler of the driver instead
1205 * of here.
1206 */
1207 if (!tc35815_tx_full(dev))
1208 netif_wake_queue(dev);
1209} 1358}
1210 1359
1211/* 1360/*
1212 * Open/initialize the board. This is called (in the current kernel) 1361 * Open/initialize the controller. This is called (in the current kernel)
1213 * sometime after booting when the 'ifconfig' program is run. 1362 * sometime after booting when the 'ifconfig' program is run.
1214 * 1363 *
1215 * This routine should set everything up anew at each open, even 1364 * This routine should set everything up anew at each open, even
@@ -1219,17 +1368,16 @@ static void tc35815_tx_timeout(struct net_device *dev)
1219static int 1368static int
1220tc35815_open(struct net_device *dev) 1369tc35815_open(struct net_device *dev)
1221{ 1370{
1222 struct tc35815_local *lp = dev->priv; 1371 struct tc35815_local *lp = netdev_priv(dev);
1223 1372
1224 /* 1373 /*
1225 * This is used if the interrupt line can turned off (shared). 1374 * This is used if the interrupt line can turned off (shared).
1226 * See 3c503.c for an example of selecting the IRQ at config-time. 1375 * See 3c503.c for an example of selecting the IRQ at config-time.
1227 */ 1376 */
1228 if (request_irq(dev->irq, &tc35815_interrupt, IRQF_SHARED, dev->name, dev)) { 1377 if (request_irq(dev->irq, &tc35815_interrupt, IRQF_SHARED,
1378 dev->name, dev))
1229 return -EAGAIN; 1379 return -EAGAIN;
1230 }
1231 1380
1232 del_timer(&lp->timer); /* Kill if running */
1233 tc35815_chip_reset(dev); 1381 tc35815_chip_reset(dev);
1234 1382
1235 if (tc35815_init_queues(dev) != 0) { 1383 if (tc35815_init_queues(dev) != 0) {
@@ -1246,6 +1394,9 @@ tc35815_open(struct net_device *dev)
1246 tc35815_chip_init(dev); 1394 tc35815_chip_init(dev);
1247 spin_unlock_irq(&lp->lock); 1395 spin_unlock_irq(&lp->lock);
1248 1396
1397 /* schedule a link state check */
1398 phy_start(lp->phy_dev);
1399
 1249 /* We are now ready to accept transmit requests from 1400 /* We are now ready to accept transmit requests from
 1250 * the queueing layer of the networking stack. 1401 * the queueing layer of the networking stack.
1251 */ 1402 */
@@ -1261,7 +1412,7 @@ tc35815_open(struct net_device *dev)
1261 */ 1412 */
1262static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) 1413static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
1263{ 1414{
1264 struct tc35815_local *lp = dev->priv; 1415 struct tc35815_local *lp = netdev_priv(dev);
1265 struct TxFD *txfd; 1416 struct TxFD *txfd;
1266 unsigned long flags; 1417 unsigned long flags;
1267 1418
@@ -1366,7 +1517,7 @@ static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
1366 panic("%s: Too many fatal errors.", dev->name); 1517 panic("%s: Too many fatal errors.", dev->name);
1367 printk(KERN_WARNING "%s: Resetting ...\n", dev->name); 1518 printk(KERN_WARNING "%s: Resetting ...\n", dev->name);
1368 /* Try to restart the adaptor. */ 1519 /* Try to restart the adaptor. */
1369 tc35815_restart(dev); 1520 tc35815_schedule_restart(dev);
1370} 1521}
1371 1522
1372#ifdef TC35815_NAPI 1523#ifdef TC35815_NAPI
@@ -1375,7 +1526,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
1375static int tc35815_do_interrupt(struct net_device *dev, u32 status) 1526static int tc35815_do_interrupt(struct net_device *dev, u32 status)
1376#endif 1527#endif
1377{ 1528{
1378 struct tc35815_local *lp = dev->priv; 1529 struct tc35815_local *lp = netdev_priv(dev);
1379 struct tc35815_regs __iomem *tr = 1530 struct tc35815_regs __iomem *tr =
1380 (struct tc35815_regs __iomem *)dev->base_addr; 1531 (struct tc35815_regs __iomem *)dev->base_addr;
1381 int ret = -1; 1532 int ret = -1;
@@ -1392,7 +1543,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status)
1392 printk(KERN_WARNING 1543 printk(KERN_WARNING
1393 "%s: Free Descriptor Area Exhausted (%#x).\n", 1544 "%s: Free Descriptor Area Exhausted (%#x).\n",
1394 dev->name, status); 1545 dev->name, status);
1395 lp->stats.rx_dropped++; 1546 dev->stats.rx_dropped++;
1396 ret = 0; 1547 ret = 0;
1397 } 1548 }
1398 if (status & Int_IntBLEx) { 1549 if (status & Int_IntBLEx) {
@@ -1401,14 +1552,14 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status)
1401 printk(KERN_WARNING 1552 printk(KERN_WARNING
1402 "%s: Buffer List Exhausted (%#x).\n", 1553 "%s: Buffer List Exhausted (%#x).\n",
1403 dev->name, status); 1554 dev->name, status);
1404 lp->stats.rx_dropped++; 1555 dev->stats.rx_dropped++;
1405 ret = 0; 1556 ret = 0;
1406 } 1557 }
1407 if (status & Int_IntExBD) { 1558 if (status & Int_IntExBD) {
1408 printk(KERN_WARNING 1559 printk(KERN_WARNING
 1409 "%s: Excessive Buffer Descriptors (%#x).\n", 1560 "%s: Excessive Buffer Descriptors (%#x).\n",
1410 dev->name, status); 1561 dev->name, status);
1411 lp->stats.rx_length_errors++; 1562 dev->stats.rx_length_errors++;
1412 ret = 0; 1563 ret = 0;
1413 } 1564 }
1414 1565
@@ -1492,7 +1643,7 @@ static void
1492tc35815_rx(struct net_device *dev) 1643tc35815_rx(struct net_device *dev)
1493#endif 1644#endif
1494{ 1645{
1495 struct tc35815_local *lp = dev->priv; 1646 struct tc35815_local *lp = netdev_priv(dev);
1496 unsigned int fdctl; 1647 unsigned int fdctl;
1497 int i; 1648 int i;
1498 int buf_free_count = 0; 1649 int buf_free_count = 0;
@@ -1532,7 +1683,7 @@ tc35815_rx(struct net_device *dev)
1532 if (skb == NULL) { 1683 if (skb == NULL) {
1533 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", 1684 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
1534 dev->name); 1685 dev->name);
1535 lp->stats.rx_dropped++; 1686 dev->stats.rx_dropped++;
1536 break; 1687 break;
1537 } 1688 }
1538 skb_reserve(skb, 2); /* 16 bit alignment */ 1689 skb_reserve(skb, 2); /* 16 bit alignment */
@@ -1602,10 +1753,10 @@ tc35815_rx(struct net_device *dev)
1602 netif_rx(skb); 1753 netif_rx(skb);
1603#endif 1754#endif
1604 dev->last_rx = jiffies; 1755 dev->last_rx = jiffies;
1605 lp->stats.rx_packets++; 1756 dev->stats.rx_packets++;
1606 lp->stats.rx_bytes += pkt_len; 1757 dev->stats.rx_bytes += pkt_len;
1607 } else { 1758 } else {
1608 lp->stats.rx_errors++; 1759 dev->stats.rx_errors++;
1609 printk(KERN_DEBUG "%s: Rx error (status %x)\n", 1760 printk(KERN_DEBUG "%s: Rx error (status %x)\n",
1610 dev->name, status & Rx_Stat_Mask); 1761 dev->name, status & Rx_Stat_Mask);
1611 /* WORKAROUND: LongErr and CRCErr means Overflow. */ 1762 /* WORKAROUND: LongErr and CRCErr means Overflow. */
@@ -1613,10 +1764,14 @@ tc35815_rx(struct net_device *dev)
1613 status &= ~(Rx_LongErr|Rx_CRCErr); 1764 status &= ~(Rx_LongErr|Rx_CRCErr);
1614 status |= Rx_Over; 1765 status |= Rx_Over;
1615 } 1766 }
1616 if (status & Rx_LongErr) lp->stats.rx_length_errors++; 1767 if (status & Rx_LongErr)
1617 if (status & Rx_Over) lp->stats.rx_fifo_errors++; 1768 dev->stats.rx_length_errors++;
1618 if (status & Rx_CRCErr) lp->stats.rx_crc_errors++; 1769 if (status & Rx_Over)
1619 if (status & Rx_Align) lp->stats.rx_frame_errors++; 1770 dev->stats.rx_fifo_errors++;
1771 if (status & Rx_CRCErr)
1772 dev->stats.rx_crc_errors++;
1773 if (status & Rx_Align)
1774 dev->stats.rx_frame_errors++;
1620 } 1775 }
1621 1776
1622 if (bd_count > 0) { 1777 if (bd_count > 0) {
@@ -1772,40 +1927,39 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
1772static void 1927static void
1773tc35815_check_tx_stat(struct net_device *dev, int status) 1928tc35815_check_tx_stat(struct net_device *dev, int status)
1774{ 1929{
1775 struct tc35815_local *lp = dev->priv; 1930 struct tc35815_local *lp = netdev_priv(dev);
1776 const char *msg = NULL; 1931 const char *msg = NULL;
1777 1932
1778 /* count collisions */ 1933 /* count collisions */
1779 if (status & Tx_ExColl) 1934 if (status & Tx_ExColl)
1780 lp->stats.collisions += 16; 1935 dev->stats.collisions += 16;
1781 if (status & Tx_TxColl_MASK) 1936 if (status & Tx_TxColl_MASK)
1782 lp->stats.collisions += status & Tx_TxColl_MASK; 1937 dev->stats.collisions += status & Tx_TxColl_MASK;
1783 1938
1784#ifndef NO_CHECK_CARRIER 1939#ifndef NO_CHECK_CARRIER
1785 /* TX4939 does not have NCarr */ 1940 /* TX4939 does not have NCarr */
1786 if (lp->boardtype == TC35815_TX4939) 1941 if (lp->chiptype == TC35815_TX4939)
1787 status &= ~Tx_NCarr; 1942 status &= ~Tx_NCarr;
1788#ifdef WORKAROUND_LOSTCAR 1943#ifdef WORKAROUND_LOSTCAR
1789 /* WORKAROUND: ignore LostCrS in full duplex operation */ 1944 /* WORKAROUND: ignore LostCrS in full duplex operation */
1790 if ((lp->timer_state != asleep && lp->timer_state != lcheck) 1945 if (!lp->link || lp->duplex == DUPLEX_FULL)
1791 || lp->fullduplex)
1792 status &= ~Tx_NCarr; 1946 status &= ~Tx_NCarr;
1793#endif 1947#endif
1794#endif 1948#endif
1795 1949
1796 if (!(status & TX_STA_ERR)) { 1950 if (!(status & TX_STA_ERR)) {
1797 /* no error. */ 1951 /* no error. */
1798 lp->stats.tx_packets++; 1952 dev->stats.tx_packets++;
1799 return; 1953 return;
1800 } 1954 }
1801 1955
1802 lp->stats.tx_errors++; 1956 dev->stats.tx_errors++;
1803 if (status & Tx_ExColl) { 1957 if (status & Tx_ExColl) {
1804 lp->stats.tx_aborted_errors++; 1958 dev->stats.tx_aborted_errors++;
1805 msg = "Excessive Collision."; 1959 msg = "Excessive Collision.";
1806 } 1960 }
1807 if (status & Tx_Under) { 1961 if (status & Tx_Under) {
1808 lp->stats.tx_fifo_errors++; 1962 dev->stats.tx_fifo_errors++;
1809 msg = "Tx FIFO Underrun."; 1963 msg = "Tx FIFO Underrun.";
1810 if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) { 1964 if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) {
1811 lp->lstats.tx_underrun++; 1965 lp->lstats.tx_underrun++;
@@ -1818,25 +1972,25 @@ tc35815_check_tx_stat(struct net_device *dev, int status)
1818 } 1972 }
1819 } 1973 }
1820 if (status & Tx_Defer) { 1974 if (status & Tx_Defer) {
1821 lp->stats.tx_fifo_errors++; 1975 dev->stats.tx_fifo_errors++;
1822 msg = "Excessive Deferral."; 1976 msg = "Excessive Deferral.";
1823 } 1977 }
1824#ifndef NO_CHECK_CARRIER 1978#ifndef NO_CHECK_CARRIER
1825 if (status & Tx_NCarr) { 1979 if (status & Tx_NCarr) {
1826 lp->stats.tx_carrier_errors++; 1980 dev->stats.tx_carrier_errors++;
1827 msg = "Lost Carrier Sense."; 1981 msg = "Lost Carrier Sense.";
1828 } 1982 }
1829#endif 1983#endif
1830 if (status & Tx_LateColl) { 1984 if (status & Tx_LateColl) {
1831 lp->stats.tx_aborted_errors++; 1985 dev->stats.tx_aborted_errors++;
1832 msg = "Late Collision."; 1986 msg = "Late Collision.";
1833 } 1987 }
1834 if (status & Tx_TxPar) { 1988 if (status & Tx_TxPar) {
1835 lp->stats.tx_fifo_errors++; 1989 dev->stats.tx_fifo_errors++;
1836 msg = "Transmit Parity Error."; 1990 msg = "Transmit Parity Error.";
1837 } 1991 }
1838 if (status & Tx_SQErr) { 1992 if (status & Tx_SQErr) {
1839 lp->stats.tx_heartbeat_errors++; 1993 dev->stats.tx_heartbeat_errors++;
1840 msg = "Signal Quality Error."; 1994 msg = "Signal Quality Error.";
1841 } 1995 }
1842 if (msg && netif_msg_tx_err(lp)) 1996 if (msg && netif_msg_tx_err(lp))
@@ -1849,7 +2003,7 @@ tc35815_check_tx_stat(struct net_device *dev, int status)
1849static void 2003static void
1850tc35815_txdone(struct net_device *dev) 2004tc35815_txdone(struct net_device *dev)
1851{ 2005{
1852 struct tc35815_local *lp = dev->priv; 2006 struct tc35815_local *lp = netdev_priv(dev);
1853 struct TxFD *txfd; 2007 struct TxFD *txfd;
1854 unsigned int fdctl; 2008 unsigned int fdctl;
1855 2009
@@ -1878,7 +2032,7 @@ tc35815_txdone(struct net_device *dev)
1878 BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb); 2032 BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb);
1879#endif 2033#endif
1880 if (skb) { 2034 if (skb) {
1881 lp->stats.tx_bytes += skb->len; 2035 dev->stats.tx_bytes += skb->len;
1882 pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE); 2036 pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE);
1883 lp->tx_skbs[lp->tfd_end].skb = NULL; 2037 lp->tx_skbs[lp->tfd_end].skb = NULL;
1884 lp->tx_skbs[lp->tfd_end].skb_dma = 0; 2038 lp->tx_skbs[lp->tfd_end].skb_dma = 0;
@@ -1904,7 +2058,7 @@ tc35815_txdone(struct net_device *dev)
1904 struct tc35815_regs __iomem *tr = 2058 struct tc35815_regs __iomem *tr =
1905 (struct tc35815_regs __iomem *)dev->base_addr; 2059 (struct tc35815_regs __iomem *)dev->base_addr;
1906 int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM; 2060 int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM;
1907 struct TxFD* txhead = &lp->tfd_base[head]; 2061 struct TxFD *txhead = &lp->tfd_base[head];
1908 int qlen = (lp->tfd_start + TX_FD_NUM 2062 int qlen = (lp->tfd_start + TX_FD_NUM
1909 - lp->tfd_end) % TX_FD_NUM; 2063 - lp->tfd_end) % TX_FD_NUM;
1910 2064
@@ -1939,7 +2093,7 @@ tc35815_txdone(struct net_device *dev)
1939 * condition, and space has now been made available, 2093 * condition, and space has now been made available,
1940 * wake up the queue. 2094 * wake up the queue.
1941 */ 2095 */
1942 if (netif_queue_stopped(dev) && ! tc35815_tx_full(dev)) 2096 if (netif_queue_stopped(dev) && !tc35815_tx_full(dev))
1943 netif_wake_queue(dev); 2097 netif_wake_queue(dev);
1944} 2098}
1945 2099
@@ -1947,16 +2101,17 @@ tc35815_txdone(struct net_device *dev)
1947static int 2101static int
1948tc35815_close(struct net_device *dev) 2102tc35815_close(struct net_device *dev)
1949{ 2103{
1950 struct tc35815_local *lp = dev->priv; 2104 struct tc35815_local *lp = netdev_priv(dev);
1951 2105
1952 netif_stop_queue(dev); 2106 netif_stop_queue(dev);
1953#ifdef TC35815_NAPI 2107#ifdef TC35815_NAPI
1954 napi_disable(&lp->napi); 2108 napi_disable(&lp->napi);
1955#endif 2109#endif
2110 if (lp->phy_dev)
2111 phy_stop(lp->phy_dev);
2112 cancel_work_sync(&lp->restart_work);
1956 2113
1957 /* Flush the Tx and disable Rx here. */ 2114 /* Flush the Tx and disable Rx here. */
1958
1959 del_timer(&lp->timer); /* Kill if running */
1960 tc35815_chip_reset(dev); 2115 tc35815_chip_reset(dev);
1961 free_irq(dev->irq, dev); 2116 free_irq(dev->irq, dev);
1962 2117
@@ -1972,34 +2127,30 @@ tc35815_close(struct net_device *dev)
1972 */ 2127 */
1973static struct net_device_stats *tc35815_get_stats(struct net_device *dev) 2128static struct net_device_stats *tc35815_get_stats(struct net_device *dev)
1974{ 2129{
1975 struct tc35815_local *lp = dev->priv;
1976 struct tc35815_regs __iomem *tr = 2130 struct tc35815_regs __iomem *tr =
1977 (struct tc35815_regs __iomem *)dev->base_addr; 2131 (struct tc35815_regs __iomem *)dev->base_addr;
1978 if (netif_running(dev)) { 2132 if (netif_running(dev))
1979 /* Update the statistics from the device registers. */ 2133 /* Update the statistics from the device registers. */
1980 lp->stats.rx_missed_errors = tc_readl(&tr->Miss_Cnt); 2134 dev->stats.rx_missed_errors = tc_readl(&tr->Miss_Cnt);
1981 }
1982 2135
1983 return &lp->stats; 2136 return &dev->stats;
1984} 2137}
1985 2138
1986static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr) 2139static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr)
1987{ 2140{
1988 struct tc35815_local *lp = dev->priv; 2141 struct tc35815_local *lp = netdev_priv(dev);
1989 struct tc35815_regs __iomem *tr = 2142 struct tc35815_regs __iomem *tr =
1990 (struct tc35815_regs __iomem *)dev->base_addr; 2143 (struct tc35815_regs __iomem *)dev->base_addr;
1991 int cam_index = index * 6; 2144 int cam_index = index * 6;
1992 u32 cam_data; 2145 u32 cam_data;
1993 u32 saved_addr; 2146 u32 saved_addr;
2147 DECLARE_MAC_BUF(mac);
2148
1994 saved_addr = tc_readl(&tr->CAM_Adr); 2149 saved_addr = tc_readl(&tr->CAM_Adr);
1995 2150
1996 if (netif_msg_hw(lp)) { 2151 if (netif_msg_hw(lp))
1997 int i; 2152 printk(KERN_DEBUG "%s: CAM %d: %s\n",
1998 printk(KERN_DEBUG "%s: CAM %d:", dev->name, index); 2153 dev->name, index, print_mac(mac, addr));
1999 for (i = 0; i < 6; i++)
2000 printk(" %02x", addr[i]);
2001 printk("\n");
2002 }
2003 if (index & 1) { 2154 if (index & 1) {
2004 /* read modify write */ 2155 /* read modify write */
2005 tc_writel(cam_index - 2, &tr->CAM_Adr); 2156 tc_writel(cam_index - 2, &tr->CAM_Adr);
@@ -2039,28 +2190,24 @@ tc35815_set_multicast_list(struct net_device *dev)
2039 struct tc35815_regs __iomem *tr = 2190 struct tc35815_regs __iomem *tr =
2040 (struct tc35815_regs __iomem *)dev->base_addr; 2191 (struct tc35815_regs __iomem *)dev->base_addr;
2041 2192
2042 if (dev->flags&IFF_PROMISC) 2193 if (dev->flags & IFF_PROMISC) {
2043 {
2044#ifdef WORKAROUND_100HALF_PROMISC 2194#ifdef WORKAROUND_100HALF_PROMISC
2045 /* With some (all?) 100MHalf HUB, controller will hang 2195 /* With some (all?) 100MHalf HUB, controller will hang
2046 * if we enable promiscuous mode before linkup... */ 2196 * if we enable promiscuous mode before linkup... */
2047 struct tc35815_local *lp = dev->priv; 2197 struct tc35815_local *lp = netdev_priv(dev);
2048 int pid = lp->phy_addr; 2198
2049 if (!(tc_mdio_read(dev, pid, MII_BMSR) & BMSR_LSTATUS)) 2199 if (!lp->link)
2050 return; 2200 return;
2051#endif 2201#endif
2052 /* Enable promiscuous mode */ 2202 /* Enable promiscuous mode */
2053 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl); 2203 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
2054 } 2204 } else if ((dev->flags & IFF_ALLMULTI) ||
2055 else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > CAM_ENTRY_MAX - 3) 2205 dev->mc_count > CAM_ENTRY_MAX - 3) {
2056 {
2057 /* CAM 0, 1, 20 are reserved. */ 2206 /* CAM 0, 1, 20 are reserved. */
2058 /* Disable promiscuous mode, use normal mode. */ 2207 /* Disable promiscuous mode, use normal mode. */
2059 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl); 2208 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
2060 } 2209 } else if (dev->mc_count) {
2061 else if(dev->mc_count) 2210 struct dev_mc_list *cur_addr = dev->mc_list;
2062 {
2063 struct dev_mc_list* cur_addr = dev->mc_list;
2064 int i; 2211 int i;
2065 int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE); 2212 int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
2066 2213
@@ -2075,8 +2222,7 @@ tc35815_set_multicast_list(struct net_device *dev)
2075 } 2222 }
2076 tc_writel(ena_bits, &tr->CAM_Ena); 2223 tc_writel(ena_bits, &tr->CAM_Ena);
2077 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); 2224 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
2078 } 2225 } else {
2079 else {
2080 tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena); 2226 tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
2081 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); 2227 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
2082 } 2228 }
@@ -2084,7 +2230,7 @@ tc35815_set_multicast_list(struct net_device *dev)
2084 2230
2085static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2231static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2086{ 2232{
2087 struct tc35815_local *lp = dev->priv; 2233 struct tc35815_local *lp = netdev_priv(dev);
2088 strcpy(info->driver, MODNAME); 2234 strcpy(info->driver, MODNAME);
2089 strcpy(info->version, DRV_VERSION); 2235 strcpy(info->version, DRV_VERSION);
2090 strcpy(info->bus_info, pci_name(lp->pci_dev)); 2236 strcpy(info->bus_info, pci_name(lp->pci_dev));
@@ -2092,78 +2238,37 @@ static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
2092 2238
2093static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2239static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2094{ 2240{
2095 struct tc35815_local *lp = dev->priv; 2241 struct tc35815_local *lp = netdev_priv(dev);
2096 spin_lock_irq(&lp->lock);
2097 mii_ethtool_gset(&lp->mii, cmd);
2098 spin_unlock_irq(&lp->lock);
2099 return 0;
2100}
2101
2102static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2103{
2104 struct tc35815_local *lp = dev->priv;
2105 int rc;
2106#if 1 /* use our negotiation method... */
2107 /* Verify the settings we care about. */
2108 if (cmd->autoneg != AUTONEG_ENABLE &&
2109 cmd->autoneg != AUTONEG_DISABLE)
2110 return -EINVAL;
2111 if (cmd->autoneg == AUTONEG_DISABLE &&
2112 ((cmd->speed != SPEED_100 &&
2113 cmd->speed != SPEED_10) ||
2114 (cmd->duplex != DUPLEX_HALF &&
2115 cmd->duplex != DUPLEX_FULL)))
2116 return -EINVAL;
2117 2242
2118 /* Ok, do it to it. */ 2243 if (!lp->phy_dev)
2119 spin_lock_irq(&lp->lock); 2244 return -ENODEV;
2120 del_timer(&lp->timer); 2245 return phy_ethtool_gset(lp->phy_dev, cmd);
2121 tc35815_start_auto_negotiation(dev, cmd);
2122 spin_unlock_irq(&lp->lock);
2123 rc = 0;
2124#else
2125 spin_lock_irq(&lp->lock);
2126 rc = mii_ethtool_sset(&lp->mii, cmd);
2127 spin_unlock_irq(&lp->lock);
2128#endif
2129 return rc;
2130} 2246}
2131 2247
2132static int tc35815_nway_reset(struct net_device *dev) 2248static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2133{ 2249{
2134 struct tc35815_local *lp = dev->priv; 2250 struct tc35815_local *lp = netdev_priv(dev);
2135 int rc;
2136 spin_lock_irq(&lp->lock);
2137 rc = mii_nway_restart(&lp->mii);
2138 spin_unlock_irq(&lp->lock);
2139 return rc;
2140}
2141 2251
2142static u32 tc35815_get_link(struct net_device *dev) 2252 if (!lp->phy_dev)
2143{ 2253 return -ENODEV;
2144 struct tc35815_local *lp = dev->priv; 2254 return phy_ethtool_sset(lp->phy_dev, cmd);
2145 int rc;
2146 spin_lock_irq(&lp->lock);
2147 rc = mii_link_ok(&lp->mii);
2148 spin_unlock_irq(&lp->lock);
2149 return rc;
2150} 2255}
2151 2256
2152static u32 tc35815_get_msglevel(struct net_device *dev) 2257static u32 tc35815_get_msglevel(struct net_device *dev)
2153{ 2258{
2154 struct tc35815_local *lp = dev->priv; 2259 struct tc35815_local *lp = netdev_priv(dev);
2155 return lp->msg_enable; 2260 return lp->msg_enable;
2156} 2261}
2157 2262
2158static void tc35815_set_msglevel(struct net_device *dev, u32 datum) 2263static void tc35815_set_msglevel(struct net_device *dev, u32 datum)
2159{ 2264{
2160 struct tc35815_local *lp = dev->priv; 2265 struct tc35815_local *lp = netdev_priv(dev);
2161 lp->msg_enable = datum; 2266 lp->msg_enable = datum;
2162} 2267}
2163 2268
2164static int tc35815_get_sset_count(struct net_device *dev, int sset) 2269static int tc35815_get_sset_count(struct net_device *dev, int sset)
2165{ 2270{
2166 struct tc35815_local *lp = dev->priv; 2271 struct tc35815_local *lp = netdev_priv(dev);
2167 2272
2168 switch (sset) { 2273 switch (sset) {
2169 case ETH_SS_STATS: 2274 case ETH_SS_STATS:
@@ -2175,7 +2280,7 @@ static int tc35815_get_sset_count(struct net_device *dev, int sset)
2175 2280
2176static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) 2281static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
2177{ 2282{
2178 struct tc35815_local *lp = dev->priv; 2283 struct tc35815_local *lp = netdev_priv(dev);
2179 data[0] = lp->lstats.max_tx_qlen; 2284 data[0] = lp->lstats.max_tx_qlen;
2180 data[1] = lp->lstats.tx_ints; 2285 data[1] = lp->lstats.tx_ints;
2181 data[2] = lp->lstats.rx_ints; 2286 data[2] = lp->lstats.rx_ints;
@@ -2200,8 +2305,7 @@ static const struct ethtool_ops tc35815_ethtool_ops = {
2200 .get_drvinfo = tc35815_get_drvinfo, 2305 .get_drvinfo = tc35815_get_drvinfo,
2201 .get_settings = tc35815_get_settings, 2306 .get_settings = tc35815_get_settings,
2202 .set_settings = tc35815_set_settings, 2307 .set_settings = tc35815_set_settings,
2203 .nway_reset = tc35815_nway_reset, 2308 .get_link = ethtool_op_get_link,
2204 .get_link = tc35815_get_link,
2205 .get_msglevel = tc35815_get_msglevel, 2309 .get_msglevel = tc35815_get_msglevel,
2206 .set_msglevel = tc35815_set_msglevel, 2310 .set_msglevel = tc35815_set_msglevel,
2207 .get_strings = tc35815_get_strings, 2311 .get_strings = tc35815_get_strings,
@@ -2211,611 +2315,13 @@ static const struct ethtool_ops tc35815_ethtool_ops = {
2211 2315
2212static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2316static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2213{ 2317{
2214 struct tc35815_local *lp = dev->priv; 2318 struct tc35815_local *lp = netdev_priv(dev);
2215 int rc;
2216 2319
2217 if (!netif_running(dev)) 2320 if (!netif_running(dev))
2218 return -EINVAL; 2321 return -EINVAL;
2219 2322 if (!lp->phy_dev)
2220 spin_lock_irq(&lp->lock); 2323 return -ENODEV;
2221 rc = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL); 2324 return phy_mii_ioctl(lp->phy_dev, if_mii(rq), cmd);
2222 spin_unlock_irq(&lp->lock);
2223
2224 return rc;
2225}
2226
2227static int tc_mdio_read(struct net_device *dev, int phy_id, int location)
2228{
2229 struct tc35815_regs __iomem *tr =
2230 (struct tc35815_regs __iomem *)dev->base_addr;
2231 u32 data;
2232 tc_writel(MD_CA_Busy | (phy_id << 5) | location, &tr->MD_CA);
2233 while (tc_readl(&tr->MD_CA) & MD_CA_Busy)
2234 ;
2235 data = tc_readl(&tr->MD_Data);
2236 return data & 0xffff;
2237}
2238
2239static void tc_mdio_write(struct net_device *dev, int phy_id, int location,
2240 int val)
2241{
2242 struct tc35815_regs __iomem *tr =
2243 (struct tc35815_regs __iomem *)dev->base_addr;
2244 tc_writel(val, &tr->MD_Data);
2245 tc_writel(MD_CA_Busy | MD_CA_Wr | (phy_id << 5) | location, &tr->MD_CA);
2246 while (tc_readl(&tr->MD_CA) & MD_CA_Busy)
2247 ;
2248}
2249
2250/* Auto negotiation. The scheme is very simple. We have a timer routine
2251 * that keeps watching the auto negotiation process as it progresses.
2252 * The DP83840 is first told to start doing its thing, we set up the time
2253 * and place the timer state machine in its initial state.
2254 *
2255 * Here the timer peeks at the DP83840 status registers at each tick to see
2256 * if the auto negotiation has completed, we assume here that the DP83840 PHY
2257 * will time out at some point and just tell us what (didn't) happen. For
2258 * complete coverage we only allow so many of the ticks at this level to run,
2259 * when this has expired we print a warning message and try another strategy.
2260 * This "other" strategy is to force the interface into various speed/duplex
2261 * configurations and we stop when we see a link-up condition before the
2262 * maximum number of "peek" ticks have occurred.
2263 *
2264 * Once a valid link status has been detected we configure the BigMAC and
2265 * the rest of the Happy Meal to speak the most efficient protocol we could
2266 * get a clean link for. The priority for link configurations, highest first
2267 * is:
2268 * 100 Base-T Full Duplex
2269 * 100 Base-T Half Duplex
2270 * 10 Base-T Full Duplex
2271 * 10 Base-T Half Duplex
2272 *
2273 * We start a new timer now, after a successful auto negotiation status has
2274 * been detected. This timer just waits for the link-up bit to get set in
2275 * the BMCR of the DP83840. When this occurs we print a kernel log message
2276 * describing the link type in use and the fact that it is up.
2277 *
2278 * If a fatal error of some sort is signalled and detected in the interrupt
2279 * service routine, and the chip is reset, or the link is ifconfig'd down
2280 * and then back up, this entire process repeats itself all over again.
2281 */
2282/* Note: The above comments come from the sunhme driver. */
2283
2284static int tc35815_try_next_permutation(struct net_device *dev)
2285{
2286 struct tc35815_local *lp = dev->priv;
2287 int pid = lp->phy_addr;
2288 unsigned short bmcr;
2289
2290 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2291
2292 /* Downgrade from full to half duplex. Only possible via ethtool. */
2293 if (bmcr & BMCR_FULLDPLX) {
2294 bmcr &= ~BMCR_FULLDPLX;
2295 printk(KERN_DEBUG "%s: try next permutation (BMCR %x)\n", dev->name, bmcr);
2296 tc_mdio_write(dev, pid, MII_BMCR, bmcr);
2297 return 0;
2298 }
2299
2300 /* Downgrade from 100 to 10. */
2301 if (bmcr & BMCR_SPEED100) {
2302 bmcr &= ~BMCR_SPEED100;
2303 printk(KERN_DEBUG "%s: try next permutation (BMCR %x)\n", dev->name, bmcr);
2304 tc_mdio_write(dev, pid, MII_BMCR, bmcr);
2305 return 0;
2306 }
2307
2308 /* We've tried everything. */
2309 return -1;
2310}
2311
2312static void
2313tc35815_display_link_mode(struct net_device *dev)
2314{
2315 struct tc35815_local *lp = dev->priv;
2316 int pid = lp->phy_addr;
2317 unsigned short lpa, bmcr;
2318 char *speed = "", *duplex = "";
2319
2320 lpa = tc_mdio_read(dev, pid, MII_LPA);
2321 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2322 if (options.speed ? (bmcr & BMCR_SPEED100) : (lpa & (LPA_100HALF | LPA_100FULL)))
2323 speed = "100Mb/s";
2324 else
2325 speed = "10Mb/s";
2326 if (options.duplex ? (bmcr & BMCR_FULLDPLX) : (lpa & (LPA_100FULL | LPA_10FULL)))
2327 duplex = "Full Duplex";
2328 else
2329 duplex = "Half Duplex";
2330
2331 if (netif_msg_link(lp))
2332 printk(KERN_INFO "%s: Link is up at %s, %s.\n",
2333 dev->name, speed, duplex);
2334 printk(KERN_DEBUG "%s: MII BMCR %04x BMSR %04x LPA %04x\n",
2335 dev->name,
2336 bmcr, tc_mdio_read(dev, pid, MII_BMSR), lpa);
2337}
2338
2339static void tc35815_display_forced_link_mode(struct net_device *dev)
2340{
2341 struct tc35815_local *lp = dev->priv;
2342 int pid = lp->phy_addr;
2343 unsigned short bmcr;
2344 char *speed = "", *duplex = "";
2345
2346 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2347 if (bmcr & BMCR_SPEED100)
2348 speed = "100Mb/s";
2349 else
2350 speed = "10Mb/s";
2351 if (bmcr & BMCR_FULLDPLX)
2352 duplex = "Full Duplex.\n";
2353 else
2354 duplex = "Half Duplex.\n";
2355
2356 if (netif_msg_link(lp))
2357 printk(KERN_INFO "%s: Link has been forced up at %s, %s",
2358 dev->name, speed, duplex);
2359}
2360
2361static void tc35815_set_link_modes(struct net_device *dev)
2362{
2363 struct tc35815_local *lp = dev->priv;
2364 struct tc35815_regs __iomem *tr =
2365 (struct tc35815_regs __iomem *)dev->base_addr;
2366 int pid = lp->phy_addr;
2367 unsigned short bmcr, lpa;
2368 int speed;
2369
2370 if (lp->timer_state == arbwait) {
2371 lpa = tc_mdio_read(dev, pid, MII_LPA);
2372 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2373 printk(KERN_DEBUG "%s: MII BMCR %04x BMSR %04x LPA %04x\n",
2374 dev->name,
2375 bmcr, tc_mdio_read(dev, pid, MII_BMSR), lpa);
2376 if (!(lpa & (LPA_10HALF | LPA_10FULL |
2377 LPA_100HALF | LPA_100FULL))) {
2378 /* fall back to 10HALF */
2379 printk(KERN_INFO "%s: bad ability %04x - falling back to 10HD.\n",
2380 dev->name, lpa);
2381 lpa = LPA_10HALF;
2382 }
2383 if (options.duplex ? (bmcr & BMCR_FULLDPLX) : (lpa & (LPA_100FULL | LPA_10FULL)))
2384 lp->fullduplex = 1;
2385 else
2386 lp->fullduplex = 0;
2387 if (options.speed ? (bmcr & BMCR_SPEED100) : (lpa & (LPA_100HALF | LPA_100FULL)))
2388 speed = 100;
2389 else
2390 speed = 10;
2391 } else {
2392 /* Forcing a link mode. */
2393 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2394 if (bmcr & BMCR_FULLDPLX)
2395 lp->fullduplex = 1;
2396 else
2397 lp->fullduplex = 0;
2398 if (bmcr & BMCR_SPEED100)
2399 speed = 100;
2400 else
2401 speed = 10;
2402 }
2403
2404 tc_writel(tc_readl(&tr->MAC_Ctl) | MAC_HaltReq, &tr->MAC_Ctl);
2405 if (lp->fullduplex) {
2406 tc_writel(tc_readl(&tr->MAC_Ctl) | MAC_FullDup, &tr->MAC_Ctl);
2407 } else {
2408 tc_writel(tc_readl(&tr->MAC_Ctl) & ~MAC_FullDup, &tr->MAC_Ctl);
2409 }
2410 tc_writel(tc_readl(&tr->MAC_Ctl) & ~MAC_HaltReq, &tr->MAC_Ctl);
2411
2412 /* TX4939 PCFG.SPEEDn bit will be changed on NETDEV_CHANGE event. */
2413
2414#ifndef NO_CHECK_CARRIER
2415 /* TX4939 does not have EnLCarr */
2416 if (lp->boardtype != TC35815_TX4939) {
2417#ifdef WORKAROUND_LOSTCAR
2418 /* WORKAROUND: enable LostCrS only if half duplex operation */
2419 if (!lp->fullduplex && lp->boardtype != TC35815_TX4939)
2420 tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr, &tr->Tx_Ctl);
2421#endif
2422 }
2423#endif
2424 lp->mii.full_duplex = lp->fullduplex;
2425}
2426
2427static void tc35815_timer(unsigned long data)
2428{
2429 struct net_device *dev = (struct net_device *)data;
2430 struct tc35815_local *lp = dev->priv;
2431 int pid = lp->phy_addr;
2432 unsigned short bmsr, bmcr, lpa;
2433 int restart_timer = 0;
2434
2435 spin_lock_irq(&lp->lock);
2436
2437 lp->timer_ticks++;
2438 switch (lp->timer_state) {
2439 case arbwait:
2440 /*
2441   * Only allow for 5 ticks, that's 10 seconds and much too
2442 * long to wait for arbitration to complete.
2443 */
2444  /* TC35815 needs more time... */
2445 if (lp->timer_ticks >= 10) {
2446 /* Enter force mode. */
2447 if (!options.doforce) {
2448 printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful,"
2449         " cable problem?\n", dev->name);
2450 /* Try to restart the adaptor. */
2451 tc35815_restart(dev);
2452 goto out;
2453 }
2454 printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful,"
2455 " trying force link mode\n", dev->name);
2456 printk(KERN_DEBUG "%s: BMCR %x BMSR %x\n", dev->name,
2457 tc_mdio_read(dev, pid, MII_BMCR),
2458 tc_mdio_read(dev, pid, MII_BMSR));
2459 bmcr = BMCR_SPEED100;
2460 tc_mdio_write(dev, pid, MII_BMCR, bmcr);
2461
2462 /*
2463    * OK, seems we need to disable the transceiver
2464 * for the first tick to make sure we get an
2465 * accurate link state at the second tick.
2466 */
2467
2468 lp->timer_state = ltrywait;
2469 lp->timer_ticks = 0;
2470 restart_timer = 1;
2471 } else {
2472 /* Anything interesting happen? */
2473 bmsr = tc_mdio_read(dev, pid, MII_BMSR);
2474 if (bmsr & BMSR_ANEGCOMPLETE) {
2475 /* Just what we've been waiting for... */
2476 tc35815_set_link_modes(dev);
2477
2478 /*
2479 * Success, at least so far, advance our state
2480 * engine.
2481 */
2482 lp->timer_state = lupwait;
2483 restart_timer = 1;
2484 } else {
2485 restart_timer = 1;
2486 }
2487 }
2488 break;
2489
2490 case lupwait:
2491 /*
2492 * Auto negotiation was successful and we are awaiting a
2493 * link up status. I have decided to let this timer run
2494 * forever until some sort of error is signalled, reporting
2495 * a message to the user at 10 second intervals.
2496 */
2497 bmsr = tc_mdio_read(dev, pid, MII_BMSR);
2498 if (bmsr & BMSR_LSTATUS) {
2499 /*
2500 * Wheee, it's up, display the link mode in use and put
2501 * the timer to sleep.
2502 */
2503 tc35815_display_link_mode(dev);
2504 netif_carrier_on(dev);
2505#ifdef WORKAROUND_100HALF_PROMISC
2506 /* delayed promiscuous enabling */
2507 if (dev->flags & IFF_PROMISC)
2508 tc35815_set_multicast_list(dev);
2509#endif
2510#if 1
2511 lp->saved_lpa = tc_mdio_read(dev, pid, MII_LPA);
2512 lp->timer_state = lcheck;
2513 restart_timer = 1;
2514#else
2515 lp->timer_state = asleep;
2516 restart_timer = 0;
2517#endif
2518 } else {
2519 if (lp->timer_ticks >= 10) {
2520 printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
2521 "not completely up.\n", dev->name);
2522 lp->timer_ticks = 0;
2523 restart_timer = 1;
2524 } else {
2525 restart_timer = 1;
2526 }
2527 }
2528 break;
2529
2530 case ltrywait:
2531 /*
2532 * Making the timeout here too long can make it take
2533 * annoyingly long to attempt all of the link mode
2534 * permutations, but then again this is essentially
2535 * error recovery code for the most part.
2536 */
2537 bmsr = tc_mdio_read(dev, pid, MII_BMSR);
2538 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2539 if (lp->timer_ticks == 1) {
2540 /*
2541 * Re-enable transceiver, we'll re-enable the
2542 * transceiver next tick, then check link state
2543 * on the following tick.
2544 */
2545 restart_timer = 1;
2546 break;
2547 }
2548 if (lp->timer_ticks == 2) {
2549 restart_timer = 1;
2550 break;
2551 }
2552 if (bmsr & BMSR_LSTATUS) {
2553 /* Force mode selection success. */
2554 tc35815_display_forced_link_mode(dev);
2555 netif_carrier_on(dev);
2556 tc35815_set_link_modes(dev);
2557#ifdef WORKAROUND_100HALF_PROMISC
2558 /* delayed promiscuous enabling */
2559 if (dev->flags & IFF_PROMISC)
2560 tc35815_set_multicast_list(dev);
2561#endif
2562#if 1
2563 lp->saved_lpa = tc_mdio_read(dev, pid, MII_LPA);
2564 lp->timer_state = lcheck;
2565 restart_timer = 1;
2566#else
2567 lp->timer_state = asleep;
2568 restart_timer = 0;
2569#endif
2570 } else {
2571 if (lp->timer_ticks >= 4) { /* 6 seconds or so... */
2572 int ret;
2573
2574 ret = tc35815_try_next_permutation(dev);
2575 if (ret == -1) {
2576 /*
2577 * Aieee, tried them all, reset the
2578 * chip and try all over again.
2579 */
2580 printk(KERN_NOTICE "%s: Link down, "
2581 "cable problem?\n",
2582 dev->name);
2583
2584 /* Try to restart the adaptor. */
2585 tc35815_restart(dev);
2586 goto out;
2587 }
2588 lp->timer_ticks = 0;
2589 restart_timer = 1;
2590 } else {
2591 restart_timer = 1;
2592 }
2593 }
2594 break;
2595
2596 case lcheck:
2597 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2598 lpa = tc_mdio_read(dev, pid, MII_LPA);
2599 if (bmcr & (BMCR_PDOWN | BMCR_ISOLATE | BMCR_RESET)) {
2600 printk(KERN_ERR "%s: PHY down? (BMCR %x)\n", dev->name,
2601 bmcr);
2602 } else if ((lp->saved_lpa ^ lpa) &
2603 (LPA_100FULL|LPA_100HALF|LPA_10FULL|LPA_10HALF)) {
2604 printk(KERN_NOTICE "%s: link status changed"
2605 " (BMCR %x LPA %x->%x)\n", dev->name,
2606 bmcr, lp->saved_lpa, lpa);
2607 } else {
2608 /* go on */
2609 restart_timer = 1;
2610 break;
2611 }
2612 /* Try to restart the adaptor. */
2613 tc35815_restart(dev);
2614 goto out;
2615
2616 case asleep:
2617 default:
2618  /* Can't happen... */
2619 printk(KERN_ERR "%s: Aieee, link timer is asleep but we got "
2620 "one anyways!\n", dev->name);
2621 restart_timer = 0;
2622 lp->timer_ticks = 0;
2623 lp->timer_state = asleep; /* foo on you */
2624 break;
2625 }
2626
2627 if (restart_timer) {
2628 lp->timer.expires = jiffies + msecs_to_jiffies(1200);
2629 add_timer(&lp->timer);
2630 }
2631out:
2632 spin_unlock_irq(&lp->lock);
2633}
2634
2635static void tc35815_start_auto_negotiation(struct net_device *dev,
2636 struct ethtool_cmd *ep)
2637{
2638 struct tc35815_local *lp = dev->priv;
2639 int pid = lp->phy_addr;
2640 unsigned short bmsr, bmcr, advertize;
2641 int timeout;
2642
2643 netif_carrier_off(dev);
2644 bmsr = tc_mdio_read(dev, pid, MII_BMSR);
2645 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2646 advertize = tc_mdio_read(dev, pid, MII_ADVERTISE);
2647
2648 if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
2649 if (options.speed || options.duplex) {
2650 /* Advertise only specified configuration. */
2651 advertize &= ~(ADVERTISE_10HALF |
2652 ADVERTISE_10FULL |
2653 ADVERTISE_100HALF |
2654 ADVERTISE_100FULL);
2655 if (options.speed != 10) {
2656 if (options.duplex != 1)
2657 advertize |= ADVERTISE_100FULL;
2658 if (options.duplex != 2)
2659 advertize |= ADVERTISE_100HALF;
2660 }
2661 if (options.speed != 100) {
2662 if (options.duplex != 1)
2663 advertize |= ADVERTISE_10FULL;
2664 if (options.duplex != 2)
2665 advertize |= ADVERTISE_10HALF;
2666 }
2667 if (options.speed == 100)
2668 bmcr |= BMCR_SPEED100;
2669 else if (options.speed == 10)
2670 bmcr &= ~BMCR_SPEED100;
2671 if (options.duplex == 2)
2672 bmcr |= BMCR_FULLDPLX;
2673 else if (options.duplex == 1)
2674 bmcr &= ~BMCR_FULLDPLX;
2675 } else {
2676 /* Advertise everything we can support. */
2677 if (bmsr & BMSR_10HALF)
2678 advertize |= ADVERTISE_10HALF;
2679 else
2680 advertize &= ~ADVERTISE_10HALF;
2681 if (bmsr & BMSR_10FULL)
2682 advertize |= ADVERTISE_10FULL;
2683 else
2684 advertize &= ~ADVERTISE_10FULL;
2685 if (bmsr & BMSR_100HALF)
2686 advertize |= ADVERTISE_100HALF;
2687 else
2688 advertize &= ~ADVERTISE_100HALF;
2689 if (bmsr & BMSR_100FULL)
2690 advertize |= ADVERTISE_100FULL;
2691 else
2692 advertize &= ~ADVERTISE_100FULL;
2693 }
2694
2695 tc_mdio_write(dev, pid, MII_ADVERTISE, advertize);
2696
2697 /* Enable Auto-Negotiation, this is usually on already... */
2698 bmcr |= BMCR_ANENABLE;
2699 tc_mdio_write(dev, pid, MII_BMCR, bmcr);
2700
2701 /* Restart it to make sure it is going. */
2702 bmcr |= BMCR_ANRESTART;
2703 tc_mdio_write(dev, pid, MII_BMCR, bmcr);
2704 printk(KERN_DEBUG "%s: ADVERTISE %x BMCR %x\n", dev->name, advertize, bmcr);
2705
2706 /* BMCR_ANRESTART self clears when the process has begun. */
2707 timeout = 64; /* More than enough. */
2708 while (--timeout) {
2709 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2710 if (!(bmcr & BMCR_ANRESTART))
2711 break; /* got it. */
2712 udelay(10);
2713 }
2714 if (!timeout) {
2715 printk(KERN_ERR "%s: TC35815 would not start auto "
2716 "negotiation BMCR=0x%04x\n",
2717 dev->name, bmcr);
2718 printk(KERN_NOTICE "%s: Performing force link "
2719 "detection.\n", dev->name);
2720 goto force_link;
2721 } else {
2722 printk(KERN_DEBUG "%s: auto negotiation started.\n", dev->name);
2723 lp->timer_state = arbwait;
2724 }
2725 } else {
2726force_link:
2727 /* Force the link up, trying first a particular mode.
2728 * Either we are here at the request of ethtool or
2729 * because the Happy Meal would not start to autoneg.
2730 */
2731
2732 /* Disable auto-negotiation in BMCR, enable the duplex and
2733 * speed setting, init the timer state machine, and fire it off.
2734 */
2735 if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
2736 bmcr = BMCR_SPEED100;
2737 } else {
2738 if (ep->speed == SPEED_100)
2739 bmcr = BMCR_SPEED100;
2740 else
2741 bmcr = 0;
2742 if (ep->duplex == DUPLEX_FULL)
2743 bmcr |= BMCR_FULLDPLX;
2744 }
2745 tc_mdio_write(dev, pid, MII_BMCR, bmcr);
2746
2747  /* OK, seems we need to disable the transceiver for the first
2748 * tick to make sure we get an accurate link state at the
2749 * second tick.
2750 */
2751 lp->timer_state = ltrywait;
2752 }
2753
2754 del_timer(&lp->timer);
2755 lp->timer_ticks = 0;
2756 lp->timer.expires = jiffies + msecs_to_jiffies(1200);
2757 add_timer(&lp->timer);
2758}
2759
2760static void tc35815_find_phy(struct net_device *dev)
2761{
2762 struct tc35815_local *lp = dev->priv;
2763 int pid = lp->phy_addr;
2764 unsigned short id0;
2765
2766 /* find MII phy */
2767 for (pid = 31; pid >= 0; pid--) {
2768 id0 = tc_mdio_read(dev, pid, MII_BMSR);
2769 if (id0 != 0xffff && id0 != 0x0000 &&
2770 (id0 & BMSR_RESV) != (0xffff & BMSR_RESV) /* paranoia? */
2771 ) {
2772 lp->phy_addr = pid;
2773 break;
2774 }
2775 }
2776 if (pid < 0) {
2777 printk(KERN_ERR "%s: No MII Phy found.\n",
2778 dev->name);
2779 lp->phy_addr = pid = 0;
2780 }
2781
2782 lp->mii_id[0] = tc_mdio_read(dev, pid, MII_PHYSID1);
2783 lp->mii_id[1] = tc_mdio_read(dev, pid, MII_PHYSID2);
2784 if (netif_msg_hw(lp))
2785 printk(KERN_INFO "%s: PHY(%02x) ID %04x %04x\n", dev->name,
2786 pid, lp->mii_id[0], lp->mii_id[1]);
2787}
2788
2789static void tc35815_phy_chip_init(struct net_device *dev)
2790{
2791 struct tc35815_local *lp = dev->priv;
2792 int pid = lp->phy_addr;
2793 unsigned short bmcr;
2794 struct ethtool_cmd ecmd, *ep;
2795
2796 /* dis-isolate if needed. */
2797 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2798 if (bmcr & BMCR_ISOLATE) {
2799 int count = 32;
2800 printk(KERN_DEBUG "%s: unisolating...", dev->name);
2801 tc_mdio_write(dev, pid, MII_BMCR, bmcr & ~BMCR_ISOLATE);
2802 while (--count) {
2803 if (!(tc_mdio_read(dev, pid, MII_BMCR) & BMCR_ISOLATE))
2804 break;
2805 udelay(20);
2806 }
2807 printk(" %s.\n", count ? "done" : "failed");
2808 }
2809
2810 if (options.speed && options.duplex) {
2811 ecmd.autoneg = AUTONEG_DISABLE;
2812 ecmd.speed = options.speed == 10 ? SPEED_10 : SPEED_100;
2813 ecmd.duplex = options.duplex == 1 ? DUPLEX_HALF : DUPLEX_FULL;
2814 ep = &ecmd;
2815 } else {
2816 ep = NULL;
2817 }
2818 tc35815_start_auto_negotiation(dev, ep);
2819} 2325}
2820 2326
2821static void tc35815_chip_reset(struct net_device *dev) 2327static void tc35815_chip_reset(struct net_device *dev)
@@ -2862,13 +2368,11 @@ static void tc35815_chip_reset(struct net_device *dev)
2862 2368
2863static void tc35815_chip_init(struct net_device *dev) 2369static void tc35815_chip_init(struct net_device *dev)
2864{ 2370{
2865 struct tc35815_local *lp = dev->priv; 2371 struct tc35815_local *lp = netdev_priv(dev);
2866 struct tc35815_regs __iomem *tr = 2372 struct tc35815_regs __iomem *tr =
2867 (struct tc35815_regs __iomem *)dev->base_addr; 2373 (struct tc35815_regs __iomem *)dev->base_addr;
2868 unsigned long txctl = TX_CTL_CMD; 2374 unsigned long txctl = TX_CTL_CMD;
2869 2375
2870 tc35815_phy_chip_init(dev);
2871
2872 /* load station address to CAM */ 2376 /* load station address to CAM */
2873 tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr); 2377 tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr);
2874 2378
@@ -2905,12 +2409,11 @@ static void tc35815_chip_init(struct net_device *dev)
2905 /* start MAC transmitter */ 2409 /* start MAC transmitter */
2906#ifndef NO_CHECK_CARRIER 2410#ifndef NO_CHECK_CARRIER
2907 /* TX4939 does not have EnLCarr */ 2411 /* TX4939 does not have EnLCarr */
2908 if (lp->boardtype == TC35815_TX4939) 2412 if (lp->chiptype == TC35815_TX4939)
2909 txctl &= ~Tx_EnLCarr; 2413 txctl &= ~Tx_EnLCarr;
2910#ifdef WORKAROUND_LOSTCAR 2414#ifdef WORKAROUND_LOSTCAR
2911 /* WORKAROUND: ignore LostCrS in full duplex operation */ 2415 /* WORKAROUND: ignore LostCrS in full duplex operation */
2912 if ((lp->timer_state != asleep && lp->timer_state != lcheck) || 2416 if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL)
2913 lp->fullduplex)
2914 txctl &= ~Tx_EnLCarr; 2417 txctl &= ~Tx_EnLCarr;
2915#endif 2418#endif
2916#endif /* !NO_CHECK_CARRIER */ 2419#endif /* !NO_CHECK_CARRIER */
@@ -2924,15 +2427,16 @@ static void tc35815_chip_init(struct net_device *dev)
2924static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state) 2427static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state)
2925{ 2428{
2926 struct net_device *dev = pci_get_drvdata(pdev); 2429 struct net_device *dev = pci_get_drvdata(pdev);
2927 struct tc35815_local *lp = dev->priv; 2430 struct tc35815_local *lp = netdev_priv(dev);
2928 unsigned long flags; 2431 unsigned long flags;
2929 2432
2930 pci_save_state(pdev); 2433 pci_save_state(pdev);
2931 if (!netif_running(dev)) 2434 if (!netif_running(dev))
2932 return 0; 2435 return 0;
2933 netif_device_detach(dev); 2436 netif_device_detach(dev);
2437 if (lp->phy_dev)
2438 phy_stop(lp->phy_dev);
2934 spin_lock_irqsave(&lp->lock, flags); 2439 spin_lock_irqsave(&lp->lock, flags);
2935 del_timer(&lp->timer); /* Kill if running */
2936 tc35815_chip_reset(dev); 2440 tc35815_chip_reset(dev);
2937 spin_unlock_irqrestore(&lp->lock, flags); 2441 spin_unlock_irqrestore(&lp->lock, flags);
2938 pci_set_power_state(pdev, PCI_D3hot); 2442 pci_set_power_state(pdev, PCI_D3hot);
@@ -2942,16 +2446,15 @@ static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state)
2942static int tc35815_resume(struct pci_dev *pdev) 2446static int tc35815_resume(struct pci_dev *pdev)
2943{ 2447{
2944 struct net_device *dev = pci_get_drvdata(pdev); 2448 struct net_device *dev = pci_get_drvdata(pdev);
2945 struct tc35815_local *lp = dev->priv; 2449 struct tc35815_local *lp = netdev_priv(dev);
2946 unsigned long flags;
2947 2450
2948 pci_restore_state(pdev); 2451 pci_restore_state(pdev);
2949 if (!netif_running(dev)) 2452 if (!netif_running(dev))
2950 return 0; 2453 return 0;
2951 pci_set_power_state(pdev, PCI_D0); 2454 pci_set_power_state(pdev, PCI_D0);
2952 spin_lock_irqsave(&lp->lock, flags);
2953 tc35815_restart(dev); 2455 tc35815_restart(dev);
2954 spin_unlock_irqrestore(&lp->lock, flags); 2456 if (lp->phy_dev)
2457 phy_start(lp->phy_dev);
2955 netif_device_attach(dev); 2458 netif_device_attach(dev);
2956 return 0; 2459 return 0;
2957} 2460}
@@ -2972,8 +2475,6 @@ module_param_named(speed, options.speed, int, 0);
2972MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps"); 2475MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps");
2973module_param_named(duplex, options.duplex, int, 0); 2476module_param_named(duplex, options.duplex, int, 0);
2974MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full"); 2477MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full");
2975module_param_named(doforce, options.doforce, int, 0);
2976MODULE_PARM_DESC(doforce, "try force link mode if auto-negotiation failed");
2977 2478
2978static int __init tc35815_init_module(void) 2479static int __init tc35815_init_module(void)
2979{ 2480{
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 3f69f53d7768..908422f2f320 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -268,7 +268,12 @@ enum t21143_csr6_bits {
268#define RX_RING_SIZE 128 268#define RX_RING_SIZE 128
269#define MEDIA_MASK 31 269#define MEDIA_MASK 31
270 270
271#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */ 271/* The receiver on the DC21143 rev 65 can fail to close the last
272 * receive descriptor in certain circumstances (see errata) when
273 * using MWI. This can only occur if the receive buffer ends on
274 * a cache line boundary, so the "+ 4" below ensures it doesn't.
275 */
276#define PKT_BUF_SZ (1536 + 4) /* Size of each temporary Rx buffer. */
272 277
273#define TULIP_MIN_CACHE_LINE 8 /* in units of 32-bit words */ 278#define TULIP_MIN_CACHE_LINE 8 /* in units of 32-bit words */
274 279
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 82f404b76d81..fa1c1c329a2d 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1154,18 +1154,13 @@ static void __devinit tulip_mwi_config (struct pci_dev *pdev,
1154 1154
1155 tp->csr0 = csr0 = 0; 1155 tp->csr0 = csr0 = 0;
1156 1156
1157 /* if we have any cache line size at all, we can do MRM */ 1157 /* if we have any cache line size at all, we can do MRM and MWI */
1158 csr0 |= MRM; 1158 csr0 |= MRM | MWI;
1159 1159
1160 /* ...and barring hardware bugs, MWI */ 1160 /* Enable MWI in the standard PCI command bit.
1161 if (!(tp->chip_id == DC21143 && tp->revision == 65)) 1161 * Check for the case where MWI is desired but not available
1162 csr0 |= MWI;
1163
1164 /* set or disable MWI in the standard PCI command bit.
1165 * Check for the case where mwi is desired but not available
1166 */ 1162 */
1167 if (csr0 & MWI) pci_try_set_mwi(pdev); 1163 pci_try_set_mwi(pdev);
1168 else pci_clear_mwi(pdev);
1169 1164
1170 /* read result from hardware (in case bit refused to enable) */ 1165 /* read result from hardware (in case bit refused to enable) */
1171 pci_read_config_word(pdev, PCI_COMMAND, &pci_command); 1166 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
@@ -1401,10 +1396,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1401#ifdef CONFIG_TULIP_MWI 1396#ifdef CONFIG_TULIP_MWI
1402 if (!force_csr0 && (tp->flags & HAS_PCI_MWI)) 1397 if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1403 tulip_mwi_config (pdev, dev); 1398 tulip_mwi_config (pdev, dev);
1404#else
1405 /* MWI is broken for DC21143 rev 65... */
1406 if (chip_idx == DC21143 && pdev->revision == 65)
1407 tp->csr0 &= ~MWI;
1408#endif 1399#endif
1409 1400
1410 /* Stop the chip's Tx and Rx processes. */ 1401 /* Stop the chip's Tx and Rx processes. */
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 35d0cfcf8c47..50068194c163 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -107,8 +107,6 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
107/* Time in jiffies before concluding the transmitter is hung. */ 107/* Time in jiffies before concluding the transmitter is hung. */
108#define TX_TIMEOUT (2*HZ) 108#define TX_TIMEOUT (2*HZ)
109 109
110#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
111
112/* Include files, designed to support most kernel versions 2.0.0 and later. */ 110/* Include files, designed to support most kernel versions 2.0.0 and later. */
113#include <linux/module.h> 111#include <linux/module.h>
114#include <linux/kernel.h> 112#include <linux/kernel.h>
@@ -137,6 +135,9 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
137 135
138#include "tulip.h" 136#include "tulip.h"
139 137
138#undef PKT_BUF_SZ /* tulip.h also defines this */
139#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
140
140/* These identify the driver base version and may not be removed. */ 141/* These identify the driver base version and may not be removed. */
141static char version[] = 142static char version[] =
142KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n" 143KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 0ee4c168e4c0..29a4d650e8a8 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3954,7 +3954,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3954 if (err) 3954 if (err)
3955 return -1; 3955 return -1;
3956 3956
3957 ug_info->mdio_bus = res.start; 3957 snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "%x", res.start);
3958 } 3958 }
3959 3959
3960 /* get the phy interface type, or default to MII */ 3960 /* get the phy interface type, or default to MII */
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 4fb95b3af948..9f8b7580a3a4 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -1156,7 +1156,7 @@ struct ucc_geth_info {
1156 u16 pausePeriod; 1156 u16 pausePeriod;
1157 u16 extensionField; 1157 u16 extensionField;
1158 u8 phy_address; 1158 u8 phy_address;
1159 u32 mdio_bus; 1159 char mdio_bus[MII_BUS_ID_SIZE];
1160 u8 weightfactor[NUM_TX_QUEUES]; 1160 u8 weightfactor[NUM_TX_QUEUES];
1161 u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES]; 1161 u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
1162 u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX]; 1162 u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX];
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
index c69e654d539f..e4d3f330bac3 100644
--- a/drivers/net/ucc_geth_mii.c
+++ b/drivers/net/ucc_geth_mii.c
@@ -157,7 +157,7 @@ static int uec_mdio_probe(struct of_device *ofdev, const struct of_device_id *ma
157 if (err) 157 if (err)
158 goto reg_map_fail; 158 goto reg_map_fail;
159 159
160 new_bus->id = res.start; 160 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
161 161
162 new_bus->irq = kmalloc(32 * sizeof(int), GFP_KERNEL); 162 new_bus->irq = kmalloc(32 * sizeof(int), GFP_KERNEL);
163 163