aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2008-03-26 02:11:25 -0400
committerDavid S. Miller <davem@davemloft.net>2008-03-26 02:11:25 -0400
commit14eabf70c82cade5dbc71d2e913d533193a91785 (patch)
tree53bdf1534f73c59fe8ae796f660cab541a20224c /drivers/net
parentf49e1aa133c2c9b74b5dfddca8863609bbda9086 (diff)
parent2f4489112896770d66dc2960f71174d69ee23004 (diff)
Merge branch 'upstream-net26' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/8139too.c10
-rw-r--r--drivers/net/Kconfig7
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/atp.c4
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c22
-rw-r--r--drivers/net/defxx.c3
-rw-r--r--drivers/net/e1000/e1000.h27
-rw-r--r--drivers/net/e1000/e1000_ethtool.c17
-rw-r--r--drivers/net/e1000/e1000_hw.c223
-rw-r--r--drivers/net/e1000/e1000_hw.h62
-rw-r--r--drivers/net/e1000/e1000_main.c131
-rw-r--r--drivers/net/e1000/e1000_osdep.h7
-rw-r--r--drivers/net/e1000e/e1000.h4
-rw-r--r--drivers/net/e1000e/lib.c56
-rw-r--r--drivers/net/e1000e/netdev.c37
-rw-r--r--drivers/net/forcedeth.c6
-rw-r--r--drivers/net/gianfar.c24
-rw-r--r--drivers/net/gianfar.h9
-rw-r--r--drivers/net/ibmveth.c42
-rw-r--r--drivers/net/ixgb/ixgb.h28
-rw-r--r--drivers/net/ixgb/ixgb_ee.c50
-rw-r--r--drivers/net/ixgb/ixgb_ee.h2
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c17
-rw-r--r--drivers/net/ixgb/ixgb_hw.c57
-rw-r--r--drivers/net/ixgb/ixgb_hw.h18
-rw-r--r--drivers/net/ixgb/ixgb_main.c96
-rw-r--r--drivers/net/ixgb/ixgb_osdep.h7
-rw-r--r--drivers/net/korina.c1233
-rw-r--r--drivers/net/mv643xx_eth.c2
-rw-r--r--drivers/net/natsemi.c10
-rw-r--r--drivers/net/ni52.c255
-rw-r--r--drivers/net/ni52.h4
-rw-r--r--drivers/net/phy/broadcom.c57
-rw-r--r--drivers/net/qla3xxx.c6
-rw-r--r--drivers/net/s2io.c17
-rw-r--r--drivers/net/skfp/fplustm.c12
-rw-r--r--drivers/net/skfp/h/fplustm.h20
-rw-r--r--drivers/net/skfp/hwmtm.c86
-rw-r--r--drivers/net/skfp/skfddi.c4
-rw-r--r--drivers/net/usb/dm9601.c10
-rw-r--r--drivers/net/usb/rndis_host.c5
41 files changed, 1948 insertions, 740 deletions
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index be6e918456d9..53bd903d2321 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -966,8 +966,8 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
966 966
967 addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6; 967 addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
968 for (i = 0; i < 3; i++) 968 for (i = 0; i < 3; i++)
969 ((u16 *) (dev->dev_addr))[i] = 969 ((__le16 *) (dev->dev_addr))[i] =
970 le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len)); 970 cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len));
971 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 971 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
972 972
973 /* The Rtl8139-specific entries in the device structure. */ 973 /* The Rtl8139-specific entries in the device structure. */
@@ -1373,8 +1373,8 @@ static void rtl8139_hw_start (struct net_device *dev)
1373 /* unlock Config[01234] and BMCR register writes */ 1373 /* unlock Config[01234] and BMCR register writes */
1374 RTL_W8_F (Cfg9346, Cfg9346_Unlock); 1374 RTL_W8_F (Cfg9346, Cfg9346_Unlock);
1375 /* Restore our idea of the MAC address. */ 1375 /* Restore our idea of the MAC address. */
1376 RTL_W32_F (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0))); 1376 RTL_W32_F (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1377 RTL_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4))); 1377 RTL_W32_F (MAC0 + 4, le16_to_cpu (*(__le16 *) (dev->dev_addr + 4)));
1378 1378
1379 /* Must enable Tx/Rx before setting transfer thresholds! */ 1379 /* Must enable Tx/Rx before setting transfer thresholds! */
1380 RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb); 1380 RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
@@ -1945,7 +1945,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
1945 rmb(); 1945 rmb();
1946 1946
1947 /* read size+status of next frame from DMA ring buffer */ 1947 /* read size+status of next frame from DMA ring buffer */
1948 rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset)); 1948 rx_status = le32_to_cpu (*(__le32 *) (rx_ring + ring_offset));
1949 rx_size = rx_status >> 16; 1949 rx_size = rx_status >> 16;
1950 pkt_size = rx_size - 4; 1950 pkt_size = rx_size - 4;
1951 1951
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 978e72ab39c2..95d1b61342ec 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -467,6 +467,13 @@ config SNI_82596
467 Say Y here to support the on-board Intel 82596 ethernet controller 467 Say Y here to support the on-board Intel 82596 ethernet controller
468 built into SNI RM machines. 468 built into SNI RM machines.
469 469
470config KORINA
471 tristate "Korina (IDT RC32434) Ethernet support"
472 depends on NET_ETHERNET && MIKROTIK_RB500
473 help
474 If you have a Mikrotik RouterBoard 500 or IDT RC32434
475 based system say Y. Otherwise say N.
476
470config MIPS_JAZZ_SONIC 477config MIPS_JAZZ_SONIC
471 tristate "MIPS JAZZ onboard SONIC Ethernet support" 478 tristate "MIPS JAZZ onboard SONIC Ethernet support"
472 depends on MACH_JAZZ 479 depends on MACH_JAZZ
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 960999c97c69..4d71729e85e5 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -190,6 +190,7 @@ obj-$(CONFIG_ZORRO8390) += zorro8390.o
190obj-$(CONFIG_HPLANCE) += hplance.o 7990.o 190obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
191obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o 191obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
192obj-$(CONFIG_EQUALIZER) += eql.o 192obj-$(CONFIG_EQUALIZER) += eql.o
193obj-$(CONFIG_KORINA) += korina.o
193obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o 194obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
194obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o 195obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
195obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o 196obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 62f09e59d9c4..3d4433358a36 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -378,8 +378,8 @@ static void __init get_node_ID(struct net_device *dev)
378 sa_offset = 15; 378 sa_offset = 15;
379 379
380 for (i = 0; i < 3; i++) 380 for (i = 0; i < 3; i++)
381 ((u16 *)dev->dev_addr)[i] = 381 ((__be16 *)dev->dev_addr)[i] =
382 be16_to_cpu(eeprom_op(ioaddr, EE_READ(sa_offset + i))); 382 cpu_to_be16(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
383 383
384 write_reg(ioaddr, CMR2, CMR2_NULL); 384 write_reg(ioaddr, CMR2, CMR2_NULL);
385} 385}
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 901c824bfe6d..ff9c013ce535 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -833,10 +833,26 @@ static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
833 return 0; 833 return 0;
834} 834}
835 835
836/*
837 * That skb would better have come from process_responses() where we abuse
838 * ->priority and ->csum to carry our data. NB: if we get to per-arch
839 * ->csum, the things might get really interesting here.
840 */
841
842static inline u32 get_hwtid(struct sk_buff *skb)
843{
844 return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
845}
846
847static inline u32 get_opcode(struct sk_buff *skb)
848{
849 return G_OPCODE(ntohl((__force __be32)skb->csum));
850}
851
836static int do_term(struct t3cdev *dev, struct sk_buff *skb) 852static int do_term(struct t3cdev *dev, struct sk_buff *skb)
837{ 853{
838 unsigned int hwtid = ntohl(skb->priority) >> 8 & 0xfffff; 854 unsigned int hwtid = get_hwtid(skb);
839 unsigned int opcode = G_OPCODE(ntohl(skb->csum)); 855 unsigned int opcode = get_opcode(skb);
840 struct t3c_tid_entry *t3c_tid; 856 struct t3c_tid_entry *t3c_tid;
841 857
842 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); 858 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
@@ -914,7 +930,7 @@ int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
914{ 930{
915 while (n--) { 931 while (n--) {
916 struct sk_buff *skb = *skbs++; 932 struct sk_buff *skb = *skbs++;
917 unsigned int opcode = G_OPCODE(ntohl(skb->csum)); 933 unsigned int opcode = get_opcode(skb);
918 int ret = cpl_handlers[opcode] (dev, skb); 934 int ret = cpl_handlers[opcode] (dev, skb);
919 935
920#if VALIDATE_TID 936#if VALIDATE_TID
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index ddc30c4bf34a..c062aacf229c 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -971,7 +971,8 @@ static int __devinit dfx_driver_init(struct net_device *dev,
971 int alloc_size; /* total buffer size needed */ 971 int alloc_size; /* total buffer size needed */
972 char *top_v, *curr_v; /* virtual addrs into memory block */ 972 char *top_v, *curr_v; /* virtual addrs into memory block */
973 dma_addr_t top_p, curr_p; /* physical addrs into memory block */ 973 dma_addr_t top_p, curr_p; /* physical addrs into memory block */
974 u32 data, le32; /* host data register value */ 974 u32 data; /* host data register value */
975 __le32 le32;
975 char *board_name = NULL; 976 char *board_name = NULL;
976 977
977 DBG_printk("In dfx_driver_init...\n"); 978 DBG_printk("In dfx_driver_init...\n");
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 3b840283a9c3..a05aa51ecfa6 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -188,7 +188,7 @@ struct e1000_tx_ring {
188 spinlock_t tx_lock; 188 spinlock_t tx_lock;
189 uint16_t tdh; 189 uint16_t tdh;
190 uint16_t tdt; 190 uint16_t tdt;
191 boolean_t last_tx_tso; 191 bool last_tx_tso;
192}; 192};
193 193
194struct e1000_rx_ring { 194struct e1000_rx_ring {
@@ -249,7 +249,6 @@ struct e1000_adapter {
249#ifdef CONFIG_E1000_NAPI 249#ifdef CONFIG_E1000_NAPI
250 spinlock_t tx_queue_lock; 250 spinlock_t tx_queue_lock;
251#endif 251#endif
252 atomic_t irq_sem;
253 unsigned int total_tx_bytes; 252 unsigned int total_tx_bytes;
254 unsigned int total_tx_packets; 253 unsigned int total_tx_packets;
255 unsigned int total_rx_bytes; 254 unsigned int total_rx_bytes;
@@ -283,17 +282,17 @@ struct e1000_adapter {
283 uint32_t tx_fifo_size; 282 uint32_t tx_fifo_size;
284 uint8_t tx_timeout_factor; 283 uint8_t tx_timeout_factor;
285 atomic_t tx_fifo_stall; 284 atomic_t tx_fifo_stall;
286 boolean_t pcix_82544; 285 bool pcix_82544;
287 boolean_t detect_tx_hung; 286 bool detect_tx_hung;
288 287
289 /* RX */ 288 /* RX */
290#ifdef CONFIG_E1000_NAPI 289#ifdef CONFIG_E1000_NAPI
291 boolean_t (*clean_rx) (struct e1000_adapter *adapter, 290 bool (*clean_rx) (struct e1000_adapter *adapter,
292 struct e1000_rx_ring *rx_ring, 291 struct e1000_rx_ring *rx_ring,
293 int *work_done, int work_to_do); 292 int *work_done, int work_to_do);
294#else 293#else
295 boolean_t (*clean_rx) (struct e1000_adapter *adapter, 294 bool (*clean_rx) (struct e1000_adapter *adapter,
296 struct e1000_rx_ring *rx_ring); 295 struct e1000_rx_ring *rx_ring);
297#endif 296#endif
298 void (*alloc_rx_buf) (struct e1000_adapter *adapter, 297 void (*alloc_rx_buf) (struct e1000_adapter *adapter,
299 struct e1000_rx_ring *rx_ring, 298 struct e1000_rx_ring *rx_ring,
@@ -312,7 +311,7 @@ struct e1000_adapter {
312 uint32_t alloc_rx_buff_failed; 311 uint32_t alloc_rx_buff_failed;
313 uint32_t rx_int_delay; 312 uint32_t rx_int_delay;
314 uint32_t rx_abs_int_delay; 313 uint32_t rx_abs_int_delay;
315 boolean_t rx_csum; 314 bool rx_csum;
316 unsigned int rx_ps_pages; 315 unsigned int rx_ps_pages;
317 uint32_t gorcl; 316 uint32_t gorcl;
318 uint64_t gorcl_old; 317 uint64_t gorcl_old;
@@ -335,12 +334,12 @@ struct e1000_adapter {
335 struct e1000_rx_ring test_rx_ring; 334 struct e1000_rx_ring test_rx_ring;
336 335
337 int msg_enable; 336 int msg_enable;
338 boolean_t have_msi; 337 bool have_msi;
339 338
340 /* to not mess up cache alignment, always add to the bottom */ 339 /* to not mess up cache alignment, always add to the bottom */
341 boolean_t tso_force; 340 bool tso_force;
342 boolean_t smart_power_down; /* phy smart power down */ 341 bool smart_power_down; /* phy smart power down */
343 boolean_t quad_port_a; 342 bool quad_port_a;
344 unsigned long flags; 343 unsigned long flags;
345 uint32_t eeprom_wol; 344 uint32_t eeprom_wol;
346}; 345};
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 85e66f4c7886..05e1fb3cf49f 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -353,7 +353,7 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
353 netdev->features &= ~NETIF_F_TSO6; 353 netdev->features &= ~NETIF_F_TSO6;
354 354
355 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled"); 355 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
356 adapter->tso_force = TRUE; 356 adapter->tso_force = true;
357 return 0; 357 return 0;
358} 358}
359 359
@@ -922,7 +922,8 @@ static int
922e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data) 922e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
923{ 923{
924 struct net_device *netdev = adapter->netdev; 924 struct net_device *netdev = adapter->netdev;
925 uint32_t mask, i=0, shared_int = TRUE; 925 uint32_t mask, i = 0;
926 bool shared_int = true;
926 uint32_t irq = adapter->pdev->irq; 927 uint32_t irq = adapter->pdev->irq;
927 928
928 *data = 0; 929 *data = 0;
@@ -931,7 +932,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
931 /* Hook up test interrupt handler just for this test */ 932 /* Hook up test interrupt handler just for this test */
932 if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, 933 if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
933 netdev)) 934 netdev))
934 shared_int = FALSE; 935 shared_int = false;
935 else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, 936 else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
936 netdev->name, netdev)) { 937 netdev->name, netdev)) {
937 *data = 1; 938 *data = 1;
@@ -1295,7 +1296,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1295 uint32_t ctrl_reg = 0; 1296 uint32_t ctrl_reg = 0;
1296 uint32_t stat_reg = 0; 1297 uint32_t stat_reg = 0;
1297 1298
1298 adapter->hw.autoneg = FALSE; 1299 adapter->hw.autoneg = false;
1299 1300
1300 if (adapter->hw.phy_type == e1000_phy_m88) { 1301 if (adapter->hw.phy_type == e1000_phy_m88) {
1301 /* Auto-MDI/MDIX Off */ 1302 /* Auto-MDI/MDIX Off */
@@ -1473,7 +1474,7 @@ e1000_loopback_cleanup(struct e1000_adapter *adapter)
1473 case e1000_82545_rev_3: 1474 case e1000_82545_rev_3:
1474 case e1000_82546_rev_3: 1475 case e1000_82546_rev_3:
1475 default: 1476 default:
1476 hw->autoneg = TRUE; 1477 hw->autoneg = true;
1477 if (hw->phy_type == e1000_phy_gg82563) 1478 if (hw->phy_type == e1000_phy_gg82563)
1478 e1000_write_phy_reg(hw, 1479 e1000_write_phy_reg(hw,
1479 GG82563_PHY_KMRN_MODE_CTRL, 1480 GG82563_PHY_KMRN_MODE_CTRL,
@@ -1607,13 +1608,13 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
1607 *data = 0; 1608 *data = 0;
1608 if (adapter->hw.media_type == e1000_media_type_internal_serdes) { 1609 if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
1609 int i = 0; 1610 int i = 0;
1610 adapter->hw.serdes_link_down = TRUE; 1611 adapter->hw.serdes_link_down = true;
1611 1612
1612 /* On some blade server designs, link establishment 1613 /* On some blade server designs, link establishment
1613 * could take as long as 2-3 minutes */ 1614 * could take as long as 2-3 minutes */
1614 do { 1615 do {
1615 e1000_check_for_link(&adapter->hw); 1616 e1000_check_for_link(&adapter->hw);
1616 if (adapter->hw.serdes_link_down == FALSE) 1617 if (!adapter->hw.serdes_link_down)
1617 return *data; 1618 return *data;
1618 msleep(20); 1619 msleep(20);
1619 } while (i++ < 3750); 1620 } while (i++ < 3750);
@@ -1649,7 +1650,7 @@ e1000_diag_test(struct net_device *netdev,
1649 struct ethtool_test *eth_test, uint64_t *data) 1650 struct ethtool_test *eth_test, uint64_t *data)
1650{ 1651{
1651 struct e1000_adapter *adapter = netdev_priv(netdev); 1652 struct e1000_adapter *adapter = netdev_priv(netdev);
1652 boolean_t if_running = netif_running(netdev); 1653 bool if_running = netif_running(netdev);
1653 1654
1654 set_bit(__E1000_TESTING, &adapter->flags); 1655 set_bit(__E1000_TESTING, &adapter->flags);
1655 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1656 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 7c6888c58c21..b64203458e9a 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -46,7 +46,8 @@ static int32_t e1000_check_polarity(struct e1000_hw *hw, e1000_rev_polarity *pol
46static void e1000_clear_hw_cntrs(struct e1000_hw *hw); 46static void e1000_clear_hw_cntrs(struct e1000_hw *hw);
47static void e1000_clear_vfta(struct e1000_hw *hw); 47static void e1000_clear_vfta(struct e1000_hw *hw);
48static int32_t e1000_commit_shadow_ram(struct e1000_hw *hw); 48static int32_t e1000_commit_shadow_ram(struct e1000_hw *hw);
49static int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw, boolean_t link_up); 49static int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw,
50 bool link_up);
50static int32_t e1000_config_fc_after_link_up(struct e1000_hw *hw); 51static int32_t e1000_config_fc_after_link_up(struct e1000_hw *hw);
51static int32_t e1000_detect_gig_phy(struct e1000_hw *hw); 52static int32_t e1000_detect_gig_phy(struct e1000_hw *hw);
52static int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank); 53static int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank);
@@ -62,7 +63,7 @@ static int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32
62static int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw); 63static int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw);
63static void e1000_init_rx_addrs(struct e1000_hw *hw); 64static void e1000_init_rx_addrs(struct e1000_hw *hw);
64static void e1000_initialize_hardware_bits(struct e1000_hw *hw); 65static void e1000_initialize_hardware_bits(struct e1000_hw *hw);
65static boolean_t e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw); 66static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
66static int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw); 67static int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
67static int32_t e1000_mng_enable_host_if(struct e1000_hw *hw); 68static int32_t e1000_mng_enable_host_if(struct e1000_hw *hw);
68static int32_t e1000_mng_host_if_write(struct e1000_hw *hw, uint8_t *buffer, uint16_t length, uint16_t offset, uint8_t *sum); 69static int32_t e1000_mng_host_if_write(struct e1000_hw *hw, uint8_t *buffer, uint16_t length, uint16_t offset, uint8_t *sum);
@@ -84,8 +85,8 @@ static int32_t e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32
84static int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data); 85static int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
85static int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data); 86static int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
86static void e1000_release_software_flag(struct e1000_hw *hw); 87static void e1000_release_software_flag(struct e1000_hw *hw);
87static int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active); 88static int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
88static int32_t e1000_set_d0_lplu_state(struct e1000_hw *hw, boolean_t active); 89static int32_t e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
89static int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop); 90static int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop);
90static void e1000_set_pci_express_master_disable(struct e1000_hw *hw); 91static void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
91static int32_t e1000_wait_autoneg(struct e1000_hw *hw); 92static int32_t e1000_wait_autoneg(struct e1000_hw *hw);
@@ -425,22 +426,22 @@ e1000_set_mac_type(struct e1000_hw *hw)
425 426
426 switch (hw->mac_type) { 427 switch (hw->mac_type) {
427 case e1000_ich8lan: 428 case e1000_ich8lan:
428 hw->swfwhw_semaphore_present = TRUE; 429 hw->swfwhw_semaphore_present = true;
429 hw->asf_firmware_present = TRUE; 430 hw->asf_firmware_present = true;
430 break; 431 break;
431 case e1000_80003es2lan: 432 case e1000_80003es2lan:
432 hw->swfw_sync_present = TRUE; 433 hw->swfw_sync_present = true;
433 /* fall through */ 434 /* fall through */
434 case e1000_82571: 435 case e1000_82571:
435 case e1000_82572: 436 case e1000_82572:
436 case e1000_82573: 437 case e1000_82573:
437 hw->eeprom_semaphore_present = TRUE; 438 hw->eeprom_semaphore_present = true;
438 /* fall through */ 439 /* fall through */
439 case e1000_82541: 440 case e1000_82541:
440 case e1000_82547: 441 case e1000_82547:
441 case e1000_82541_rev_2: 442 case e1000_82541_rev_2:
442 case e1000_82547_rev_2: 443 case e1000_82547_rev_2:
443 hw->asf_firmware_present = TRUE; 444 hw->asf_firmware_present = true;
444 break; 445 break;
445 default: 446 default:
446 break; 447 break;
@@ -450,20 +451,20 @@ e1000_set_mac_type(struct e1000_hw *hw)
450 * FD mode 451 * FD mode
451 */ 452 */
452 if (hw->mac_type == e1000_82543) 453 if (hw->mac_type == e1000_82543)
453 hw->bad_tx_carr_stats_fd = TRUE; 454 hw->bad_tx_carr_stats_fd = true;
454 455
455 /* capable of receiving management packets to the host */ 456 /* capable of receiving management packets to the host */
456 if (hw->mac_type >= e1000_82571) 457 if (hw->mac_type >= e1000_82571)
457 hw->has_manc2h = TRUE; 458 hw->has_manc2h = true;
458 459
459 /* In rare occasions, ESB2 systems would end up started without 460 /* In rare occasions, ESB2 systems would end up started without
460 * the RX unit being turned on. 461 * the RX unit being turned on.
461 */ 462 */
462 if (hw->mac_type == e1000_80003es2lan) 463 if (hw->mac_type == e1000_80003es2lan)
463 hw->rx_needs_kicking = TRUE; 464 hw->rx_needs_kicking = true;
464 465
465 if (hw->mac_type > e1000_82544) 466 if (hw->mac_type > e1000_82544)
466 hw->has_smbus = TRUE; 467 hw->has_smbus = true;
467 468
468 return E1000_SUCCESS; 469 return E1000_SUCCESS;
469} 470}
@@ -482,7 +483,7 @@ e1000_set_media_type(struct e1000_hw *hw)
482 483
483 if (hw->mac_type != e1000_82543) { 484 if (hw->mac_type != e1000_82543) {
484 /* tbi_compatibility is only valid on 82543 */ 485 /* tbi_compatibility is only valid on 82543 */
485 hw->tbi_compatibility_en = FALSE; 486 hw->tbi_compatibility_en = false;
486 } 487 }
487 488
488 switch (hw->device_id) { 489 switch (hw->device_id) {
@@ -513,7 +514,7 @@ e1000_set_media_type(struct e1000_hw *hw)
513 if (status & E1000_STATUS_TBIMODE) { 514 if (status & E1000_STATUS_TBIMODE) {
514 hw->media_type = e1000_media_type_fiber; 515 hw->media_type = e1000_media_type_fiber;
515 /* tbi_compatibility not valid on fiber */ 516 /* tbi_compatibility not valid on fiber */
516 hw->tbi_compatibility_en = FALSE; 517 hw->tbi_compatibility_en = false;
517 } else { 518 } else {
518 hw->media_type = e1000_media_type_copper; 519 hw->media_type = e1000_media_type_copper;
519 } 520 }
@@ -569,7 +570,7 @@ e1000_reset_hw(struct e1000_hw *hw)
569 E1000_WRITE_FLUSH(hw); 570 E1000_WRITE_FLUSH(hw);
570 571
571 /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ 572 /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
572 hw->tbi_compatibility_on = FALSE; 573 hw->tbi_compatibility_on = false;
573 574
574 /* Delay to allow any outstanding PCI transactions to complete before 575 /* Delay to allow any outstanding PCI transactions to complete before
575 * resetting the device 576 * resetting the device
@@ -682,7 +683,7 @@ e1000_reset_hw(struct e1000_hw *hw)
682 msleep(20); 683 msleep(20);
683 break; 684 break;
684 case e1000_82573: 685 case e1000_82573:
685 if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) { 686 if (!e1000_is_onboard_nvm_eeprom(hw)) {
686 udelay(10); 687 udelay(10);
687 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); 688 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
688 ctrl_ext |= E1000_CTRL_EXT_EE_RST; 689 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
@@ -1428,7 +1429,7 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
1428 if (hw->mac_type <= e1000_82543 || 1429 if (hw->mac_type <= e1000_82543 ||
1429 hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || 1430 hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
1430 hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) 1431 hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
1431 hw->phy_reset_disable = FALSE; 1432 hw->phy_reset_disable = false;
1432 1433
1433 return E1000_SUCCESS; 1434 return E1000_SUCCESS;
1434} 1435}
@@ -1470,7 +1471,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
1470 /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ 1471 /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
1471 if (hw->phy_type == e1000_phy_igp) { 1472 if (hw->phy_type == e1000_phy_igp) {
1472 /* disable lplu d3 during driver init */ 1473 /* disable lplu d3 during driver init */
1473 ret_val = e1000_set_d3_lplu_state(hw, FALSE); 1474 ret_val = e1000_set_d3_lplu_state(hw, false);
1474 if (ret_val) { 1475 if (ret_val) {
1475 DEBUGOUT("Error Disabling LPLU D3\n"); 1476 DEBUGOUT("Error Disabling LPLU D3\n");
1476 return ret_val; 1477 return ret_val;
@@ -1478,7 +1479,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
1478 } 1479 }
1479 1480
1480 /* disable lplu d0 during driver init */ 1481 /* disable lplu d0 during driver init */
1481 ret_val = e1000_set_d0_lplu_state(hw, FALSE); 1482 ret_val = e1000_set_d0_lplu_state(hw, false);
1482 if (ret_val) { 1483 if (ret_val) {
1483 DEBUGOUT("Error Disabling LPLU D0\n"); 1484 DEBUGOUT("Error Disabling LPLU D0\n");
1484 return ret_val; 1485 return ret_val;
@@ -1691,7 +1692,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
1691 * firmware will have already initialized them. We only initialize 1692 * firmware will have already initialized them. We only initialize
1692 * them if the HW is not in IAMT mode. 1693 * them if the HW is not in IAMT mode.
1693 */ 1694 */
1694 if (e1000_check_mng_mode(hw) == FALSE) { 1695 if (!e1000_check_mng_mode(hw)) {
1695 /* Enable Electrical Idle on the PHY */ 1696 /* Enable Electrical Idle on the PHY */
1696 phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; 1697 phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
1697 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, 1698 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
@@ -1892,7 +1893,7 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
1892 } 1893 }
1893 } 1894 }
1894 1895
1895 hw->get_link_status = TRUE; 1896 hw->get_link_status = true;
1896 1897
1897 return E1000_SUCCESS; 1898 return E1000_SUCCESS;
1898} 1899}
@@ -1932,7 +1933,7 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
1932 1933
1933 /* Config DSP to improve Giga link quality */ 1934 /* Config DSP to improve Giga link quality */
1934 if (hw->phy_type == e1000_phy_igp) { 1935 if (hw->phy_type == e1000_phy_igp) {
1935 ret_val = e1000_config_dsp_after_link_change(hw, TRUE); 1936 ret_val = e1000_config_dsp_after_link_change(hw, true);
1936 if (ret_val) { 1937 if (ret_val) {
1937 DEBUGOUT("Error Configuring DSP after link up\n"); 1938 DEBUGOUT("Error Configuring DSP after link up\n");
1938 return ret_val; 1939 return ret_val;
@@ -2923,7 +2924,7 @@ e1000_check_for_link(struct e1000_hw *hw)
2923 if (hw->media_type == e1000_media_type_fiber) { 2924 if (hw->media_type == e1000_media_type_fiber) {
2924 signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; 2925 signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
2925 if (status & E1000_STATUS_LU) 2926 if (status & E1000_STATUS_LU)
2926 hw->get_link_status = FALSE; 2927 hw->get_link_status = false;
2927 } 2928 }
2928 } 2929 }
2929 2930
@@ -2947,7 +2948,7 @@ e1000_check_for_link(struct e1000_hw *hw)
2947 return ret_val; 2948 return ret_val;
2948 2949
2949 if (phy_data & MII_SR_LINK_STATUS) { 2950 if (phy_data & MII_SR_LINK_STATUS) {
2950 hw->get_link_status = FALSE; 2951 hw->get_link_status = false;
2951 /* Check if there was DownShift, must be checked immediately after 2952 /* Check if there was DownShift, must be checked immediately after
2952 * link-up */ 2953 * link-up */
2953 e1000_check_downshift(hw); 2954 e1000_check_downshift(hw);
@@ -2973,7 +2974,7 @@ e1000_check_for_link(struct e1000_hw *hw)
2973 2974
2974 } else { 2975 } else {
2975 /* No link detected */ 2976 /* No link detected */
2976 e1000_config_dsp_after_link_change(hw, FALSE); 2977 e1000_config_dsp_after_link_change(hw, false);
2977 return 0; 2978 return 0;
2978 } 2979 }
2979 2980
@@ -2983,7 +2984,7 @@ e1000_check_for_link(struct e1000_hw *hw)
2983 if (!hw->autoneg) return -E1000_ERR_CONFIG; 2984 if (!hw->autoneg) return -E1000_ERR_CONFIG;
2984 2985
2985 /* optimize the dsp settings for the igp phy */ 2986 /* optimize the dsp settings for the igp phy */
2986 e1000_config_dsp_after_link_change(hw, TRUE); 2987 e1000_config_dsp_after_link_change(hw, true);
2987 2988
2988 /* We have a M88E1000 PHY and Auto-Neg is enabled. If we 2989 /* We have a M88E1000 PHY and Auto-Neg is enabled. If we
2989 * have Si on board that is 82544 or newer, Auto 2990 * have Si on board that is 82544 or newer, Auto
@@ -3036,7 +3037,7 @@ e1000_check_for_link(struct e1000_hw *hw)
3036 rctl = E1000_READ_REG(hw, RCTL); 3037 rctl = E1000_READ_REG(hw, RCTL);
3037 rctl &= ~E1000_RCTL_SBP; 3038 rctl &= ~E1000_RCTL_SBP;
3038 E1000_WRITE_REG(hw, RCTL, rctl); 3039 E1000_WRITE_REG(hw, RCTL, rctl);
3039 hw->tbi_compatibility_on = FALSE; 3040 hw->tbi_compatibility_on = false;
3040 } 3041 }
3041 } else { 3042 } else {
3042 /* If TBI compatibility is was previously off, turn it on. For 3043 /* If TBI compatibility is was previously off, turn it on. For
@@ -3045,7 +3046,7 @@ e1000_check_for_link(struct e1000_hw *hw)
3045 * will look like CRC errors to to the hardware. 3046 * will look like CRC errors to to the hardware.
3046 */ 3047 */
3047 if (!hw->tbi_compatibility_on) { 3048 if (!hw->tbi_compatibility_on) {
3048 hw->tbi_compatibility_on = TRUE; 3049 hw->tbi_compatibility_on = true;
3049 rctl = E1000_READ_REG(hw, RCTL); 3050 rctl = E1000_READ_REG(hw, RCTL);
3050 rctl |= E1000_RCTL_SBP; 3051 rctl |= E1000_RCTL_SBP;
3051 E1000_WRITE_REG(hw, RCTL, rctl); 3052 E1000_WRITE_REG(hw, RCTL, rctl);
@@ -3098,7 +3099,7 @@ e1000_check_for_link(struct e1000_hw *hw)
3098 E1000_WRITE_REG(hw, TXCW, hw->txcw); 3099 E1000_WRITE_REG(hw, TXCW, hw->txcw);
3099 E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU)); 3100 E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU));
3100 3101
3101 hw->serdes_link_down = FALSE; 3102 hw->serdes_link_down = false;
3102 } 3103 }
3103 /* If we force link for non-auto-negotiation switch, check link status 3104 /* If we force link for non-auto-negotiation switch, check link status
3104 * based on MAC synchronization for internal serdes media type. 3105 * based on MAC synchronization for internal serdes media type.
@@ -3109,11 +3110,11 @@ e1000_check_for_link(struct e1000_hw *hw)
3109 udelay(10); 3110 udelay(10);
3110 if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) { 3111 if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) {
3111 if (!(rxcw & E1000_RXCW_IV)) { 3112 if (!(rxcw & E1000_RXCW_IV)) {
3112 hw->serdes_link_down = FALSE; 3113 hw->serdes_link_down = false;
3113 DEBUGOUT("SERDES: Link is up.\n"); 3114 DEBUGOUT("SERDES: Link is up.\n");
3114 } 3115 }
3115 } else { 3116 } else {
3116 hw->serdes_link_down = TRUE; 3117 hw->serdes_link_down = true;
3117 DEBUGOUT("SERDES: Link is down.\n"); 3118 DEBUGOUT("SERDES: Link is down.\n");
3118 } 3119 }
3119 } 3120 }
@@ -4044,7 +4045,7 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
4044{ 4045{
4045 int32_t phy_init_status, ret_val; 4046 int32_t phy_init_status, ret_val;
4046 uint16_t phy_id_high, phy_id_low; 4047 uint16_t phy_id_high, phy_id_low;
4047 boolean_t match = FALSE; 4048 bool match = false;
4048 4049
4049 DEBUGFUNC("e1000_detect_gig_phy"); 4050 DEBUGFUNC("e1000_detect_gig_phy");
4050 4051
@@ -4086,35 +4087,35 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
4086 4087
4087 switch (hw->mac_type) { 4088 switch (hw->mac_type) {
4088 case e1000_82543: 4089 case e1000_82543:
4089 if (hw->phy_id == M88E1000_E_PHY_ID) match = TRUE; 4090 if (hw->phy_id == M88E1000_E_PHY_ID) match = true;
4090 break; 4091 break;
4091 case e1000_82544: 4092 case e1000_82544:
4092 if (hw->phy_id == M88E1000_I_PHY_ID) match = TRUE; 4093 if (hw->phy_id == M88E1000_I_PHY_ID) match = true;
4093 break; 4094 break;
4094 case e1000_82540: 4095 case e1000_82540:
4095 case e1000_82545: 4096 case e1000_82545:
4096 case e1000_82545_rev_3: 4097 case e1000_82545_rev_3:
4097 case e1000_82546: 4098 case e1000_82546:
4098 case e1000_82546_rev_3: 4099 case e1000_82546_rev_3:
4099 if (hw->phy_id == M88E1011_I_PHY_ID) match = TRUE; 4100 if (hw->phy_id == M88E1011_I_PHY_ID) match = true;
4100 break; 4101 break;
4101 case e1000_82541: 4102 case e1000_82541:
4102 case e1000_82541_rev_2: 4103 case e1000_82541_rev_2:
4103 case e1000_82547: 4104 case e1000_82547:
4104 case e1000_82547_rev_2: 4105 case e1000_82547_rev_2:
4105 if (hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE; 4106 if (hw->phy_id == IGP01E1000_I_PHY_ID) match = true;
4106 break; 4107 break;
4107 case e1000_82573: 4108 case e1000_82573:
4108 if (hw->phy_id == M88E1111_I_PHY_ID) match = TRUE; 4109 if (hw->phy_id == M88E1111_I_PHY_ID) match = true;
4109 break; 4110 break;
4110 case e1000_80003es2lan: 4111 case e1000_80003es2lan:
4111 if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE; 4112 if (hw->phy_id == GG82563_E_PHY_ID) match = true;
4112 break; 4113 break;
4113 case e1000_ich8lan: 4114 case e1000_ich8lan:
4114 if (hw->phy_id == IGP03E1000_E_PHY_ID) match = TRUE; 4115 if (hw->phy_id == IGP03E1000_E_PHY_ID) match = true;
4115 if (hw->phy_id == IFE_E_PHY_ID) match = TRUE; 4116 if (hw->phy_id == IFE_E_PHY_ID) match = true;
4116 if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = TRUE; 4117 if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = true;
4117 if (hw->phy_id == IFE_C_E_PHY_ID) match = TRUE; 4118 if (hw->phy_id == IFE_C_E_PHY_ID) match = true;
4118 break; 4119 break;
4119 default: 4120 default:
4120 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); 4121 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
@@ -4455,8 +4456,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4455 eeprom->opcode_bits = 3; 4456 eeprom->opcode_bits = 3;
4456 eeprom->address_bits = 6; 4457 eeprom->address_bits = 6;
4457 eeprom->delay_usec = 50; 4458 eeprom->delay_usec = 50;
4458 eeprom->use_eerd = FALSE; 4459 eeprom->use_eerd = false;
4459 eeprom->use_eewr = FALSE; 4460 eeprom->use_eewr = false;
4460 break; 4461 break;
4461 case e1000_82540: 4462 case e1000_82540:
4462 case e1000_82545: 4463 case e1000_82545:
@@ -4473,8 +4474,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4473 eeprom->word_size = 64; 4474 eeprom->word_size = 64;
4474 eeprom->address_bits = 6; 4475 eeprom->address_bits = 6;
4475 } 4476 }
4476 eeprom->use_eerd = FALSE; 4477 eeprom->use_eerd = false;
4477 eeprom->use_eewr = FALSE; 4478 eeprom->use_eewr = false;
4478 break; 4479 break;
4479 case e1000_82541: 4480 case e1000_82541:
4480 case e1000_82541_rev_2: 4481 case e1000_82541_rev_2:
@@ -4503,8 +4504,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4503 eeprom->address_bits = 6; 4504 eeprom->address_bits = 6;
4504 } 4505 }
4505 } 4506 }
4506 eeprom->use_eerd = FALSE; 4507 eeprom->use_eerd = false;
4507 eeprom->use_eewr = FALSE; 4508 eeprom->use_eewr = false;
4508 break; 4509 break;
4509 case e1000_82571: 4510 case e1000_82571:
4510 case e1000_82572: 4511 case e1000_82572:
@@ -4518,8 +4519,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4518 eeprom->page_size = 8; 4519 eeprom->page_size = 8;
4519 eeprom->address_bits = 8; 4520 eeprom->address_bits = 8;
4520 } 4521 }
4521 eeprom->use_eerd = FALSE; 4522 eeprom->use_eerd = false;
4522 eeprom->use_eewr = FALSE; 4523 eeprom->use_eewr = false;
4523 break; 4524 break;
4524 case e1000_82573: 4525 case e1000_82573:
4525 eeprom->type = e1000_eeprom_spi; 4526 eeprom->type = e1000_eeprom_spi;
@@ -4532,9 +4533,9 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4532 eeprom->page_size = 8; 4533 eeprom->page_size = 8;
4533 eeprom->address_bits = 8; 4534 eeprom->address_bits = 8;
4534 } 4535 }
4535 eeprom->use_eerd = TRUE; 4536 eeprom->use_eerd = true;
4536 eeprom->use_eewr = TRUE; 4537 eeprom->use_eewr = true;
4537 if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) { 4538 if (!e1000_is_onboard_nvm_eeprom(hw)) {
4538 eeprom->type = e1000_eeprom_flash; 4539 eeprom->type = e1000_eeprom_flash;
4539 eeprom->word_size = 2048; 4540 eeprom->word_size = 2048;
4540 4541
@@ -4555,8 +4556,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4555 eeprom->page_size = 8; 4556 eeprom->page_size = 8;
4556 eeprom->address_bits = 8; 4557 eeprom->address_bits = 8;
4557 } 4558 }
4558 eeprom->use_eerd = TRUE; 4559 eeprom->use_eerd = true;
4559 eeprom->use_eewr = FALSE; 4560 eeprom->use_eewr = false;
4560 break; 4561 break;
4561 case e1000_ich8lan: 4562 case e1000_ich8lan:
4562 { 4563 {
@@ -4564,15 +4565,15 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4564 uint32_t flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG); 4565 uint32_t flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG);
4565 4566
4566 eeprom->type = e1000_eeprom_ich8; 4567 eeprom->type = e1000_eeprom_ich8;
4567 eeprom->use_eerd = FALSE; 4568 eeprom->use_eerd = false;
4568 eeprom->use_eewr = FALSE; 4569 eeprom->use_eewr = false;
4569 eeprom->word_size = E1000_SHADOW_RAM_WORDS; 4570 eeprom->word_size = E1000_SHADOW_RAM_WORDS;
4570 4571
4571 /* Zero the shadow RAM structure. But don't load it from NVM 4572 /* Zero the shadow RAM structure. But don't load it from NVM
4572 * so as to save time for driver init */ 4573 * so as to save time for driver init */
4573 if (hw->eeprom_shadow_ram != NULL) { 4574 if (hw->eeprom_shadow_ram != NULL) {
4574 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { 4575 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4575 hw->eeprom_shadow_ram[i].modified = FALSE; 4576 hw->eeprom_shadow_ram[i].modified = false;
4576 hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF; 4577 hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
4577 } 4578 }
4578 } 4579 }
@@ -4994,15 +4995,14 @@ e1000_read_eeprom(struct e1000_hw *hw,
4994 * directly. In this case, we need to acquire the EEPROM so that 4995 * directly. In this case, we need to acquire the EEPROM so that
4995 * FW or other port software does not interrupt. 4996 * FW or other port software does not interrupt.
4996 */ 4997 */
4997 if (e1000_is_onboard_nvm_eeprom(hw) == TRUE && 4998 if (e1000_is_onboard_nvm_eeprom(hw) && !hw->eeprom.use_eerd) {
4998 hw->eeprom.use_eerd == FALSE) {
4999 /* Prepare the EEPROM for bit-bang reading */ 4999 /* Prepare the EEPROM for bit-bang reading */
5000 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) 5000 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
5001 return -E1000_ERR_EEPROM; 5001 return -E1000_ERR_EEPROM;
5002 } 5002 }
5003 5003
5004 /* Eerd register EEPROM access requires no eeprom aquire/release */ 5004 /* Eerd register EEPROM access requires no eeprom aquire/release */
5005 if (eeprom->use_eerd == TRUE) 5005 if (eeprom->use_eerd)
5006 return e1000_read_eeprom_eerd(hw, offset, words, data); 5006 return e1000_read_eeprom_eerd(hw, offset, words, data);
5007 5007
5008 /* ICH EEPROM access is done via the ICH flash controller */ 5008 /* ICH EEPROM access is done via the ICH flash controller */
@@ -5171,7 +5171,7 @@ e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
5171* 5171*
5172* hw - Struct containing variables accessed by shared code 5172* hw - Struct containing variables accessed by shared code
5173****************************************************************************/ 5173****************************************************************************/
5174static boolean_t 5174static bool
5175e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) 5175e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
5176{ 5176{
5177 uint32_t eecd = 0; 5177 uint32_t eecd = 0;
@@ -5179,7 +5179,7 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
5179 DEBUGFUNC("e1000_is_onboard_nvm_eeprom"); 5179 DEBUGFUNC("e1000_is_onboard_nvm_eeprom");
5180 5180
5181 if (hw->mac_type == e1000_ich8lan) 5181 if (hw->mac_type == e1000_ich8lan)
5182 return FALSE; 5182 return false;
5183 5183
5184 if (hw->mac_type == e1000_82573) { 5184 if (hw->mac_type == e1000_82573) {
5185 eecd = E1000_READ_REG(hw, EECD); 5185 eecd = E1000_READ_REG(hw, EECD);
@@ -5189,10 +5189,10 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
5189 5189
5190 /* If both bits are set, device is Flash type */ 5190 /* If both bits are set, device is Flash type */
5191 if (eecd == 0x03) { 5191 if (eecd == 0x03) {
5192 return FALSE; 5192 return false;
5193 } 5193 }
5194 } 5194 }
5195 return TRUE; 5195 return true;
5196} 5196}
5197 5197
5198/****************************************************************************** 5198/******************************************************************************
@@ -5212,8 +5212,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
5212 5212
5213 DEBUGFUNC("e1000_validate_eeprom_checksum"); 5213 DEBUGFUNC("e1000_validate_eeprom_checksum");
5214 5214
5215 if ((hw->mac_type == e1000_82573) && 5215 if ((hw->mac_type == e1000_82573) && !e1000_is_onboard_nvm_eeprom(hw)) {
5216 (e1000_is_onboard_nvm_eeprom(hw) == FALSE)) {
5217 /* Check bit 4 of word 10h. If it is 0, firmware is done updating 5216 /* Check bit 4 of word 10h. If it is 0, firmware is done updating
5218 * 10h-12h. Checksum may need to be fixed. */ 5217 * 10h-12h. Checksum may need to be fixed. */
5219 e1000_read_eeprom(hw, 0x10, 1, &eeprom_data); 5218 e1000_read_eeprom(hw, 0x10, 1, &eeprom_data);
@@ -5339,7 +5338,7 @@ e1000_write_eeprom(struct e1000_hw *hw,
5339 } 5338 }
5340 5339
5341 /* 82573 writes only through eewr */ 5340 /* 82573 writes only through eewr */
5342 if (eeprom->use_eewr == TRUE) 5341 if (eeprom->use_eewr)
5343 return e1000_write_eeprom_eewr(hw, offset, words, data); 5342 return e1000_write_eeprom_eewr(hw, offset, words, data);
5344 5343
5345 if (eeprom->type == e1000_eeprom_ich8) 5344 if (eeprom->type == e1000_eeprom_ich8)
@@ -5536,7 +5535,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
5536 uint32_t new_bank_offset = 0; 5535 uint32_t new_bank_offset = 0;
5537 uint8_t low_byte = 0; 5536 uint8_t low_byte = 0;
5538 uint8_t high_byte = 0; 5537 uint8_t high_byte = 0;
5539 boolean_t sector_write_failed = FALSE; 5538 bool sector_write_failed = false;
5540 5539
5541 if (hw->mac_type == e1000_82573) { 5540 if (hw->mac_type == e1000_82573) {
5542 /* The flop register will be used to determine if flash type is STM */ 5541 /* The flop register will be used to determine if flash type is STM */
@@ -5588,21 +5587,21 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
5588 e1000_erase_ich8_4k_segment(hw, 0); 5587 e1000_erase_ich8_4k_segment(hw, 0);
5589 } 5588 }
5590 5589
5591 sector_write_failed = FALSE; 5590 sector_write_failed = false;
5592 /* Loop for every byte in the shadow RAM, 5591 /* Loop for every byte in the shadow RAM,
5593 * which is in units of words. */ 5592 * which is in units of words. */
5594 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { 5593 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
5595 /* Determine whether to write the value stored 5594 /* Determine whether to write the value stored
5596 * in the other NVM bank or a modified value stored 5595 * in the other NVM bank or a modified value stored
5597 * in the shadow RAM */ 5596 * in the shadow RAM */
5598 if (hw->eeprom_shadow_ram[i].modified == TRUE) { 5597 if (hw->eeprom_shadow_ram[i].modified) {
5599 low_byte = (uint8_t)hw->eeprom_shadow_ram[i].eeprom_word; 5598 low_byte = (uint8_t)hw->eeprom_shadow_ram[i].eeprom_word;
5600 udelay(100); 5599 udelay(100);
5601 error = e1000_verify_write_ich8_byte(hw, 5600 error = e1000_verify_write_ich8_byte(hw,
5602 (i << 1) + new_bank_offset, low_byte); 5601 (i << 1) + new_bank_offset, low_byte);
5603 5602
5604 if (error != E1000_SUCCESS) 5603 if (error != E1000_SUCCESS)
5605 sector_write_failed = TRUE; 5604 sector_write_failed = true;
5606 else { 5605 else {
5607 high_byte = 5606 high_byte =
5608 (uint8_t)(hw->eeprom_shadow_ram[i].eeprom_word >> 8); 5607 (uint8_t)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
@@ -5616,7 +5615,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
5616 (i << 1) + new_bank_offset, low_byte); 5615 (i << 1) + new_bank_offset, low_byte);
5617 5616
5618 if (error != E1000_SUCCESS) 5617 if (error != E1000_SUCCESS)
5619 sector_write_failed = TRUE; 5618 sector_write_failed = true;
5620 else { 5619 else {
5621 e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1, 5620 e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
5622 &high_byte); 5621 &high_byte);
@@ -5624,10 +5623,10 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
5624 } 5623 }
5625 } 5624 }
5626 5625
5627 /* If the write of the low byte was successful, go ahread and 5626 /* If the write of the low byte was successful, go ahead and
5628 * write the high byte while checking to make sure that if it 5627 * write the high byte while checking to make sure that if it
5629 * is the signature byte, then it is handled properly */ 5628 * is the signature byte, then it is handled properly */
5630 if (sector_write_failed == FALSE) { 5629 if (!sector_write_failed) {
5631 /* If the word is 0x13, then make sure the signature bits 5630 /* If the word is 0x13, then make sure the signature bits
5632 * (15:14) are 11b until the commit has completed. 5631 * (15:14) are 11b until the commit has completed.
5633 * This will allow us to write 10b which indicates the 5632 * This will allow us to write 10b which indicates the
@@ -5640,7 +5639,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
5640 error = e1000_verify_write_ich8_byte(hw, 5639 error = e1000_verify_write_ich8_byte(hw,
5641 (i << 1) + new_bank_offset + 1, high_byte); 5640 (i << 1) + new_bank_offset + 1, high_byte);
5642 if (error != E1000_SUCCESS) 5641 if (error != E1000_SUCCESS)
5643 sector_write_failed = TRUE; 5642 sector_write_failed = true;
5644 5643
5645 } else { 5644 } else {
5646 /* If the write failed then break from the loop and 5645 /* If the write failed then break from the loop and
@@ -5651,7 +5650,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
5651 5650
5652 /* Don't bother writing the segment valid bits if sector 5651 /* Don't bother writing the segment valid bits if sector
5653 * programming failed. */ 5652 * programming failed. */
5654 if (sector_write_failed == FALSE) { 5653 if (!sector_write_failed) {
5655 /* Finally validate the new segment by setting bit 15:14 5654 /* Finally validate the new segment by setting bit 15:14
5656 * to 10b in word 0x13 , this can be done without an 5655 * to 10b in word 0x13 , this can be done without an
5657 * erase as well since these bits are 11 to start with 5656 * erase as well since these bits are 11 to start with
@@ -5673,7 +5672,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
5673 5672
5674 /* Clear the now not used entry in the cache */ 5673 /* Clear the now not used entry in the cache */
5675 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { 5674 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
5676 hw->eeprom_shadow_ram[i].modified = FALSE; 5675 hw->eeprom_shadow_ram[i].modified = false;
5677 hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF; 5676 hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
5678 } 5677 }
5679 } 5678 }
@@ -5750,7 +5749,7 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
5750 /* Reserve a spot for the Locally Administered Address to work around 5749 /* Reserve a spot for the Locally Administered Address to work around
5751 * an 82571 issue in which a reset on one port will reload the MAC on 5750 * an 82571 issue in which a reset on one port will reload the MAC on
5752 * the other port. */ 5751 * the other port. */
5753 if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE)) 5752 if ((hw->mac_type == e1000_82571) && (hw->laa_is_present))
5754 rar_num -= 1; 5753 rar_num -= 1;
5755 if (hw->mac_type == e1000_ich8lan) 5754 if (hw->mac_type == e1000_ich8lan)
5756 rar_num = E1000_RAR_ENTRIES_ICH8LAN; 5755 rar_num = E1000_RAR_ENTRIES_ICH8LAN;
@@ -5922,7 +5921,7 @@ e1000_rar_set(struct e1000_hw *hw,
5922 case e1000_82571: 5921 case e1000_82571:
5923 case e1000_82572: 5922 case e1000_82572:
5924 case e1000_80003es2lan: 5923 case e1000_80003es2lan:
5925 if (hw->leave_av_bit_off == TRUE) 5924 if (hw->leave_av_bit_off)
5926 break; 5925 break;
5927 default: 5926 default:
5928 /* Indicate to hardware the Address is Valid. */ 5927 /* Indicate to hardware the Address is Valid. */
@@ -6425,7 +6424,7 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
6425 * hw - Struct containing variables accessed by shared code 6424 * hw - Struct containing variables accessed by shared code
6426 * 6425 *
6427 * Call this after e1000_init_hw. You may override the IFS defaults by setting 6426 * Call this after e1000_init_hw. You may override the IFS defaults by setting
6428 * hw->ifs_params_forced to TRUE. However, you must initialize hw-> 6427 * hw->ifs_params_forced to true. However, you must initialize hw->
6429 * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio 6428 * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
6430 * before calling this function. 6429 * before calling this function.
6431 *****************************************************************************/ 6430 *****************************************************************************/
@@ -6442,7 +6441,7 @@ e1000_reset_adaptive(struct e1000_hw *hw)
6442 hw->ifs_step_size = IFS_STEP; 6441 hw->ifs_step_size = IFS_STEP;
6443 hw->ifs_ratio = IFS_RATIO; 6442 hw->ifs_ratio = IFS_RATIO;
6444 } 6443 }
6445 hw->in_ifs_mode = FALSE; 6444 hw->in_ifs_mode = false;
6446 E1000_WRITE_REG(hw, AIT, 0); 6445 E1000_WRITE_REG(hw, AIT, 0);
6447 } else { 6446 } else {
6448 DEBUGOUT("Not in Adaptive IFS mode!\n"); 6447 DEBUGOUT("Not in Adaptive IFS mode!\n");
@@ -6465,7 +6464,7 @@ e1000_update_adaptive(struct e1000_hw *hw)
6465 if (hw->adaptive_ifs) { 6464 if (hw->adaptive_ifs) {
6466 if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { 6465 if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
6467 if (hw->tx_packet_delta > MIN_NUM_XMITS) { 6466 if (hw->tx_packet_delta > MIN_NUM_XMITS) {
6468 hw->in_ifs_mode = TRUE; 6467 hw->in_ifs_mode = true;
6469 if (hw->current_ifs_val < hw->ifs_max_val) { 6468 if (hw->current_ifs_val < hw->ifs_max_val) {
6470 if (hw->current_ifs_val == 0) 6469 if (hw->current_ifs_val == 0)
6471 hw->current_ifs_val = hw->ifs_min_val; 6470 hw->current_ifs_val = hw->ifs_min_val;
@@ -6477,7 +6476,7 @@ e1000_update_adaptive(struct e1000_hw *hw)
6477 } else { 6476 } else {
6478 if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { 6477 if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
6479 hw->current_ifs_val = 0; 6478 hw->current_ifs_val = 0;
6480 hw->in_ifs_mode = FALSE; 6479 hw->in_ifs_mode = false;
6481 E1000_WRITE_REG(hw, AIT, 0); 6480 E1000_WRITE_REG(hw, AIT, 0);
6482 } 6481 }
6483 } 6482 }
@@ -6968,7 +6967,7 @@ e1000_check_downshift(struct e1000_hw *hw)
6968 M88E1000_PSSR_DOWNSHIFT_SHIFT; 6967 M88E1000_PSSR_DOWNSHIFT_SHIFT;
6969 } else if (hw->phy_type == e1000_phy_ife) { 6968 } else if (hw->phy_type == e1000_phy_ife) {
6970 /* e1000_phy_ife supports 10/100 speed only */ 6969 /* e1000_phy_ife supports 10/100 speed only */
6971 hw->speed_downgraded = FALSE; 6970 hw->speed_downgraded = false;
6972 } 6971 }
6973 6972
6974 return E1000_SUCCESS; 6973 return E1000_SUCCESS;
@@ -6988,7 +6987,7 @@ e1000_check_downshift(struct e1000_hw *hw)
6988 6987
6989static int32_t 6988static int32_t
6990e1000_config_dsp_after_link_change(struct e1000_hw *hw, 6989e1000_config_dsp_after_link_change(struct e1000_hw *hw,
6991 boolean_t link_up) 6990 bool link_up)
6992{ 6991{
6993 int32_t ret_val; 6992 int32_t ret_val;
6994 uint16_t phy_data, phy_saved_data, speed, duplex, i; 6993 uint16_t phy_data, phy_saved_data, speed, duplex, i;
@@ -7198,7 +7197,7 @@ e1000_set_phy_mode(struct e1000_hw *hw)
7198 if (ret_val) 7197 if (ret_val)
7199 return ret_val; 7198 return ret_val;
7200 7199
7201 hw->phy_reset_disable = FALSE; 7200 hw->phy_reset_disable = false;
7202 } 7201 }
7203 } 7202 }
7204 7203
@@ -7221,7 +7220,7 @@ e1000_set_phy_mode(struct e1000_hw *hw)
7221 7220
7222static int32_t 7221static int32_t
7223e1000_set_d3_lplu_state(struct e1000_hw *hw, 7222e1000_set_d3_lplu_state(struct e1000_hw *hw,
7224 boolean_t active) 7223 bool active)
7225{ 7224{
7226 uint32_t phy_ctrl = 0; 7225 uint32_t phy_ctrl = 0;
7227 int32_t ret_val; 7226 int32_t ret_val;
@@ -7351,7 +7350,7 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
7351 7350
7352static int32_t 7351static int32_t
7353e1000_set_d0_lplu_state(struct e1000_hw *hw, 7352e1000_set_d0_lplu_state(struct e1000_hw *hw,
7354 boolean_t active) 7353 bool active)
7355{ 7354{
7356 uint32_t phy_ctrl = 0; 7355 uint32_t phy_ctrl = 0;
7357 int32_t ret_val; 7356 int32_t ret_val;
@@ -7689,9 +7688,9 @@ e1000_mng_write_commit(struct e1000_hw * hw)
7689/***************************************************************************** 7688/*****************************************************************************
7690 * This function checks the mode of the firmware. 7689 * This function checks the mode of the firmware.
7691 * 7690 *
7692 * returns - TRUE when the mode is IAMT or FALSE. 7691 * returns - true when the mode is IAMT or false.
7693 ****************************************************************************/ 7692 ****************************************************************************/
7694boolean_t 7693bool
7695e1000_check_mng_mode(struct e1000_hw *hw) 7694e1000_check_mng_mode(struct e1000_hw *hw)
7696{ 7695{
7697 uint32_t fwsm; 7696 uint32_t fwsm;
@@ -7701,12 +7700,12 @@ e1000_check_mng_mode(struct e1000_hw *hw)
7701 if (hw->mac_type == e1000_ich8lan) { 7700 if (hw->mac_type == e1000_ich8lan) {
7702 if ((fwsm & E1000_FWSM_MODE_MASK) == 7701 if ((fwsm & E1000_FWSM_MODE_MASK) ==
7703 (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT)) 7702 (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
7704 return TRUE; 7703 return true;
7705 } else if ((fwsm & E1000_FWSM_MODE_MASK) == 7704 } else if ((fwsm & E1000_FWSM_MODE_MASK) ==
7706 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)) 7705 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
7707 return TRUE; 7706 return true;
7708 7707
7709 return FALSE; 7708 return false;
7710} 7709}
7711 7710
7712 7711
@@ -7763,15 +7762,15 @@ e1000_calculate_mng_checksum(char *buffer, uint32_t length)
7763/***************************************************************************** 7762/*****************************************************************************
7764 * This function checks whether tx pkt filtering needs to be enabled or not. 7763 * This function checks whether tx pkt filtering needs to be enabled or not.
7765 * 7764 *
7766 * returns - TRUE for packet filtering or FALSE. 7765 * returns - true for packet filtering or false.
7767 ****************************************************************************/ 7766 ****************************************************************************/
7768boolean_t 7767bool
7769e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) 7768e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
7770{ 7769{
7771 /* called in init as well as watchdog timer functions */ 7770 /* called in init as well as watchdog timer functions */
7772 7771
7773 int32_t ret_val, checksum; 7772 int32_t ret_val, checksum;
7774 boolean_t tx_filter = FALSE; 7773 bool tx_filter = false;
7775 struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie); 7774 struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie);
7776 uint8_t *buffer = (uint8_t *) &(hw->mng_cookie); 7775 uint8_t *buffer = (uint8_t *) &(hw->mng_cookie);
7777 7776
@@ -7787,11 +7786,11 @@ e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
7787 E1000_MNG_DHCP_COOKIE_LENGTH)) { 7786 E1000_MNG_DHCP_COOKIE_LENGTH)) {
7788 if (hdr->status & 7787 if (hdr->status &
7789 E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT) 7788 E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT)
7790 tx_filter = TRUE; 7789 tx_filter = true;
7791 } else 7790 } else
7792 tx_filter = TRUE; 7791 tx_filter = true;
7793 } else 7792 } else
7794 tx_filter = TRUE; 7793 tx_filter = true;
7795 } 7794 }
7796 } 7795 }
7797 7796
@@ -7804,7 +7803,7 @@ e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
7804 * 7803 *
7805 * hw - Struct containing variables accessed by shared code 7804 * hw - Struct containing variables accessed by shared code
7806 * 7805 *
7807 * returns: - TRUE/FALSE 7806 * returns: - true/false
7808 * 7807 *
7809 *****************************************************************************/ 7808 *****************************************************************************/
7810uint32_t 7809uint32_t
@@ -7818,19 +7817,19 @@ e1000_enable_mng_pass_thru(struct e1000_hw *hw)
7818 7817
7819 if (!(manc & E1000_MANC_RCV_TCO_EN) || 7818 if (!(manc & E1000_MANC_RCV_TCO_EN) ||
7820 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) 7819 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
7821 return FALSE; 7820 return false;
7822 if (e1000_arc_subsystem_valid(hw) == TRUE) { 7821 if (e1000_arc_subsystem_valid(hw)) {
7823 fwsm = E1000_READ_REG(hw, FWSM); 7822 fwsm = E1000_READ_REG(hw, FWSM);
7824 factps = E1000_READ_REG(hw, FACTPS); 7823 factps = E1000_READ_REG(hw, FACTPS);
7825 7824
7826 if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) == 7825 if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) ==
7827 e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG)) 7826 e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG))
7828 return TRUE; 7827 return true;
7829 } else 7828 } else
7830 if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) 7829 if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
7831 return TRUE; 7830 return true;
7832 } 7831 }
7833 return FALSE; 7832 return false;
7834} 7833}
7835 7834
7836static int32_t 7835static int32_t
@@ -8264,14 +8263,14 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
8264 case e1000_80003es2lan: 8263 case e1000_80003es2lan:
8265 fwsm = E1000_READ_REG(hw, FWSM); 8264 fwsm = E1000_READ_REG(hw, FWSM);
8266 if ((fwsm & E1000_FWSM_MODE_MASK) != 0) 8265 if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
8267 return TRUE; 8266 return true;
8268 break; 8267 break;
8269 case e1000_ich8lan: 8268 case e1000_ich8lan:
8270 return TRUE; 8269 return true;
8271 default: 8270 default:
8272 break; 8271 break;
8273 } 8272 }
8274 return FALSE; 8273 return false;
8275} 8274}
8276 8275
8277 8276
@@ -8417,7 +8416,7 @@ e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8417 8416
8418 for (i = 0; i < words; i++) { 8417 for (i = 0; i < words; i++) {
8419 if (hw->eeprom_shadow_ram != NULL && 8418 if (hw->eeprom_shadow_ram != NULL &&
8420 hw->eeprom_shadow_ram[offset+i].modified == TRUE) { 8419 hw->eeprom_shadow_ram[offset+i].modified) {
8421 data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word; 8420 data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word;
8422 } else { 8421 } else {
8423 /* The NVM part needs a byte offset, hence * 2 */ 8422 /* The NVM part needs a byte offset, hence * 2 */
@@ -8466,7 +8465,7 @@ e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8466 if (hw->eeprom_shadow_ram != NULL) { 8465 if (hw->eeprom_shadow_ram != NULL) {
8467 for (i = 0; i < words; i++) { 8466 for (i = 0; i < words; i++) {
8468 if ((offset + i) < E1000_SHADOW_RAM_WORDS) { 8467 if ((offset + i) < E1000_SHADOW_RAM_WORDS) {
8469 hw->eeprom_shadow_ram[offset+i].modified = TRUE; 8468 hw->eeprom_shadow_ram[offset+i].modified = true;
8470 hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i]; 8469 hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i];
8471 } else { 8470 } else {
8472 error = -E1000_ERR_EEPROM; 8471 error = -E1000_ERR_EEPROM;
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index a6c3c34feb98..572a7b6dc12e 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -100,8 +100,8 @@ typedef enum {
100} e1000_fc_type; 100} e1000_fc_type;
101 101
102struct e1000_shadow_ram { 102struct e1000_shadow_ram {
103 uint16_t eeprom_word; 103 uint16_t eeprom_word;
104 boolean_t modified; 104 bool modified;
105}; 105};
106 106
107/* PCI bus types */ 107/* PCI bus types */
@@ -274,8 +274,8 @@ struct e1000_eeprom_info {
274 uint16_t address_bits; 274 uint16_t address_bits;
275 uint16_t delay_usec; 275 uint16_t delay_usec;
276 uint16_t page_size; 276 uint16_t page_size;
277 boolean_t use_eerd; 277 bool use_eerd;
278 boolean_t use_eewr; 278 bool use_eewr;
279}; 279};
280 280
281/* Flex ASF Information */ 281/* Flex ASF Information */
@@ -391,8 +391,8 @@ struct e1000_host_mng_dhcp_cookie{
391 391
392int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer, 392int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
393 uint16_t length); 393 uint16_t length);
394boolean_t e1000_check_mng_mode(struct e1000_hw *hw); 394bool e1000_check_mng_mode(struct e1000_hw *hw);
395boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); 395bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
396int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); 396int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
397int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw); 397int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw);
398int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw); 398int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw);
@@ -1420,7 +1420,7 @@ struct e1000_hw {
1420 uint32_t ledctl_default; 1420 uint32_t ledctl_default;
1421 uint32_t ledctl_mode1; 1421 uint32_t ledctl_mode1;
1422 uint32_t ledctl_mode2; 1422 uint32_t ledctl_mode2;
1423 boolean_t tx_pkt_filtering; 1423 bool tx_pkt_filtering;
1424 struct e1000_host_mng_dhcp_cookie mng_cookie; 1424 struct e1000_host_mng_dhcp_cookie mng_cookie;
1425 uint16_t phy_spd_default; 1425 uint16_t phy_spd_default;
1426 uint16_t autoneg_advertised; 1426 uint16_t autoneg_advertised;
@@ -1445,30 +1445,30 @@ struct e1000_hw {
1445 uint8_t dma_fairness; 1445 uint8_t dma_fairness;
1446 uint8_t mac_addr[NODE_ADDRESS_SIZE]; 1446 uint8_t mac_addr[NODE_ADDRESS_SIZE];
1447 uint8_t perm_mac_addr[NODE_ADDRESS_SIZE]; 1447 uint8_t perm_mac_addr[NODE_ADDRESS_SIZE];
1448 boolean_t disable_polarity_correction; 1448 bool disable_polarity_correction;
1449 boolean_t speed_downgraded; 1449 bool speed_downgraded;
1450 e1000_smart_speed smart_speed; 1450 e1000_smart_speed smart_speed;
1451 e1000_dsp_config dsp_config_state; 1451 e1000_dsp_config dsp_config_state;
1452 boolean_t get_link_status; 1452 bool get_link_status;
1453 boolean_t serdes_link_down; 1453 bool serdes_link_down;
1454 boolean_t tbi_compatibility_en; 1454 bool tbi_compatibility_en;
1455 boolean_t tbi_compatibility_on; 1455 bool tbi_compatibility_on;
1456 boolean_t laa_is_present; 1456 bool laa_is_present;
1457 boolean_t phy_reset_disable; 1457 bool phy_reset_disable;
1458 boolean_t initialize_hw_bits_disable; 1458 bool initialize_hw_bits_disable;
1459 boolean_t fc_send_xon; 1459 bool fc_send_xon;
1460 boolean_t fc_strict_ieee; 1460 bool fc_strict_ieee;
1461 boolean_t report_tx_early; 1461 bool report_tx_early;
1462 boolean_t adaptive_ifs; 1462 bool adaptive_ifs;
1463 boolean_t ifs_params_forced; 1463 bool ifs_params_forced;
1464 boolean_t in_ifs_mode; 1464 bool in_ifs_mode;
1465 boolean_t mng_reg_access_disabled; 1465 bool mng_reg_access_disabled;
1466 boolean_t leave_av_bit_off; 1466 bool leave_av_bit_off;
1467 boolean_t kmrn_lock_loss_workaround_disabled; 1467 bool kmrn_lock_loss_workaround_disabled;
1468 boolean_t bad_tx_carr_stats_fd; 1468 bool bad_tx_carr_stats_fd;
1469 boolean_t has_manc2h; 1469 bool has_manc2h;
1470 boolean_t rx_needs_kicking; 1470 bool rx_needs_kicking;
1471 boolean_t has_smbus; 1471 bool has_smbus;
1472}; 1472};
1473 1473
1474 1474
@@ -2518,11 +2518,11 @@ struct e1000_host_command_info {
2518 * Typical use: 2518 * Typical use:
2519 * ... 2519 * ...
2520 * if (TBI_ACCEPT) { 2520 * if (TBI_ACCEPT) {
2521 * accept_frame = TRUE; 2521 * accept_frame = true;
2522 * e1000_tbi_adjust_stats(adapter, MacAddress); 2522 * e1000_tbi_adjust_stats(adapter, MacAddress);
2523 * frame_length--; 2523 * frame_length--;
2524 * } else { 2524 * } else {
2525 * accept_frame = FALSE; 2525 * accept_frame = false;
2526 * } 2526 * }
2527 * ... 2527 * ...
2528 */ 2528 */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 0991648c53dc..757d02f443a5 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -169,21 +169,21 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
169static int e1000_set_mac(struct net_device *netdev, void *p); 169static int e1000_set_mac(struct net_device *netdev, void *p);
170static irqreturn_t e1000_intr(int irq, void *data); 170static irqreturn_t e1000_intr(int irq, void *data);
171static irqreturn_t e1000_intr_msi(int irq, void *data); 171static irqreturn_t e1000_intr_msi(int irq, void *data);
172static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter, 172static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
173 struct e1000_tx_ring *tx_ring); 173 struct e1000_tx_ring *tx_ring);
174#ifdef CONFIG_E1000_NAPI 174#ifdef CONFIG_E1000_NAPI
175static int e1000_clean(struct napi_struct *napi, int budget); 175static int e1000_clean(struct napi_struct *napi, int budget);
176static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, 176static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
177 struct e1000_rx_ring *rx_ring, 177 struct e1000_rx_ring *rx_ring,
178 int *work_done, int work_to_do); 178 int *work_done, int work_to_do);
179static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, 179static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
180 struct e1000_rx_ring *rx_ring, 180 struct e1000_rx_ring *rx_ring,
181 int *work_done, int work_to_do); 181 int *work_done, int work_to_do);
182#else 182#else
183static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, 183static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
184 struct e1000_rx_ring *rx_ring); 184 struct e1000_rx_ring *rx_ring);
185static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, 185static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
186 struct e1000_rx_ring *rx_ring); 186 struct e1000_rx_ring *rx_ring);
187#endif 187#endif
188static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 188static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
189 struct e1000_rx_ring *rx_ring, 189 struct e1000_rx_ring *rx_ring,
@@ -347,7 +347,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
347static void 347static void
348e1000_irq_disable(struct e1000_adapter *adapter) 348e1000_irq_disable(struct e1000_adapter *adapter)
349{ 349{
350 atomic_inc(&adapter->irq_sem);
351 E1000_WRITE_REG(&adapter->hw, IMC, ~0); 350 E1000_WRITE_REG(&adapter->hw, IMC, ~0);
352 E1000_WRITE_FLUSH(&adapter->hw); 351 E1000_WRITE_FLUSH(&adapter->hw);
353 synchronize_irq(adapter->pdev->irq); 352 synchronize_irq(adapter->pdev->irq);
@@ -361,10 +360,8 @@ e1000_irq_disable(struct e1000_adapter *adapter)
361static void 360static void
362e1000_irq_enable(struct e1000_adapter *adapter) 361e1000_irq_enable(struct e1000_adapter *adapter)
363{ 362{
364 if (likely(atomic_dec_and_test(&adapter->irq_sem))) { 363 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
365 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); 364 E1000_WRITE_FLUSH(&adapter->hw);
366 E1000_WRITE_FLUSH(&adapter->hw);
367 }
368} 365}
369 366
370static void 367static void
@@ -584,7 +581,7 @@ void e1000_power_up_phy(struct e1000_adapter *adapter)
584static void e1000_power_down_phy(struct e1000_adapter *adapter) 581static void e1000_power_down_phy(struct e1000_adapter *adapter)
585{ 582{
586 /* Power down the PHY so no link is implied when interface is down * 583 /* Power down the PHY so no link is implied when interface is down *
587 * The PHY cannot be powered down if any of the following is TRUE * 584 * The PHY cannot be powered down if any of the following is true *
588 * (a) WoL is enabled 585 * (a) WoL is enabled
589 * (b) AMT is active 586 * (b) AMT is active
590 * (c) SoL/IDER session is active */ 587 * (c) SoL/IDER session is active */
@@ -638,7 +635,6 @@ e1000_down(struct e1000_adapter *adapter)
638 635
639#ifdef CONFIG_E1000_NAPI 636#ifdef CONFIG_E1000_NAPI
640 napi_disable(&adapter->napi); 637 napi_disable(&adapter->napi);
641 atomic_set(&adapter->irq_sem, 0);
642#endif 638#endif
643 e1000_irq_disable(adapter); 639 e1000_irq_disable(adapter);
644 640
@@ -673,7 +669,7 @@ e1000_reset(struct e1000_adapter *adapter)
673{ 669{
674 uint32_t pba = 0, tx_space, min_tx_space, min_rx_space; 670 uint32_t pba = 0, tx_space, min_tx_space, min_rx_space;
675 uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF; 671 uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
676 boolean_t legacy_pba_adjust = FALSE; 672 bool legacy_pba_adjust = false;
677 673
678 /* Repartition Pba for greater than 9k mtu 674 /* Repartition Pba for greater than 9k mtu
679 * To take effect CTRL.RST is required. 675 * To take effect CTRL.RST is required.
@@ -687,7 +683,7 @@ e1000_reset(struct e1000_adapter *adapter)
687 case e1000_82540: 683 case e1000_82540:
688 case e1000_82541: 684 case e1000_82541:
689 case e1000_82541_rev_2: 685 case e1000_82541_rev_2:
690 legacy_pba_adjust = TRUE; 686 legacy_pba_adjust = true;
691 pba = E1000_PBA_48K; 687 pba = E1000_PBA_48K;
692 break; 688 break;
693 case e1000_82545: 689 case e1000_82545:
@@ -698,7 +694,7 @@ e1000_reset(struct e1000_adapter *adapter)
698 break; 694 break;
699 case e1000_82547: 695 case e1000_82547:
700 case e1000_82547_rev_2: 696 case e1000_82547_rev_2:
701 legacy_pba_adjust = TRUE; 697 legacy_pba_adjust = true;
702 pba = E1000_PBA_30K; 698 pba = E1000_PBA_30K;
703 break; 699 break;
704 case e1000_82571: 700 case e1000_82571:
@@ -716,7 +712,7 @@ e1000_reset(struct e1000_adapter *adapter)
716 break; 712 break;
717 } 713 }
718 714
719 if (legacy_pba_adjust == TRUE) { 715 if (legacy_pba_adjust) {
720 if (adapter->netdev->mtu > E1000_RXBUFFER_8192) 716 if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
721 pba -= 8; /* allocate more FIFO for Tx */ 717 pba -= 8; /* allocate more FIFO for Tx */
722 718
@@ -1366,15 +1362,15 @@ e1000_sw_init(struct e1000_adapter *adapter)
1366 1362
1367 e1000_set_media_type(hw); 1363 e1000_set_media_type(hw);
1368 1364
1369 hw->wait_autoneg_complete = FALSE; 1365 hw->wait_autoneg_complete = false;
1370 hw->tbi_compatibility_en = TRUE; 1366 hw->tbi_compatibility_en = true;
1371 hw->adaptive_ifs = TRUE; 1367 hw->adaptive_ifs = true;
1372 1368
1373 /* Copper options */ 1369 /* Copper options */
1374 1370
1375 if (hw->media_type == e1000_media_type_copper) { 1371 if (hw->media_type == e1000_media_type_copper) {
1376 hw->mdix = AUTO_ALL_MODES; 1372 hw->mdix = AUTO_ALL_MODES;
1377 hw->disable_polarity_correction = FALSE; 1373 hw->disable_polarity_correction = false;
1378 hw->master_slave = E1000_MASTER_SLAVE; 1374 hw->master_slave = E1000_MASTER_SLAVE;
1379 } 1375 }
1380 1376
@@ -1396,7 +1392,6 @@ e1000_sw_init(struct e1000_adapter *adapter)
1396#endif 1392#endif
1397 1393
1398 /* Explicitly disable IRQ since the NIC can be in any state. */ 1394 /* Explicitly disable IRQ since the NIC can be in any state. */
1399 atomic_set(&adapter->irq_sem, 0);
1400 e1000_irq_disable(adapter); 1395 e1000_irq_disable(adapter);
1401 1396
1402 spin_lock_init(&adapter->stats_lock); 1397 spin_lock_init(&adapter->stats_lock);
@@ -1576,7 +1571,7 @@ e1000_close(struct net_device *netdev)
1576 * @start: address of beginning of memory 1571 * @start: address of beginning of memory
1577 * @len: length of memory 1572 * @len: length of memory
1578 **/ 1573 **/
1579static boolean_t 1574static bool
1580e1000_check_64k_bound(struct e1000_adapter *adapter, 1575e1000_check_64k_bound(struct e1000_adapter *adapter,
1581 void *start, unsigned long len) 1576 void *start, unsigned long len)
1582{ 1577{
@@ -1587,10 +1582,10 @@ e1000_check_64k_bound(struct e1000_adapter *adapter,
1587 * write location to cross 64k boundary due to errata 23 */ 1582 * write location to cross 64k boundary due to errata 23 */
1588 if (adapter->hw.mac_type == e1000_82545 || 1583 if (adapter->hw.mac_type == e1000_82545 ||
1589 adapter->hw.mac_type == e1000_82546) { 1584 adapter->hw.mac_type == e1000_82546) {
1590 return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE; 1585 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1591 } 1586 }
1592 1587
1593 return TRUE; 1588 return true;
1594} 1589}
1595 1590
1596/** 1591/**
@@ -2133,7 +2128,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
2133 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 2128 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2134 if (hw->mac_type >= e1000_82543) { 2129 if (hw->mac_type >= e1000_82543) {
2135 rxcsum = E1000_READ_REG(hw, RXCSUM); 2130 rxcsum = E1000_READ_REG(hw, RXCSUM);
2136 if (adapter->rx_csum == TRUE) { 2131 if (adapter->rx_csum) {
2137 rxcsum |= E1000_RXCSUM_TUOFL; 2132 rxcsum |= E1000_RXCSUM_TUOFL;
2138 2133
2139 /* Enable 82571 IPv4 payload checksum for UDP fragments 2134 /* Enable 82571 IPv4 payload checksum for UDP fragments
@@ -2669,7 +2664,7 @@ e1000_watchdog(unsigned long data)
2669 if (link) { 2664 if (link) {
2670 if (!netif_carrier_ok(netdev)) { 2665 if (!netif_carrier_ok(netdev)) {
2671 uint32_t ctrl; 2666 uint32_t ctrl;
2672 boolean_t txb2b = 1; 2667 bool txb2b = true;
2673 e1000_get_speed_and_duplex(&adapter->hw, 2668 e1000_get_speed_and_duplex(&adapter->hw,
2674 &adapter->link_speed, 2669 &adapter->link_speed,
2675 &adapter->link_duplex); 2670 &adapter->link_duplex);
@@ -2691,12 +2686,12 @@ e1000_watchdog(unsigned long data)
2691 adapter->tx_timeout_factor = 1; 2686 adapter->tx_timeout_factor = 1;
2692 switch (adapter->link_speed) { 2687 switch (adapter->link_speed) {
2693 case SPEED_10: 2688 case SPEED_10:
2694 txb2b = 0; 2689 txb2b = false;
2695 netdev->tx_queue_len = 10; 2690 netdev->tx_queue_len = 10;
2696 adapter->tx_timeout_factor = 8; 2691 adapter->tx_timeout_factor = 8;
2697 break; 2692 break;
2698 case SPEED_100: 2693 case SPEED_100:
2699 txb2b = 0; 2694 txb2b = false;
2700 netdev->tx_queue_len = 100; 2695 netdev->tx_queue_len = 100;
2701 /* maybe add some timeout factor ? */ 2696 /* maybe add some timeout factor ? */
2702 break; 2697 break;
@@ -2704,7 +2699,7 @@ e1000_watchdog(unsigned long data)
2704 2699
2705 if ((adapter->hw.mac_type == e1000_82571 || 2700 if ((adapter->hw.mac_type == e1000_82571 ||
2706 adapter->hw.mac_type == e1000_82572) && 2701 adapter->hw.mac_type == e1000_82572) &&
2707 txb2b == 0) { 2702 !txb2b) {
2708 uint32_t tarc0; 2703 uint32_t tarc0;
2709 tarc0 = E1000_READ_REG(&adapter->hw, TARC0); 2704 tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
2710 tarc0 &= ~(1 << 21); 2705 tarc0 &= ~(1 << 21);
@@ -2802,7 +2797,7 @@ e1000_watchdog(unsigned long data)
2802 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0); 2797 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
2803 2798
2804 /* Force detection of hung controller every watchdog period */ 2799 /* Force detection of hung controller every watchdog period */
2805 adapter->detect_tx_hung = TRUE; 2800 adapter->detect_tx_hung = true;
2806 2801
2807 /* With 82571 controllers, LAA may be overwritten due to controller 2802 /* With 82571 controllers, LAA may be overwritten due to controller
2808 * reset from the other port. Set the appropriate LAA in RAR[0] */ 2803 * reset from the other port. Set the appropriate LAA in RAR[0] */
@@ -3025,12 +3020,12 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3025 if (++i == tx_ring->count) i = 0; 3020 if (++i == tx_ring->count) i = 0;
3026 tx_ring->next_to_use = i; 3021 tx_ring->next_to_use = i;
3027 3022
3028 return TRUE; 3023 return true;
3029 } 3024 }
3030 return FALSE; 3025 return false;
3031} 3026}
3032 3027
3033static boolean_t 3028static bool
3034e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, 3029e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3035 struct sk_buff *skb) 3030 struct sk_buff *skb)
3036{ 3031{
@@ -3060,10 +3055,10 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3060 if (unlikely(++i == tx_ring->count)) i = 0; 3055 if (unlikely(++i == tx_ring->count)) i = 0;
3061 tx_ring->next_to_use = i; 3056 tx_ring->next_to_use = i;
3062 3057
3063 return TRUE; 3058 return true;
3064 } 3059 }
3065 3060
3066 return FALSE; 3061 return false;
3067} 3062}
3068 3063
3069#define E1000_MAX_TXD_PWR 12 3064#define E1000_MAX_TXD_PWR 12
@@ -3836,11 +3831,8 @@ e1000_intr_msi(int irq, void *data)
3836#endif 3831#endif
3837 uint32_t icr = E1000_READ_REG(hw, ICR); 3832 uint32_t icr = E1000_READ_REG(hw, ICR);
3838 3833
3839#ifdef CONFIG_E1000_NAPI 3834 /* in NAPI mode read ICR disables interrupts using IAM */
3840 /* read ICR disables interrupts using IAM, so keep up with our 3835
3841 * enable/disable accounting */
3842 atomic_inc(&adapter->irq_sem);
3843#endif
3844 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3836 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3845 hw->get_link_status = 1; 3837 hw->get_link_status = 1;
3846 /* 80003ES2LAN workaround-- For packet buffer work-around on 3838 /* 80003ES2LAN workaround-- For packet buffer work-around on
@@ -3910,12 +3902,8 @@ e1000_intr(int irq, void *data)
3910 !(icr & E1000_ICR_INT_ASSERTED))) 3902 !(icr & E1000_ICR_INT_ASSERTED)))
3911 return IRQ_NONE; 3903 return IRQ_NONE;
3912 3904
3913 /* Interrupt Auto-Mask...upon reading ICR, 3905 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
3914 * interrupts are masked. No need for the 3906 * need for the IMC write */
3915 * IMC write, but it does mean we should
3916 * account for it ASAP. */
3917 if (likely(hw->mac_type >= e1000_82571))
3918 atomic_inc(&adapter->irq_sem);
3919#endif 3907#endif
3920 3908
3921 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3909 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
@@ -3939,7 +3927,6 @@ e1000_intr(int irq, void *data)
3939#ifdef CONFIG_E1000_NAPI 3927#ifdef CONFIG_E1000_NAPI
3940 if (unlikely(hw->mac_type < e1000_82571)) { 3928 if (unlikely(hw->mac_type < e1000_82571)) {
3941 /* disable interrupts, without the synchronize_irq bit */ 3929 /* disable interrupts, without the synchronize_irq bit */
3942 atomic_inc(&adapter->irq_sem);
3943 E1000_WRITE_REG(hw, IMC, ~0); 3930 E1000_WRITE_REG(hw, IMC, ~0);
3944 E1000_WRITE_FLUSH(hw); 3931 E1000_WRITE_FLUSH(hw);
3945 } 3932 }
@@ -3964,10 +3951,8 @@ e1000_intr(int irq, void *data)
3964 * in dead lock. Writing IMC forces 82547 into 3951 * in dead lock. Writing IMC forces 82547 into
3965 * de-assertion state. 3952 * de-assertion state.
3966 */ 3953 */
3967 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) { 3954 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
3968 atomic_inc(&adapter->irq_sem);
3969 E1000_WRITE_REG(hw, IMC, ~0); 3955 E1000_WRITE_REG(hw, IMC, ~0);
3970 }
3971 3956
3972 adapter->total_tx_bytes = 0; 3957 adapter->total_tx_bytes = 0;
3973 adapter->total_rx_bytes = 0; 3958 adapter->total_rx_bytes = 0;
@@ -4038,7 +4023,7 @@ e1000_clean(struct napi_struct *napi, int budget)
4038 * @adapter: board private structure 4023 * @adapter: board private structure
4039 **/ 4024 **/
4040 4025
4041static boolean_t 4026static bool
4042e1000_clean_tx_irq(struct e1000_adapter *adapter, 4027e1000_clean_tx_irq(struct e1000_adapter *adapter,
4043 struct e1000_tx_ring *tx_ring) 4028 struct e1000_tx_ring *tx_ring)
4044{ 4029{
@@ -4049,7 +4034,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4049#ifdef CONFIG_E1000_NAPI 4034#ifdef CONFIG_E1000_NAPI
4050 unsigned int count = 0; 4035 unsigned int count = 0;
4051#endif 4036#endif
4052 boolean_t cleaned = FALSE; 4037 bool cleaned = false;
4053 unsigned int total_tx_bytes=0, total_tx_packets=0; 4038 unsigned int total_tx_bytes=0, total_tx_packets=0;
4054 4039
4055 i = tx_ring->next_to_clean; 4040 i = tx_ring->next_to_clean;
@@ -4057,7 +4042,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4057 eop_desc = E1000_TX_DESC(*tx_ring, eop); 4042 eop_desc = E1000_TX_DESC(*tx_ring, eop);
4058 4043
4059 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { 4044 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
4060 for (cleaned = FALSE; !cleaned; ) { 4045 for (cleaned = false; !cleaned; ) {
4061 tx_desc = E1000_TX_DESC(*tx_ring, i); 4046 tx_desc = E1000_TX_DESC(*tx_ring, i);
4062 buffer_info = &tx_ring->buffer_info[i]; 4047 buffer_info = &tx_ring->buffer_info[i];
4063 cleaned = (i == eop); 4048 cleaned = (i == eop);
@@ -4105,7 +4090,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4105 if (adapter->detect_tx_hung) { 4090 if (adapter->detect_tx_hung) {
4106 /* Detect a transmit hang in hardware, this serializes the 4091 /* Detect a transmit hang in hardware, this serializes the
4107 * check with the clearing of time_stamp and movement of i */ 4092 * check with the clearing of time_stamp and movement of i */
4108 adapter->detect_tx_hung = FALSE; 4093 adapter->detect_tx_hung = false;
4109 if (tx_ring->buffer_info[eop].dma && 4094 if (tx_ring->buffer_info[eop].dma &&
4110 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + 4095 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
4111 (adapter->tx_timeout_factor * HZ)) 4096 (adapter->tx_timeout_factor * HZ))
@@ -4200,7 +4185,7 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
4200 * @adapter: board private structure 4185 * @adapter: board private structure
4201 **/ 4186 **/
4202 4187
4203static boolean_t 4188static bool
4204#ifdef CONFIG_E1000_NAPI 4189#ifdef CONFIG_E1000_NAPI
4205e1000_clean_rx_irq(struct e1000_adapter *adapter, 4190e1000_clean_rx_irq(struct e1000_adapter *adapter,
4206 struct e1000_rx_ring *rx_ring, 4191 struct e1000_rx_ring *rx_ring,
@@ -4219,7 +4204,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
4219 uint8_t last_byte; 4204 uint8_t last_byte;
4220 unsigned int i; 4205 unsigned int i;
4221 int cleaned_count = 0; 4206 int cleaned_count = 0;
4222 boolean_t cleaned = FALSE; 4207 bool cleaned = false;
4223 unsigned int total_rx_bytes=0, total_rx_packets=0; 4208 unsigned int total_rx_bytes=0, total_rx_packets=0;
4224 4209
4225 i = rx_ring->next_to_clean; 4210 i = rx_ring->next_to_clean;
@@ -4247,7 +4232,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
4247 4232
4248 next_buffer = &rx_ring->buffer_info[i]; 4233 next_buffer = &rx_ring->buffer_info[i];
4249 4234
4250 cleaned = TRUE; 4235 cleaned = true;
4251 cleaned_count++; 4236 cleaned_count++;
4252 pci_unmap_single(pdev, 4237 pci_unmap_single(pdev,
4253 buffer_info->dma, 4238 buffer_info->dma,
@@ -4373,7 +4358,7 @@ next_desc:
4373 * @adapter: board private structure 4358 * @adapter: board private structure
4374 **/ 4359 **/
4375 4360
4376static boolean_t 4361static bool
4377#ifdef CONFIG_E1000_NAPI 4362#ifdef CONFIG_E1000_NAPI
4378e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, 4363e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4379 struct e1000_rx_ring *rx_ring, 4364 struct e1000_rx_ring *rx_ring,
@@ -4393,7 +4378,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4393 unsigned int i, j; 4378 unsigned int i, j;
4394 uint32_t length, staterr; 4379 uint32_t length, staterr;
4395 int cleaned_count = 0; 4380 int cleaned_count = 0;
4396 boolean_t cleaned = FALSE; 4381 bool cleaned = false;
4397 unsigned int total_rx_bytes=0, total_rx_packets=0; 4382 unsigned int total_rx_bytes=0, total_rx_packets=0;
4398 4383
4399 i = rx_ring->next_to_clean; 4384 i = rx_ring->next_to_clean;
@@ -4420,7 +4405,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4420 4405
4421 next_buffer = &rx_ring->buffer_info[i]; 4406 next_buffer = &rx_ring->buffer_info[i];
4422 4407
4423 cleaned = TRUE; 4408 cleaned = true;
4424 cleaned_count++; 4409 cleaned_count++;
4425 pci_unmap_single(pdev, buffer_info->dma, 4410 pci_unmap_single(pdev, buffer_info->dma,
4426 buffer_info->length, 4411 buffer_info->length,
@@ -5001,7 +4986,8 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
5001 struct e1000_adapter *adapter = netdev_priv(netdev); 4986 struct e1000_adapter *adapter = netdev_priv(netdev);
5002 uint32_t ctrl, rctl; 4987 uint32_t ctrl, rctl;
5003 4988
5004 e1000_irq_disable(adapter); 4989 if (!test_bit(__E1000_DOWN, &adapter->flags))
4990 e1000_irq_disable(adapter);
5005 adapter->vlgrp = grp; 4991 adapter->vlgrp = grp;
5006 4992
5007 if (grp) { 4993 if (grp) {
@@ -5038,7 +5024,8 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
5038 } 5024 }
5039 } 5025 }
5040 5026
5041 e1000_irq_enable(adapter); 5027 if (!test_bit(__E1000_DOWN, &adapter->flags))
5028 e1000_irq_enable(adapter);
5042} 5029}
5043 5030
5044static void 5031static void
@@ -5064,9 +5051,11 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
5064 struct e1000_adapter *adapter = netdev_priv(netdev); 5051 struct e1000_adapter *adapter = netdev_priv(netdev);
5065 uint32_t vfta, index; 5052 uint32_t vfta, index;
5066 5053
5067 e1000_irq_disable(adapter); 5054 if (!test_bit(__E1000_DOWN, &adapter->flags))
5055 e1000_irq_disable(adapter);
5068 vlan_group_set_device(adapter->vlgrp, vid, NULL); 5056 vlan_group_set_device(adapter->vlgrp, vid, NULL);
5069 e1000_irq_enable(adapter); 5057 if (!test_bit(__E1000_DOWN, &adapter->flags))
5058 e1000_irq_enable(adapter);
5070 5059
5071 if ((adapter->hw.mng_cookie.status & 5060 if ((adapter->hw.mng_cookie.status &
5072 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 5061 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 10af742d8a20..365626d3177e 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -41,13 +41,6 @@
41#include <linux/interrupt.h> 41#include <linux/interrupt.h>
42#include <linux/sched.h> 42#include <linux/sched.h>
43 43
44typedef enum {
45#undef FALSE
46 FALSE = 0,
47#undef TRUE
48 TRUE = 1
49} boolean_t;
50
51#ifdef DBG 44#ifdef DBG
52#define DEBUGOUT(S) printk(KERN_DEBUG S "\n") 45#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
53#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A) 46#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 327c0620da31..4bf0c6c045c0 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -167,9 +167,6 @@ struct e1000_adapter {
167 167
168 spinlock_t tx_queue_lock; /* prevent concurrent tail updates */ 168 spinlock_t tx_queue_lock; /* prevent concurrent tail updates */
169 169
170 /* this is still needed for 82571 and above */
171 atomic_t irq_sem;
172
173 /* track device up/down/testing state */ 170 /* track device up/down/testing state */
174 unsigned long state; 171 unsigned long state;
175 172
@@ -462,7 +459,6 @@ extern s32 e1000e_acquire_nvm(struct e1000_hw *hw);
462extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); 459extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
463extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw); 460extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
464extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); 461extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
465extern s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
466extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); 462extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
467extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); 463extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
468extern void e1000e_release_nvm(struct e1000_hw *hw); 464extern void e1000e_release_nvm(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 95f75a43c9f9..073934c7f73a 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1852,62 +1852,6 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1852} 1852}
1853 1853
1854/** 1854/**
1855 * e1000e_read_nvm_spi - Reads EEPROM using SPI
1856 * @hw: pointer to the HW structure
1857 * @offset: offset of word in the EEPROM to read
1858 * @words: number of words to read
1859 * @data: word read from the EEPROM
1860 *
1861 * Reads a 16 bit word from the EEPROM.
1862 **/
1863s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1864{
1865 struct e1000_nvm_info *nvm = &hw->nvm;
1866 u32 i = 0;
1867 s32 ret_val;
1868 u16 word_in;
1869 u8 read_opcode = NVM_READ_OPCODE_SPI;
1870
1871 /* A check for invalid values: offset too large, too many words,
1872 * and not enough words. */
1873 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1874 (words == 0)) {
1875 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1876 return -E1000_ERR_NVM;
1877 }
1878
1879 ret_val = nvm->ops.acquire_nvm(hw);
1880 if (ret_val)
1881 return ret_val;
1882
1883 ret_val = e1000_ready_nvm_eeprom(hw);
1884 if (ret_val) {
1885 nvm->ops.release_nvm(hw);
1886 return ret_val;
1887 }
1888
1889 e1000_standby_nvm(hw);
1890
1891 if ((nvm->address_bits == 8) && (offset >= 128))
1892 read_opcode |= NVM_A8_OPCODE_SPI;
1893
1894 /* Send the READ command (opcode + addr) */
1895 e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
1896 e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
1897
1898 /* Read the data. SPI NVMs increment the address with each byte
1899 * read and will roll over if reading beyond the end. This allows
1900 * us to read the whole NVM from any offset */
1901 for (i = 0; i < words; i++) {
1902 word_in = e1000_shift_in_eec_bits(hw, 16);
1903 data[i] = (word_in >> 8) | (word_in << 8);
1904 }
1905
1906 nvm->ops.release_nvm(hw);
1907 return 0;
1908}
1909
1910/**
1911 * e1000e_read_nvm_eerd - Reads EEPROM using EERD register 1855 * e1000e_read_nvm_eerd - Reads EEPROM using EERD register
1912 * @hw: pointer to the HW structure 1856 * @hw: pointer to the HW structure
1913 * @offset: offset of word in the EEPROM to read 1857 * @offset: offset of word in the EEPROM to read
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index fc5c63f4f578..f501dd5e7b16 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -836,9 +836,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
836 struct e1000_hw *hw = &adapter->hw; 836 struct e1000_hw *hw = &adapter->hw;
837 u32 icr = er32(ICR); 837 u32 icr = er32(ICR);
838 838
839 /* read ICR disables interrupts using IAM, so keep up with our 839 /* read ICR disables interrupts using IAM */
840 * enable/disable accounting */
841 atomic_inc(&adapter->irq_sem);
842 840
843 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 841 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
844 hw->mac.get_link_status = 1; 842 hw->mac.get_link_status = 1;
@@ -868,8 +866,6 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
868 adapter->total_rx_bytes = 0; 866 adapter->total_rx_bytes = 0;
869 adapter->total_rx_packets = 0; 867 adapter->total_rx_packets = 0;
870 __netif_rx_schedule(netdev, &adapter->napi); 868 __netif_rx_schedule(netdev, &adapter->napi);
871 } else {
872 atomic_dec(&adapter->irq_sem);
873 } 869 }
874 870
875 return IRQ_HANDLED; 871 return IRQ_HANDLED;
@@ -895,11 +891,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
895 if (!(icr & E1000_ICR_INT_ASSERTED)) 891 if (!(icr & E1000_ICR_INT_ASSERTED))
896 return IRQ_NONE; 892 return IRQ_NONE;
897 893
898 /* Interrupt Auto-Mask...upon reading ICR, 894 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
899 * interrupts are masked. No need for the 895 * need for the IMC write */
900 * IMC write, but it does mean we should
901 * account for it ASAP. */
902 atomic_inc(&adapter->irq_sem);
903 896
904 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 897 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
905 hw->mac.get_link_status = 1; 898 hw->mac.get_link_status = 1;
@@ -931,8 +924,6 @@ static irqreturn_t e1000_intr(int irq, void *data)
931 adapter->total_rx_bytes = 0; 924 adapter->total_rx_bytes = 0;
932 adapter->total_rx_packets = 0; 925 adapter->total_rx_packets = 0;
933 __netif_rx_schedule(netdev, &adapter->napi); 926 __netif_rx_schedule(netdev, &adapter->napi);
934 } else {
935 atomic_dec(&adapter->irq_sem);
936 } 927 }
937 928
938 return IRQ_HANDLED; 929 return IRQ_HANDLED;
@@ -983,7 +974,6 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
983{ 974{
984 struct e1000_hw *hw = &adapter->hw; 975 struct e1000_hw *hw = &adapter->hw;
985 976
986 atomic_inc(&adapter->irq_sem);
987 ew32(IMC, ~0); 977 ew32(IMC, ~0);
988 e1e_flush(); 978 e1e_flush();
989 synchronize_irq(adapter->pdev->irq); 979 synchronize_irq(adapter->pdev->irq);
@@ -996,10 +986,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
996{ 986{
997 struct e1000_hw *hw = &adapter->hw; 987 struct e1000_hw *hw = &adapter->hw;
998 988
999 if (atomic_dec_and_test(&adapter->irq_sem)) { 989 ew32(IMS, IMS_ENABLE_MASK);
1000 ew32(IMS, IMS_ENABLE_MASK); 990 e1e_flush();
1001 e1e_flush();
1002 }
1003} 991}
1004 992
1005/** 993/**
@@ -1427,9 +1415,12 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1427 struct e1000_hw *hw = &adapter->hw; 1415 struct e1000_hw *hw = &adapter->hw;
1428 u32 vfta, index; 1416 u32 vfta, index;
1429 1417
1430 e1000_irq_disable(adapter); 1418 if (!test_bit(__E1000_DOWN, &adapter->state))
1419 e1000_irq_disable(adapter);
1431 vlan_group_set_device(adapter->vlgrp, vid, NULL); 1420 vlan_group_set_device(adapter->vlgrp, vid, NULL);
1432 e1000_irq_enable(adapter); 1421
1422 if (!test_bit(__E1000_DOWN, &adapter->state))
1423 e1000_irq_enable(adapter);
1433 1424
1434 if ((adapter->hw.mng_cookie.status & 1425 if ((adapter->hw.mng_cookie.status &
1435 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 1426 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
@@ -1480,7 +1471,8 @@ static void e1000_vlan_rx_register(struct net_device *netdev,
1480 struct e1000_hw *hw = &adapter->hw; 1471 struct e1000_hw *hw = &adapter->hw;
1481 u32 ctrl, rctl; 1472 u32 ctrl, rctl;
1482 1473
1483 e1000_irq_disable(adapter); 1474 if (!test_bit(__E1000_DOWN, &adapter->state))
1475 e1000_irq_disable(adapter);
1484 adapter->vlgrp = grp; 1476 adapter->vlgrp = grp;
1485 1477
1486 if (grp) { 1478 if (grp) {
@@ -1517,7 +1509,8 @@ static void e1000_vlan_rx_register(struct net_device *netdev,
1517 } 1509 }
1518 } 1510 }
1519 1511
1520 e1000_irq_enable(adapter); 1512 if (!test_bit(__E1000_DOWN, &adapter->state))
1513 e1000_irq_enable(adapter);
1521} 1514}
1522 1515
1523static void e1000_restore_vlan(struct e1000_adapter *adapter) 1516static void e1000_restore_vlan(struct e1000_adapter *adapter)
@@ -2167,7 +2160,6 @@ void e1000e_down(struct e1000_adapter *adapter)
2167 msleep(10); 2160 msleep(10);
2168 2161
2169 napi_disable(&adapter->napi); 2162 napi_disable(&adapter->napi);
2170 atomic_set(&adapter->irq_sem, 0);
2171 e1000_irq_disable(adapter); 2163 e1000_irq_disable(adapter);
2172 2164
2173 del_timer_sync(&adapter->watchdog_timer); 2165 del_timer_sync(&adapter->watchdog_timer);
@@ -2227,7 +2219,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2227 spin_lock_init(&adapter->tx_queue_lock); 2219 spin_lock_init(&adapter->tx_queue_lock);
2228 2220
2229 /* Explicitly disable IRQ since the NIC can be in any state. */ 2221 /* Explicitly disable IRQ since the NIC can be in any state. */
2230 atomic_set(&adapter->irq_sem, 0);
2231 e1000_irq_disable(adapter); 2222 e1000_irq_disable(adapter);
2232 2223
2233 spin_lock_init(&adapter->stats_lock); 2224 spin_lock_init(&adapter->stats_lock);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 6f7e3fde9e7c..0272afbdff37 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3859,7 +3859,8 @@ static void nv_do_stats_poll(unsigned long data)
3859 nv_get_hw_stats(dev); 3859 nv_get_hw_stats(dev);
3860 3860
3861 if (!np->in_shutdown) 3861 if (!np->in_shutdown)
3862 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 3862 mod_timer(&np->stats_poll,
3863 round_jiffies(jiffies + STATS_INTERVAL));
3863} 3864}
3864 3865
3865static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 3866static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@@ -5063,7 +5064,8 @@ static int nv_open(struct net_device *dev)
5063 5064
5064 /* start statistics timer */ 5065 /* start statistics timer */
5065 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) 5066 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
5066 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 5067 mod_timer(&np->stats_poll,
5068 round_jiffies(jiffies + STATS_INTERVAL));
5067 5069
5068 spin_unlock_irq(&np->lock); 5070 spin_unlock_irq(&np->lock);
5069 5071
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 718cf77e345a..601f93e482c6 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1185,7 +1185,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1185 int frame_size = new_mtu + ETH_HLEN; 1185 int frame_size = new_mtu + ETH_HLEN;
1186 1186
1187 if (priv->vlan_enable) 1187 if (priv->vlan_enable)
1188 frame_size += VLAN_ETH_HLEN; 1188 frame_size += VLAN_HLEN;
1189 1189
1190 if (gfar_uses_fcb(priv)) 1190 if (gfar_uses_fcb(priv))
1191 frame_size += GMAC_FCB_LEN; 1191 frame_size += GMAC_FCB_LEN;
@@ -1299,11 +1299,11 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
1299 1299
1300 /* If we are coalescing the interrupts, reset the timer */ 1300 /* If we are coalescing the interrupts, reset the timer */
1301 /* Otherwise, clear it */ 1301 /* Otherwise, clear it */
1302 if (priv->txcoalescing) 1302 if (likely(priv->txcoalescing)) {
1303 gfar_write(&priv->regs->txic, 0);
1303 gfar_write(&priv->regs->txic, 1304 gfar_write(&priv->regs->txic,
1304 mk_ic_value(priv->txcount, priv->txtime)); 1305 mk_ic_value(priv->txcount, priv->txtime));
1305 else 1306 }
1306 gfar_write(&priv->regs->txic, 0);
1307 1307
1308 spin_unlock(&priv->txlock); 1308 spin_unlock(&priv->txlock);
1309 1309
@@ -1417,11 +1417,11 @@ irqreturn_t gfar_receive(int irq, void *dev_id)
1417 1417
1418 /* If we are coalescing interrupts, update the timer */ 1418 /* If we are coalescing interrupts, update the timer */
1419 /* Otherwise, clear it */ 1419 /* Otherwise, clear it */
1420 if (priv->rxcoalescing) 1420 if (likely(priv->rxcoalescing)) {
1421 gfar_write(&priv->regs->rxic, 0);
1421 gfar_write(&priv->regs->rxic, 1422 gfar_write(&priv->regs->rxic,
1422 mk_ic_value(priv->rxcount, priv->rxtime)); 1423 mk_ic_value(priv->rxcount, priv->rxtime));
1423 else 1424 }
1424 gfar_write(&priv->regs->rxic, 0);
1425 1425
1426 spin_unlock_irqrestore(&priv->rxlock, flags); 1426 spin_unlock_irqrestore(&priv->rxlock, flags);
1427#endif 1427#endif
@@ -1526,9 +1526,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1526 rmb(); 1526 rmb();
1527 skb = priv->rx_skbuff[priv->skb_currx]; 1527 skb = priv->rx_skbuff[priv->skb_currx];
1528 1528
1529 if (!(bdp->status & 1529 if ((bdp->status & RXBD_LAST) && !(bdp->status & RXBD_ERR)) {
1530 (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
1531 | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
1532 /* Increment the number of packets */ 1530 /* Increment the number of packets */
1533 dev->stats.rx_packets++; 1531 dev->stats.rx_packets++;
1534 howmany++; 1532 howmany++;
@@ -1595,11 +1593,11 @@ static int gfar_poll(struct napi_struct *napi, int budget)
1595 1593
1596 /* If we are coalescing interrupts, update the timer */ 1594 /* If we are coalescing interrupts, update the timer */
1597 /* Otherwise, clear it */ 1595 /* Otherwise, clear it */
1598 if (priv->rxcoalescing) 1596 if (likely(priv->rxcoalescing)) {
1597 gfar_write(&priv->regs->rxic, 0);
1599 gfar_write(&priv->regs->rxic, 1598 gfar_write(&priv->regs->rxic,
1600 mk_ic_value(priv->rxcount, priv->rxtime)); 1599 mk_ic_value(priv->rxcount, priv->rxtime));
1601 else 1600 }
1602 gfar_write(&priv->regs->rxic, 0);
1603 } 1601 }
1604 1602
1605 return howmany; 1603 return howmany;
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 46cd7735e6fe..ea8671f87bce 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -102,7 +102,7 @@ extern const char gfar_driver_version[];
102#define DEFAULT_FIFO_TX_STARVE 0x40 102#define DEFAULT_FIFO_TX_STARVE 0x40
103#define DEFAULT_FIFO_TX_STARVE_OFF 0x80 103#define DEFAULT_FIFO_TX_STARVE_OFF 0x80
104#define DEFAULT_BD_STASH 1 104#define DEFAULT_BD_STASH 1
105#define DEFAULT_STASH_LENGTH 64 105#define DEFAULT_STASH_LENGTH 96
106#define DEFAULT_STASH_INDEX 0 106#define DEFAULT_STASH_INDEX 0
107 107
108/* The number of Exact Match registers */ 108/* The number of Exact Match registers */
@@ -124,11 +124,11 @@ extern const char gfar_driver_version[];
124 124
125#define DEFAULT_TX_COALESCE 1 125#define DEFAULT_TX_COALESCE 1
126#define DEFAULT_TXCOUNT 16 126#define DEFAULT_TXCOUNT 16
127#define DEFAULT_TXTIME 4 127#define DEFAULT_TXTIME 21
128 128
129#define DEFAULT_RX_COALESCE 1 129#define DEFAULT_RX_COALESCE 1
130#define DEFAULT_RXCOUNT 16 130#define DEFAULT_RXCOUNT 16
131#define DEFAULT_RXTIME 4 131#define DEFAULT_RXTIME 21
132 132
133#define TBIPA_VALUE 0x1f 133#define TBIPA_VALUE 0x1f
134#define MIIMCFG_INIT_VALUE 0x00000007 134#define MIIMCFG_INIT_VALUE 0x00000007
@@ -340,6 +340,9 @@ extern const char gfar_driver_version[];
340#define RXBD_OVERRUN 0x0002 340#define RXBD_OVERRUN 0x0002
341#define RXBD_TRUNCATED 0x0001 341#define RXBD_TRUNCATED 0x0001
342#define RXBD_STATS 0x01ff 342#define RXBD_STATS 0x01ff
343#define RXBD_ERR (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET \
344 | RXBD_CRCERR | RXBD_OVERRUN \
345 | RXBD_TRUNCATED)
343 346
344/* Rx FCB status field bits */ 347/* Rx FCB status field bits */
345#define RXFCB_VLN 0x8000 348#define RXFCB_VLN 0x8000
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 57772bebff56..bb31e09899fc 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1259,26 +1259,7 @@ static void ibmveth_proc_unregister_driver(void)
1259 remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net); 1259 remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net);
1260} 1260}
1261 1261
1262static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos) 1262static int ibmveth_show(struct seq_file *seq, void *v)
1263{
1264 if (*pos == 0) {
1265 return (void *)1;
1266 } else {
1267 return NULL;
1268 }
1269}
1270
1271static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1272{
1273 ++*pos;
1274 return NULL;
1275}
1276
1277static void ibmveth_seq_stop(struct seq_file *seq, void *v)
1278{
1279}
1280
1281static int ibmveth_seq_show(struct seq_file *seq, void *v)
1282{ 1263{
1283 struct ibmveth_adapter *adapter = seq->private; 1264 struct ibmveth_adapter *adapter = seq->private;
1284 char *current_mac = ((char*) &adapter->netdev->dev_addr); 1265 char *current_mac = ((char*) &adapter->netdev->dev_addr);
@@ -1302,27 +1283,10 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v)
1302 1283
1303 return 0; 1284 return 0;
1304} 1285}
1305static struct seq_operations ibmveth_seq_ops = {
1306 .start = ibmveth_seq_start,
1307 .next = ibmveth_seq_next,
1308 .stop = ibmveth_seq_stop,
1309 .show = ibmveth_seq_show,
1310};
1311 1286
1312static int ibmveth_proc_open(struct inode *inode, struct file *file) 1287static int ibmveth_proc_open(struct inode *inode, struct file *file)
1313{ 1288{
1314 struct seq_file *seq; 1289 return single_open(file, ibmveth_show, PDE(inode)->data);
1315 struct proc_dir_entry *proc;
1316 int rc;
1317
1318 rc = seq_open(file, &ibmveth_seq_ops);
1319 if (!rc) {
1320 /* recover the pointer buried in proc_dir_entry data */
1321 seq = file->private_data;
1322 proc = PDE(inode);
1323 seq->private = proc->data;
1324 }
1325 return rc;
1326} 1290}
1327 1291
1328static const struct file_operations ibmveth_proc_fops = { 1292static const struct file_operations ibmveth_proc_fops = {
@@ -1330,7 +1294,7 @@ static const struct file_operations ibmveth_proc_fops = {
1330 .open = ibmveth_proc_open, 1294 .open = ibmveth_proc_open,
1331 .read = seq_read, 1295 .read = seq_read,
1332 .llseek = seq_lseek, 1296 .llseek = seq_lseek,
1333 .release = seq_release, 1297 .release = single_release,
1334}; 1298};
1335 1299
1336static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter) 1300static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 3d2e7217e9af..f2fff90d2c9d 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -158,7 +158,6 @@ struct ixgb_adapter {
158 uint16_t link_speed; 158 uint16_t link_speed;
159 uint16_t link_duplex; 159 uint16_t link_duplex;
160 spinlock_t tx_lock; 160 spinlock_t tx_lock;
161 atomic_t irq_sem;
162 struct work_struct tx_timeout_task; 161 struct work_struct tx_timeout_task;
163 162
164 struct timer_list blink_timer; 163 struct timer_list blink_timer;
@@ -173,15 +172,15 @@ struct ixgb_adapter {
173 uint64_t hw_csum_tx_error; 172 uint64_t hw_csum_tx_error;
174 uint32_t tx_int_delay; 173 uint32_t tx_int_delay;
175 uint32_t tx_timeout_count; 174 uint32_t tx_timeout_count;
176 boolean_t tx_int_delay_enable; 175 bool tx_int_delay_enable;
177 boolean_t detect_tx_hung; 176 bool detect_tx_hung;
178 177
179 /* RX */ 178 /* RX */
180 struct ixgb_desc_ring rx_ring; 179 struct ixgb_desc_ring rx_ring;
181 uint64_t hw_csum_rx_error; 180 uint64_t hw_csum_rx_error;
182 uint64_t hw_csum_rx_good; 181 uint64_t hw_csum_rx_good;
183 uint32_t rx_int_delay; 182 uint32_t rx_int_delay;
184 boolean_t rx_csum; 183 bool rx_csum;
185 184
186 /* OS defined structs */ 185 /* OS defined structs */
187 struct napi_struct napi; 186 struct napi_struct napi;
@@ -194,7 +193,16 @@ struct ixgb_adapter {
194 u16 msg_enable; 193 u16 msg_enable;
195 struct ixgb_hw_stats stats; 194 struct ixgb_hw_stats stats;
196 uint32_t alloc_rx_buff_failed; 195 uint32_t alloc_rx_buff_failed;
197 boolean_t have_msi; 196 bool have_msi;
197 unsigned long flags;
198};
199
200enum ixgb_state_t {
201 /* TBD
202 __IXGB_TESTING,
203 __IXGB_RESETTING,
204 */
205 __IXGB_DOWN
198}; 206};
199 207
200/* Exported from other modules */ 208/* Exported from other modules */
@@ -203,4 +211,14 @@ extern void ixgb_set_ethtool_ops(struct net_device *netdev);
203extern char ixgb_driver_name[]; 211extern char ixgb_driver_name[];
204extern const char ixgb_driver_version[]; 212extern const char ixgb_driver_version[];
205 213
214extern int ixgb_up(struct ixgb_adapter *adapter);
215extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
216extern void ixgb_reset(struct ixgb_adapter *adapter);
217extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
218extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
219extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
220extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
221extern void ixgb_update_stats(struct ixgb_adapter *adapter);
222
223
206#endif /* _IXGB_H_ */ 224#endif /* _IXGB_H_ */
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index e8eb0fd6c576..8e9302fc8865 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -36,7 +36,7 @@ static void ixgb_shift_out_bits(struct ixgb_hw *hw,
36 uint16_t count); 36 uint16_t count);
37static void ixgb_standby_eeprom(struct ixgb_hw *hw); 37static void ixgb_standby_eeprom(struct ixgb_hw *hw);
38 38
39static boolean_t ixgb_wait_eeprom_command(struct ixgb_hw *hw); 39static bool ixgb_wait_eeprom_command(struct ixgb_hw *hw);
40 40
41static void ixgb_cleanup_eeprom(struct ixgb_hw *hw); 41static void ixgb_cleanup_eeprom(struct ixgb_hw *hw);
42 42
@@ -279,10 +279,10 @@ ixgb_cleanup_eeprom(struct ixgb_hw *hw)
279 * The command is done when the EEPROM's data out pin goes high. 279 * The command is done when the EEPROM's data out pin goes high.
280 * 280 *
281 * Returns: 281 * Returns:
282 * TRUE: EEPROM data pin is high before timeout. 282 * true: EEPROM data pin is high before timeout.
283 * FALSE: Time expired. 283 * false: Time expired.
284 *****************************************************************************/ 284 *****************************************************************************/
285static boolean_t 285static bool
286ixgb_wait_eeprom_command(struct ixgb_hw *hw) 286ixgb_wait_eeprom_command(struct ixgb_hw *hw)
287{ 287{
288 uint32_t eecd_reg; 288 uint32_t eecd_reg;
@@ -301,12 +301,12 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
301 eecd_reg = IXGB_READ_REG(hw, EECD); 301 eecd_reg = IXGB_READ_REG(hw, EECD);
302 302
303 if(eecd_reg & IXGB_EECD_DO) 303 if(eecd_reg & IXGB_EECD_DO)
304 return (TRUE); 304 return (true);
305 305
306 udelay(50); 306 udelay(50);
307 } 307 }
308 ASSERT(0); 308 ASSERT(0);
309 return (FALSE); 309 return (false);
310} 310}
311 311
312/****************************************************************************** 312/******************************************************************************
@@ -319,10 +319,10 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
319 * valid. 319 * valid.
320 * 320 *
321 * Returns: 321 * Returns:
322 * TRUE: Checksum is valid 322 * true: Checksum is valid
323 * FALSE: Checksum is not valid. 323 * false: Checksum is not valid.
324 *****************************************************************************/ 324 *****************************************************************************/
325boolean_t 325bool
326ixgb_validate_eeprom_checksum(struct ixgb_hw *hw) 326ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
327{ 327{
328 uint16_t checksum = 0; 328 uint16_t checksum = 0;
@@ -332,9 +332,9 @@ ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
332 checksum += ixgb_read_eeprom(hw, i); 332 checksum += ixgb_read_eeprom(hw, i);
333 333
334 if(checksum == (uint16_t) EEPROM_SUM) 334 if(checksum == (uint16_t) EEPROM_SUM)
335 return (TRUE); 335 return (true);
336 else 336 else
337 return (FALSE); 337 return (false);
338} 338}
339 339
340/****************************************************************************** 340/******************************************************************************
@@ -457,10 +457,10 @@ ixgb_read_eeprom(struct ixgb_hw *hw,
457 * hw - Struct containing variables accessed by shared code 457 * hw - Struct containing variables accessed by shared code
458 * 458 *
459 * Returns: 459 * Returns:
460 * TRUE: if eeprom read is successful 460 * true: if eeprom read is successful
461 * FALSE: otherwise. 461 * false: otherwise.
462 *****************************************************************************/ 462 *****************************************************************************/
463boolean_t 463bool
464ixgb_get_eeprom_data(struct ixgb_hw *hw) 464ixgb_get_eeprom_data(struct ixgb_hw *hw)
465{ 465{
466 uint16_t i; 466 uint16_t i;
@@ -484,16 +484,16 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
484 /* clear the init_ctrl_reg_1 to signify that the cache is 484 /* clear the init_ctrl_reg_1 to signify that the cache is
485 * invalidated */ 485 * invalidated */
486 ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR); 486 ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
487 return (FALSE); 487 return (false);
488 } 488 }
489 489
490 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) 490 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
491 != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { 491 != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
492 DEBUGOUT("ixgb_ee: Signature invalid.\n"); 492 DEBUGOUT("ixgb_ee: Signature invalid.\n");
493 return(FALSE); 493 return(false);
494 } 494 }
495 495
496 return(TRUE); 496 return(true);
497} 497}
498 498
499/****************************************************************************** 499/******************************************************************************
@@ -503,17 +503,17 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
503 * hw - Struct containing variables accessed by shared code 503 * hw - Struct containing variables accessed by shared code
504 * 504 *
505 * Returns: 505 * Returns:
506 * TRUE: eeprom signature was good and the eeprom read was successful 506 * true: eeprom signature was good and the eeprom read was successful
507 * FALSE: otherwise. 507 * false: otherwise.
508 ******************************************************************************/ 508 ******************************************************************************/
509static boolean_t 509static bool
510ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw) 510ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw)
511{ 511{
512 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 512 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
513 513
514 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) 514 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
515 == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { 515 == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
516 return (TRUE); 516 return (true);
517 } else { 517 } else {
518 return ixgb_get_eeprom_data(hw); 518 return ixgb_get_eeprom_data(hw);
519 } 519 }
@@ -533,7 +533,7 @@ ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index)
533{ 533{
534 534
535 if ((index < IXGB_EEPROM_SIZE) && 535 if ((index < IXGB_EEPROM_SIZE) &&
536 (ixgb_check_and_get_eeprom_data(hw) == TRUE)) { 536 (ixgb_check_and_get_eeprom_data(hw) == true)) {
537 return(hw->eeprom[index]); 537 return(hw->eeprom[index]);
538 } 538 }
539 539
@@ -557,7 +557,7 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
557 557
558 DEBUGFUNC("ixgb_get_ee_mac_addr"); 558 DEBUGFUNC("ixgb_get_ee_mac_addr");
559 559
560 if (ixgb_check_and_get_eeprom_data(hw) == TRUE) { 560 if (ixgb_check_and_get_eeprom_data(hw) == true) {
561 for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) { 561 for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) {
562 mac_addr[i] = ee_map->mac_addr[i]; 562 mac_addr[i] = ee_map->mac_addr[i];
563 DEBUGOUT2("mac(%d) = %.2X\n", i, mac_addr[i]); 563 DEBUGOUT2("mac(%d) = %.2X\n", i, mac_addr[i]);
@@ -577,7 +577,7 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
577uint32_t 577uint32_t
578ixgb_get_ee_pba_number(struct ixgb_hw *hw) 578ixgb_get_ee_pba_number(struct ixgb_hw *hw)
579{ 579{
580 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 580 if (ixgb_check_and_get_eeprom_data(hw) == true)
581 return (le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG]) 581 return (le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
582 | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16)); 582 | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16));
583 583
@@ -598,7 +598,7 @@ ixgb_get_ee_device_id(struct ixgb_hw *hw)
598{ 598{
599 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 599 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
600 600
601 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 601 if (ixgb_check_and_get_eeprom_data(hw) == true)
602 return (le16_to_cpu(ee_map->device_id)); 602 return (le16_to_cpu(ee_map->device_id));
603 603
604 return (0); 604 return (0);
diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h
index 7908bf3005ed..da62f58276fa 100644
--- a/drivers/net/ixgb/ixgb_ee.h
+++ b/drivers/net/ixgb/ixgb_ee.h
@@ -97,7 +97,7 @@ struct ixgb_ee_map_type {
97/* EEPROM Functions */ 97/* EEPROM Functions */
98uint16_t ixgb_read_eeprom(struct ixgb_hw *hw, uint16_t reg); 98uint16_t ixgb_read_eeprom(struct ixgb_hw *hw, uint16_t reg);
99 99
100boolean_t ixgb_validate_eeprom_checksum(struct ixgb_hw *hw); 100bool ixgb_validate_eeprom_checksum(struct ixgb_hw *hw);
101 101
102void ixgb_update_eeprom_checksum(struct ixgb_hw *hw); 102void ixgb_update_eeprom_checksum(struct ixgb_hw *hw);
103 103
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 75f3a68ee354..45ddf804fe5e 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -32,15 +32,6 @@
32 32
33#include <asm/uaccess.h> 33#include <asm/uaccess.h>
34 34
35extern int ixgb_up(struct ixgb_adapter *adapter);
36extern void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
37extern void ixgb_reset(struct ixgb_adapter *adapter);
38extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
39extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
40extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
41extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
42extern void ixgb_update_stats(struct ixgb_adapter *adapter);
43
44#define IXGB_ALL_RAR_ENTRIES 16 35#define IXGB_ALL_RAR_ENTRIES 16
45 36
46struct ixgb_stats { 37struct ixgb_stats {
@@ -136,7 +127,7 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
136 return -EINVAL; 127 return -EINVAL;
137 128
138 if(netif_running(adapter->netdev)) { 129 if(netif_running(adapter->netdev)) {
139 ixgb_down(adapter, TRUE); 130 ixgb_down(adapter, true);
140 ixgb_reset(adapter); 131 ixgb_reset(adapter);
141 ixgb_up(adapter); 132 ixgb_up(adapter);
142 ixgb_set_speed_duplex(netdev); 133 ixgb_set_speed_duplex(netdev);
@@ -185,7 +176,7 @@ ixgb_set_pauseparam(struct net_device *netdev,
185 hw->fc.type = ixgb_fc_none; 176 hw->fc.type = ixgb_fc_none;
186 177
187 if(netif_running(adapter->netdev)) { 178 if(netif_running(adapter->netdev)) {
188 ixgb_down(adapter, TRUE); 179 ixgb_down(adapter, true);
189 ixgb_up(adapter); 180 ixgb_up(adapter);
190 ixgb_set_speed_duplex(netdev); 181 ixgb_set_speed_duplex(netdev);
191 } else 182 } else
@@ -210,7 +201,7 @@ ixgb_set_rx_csum(struct net_device *netdev, uint32_t data)
210 adapter->rx_csum = data; 201 adapter->rx_csum = data;
211 202
212 if(netif_running(netdev)) { 203 if(netif_running(netdev)) {
213 ixgb_down(adapter,TRUE); 204 ixgb_down(adapter, true);
214 ixgb_up(adapter); 205 ixgb_up(adapter);
215 ixgb_set_speed_duplex(netdev); 206 ixgb_set_speed_duplex(netdev);
216 } else 207 } else
@@ -570,7 +561,7 @@ ixgb_set_ringparam(struct net_device *netdev,
570 return -EINVAL; 561 return -EINVAL;
571 562
572 if(netif_running(adapter->netdev)) 563 if(netif_running(adapter->netdev))
573 ixgb_down(adapter,TRUE); 564 ixgb_down(adapter, true);
574 565
575 rxdr->count = max(ring->rx_pending,(uint32_t)MIN_RXD); 566 rxdr->count = max(ring->rx_pending,(uint32_t)MIN_RXD);
576 rxdr->count = min(rxdr->count,(uint32_t)MAX_RXD); 567 rxdr->count = min(rxdr->count,(uint32_t)MAX_RXD);
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 80a8b9888225..8a04bbd258a6 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -41,7 +41,7 @@ static void ixgb_mta_set(struct ixgb_hw *hw, uint32_t hash_value);
41 41
42static void ixgb_get_bus_info(struct ixgb_hw *hw); 42static void ixgb_get_bus_info(struct ixgb_hw *hw);
43 43
44static boolean_t ixgb_link_reset(struct ixgb_hw *hw); 44static bool ixgb_link_reset(struct ixgb_hw *hw);
45 45
46static void ixgb_optics_reset(struct ixgb_hw *hw); 46static void ixgb_optics_reset(struct ixgb_hw *hw);
47 47
@@ -60,9 +60,9 @@ static uint16_t ixgb_read_phy_reg(struct ixgb_hw *hw,
60 uint32_t phy_address, 60 uint32_t phy_address,
61 uint32_t device_type); 61 uint32_t device_type);
62 62
63static boolean_t ixgb_setup_fc(struct ixgb_hw *hw); 63static bool ixgb_setup_fc(struct ixgb_hw *hw);
64 64
65static boolean_t mac_addr_valid(uint8_t *mac_addr); 65static bool mac_addr_valid(uint8_t *mac_addr);
66 66
67static uint32_t ixgb_mac_reset(struct ixgb_hw *hw) 67static uint32_t ixgb_mac_reset(struct ixgb_hw *hw)
68{ 68{
@@ -114,7 +114,7 @@ static uint32_t ixgb_mac_reset(struct ixgb_hw *hw)
114 * 114 *
115 * hw - Struct containing variables accessed by shared code 115 * hw - Struct containing variables accessed by shared code
116 *****************************************************************************/ 116 *****************************************************************************/
117boolean_t 117bool
118ixgb_adapter_stop(struct ixgb_hw *hw) 118ixgb_adapter_stop(struct ixgb_hw *hw)
119{ 119{
120 uint32_t ctrl_reg; 120 uint32_t ctrl_reg;
@@ -127,13 +127,13 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
127 */ 127 */
128 if(hw->adapter_stopped) { 128 if(hw->adapter_stopped) {
129 DEBUGOUT("Exiting because the adapter is already stopped!!!\n"); 129 DEBUGOUT("Exiting because the adapter is already stopped!!!\n");
130 return FALSE; 130 return false;
131 } 131 }
132 132
133 /* Set the Adapter Stopped flag so other driver functions stop 133 /* Set the Adapter Stopped flag so other driver functions stop
134 * touching the Hardware. 134 * touching the Hardware.
135 */ 135 */
136 hw->adapter_stopped = TRUE; 136 hw->adapter_stopped = true;
137 137
138 /* Clear interrupt mask to stop board from generating interrupts */ 138 /* Clear interrupt mask to stop board from generating interrupts */
139 DEBUGOUT("Masking off all interrupts\n"); 139 DEBUGOUT("Masking off all interrupts\n");
@@ -286,15 +286,15 @@ ixgb_identify_phy(struct ixgb_hw *hw)
286 * Leaves the transmit and receive units disabled and uninitialized. 286 * Leaves the transmit and receive units disabled and uninitialized.
287 * 287 *
288 * Returns: 288 * Returns:
289 * TRUE if successful, 289 * true if successful,
290 * FALSE if unrecoverable problems were encountered. 290 * false if unrecoverable problems were encountered.
291 *****************************************************************************/ 291 *****************************************************************************/
292boolean_t 292bool
293ixgb_init_hw(struct ixgb_hw *hw) 293ixgb_init_hw(struct ixgb_hw *hw)
294{ 294{
295 uint32_t i; 295 uint32_t i;
296 uint32_t ctrl_reg; 296 uint32_t ctrl_reg;
297 boolean_t status; 297 bool status;
298 298
299 DEBUGFUNC("ixgb_init_hw"); 299 DEBUGFUNC("ixgb_init_hw");
300 300
@@ -318,9 +318,8 @@ ixgb_init_hw(struct ixgb_hw *hw)
318 /* Delay a few ms just to allow the reset to complete */ 318 /* Delay a few ms just to allow the reset to complete */
319 msleep(IXGB_DELAY_AFTER_EE_RESET); 319 msleep(IXGB_DELAY_AFTER_EE_RESET);
320 320
321 if (ixgb_get_eeprom_data(hw) == FALSE) { 321 if (!ixgb_get_eeprom_data(hw))
322 return(FALSE); 322 return false;
323 }
324 323
325 /* Use the device id to determine the type of phy/transceiver. */ 324 /* Use the device id to determine the type of phy/transceiver. */
326 hw->device_id = ixgb_get_ee_device_id(hw); 325 hw->device_id = ixgb_get_ee_device_id(hw);
@@ -337,11 +336,11 @@ ixgb_init_hw(struct ixgb_hw *hw)
337 */ 336 */
338 if (!mac_addr_valid(hw->curr_mac_addr)) { 337 if (!mac_addr_valid(hw->curr_mac_addr)) {
339 DEBUGOUT("MAC address invalid after ixgb_init_rx_addrs\n"); 338 DEBUGOUT("MAC address invalid after ixgb_init_rx_addrs\n");
340 return(FALSE); 339 return(false);
341 } 340 }
342 341
343 /* tell the routines in this file they can access hardware again */ 342 /* tell the routines in this file they can access hardware again */
344 hw->adapter_stopped = FALSE; 343 hw->adapter_stopped = false;
345 344
346 /* Fill in the bus_info structure */ 345 /* Fill in the bus_info structure */
347 ixgb_get_bus_info(hw); 346 ixgb_get_bus_info(hw);
@@ -661,12 +660,12 @@ ixgb_clear_vfta(struct ixgb_hw *hw)
661 * hw - Struct containing variables accessed by shared code 660 * hw - Struct containing variables accessed by shared code
662 *****************************************************************************/ 661 *****************************************************************************/
663 662
664static boolean_t 663static bool
665ixgb_setup_fc(struct ixgb_hw *hw) 664ixgb_setup_fc(struct ixgb_hw *hw)
666{ 665{
667 uint32_t ctrl_reg; 666 uint32_t ctrl_reg;
668 uint32_t pap_reg = 0; /* by default, assume no pause time */ 667 uint32_t pap_reg = 0; /* by default, assume no pause time */
669 boolean_t status = TRUE; 668 bool status = true;
670 669
671 DEBUGFUNC("ixgb_setup_fc"); 670 DEBUGFUNC("ixgb_setup_fc");
672 671
@@ -950,7 +949,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
950 949
951 if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) && 950 if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
952 (status_reg & IXGB_STATUS_LU)) { 951 (status_reg & IXGB_STATUS_LU)) {
953 hw->link_up = TRUE; 952 hw->link_up = true;
954 } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) && 953 } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
955 (status_reg & IXGB_STATUS_LU)) { 954 (status_reg & IXGB_STATUS_LU)) {
956 DEBUGOUT("XPCSS Not Aligned while Status:LU is set.\n"); 955 DEBUGOUT("XPCSS Not Aligned while Status:LU is set.\n");
@@ -974,10 +973,10 @@ ixgb_check_for_link(struct ixgb_hw *hw)
974 * 973 *
975 * Called by any function that needs to check the link status of the adapter. 974 * Called by any function that needs to check the link status of the adapter.
976 *****************************************************************************/ 975 *****************************************************************************/
977boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw) 976bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
978{ 977{
979 uint32_t newLFC, newRFC; 978 uint32_t newLFC, newRFC;
980 boolean_t bad_link_returncode = FALSE; 979 bool bad_link_returncode = false;
981 980
982 if (hw->phy_type == ixgb_phy_type_txn17401) { 981 if (hw->phy_type == ixgb_phy_type_txn17401) {
983 newLFC = IXGB_READ_REG(hw, LFC); 982 newLFC = IXGB_READ_REG(hw, LFC);
@@ -986,7 +985,7 @@ boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw)
986 || (hw->lastRFC + 250 < newRFC)) { 985 || (hw->lastRFC + 250 < newRFC)) {
987 DEBUGOUT 986 DEBUGOUT
988 ("BAD LINK! too many LFC/RFC since last check\n"); 987 ("BAD LINK! too many LFC/RFC since last check\n");
989 bad_link_returncode = TRUE; 988 bad_link_returncode = true;
990 } 989 }
991 hw->lastLFC = newLFC; 990 hw->lastLFC = newLFC;
992 hw->lastRFC = newRFC; 991 hw->lastRFC = newRFC;
@@ -1155,21 +1154,21 @@ ixgb_get_bus_info(struct ixgb_hw *hw)
1155 * mac_addr - pointer to MAC address. 1154 * mac_addr - pointer to MAC address.
1156 * 1155 *
1157 *****************************************************************************/ 1156 *****************************************************************************/
1158static boolean_t 1157static bool
1159mac_addr_valid(uint8_t *mac_addr) 1158mac_addr_valid(uint8_t *mac_addr)
1160{ 1159{
1161 boolean_t is_valid = TRUE; 1160 bool is_valid = true;
1162 DEBUGFUNC("mac_addr_valid"); 1161 DEBUGFUNC("mac_addr_valid");
1163 1162
1164 /* Make sure it is not a multicast address */ 1163 /* Make sure it is not a multicast address */
1165 if (IS_MULTICAST(mac_addr)) { 1164 if (IS_MULTICAST(mac_addr)) {
1166 DEBUGOUT("MAC address is multicast\n"); 1165 DEBUGOUT("MAC address is multicast\n");
1167 is_valid = FALSE; 1166 is_valid = false;
1168 } 1167 }
1169 /* Not a broadcast address */ 1168 /* Not a broadcast address */
1170 else if (IS_BROADCAST(mac_addr)) { 1169 else if (IS_BROADCAST(mac_addr)) {
1171 DEBUGOUT("MAC address is broadcast\n"); 1170 DEBUGOUT("MAC address is broadcast\n");
1172 is_valid = FALSE; 1171 is_valid = false;
1173 } 1172 }
1174 /* Reject the zero address */ 1173 /* Reject the zero address */
1175 else if (mac_addr[0] == 0 && 1174 else if (mac_addr[0] == 0 &&
@@ -1179,7 +1178,7 @@ mac_addr_valid(uint8_t *mac_addr)
1179 mac_addr[4] == 0 && 1178 mac_addr[4] == 0 &&
1180 mac_addr[5] == 0) { 1179 mac_addr[5] == 0) {
1181 DEBUGOUT("MAC address is all zeros\n"); 1180 DEBUGOUT("MAC address is all zeros\n");
1182 is_valid = FALSE; 1181 is_valid = false;
1183 } 1182 }
1184 return (is_valid); 1183 return (is_valid);
1185} 1184}
@@ -1190,10 +1189,10 @@ mac_addr_valid(uint8_t *mac_addr)
1190 * 1189 *
1191 * hw - Struct containing variables accessed by shared code 1190 * hw - Struct containing variables accessed by shared code
1192 *****************************************************************************/ 1191 *****************************************************************************/
1193static boolean_t 1192static bool
1194ixgb_link_reset(struct ixgb_hw *hw) 1193ixgb_link_reset(struct ixgb_hw *hw)
1195{ 1194{
1196 boolean_t link_status = FALSE; 1195 bool link_status = false;
1197 uint8_t wait_retries = MAX_RESET_ITERATIONS; 1196 uint8_t wait_retries = MAX_RESET_ITERATIONS;
1198 uint8_t lrst_retries = MAX_RESET_ITERATIONS; 1197 uint8_t lrst_retries = MAX_RESET_ITERATIONS;
1199 1198
@@ -1208,7 +1207,7 @@ ixgb_link_reset(struct ixgb_hw *hw)
1208 link_status = 1207 link_status =
1209 ((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU) 1208 ((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU)
1210 && (IXGB_READ_REG(hw, XPCSS) & 1209 && (IXGB_READ_REG(hw, XPCSS) &
1211 IXGB_XPCSS_ALIGN_STATUS)) ? TRUE : FALSE; 1210 IXGB_XPCSS_ALIGN_STATUS)) ? true : false;
1212 } while (!link_status && --wait_retries); 1211 } while (!link_status && --wait_retries);
1213 1212
1214 } while (!link_status && --lrst_retries); 1213 } while (!link_status && --lrst_retries);
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index 4f176ff2b786..d4e95665ce9e 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -650,7 +650,7 @@ struct ixgb_flash_buffer {
650 * This is a little-endian specific check. 650 * This is a little-endian specific check.
651 */ 651 */
652#define IS_MULTICAST(Address) \ 652#define IS_MULTICAST(Address) \
653 (boolean_t)(((uint8_t *)(Address))[0] & ((uint8_t)0x01)) 653 (bool)(((uint8_t *)(Address))[0] & ((uint8_t)0x01))
654 654
655/* 655/*
656 * Check whether an address is broadcast. 656 * Check whether an address is broadcast.
@@ -663,7 +663,7 @@ struct ixgb_fc {
663 uint32_t high_water; /* Flow Control High-water */ 663 uint32_t high_water; /* Flow Control High-water */
664 uint32_t low_water; /* Flow Control Low-water */ 664 uint32_t low_water; /* Flow Control Low-water */
665 uint16_t pause_time; /* Flow Control Pause timer */ 665 uint16_t pause_time; /* Flow Control Pause timer */
666 boolean_t send_xon; /* Flow control send XON */ 666 bool send_xon; /* Flow control send XON */
667 ixgb_fc_type type; /* Type of flow control */ 667 ixgb_fc_type type; /* Type of flow control */
668}; 668};
669 669
@@ -700,8 +700,8 @@ struct ixgb_hw {
700 uint32_t num_tx_desc; /* Number of Transmit descriptors */ 700 uint32_t num_tx_desc; /* Number of Transmit descriptors */
701 uint32_t num_rx_desc; /* Number of Receive descriptors */ 701 uint32_t num_rx_desc; /* Number of Receive descriptors */
702 uint32_t rx_buffer_size; /* Size of Receive buffer */ 702 uint32_t rx_buffer_size; /* Size of Receive buffer */
703 boolean_t link_up; /* TRUE if link is valid */ 703 bool link_up; /* true if link is valid */
704 boolean_t adapter_stopped; /* State of adapter */ 704 bool adapter_stopped; /* State of adapter */
705 uint16_t device_id; /* device id from PCI configuration space */ 705 uint16_t device_id; /* device id from PCI configuration space */
706 uint16_t vendor_id; /* vendor id from PCI configuration space */ 706 uint16_t vendor_id; /* vendor id from PCI configuration space */
707 uint8_t revision_id; /* revision id from PCI configuration space */ 707 uint8_t revision_id; /* revision id from PCI configuration space */
@@ -783,11 +783,11 @@ struct ixgb_hw_stats {
783}; 783};
784 784
785/* Function Prototypes */ 785/* Function Prototypes */
786extern boolean_t ixgb_adapter_stop(struct ixgb_hw *hw); 786extern bool ixgb_adapter_stop(struct ixgb_hw *hw);
787extern boolean_t ixgb_init_hw(struct ixgb_hw *hw); 787extern bool ixgb_init_hw(struct ixgb_hw *hw);
788extern boolean_t ixgb_adapter_start(struct ixgb_hw *hw); 788extern bool ixgb_adapter_start(struct ixgb_hw *hw);
789extern void ixgb_check_for_link(struct ixgb_hw *hw); 789extern void ixgb_check_for_link(struct ixgb_hw *hw);
790extern boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw); 790extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
791 791
792extern void ixgb_rar_set(struct ixgb_hw *hw, 792extern void ixgb_rar_set(struct ixgb_hw *hw,
793 uint8_t *addr, 793 uint8_t *addr,
@@ -809,7 +809,7 @@ extern void ixgb_write_vfta(struct ixgb_hw *hw,
809void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr); 809void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr);
810uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw); 810uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw);
811uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw); 811uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw);
812boolean_t ixgb_get_eeprom_data(struct ixgb_hw *hw); 812bool ixgb_get_eeprom_data(struct ixgb_hw *hw);
813__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index); 813__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index);
814 814
815/* Everything else */ 815/* Everything else */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 269e6f805f47..9c9bf31e5c25 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -67,7 +67,7 @@ MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
67/* Local Function Prototypes */ 67/* Local Function Prototypes */
68 68
69int ixgb_up(struct ixgb_adapter *adapter); 69int ixgb_up(struct ixgb_adapter *adapter);
70void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog); 70void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
71void ixgb_reset(struct ixgb_adapter *adapter); 71void ixgb_reset(struct ixgb_adapter *adapter);
72int ixgb_setup_tx_resources(struct ixgb_adapter *adapter); 72int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
73int ixgb_setup_rx_resources(struct ixgb_adapter *adapter); 73int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
@@ -94,14 +94,14 @@ static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
94static int ixgb_change_mtu(struct net_device *netdev, int new_mtu); 94static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
95static int ixgb_set_mac(struct net_device *netdev, void *p); 95static int ixgb_set_mac(struct net_device *netdev, void *p);
96static irqreturn_t ixgb_intr(int irq, void *data); 96static irqreturn_t ixgb_intr(int irq, void *data);
97static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter); 97static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
98 98
99#ifdef CONFIG_IXGB_NAPI 99#ifdef CONFIG_IXGB_NAPI
100static int ixgb_clean(struct napi_struct *napi, int budget); 100static int ixgb_clean(struct napi_struct *napi, int budget);
101static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter, 101static bool ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
102 int *work_done, int work_to_do); 102 int *work_done, int work_to_do);
103#else 103#else
104static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter); 104static bool ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
105#endif 105#endif
106static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter); 106static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
107static void ixgb_tx_timeout(struct net_device *dev); 107static void ixgb_tx_timeout(struct net_device *dev);
@@ -197,7 +197,6 @@ module_exit(ixgb_exit_module);
197static void 197static void
198ixgb_irq_disable(struct ixgb_adapter *adapter) 198ixgb_irq_disable(struct ixgb_adapter *adapter)
199{ 199{
200 atomic_inc(&adapter->irq_sem);
201 IXGB_WRITE_REG(&adapter->hw, IMC, ~0); 200 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
202 IXGB_WRITE_FLUSH(&adapter->hw); 201 IXGB_WRITE_FLUSH(&adapter->hw);
203 synchronize_irq(adapter->pdev->irq); 202 synchronize_irq(adapter->pdev->irq);
@@ -211,14 +210,12 @@ ixgb_irq_disable(struct ixgb_adapter *adapter)
211static void 210static void
212ixgb_irq_enable(struct ixgb_adapter *adapter) 211ixgb_irq_enable(struct ixgb_adapter *adapter)
213{ 212{
214 if(atomic_dec_and_test(&adapter->irq_sem)) { 213 u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
215 u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | 214 IXGB_INT_TXDW | IXGB_INT_LSC;
216 IXGB_INT_TXDW | IXGB_INT_LSC; 215 if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
217 if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID) 216 val |= IXGB_INT_GPI0;
218 val |= IXGB_INT_GPI0; 217 IXGB_WRITE_REG(&adapter->hw, IMS, val);
219 IXGB_WRITE_REG(&adapter->hw, IMS, val); 218 IXGB_WRITE_FLUSH(&adapter->hw);
220 IXGB_WRITE_FLUSH(&adapter->hw);
221 }
222} 219}
223 220
224int 221int
@@ -283,26 +280,30 @@ ixgb_up(struct ixgb_adapter *adapter)
283 } 280 }
284 } 281 }
285 282
286 mod_timer(&adapter->watchdog_timer, jiffies); 283 clear_bit(__IXGB_DOWN, &adapter->flags);
287 284
288#ifdef CONFIG_IXGB_NAPI 285#ifdef CONFIG_IXGB_NAPI
289 napi_enable(&adapter->napi); 286 napi_enable(&adapter->napi);
290#endif 287#endif
291 ixgb_irq_enable(adapter); 288 ixgb_irq_enable(adapter);
292 289
290 mod_timer(&adapter->watchdog_timer, jiffies);
291
293 return 0; 292 return 0;
294} 293}
295 294
296void 295void
297ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog) 296ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
298{ 297{
299 struct net_device *netdev = adapter->netdev; 298 struct net_device *netdev = adapter->netdev;
300 299
300 /* prevent the interrupt handler from restarting watchdog */
301 set_bit(__IXGB_DOWN, &adapter->flags);
302
301#ifdef CONFIG_IXGB_NAPI 303#ifdef CONFIG_IXGB_NAPI
302 napi_disable(&adapter->napi); 304 napi_disable(&adapter->napi);
303 atomic_set(&adapter->irq_sem, 0);
304#endif 305#endif
305 306 /* waiting for NAPI to complete can re-enable interrupts */
306 ixgb_irq_disable(adapter); 307 ixgb_irq_disable(adapter);
307 free_irq(adapter->pdev->irq, netdev); 308 free_irq(adapter->pdev->irq, netdev);
308 309
@@ -589,9 +590,9 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
589 /* enable flow control to be programmed */ 590 /* enable flow control to be programmed */
590 hw->fc.send_xon = 1; 591 hw->fc.send_xon = 1;
591 592
592 atomic_set(&adapter->irq_sem, 1);
593 spin_lock_init(&adapter->tx_lock); 593 spin_lock_init(&adapter->tx_lock);
594 594
595 set_bit(__IXGB_DOWN, &adapter->flags);
595 return 0; 596 return 0;
596} 597}
597 598
@@ -656,7 +657,7 @@ ixgb_close(struct net_device *netdev)
656{ 657{
657 struct ixgb_adapter *adapter = netdev_priv(netdev); 658 struct ixgb_adapter *adapter = netdev_priv(netdev);
658 659
659 ixgb_down(adapter, TRUE); 660 ixgb_down(adapter, true);
660 661
661 ixgb_free_tx_resources(adapter); 662 ixgb_free_tx_resources(adapter);
662 ixgb_free_rx_resources(adapter); 663 ixgb_free_rx_resources(adapter);
@@ -881,7 +882,7 @@ ixgb_configure_rx(struct ixgb_adapter *adapter)
881 IXGB_WRITE_REG(hw, RXDCTL, rxdctl); 882 IXGB_WRITE_REG(hw, RXDCTL, rxdctl);
882 883
883 /* Enable Receive Checksum Offload for TCP and UDP */ 884 /* Enable Receive Checksum Offload for TCP and UDP */
884 if(adapter->rx_csum == TRUE) { 885 if (adapter->rx_csum) {
885 rxcsum = IXGB_READ_REG(hw, RXCSUM); 886 rxcsum = IXGB_READ_REG(hw, RXCSUM);
886 rxcsum |= IXGB_RXCSUM_TUOFL; 887 rxcsum |= IXGB_RXCSUM_TUOFL;
887 IXGB_WRITE_REG(hw, RXCSUM, rxcsum); 888 IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
@@ -1164,7 +1165,7 @@ ixgb_watchdog(unsigned long data)
1164 } 1165 }
1165 1166
1166 /* Force detection of hung controller every watchdog period */ 1167 /* Force detection of hung controller every watchdog period */
1167 adapter->detect_tx_hung = TRUE; 1168 adapter->detect_tx_hung = true;
1168 1169
1169 /* generate an interrupt to force clean up of any stragglers */ 1170 /* generate an interrupt to force clean up of any stragglers */
1170 IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW); 1171 IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
@@ -1243,7 +1244,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1243 return 0; 1244 return 0;
1244} 1245}
1245 1246
1246static boolean_t 1247static bool
1247ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb) 1248ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1248{ 1249{
1249 struct ixgb_context_desc *context_desc; 1250 struct ixgb_context_desc *context_desc;
@@ -1275,10 +1276,10 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1275 if(++i == adapter->tx_ring.count) i = 0; 1276 if(++i == adapter->tx_ring.count) i = 0;
1276 adapter->tx_ring.next_to_use = i; 1277 adapter->tx_ring.next_to_use = i;
1277 1278
1278 return TRUE; 1279 return true;
1279 } 1280 }
1280 1281
1281 return FALSE; 1282 return false;
1282} 1283}
1283 1284
1284#define IXGB_MAX_TXD_PWR 14 1285#define IXGB_MAX_TXD_PWR 14
@@ -1464,14 +1465,18 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1464 int vlan_id = 0; 1465 int vlan_id = 0;
1465 int tso; 1466 int tso;
1466 1467
1468 if (test_bit(__IXGB_DOWN, &adapter->flags)) {
1469 dev_kfree_skb(skb);
1470 return NETDEV_TX_OK;
1471 }
1472
1467 if(skb->len <= 0) { 1473 if(skb->len <= 0) {
1468 dev_kfree_skb_any(skb); 1474 dev_kfree_skb_any(skb);
1469 return 0; 1475 return 0;
1470 } 1476 }
1471 1477
1472#ifdef NETIF_F_LLTX 1478#ifdef NETIF_F_LLTX
1473 local_irq_save(flags); 1479 if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
1474 if (!spin_trylock(&adapter->tx_lock)) {
1475 /* Collision - tell upper layer to requeue */ 1480 /* Collision - tell upper layer to requeue */
1476 local_irq_restore(flags); 1481 local_irq_restore(flags);
1477 return NETDEV_TX_LOCKED; 1482 return NETDEV_TX_LOCKED;
@@ -1548,7 +1553,7 @@ ixgb_tx_timeout_task(struct work_struct *work)
1548 container_of(work, struct ixgb_adapter, tx_timeout_task); 1553 container_of(work, struct ixgb_adapter, tx_timeout_task);
1549 1554
1550 adapter->tx_timeout_count++; 1555 adapter->tx_timeout_count++;
1551 ixgb_down(adapter, TRUE); 1556 ixgb_down(adapter, true);
1552 ixgb_up(adapter); 1557 ixgb_up(adapter);
1553} 1558}
1554 1559
@@ -1595,7 +1600,7 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1595 netdev->mtu = new_mtu; 1600 netdev->mtu = new_mtu;
1596 1601
1597 if ((old_max_frame != max_frame) && netif_running(netdev)) { 1602 if ((old_max_frame != max_frame) && netif_running(netdev)) {
1598 ixgb_down(adapter, TRUE); 1603 ixgb_down(adapter, true);
1599 ixgb_up(adapter); 1604 ixgb_up(adapter);
1600 } 1605 }
1601 1606
@@ -1753,9 +1758,9 @@ ixgb_intr(int irq, void *data)
1753 if(unlikely(!icr)) 1758 if(unlikely(!icr))
1754 return IRQ_NONE; /* Not our interrupt */ 1759 return IRQ_NONE; /* Not our interrupt */
1755 1760
1756 if(unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) { 1761 if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
1757 mod_timer(&adapter->watchdog_timer, jiffies); 1762 if (!test_bit(__IXGB_DOWN, &adapter->flags))
1758 } 1763 mod_timer(&adapter->watchdog_timer, jiffies);
1759 1764
1760#ifdef CONFIG_IXGB_NAPI 1765#ifdef CONFIG_IXGB_NAPI
1761 if (netif_rx_schedule_prep(netdev, &adapter->napi)) { 1766 if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
@@ -1764,7 +1769,6 @@ ixgb_intr(int irq, void *data)
1764 of the posted write is intentionally left out. 1769 of the posted write is intentionally left out.
1765 */ 1770 */
1766 1771
1767 atomic_inc(&adapter->irq_sem);
1768 IXGB_WRITE_REG(&adapter->hw, IMC, ~0); 1772 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1769 __netif_rx_schedule(netdev, &adapter->napi); 1773 __netif_rx_schedule(netdev, &adapter->napi);
1770 } 1774 }
@@ -1812,7 +1816,7 @@ ixgb_clean(struct napi_struct *napi, int budget)
1812 * @adapter: board private structure 1816 * @adapter: board private structure
1813 **/ 1817 **/
1814 1818
1815static boolean_t 1819static bool
1816ixgb_clean_tx_irq(struct ixgb_adapter *adapter) 1820ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1817{ 1821{
1818 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; 1822 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
@@ -1820,7 +1824,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1820 struct ixgb_tx_desc *tx_desc, *eop_desc; 1824 struct ixgb_tx_desc *tx_desc, *eop_desc;
1821 struct ixgb_buffer *buffer_info; 1825 struct ixgb_buffer *buffer_info;
1822 unsigned int i, eop; 1826 unsigned int i, eop;
1823 boolean_t cleaned = FALSE; 1827 bool cleaned = false;
1824 1828
1825 i = tx_ring->next_to_clean; 1829 i = tx_ring->next_to_clean;
1826 eop = tx_ring->buffer_info[i].next_to_watch; 1830 eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1828,7 +1832,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1828 1832
1829 while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) { 1833 while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
1830 1834
1831 for(cleaned = FALSE; !cleaned; ) { 1835 for (cleaned = false; !cleaned; ) {
1832 tx_desc = IXGB_TX_DESC(*tx_ring, i); 1836 tx_desc = IXGB_TX_DESC(*tx_ring, i);
1833 buffer_info = &tx_ring->buffer_info[i]; 1837 buffer_info = &tx_ring->buffer_info[i];
1834 1838
@@ -1862,7 +1866,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1862 if(adapter->detect_tx_hung) { 1866 if(adapter->detect_tx_hung) {
1863 /* detect a transmit hang in hardware, this serializes the 1867 /* detect a transmit hang in hardware, this serializes the
1864 * check with the clearing of time_stamp and movement of i */ 1868 * check with the clearing of time_stamp and movement of i */
1865 adapter->detect_tx_hung = FALSE; 1869 adapter->detect_tx_hung = false;
1866 if (tx_ring->buffer_info[eop].dma && 1870 if (tx_ring->buffer_info[eop].dma &&
1867 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ) 1871 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
1868 && !(IXGB_READ_REG(&adapter->hw, STATUS) & 1872 && !(IXGB_READ_REG(&adapter->hw, STATUS) &
@@ -1932,7 +1936,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
1932 * @adapter: board private structure 1936 * @adapter: board private structure
1933 **/ 1937 **/
1934 1938
1935static boolean_t 1939static bool
1936#ifdef CONFIG_IXGB_NAPI 1940#ifdef CONFIG_IXGB_NAPI
1937ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) 1941ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1938#else 1942#else
@@ -1946,7 +1950,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1946 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer; 1950 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1947 uint32_t length; 1951 uint32_t length;
1948 unsigned int i, j; 1952 unsigned int i, j;
1949 boolean_t cleaned = FALSE; 1953 bool cleaned = false;
1950 1954
1951 i = rx_ring->next_to_clean; 1955 i = rx_ring->next_to_clean;
1952 rx_desc = IXGB_RX_DESC(*rx_ring, i); 1956 rx_desc = IXGB_RX_DESC(*rx_ring, i);
@@ -1980,7 +1984,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1980 next_skb = next_buffer->skb; 1984 next_skb = next_buffer->skb;
1981 prefetch(next_skb); 1985 prefetch(next_skb);
1982 1986
1983 cleaned = TRUE; 1987 cleaned = true;
1984 1988
1985 pci_unmap_single(pdev, 1989 pci_unmap_single(pdev,
1986 buffer_info->dma, 1990 buffer_info->dma,
@@ -2195,7 +2199,9 @@ ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2195 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl); 2199 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
2196 } 2200 }
2197 2201
2198 ixgb_irq_enable(adapter); 2202 /* don't enable interrupts unless we are UP */
2203 if (adapter->netdev->flags & IFF_UP)
2204 ixgb_irq_enable(adapter);
2199} 2205}
2200 2206
2201static void 2207static void
@@ -2222,9 +2228,11 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2222 2228
2223 vlan_group_set_device(adapter->vlgrp, vid, NULL); 2229 vlan_group_set_device(adapter->vlgrp, vid, NULL);
2224 2230
2225 ixgb_irq_enable(adapter); 2231 /* don't enable interrupts unless we are UP */
2232 if (adapter->netdev->flags & IFF_UP)
2233 ixgb_irq_enable(adapter);
2226 2234
2227 /* remove VID from filter table*/ 2235 /* remove VID from filter table */
2228 2236
2229 index = (vid >> 5) & 0x7F; 2237 index = (vid >> 5) & 0x7F;
2230 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index); 2238 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
@@ -2279,7 +2287,7 @@ static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
2279 struct ixgb_adapter *adapter = netdev_priv(netdev); 2287 struct ixgb_adapter *adapter = netdev_priv(netdev);
2280 2288
2281 if(netif_running(netdev)) 2289 if(netif_running(netdev))
2282 ixgb_down(adapter, TRUE); 2290 ixgb_down(adapter, true);
2283 2291
2284 pci_disable_device(pdev); 2292 pci_disable_device(pdev);
2285 2293
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
index 9e04a6b3ae0d..4be1b273e1b8 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -39,13 +39,6 @@
39#include <linux/interrupt.h> 39#include <linux/interrupt.h>
40#include <linux/sched.h> 40#include <linux/sched.h>
41 41
42typedef enum {
43#undef FALSE
44 FALSE = 0,
45#undef TRUE
46 TRUE = 1
47} boolean_t;
48
49#undef ASSERT 42#undef ASSERT
50#define ASSERT(x) if(!(x)) BUG() 43#define ASSERT(x) if(!(x)) BUG()
51#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B) 44#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
new file mode 100644
index 000000000000..1d24a73a0e1a
--- /dev/null
+++ b/drivers/net/korina.c
@@ -0,0 +1,1233 @@
1/*
2 * Driver for the IDT RC32434 (Korina) on-chip ethernet controller.
3 *
4 * Copyright 2004 IDT Inc. (rischelp@idt.com)
5 * Copyright 2006 Felix Fietkau <nbd@openwrt.org>
6 * Copyright 2008 Florian Fainelli <florian@openwrt.org>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
14 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
16 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
19 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 *
24 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 675 Mass Ave, Cambridge, MA 02139, USA.
27 *
28 * Writing to a DMA status register:
29 *
30 * When writing to the status register, you should mask the bit you have
31 * been testing the status register with. Both Tx and Rx DMA registers
32 * should stick to this procedure.
33 */
34
35#include <linux/module.h>
36#include <linux/kernel.h>
37#include <linux/moduleparam.h>
38#include <linux/sched.h>
39#include <linux/ctype.h>
40#include <linux/types.h>
41#include <linux/interrupt.h>
42#include <linux/init.h>
43#include <linux/ioport.h>
44#include <linux/in.h>
45#include <linux/slab.h>
46#include <linux/string.h>
47#include <linux/delay.h>
48#include <linux/netdevice.h>
49#include <linux/etherdevice.h>
50#include <linux/skbuff.h>
51#include <linux/errno.h>
52#include <linux/platform_device.h>
53#include <linux/mii.h>
54#include <linux/ethtool.h>
55#include <linux/crc32.h>
56
57#include <asm/bootinfo.h>
58#include <asm/system.h>
59#include <asm/bitops.h>
60#include <asm/pgtable.h>
61#include <asm/segment.h>
62#include <asm/io.h>
63#include <asm/dma.h>
64
65#include <asm/mach-rc32434/rb.h>
66#include <asm/mach-rc32434/rc32434.h>
67#include <asm/mach-rc32434/eth.h>
68#include <asm/mach-rc32434/dma_v.h>
69
70#define DRV_NAME "korina"
71#define DRV_VERSION "0.10"
72#define DRV_RELDATE "04Mar2008"
73
74#define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \
75 ((dev)->dev_addr[1]))
76#define STATION_ADDRESS_LOW(dev) (((dev)->dev_addr[2] << 24) | \
77 ((dev)->dev_addr[3] << 16) | \
78 ((dev)->dev_addr[4] << 8) | \
79 ((dev)->dev_addr[5]))
80
81#define MII_CLOCK 1250000 /* no more than 2.5MHz */
82
83/* the following must be powers of two */
84#define KORINA_NUM_RDS 64 /* number of receive descriptors */
85#define KORINA_NUM_TDS 64 /* number of transmit descriptors */
86
87#define KORINA_RBSIZE 536 /* size of one resource buffer = Ether MTU */
88#define KORINA_RDS_MASK (KORINA_NUM_RDS - 1)
89#define KORINA_TDS_MASK (KORINA_NUM_TDS - 1)
90#define RD_RING_SIZE (KORINA_NUM_RDS * sizeof(struct dma_desc))
91#define TD_RING_SIZE (KORINA_NUM_TDS * sizeof(struct dma_desc))
92
93#define TX_TIMEOUT (6000 * HZ / 1000)
94
95enum chain_status { desc_filled, desc_empty };
96#define IS_DMA_FINISHED(X) (((X) & (DMA_DESC_FINI)) != 0)
97#define IS_DMA_DONE(X) (((X) & (DMA_DESC_DONE)) != 0)
98#define RCVPKT_LENGTH(X) (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT)
99
100/* Information that need to be kept for each board. */
101struct korina_private {
102 struct eth_regs *eth_regs;
103 struct dma_reg *rx_dma_regs;
104 struct dma_reg *tx_dma_regs;
105 struct dma_desc *td_ring; /* transmit descriptor ring */
106 struct dma_desc *rd_ring; /* receive descriptor ring */
107
108 struct sk_buff *tx_skb[KORINA_NUM_TDS];
109 struct sk_buff *rx_skb[KORINA_NUM_RDS];
110
111 int rx_next_done;
112 int rx_chain_head;
113 int rx_chain_tail;
114 enum chain_status rx_chain_status;
115
116 int tx_next_done;
117 int tx_chain_head;
118 int tx_chain_tail;
119 enum chain_status tx_chain_status;
120 int tx_count;
121 int tx_full;
122
123 int rx_irq;
124 int tx_irq;
125 int ovr_irq;
126 int und_irq;
127
128 spinlock_t lock; /* NIC xmit lock */
129
130 int dma_halt_cnt;
131 int dma_run_cnt;
132 struct napi_struct napi;
133 struct mii_if_info mii_if;
134 struct net_device *dev;
135 int phy_addr;
136};
137
138extern unsigned int idt_cpu_freq;
139
140static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr)
141{
142 writel(0, &ch->dmandptr);
143 writel(dma_addr, &ch->dmadptr);
144}
145
146static inline void korina_abort_dma(struct net_device *dev,
147 struct dma_reg *ch)
148{
149 if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) {
150 writel(0x10, &ch->dmac);
151
152 while (!(readl(&ch->dmas) & DMA_STAT_HALT))
153 dev->trans_start = jiffies;
154
155 writel(0, &ch->dmas);
156 }
157
158 writel(0, &ch->dmadptr);
159 writel(0, &ch->dmandptr);
160}
161
162static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr)
163{
164 writel(dma_addr, &ch->dmandptr);
165}
166
167static void korina_abort_tx(struct net_device *dev)
168{
169 struct korina_private *lp = netdev_priv(dev);
170
171 korina_abort_dma(dev, lp->tx_dma_regs);
172}
173
174static void korina_abort_rx(struct net_device *dev)
175{
176 struct korina_private *lp = netdev_priv(dev);
177
178 korina_abort_dma(dev, lp->rx_dma_regs);
179}
180
181static void korina_start_rx(struct korina_private *lp,
182 struct dma_desc *rd)
183{
184 korina_start_dma(lp->rx_dma_regs, CPHYSADDR(rd));
185}
186
187static void korina_chain_rx(struct korina_private *lp,
188 struct dma_desc *rd)
189{
190 korina_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd));
191}
192
193/* transmit packet */
194static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
195{
196 struct korina_private *lp = netdev_priv(dev);
197 unsigned long flags;
198 u32 length;
199 u32 chain_index;
200 struct dma_desc *td;
201
202 spin_lock_irqsave(&lp->lock, flags);
203
204 td = &lp->td_ring[lp->tx_chain_tail];
205
206 /* stop queue when full, drop pkts if queue already full */
207 if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
208 lp->tx_full = 1;
209
210 if (lp->tx_count == (KORINA_NUM_TDS - 2))
211 netif_stop_queue(dev);
212 else {
213 dev->stats.tx_dropped++;
214 dev_kfree_skb_any(skb);
215 spin_unlock_irqrestore(&lp->lock, flags);
216
217 return NETDEV_TX_BUSY;
218 }
219 }
220
221 lp->tx_count++;
222
223 lp->tx_skb[lp->tx_chain_tail] = skb;
224
225 length = skb->len;
226 dma_cache_wback((u32)skb->data, skb->len);
227
228 /* Setup the transmit descriptor. */
229 dma_cache_inv((u32) td, sizeof(*td));
230 td->ca = CPHYSADDR(skb->data);
231 chain_index = (lp->tx_chain_tail - 1) &
232 KORINA_TDS_MASK;
233
234 if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
235 if (lp->tx_chain_status == desc_empty) {
236 /* Update tail */
237 td->control = DMA_COUNT(length) |
238 DMA_DESC_COF | DMA_DESC_IOF;
239 /* Move tail */
240 lp->tx_chain_tail = chain_index;
241 /* Write to NDPTR */
242 writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
243 &lp->tx_dma_regs->dmandptr);
244 /* Move head to tail */
245 lp->tx_chain_head = lp->tx_chain_tail;
246 } else {
247 /* Update tail */
248 td->control = DMA_COUNT(length) |
249 DMA_DESC_COF | DMA_DESC_IOF;
250 /* Link to prev */
251 lp->td_ring[chain_index].control &=
252 ~DMA_DESC_COF;
253 /* Link to prev */
254 lp->td_ring[chain_index].link = CPHYSADDR(td);
255 /* Move tail */
256 lp->tx_chain_tail = chain_index;
257 /* Write to NDPTR */
258 writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
259 &(lp->tx_dma_regs->dmandptr));
260 /* Move head to tail */
261 lp->tx_chain_head = lp->tx_chain_tail;
262 lp->tx_chain_status = desc_empty;
263 }
264 } else {
265 if (lp->tx_chain_status == desc_empty) {
266 /* Update tail */
267 td->control = DMA_COUNT(length) |
268 DMA_DESC_COF | DMA_DESC_IOF;
269 /* Move tail */
270 lp->tx_chain_tail = chain_index;
271 lp->tx_chain_status = desc_filled;
272 netif_stop_queue(dev);
273 } else {
274 /* Update tail */
275 td->control = DMA_COUNT(length) |
276 DMA_DESC_COF | DMA_DESC_IOF;
277 lp->td_ring[chain_index].control &=
278 ~DMA_DESC_COF;
279 lp->td_ring[chain_index].link = CPHYSADDR(td);
280 lp->tx_chain_tail = chain_index;
281 }
282 }
283 dma_cache_wback((u32) td, sizeof(*td));
284
285 dev->trans_start = jiffies;
286 spin_unlock_irqrestore(&lp->lock, flags);
287
288 return NETDEV_TX_OK;
289}
290
291static int mdio_read(struct net_device *dev, int mii_id, int reg)
292{
293 struct korina_private *lp = netdev_priv(dev);
294 int ret;
295
296 mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);
297
298 writel(0, &lp->eth_regs->miimcfg);
299 writel(0, &lp->eth_regs->miimcmd);
300 writel(mii_id | reg, &lp->eth_regs->miimaddr);
301 writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
302
303 ret = (int)(readl(&lp->eth_regs->miimrdd));
304 return ret;
305}
306
307static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
308{
309 struct korina_private *lp = netdev_priv(dev);
310
311 mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);
312
313 writel(0, &lp->eth_regs->miimcfg);
314 writel(1, &lp->eth_regs->miimcmd);
315 writel(mii_id | reg, &lp->eth_regs->miimaddr);
316 writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
317 writel(val, &lp->eth_regs->miimwtd);
318}
319
320/* Ethernet Rx DMA interrupt */
321static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
322{
323 struct net_device *dev = dev_id;
324 struct korina_private *lp = netdev_priv(dev);
325 u32 dmas, dmasm;
326 irqreturn_t retval;
327
328 dmas = readl(&lp->rx_dma_regs->dmas);
329 if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
330 netif_rx_schedule_prep(dev, &lp->napi);
331
332 dmasm = readl(&lp->rx_dma_regs->dmasm);
333 writel(dmasm | (DMA_STAT_DONE |
334 DMA_STAT_HALT | DMA_STAT_ERR),
335 &lp->rx_dma_regs->dmasm);
336
337 if (dmas & DMA_STAT_ERR)
338 printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name);
339
340 retval = IRQ_HANDLED;
341 } else
342 retval = IRQ_NONE;
343
344 return retval;
345}
346
/* Receive up to 'limit' packets from the Rx descriptor ring.
 * Called from the NAPI poll routine; returns the number of ring
 * slots examined (bounds the packets delivered to the stack). */
static int korina_rx(struct net_device *dev, int limit)
{
	struct korina_private *lp = netdev_priv(dev);
	struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
	struct sk_buff *skb, *skb_new;
	u8 *pkt_buf;
	u32 devcs, pkt_len, dmas, rx_free_desc;
	int count;

	/* The DMA engine wrote this descriptor: drop stale cache lines */
	dma_cache_inv((u32)rd, sizeof(*rd));

	for (count = 0; count < limit; count++) {

		devcs = rd->devcs;

		/* Update statistics counters */
		if (devcs & ETH_RX_CRC)
			dev->stats.rx_crc_errors++;
		if (devcs & ETH_RX_LOR)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_LE)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_OVR)
			dev->stats.rx_over_errors++;
		if (devcs & ETH_RX_CV)
			dev->stats.rx_frame_errors++;
		if (devcs & ETH_RX_CES)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_MP)
			dev->stats.multicast++;

		if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
			/* check that this is a whole packet
			 * WARNING: DMA_FD bit incorrectly set
			 * in Rc32434 (errata ref #077) */
			dev->stats.rx_errors++;
			dev->stats.rx_dropped++;
		}

		/* Descriptor has consumed some of its buffer, i.e. data
		 * has arrived in this slot; process it. */
		while ((rx_free_desc = KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
			/* init the var. used for the later
			 * operations within the while loop */
			skb_new = NULL;
			pkt_len = RCVPKT_LENGTH(devcs);
			skb = lp->rx_skb[lp->rx_next_done];

			if ((devcs & ETH_RX_ROK)) {
				/* must be the (first and) last
				 * descriptor then */
				pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;

				/* invalidate the cache */
				dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);

				/* Malloc up new buffer. */
				skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);

				/* If allocation fails, keep the old skb in
				 * the ring and stop processing. */
				if (!skb_new)
					break;
				/* Do not count the CRC */
				skb_put(skb, pkt_len - 4);
				skb->protocol = eth_type_trans(skb, dev);

				/* Pass the packet to upper layers */
				netif_receive_skb(skb);
				dev->last_rx = jiffies;
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;

				/* Update the mcast stats */
				if (devcs & ETH_RX_MP)
					dev->stats.multicast++;

				/* The old skb now belongs to the stack;
				 * park the fresh one in the ring slot. */
				lp->rx_skb[lp->rx_next_done] = skb_new;
			}

			rd->devcs = 0;

			/* Restore descriptor's curr_addr */
			if (skb_new)
				rd->ca = CPHYSADDR(skb_new->data);
			else
				rd->ca = CPHYSADDR(skb->data);

			/* Re-arm this descriptor as end-of-chain and
			 * re-open the previous one. */
			rd->control = DMA_COUNT(KORINA_RBSIZE) |
				DMA_DESC_COD | DMA_DESC_IOD;
			lp->rd_ring[(lp->rx_next_done - 1) &
				KORINA_RDS_MASK].control &=
				~DMA_DESC_COD;

			lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
			dma_cache_wback((u32)rd, sizeof(*rd));
			rd = &lp->rd_ring[lp->rx_next_done];
			writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
		}
	}

	dmas = readl(&lp->rx_dma_regs->dmas);

	if (dmas & DMA_STAT_HALT) {
		/* The DMA engine ran out of descriptors and halted:
		 * clear the halt/error bits and re-chain it onto the
		 * current descriptor. */
		writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
				&lp->rx_dma_regs->dmas);

		lp->dma_halt_cnt++;
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		korina_chain_rx(lp, rd);
	}

	return count;
}
460
461static int korina_poll(struct napi_struct *napi, int budget)
462{
463 struct korina_private *lp =
464 container_of(napi, struct korina_private, napi);
465 struct net_device *dev = lp->dev;
466 int work_done;
467
468 work_done = korina_rx(dev, budget);
469 if (work_done < budget) {
470 netif_rx_complete(dev, napi);
471
472 writel(readl(&lp->rx_dma_regs->dmasm) &
473 ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
474 &lp->rx_dma_regs->dmasm);
475 }
476 return work_done;
477}
478
479/*
480 * Set or clear the multicast filter for this adaptor.
481 */
482static void korina_multicast_list(struct net_device *dev)
483{
484 struct korina_private *lp = netdev_priv(dev);
485 unsigned long flags;
486 struct dev_mc_list *dmi = dev->mc_list;
487 u32 recognise = ETH_ARC_AB; /* always accept broadcasts */
488 int i;
489
490 /* Set promiscuous mode */
491 if (dev->flags & IFF_PROMISC)
492 recognise |= ETH_ARC_PRO;
493
494 else if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 4))
495 /* All multicast and broadcast */
496 recognise |= ETH_ARC_AM;
497
498 /* Build the hash table */
499 if (dev->mc_count > 4) {
500 u16 hash_table[4];
501 u32 crc;
502
503 for (i = 0; i < 4; i++)
504 hash_table[i] = 0;
505
506 for (i = 0; i < dev->mc_count; i++) {
507 char *addrs = dmi->dmi_addr;
508
509 dmi = dmi->next;
510
511 if (!(*addrs & 1))
512 continue;
513
514 crc = ether_crc_le(6, addrs);
515 crc >>= 26;
516 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
517 }
518 /* Accept filtered multicast */
519 recognise |= ETH_ARC_AFM;
520
521 /* Fill the MAC hash tables with their values */
522 writel((u32)(hash_table[1] << 16 | hash_table[0]),
523 &lp->eth_regs->ethhash0);
524 writel((u32)(hash_table[3] << 16 | hash_table[2]),
525 &lp->eth_regs->ethhash1);
526 }
527
528 spin_lock_irqsave(&lp->lock, flags);
529 writel(recognise, &lp->eth_regs->etharc);
530 spin_unlock_irqrestore(&lp->lock, flags);
531}
532
/* Reclaim completed Tx descriptors, update statistics and free the
 * transmitted skbs.  Called from the Tx DMA interrupt handler. */
static void korina_tx(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	struct dma_desc *td = &lp->td_ring[lp->tx_next_done];
	u32 devcs;
	u32 dmas;

	spin_lock(&lp->lock);

	/* Process all desc that are done */
	while (IS_DMA_FINISHED(td->control)) {
		/* A descriptor just freed up: restart a stopped queue */
		if (lp->tx_full == 1) {
			netif_wake_queue(dev);
			lp->tx_full = 0;
		}

		devcs = lp->td_ring[lp->tx_next_done].devcs;
		if ((devcs & (ETH_TX_FD | ETH_TX_LD)) !=
				(ETH_TX_FD | ETH_TX_LD)) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;

			/* Should never happen */
			printk(KERN_ERR DRV_NAME "%s: split tx ignored\n",
							dev->name);
		} else if (devcs & ETH_TX_TOK) {
			/* Transmit OK */
			dev->stats.tx_packets++;
			dev->stats.tx_bytes +=
				lp->tx_skb[lp->tx_next_done]->len;
		} else {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;

			/* Underflow */
			if (devcs & ETH_TX_UND)
				dev->stats.tx_fifo_errors++;

			/* Oversized frame */
			if (devcs & ETH_TX_OF)
				dev->stats.tx_aborted_errors++;

			/* Excessive deferrals */
			if (devcs & ETH_TX_ED)
				dev->stats.tx_carrier_errors++;

			/* Collisions: medium busy */
			if (devcs & ETH_TX_EC)
				dev->stats.collisions++;

			/* Late collision */
			if (devcs & ETH_TX_LC)
				dev->stats.tx_window_errors++;
		}

		/* We must always free the original skb */
		if (lp->tx_skb[lp->tx_next_done]) {
			dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
			lp->tx_skb[lp->tx_next_done] = NULL;
		}

		/* Reset the descriptor so it can be reused */
		lp->td_ring[lp->tx_next_done].control = DMA_DESC_IOF;
		lp->td_ring[lp->tx_next_done].devcs = ETH_TX_FD | ETH_TX_LD;
		lp->td_ring[lp->tx_next_done].link = 0;
		lp->td_ring[lp->tx_next_done].ca = 0;
		lp->tx_count--;

		/* Go on to next transmission */
		lp->tx_next_done = (lp->tx_next_done + 1) & KORINA_TDS_MASK;
		td = &lp->td_ring[lp->tx_next_done];

	}

	/* Clear the DMA status register */
	dmas = readl(&lp->tx_dma_regs->dmas);
	writel(~dmas, &lp->tx_dma_regs->dmas);

	/* Re-enable the Tx "finished" and "error" interrupts that the
	 * interrupt handler masked before calling us */
	writel(readl(&lp->tx_dma_regs->dmasm) &
			~(DMA_STAT_FINI | DMA_STAT_ERR),
			&lp->tx_dma_regs->dmasm);

	spin_unlock(&lp->lock);
}
615
/* Ethernet Tx DMA interrupt: reclaim finished descriptors and, if a
 * chained batch is pending while the DMA engine went idle, restart
 * the engine at the head of the pending chain. */
static irqreturn_t
korina_tx_dma_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	u32 dmas, dmasm;
	irqreturn_t retval;

	dmas = readl(&lp->tx_dma_regs->dmas);

	if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
		korina_tx(dev);

		/* Mask Tx DMA interrupts (korina_tx() unmasks them) */
		dmasm = readl(&lp->tx_dma_regs->dmasm);
		writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
				&lp->tx_dma_regs->dmasm);

		/* Descriptors were chained while DMA was busy and the
		 * engine is now idle (descriptor pointer reads 0):
		 * kick it off again at the chain head. */
		if (lp->tx_chain_status == desc_filled &&
			(readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
				&(lp->tx_dma_regs->dmandptr));
			lp->tx_chain_status = desc_empty;
			lp->tx_chain_head = lp->tx_chain_tail;
			dev->trans_start = jiffies;
		}
		if (dmas & DMA_STAT_ERR)
			printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name);

		retval = IRQ_HANDLED;
	} else
		retval = IRQ_NONE;

	return retval;
}
650
651
652static void korina_check_media(struct net_device *dev, unsigned int init_media)
653{
654 struct korina_private *lp = netdev_priv(dev);
655
656 mii_check_media(&lp->mii_if, 0, init_media);
657
658 if (lp->mii_if.full_duplex)
659 writel(readl(&lp->eth_regs->ethmac2) | ETH_MAC2_FD,
660 &lp->eth_regs->ethmac2);
661 else
662 writel(readl(&lp->eth_regs->ethmac2) & ~ETH_MAC2_FD,
663 &lp->eth_regs->ethmac2);
664}
665
666static void korina_set_carrier(struct mii_if_info *mii)
667{
668 if (mii->force_media) {
669 /* autoneg is off: Link is always assumed to be up */
670 if (!netif_carrier_ok(mii->dev))
671 netif_carrier_on(mii->dev);
672 } else /* Let MMI library update carrier status */
673 korina_check_media(mii->dev, 0);
674}
675
676static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
677{
678 struct korina_private *lp = netdev_priv(dev);
679 struct mii_ioctl_data *data = if_mii(rq);
680 int rc;
681
682 if (!netif_running(dev))
683 return -EINVAL;
684 spin_lock_irq(&lp->lock);
685 rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL);
686 spin_unlock_irq(&lp->lock);
687 korina_set_carrier(&lp->mii_if);
688
689 return rc;
690}
691
692/* ethtool helpers */
693static void netdev_get_drvinfo(struct net_device *dev,
694 struct ethtool_drvinfo *info)
695{
696 struct korina_private *lp = netdev_priv(dev);
697
698 strcpy(info->driver, DRV_NAME);
699 strcpy(info->version, DRV_VERSION);
700 strcpy(info->bus_info, lp->dev->name);
701}
702
703static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
704{
705 struct korina_private *lp = netdev_priv(dev);
706 int rc;
707
708 spin_lock_irq(&lp->lock);
709 rc = mii_ethtool_gset(&lp->mii_if, cmd);
710 spin_unlock_irq(&lp->lock);
711
712 return rc;
713}
714
715static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
716{
717 struct korina_private *lp = netdev_priv(dev);
718 int rc;
719
720 spin_lock_irq(&lp->lock);
721 rc = mii_ethtool_sset(&lp->mii_if, cmd);
722 spin_unlock_irq(&lp->lock);
723 korina_set_carrier(&lp->mii_if);
724
725 return rc;
726}
727
728static u32 netdev_get_link(struct net_device *dev)
729{
730 struct korina_private *lp = netdev_priv(dev);
731
732 return mii_link_ok(&lp->mii_if);
733}
734
/* ethtool operations implemented by this driver */
static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_settings = netdev_get_settings,
	.set_settings = netdev_set_settings,
	.get_link = netdev_get_link,
};
741
742static void korina_alloc_ring(struct net_device *dev)
743{
744 struct korina_private *lp = netdev_priv(dev);
745 int i;
746
747 /* Initialize the transmit descriptors */
748 for (i = 0; i < KORINA_NUM_TDS; i++) {
749 lp->td_ring[i].control = DMA_DESC_IOF;
750 lp->td_ring[i].devcs = ETH_TX_FD | ETH_TX_LD;
751 lp->td_ring[i].ca = 0;
752 lp->td_ring[i].link = 0;
753 }
754 lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail =
755 lp->tx_full = lp->tx_count = 0;
756 lp->tx_chain_status = desc_empty;
757
758 /* Initialize the receive descriptors */
759 for (i = 0; i < KORINA_NUM_RDS; i++) {
760 struct sk_buff *skb = lp->rx_skb[i];
761
762 skb = dev_alloc_skb(KORINA_RBSIZE + 2);
763 if (!skb)
764 break;
765 skb_reserve(skb, 2);
766 lp->rx_skb[i] = skb;
767 lp->rd_ring[i].control = DMA_DESC_IOD |
768 DMA_COUNT(KORINA_RBSIZE);
769 lp->rd_ring[i].devcs = 0;
770 lp->rd_ring[i].ca = CPHYSADDR(skb->data);
771 lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
772 }
773
774 /* loop back */
775 lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[0]);
776 lp->rx_next_done = 0;
777
778 lp->rd_ring[i].control |= DMA_DESC_COD;
779 lp->rx_chain_head = 0;
780 lp->rx_chain_tail = 0;
781 lp->rx_chain_status = desc_empty;
782}
783
784static void korina_free_ring(struct net_device *dev)
785{
786 struct korina_private *lp = netdev_priv(dev);
787 int i;
788
789 for (i = 0; i < KORINA_NUM_RDS; i++) {
790 lp->rd_ring[i].control = 0;
791 if (lp->rx_skb[i])
792 dev_kfree_skb_any(lp->rx_skb[i]);
793 lp->rx_skb[i] = NULL;
794 }
795
796 for (i = 0; i < KORINA_NUM_TDS; i++) {
797 lp->td_ring[i].control = 0;
798 if (lp->tx_skb[i])
799 dev_kfree_skb_any(lp->tx_skb[i]);
800 lp->tx_skb[i] = NULL;
801 }
802}
803
/*
 * Initialize the RC32434 ethernet controller: reset the MAC, rebuild
 * the descriptor rings, program the station address / MAC parameters
 * and start the Rx DMA engine.  Returns 0 on success.
 */
static int korina_init(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	/* Disable DMA */
	korina_abort_tx(dev);
	korina_abort_rx(dev);

	/* reset ethernet logic */
	writel(0, &lp->eth_regs->ethintfc);
	/* Busy-wait until the "reset in progress" bit clears; touching
	 * trans_start keeps the Tx watchdog quiet meanwhile. */
	while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
		dev->trans_start = jiffies;

	/* Enable Ethernet Interface */
	writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);

	/* Allocate rings */
	korina_alloc_ring(dev);

	writel(0, &lp->rx_dma_regs->dmas);
	/* Start Rx DMA */
	korina_start_rx(lp, &lp->rd_ring[0]);

	/* Unmask the Tx and Rx DMA interrupts */
	writel(readl(&lp->tx_dma_regs->dmasm) &
			~(DMA_STAT_FINI | DMA_STAT_ERR),
			&lp->tx_dma_regs->dmasm);
	writel(readl(&lp->rx_dma_regs->dmasm) &
			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
			&lp->rx_dma_regs->dmasm);

	/* Accept only packets destined for this Ethernet device address */
	writel(ETH_ARC_AB, &lp->eth_regs->etharc);

	/* Set all Ether station address registers to their initial values */
	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);


	/* Frame Length Checking, Pad Enable, CRC Enable, Full Duplex set */
	writel(ETH_MAC2_PE | ETH_MAC2_CEN | ETH_MAC2_FD,
			&lp->eth_regs->ethmac2);

	/* Back to back inter-packet-gap */
	writel(0x15, &lp->eth_regs->ethipgt);
	/* Non - Back to back inter-packet-gap */
	writel(0x12, &lp->eth_regs->ethipgr);

	/* Management Clock Prescaler Divisor
	 * Clock independent setting */
	writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1,
			&lp->eth_regs->ethmcp);

	/* don't transmit until fifo contains 48b */
	writel(48, &lp->eth_regs->ethfifott);

	/* Enable the MAC receiver */
	writel(ETH_MAC1_RE, &lp->eth_regs->ethmac1);

	napi_enable(&lp->napi);
	netif_start_queue(dev);

	return 0;
}
878
/*
 * Restart the RC32434 ethernet controller: mask and disable all
 * interrupt sources, tear down the rings and re-run korina_init().
 * Used from the error-recovery paths (underflow/overflow/timeout).
 * FIXME: check the return status where we call it
 */
static int korina_restart(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	int ret = 0;

	/*
	 * Disable interrupts
	 */
	disable_irq(lp->rx_irq);
	disable_irq(lp->tx_irq);
	disable_irq(lp->ovr_irq);
	disable_irq(lp->und_irq);

	/* Mask all DMA interrupts while the rings are rebuilt */
	writel(readl(&lp->tx_dma_regs->dmasm) |
			DMA_STAT_FINI | DMA_STAT_ERR,
			&lp->tx_dma_regs->dmasm);
	writel(readl(&lp->rx_dma_regs->dmasm) |
			DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
			&lp->rx_dma_regs->dmasm);

	korina_free_ring(dev);

	ret = korina_init(dev);
	if (ret < 0) {
		printk(KERN_ERR DRV_NAME "%s: cannot restart device\n",
								dev->name);
		return ret;
	}
	korina_multicast_list(dev);

	/* Re-enable the IRQ lines in reverse order of disabling */
	enable_irq(lp->und_irq);
	enable_irq(lp->ovr_irq);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);

	return ret;
}
920
/* Write 'value' to the interrupt/flow-control register (clearing the
 * offending status bit) and fully restart the controller. */
static void korina_clear_and_restart(struct net_device *dev, u32 value)
{
	struct korina_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);
	writel(value, &lp->eth_regs->ethintfc);
	korina_restart(dev);
}
929
930/* Ethernet Tx Underflow interrupt */
931static irqreturn_t korina_und_interrupt(int irq, void *dev_id)
932{
933 struct net_device *dev = dev_id;
934 struct korina_private *lp = netdev_priv(dev);
935 unsigned int und;
936
937 spin_lock(&lp->lock);
938
939 und = readl(&lp->eth_regs->ethintfc);
940
941 if (und & ETH_INT_FC_UND)
942 korina_clear_and_restart(dev, und & ~ETH_INT_FC_UND);
943
944 spin_unlock(&lp->lock);
945
946 return IRQ_HANDLED;
947}
948
949static void korina_tx_timeout(struct net_device *dev)
950{
951 struct korina_private *lp = netdev_priv(dev);
952 unsigned long flags;
953
954 spin_lock_irqsave(&lp->lock, flags);
955 korina_restart(dev);
956 spin_unlock_irqrestore(&lp->lock, flags);
957}
958
959/* Ethernet Rx Overflow interrupt */
960static irqreturn_t
961korina_ovr_interrupt(int irq, void *dev_id)
962{
963 struct net_device *dev = dev_id;
964 struct korina_private *lp = netdev_priv(dev);
965 unsigned int ovr;
966
967 spin_lock(&lp->lock);
968 ovr = readl(&lp->eth_regs->ethintfc);
969
970 if (ovr & ETH_INT_FC_OVR)
971 korina_clear_and_restart(dev, ovr & ~ETH_INT_FC_OVR);
972
973 spin_unlock(&lp->lock);
974
975 return IRQ_HANDLED;
976}
977
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' for netconsole/netpoll.
 * NOTE(review): dev->irq is the Rx DMA IRQ (set in korina_probe()),
 * yet only the Tx DMA handler is invoked here — netpoll usually needs
 * Rx processing as well.  Looks suspicious; confirm intent. */
static void korina_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	korina_tx_dma_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
986
987static int korina_open(struct net_device *dev)
988{
989 struct korina_private *lp = netdev_priv(dev);
990 int ret = 0;
991
992 /* Initialize */
993 ret = korina_init(dev);
994 if (ret < 0) {
995 printk(KERN_ERR DRV_NAME "%s: cannot open device\n", dev->name);
996 goto out;
997 }
998
999 /* Install the interrupt handler
1000 * that handles the Done Finished
1001 * Ovr and Und Events */
1002 ret = request_irq(lp->rx_irq, &korina_rx_dma_interrupt,
1003 IRQF_SHARED | IRQF_DISABLED, "Korina ethernet Rx", dev);
1004 if (ret < 0) {
1005 printk(KERN_ERR DRV_NAME "%s: unable to get Rx DMA IRQ %d\n",
1006 dev->name, lp->rx_irq);
1007 goto err_release;
1008 }
1009 ret = request_irq(lp->tx_irq, &korina_tx_dma_interrupt,
1010 IRQF_SHARED | IRQF_DISABLED, "Korina ethernet Tx", dev);
1011 if (ret < 0) {
1012 printk(KERN_ERR DRV_NAME "%s: unable to get Tx DMA IRQ %d\n",
1013 dev->name, lp->tx_irq);
1014 goto err_free_rx_irq;
1015 }
1016
1017 /* Install handler for overrun error. */
1018 ret = request_irq(lp->ovr_irq, &korina_ovr_interrupt,
1019 IRQF_SHARED | IRQF_DISABLED, "Ethernet Overflow", dev);
1020 if (ret < 0) {
1021 printk(KERN_ERR DRV_NAME"%s: unable to get OVR IRQ %d\n",
1022 dev->name, lp->ovr_irq);
1023 goto err_free_tx_irq;
1024 }
1025
1026 /* Install handler for underflow error. */
1027 ret = request_irq(lp->und_irq, &korina_und_interrupt,
1028 IRQF_SHARED | IRQF_DISABLED, "Ethernet Underflow", dev);
1029 if (ret < 0) {
1030 printk(KERN_ERR DRV_NAME "%s: unable to get UND IRQ %d\n",
1031 dev->name, lp->und_irq);
1032 goto err_free_ovr_irq;
1033 }
1034
1035err_free_ovr_irq:
1036 free_irq(lp->ovr_irq, dev);
1037err_free_tx_irq:
1038 free_irq(lp->tx_irq, dev);
1039err_free_rx_irq:
1040 free_irq(lp->rx_irq, dev);
1041err_release:
1042 korina_free_ring(dev);
1043 goto out;
1044out:
1045 return ret;
1046}
1047
/* net_device stop: quiesce DMA, mask interrupt sources, release the
 * rings and hand back the IRQ lines. */
static int korina_close(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	u32 tmp;

	/* Disable interrupts */
	disable_irq(lp->rx_irq);
	disable_irq(lp->tx_irq);
	disable_irq(lp->ovr_irq);
	disable_irq(lp->und_irq);

	/* Stop Tx DMA and mask its interrupt sources */
	korina_abort_tx(dev);
	tmp = readl(&lp->tx_dma_regs->dmasm);
	tmp = tmp | DMA_STAT_FINI | DMA_STAT_ERR;
	writel(tmp, &lp->tx_dma_regs->dmasm);

	/* Stop Rx DMA and mask its interrupt sources */
	korina_abort_rx(dev);
	tmp = readl(&lp->rx_dma_regs->dmasm);
	tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
	writel(tmp, &lp->rx_dma_regs->dmasm);

	korina_free_ring(dev);

	free_irq(lp->rx_irq, dev);
	free_irq(lp->tx_irq, dev);
	free_irq(lp->ovr_irq, dev);
	free_irq(lp->und_irq, dev);

	return 0;
}
1078
1079static int korina_probe(struct platform_device *pdev)
1080{
1081 struct korina_device *bif = platform_get_drvdata(pdev);
1082 struct korina_private *lp;
1083 struct net_device *dev;
1084 struct resource *r;
1085 int retval, err;
1086
1087 dev = alloc_etherdev(sizeof(struct korina_private));
1088 if (!dev) {
1089 printk(KERN_ERR DRV_NAME ": alloc_etherdev failed\n");
1090 return -ENOMEM;
1091 }
1092 SET_NETDEV_DEV(dev, &pdev->dev);
1093 platform_set_drvdata(pdev, dev);
1094 lp = netdev_priv(dev);
1095
1096 bif->dev = dev;
1097 memcpy(dev->dev_addr, bif->mac, 6);
1098
1099 lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
1100 lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
1101 lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr");
1102 lp->und_irq = platform_get_irq_byname(pdev, "korina_und");
1103
1104 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
1105 dev->base_addr = r->start;
1106 lp->eth_regs = ioremap_nocache(r->start, r->end - r->start);
1107 if (!lp->eth_regs) {
1108 printk(KERN_ERR DRV_NAME "cannot remap registers\n");
1109 retval = -ENXIO;
1110 goto probe_err_out;
1111 }
1112
1113 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
1114 lp->rx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
1115 if (!lp->rx_dma_regs) {
1116 printk(KERN_ERR DRV_NAME "cannot remap Rx DMA registers\n");
1117 retval = -ENXIO;
1118 goto probe_err_dma_rx;
1119 }
1120
1121 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
1122 lp->tx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
1123 if (!lp->tx_dma_regs) {
1124 printk(KERN_ERR DRV_NAME "cannot remap Tx DMA registers\n");
1125 retval = -ENXIO;
1126 goto probe_err_dma_tx;
1127 }
1128
1129 lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
1130 if (!lp->td_ring) {
1131 printk(KERN_ERR DRV_NAME "cannot allocate descriptors\n");
1132 retval = -ENOMEM;
1133 goto probe_err_td_ring;
1134 }
1135
1136 dma_cache_inv((unsigned long)(lp->td_ring),
1137 TD_RING_SIZE + RD_RING_SIZE);
1138
1139 /* now convert TD_RING pointer to KSEG1 */
1140 lp->td_ring = (struct dma_desc *)KSEG1ADDR(lp->td_ring);
1141 lp->rd_ring = &lp->td_ring[KORINA_NUM_TDS];
1142
1143 spin_lock_init(&lp->lock);
1144 /* just use the rx dma irq */
1145 dev->irq = lp->rx_irq;
1146 lp->dev = dev;
1147
1148 dev->open = korina_open;
1149 dev->stop = korina_close;
1150 dev->hard_start_xmit = korina_send_packet;
1151 dev->set_multicast_list = &korina_multicast_list;
1152 dev->ethtool_ops = &netdev_ethtool_ops;
1153 dev->tx_timeout = korina_tx_timeout;
1154 dev->watchdog_timeo = TX_TIMEOUT;
1155 dev->do_ioctl = &korina_ioctl;
1156#ifdef CONFIG_NET_POLL_CONTROLLER
1157 dev->poll_controller = korina_poll_controller;
1158#endif
1159 netif_napi_add(dev, &lp->napi, korina_poll, 64);
1160
1161 lp->phy_addr = (((lp->rx_irq == 0x2c? 1:0) << 8) | 0x05);
1162 lp->mii_if.dev = dev;
1163 lp->mii_if.mdio_read = mdio_read;
1164 lp->mii_if.mdio_write = mdio_write;
1165 lp->mii_if.phy_id = lp->phy_addr;
1166 lp->mii_if.phy_id_mask = 0x1f;
1167 lp->mii_if.reg_num_mask = 0x1f;
1168
1169 err = register_netdev(dev);
1170 if (err) {
1171 printk(KERN_ERR DRV_NAME
1172 ": cannot register net device %d\n", err);
1173 retval = -EINVAL;
1174 goto probe_err_register;
1175 }
1176 return 0;
1177
1178probe_err_register:
1179 kfree(lp->td_ring);
1180probe_err_td_ring:
1181 iounmap(lp->tx_dma_regs);
1182probe_err_dma_tx:
1183 iounmap(lp->rx_dma_regs);
1184probe_err_dma_rx:
1185 iounmap(lp->eth_regs);
1186probe_err_out:
1187 free_netdev(dev);
1188 return retval;
1189}
1190
1191static int korina_remove(struct platform_device *pdev)
1192{
1193 struct korina_device *bif = platform_get_drvdata(pdev);
1194 struct korina_private *lp = netdev_priv(bif->dev);
1195
1196 if (lp->eth_regs)
1197 iounmap(lp->eth_regs);
1198 if (lp->rx_dma_regs)
1199 iounmap(lp->rx_dma_regs);
1200 if (lp->tx_dma_regs)
1201 iounmap(lp->tx_dma_regs);
1202
1203 platform_set_drvdata(pdev, NULL);
1204 unregister_netdev(bif->dev);
1205 free_netdev(bif->dev);
1206
1207 return 0;
1208}
1209
/* Platform-driver glue: binds to the "korina" platform device
 * registered by the RC32434 board setup code. */
static struct platform_driver korina_driver = {
	.driver.name = "korina",
	.probe = korina_probe,
	.remove = korina_remove,
};
1215
/* Module entry point: register the platform driver. */
static int __init korina_init_module(void)
{
	return platform_driver_register(&korina_driver);
}
1220
1221static void korina_cleanup_module(void)
1222{
1223 return platform_driver_unregister(&korina_driver);
1224}
1225
1226module_init(korina_init_module);
1227module_exit(korina_cleanup_module);
1228
1229MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
1230MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
1231MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
1232MODULE_DESCRIPTION("IDT RC32434 (Korina) Ethernet driver");
1233MODULE_LICENSE("GPL");
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 771139e283af..d65cadef4d22 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -3156,7 +3156,7 @@ struct mv643xx_stats {
3156 int stat_offset; 3156 int stat_offset;
3157}; 3157};
3158 3158
3159#define MV643XX_STAT(m) sizeof(((struct mv643xx_private *)0)->m), \ 3159#define MV643XX_STAT(m) FIELD_SIZEOF(struct mv643xx_private, m), \
3160 offsetof(struct mv643xx_private, m) 3160 offsetof(struct mv643xx_private, m)
3161 3161
3162static const struct mv643xx_stats mv643xx_gstrings_stats[] = { 3162static const struct mv643xx_stats mv643xx_gstrings_stats[] = {
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 385f69c14387..900ab5d2ba70 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -511,10 +511,10 @@ enum PhyCtrl_bits {
511/* Note that using only 32 bit fields simplifies conversion to big-endian 511/* Note that using only 32 bit fields simplifies conversion to big-endian
512 architectures. */ 512 architectures. */
513struct netdev_desc { 513struct netdev_desc {
514 u32 next_desc; 514 __le32 next_desc;
515 s32 cmd_status; 515 __le32 cmd_status;
516 u32 addr; 516 __le32 addr;
517 u32 software_use; 517 __le32 software_use;
518}; 518};
519 519
520/* Bits in network_desc.status */ 520/* Bits in network_desc.status */
@@ -2018,7 +2018,7 @@ static void drain_rx(struct net_device *dev)
2018 /* Free all the skbuffs in the Rx queue. */ 2018 /* Free all the skbuffs in the Rx queue. */
2019 for (i = 0; i < RX_RING_SIZE; i++) { 2019 for (i = 0; i < RX_RING_SIZE; i++) {
2020 np->rx_ring[i].cmd_status = 0; 2020 np->rx_ring[i].cmd_status = 0;
2021 np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */ 2021 np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
2022 if (np->rx_skbuff[i]) { 2022 if (np->rx_skbuff[i]) {
2023 pci_unmap_single(np->pci_dev, 2023 pci_unmap_single(np->pci_dev,
2024 np->rx_dma[i], buflen, 2024 np->rx_dma[i], buflen,
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index 26aa8fe1fb2d..a316dcc8a06d 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -134,10 +134,10 @@ static int fifo = 0x8; /* don't change */
134#define ni_disint() { outb(0, dev->base_addr + NI52_INTDIS); } 134#define ni_disint() { outb(0, dev->base_addr + NI52_INTDIS); }
135#define ni_enaint() { outb(0, dev->base_addr + NI52_INTENA); } 135#define ni_enaint() { outb(0, dev->base_addr + NI52_INTENA); }
136 136
137#define make32(ptr16) (p->memtop + (short) (ptr16)) 137#define make32(ptr16) ((void __iomem *)(p->memtop + (short) (ptr16)))
138#define make24(ptr32) ((unsigned long)(ptr32)) - p->base 138#define make24(ptr32) ((char __iomem *)(ptr32)) - p->base
139#define make16(ptr32) ((unsigned short) ((unsigned long)(ptr32)\ 139#define make16(ptr32) ((unsigned short) ((char __iomem *)(ptr32)\
140 - (unsigned long) p->memtop)) 140 - p->memtop))
141 141
142/******************* how to calculate the buffers ***************************** 142/******************* how to calculate the buffers *****************************
143 143
@@ -179,34 +179,35 @@ static void ni52_timeout(struct net_device *dev);
179 179
180/* helper-functions */ 180/* helper-functions */
181static int init586(struct net_device *dev); 181static int init586(struct net_device *dev);
182static int check586(struct net_device *dev, char *where, unsigned size); 182static int check586(struct net_device *dev, unsigned size);
183static void alloc586(struct net_device *dev); 183static void alloc586(struct net_device *dev);
184static void startrecv586(struct net_device *dev); 184static void startrecv586(struct net_device *dev);
185static void *alloc_rfa(struct net_device *dev, void *ptr); 185static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr);
186static void ni52_rcv_int(struct net_device *dev); 186static void ni52_rcv_int(struct net_device *dev);
187static void ni52_xmt_int(struct net_device *dev); 187static void ni52_xmt_int(struct net_device *dev);
188static void ni52_rnr_int(struct net_device *dev); 188static void ni52_rnr_int(struct net_device *dev);
189 189
190struct priv { 190struct priv {
191 struct net_device_stats stats; 191 struct net_device_stats stats;
192 unsigned long base; 192 char __iomem *base;
193 char *memtop; 193 char __iomem *mapped;
194 char __iomem *memtop;
194 spinlock_t spinlock; 195 spinlock_t spinlock;
195 int reset; 196 int reset;
196 struct rfd_struct *rfd_last, *rfd_top, *rfd_first; 197 struct rfd_struct __iomem *rfd_last, *rfd_top, *rfd_first;
197 struct scp_struct *scp; 198 struct scp_struct __iomem *scp;
198 struct iscp_struct *iscp; 199 struct iscp_struct __iomem *iscp;
199 struct scb_struct *scb; 200 struct scb_struct __iomem *scb;
200 struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS]; 201 struct tbd_struct __iomem *xmit_buffs[NUM_XMIT_BUFFS];
201#if (NUM_XMIT_BUFFS == 1) 202#if (NUM_XMIT_BUFFS == 1)
202 struct transmit_cmd_struct *xmit_cmds[2]; 203 struct transmit_cmd_struct __iomem *xmit_cmds[2];
203 struct nop_cmd_struct *nop_cmds[2]; 204 struct nop_cmd_struct __iomem *nop_cmds[2];
204#else 205#else
205 struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS]; 206 struct transmit_cmd_struct __iomem *xmit_cmds[NUM_XMIT_BUFFS];
206 struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS]; 207 struct nop_cmd_struct __iomem *nop_cmds[NUM_XMIT_BUFFS];
207#endif 208#endif
208 int nop_point, num_recv_buffs; 209 int nop_point, num_recv_buffs;
209 char *xmit_cbuffs[NUM_XMIT_BUFFS]; 210 char __iomem *xmit_cbuffs[NUM_XMIT_BUFFS];
210 int xmit_count, xmit_last; 211 int xmit_count, xmit_last;
211}; 212};
212 213
@@ -240,7 +241,8 @@ static void wait_for_scb_cmd_ruc(struct net_device *dev)
240 udelay(4); 241 udelay(4);
241 if (i == 16383) { 242 if (i == 16383) {
242 printk(KERN_ERR "%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n", 243 printk(KERN_ERR "%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",
243 dev->name, p->scb->cmd_ruc, p->scb->rus); 244 dev->name, readb(&p->scb->cmd_ruc),
245 readb(&p->scb->rus));
244 if (!p->reset) { 246 if (!p->reset) {
245 p->reset = 1; 247 p->reset = 1;
246 ni_reset586(); 248 ni_reset586();
@@ -249,9 +251,9 @@ static void wait_for_scb_cmd_ruc(struct net_device *dev)
249 } 251 }
250} 252}
251 253
252static void wait_for_stat_compl(void *p) 254static void wait_for_stat_compl(void __iomem *p)
253{ 255{
254 struct nop_cmd_struct *addr = p; 256 struct nop_cmd_struct __iomem *addr = p;
255 int i; 257 int i;
256 for (i = 0; i < 32767; i++) { 258 for (i = 0; i < 32767; i++) {
257 if (readw(&((addr)->cmd_status)) & STAT_COMPL) 259 if (readw(&((addr)->cmd_status)) & STAT_COMPL)
@@ -293,47 +295,58 @@ static int ni52_open(struct net_device *dev)
293 return 0; /* most done by init */ 295 return 0; /* most done by init */
294} 296}
295 297
298static int check_iscp(struct net_device *dev, void __iomem *addr)
299{
300 struct iscp_struct __iomem *iscp = addr;
301 struct priv *p = dev->priv;
302 memset_io(iscp, 0, sizeof(struct iscp_struct));
303
304 writel(make24(iscp), &p->scp->iscp);
305 writeb(1, &iscp->busy);
306
307 ni_reset586();
308 ni_attn586();
309 mdelay(32); /* wait a while... */
310 /* i82586 clears 'busy' after successful init */
311 if (readb(&iscp->busy))
312 return 0;
313 return 1;
314}
315
296/********************************************** 316/**********************************************
297 * Check to see if there's an 82586 out there. 317 * Check to see if there's an 82586 out there.
298 */ 318 */
299static int check586(struct net_device *dev, char *where, unsigned size) 319static int check586(struct net_device *dev, unsigned size)
300{ 320{
301 struct priv pb; 321 struct priv *p = dev->priv;
302 struct priv *p = /* (struct priv *) dev->priv*/ &pb;
303 char *iscp_addrs[2];
304 int i; 322 int i;
305 323
306 p->base = (unsigned long) isa_bus_to_virt((unsigned long)where) 324 p->mapped = ioremap(dev->mem_start, size);
307 + size - 0x01000000; 325 if (!p->mapped)
308 p->memtop = isa_bus_to_virt((unsigned long)where) + size; 326 return 0;
309 p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS); 327
310 memset_io((char *)p->scp, 0, sizeof(struct scp_struct)); 328 p->base = p->mapped + size - 0x01000000;
329 p->memtop = p->mapped + size;
330 p->scp = (struct scp_struct __iomem *)(p->base + SCP_DEFAULT_ADDRESS);
331 p->scb = (struct scb_struct __iomem *) p->mapped;
332 p->iscp = (struct iscp_struct __iomem *)p->scp - 1;
333 memset_io(p->scp, 0, sizeof(struct scp_struct));
311 for (i = 0; i < sizeof(struct scp_struct); i++) 334 for (i = 0; i < sizeof(struct scp_struct); i++)
312 /* memory was writeable? */ 335 /* memory was writeable? */
313 if (readb((char *)p->scp + i)) 336 if (readb((char __iomem *)p->scp + i))
314 return 0; 337 goto Enodev;
315 writeb(SYSBUSVAL, &p->scp->sysbus); /* 1 = 8Bit-Bus, 0 = 16 Bit */ 338 writeb(SYSBUSVAL, &p->scp->sysbus); /* 1 = 8Bit-Bus, 0 = 16 Bit */
316 if (readb(&p->scp->sysbus) != SYSBUSVAL) 339 if (readb(&p->scp->sysbus) != SYSBUSVAL)
317 return 0; 340 goto Enodev;
318
319 iscp_addrs[0] = isa_bus_to_virt((unsigned long)where);
320 iscp_addrs[1] = (char *) p->scp - sizeof(struct iscp_struct);
321 341
322 for (i = 0; i < 2; i++) { 342 if (!check_iscp(dev, p->mapped))
323 p->iscp = (struct iscp_struct *) iscp_addrs[i]; 343 goto Enodev;
324 memset_io((char *)p->iscp, 0, sizeof(struct iscp_struct)); 344 if (!check_iscp(dev, p->iscp))
325 345 goto Enodev;
326 writel(make24(p->iscp), &p->scp->iscp);
327 writeb(1, &p->iscp->busy);
328
329 ni_reset586();
330 ni_attn586();
331 mdelay(32); /* wait a while... */
332 /* i82586 clears 'busy' after successful init */
333 if (readb(&p->iscp->busy))
334 return 0;
335 }
336 return 1; 346 return 1;
347Enodev:
348 iounmap(p->mapped);
349 return 0;
337} 350}
338 351
339/****************************************************************** 352/******************************************************************
@@ -346,13 +359,6 @@ static void alloc586(struct net_device *dev)
346 ni_reset586(); 359 ni_reset586();
347 mdelay(32); 360 mdelay(32);
348 361
349 spin_lock_init(&p->spinlock);
350
351 p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS);
352 p->scb = (struct scb_struct *) isa_bus_to_virt(dev->mem_start);
353 p->iscp = (struct iscp_struct *)
354 ((char *)p->scp - sizeof(struct iscp_struct));
355
356 memset_io(p->iscp, 0, sizeof(struct iscp_struct)); 362 memset_io(p->iscp, 0, sizeof(struct iscp_struct));
357 memset_io(p->scp , 0, sizeof(struct scp_struct)); 363 memset_io(p->scp , 0, sizeof(struct scp_struct));
358 364
@@ -371,7 +377,7 @@ static void alloc586(struct net_device *dev)
371 377
372 p->reset = 0; 378 p->reset = 0;
373 379
374 memset_io((char *)p->scb, 0, sizeof(struct scb_struct)); 380 memset_io(p->scb, 0, sizeof(struct scb_struct));
375} 381}
376 382
377/* set: io,irq,memstart,memend or set it when calling insmod */ 383/* set: io,irq,memstart,memend or set it when calling insmod */
@@ -387,12 +393,15 @@ struct net_device * __init ni52_probe(int unit)
387{ 393{
388 struct net_device *dev = alloc_etherdev(sizeof(struct priv)); 394 struct net_device *dev = alloc_etherdev(sizeof(struct priv));
389 static int ports[] = {0x300, 0x280, 0x360 , 0x320 , 0x340, 0}; 395 static int ports[] = {0x300, 0x280, 0x360 , 0x320 , 0x340, 0};
396 struct priv *p;
390 int *port; 397 int *port;
391 int err = 0; 398 int err = 0;
392 399
393 if (!dev) 400 if (!dev)
394 return ERR_PTR(-ENOMEM); 401 return ERR_PTR(-ENOMEM);
395 402
403 p = dev->priv;
404
396 if (unit >= 0) { 405 if (unit >= 0) {
397 sprintf(dev->name, "eth%d", unit); 406 sprintf(dev->name, "eth%d", unit);
398 netdev_boot_setup_check(dev); 407 netdev_boot_setup_check(dev);
@@ -427,6 +436,7 @@ got_it:
427 goto out1; 436 goto out1;
428 return dev; 437 return dev;
429out1: 438out1:
439 iounmap(p->mapped);
430 release_region(dev->base_addr, NI52_TOTAL_SIZE); 440 release_region(dev->base_addr, NI52_TOTAL_SIZE);
431out: 441out:
432 free_netdev(dev); 442 free_netdev(dev);
@@ -436,12 +446,15 @@ out:
436static int __init ni52_probe1(struct net_device *dev, int ioaddr) 446static int __init ni52_probe1(struct net_device *dev, int ioaddr)
437{ 447{
438 int i, size, retval; 448 int i, size, retval;
449 struct priv *priv = dev->priv;
439 450
440 dev->base_addr = ioaddr; 451 dev->base_addr = ioaddr;
441 dev->irq = irq; 452 dev->irq = irq;
442 dev->mem_start = memstart; 453 dev->mem_start = memstart;
443 dev->mem_end = memend; 454 dev->mem_end = memend;
444 455
456 spin_lock_init(&priv->spinlock);
457
445 if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME)) 458 if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME))
446 return -EBUSY; 459 return -EBUSY;
447 460
@@ -474,7 +487,7 @@ static int __init ni52_probe1(struct net_device *dev, int ioaddr)
474 retval = -ENODEV; 487 retval = -ENODEV;
475 goto out; 488 goto out;
476 } 489 }
477 if (!check586(dev, (char *)dev->mem_start, size)) { 490 if (!check586(dev, size)) {
478 printk(KERN_ERR "?memcheck, Can't find memory at 0x%lx with size %d!\n", dev->mem_start, size); 491 printk(KERN_ERR "?memcheck, Can't find memory at 0x%lx with size %d!\n", dev->mem_start, size);
479 retval = -ENODEV; 492 retval = -ENODEV;
480 goto out; 493 goto out;
@@ -483,9 +496,9 @@ static int __init ni52_probe1(struct net_device *dev, int ioaddr)
483 if (dev->mem_start != 0) { 496 if (dev->mem_start != 0) {
484 /* no auto-mem-probe */ 497 /* no auto-mem-probe */
485 size = 0x4000; /* check for 16K mem */ 498 size = 0x4000; /* check for 16K mem */
486 if (!check586(dev, (char *) dev->mem_start, size)) { 499 if (!check586(dev, size)) {
487 size = 0x2000; /* check for 8K mem */ 500 size = 0x2000; /* check for 8K mem */
488 if (!check586(dev, (char *)dev->mem_start, size)) { 501 if (!check586(dev, size)) {
489 printk(KERN_ERR "?memprobe, Can't find memory at 0x%lx!\n", dev->mem_start); 502 printk(KERN_ERR "?memprobe, Can't find memory at 0x%lx!\n", dev->mem_start);
490 retval = -ENODEV; 503 retval = -ENODEV;
491 goto out; 504 goto out;
@@ -504,11 +517,11 @@ static int __init ni52_probe1(struct net_device *dev, int ioaddr)
504 } 517 }
505 dev->mem_start = memaddrs[i]; 518 dev->mem_start = memaddrs[i];
506 size = 0x2000; /* check for 8K mem */ 519 size = 0x2000; /* check for 8K mem */
507 if (check586(dev, (char *)dev->mem_start, size)) 520 if (check586(dev, size))
508 /* 8K-check */ 521 /* 8K-check */
509 break; 522 break;
510 size = 0x4000; /* check for 16K mem */ 523 size = 0x4000; /* check for 16K mem */
511 if (check586(dev, (char *)dev->mem_start, size)) 524 if (check586(dev, size))
512 /* 16K-check */ 525 /* 16K-check */
513 break; 526 break;
514 } 527 }
@@ -517,19 +530,13 @@ static int __init ni52_probe1(struct net_device *dev, int ioaddr)
517 dev->mem_end = dev->mem_start + size; 530 dev->mem_end = dev->mem_start + size;
518#endif 531#endif
519 532
520 memset((char *)dev->priv, 0, sizeof(struct priv));
521
522 ((struct priv *)(dev->priv))->memtop =
523 isa_bus_to_virt(dev->mem_start) + size;
524 ((struct priv *)(dev->priv))->base = (unsigned long)
525 isa_bus_to_virt(dev->mem_start) + size - 0x01000000;
526 alloc586(dev); 533 alloc586(dev);
527 534
528 /* set number of receive-buffs according to memsize */ 535 /* set number of receive-buffs according to memsize */
529 if (size == 0x2000) 536 if (size == 0x2000)
530 ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_8; 537 priv->num_recv_buffs = NUM_RECV_BUFFS_8;
531 else 538 else
532 ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_16; 539 priv->num_recv_buffs = NUM_RECV_BUFFS_16;
533 540
534 printk(KERN_DEBUG "Memaddr: 0x%lx, Memsize: %d, ", 541 printk(KERN_DEBUG "Memaddr: 0x%lx, Memsize: %d, ",
535 dev->mem_start, size); 542 dev->mem_start, size);
@@ -546,6 +553,7 @@ static int __init ni52_probe1(struct net_device *dev, int ioaddr)
546 if (!dev->irq) { 553 if (!dev->irq) {
547 printk("?autoirq, Failed to detect IRQ line!\n"); 554 printk("?autoirq, Failed to detect IRQ line!\n");
548 retval = -EAGAIN; 555 retval = -EAGAIN;
556 iounmap(priv->mapped);
549 goto out; 557 goto out;
550 } 558 }
551 printk("IRQ %d (autodetected).\n", dev->irq); 559 printk("IRQ %d (autodetected).\n", dev->irq);
@@ -578,19 +586,19 @@ out:
578 586
579static int init586(struct net_device *dev) 587static int init586(struct net_device *dev)
580{ 588{
581 void *ptr; 589 void __iomem *ptr;
582 int i, result = 0; 590 int i, result = 0;
583 struct priv *p = (struct priv *)dev->priv; 591 struct priv *p = (struct priv *)dev->priv;
584 struct configure_cmd_struct *cfg_cmd; 592 struct configure_cmd_struct __iomem *cfg_cmd;
585 struct iasetup_cmd_struct *ias_cmd; 593 struct iasetup_cmd_struct __iomem *ias_cmd;
586 struct tdr_cmd_struct *tdr_cmd; 594 struct tdr_cmd_struct __iomem *tdr_cmd;
587 struct mcsetup_cmd_struct *mc_cmd; 595 struct mcsetup_cmd_struct __iomem *mc_cmd;
588 struct dev_mc_list *dmi = dev->mc_list; 596 struct dev_mc_list *dmi = dev->mc_list;
589 int num_addrs = dev->mc_count; 597 int num_addrs = dev->mc_count;
590 598
591 ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct)); 599 ptr = p->scb + 1;
592 600
593 cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */ 601 cfg_cmd = ptr; /* configure-command */
594 writew(0, &cfg_cmd->cmd_status); 602 writew(0, &cfg_cmd->cmd_status);
595 writew(CMD_CONFIGURE | CMD_LAST, &cfg_cmd->cmd_cmd); 603 writew(CMD_CONFIGURE | CMD_LAST, &cfg_cmd->cmd_cmd);
596 writew(0xFFFF, &cfg_cmd->cmd_link); 604 writew(0xFFFF, &cfg_cmd->cmd_link);
@@ -609,7 +617,7 @@ static int init586(struct net_device *dev)
609 writeb(0xf2, &cfg_cmd->time_high); 617 writeb(0xf2, &cfg_cmd->time_high);
610 writeb(0x00, &cfg_cmd->promisc);; 618 writeb(0x00, &cfg_cmd->promisc);;
611 if (dev->flags & IFF_ALLMULTI) { 619 if (dev->flags & IFF_ALLMULTI) {
612 int len = ((char *) p->iscp - (char *) ptr - 8) / 6; 620 int len = ((char __iomem *)p->iscp - (char __iomem *)ptr - 8) / 6;
613 if (num_addrs > len) { 621 if (num_addrs > len) {
614 printk(KERN_ERR "%s: switching to promisc. mode\n", 622 printk(KERN_ERR "%s: switching to promisc. mode\n",
615 dev->name); 623 dev->name);
@@ -620,7 +628,7 @@ static int init586(struct net_device *dev)
620 writeb(0x01, &cfg_cmd->promisc); 628 writeb(0x01, &cfg_cmd->promisc);
621 writeb(0x00, &cfg_cmd->carr_coll); 629 writeb(0x00, &cfg_cmd->carr_coll);
622 writew(make16(cfg_cmd), &p->scb->cbl_offset); 630 writew(make16(cfg_cmd), &p->scb->cbl_offset);
623 writew(0, &p->scb->cmd_ruc); 631 writeb(0, &p->scb->cmd_ruc);
624 632
625 writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */ 633 writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
626 ni_attn586(); 634 ni_attn586();
@@ -638,13 +646,13 @@ static int init586(struct net_device *dev)
638 * individual address setup 646 * individual address setup
639 */ 647 */
640 648
641 ias_cmd = (struct iasetup_cmd_struct *)ptr; 649 ias_cmd = ptr;
642 650
643 writew(0, &ias_cmd->cmd_status); 651 writew(0, &ias_cmd->cmd_status);
644 writew(CMD_IASETUP | CMD_LAST, &ias_cmd->cmd_cmd); 652 writew(CMD_IASETUP | CMD_LAST, &ias_cmd->cmd_cmd);
645 writew(0xffff, &ias_cmd->cmd_link); 653 writew(0xffff, &ias_cmd->cmd_link);
646 654
647 memcpy_toio((char *)&ias_cmd->iaddr, (char *)dev->dev_addr, ETH_ALEN); 655 memcpy_toio(&ias_cmd->iaddr, (char *)dev->dev_addr, ETH_ALEN);
648 656
649 writew(make16(ias_cmd), &p->scb->cbl_offset); 657 writew(make16(ias_cmd), &p->scb->cbl_offset);
650 658
@@ -663,7 +671,7 @@ static int init586(struct net_device *dev)
663 * TDR, wire check .. e.g. no resistor e.t.c 671 * TDR, wire check .. e.g. no resistor e.t.c
664 */ 672 */
665 673
666 tdr_cmd = (struct tdr_cmd_struct *)ptr; 674 tdr_cmd = ptr;
667 675
668 writew(0, &tdr_cmd->cmd_status); 676 writew(0, &tdr_cmd->cmd_status);
669 writew(CMD_TDR | CMD_LAST, &tdr_cmd->cmd_cmd); 677 writew(CMD_TDR | CMD_LAST, &tdr_cmd->cmd_cmd);
@@ -707,14 +715,14 @@ static int init586(struct net_device *dev)
707 * Multicast setup 715 * Multicast setup
708 */ 716 */
709 if (num_addrs && !(dev->flags & IFF_PROMISC)) { 717 if (num_addrs && !(dev->flags & IFF_PROMISC)) {
710 mc_cmd = (struct mcsetup_cmd_struct *) ptr; 718 mc_cmd = ptr;
711 writew(0, &mc_cmd->cmd_status); 719 writew(0, &mc_cmd->cmd_status);
712 writew(CMD_MCSETUP | CMD_LAST, &mc_cmd->cmd_cmd); 720 writew(CMD_MCSETUP | CMD_LAST, &mc_cmd->cmd_cmd);
713 writew(0xffff, &mc_cmd->cmd_link); 721 writew(0xffff, &mc_cmd->cmd_link);
714 writew(num_addrs * 6, &mc_cmd->mc_cnt); 722 writew(num_addrs * 6, &mc_cmd->mc_cnt);
715 723
716 for (i = 0; i < num_addrs; i++, dmi = dmi->next) 724 for (i = 0; i < num_addrs; i++, dmi = dmi->next)
717 memcpy_toio((char *) mc_cmd->mc_list[i], 725 memcpy_toio(mc_cmd->mc_list[i],
718 dmi->dmi_addr, 6); 726 dmi->dmi_addr, 6);
719 727
720 writew(make16(mc_cmd), &p->scb->cbl_offset); 728 writew(make16(mc_cmd), &p->scb->cbl_offset);
@@ -733,43 +741,43 @@ static int init586(struct net_device *dev)
733 */ 741 */
734#if (NUM_XMIT_BUFFS == 1) 742#if (NUM_XMIT_BUFFS == 1)
735 for (i = 0; i < 2; i++) { 743 for (i = 0; i < 2; i++) {
736 p->nop_cmds[i] = (struct nop_cmd_struct *)ptr; 744 p->nop_cmds[i] = ptr;
737 writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd); 745 writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
738 writew(0, &p->nop_cmds[i]->cmd_status); 746 writew(0, &p->nop_cmds[i]->cmd_status);
739 writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link); 747 writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
740 ptr = (char *) ptr + sizeof(struct nop_cmd_struct); 748 ptr = ptr + sizeof(struct nop_cmd_struct);
741 } 749 }
742#else 750#else
743 for (i = 0; i < NUM_XMIT_BUFFS; i++) { 751 for (i = 0; i < NUM_XMIT_BUFFS; i++) {
744 p->nop_cmds[i] = (struct nop_cmd_struct *)ptr; 752 p->nop_cmds[i] = ptr;
745 writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd); 753 writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
746 writew(0, &p->nop_cmds[i]->cmd_status); 754 writew(0, &p->nop_cmds[i]->cmd_status);
747 writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link); 755 writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
748 ptr = (char *) ptr + sizeof(struct nop_cmd_struct); 756 ptr = ptr + sizeof(struct nop_cmd_struct);
749 } 757 }
750#endif 758#endif
751 759
752 ptr = alloc_rfa(dev, (void *)ptr); /* init receive-frame-area */ 760 ptr = alloc_rfa(dev, ptr); /* init receive-frame-area */
753 761
754 /* 762 /*
755 * alloc xmit-buffs / init xmit_cmds 763 * alloc xmit-buffs / init xmit_cmds
756 */ 764 */
757 for (i = 0; i < NUM_XMIT_BUFFS; i++) { 765 for (i = 0; i < NUM_XMIT_BUFFS; i++) {
758 /* Transmit cmd/buff 0 */ 766 /* Transmit cmd/buff 0 */
759 p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; 767 p->xmit_cmds[i] = ptr;
760 ptr = (char *) ptr + sizeof(struct transmit_cmd_struct); 768 ptr = ptr + sizeof(struct transmit_cmd_struct);
761 p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */ 769 p->xmit_cbuffs[i] = ptr; /* char-buffs */
762 ptr = (char *) ptr + XMIT_BUFF_SIZE; 770 ptr = ptr + XMIT_BUFF_SIZE;
763 p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */ 771 p->xmit_buffs[i] = ptr; /* TBD */
764 ptr = (char *) ptr + sizeof(struct tbd_struct); 772 ptr = ptr + sizeof(struct tbd_struct);
765 if ((void *)ptr > (void *)p->iscp) { 773 if ((void __iomem *)ptr > (void __iomem *)p->iscp) {
766 printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n", 774 printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n",
767 dev->name); 775 dev->name);
768 return 1; 776 return 1;
769 } 777 }
770 memset_io((char *)(p->xmit_cmds[i]), 0, 778 memset_io(p->xmit_cmds[i], 0,
771 sizeof(struct transmit_cmd_struct)); 779 sizeof(struct transmit_cmd_struct));
772 memset_io((char *)(p->xmit_buffs[i]), 0, 780 memset_io(p->xmit_buffs[i], 0,
773 sizeof(struct tbd_struct)); 781 sizeof(struct tbd_struct));
774 writew(make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]), 782 writew(make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]),
775 &p->xmit_cmds[i]->cmd_link); 783 &p->xmit_cmds[i]->cmd_link);
@@ -816,14 +824,14 @@ static int init586(struct net_device *dev)
816 * It sets up the Receive Frame Area (RFA). 824 * It sets up the Receive Frame Area (RFA).
817 */ 825 */
818 826
819static void *alloc_rfa(struct net_device *dev, void *ptr) 827static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr)
820{ 828{
821 struct rfd_struct *rfd = (struct rfd_struct *)ptr; 829 struct rfd_struct __iomem *rfd = ptr;
822 struct rbd_struct *rbd; 830 struct rbd_struct __iomem *rbd;
823 int i; 831 int i;
824 struct priv *p = (struct priv *) dev->priv; 832 struct priv *p = (struct priv *) dev->priv;
825 833
826 memset_io((char *) rfd, 0, 834 memset_io(rfd, 0,
827 sizeof(struct rfd_struct) * (p->num_recv_buffs + rfdadd)); 835 sizeof(struct rfd_struct) * (p->num_recv_buffs + rfdadd));
828 p->rfd_first = rfd; 836 p->rfd_first = rfd;
829 837
@@ -835,20 +843,19 @@ static void *alloc_rfa(struct net_device *dev, void *ptr)
835 /* RU suspend */ 843 /* RU suspend */
836 writeb(RFD_SUSP, &rfd[p->num_recv_buffs-1+rfdadd].last); 844 writeb(RFD_SUSP, &rfd[p->num_recv_buffs-1+rfdadd].last);
837 845
838 ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd)); 846 ptr = rfd + (p->num_recv_buffs + rfdadd);
839 847
840 rbd = (struct rbd_struct *) ptr; 848 rbd = ptr;
841 ptr = (void *) (rbd + p->num_recv_buffs); 849 ptr = rbd + p->num_recv_buffs;
842 850
843 /* clr descriptors */ 851 /* clr descriptors */
844 memset_io((char *)rbd, 0, 852 memset_io(rbd, 0, sizeof(struct rbd_struct) * (p->num_recv_buffs));
845 sizeof(struct rbd_struct) * (p->num_recv_buffs));
846 853
847 for (i = 0; i < p->num_recv_buffs; i++) { 854 for (i = 0; i < p->num_recv_buffs; i++) {
848 writew(make16(rbd + (i+1) % p->num_recv_buffs), &rbd[i].next); 855 writew(make16(rbd + (i+1) % p->num_recv_buffs), &rbd[i].next);
849 writew(RECV_BUFF_SIZE, &rbd[i].size); 856 writew(RECV_BUFF_SIZE, &rbd[i].size);
850 writel(make24(ptr), &rbd[i].buffer); 857 writel(make24(ptr), &rbd[i].buffer);
851 ptr = (char *) ptr + RECV_BUFF_SIZE; 858 ptr = ptr + RECV_BUFF_SIZE;
852 } 859 }
853 p->rfd_top = p->rfd_first; 860 p->rfd_top = p->rfd_first;
854 p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd); 861 p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd);
@@ -892,7 +899,7 @@ static irqreturn_t ni52_interrupt(int irq, void *dev_id)
892 if (readb(&p->scb->rus) & RU_SUSPEND) { 899 if (readb(&p->scb->rus) & RU_SUSPEND) {
893 /* special case: RU_SUSPEND */ 900 /* special case: RU_SUSPEND */
894 wait_for_scb_cmd(dev); 901 wait_for_scb_cmd(dev);
895 p->scb->cmd_ruc = RUC_RESUME; 902 writeb(RUC_RESUME, &p->scb->cmd_ruc);
896 ni_attn586(); 903 ni_attn586();
897 wait_for_scb_cmd_ruc(dev); 904 wait_for_scb_cmd_ruc(dev);
898 } else { 905 } else {
@@ -919,7 +926,7 @@ static irqreturn_t ni52_interrupt(int irq, void *dev_id)
919 926
920 /* Wait for ack. (ni52_xmt_int can be faster than ack!!) */ 927 /* Wait for ack. (ni52_xmt_int can be faster than ack!!) */
921 wait_for_scb_cmd(dev); 928 wait_for_scb_cmd(dev);
922 if (p->scb->cmd_cuc) { /* timed out? */ 929 if (readb(&p->scb->cmd_cuc)) { /* timed out? */
923 printk(KERN_ERR "%s: Acknowledge timed out.\n", 930 printk(KERN_ERR "%s: Acknowledge timed out.\n",
924 dev->name); 931 dev->name);
925 ni_disint(); 932 ni_disint();
@@ -942,14 +949,14 @@ static void ni52_rcv_int(struct net_device *dev)
942 int status, cnt = 0; 949 int status, cnt = 0;
943 unsigned short totlen; 950 unsigned short totlen;
944 struct sk_buff *skb; 951 struct sk_buff *skb;
945 struct rbd_struct *rbd; 952 struct rbd_struct __iomem *rbd;
946 struct priv *p = (struct priv *)dev->priv; 953 struct priv *p = (struct priv *)dev->priv;
947 954
948 if (debuglevel > 0) 955 if (debuglevel > 0)
949 printk("R"); 956 printk("R");
950 957
951 for (; (status = readb(&p->rfd_top->stat_high)) & RFD_COMPL;) { 958 for (; (status = readb(&p->rfd_top->stat_high)) & RFD_COMPL;) {
952 rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset); 959 rbd = make32(readw(&p->rfd_top->rbd_offset));
953 if (status & RFD_OK) { /* frame received without error? */ 960 if (status & RFD_OK) { /* frame received without error? */
954 totlen = readw(&rbd->status); 961 totlen = readw(&rbd->status);
955 if (totlen & RBD_LAST) { 962 if (totlen & RBD_LAST) {
@@ -960,7 +967,7 @@ static void ni52_rcv_int(struct net_device *dev)
960 if (skb != NULL) { 967 if (skb != NULL) {
961 skb_reserve(skb, 2); 968 skb_reserve(skb, 2);
962 skb_put(skb, totlen); 969 skb_put(skb, totlen);
963 skb_copy_to_linear_data(skb, (char *)p->base + (unsigned long) rbd->buffer, totlen); 970 memcpy_fromio(skb->data, p->base + readl(&rbd->buffer), totlen);
964 skb->protocol = eth_type_trans(skb, dev); 971 skb->protocol = eth_type_trans(skb, dev);
965 netif_rx(skb); 972 netif_rx(skb);
966 dev->last_rx = jiffies; 973 dev->last_rx = jiffies;
@@ -979,7 +986,7 @@ static void ni52_rcv_int(struct net_device *dev)
979 break; 986 break;
980 } 987 }
981 writew(0, &rbd->status); 988 writew(0, &rbd->status);
982 rbd = (struct rbd_struct *) make32(readl(&rbd->next)); 989 rbd = make32(readw(&rbd->next));
983 } 990 }
984 totlen += rstat & RBD_MASK; 991 totlen += rstat & RBD_MASK;
985 writew(0, &rbd->status); 992 writew(0, &rbd->status);
@@ -997,7 +1004,7 @@ static void ni52_rcv_int(struct net_device *dev)
997 writew(0xffff, &p->rfd_top->rbd_offset); 1004 writew(0xffff, &p->rfd_top->rbd_offset);
998 writeb(0, &p->rfd_last->last); /* delete RFD_SUSP */ 1005 writeb(0, &p->rfd_last->last); /* delete RFD_SUSP */
999 p->rfd_last = p->rfd_top; 1006 p->rfd_last = p->rfd_top;
1000 p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */ 1007 p->rfd_top = make32(readw(&p->rfd_top->next)); /* step to next RFD */
1001 writew(make16(p->rfd_top), &p->scb->rfa_offset); 1008 writew(make16(p->rfd_top), &p->scb->rfa_offset);
1002 1009
1003 if (debuglevel > 0) 1010 if (debuglevel > 0)
@@ -1042,11 +1049,12 @@ static void ni52_rnr_int(struct net_device *dev)
1042 ni_attn586(); 1049 ni_attn586();
1043 wait_for_scb_cmd_ruc(dev); /* wait for accept cmd. */ 1050 wait_for_scb_cmd_ruc(dev); /* wait for accept cmd. */
1044 1051
1045 alloc_rfa(dev, (char *)p->rfd_first); 1052 alloc_rfa(dev, p->rfd_first);
1046 /* maybe add a check here, before restarting the RU */ 1053 /* maybe add a check here, before restarting the RU */
1047 startrecv586(dev); /* restart RU */ 1054 startrecv586(dev); /* restart RU */
1048 1055
1049 printk(KERN_ERR "%s: Receive-Unit restarted. Status: %04x\n", dev->name, p->scb->rus); 1056 printk(KERN_ERR "%s: Receive-Unit restarted. Status: %04x\n",
1057 dev->name, readb(&p->scb->rus));
1050 1058
1051} 1059}
1052 1060
@@ -1178,12 +1186,11 @@ static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev)
1178 1186
1179 netif_stop_queue(dev); 1187 netif_stop_queue(dev);
1180 1188
1181 skb_copy_from_linear_data(skb, (char *)p->xmit_cbuffs[p->xmit_count], 1189 memcpy_toio(p->xmit_cbuffs[p->xmit_count], skb->data, skb->len);
1182 skb->len);
1183 len = skb->len; 1190 len = skb->len;
1184 if (len < ETH_ZLEN) { 1191 if (len < ETH_ZLEN) {
1185 len = ETH_ZLEN; 1192 len = ETH_ZLEN;
1186 memset((char *)p->xmit_cbuffs[p->xmit_count]+skb->len, 0, 1193 memset_io(p->xmit_cbuffs[p->xmit_count]+skb->len, 0,
1187 len - skb->len); 1194 len - skb->len);
1188 } 1195 }
1189 1196
@@ -1191,14 +1198,14 @@ static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev)
1191# ifdef NO_NOPCOMMANDS 1198# ifdef NO_NOPCOMMANDS
1192 1199
1193#ifdef DEBUG 1200#ifdef DEBUG
1194 if (p->scb->cus & CU_ACTIVE) { 1201 if (readb(&p->scb->cus) & CU_ACTIVE) {
1195 printk(KERN_ERR "%s: Hmmm .. CU is still running and we wanna send a new packet.\n", dev->name); 1202 printk(KERN_ERR "%s: Hmmm .. CU is still running and we wanna send a new packet.\n", dev->name);
1196 printk(KERN_ERR "%s: stat: %04x %04x\n", 1203 printk(KERN_ERR "%s: stat: %04x %04x\n",
1197 dev->name, readb(&p->scb->cus), 1204 dev->name, readb(&p->scb->cus),
1198 readw(&p->xmit_cmds[0]->cmd_status)); 1205 readw(&p->xmit_cmds[0]->cmd_status));
1199 } 1206 }
1200#endif 1207#endif
1201 writew(TBD_LAST | len, &p->xmit_buffs[0]->size);; 1208 writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
1202 for (i = 0; i < 16; i++) { 1209 for (i = 0; i < 16; i++) {
1203 writew(0, &p->xmit_cmds[0]->cmd_status); 1210 writew(0, &p->xmit_cmds[0]->cmd_status);
1204 wait_for_scb_cmd(dev); 1211 wait_for_scb_cmd(dev);
@@ -1330,7 +1337,9 @@ int __init init_module(void)
1330 1337
1331void __exit cleanup_module(void) 1338void __exit cleanup_module(void)
1332{ 1339{
1340 struct priv *p = dev_ni52->priv;
1333 unregister_netdev(dev_ni52); 1341 unregister_netdev(dev_ni52);
1342 iounmap(p->mapped);
1334 release_region(dev_ni52->base_addr, NI52_TOTAL_SIZE); 1343 release_region(dev_ni52->base_addr, NI52_TOTAL_SIZE);
1335 free_netdev(dev_ni52); 1344 free_netdev(dev_ni52);
1336} 1345}
diff --git a/drivers/net/ni52.h b/drivers/net/ni52.h
index 1f28a4d1a319..0a03b2883327 100644
--- a/drivers/net/ni52.h
+++ b/drivers/net/ni52.h
@@ -39,8 +39,8 @@ struct scp_struct
39 u16 zero_dum0; /* has to be zero */ 39 u16 zero_dum0; /* has to be zero */
40 u8 sysbus; /* 0=16Bit,1=8Bit */ 40 u8 sysbus; /* 0=16Bit,1=8Bit */
41 u8 zero_dum1; /* has to be zero for 586 */ 41 u8 zero_dum1; /* has to be zero for 586 */
42 u8 zero_dum2; 42 u16 zero_dum2;
43 u8 zero_dum3; 43 u16 zero_dum3;
44 u32 iscp; /* pointer to the iscp-block */ 44 u32 iscp; /* pointer to the iscp-block */
45}; 45};
46 46
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 5b80358af658..f5310ed3760d 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -99,6 +99,41 @@ static int bcm54xx_config_intr(struct phy_device *phydev)
99 return err; 99 return err;
100} 100}
101 101
102static int bcm5481_config_aneg(struct phy_device *phydev)
103{
104 int ret;
105
 106 /* Run autonegotiation first. */
107 ret = genphy_config_aneg(phydev);
108
109 /* Then we can set up the delay. */
110 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
111 u16 reg;
112
113 /*
114 * There is no BCM5481 specification available, so down
115 * here is everything we know about "register 0x18". This
 116 * at least helps BCM5481 to successfully receive packets
117 * on MPC8360E-RDK board. Peter Barada <peterb@logicpd.com>
118 * says: "This sets delay between the RXD and RXC signals
119 * instead of using trace lengths to achieve timing".
120 */
121
122 /* Set RDX clk delay. */
123 reg = 0x7 | (0x7 << 12);
124 phy_write(phydev, 0x18, reg);
125
126 reg = phy_read(phydev, 0x18);
127 /* Set RDX-RXC skew. */
128 reg |= (1 << 8);
129 /* Write bits 14:0. */
130 reg |= (1 << 15);
131 phy_write(phydev, 0x18, reg);
132 }
133
134 return ret;
135}
136
102static struct phy_driver bcm5411_driver = { 137static struct phy_driver bcm5411_driver = {
103 .phy_id = 0x00206070, 138 .phy_id = 0x00206070,
104 .phy_id_mask = 0xfffffff0, 139 .phy_id_mask = 0xfffffff0,
@@ -141,8 +176,22 @@ static struct phy_driver bcm5461_driver = {
141 .driver = { .owner = THIS_MODULE }, 176 .driver = { .owner = THIS_MODULE },
142}; 177};
143 178
179static struct phy_driver bcm5481_driver = {
180 .phy_id = 0x0143bca0,
181 .phy_id_mask = 0xfffffff0,
182 .name = "Broadcom BCM5481",
183 .features = PHY_GBIT_FEATURES,
184 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
185 .config_init = bcm54xx_config_init,
186 .config_aneg = bcm5481_config_aneg,
187 .read_status = genphy_read_status,
188 .ack_interrupt = bcm54xx_ack_interrupt,
189 .config_intr = bcm54xx_config_intr,
190 .driver = { .owner = THIS_MODULE },
191};
192
144static struct phy_driver bcm5482_driver = { 193static struct phy_driver bcm5482_driver = {
145 .phy_id = 0x0143bcb0, 194 .phy_id = 0x0143bcb0,
146 .phy_id_mask = 0xfffffff0, 195 .phy_id_mask = 0xfffffff0,
147 .name = "Broadcom BCM5482", 196 .name = "Broadcom BCM5482",
148 .features = PHY_GBIT_FEATURES, 197 .features = PHY_GBIT_FEATURES,
@@ -168,12 +217,17 @@ static int __init broadcom_init(void)
168 ret = phy_driver_register(&bcm5461_driver); 217 ret = phy_driver_register(&bcm5461_driver);
169 if (ret) 218 if (ret)
170 goto out_5461; 219 goto out_5461;
220 ret = phy_driver_register(&bcm5481_driver);
221 if (ret)
222 goto out_5481;
171 ret = phy_driver_register(&bcm5482_driver); 223 ret = phy_driver_register(&bcm5482_driver);
172 if (ret) 224 if (ret)
173 goto out_5482; 225 goto out_5482;
174 return ret; 226 return ret;
175 227
176out_5482: 228out_5482:
229 phy_driver_unregister(&bcm5481_driver);
230out_5481:
177 phy_driver_unregister(&bcm5461_driver); 231 phy_driver_unregister(&bcm5461_driver);
178out_5461: 232out_5461:
179 phy_driver_unregister(&bcm5421_driver); 233 phy_driver_unregister(&bcm5421_driver);
@@ -186,6 +240,7 @@ out_5411:
186static void __exit broadcom_exit(void) 240static void __exit broadcom_exit(void)
187{ 241{
188 phy_driver_unregister(&bcm5482_driver); 242 phy_driver_unregister(&bcm5482_driver);
243 phy_driver_unregister(&bcm5481_driver);
189 phy_driver_unregister(&bcm5461_driver); 244 phy_driver_unregister(&bcm5461_driver);
190 phy_driver_unregister(&bcm5421_driver); 245 phy_driver_unregister(&bcm5421_driver);
191 phy_driver_unregister(&bcm5411_driver); 246 phy_driver_unregister(&bcm5411_driver);
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index a6aeb9d60443..b7f7b2227d56 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2472,8 +2472,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
2472 2472
2473 if (seg_cnt == 1) { 2473 if (seg_cnt == 1) {
2474 /* Terminate the last segment. */ 2474 /* Terminate the last segment. */
2475 oal_entry->len = 2475 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2476 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2477 } else { 2476 } else {
2478 oal = tx_cb->oal; 2477 oal = tx_cb->oal;
2479 for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) { 2478 for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) {
@@ -2530,8 +2529,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
2530 frag->size); 2529 frag->size);
2531 } 2530 }
2532 /* Terminate the last segment. */ 2531 /* Terminate the last segment. */
2533 oal_entry->len = 2532 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2534 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2535 } 2533 }
2536 2534
2537 return NETDEV_TX_OK; 2535 return NETDEV_TX_OK;
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 6d8e5c4cf858..86f1228c9fec 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -4267,11 +4267,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4267 txdp->Control_1 |= TXD_UFO_MSS(ufo_size); 4267 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4268 txdp->Control_1 |= TXD_BUFFER0_SIZE(8); 4268 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4269#ifdef __BIG_ENDIAN 4269#ifdef __BIG_ENDIAN
4270 /* both variants do cpu_to_be64(be32_to_cpu(...)) */
4270 fifo->ufo_in_band_v[put_off] = 4271 fifo->ufo_in_band_v[put_off] =
4271 (u64)skb_shinfo(skb)->ip6_frag_id; 4272 (__force u64)skb_shinfo(skb)->ip6_frag_id;
4272#else 4273#else
4273 fifo->ufo_in_band_v[put_off] = 4274 fifo->ufo_in_band_v[put_off] =
4274 (u64)skb_shinfo(skb)->ip6_frag_id << 32; 4275 (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4275#endif 4276#endif
4276 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v; 4277 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4277 txdp->Buffer_Pointer = pci_map_single(sp->pdev, 4278 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
@@ -7089,11 +7090,11 @@ static int s2io_add_isr(struct s2io_nic * sp)
7089 if(!(sp->msix_info[i].addr && 7090 if(!(sp->msix_info[i].addr &&
7090 sp->msix_info[i].data)) { 7091 sp->msix_info[i].data)) {
7091 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " 7092 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
7092 "Data:0x%lx\n",sp->desc[i], 7093 "Data:0x%llx\n",sp->desc[i],
7093 (unsigned long long) 7094 (unsigned long long)
7094 sp->msix_info[i].addr, 7095 sp->msix_info[i].addr,
7095 (unsigned long) 7096 (unsigned long long)
7096 ntohl(sp->msix_info[i].data)); 7097 sp->msix_info[i].data);
7097 } else { 7098 } else {
7098 msix_tx_cnt++; 7099 msix_tx_cnt++;
7099 } 7100 }
@@ -7107,11 +7108,11 @@ static int s2io_add_isr(struct s2io_nic * sp)
7107 if(!(sp->msix_info[i].addr && 7108 if(!(sp->msix_info[i].addr &&
7108 sp->msix_info[i].data)) { 7109 sp->msix_info[i].data)) {
7109 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " 7110 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
7110 "Data:0x%lx\n",sp->desc[i], 7111 "Data:0x%llx\n",sp->desc[i],
7111 (unsigned long long) 7112 (unsigned long long)
7112 sp->msix_info[i].addr, 7113 sp->msix_info[i].addr,
7113 (unsigned long) 7114 (unsigned long long)
7114 ntohl(sp->msix_info[i].data)); 7115 sp->msix_info[i].data);
7115 } else { 7116 } else {
7116 msix_rx_cnt++; 7117 msix_rx_cnt++;
7117 } 7118 }
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index 76dc8adc9441..6028bbb3b28a 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -401,18 +401,18 @@ static void copy_tx_mac(struct s_smc *smc, u_long td, struct fddi_mac *mac,
401/* int len ; length of the frame including the FC */ 401/* int len ; length of the frame including the FC */
402{ 402{
403 int i ; 403 int i ;
404 u_int *p ; 404 __le32 *p ;
405 405
406 CHECK_NPP() ; 406 CHECK_NPP() ;
407 MARW(off) ; /* set memory address reg for writes */ 407 MARW(off) ; /* set memory address reg for writes */
408 408
409 p = (u_int *) mac ; 409 p = (__le32 *) mac ;
410 for (i = (len + 3)/4 ; i ; i--) { 410 for (i = (len + 3)/4 ; i ; i--) {
411 if (i == 1) { 411 if (i == 1) {
412 /* last word, set the tag bit */ 412 /* last word, set the tag bit */
413 outpw(FM_A(FM_CMDREG2),FM_ISTTB) ; 413 outpw(FM_A(FM_CMDREG2),FM_ISTTB) ;
414 } 414 }
415 write_mdr(smc,MDR_REVERSE(*p)) ; 415 write_mdr(smc,le32_to_cpu(*p)) ;
416 p++ ; 416 p++ ;
417 } 417 }
418 418
@@ -444,7 +444,7 @@ static void copy_tx_mac(struct s_smc *smc, u_long td, struct fddi_mac *mac,
444 */ 444 */
445static void directed_beacon(struct s_smc *smc) 445static void directed_beacon(struct s_smc *smc)
446{ 446{
447 SK_LOC_DECL(u_int,a[2]) ; 447 SK_LOC_DECL(__le32,a[2]) ;
448 448
449 /* 449 /*
450 * set UNA in frame 450 * set UNA in frame
@@ -458,9 +458,9 @@ static void directed_beacon(struct s_smc *smc)
458 CHECK_NPP() ; 458 CHECK_NPP() ;
459 /* set memory address reg for writes */ 459 /* set memory address reg for writes */
460 MARW(smc->hw.fp.fifo.rbc_ram_start+DBEACON_FRAME_OFF+4) ; 460 MARW(smc->hw.fp.fifo.rbc_ram_start+DBEACON_FRAME_OFF+4) ;
461 write_mdr(smc,MDR_REVERSE(a[0])) ; 461 write_mdr(smc,le32_to_cpu(a[0])) ;
462 outpw(FM_A(FM_CMDREG2),FM_ISTTB) ; /* set the tag bit */ 462 outpw(FM_A(FM_CMDREG2),FM_ISTTB) ; /* set the tag bit */
463 write_mdr(smc,MDR_REVERSE(a[1])) ; 463 write_mdr(smc,le32_to_cpu(a[1])) ;
464 464
465 outpw(FM_A(FM_SABC),smc->hw.fp.fifo.rbc_ram_start + DBEACON_FRAME_OFF) ; 465 outpw(FM_A(FM_SABC),smc->hw.fp.fifo.rbc_ram_start + DBEACON_FRAME_OFF) ;
466} 466}
diff --git a/drivers/net/skfp/h/fplustm.h b/drivers/net/skfp/h/fplustm.h
index 98bbf654d12f..6d738e1e2393 100644
--- a/drivers/net/skfp/h/fplustm.h
+++ b/drivers/net/skfp/h/fplustm.h
@@ -50,12 +50,12 @@ struct err_st {
50 * Transmit Descriptor struct 50 * Transmit Descriptor struct
51 */ 51 */
52struct s_smt_fp_txd { 52struct s_smt_fp_txd {
53 u_int txd_tbctrl ; /* transmit buffer control */ 53 __le32 txd_tbctrl ; /* transmit buffer control */
54 u_int txd_txdscr ; /* transmit frame status word */ 54 __le32 txd_txdscr ; /* transmit frame status word */
55 u_int txd_tbadr ; /* physical tx buffer address */ 55 __le32 txd_tbadr ; /* physical tx buffer address */
56 u_int txd_ntdadr ; /* physical pointer to the next TxD */ 56 __le32 txd_ntdadr ; /* physical pointer to the next TxD */
57#ifdef ENA_64BIT_SUP 57#ifdef ENA_64BIT_SUP
58 u_int txd_tbadr_hi ; /* physical tx buffer addr (high dword)*/ 58 __le32 txd_tbadr_hi ; /* physical tx buffer addr (high dword)*/
59#endif 59#endif
60 char far *txd_virt ; /* virtual pointer to the data frag */ 60 char far *txd_virt ; /* virtual pointer to the data frag */
61 /* virt pointer to the next TxD */ 61 /* virt pointer to the next TxD */
@@ -67,12 +67,12 @@ struct s_smt_fp_txd {
67 * Receive Descriptor struct 67 * Receive Descriptor struct
68 */ 68 */
69struct s_smt_fp_rxd { 69struct s_smt_fp_rxd {
70 u_int rxd_rbctrl ; /* receive buffer control */ 70 __le32 rxd_rbctrl ; /* receive buffer control */
71 u_int rxd_rfsw ; /* receive frame status word */ 71 __le32 rxd_rfsw ; /* receive frame status word */
72 u_int rxd_rbadr ; /* physical rx buffer address */ 72 __le32 rxd_rbadr ; /* physical rx buffer address */
73 u_int rxd_nrdadr ; /* physical pointer to the next RxD */ 73 __le32 rxd_nrdadr ; /* physical pointer to the next RxD */
74#ifdef ENA_64BIT_SUP 74#ifdef ENA_64BIT_SUP
75 u_int rxd_rbadr_hi ; /* physical tx buffer addr (high dword)*/ 75 __le32 rxd_rbadr_hi ; /* physical tx buffer addr (high dword)*/
76#endif 76#endif
77 char far *rxd_virt ; /* virtual pointer to the data frag */ 77 char far *rxd_virt ; /* virtual pointer to the data frag */
78 /* virt pointer to the next RxD */ 78 /* virt pointer to the next RxD */
diff --git a/drivers/net/skfp/hwmtm.c b/drivers/net/skfp/hwmtm.c
index 46e339315656..4218e97033c9 100644
--- a/drivers/net/skfp/hwmtm.c
+++ b/drivers/net/skfp/hwmtm.c
@@ -208,7 +208,7 @@ SMbuf* smt_get_mbuf(struct s_smc *smc);
208#if defined(NDIS_OS2) || defined(ODI2) 208#if defined(NDIS_OS2) || defined(ODI2)
209#define CR_READ(var) ((var) & 0xffff0000 | ((var) & 0xffff)) 209#define CR_READ(var) ((var) & 0xffff0000 | ((var) & 0xffff))
210#else 210#else
211#define CR_READ(var) (u_long)(var) 211#define CR_READ(var) (__le32)(var)
212#endif 212#endif
213 213
214#define IMASK_SLOW (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \ 214#define IMASK_SLOW (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \
@@ -343,16 +343,16 @@ static u_long init_descr_ring(struct s_smc *smc,
343 for (i=count-1, d1=start; i ; i--) { 343 for (i=count-1, d1=start; i ; i--) {
344 d2 = d1 ; 344 d2 = d1 ;
345 d1++ ; /* descr is owned by the host */ 345 d1++ ; /* descr is owned by the host */
346 d2->r.rxd_rbctrl = AIX_REVERSE(BMU_CHECK) ; 346 d2->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
347 d2->r.rxd_next = &d1->r ; 347 d2->r.rxd_next = &d1->r ;
348 phys = mac_drv_virt2phys(smc,(void *)d1) ; 348 phys = mac_drv_virt2phys(smc,(void *)d1) ;
349 d2->r.rxd_nrdadr = AIX_REVERSE(phys) ; 349 d2->r.rxd_nrdadr = cpu_to_le32(phys) ;
350 } 350 }
351 DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ; 351 DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ;
352 d1->r.rxd_rbctrl = AIX_REVERSE(BMU_CHECK) ; 352 d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
353 d1->r.rxd_next = &start->r ; 353 d1->r.rxd_next = &start->r ;
354 phys = mac_drv_virt2phys(smc,(void *)start) ; 354 phys = mac_drv_virt2phys(smc,(void *)start) ;
355 d1->r.rxd_nrdadr = AIX_REVERSE(phys) ; 355 d1->r.rxd_nrdadr = cpu_to_le32(phys) ;
356 356
357 for (i=count, d1=start; i ; i--) { 357 for (i=count, d1=start; i ; i--) {
358 DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ; 358 DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ;
@@ -376,7 +376,7 @@ static void init_txd_ring(struct s_smc *smc)
376 DB_GEN("Init async TxD ring, %d TxDs ",HWM_ASYNC_TXD_COUNT,0,3) ; 376 DB_GEN("Init async TxD ring, %d TxDs ",HWM_ASYNC_TXD_COUNT,0,3) ;
377 (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds, 377 (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
378 HWM_ASYNC_TXD_COUNT) ; 378 HWM_ASYNC_TXD_COUNT) ;
379 phys = AIX_REVERSE(ds->txd_ntdadr) ; 379 phys = le32_to_cpu(ds->txd_ntdadr) ;
380 ds++ ; 380 ds++ ;
381 queue->tx_curr_put = queue->tx_curr_get = ds ; 381 queue->tx_curr_put = queue->tx_curr_get = ds ;
382 ds-- ; 382 ds-- ;
@@ -390,7 +390,7 @@ static void init_txd_ring(struct s_smc *smc)
390 DB_GEN("Init sync TxD ring, %d TxDs ",HWM_SYNC_TXD_COUNT,0,3) ; 390 DB_GEN("Init sync TxD ring, %d TxDs ",HWM_SYNC_TXD_COUNT,0,3) ;
391 (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds, 391 (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
392 HWM_SYNC_TXD_COUNT) ; 392 HWM_SYNC_TXD_COUNT) ;
393 phys = AIX_REVERSE(ds->txd_ntdadr) ; 393 phys = le32_to_cpu(ds->txd_ntdadr) ;
394 ds++ ; 394 ds++ ;
395 queue->tx_curr_put = queue->tx_curr_get = ds ; 395 queue->tx_curr_put = queue->tx_curr_get = ds ;
396 queue->tx_free = HWM_SYNC_TXD_COUNT ; 396 queue->tx_free = HWM_SYNC_TXD_COUNT ;
@@ -412,7 +412,7 @@ static void init_rxd_ring(struct s_smc *smc)
412 DB_GEN("Init RxD ring, %d RxDs ",SMT_R1_RXD_COUNT,0,3) ; 412 DB_GEN("Init RxD ring, %d RxDs ",SMT_R1_RXD_COUNT,0,3) ;
413 (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds, 413 (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
414 SMT_R1_RXD_COUNT) ; 414 SMT_R1_RXD_COUNT) ;
415 phys = AIX_REVERSE(ds->rxd_nrdadr) ; 415 phys = le32_to_cpu(ds->rxd_nrdadr) ;
416 ds++ ; 416 ds++ ;
417 queue->rx_curr_put = queue->rx_curr_get = ds ; 417 queue->rx_curr_put = queue->rx_curr_get = ds ;
418 queue->rx_free = SMT_R1_RXD_COUNT ; 418 queue->rx_free = SMT_R1_RXD_COUNT ;
@@ -607,12 +607,12 @@ static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
607 for (i = tx_used+queue->tx_free-1 ; i ; i-- ) { 607 for (i = tx_used+queue->tx_free-1 ; i ; i-- ) {
608 t = t->txd_next ; 608 t = t->txd_next ;
609 } 609 }
610 phys = AIX_REVERSE(t->txd_ntdadr) ; 610 phys = le32_to_cpu(t->txd_ntdadr) ;
611 611
612 t = queue->tx_curr_get ; 612 t = queue->tx_curr_get ;
613 while (tx_used) { 613 while (tx_used) {
614 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ; 614 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
615 tbctrl = AIX_REVERSE(t->txd_tbctrl) ; 615 tbctrl = le32_to_cpu(t->txd_tbctrl) ;
616 616
617 if (tbctrl & BMU_OWN) { 617 if (tbctrl & BMU_OWN) {
618 if (tbctrl & BMU_STF) { 618 if (tbctrl & BMU_STF) {
@@ -622,10 +622,10 @@ static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
622 /* 622 /*
623 * repair the descriptor 623 * repair the descriptor
624 */ 624 */
625 t->txd_tbctrl &= AIX_REVERSE(~BMU_OWN) ; 625 t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
626 } 626 }
627 } 627 }
628 phys = AIX_REVERSE(t->txd_ntdadr) ; 628 phys = le32_to_cpu(t->txd_ntdadr) ;
629 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; 629 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
630 t = t->txd_next ; 630 t = t->txd_next ;
631 tx_used-- ; 631 tx_used-- ;
@@ -659,12 +659,12 @@ static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
659 for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) { 659 for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) {
660 r = r->rxd_next ; 660 r = r->rxd_next ;
661 } 661 }
662 phys = AIX_REVERSE(r->rxd_nrdadr) ; 662 phys = le32_to_cpu(r->rxd_nrdadr) ;
663 663
664 r = queue->rx_curr_get ; 664 r = queue->rx_curr_get ;
665 while (rx_used) { 665 while (rx_used) {
666 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 666 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
667 rbctrl = AIX_REVERSE(r->rxd_rbctrl) ; 667 rbctrl = le32_to_cpu(r->rxd_rbctrl) ;
668 668
669 if (rbctrl & BMU_OWN) { 669 if (rbctrl & BMU_OWN) {
670 if (rbctrl & BMU_STF) { 670 if (rbctrl & BMU_STF) {
@@ -674,10 +674,10 @@ static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
674 /* 674 /*
675 * repair the descriptor 675 * repair the descriptor
676 */ 676 */
677 r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ; 677 r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
678 } 678 }
679 } 679 }
680 phys = AIX_REVERSE(r->rxd_nrdadr) ; 680 phys = le32_to_cpu(r->rxd_nrdadr) ;
681 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; 681 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
682 r = r->rxd_next ; 682 r = r->rxd_next ;
683 rx_used-- ; 683 rx_used-- ;
@@ -1094,8 +1094,7 @@ void process_receive(struct s_smc *smc)
1094 do { 1094 do {
1095 DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ; 1095 DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ;
1096 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 1096 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
1097 rbctrl = CR_READ(r->rxd_rbctrl) ; 1097 rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl));
1098 rbctrl = AIX_REVERSE(rbctrl) ;
1099 1098
1100 if (rbctrl & BMU_OWN) { 1099 if (rbctrl & BMU_OWN) {
1101 NDD_TRACE("RHxE",r,rfsw,rbctrl) ; 1100 NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
@@ -1118,7 +1117,7 @@ void process_receive(struct s_smc *smc)
1118 smc->os.hwm.detec_count = 0 ; 1117 smc->os.hwm.detec_count = 0 ;
1119 goto rx_end ; 1118 goto rx_end ;
1120 } 1119 }
1121 rfsw = AIX_REVERSE(r->rxd_rfsw) ; 1120 rfsw = le32_to_cpu(r->rxd_rfsw) ;
1122 if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) { 1121 if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) {
1123 /* 1122 /*
1124 * The BMU_STF bit is deleted, 1 frame is 1123 * The BMU_STF bit is deleted, 1 frame is
@@ -1151,7 +1150,7 @@ void process_receive(struct s_smc *smc)
1151 /* may be next 2 DRV_BUF_FLUSH() can be skipped, because */ 1150 /* may be next 2 DRV_BUF_FLUSH() can be skipped, because */
1152 /* BMU_ST_BUF will not be changed by the ASIC */ 1151 /* BMU_ST_BUF will not be changed by the ASIC */
1153 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 1152 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
1154 while (rx_used && !(r->rxd_rbctrl & AIX_REVERSE(BMU_ST_BUF))) { 1153 while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
1155 DB_RX("Check STF bit in %x",(void *)r,0,5) ; 1154 DB_RX("Check STF bit in %x",(void *)r,0,5) ;
1156 r = r->rxd_next ; 1155 r = r->rxd_next ;
1157 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 1156 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
@@ -1171,7 +1170,7 @@ void process_receive(struct s_smc *smc)
1171 /* 1170 /*
1172 * ASIC Errata no. 7 (STF - Bit Bug) 1171 * ASIC Errata no. 7 (STF - Bit Bug)
1173 */ 1172 */
1174 rxd->rxd_rbctrl &= AIX_REVERSE(~BMU_STF) ; 1173 rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ;
1175 1174
1176 for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){ 1175 for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
1177 DB_RX("dma_complete for RxD %x",(void *)r,0,5) ; 1176 DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
@@ -1287,7 +1286,7 @@ void process_receive(struct s_smc *smc)
1287 hwm_cpy_rxd2mb(rxd,data,len) ; 1286 hwm_cpy_rxd2mb(rxd,data,len) ;
1288#else 1287#else
1289 for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){ 1288 for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
1290 n = AIX_REVERSE(r->rxd_rbctrl) & RD_LENGTH ; 1289 n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ;
1291 DB_RX("cp SMT frame to mb: len = %d",n,0,6) ; 1290 DB_RX("cp SMT frame to mb: len = %d",n,0,6) ;
1292 memcpy(data,r->rxd_virt,n) ; 1291 memcpy(data,r->rxd_virt,n) ;
1293 data += n ; 1292 data += n ;
@@ -1426,14 +1425,14 @@ void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
1426 int frame_status) 1425 int frame_status)
1427{ 1426{
1428 struct s_smt_fp_rxd volatile *r ; 1427 struct s_smt_fp_rxd volatile *r ;
1429 u_int rbctrl ; 1428 __le32 rbctrl;
1430 1429
1431 NDD_TRACE("RHfB",virt,len,frame_status) ; 1430 NDD_TRACE("RHfB",virt,len,frame_status) ;
1432 DB_RX("hwm_rx_frag: len = %d, frame_status = %x\n",len,frame_status,2) ; 1431 DB_RX("hwm_rx_frag: len = %d, frame_status = %x\n",len,frame_status,2) ;
1433 r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ; 1432 r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ;
1434 r->rxd_virt = virt ; 1433 r->rxd_virt = virt ;
1435 r->rxd_rbadr = AIX_REVERSE(phys) ; 1434 r->rxd_rbadr = cpu_to_le32(phys) ;
1436 rbctrl = AIX_REVERSE( (((u_long)frame_status & 1435 rbctrl = cpu_to_le32( (((__u32)frame_status &
1437 (FIRST_FRAG|LAST_FRAG))<<26) | 1436 (FIRST_FRAG|LAST_FRAG))<<26) |
1438 (((u_long) frame_status & FIRST_FRAG) << 21) | 1437 (((u_long) frame_status & FIRST_FRAG) << 21) |
1439 BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ; 1438 BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ;
@@ -1444,7 +1443,7 @@ void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
1444 smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ; 1443 smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ;
1445 smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ; 1444 smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ;
1446 smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ; 1445 smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ;
1447 NDD_TRACE("RHfE",r,AIX_REVERSE(r->rxd_rbadr),0) ; 1446 NDD_TRACE("RHfE",r,le32_to_cpu(r->rxd_rbadr),0) ;
1448} 1447}
1449 1448
1450/* 1449/*
@@ -1494,15 +1493,15 @@ void mac_drv_clear_rx_queue(struct s_smc *smc)
1494 while (queue->rx_used) { 1493 while (queue->rx_used) {
1495 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 1494 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
1496 DB_RX("switch OWN bit of RxD 0x%x ",r,0,5) ; 1495 DB_RX("switch OWN bit of RxD 0x%x ",r,0,5) ;
1497 r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ; 1496 r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
1498 frag_count = 1 ; 1497 frag_count = 1 ;
1499 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; 1498 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
1500 r = r->rxd_next ; 1499 r = r->rxd_next ;
1501 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 1500 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
1502 while (r != queue->rx_curr_put && 1501 while (r != queue->rx_curr_put &&
1503 !(r->rxd_rbctrl & AIX_REVERSE(BMU_ST_BUF))) { 1502 !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
1504 DB_RX("Check STF bit in %x",(void *)r,0,5) ; 1503 DB_RX("Check STF bit in %x",(void *)r,0,5) ;
1505 r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ; 1504 r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
1506 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; 1505 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
1507 r = r->rxd_next ; 1506 r = r->rxd_next ;
1508 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 1507 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
@@ -1640,7 +1639,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
1640{ 1639{
1641 struct s_smt_fp_txd volatile *t ; 1640 struct s_smt_fp_txd volatile *t ;
1642 struct s_smt_tx_queue *queue ; 1641 struct s_smt_tx_queue *queue ;
1643 u_int tbctrl ; 1642 __le32 tbctrl ;
1644 1643
1645 queue = smc->os.hwm.tx_p ; 1644 queue = smc->os.hwm.tx_p ;
1646 1645
@@ -1657,9 +1656,9 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
1657 /* '*t' is already defined */ 1656 /* '*t' is already defined */
1658 DB_TX("LAN_TX: TxD = %x, virt = %x ",t,virt,3) ; 1657 DB_TX("LAN_TX: TxD = %x, virt = %x ",t,virt,3) ;
1659 t->txd_virt = virt ; 1658 t->txd_virt = virt ;
1660 t->txd_txdscr = AIX_REVERSE(smc->os.hwm.tx_descr) ; 1659 t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
1661 t->txd_tbadr = AIX_REVERSE(phys) ; 1660 t->txd_tbadr = cpu_to_le32(phys) ;
1662 tbctrl = AIX_REVERSE((((u_long)frame_status & 1661 tbctrl = cpu_to_le32((((__u32)frame_status &
1663 (FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) | 1662 (FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) |
1664 BMU_OWN|BMU_CHECK |len) ; 1663 BMU_OWN|BMU_CHECK |len) ;
1665 t->txd_tbctrl = tbctrl ; 1664 t->txd_tbctrl = tbctrl ;
@@ -1826,7 +1825,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
1826 struct s_smt_tx_queue *queue ; 1825 struct s_smt_tx_queue *queue ;
1827 struct s_smt_fp_txd volatile *t ; 1826 struct s_smt_fp_txd volatile *t ;
1828 u_long phys ; 1827 u_long phys ;
1829 u_int tbctrl ; 1828 __le32 tbctrl;
1830 1829
1831 NDD_TRACE("THSB",mb,fc,0) ; 1830 NDD_TRACE("THSB",mb,fc,0) ;
1832 DB_TX("smt_send_mbuf: mb = 0x%x, fc = 0x%x",mb,fc,4) ; 1831 DB_TX("smt_send_mbuf: mb = 0x%x, fc = 0x%x",mb,fc,4) ;
@@ -1894,14 +1893,14 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
1894 DB_TX("init TxD = 0x%x",(void *)t,0,5) ; 1893 DB_TX("init TxD = 0x%x",(void *)t,0,5) ;
1895 if (i == frag_count-1) { 1894 if (i == frag_count-1) {
1896 frame_status |= LAST_FRAG ; 1895 frame_status |= LAST_FRAG ;
1897 t->txd_txdscr = AIX_REVERSE(TX_DESCRIPTOR | 1896 t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR |
1898 (((u_long)(mb->sm_len-1)&3) << 27)) ; 1897 (((__u32)(mb->sm_len-1)&3) << 27)) ;
1899 } 1898 }
1900 t->txd_virt = virt[i] ; 1899 t->txd_virt = virt[i] ;
1901 phys = dma_master(smc, (void far *)virt[i], 1900 phys = dma_master(smc, (void far *)virt[i],
1902 frag_len[i], DMA_RD|SMT_BUF) ; 1901 frag_len[i], DMA_RD|SMT_BUF) ;
1903 t->txd_tbadr = AIX_REVERSE(phys) ; 1902 t->txd_tbadr = cpu_to_le32(phys) ;
1904 tbctrl = AIX_REVERSE((((u_long) frame_status & 1903 tbctrl = cpu_to_le32((((__u32)frame_status &
1905 (FIRST_FRAG|LAST_FRAG)) << 26) | 1904 (FIRST_FRAG|LAST_FRAG)) << 26) |
1906 BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ; 1905 BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ;
1907 t->txd_tbctrl = tbctrl ; 1906 t->txd_tbctrl = tbctrl ;
@@ -1971,8 +1970,7 @@ static void mac_drv_clear_txd(struct s_smc *smc)
1971 do { 1970 do {
1972 DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ; 1971 DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
1973 DB_TX("check OWN/EOF bit of TxD 0x%x",t1,0,5) ; 1972 DB_TX("check OWN/EOF bit of TxD 0x%x",t1,0,5) ;
1974 tbctrl = CR_READ(t1->txd_tbctrl) ; 1973 tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));
1975 tbctrl = AIX_REVERSE(tbctrl) ;
1976 1974
1977 if (tbctrl & BMU_OWN || !queue->tx_used){ 1975 if (tbctrl & BMU_OWN || !queue->tx_used){
1978 DB_TX("End of TxDs queue %d",i,0,4) ; 1976 DB_TX("End of TxDs queue %d",i,0,4) ;
@@ -1984,7 +1982,7 @@ static void mac_drv_clear_txd(struct s_smc *smc)
1984 1982
1985 t1 = queue->tx_curr_get ; 1983 t1 = queue->tx_curr_get ;
1986 for (n = frag_count; n; n--) { 1984 for (n = frag_count; n; n--) {
1987 tbctrl = AIX_REVERSE(t1->txd_tbctrl) ; 1985 tbctrl = le32_to_cpu(t1->txd_tbctrl) ;
1988 dma_complete(smc, 1986 dma_complete(smc,
1989 (union s_fp_descr volatile *) t1, 1987 (union s_fp_descr volatile *) t1,
1990 (int) (DMA_RD | 1988 (int) (DMA_RD |
@@ -2064,7 +2062,7 @@ void mac_drv_clear_tx_queue(struct s_smc *smc)
2064 while (tx_used) { 2062 while (tx_used) {
2065 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ; 2063 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
2066 DB_TX("switch OWN bit of TxD 0x%x ",t,0,5) ; 2064 DB_TX("switch OWN bit of TxD 0x%x ",t,0,5) ;
2067 t->txd_tbctrl &= AIX_REVERSE(~BMU_OWN) ; 2065 t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
2068 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; 2066 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
2069 t = t->txd_next ; 2067 t = t->txd_next ;
2070 tx_used-- ; 2068 tx_used-- ;
@@ -2086,10 +2084,10 @@ void mac_drv_clear_tx_queue(struct s_smc *smc)
2086 * tx_curr_get and tx_curr_put to this position 2084 * tx_curr_get and tx_curr_put to this position
2087 */ 2085 */
2088 if (i == QUEUE_S) { 2086 if (i == QUEUE_S) {
2089 outpd(ADDR(B5_XS_DA),AIX_REVERSE(t->txd_ntdadr)) ; 2087 outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ;
2090 } 2088 }
2091 else { 2089 else {
2092 outpd(ADDR(B5_XA_DA),AIX_REVERSE(t->txd_ntdadr)) ; 2090 outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ;
2093 } 2091 }
2094 2092
2095 queue->tx_curr_put = queue->tx_curr_get->txd_next ; 2093 queue->tx_curr_put = queue->tx_curr_get->txd_next ;
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index 7cf9b9f35dee..a2b092bb3626 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -495,7 +495,7 @@ static int skfp_open(struct net_device *dev)
495 495
496 PRINTK(KERN_INFO "entering skfp_open\n"); 496 PRINTK(KERN_INFO "entering skfp_open\n");
497 /* Register IRQ - support shared interrupts by passing device ptr */ 497 /* Register IRQ - support shared interrupts by passing device ptr */
498 err = request_irq(dev->irq, (void *) skfp_interrupt, IRQF_SHARED, 498 err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
499 dev->name, dev); 499 dev->name, dev);
500 if (err) 500 if (err)
501 return err; 501 return err;
@@ -1644,7 +1644,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1644 // Get RIF length from Routing Control (RC) field. 1644 // Get RIF length from Routing Control (RC) field.
1645 cp = virt + FDDI_MAC_HDR_LEN; // Point behind MAC header. 1645 cp = virt + FDDI_MAC_HDR_LEN; // Point behind MAC header.
1646 1646
1647 ri = ntohs(*((unsigned short *) cp)); 1647 ri = ntohs(*((__be16 *) cp));
1648 RifLength = ri & FDDI_RCF_LEN_MASK; 1648 RifLength = ri & FDDI_RCF_LEN_MASK;
1649 if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) { 1649 if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) {
1650 printk("fddi: Invalid RIF.\n"); 1650 printk("fddi: Invalid RIF.\n");
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 4b131a6c6b70..81ccdfaecc58 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -155,7 +155,7 @@ static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
155 dm_write_async_helper(dev, reg, value, 0, NULL); 155 dm_write_async_helper(dev, reg, value, 0, NULL);
156} 156}
157 157
158static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, u16 *value) 158static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *value)
159{ 159{
160 int ret, i; 160 int ret, i;
161 161
@@ -194,7 +194,7 @@ static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, u16 *value)
194 return ret; 194 return ret;
195} 195}
196 196
197static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, u16 value) 197static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 value)
198{ 198{
199 int ret, i; 199 int ret, i;
200 200
@@ -249,7 +249,7 @@ static int dm9601_get_eeprom(struct net_device *net,
249 struct ethtool_eeprom *eeprom, u8 * data) 249 struct ethtool_eeprom *eeprom, u8 * data)
250{ 250{
251 struct usbnet *dev = netdev_priv(net); 251 struct usbnet *dev = netdev_priv(net);
252 u16 *ebuf = (u16 *) data; 252 __le16 *ebuf = (__le16 *) data;
253 int i; 253 int i;
254 254
255 /* access is 16bit */ 255 /* access is 16bit */
@@ -268,7 +268,7 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
268{ 268{
269 struct usbnet *dev = netdev_priv(netdev); 269 struct usbnet *dev = netdev_priv(netdev);
270 270
271 u16 res; 271 __le16 res;
272 272
273 if (phy_id) { 273 if (phy_id) {
274 devdbg(dev, "Only internal phy supported"); 274 devdbg(dev, "Only internal phy supported");
@@ -288,7 +288,7 @@ static void dm9601_mdio_write(struct net_device *netdev, int phy_id, int loc,
288 int val) 288 int val)
289{ 289{
290 struct usbnet *dev = netdev_priv(netdev); 290 struct usbnet *dev = netdev_priv(netdev);
291 u16 res = cpu_to_le16(val); 291 __le16 res = cpu_to_le16(val);
292 292
293 if (phy_id) { 293 if (phy_id) {
294 devdbg(dev, "Only internal phy supported"); 294 devdbg(dev, "Only internal phy supported");
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 727547a28992..06ae1b2b3b34 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -218,7 +218,7 @@ EXPORT_SYMBOL_GPL(rndis_command);
218 * ActiveSync 4.1 Windows driver. 218 * ActiveSync 4.1 Windows driver.
219 */ 219 */
220static int rndis_query(struct usbnet *dev, struct usb_interface *intf, 220static int rndis_query(struct usbnet *dev, struct usb_interface *intf,
221 void *buf, u32 oid, u32 in_len, 221 void *buf, __le32 oid, u32 in_len,
222 void **reply, int *reply_len) 222 void **reply, int *reply_len)
223{ 223{
224 int retval; 224 int retval;
@@ -283,7 +283,8 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
283 struct rndis_set_c *set_c; 283 struct rndis_set_c *set_c;
284 struct rndis_halt *halt; 284 struct rndis_halt *halt;
285 } u; 285 } u;
286 u32 tmp, *phym; 286 u32 tmp;
287 __le32 *phym;
287 int reply_len; 288 int reply_len;
288 unsigned char *bp; 289 unsigned char *bp;
289 290