-rw-r--r--  MAINTAINERS                       |  31
-rw-r--r--  drivers/net/Kconfig               |  13
-rw-r--r--  drivers/net/bonding/bond_3ad.c    |   6
-rw-r--r--  drivers/net/bonding/bond_3ad.h    |   2
-rw-r--r--  drivers/net/bonding/bond_alb.c    | 110
-rw-r--r--  drivers/net/bonding/bond_alb.h    |   2
-rw-r--r--  drivers/net/bonding/bond_main.c   | 330
-rw-r--r--  drivers/net/bonding/bond_sysfs.c  |  79
-rw-r--r--  drivers/net/bonding/bonding.h     |  14
-rw-r--r--  drivers/net/cpmac.c               |  31
-rw-r--r--  drivers/net/defxx.c               |   2
-rw-r--r--  drivers/net/mipsnet.c             |  44
-rw-r--r--  drivers/net/mv643xx_eth.c         | 807
-rw-r--r--  drivers/net/mv643xx_eth.h         | 370
-rw-r--r--  drivers/net/pasemi_mac.c          |   2
-rw-r--r--  drivers/net/r8169.c               | 406
-rw-r--r--  drivers/net/sky2.c                |   4
-rw-r--r--  include/linux/mv643xx.h           | 328
-rw-r--r--  include/linux/mv643xx_eth.h       |  31
-rw-r--r--  include/linux/netdevice.h         |   2
20 files changed, 1330 insertions(+), 1284 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 05504f2ff110..76b857157866 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -136,17 +136,6 @@ M: ajk@iehk.rwth-aachen.de
 L: linux-hams@vger.kernel.org
 S: Maintained
 
-8139CP 10/100 FAST ETHERNET DRIVER
-P: Jeff Garzik
-M: jgarzik@pobox.com
-S: Maintained
-
-8139TOO 10/100 FAST ETHERNET DRIVER
-P: Jeff Garzik
-M: jgarzik@pobox.com
-W: http://sourceforge.net/projects/gkernel/
-S: Maintained
-
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
 P: Francois Romieu
 M: romieu@fr.zoreil.com
@@ -1043,12 +1032,6 @@ M: kernel@wantstofly.org
 L: netdev@vger.kernel.org
 S: Maintained
 
-CIRRUS LOGIC GENERIC FBDEV DRIVER
-P: Jeff Garzik
-M: jgarzik@pobox.com
-L: linux-fbdev-devel@lists.sourceforge.net (subscribers-only)
-S: Odd Fixes
-
 CIRRUS LOGIC EP93XX OHCI USB HOST DRIVER
 P: Lennert Buytenhek
 M: kernel@wantstofly.org
@@ -1969,12 +1952,6 @@ M: adaplas@gmail.com
 L: linux-fbdev-devel@lists.sourceforge.net (subscribers-only)
 S: Maintained
 
-INTEL I8XX RANDOM NUMBER GENERATOR SUPPORT
-P: Jeff Garzik
-M: jgarzik@pobox.com
-W: http://sourceforge.net/projects/gkernel/
-S: Maintained
-
 INTEL IA32 MICROCODE UPDATE SUPPORT
 P: Tigran Aivazian
 M: tigran@aivazian.fsnet.co.uk
@@ -2701,8 +2678,6 @@ M: Paul.Clements@steeleye.com
 S: Maintained
 
 NETWORK DEVICE DRIVERS
-P: Andrew Morton
-M: akpm@linux-foundation.org
 P: Jeff Garzik
 M: jgarzik@pobox.com
 L: netdev@vger.kernel.org
@@ -3254,6 +3229,8 @@ S: Supported
 S390 NETWORK DRIVERS
 P: Ursula Braun
 M: ubraun@linux.vnet.ibm.com
+P: Frank Blaschka
+M: blaschka@linux.vnet.ibm.com
 M: linux390@de.ibm.com
 L: linux-s390@vger.kernel.org
 W: http://www.ibm.com/developerworks/linux/linux390/
@@ -4109,10 +4086,6 @@ M: hirofumi@mail.parknet.co.jp
 L: linux-kernel@vger.kernel.org
 S: Maintained
 
-VIA 82Cxxx AUDIO DRIVER (old OSS driver)
-P: Jeff Garzik
-S: Odd fixes
-
 VIA RHINE NETWORK DRIVER
 P: Roger Luethi
 M: rl@hellgate.ch
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2538816817aa..86b8641b4664 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2371,13 +2371,16 @@ config UGETH_TX_ON_DEMAND
 	depends on UCC_GETH
 
 config MV643XX_ETH
-	tristate "MV-643XX Ethernet support"
-	depends on MV64360 || MV64X60 || (PPC_MULTIPLATFORM && PPC32)
+	tristate "Marvell Discovery (643XX) and Orion ethernet support"
+	depends on MV64360 || MV64X60 || (PPC_MULTIPLATFORM && PPC32) || ARCH_ORION
 	select MII
 	help
-	  This driver supports the gigabit Ethernet on the Marvell MV643XX
-	  chipset which is used in the Momenco Ocelot C and Jaguar ATX and
-	  Pegasos II, amongst other PPC and MIPS boards.
+	  This driver supports the gigabit ethernet MACs in the
+	  Marvell Discovery PPC/MIPS chipset family (MV643XX) and
+	  in the Marvell Orion ARM SoC family.
+
+	  Some boards that use the Discovery chipset are the Momenco
+	  Ocelot C and Jaguar ATX and Pegasos II.
 
 config QLA3XXX
 	tristate "QLogic QLA3XXX Network Driver Support"
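The Kconfig hunk above only renames the option text and widens its dependencies; the option symbol itself is unchanged, so existing configurations keep working. A .config fragment enabling the driver would still look like the following (illustrative only, not part of the patch; MII is pulled in automatically by the select):

CONFIG_MII=y
CONFIG_MV643XX_ETH=m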
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 084f0292ea6e..cb3c6faa7888 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2076,8 +2076,10 @@ void bond_3ad_unbind_slave(struct slave *slave)
  * times out, and it selects an aggregator for the ports that are yet not
  * related to any aggregator, and selects the active aggregator for a bond.
  */
-void bond_3ad_state_machine_handler(struct bonding *bond)
+void bond_3ad_state_machine_handler(struct work_struct *work)
 {
+	struct bonding *bond = container_of(work, struct bonding,
+					    ad_work.work);
 	struct port *port;
 	struct aggregator *aggregator;
 
@@ -2128,7 +2130,7 @@ void bond_3ad_state_machine_handler(struct bonding *bond)
 	}
 
 re_arm:
-	mod_timer(&(BOND_AD_INFO(bond).ad_timer), jiffies + ad_delta_in_ticks);
+	queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
 out:
 	read_unlock(&bond->lock);
 }
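This hunk shows the conversion pattern used for every periodic handler in this patch: the function now receives a struct work_struct pointer, recovers its owning object with container_of() (delayed_work embeds a work_struct, hence the ad_work.work step), and re-arms itself with queue_delayed_work() instead of mod_timer(). A minimal self-contained sketch of the same pattern, with illustrative names not taken from the bonding code:

#include <linux/workqueue.h>

struct my_state {
	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long interval;		/* in jiffies */
};

static void my_handler(struct work_struct *work)
{
	/* delayed_work embeds a work_struct, hence ".work" */
	struct my_state *s = container_of(work, struct my_state, work.work);

	/* ... periodic processing ... */

	/* re-arm, as bond_3ad_state_machine_handler does above */
	queue_delayed_work(s->wq, &s->work, s->interval);
}

static int my_start(struct my_state *s)
{
	s->wq = create_singlethread_workqueue("my_wq");
	if (!s->wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&s->work, my_handler);
	queue_delayed_work(s->wq, &s->work, 0);
	return 0;
}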
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index f16557264944..b5ee45f6d55a 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -276,7 +276,7 @@ struct ad_slave_info {
 void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution, int lacp_fast);
 int bond_3ad_bind_slave(struct slave *slave);
 void bond_3ad_unbind_slave(struct slave *slave);
-void bond_3ad_state_machine_handler(struct bonding *bond);
+void bond_3ad_state_machine_handler(struct work_struct *);
 void bond_3ad_adapter_speed_changed(struct slave *slave);
 void bond_3ad_adapter_duplex_changed(struct slave *slave);
 void bond_3ad_handle_link_change(struct slave *slave, char link);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index aea2217c56eb..25b8dbf6cfd7 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -128,12 +128,12 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
 
 static inline void _lock_tx_hashtbl(struct bonding *bond)
 {
-	spin_lock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
+	spin_lock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
 }
 
 static inline void _unlock_tx_hashtbl(struct bonding *bond)
 {
-	spin_unlock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
+	spin_unlock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
 }
 
 /* Caller must hold tx_hashtbl lock */
@@ -305,12 +305,12 @@ static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u3
 /*********************** rlb specific functions ***************************/
 static inline void _lock_rx_hashtbl(struct bonding *bond)
 {
-	spin_lock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+	spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
 }
 
 static inline void _unlock_rx_hashtbl(struct bonding *bond)
 {
-	spin_unlock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+	spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
 }
 
 /* when an ARP REPLY is received from a client update its info
@@ -472,13 +472,13 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
 
 	_unlock_rx_hashtbl(bond);
 
-	write_lock(&bond->curr_slave_lock);
+	write_lock_bh(&bond->curr_slave_lock);
 
 	if (slave != bond->curr_active_slave) {
 		rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
 	}
 
-	write_unlock(&bond->curr_slave_lock);
+	write_unlock_bh(&bond->curr_slave_lock);
 }
 
 static void rlb_update_client(struct rlb_client_info *client_info)
@@ -959,19 +959,34 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
 	return 0;
 }
 
-/* Caller must hold bond lock for write or curr_slave_lock for write*/
+/*
+ * Swap MAC addresses between two slaves.
+ *
+ * Called with RTNL held, and no other locks.
+ *
+ */
+
 static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct slave *slave2)
 {
-	struct slave *disabled_slave = NULL;
 	u8 tmp_mac_addr[ETH_ALEN];
-	int slaves_state_differ;
-
-	slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
 
 	memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
 	alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled);
 	alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled);
 
+}
+
+/*
+ * Send learning packets after MAC address swap.
+ *
+ * Called with RTNL and bond->lock held for read.
+ */
+static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
+				struct slave *slave2)
+{
+	int slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
+	struct slave *disabled_slave = NULL;
+
 	/* fasten the change in the switch */
 	if (SLAVE_IS_OK(slave1)) {
 		alb_send_learning_packets(slave1, slave1->dev->dev_addr);
@@ -1044,7 +1059,9 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
 		}
 
 		if (found) {
+			/* locking: needs RTNL and nothing else */
 			alb_swap_mac_addr(bond, slave, tmp_slave);
+			alb_fasten_mac_swap(bond, slave, tmp_slave);
 		}
 	}
 }
@@ -1375,8 +1392,10 @@ out:
 	return 0;
 }
 
-void bond_alb_monitor(struct bonding *bond)
+void bond_alb_monitor(struct work_struct *work)
 {
+	struct bonding *bond = container_of(work, struct bonding,
+					    alb_work.work);
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	struct slave *slave;
 	int i;
@@ -1436,16 +1455,16 @@ void bond_alb_monitor(struct bonding *bond)
 
 	/* handle rlb stuff */
 	if (bond_info->rlb_enabled) {
-		/* the following code changes the promiscuity of the
-		 * the curr_active_slave. It needs to be locked with a
-		 * write lock to protect from other code that also
-		 * sets the promiscuity.
-		 */
-		write_lock_bh(&bond->curr_slave_lock);
-
 		if (bond_info->primary_is_promisc &&
 		    (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {
 
+			/*
+			 * dev_set_promiscuity requires rtnl and
+			 * nothing else.
+			 */
+			read_unlock(&bond->lock);
+			rtnl_lock();
+
 			bond_info->rlb_promisc_timeout_counter = 0;
 
 			/* If the primary was set to promiscuous mode
@@ -1454,9 +1473,10 @@ void bond_alb_monitor(struct bonding *bond)
 			 */
 			dev_set_promiscuity(bond->curr_active_slave->dev, -1);
 			bond_info->primary_is_promisc = 0;
-		}
 
-		write_unlock_bh(&bond->curr_slave_lock);
+			rtnl_unlock();
+			read_lock(&bond->lock);
+		}
 
 		if (bond_info->rlb_rebalance) {
 			bond_info->rlb_rebalance = 0;
@@ -1479,7 +1499,7 @@ void bond_alb_monitor(struct bonding *bond)
 	}
 
 re_arm:
-	mod_timer(&(bond_info->alb_timer), jiffies + alb_delta_in_ticks);
+	queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
 out:
 	read_unlock(&bond->lock);
 }
@@ -1500,11 +1520,11 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
 	/* caller must hold the bond lock for write since the mac addresses
 	 * are compared and may be swapped.
 	 */
-	write_lock_bh(&bond->lock);
+	read_lock(&bond->lock);
 
 	res = alb_handle_addr_collision_on_attach(bond, slave);
 
-	write_unlock_bh(&bond->lock);
+	read_unlock(&bond->lock);
 
 	if (res) {
 		return res;
@@ -1569,13 +1589,21 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
 * Set the bond->curr_active_slave to @new_slave and handle
 * mac address swapping and promiscuity changes as needed.
 *
-* Caller must hold bond curr_slave_lock for write (or bond lock for write)
+* If new_slave is NULL, caller must hold curr_slave_lock or
+* bond->lock for write.
+*
+* If new_slave is not NULL, caller must hold RTNL, bond->lock for
+* read and curr_slave_lock for write.  Processing here may sleep, so
+* no other locks may be held.
 */
 void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
 {
 	struct slave *swap_slave;
 	int i;
 
+	if (new_slave)
+		ASSERT_RTNL();
+
 	if (bond->curr_active_slave == new_slave) {
 		return;
 	}
@@ -1608,6 +1636,19 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 		}
 	}
 
+	/*
+	 * Arrange for swap_slave and new_slave to temporarily be
+	 * ignored so we can mess with their MAC addresses without
+	 * fear of interference from transmit activity.
+	 */
+	if (swap_slave) {
+		tlb_clear_slave(bond, swap_slave, 1);
+	}
+	tlb_clear_slave(bond, new_slave, 1);
+
+	write_unlock_bh(&bond->curr_slave_lock);
+	read_unlock(&bond->lock);
+
 	/* curr_active_slave must be set before calling alb_swap_mac_addr */
 	if (swap_slave) {
 		/* swap mac address */
@@ -1616,11 +1657,23 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 		/* set the new_slave to the bond mac address */
 		alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
 				       bond->alb_info.rlb_enabled);
+	}
+
+	read_lock(&bond->lock);
+
+	if (swap_slave) {
+		alb_fasten_mac_swap(bond, swap_slave, new_slave);
+	} else {
 		/* fasten bond mac on new current slave */
 		alb_send_learning_packets(new_slave, bond->dev->dev_addr);
 	}
+
+	write_lock_bh(&bond->curr_slave_lock);
 }
 
+/*
+ * Called with RTNL
+ */
 int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
 {
 	struct bonding *bond = bond_dev->priv;
@@ -1657,8 +1710,12 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
 		}
 	}
 
+	write_unlock_bh(&bond->curr_slave_lock);
+	read_unlock(&bond->lock);
+
 	if (swap_slave) {
 		alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
+		alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
 	} else {
 		alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
 				       bond->alb_info.rlb_enabled);
@@ -1670,6 +1727,9 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
 		}
 	}
 
+	read_lock(&bond->lock);
+	write_lock_bh(&bond->curr_slave_lock);
+
 	return 0;
 }
 
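A recurring shape in the bond_alb.c hunks above: functions entered with bond->lock held for read and curr_slave_lock held for write must drop both before doing anything that can sleep (MAC address changes, learning packets under RTNL), then reacquire them in the same order before returning, so the caller's locking state is preserved. A condensed sketch of that discipline, modeled on bond_alb_set_mac_address() above (the helper name alb_sleepable_section is illustrative):

/*
 * Entered with bond->lock held for read and curr_slave_lock held
 * for write; RTNL is held throughout.
 */
static void alb_sleepable_section(struct bonding *bond,
				  struct slave *swap_slave)
{
	write_unlock_bh(&bond->curr_slave_lock);
	read_unlock(&bond->lock);

	/* safe to sleep here: no spinlocks held, only RTNL */
	alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
	alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);

	/* restore the caller's locking state before returning */
	read_lock(&bond->lock);
	write_lock_bh(&bond->curr_slave_lock);
}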
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index fd8726429890..50968f8196cf 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -125,7 +125,7 @@ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
 void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
 void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
 int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
-void bond_alb_monitor(struct bonding *bond);
+void bond_alb_monitor(struct work_struct *);
 int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
 void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
 #endif /* __BOND_ALB_H__ */
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6f85cc31f8a2..6909becb10f6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1590,15 +1590,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	case BOND_MODE_TLB:
 	case BOND_MODE_ALB:
 		new_slave->state = BOND_STATE_ACTIVE;
-		if ((!bond->curr_active_slave) &&
-		    (new_slave->link != BOND_LINK_DOWN)) {
-			/* first slave or no active slave yet, and this link
-			 * is OK, so make this interface the active one
-			 */
-			bond_change_active_slave(bond, new_slave);
-		} else {
-			bond_set_slave_inactive_flags(new_slave);
-		}
+		bond_set_slave_inactive_flags(new_slave);
 		break;
 	default:
 		dprintk("This slave is always active in trunk mode\n");
@@ -1754,9 +1746,23 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 		bond_alb_deinit_slave(bond, slave);
 	}
 
-	if (oldcurrent == slave)
+	if (oldcurrent == slave) {
+		/*
+		 * Note that we hold RTNL over this sequence, so there
+		 * is no concern that another slave add/remove event
+		 * will interfere.
+		 */
+		write_unlock_bh(&bond->lock);
+		read_lock(&bond->lock);
+		write_lock_bh(&bond->curr_slave_lock);
+
 		bond_select_active_slave(bond);
 
+		write_unlock_bh(&bond->curr_slave_lock);
+		read_unlock(&bond->lock);
+		write_lock_bh(&bond->lock);
+	}
+
 	if (bond->slave_cnt == 0) {
 		bond_set_carrier(bond);
 
@@ -1840,9 +1846,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 */
 void bond_destroy(struct bonding *bond)
 {
+	unregister_netdevice(bond->dev);
 	bond_deinit(bond->dev);
 	bond_destroy_sysfs_entry(bond);
-	unregister_netdevice(bond->dev);
 }
 
 /*
@@ -2012,16 +2018,19 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi
 		return -EINVAL;
 	}
 
-	write_lock_bh(&bond->lock);
+	read_lock(&bond->lock);
 
+	read_lock(&bond->curr_slave_lock);
 	old_active = bond->curr_active_slave;
+	read_unlock(&bond->curr_slave_lock);
+
 	new_active = bond_get_slave_by_dev(bond, slave_dev);
 
 	/*
 	 * Changing to the current active: do nothing; return success.
 	 */
 	if (new_active && (new_active == old_active)) {
-		write_unlock_bh(&bond->lock);
+		read_unlock(&bond->lock);
 		return 0;
 	}
 
@@ -2029,12 +2038,14 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi
 	    (old_active) &&
 	    (new_active->link == BOND_LINK_UP) &&
 	    IS_UP(new_active->dev)) {
+		write_lock_bh(&bond->curr_slave_lock);
 		bond_change_active_slave(bond, new_active);
+		write_unlock_bh(&bond->curr_slave_lock);
 	} else {
 		res = -EINVAL;
 	}
 
-	write_unlock_bh(&bond->lock);
+	read_unlock(&bond->lock);
 
 	return res;
 }
@@ -2046,9 +2057,9 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 	info->bond_mode = bond->params.mode;
 	info->miimon = bond->params.miimon;
 
-	read_lock_bh(&bond->lock);
+	read_lock(&bond->lock);
 	info->num_slaves = bond->slave_cnt;
-	read_unlock_bh(&bond->lock);
+	read_unlock(&bond->lock);
 
 	return 0;
 }
@@ -2063,7 +2074,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
 		return -ENODEV;
 	}
 
-	read_lock_bh(&bond->lock);
+	read_lock(&bond->lock);
 
 	bond_for_each_slave(bond, slave, i) {
 		if (i == (int)info->slave_id) {
@@ -2072,7 +2083,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
 		}
 	}
 
-	read_unlock_bh(&bond->lock);
+	read_unlock(&bond->lock);
 
 	if (found) {
 		strcpy(info->slave_name, slave->dev->name);
@@ -2088,26 +2099,25 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
 
 /*-------------------------------- Monitoring -------------------------------*/
 
-/* this function is called regularly to monitor each slave's link. */
-void bond_mii_monitor(struct net_device *bond_dev)
+/*
+ * if !have_locks, return nonzero if a failover is necessary.  if
+ * have_locks, do whatever failover activities are needed.
+ *
+ * This is to separate the inspection and failover steps for locking
+ * purposes; failover requires rtnl, but acquiring it for every
+ * inspection is undesirable, so a wrapper first does inspection, and
+ * then acquires the necessary locks and calls again to perform
+ * failover if needed.  Since all locks are dropped, a complete
+ * restart is needed between calls.
+ */
+static int __bond_mii_monitor(struct bonding *bond, int have_locks)
 {
-	struct bonding *bond = bond_dev->priv;
 	struct slave *slave, *oldcurrent;
 	int do_failover = 0;
-	int delta_in_ticks;
 	int i;
 
-	read_lock(&bond->lock);
-
-	delta_in_ticks = (bond->params.miimon * HZ) / 1000;
-
-	if (bond->kill_timers) {
+	if (bond->slave_cnt == 0)
 		goto out;
-	}
-
-	if (bond->slave_cnt == 0) {
-		goto re_arm;
-	}
 
 	/* we will try to read the link status of each of our slaves, and
 	 * set their IFF_RUNNING flag appropriately. For each slave not
@@ -2141,7 +2151,11 @@ void bond_mii_monitor(struct net_device *bond_dev)
 		switch (slave->link) {
 		case BOND_LINK_UP: /* the link was up */
 			if (link_state == BMSR_LSTATUS) {
-				/* link stays up, nothing more to do */
+				if (!oldcurrent) {
+					if (!have_locks)
+						return 1;
+					do_failover = 1;
+				}
 				break;
 			} else { /* link going down */
 				slave->link = BOND_LINK_FAIL;
@@ -2156,7 +2170,7 @@ void bond_mii_monitor(struct net_device *bond_dev)
 					       ": %s: link status down for %s "
 					       "interface %s, disabling it in "
 					       "%d ms.\n",
-					       bond_dev->name,
+					       bond->dev->name,
 					       IS_UP(slave_dev)
 					       ? ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
 						  ? ((slave == oldcurrent)
@@ -2174,6 +2188,9 @@ void bond_mii_monitor(struct net_device *bond_dev)
 			if (link_state != BMSR_LSTATUS) {
 				/* link stays down */
 				if (slave->delay <= 0) {
+					if (!have_locks)
+						return 1;
+
 					/* link down for too long time */
 					slave->link = BOND_LINK_DOWN;
 
@@ -2189,7 +2206,7 @@ void bond_mii_monitor(struct net_device *bond_dev)
 					       ": %s: link status definitely "
 					       "down for interface %s, "
 					       "disabling it\n",
-					       bond_dev->name,
+					       bond->dev->name,
 					       slave_dev->name);
 
 					/* notify ad that the link status has changed */
@@ -2215,7 +2232,7 @@ void bond_mii_monitor(struct net_device *bond_dev)
 					printk(KERN_INFO DRV_NAME
 					       ": %s: link status up again after %d "
 					       "ms for interface %s.\n",
-					       bond_dev->name,
+					       bond->dev->name,
 					       (bond->params.downdelay - slave->delay) * bond->params.miimon,
 					       slave_dev->name);
 				}
@@ -2235,7 +2252,7 @@ void bond_mii_monitor(struct net_device *bond_dev)
 					       ": %s: link status up for "
 					       "interface %s, enabling it "
 					       "in %d ms.\n",
-					       bond_dev->name,
+					       bond->dev->name,
 					       slave_dev->name,
 					       bond->params.updelay * bond->params.miimon);
 				}
@@ -2251,12 +2268,15 @@ void bond_mii_monitor(struct net_device *bond_dev)
 					printk(KERN_INFO DRV_NAME
 					       ": %s: link status down again after %d "
 					       "ms for interface %s.\n",
-					       bond_dev->name,
+					       bond->dev->name,
 					       (bond->params.updelay - slave->delay) * bond->params.miimon,
 					       slave_dev->name);
 				} else {
 					/* link stays up */
 					if (slave->delay == 0) {
+						if (!have_locks)
+							return 1;
+
 						/* now the link has been up for long time enough */
 						slave->link = BOND_LINK_UP;
 						slave->jiffies = jiffies;
@@ -2275,7 +2295,7 @@ void bond_mii_monitor(struct net_device *bond_dev)
 						printk(KERN_INFO DRV_NAME
 						       ": %s: link status definitely "
 						       "up for interface %s.\n",
-						       bond_dev->name,
+						       bond->dev->name,
 						       slave_dev->name);
 
 						/* notify ad that the link status has changed */
@@ -2301,7 +2321,7 @@ void bond_mii_monitor(struct net_device *bond_dev)
 			/* Should not happen */
 			printk(KERN_ERR DRV_NAME
 			       ": %s: Error: %s Illegal value (link=%d)\n",
-			       bond_dev->name,
+			       bond->dev->name,
 			       slave->dev->name,
 			       slave->link);
 			goto out;
@@ -2322,22 +2342,52 @@ void bond_mii_monitor(struct net_device *bond_dev)
 	} /* end of for */
 
 	if (do_failover) {
-		write_lock(&bond->curr_slave_lock);
+		ASSERT_RTNL();
+
+		write_lock_bh(&bond->curr_slave_lock);
 
 		bond_select_active_slave(bond);
 
-		write_unlock(&bond->curr_slave_lock);
+		write_unlock_bh(&bond->curr_slave_lock);
+
 	} else
 		bond_set_carrier(bond);
 
-re_arm:
-	if (bond->params.miimon) {
-		mod_timer(&bond->mii_timer, jiffies + delta_in_ticks);
-	}
 out:
-	read_unlock(&bond->lock);
+	return 0;
 }
 
+/*
+ * bond_mii_monitor
+ *
+ * Really a wrapper that splits the mii monitor into two phases: an
+ * inspection, then (if inspection indicates something needs to be
+ * done) an acquisition of appropriate locks followed by another pass
+ * to implement whatever link state changes are indicated.
+ */
+void bond_mii_monitor(struct work_struct *work)
+{
+	struct bonding *bond = container_of(work, struct bonding,
+					    mii_work.work);
+	unsigned long delay;
+
+	read_lock(&bond->lock);
+	if (bond->kill_timers) {
+		read_unlock(&bond->lock);
+		return;
+	}
+	if (__bond_mii_monitor(bond, 0)) {
+		read_unlock(&bond->lock);
+		rtnl_lock();
+		read_lock(&bond->lock);
+		__bond_mii_monitor(bond, 1);
+		rtnl_unlock();
+	}
+
+	delay = ((bond->params.miimon * HZ) / 1000) ? : 1;
+	read_unlock(&bond->lock);
+	queue_delayed_work(bond->wq, &bond->mii_work, delay);
+}
 
 static __be32 bond_glean_dev_ip(struct net_device *dev)
 {
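The hunk above carries the core idea of the locking rework: __bond_mii_monitor() first runs as a cheap inspection pass with only bond->lock held for read; only if it reports work to do does the wrapper drop its lock, take RTNL (which may sleep, so no spinlocks can be held), and re-run the whole inspection to commit the changes, since any conclusion from the first pass may be stale once the locks were dropped. Reduced to a skeleton (inspect_or_commit and monitor are illustrative names; the real functions are __bond_mii_monitor and bond_mii_monitor above):

/* returns nonzero in pass 1 if a change is needed;
 * applies the change in pass 2 (stubbed here) */
static int inspect_or_commit(struct bonding *bond, int have_locks)
{
	return 0;
}

static void monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mii_work.work);

	read_lock(&bond->lock);
	if (inspect_or_commit(bond, 0)) {	/* pass 1: inspect only */
		read_unlock(&bond->lock);
		rtnl_lock();			/* may sleep; no locks held */
		read_lock(&bond->lock);
		inspect_or_commit(bond, 1);	/* pass 2: re-inspect, commit */
		rtnl_unlock();
	}
	read_unlock(&bond->lock);
}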
@@ -2636,9 +2686,10 @@ out:
 * arp is transmitted to generate traffic. see activebackup_arp_monitor for
 * arp monitoring in active backup mode.
 */
-void bond_loadbalance_arp_mon(struct net_device *bond_dev)
+void bond_loadbalance_arp_mon(struct work_struct *work)
 {
-	struct bonding *bond = bond_dev->priv;
+	struct bonding *bond = container_of(work, struct bonding,
+					    arp_work.work);
 	struct slave *slave, *oldcurrent;
 	int do_failover = 0;
 	int delta_in_ticks;
@@ -2685,13 +2736,13 @@ void bond_loadbalance_arp_mon(struct net_device *bond_dev)
 				printk(KERN_INFO DRV_NAME
 				       ": %s: link status definitely "
 				       "up for interface %s, ",
-				       bond_dev->name,
+				       bond->dev->name,
 				       slave->dev->name);
 				do_failover = 1;
 			} else {
 				printk(KERN_INFO DRV_NAME
 				       ": %s: interface %s is now up\n",
-				       bond_dev->name,
+				       bond->dev->name,
 				       slave->dev->name);
 			}
 		}
@@ -2715,7 +2766,7 @@ void bond_loadbalance_arp_mon(struct net_device *bond_dev)
 
 			printk(KERN_INFO DRV_NAME
 			       ": %s: interface %s is now down.\n",
-			       bond_dev->name,
+			       bond->dev->name,
 			       slave->dev->name);
 
 			if (slave == oldcurrent) {
@@ -2737,17 +2788,19 @@ void bond_loadbalance_arp_mon(struct net_device *bond_dev)
 	}
 
 	if (do_failover) {
-		write_lock(&bond->curr_slave_lock);
+		rtnl_lock();
+		write_lock_bh(&bond->curr_slave_lock);
 
 		bond_select_active_slave(bond);
 
-		write_unlock(&bond->curr_slave_lock);
+		write_unlock_bh(&bond->curr_slave_lock);
+		rtnl_unlock();
+
 	}
 
 re_arm:
-	if (bond->params.arp_interval) {
-		mod_timer(&bond->arp_timer, jiffies + delta_in_ticks);
-	}
+	if (bond->params.arp_interval)
+		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
 out:
 	read_unlock(&bond->lock);
 }
@@ -2767,9 +2820,10 @@ out:
 * may have received.
 * see loadbalance_arp_monitor for arp monitoring in load balancing mode
 */
-void bond_activebackup_arp_mon(struct net_device *bond_dev)
+void bond_activebackup_arp_mon(struct work_struct *work)
 {
-	struct bonding *bond = bond_dev->priv;
+	struct bonding *bond = container_of(work, struct bonding,
+					    arp_work.work);
 	struct slave *slave;
 	int delta_in_ticks;
 	int i;
@@ -2798,7 +2852,9 @@ void bond_activebackup_arp_mon(struct net_device *bond_dev)
 
 			slave->link = BOND_LINK_UP;
 
-			write_lock(&bond->curr_slave_lock);
+			rtnl_lock();
+
+			write_lock_bh(&bond->curr_slave_lock);
 
 			if ((!bond->curr_active_slave) &&
 			    ((jiffies - slave->dev->trans_start) <= delta_in_ticks)) {
@@ -2821,18 +2877,19 @@ void bond_activebackup_arp_mon(struct net_device *bond_dev)
 					printk(KERN_INFO DRV_NAME
 					       ": %s: %s is up and now the "
 					       "active interface\n",
-					       bond_dev->name,
+					       bond->dev->name,
 					       slave->dev->name);
 					netif_carrier_on(bond->dev);
 				} else {
 					printk(KERN_INFO DRV_NAME
 					       ": %s: backup interface %s is "
 					       "now up\n",
-					       bond_dev->name,
+					       bond->dev->name,
 					       slave->dev->name);
 				}
 
-			write_unlock(&bond->curr_slave_lock);
+			write_unlock_bh(&bond->curr_slave_lock);
+			rtnl_unlock();
 		}
 	} else {
 		read_lock(&bond->curr_slave_lock);
@@ -2864,7 +2921,7 @@ void bond_activebackup_arp_mon(struct net_device *bond_dev)
 
 			printk(KERN_INFO DRV_NAME
 			       ": %s: backup interface %s is now down\n",
-			       bond_dev->name,
+			       bond->dev->name,
 			       slave->dev->name);
 		} else {
 			read_unlock(&bond->curr_slave_lock);
@@ -2899,15 +2956,18 @@ void bond_activebackup_arp_mon(struct net_device *bond_dev)
 			printk(KERN_INFO DRV_NAME
 			       ": %s: link status down for active interface "
 			       "%s, disabling it\n",
-			       bond_dev->name,
+			       bond->dev->name,
 			       slave->dev->name);
 
-			write_lock(&bond->curr_slave_lock);
+			rtnl_lock();
+			write_lock_bh(&bond->curr_slave_lock);
 
 			bond_select_active_slave(bond);
 			slave = bond->curr_active_slave;
 
-			write_unlock(&bond->curr_slave_lock);
+			write_unlock_bh(&bond->curr_slave_lock);
+
+			rtnl_unlock();
 
 			bond->current_arp_slave = slave;
 
@@ -2921,14 +2981,17 @@ void bond_activebackup_arp_mon(struct net_device *bond_dev)
 			printk(KERN_INFO DRV_NAME
 			       ": %s: changing from interface %s to primary "
 			       "interface %s\n",
-			       bond_dev->name,
+			       bond->dev->name,
 			       slave->dev->name,
 			       bond->primary_slave->dev->name);
 
 			/* primary is up so switch to it */
-			write_lock(&bond->curr_slave_lock);
+			rtnl_lock();
+			write_lock_bh(&bond->curr_slave_lock);
 			bond_change_active_slave(bond, bond->primary_slave);
-			write_unlock(&bond->curr_slave_lock);
+			write_unlock_bh(&bond->curr_slave_lock);
+
+			rtnl_unlock();
 
 			slave = bond->primary_slave;
 			slave->jiffies = jiffies;
@@ -2985,7 +3048,7 @@ void bond_activebackup_arp_mon(struct net_device *bond_dev)
 				printk(KERN_INFO DRV_NAME
 				       ": %s: backup interface %s is "
 				       "now down.\n",
-				       bond_dev->name,
+				       bond->dev->name,
 				       slave->dev->name);
 			}
 		}
@@ -2994,7 +3057,7 @@ void bond_activebackup_arp_mon(struct net_device *bond_dev)
 
 re_arm:
 	if (bond->params.arp_interval) {
-		mod_timer(&bond->arp_timer, jiffies + delta_in_ticks);
+		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
 	}
 out:
 	read_unlock(&bond->lock);
@@ -3015,7 +3078,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
 
 	/* make sure the bond won't be taken away */
 	read_lock(&dev_base_lock);
-	read_lock_bh(&bond->lock);
+	read_lock(&bond->lock);
 
 	if (*pos == 0) {
 		return SEQ_START_TOKEN;
@@ -3049,7 +3112,7 @@ static void bond_info_seq_stop(struct seq_file *seq, void *v)
 {
 	struct bonding *bond = seq->private;
 
-	read_unlock_bh(&bond->lock);
+	read_unlock(&bond->lock);
 	read_unlock(&dev_base_lock);
 }
 
@@ -3582,15 +3645,11 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb,
 static int bond_open(struct net_device *bond_dev)
 {
 	struct bonding *bond = bond_dev->priv;
-	struct timer_list *mii_timer = &bond->mii_timer;
-	struct timer_list *arp_timer = &bond->arp_timer;
 
 	bond->kill_timers = 0;
 
 	if ((bond->params.mode == BOND_MODE_TLB) ||
 	    (bond->params.mode == BOND_MODE_ALB)) {
-		struct timer_list *alb_timer = &(BOND_ALB_INFO(bond).alb_timer);
-
 		/* bond_alb_initialize must be called before the timer
 		 * is started.
 		 */
@@ -3599,44 +3658,31 @@ static int bond_open(struct net_device *bond_dev)
 			return -1;
 		}
 
-		init_timer(alb_timer);
-		alb_timer->expires = jiffies + 1;
-		alb_timer->data = (unsigned long)bond;
-		alb_timer->function = (void *)&bond_alb_monitor;
-		add_timer(alb_timer);
+		INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
+		queue_delayed_work(bond->wq, &bond->alb_work, 0);
 	}
 
 	if (bond->params.miimon) {  /* link check interval, in milliseconds. */
-		init_timer(mii_timer);
-		mii_timer->expires = jiffies + 1;
-		mii_timer->data = (unsigned long)bond_dev;
-		mii_timer->function = (void *)&bond_mii_monitor;
-		add_timer(mii_timer);
+		INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
+		queue_delayed_work(bond->wq, &bond->mii_work, 0);
 	}
 
 	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
-		init_timer(arp_timer);
-		arp_timer->expires = jiffies + 1;
-		arp_timer->data = (unsigned long)bond_dev;
-		if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
-			arp_timer->function = (void *)&bond_activebackup_arp_mon;
-		} else {
-			arp_timer->function = (void *)&bond_loadbalance_arp_mon;
-		}
+		if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+			INIT_DELAYED_WORK(&bond->arp_work,
+					  bond_activebackup_arp_mon);
+		else
+			INIT_DELAYED_WORK(&bond->arp_work,
+					  bond_loadbalance_arp_mon);
+
+		queue_delayed_work(bond->wq, &bond->arp_work, 0);
 		if (bond->params.arp_validate)
 			bond_register_arp(bond);
-
-		add_timer(arp_timer);
 	}
 
 	if (bond->params.mode == BOND_MODE_8023AD) {
-		struct timer_list *ad_timer = &(BOND_AD_INFO(bond).ad_timer);
-		init_timer(ad_timer);
-		ad_timer->expires = jiffies + 1;
-		ad_timer->data = (unsigned long)bond;
-		ad_timer->function = (void *)&bond_3ad_state_machine_handler;
-		add_timer(ad_timer);
+		INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
+		queue_delayed_work(bond->wq, &bond->ad_work, 0);
 		/* register to receive LACPDUs */
 		bond_register_lacpdu(bond);
 	}
@@ -3664,25 +3710,21 @@ static int bond_close(struct net_device *bond_dev)
 
 	write_unlock_bh(&bond->lock);
 
-	/* del_timer_sync must run without holding the bond->lock
-	 * because a running timer might be trying to hold it too
-	 */
-
 	if (bond->params.miimon) {  /* link check interval, in milliseconds. */
-		del_timer_sync(&bond->mii_timer);
+		cancel_delayed_work(&bond->mii_work);
 	}
 
 	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
-		del_timer_sync(&bond->arp_timer);
+		cancel_delayed_work(&bond->arp_work);
 	}
 
 	switch (bond->params.mode) {
 	case BOND_MODE_8023AD:
-		del_timer_sync(&(BOND_AD_INFO(bond).ad_timer));
+		cancel_delayed_work(&bond->ad_work);
 		break;
 	case BOND_MODE_TLB:
 	case BOND_MODE_ALB:
-		del_timer_sync(&(BOND_ALB_INFO(bond).alb_timer));
+		cancel_delayed_work(&bond->alb_work);
 		break;
 	default:
 		break;
@@ -3779,13 +3821,13 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 		if (mii->reg_num == 1) {
 			struct bonding *bond = bond_dev->priv;
 			mii->val_out = 0;
-			read_lock_bh(&bond->lock);
+			read_lock(&bond->lock);
 			read_lock(&bond->curr_slave_lock);
 			if (netif_carrier_ok(bond->dev)) {
 				mii->val_out = BMSR_LSTATUS;
 			}
 			read_unlock(&bond->curr_slave_lock);
-			read_unlock_bh(&bond->lock);
+			read_unlock(&bond->lock);
 		}
 
 		return 0;
@@ -4077,8 +4119,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
 {
 	struct bonding *bond = bond_dev->priv;
 	struct slave *slave, *start_at;
-	int i;
-	int res = 1;
+	int i, slave_no, res = 1;
 
 	read_lock(&bond->lock);
 
@@ -4086,29 +4127,29 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
 		goto out;
 	}
 
-	read_lock(&bond->curr_slave_lock);
-	slave = start_at = bond->curr_active_slave;
-	read_unlock(&bond->curr_slave_lock);
+	/*
+	 * Concurrent TX may collide on rr_tx_counter; we accept that
+	 * as being rare enough not to justify using an atomic op here
+	 */
+	slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
 
-	if (!slave) {
-		goto out;
+	bond_for_each_slave(bond, slave, i) {
+		slave_no--;
+		if (slave_no < 0) {
+			break;
+		}
 	}
 
+	start_at = slave;
 	bond_for_each_slave_from(bond, slave, i, start_at) {
 		if (IS_UP(slave->dev) &&
 		    (slave->link == BOND_LINK_UP) &&
 		    (slave->state == BOND_STATE_ACTIVE)) {
 			res = bond_dev_queue_xmit(bond, skb, slave->dev);
-
-			write_lock(&bond->curr_slave_lock);
-			bond->curr_active_slave = slave->next;
-			write_unlock(&bond->curr_slave_lock);
-
 			break;
 		}
 	}
 
-
 out:
 	if (res) {
 		/* no suitable interface, frame not sent */
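The transmit hunk above drops the write-locked advance of curr_active_slave in favor of a free-running rr_tx_counter reduced modulo slave_cnt; as the added comment says, occasional collisions between concurrent transmitters are accepted as harmless. The selection logic in isolation, as a hypothetical standalone C demo (not kernel code):

#include <stdio.h>

/* Free-running counter taken modulo the slave count, as in
 * bond_xmit_roundrobin() above. */
int main(void)
{
	unsigned int rr_tx_counter = 0;
	const int slave_cnt = 3;

	for (int pkt = 0; pkt < 8; pkt++) {
		int slave_no = rr_tx_counter++ % slave_cnt;
		printf("packet %d -> slave %d\n", pkt, slave_no);
	}
	return 0;
}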
@@ -4340,6 +4381,10 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
 
 	bond->params = *params; /* copy params struct */
 
+	bond->wq = create_singlethread_workqueue(bond_dev->name);
+	if (!bond->wq)
+		return -ENOMEM;
+
 	/* Initialize pointers */
 	bond->first_slave = NULL;
 	bond->curr_active_slave = NULL;
@@ -4428,8 +4473,8 @@ static void bond_free_all(void)
 		bond_mc_list_destroy(bond);
 		/* Release the bonded slaves */
 		bond_release_all(bond_dev);
-		bond_deinit(bond_dev);
 		unregister_netdevice(bond_dev);
+		bond_deinit(bond_dev);
 	}
 
 #ifdef CONFIG_PROC_FS
@@ -4826,10 +4871,32 @@ out_rtnl:
 	return res;
 }
 
+static void bond_work_cancel_all(struct bonding *bond)
+{
+	write_lock_bh(&bond->lock);
+	bond->kill_timers = 1;
+	write_unlock_bh(&bond->lock);
+
+	if (bond->params.miimon && delayed_work_pending(&bond->mii_work))
+		cancel_delayed_work(&bond->mii_work);
+
+	if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work))
+		cancel_delayed_work(&bond->arp_work);
+
+	if (bond->params.mode == BOND_MODE_ALB &&
+	    delayed_work_pending(&bond->alb_work))
+		cancel_delayed_work(&bond->alb_work);
+
+	if (bond->params.mode == BOND_MODE_8023AD &&
+	    delayed_work_pending(&bond->ad_work))
+		cancel_delayed_work(&bond->ad_work);
+}
+
 static int __init bonding_init(void)
 {
 	int i;
 	int res;
+	struct bonding *bond, *nxt;
 
 	printk(KERN_INFO "%s", version);
 
@@ -4856,6 +4923,11 @@ static int __init bonding_init(void)
 
 	goto out;
 err:
+	list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) {
+		bond_work_cancel_all(bond);
+		destroy_workqueue(bond->wq);
+	}
+
 	rtnl_lock();
 	bond_free_all();
 	bond_destroy_sysfs();
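Across bond_init(), bond_close(), bond_work_cancel_all() and the bonding_init() error path, the patch establishes a per-bond lifecycle for the workqueue: create the queue once, INIT and queue each delayed work when the device opens, cancel on close, and cancel everything before destroying the queue on teardown. A minimal sketch of that contract for a hypothetical driver (the "foo" names are illustrative):

#include <linux/workqueue.h>

struct foo {
	struct workqueue_struct *wq;
	struct delayed_work mon_work;
};

static void foo_monitor(struct work_struct *work)
{
	/* periodic work; would re-arm itself via queue_delayed_work() */
}

static int foo_init(struct foo *f, const char *name)
{
	f->wq = create_singlethread_workqueue(name);
	if (!f->wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&f->mon_work, foo_monitor);
	queue_delayed_work(f->wq, &f->mon_work, 0);
	return 0;
}

static void foo_fini(struct foo *f)
{
	/* stop re-arming first (cf. kill_timers in the patch), then
	 * cancel pending work, then tear the queue down */
	if (delayed_work_pending(&f->mon_work))
		cancel_delayed_work(&f->mon_work);
	flush_workqueue(f->wq);
	destroy_workqueue(f->wq);
}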
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 855dc10ffa1b..7a06ade85b02 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -229,7 +229,7 @@ static ssize_t bonding_show_slaves(struct device *d,
 	int i, res = 0;
 	struct bonding *bond = to_bond(d);
 
-	read_lock_bh(&bond->lock);
+	read_lock(&bond->lock);
 	bond_for_each_slave(bond, slave, i) {
 		if (res > (PAGE_SIZE - IFNAMSIZ)) {
 			/* not enough space for another interface name */
@@ -240,7 +240,7 @@ static ssize_t bonding_show_slaves(struct device *d,
 		}
 		res += sprintf(buf + res, "%s ", slave->dev->name);
 	}
-	read_unlock_bh(&bond->lock);
+	read_unlock(&bond->lock);
 	res += sprintf(buf + res, "\n");
 	res++;
 	return res;
@@ -282,18 +282,18 @@ static ssize_t bonding_store_slaves(struct device *d,
 
 		/* Got a slave name in ifname.  Is it already in the list? */
 		found = 0;
-		read_lock_bh(&bond->lock);
+		read_lock(&bond->lock);
 		bond_for_each_slave(bond, slave, i)
 			if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
 				printk(KERN_ERR DRV_NAME
 				       ": %s: Interface %s is already enslaved!\n",
 				       bond->dev->name, ifname);
 				ret = -EPERM;
-				read_unlock_bh(&bond->lock);
+				read_unlock(&bond->lock);
 				goto out;
 			}
 
-		read_unlock_bh(&bond->lock);
+		read_unlock(&bond->lock);
 		printk(KERN_INFO DRV_NAME ": %s: Adding slave %s.\n",
 		       bond->dev->name, ifname);
 		dev = dev_get_by_name(&init_net, ifname);
@@ -662,12 +662,9 @@ static ssize_t bonding_store_arp_interval(struct device *d,
 			       "%s Disabling MII monitoring.\n",
 			       bond->dev->name, bond->dev->name);
 			bond->params.miimon = 0;
-			/* Kill MII timer, else it brings bond's link down */
-			if (bond->arp_timer.function) {
-				printk(KERN_INFO DRV_NAME
-				       ": %s: Kill MII timer, else it brings bond's link down...\n",
-				       bond->dev->name);
-				del_timer_sync(&bond->mii_timer);
+			if (delayed_work_pending(&bond->mii_work)) {
+				cancel_delayed_work(&bond->mii_work);
+				flush_workqueue(bond->wq);
 			}
 		}
 		if (!bond->params.arp_targets[0]) {
@@ -682,25 +679,15 @@ static ssize_t bonding_store_arp_interval(struct device *d,
 		 * timer will get fired off when the open function
 		 * is called.
 		 */
-		if (bond->arp_timer.function) {
-			/* The timer's already set up, so fire it off */
-			mod_timer(&bond->arp_timer, jiffies + 1);
-		} else {
-			/* Set up the timer. */
-			init_timer(&bond->arp_timer);
-			bond->arp_timer.expires = jiffies + 1;
-			bond->arp_timer.data =
-				(unsigned long) bond->dev;
-			if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
-				bond->arp_timer.function =
-					(void *)
-					&bond_activebackup_arp_mon;
-			} else {
-				bond->arp_timer.function =
-					(void *)
-					&bond_loadbalance_arp_mon;
-			}
-			add_timer(&bond->arp_timer);
+		if (!delayed_work_pending(&bond->arp_work)) {
+			if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+				INIT_DELAYED_WORK(&bond->arp_work,
+						  bond_activebackup_arp_mon);
+			else
+				INIT_DELAYED_WORK(&bond->arp_work,
+						  bond_loadbalance_arp_mon);
+
+			queue_delayed_work(bond->wq, &bond->arp_work, 0);
 		}
 	}
 
@@ -1056,12 +1043,9 @@ static ssize_t bonding_store_miimon(struct device *d,
1056 bond->params.arp_validate = 1043 bond->params.arp_validate =
1057 BOND_ARP_VALIDATE_NONE; 1044 BOND_ARP_VALIDATE_NONE;
1058 } 1045 }
1059 /* Kill ARP timer, else it brings bond's link down */ 1046 if (delayed_work_pending(&bond->arp_work)) {
1060 if (bond->mii_timer.function) { 1047 cancel_delayed_work(&bond->arp_work);
1061 printk(KERN_INFO DRV_NAME 1048 flush_workqueue(bond->wq);
1062 ": %s: Kill ARP timer, else it brings bond's link down...\n",
1063 bond->dev->name);
1064 del_timer_sync(&bond->arp_timer);
1065 } 1049 }
1066 } 1050 }
1067 1051
@@ -1071,18 +1055,11 @@ static ssize_t bonding_store_miimon(struct device *d,
1071 * timer will get fired off when the open function 1055 * timer will get fired off when the open function
1072 * is called. 1056 * is called.
1073 */ 1057 */
1074 if (bond->mii_timer.function) { 1058 if (!delayed_work_pending(&bond->mii_work)) {
1075 /* The timer's already set up, so fire it off */ 1059 INIT_DELAYED_WORK(&bond->mii_work,
1076 mod_timer(&bond->mii_timer, jiffies + 1); 1060 bond_mii_monitor);
1077 } else { 1061 queue_delayed_work(bond->wq,
1078 /* Set up the timer. */ 1062 &bond->mii_work, 0);
1079 init_timer(&bond->mii_timer);
1080 bond->mii_timer.expires = jiffies + 1;
1081 bond->mii_timer.data =
1082 (unsigned long) bond->dev;
1083 bond->mii_timer.function =
1084 (void *) &bond_mii_monitor;
1085 add_timer(&bond->mii_timer);
1086 } 1063 }
1087 } 1064 }
1088 } 1065 }
@@ -1156,6 +1133,9 @@ static ssize_t bonding_store_primary(struct device *d,
1156 } 1133 }
1157out: 1134out:
1158 write_unlock_bh(&bond->lock); 1135 write_unlock_bh(&bond->lock);
1136
1137 rtnl_unlock();
1138
1159 return count; 1139 return count;
1160} 1140}
1161static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR, bonding_show_primary, bonding_store_primary); 1141static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR, bonding_show_primary, bonding_store_primary);
@@ -1213,6 +1193,7 @@ static ssize_t bonding_show_active_slave(struct device *d,
1213 struct bonding *bond = to_bond(d); 1193 struct bonding *bond = to_bond(d);
1214 int count; 1194 int count;
1215 1195
1196 rtnl_lock();
1216 1197
1217 read_lock(&bond->curr_slave_lock); 1198 read_lock(&bond->curr_slave_lock);
1218 curr = bond->curr_active_slave; 1199 curr = bond->curr_active_slave;
@@ -1292,6 +1273,8 @@ static ssize_t bonding_store_active_slave(struct device *d,
1292 } 1273 }
1293out: 1274out:
1294 write_unlock_bh(&bond->lock); 1275 write_unlock_bh(&bond->lock);
1276 rtnl_unlock();
1277
1295 return count; 1278 return count;
1296 1279
1297} 1280}
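
The sysfs handlers above complete the bonding driver's move from kernel timers to delayed work on the bond's private workqueue: the stores now cancel pending work and flush the queue instead of calling del_timer_sync(), and (re)arm monitoring with queue_delayed_work(). A minimal sketch of the pattern with hypothetical names (only the workqueue API calls are taken from the patch):

	#include <linux/workqueue.h>

	struct monitor {			/* stand-in for struct bonding */
		struct workqueue_struct *wq;
		struct delayed_work work;
		unsigned long interval;		/* jiffies between runs */
	};

	static void monitor_fn(struct work_struct *work)
	{
		struct monitor *m = container_of(work, struct monitor, work.work);

		/* ... poll link state ... */

		/* delayed work is one-shot; the handler re-arms itself */
		queue_delayed_work(m->wq, &m->work, m->interval);
	}

	static void monitor_stop(struct monitor *m)
	{
		if (delayed_work_pending(&m->work))
			cancel_delayed_work(&m->work);
		flush_workqueue(m->wq);		/* wait out an in-flight run */
	}

	static void monitor_start(struct monitor *m)
	{
		if (!delayed_work_pending(&m->work)) {
			INIT_DELAYED_WORK(&m->work, monitor_fn);
			queue_delayed_work(m->wq, &m->work, 0);
		}
	}

Unlike a timer callback, the work handler runs in process context, so the monitor functions may sleep and take rtnl_lock(), which is what the rtnl_lock()/rtnl_unlock() calls added to the primary and active_slave handlers rely on.
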
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index b8180600a309..d1ed14bf1ccb 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -184,8 +184,6 @@ struct bonding {
184 s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ 184 s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
185 rwlock_t lock; 185 rwlock_t lock;
186 rwlock_t curr_slave_lock; 186 rwlock_t curr_slave_lock;
187 struct timer_list mii_timer;
188 struct timer_list arp_timer;
189 s8 kill_timers; 187 s8 kill_timers;
190 s8 send_grat_arp; 188 s8 send_grat_arp;
191 s8 setup_by_slave; 189 s8 setup_by_slave;
@@ -199,12 +197,18 @@ struct bonding {
199 int (*xmit_hash_policy)(struct sk_buff *, struct net_device *, int); 197 int (*xmit_hash_policy)(struct sk_buff *, struct net_device *, int);
200 __be32 master_ip; 198 __be32 master_ip;
201 u16 flags; 199 u16 flags;
200 u16 rr_tx_counter;
202 struct ad_bond_info ad_info; 201 struct ad_bond_info ad_info;
203 struct alb_bond_info alb_info; 202 struct alb_bond_info alb_info;
204 struct bond_params params; 203 struct bond_params params;
205 struct list_head vlan_list; 204 struct list_head vlan_list;
206 struct vlan_group *vlgrp; 205 struct vlan_group *vlgrp;
207 struct packet_type arp_mon_pt; 206 struct packet_type arp_mon_pt;
207 struct workqueue_struct *wq;
208 struct delayed_work mii_work;
209 struct delayed_work arp_work;
210 struct delayed_work alb_work;
211 struct delayed_work ad_work;
208}; 212};
209 213
210/** 214/**
@@ -307,9 +311,9 @@ int bond_create_slave_symlinks(struct net_device *master, struct net_device *sla
307void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave); 311void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
308int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev); 312int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
309int bond_release(struct net_device *bond_dev, struct net_device *slave_dev); 313int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
310void bond_mii_monitor(struct net_device *bond_dev); 314void bond_mii_monitor(struct work_struct *);
311void bond_loadbalance_arp_mon(struct net_device *bond_dev); 315void bond_loadbalance_arp_mon(struct work_struct *);
312void bond_activebackup_arp_mon(struct net_device *bond_dev); 316void bond_activebackup_arp_mon(struct work_struct *);
313void bond_set_mode_ops(struct bonding *bond, int mode); 317void bond_set_mode_ops(struct bonding *bond, int mode);
314int bond_parse_parm(char *mode_arg, struct bond_parm_tbl *tbl); 318int bond_parse_parm(char *mode_arg, struct bond_parm_tbl *tbl);
315void bond_select_active_slave(struct bonding *bond); 319void bond_select_active_slave(struct bonding *bond);
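
Since the monitor prototypes now take a struct work_struct *, each handler has to recover its struct bonding by walking back from the embedded delayed_work member. A sketch of the presumable entry-point shape (member names come from the header above; the body is illustrative):

	void bond_mii_monitor(struct work_struct *work)
	{
		struct bonding *bond = container_of(work, struct bonding,
						    mii_work.work);

		/* ... inspect slaves, update link state ... */

		if (bond->params.miimon)	/* re-arm while monitoring is on */
			queue_delayed_work(bond->wq, &bond->mii_work,
					   msecs_to_jiffies(bond->params.miimon));
	}
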
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index ae419736158e..57541d2d9e1e 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -460,18 +460,11 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
460 struct cpmac_desc *desc; 460 struct cpmac_desc *desc;
461 struct cpmac_priv *priv = netdev_priv(dev); 461 struct cpmac_priv *priv = netdev_priv(dev);
462 462
463 if (unlikely(skb_padto(skb, ETH_ZLEN))) { 463 if (unlikely(skb_padto(skb, ETH_ZLEN)))
464 if (netif_msg_tx_err(priv) && net_ratelimit()) 464 return NETDEV_TX_OK;
465 printk(KERN_WARNING
466 "%s: tx: padding failed, dropping\n", dev->name);
467 spin_lock(&priv->lock);
468 dev->stats.tx_dropped++;
469 spin_unlock(&priv->lock);
470 return -ENOMEM;
471 }
472 465
473 len = max(skb->len, ETH_ZLEN); 466 len = max(skb->len, ETH_ZLEN);
474 queue = skb_get_queue_mapping(skb); 467 queue = skb->queue_mapping;
475#ifdef CONFIG_NETDEVICES_MULTIQUEUE 468#ifdef CONFIG_NETDEVICES_MULTIQUEUE
476 netif_stop_subqueue(dev, queue); 469 netif_stop_subqueue(dev, queue);
477#else 470#else
@@ -481,13 +474,9 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
481 desc = &priv->desc_ring[queue]; 474 desc = &priv->desc_ring[queue];
482 if (unlikely(desc->dataflags & CPMAC_OWN)) { 475 if (unlikely(desc->dataflags & CPMAC_OWN)) {
483 if (netif_msg_tx_err(priv) && net_ratelimit()) 476 if (netif_msg_tx_err(priv) && net_ratelimit())
484 printk(KERN_WARNING "%s: tx dma ring full, dropping\n", 477 printk(KERN_WARNING "%s: tx dma ring full\n",
485 dev->name); 478 dev->name);
486 spin_lock(&priv->lock); 479 return NETDEV_TX_BUSY;
487 dev->stats.tx_dropped++;
488 spin_unlock(&priv->lock);
489 dev_kfree_skb_any(skb);
490 return -ENOMEM;
491 } 480 }
492 481
493 spin_lock(&priv->lock); 482 spin_lock(&priv->lock);
@@ -509,7 +498,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
509 cpmac_dump_skb(dev, skb); 498 cpmac_dump_skb(dev, skb);
510 cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); 499 cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);
511 500
512 return 0; 501 return NETDEV_TX_OK;
513} 502}
514 503
515static void cpmac_end_xmit(struct net_device *dev, int queue) 504static void cpmac_end_xmit(struct net_device *dev, int queue)
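
The error paths above also fix cpmac's hard_start_xmit return contract: an errno such as -ENOMEM is not a valid return value from a transmit hook. A generic sketch of the expected pattern (helper names are hypothetical):

	static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		if (unlikely(skb_padto(skb, ETH_ZLEN)))
			return NETDEV_TX_OK;	/* skb_padto() already freed the skb */

		if (ring_full(dev))		/* hypothetical */
			return NETDEV_TX_BUSY;	/* do not free; core requeues the skb */

		/* ... hand the buffer to the hardware ... */
		return NETDEV_TX_OK;		/* skb consumed */
	}

NETDEV_TX_BUSY tells the core the packet was not consumed, which is why the dropped-packet accounting and dev_kfree_skb_any() disappear from the ring-full branch.
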
@@ -646,12 +635,14 @@ static void cpmac_clear_tx(struct net_device *dev)
646 int i; 635 int i;
647 if (unlikely(!priv->desc_ring)) 636 if (unlikely(!priv->desc_ring))
648 return; 637 return;
649 for (i = 0; i < CPMAC_QUEUES; i++) 638 for (i = 0; i < CPMAC_QUEUES; i++) {
639 priv->desc_ring[i].dataflags = 0;
650 if (priv->desc_ring[i].skb) { 640 if (priv->desc_ring[i].skb) {
651 dev_kfree_skb_any(priv->desc_ring[i].skb); 641 dev_kfree_skb_any(priv->desc_ring[i].skb);
652 if (netif_subqueue_stopped(dev, i)) 642 if (netif_subqueue_stopped(dev, i))
653 netif_wake_subqueue(dev, i); 643 netif_wake_subqueue(dev, i);
654 } 644 }
645 }
655} 646}
656 647
657static void cpmac_hw_error(struct work_struct *work) 648static void cpmac_hw_error(struct work_struct *work)
@@ -727,11 +718,13 @@ static void cpmac_tx_timeout(struct net_device *dev)
727#ifdef CONFIG_NETDEVICES_MULTIQUEUE 718#ifdef CONFIG_NETDEVICES_MULTIQUEUE
728 for (i = 0; i < CPMAC_QUEUES; i++) 719 for (i = 0; i < CPMAC_QUEUES; i++)
729 if (priv->desc_ring[i].skb) { 720 if (priv->desc_ring[i].skb) {
721 priv->desc_ring[i].dataflags = 0;
730 dev_kfree_skb_any(priv->desc_ring[i].skb); 722 dev_kfree_skb_any(priv->desc_ring[i].skb);
731 netif_wake_subqueue(dev, i); 723 netif_wake_subqueue(dev, i);
732 break; 724 break;
733 } 725 }
734#else 726#else
727 priv->desc_ring[0].dataflags = 0;
735 if (priv->desc_ring[0].skb) 728 if (priv->desc_ring[0].skb)
736 dev_kfree_skb_any(priv->desc_ring[0].skb); 729 dev_kfree_skb_any(priv->desc_ring[0].skb);
737 netif_wake_queue(dev); 730 netif_wake_queue(dev);
@@ -794,7 +787,7 @@ static int cpmac_set_ringparam(struct net_device *dev, struct ethtool_ringparam*
794{ 787{
795 struct cpmac_priv *priv = netdev_priv(dev); 788 struct cpmac_priv *priv = netdev_priv(dev);
796 789
797 if (dev->flags && IFF_UP) 790 if (netif_running(dev))
798 return -EBUSY; 791 return -EBUSY;
799 priv->ring_size = ring->rx_pending; 792 priv->ring_size = ring->rx_pending;
800 return 0; 793 return 0;
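
The last hunk fixes a classic operator slip: dev->flags && IFF_UP is a logical AND, so it is true whenever any flag at all is set, not when IFF_UP specifically is. A trivial standalone demonstration (hypothetical flag value):

	#include <stdio.h>

	#define IFF_UP 0x1

	int main(void)
	{
		unsigned int flags = 0x1000;	/* some flag set, but not IFF_UP */

		printf("flags && IFF_UP = %d\n", flags && IFF_UP);	/* 1: wrong */
		printf("flags &  IFF_UP = %d\n", flags & IFF_UP);	/* 0: right */
		return 0;
	}

The replacement sidesteps the whole class of bug by using netif_running(), the idiomatic check.
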
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index b07613e61f53..ddc30c4bf34a 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -805,7 +805,7 @@ static void __devinit dfx_bus_init(struct net_device *dev)
805 * Interrupts are disabled at the adapter bus-specific logic. 805 * Interrupts are disabled at the adapter bus-specific logic.
806 */ 806 */
807 807
808static void __devinit dfx_bus_uninit(struct net_device *dev) 808static void __devexit dfx_bus_uninit(struct net_device *dev)
809{ 809{
810 DFX_board_t *bp = netdev_priv(dev); 810 DFX_board_t *bp = netdev_priv(dev);
811 struct device *bdev = bp->bus_dev; 811 struct device *bdev = bp->bus_dev;
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
index 37707a0c0498..aafc3ce59cbb 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/mipsnet.c
@@ -30,6 +30,7 @@ static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata,
30 int len) 30 int len)
31{ 31{
32 uint32_t available_len = inl(mipsnet_reg_address(dev, rxDataCount)); 32 uint32_t available_len = inl(mipsnet_reg_address(dev, rxDataCount));
33
33 if (available_len < len) 34 if (available_len < len)
34 return -EFAULT; 35 return -EFAULT;
35 36
@@ -45,14 +46,8 @@ static inline ssize_t mipsnet_put_todevice(struct net_device *dev,
45 int count_to_go = skb->len; 46 int count_to_go = skb->len;
46 char *buf_ptr = skb->data; 47 char *buf_ptr = skb->data;
47 48
48 pr_debug("%s: %s(): telling MIPSNET txDataCount(%d)\n",
49 dev->name, __FUNCTION__, skb->len);
50
51 outl(skb->len, mipsnet_reg_address(dev, txDataCount)); 49 outl(skb->len, mipsnet_reg_address(dev, txDataCount));
52 50
53 pr_debug("%s: %s(): sending data to MIPSNET txDataBuffer(%d)\n",
54 dev->name, __FUNCTION__, skb->len);
55
56 for (; count_to_go; buf_ptr++, count_to_go--) 51 for (; count_to_go; buf_ptr++, count_to_go--)
57 outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer)); 52 outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer));
58 53
@@ -64,10 +59,8 @@ static inline ssize_t mipsnet_put_todevice(struct net_device *dev,
64 59
65static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev) 60static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
66{ 61{
67 pr_debug("%s:%s(): transmitting %d bytes\n", 62 /*
68 dev->name, __FUNCTION__, skb->len); 63 * Only one packet at a time. Once TXDONE interrupt is serviced, the
69
70 /* Only one packet at a time. Once TXDONE interrupt is serviced, the
71 * queue will be restarted. 64 * queue will be restarted.
72 */ 65 */
73 netif_stop_queue(dev); 66 netif_stop_queue(dev);
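
The comment kept above is mipsnet's whole TX strategy: one packet in flight, queue stopped in xmit and woken from the TXDONE interrupt. In outline (a protocol sketch, not the driver's exact code):

	/* xmit: allow exactly one outstanding packet */
	static int one_shot_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		netif_stop_queue(dev);		/* nothing more until TXDONE */
		push_to_device(dev, skb);	/* hypothetical helper */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* interrupt: device signalled TXDONE */
	static void on_txdone(struct net_device *dev)
	{
		netif_wake_queue(dev);		/* core may call xmit again */
	}
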
@@ -94,8 +87,6 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count)
94 skb->protocol = eth_type_trans(skb, dev); 87 skb->protocol = eth_type_trans(skb, dev);
95 skb->ip_summed = CHECKSUM_UNNECESSARY; 88 skb->ip_summed = CHECKSUM_UNNECESSARY;
96 89
97 pr_debug("%s:%s(): pushing RXed data to kernel\n",
98 dev->name, __FUNCTION__);
99 netif_rx(skb); 90 netif_rx(skb);
100 91
101 dev->stats.rx_packets++; 92 dev->stats.rx_packets++;
@@ -112,44 +103,29 @@ static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
112 uint64_t interruptFlags; 103 uint64_t interruptFlags;
113 104
114 if (irq == dev->irq) { 105 if (irq == dev->irq) {
115 pr_debug("%s:%s(): irq %d for device\n",
116 dev->name, __FUNCTION__, irq);
117
118 retval = IRQ_HANDLED; 106 retval = IRQ_HANDLED;
119 107
120 interruptFlags = 108 interruptFlags =
121 inl(mipsnet_reg_address(dev, interruptControl)); 109 inl(mipsnet_reg_address(dev, interruptControl));
122 pr_debug("%s:%s(): intCtl=0x%016llx\n", dev->name,
123 __FUNCTION__, interruptFlags);
124 110
125 if (interruptFlags & MIPSNET_INTCTL_TXDONE) { 111 if (interruptFlags & MIPSNET_INTCTL_TXDONE) {
126 pr_debug("%s:%s(): got TXDone\n",
127 dev->name, __FUNCTION__);
128 outl(MIPSNET_INTCTL_TXDONE, 112 outl(MIPSNET_INTCTL_TXDONE,
129 mipsnet_reg_address(dev, interruptControl)); 113 mipsnet_reg_address(dev, interruptControl));
130 /* only one packet at a time, we are done. */ 114 /* only one packet at a time, we are done. */
131 netif_wake_queue(dev); 115 netif_wake_queue(dev);
132 } else if (interruptFlags & MIPSNET_INTCTL_RXDONE) { 116 } else if (interruptFlags & MIPSNET_INTCTL_RXDONE) {
133 pr_debug("%s:%s(): got RX data\n",
134 dev->name, __FUNCTION__);
135 mipsnet_get_fromdev(dev, 117 mipsnet_get_fromdev(dev,
136 inl(mipsnet_reg_address(dev, rxDataCount))); 118 inl(mipsnet_reg_address(dev, rxDataCount)));
137 pr_debug("%s:%s(): clearing RX int\n",
138 dev->name, __FUNCTION__);
139 outl(MIPSNET_INTCTL_RXDONE, 119 outl(MIPSNET_INTCTL_RXDONE,
140 mipsnet_reg_address(dev, interruptControl)); 120 mipsnet_reg_address(dev, interruptControl));
141 121
142 } else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) { 122 } else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) {
143 pr_debug("%s:%s(): got test interrupt\n",
144 dev->name, __FUNCTION__);
145 /* 123 /*
146 * TESTBIT is cleared on read. 124 * TESTBIT is cleared on read.
147 * And takes effect after a write with 0 125 * And takes effect after a write with 0
148 */ 126 */
149 outl(0, mipsnet_reg_address(dev, interruptControl)); 127 outl(0, mipsnet_reg_address(dev, interruptControl));
150 } else { 128 } else {
151 pr_debug("%s:%s(): no valid fags 0x%016llx\n",
152 dev->name, __FUNCTION__, interruptFlags);
153 /* Maybe shared IRQ, just ignore, no clearing. */ 129 /* Maybe shared IRQ, just ignore, no clearing. */
154 retval = IRQ_NONE; 130 retval = IRQ_NONE;
155 } 131 }
@@ -165,22 +141,15 @@ static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
165static int mipsnet_open(struct net_device *dev) 141static int mipsnet_open(struct net_device *dev)
166{ 142{
167 int err; 143 int err;
168 pr_debug("%s: mipsnet_open\n", dev->name);
169 144
170 err = request_irq(dev->irq, &mipsnet_interrupt, 145 err = request_irq(dev->irq, &mipsnet_interrupt,
171 IRQF_SHARED, dev->name, (void *) dev); 146 IRQF_SHARED, dev->name, (void *) dev);
172 147
173 if (err) { 148 if (err) {
174 pr_debug("%s: %s(): can't get irq %d\n",
175 dev->name, __FUNCTION__, dev->irq);
176 release_region(dev->base_addr, MIPSNET_IO_EXTENT); 149 release_region(dev->base_addr, MIPSNET_IO_EXTENT);
177 return err; 150 return err;
178 } 151 }
179 152
180 pr_debug("%s: %s(): got IO region at 0x%04lx and irq %d for dev.\n",
181 dev->name, __FUNCTION__, dev->base_addr, dev->irq);
182
183
184 netif_start_queue(dev); 153 netif_start_queue(dev);
185 154
186 /* test interrupt handler */ 155 /* test interrupt handler */
@@ -193,8 +162,8 @@ static int mipsnet_open(struct net_device *dev)
193 162
194static int mipsnet_close(struct net_device *dev) 163static int mipsnet_close(struct net_device *dev)
195{ 164{
196 pr_debug("%s: %s()\n", dev->name, __FUNCTION__);
197 netif_stop_queue(dev); 165 netif_stop_queue(dev);
166
198 return 0; 167 return 0;
199} 168}
200 169
@@ -229,9 +198,6 @@ static int __init mipsnet_probe(struct device *dev)
229 198
230 /* Get the io region now, get irq on open() */ 199 /* Get the io region now, get irq on open() */
231 if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) { 200 if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) {
232 pr_debug("%s: %s(): IO region {start: 0x%04lux, len: %d} "
233 "for dev is not availble.\n", netdev->name,
234 __FUNCTION__, netdev->base_addr, MIPSNET_IO_EXTENT);
235 err = -EBUSY; 201 err = -EBUSY;
236 goto out_free_netdev; 202 goto out_free_netdev;
237 } 203 }
@@ -295,8 +261,6 @@ static int __init mipsnet_init_module(void)
295 261
296static void __exit mipsnet_exit_module(void) 262static void __exit mipsnet_exit_module(void)
297{ 263{
298 pr_debug("MIPSNet Ethernet driver exiting\n");
299
300 driver_unregister(&mipsnet_driver); 264 driver_unregister(&mipsnet_driver);
301} 265}
302 266
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 84f2d6382f1e..651c2699d5e1 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/net/mv643xx_eth.c - Driver for MV643XX ethernet ports 2 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
3 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com> 3 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
4 * 4 *
5 * Based on the 64360 driver from: 5 * Based on the 64360 driver from:
@@ -43,14 +43,567 @@
43#include <linux/ethtool.h> 43#include <linux/ethtool.h>
44#include <linux/platform_device.h> 44#include <linux/platform_device.h>
45 45
46#include <linux/module.h>
47#include <linux/kernel.h>
48#include <linux/spinlock.h>
49#include <linux/workqueue.h>
50#include <linux/mii.h>
51
52#include <linux/mv643xx_eth.h>
53
46#include <asm/io.h> 54#include <asm/io.h>
47#include <asm/types.h> 55#include <asm/types.h>
48#include <asm/pgtable.h> 56#include <asm/pgtable.h>
49#include <asm/system.h> 57#include <asm/system.h>
50#include <asm/delay.h> 58#include <asm/delay.h>
51#include "mv643xx_eth.h" 59#include <asm/dma-mapping.h>
60
61#define MV643XX_CHECKSUM_OFFLOAD_TX
62#define MV643XX_NAPI
63#define MV643XX_TX_FAST_REFILL
64#undef MV643XX_COAL
65
66/*
67 * Number of RX / TX descriptors on RX / TX rings.
68 * Note that allocating RX descriptors is done by allocating the RX
 69 * ring AND preallocated RX buffers (skb's) for each descriptor.
 70 * Allocating TX descriptors only allocates the TX descriptor ring,
 71 * with no preallocated TX buffers (skb's are allocated by higher layers).
72 */
73
74/* Default TX ring size is 1000 descriptors */
75#define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000
76
77/* Default RX ring size is 400 descriptors */
78#define MV643XX_DEFAULT_RX_QUEUE_SIZE 400
79
80#define MV643XX_TX_COAL 100
81#ifdef MV643XX_COAL
82#define MV643XX_RX_COAL 100
83#endif
84
85#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
86#define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1)
87#else
88#define MAX_DESCS_PER_SKB 1
89#endif
90
91#define ETH_VLAN_HLEN 4
92#define ETH_FCS_LEN 4
93#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
94#define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \
95 ETH_VLAN_HLEN + ETH_FCS_LEN)
96#define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + \
97 dma_get_cache_alignment())
98
99/*
100 * Registers shared between all ports.
101 */
102#define PHY_ADDR_REG 0x0000
103#define SMI_REG 0x0004
104
105/*
106 * Per-port registers.
107 */
108#define PORT_CONFIG_REG(p) (0x0400 + ((p) << 10))
109#define PORT_CONFIG_EXTEND_REG(p) (0x0404 + ((p) << 10))
110#define MAC_ADDR_LOW(p) (0x0414 + ((p) << 10))
111#define MAC_ADDR_HIGH(p) (0x0418 + ((p) << 10))
112#define SDMA_CONFIG_REG(p) (0x041c + ((p) << 10))
113#define PORT_SERIAL_CONTROL_REG(p) (0x043c + ((p) << 10))
114#define PORT_STATUS_REG(p) (0x0444 + ((p) << 10))
115#define TRANSMIT_QUEUE_COMMAND_REG(p) (0x0448 + ((p) << 10))
116#define MAXIMUM_TRANSMIT_UNIT(p) (0x0458 + ((p) << 10))
117#define INTERRUPT_CAUSE_REG(p) (0x0460 + ((p) << 10))
118#define INTERRUPT_CAUSE_EXTEND_REG(p) (0x0464 + ((p) << 10))
119#define INTERRUPT_MASK_REG(p) (0x0468 + ((p) << 10))
120#define INTERRUPT_EXTEND_MASK_REG(p) (0x046c + ((p) << 10))
121#define TX_FIFO_URGENT_THRESHOLD_REG(p) (0x0474 + ((p) << 10))
122#define RX_CURRENT_QUEUE_DESC_PTR_0(p) (0x060c + ((p) << 10))
123#define RECEIVE_QUEUE_COMMAND_REG(p) (0x0680 + ((p) << 10))
124#define TX_CURRENT_QUEUE_DESC_PTR_0(p) (0x06c0 + ((p) << 10))
125#define MIB_COUNTERS_BASE(p) (0x1000 + ((p) << 7))
126#define DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(p) (0x1400 + ((p) << 10))
127#define DA_FILTER_OTHER_MULTICAST_TABLE_BASE(p) (0x1500 + ((p) << 10))
128#define DA_FILTER_UNICAST_TABLE_BASE(p) (0x1600 + ((p) << 10))
129
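
Each per-port register block sits 1 KiB apart, which is what the ((p) << 10) stride in the macros above encodes; the MIB counters use a 128-byte stride ((p) << 7) instead. A quick standalone check of the arithmetic:

	#include <stdio.h>

	#define PORT_CONFIG_REG(p)	(0x0400 + ((p) << 10))
	#define MIB_COUNTERS_BASE(p)	(0x1000 + ((p) << 7))

	int main(void)
	{
		int p;

		for (p = 0; p < 3; p++)
			printf("port %d: config 0x%04x, MIB 0x%04x\n",
			       p, PORT_CONFIG_REG(p), MIB_COUNTERS_BASE(p));
		/* port 0: config 0x0400, MIB 0x1000
		 * port 1: config 0x0800, MIB 0x1080
		 * port 2: config 0x0c00, MIB 0x1100 */
		return 0;
	}
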
130/* These macros describe Ethernet Port configuration reg (Px_cR) bits */
131#define UNICAST_NORMAL_MODE (0 << 0)
132#define UNICAST_PROMISCUOUS_MODE (1 << 0)
133#define DEFAULT_RX_QUEUE(queue) ((queue) << 1)
134#define DEFAULT_RX_ARP_QUEUE(queue) ((queue) << 4)
135#define RECEIVE_BC_IF_NOT_IP_OR_ARP (0 << 7)
136#define REJECT_BC_IF_NOT_IP_OR_ARP (1 << 7)
137#define RECEIVE_BC_IF_IP (0 << 8)
138#define REJECT_BC_IF_IP (1 << 8)
139#define RECEIVE_BC_IF_ARP (0 << 9)
140#define REJECT_BC_IF_ARP (1 << 9)
141#define TX_AM_NO_UPDATE_ERROR_SUMMARY (1 << 12)
142#define CAPTURE_TCP_FRAMES_DIS (0 << 14)
143#define CAPTURE_TCP_FRAMES_EN (1 << 14)
144#define CAPTURE_UDP_FRAMES_DIS (0 << 15)
145#define CAPTURE_UDP_FRAMES_EN (1 << 15)
146#define DEFAULT_RX_TCP_QUEUE(queue) ((queue) << 16)
147#define DEFAULT_RX_UDP_QUEUE(queue) ((queue) << 19)
148#define DEFAULT_RX_BPDU_QUEUE(queue) ((queue) << 22)
149
150#define PORT_CONFIG_DEFAULT_VALUE \
151 UNICAST_NORMAL_MODE | \
152 DEFAULT_RX_QUEUE(0) | \
153 DEFAULT_RX_ARP_QUEUE(0) | \
154 RECEIVE_BC_IF_NOT_IP_OR_ARP | \
155 RECEIVE_BC_IF_IP | \
156 RECEIVE_BC_IF_ARP | \
157 CAPTURE_TCP_FRAMES_DIS | \
158 CAPTURE_UDP_FRAMES_DIS | \
159 DEFAULT_RX_TCP_QUEUE(0) | \
160 DEFAULT_RX_UDP_QUEUE(0) | \
161 DEFAULT_RX_BPDU_QUEUE(0)
162
163/* These macros describe Ethernet Port configuration extend reg (Px_cXR) bits*/
164#define CLASSIFY_EN (1 << 0)
165#define SPAN_BPDU_PACKETS_AS_NORMAL (0 << 1)
166#define SPAN_BPDU_PACKETS_TO_RX_QUEUE_7 (1 << 1)
167#define PARTITION_DISABLE (0 << 2)
168#define PARTITION_ENABLE (1 << 2)
169
170#define PORT_CONFIG_EXTEND_DEFAULT_VALUE \
171 SPAN_BPDU_PACKETS_AS_NORMAL | \
172 PARTITION_DISABLE
173
174/* These macros describe Ethernet Port Sdma configuration reg (SDCR) bits */
175#define RIFB (1 << 0)
176#define RX_BURST_SIZE_1_64BIT (0 << 1)
177#define RX_BURST_SIZE_2_64BIT (1 << 1)
178#define RX_BURST_SIZE_4_64BIT (2 << 1)
179#define RX_BURST_SIZE_8_64BIT (3 << 1)
180#define RX_BURST_SIZE_16_64BIT (4 << 1)
181#define BLM_RX_NO_SWAP (1 << 4)
182#define BLM_RX_BYTE_SWAP (0 << 4)
183#define BLM_TX_NO_SWAP (1 << 5)
184#define BLM_TX_BYTE_SWAP (0 << 5)
185#define DESCRIPTORS_BYTE_SWAP (1 << 6)
186#define DESCRIPTORS_NO_SWAP (0 << 6)
187#define IPG_INT_RX(value) (((value) & 0x3fff) << 8)
188#define TX_BURST_SIZE_1_64BIT (0 << 22)
189#define TX_BURST_SIZE_2_64BIT (1 << 22)
190#define TX_BURST_SIZE_4_64BIT (2 << 22)
191#define TX_BURST_SIZE_8_64BIT (3 << 22)
192#define TX_BURST_SIZE_16_64BIT (4 << 22)
193
194#if defined(__BIG_ENDIAN)
195#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
196 RX_BURST_SIZE_4_64BIT | \
197 IPG_INT_RX(0) | \
198 TX_BURST_SIZE_4_64BIT
199#elif defined(__LITTLE_ENDIAN)
200#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
201 RX_BURST_SIZE_4_64BIT | \
202 BLM_RX_NO_SWAP | \
203 BLM_TX_NO_SWAP | \
204 IPG_INT_RX(0) | \
205 TX_BURST_SIZE_4_64BIT
206#else
207#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
208#endif
209
210/* These macros describe Ethernet Port serial control reg (PSCR) bits */
211#define SERIAL_PORT_DISABLE (0 << 0)
212#define SERIAL_PORT_ENABLE (1 << 0)
213#define DO_NOT_FORCE_LINK_PASS (0 << 1)
214#define FORCE_LINK_PASS (1 << 1)
215#define ENABLE_AUTO_NEG_FOR_DUPLX (0 << 2)
216#define DISABLE_AUTO_NEG_FOR_DUPLX (1 << 2)
217#define ENABLE_AUTO_NEG_FOR_FLOW_CTRL (0 << 3)
218#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL (1 << 3)
219#define ADV_NO_FLOW_CTRL (0 << 4)
220#define ADV_SYMMETRIC_FLOW_CTRL (1 << 4)
221#define FORCE_FC_MODE_NO_PAUSE_DIS_TX (0 << 5)
222#define FORCE_FC_MODE_TX_PAUSE_DIS (1 << 5)
223#define FORCE_BP_MODE_NO_JAM (0 << 7)
224#define FORCE_BP_MODE_JAM_TX (1 << 7)
225#define FORCE_BP_MODE_JAM_TX_ON_RX_ERR (2 << 7)
226#define SERIAL_PORT_CONTROL_RESERVED (1 << 9)
227#define FORCE_LINK_FAIL (0 << 10)
228#define DO_NOT_FORCE_LINK_FAIL (1 << 10)
229#define RETRANSMIT_16_ATTEMPTS (0 << 11)
230#define RETRANSMIT_FOREVER (1 << 11)
231#define ENABLE_AUTO_NEG_SPEED_GMII (0 << 13)
232#define DISABLE_AUTO_NEG_SPEED_GMII (1 << 13)
233#define DTE_ADV_0 (0 << 14)
234#define DTE_ADV_1 (1 << 14)
235#define DISABLE_AUTO_NEG_BYPASS (0 << 15)
236#define ENABLE_AUTO_NEG_BYPASS (1 << 15)
237#define AUTO_NEG_NO_CHANGE (0 << 16)
238#define RESTART_AUTO_NEG (1 << 16)
239#define MAX_RX_PACKET_1518BYTE (0 << 17)
240#define MAX_RX_PACKET_1522BYTE (1 << 17)
241#define MAX_RX_PACKET_1552BYTE (2 << 17)
242#define MAX_RX_PACKET_9022BYTE (3 << 17)
243#define MAX_RX_PACKET_9192BYTE (4 << 17)
244#define MAX_RX_PACKET_9700BYTE (5 << 17)
245#define MAX_RX_PACKET_MASK (7 << 17)
246#define CLR_EXT_LOOPBACK (0 << 20)
247#define SET_EXT_LOOPBACK (1 << 20)
248#define SET_HALF_DUPLEX_MODE (0 << 21)
249#define SET_FULL_DUPLEX_MODE (1 << 21)
250#define DISABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX (0 << 22)
251#define ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX (1 << 22)
252#define SET_GMII_SPEED_TO_10_100 (0 << 23)
253#define SET_GMII_SPEED_TO_1000 (1 << 23)
254#define SET_MII_SPEED_TO_10 (0 << 24)
255#define SET_MII_SPEED_TO_100 (1 << 24)
256
257#define PORT_SERIAL_CONTROL_DEFAULT_VALUE \
258 DO_NOT_FORCE_LINK_PASS | \
259 ENABLE_AUTO_NEG_FOR_DUPLX | \
260 DISABLE_AUTO_NEG_FOR_FLOW_CTRL | \
261 ADV_SYMMETRIC_FLOW_CTRL | \
262 FORCE_FC_MODE_NO_PAUSE_DIS_TX | \
263 FORCE_BP_MODE_NO_JAM | \
264 (1 << 9) /* reserved */ | \
265 DO_NOT_FORCE_LINK_FAIL | \
266 RETRANSMIT_16_ATTEMPTS | \
267 ENABLE_AUTO_NEG_SPEED_GMII | \
268 DTE_ADV_0 | \
269 DISABLE_AUTO_NEG_BYPASS | \
270 AUTO_NEG_NO_CHANGE | \
271 MAX_RX_PACKET_9700BYTE | \
272 CLR_EXT_LOOPBACK | \
273 SET_FULL_DUPLEX_MODE | \
274 ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX
275
276/* These macros describe Ethernet Serial Status reg (PSR) bits */
277#define PORT_STATUS_MODE_10_BIT (1 << 0)
278#define PORT_STATUS_LINK_UP (1 << 1)
279#define PORT_STATUS_FULL_DUPLEX (1 << 2)
280#define PORT_STATUS_FLOW_CONTROL (1 << 3)
281#define PORT_STATUS_GMII_1000 (1 << 4)
282#define PORT_STATUS_MII_100 (1 << 5)
283/* PSR bit 6 is undocumented */
284#define PORT_STATUS_TX_IN_PROGRESS (1 << 7)
285#define PORT_STATUS_AUTONEG_BYPASSED (1 << 8)
286#define PORT_STATUS_PARTITION (1 << 9)
287#define PORT_STATUS_TX_FIFO_EMPTY (1 << 10)
288/* PSR bits 11-31 are reserved */
289
290#define PORT_DEFAULT_TRANSMIT_QUEUE_SIZE 800
291#define PORT_DEFAULT_RECEIVE_QUEUE_SIZE 400
292
293#define DESC_SIZE 64
294
295#define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */
296#define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */
297
298#define ETH_INT_CAUSE_RX_DONE (ETH_RX_QUEUES_ENABLED << 2)
299#define ETH_INT_CAUSE_RX_ERROR (ETH_RX_QUEUES_ENABLED << 9)
300#define ETH_INT_CAUSE_RX (ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR)
301#define ETH_INT_CAUSE_EXT 0x00000002
302#define ETH_INT_UNMASK_ALL (ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT)
303
304#define ETH_INT_CAUSE_TX_DONE (ETH_TX_QUEUES_ENABLED << 0)
305#define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8)
306#define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
307#define ETH_INT_CAUSE_PHY 0x00010000
308#define ETH_INT_CAUSE_STATE 0x00100000
309#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \
310 ETH_INT_CAUSE_STATE)
311
312#define ETH_INT_MASK_ALL 0x00000000
313#define ETH_INT_MASK_ALL_EXT 0x00000000
314
315#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
316#define PHY_WAIT_MICRO_SECONDS 10
317
318/* Buffer offset from buffer pointer */
319#define RX_BUF_OFFSET 0x2
320
321/* Gigabit Ethernet Unit Global Registers */
322
323/* MIB Counters register definitions */
324#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW 0x0
325#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH 0x4
326#define ETH_MIB_BAD_OCTETS_RECEIVED 0x8
327#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR 0xc
328#define ETH_MIB_GOOD_FRAMES_RECEIVED 0x10
329#define ETH_MIB_BAD_FRAMES_RECEIVED 0x14
330#define ETH_MIB_BROADCAST_FRAMES_RECEIVED 0x18
331#define ETH_MIB_MULTICAST_FRAMES_RECEIVED 0x1c
332#define ETH_MIB_FRAMES_64_OCTETS 0x20
333#define ETH_MIB_FRAMES_65_TO_127_OCTETS 0x24
334#define ETH_MIB_FRAMES_128_TO_255_OCTETS 0x28
335#define ETH_MIB_FRAMES_256_TO_511_OCTETS 0x2c
336#define ETH_MIB_FRAMES_512_TO_1023_OCTETS 0x30
337#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
338#define ETH_MIB_GOOD_OCTETS_SENT_LOW 0x38
339#define ETH_MIB_GOOD_OCTETS_SENT_HIGH 0x3c
340#define ETH_MIB_GOOD_FRAMES_SENT 0x40
341#define ETH_MIB_EXCESSIVE_COLLISION 0x44
342#define ETH_MIB_MULTICAST_FRAMES_SENT 0x48
343#define ETH_MIB_BROADCAST_FRAMES_SENT 0x4c
344#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED 0x50
345#define ETH_MIB_FC_SENT 0x54
346#define ETH_MIB_GOOD_FC_RECEIVED 0x58
347#define ETH_MIB_BAD_FC_RECEIVED 0x5c
348#define ETH_MIB_UNDERSIZE_RECEIVED 0x60
349#define ETH_MIB_FRAGMENTS_RECEIVED 0x64
350#define ETH_MIB_OVERSIZE_RECEIVED 0x68
351#define ETH_MIB_JABBER_RECEIVED 0x6c
352#define ETH_MIB_MAC_RECEIVE_ERROR 0x70
353#define ETH_MIB_BAD_CRC_EVENT 0x74
354#define ETH_MIB_COLLISION 0x78
355#define ETH_MIB_LATE_COLLISION 0x7c
356
357/* Port serial status reg (PSR) */
358#define ETH_INTERFACE_PCM 0x00000001
359#define ETH_LINK_IS_UP 0x00000002
360#define ETH_PORT_AT_FULL_DUPLEX 0x00000004
361#define ETH_RX_FLOW_CTRL_ENABLED 0x00000008
362#define ETH_GMII_SPEED_1000 0x00000010
363#define ETH_MII_SPEED_100 0x00000020
364#define ETH_TX_IN_PROGRESS 0x00000080
365#define ETH_BYPASS_ACTIVE 0x00000100
366#define ETH_PORT_AT_PARTITION_STATE 0x00000200
367#define ETH_PORT_TX_FIFO_EMPTY 0x00000400
368
369/* SMI reg */
 370#define ETH_SMI_BUSY 0x10000000 /* 1 - SMI operation in progress */
 371#define ETH_SMI_READ_VALID 0x08000000 /* 1 - read data is valid */
 372#define ETH_SMI_OPCODE_WRITE 0 /* opcode: write PHY register */
 373#define ETH_SMI_OPCODE_READ 0x04000000 /* opcode: read PHY register */
374
375/* Interrupt Cause Register Bit Definitions */
376
377/* SDMA command status fields macros */
378
379/* Tx & Rx descriptors status */
380#define ETH_ERROR_SUMMARY 0x00000001
381
382/* Tx & Rx descriptors command */
383#define ETH_BUFFER_OWNED_BY_DMA 0x80000000
384
385/* Tx descriptors status */
386#define ETH_LC_ERROR 0
387#define ETH_UR_ERROR 0x00000002
388#define ETH_RL_ERROR 0x00000004
389#define ETH_LLC_SNAP_FORMAT 0x00000200
390
391/* Rx descriptors status */
392#define ETH_OVERRUN_ERROR 0x00000002
393#define ETH_MAX_FRAME_LENGTH_ERROR 0x00000004
394#define ETH_RESOURCE_ERROR 0x00000006
395#define ETH_VLAN_TAGGED 0x00080000
396#define ETH_BPDU_FRAME 0x00100000
397#define ETH_UDP_FRAME_OVER_IP_V_4 0x00200000
398#define ETH_OTHER_FRAME_TYPE 0x00400000
399#define ETH_LAYER_2_IS_ETH_V_2 0x00800000
400#define ETH_FRAME_TYPE_IP_V_4 0x01000000
401#define ETH_FRAME_HEADER_OK 0x02000000
402#define ETH_RX_LAST_DESC 0x04000000
403#define ETH_RX_FIRST_DESC 0x08000000
404#define ETH_UNKNOWN_DESTINATION_ADDR 0x10000000
405#define ETH_RX_ENABLE_INTERRUPT 0x20000000
406#define ETH_LAYER_4_CHECKSUM_OK 0x40000000
407
408/* Rx descriptors byte count */
409#define ETH_FRAME_FRAGMENTED 0x00000004
410
411/* Tx descriptors command */
412#define ETH_LAYER_4_CHECKSUM_FIRST_DESC 0x00000400
413#define ETH_FRAME_SET_TO_VLAN 0x00008000
414#define ETH_UDP_FRAME 0x00010000
415#define ETH_GEN_TCP_UDP_CHECKSUM 0x00020000
416#define ETH_GEN_IP_V_4_CHECKSUM 0x00040000
417#define ETH_ZERO_PADDING 0x00080000
418#define ETH_TX_LAST_DESC 0x00100000
419#define ETH_TX_FIRST_DESC 0x00200000
420#define ETH_GEN_CRC 0x00400000
421#define ETH_TX_ENABLE_INTERRUPT 0x00800000
422#define ETH_AUTO_MODE 0x40000000
423
424#define ETH_TX_IHL_SHIFT 11
425
426/* typedefs */
427
428typedef enum _eth_func_ret_status {
429 ETH_OK, /* Returned as expected. */
430 ETH_ERROR, /* Fundamental error. */
431 ETH_RETRY, /* Could not process request. Try later.*/
432 ETH_END_OF_JOB, /* Ring has nothing to process. */
433 ETH_QUEUE_FULL, /* Ring resource error. */
434 ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust. */
435} ETH_FUNC_RET_STATUS;
436
437typedef enum _eth_target {
438 ETH_TARGET_DRAM,
439 ETH_TARGET_DEVICE,
440 ETH_TARGET_CBS,
441 ETH_TARGET_PCI0,
442 ETH_TARGET_PCI1
443} ETH_TARGET;
444
445/* These are for big-endian machines. Little endian needs different
446 * definitions.
447 */
448#if defined(__BIG_ENDIAN)
449struct eth_rx_desc {
450 u16 byte_cnt; /* Descriptor buffer byte count */
451 u16 buf_size; /* Buffer size */
452 u32 cmd_sts; /* Descriptor command status */
453 u32 next_desc_ptr; /* Next descriptor pointer */
454 u32 buf_ptr; /* Descriptor buffer pointer */
455};
456
457struct eth_tx_desc {
458 u16 byte_cnt; /* buffer byte count */
459 u16 l4i_chk; /* CPU provided TCP checksum */
460 u32 cmd_sts; /* Command/status field */
461 u32 next_desc_ptr; /* Pointer to next descriptor */
462 u32 buf_ptr; /* pointer to buffer for this descriptor*/
463};
464#elif defined(__LITTLE_ENDIAN)
465struct eth_rx_desc {
466 u32 cmd_sts; /* Descriptor command status */
467 u16 buf_size; /* Buffer size */
468 u16 byte_cnt; /* Descriptor buffer byte count */
469 u32 buf_ptr; /* Descriptor buffer pointer */
470 u32 next_desc_ptr; /* Next descriptor pointer */
471};
472
473struct eth_tx_desc {
474 u32 cmd_sts; /* Command/status field */
475 u16 l4i_chk; /* CPU provided TCP checksum */
476 u16 byte_cnt; /* buffer byte count */
477 u32 buf_ptr; /* pointer to buffer for this descriptor*/
478 u32 next_desc_ptr; /* Pointer to next descriptor */
479};
480#else
481#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
482#endif
483
484/* Unified struct for Rx and Tx operations. The user is not required to */
 485/* be familiar with either Tx or Rx descriptors. */
486struct pkt_info {
487 unsigned short byte_cnt; /* Descriptor buffer byte count */
488 unsigned short l4i_chk; /* Tx CPU provided TCP Checksum */
489 unsigned int cmd_sts; /* Descriptor command status */
490 dma_addr_t buf_ptr; /* Descriptor buffer pointer */
491 struct sk_buff *return_info; /* User resource return information */
492};
493
494/* Ethernet port specific information */
495struct mv643xx_mib_counters {
496 u64 good_octets_received;
497 u32 bad_octets_received;
498 u32 internal_mac_transmit_err;
499 u32 good_frames_received;
500 u32 bad_frames_received;
501 u32 broadcast_frames_received;
502 u32 multicast_frames_received;
503 u32 frames_64_octets;
504 u32 frames_65_to_127_octets;
505 u32 frames_128_to_255_octets;
506 u32 frames_256_to_511_octets;
507 u32 frames_512_to_1023_octets;
508 u32 frames_1024_to_max_octets;
509 u64 good_octets_sent;
510 u32 good_frames_sent;
511 u32 excessive_collision;
512 u32 multicast_frames_sent;
513 u32 broadcast_frames_sent;
514 u32 unrec_mac_control_received;
515 u32 fc_sent;
516 u32 good_fc_received;
517 u32 bad_fc_received;
518 u32 undersize_received;
519 u32 fragments_received;
520 u32 oversize_received;
521 u32 jabber_received;
522 u32 mac_receive_error;
523 u32 bad_crc_event;
524 u32 collision;
525 u32 late_collision;
526};
527
528struct mv643xx_private {
529 int port_num; /* User Ethernet port number */
530
531 u32 rx_sram_addr; /* Base address of rx sram area */
532 u32 rx_sram_size; /* Size of rx sram area */
533 u32 tx_sram_addr; /* Base address of tx sram area */
534 u32 tx_sram_size; /* Size of tx sram area */
535
536 int rx_resource_err; /* Rx ring resource error flag */
537
 538 /* Tx/Rx ring management index fields. For driver use */
539
540 /* Next available and first returning Rx resource */
541 int rx_curr_desc_q, rx_used_desc_q;
542
543 /* Next available and first returning Tx resource */
544 int tx_curr_desc_q, tx_used_desc_q;
545
546#ifdef MV643XX_TX_FAST_REFILL
547 u32 tx_clean_threshold;
548#endif
549
550 struct eth_rx_desc *p_rx_desc_area;
551 dma_addr_t rx_desc_dma;
552 int rx_desc_area_size;
553 struct sk_buff **rx_skb;
554
555 struct eth_tx_desc *p_tx_desc_area;
556 dma_addr_t tx_desc_dma;
557 int tx_desc_area_size;
558 struct sk_buff **tx_skb;
559
560 struct work_struct tx_timeout_task;
561
562 struct net_device *dev;
563 struct napi_struct napi;
564 struct net_device_stats stats;
565 struct mv643xx_mib_counters mib_counters;
566 spinlock_t lock;
567 /* Size of Tx Ring per queue */
568 int tx_ring_size;
569 /* Number of tx descriptors in use */
570 int tx_desc_count;
571 /* Size of Rx Ring per queue */
572 int rx_ring_size;
573 /* Number of rx descriptors in use */
574 int rx_desc_count;
575
576 /*
 577 * Used in case the RX ring is empty, which can happen when the
 578 * system does not have resources (skb's)
579 */
580 struct timer_list timeout;
581
582 u32 rx_int_coal;
583 u32 tx_int_coal;
584 struct mii_if_info mii;
585};
52 586
53/* Static function declarations */ 587/* Static function declarations */
588static void eth_port_init(struct mv643xx_private *mp);
589static void eth_port_reset(unsigned int eth_port_num);
590static void eth_port_start(struct net_device *dev);
591
592static void ethernet_phy_reset(unsigned int eth_port_num);
593
594static void eth_port_write_smi_reg(unsigned int eth_port_num,
595 unsigned int phy_reg, unsigned int value);
596
597static void eth_port_read_smi_reg(unsigned int eth_port_num,
598 unsigned int phy_reg, unsigned int *value);
599
600static void eth_clear_mib_counters(unsigned int eth_port_num);
601
602static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
603 struct pkt_info *p_pkt_info);
604static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
605 struct pkt_info *p_pkt_info);
606
54static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr); 607static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr);
55static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr); 608static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr);
56static void eth_port_set_multicast_list(struct net_device *); 609static void eth_port_set_multicast_list(struct net_device *);
@@ -78,26 +631,19 @@ static const struct ethtool_ops mv643xx_ethtool_ops;
78static char mv643xx_driver_name[] = "mv643xx_eth"; 631static char mv643xx_driver_name[] = "mv643xx_eth";
79static char mv643xx_driver_version[] = "1.0"; 632static char mv643xx_driver_version[] = "1.0";
80 633
81static void __iomem *mv643xx_eth_shared_base; 634static void __iomem *mv643xx_eth_base;
82 635
83/* used to protect MV643XX_ETH_SMI_REG, which is shared across ports */ 636/* used to protect SMI_REG, which is shared across ports */
84static DEFINE_SPINLOCK(mv643xx_eth_phy_lock); 637static DEFINE_SPINLOCK(mv643xx_eth_phy_lock);
85 638
86static inline u32 mv_read(int offset) 639static inline u32 mv_read(int offset)
87{ 640{
88 void __iomem *reg_base; 641 return readl(mv643xx_eth_base + offset);
89
90 reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS;
91
92 return readl(reg_base + offset);
93} 642}
94 643
95static inline void mv_write(int offset, u32 data) 644static inline void mv_write(int offset, u32 data)
96{ 645{
97 void __iomem *reg_base; 646 writel(data, mv643xx_eth_base + offset);
98
99 reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS;
100 writel(data, reg_base + offset);
101} 647}
102 648
103/* 649/*
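
With the offsets above now relative to the shared register block, mv_read()/mv_write() collapse to a plain readl()/writel() pair against the single ioremap()'d base. They compose directly with the register macros; for example, the mask/unmask sequence used throughout the rest of this patch looks like:

	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
	mv_read(INTERRUPT_MASK_REG(port_num));	/* read back to post the write */
	/* ... critical section ... */
	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
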
@@ -221,12 +767,12 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
221 struct mv643xx_private *mp = netdev_priv(dev); 767 struct mv643xx_private *mp = netdev_priv(dev);
222 u32 config_reg; 768 u32 config_reg;
223 769
224 config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num)); 770 config_reg = mv_read(PORT_CONFIG_REG(mp->port_num));
225 if (dev->flags & IFF_PROMISC) 771 if (dev->flags & IFF_PROMISC)
226 config_reg |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; 772 config_reg |= (u32) UNICAST_PROMISCUOUS_MODE;
227 else 773 else
228 config_reg &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; 774 config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE;
229 mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), config_reg); 775 mv_write(PORT_CONFIG_REG(mp->port_num), config_reg);
230 776
231 eth_port_set_multicast_list(dev); 777 eth_port_set_multicast_list(dev);
232} 778}
@@ -462,41 +1008,37 @@ static void mv643xx_eth_update_pscr(struct net_device *dev,
462 u32 o_pscr, n_pscr; 1008 u32 o_pscr, n_pscr;
463 unsigned int queues; 1009 unsigned int queues;
464 1010
465 o_pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); 1011 o_pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
466 n_pscr = o_pscr; 1012 n_pscr = o_pscr;
467 1013
468 /* clear speed, duplex and rx buffer size fields */ 1014 /* clear speed, duplex and rx buffer size fields */
469 n_pscr &= ~(MV643XX_ETH_SET_MII_SPEED_TO_100 | 1015 n_pscr &= ~(SET_MII_SPEED_TO_100 |
470 MV643XX_ETH_SET_GMII_SPEED_TO_1000 | 1016 SET_GMII_SPEED_TO_1000 |
471 MV643XX_ETH_SET_FULL_DUPLEX_MODE | 1017 SET_FULL_DUPLEX_MODE |
472 MV643XX_ETH_MAX_RX_PACKET_MASK); 1018 MAX_RX_PACKET_MASK);
473 1019
474 if (ecmd->duplex == DUPLEX_FULL) 1020 if (ecmd->duplex == DUPLEX_FULL)
475 n_pscr |= MV643XX_ETH_SET_FULL_DUPLEX_MODE; 1021 n_pscr |= SET_FULL_DUPLEX_MODE;
476 1022
477 if (ecmd->speed == SPEED_1000) 1023 if (ecmd->speed == SPEED_1000)
478 n_pscr |= MV643XX_ETH_SET_GMII_SPEED_TO_1000 | 1024 n_pscr |= SET_GMII_SPEED_TO_1000 |
479 MV643XX_ETH_MAX_RX_PACKET_9700BYTE; 1025 MAX_RX_PACKET_9700BYTE;
480 else { 1026 else {
481 if (ecmd->speed == SPEED_100) 1027 if (ecmd->speed == SPEED_100)
482 n_pscr |= MV643XX_ETH_SET_MII_SPEED_TO_100; 1028 n_pscr |= SET_MII_SPEED_TO_100;
483 n_pscr |= MV643XX_ETH_MAX_RX_PACKET_1522BYTE; 1029 n_pscr |= MAX_RX_PACKET_1522BYTE;
484 } 1030 }
485 1031
486 if (n_pscr != o_pscr) { 1032 if (n_pscr != o_pscr) {
487 if ((o_pscr & MV643XX_ETH_SERIAL_PORT_ENABLE) == 0) 1033 if ((o_pscr & SERIAL_PORT_ENABLE) == 0)
488 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), 1034 mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
489 n_pscr);
490 else { 1035 else {
491 queues = mv643xx_eth_port_disable_tx(port_num); 1036 queues = mv643xx_eth_port_disable_tx(port_num);
492 1037
493 o_pscr &= ~MV643XX_ETH_SERIAL_PORT_ENABLE; 1038 o_pscr &= ~SERIAL_PORT_ENABLE;
494 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), 1039 mv_write(PORT_SERIAL_CONTROL_REG(port_num), o_pscr);
495 o_pscr); 1040 mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
496 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), 1041 mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
497 n_pscr);
498 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
499 n_pscr);
500 if (queues) 1042 if (queues)
501 mv643xx_eth_port_enable_tx(port_num, queues); 1043 mv643xx_eth_port_enable_tx(port_num, queues);
502 } 1044 }
@@ -522,13 +1064,13 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
522 unsigned int port_num = mp->port_num; 1064 unsigned int port_num = mp->port_num;
523 1065
524 /* Read interrupt cause registers */ 1066 /* Read interrupt cause registers */
525 eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) & 1067 eth_int_cause = mv_read(INTERRUPT_CAUSE_REG(port_num)) &
526 ETH_INT_UNMASK_ALL; 1068 ETH_INT_UNMASK_ALL;
527 if (eth_int_cause & ETH_INT_CAUSE_EXT) { 1069 if (eth_int_cause & ETH_INT_CAUSE_EXT) {
528 eth_int_cause_ext = mv_read( 1070 eth_int_cause_ext = mv_read(
529 MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) & 1071 INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
530 ETH_INT_UNMASK_ALL_EXT; 1072 ETH_INT_UNMASK_ALL_EXT;
531 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 1073 mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num),
532 ~eth_int_cause_ext); 1074 ~eth_int_cause_ext);
533 } 1075 }
534 1076
@@ -556,10 +1098,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
556#ifdef MV643XX_NAPI 1098#ifdef MV643XX_NAPI
557 if (eth_int_cause & ETH_INT_CAUSE_RX) { 1099 if (eth_int_cause & ETH_INT_CAUSE_RX) {
558 /* schedule the NAPI poll routine to maintain port */ 1100 /* schedule the NAPI poll routine to maintain port */
559 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 1101 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
560 ETH_INT_MASK_ALL); 1102
561 /* wait for previous write to complete */ 1103 /* wait for previous write to complete */
562 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); 1104 mv_read(INTERRUPT_MASK_REG(port_num));
563 1105
564 netif_rx_schedule(dev, &mp->napi); 1106 netif_rx_schedule(dev, &mp->napi);
565 } 1107 }
@@ -611,9 +1153,9 @@ static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
611 unsigned int coal = ((t_clk / 1000000) * delay) / 64; 1153 unsigned int coal = ((t_clk / 1000000) * delay) / 64;
612 1154
613 /* Set RX Coalescing mechanism */ 1155 /* Set RX Coalescing mechanism */
614 mv_write(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num), 1156 mv_write(SDMA_CONFIG_REG(eth_port_num),
615 ((coal & 0x3fff) << 8) | 1157 ((coal & 0x3fff) << 8) |
616 (mv_read(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num)) 1158 (mv_read(SDMA_CONFIG_REG(eth_port_num))
617 & 0xffc000ff)); 1159 & 0xffc000ff));
618 1160
619 return coal; 1161 return coal;
@@ -649,8 +1191,7 @@ static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
649 unsigned int coal; 1191 unsigned int coal;
650 coal = ((t_clk / 1000000) * delay) / 64; 1192 coal = ((t_clk / 1000000) * delay) / 64;
651 /* Set TX Coalescing mechanism */ 1193 /* Set TX Coalescing mechanism */
652 mv_write(MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num), 1194 mv_write(TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num), coal << 4);
653 coal << 4);
654 return coal; 1195 return coal;
655} 1196}
656 1197
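
Both coalescing helpers share one conversion: the requested delay in microseconds, times the t_clk rate in MHz, divided by the hardware counter's 64-cycle granularity. Worked through for the values the driver passes at open time (t_clk = 133000000, MV643XX_TX_COAL = 100):

	coal = ((133000000 / 1000000) * 100) / 64
	     = (133 * 100) / 64
	     = 207

so the register field is programmed with 207, i.e. roughly 100 microseconds at 133 MHz (the integer truncation loses under one percent).
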
@@ -786,10 +1327,10 @@ static int mv643xx_eth_open(struct net_device *dev)
786 int err; 1327 int err;
787 1328
788 /* Clear any pending ethernet port interrupts */ 1329 /* Clear any pending ethernet port interrupts */
789 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); 1330 mv_write(INTERRUPT_CAUSE_REG(port_num), 0);
790 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); 1331 mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
791 /* wait for previous write to complete */ 1332 /* wait for previous write to complete */
792 mv_read (MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)); 1333 mv_read (INTERRUPT_CAUSE_EXTEND_REG(port_num));
793 1334
794 err = request_irq(dev->irq, mv643xx_eth_int_handler, 1335 err = request_irq(dev->irq, mv643xx_eth_int_handler,
795 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); 1336 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
@@ -896,11 +1437,10 @@ static int mv643xx_eth_open(struct net_device *dev)
896 eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); 1437 eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);
897 1438
898 /* Unmask phy and link status changes interrupts */ 1439 /* Unmask phy and link status changes interrupts */
899 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 1440 mv_write(INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT);
900 ETH_INT_UNMASK_ALL_EXT);
901 1441
902 /* Unmask RX buffer and TX end interrupt */ 1442 /* Unmask RX buffer and TX end interrupt */
903 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); 1443 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
904 1444
905 return 0; 1445 return 0;
906 1446
@@ -980,9 +1520,9 @@ static int mv643xx_eth_stop(struct net_device *dev)
980 unsigned int port_num = mp->port_num; 1520 unsigned int port_num = mp->port_num;
981 1521
982 /* Mask all interrupts on ethernet port */ 1522 /* Mask all interrupts on ethernet port */
983 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); 1523 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
984 /* wait for previous write to complete */ 1524 /* wait for previous write to complete */
985 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); 1525 mv_read(INTERRUPT_MASK_REG(port_num));
986 1526
987#ifdef MV643XX_NAPI 1527#ifdef MV643XX_NAPI
988 napi_disable(&mp->napi); 1528 napi_disable(&mp->napi);
@@ -1021,16 +1561,15 @@ static int mv643xx_poll(struct napi_struct *napi, int budget)
1021#endif 1561#endif
1022 1562
1023 work_done = 0; 1563 work_done = 0;
1024 if ((mv_read(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) 1564 if ((mv_read(RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
1025 != (u32) mp->rx_used_desc_q) 1565 != (u32) mp->rx_used_desc_q)
1026 work_done = mv643xx_eth_receive_queue(dev, budget); 1566 work_done = mv643xx_eth_receive_queue(dev, budget);
1027 1567
1028 if (work_done < budget) { 1568 if (work_done < budget) {
1029 netif_rx_complete(dev, napi); 1569 netif_rx_complete(dev, napi);
1030 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); 1570 mv_write(INTERRUPT_CAUSE_REG(port_num), 0);
1031 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); 1571 mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
1032 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 1572 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
1033 ETH_INT_UNMASK_ALL);
1034 } 1573 }
1035 1574
1036 return work_done; 1575 return work_done;
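
The poll routine follows the standard NAPI contract: consume at most budget packets, and only when the ring is drained (work_done < budget) complete NAPI and unmask the RX interrupts. The skeleton, as a generic sketch with hypothetical helpers:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct net_device *dev = napi_to_dev(napi);	/* hypothetical */
		int work_done = process_rx(dev, budget);	/* hypothetical */

		if (work_done < budget) {
			/* ring drained: leave polled mode, unmask RX IRQs */
			netif_rx_complete(dev, napi);	/* 2.6.24-era API */
			unmask_rx_irqs(dev);		/* hypothetical */
		}
		/* work_done == budget: stay scheduled, get polled again */
		return work_done;
	}
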
@@ -1233,13 +1772,13 @@ static void mv643xx_netpoll(struct net_device *netdev)
1233 struct mv643xx_private *mp = netdev_priv(netdev); 1772 struct mv643xx_private *mp = netdev_priv(netdev);
1234 int port_num = mp->port_num; 1773 int port_num = mp->port_num;
1235 1774
1236 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); 1775 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
1237 /* wait for previous write to complete */ 1776 /* wait for previous write to complete */
1238 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); 1777 mv_read(INTERRUPT_MASK_REG(port_num));
1239 1778
1240 mv643xx_eth_int_handler(netdev->irq, netdev); 1779 mv643xx_eth_int_handler(netdev->irq, netdev);
1241 1780
1242 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); 1781 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
1243} 1782}
1244#endif 1783#endif
1245 1784
@@ -1357,8 +1896,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1357 1896
1358 /* set default config values */ 1897 /* set default config values */
1359 eth_port_uc_addr_get(port_num, dev->dev_addr); 1898 eth_port_uc_addr_get(port_num, dev->dev_addr);
1360 mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE; 1899 mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
1361 mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; 1900 mp->tx_ring_size = PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
1362 1901
1363 if (is_valid_ether_addr(pd->mac_addr)) 1902 if (is_valid_ether_addr(pd->mac_addr))
1364 memcpy(dev->dev_addr, pd->mac_addr, 6); 1903 memcpy(dev->dev_addr, pd->mac_addr, 6);
@@ -1470,9 +2009,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
1470 if (res == NULL) 2009 if (res == NULL)
1471 return -ENODEV; 2010 return -ENODEV;
1472 2011
1473 mv643xx_eth_shared_base = ioremap(res->start, 2012 mv643xx_eth_base = ioremap(res->start, res->end - res->start + 1);
1474 MV643XX_ETH_SHARED_REGS_SIZE); 2013 if (mv643xx_eth_base == NULL)
1475 if (mv643xx_eth_shared_base == NULL)
1476 return -ENOMEM; 2014 return -ENOMEM;
1477 2015
1478 return 0; 2016 return 0;
@@ -1481,8 +2019,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
1481 2019
1482static int mv643xx_eth_shared_remove(struct platform_device *pdev) 2020static int mv643xx_eth_shared_remove(struct platform_device *pdev)
1483{ 2021{
1484 iounmap(mv643xx_eth_shared_base); 2022 iounmap(mv643xx_eth_base);
1485 mv643xx_eth_shared_base = NULL; 2023 mv643xx_eth_base = NULL;
1486 2024
1487 return 0; 2025 return 0;
1488} 2026}
@@ -1494,8 +2032,8 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev)
1494 unsigned int port_num = mp->port_num; 2032 unsigned int port_num = mp->port_num;
1495 2033
1496 /* Mask all interrupts on ethernet port */ 2034 /* Mask all interrupts on ethernet port */
1497 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0); 2035 mv_write(INTERRUPT_MASK_REG(port_num), 0);
1498 mv_read (MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); 2036 mv_read (INTERRUPT_MASK_REG(port_num));
1499 2037
1500 eth_port_reset(port_num); 2038 eth_port_reset(port_num);
1501} 2039}
@@ -1762,49 +2300,49 @@ static void eth_port_start(struct net_device *dev)
1762 2300
1763 /* Assignment of Tx CTRP of given queue */ 2301 /* Assignment of Tx CTRP of given queue */
1764 tx_curr_desc = mp->tx_curr_desc_q; 2302 tx_curr_desc = mp->tx_curr_desc_q;
1765 mv_write(MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(port_num), 2303 mv_write(TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
1766 (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc)); 2304 (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc));
1767 2305
1768 /* Assignment of Rx CRDP of given queue */ 2306 /* Assignment of Rx CRDP of given queue */
1769 rx_curr_desc = mp->rx_curr_desc_q; 2307 rx_curr_desc = mp->rx_curr_desc_q;
1770 mv_write(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num), 2308 mv_write(RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
1771 (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); 2309 (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));
1772 2310
1773 /* Add the assigned Ethernet address to the port's address table */ 2311 /* Add the assigned Ethernet address to the port's address table */
1774 eth_port_uc_addr_set(port_num, dev->dev_addr); 2312 eth_port_uc_addr_set(port_num, dev->dev_addr);
1775 2313
1776 /* Assign port configuration and command. */ 2314 /* Assign port configuration and command. */
1777 mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num), 2315 mv_write(PORT_CONFIG_REG(port_num),
1778 MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE); 2316 PORT_CONFIG_DEFAULT_VALUE);
1779 2317
1780 mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num), 2318 mv_write(PORT_CONFIG_EXTEND_REG(port_num),
1781 MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE); 2319 PORT_CONFIG_EXTEND_DEFAULT_VALUE);
1782 2320
1783 pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); 2321 pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
1784 2322
1785 pscr &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | MV643XX_ETH_FORCE_LINK_PASS); 2323 pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
1786 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); 2324 mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
1787 2325
1788 pscr |= MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL | 2326 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
1789 MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII | 2327 DISABLE_AUTO_NEG_SPEED_GMII |
1790 MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX | 2328 DISABLE_AUTO_NEG_FOR_DUPLX |
1791 MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL | 2329 DO_NOT_FORCE_LINK_FAIL |
1792 MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED; 2330 SERIAL_PORT_CONTROL_RESERVED;
1793 2331
1794 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); 2332 mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
1795 2333
1796 pscr |= MV643XX_ETH_SERIAL_PORT_ENABLE; 2334 pscr |= SERIAL_PORT_ENABLE;
1797 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); 2335 mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
1798 2336
1799 /* Assign port SDMA configuration */ 2337 /* Assign port SDMA configuration */
1800 mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num), 2338 mv_write(SDMA_CONFIG_REG(port_num),
1801 MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE); 2339 PORT_SDMA_CONFIG_DEFAULT_VALUE);
1802 2340
1803 /* Enable port Rx. */ 2341 /* Enable port Rx. */
1804 mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED); 2342 mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED);
1805 2343
1806 /* Disable port bandwidth limits by clearing MTU register */ 2344 /* Disable port bandwidth limits by clearing MTU register */
1807 mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0); 2345 mv_write(MAXIMUM_TRANSMIT_UNIT(port_num), 0);
1808 2346
1809 /* save phy settings across reset */ 2347 /* save phy settings across reset */
1810 mv643xx_get_settings(dev, &ethtool_cmd); 2348 mv643xx_get_settings(dev, &ethtool_cmd);
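eth_port_start() reprograms the port serial control register (PSCR) in a deliberate order: ENABLE and FORCE_LINK_PASS are cleared first, the autoneg-override bits are written while the port is down, and ENABLE is set last in a separate write so the MAC never latches half-programmed mode bits. A sketch of that ordering; the bit positions below are illustrative only, the real values live in the driver:

    #include <stdint.h>

    #define SERIAL_PORT_ENABLE (1u << 0)  /* illustrative positions only */
    #define FORCE_LINK_PASS    (1u << 1)
    #define AUTONEG_OVERRIDES  (1u << 9)

    extern uint32_t read_reg(uint32_t off);
    extern void write_reg(uint32_t off, uint32_t v);

    static void port_serial_bringup(uint32_t pscr_reg)
    {
            uint32_t pscr = read_reg(pscr_reg);

            pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
            write_reg(pscr_reg, pscr);              /* take the port down  */

            pscr |= AUTONEG_OVERRIDES;              /* program mode bits   */
            write_reg(pscr_reg, pscr);

            write_reg(pscr_reg, pscr | SERIAL_PORT_ENABLE); /* enable last */
    }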
@@ -1825,11 +2363,11 @@ static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr)
1825 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | 2363 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
1826 (p_addr[3] << 0); 2364 (p_addr[3] << 0);
1827 2365
1828 mv_write(MV643XX_ETH_MAC_ADDR_LOW(port_num), mac_l); 2366 mv_write(MAC_ADDR_LOW(port_num), mac_l);
1829 mv_write(MV643XX_ETH_MAC_ADDR_HIGH(port_num), mac_h); 2367 mv_write(MAC_ADDR_HIGH(port_num), mac_h);
1830 2368
1831 /* Accept frames with this address */ 2369 /* Accept frames with this address */
1832 table = MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE(port_num); 2370 table = DA_FILTER_UNICAST_TABLE_BASE(port_num);
1833 eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f); 2371 eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f);
1834} 2372}
1835 2373
@@ -1841,8 +2379,8 @@ static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr)
1841 unsigned int mac_h; 2379 unsigned int mac_h;
1842 unsigned int mac_l; 2380 unsigned int mac_l;
1843 2381
1844 mac_h = mv_read(MV643XX_ETH_MAC_ADDR_HIGH(port_num)); 2382 mac_h = mv_read(MAC_ADDR_HIGH(port_num));
1845 mac_l = mv_read(MV643XX_ETH_MAC_ADDR_LOW(port_num)); 2383 mac_l = mv_read(MAC_ADDR_LOW(port_num));
1846 2384
1847 p_addr[0] = (mac_h >> 24) & 0xff; 2385 p_addr[0] = (mac_h >> 24) & 0xff;
1848 p_addr[1] = (mac_h >> 16) & 0xff; 2386 p_addr[1] = (mac_h >> 16) & 0xff;
@@ -1902,7 +2440,7 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
1902 2440
1903 if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) && 2441 if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
1904 (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) { 2442 (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
1905 table = MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2443 table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
1906 (eth_port_num); 2444 (eth_port_num);
1907 eth_port_set_filter_table_entry(table, p_addr[5]); 2445 eth_port_set_filter_table_entry(table, p_addr[5]);
1908 return; 2446 return;
@@ -1976,7 +2514,7 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
1976 for (i = 0; i < 8; i++) 2514 for (i = 0; i < 8; i++)
1977 crc_result = crc_result | (crc[i] << i); 2515 crc_result = crc_result | (crc[i] << i);
1978 2516
1979 table = MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num); 2517 table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num);
1980 eth_port_set_filter_table_entry(table, crc_result); 2518 eth_port_set_filter_table_entry(table, crc_result);
1981} 2519}
1982 2520
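eth_port_set_filter_table_entry() (not shown in this hunk) packs four one-byte table entries into each 32-bit register; bit 0 of an entry accepts the frame and bits 3-1 select the RX queue, which is why the "accept everything" path below writes 0x01010101 and the unicast table spans only offsets 0..0xC for 16 entries. A sketch of the indexing, assuming that packing:

    #include <stdint.h>

    extern uint32_t read_reg(uint32_t off);
    extern void write_reg(uint32_t off, uint32_t v);

    static void set_filter_entry(uint32_t table_base, uint8_t entry)
    {
            uint32_t reg   = table_base + (entry / 4) * 4; /* 4 entries/reg */
            unsigned shift = (entry % 4) * 8;              /* byte lane     */

            /* bit 0 = accept, bits 3-1 = queue 0 */
            write_reg(reg, read_reg(reg) | (0x01u << shift));
    }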
@@ -2006,7 +2544,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2006 * 3-1 Queue ETH_Q0=0 2544 * 3-1 Queue ETH_Q0=0
2007 * 7-4 Reserved = 0; 2545 * 7-4 Reserved = 0;
2008 */ 2546 */
2009 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); 2547 mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
2010 2548
2011 /* Set all entries in DA filter other multicast 2549 /* Set all entries in DA filter other multicast
2012 * table (Ex_dFOMT) 2550 * table (Ex_dFOMT)
@@ -2016,7 +2554,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2016 * 3-1 Queue ETH_Q0=0 2554 * 3-1 Queue ETH_Q0=0
2017 * 7-4 Reserved = 0; 2555 * 7-4 Reserved = 0;
2018 */ 2556 */
2019 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); 2557 mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
2020 } 2558 }
2021 return; 2559 return;
2022 } 2560 }
@@ -2026,11 +2564,11 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2026 */ 2564 */
2027 for (table_index = 0; table_index <= 0xFC; table_index += 4) { 2565 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2028 /* Clear DA filter special multicast table (Ex_dFSMT) */ 2566 /* Clear DA filter special multicast table (Ex_dFSMT) */
2029 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2567 mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
2030 (eth_port_num) + table_index, 0); 2568 (eth_port_num) + table_index, 0);
2031 2569
2032 /* Clear DA filter other multicast table (Ex_dFOMT) */ 2570 /* Clear DA filter other multicast table (Ex_dFOMT) */
2033 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE 2571 mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE
2034 (eth_port_num) + table_index, 0); 2572 (eth_port_num) + table_index, 0);
2035 } 2573 }
2036 2574
@@ -2064,15 +2602,15 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
2064 2602
2065 /* Clear DA filter unicast table (Ex_dFUT) */ 2603 /* Clear DA filter unicast table (Ex_dFUT) */
2066 for (table_index = 0; table_index <= 0xC; table_index += 4) 2604 for (table_index = 0; table_index <= 0xC; table_index += 4)
2067 mv_write(MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE 2605 mv_write(DA_FILTER_UNICAST_TABLE_BASE
2068 (eth_port_num) + table_index, 0); 2606 (eth_port_num) + table_index, 0);
2069 2607
2070 for (table_index = 0; table_index <= 0xFC; table_index += 4) { 2608 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2071 /* Clear DA filter special multicast table (Ex_dFSMT) */ 2609 /* Clear DA filter special multicast table (Ex_dFSMT) */
2072 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2610 mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
2073 (eth_port_num) + table_index, 0); 2611 (eth_port_num) + table_index, 0);
2074 /* Clear DA filter other multicast table (Ex_dFOMT) */ 2612 /* Clear DA filter other multicast table (Ex_dFOMT) */
2075 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE 2613 mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE
2076 (eth_port_num) + table_index, 0); 2614 (eth_port_num) + table_index, 0);
2077 } 2615 }
2078} 2616}
@@ -2101,12 +2639,12 @@ static void eth_clear_mib_counters(unsigned int eth_port_num)
2101 /* Perform dummy reads from MIB counters */ 2639 /* Perform dummy reads from MIB counters */
2102 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION; 2640 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
2103 i += 4) 2641 i += 4)
2104 mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(eth_port_num) + i); 2642 mv_read(MIB_COUNTERS_BASE(eth_port_num) + i);
2105} 2643}
2106 2644
2107static inline u32 read_mib(struct mv643xx_private *mp, int offset) 2645static inline u32 read_mib(struct mv643xx_private *mp, int offset)
2108{ 2646{
2109 return mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(mp->port_num) + offset); 2647 return mv_read(MIB_COUNTERS_BASE(mp->port_num) + offset);
2110} 2648}
2111 2649
2112static void eth_update_mib_counters(struct mv643xx_private *mp) 2650static void eth_update_mib_counters(struct mv643xx_private *mp)
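The MIB counters clear on read, which is why eth_clear_mib_counters() can "clear" them with dummy reads alone. The good-octets counters are 64 bits wide, split across LOW/HIGH registers; a sketch of how eth_update_mib_counters() would combine them, with offsets as in the ETH_MIB_* values defined in the header removed later in this patch:

    #include <stdint.h>

    extern uint32_t read_reg(uint32_t off);  /* stand-in for mv_read() */

    static uint64_t read_mib64(uint32_t mib_base, uint32_t low_off)
    {
            uint64_t v;

            v  = read_reg(mib_base + low_off);                    /* LOW  */
            v |= (uint64_t)read_reg(mib_base + low_off + 4) << 32; /* HIGH */
            return v;
    }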
@@ -2191,7 +2729,7 @@ static int ethernet_phy_get(unsigned int eth_port_num)
2191{ 2729{
2192 unsigned int reg_data; 2730 unsigned int reg_data;
2193 2731
2194 reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG); 2732 reg_data = mv_read(PHY_ADDR_REG);
2195 2733
2196 return ((reg_data >> (5 * eth_port_num)) & 0x1f); 2734 return ((reg_data >> (5 * eth_port_num)) & 0x1f);
2197} 2735}
@@ -2218,10 +2756,10 @@ static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
2218 u32 reg_data; 2756 u32 reg_data;
2219 int addr_shift = 5 * eth_port_num; 2757 int addr_shift = 5 * eth_port_num;
2220 2758
2221 reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG); 2759 reg_data = mv_read(PHY_ADDR_REG);
2222 reg_data &= ~(0x1f << addr_shift); 2760 reg_data &= ~(0x1f << addr_shift);
2223 reg_data |= (phy_addr & 0x1f) << addr_shift; 2761 reg_data |= (phy_addr & 0x1f) << addr_shift;
2224 mv_write(MV643XX_ETH_PHY_ADDR_REG, reg_data); 2762 mv_write(PHY_ADDR_REG, reg_data);
2225} 2763}
2226 2764
2227/* 2765/*
@@ -2259,13 +2797,13 @@ static void ethernet_phy_reset(unsigned int eth_port_num)
2259static void mv643xx_eth_port_enable_tx(unsigned int port_num, 2797static void mv643xx_eth_port_enable_tx(unsigned int port_num,
2260 unsigned int queues) 2798 unsigned int queues)
2261{ 2799{
2262 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), queues); 2800 mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), queues);
2263} 2801}
2264 2802
2265static void mv643xx_eth_port_enable_rx(unsigned int port_num, 2803static void mv643xx_eth_port_enable_rx(unsigned int port_num,
2266 unsigned int queues) 2804 unsigned int queues)
2267{ 2805{
2268 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), queues); 2806 mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), queues);
2269} 2807}
2270 2808
2271static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num) 2809static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num)
@@ -2273,21 +2811,18 @@ static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num)
2273 u32 queues; 2811 u32 queues;
2274 2812
2275 /* Stop Tx port activity. Check port Tx activity. */ 2813 /* Stop Tx port activity. Check port Tx activity. */
2276 queues = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) 2814 queues = mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF;
2277 & 0xFF;
2278 if (queues) { 2815 if (queues) {
2279 /* Issue stop command for active queues only */ 2816 /* Issue stop command for active queues only */
2280 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 2817 mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8));
2281 (queues << 8));
2282 2818
2283 /* Wait for all Tx activity to terminate. */ 2819 /* Wait for all Tx activity to terminate. */
2284 /* Check port cause register that all Tx queues are stopped */ 2820 /* Check port cause register that all Tx queues are stopped */
2285 while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) 2821 while (mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF)
2286 & 0xFF)
2287 udelay(PHY_WAIT_MICRO_SECONDS); 2822 udelay(PHY_WAIT_MICRO_SECONDS);
2288 2823
2289 /* Wait for Tx FIFO to empty */ 2824 /* Wait for Tx FIFO to empty */
2290 while (mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num)) & 2825 while (mv_read(PORT_STATUS_REG(port_num)) &
2291 ETH_PORT_TX_FIFO_EMPTY) 2826 ETH_PORT_TX_FIFO_EMPTY)
2292 udelay(PHY_WAIT_MICRO_SECONDS); 2827 udelay(PHY_WAIT_MICRO_SECONDS);
2293 } 2828 }
@@ -2300,17 +2835,14 @@ static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num)
2300 u32 queues; 2835 u32 queues;
2301 2836
2302 /* Stop Rx port activity. Check port Rx activity. */ 2837 /* Stop Rx port activity. Check port Rx activity. */
2303 queues = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) 2838 queues = mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF;
2304 & 0xFF;
2305 if (queues) { 2839 if (queues) {
2306 /* Issue stop command for active queues only */ 2840 /* Issue stop command for active queues only */
2307 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 2841 mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8));
2308 (queues << 8));
2309 2842
2310 /* Wait for all Rx activity to terminate. */ 2843 /* Wait for all Rx activity to terminate. */
2311 /* Check port cause register that all Rx queues are stopped */ 2844 /* Check port cause register that all Rx queues are stopped */
2312 while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) 2845 while (mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF)
2313 & 0xFF)
2314 udelay(PHY_WAIT_MICRO_SECONDS); 2846 udelay(PHY_WAIT_MICRO_SECONDS);
2315 } 2847 }
2316 2848
@@ -2346,11 +2878,11 @@ static void eth_port_reset(unsigned int port_num)
2346 eth_clear_mib_counters(port_num); 2878 eth_clear_mib_counters(port_num);
2347 2879
2348 /* Reset the Enable bit in the Configuration Register */ 2880 /* Reset the Enable bit in the Configuration Register */
2349 reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); 2881 reg_data = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
2350 reg_data &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | 2882 reg_data &= ~(SERIAL_PORT_ENABLE |
2351 MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL | 2883 DO_NOT_FORCE_LINK_FAIL |
2352 MV643XX_ETH_FORCE_LINK_PASS); 2884 FORCE_LINK_PASS);
2353 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data); 2885 mv_write(PORT_SERIAL_CONTROL_REG(port_num), reg_data);
2354} 2886}
2355 2887
2356 2888
@@ -2385,7 +2917,7 @@ static void eth_port_read_smi_reg(unsigned int port_num,
2385 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); 2917 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
2386 2918
2387 /* wait for the SMI register to become available */ 2919 /* wait for the SMI register to become available */
2388 for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) { 2920 for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) {
2389 if (i == PHY_WAIT_ITERATIONS) { 2921 if (i == PHY_WAIT_ITERATIONS) {
2390 printk("mv643xx PHY busy timeout, port %d\n", port_num); 2922 printk("mv643xx PHY busy timeout, port %d\n", port_num);
2391 goto out; 2923 goto out;
@@ -2393,11 +2925,11 @@ static void eth_port_read_smi_reg(unsigned int port_num,
2393 udelay(PHY_WAIT_MICRO_SECONDS); 2925 udelay(PHY_WAIT_MICRO_SECONDS);
2394 } 2926 }
2395 2927
2396 mv_write(MV643XX_ETH_SMI_REG, 2928 mv_write(SMI_REG,
2397 (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ); 2929 (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
2398 2930
2399 /* now wait for the data to be valid */ 2931 /* now wait for the data to be valid */
2400 for (i = 0; !(mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) { 2932 for (i = 0; !(mv_read(SMI_REG) & ETH_SMI_READ_VALID); i++) {
2401 if (i == PHY_WAIT_ITERATIONS) { 2933 if (i == PHY_WAIT_ITERATIONS) {
2402 printk("mv643xx PHY read timeout, port %d\n", port_num); 2934 printk("mv643xx PHY read timeout, port %d\n", port_num);
2403 goto out; 2935 goto out;
@@ -2405,7 +2937,7 @@ static void eth_port_read_smi_reg(unsigned int port_num,
2405 udelay(PHY_WAIT_MICRO_SECONDS); 2937 udelay(PHY_WAIT_MICRO_SECONDS);
2406 } 2938 }
2407 2939
2408 *value = mv_read(MV643XX_ETH_SMI_REG) & 0xffff; 2940 *value = mv_read(SMI_REG) & 0xffff;
2409out: 2941out:
2410 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); 2942 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
2411} 2943}
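The SMI read above is a three-step handshake: poll BUSY until the interface is free, write the read opcode plus PHY and register addresses, then poll READ_VALID before fetching the low 16 data bits. A compacted sketch using the ETH_SMI_* values from the header removed by this patch; the real code also udelay()s between polls and holds mv643xx_eth_phy_lock:

    #include <stdint.h>

    extern uint32_t read_reg(uint32_t off);
    extern void write_reg(uint32_t off, uint32_t v);

    static int smi_read(uint32_t smi_reg, int phy_addr, int phy_reg,
                        uint16_t *val)
    {
            int i;

            for (i = 0; read_reg(smi_reg) & 0x10000000 /* BUSY */; i++)
                    if (i == 1000)
                            return -1;                /* busy timeout */

            write_reg(smi_reg, (phy_addr << 16) | (phy_reg << 21) |
                               0x04000000 /* OPCODE_READ */);

            for (i = 0; !(read_reg(smi_reg) & 0x08000000 /* READ_VALID */); i++)
                    if (i == 1000)
                            return -1;                /* read timeout */

            *val = read_reg(smi_reg) & 0xffff;
            return 0;
    }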
@@ -2443,7 +2975,7 @@ static void eth_port_write_smi_reg(unsigned int eth_port_num,
2443 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); 2975 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
2444 2976
2445 /* wait for the SMI register to become available */ 2977 /* wait for the SMI register to become available */
2446 for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) { 2978 for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) {
2447 if (i == PHY_WAIT_ITERATIONS) { 2979 if (i == PHY_WAIT_ITERATIONS) {
2448 printk("mv643xx PHY busy timeout, port %d\n", 2980 printk("mv643xx PHY busy timeout, port %d\n",
2449 eth_port_num); 2981 eth_port_num);
@@ -2452,7 +2984,7 @@ static void eth_port_write_smi_reg(unsigned int eth_port_num,
2452 udelay(PHY_WAIT_MICRO_SECONDS); 2984 udelay(PHY_WAIT_MICRO_SECONDS);
2453 } 2985 }
2454 2986
2455 mv_write(MV643XX_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) | 2987 mv_write(SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
2456 ETH_SMI_OPCODE_WRITE | (value & 0xffff)); 2988 ETH_SMI_OPCODE_WRITE | (value & 0xffff));
2457out: 2989out:
2458 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); 2990 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
@@ -2742,6 +3274,7 @@ static const struct ethtool_ops mv643xx_ethtool_ops = {
2742 .get_drvinfo = mv643xx_get_drvinfo, 3274 .get_drvinfo = mv643xx_get_drvinfo,
2743 .get_link = mv643xx_eth_get_link, 3275 .get_link = mv643xx_eth_get_link,
2744 .set_sg = ethtool_op_set_sg, 3276 .set_sg = ethtool_op_set_sg,
3277 .get_sset_count = mv643xx_get_sset_count,
2745 .get_ethtool_stats = mv643xx_get_ethtool_stats, 3278 .get_ethtool_stats = mv643xx_get_ethtool_stats,
2746 .get_strings = mv643xx_get_strings, 3279 .get_strings = mv643xx_get_strings,
2747 .nway_reset = mv643xx_eth_nway_restart, 3280 .nway_reset = mv643xx_eth_nway_restart,
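The new .get_sset_count hook is the replacement for the older .get_stats_count interface; ethtool now asks for the count per string set. A plausible shape for the handler this hunk wires up, with MV643XX_STATS_LEN assumed to be the driver's stat-table length:

    static int mv643xx_get_sset_count(struct net_device *netdev, int sset)
    {
            switch (sset) {
            case ETH_SS_STATS:
                    return MV643XX_STATS_LEN;
            default:
                    return -EOPNOTSUPP;
            }
    }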
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
deleted file mode 100644
index be669eb23788..000000000000
--- a/drivers/net/mv643xx_eth.h
+++ /dev/null
@@ -1,370 +0,0 @@
1#ifndef __MV643XX_ETH_H__
2#define __MV643XX_ETH_H__
3
4#include <linux/module.h>
5#include <linux/kernel.h>
6#include <linux/spinlock.h>
7#include <linux/workqueue.h>
8#include <linux/mii.h>
9
10#include <linux/mv643xx.h>
11
12#include <asm/dma-mapping.h>
13
14/* Checksum offload for Tx works for most packets, but
15 * fails if the previously sent packet did not use hw csum
16 */
17#define MV643XX_CHECKSUM_OFFLOAD_TX
18#define MV643XX_NAPI
19#define MV643XX_TX_FAST_REFILL
20#undef MV643XX_COAL
21
22/*
23 * Number of RX / TX descriptors on RX / TX rings.
24 * Note that allocating RX descriptors means allocating both the RX
25 * ring AND a preallocated RX buffer (skb) for each descriptor.
26 * Allocating TX descriptors allocates only the TX descriptor ring;
27 * no TX buffers are preallocated (skb's are allocated by higher layers).
28 */
29
30/* Default TX ring size is 1000 descriptors */
31#define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000
32
33/* Default RX ring size is 400 descriptors */
34#define MV643XX_DEFAULT_RX_QUEUE_SIZE 400
35
36#define MV643XX_TX_COAL 100
37#ifdef MV643XX_COAL
38#define MV643XX_RX_COAL 100
39#endif
40
41#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
42#define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1)
43#else
44#define MAX_DESCS_PER_SKB 1
45#endif
46
47#define ETH_VLAN_HLEN 4
48#define ETH_FCS_LEN 4
49#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
50#define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \
51 ETH_VLAN_HLEN + ETH_FCS_LEN)
52#define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + dma_get_cache_alignment())
53
54#define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */
55#define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */
56
57#define ETH_INT_CAUSE_RX_DONE (ETH_RX_QUEUES_ENABLED << 2)
58#define ETH_INT_CAUSE_RX_ERROR (ETH_RX_QUEUES_ENABLED << 9)
59#define ETH_INT_CAUSE_RX (ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR)
60#define ETH_INT_CAUSE_EXT 0x00000002
61#define ETH_INT_UNMASK_ALL (ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT)
62
63#define ETH_INT_CAUSE_TX_DONE (ETH_TX_QUEUES_ENABLED << 0)
64#define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8)
65#define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
66#define ETH_INT_CAUSE_PHY 0x00010000
67#define ETH_INT_CAUSE_STATE 0x00100000
68#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \
69 ETH_INT_CAUSE_STATE)
70
71#define ETH_INT_MASK_ALL 0x00000000
72#define ETH_INT_MASK_ALL_EXT 0x00000000
73
74#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10us = 10ms max */
75#define PHY_WAIT_MICRO_SECONDS 10
76
77/* Buffer offset from buffer pointer */
78#define RX_BUF_OFFSET 0x2
79
80/* Gigabit Ethernet Unit Global Registers */
81
82/* MIB Counters register definitions */
83#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW 0x0
84#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH 0x4
85#define ETH_MIB_BAD_OCTETS_RECEIVED 0x8
86#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR 0xc
87#define ETH_MIB_GOOD_FRAMES_RECEIVED 0x10
88#define ETH_MIB_BAD_FRAMES_RECEIVED 0x14
89#define ETH_MIB_BROADCAST_FRAMES_RECEIVED 0x18
90#define ETH_MIB_MULTICAST_FRAMES_RECEIVED 0x1c
91#define ETH_MIB_FRAMES_64_OCTETS 0x20
92#define ETH_MIB_FRAMES_65_TO_127_OCTETS 0x24
93#define ETH_MIB_FRAMES_128_TO_255_OCTETS 0x28
94#define ETH_MIB_FRAMES_256_TO_511_OCTETS 0x2c
95#define ETH_MIB_FRAMES_512_TO_1023_OCTETS 0x30
96#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
97#define ETH_MIB_GOOD_OCTETS_SENT_LOW 0x38
98#define ETH_MIB_GOOD_OCTETS_SENT_HIGH 0x3c
99#define ETH_MIB_GOOD_FRAMES_SENT 0x40
100#define ETH_MIB_EXCESSIVE_COLLISION 0x44
101#define ETH_MIB_MULTICAST_FRAMES_SENT 0x48
102#define ETH_MIB_BROADCAST_FRAMES_SENT 0x4c
103#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED 0x50
104#define ETH_MIB_FC_SENT 0x54
105#define ETH_MIB_GOOD_FC_RECEIVED 0x58
106#define ETH_MIB_BAD_FC_RECEIVED 0x5c
107#define ETH_MIB_UNDERSIZE_RECEIVED 0x60
108#define ETH_MIB_FRAGMENTS_RECEIVED 0x64
109#define ETH_MIB_OVERSIZE_RECEIVED 0x68
110#define ETH_MIB_JABBER_RECEIVED 0x6c
111#define ETH_MIB_MAC_RECEIVE_ERROR 0x70
112#define ETH_MIB_BAD_CRC_EVENT 0x74
113#define ETH_MIB_COLLISION 0x78
114#define ETH_MIB_LATE_COLLISION 0x7c
115
116/* Port serial status reg (PSR) */
117#define ETH_INTERFACE_PCM 0x00000001
118#define ETH_LINK_IS_UP 0x00000002
119#define ETH_PORT_AT_FULL_DUPLEX 0x00000004
120#define ETH_RX_FLOW_CTRL_ENABLED 0x00000008
121#define ETH_GMII_SPEED_1000 0x00000010
122#define ETH_MII_SPEED_100 0x00000020
123#define ETH_TX_IN_PROGRESS 0x00000080
124#define ETH_BYPASS_ACTIVE 0x00000100
125#define ETH_PORT_AT_PARTITION_STATE 0x00000200
126#define ETH_PORT_TX_FIFO_EMPTY 0x00000400
127
128/* SMI reg */
129#define ETH_SMI_BUSY 0x10000000 /* Operation is in progress */
130#define ETH_SMI_READ_VALID 0x08000000 /* Read data is valid */
131#define ETH_SMI_OPCODE_WRITE 0 /* Opcode: 0 - Write */
132#define ETH_SMI_OPCODE_READ 0x04000000 /* Opcode: 1 - Read */
133
134/* Interrupt Cause Register Bit Definitions */
135
136/* SDMA command status fields macros */
137
138/* Tx & Rx descriptors status */
139#define ETH_ERROR_SUMMARY 0x00000001
140
141/* Tx & Rx descriptors command */
142#define ETH_BUFFER_OWNED_BY_DMA 0x80000000
143
144/* Tx descriptors status */
145#define ETH_LC_ERROR 0
146#define ETH_UR_ERROR 0x00000002
147#define ETH_RL_ERROR 0x00000004
148#define ETH_LLC_SNAP_FORMAT 0x00000200
149
150/* Rx descriptors status */
151#define ETH_OVERRUN_ERROR 0x00000002
152#define ETH_MAX_FRAME_LENGTH_ERROR 0x00000004
153#define ETH_RESOURCE_ERROR 0x00000006
154#define ETH_VLAN_TAGGED 0x00080000
155#define ETH_BPDU_FRAME 0x00100000
156#define ETH_UDP_FRAME_OVER_IP_V_4 0x00200000
157#define ETH_OTHER_FRAME_TYPE 0x00400000
158#define ETH_LAYER_2_IS_ETH_V_2 0x00800000
159#define ETH_FRAME_TYPE_IP_V_4 0x01000000
160#define ETH_FRAME_HEADER_OK 0x02000000
161#define ETH_RX_LAST_DESC 0x04000000
162#define ETH_RX_FIRST_DESC 0x08000000
163#define ETH_UNKNOWN_DESTINATION_ADDR 0x10000000
164#define ETH_RX_ENABLE_INTERRUPT 0x20000000
165#define ETH_LAYER_4_CHECKSUM_OK 0x40000000
166
167/* Rx descriptors byte count */
168#define ETH_FRAME_FRAGMENTED 0x00000004
169
170/* Tx descriptors command */
171#define ETH_LAYER_4_CHECKSUM_FIRST_DESC 0x00000400
172#define ETH_FRAME_SET_TO_VLAN 0x00008000
173#define ETH_UDP_FRAME 0x00010000
174#define ETH_GEN_TCP_UDP_CHECKSUM 0x00020000
175#define ETH_GEN_IP_V_4_CHECKSUM 0x00040000
176#define ETH_ZERO_PADDING 0x00080000
177#define ETH_TX_LAST_DESC 0x00100000
178#define ETH_TX_FIRST_DESC 0x00200000
179#define ETH_GEN_CRC 0x00400000
180#define ETH_TX_ENABLE_INTERRUPT 0x00800000
181#define ETH_AUTO_MODE 0x40000000
182
183#define ETH_TX_IHL_SHIFT 11
184
185/* typedefs */
186
187typedef enum _eth_func_ret_status {
188 ETH_OK, /* Returned as expected. */
189 ETH_ERROR, /* Fundamental error. */
190 ETH_RETRY, /* Could not process request. Try later.*/
191 ETH_END_OF_JOB, /* Ring has nothing to process. */
192 ETH_QUEUE_FULL, /* Ring resource error. */
193 ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust. */
194} ETH_FUNC_RET_STATUS;
195
196typedef enum _eth_target {
197 ETH_TARGET_DRAM,
198 ETH_TARGET_DEVICE,
199 ETH_TARGET_CBS,
200 ETH_TARGET_PCI0,
201 ETH_TARGET_PCI1
202} ETH_TARGET;
203
204/* These are for big-endian machines. Little endian needs different
205 * definitions.
206 */
207#if defined(__BIG_ENDIAN)
208struct eth_rx_desc {
209 u16 byte_cnt; /* Descriptor buffer byte count */
210 u16 buf_size; /* Buffer size */
211 u32 cmd_sts; /* Descriptor command status */
212 u32 next_desc_ptr; /* Next descriptor pointer */
213 u32 buf_ptr; /* Descriptor buffer pointer */
214};
215
216struct eth_tx_desc {
217 u16 byte_cnt; /* buffer byte count */
218 u16 l4i_chk; /* CPU provided TCP checksum */
219 u32 cmd_sts; /* Command/status field */
220 u32 next_desc_ptr; /* Pointer to next descriptor */
221 u32 buf_ptr; /* pointer to buffer for this descriptor*/
222};
223
224#elif defined(__LITTLE_ENDIAN)
225struct eth_rx_desc {
226 u32 cmd_sts; /* Descriptor command status */
227 u16 buf_size; /* Buffer size */
228 u16 byte_cnt; /* Descriptor buffer byte count */
229 u32 buf_ptr; /* Descriptor buffer pointer */
230 u32 next_desc_ptr; /* Next descriptor pointer */
231};
232
233struct eth_tx_desc {
234 u32 cmd_sts; /* Command/status field */
235 u16 l4i_chk; /* CPU provided TCP checksum */
236 u16 byte_cnt; /* buffer byte count */
237 u32 buf_ptr; /* pointer to buffer for this descriptor*/
238 u32 next_desc_ptr; /* Pointer to next descriptor */
239};
240#else
241#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
242#endif
243
244/* Unified struct for Rx and Tx operations. The user is not required to */
245/* be familiar with either Tx or Rx descriptors. */
246struct pkt_info {
247 unsigned short byte_cnt; /* Descriptor buffer byte count */
248 unsigned short l4i_chk; /* Tx CPU provided TCP Checksum */
249 unsigned int cmd_sts; /* Descriptor command status */
250 dma_addr_t buf_ptr; /* Descriptor buffer pointer */
251 struct sk_buff *return_info; /* User resource return information */
252};
253
254/* Ethernet port specific information */
255
256struct mv643xx_mib_counters {
257 u64 good_octets_received;
258 u32 bad_octets_received;
259 u32 internal_mac_transmit_err;
260 u32 good_frames_received;
261 u32 bad_frames_received;
262 u32 broadcast_frames_received;
263 u32 multicast_frames_received;
264 u32 frames_64_octets;
265 u32 frames_65_to_127_octets;
266 u32 frames_128_to_255_octets;
267 u32 frames_256_to_511_octets;
268 u32 frames_512_to_1023_octets;
269 u32 frames_1024_to_max_octets;
270 u64 good_octets_sent;
271 u32 good_frames_sent;
272 u32 excessive_collision;
273 u32 multicast_frames_sent;
274 u32 broadcast_frames_sent;
275 u32 unrec_mac_control_received;
276 u32 fc_sent;
277 u32 good_fc_received;
278 u32 bad_fc_received;
279 u32 undersize_received;
280 u32 fragments_received;
281 u32 oversize_received;
282 u32 jabber_received;
283 u32 mac_receive_error;
284 u32 bad_crc_event;
285 u32 collision;
286 u32 late_collision;
287};
288
289struct mv643xx_private {
290 int port_num; /* User Ethernet port number */
291
292 u32 rx_sram_addr; /* Base address of rx sram area */
293 u32 rx_sram_size; /* Size of rx sram area */
294 u32 tx_sram_addr; /* Base address of tx sram area */
295 u32 tx_sram_size; /* Size of tx sram area */
296
297 int rx_resource_err; /* Rx ring resource error flag */
298
299 /* Tx/Rx ring management index fields. For driver use */
300
301 /* Next available and first returning Rx resource */
302 int rx_curr_desc_q, rx_used_desc_q;
303
304 /* Next available and first returning Tx resource */
305 int tx_curr_desc_q, tx_used_desc_q;
306
307#ifdef MV643XX_TX_FAST_REFILL
308 u32 tx_clean_threshold;
309#endif
310
311 struct eth_rx_desc *p_rx_desc_area;
312 dma_addr_t rx_desc_dma;
313 int rx_desc_area_size;
314 struct sk_buff **rx_skb;
315
316 struct eth_tx_desc *p_tx_desc_area;
317 dma_addr_t tx_desc_dma;
318 int tx_desc_area_size;
319 struct sk_buff **tx_skb;
320
321 struct work_struct tx_timeout_task;
322
323 struct net_device *dev;
324 struct napi_struct napi;
325 struct net_device_stats stats;
326 struct mv643xx_mib_counters mib_counters;
327 spinlock_t lock;
328 /* Size of Tx Ring per queue */
329 int tx_ring_size;
330 /* Number of tx descriptors in use */
331 int tx_desc_count;
332 /* Size of Rx Ring per queue */
333 int rx_ring_size;
334 /* Number of rx descriptors in use */
335 int rx_desc_count;
336
337 /*
338 * Used when the RX ring is empty, which can happen when the
339 * system does not have resources (skb's)
340 */
341 struct timer_list timeout;
342
343 u32 rx_int_coal;
344 u32 tx_int_coal;
345 struct mii_if_info mii;
346};
347
348/* Port operation control routines */
349static void eth_port_init(struct mv643xx_private *mp);
350static void eth_port_reset(unsigned int eth_port_num);
351static void eth_port_start(struct net_device *dev);
352
353/* PHY and MIB routines */
354static void ethernet_phy_reset(unsigned int eth_port_num);
355
356static void eth_port_write_smi_reg(unsigned int eth_port_num,
357 unsigned int phy_reg, unsigned int value);
358
359static void eth_port_read_smi_reg(unsigned int eth_port_num,
360 unsigned int phy_reg, unsigned int *value);
361
362static void eth_clear_mib_counters(unsigned int eth_port_num);
363
364/* Port data flow control routines */
365static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
366 struct pkt_info *p_pkt_info);
367static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
368 struct pkt_info *p_pkt_info);
369
370#endif /* __MV643XX_ETH_H__ */
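A note on the descriptor layout above: the big- and little-endian struct variants describe the same hardware layout (within each 32-bit word the u16 fields are swapped so the device, which DMAs little-endian words, sees identical bytes), and ETH_BUFFER_OWNED_BY_DMA in cmd_sts is the handoff bit between CPU and SDMA engine. A sketch of the Tx handoff order under those definitions; real driver code would issue a write barrier before the final store:

    #include <stdint.h>

    #define OWNED_BY_DMA  0x80000000u  /* ETH_BUFFER_OWNED_BY_DMA */
    #define TX_FIRST_DESC 0x00200000u
    #define TX_LAST_DESC  0x00100000u
    #define GEN_CRC       0x00400000u

    struct tx_desc_sketch {
            uint32_t cmd_sts;
            uint16_t l4i_chk;
            uint16_t byte_cnt;
            uint32_t buf_ptr;
            uint32_t next_desc_ptr;
    };

    static void give_to_hw(struct tx_desc_sketch *d, uint32_t buf,
                           uint16_t len)
    {
            d->buf_ptr  = buf;
            d->byte_cnt = len;
            /* wmb() here in kernel code, then ownership flips last */
            d->cmd_sts = OWNED_BY_DMA | TX_FIRST_DESC | TX_LAST_DESC | GEN_CRC;
    }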
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 9f9a421c99b3..ab4d309a858f 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -550,7 +550,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
550 550
551 n = mac->rx->next_to_clean; 551 n = mac->rx->next_to_clean;
552 552
553 prefetch(RX_RING(mac, n)); 553 prefetch(&RX_RING(mac, n));
554 554
555 for (count = 0; count < limit; count++) { 555 for (count = 0; count < limit; count++) {
556 macrx = RX_RING(mac, n); 556 macrx = RX_RING(mac, n);
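The pasemi_mac change is a one-character bug fix: prefetch() expects the address of the data to warm, and RX_RING() expands to a ring element, so the old call passed the element's value rather than its location. Sketch of the difference, with an assumed macro shape (the real one lives in pasemi_mac.h):

    #define RX_RING(mac, num) ((mac)->rx->ring[(num) & (RX_RING_SIZE - 1)])

    prefetch(RX_RING(mac, n));   /* wrong: loads the descriptor value       */
    prefetch(&RX_RING(mac, n));  /* right: hints the cache with its address */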
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 419c00cbe6e9..e8960f294a6e 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -44,7 +44,8 @@
44 printk( "Assertion failed! %s,%s,%s,line=%d\n", \ 44 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
45 #expr,__FILE__,__FUNCTION__,__LINE__); \ 45 #expr,__FILE__,__FUNCTION__,__LINE__); \
46 } 46 }
47#define dprintk(fmt, args...) do { printk(PFX fmt, ## args); } while (0) 47#define dprintk(fmt, args...) \
48 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
48#else 49#else
49#define assert(expr) do {} while (0) 50#define assert(expr) do {} while (0)
50#define dprintk(fmt, args...) do {} while (0) 51#define dprintk(fmt, args...) do {} while (0)
@@ -111,19 +112,15 @@ enum mac_version {
111 RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd 112 RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd
112 RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe 113 RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe
113 RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb 114 RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb
114 RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be 8168Bf 115 RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be
115 RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb 8101Ec 116 RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb
116 RTL_GIGA_MAC_VER_14 = 0x0e, // 8101 117 RTL_GIGA_MAC_VER_14 = 0x0e, // 8101 ?
117 RTL_GIGA_MAC_VER_15 = 0x0f // 8101 118 RTL_GIGA_MAC_VER_15 = 0x0f, // 8101 ?
118}; 119 RTL_GIGA_MAC_VER_16 = 0x11, // 8101Ec
119 120 RTL_GIGA_MAC_VER_17 = 0x10, // 8168Bf
120enum phy_version { 121 RTL_GIGA_MAC_VER_18 = 0x12, // 8168CP
121 RTL_GIGA_PHY_VER_C = 0x03, /* PHY Reg 0x03 bit0-3 == 0x0000 */ 122 RTL_GIGA_MAC_VER_19 = 0x13, // 8168C
122 RTL_GIGA_PHY_VER_D = 0x04, /* PHY Reg 0x03 bit0-3 == 0x0000 */ 123 RTL_GIGA_MAC_VER_20 = 0x14 // 8168C
123 RTL_GIGA_PHY_VER_E = 0x05, /* PHY Reg 0x03 bit0-3 == 0x0000 */
124 RTL_GIGA_PHY_VER_F = 0x06, /* PHY Reg 0x03 bit0-3 == 0x0001 */
125 RTL_GIGA_PHY_VER_G = 0x07, /* PHY Reg 0x03 bit0-3 == 0x0002 */
126 RTL_GIGA_PHY_VER_H = 0x08, /* PHY Reg 0x03 bit0-3 == 0x0003 */
127}; 124};
128 125
129#define _R(NAME,MAC,MASK) \ 126#define _R(NAME,MAC,MASK) \
@@ -144,7 +141,12 @@ static const struct {
144 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E 141 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
145 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139 142 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
146 _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139 143 _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139
147 _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880) // PCI-E 8139 144 _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880), // PCI-E 8139
145 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_17, 0xff7e1880), // PCI-E
146 _R("RTL8101e", RTL_GIGA_MAC_VER_16, 0xff7e1880), // PCI-E
147 _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_18, 0xff7e1880), // PCI-E
148 _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_19, 0xff7e1880), // PCI-E
149 _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_20, 0xff7e1880) // PCI-E
148}; 150};
149#undef _R 151#undef _R
150 152
@@ -165,7 +167,7 @@ static struct pci_device_id rtl8169_pci_tbl[] = {
165 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, 167 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
166 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, 168 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
167 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, 169 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
168 { PCI_DEVICE(0x1259, 0xc107), 0, 0, RTL_CFG_0 }, 170 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
169 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 }, 171 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
170 { PCI_VENDOR_ID_LINKSYS, 0x1032, 172 { PCI_VENDOR_ID_LINKSYS, 0x1032,
171 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 }, 173 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
@@ -277,6 +279,7 @@ enum rtl_register_content {
277 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ 279 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
278 280
279 /* Config1 register p.24 */ 281 /* Config1 register p.24 */
282 MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
280 PMEnable = (1 << 0), /* Power Management Enable */ 283 PMEnable = (1 << 0), /* Power Management Enable */
281 284
282 /* Config2 register p. 25 */ 285 /* Config2 register p. 25 */
@@ -380,17 +383,20 @@ struct ring_info {
380 u8 __pad[sizeof(void *) - sizeof(u32)]; 383 u8 __pad[sizeof(void *) - sizeof(u32)];
381}; 384};
382 385
386enum features {
387 RTL_FEATURE_WOL = (1 << 0),
388 RTL_FEATURE_MSI = (1 << 1),
389};
390
383struct rtl8169_private { 391struct rtl8169_private {
384 void __iomem *mmio_addr; /* memory map physical address */ 392 void __iomem *mmio_addr; /* memory map physical address */
385 struct pci_dev *pci_dev; /* Index of PCI device */ 393 struct pci_dev *pci_dev; /* Index of PCI device */
386 struct net_device *dev; 394 struct net_device *dev;
387 struct napi_struct napi; 395 struct napi_struct napi;
388 struct net_device_stats stats; /* statistics of net device */
389 spinlock_t lock; /* spin lock flag */ 396 spinlock_t lock; /* spin lock flag */
390 u32 msg_enable; 397 u32 msg_enable;
391 int chipset; 398 int chipset;
392 int mac_version; 399 int mac_version;
393 int phy_version;
394 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ 400 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
395 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */ 401 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
396 u32 dirty_rx; 402 u32 dirty_rx;
@@ -420,7 +426,7 @@ struct rtl8169_private {
420 unsigned int (*phy_reset_pending)(void __iomem *); 426 unsigned int (*phy_reset_pending)(void __iomem *);
421 unsigned int (*link_ok)(void __iomem *); 427 unsigned int (*link_ok)(void __iomem *);
422 struct delayed_work task; 428 struct delayed_work task;
423 unsigned wol_enabled : 1; 429 unsigned features;
424}; 430};
425 431
426MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); 432MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -626,7 +632,10 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
626 632
627 RTL_W8(Cfg9346, Cfg9346_Lock); 633 RTL_W8(Cfg9346, Cfg9346_Lock);
628 634
629 tp->wol_enabled = (wol->wolopts) ? 1 : 0; 635 if (wol->wolopts)
636 tp->features |= RTL_FEATURE_WOL;
637 else
638 tp->features &= ~RTL_FEATURE_WOL;
630 639
631 spin_unlock_irq(&tp->lock); 640 spin_unlock_irq(&tp->lock);
632 641
@@ -707,7 +716,8 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
707 716
708 /* This tweak comes straight from Realtek's driver. */ 717 /* This tweak comes straight from Realtek's driver. */
709 if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) && 718 if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) &&
710 (tp->mac_version == RTL_GIGA_MAC_VER_13)) { 719 ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
720 (tp->mac_version == RTL_GIGA_MAC_VER_16))) {
711 auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA; 721 auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA;
712 } 722 }
713 } 723 }
@@ -715,7 +725,8 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
715 /* The 8100e/8101e do Fast Ethernet only. */ 725 /* The 8100e/8101e do Fast Ethernet only. */
716 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) || 726 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
717 (tp->mac_version == RTL_GIGA_MAC_VER_14) || 727 (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
718 (tp->mac_version == RTL_GIGA_MAC_VER_15)) { 728 (tp->mac_version == RTL_GIGA_MAC_VER_15) ||
729 (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
719 if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) && 730 if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
720 netif_msg_link(tp)) { 731 netif_msg_link(tp)) {
721 printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n", 732 printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
@@ -726,7 +737,8 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
726 737
727 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 738 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
728 739
729 if (tp->mac_version == RTL_GIGA_MAC_VER_12) { 740 if ((tp->mac_version == RTL_GIGA_MAC_VER_12) ||
741 (tp->mac_version == RTL_GIGA_MAC_VER_17)) {
730 /* Vendor specific (0x1f) and reserved (0x0e) MII registers. */ 742 /* Vendor specific (0x1f) and reserved (0x0e) MII registers. */
731 mdio_write(ioaddr, 0x1f, 0x0000); 743 mdio_write(ioaddr, 0x1f, 0x0000);
732 mdio_write(ioaddr, 0x0e, 0x0000); 744 mdio_write(ioaddr, 0x0e, 0x0000);
@@ -1104,26 +1116,51 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1104 */ 1116 */
1105 const struct { 1117 const struct {
1106 u32 mask; 1118 u32 mask;
1119 u32 val;
1107 int mac_version; 1120 int mac_version;
1108 } mac_info[] = { 1121 } mac_info[] = {
1109 { 0x38800000, RTL_GIGA_MAC_VER_15 }, 1122 /* 8168C family. */
1110 { 0x38000000, RTL_GIGA_MAC_VER_12 }, 1123 { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
1111 { 0x34000000, RTL_GIGA_MAC_VER_13 }, 1124 { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
1112 { 0x30800000, RTL_GIGA_MAC_VER_14 }, 1125 { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
1113 { 0x30000000, RTL_GIGA_MAC_VER_11 }, 1126 { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_20 },
1114 { 0x98000000, RTL_GIGA_MAC_VER_06 }, 1127
1115 { 0x18000000, RTL_GIGA_MAC_VER_05 }, 1128 /* 8168B family. */
1116 { 0x10000000, RTL_GIGA_MAC_VER_04 }, 1129 { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
1117 { 0x04000000, RTL_GIGA_MAC_VER_03 }, 1130 { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 },
1118 { 0x00800000, RTL_GIGA_MAC_VER_02 }, 1131 { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
1119 { 0x00000000, RTL_GIGA_MAC_VER_01 } /* Catch-all */ 1132 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
1133
1134 /* 8101 family. */
1135 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
1136 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
1137 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
1138 /* FIXME: where did these entries come from ? -- FR */
1139 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
1140 { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
1141
1142 /* 8110 family. */
1143 { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
1144 { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
1145 { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
1146 { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
1147 { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
1148 { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
1149
1150 { 0x00000000, 0x00000000, RTL_GIGA_MAC_VER_01 } /* Catch-all */
1120 }, *p = mac_info; 1151 }, *p = mac_info;
1121 u32 reg; 1152 u32 reg;
1122 1153
1123 reg = RTL_R32(TxConfig) & 0xfc800000; 1154 reg = RTL_R32(TxConfig);
1124 while ((reg & p->mask) != p->mask) 1155 while ((reg & p->mask) != p->val)
1125 p++; 1156 p++;
1126 tp->mac_version = p->mac_version; 1157 tp->mac_version = p->mac_version;
1158
1159 if (p->mask == 0x00000000) {
1160 struct pci_dev *pdev = tp->pci_dev;
1161
1162 dev_info(&pdev->dev, "unknown MAC (%08x)\n", reg);
1163 }
1127} 1164}
1128 1165
1129static void rtl8169_print_mac_version(struct rtl8169_private *tp) 1166static void rtl8169_print_mac_version(struct rtl8169_private *tp)
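The reworked lookup replaces the old "(reg & mask) == mask" test with an explicit mask/val pair per row, so an entry can also match on bits that must be clear; the final all-zero row matches any value, guaranteeing the loop terminates and letting the probe path report an unknown MAC. The core of the technique in isolation:

    #include <stdint.h>

    struct mac_match {
            uint32_t mask;
            uint32_t val;
            int      ver;
    };

    static int match_mac_version(const struct mac_match *p, uint32_t txconfig)
    {
            while ((txconfig & p->mask) != p->val)
                    p++;      /* last row is { 0, 0, ... }: always matches */
            return p->ver;
    }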
@@ -1131,54 +1168,21 @@ static void rtl8169_print_mac_version(struct rtl8169_private *tp)
1131 dprintk("mac_version = 0x%02x\n", tp->mac_version); 1168 dprintk("mac_version = 0x%02x\n", tp->mac_version);
1132} 1169}
1133 1170
1134static void rtl8169_get_phy_version(struct rtl8169_private *tp, 1171struct phy_reg {
1135 void __iomem *ioaddr)
1136{
1137 const struct {
1138 u16 mask;
1139 u16 set;
1140 int phy_version;
1141 } phy_info[] = {
1142 { 0x000f, 0x0002, RTL_GIGA_PHY_VER_G },
1143 { 0x000f, 0x0001, RTL_GIGA_PHY_VER_F },
1144 { 0x000f, 0x0000, RTL_GIGA_PHY_VER_E },
1145 { 0x0000, 0x0000, RTL_GIGA_PHY_VER_D } /* Catch-all */
1146 }, *p = phy_info;
1147 u16 reg; 1172 u16 reg;
1173 u16 val;
1174};
1148 1175
1149 reg = mdio_read(ioaddr, MII_PHYSID2) & 0xffff; 1176static void rtl_phy_write(void __iomem *ioaddr, struct phy_reg *regs, int len)
1150 while ((reg & p->mask) != p->set)
1151 p++;
1152 tp->phy_version = p->phy_version;
1153}
1154
1155static void rtl8169_print_phy_version(struct rtl8169_private *tp)
1156{ 1177{
1157 struct { 1178 while (len-- > 0) {
1158 int version; 1179 mdio_write(ioaddr, regs->reg, regs->val);
1159 char *msg; 1180 regs++;
1160 u32 reg;
1161 } phy_print[] = {
1162 { RTL_GIGA_PHY_VER_G, "RTL_GIGA_PHY_VER_G", 0x0002 },
1163 { RTL_GIGA_PHY_VER_F, "RTL_GIGA_PHY_VER_F", 0x0001 },
1164 { RTL_GIGA_PHY_VER_E, "RTL_GIGA_PHY_VER_E", 0x0000 },
1165 { RTL_GIGA_PHY_VER_D, "RTL_GIGA_PHY_VER_D", 0x0000 },
1166 { 0, NULL, 0x0000 }
1167 }, *p;
1168
1169 for (p = phy_print; p->msg; p++) {
1170 if (tp->phy_version == p->version) {
1171 dprintk("phy_version == %s (%04x)\n", p->msg, p->reg);
1172 return;
1173 }
1174 } 1181 }
1175 dprintk("phy_version == Unknown\n");
1176} 1182}
1177 1183
1178static void rtl8169_hw_phy_config(struct net_device *dev) 1184static void rtl8169s_hw_phy_config(void __iomem *ioaddr)
1179{ 1185{
1180 struct rtl8169_private *tp = netdev_priv(dev);
1181 void __iomem *ioaddr = tp->mmio_addr;
1182 struct { 1186 struct {
1183 u16 regs[5]; /* Beware of bit-sign propagation */ 1187 u16 regs[5]; /* Beware of bit-sign propagation */
1184 } phy_magic[5] = { { 1188 } phy_magic[5] = { {
@@ -1211,33 +1215,9 @@ static void rtl8169_hw_phy_config(struct net_device *dev)
1211 }, *p = phy_magic; 1215 }, *p = phy_magic;
1212 unsigned int i; 1216 unsigned int i;
1213 1217
1214 rtl8169_print_mac_version(tp); 1218 mdio_write(ioaddr, 0x1f, 0x0001); //w 31 2 0 1
1215 rtl8169_print_phy_version(tp); 1219 mdio_write(ioaddr, 0x15, 0x1000); //w 21 15 0 1000
1216 1220 mdio_write(ioaddr, 0x18, 0x65c7); //w 24 15 0 65c7
1217 if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
1218 return;
1219 if (tp->phy_version >= RTL_GIGA_PHY_VER_H)
1220 return;
1221
1222 dprintk("MAC version != 0 && PHY version == 0 or 1\n");
1223 dprintk("Do final_reg2.cfg\n");
1224
1225 /* Shazam ! */
1226
1227 if (tp->mac_version == RTL_GIGA_MAC_VER_04) {
1228 mdio_write(ioaddr, 31, 0x0002);
1229 mdio_write(ioaddr, 1, 0x90d0);
1230 mdio_write(ioaddr, 31, 0x0000);
1231 return;
1232 }
1233
1234 if ((tp->mac_version != RTL_GIGA_MAC_VER_02) &&
1235 (tp->mac_version != RTL_GIGA_MAC_VER_03))
1236 return;
1237
1238 mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1
1239 mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000
1240 mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7
1241 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0 1221 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
1242 1222
1243 for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) { 1223 for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) {
@@ -1250,7 +1230,115 @@ static void rtl8169_hw_phy_config(struct net_device *dev)
1250 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1 1230 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1
1251 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0 1231 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
1252 } 1232 }
1253 mdio_write(ioaddr, 31, 0x0000); //w 31 2 0 0 1233 mdio_write(ioaddr, 0x1f, 0x0000); //w 31 2 0 0
1234}
1235
1236static void rtl8169sb_hw_phy_config(void __iomem *ioaddr)
1237{
1238 struct phy_reg phy_reg_init[] = {
1239 { 0x1f, 0x0002 },
1240 { 0x01, 0x90d0 },
1241 { 0x1f, 0x0000 }
1242 };
1243
1244 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1245}
1246static void rtl8168b_hw_phy_config(void __iomem *ioaddr)
1247{
1248 struct phy_reg phy_reg_init[] = {
1249 { 0x1f, 0x0000 },
1250 { 0x10, 0xf41b },
1251 { 0x1f, 0x0000 }
1252 };
1253
1254 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1255}
1256
1257static void rtl8168cp_hw_phy_config(void __iomem *ioaddr)
1258{
1259 struct phy_reg phy_reg_init[] = {
1260 { 0x1f, 0x0000 },
1261 { 0x1d, 0x0f00 },
1262 { 0x1f, 0x0002 },
1263 { 0x0c, 0x1ec8 },
1264 { 0x1f, 0x0000 }
1265 };
1266
1267 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1268}
1269
1270static void rtl8168c_hw_phy_config(void __iomem *ioaddr)
1271{
1272 struct phy_reg phy_reg_init[] = {
1273 { 0x1f, 0x0001 },
1274 { 0x12, 0x2300 },
1275 { 0x1f, 0x0002 },
1276 { 0x00, 0x88d4 },
1277 { 0x01, 0x82b1 },
1278 { 0x03, 0x7002 },
1279 { 0x08, 0x9e30 },
1280 { 0x09, 0x01f0 },
1281 { 0x0a, 0x5500 },
1282 { 0x0c, 0x00c8 },
1283 { 0x1f, 0x0003 },
1284 { 0x12, 0xc096 },
1285 { 0x16, 0x000a },
1286 { 0x1f, 0x0000 }
1287 };
1288
1289 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1290}
1291
1292static void rtl8168cx_hw_phy_config(void __iomem *ioaddr)
1293{
1294 struct phy_reg phy_reg_init[] = {
1295 { 0x1f, 0x0000 },
1296 { 0x12, 0x2300 },
1297 { 0x1f, 0x0003 },
1298 { 0x16, 0x0f0a },
1299 { 0x1f, 0x0000 },
1300 { 0x1f, 0x0002 },
1301 { 0x0c, 0x7eb8 },
1302 { 0x1f, 0x0000 }
1303 };
1304
1305 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1306}
1307
1308static void rtl_hw_phy_config(struct net_device *dev)
1309{
1310 struct rtl8169_private *tp = netdev_priv(dev);
1311 void __iomem *ioaddr = tp->mmio_addr;
1312
1313 rtl8169_print_mac_version(tp);
1314
1315 switch (tp->mac_version) {
1316 case RTL_GIGA_MAC_VER_01:
1317 break;
1318 case RTL_GIGA_MAC_VER_02:
1319 case RTL_GIGA_MAC_VER_03:
1320 rtl8169s_hw_phy_config(ioaddr);
1321 break;
1322 case RTL_GIGA_MAC_VER_04:
1323 rtl8169sb_hw_phy_config(ioaddr);
1324 break;
1325 case RTL_GIGA_MAC_VER_11:
1326 case RTL_GIGA_MAC_VER_12:
1327 case RTL_GIGA_MAC_VER_17:
1328 rtl8168b_hw_phy_config(ioaddr);
1329 break;
1330 case RTL_GIGA_MAC_VER_18:
1331 rtl8168cp_hw_phy_config(ioaddr);
1332 break;
1333 case RTL_GIGA_MAC_VER_19:
1334 rtl8168c_hw_phy_config(ioaddr);
1335 break;
1336 case RTL_GIGA_MAC_VER_20:
1337 rtl8168cx_hw_phy_config(ioaddr);
1338 break;
1339 default:
1340 break;
1341 }
1254} 1342}
1255 1343
1256static void rtl8169_phy_timer(unsigned long __opaque) 1344static void rtl8169_phy_timer(unsigned long __opaque)
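The PHY init is now table-driven: each per-chip function is just a struct phy_reg array fed to rtl_phy_write(), with writes to register 0x1f selecting the PHY page, so supporting a new chip is a new table plus a switch case in rtl_hw_phy_config(). A usage sketch in the same style; the register values here are illustrative, not taken from a datasheet:

    static void example_hw_phy_config(void __iomem *ioaddr)
    {
            struct phy_reg phy_reg_init[] = {
                    { 0x1f, 0x0001 },   /* select PHY page 1         */
                    { 0x12, 0x2300 },   /* illustrative tuning write */
                    { 0x1f, 0x0000 }    /* back to page 0            */
            };

            rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
    }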
@@ -1262,7 +1350,6 @@ static void rtl8169_phy_timer(unsigned long __opaque)
1262 unsigned long timeout = RTL8169_PHY_TIMEOUT; 1350 unsigned long timeout = RTL8169_PHY_TIMEOUT;
1263 1351
1264 assert(tp->mac_version > RTL_GIGA_MAC_VER_01); 1352 assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
1265 assert(tp->phy_version < RTL_GIGA_PHY_VER_H);
1266 1353
1267 if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)) 1354 if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
1268 return; 1355 return;
@@ -1297,8 +1384,7 @@ static inline void rtl8169_delete_timer(struct net_device *dev)
1297 struct rtl8169_private *tp = netdev_priv(dev); 1384 struct rtl8169_private *tp = netdev_priv(dev);
1298 struct timer_list *timer = &tp->timer; 1385 struct timer_list *timer = &tp->timer;
1299 1386
1300 if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) || 1387 if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
1301 (tp->phy_version >= RTL_GIGA_PHY_VER_H))
1302 return; 1388 return;
1303 1389
1304 del_timer_sync(timer); 1390 del_timer_sync(timer);
@@ -1309,8 +1395,7 @@ static inline void rtl8169_request_timer(struct net_device *dev)
1309 struct rtl8169_private *tp = netdev_priv(dev); 1395 struct rtl8169_private *tp = netdev_priv(dev);
1310 struct timer_list *timer = &tp->timer; 1396 struct timer_list *timer = &tp->timer;
1311 1397
1312 if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) || 1398 if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
1313 (tp->phy_version >= RTL_GIGA_PHY_VER_H))
1314 return; 1399 return;
1315 1400
1316 mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT); 1401 mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT);
@@ -1362,7 +1447,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
1362{ 1447{
1363 void __iomem *ioaddr = tp->mmio_addr; 1448 void __iomem *ioaddr = tp->mmio_addr;
1364 1449
1365 rtl8169_hw_phy_config(dev); 1450 rtl_hw_phy_config(dev);
1366 1451
1367 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); 1452 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
1368 RTL_W8(0x82, 0x01); 1453 RTL_W8(0x82, 0x01);
@@ -1457,6 +1542,7 @@ static const struct rtl_cfg_info {
1457 unsigned int align; 1542 unsigned int align;
1458 u16 intr_event; 1543 u16 intr_event;
1459 u16 napi_event; 1544 u16 napi_event;
1545 unsigned msi;
1460} rtl_cfg_infos [] = { 1546} rtl_cfg_infos [] = {
1461 [RTL_CFG_0] = { 1547 [RTL_CFG_0] = {
1462 .hw_start = rtl_hw_start_8169, 1548 .hw_start = rtl_hw_start_8169,
@@ -1464,7 +1550,8 @@ static const struct rtl_cfg_info {
1464 .align = 0, 1550 .align = 0,
1465 .intr_event = SYSErr | LinkChg | RxOverflow | 1551 .intr_event = SYSErr | LinkChg | RxOverflow |
1466 RxFIFOOver | TxErr | TxOK | RxOK | RxErr, 1552 RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
1467 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow 1553 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
1554 .msi = 0
1468 }, 1555 },
1469 [RTL_CFG_1] = { 1556 [RTL_CFG_1] = {
1470 .hw_start = rtl_hw_start_8168, 1557 .hw_start = rtl_hw_start_8168,
@@ -1472,7 +1559,8 @@ static const struct rtl_cfg_info {
1472 .align = 8, 1559 .align = 8,
1473 .intr_event = SYSErr | LinkChg | RxOverflow | 1560 .intr_event = SYSErr | LinkChg | RxOverflow |
1474 TxErr | TxOK | RxOK | RxErr, 1561 TxErr | TxOK | RxOK | RxErr,
1475 .napi_event = TxErr | TxOK | RxOK | RxOverflow 1562 .napi_event = TxErr | TxOK | RxOK | RxOverflow,
1563 .msi = RTL_FEATURE_MSI
1476 }, 1564 },
1477 [RTL_CFG_2] = { 1565 [RTL_CFG_2] = {
1478 .hw_start = rtl_hw_start_8101, 1566 .hw_start = rtl_hw_start_8101,
@@ -1480,10 +1568,39 @@ static const struct rtl_cfg_info {
1480 .align = 8, 1568 .align = 8,
1481 .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout | 1569 .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
1482 RxFIFOOver | TxErr | TxOK | RxOK | RxErr, 1570 RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
1483 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow 1571 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
1572 .msi = RTL_FEATURE_MSI
1484 } 1573 }
1485}; 1574};
1486 1575
1576/* Cfg9346_Unlock assumed. */
1577static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
1578 const struct rtl_cfg_info *cfg)
1579{
1580 unsigned msi = 0;
1581 u8 cfg2;
1582
1583 cfg2 = RTL_R8(Config2) & ~MSIEnable;
1584 if (cfg->msi) {
1585 if (pci_enable_msi(pdev)) {
1586 dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
1587 } else {
1588 cfg2 |= MSIEnable;
1589 msi = RTL_FEATURE_MSI;
1590 }
1591 }
1592 RTL_W8(Config2, cfg2);
1593 return msi;
1594}
1595
1596static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
1597{
1598 if (tp->features & RTL_FEATURE_MSI) {
1599 pci_disable_msi(pdev);
1600 tp->features &= ~RTL_FEATURE_MSI;
1601 }
1602}
1603
1487static int __devinit 1604static int __devinit
1488rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1605rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1489{ 1606{
@@ -1596,10 +1713,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1596 1713
1597 /* Identify chip attached to board */ 1714 /* Identify chip attached to board */
1598 rtl8169_get_mac_version(tp, ioaddr); 1715 rtl8169_get_mac_version(tp, ioaddr);
1599 rtl8169_get_phy_version(tp, ioaddr);
1600 1716
1601 rtl8169_print_mac_version(tp); 1717 rtl8169_print_mac_version(tp);
1602 rtl8169_print_phy_version(tp);
1603 1718
1604 for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) { 1719 for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) {
1605 if (tp->mac_version == rtl_chip_info[i].mac_version) 1720 if (tp->mac_version == rtl_chip_info[i].mac_version)
@@ -1619,6 +1734,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1619 RTL_W8(Cfg9346, Cfg9346_Unlock); 1734 RTL_W8(Cfg9346, Cfg9346_Unlock);
1620 RTL_W8(Config1, RTL_R8(Config1) | PMEnable); 1735 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
1621 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); 1736 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
1737 tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
1622 RTL_W8(Cfg9346, Cfg9346_Lock); 1738 RTL_W8(Cfg9346, Cfg9346_Lock);
1623 1739
1624 if (RTL_R8(PHYstatus) & TBI_Enable) { 1740 if (RTL_R8(PHYstatus) & TBI_Enable) {
@@ -1686,7 +1802,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1686 1802
1687 rc = register_netdev(dev); 1803 rc = register_netdev(dev);
1688 if (rc < 0) 1804 if (rc < 0)
1689 goto err_out_unmap_5; 1805 goto err_out_msi_5;
1690 1806
1691 pci_set_drvdata(pdev, dev); 1807 pci_set_drvdata(pdev, dev);
1692 1808
@@ -1709,7 +1825,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1709out: 1825out:
1710 return rc; 1826 return rc;
1711 1827
1712err_out_unmap_5: 1828err_out_msi_5:
1829 rtl_disable_msi(pdev, tp);
1713 iounmap(ioaddr); 1830 iounmap(ioaddr);
1714err_out_free_res_4: 1831err_out_free_res_4:
1715 pci_release_regions(pdev); 1832 pci_release_regions(pdev);
@@ -1730,6 +1847,7 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
1730 flush_scheduled_work(); 1847 flush_scheduled_work();
1731 1848
1732 unregister_netdev(dev); 1849 unregister_netdev(dev);
1850 rtl_disable_msi(pdev, tp);
1733 rtl8169_release_board(pdev, dev, tp->mmio_addr); 1851 rtl8169_release_board(pdev, dev, tp->mmio_addr);
1734 pci_set_drvdata(pdev, NULL); 1852 pci_set_drvdata(pdev, NULL);
1735} 1853}
@@ -1773,7 +1891,8 @@ static int rtl8169_open(struct net_device *dev)
1773 1891
1774 smp_mb(); 1892 smp_mb();
1775 1893
1776 retval = request_irq(dev->irq, rtl8169_interrupt, IRQF_SHARED, 1894 retval = request_irq(dev->irq, rtl8169_interrupt,
1895 (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
1777 dev->name, dev); 1896 dev->name, dev);
1778 if (retval < 0) 1897 if (retval < 0)
1779 goto err_release_ring_2; 1898 goto err_release_ring_2;
@@ -1933,7 +2052,7 @@ static void rtl_hw_start_8169(struct net_device *dev)
1933 2052
1934 if ((tp->mac_version == RTL_GIGA_MAC_VER_02) || 2053 if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
1935 (tp->mac_version == RTL_GIGA_MAC_VER_03)) { 2054 (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
1936 dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0. " 2055 dprintk("Set MAC Reg C+CR Offset 0xE0. "
1937 "Bit-3 and bit-14 MUST be 1\n"); 2056 "Bit-3 and bit-14 MUST be 1\n");
1938 tp->cp_cmd |= (1 << 14); 2057 tp->cp_cmd |= (1 << 14);
1939 } 2058 }
@@ -2029,7 +2148,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
2029 void __iomem *ioaddr = tp->mmio_addr; 2148 void __iomem *ioaddr = tp->mmio_addr;
2030 struct pci_dev *pdev = tp->pci_dev; 2149 struct pci_dev *pdev = tp->pci_dev;
2031 2150
2032 if (tp->mac_version == RTL_GIGA_MAC_VER_13) { 2151 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
2152 (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
2033 pci_write_config_word(pdev, 0x68, 0x00); 2153 pci_write_config_word(pdev, 0x68, 0x00);
2034 pci_write_config_word(pdev, 0x69, 0x08); 2154 pci_write_config_word(pdev, 0x69, 0x08);
2035 } 2155 }
@@ -2259,7 +2379,7 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
2259 dev_kfree_skb(skb); 2379 dev_kfree_skb(skb);
2260 tx_skb->skb = NULL; 2380 tx_skb->skb = NULL;
2261 } 2381 }
2262 tp->stats.tx_dropped++; 2382 tp->dev->stats.tx_dropped++;
2263 } 2383 }
2264 } 2384 }
2265 tp->cur_tx = tp->dirty_tx = 0; 2385 tp->cur_tx = tp->dirty_tx = 0;
@@ -2310,7 +2430,7 @@ static void rtl8169_reinit_task(struct work_struct *work)
2310 ret = rtl8169_open(dev); 2430 ret = rtl8169_open(dev);
2311 if (unlikely(ret < 0)) { 2431 if (unlikely(ret < 0)) {
2312 if (net_ratelimit() && netif_msg_drv(tp)) { 2432 if (net_ratelimit() && netif_msg_drv(tp)) {
2313 printk(PFX KERN_ERR "%s: reinit failure (status = %d)." 2433 printk(KERN_ERR PFX "%s: reinit failure (status = %d)."
2314 " Rescheduling.\n", dev->name, ret); 2434 " Rescheduling.\n", dev->name, ret);
2315 } 2435 }
2316 rtl8169_schedule_work(dev, rtl8169_reinit_task); 2436 rtl8169_schedule_work(dev, rtl8169_reinit_task);
@@ -2340,9 +2460,10 @@ static void rtl8169_reset_task(struct work_struct *work)
2340 rtl8169_init_ring_indexes(tp); 2460 rtl8169_init_ring_indexes(tp);
2341 rtl_hw_start(dev); 2461 rtl_hw_start(dev);
2342 netif_wake_queue(dev); 2462 netif_wake_queue(dev);
2463 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
2343 } else { 2464 } else {
2344 if (net_ratelimit() && netif_msg_intr(tp)) { 2465 if (net_ratelimit() && netif_msg_intr(tp)) {
2345 printk(PFX KERN_EMERG "%s: Rx buffers shortage\n", 2466 printk(KERN_EMERG PFX "%s: Rx buffers shortage\n",
2346 dev->name); 2467 dev->name);
2347 } 2468 }
2348 rtl8169_schedule_work(dev, rtl8169_reset_task); 2469 rtl8169_schedule_work(dev, rtl8169_reset_task);
@@ -2496,7 +2617,7 @@ err_stop:
2496 netif_stop_queue(dev); 2617 netif_stop_queue(dev);
2497 ret = NETDEV_TX_BUSY; 2618 ret = NETDEV_TX_BUSY;
2498err_update_stats: 2619err_update_stats:
2499 tp->stats.tx_dropped++; 2620 dev->stats.tx_dropped++;
2500 goto out; 2621 goto out;
2501} 2622}
2502 2623
@@ -2571,8 +2692,8 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
2571 if (status & DescOwn) 2692 if (status & DescOwn)
2572 break; 2693 break;
2573 2694
2574 tp->stats.tx_bytes += len; 2695 dev->stats.tx_bytes += len;
2575 tp->stats.tx_packets++; 2696 dev->stats.tx_packets++;
2576 2697
2577 rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry); 2698 rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);
2578 2699
@@ -2672,14 +2793,14 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
2672 "%s: Rx ERROR. status = %08x\n", 2793 "%s: Rx ERROR. status = %08x\n",
2673 dev->name, status); 2794 dev->name, status);
2674 } 2795 }
2675 tp->stats.rx_errors++; 2796 dev->stats.rx_errors++;
2676 if (status & (RxRWT | RxRUNT)) 2797 if (status & (RxRWT | RxRUNT))
2677 tp->stats.rx_length_errors++; 2798 dev->stats.rx_length_errors++;
2678 if (status & RxCRC) 2799 if (status & RxCRC)
2679 tp->stats.rx_crc_errors++; 2800 dev->stats.rx_crc_errors++;
2680 if (status & RxFOVF) { 2801 if (status & RxFOVF) {
2681 rtl8169_schedule_work(dev, rtl8169_reset_task); 2802 rtl8169_schedule_work(dev, rtl8169_reset_task);
2682 tp->stats.rx_fifo_errors++; 2803 dev->stats.rx_fifo_errors++;
2683 } 2804 }
2684 rtl8169_mark_to_asic(desc, tp->rx_buf_sz); 2805 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
2685 } else { 2806 } else {
@@ -2694,8 +2815,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
2694 * sized frames. 2815 * sized frames.
2695 */ 2816 */
2696 if (unlikely(rtl8169_fragmented_frame(status))) { 2817 if (unlikely(rtl8169_fragmented_frame(status))) {
2697 tp->stats.rx_dropped++; 2818 dev->stats.rx_dropped++;
2698 tp->stats.rx_length_errors++; 2819 dev->stats.rx_length_errors++;
2699 rtl8169_mark_to_asic(desc, tp->rx_buf_sz); 2820 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
2700 continue; 2821 continue;
2701 } 2822 }
@@ -2719,8 +2840,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
2719 rtl8169_rx_skb(skb); 2840 rtl8169_rx_skb(skb);
2720 2841
2721 dev->last_rx = jiffies; 2842 dev->last_rx = jiffies;
2722 tp->stats.rx_bytes += pkt_size; 2843 dev->stats.rx_bytes += pkt_size;
2723 tp->stats.rx_packets++; 2844 dev->stats.rx_packets++;
2724 } 2845 }
2725 2846
2726 /* Workaround for AMD platform. */ 2847 /* Workaround for AMD platform. */
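All of the tp->stats -> dev->stats churn in this file is the same mechanical
conversion: struct net_device now embeds a net_device_stats block, so the
driver drops its private copy and counts straight into the shared one. The
only per-driver work left is folding in counters the hardware keeps itself,
as the get_stats() hunk below shows; the general shape, with the foo_* names
purely illustrative:

	static struct net_device_stats *foo_get_stats(struct net_device *dev)
	{
		struct foo_priv *p = netdev_priv(dev);	/* hypothetical driver */

		/* software counters already live in dev->stats; only the
		 * hardware-maintained ones need folding in here */
		dev->stats.rx_missed_errors += foo_read_hw_missed(p);	/* hypothetical */

		return &dev->stats;
	}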
@@ -2881,7 +3002,7 @@ core_down:
2881 rtl8169_asic_down(ioaddr); 3002 rtl8169_asic_down(ioaddr);
2882 3003
2883 /* Update the error counts. */ 3004 /* Update the error counts. */
2884 tp->stats.rx_missed_errors += RTL_R32(RxMissed); 3005 dev->stats.rx_missed_errors += RTL_R32(RxMissed);
2885 RTL_W32(RxMissed, 0); 3006 RTL_W32(RxMissed, 0);
2886 3007
2887 spin_unlock_irq(&tp->lock); 3008 spin_unlock_irq(&tp->lock);
@@ -2984,7 +3105,9 @@ static void rtl_set_rx_mode(struct net_device *dev)
2984 (tp->mac_version == RTL_GIGA_MAC_VER_12) || 3105 (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
2985 (tp->mac_version == RTL_GIGA_MAC_VER_13) || 3106 (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
2986 (tp->mac_version == RTL_GIGA_MAC_VER_14) || 3107 (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
2987 (tp->mac_version == RTL_GIGA_MAC_VER_15)) { 3108 (tp->mac_version == RTL_GIGA_MAC_VER_15) ||
3109 (tp->mac_version == RTL_GIGA_MAC_VER_16) ||
3110 (tp->mac_version == RTL_GIGA_MAC_VER_17)) {
2988 mc_filter[0] = 0xffffffff; 3111 mc_filter[0] = 0xffffffff;
2989 mc_filter[1] = 0xffffffff; 3112 mc_filter[1] = 0xffffffff;
2990 } 3113 }
@@ -3011,12 +3134,12 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
3011 3134
3012 if (netif_running(dev)) { 3135 if (netif_running(dev)) {
3013 spin_lock_irqsave(&tp->lock, flags); 3136 spin_lock_irqsave(&tp->lock, flags);
3014 tp->stats.rx_missed_errors += RTL_R32(RxMissed); 3137 dev->stats.rx_missed_errors += RTL_R32(RxMissed);
3015 RTL_W32(RxMissed, 0); 3138 RTL_W32(RxMissed, 0);
3016 spin_unlock_irqrestore(&tp->lock, flags); 3139 spin_unlock_irqrestore(&tp->lock, flags);
3017 } 3140 }
3018 3141
3019 return &tp->stats; 3142 return &dev->stats;
3020} 3143}
3021 3144
3022#ifdef CONFIG_PM 3145#ifdef CONFIG_PM
@@ -3037,14 +3160,15 @@ static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
3037 3160
3038 rtl8169_asic_down(ioaddr); 3161 rtl8169_asic_down(ioaddr);
3039 3162
3040 tp->stats.rx_missed_errors += RTL_R32(RxMissed); 3163 dev->stats.rx_missed_errors += RTL_R32(RxMissed);
3041 RTL_W32(RxMissed, 0); 3164 RTL_W32(RxMissed, 0);
3042 3165
3043 spin_unlock_irq(&tp->lock); 3166 spin_unlock_irq(&tp->lock);
3044 3167
3045out_pci_suspend: 3168out_pci_suspend:
3046 pci_save_state(pdev); 3169 pci_save_state(pdev);
3047 pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled); 3170 pci_enable_wake(pdev, pci_choose_state(pdev, state),
3171 (tp->features & RTL_FEATURE_WOL) ? 1 : 0);
3048 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3172 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3049 3173
3050 return 0; 3174 return 0;
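The suspend hook now reads wake-on-LAN state out of the same tp->features
bitmask the MSI code uses, instead of a dedicated wol_enabled field, so one
word carries every optional capability; pci_enable_wake() only wants a
boolean, hence the ?: reduction. Presumably the ethtool set_wol handler
maintains the bit; a sketch of that side (the register programming the real
handler also does is omitted):

	static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
	{
		struct rtl8169_private *tp = netdev_priv(dev);

		if (wol->wolopts)
			tp->features |= RTL_FEATURE_WOL;	/* consulted at suspend */
		else
			tp->features &= ~RTL_FEATURE_WOL;

		return 0;
	}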
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 24cfb6275d9b..c27c7d63b6a5 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -4271,7 +4271,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
4271 del_timer_sync(&hw->watchdog_timer); 4271 del_timer_sync(&hw->watchdog_timer);
4272 cancel_work_sync(&hw->restart_work); 4272 cancel_work_sync(&hw->restart_work);
4273 4273
4274 for (i = hw->ports; i >= 0; --i) 4274 for (i = hw->ports-1; i >= 0; --i)
4275 unregister_netdev(hw->dev[i]); 4275 unregister_netdev(hw->dev[i]);
4276 4276
4277 sky2_write32(hw, B0_IMSK, 0); 4277 sky2_write32(hw, B0_IMSK, 0);
@@ -4289,7 +4289,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
4289 pci_release_regions(pdev); 4289 pci_release_regions(pdev);
4290 pci_disable_device(pdev); 4290 pci_disable_device(pdev);
4291 4291
4292 for (i = hw->ports; i >= 0; --i) 4292 for (i = hw->ports-1; i >= 0; --i)
4293 free_netdev(hw->dev[i]); 4293 free_netdev(hw->dev[i]);
4294 4294
4295 iounmap(hw->regs); 4295 iounmap(hw->regs);
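The sky2 change is a plain off-by-one fix: hw->ports is a count (1 or 2),
hw->dev[] is indexed from 0, so the old loops started at hw->dev[hw->ports]
and touched one slot past the last initialised entry. The corrected idiom,
counting down from the last valid index:

	int i;

	/* last valid slot is hw->ports - 1, not hw->ports */
	for (i = hw->ports - 1; i >= 0; --i)
		unregister_netdev(hw->dev[i]);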
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h
index 9c8049005052..d2ae6185f03b 100644
--- a/include/linux/mv643xx.h
+++ b/include/linux/mv643xx.h
@@ -14,6 +14,7 @@
14#define __ASM_MV643XX_H 14#define __ASM_MV643XX_H
15 15
16#include <asm/types.h> 16#include <asm/types.h>
17#include <linux/mv643xx_eth.h>
17 18
18/****************************************/ 19/****************************************/
19/* Processor Address Space */ 20/* Processor Address Space */
@@ -658,120 +659,6 @@
658/* Ethernet Unit Registers */ 659/* Ethernet Unit Registers */
659/****************************************/ 660/****************************************/
660 661
661#define MV643XX_ETH_SHARED_REGS 0x2000
662#define MV643XX_ETH_SHARED_REGS_SIZE 0x2000
663
664#define MV643XX_ETH_PHY_ADDR_REG 0x2000
665#define MV643XX_ETH_SMI_REG 0x2004
666#define MV643XX_ETH_UNIT_DEFAULT_ADDR_REG 0x2008
667#define MV643XX_ETH_UNIT_DEFAULTID_REG 0x200c
668#define MV643XX_ETH_UNIT_INTERRUPT_CAUSE_REG 0x2080
669#define MV643XX_ETH_UNIT_INTERRUPT_MASK_REG 0x2084
670#define MV643XX_ETH_UNIT_INTERNAL_USE_REG 0x24fc
671#define MV643XX_ETH_UNIT_ERROR_ADDR_REG 0x2094
672#define MV643XX_ETH_BAR_0 0x2200
673#define MV643XX_ETH_BAR_1 0x2208
674#define MV643XX_ETH_BAR_2 0x2210
675#define MV643XX_ETH_BAR_3 0x2218
676#define MV643XX_ETH_BAR_4 0x2220
677#define MV643XX_ETH_BAR_5 0x2228
678#define MV643XX_ETH_SIZE_REG_0 0x2204
679#define MV643XX_ETH_SIZE_REG_1 0x220c
680#define MV643XX_ETH_SIZE_REG_2 0x2214
681#define MV643XX_ETH_SIZE_REG_3 0x221c
682#define MV643XX_ETH_SIZE_REG_4 0x2224
683#define MV643XX_ETH_SIZE_REG_5 0x222c
684#define MV643XX_ETH_HEADERS_RETARGET_BASE_REG 0x2230
685#define MV643XX_ETH_HEADERS_RETARGET_CONTROL_REG 0x2234
686#define MV643XX_ETH_HIGH_ADDR_REMAP_REG_0 0x2280
687#define MV643XX_ETH_HIGH_ADDR_REMAP_REG_1 0x2284
688#define MV643XX_ETH_HIGH_ADDR_REMAP_REG_2 0x2288
689#define MV643XX_ETH_HIGH_ADDR_REMAP_REG_3 0x228c
690#define MV643XX_ETH_BASE_ADDR_ENABLE_REG 0x2290
691#define MV643XX_ETH_ACCESS_PROTECTION_REG(port) (0x2294 + (port<<2))
692#define MV643XX_ETH_MIB_COUNTERS_BASE(port) (0x3000 + (port<<7))
693#define MV643XX_ETH_PORT_CONFIG_REG(port) (0x2400 + (port<<10))
694#define MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port) (0x2404 + (port<<10))
695#define MV643XX_ETH_MII_SERIAL_PARAMETRS_REG(port) (0x2408 + (port<<10))
696#define MV643XX_ETH_GMII_SERIAL_PARAMETRS_REG(port) (0x240c + (port<<10))
697#define MV643XX_ETH_VLAN_ETHERTYPE_REG(port) (0x2410 + (port<<10))
698#define MV643XX_ETH_MAC_ADDR_LOW(port) (0x2414 + (port<<10))
699#define MV643XX_ETH_MAC_ADDR_HIGH(port) (0x2418 + (port<<10))
700#define MV643XX_ETH_SDMA_CONFIG_REG(port) (0x241c + (port<<10))
701#define MV643XX_ETH_DSCP_0(port) (0x2420 + (port<<10))
702#define MV643XX_ETH_DSCP_1(port) (0x2424 + (port<<10))
703#define MV643XX_ETH_DSCP_2(port) (0x2428 + (port<<10))
704#define MV643XX_ETH_DSCP_3(port) (0x242c + (port<<10))
705#define MV643XX_ETH_DSCP_4(port) (0x2430 + (port<<10))
706#define MV643XX_ETH_DSCP_5(port) (0x2434 + (port<<10))
707#define MV643XX_ETH_DSCP_6(port) (0x2438 + (port<<10))
708#define MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port) (0x243c + (port<<10))
709#define MV643XX_ETH_VLAN_PRIORITY_TAG_TO_PRIORITY(port) (0x2440 + (port<<10))
710#define MV643XX_ETH_PORT_STATUS_REG(port) (0x2444 + (port<<10))
711#define MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port) (0x2448 + (port<<10))
712#define MV643XX_ETH_TX_QUEUE_FIXED_PRIORITY(port) (0x244c + (port<<10))
713#define MV643XX_ETH_PORT_TX_TOKEN_BUCKET_RATE_CONFIG(port) (0x2450 + (port<<10))
714#define MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port) (0x2458 + (port<<10))
715#define MV643XX_ETH_PORT_MAXIMUM_TOKEN_BUCKET_SIZE(port) (0x245c + (port<<10))
716#define MV643XX_ETH_INTERRUPT_CAUSE_REG(port) (0x2460 + (port<<10))
717#define MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port) (0x2464 + (port<<10))
718#define MV643XX_ETH_INTERRUPT_MASK_REG(port) (0x2468 + (port<<10))
719#define MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port) (0x246c + (port<<10))
720#define MV643XX_ETH_RX_FIFO_URGENT_THRESHOLD_REG(port) (0x2470 + (port<<10))
721#define MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(port) (0x2474 + (port<<10))
722#define MV643XX_ETH_RX_MINIMAL_FRAME_SIZE_REG(port) (0x247c + (port<<10))
723#define MV643XX_ETH_RX_DISCARDED_FRAMES_COUNTER(port) (0x2484 + (port<<10))
724#define MV643XX_ETH_PORT_DEBUG_0_REG(port) (0x248c + (port<<10))
725#define MV643XX_ETH_PORT_DEBUG_1_REG(port) (0x2490 + (port<<10))
726#define MV643XX_ETH_PORT_INTERNAL_ADDR_ERROR_REG(port) (0x2494 + (port<<10))
727#define MV643XX_ETH_INTERNAL_USE_REG(port) (0x24fc + (port<<10))
728#define MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port) (0x2680 + (port<<10))
729#define MV643XX_ETH_CURRENT_SERVED_TX_DESC_PTR(port) (0x2684 + (port<<10))
730#define MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port) (0x260c + (port<<10))
731#define MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_1(port) (0x261c + (port<<10))
732#define MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_2(port) (0x262c + (port<<10))
733#define MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_3(port) (0x263c + (port<<10))
734#define MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_4(port) (0x264c + (port<<10))
735#define MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_5(port) (0x265c + (port<<10))
736#define MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_6(port) (0x266c + (port<<10))
737#define MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_7(port) (0x267c + (port<<10))
738#define MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(port) (0x26c0 + (port<<10))
739#define MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_1(port) (0x26c4 + (port<<10))
740#define MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_2(port) (0x26c8 + (port<<10))
741#define MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_3(port) (0x26cc + (port<<10))
742#define MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_4(port) (0x26d0 + (port<<10))
743#define MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_5(port) (0x26d4 + (port<<10))
744#define MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_6(port) (0x26d8 + (port<<10))
745#define MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_7(port) (0x26dc + (port<<10))
746#define MV643XX_ETH_TX_QUEUE_0_TOKEN_BUCKET_COUNT(port) (0x2700 + (port<<10))
747#define MV643XX_ETH_TX_QUEUE_1_TOKEN_BUCKET_COUNT(port) (0x2710 + (port<<10))
748#define MV643XX_ETH_TX_QUEUE_2_TOKEN_BUCKET_COUNT(port) (0x2720 + (port<<10))
749#define MV643XX_ETH_TX_QUEUE_3_TOKEN_BUCKET_COUNT(port) (0x2730 + (port<<10))
750#define MV643XX_ETH_TX_QUEUE_4_TOKEN_BUCKET_COUNT(port) (0x2740 + (port<<10))
751#define MV643XX_ETH_TX_QUEUE_5_TOKEN_BUCKET_COUNT(port) (0x2750 + (port<<10))
752#define MV643XX_ETH_TX_QUEUE_6_TOKEN_BUCKET_COUNT(port) (0x2760 + (port<<10))
753#define MV643XX_ETH_TX_QUEUE_7_TOKEN_BUCKET_COUNT(port) (0x2770 + (port<<10))
754#define MV643XX_ETH_TX_QUEUE_0_TOKEN_BUCKET_CONFIG(port) (0x2704 + (port<<10))
755#define MV643XX_ETH_TX_QUEUE_1_TOKEN_BUCKET_CONFIG(port) (0x2714 + (port<<10))
756#define MV643XX_ETH_TX_QUEUE_2_TOKEN_BUCKET_CONFIG(port) (0x2724 + (port<<10))
757#define MV643XX_ETH_TX_QUEUE_3_TOKEN_BUCKET_CONFIG(port) (0x2734 + (port<<10))
758#define MV643XX_ETH_TX_QUEUE_4_TOKEN_BUCKET_CONFIG(port) (0x2744 + (port<<10))
759#define MV643XX_ETH_TX_QUEUE_5_TOKEN_BUCKET_CONFIG(port) (0x2754 + (port<<10))
760#define MV643XX_ETH_TX_QUEUE_6_TOKEN_BUCKET_CONFIG(port) (0x2764 + (port<<10))
761#define MV643XX_ETH_TX_QUEUE_7_TOKEN_BUCKET_CONFIG(port) (0x2774 + (port<<10))
762#define MV643XX_ETH_TX_QUEUE_0_ARBITER_CONFIG(port) (0x2708 + (port<<10))
763#define MV643XX_ETH_TX_QUEUE_1_ARBITER_CONFIG(port) (0x2718 + (port<<10))
764#define MV643XX_ETH_TX_QUEUE_2_ARBITER_CONFIG(port) (0x2728 + (port<<10))
765#define MV643XX_ETH_TX_QUEUE_3_ARBITER_CONFIG(port) (0x2738 + (port<<10))
766#define MV643XX_ETH_TX_QUEUE_4_ARBITER_CONFIG(port) (0x2748 + (port<<10))
767#define MV643XX_ETH_TX_QUEUE_5_ARBITER_CONFIG(port) (0x2758 + (port<<10))
768#define MV643XX_ETH_TX_QUEUE_6_ARBITER_CONFIG(port) (0x2768 + (port<<10))
769#define MV643XX_ETH_TX_QUEUE_7_ARBITER_CONFIG(port) (0x2778 + (port<<10))
770#define MV643XX_ETH_PORT_TX_TOKEN_BUCKET_COUNT(port) (0x2780 + (port<<10))
771#define MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port) (0x3400 + (port<<10))
772#define MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port) (0x3500 + (port<<10))
773#define MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE(port) (0x3600 + (port<<10))
774
775/*******************************************/ 662/*******************************************/
776/* CUNIT Registers */ 663/* CUNIT Registers */
777/*******************************************/ 664/*******************************************/
@@ -1089,219 +976,6 @@ struct mv64xxx_i2c_pdata {
1089 u32 retries; 976 u32 retries;
1090}; 977};
1091 978
1092/* These macros describe Ethernet Port configuration reg (Px_cR) bits */
1093#define MV643XX_ETH_UNICAST_NORMAL_MODE 0
1094#define MV643XX_ETH_UNICAST_PROMISCUOUS_MODE (1<<0)
1095#define MV643XX_ETH_DEFAULT_RX_QUEUE_0 0
1096#define MV643XX_ETH_DEFAULT_RX_QUEUE_1 (1<<1)
1097#define MV643XX_ETH_DEFAULT_RX_QUEUE_2 (1<<2)
1098#define MV643XX_ETH_DEFAULT_RX_QUEUE_3 ((1<<2) | (1<<1))
1099#define MV643XX_ETH_DEFAULT_RX_QUEUE_4 (1<<3)
1100#define MV643XX_ETH_DEFAULT_RX_QUEUE_5 ((1<<3) | (1<<1))
1101#define MV643XX_ETH_DEFAULT_RX_QUEUE_6 ((1<<3) | (1<<2))
1102#define MV643XX_ETH_DEFAULT_RX_QUEUE_7 ((1<<3) | (1<<2) | (1<<1))
1103#define MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_0 0
1104#define MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_1 (1<<4)
1105#define MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_2 (1<<5)
1106#define MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_3 ((1<<5) | (1<<4))
1107#define MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_4 (1<<6)
1108#define MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_5 ((1<<6) | (1<<4))
1109#define MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_6 ((1<<6) | (1<<5))
1110#define MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_7 ((1<<6) | (1<<5) | (1<<4))
1111#define MV643XX_ETH_RECEIVE_BC_IF_NOT_IP_OR_ARP 0
1112#define MV643XX_ETH_REJECT_BC_IF_NOT_IP_OR_ARP (1<<7)
1113#define MV643XX_ETH_RECEIVE_BC_IF_IP 0
1114#define MV643XX_ETH_REJECT_BC_IF_IP (1<<8)
1115#define MV643XX_ETH_RECEIVE_BC_IF_ARP 0
1116#define MV643XX_ETH_REJECT_BC_IF_ARP (1<<9)
1117#define MV643XX_ETH_TX_AM_NO_UPDATE_ERROR_SUMMARY (1<<12)
1118#define MV643XX_ETH_CAPTURE_TCP_FRAMES_DIS 0
1119#define MV643XX_ETH_CAPTURE_TCP_FRAMES_EN (1<<14)
1120#define MV643XX_ETH_CAPTURE_UDP_FRAMES_DIS 0
1121#define MV643XX_ETH_CAPTURE_UDP_FRAMES_EN (1<<15)
1122#define MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_0 0
1123#define MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_1 (1<<16)
1124#define MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_2 (1<<17)
1125#define MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_3 ((1<<17) | (1<<16))
1126#define MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_4 (1<<18)
1127#define MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_5 ((1<<18) | (1<<16))
1128#define MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_6 ((1<<18) | (1<<17))
1129#define MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_7 ((1<<18) | (1<<17) | (1<<16))
1130#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_0 0
1131#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_1 (1<<19)
1132#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_2 (1<<20)
1133#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_3 ((1<<20) | (1<<19))
1134#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_4 (1<<21)
1135#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_5 ((1<<21) | (1<<19))
1136#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_6 ((1<<21) | (1<<20))
1137#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_7 ((1<<21) | (1<<20) | (1<<19))
1138#define MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_0 0
1139#define MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_1 (1<<22)
1140#define MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_2 (1<<23)
1141#define MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_3 ((1<<23) | (1<<22))
1142#define MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_4 (1<<24)
1143#define MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_5 ((1<<24) | (1<<22))
1144#define MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_6 ((1<<24) | (1<<23))
1145#define MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_7 ((1<<24) | (1<<23) | (1<<22))
1146
1147#define MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE \
1148 MV643XX_ETH_UNICAST_NORMAL_MODE | \
1149 MV643XX_ETH_DEFAULT_RX_QUEUE_0 | \
1150 MV643XX_ETH_DEFAULT_RX_ARP_QUEUE_0 | \
1151 MV643XX_ETH_RECEIVE_BC_IF_NOT_IP_OR_ARP | \
1152 MV643XX_ETH_RECEIVE_BC_IF_IP | \
1153 MV643XX_ETH_RECEIVE_BC_IF_ARP | \
1154 MV643XX_ETH_CAPTURE_TCP_FRAMES_DIS | \
1155 MV643XX_ETH_CAPTURE_UDP_FRAMES_DIS | \
1156 MV643XX_ETH_DEFAULT_RX_TCP_QUEUE_0 | \
1157 MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_0 | \
1158 MV643XX_ETH_DEFAULT_RX_BPDU_QUEUE_0
1159
1160/* These macros describe Ethernet Port configuration extend reg (Px_cXR) bits*/
1161#define MV643XX_ETH_CLASSIFY_EN (1<<0)
1162#define MV643XX_ETH_SPAN_BPDU_PACKETS_AS_NORMAL 0
1163#define MV643XX_ETH_SPAN_BPDU_PACKETS_TO_RX_QUEUE_7 (1<<1)
1164#define MV643XX_ETH_PARTITION_DISABLE 0
1165#define MV643XX_ETH_PARTITION_ENABLE (1<<2)
1166
1167#define MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE \
1168 MV643XX_ETH_SPAN_BPDU_PACKETS_AS_NORMAL | \
1169 MV643XX_ETH_PARTITION_DISABLE
1170
1171/* These macros describe Ethernet Port Sdma configuration reg (SDCR) bits */
1172#define MV643XX_ETH_RIFB (1<<0)
1173#define MV643XX_ETH_RX_BURST_SIZE_1_64BIT 0
1174#define MV643XX_ETH_RX_BURST_SIZE_2_64BIT (1<<1)
1175#define MV643XX_ETH_RX_BURST_SIZE_4_64BIT (1<<2)
1176#define MV643XX_ETH_RX_BURST_SIZE_8_64BIT ((1<<2) | (1<<1))
1177#define MV643XX_ETH_RX_BURST_SIZE_16_64BIT (1<<3)
1178#define MV643XX_ETH_BLM_RX_NO_SWAP (1<<4)
1179#define MV643XX_ETH_BLM_RX_BYTE_SWAP 0
1180#define MV643XX_ETH_BLM_TX_NO_SWAP (1<<5)
1181#define MV643XX_ETH_BLM_TX_BYTE_SWAP 0
1182#define MV643XX_ETH_DESCRIPTORS_BYTE_SWAP (1<<6)
1183#define MV643XX_ETH_DESCRIPTORS_NO_SWAP 0
1184#define MV643XX_ETH_TX_BURST_SIZE_1_64BIT 0
1185#define MV643XX_ETH_TX_BURST_SIZE_2_64BIT (1<<22)
1186#define MV643XX_ETH_TX_BURST_SIZE_4_64BIT (1<<23)
1187#define MV643XX_ETH_TX_BURST_SIZE_8_64BIT ((1<<23) | (1<<22))
1188#define MV643XX_ETH_TX_BURST_SIZE_16_64BIT (1<<24)
1189
1190#define MV643XX_ETH_IPG_INT_RX(value) ((value & 0x3fff) << 8)
1191
1192#define MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE \
1193 MV643XX_ETH_RX_BURST_SIZE_4_64BIT | \
1194 MV643XX_ETH_IPG_INT_RX(0) | \
1195 MV643XX_ETH_TX_BURST_SIZE_4_64BIT
1196
1197/* These macros describe Ethernet Port serial control reg (PSCR) bits */
1198#define MV643XX_ETH_SERIAL_PORT_DISABLE 0
1199#define MV643XX_ETH_SERIAL_PORT_ENABLE (1<<0)
1200#define MV643XX_ETH_FORCE_LINK_PASS (1<<1)
1201#define MV643XX_ETH_DO_NOT_FORCE_LINK_PASS 0
1202#define MV643XX_ETH_ENABLE_AUTO_NEG_FOR_DUPLX 0
1203#define MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX (1<<2)
1204#define MV643XX_ETH_ENABLE_AUTO_NEG_FOR_FLOW_CTRL 0
1205#define MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL (1<<3)
1206#define MV643XX_ETH_ADV_NO_FLOW_CTRL 0
1207#define MV643XX_ETH_ADV_SYMMETRIC_FLOW_CTRL (1<<4)
1208#define MV643XX_ETH_FORCE_FC_MODE_NO_PAUSE_DIS_TX 0
1209#define MV643XX_ETH_FORCE_FC_MODE_TX_PAUSE_DIS (1<<5)
1210#define MV643XX_ETH_FORCE_BP_MODE_NO_JAM 0
1211#define MV643XX_ETH_FORCE_BP_MODE_JAM_TX (1<<7)
1212#define MV643XX_ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR (1<<8)
1213#define MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED (1<<9)
1214#define MV643XX_ETH_FORCE_LINK_FAIL 0
1215#define MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL (1<<10)
1216#define MV643XX_ETH_RETRANSMIT_16_ATTEMPTS 0
1217#define MV643XX_ETH_RETRANSMIT_FOREVER (1<<11)
1218#define MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII (1<<13)
1219#define MV643XX_ETH_ENABLE_AUTO_NEG_SPEED_GMII 0
1220#define MV643XX_ETH_DTE_ADV_0 0
1221#define MV643XX_ETH_DTE_ADV_1 (1<<14)
1222#define MV643XX_ETH_DISABLE_AUTO_NEG_BYPASS 0
1223#define MV643XX_ETH_ENABLE_AUTO_NEG_BYPASS (1<<15)
1224#define MV643XX_ETH_AUTO_NEG_NO_CHANGE 0
1225#define MV643XX_ETH_RESTART_AUTO_NEG (1<<16)
1226#define MV643XX_ETH_MAX_RX_PACKET_1518BYTE 0
1227#define MV643XX_ETH_MAX_RX_PACKET_1522BYTE (1<<17)
1228#define MV643XX_ETH_MAX_RX_PACKET_1552BYTE (1<<18)
1229#define MV643XX_ETH_MAX_RX_PACKET_9022BYTE ((1<<18) | (1<<17))
1230#define MV643XX_ETH_MAX_RX_PACKET_9192BYTE (1<<19)
1231#define MV643XX_ETH_MAX_RX_PACKET_9700BYTE ((1<<19) | (1<<17))
1232#define MV643XX_ETH_SET_EXT_LOOPBACK (1<<20)
1233#define MV643XX_ETH_CLR_EXT_LOOPBACK 0
1234#define MV643XX_ETH_SET_FULL_DUPLEX_MODE (1<<21)
1235#define MV643XX_ETH_SET_HALF_DUPLEX_MODE 0
1236#define MV643XX_ETH_ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX (1<<22)
1237#define MV643XX_ETH_DISABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX 0
1238#define MV643XX_ETH_SET_GMII_SPEED_TO_10_100 0
1239#define MV643XX_ETH_SET_GMII_SPEED_TO_1000 (1<<23)
1240#define MV643XX_ETH_SET_MII_SPEED_TO_10 0
1241#define MV643XX_ETH_SET_MII_SPEED_TO_100 (1<<24)
1242
1243#define MV643XX_ETH_MAX_RX_PACKET_MASK (0x7<<17)
1244
1245#define MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE \
1246 MV643XX_ETH_DO_NOT_FORCE_LINK_PASS | \
1247 MV643XX_ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \
1248 MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL | \
1249 MV643XX_ETH_ADV_SYMMETRIC_FLOW_CTRL | \
1250 MV643XX_ETH_FORCE_FC_MODE_NO_PAUSE_DIS_TX | \
1251 MV643XX_ETH_FORCE_BP_MODE_NO_JAM | \
1252 (1<<9) /* reserved */ | \
1253 MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL | \
1254 MV643XX_ETH_RETRANSMIT_16_ATTEMPTS | \
1255 MV643XX_ETH_ENABLE_AUTO_NEG_SPEED_GMII | \
1256 MV643XX_ETH_DTE_ADV_0 | \
1257 MV643XX_ETH_DISABLE_AUTO_NEG_BYPASS | \
1258 MV643XX_ETH_AUTO_NEG_NO_CHANGE | \
1259 MV643XX_ETH_MAX_RX_PACKET_9700BYTE | \
1260 MV643XX_ETH_CLR_EXT_LOOPBACK | \
1261 MV643XX_ETH_SET_FULL_DUPLEX_MODE | \
1262 MV643XX_ETH_ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX
1263
1264/* These macros describe Ethernet Serial Status reg (PSR) bits */
1265#define MV643XX_ETH_PORT_STATUS_MODE_10_BIT (1<<0)
1266#define MV643XX_ETH_PORT_STATUS_LINK_UP (1<<1)
1267#define MV643XX_ETH_PORT_STATUS_FULL_DUPLEX (1<<2)
1268#define MV643XX_ETH_PORT_STATUS_FLOW_CONTROL (1<<3)
1269#define MV643XX_ETH_PORT_STATUS_GMII_1000 (1<<4)
1270#define MV643XX_ETH_PORT_STATUS_MII_100 (1<<5)
1271/* PSR bit 6 is undocumented */
1272#define MV643XX_ETH_PORT_STATUS_TX_IN_PROGRESS (1<<7)
1273#define MV643XX_ETH_PORT_STATUS_AUTONEG_BYPASSED (1<<8)
1274#define MV643XX_ETH_PORT_STATUS_PARTITION (1<<9)
1275#define MV643XX_ETH_PORT_STATUS_TX_FIFO_EMPTY (1<<10)
1276/* PSR bits 11-31 are reserved */
1277
1278#define MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE 800
1279#define MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE 400
1280
1281#define MV643XX_ETH_DESC_SIZE 64
1282
1283#define MV643XX_ETH_SHARED_NAME "mv643xx_eth_shared"
1284#define MV643XX_ETH_NAME "mv643xx_eth"
1285
1286struct mv643xx_eth_platform_data {
1287 int port_number;
1288 u16 force_phy_addr; /* force override if phy_addr == 0 */
1289 u16 phy_addr;
1290
1291 /* If speed is 0, then speed and duplex are autonegotiated. */
1292 int speed; /* 0, SPEED_10, SPEED_100, SPEED_1000 */
1293 int duplex; /* DUPLEX_HALF or DUPLEX_FULL */
1294
1295 /* non-zero values of the following fields override defaults */
1296 u32 tx_queue_size;
1297 u32 rx_queue_size;
1298 u32 tx_sram_addr;
1299 u32 tx_sram_size;
1300 u32 rx_sram_addr;
1301 u32 rx_sram_size;
1302 u8 mac_addr[6]; /* mac address if non-zero*/
1303};
1304
1305/* Watchdog Platform Device, Driver Data */ 979/* Watchdog Platform Device, Driver Data */
1306#define MV64x60_WDT_NAME "mv64x60_wdt" 980#define MV64x60_WDT_NAME "mv64x60_wdt"
1307 981
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
new file mode 100644
index 000000000000..3f272396642b
--- /dev/null
+++ b/include/linux/mv643xx_eth.h
@@ -0,0 +1,31 @@
1/*
2 * MV-643XX ethernet platform device data definition file.
3 */
4#ifndef __LINUX_MV643XX_ETH_H
5#define __LINUX_MV643XX_ETH_H
6
7#define MV643XX_ETH_SHARED_NAME "mv643xx_eth_shared"
8#define MV643XX_ETH_NAME "mv643xx_eth"
9#define MV643XX_ETH_SHARED_REGS 0x2000
10#define MV643XX_ETH_SHARED_REGS_SIZE 0x2000
11
12struct mv643xx_eth_platform_data {
13 int port_number;
14 u16 force_phy_addr; /* force override if phy_addr == 0 */
15 u16 phy_addr;
16
17 /* If speed is 0, then speed and duplex are autonegotiated. */
18 int speed; /* 0, SPEED_10, SPEED_100, SPEED_1000 */
19 int duplex; /* DUPLEX_HALF or DUPLEX_FULL */
20
21 /* non-zero values of the following fields override defaults */
22 u32 tx_queue_size;
23 u32 rx_queue_size;
24 u32 tx_sram_addr;
25 u32 tx_sram_size;
26 u32 rx_sram_addr;
27 u32 rx_sram_size;
28 u8 mac_addr[6]; /* mac address if non-zero*/
29};
30
31#endif /* __LINUX_MV643XX_ETH_H */
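Splitting the platform data out into linux/mv643xx_eth.h lets board code
instantiate an ethernet port without pulling in the full mv643xx register
map, while the #include added to mv643xx.h keeps existing users building
unchanged. A sketch of how a board file consumes the new header; the device
id and field values are illustrative, not taken from a real board:

	#include <linux/platform_device.h>
	#include <linux/mv643xx_eth.h>

	static struct mv643xx_eth_platform_data eth0_pd = {
		.port_number	= 0,
		.speed		= 0,	/* 0 = autonegotiate speed and duplex */
		.tx_queue_size	= 800,	/* non-zero values override defaults */
	};

	static struct platform_device eth0_device = {
		.name	= MV643XX_ETH_NAME,
		.id	= 0,
		.dev	= {
			.platform_data = &eth0_pd,
		},
	};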
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 4a3f54e358e5..c4de536cefa3 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -834,7 +834,7 @@ static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
834 const void *daddr, const void *saddr, 834 const void *daddr, const void *saddr,
835 unsigned len) 835 unsigned len)
836{ 836{
837 if (!dev->header_ops) 837 if (!dev->header_ops || !dev->header_ops->create)
838 return 0; 838 return 0;
839 839
840 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); 840 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
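The netdevice.h hunk closes a NULL-dereference window: with the header_ops
conversion, a device that builds no link-layer header (or implements only
parse/rebuild) may leave ->create NULL, and dev_hard_header() must treat
that like "no header to push" instead of jumping through a NULL pointer. A
header_ops instance like the following fragment becomes legal; dummy_parse
is a hypothetical stand-in:

	static int dummy_parse(const struct sk_buff *skb, unsigned char *haddr)
	{
		return 0;	/* hypothetical: no hardware address to report */
	}

	static const struct header_ops minimal_header_ops = {
		.parse	= dummy_parse,
		/* .create left NULL: dev_hard_header() now just returns 0 */
	};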