path: root/drivers/net/mv643xx_eth.c
author    Lennert Buytenhek <buytenh@wantstofly.org>    2008-05-31 19:03:23 -0400
committer Lennert Buytenhek <buytenh@wantstofly.org>    2008-06-12 02:40:23 -0400
commit    3cb4667c5b35ad3cafe57f24dab5284056d89df1 (patch)
tree      7f6e9cdb8efc3585debfcdc53c2da9796b2fcf9b /drivers/net/mv643xx_eth.c
parent    a779d38ccf558576ab911e8ca0d66bbd92fcaba9 (diff)
mv643xx_eth: shorten reg names

Shorten the various oversized register names in mv643xx_eth.c, to
increase readability.

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Acked-by: Dale Farnsworth <dale@farnsworth.org>
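For illustration only (not part of the original commit message): a minimal before/after sketch of the kind of call site this rename cleans up, taken from the mv643xx_eth_shutdown() hunk in the diff below; rdl()/wrl() are the driver's existing register accessors.

        /* before: mask all port interrupts, then read back to flush the write */
        wrl(mp, INTERRUPT_MASK_REG(port_num), 0);
        rdl(mp, INTERRUPT_MASK_REG(port_num));

        /* after: identical operation, shorter register name */
        wrl(mp, INT_MASK(port_num), 0);
        rdl(mp, INT_MASK(port_num));
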
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--  drivers/net/mv643xx_eth.c  204
1 files changed, 96 insertions, 108 deletions
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index f6ff30ed6377..76cf89c3ee27 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -84,38 +84,38 @@ static char mv643xx_driver_version[] = "1.0";
 /*
  * Registers shared between all ports.
  */
-#define PHY_ADDR_REG                    0x0000
+#define PHY_ADDR                        0x0000
 #define SMI_REG                         0x0004
-#define WINDOW_BASE(i)                  (0x0200 + ((i) << 3))
-#define WINDOW_SIZE(i)                  (0x0204 + ((i) << 3))
-#define WINDOW_REMAP_HIGH(i)            (0x0280 + ((i) << 2))
+#define WINDOW_BASE(w)                  (0x0200 + ((w) << 3))
+#define WINDOW_SIZE(w)                  (0x0204 + ((w) << 3))
+#define WINDOW_REMAP_HIGH(w)            (0x0280 + ((w) << 2))
 #define WINDOW_BAR_ENABLE               0x0290
-#define WINDOW_PROTECT(i)               (0x0294 + ((i) << 4))
+#define WINDOW_PROTECT(w)               (0x0294 + ((w) << 4))
 
 /*
  * Per-port registers.
  */
-#define PORT_CONFIG_REG(p)              (0x0400 + ((p) << 10))
-#define PORT_CONFIG_EXTEND_REG(p)       (0x0404 + ((p) << 10))
+#define PORT_CONFIG(p)                  (0x0400 + ((p) << 10))
+#define PORT_CONFIG_EXT(p)              (0x0404 + ((p) << 10))
 #define MAC_ADDR_LOW(p)                 (0x0414 + ((p) << 10))
 #define MAC_ADDR_HIGH(p)                (0x0418 + ((p) << 10))
-#define SDMA_CONFIG_REG(p)              (0x041c + ((p) << 10))
-#define PORT_SERIAL_CONTROL_REG(p)      (0x043c + ((p) << 10))
-#define PORT_STATUS_REG(p)              (0x0444 + ((p) << 10))
-#define TRANSMIT_QUEUE_COMMAND_REG(p)   (0x0448 + ((p) << 10))
-#define MAXIMUM_TRANSMIT_UNIT(p)        (0x0458 + ((p) << 10))
-#define INTERRUPT_CAUSE_REG(p)          (0x0460 + ((p) << 10))
-#define INTERRUPT_CAUSE_EXTEND_REG(p)   (0x0464 + ((p) << 10))
-#define INTERRUPT_MASK_REG(p)           (0x0468 + ((p) << 10))
-#define INTERRUPT_EXTEND_MASK_REG(p)    (0x046c + ((p) << 10))
-#define TX_FIFO_URGENT_THRESHOLD_REG(p) (0x0474 + ((p) << 10))
-#define RX_CURRENT_QUEUE_DESC_PTR_0(p)  (0x060c + ((p) << 10))
-#define RECEIVE_QUEUE_COMMAND_REG(p)    (0x0680 + ((p) << 10))
-#define TX_CURRENT_QUEUE_DESC_PTR_0(p)  (0x06c0 + ((p) << 10))
-#define MIB_COUNTERS_BASE(p)            (0x1000 + ((p) << 7))
-#define DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(p)  (0x1400 + ((p) << 10))
-#define DA_FILTER_OTHER_MULTICAST_TABLE_BASE(p)    (0x1500 + ((p) << 10))
-#define DA_FILTER_UNICAST_TABLE_BASE(p) (0x1600 + ((p) << 10))
+#define SDMA_CONFIG(p)                  (0x041c + ((p) << 10))
+#define PORT_SERIAL_CONTROL(p)          (0x043c + ((p) << 10))
+#define PORT_STATUS(p)                  (0x0444 + ((p) << 10))
+#define TXQ_COMMAND(p)                  (0x0448 + ((p) << 10))
+#define TX_BW_MTU(p)                    (0x0458 + ((p) << 10))
+#define INT_CAUSE(p)                    (0x0460 + ((p) << 10))
+#define INT_CAUSE_EXT(p)                (0x0464 + ((p) << 10))
+#define INT_MASK(p)                     (0x0468 + ((p) << 10))
+#define INT_MASK_EXT(p)                 (0x046c + ((p) << 10))
+#define TX_FIFO_URGENT_THRESHOLD(p)     (0x0474 + ((p) << 10))
+#define RXQ_CURRENT_DESC_PTR(p)         (0x060c + ((p) << 10))
+#define RXQ_COMMAND(p)                  (0x0680 + ((p) << 10))
+#define TXQ_CURRENT_DESC_PTR(p)         (0x06c0 + ((p) << 10))
+#define MIB_COUNTERS(p)                 (0x1000 + ((p) << 7))
+#define SPECIAL_MCAST_TABLE(p)          (0x1400 + ((p) << 10))
+#define OTHER_MCAST_TABLE(p)            (0x1500 + ((p) << 10))
+#define UNICAST_TABLE(p)                (0x1600 + ((p) << 10))
 
 /* These macros describe Ethernet Port configuration reg (Px_cR) bits */
 #define UNICAST_NORMAL_MODE             (0 << 0)
@@ -600,7 +600,7 @@ static inline void wrl(struct mv643xx_private *mp, int offset, u32 data)
 static void mv643xx_eth_port_enable_rx(struct mv643xx_private *mp,
                                        unsigned int queues)
 {
-        wrl(mp, RECEIVE_QUEUE_COMMAND_REG(mp->port_num), queues);
+        wrl(mp, RXQ_COMMAND(mp->port_num), queues);
 }
 
 static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp)
@@ -609,14 +609,14 @@ static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp)
         u32 queues;
 
         /* Stop Rx port activity. Check port Rx activity. */
-        queues = rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF;
+        queues = rdl(mp, RXQ_COMMAND(port_num)) & 0xFF;
         if (queues) {
                 /* Issue stop command for active queues only */
-                wrl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8));
+                wrl(mp, RXQ_COMMAND(port_num), (queues << 8));
 
                 /* Wait for all Rx activity to terminate. */
                 /* Check port cause register that all Rx queues are stopped */
-                while (rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF)
+                while (rdl(mp, RXQ_COMMAND(port_num)) & 0xFF)
                         udelay(PHY_WAIT_MICRO_SECONDS);
         }
 
@@ -626,7 +626,7 @@ static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp)
 static void mv643xx_eth_port_enable_tx(struct mv643xx_private *mp,
                                        unsigned int queues)
 {
-        wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(mp->port_num), queues);
+        wrl(mp, TXQ_COMMAND(mp->port_num), queues);
 }
 
 static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp)
@@ -635,19 +635,18 @@ static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp)
         u32 queues;
 
         /* Stop Tx port activity. Check port Tx activity. */
-        queues = rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF;
+        queues = rdl(mp, TXQ_COMMAND(port_num)) & 0xFF;
         if (queues) {
                 /* Issue stop command for active queues only */
-                wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8));
+                wrl(mp, TXQ_COMMAND(port_num), (queues << 8));
 
                 /* Wait for all Tx activity to terminate. */
                 /* Check port cause register that all Tx queues are stopped */
-                while (rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF)
+                while (rdl(mp, TXQ_COMMAND(port_num)) & 0xFF)
                         udelay(PHY_WAIT_MICRO_SECONDS);
 
                 /* Wait for Tx FIFO to empty */
-                while (rdl(mp, PORT_STATUS_REG(port_num)) &
-                                                ETH_PORT_TX_FIFO_EMPTY)
+                while (rdl(mp, PORT_STATUS(port_num)) & ETH_PORT_TX_FIFO_EMPTY)
                         udelay(PHY_WAIT_MICRO_SECONDS);
         }
 
@@ -954,15 +953,15 @@ static int mv643xx_poll(struct napi_struct *napi, int budget)
 #endif
 
         work_done = 0;
-        if ((rdl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
+        if ((rdl(mp, RXQ_CURRENT_DESC_PTR(port_num)))
             != (u32) mp->rx_used_desc_q)
                 work_done = mv643xx_eth_receive_queue(dev, budget);
 
         if (work_done < budget) {
                 netif_rx_complete(dev, napi);
-                wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0);
-                wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
-                wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
+                wrl(mp, INT_CAUSE(port_num), 0);
+                wrl(mp, INT_CAUSE_EXT(port_num), 0);
+                wrl(mp, INT_MASK(port_num), ETH_INT_UNMASK_ALL);
         }
 
         return work_done;
@@ -1298,12 +1297,12 @@ static void eth_clear_mib_counters(struct mv643xx_private *mp)
         /* Perform dummy reads from MIB counters */
         for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
                                                                         i += 4)
-                rdl(mp, MIB_COUNTERS_BASE(port_num) + i);
+                rdl(mp, MIB_COUNTERS(port_num) + i);
 }
 
 static inline u32 read_mib(struct mv643xx_private *mp, int offset)
 {
-        return rdl(mp, MIB_COUNTERS_BASE(mp->port_num) + offset);
+        return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
 }
 
 static void eth_update_mib_counters(struct mv643xx_private *mp)
@@ -1536,16 +1535,13 @@ static void eth_port_init_mac_tables(struct mv643xx_private *mp)
 
         /* Clear DA filter unicast table (Ex_dFUT) */
         for (table_index = 0; table_index <= 0xC; table_index += 4)
-                wrl(mp, DA_FILTER_UNICAST_TABLE_BASE(port_num) +
-                                        table_index, 0);
+                wrl(mp, UNICAST_TABLE(port_num) + table_index, 0);
 
         for (table_index = 0; table_index <= 0xFC; table_index += 4) {
                 /* Clear DA filter special multicast table (Ex_dFSMT) */
-                wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num) +
-                                        table_index, 0);
+                wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0);
                 /* Clear DA filter other multicast table (Ex_dFOMT) */
-                wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num) +
-                                        table_index, 0);
+                wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0);
         }
 }
 
@@ -1593,7 +1589,7 @@ static void eth_port_uc_addr_set(struct mv643xx_private *mp,
         wrl(mp, MAC_ADDR_HIGH(port_num), mac_h);
 
         /* Accept frames with this address */
-        table = DA_FILTER_UNICAST_TABLE_BASE(port_num);
+        table = UNICAST_TABLE(port_num);
         eth_port_set_filter_table_entry(mp, table, p_addr[5] & 0x0f);
 }
 
@@ -1662,7 +1658,7 @@ static void eth_port_mc_addr(struct mv643xx_private *mp, unsigned char *p_addr)
 
         if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
             (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
-                table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num);
+                table = SPECIAL_MCAST_TABLE(port_num);
                 eth_port_set_filter_table_entry(mp, table, p_addr[5]);
                 return;
         }
@@ -1735,7 +1731,7 @@ static void eth_port_mc_addr(struct mv643xx_private *mp, unsigned char *p_addr)
         for (i = 0; i < 8; i++)
                 crc_result = crc_result | (crc[i] << i);
 
-        table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num);
+        table = OTHER_MCAST_TABLE(port_num);
         eth_port_set_filter_table_entry(mp, table, crc_result);
 }
 
@@ -1765,7 +1761,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
                          * 3-1  Queue    ETH_Q0=0
                          * 7-4  Reserved = 0;
                          */
-                        wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+                        wrl(mp, SPECIAL_MCAST_TABLE(eth_port_num) + table_index, 0x01010101);
 
                         /* Set all entries in DA filter other multicast
                          * table (Ex_dFOMT)
@@ -1775,7 +1771,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
                          * 3-1  Queue    ETH_Q0=0
                          * 7-4  Reserved = 0;
                          */
-                        wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+                        wrl(mp, OTHER_MCAST_TABLE(eth_port_num) + table_index, 0x01010101);
                 }
                 return;
         }
@@ -1785,12 +1781,10 @@ static void eth_port_set_multicast_list(struct net_device *dev)
          */
         for (table_index = 0; table_index <= 0xFC; table_index += 4) {
                 /* Clear DA filter special multicast table (Ex_dFSMT) */
-                wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
-                                (eth_port_num) + table_index, 0);
+                wrl(mp, SPECIAL_MCAST_TABLE(eth_port_num) + table_index, 0);
 
                 /* Clear DA filter other multicast table (Ex_dFOMT) */
-                wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE
-                                (eth_port_num) + table_index, 0);
+                wrl(mp, OTHER_MCAST_TABLE(eth_port_num) + table_index, 0);
         }
 
         /* Get pointer to net_device multicast list and add each one... */
@@ -1814,12 +1808,12 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
         struct mv643xx_private *mp = netdev_priv(dev);
         u32 config_reg;
 
-        config_reg = rdl(mp, PORT_CONFIG_REG(mp->port_num));
+        config_reg = rdl(mp, PORT_CONFIG(mp->port_num));
         if (dev->flags & IFF_PROMISC)
                 config_reg |= (u32) UNICAST_PROMISCUOUS_MODE;
         else
                 config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE;
-        wrl(mp, PORT_CONFIG_REG(mp->port_num), config_reg);
+        wrl(mp, PORT_CONFIG(mp->port_num), config_reg);
 
         eth_port_set_multicast_list(dev);
 }
@@ -2050,7 +2044,7 @@ static void mv643xx_eth_update_pscr(struct net_device *dev,
         u32 o_pscr, n_pscr;
         unsigned int queues;
 
-        o_pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
+        o_pscr = rdl(mp, PORT_SERIAL_CONTROL(port_num));
         n_pscr = o_pscr;
 
         /* clear speed, duplex and rx buffer size fields */
@@ -2073,14 +2067,14 @@ static void mv643xx_eth_update_pscr(struct net_device *dev,
 
         if (n_pscr != o_pscr) {
                 if ((o_pscr & SERIAL_PORT_ENABLE) == 0)
-                        wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
+                        wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr);
                 else {
                         queues = mv643xx_eth_port_disable_tx(mp);
 
                         o_pscr &= ~SERIAL_PORT_ENABLE;
-                        wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), o_pscr);
-                        wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
-                        wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
+                        wrl(mp, PORT_SERIAL_CONTROL(port_num), o_pscr);
+                        wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr);
+                        wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr);
                         if (queues)
                                 mv643xx_eth_port_enable_tx(mp, queues);
                 }
@@ -2106,14 +2100,11 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
         unsigned int port_num = mp->port_num;
 
         /* Read interrupt cause registers */
-        eth_int_cause = rdl(mp, INTERRUPT_CAUSE_REG(port_num)) &
-                                                ETH_INT_UNMASK_ALL;
+        eth_int_cause = rdl(mp, INT_CAUSE(port_num)) & ETH_INT_UNMASK_ALL;
         if (eth_int_cause & ETH_INT_CAUSE_EXT) {
-                eth_int_cause_ext = rdl(mp,
-                        INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
-                                                ETH_INT_UNMASK_ALL_EXT;
-                wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num),
-                                                ~eth_int_cause_ext);
+                eth_int_cause_ext = rdl(mp, INT_CAUSE_EXT(port_num))
+                                & ETH_INT_UNMASK_ALL_EXT;
+                wrl(mp, INT_CAUSE_EXT(port_num), ~eth_int_cause_ext);
         }
 
         /* PHY status changed */
@@ -2139,10 +2130,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 #ifdef MV643XX_NAPI
         if (eth_int_cause & ETH_INT_CAUSE_RX) {
                 /* schedule the NAPI poll routine to maintain port */
-                wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+                wrl(mp, INT_MASK(port_num), ETH_INT_MASK_ALL);
 
                 /* wait for previous write to complete */
-                rdl(mp, INTERRUPT_MASK_REG(port_num));
+                rdl(mp, INT_MASK(port_num));
 
                 netif_rx_schedule(dev, &mp->napi);
         }
@@ -2232,28 +2223,26 @@ static void eth_port_start(struct net_device *dev)
 
         /* Assignment of Tx CTRP of given queue */
         tx_curr_desc = mp->tx_curr_desc_q;
-        wrl(mp, TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
+        wrl(mp, TXQ_CURRENT_DESC_PTR(port_num),
                 (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc));
 
         /* Assignment of Rx CRDP of given queue */
         rx_curr_desc = mp->rx_curr_desc_q;
-        wrl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
+        wrl(mp, RXQ_CURRENT_DESC_PTR(port_num),
                 (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));
 
         /* Add the assigned Ethernet address to the port's address table */
         eth_port_uc_addr_set(mp, dev->dev_addr);
 
         /* Assign port configuration and command. */
-        wrl(mp, PORT_CONFIG_REG(port_num),
-                        PORT_CONFIG_DEFAULT_VALUE);
+        wrl(mp, PORT_CONFIG(port_num), PORT_CONFIG_DEFAULT_VALUE);
 
-        wrl(mp, PORT_CONFIG_EXTEND_REG(port_num),
-                        PORT_CONFIG_EXTEND_DEFAULT_VALUE);
+        wrl(mp, PORT_CONFIG_EXT(port_num), PORT_CONFIG_EXTEND_DEFAULT_VALUE);
 
-        pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
+        pscr = rdl(mp, PORT_SERIAL_CONTROL(port_num));
 
         pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
-        wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
+        wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);
 
         pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
                 DISABLE_AUTO_NEG_SPEED_GMII |
@@ -2261,20 +2250,19 @@ static void eth_port_start(struct net_device *dev)
                 DO_NOT_FORCE_LINK_FAIL |
                 SERIAL_PORT_CONTROL_RESERVED;
 
-        wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
+        wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);
 
         pscr |= SERIAL_PORT_ENABLE;
-        wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
+        wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);
 
         /* Assign port SDMA configuration */
-        wrl(mp, SDMA_CONFIG_REG(port_num),
-                        PORT_SDMA_CONFIG_DEFAULT_VALUE);
+        wrl(mp, SDMA_CONFIG(port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);
 
         /* Enable port Rx. */
         mv643xx_eth_port_enable_rx(mp, ETH_RX_QUEUES_ENABLED);
 
         /* Disable port bandwidth limits by clearing MTU register */
-        wrl(mp, MAXIMUM_TRANSMIT_UNIT(port_num), 0);
+        wrl(mp, TX_BW_MTU(port_num), 0);
 
         /* save phy settings across reset */
         mv643xx_get_settings(dev, &ethtool_cmd);
@@ -2313,9 +2301,9 @@ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp,
         unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
 
         /* Set RX Coalescing mechanism */
-        wrl(mp, SDMA_CONFIG_REG(port_num),
+        wrl(mp, SDMA_CONFIG(port_num),
                 ((coal & 0x3fff) << 8) |
-                (rdl(mp, SDMA_CONFIG_REG(port_num))
+                (rdl(mp, SDMA_CONFIG(port_num))
                         & 0xffc000ff));
 
         return coal;
@@ -2350,7 +2338,7 @@ static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp,
         unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
 
         /* Set TX Coalescing mechanism */
-        wrl(mp, TX_FIFO_URGENT_THRESHOLD_REG(mp->port_num), coal << 4);
+        wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), coal << 4);
 
         return coal;
 }
@@ -2409,10 +2397,10 @@ static int mv643xx_eth_open(struct net_device *dev)
         int err;
 
         /* Clear any pending ethernet port interrupts */
-        wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0);
-        wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+        wrl(mp, INT_CAUSE(port_num), 0);
+        wrl(mp, INT_CAUSE_EXT(port_num), 0);
         /* wait for previous write to complete */
-        rdl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num));
+        rdl(mp, INT_CAUSE_EXT(port_num));
 
         err = request_irq(dev->irq, mv643xx_eth_int_handler,
                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
@@ -2518,10 +2506,10 @@ static int mv643xx_eth_open(struct net_device *dev)
         eth_port_set_tx_coal(mp, MV643XX_TX_COAL);
 
         /* Unmask phy and link status changes interrupts */
-        wrl(mp, INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT);
+        wrl(mp, INT_MASK_EXT(port_num), ETH_INT_UNMASK_ALL_EXT);
 
         /* Unmask RX buffer and TX end interrupt */
-        wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
+        wrl(mp, INT_MASK(port_num), ETH_INT_UNMASK_ALL);
 
         return 0;
 
@@ -2565,11 +2553,11 @@ static void eth_port_reset(struct mv643xx_private *mp)
         eth_clear_mib_counters(mp);
 
         /* Reset the Enable bit in the Configuration Register */
-        reg_data = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
+        reg_data = rdl(mp, PORT_SERIAL_CONTROL(port_num));
         reg_data &= ~(SERIAL_PORT_ENABLE |
                         DO_NOT_FORCE_LINK_FAIL |
                         FORCE_LINK_PASS);
-        wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), reg_data);
+        wrl(mp, PORT_SERIAL_CONTROL(port_num), reg_data);
 }
 
 /*
@@ -2588,9 +2576,9 @@ static int mv643xx_eth_stop(struct net_device *dev)
         unsigned int port_num = mp->port_num;
 
         /* Mask all interrupts on ethernet port */
-        wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+        wrl(mp, INT_MASK(port_num), ETH_INT_MASK_ALL);
         /* wait for previous write to complete */
-        rdl(mp, INTERRUPT_MASK_REG(port_num));
+        rdl(mp, INT_MASK(port_num));
 
 #ifdef MV643XX_NAPI
         napi_disable(&mp->napi);
@@ -2693,13 +2681,13 @@ static void mv643xx_netpoll(struct net_device *netdev)
         struct mv643xx_private *mp = netdev_priv(netdev);
         int port_num = mp->port_num;
 
-        wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+        wrl(mp, INT_MASK(port_num), ETH_INT_MASK_ALL);
         /* wait for previous write to complete */
-        rdl(mp, INTERRUPT_MASK_REG(port_num));
+        rdl(mp, INT_MASK(port_num));
 
         mv643xx_eth_int_handler(netdev->irq, netdev);
 
-        wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
+        wrl(mp, INT_MASK(port_num), ETH_INT_UNMASK_ALL);
 }
 #endif
 
@@ -2843,10 +2831,10 @@ static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr)
         u32 reg_data;
         int addr_shift = 5 * mp->port_num;
 
-        reg_data = rdl(mp, PHY_ADDR_REG);
+        reg_data = rdl(mp, PHY_ADDR);
         reg_data &= ~(0x1f << addr_shift);
         reg_data |= (phy_addr & 0x1f) << addr_shift;
-        wrl(mp, PHY_ADDR_REG, reg_data);
+        wrl(mp, PHY_ADDR, reg_data);
 }
 
 /*
@@ -2869,7 +2857,7 @@ static int ethernet_phy_get(struct mv643xx_private *mp)
 {
         unsigned int reg_data;
 
-        reg_data = rdl(mp, PHY_ADDR_REG);
+        reg_data = rdl(mp, PHY_ADDR);
 
         return ((reg_data >> (5 * mp->port_num)) & 0x1f);
 }
@@ -3147,8 +3135,8 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev)
         unsigned int port_num = mp->port_num;
 
         /* Mask all interrupts on ethernet port */
-        wrl(mp, INTERRUPT_MASK_REG(port_num), 0);
-        rdl(mp, INTERRUPT_MASK_REG(port_num));
+        wrl(mp, INT_MASK(port_num), 0);
+        rdl(mp, INT_MASK(port_num));
 
         eth_port_reset(mp);
 }