author		Lennert Buytenhek <buytenh@marvell.com>		2008-11-20 06:57:36 -0500
committer	David S. Miller <davem@davemloft.net>		2008-11-20 06:57:36 -0500
commit		37a6084f4b4693a408ac2fb229843af9f9f301ce (patch)
tree		f3ab879e190b9f1299c532afb73095b1ee2ed50a /drivers
parent		10a9948d13eb51d757684da4354cf67891dc3481 (diff)
mv643xx_eth: introduce per-port register area pointer
The mv643xx_eth driver uses the rdl()/wrl() macros to read and write
hardware registers.  Per-port registers are accessed in the following
way:

	#define PORT_STATUS(p)		(0x0444 + ((p) << 10))

	[...]

	static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
	{
		return readl(mp->shared->base + offset);
	}

	[...]

	port_status = rdl(mp, PORT_STATUS(mp->port_num));

By giving the per-port 'struct mv643xx_eth_private' its own
'void __iomem *base' pointer that points to the per-port register area,
we can get rid of both the double indirection and the << 10 that is done
for every per-port register access -- this patch does that.

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
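[Editorial note: the stand-alone C program below is illustrative only, not driver code; main() and the *_OLD/*_NEW macro names are made up for the comparison. It shows that the new per-port base plus small offset resolves to the same register offsets the old '(p) << 10' macros computed.]

	#include <stdio.h>

	/* Old style: per-port stride folded into every register macro. */
	#define PORT_STATUS_OLD(p)	(0x0444 + ((p) << 10))
	/* New style: offset within the port's own 0x0400/0x0800/0x0c00 block. */
	#define PORT_STATUS_NEW		0x0044

	int main(void)
	{
		int p;

		for (p = 0; p < 3; p++) {
			/* What rdl(mp, PORT_STATUS(p)) added to mp->shared->base. */
			unsigned int old_off = PORT_STATUS_OLD(p);

			/* What rdlp(mp, PORT_STATUS) adds on top of mp->base,
			 * with mp->base = shared base + 0x0400 + (p << 10). */
			unsigned int new_off = 0x0400 + (p << 10) + PORT_STATUS_NEW;

			printf("port %d: old 0x%04x, new 0x%04x\n", p, old_off, new_off);
		}
		return 0;
	}

Both columns print 0x0444, 0x0844 and 0x0c44: the patch simply moves the
'0x0400 + (p << 10)' part out of every macro and into a pointer computed
once at probe time.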
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/mv643xx_eth.c	230
1 files changed, 121 insertions, 109 deletions
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 1d54ba315a3b..0f73d85a6e9d 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -78,16 +78,17 @@ static char mv643xx_eth_driver_version[] = "1.4";
 #define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))
 
 /*
- * Per-port registers.
+ * Main per-port registers.  These live at offset 0x0400 for
+ * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
  */
-#define PORT_CONFIG(p)			(0x0400 + ((p) << 10))
+#define PORT_CONFIG			0x0000
 #define  UNICAST_PROMISCUOUS_MODE	0x00000001
-#define PORT_CONFIG_EXT(p)		(0x0404 + ((p) << 10))
-#define MAC_ADDR_LOW(p)			(0x0414 + ((p) << 10))
-#define MAC_ADDR_HIGH(p)		(0x0418 + ((p) << 10))
-#define SDMA_CONFIG(p)			(0x041c + ((p) << 10))
-#define PORT_SERIAL_CONTROL(p)		(0x043c + ((p) << 10))
-#define PORT_STATUS(p)			(0x0444 + ((p) << 10))
+#define PORT_CONFIG_EXT			0x0004
+#define MAC_ADDR_LOW			0x0014
+#define MAC_ADDR_HIGH			0x0018
+#define SDMA_CONFIG			0x001c
+#define PORT_SERIAL_CONTROL		0x003c
+#define PORT_STATUS			0x0044
 #define  TX_FIFO_EMPTY			0x00000400
 #define  TX_IN_PROGRESS			0x00000080
 #define  PORT_SPEED_MASK		0x00000030
@@ -97,31 +98,35 @@ static char mv643xx_eth_driver_version[] = "1.4";
 #define  FLOW_CONTROL_ENABLED		0x00000008
 #define  FULL_DUPLEX			0x00000004
 #define  LINK_UP			0x00000002
-#define TXQ_COMMAND(p)			(0x0448 + ((p) << 10))
-#define TXQ_FIX_PRIO_CONF(p)		(0x044c + ((p) << 10))
-#define TX_BW_RATE(p)			(0x0450 + ((p) << 10))
-#define TX_BW_MTU(p)			(0x0458 + ((p) << 10))
-#define TX_BW_BURST(p)			(0x045c + ((p) << 10))
-#define INT_CAUSE(p)			(0x0460 + ((p) << 10))
+#define TXQ_COMMAND			0x0048
+#define TXQ_FIX_PRIO_CONF		0x004c
+#define TX_BW_RATE			0x0050
+#define TX_BW_MTU			0x0058
+#define TX_BW_BURST			0x005c
+#define INT_CAUSE			0x0060
 #define  INT_TX_END			0x07f80000
 #define  INT_RX				0x000003fc
 #define  INT_EXT			0x00000002
-#define INT_CAUSE_EXT(p)		(0x0464 + ((p) << 10))
+#define INT_CAUSE_EXT			0x0064
 #define  INT_EXT_LINK_PHY		0x00110000
 #define  INT_EXT_TX			0x000000ff
-#define INT_MASK(p)			(0x0468 + ((p) << 10))
-#define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
-#define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
-#define TXQ_FIX_PRIO_CONF_MOVED(p)	(0x04dc + ((p) << 10))
-#define TX_BW_RATE_MOVED(p)		(0x04e0 + ((p) << 10))
-#define TX_BW_MTU_MOVED(p)		(0x04e8 + ((p) << 10))
-#define TX_BW_BURST_MOVED(p)		(0x04ec + ((p) << 10))
-#define RXQ_CURRENT_DESC_PTR(p, q)	(0x060c + ((p) << 10) + ((q) << 4))
-#define RXQ_COMMAND(p)			(0x0680 + ((p) << 10))
-#define TXQ_CURRENT_DESC_PTR(p, q)	(0x06c0 + ((p) << 10) + ((q) << 2))
-#define TXQ_BW_TOKENS(p, q)		(0x0700 + ((p) << 10) + ((q) << 4))
-#define TXQ_BW_CONF(p, q)		(0x0704 + ((p) << 10) + ((q) << 4))
-#define TXQ_BW_WRR_CONF(p, q)		(0x0708 + ((p) << 10) + ((q) << 4))
+#define INT_MASK			0x0068
+#define INT_MASK_EXT			0x006c
+#define TX_FIFO_URGENT_THRESHOLD	0x0074
+#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
+#define TX_BW_RATE_MOVED		0x00e0
+#define TX_BW_MTU_MOVED			0x00e8
+#define TX_BW_BURST_MOVED		0x00ec
+#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
+#define RXQ_COMMAND			0x0280
+#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
+#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
+#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
+#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))
+
+/*
+ * Misc per-port registers.
+ */
 #define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
 #define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
 #define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
@@ -351,6 +356,7 @@ struct tx_queue {
 
 struct mv643xx_eth_private {
 	struct mv643xx_eth_shared_private *shared;
+	void __iomem *base;
 	int port_num;
 
 	struct net_device *dev;
@@ -401,11 +407,21 @@ static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
 	return readl(mp->shared->base + offset);
 }
 
+static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
+{
+	return readl(mp->base + offset);
+}
+
 static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
 {
 	writel(data, mp->shared->base + offset);
 }
 
+static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
+{
+	writel(data, mp->base + offset);
+}
+
 
 /* rxq/txq helper functions *************************************************/
 static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
@@ -421,7 +437,7 @@ static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
 static void rxq_enable(struct rx_queue *rxq)
 {
 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
-	wrl(mp, RXQ_COMMAND(mp->port_num), 1 << rxq->index);
+	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
 }
 
 static void rxq_disable(struct rx_queue *rxq)
@@ -429,26 +445,25 @@ static void rxq_disable(struct rx_queue *rxq)
 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 	u8 mask = 1 << rxq->index;
 
-	wrl(mp, RXQ_COMMAND(mp->port_num), mask << 8);
-	while (rdl(mp, RXQ_COMMAND(mp->port_num)) & mask)
+	wrlp(mp, RXQ_COMMAND, mask << 8);
+	while (rdlp(mp, RXQ_COMMAND) & mask)
 		udelay(10);
 }
 
 static void txq_reset_hw_ptr(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
-	int off = TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index);
 	u32 addr;
 
 	addr = (u32)txq->tx_desc_dma;
 	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
-	wrl(mp, off, addr);
+	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
 }
 
 static void txq_enable(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
-	wrl(mp, TXQ_COMMAND(mp->port_num), 1 << txq->index);
+	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
 }
 
 static void txq_disable(struct tx_queue *txq)
@@ -456,8 +471,8 @@ static void txq_disable(struct tx_queue *txq)
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 	u8 mask = 1 << txq->index;
 
-	wrl(mp, TXQ_COMMAND(mp->port_num), mask << 8);
-	while (rdl(mp, TXQ_COMMAND(mp->port_num)) & mask)
+	wrlp(mp, TXQ_COMMAND, mask << 8);
+	while (rdlp(mp, TXQ_COMMAND) & mask)
 		udelay(10);
 }
 
@@ -829,10 +844,10 @@ static void txq_kick(struct tx_queue *txq)
 
 	__netif_tx_lock(nq, smp_processor_id());
 
-	if (rdl(mp, TXQ_COMMAND(mp->port_num)) & (1 << txq->index))
+	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
 		goto out;
 
-	hw_desc_ptr = rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index));
+	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
 	expected_ptr = (u32)txq->tx_desc_dma +
 			txq->tx_curr_desc * sizeof(struct tx_desc);
 
@@ -938,14 +953,14 @@ static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
 
 	switch (mp->shared->tx_bw_control) {
 	case TX_BW_CONTROL_OLD_LAYOUT:
-		wrl(mp, TX_BW_RATE(mp->port_num), token_rate);
-		wrl(mp, TX_BW_MTU(mp->port_num), mtu);
-		wrl(mp, TX_BW_BURST(mp->port_num), bucket_size);
+		wrlp(mp, TX_BW_RATE, token_rate);
+		wrlp(mp, TX_BW_MTU, mtu);
+		wrlp(mp, TX_BW_BURST, bucket_size);
 		break;
 	case TX_BW_CONTROL_NEW_LAYOUT:
-		wrl(mp, TX_BW_RATE_MOVED(mp->port_num), token_rate);
-		wrl(mp, TX_BW_MTU_MOVED(mp->port_num), mtu);
-		wrl(mp, TX_BW_BURST_MOVED(mp->port_num), bucket_size);
+		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
+		wrlp(mp, TX_BW_MTU_MOVED, mtu);
+		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
 		break;
 	}
 }
@@ -964,9 +979,8 @@ static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
 	if (bucket_size > 65535)
 		bucket_size = 65535;
 
-	wrl(mp, TXQ_BW_TOKENS(mp->port_num, txq->index), token_rate << 14);
-	wrl(mp, TXQ_BW_CONF(mp->port_num, txq->index),
-			(bucket_size << 10) | token_rate);
+	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
+	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
 }
 
 static void txq_set_fixed_prio_mode(struct tx_queue *txq)
@@ -981,17 +995,17 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
 	off = 0;
 	switch (mp->shared->tx_bw_control) {
 	case TX_BW_CONTROL_OLD_LAYOUT:
-		off = TXQ_FIX_PRIO_CONF(mp->port_num);
+		off = TXQ_FIX_PRIO_CONF;
 		break;
 	case TX_BW_CONTROL_NEW_LAYOUT:
-		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
+		off = TXQ_FIX_PRIO_CONF_MOVED;
 		break;
 	}
 
 	if (off) {
-		val = rdl(mp, off);
+		val = rdlp(mp, off);
 		val |= 1 << txq->index;
-		wrl(mp, off, val);
+		wrlp(mp, off, val);
 	}
 }
 
@@ -1007,26 +1021,25 @@ static void txq_set_wrr(struct tx_queue *txq, int weight)
 	off = 0;
 	switch (mp->shared->tx_bw_control) {
 	case TX_BW_CONTROL_OLD_LAYOUT:
-		off = TXQ_FIX_PRIO_CONF(mp->port_num);
+		off = TXQ_FIX_PRIO_CONF;
 		break;
 	case TX_BW_CONTROL_NEW_LAYOUT:
-		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
+		off = TXQ_FIX_PRIO_CONF_MOVED;
 		break;
 	}
 
 	if (off) {
-		val = rdl(mp, off);
+		val = rdlp(mp, off);
 		val &= ~(1 << txq->index);
-		wrl(mp, off, val);
+		wrlp(mp, off, val);
 
 		/*
 		 * Configure WRR weight for this queue.
 		 */
-		off = TXQ_BW_WRR_CONF(mp->port_num, txq->index);
 
-		val = rdl(mp, off);
+		val = rdlp(mp, off);
 		val = (val & ~0xff) | (weight & 0xff);
-		wrl(mp, off, val);
+		wrlp(mp, TXQ_BW_WRR_CONF(txq->index), val);
 	}
 }
 
@@ -1294,7 +1307,7 @@ mv643xx_eth_get_settings_phyless(struct net_device *dev,
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	u32 port_status;
 
-	port_status = rdl(mp, PORT_STATUS(mp->port_num));
+	port_status = rdlp(mp, PORT_STATUS);
 
 	cmd->supported = SUPPORTED_MII;
 	cmd->advertising = ADVERTISED_MII;
@@ -1449,8 +1462,8 @@ static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
 	unsigned int mac_h;
 	unsigned int mac_l;
 
-	mac_h = rdl(mp, MAC_ADDR_HIGH(mp->port_num));
-	mac_l = rdl(mp, MAC_ADDR_LOW(mp->port_num));
+	mac_h = rdlp(mp, MAC_ADDR_HIGH);
+	mac_l = rdlp(mp, MAC_ADDR_LOW);
 
 	addr[0] = (mac_h >> 24) & 0xff;
 	addr[1] = (mac_h >> 16) & 0xff;
@@ -1493,8 +1506,8 @@ static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
 	mac_l = (addr[4] << 8) | addr[5];
 	mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
 
-	wrl(mp, MAC_ADDR_LOW(mp->port_num), mac_l);
-	wrl(mp, MAC_ADDR_HIGH(mp->port_num), mac_h);
+	wrlp(mp, MAC_ADDR_LOW, mac_l);
+	wrlp(mp, MAC_ADDR_HIGH, mac_h);
 
 	table = UNICAST_TABLE(mp->port_num);
 	set_filter_table_entry(mp, table, addr[5] & 0x0f);
@@ -1538,12 +1551,12 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
 	struct dev_addr_list *addr;
 	int i;
 
-	port_config = rdl(mp, PORT_CONFIG(mp->port_num));
+	port_config = rdlp(mp, PORT_CONFIG);
 	if (dev->flags & IFF_PROMISC)
 		port_config |= UNICAST_PROMISCUOUS_MODE;
 	else
 		port_config &= ~UNICAST_PROMISCUOUS_MODE;
-	wrl(mp, PORT_CONFIG(mp->port_num), port_config);
+	wrlp(mp, PORT_CONFIG, port_config);
 
 	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
 		int port_num = mp->port_num;
@@ -1761,26 +1774,25 @@ static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
 	u32 int_cause;
 	u32 int_cause_ext;
 
-	int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
-			(INT_TX_END | INT_RX | INT_EXT);
+	int_cause = rdlp(mp, INT_CAUSE) & (INT_TX_END | INT_RX | INT_EXT);
 	if (int_cause == 0)
 		return 0;
 
 	int_cause_ext = 0;
 	if (int_cause & INT_EXT)
-		int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num));
+		int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
 
 	int_cause &= INT_TX_END | INT_RX;
 	if (int_cause) {
-		wrl(mp, INT_CAUSE(mp->port_num), ~int_cause);
+		wrlp(mp, INT_CAUSE, ~int_cause);
 		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
-				~(rdl(mp, TXQ_COMMAND(mp->port_num)) & 0xff);
+				~(rdlp(mp, TXQ_COMMAND) & 0xff);
 		mp->work_rx |= (int_cause & INT_RX) >> 2;
 	}
 
 	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
 	if (int_cause_ext) {
-		wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
+		wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
 		if (int_cause_ext & INT_EXT_LINK_PHY)
 			mp->work_link = 1;
 		mp->work_tx |= int_cause_ext & INT_EXT_TX;
@@ -1797,7 +1809,7 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 	if (unlikely(!mv643xx_eth_collect_events(mp)))
 		return IRQ_NONE;
 
-	wrl(mp, INT_MASK(mp->port_num), 0);
+	wrlp(mp, INT_MASK, 0);
 	napi_schedule(&mp->napi);
 
 	return IRQ_HANDLED;
@@ -1811,7 +1823,7 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
 	int duplex;
 	int fc;
 
-	port_status = rdl(mp, PORT_STATUS(mp->port_num));
+	port_status = rdlp(mp, PORT_STATUS);
 	if (!(port_status & LINK_UP)) {
 		if (netif_carrier_ok(dev)) {
 			int i;
@@ -1911,7 +1923,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 		if (mp->work_rx_oom)
 			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
 		napi_complete(napi);
-		wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
+		wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
 	}
 
 	return work_done;
@@ -1960,17 +1972,17 @@ static void port_start(struct mv643xx_eth_private *mp)
 	/*
 	 * Configure basic link parameters.
 	 */
-	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
+	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
 
 	pscr |= SERIAL_PORT_ENABLE;
-	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
+	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
 
 	pscr |= DO_NOT_FORCE_LINK_FAIL;
 	if (mp->phy == NULL)
 		pscr |= FORCE_LINK_PASS;
-	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
+	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
 
-	wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);
+	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
 
 	/*
 	 * Configure TX path and queues.
@@ -1994,24 +2006,23 @@ static void port_start(struct mv643xx_eth_private *mp)
 	 * frames to RX queue #0, and include the pseudo-header when
 	 * calculating receive checksums.
 	 */
-	wrl(mp, PORT_CONFIG(mp->port_num), 0x02000000);
+	wrlp(mp, PORT_CONFIG, 0x02000000);
 
 	/*
 	 * Treat BPDUs as normal multicasts, and disable partition mode.
 	 */
-	wrl(mp, PORT_CONFIG_EXT(mp->port_num), 0x00000000);
+	wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
 
 	/*
 	 * Enable the receive queues.
 	 */
 	for (i = 0; i < mp->rxq_count; i++) {
 		struct rx_queue *rxq = mp->rxq + i;
-		int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i);
 		u32 addr;
 
 		addr = (u32)rxq->rx_desc_dma;
 		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
-		wrl(mp, off, addr);
+		wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);
 
 		rxq_enable(rxq);
 	}
@@ -2022,7 +2033,7 @@ static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
 	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
 	u32 val;
 
-	val = rdl(mp, SDMA_CONFIG(mp->port_num));
+	val = rdlp(mp, SDMA_CONFIG);
 	if (mp->shared->extended_rx_coal_limit) {
 		if (coal > 0xffff)
 			coal = 0xffff;
@@ -2035,7 +2046,7 @@ static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
 		val &= ~0x003fff00;
 		val |= (coal & 0x3fff) << 8;
 	}
-	wrl(mp, SDMA_CONFIG(mp->port_num), val);
+	wrlp(mp, SDMA_CONFIG, val);
 }
 
 static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
@@ -2044,7 +2055,7 @@ static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
 
 	if (coal > 0x3fff)
 		coal = 0x3fff;
-	wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), (coal & 0x3fff) << 4);
+	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, (coal & 0x3fff) << 4);
 }
 
 static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
@@ -2073,9 +2084,9 @@ static int mv643xx_eth_open(struct net_device *dev)
 	int err;
 	int i;
 
-	wrl(mp, INT_CAUSE(mp->port_num), 0);
-	wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
-	rdl(mp, INT_CAUSE_EXT(mp->port_num));
+	wrlp(mp, INT_CAUSE, 0);
+	wrlp(mp, INT_CAUSE_EXT, 0);
+	rdlp(mp, INT_CAUSE_EXT);
 
 	err = request_irq(dev->irq, mv643xx_eth_irq,
 			  IRQF_SHARED, dev->name, dev);
@@ -2124,8 +2135,8 @@ static int mv643xx_eth_open(struct net_device *dev)
 	set_rx_coal(mp, 0);
 	set_tx_coal(mp, 0);
 
-	wrl(mp, INT_MASK_EXT(mp->port_num), INT_EXT_LINK_PHY | INT_EXT_TX);
-	wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
+	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
+	wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
 
 	return 0;
 
@@ -2150,7 +2161,7 @@ static void port_reset(struct mv643xx_eth_private *mp)
 		txq_disable(mp->txq + i);
 
 	while (1) {
-		u32 ps = rdl(mp, PORT_STATUS(mp->port_num));
+		u32 ps = rdlp(mp, PORT_STATUS);
 
 		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
 			break;
@@ -2158,11 +2169,11 @@ static void port_reset(struct mv643xx_eth_private *mp)
 	}
 
 	/* Reset the Enable bit in the Configuration Register */
-	data = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
+	data = rdlp(mp, PORT_SERIAL_CONTROL);
 	data &= ~(SERIAL_PORT_ENABLE |
 		  DO_NOT_FORCE_LINK_FAIL |
 		  FORCE_LINK_PASS);
-	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), data);
+	wrlp(mp, PORT_SERIAL_CONTROL, data);
 }
 
 static int mv643xx_eth_stop(struct net_device *dev)
@@ -2170,8 +2181,8 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	int i;
 
-	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
-	rdl(mp, INT_MASK(mp->port_num));
+	wrlp(mp, INT_MASK, 0x00000000);
+	rdlp(mp, INT_MASK);
 
 	del_timer_sync(&mp->mib_counters_timer);
 
@@ -2264,12 +2275,12 @@ static void mv643xx_eth_netpoll(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 
-	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
-	rdl(mp, INT_MASK(mp->port_num));
+	wrlp(mp, INT_MASK, 0x00000000);
+	rdlp(mp, INT_MASK);
 
 	mv643xx_eth_irq(dev->irq, dev);
 
-	wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
+	wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
 }
 #endif
 
@@ -2317,8 +2328,8 @@ static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
 	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
 	 * SDMA config register.
 	 */
-	writel(0x02000000, msp->base + SDMA_CONFIG(0));
-	if (readl(msp->base + SDMA_CONFIG(0)) & 0x02000000)
+	writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
+	if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
 		msp->extended_rx_coal_limit = 1;
 	else
 		msp->extended_rx_coal_limit = 0;
@@ -2328,12 +2339,12 @@ static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
 	 * yes, whether its associated registers are in the old or
 	 * the new place.
 	 */
-	writel(1, msp->base + TX_BW_MTU_MOVED(0));
-	if (readl(msp->base + TX_BW_MTU_MOVED(0)) & 1) {
+	writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
+	if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
 		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
 	} else {
-		writel(7, msp->base + TX_BW_RATE(0));
-		if (readl(msp->base + TX_BW_RATE(0)) & 7)
+		writel(7, msp->base + 0x0400 + TX_BW_RATE);
+		if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
 			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
 		else
 			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
@@ -2566,10 +2577,10 @@ static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
 {
 	u32 pscr;
 
-	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
+	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
 	if (pscr & SERIAL_PORT_ENABLE) {
 		pscr &= ~SERIAL_PORT_ENABLE;
-		wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
+		wrlp(mp, PORT_SERIAL_CONTROL, pscr);
 	}
 
 	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
@@ -2587,7 +2598,7 @@ static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
 		pscr |= SET_FULL_DUPLEX_MODE;
 	}
 
-	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
+	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
 }
 
 static int mv643xx_eth_probe(struct platform_device *pdev)
@@ -2619,6 +2630,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, mp);
 
 	mp->shared = platform_get_drvdata(pd->shared);
+	mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
 	mp->port_num = pd->port_number;
 
 	mp->dev = dev;
@@ -2723,8 +2735,8 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev)
 	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
 
 	/* Mask all interrupts on ethernet port */
-	wrl(mp, INT_MASK(mp->port_num), 0);
-	rdl(mp, INT_MASK(mp->port_num));
+	wrlp(mp, INT_MASK, 0);
+	rdlp(mp, INT_MASK);
 
 	if (netif_running(mp->dev))
 		port_reset(mp);