 drivers/net/mv643xx_eth.c | 156 ++++++++++++++++++++++----------------------
 1 file changed, 78 insertions(+), 78 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 5a4ae86e145f..39314c99520a 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -615,12 +615,12 @@ static void __iomem *mv643xx_eth_base;
 /* used to protect SMI_REG, which is shared across ports */
 static DEFINE_SPINLOCK(mv643xx_eth_phy_lock);
 
-static inline u32 mv_read(int offset)
+static inline u32 rdl(struct mv643xx_private *mp, int offset)
 {
 	return readl(mv643xx_eth_base + offset);
 }
 
-static inline void mv_write(int offset, u32 data)
+static inline void wrl(struct mv643xx_private *mp, int offset, u32 data)
 {
 	writel(data, mv643xx_eth_base + offset);
 }
@@ -746,12 +746,12 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
 	struct mv643xx_private *mp = netdev_priv(dev);
 	u32 config_reg;
 
-	config_reg = mv_read(PORT_CONFIG_REG(mp->port_num));
+	config_reg = rdl(mp, PORT_CONFIG_REG(mp->port_num));
 	if (dev->flags & IFF_PROMISC)
 		config_reg |= (u32) UNICAST_PROMISCUOUS_MODE;
 	else
 		config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE;
-	mv_write(PORT_CONFIG_REG(mp->port_num), config_reg);
+	wrl(mp, PORT_CONFIG_REG(mp->port_num), config_reg);
 
 	eth_port_set_multicast_list(dev);
 }
@@ -987,7 +987,7 @@ static void mv643xx_eth_update_pscr(struct net_device *dev,
 	u32 o_pscr, n_pscr;
 	unsigned int queues;
 
-	o_pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
+	o_pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
 	n_pscr = o_pscr;
 
 	/* clear speed, duplex and rx buffer size fields */
@@ -1010,14 +1010,14 @@ static void mv643xx_eth_update_pscr(struct net_device *dev,
 
 	if (n_pscr != o_pscr) {
 		if ((o_pscr & SERIAL_PORT_ENABLE) == 0)
-			mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
+			wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
 		else {
 			queues = mv643xx_eth_port_disable_tx(mp);
 
 			o_pscr &= ~SERIAL_PORT_ENABLE;
-			mv_write(PORT_SERIAL_CONTROL_REG(port_num), o_pscr);
-			mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
-			mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
+			wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), o_pscr);
+			wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
+			wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
 			if (queues)
 				mv643xx_eth_port_enable_tx(mp, queues);
 		}
@@ -1043,13 +1043,13 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 	unsigned int port_num = mp->port_num;
 
 	/* Read interrupt cause registers */
-	eth_int_cause = mv_read(INTERRUPT_CAUSE_REG(port_num)) &
+	eth_int_cause = rdl(mp, INTERRUPT_CAUSE_REG(port_num)) &
 						ETH_INT_UNMASK_ALL;
 	if (eth_int_cause & ETH_INT_CAUSE_EXT) {
-		eth_int_cause_ext = mv_read(
+		eth_int_cause_ext = rdl(mp,
 				INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
 				ETH_INT_UNMASK_ALL_EXT;
-		mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num),
+		wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num),
 			~eth_int_cause_ext);
 	}
 
@@ -1076,10 +1076,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 #ifdef MV643XX_NAPI
 	if (eth_int_cause & ETH_INT_CAUSE_RX) {
 		/* schedule the NAPI poll routine to maintain port */
-		mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+		wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
 
 		/* wait for previous write to complete */
-		mv_read(INTERRUPT_MASK_REG(port_num));
+		rdl(mp, INTERRUPT_MASK_REG(port_num));
 
 		netif_rx_schedule(dev, &mp->napi);
 	}
@@ -1132,9 +1132,9 @@ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp,
 	unsigned int coal = ((t_clk / 1000000) * delay) / 64;
 
 	/* Set RX Coalescing mechanism */
-	mv_write(SDMA_CONFIG_REG(port_num),
+	wrl(mp, SDMA_CONFIG_REG(port_num),
 		((coal & 0x3fff) << 8) |
-		(mv_read(SDMA_CONFIG_REG(port_num))
+		(rdl(mp, SDMA_CONFIG_REG(port_num))
 			& 0xffc000ff));
 
 	return coal;
@@ -1170,7 +1170,7 @@ static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp,
 	unsigned int coal = ((t_clk / 1000000) * delay) / 64;
 
 	/* Set TX Coalescing mechanism */
-	mv_write(TX_FIFO_URGENT_THRESHOLD_REG(mp->port_num), coal << 4);
+	wrl(mp, TX_FIFO_URGENT_THRESHOLD_REG(mp->port_num), coal << 4);
 
 	return coal;
 }
@@ -1307,10 +1307,10 @@ static int mv643xx_eth_open(struct net_device *dev)
 	int err;
 
 	/* Clear any pending ethernet port interrupts */
-	mv_write(INTERRUPT_CAUSE_REG(port_num), 0);
-	mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+	wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0);
+	wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
 	/* wait for previous write to complete */
-	mv_read (INTERRUPT_CAUSE_EXTEND_REG(port_num));
+	rdl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num));
 
 	err = request_irq(dev->irq, mv643xx_eth_int_handler,
 			IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
@@ -1416,10 +1416,10 @@ static int mv643xx_eth_open(struct net_device *dev)
 	eth_port_set_tx_coal(mp, 133000000, MV643XX_TX_COAL);
 
 	/* Unmask phy and link status changes interrupts */
-	mv_write(INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT);
+	wrl(mp, INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT);
 
 	/* Unmask RX buffer and TX end interrupt */
-	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
+	wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
 
 	return 0;
 
@@ -1498,9 +1498,9 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	unsigned int port_num = mp->port_num;
 
 	/* Mask all interrupts on ethernet port */
-	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+	wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
 	/* wait for previous write to complete */
-	mv_read(INTERRUPT_MASK_REG(port_num));
+	rdl(mp, INTERRUPT_MASK_REG(port_num));
 
 #ifdef MV643XX_NAPI
 	napi_disable(&mp->napi);
@@ -1539,15 +1539,15 @@ static int mv643xx_poll(struct napi_struct *napi, int budget)
 #endif
 
 	work_done = 0;
-	if ((mv_read(RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
+	if ((rdl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
 					!= (u32) mp->rx_used_desc_q)
 		work_done = mv643xx_eth_receive_queue(dev, budget);
 
 	if (work_done < budget) {
 		netif_rx_complete(dev, napi);
-		mv_write(INTERRUPT_CAUSE_REG(port_num), 0);
-		mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
-		mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
+		wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0);
+		wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+		wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
 	}
 
 	return work_done;
@@ -1753,13 +1753,13 @@ static void mv643xx_netpoll(struct net_device *netdev)
 	struct mv643xx_private *mp = netdev_priv(netdev);
 	int port_num = mp->port_num;
 
-	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+	wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
 	/* wait for previous write to complete */
-	mv_read(INTERRUPT_MASK_REG(port_num));
+	rdl(mp, INTERRUPT_MASK_REG(port_num));
 
 	mv643xx_eth_int_handler(netdev->irq, netdev);
 
-	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
+	wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
 }
 #endif
 
@@ -2012,8 +2012,8 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev)
 	unsigned int port_num = mp->port_num;
 
 	/* Mask all interrupts on ethernet port */
-	mv_write(INTERRUPT_MASK_REG(port_num), 0);
-	mv_read (INTERRUPT_MASK_REG(port_num));
+	wrl(mp, INTERRUPT_MASK_REG(port_num), 0);
+	rdl(mp, INTERRUPT_MASK_REG(port_num));
 
 	eth_port_reset(mp);
 }
@@ -2278,28 +2278,28 @@ static void eth_port_start(struct net_device *dev)
 
 	/* Assignment of Tx CTRP of given queue */
 	tx_curr_desc = mp->tx_curr_desc_q;
-	mv_write(TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
+	wrl(mp, TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
 		(u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc));
 
 	/* Assignment of Rx CRDP of given queue */
 	rx_curr_desc = mp->rx_curr_desc_q;
-	mv_write(RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
+	wrl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
 		(u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));
 
 	/* Add the assigned Ethernet address to the port's address table */
 	eth_port_uc_addr_set(mp, dev->dev_addr);
 
 	/* Assign port configuration and command. */
-	mv_write(PORT_CONFIG_REG(port_num),
+	wrl(mp, PORT_CONFIG_REG(port_num),
 		PORT_CONFIG_DEFAULT_VALUE);
 
-	mv_write(PORT_CONFIG_EXTEND_REG(port_num),
+	wrl(mp, PORT_CONFIG_EXTEND_REG(port_num),
 		PORT_CONFIG_EXTEND_DEFAULT_VALUE);
 
-	pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
+	pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
 
 	pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
-	mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
+	wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
 
 	pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
 		DISABLE_AUTO_NEG_SPEED_GMII |
@@ -2307,20 +2307,20 @@ static void eth_port_start(struct net_device *dev)
 		DO_NOT_FORCE_LINK_FAIL |
 		SERIAL_PORT_CONTROL_RESERVED;
 
-	mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
+	wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
 
 	pscr |= SERIAL_PORT_ENABLE;
-	mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
+	wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
 
 	/* Assign port SDMA configuration */
-	mv_write(SDMA_CONFIG_REG(port_num),
+	wrl(mp, SDMA_CONFIG_REG(port_num),
 		PORT_SDMA_CONFIG_DEFAULT_VALUE);
 
 	/* Enable port Rx. */
 	mv643xx_eth_port_enable_rx(mp, ETH_RX_QUEUES_ENABLED);
 
 	/* Disable port bandwidth limits by clearing MTU register */
-	mv_write(MAXIMUM_TRANSMIT_UNIT(port_num), 0);
+	wrl(mp, MAXIMUM_TRANSMIT_UNIT(port_num), 0);
 
 	/* save phy settings across reset */
 	mv643xx_get_settings(dev, &ethtool_cmd);
@@ -2343,8 +2343,8 @@ static void eth_port_uc_addr_set(struct mv643xx_private *mp,
 	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
 							(p_addr[3] << 0);
 
-	mv_write(MAC_ADDR_LOW(port_num), mac_l);
-	mv_write(MAC_ADDR_HIGH(port_num), mac_h);
+	wrl(mp, MAC_ADDR_LOW(port_num), mac_l);
+	wrl(mp, MAC_ADDR_HIGH(port_num), mac_h);
 
 	/* Accept frames with this address */
 	table = DA_FILTER_UNICAST_TABLE_BASE(port_num);
@@ -2361,8 +2361,8 @@ static void eth_port_uc_addr_get(struct mv643xx_private *mp,
 	unsigned int mac_h;
 	unsigned int mac_l;
 
-	mac_h = mv_read(MAC_ADDR_HIGH(port_num));
-	mac_l = mv_read(MAC_ADDR_LOW(port_num));
+	mac_h = rdl(mp, MAC_ADDR_HIGH(port_num));
+	mac_l = rdl(mp, MAC_ADDR_LOW(port_num));
 
 	p_addr[0] = (mac_h >> 24) & 0xff;
 	p_addr[1] = (mac_h >> 16) & 0xff;
@@ -2392,9 +2392,9 @@ static void eth_port_set_filter_table_entry(struct mv643xx_private *mp,
 	reg_offset = entry % 4;		/* Entry offset within the register */
 
 	/* Set "accepts frame bit" at specified table entry */
-	table_reg = mv_read(table + tbl_offset);
+	table_reg = rdl(mp, table + tbl_offset);
 	table_reg |= 0x01 << (8 * reg_offset);
-	mv_write(table + tbl_offset, table_reg);
+	wrl(mp, table + tbl_offset, table_reg);
 }
 
 /*
@@ -2527,7 +2527,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
 			 * 3-1	Queue	ETH_Q0=0
 			 * 7-4	Reserved = 0;
 			 */
-			mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+			wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
 
 			/* Set all entries in DA filter other multicast
 			 * table (Ex_dFOMT)
@@ -2537,7 +2537,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
 			 * 3-1	Queue	ETH_Q0=0
 			 * 7-4	Reserved = 0;
 			 */
-			mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+			wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
 		}
 		return;
 	}
@@ -2547,11 +2547,11 @@ static void eth_port_set_multicast_list(struct net_device *dev)
 	 */
 	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
 		/* Clear DA filter special multicast table (Ex_dFSMT) */
-		mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+		wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
 				(eth_port_num) + table_index, 0);
 
 		/* Clear DA filter other multicast table (Ex_dFOMT) */
-		mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE
+		wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE
 				(eth_port_num) + table_index, 0);
 	}
 
@@ -2586,15 +2586,15 @@ static void eth_port_init_mac_tables(struct mv643xx_private *mp)
 
 	/* Clear DA filter unicast table (Ex_dFUT) */
 	for (table_index = 0; table_index <= 0xC; table_index += 4)
-		mv_write(DA_FILTER_UNICAST_TABLE_BASE(port_num) +
+		wrl(mp, DA_FILTER_UNICAST_TABLE_BASE(port_num) +
 				table_index, 0);
 
 	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
 		/* Clear DA filter special multicast table (Ex_dFSMT) */
-		mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num) +
+		wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num) +
 				table_index, 0);
 		/* Clear DA filter other multicast table (Ex_dFOMT) */
-		mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num) +
+		wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num) +
 				table_index, 0);
 	}
 }
@@ -2624,12 +2624,12 @@ static void eth_clear_mib_counters(struct mv643xx_private *mp)
 	/* Perform dummy reads from MIB counters */
 	for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
 								i += 4)
-		mv_read(MIB_COUNTERS_BASE(port_num) + i);
+		rdl(mp, MIB_COUNTERS_BASE(port_num) + i);
 }
 
 static inline u32 read_mib(struct mv643xx_private *mp, int offset)
 {
-	return mv_read(MIB_COUNTERS_BASE(mp->port_num) + offset);
+	return rdl(mp, MIB_COUNTERS_BASE(mp->port_num) + offset);
 }
 
 static void eth_update_mib_counters(struct mv643xx_private *mp)
@@ -2714,7 +2714,7 @@ static int ethernet_phy_get(struct mv643xx_private *mp)
 {
 	unsigned int reg_data;
 
-	reg_data = mv_read(PHY_ADDR_REG);
+	reg_data = rdl(mp, PHY_ADDR_REG);
 
 	return ((reg_data >> (5 * mp->port_num)) & 0x1f);
 }
@@ -2741,10 +2741,10 @@ static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr)
 	u32 reg_data;
 	int addr_shift = 5 * mp->port_num;
 
-	reg_data = mv_read(PHY_ADDR_REG);
+	reg_data = rdl(mp, PHY_ADDR_REG);
 	reg_data &= ~(0x1f << addr_shift);
 	reg_data |= (phy_addr & 0x1f) << addr_shift;
-	mv_write(PHY_ADDR_REG, reg_data);
+	wrl(mp, PHY_ADDR_REG, reg_data);
 }
 
 /*
@@ -2782,13 +2782,13 @@ static void ethernet_phy_reset(struct mv643xx_private *mp)
 static void mv643xx_eth_port_enable_tx(struct mv643xx_private *mp,
 					unsigned int queues)
 {
-	mv_write(TRANSMIT_QUEUE_COMMAND_REG(mp->port_num), queues);
+	wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(mp->port_num), queues);
 }
 
 static void mv643xx_eth_port_enable_rx(struct mv643xx_private *mp,
 					unsigned int queues)
 {
-	mv_write(RECEIVE_QUEUE_COMMAND_REG(mp->port_num), queues);
+	wrl(mp, RECEIVE_QUEUE_COMMAND_REG(mp->port_num), queues);
 }
 
 static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp)
@@ -2797,18 +2797,18 @@ static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp)
 	u32 queues;
 
 	/* Stop Tx port activity. Check port Tx activity. */
-	queues = mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF;
+	queues = rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF;
 	if (queues) {
 		/* Issue stop command for active queues only */
-		mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8));
+		wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8));
 
 		/* Wait for all Tx activity to terminate. */
 		/* Check port cause register that all Tx queues are stopped */
-		while (mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF)
+		while (rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF)
 			udelay(PHY_WAIT_MICRO_SECONDS);
 
 		/* Wait for Tx FIFO to empty */
-		while (mv_read(PORT_STATUS_REG(port_num)) &
+		while (rdl(mp, PORT_STATUS_REG(port_num)) &
 						ETH_PORT_TX_FIFO_EMPTY)
 			udelay(PHY_WAIT_MICRO_SECONDS);
 	}
@@ -2822,14 +2822,14 @@ static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp)
 	u32 queues;
 
 	/* Stop Rx port activity. Check port Rx activity. */
-	queues = mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF;
+	queues = rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF;
 	if (queues) {
 		/* Issue stop command for active queues only */
-		mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8));
+		wrl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8));
 
 		/* Wait for all Rx activity to terminate. */
 		/* Check port cause register that all Rx queues are stopped */
-		while (mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF)
+		while (rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF)
 			udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
@@ -2866,11 +2866,11 @@ static void eth_port_reset(struct mv643xx_private *mp)
 	eth_clear_mib_counters(mp);
 
 	/* Reset the Enable bit in the Configuration Register */
-	reg_data = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
+	reg_data = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
 	reg_data &= ~(SERIAL_PORT_ENABLE |
 			DO_NOT_FORCE_LINK_FAIL |
 			FORCE_LINK_PASS);
-	mv_write(PORT_SERIAL_CONTROL_REG(port_num), reg_data);
+	wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), reg_data);
 }
 
 
@@ -2905,7 +2905,7 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp,
 	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
 
 	/* wait for the SMI register to become available */
-	for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) {
+	for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) {
 		if (i == PHY_WAIT_ITERATIONS) {
 			printk("%s: PHY busy timeout\n", mp->dev->name);
 			goto out;
@@ -2913,11 +2913,11 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp,
 		udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
-	mv_write(SMI_REG,
+	wrl(mp, SMI_REG,
 		(phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
 
 	/* now wait for the data to be valid */
-	for (i = 0; !(mv_read(SMI_REG) & ETH_SMI_READ_VALID); i++) {
+	for (i = 0; !(rdl(mp, SMI_REG) & ETH_SMI_READ_VALID); i++) {
 		if (i == PHY_WAIT_ITERATIONS) {
 			printk("%s: PHY read timeout\n", mp->dev->name);
 			goto out;
@@ -2925,7 +2925,7 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp,
 		udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
-	*value = mv_read(SMI_REG) & 0xffff;
+	*value = rdl(mp, SMI_REG) & 0xffff;
 out:
 	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
 }
@@ -2963,7 +2963,7 @@ static void eth_port_write_smi_reg(struct mv643xx_private *mp,
 	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
 
 	/* wait for the SMI register to become available */
-	for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) {
+	for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) {
 		if (i == PHY_WAIT_ITERATIONS) {
 			printk("%s: PHY busy timeout\n", mp->dev->name);
 			goto out;
@@ -2971,7 +2971,7 @@ static void eth_port_write_smi_reg(struct mv643xx_private *mp,
 		udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
-	mv_write(SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
+	wrl(mp, SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
 				ETH_SMI_OPCODE_WRITE | (value & 0xffff));
 out:
 	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);