author     Lennert Buytenhek <buytenh@wantstofly.org>   2007-10-18 22:11:28 -0400
committer  Dale Farnsworth <dale@farnsworth.org>        2007-10-23 11:23:07 -0400
commit     e4d00fa9bfed733051652a32686b9911e8549ac8 (patch)
tree       459436b87003e24b8fb663bf2507bf4f423fb1c1 /drivers/net/mv643xx_eth.c
parent     f9fbbc18dfcdc6156306f475de8b0bb96f97cd0d (diff)
mv643xx_eth: Remove MV643XX_ETH_ register prefix
Now that all register address and bit defines are in private
namespace (drivers/net/mv643xx_eth.h), we can safely remove the
MV643XX_ETH_ prefix to conserve horizontal space.
Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Acked-by: Tzachi Perelstein <tzachi@marvell.com>
Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
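For illustration, a minimal before/after sketch of the kind of rename this performs in the private header; only the macro names come from this patch, while ETH_BASE and the offsets below are placeholders:

/* illustrative sketch only -- ETH_BASE and the offsets are placeholders, not taken from the patch */

/* before: register defines carry the driver-wide prefix */
#define MV643XX_ETH_SMI_REG                     (ETH_BASE + 0x004)
#define MV643XX_ETH_PORT_CONFIG_REG(p)          (ETH_BASE + 0x400 + ((p) << 10))

/* after: same values, shorter names; safe because mv643xx_eth.h is private to this driver */
#define SMI_REG                                 (ETH_BASE + 0x004)
#define PORT_CONFIG_REG(p)                      (ETH_BASE + 0x400 + ((p) << 10))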
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--  drivers/net/mv643xx_eth.c | 227
 1 file changed, 107 insertions(+), 120 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 98b30e52f07d..ca120e53b582 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -80,7 +80,7 @@ static char mv643xx_driver_version[] = "1.0";
 
 static void __iomem *mv643xx_eth_base;
 
-/* used to protect MV643XX_ETH_SMI_REG, which is shared across ports */
+/* used to protect SMI_REG, which is shared across ports */
 static DEFINE_SPINLOCK(mv643xx_eth_phy_lock);
 
 static inline u32 mv_read(int offset)
@@ -214,12 +214,12 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
         struct mv643xx_private *mp = netdev_priv(dev);
         u32 config_reg;
 
-        config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num));
+        config_reg = mv_read(PORT_CONFIG_REG(mp->port_num));
         if (dev->flags & IFF_PROMISC)
-                config_reg |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
+                config_reg |= (u32) UNICAST_PROMISCUOUS_MODE;
         else
-                config_reg &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
-        mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), config_reg);
+                config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE;
+        mv_write(PORT_CONFIG_REG(mp->port_num), config_reg);
 
         eth_port_set_multicast_list(dev);
 }
@@ -455,41 +455,37 @@ static void mv643xx_eth_update_pscr(struct net_device *dev,
         u32 o_pscr, n_pscr;
         unsigned int queues;
 
-        o_pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
+        o_pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
         n_pscr = o_pscr;
 
         /* clear speed, duplex and rx buffer size fields */
-        n_pscr &= ~(MV643XX_ETH_SET_MII_SPEED_TO_100 |
-                        MV643XX_ETH_SET_GMII_SPEED_TO_1000 |
-                        MV643XX_ETH_SET_FULL_DUPLEX_MODE |
-                        MV643XX_ETH_MAX_RX_PACKET_MASK);
+        n_pscr &= ~(SET_MII_SPEED_TO_100 |
+                        SET_GMII_SPEED_TO_1000 |
+                        SET_FULL_DUPLEX_MODE |
+                        MAX_RX_PACKET_MASK);
 
         if (ecmd->duplex == DUPLEX_FULL)
-                n_pscr |= MV643XX_ETH_SET_FULL_DUPLEX_MODE;
+                n_pscr |= SET_FULL_DUPLEX_MODE;
 
         if (ecmd->speed == SPEED_1000)
-                n_pscr |= MV643XX_ETH_SET_GMII_SPEED_TO_1000 |
-                          MV643XX_ETH_MAX_RX_PACKET_9700BYTE;
+                n_pscr |= SET_GMII_SPEED_TO_1000 |
+                          MAX_RX_PACKET_9700BYTE;
         else {
                 if (ecmd->speed == SPEED_100)
-                        n_pscr |= MV643XX_ETH_SET_MII_SPEED_TO_100;
-                n_pscr |= MV643XX_ETH_MAX_RX_PACKET_1522BYTE;
+                        n_pscr |= SET_MII_SPEED_TO_100;
+                n_pscr |= MAX_RX_PACKET_1522BYTE;
         }
 
         if (n_pscr != o_pscr) {
-                if ((o_pscr & MV643XX_ETH_SERIAL_PORT_ENABLE) == 0)
-                        mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
-                                        n_pscr);
+                if ((o_pscr & SERIAL_PORT_ENABLE) == 0)
+                        mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
                 else {
                         queues = mv643xx_eth_port_disable_tx(port_num);
 
-                        o_pscr &= ~MV643XX_ETH_SERIAL_PORT_ENABLE;
-                        mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
-                                        o_pscr);
-                        mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
-                                        n_pscr);
-                        mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
-                                        n_pscr);
+                        o_pscr &= ~SERIAL_PORT_ENABLE;
+                        mv_write(PORT_SERIAL_CONTROL_REG(port_num), o_pscr);
+                        mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
+                        mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
                         if (queues)
                                 mv643xx_eth_port_enable_tx(port_num, queues);
                 }
@@ -515,13 +511,13 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
         unsigned int port_num = mp->port_num;
 
         /* Read interrupt cause registers */
-        eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
+        eth_int_cause = mv_read(INTERRUPT_CAUSE_REG(port_num)) &
                         ETH_INT_UNMASK_ALL;
         if (eth_int_cause & ETH_INT_CAUSE_EXT) {
                 eth_int_cause_ext = mv_read(
-                        MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
+                        INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
                         ETH_INT_UNMASK_ALL_EXT;
-                mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),
+                mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num),
                         ~eth_int_cause_ext);
         }
 
@@ -549,10 +545,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 #ifdef MV643XX_NAPI
         if (eth_int_cause & ETH_INT_CAUSE_RX) {
                 /* schedule the NAPI poll routine to maintain port */
-                mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-                                ETH_INT_MASK_ALL);
+                mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+
                 /* wait for previous write to complete */
-                mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+                mv_read(INTERRUPT_MASK_REG(port_num));
 
                 netif_rx_schedule(dev, &mp->napi);
         }
@@ -604,9 +600,9 @@ static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
         unsigned int coal = ((t_clk / 1000000) * delay) / 64;
 
         /* Set RX Coalescing mechanism */
-        mv_write(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num),
+        mv_write(SDMA_CONFIG_REG(eth_port_num),
                 ((coal & 0x3fff) << 8) |
-                (mv_read(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num))
+                (mv_read(SDMA_CONFIG_REG(eth_port_num))
                 & 0xffc000ff));
 
         return coal;
@@ -642,8 +638,7 @@ static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
         unsigned int coal;
         coal = ((t_clk / 1000000) * delay) / 64;
         /* Set TX Coalescing mechanism */
-        mv_write(MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num),
-                        coal << 4);
+        mv_write(TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num), coal << 4);
         return coal;
 }
 
@@ -779,10 +774,10 @@ static int mv643xx_eth_open(struct net_device *dev)
         int err;
 
         /* Clear any pending ethernet port interrupts */
-        mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
-        mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+        mv_write(INTERRUPT_CAUSE_REG(port_num), 0);
+        mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
         /* wait for previous write to complete */
-        mv_read (MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num));
+        mv_read (INTERRUPT_CAUSE_EXTEND_REG(port_num));
 
         err = request_irq(dev->irq, mv643xx_eth_int_handler,
                         IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
@@ -889,11 +884,10 @@ static int mv643xx_eth_open(struct net_device *dev)
         eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);
 
         /* Unmask phy and link status changes interrupts */
-        mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
-                        ETH_INT_UNMASK_ALL_EXT);
+        mv_write(INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT);
 
         /* Unmask RX buffer and TX end interrupt */
-        mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
+        mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
 
         return 0;
 
@@ -973,9 +967,9 @@ static int mv643xx_eth_stop(struct net_device *dev)
         unsigned int port_num = mp->port_num;
 
         /* Mask all interrupts on ethernet port */
-        mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+        mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
         /* wait for previous write to complete */
-        mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+        mv_read(INTERRUPT_MASK_REG(port_num));
 
 #ifdef MV643XX_NAPI
         napi_disable(&mp->napi);
@@ -1014,16 +1008,15 @@ static int mv643xx_poll(struct napi_struct *napi, int budget)
 #endif
 
         work_done = 0;
-        if ((mv_read(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
+        if ((mv_read(RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
                         != (u32) mp->rx_used_desc_q)
                 work_done = mv643xx_eth_receive_queue(dev, budget);
 
         if (work_done < budget) {
                 netif_rx_complete(dev, napi);
-                mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
-                mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
-                mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-                                ETH_INT_UNMASK_ALL);
+                mv_write(INTERRUPT_CAUSE_REG(port_num), 0);
+                mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+                mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
         }
 
         return work_done;
@@ -1226,13 +1219,13 @@ static void mv643xx_netpoll(struct net_device *netdev)
         struct mv643xx_private *mp = netdev_priv(netdev);
         int port_num = mp->port_num;
 
-        mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+        mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
         /* wait for previous write to complete */
-        mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+        mv_read(INTERRUPT_MASK_REG(port_num));
 
         mv643xx_eth_int_handler(netdev->irq, netdev);
 
-        mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
+        mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
 }
 #endif
 
@@ -1350,8 +1343,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
         /* set default config values */
         eth_port_uc_addr_get(port_num, dev->dev_addr);
-        mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
-        mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
+        mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
+        mp->tx_ring_size = PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
 
         if (is_valid_ether_addr(pd->mac_addr))
                 memcpy(dev->dev_addr, pd->mac_addr, 6);
@@ -1486,8 +1479,8 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev)
         unsigned int port_num = mp->port_num;
 
         /* Mask all interrupts on ethernet port */
-        mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
-        mv_read (MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+        mv_write(INTERRUPT_MASK_REG(port_num), 0);
+        mv_read (INTERRUPT_MASK_REG(port_num));
 
         eth_port_reset(port_num);
 }
@@ -1754,49 +1747,49 @@ static void eth_port_start(struct net_device *dev)
 
         /* Assignment of Tx CTRP of given queue */
         tx_curr_desc = mp->tx_curr_desc_q;
-        mv_write(MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
+        mv_write(TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
                 (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc));
 
         /* Assignment of Rx CRDP of given queue */
         rx_curr_desc = mp->rx_curr_desc_q;
-        mv_write(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
+        mv_write(RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
                 (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));
 
         /* Add the assigned Ethernet address to the port's address table */
         eth_port_uc_addr_set(port_num, dev->dev_addr);
 
         /* Assign port configuration and command. */
-        mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num),
-                        MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE);
+        mv_write(PORT_CONFIG_REG(port_num),
+                        PORT_CONFIG_DEFAULT_VALUE);
 
-        mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num),
-                        MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE);
+        mv_write(PORT_CONFIG_EXTEND_REG(port_num),
+                        PORT_CONFIG_EXTEND_DEFAULT_VALUE);
 
-        pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
+        pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
 
-        pscr &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | MV643XX_ETH_FORCE_LINK_PASS);
-        mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);
+        pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
+        mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
 
-        pscr |= MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
-                MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII |
-                MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX |
-                MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL |
-                MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED;
+        pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
+                DISABLE_AUTO_NEG_SPEED_GMII |
+                DISABLE_AUTO_NEG_FOR_DUPLX |
+                DO_NOT_FORCE_LINK_FAIL |
+                SERIAL_PORT_CONTROL_RESERVED;
 
-        mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);
+        mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
 
-        pscr |= MV643XX_ETH_SERIAL_PORT_ENABLE;
-        mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);
+        pscr |= SERIAL_PORT_ENABLE;
+        mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
 
         /* Assign port SDMA configuration */
-        mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num),
-                        MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE);
+        mv_write(SDMA_CONFIG_REG(port_num),
+                        PORT_SDMA_CONFIG_DEFAULT_VALUE);
 
         /* Enable port Rx. */
         mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED);
 
         /* Disable port bandwidth limits by clearing MTU register */
-        mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0);
+        mv_write(MAXIMUM_TRANSMIT_UNIT(port_num), 0);
 
         /* save phy settings across reset */
         mv643xx_get_settings(dev, &ethtool_cmd);
@@ -1817,11 +1810,11 @@ static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr)
         mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
                         (p_addr[3] << 0);
 
-        mv_write(MV643XX_ETH_MAC_ADDR_LOW(port_num), mac_l);
-        mv_write(MV643XX_ETH_MAC_ADDR_HIGH(port_num), mac_h);
+        mv_write(MAC_ADDR_LOW(port_num), mac_l);
+        mv_write(MAC_ADDR_HIGH(port_num), mac_h);
 
         /* Accept frames with this address */
-        table = MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE(port_num);
+        table = DA_FILTER_UNICAST_TABLE_BASE(port_num);
         eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f);
 }
 
@@ -1833,8 +1826,8 @@ static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr)
         unsigned int mac_h;
         unsigned int mac_l;
 
-        mac_h = mv_read(MV643XX_ETH_MAC_ADDR_HIGH(port_num));
-        mac_l = mv_read(MV643XX_ETH_MAC_ADDR_LOW(port_num));
+        mac_h = mv_read(MAC_ADDR_HIGH(port_num));
+        mac_l = mv_read(MAC_ADDR_LOW(port_num));
 
         p_addr[0] = (mac_h >> 24) & 0xff;
         p_addr[1] = (mac_h >> 16) & 0xff;
@@ -1894,7 +1887,7 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
 
         if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
             (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
-                table = MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+                table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
                                 (eth_port_num);
                 eth_port_set_filter_table_entry(table, p_addr[5]);
                 return;
@@ -1968,7 +1961,7 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
         for (i = 0; i < 8; i++)
                 crc_result = crc_result | (crc[i] << i);
 
-        table = MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num);
+        table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num);
         eth_port_set_filter_table_entry(table, crc_result);
 }
 
@@ -1998,7 +1991,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
                          * 3-1 Queue ETH_Q0=0
                          * 7-4 Reserved = 0;
                          */
-                        mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+                        mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
 
                         /* Set all entries in DA filter other multicast
                          * table (Ex_dFOMT)
@@ -2008,7 +2001,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
                          * 3-1 Queue ETH_Q0=0
                          * 7-4 Reserved = 0;
                          */
-                        mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+                        mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
                 }
                 return;
         }
@@ -2018,11 +2011,11 @@ static void eth_port_set_multicast_list(struct net_device *dev)
          */
         for (table_index = 0; table_index <= 0xFC; table_index += 4) {
                 /* Clear DA filter special multicast table (Ex_dFSMT) */
-                mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+                mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
                                 (eth_port_num) + table_index, 0);
 
                 /* Clear DA filter other multicast table (Ex_dFOMT) */
-                mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
+                mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE
                                 (eth_port_num) + table_index, 0);
         }
 
@@ -2056,15 +2049,15 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
 
         /* Clear DA filter unicast table (Ex_dFUT) */
         for (table_index = 0; table_index <= 0xC; table_index += 4)
-                mv_write(MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
+                mv_write(DA_FILTER_UNICAST_TABLE_BASE
                                 (eth_port_num) + table_index, 0);
 
         for (table_index = 0; table_index <= 0xFC; table_index += 4) {
                 /* Clear DA filter special multicast table (Ex_dFSMT) */
-                mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+                mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
                                 (eth_port_num) + table_index, 0);
                 /* Clear DA filter other multicast table (Ex_dFOMT) */
-                mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
+                mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE
                                 (eth_port_num) + table_index, 0);
         }
 }
@@ -2093,12 +2086,12 @@ static void eth_clear_mib_counters(unsigned int eth_port_num)
         /* Perform dummy reads from MIB counters */
         for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
                         i += 4)
-                mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(eth_port_num) + i);
+                mv_read(MIB_COUNTERS_BASE(eth_port_num) + i);
 }
 
 static inline u32 read_mib(struct mv643xx_private *mp, int offset)
 {
-        return mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(mp->port_num) + offset);
+        return mv_read(MIB_COUNTERS_BASE(mp->port_num) + offset);
 }
 
 static void eth_update_mib_counters(struct mv643xx_private *mp)
@@ -2183,7 +2176,7 @@ static int ethernet_phy_get(unsigned int eth_port_num)
 {
         unsigned int reg_data;
 
-        reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG);
+        reg_data = mv_read(PHY_ADDR_REG);
 
         return ((reg_data >> (5 * eth_port_num)) & 0x1f);
 }
@@ -2210,10 +2203,10 @@ static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
         u32 reg_data;
         int addr_shift = 5 * eth_port_num;
 
-        reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG);
+        reg_data = mv_read(PHY_ADDR_REG);
         reg_data &= ~(0x1f << addr_shift);
         reg_data |= (phy_addr & 0x1f) << addr_shift;
-        mv_write(MV643XX_ETH_PHY_ADDR_REG, reg_data);
+        mv_write(PHY_ADDR_REG, reg_data);
 }
 
 /*
@@ -2251,13 +2244,13 @@ static void ethernet_phy_reset(unsigned int eth_port_num)
 static void mv643xx_eth_port_enable_tx(unsigned int port_num,
                                         unsigned int queues)
 {
-        mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), queues);
+        mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), queues);
 }
 
 static void mv643xx_eth_port_enable_rx(unsigned int port_num,
                                         unsigned int queues)
 {
-        mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), queues);
+        mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), queues);
 }
 
 static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num)
@@ -2265,21 +2258,18 @@ static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num)
         u32 queues;
 
         /* Stop Tx port activity. Check port Tx activity. */
-        queues = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num))
-                        & 0xFF;
+        queues = mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF;
         if (queues) {
                 /* Issue stop command for active queues only */
-                mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num),
-                                (queues << 8));
+                mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8));
 
                 /* Wait for all Tx activity to terminate. */
                 /* Check port cause register that all Tx queues are stopped */
-                while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num))
-                                & 0xFF)
+                while (mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF)
                         udelay(PHY_WAIT_MICRO_SECONDS);
 
                 /* Wait for Tx FIFO to empty */
-                while (mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num)) &
+                while (mv_read(PORT_STATUS_REG(port_num)) &
                                 ETH_PORT_TX_FIFO_EMPTY)
                         udelay(PHY_WAIT_MICRO_SECONDS);
         }
@@ -2292,17 +2282,14 @@ static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num)
         u32 queues;
 
         /* Stop Rx port activity. Check port Rx activity. */
-        queues = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num))
-                        & 0xFF;
+        queues = mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF;
         if (queues) {
                 /* Issue stop command for active queues only */
-                mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
-                                (queues << 8));
+                mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8));
 
                 /* Wait for all Rx activity to terminate. */
                 /* Check port cause register that all Rx queues are stopped */
-                while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num))
-                                & 0xFF)
+                while (mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF)
                         udelay(PHY_WAIT_MICRO_SECONDS);
         }
 
@@ -2338,11 +2325,11 @@ static void eth_port_reset(unsigned int port_num)
         eth_clear_mib_counters(port_num);
 
         /* Reset the Enable bit in the Configuration Register */
-        reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
-        reg_data &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE |
-                        MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL |
-                        MV643XX_ETH_FORCE_LINK_PASS);
-        mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data);
+        reg_data = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
+        reg_data &= ~(SERIAL_PORT_ENABLE |
+                        DO_NOT_FORCE_LINK_FAIL |
+                        FORCE_LINK_PASS);
+        mv_write(PORT_SERIAL_CONTROL_REG(port_num), reg_data);
 }
 
 
@@ -2377,7 +2364,7 @@ static void eth_port_read_smi_reg(unsigned int port_num,
         spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
 
         /* wait for the SMI register to become available */
-        for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
+        for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) {
                 if (i == PHY_WAIT_ITERATIONS) {
                         printk("mv643xx PHY busy timeout, port %d\n", port_num);
                         goto out;
@@ -2385,11 +2372,11 @@ static void eth_port_read_smi_reg(unsigned int port_num,
                 udelay(PHY_WAIT_MICRO_SECONDS);
         }
 
-        mv_write(MV643XX_ETH_SMI_REG,
+        mv_write(SMI_REG,
                 (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
 
         /* now wait for the data to be valid */
-        for (i = 0; !(mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) {
+        for (i = 0; !(mv_read(SMI_REG) & ETH_SMI_READ_VALID); i++) {
                 if (i == PHY_WAIT_ITERATIONS) {
                         printk("mv643xx PHY read timeout, port %d\n", port_num);
                         goto out;
@@ -2397,7 +2384,7 @@ static void eth_port_read_smi_reg(unsigned int port_num,
                 udelay(PHY_WAIT_MICRO_SECONDS);
         }
 
-        *value = mv_read(MV643XX_ETH_SMI_REG) & 0xffff;
+        *value = mv_read(SMI_REG) & 0xffff;
 out:
         spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
 }
@@ -2435,7 +2422,7 @@ static void eth_port_write_smi_reg(unsigned int eth_port_num,
         spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
 
         /* wait for the SMI register to become available */
-        for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
+        for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) {
                 if (i == PHY_WAIT_ITERATIONS) {
                         printk("mv643xx PHY busy timeout, port %d\n",
                                         eth_port_num);
@@ -2444,7 +2431,7 @@ static void eth_port_write_smi_reg(unsigned int eth_port_num,
                 udelay(PHY_WAIT_MICRO_SECONDS);
         }
 
-        mv_write(MV643XX_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
+        mv_write(SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
                         ETH_SMI_OPCODE_WRITE | (value & 0xffff));
 out:
         spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);