author		Lennert Buytenhek <buytenh@wantstofly.org>	2008-05-31 20:10:27 -0400
committer	Lennert Buytenhek <buytenh@wantstofly.org>	2008-06-12 02:40:29 -0400
commit		cc9754b3334ea371c25041b03b2fdc08404f38b4 (patch)
tree		68182e83afff74804a93aa5337939e467861737e /drivers/net/mv643xx_eth.c
parent		7ca72a3b166fe5e7e7e2bf0fb9e48089cba6c25e (diff)

mv643xx_eth: get rid of ETH_/ethernet_/eth_ prefixes
The fact that mv643xx_eth is an ethernet driver is pretty obvious,
and having a lot of internal-use-only functions and defines carry
ETH_/ethernet_/eth_ prefixes is rather pointless. So, get rid of
most of those prefixes.
Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Acked-by: Dale Farnsworth <dale@farnsworth.org>
Diffstat (limited to 'drivers/net/mv643xx_eth.c')

 drivers/net/mv643xx_eth.c | 399 +++++++++++++++++----------------
 1 file changed, 195 insertions(+), 204 deletions(-)
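
The renames themselves are purely mechanical. As a quick orientation before the hunks below (an illustrative sketch drawn from this patch, not a verbatim excerpt of it), a typical before/after pair looks like this:

	/* before: internal-use-only symbols carried an ETH_/eth_/ethernet_ prefix */
	#define ETH_SMI_BUSY	0x10000000
	static void eth_port_read_smi_reg(struct mv643xx_private *mp,
					unsigned int phy_reg, unsigned int *value);

	/* after: the prefix is dropped; names visible outside the driver
	 * (mv643xx_eth_start_xmit() and friends) keep the mv643xx_eth_ namespace */
	#define SMI_BUSY	0x10000000
	static void read_smi_reg(struct mv643xx_private *mp,
					unsigned int phy_reg, unsigned int *value);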
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 307cfbeb2d47..624f80775506 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -167,31 +167,31 @@ static char mv643xx_driver_version[] = "1.0";
 #define FORCE_LINK_PASS			(1 << 1)
 #define SERIAL_PORT_ENABLE		(1 << 0)
 
-#define PORT_DEFAULT_TRANSMIT_QUEUE_SIZE	800
-#define PORT_DEFAULT_RECEIVE_QUEUE_SIZE		400
+#define DEFAULT_RX_QUEUE_SIZE		400
+#define DEFAULT_TX_QUEUE_SIZE		800
 
 /* SMI reg */
-#define ETH_SMI_BUSY		0x10000000	/* 0 - Write, 1 - Read */
-#define ETH_SMI_READ_VALID	0x08000000	/* 0 - Write, 1 - Read */
-#define ETH_SMI_OPCODE_WRITE	0		/* Completion of Read */
-#define ETH_SMI_OPCODE_READ	0x04000000	/* Operation is in progress */
+#define SMI_BUSY		0x10000000	/* 0 - Write, 1 - Read */
+#define SMI_READ_VALID		0x08000000	/* 0 - Write, 1 - Read */
+#define SMI_OPCODE_WRITE	0		/* Completion of Read */
+#define SMI_OPCODE_READ		0x04000000	/* Operation is in progress */
 
 /* typedefs */
 
-typedef enum _eth_func_ret_status {
+typedef enum _func_ret_status {
 	ETH_OK,			/* Returned as expected. */
 	ETH_ERROR,		/* Fundamental error. */
 	ETH_RETRY,		/* Could not process request. Try later.*/
 	ETH_END_OF_JOB,		/* Ring has nothing to process. */
 	ETH_QUEUE_FULL,		/* Ring resource error. */
 	ETH_QUEUE_LAST_RESOURCE	/* Ring resources about to exhaust. */
-} ETH_FUNC_RET_STATUS;
+} FUNC_RET_STATUS;
 
 /*
  * RX/TX descriptors.
  */
 #if defined(__BIG_ENDIAN)
-struct eth_rx_desc {
+struct rx_desc {
 	u16 byte_cnt;		/* Descriptor buffer byte count */
 	u16 buf_size;		/* Buffer size */
 	u32 cmd_sts;		/* Descriptor command status */
@@ -199,7 +199,7 @@ struct eth_rx_desc {
 	u32 buf_ptr;		/* Descriptor buffer pointer */
 };
 
-struct eth_tx_desc {
+struct tx_desc {
 	u16 byte_cnt;		/* buffer byte count */
 	u16 l4i_chk;		/* CPU provided TCP checksum */
 	u32 cmd_sts;		/* Command/status field */
@@ -207,7 +207,7 @@ struct eth_tx_desc {
 	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
 };
 #elif defined(__LITTLE_ENDIAN)
-struct eth_rx_desc {
+struct rx_desc {
 	u32 cmd_sts;		/* Descriptor command status */
 	u16 buf_size;		/* Buffer size */
 	u16 byte_cnt;		/* Descriptor buffer byte count */
@@ -215,7 +215,7 @@ struct eth_rx_desc {
 	u32 next_desc_ptr;	/* Next descriptor pointer */
 };
 
-struct eth_tx_desc {
+struct tx_desc {
 	u32 cmd_sts;		/* Command/status field */
 	u16 l4i_chk;		/* CPU provided TCP checksum */
 	u16 byte_cnt;		/* buffer byte count */
@@ -227,28 +227,28 @@ struct eth_tx_desc {
 #endif
 
 /* RX & TX descriptor command */
-#define ETH_BUFFER_OWNED_BY_DMA		0x80000000
+#define BUFFER_OWNED_BY_DMA		0x80000000
 
 /* RX & TX descriptor status */
-#define ETH_ERROR_SUMMARY		0x00000001
+#define ERROR_SUMMARY			0x00000001
 
 /* RX descriptor status */
-#define ETH_LAYER_4_CHECKSUM_OK		0x40000000
-#define ETH_RX_ENABLE_INTERRUPT		0x20000000
-#define ETH_RX_FIRST_DESC		0x08000000
-#define ETH_RX_LAST_DESC		0x04000000
+#define LAYER_4_CHECKSUM_OK		0x40000000
+#define RX_ENABLE_INTERRUPT		0x20000000
+#define RX_FIRST_DESC			0x08000000
+#define RX_LAST_DESC			0x04000000
 
 /* TX descriptor command */
-#define ETH_TX_ENABLE_INTERRUPT		0x00800000
-#define ETH_GEN_CRC			0x00400000
-#define ETH_TX_FIRST_DESC		0x00200000
-#define ETH_TX_LAST_DESC		0x00100000
-#define ETH_ZERO_PADDING		0x00080000
-#define ETH_GEN_IP_V4_CHECKSUM		0x00040000
-#define ETH_GEN_TCP_UDP_CHECKSUM	0x00020000
-#define ETH_UDP_FRAME			0x00010000
+#define TX_ENABLE_INTERRUPT		0x00800000
+#define GEN_CRC				0x00400000
+#define TX_FIRST_DESC			0x00200000
+#define TX_LAST_DESC			0x00100000
+#define ZERO_PADDING			0x00080000
+#define GEN_IP_V4_CHECKSUM		0x00040000
+#define GEN_TCP_UDP_CHECKSUM		0x00020000
+#define UDP_FRAME			0x00010000
 
-#define ETH_TX_IHL_SHIFT		11
+#define TX_IHL_SHIFT			11
 
 
 /* Unified struct for Rx and Tx operations. The user is not required to */
@@ -264,7 +264,7 @@ struct pkt_info {
 
 /* global *******************************************************************/
 struct mv643xx_shared_private {
-	void __iomem *eth_base;
+	void __iomem *base;
 
 	/* used to protect SMI_REG, which is shared across ports */
 	spinlock_t phy_lock;
@@ -334,12 +334,12 @@ struct mv643xx_private {
 	u32 tx_clean_threshold;
 #endif
 
-	struct eth_rx_desc *p_rx_desc_area;
+	struct rx_desc *p_rx_desc_area;
 	dma_addr_t rx_desc_dma;
 	int rx_desc_area_size;
 	struct sk_buff **rx_skb;
 
-	struct eth_tx_desc *p_tx_desc_area;
+	struct tx_desc *p_tx_desc_area;
 	dma_addr_t tx_desc_dma;
 	int tx_desc_area_size;
 	struct sk_buff **tx_skb;
@@ -375,12 +375,12 @@ struct mv643xx_private {
 /* port register accessors **************************************************/
 static inline u32 rdl(struct mv643xx_private *mp, int offset)
 {
-	return readl(mp->shared->eth_base + offset);
+	return readl(mp->shared->base + offset);
 }
 
 static inline void wrl(struct mv643xx_private *mp, int offset, u32 data)
 {
-	writel(data, mp->shared->eth_base + offset);
+	writel(data, mp->shared->base + offset);
 }
 
 
@@ -446,7 +446,7 @@ static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp)
 static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev);
 
 /*
- * eth_rx_return_buff - Returns a Rx buffer back to the Rx ring.
+ * rx_return_buff - Returns a Rx buffer back to the Rx ring.
  *
  * DESCRIPTION:
  *	This routine returns a Rx buffer back to the Rx ring. It retrieves the
@@ -465,11 +465,11 @@ static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev);
  *	ETH_ERROR in case the routine can not access Rx desc ring.
  *	ETH_OK otherwise.
  */
-static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
+static FUNC_RET_STATUS rx_return_buff(struct mv643xx_private *mp,
 						struct pkt_info *p_pkt_info)
 {
 	int used_rx_desc;	/* Where to return Rx resource */
-	volatile struct eth_rx_desc *p_used_rx_desc;
+	volatile struct rx_desc *p_used_rx_desc;
 	unsigned long flags;
 
 	spin_lock_irqsave(&mp->lock, flags);
@@ -486,8 +486,7 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
 
 	/* Return the descriptor to DMA ownership */
 	wmb();
-	p_used_rx_desc->cmd_sts =
-			ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
+	p_used_rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
 	wmb();
 
 	/* Move the used descriptor pointer to the next descriptor */
@@ -524,12 +523,12 @@ static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
 		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
 		if (unaligned)
 			skb_reserve(skb, dma_get_cache_alignment() - unaligned);
-		pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
+		pkt_info.cmd_sts = RX_ENABLE_INTERRUPT;
 		pkt_info.byte_cnt = ETH_RX_SKB_SIZE;
 		pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
 					ETH_RX_SKB_SIZE, DMA_FROM_DEVICE);
 		pkt_info.return_info = skb;
-		if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) {
+		if (rx_return_buff(mp, &pkt_info) != ETH_OK) {
 			printk(KERN_ERR
 				"%s: Error allocating RX Ring\n", dev->name);
 			break;
@@ -563,7 +562,7 @@ static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
 }
 
 /*
- * eth_port_receive - Get received information from Rx ring.
+ * port_receive - Get received information from Rx ring.
  *
  * DESCRIPTION:
  *	This routine returns the received data to the caller. There is no
@@ -585,11 +584,11 @@ static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
  *	ETH_END_OF_JOB if there is no received data.
  *	ETH_OK otherwise.
  */
-static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
+static FUNC_RET_STATUS port_receive(struct mv643xx_private *mp,
 						struct pkt_info *p_pkt_info)
 {
 	int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
-	volatile struct eth_rx_desc *p_rx_desc;
+	volatile struct rx_desc *p_rx_desc;
 	unsigned int command_status;
 	unsigned long flags;
 
@@ -610,7 +609,7 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
 	rmb();
 
 	/* Nothing to receive... */
-	if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
+	if (command_status & BUFFER_OWNED_BY_DMA) {
 		spin_unlock_irqrestore(&mp->lock, flags);
 		return ETH_END_OF_JOB;
 	}
@@ -659,7 +658,7 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
 	struct sk_buff *skb;
 	struct pkt_info pkt_info;
 
-	while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) {
+	while (budget-- > 0 && port_receive(mp, &pkt_info) == ETH_OK) {
 		dma_unmap_single(NULL, pkt_info.buf_ptr, ETH_RX_SKB_SIZE,
 							DMA_FROM_DEVICE);
 		mp->rx_desc_count--;
@@ -676,21 +675,20 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
 		 * In case received a packet without first / last bits on OR
 		 * the error summary bit is on, the packets needs to be dropeed.
 		 */
-		if (((pkt_info.cmd_sts
-				& (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
-					(ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
-				|| (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
+		if (((pkt_info.cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+					(RX_FIRST_DESC | RX_LAST_DESC))
+				|| (pkt_info.cmd_sts & ERROR_SUMMARY)) {
 			stats->rx_dropped++;
-			if ((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC |
-							ETH_RX_LAST_DESC)) !=
-				(ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) {
+			if ((pkt_info.cmd_sts & (RX_FIRST_DESC |
+							RX_LAST_DESC)) !=
+				(RX_FIRST_DESC | RX_LAST_DESC)) {
 				if (net_ratelimit())
 					printk(KERN_ERR
 						"%s: Received packet spread "
 						"on multiple descriptors\n",
 						dev->name);
 			}
-			if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)
+			if (pkt_info.cmd_sts & ERROR_SUMMARY)
 				stats->rx_errors++;
 
 			dev_kfree_skb_irq(skb);
@@ -701,7 +699,7 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
 			 */
 			skb_put(skb, pkt_info.byte_cnt - 4);
 
-			if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
+			if (pkt_info.cmd_sts & LAYER_4_CHECKSUM_OK) {
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
 				skb->csum = htons(
 					(pkt_info.cmd_sts & 0x0007fff8) >> 3);
@@ -779,9 +777,9 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
 }
 
 /**
- * eth_alloc_tx_desc_index - return the index of the next available tx desc
+ * alloc_tx_desc_index - return the index of the next available tx desc
  */
-static int eth_alloc_tx_desc_index(struct mv643xx_private *mp)
+static int alloc_tx_desc_index(struct mv643xx_private *mp)
 {
 	int tx_desc_curr;
 
@@ -796,30 +794,30 @@ static int eth_alloc_tx_desc_index(struct mv643xx_private *mp)
 }
 
 /**
- * eth_tx_fill_frag_descs - fill tx hw descriptors for an skb's fragments.
+ * tx_fill_frag_descs - fill tx hw descriptors for an skb's fragments.
  *
  * Ensure the data for each fragment to be transmitted is mapped properly,
  * then fill in descriptors in the tx hw queue.
  */
-static void eth_tx_fill_frag_descs(struct mv643xx_private *mp,
+static void tx_fill_frag_descs(struct mv643xx_private *mp,
 					struct sk_buff *skb)
 {
 	int frag;
 	int tx_index;
-	struct eth_tx_desc *desc;
+	struct tx_desc *desc;
 
 	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
 		skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
 
-		tx_index = eth_alloc_tx_desc_index(mp);
+		tx_index = alloc_tx_desc_index(mp);
 		desc = &mp->p_tx_desc_area[tx_index];
 
-		desc->cmd_sts = ETH_BUFFER_OWNED_BY_DMA;
+		desc->cmd_sts = BUFFER_OWNED_BY_DMA;
 		/* Last Frag enables interrupt and frees the skb */
 		if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
-			desc->cmd_sts |= ETH_ZERO_PADDING |
-					 ETH_TX_LAST_DESC |
-					 ETH_TX_ENABLE_INTERRUPT;
+			desc->cmd_sts |= ZERO_PADDING |
+					 TX_LAST_DESC |
+					 TX_ENABLE_INTERRUPT;
 			mp->tx_skb[tx_index] = skb;
 		} else
 			mp->tx_skb[tx_index] = NULL;
@@ -840,34 +838,32 @@ static inline __be16 sum16_as_be(__sum16 sum)
 }
 
 /**
- * eth_tx_submit_descs_for_skb - submit data from an skb to the tx hw
+ * tx_submit_descs_for_skb - submit data from an skb to the tx hw
  *
 * Ensure the data for an skb to be transmitted is mapped properly,
 * then fill in descriptors in the tx hw queue and start the hardware.
 */
-static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
+static void tx_submit_descs_for_skb(struct mv643xx_private *mp,
 					struct sk_buff *skb)
 {
 	int tx_index;
-	struct eth_tx_desc *desc;
+	struct tx_desc *desc;
 	u32 cmd_sts;
 	int length;
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 
-	cmd_sts = ETH_TX_FIRST_DESC | ETH_GEN_CRC | ETH_BUFFER_OWNED_BY_DMA;
+	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
 
-	tx_index = eth_alloc_tx_desc_index(mp);
+	tx_index = alloc_tx_desc_index(mp);
 	desc = &mp->p_tx_desc_area[tx_index];
 
 	if (nr_frags) {
-		eth_tx_fill_frag_descs(mp, skb);
+		tx_fill_frag_descs(mp, skb);
 
 		length = skb_headlen(skb);
 		mp->tx_skb[tx_index] = NULL;
 	} else {
-		cmd_sts |= ETH_ZERO_PADDING |
-			   ETH_TX_LAST_DESC |
-			   ETH_TX_ENABLE_INTERRUPT;
+		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
 		length = skb->len;
 		mp->tx_skb[tx_index] = skb;
 	}
@@ -878,13 +874,13 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		BUG_ON(skb->protocol != htons(ETH_P_IP));
 
-		cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
-			   ETH_GEN_IP_V4_CHECKSUM |
-			   ip_hdr(skb)->ihl << ETH_TX_IHL_SHIFT;
+		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
+			   GEN_IP_V4_CHECKSUM |
+			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;
 
 		switch (ip_hdr(skb)->protocol) {
 		case IPPROTO_UDP:
-			cmd_sts |= ETH_UDP_FRAME;
+			cmd_sts |= UDP_FRAME;
 			desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
 			break;
 		case IPPROTO_TCP:
@@ -895,7 +891,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
 		}
 	} else {
 		/* Errata BTS #50, IHL must be 5 if no HW checksum */
-		cmd_sts |= 5 << ETH_TX_IHL_SHIFT;
+		cmd_sts |= 5 << TX_IHL_SHIFT;
 		desc->l4i_chk = 0;
 	}
 
@@ -938,7 +934,7 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	eth_tx_submit_descs_for_skb(mp, skb);
+	tx_submit_descs_for_skb(mp, skb);
 	stats->tx_bytes += skb->len;
 	stats->tx_packets++;
 	dev->trans_start = jiffies;
@@ -953,10 +949,10 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 
 /* mii management interface *************************************************/
-static int ethernet_phy_get(struct mv643xx_private *mp);
+static int phy_addr_get(struct mv643xx_private *mp);
 
 /*
- * eth_port_read_smi_reg - Read PHY registers
+ * read_smi_reg - Read PHY registers
  *
  * DESCRIPTION:
  *	This routine utilize the SMI interface to interact with the PHY in
@@ -975,11 +971,11 @@ static int ethernet_phy_get(struct mv643xx_private *mp);
  *	true otherwise.
  *
  */
-static void eth_port_read_smi_reg(struct mv643xx_private *mp,
+static void read_smi_reg(struct mv643xx_private *mp,
 				unsigned int phy_reg, unsigned int *value)
 {
-	void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG;
-	int phy_addr = ethernet_phy_get(mp);
+	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
+	int phy_addr = phy_addr_get(mp);
 	unsigned long flags;
 	int i;
 
@@ -987,7 +983,7 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp,
 	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);
 
 	/* wait for the SMI register to become available */
-	for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) {
+	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
 		if (i == 1000) {
 			printk("%s: PHY busy timeout\n", mp->dev->name);
 			goto out;
@@ -995,11 +991,10 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp,
 		udelay(10);
 	}
 
-	writel((phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ,
-		smi_reg);
+	writel((phy_addr << 16) | (phy_reg << 21) | SMI_OPCODE_READ, smi_reg);
 
 	/* now wait for the data to be valid */
-	for (i = 0; !(readl(smi_reg) & ETH_SMI_READ_VALID); i++) {
+	for (i = 0; !(readl(smi_reg) & SMI_READ_VALID); i++) {
 		if (i == 1000) {
 			printk("%s: PHY read timeout\n", mp->dev->name);
 			goto out;
@@ -1013,7 +1008,7 @@ out:
 }
 
 /*
- * eth_port_write_smi_reg - Write to PHY registers
+ * write_smi_reg - Write to PHY registers
  *
  * DESCRIPTION:
  *	This routine utilize the SMI interface to interact with the PHY in
@@ -1032,11 +1027,11 @@ out:
  *	true otherwise.
  *
  */
-static void eth_port_write_smi_reg(struct mv643xx_private *mp,
+static void write_smi_reg(struct mv643xx_private *mp,
 				unsigned int phy_reg, unsigned int value)
 {
-	void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG;
-	int phy_addr = ethernet_phy_get(mp);
+	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
+	int phy_addr = phy_addr_get(mp);
 	unsigned long flags;
 	int i;
 
@@ -1044,7 +1039,7 @@ static void eth_port_write_smi_reg(struct mv643xx_private *mp,
 	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);
 
 	/* wait for the SMI register to become available */
-	for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) {
+	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
 		if (i == 1000) {
 			printk("%s: PHY busy timeout\n", mp->dev->name);
 			goto out;
@@ -1053,7 +1048,7 @@ static void eth_port_write_smi_reg(struct mv643xx_private *mp,
 	}
 
 	writel((phy_addr << 16) | (phy_reg << 21) |
-		ETH_SMI_OPCODE_WRITE | (value & 0xffff), smi_reg);
+		SMI_OPCODE_WRITE | (value & 0xffff), smi_reg);
 out:
 	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
 }
@@ -1061,7 +1056,7 @@ out:
 
 /* mib counters *************************************************************/
 /*
- * eth_clear_mib_counters - Clear all MIB counters
+ * clear_mib_counters - Clear all MIB counters
  *
  * DESCRIPTION:
  *	This function clears all MIB counters of a specific ethernet port.
@@ -1077,7 +1072,7 @@ out:
  *	MIB counter value.
  *
  */
-static void eth_clear_mib_counters(struct mv643xx_private *mp)
+static void clear_mib_counters(struct mv643xx_private *mp)
 {
 	unsigned int port_num = mp->port_num;
 	int i;
@@ -1092,7 +1087,7 @@ static inline u32 read_mib(struct mv643xx_private *mp, int offset)
 	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
 }
 
-static void eth_update_mib_counters(struct mv643xx_private *mp)
+static void update_mib_counters(struct mv643xx_private *mp)
 {
 	struct mv643xx_mib_counters *p = &mp->mib_counters;
 
@@ -1258,7 +1253,7 @@ static void mv643xx_get_ethtool_stats(struct net_device *netdev,
 	struct mv643xx_private *mp = netdev->priv;
 	int i;
 
-	eth_update_mib_counters(mp);
+	update_mib_counters(mp);
 
 	for (i = 0; i < MV643XX_STATS_LEN; i++) {
 		char *p = (char *)mp+mv643xx_gstrings_stats[i].stat_offset;
@@ -1292,10 +1287,9 @@ static const struct ethtool_ops mv643xx_ethtool_ops = {
 
 /* address handling *********************************************************/
 /*
- * eth_port_uc_addr_get - Read the MAC address from the port's hw registers
+ * uc_addr_get - Read the MAC address from the port's hw registers
  */
-static void eth_port_uc_addr_get(struct mv643xx_private *mp,
-				unsigned char *p_addr)
+static void uc_addr_get(struct mv643xx_private *mp, unsigned char *p_addr)
 {
 	unsigned int port_num = mp->port_num;
 	unsigned int mac_h;
@@ -1313,7 +1307,7 @@ static void eth_port_uc_addr_get(struct mv643xx_private *mp,
 }
 
 /*
- * eth_port_init_mac_tables - Clear all entrance in the UC, SMC and OMC tables
+ * init_mac_tables - Clear all entrance in the UC, SMC and OMC tables
  *
 * DESCRIPTION:
 *	Go through all the DA filter tables (Unicast, Special Multicast &
@@ -1328,7 +1322,7 @@ static void eth_port_uc_addr_get(struct mv643xx_private *mp,
 * RETURN:
 *	None.
 */
-static void eth_port_init_mac_tables(struct mv643xx_private *mp)
+static void init_mac_tables(struct mv643xx_private *mp)
 {
 	unsigned int port_num = mp->port_num;
 	int table_index;
@@ -1354,7 +1348,7 @@ static void eth_port_init_mac_tables(struct mv643xx_private *mp)
 *	3-1	Queue		(ETH_Q0=0)
 *	7-4	Reserved = 0;
 */
-static void eth_port_set_filter_table_entry(struct mv643xx_private *mp,
+static void set_filter_table_entry(struct mv643xx_private *mp,
 					    int table, unsigned char entry)
 {
 	unsigned int table_reg;
@@ -1371,10 +1365,9 @@ static void eth_port_set_filter_table_entry(struct mv643xx_private *mp,
 }
 
 /*
- * eth_port_uc_addr_set - Write a MAC address into the port's hw registers
+ * uc_addr_set - Write a MAC address into the port's hw registers
  */
-static void eth_port_uc_addr_set(struct mv643xx_private *mp,
-				unsigned char *p_addr)
+static void uc_addr_set(struct mv643xx_private *mp, unsigned char *p_addr)
 {
 	unsigned int port_num = mp->port_num;
 	unsigned int mac_h;
@@ -1390,7 +1383,7 @@ static void eth_port_uc_addr_set(struct mv643xx_private *mp,
 
 	/* Accept frames with this address */
 	table = UNICAST_TABLE(port_num);
-	eth_port_set_filter_table_entry(mp, table, p_addr[5] & 0x0f);
+	set_filter_table_entry(mp, table, p_addr[5] & 0x0f);
 }
 
 /*
@@ -1405,8 +1398,8 @@ static void mv643xx_eth_update_mac_address(struct net_device *dev)
 {
 	struct mv643xx_private *mp = netdev_priv(dev);
 
-	eth_port_init_mac_tables(mp);
-	eth_port_uc_addr_set(mp, dev->dev_addr);
+	init_mac_tables(mp);
+	uc_addr_set(mp, dev->dev_addr);
 }
 
 /*
@@ -1432,7 +1425,7 @@ static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
 }
 
 /*
- * eth_port_mc_addr - Multicast address settings.
+ * mc_addr - Multicast address settings.
  *
 * The MV device supports multicast using two tables:
 * 1) Special Multicast Table for MAC addresses of the form
@@ -1442,10 +1435,10 @@ static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
 * 2) Other Multicast Table for multicast of another type. A CRC-8bit
 *    is used as an index to the Other Multicast Table entries in the
 *    DA-Filter table. This function calculates the CRC-8bit value.
- *    In either case, eth_port_set_filter_table_entry() is then called
+ *    In either case, set_filter_table_entry() is then called
 *    to set to set the actual table entry.
 */
-static void eth_port_mc_addr(struct mv643xx_private *mp, unsigned char *p_addr)
+static void mc_addr(struct mv643xx_private *mp, unsigned char *p_addr)
 {
 	unsigned int port_num = mp->port_num;
 	unsigned int mac_h;
@@ -1459,7 +1452,7 @@ static void eth_port_mc_addr(struct mv643xx_private *mp, unsigned char *p_addr)
 	if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
 	    (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
 		table = SPECIAL_MCAST_TABLE(port_num);
-		eth_port_set_filter_table_entry(mp, table, p_addr[5]);
+		set_filter_table_entry(mp, table, p_addr[5]);
 		return;
 	}
 
@@ -1532,20 +1525,20 @@ static void eth_port_mc_addr(struct mv643xx_private *mp, unsigned char *p_addr)
 		crc_result = crc_result | (crc[i] << i);
 
 	table = OTHER_MCAST_TABLE(port_num);
-	eth_port_set_filter_table_entry(mp, table, crc_result);
+	set_filter_table_entry(mp, table, crc_result);
 }
 
 /*
 * Set the entire multicast list based on dev->mc_list.
 */
-static void eth_port_set_multicast_list(struct net_device *dev)
+static void set_multicast_list(struct net_device *dev)
 {
 
 	struct dev_mc_list *mc_list;
 	int i;
 	int table_index;
 	struct mv643xx_private *mp = netdev_priv(dev);
-	unsigned int eth_port_num = mp->port_num;
+	unsigned int port_num = mp->port_num;
 
 	/* If the device is in promiscuous mode or in all multicast mode,
 	 * we will fully populate both multicast tables with accept.
@@ -1561,7 +1554,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
 			 * 3-1 Queue ETH_Q0=0
 			 * 7-4 Reserved = 0;
 			 */
-			wrl(mp, SPECIAL_MCAST_TABLE(eth_port_num) + table_index, 0x01010101);
+			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0x01010101);
 
 			/* Set all entries in DA filter other multicast
 			 * table (Ex_dFOMT)
@@ -1571,7 +1564,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
 			 * 3-1 Queue ETH_Q0=0
 			 * 7-4 Reserved = 0;
 			 */
-			wrl(mp, OTHER_MCAST_TABLE(eth_port_num) + table_index, 0x01010101);
+			wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0x01010101);
 		}
 		return;
 	}
@@ -1581,10 +1574,10 @@ static void eth_port_set_multicast_list(struct net_device *dev)
 	 */
 	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
 		/* Clear DA filter special multicast table (Ex_dFSMT) */
-		wrl(mp, SPECIAL_MCAST_TABLE(eth_port_num) + table_index, 0);
+		wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0);
 
 		/* Clear DA filter other multicast table (Ex_dFOMT) */
-		wrl(mp, OTHER_MCAST_TABLE(eth_port_num) + table_index, 0);
+		wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0);
 	}
 
 	/* Get pointer to net_device multicast list and add each one... */
@@ -1592,7 +1585,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
 			(i < 256) && (mc_list != NULL) && (i < dev->mc_count);
 			i++, mc_list = mc_list->next)
 		if (mc_list->dmi_addrlen == 6)
-			eth_port_mc_addr(mp, mc_list->dmi_addr);
+			mc_addr(mp, mc_list->dmi_addr);
 }
 
 /*
@@ -1615,7 +1608,7 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
 		config_reg &= ~UNICAST_PROMISCUOUS_MODE;
 	wrl(mp, PORT_CONFIG(mp->port_num), config_reg);
 
-	eth_port_set_multicast_list(dev);
+	set_multicast_list(dev);
 }
 
 
@@ -1644,22 +1637,22 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
 */
 static void ether_init_rx_desc_ring(struct mv643xx_private *mp)
 {
-	volatile struct eth_rx_desc *p_rx_desc;
+	volatile struct rx_desc *p_rx_desc;
 	int rx_desc_num = mp->rx_ring_size;
 	int i;
 
 	/* initialize the next_desc_ptr links in the Rx descriptors ring */
-	p_rx_desc = (struct eth_rx_desc *)mp->p_rx_desc_area;
+	p_rx_desc = (struct rx_desc *)mp->p_rx_desc_area;
 	for (i = 0; i < rx_desc_num; i++) {
 		p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
-			((i + 1) % rx_desc_num) * sizeof(struct eth_rx_desc);
+			((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
 	}
 
 	/* Save Rx desc pointer to driver struct. */
 	mp->rx_curr_desc_q = 0;
 	mp->rx_used_desc_q = 0;
 
-	mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc);
+	mp->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
 }
 
 static void mv643xx_eth_free_rx_rings(struct net_device *dev)
@@ -1716,20 +1709,20 @@ static void mv643xx_eth_free_rx_rings(struct net_device *dev)
 static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
 {
 	int tx_desc_num = mp->tx_ring_size;
-	struct eth_tx_desc *p_tx_desc;
+	struct tx_desc *p_tx_desc;
 	int i;
 
 	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
-	p_tx_desc = (struct eth_tx_desc *)mp->p_tx_desc_area;
+	p_tx_desc = (struct tx_desc *)mp->p_tx_desc_area;
 	for (i = 0; i < tx_desc_num; i++) {
 		p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
-			((i + 1) % tx_desc_num) * sizeof(struct eth_tx_desc);
+			((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
 	}
 
 	mp->tx_curr_desc_q = 0;
 	mp->tx_used_desc_q = 0;
 
-	mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc);
+	mp->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
 }
 
 /**
@@ -1740,7 +1733,7 @@ static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
 static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
 {
 	struct mv643xx_private *mp = netdev_priv(dev);
-	struct eth_tx_desc *desc;
+	struct tx_desc *desc;
 	u32 cmd_sts;
 	struct sk_buff *skb;
 	unsigned long flags;
@@ -1762,7 +1755,7 @@ static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
 		desc = &mp->p_tx_desc_area[tx_index];
 		cmd_sts = desc->cmd_sts;
 
-		if (!force && (cmd_sts & ETH_BUFFER_OWNED_BY_DMA)) {
+		if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA)) {
 			spin_unlock_irqrestore(&mp->lock, flags);
 			return released;
 		}
@@ -1776,14 +1769,14 @@ static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
 		if (skb)
 			mp->tx_skb[tx_index] = NULL;
 
-		if (cmd_sts & ETH_ERROR_SUMMARY) {
+		if (cmd_sts & ERROR_SUMMARY) {
 			printk("%s: Error in TX\n", dev->name);
 			dev->stats.tx_errors++;
 		}
 
 		spin_unlock_irqrestore(&mp->lock, flags);
 
-		if (cmd_sts & ETH_TX_FIRST_DESC)
+		if (cmd_sts & TX_FIRST_DESC)
 			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
 		else
 			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);
@@ -1833,7 +1826,7 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev)
 
 
 /* netdev ops and related ***************************************************/
-static void eth_port_reset(struct mv643xx_private *mp);
+static void port_reset(struct mv643xx_private *mp);
 
 /* Set the mv643xx port configuration register for the speed/duplex mode. */
 static void mv643xx_eth_update_pscr(struct net_device *dev,
@@ -1896,19 +1889,19 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *)dev_id;
 	struct mv643xx_private *mp = netdev_priv(dev);
-	u32 eth_int_cause, eth_int_cause_ext = 0;
+	u32 int_cause, int_cause_ext = 0;
 	unsigned int port_num = mp->port_num;
 
 	/* Read interrupt cause registers */
-	eth_int_cause = rdl(mp, INT_CAUSE(port_num)) & (INT_RX | INT_EXT);
-	if (eth_int_cause & INT_EXT) {
-		eth_int_cause_ext = rdl(mp, INT_CAUSE_EXT(port_num))
+	int_cause = rdl(mp, INT_CAUSE(port_num)) & (INT_RX | INT_EXT);
+	if (int_cause & INT_EXT) {
+		int_cause_ext = rdl(mp, INT_CAUSE_EXT(port_num))
 				& (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
-		wrl(mp, INT_CAUSE_EXT(port_num), ~eth_int_cause_ext);
+		wrl(mp, INT_CAUSE_EXT(port_num), ~int_cause_ext);
 	}
 
 	/* PHY status changed */
-	if (eth_int_cause_ext & (INT_EXT_LINK | INT_EXT_PHY)) {
+	if (int_cause_ext & (INT_EXT_LINK | INT_EXT_PHY)) {
 		struct ethtool_cmd cmd;
 
 		if (mii_link_ok(&mp->mii)) {
@@ -1928,7 +1921,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 	}
 
 #ifdef MV643XX_NAPI
-	if (eth_int_cause & INT_RX) {
+	if (int_cause & INT_RX) {
 		/* schedule the NAPI poll routine to maintain port */
 		wrl(mp, INT_MASK(port_num), 0x00000000);
 
@@ -1938,24 +1931,24 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 		netif_rx_schedule(dev, &mp->napi);
 	}
 #else
-	if (eth_int_cause & INT_RX)
+	if (int_cause & INT_RX)
 		mv643xx_eth_receive_queue(dev, INT_MAX);
 #endif
-	if (eth_int_cause_ext & INT_EXT_TX)
+	if (int_cause_ext & INT_EXT_TX)
 		mv643xx_eth_free_completed_tx_descs(dev);
 
 	/*
 	 * If no real interrupt occured, exit.
 	 * This can happen when using gigE interrupt coalescing mechanism.
 	 */
-	if ((eth_int_cause == 0x0) && (eth_int_cause_ext == 0x0))
+	if ((int_cause == 0x0) && (int_cause_ext == 0x0))
 		return IRQ_NONE;
 
 	return IRQ_HANDLED;
 }
 
 /*
- * ethernet_phy_reset - Reset Ethernet port PHY.
+ * phy_reset - Reset Ethernet port PHY.
  *
 * DESCRIPTION:
 *	This routine utilizes the SMI interface to reset the ethernet port PHY.
@@ -1970,24 +1963,24 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) | |||
1970 | * None. | 1963 | * None. |
1971 | * | 1964 | * |
1972 | */ | 1965 | */ |
1973 | static void ethernet_phy_reset(struct mv643xx_private *mp) | 1966 | static void phy_reset(struct mv643xx_private *mp) |
1974 | { | 1967 | { |
1975 | unsigned int phy_reg_data; | 1968 | unsigned int phy_reg_data; |
1976 | 1969 | ||
1977 | /* Reset the PHY */ | 1970 | /* Reset the PHY */ |
1978 | eth_port_read_smi_reg(mp, 0, &phy_reg_data); | 1971 | read_smi_reg(mp, 0, &phy_reg_data); |
1979 | phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */ | 1972 | phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */ |
1980 | eth_port_write_smi_reg(mp, 0, phy_reg_data); | 1973 | write_smi_reg(mp, 0, phy_reg_data); |
1981 | 1974 | ||
1982 | /* wait for PHY to come out of reset */ | 1975 | /* wait for PHY to come out of reset */ |
1983 | do { | 1976 | do { |
1984 | udelay(1); | 1977 | udelay(1); |
1985 | eth_port_read_smi_reg(mp, 0, &phy_reg_data); | 1978 | read_smi_reg(mp, 0, &phy_reg_data); |
1986 | } while (phy_reg_data & 0x8000); | 1979 | } while (phy_reg_data & 0x8000); |
1987 | } | 1980 | } |
1988 | 1981 | ||
1989 | /* | 1982 | /* |
1990 | * eth_port_start - Start the Ethernet port activity. | 1983 | * port_start - Start the Ethernet port activity. |
1991 | * | 1984 | * |
1992 | * DESCRIPTION: | 1985 | * DESCRIPTION: |
1993 | * This routine prepares the Ethernet port for Rx and Tx activity: | 1986 | * This routine prepares the Ethernet port for Rx and Tx activity: |
@@ -2013,7 +2006,7 @@ static void ethernet_phy_reset(struct mv643xx_private *mp) | |||
2013 | * RETURN: | 2006 | * RETURN: |
2014 | * None. | 2007 | * None. |
2015 | */ | 2008 | */ |
2016 | static void eth_port_start(struct net_device *dev) | 2009 | static void port_start(struct net_device *dev) |
2017 | { | 2010 | { |
2018 | struct mv643xx_private *mp = netdev_priv(dev); | 2011 | struct mv643xx_private *mp = netdev_priv(dev); |
2019 | unsigned int port_num = mp->port_num; | 2012 | unsigned int port_num = mp->port_num; |
@@ -2024,15 +2017,15 @@ static void eth_port_start(struct net_device *dev) | |||
2024 | /* Assignment of Tx CTRP of given queue */ | 2017 | /* Assignment of Tx CTRP of given queue */ |
2025 | tx_curr_desc = mp->tx_curr_desc_q; | 2018 | tx_curr_desc = mp->tx_curr_desc_q; |
2026 | wrl(mp, TXQ_CURRENT_DESC_PTR(port_num), | 2019 | wrl(mp, TXQ_CURRENT_DESC_PTR(port_num), |
2027 | (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc)); | 2020 | (u32)((struct tx_desc *)mp->tx_desc_dma + tx_curr_desc)); |
2028 | 2021 | ||
2029 | /* Assignment of Rx CRDP of given queue */ | 2022 | /* Assignment of Rx CRDP of given queue */ |
2030 | rx_curr_desc = mp->rx_curr_desc_q; | 2023 | rx_curr_desc = mp->rx_curr_desc_q; |
2031 | wrl(mp, RXQ_CURRENT_DESC_PTR(port_num), | 2024 | wrl(mp, RXQ_CURRENT_DESC_PTR(port_num), |
2032 | (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); | 2025 | (u32)((struct rx_desc *)mp->rx_desc_dma + rx_curr_desc)); |
2033 | 2026 | ||
2034 | /* Add the assigned Ethernet address to the port's address table */ | 2027 | /* Add the assigned Ethernet address to the port's address table */ |
2035 | eth_port_uc_addr_set(mp, dev->dev_addr); | 2028 | uc_addr_set(mp, dev->dev_addr); |
2036 | 2029 | ||
2037 | /* | 2030 | /* |
2038 | * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast | 2031 | * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast |
@@ -2072,14 +2065,14 @@ static void eth_port_start(struct net_device *dev) | |||
2072 | 2065 | ||
2073 | /* save phy settings across reset */ | 2066 | /* save phy settings across reset */ |
2074 | mv643xx_get_settings(dev, ðtool_cmd); | 2067 | mv643xx_get_settings(dev, ðtool_cmd); |
2075 | ethernet_phy_reset(mp); | 2068 | phy_reset(mp); |
2076 | mv643xx_set_settings(dev, ðtool_cmd); | 2069 | mv643xx_set_settings(dev, ðtool_cmd); |
2077 | } | 2070 | } |
2078 | 2071 | ||
2079 | #ifdef MV643XX_COAL | 2072 | #ifdef MV643XX_COAL |
2080 | 2073 | ||
2081 | /* | 2074 | /* |
2082 | * eth_port_set_rx_coal - Sets coalescing interrupt mechanism on RX path | 2075 | * set_rx_coal - Sets coalescing interrupt mechanism on RX path |
2083 | * | 2076 | * |
2084 | * DESCRIPTION: | 2077 | * DESCRIPTION: |
2085 | * This routine sets the RX coalescing interrupt mechanism parameter. | 2078 | * This routine sets the RX coalescing interrupt mechanism parameter. |
@@ -2100,7 +2093,7 @@ static void eth_port_start(struct net_device *dev) | |||
2100 | * The interrupt coalescing value set in the gigE port. | 2093 | * The interrupt coalescing value set in the gigE port. |
2101 | * | 2094 | * |
2102 | */ | 2095 | */ |
2103 | static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp, | 2096 | static unsigned int set_rx_coal(struct mv643xx_private *mp, |
2104 | unsigned int delay) | 2097 | unsigned int delay) |
2105 | { | 2098 | { |
2106 | unsigned int port_num = mp->port_num; | 2099 | unsigned int port_num = mp->port_num; |
@@ -2117,7 +2110,7 @@ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp, | |||
2117 | #endif | 2110 | #endif |
2118 | 2111 | ||
2119 | /* | 2112 | /* |
2120 | * eth_port_set_tx_coal - Sets coalescing interrupt mechanism on TX path | 2113 | * set_tx_coal - Sets coalescing interrupt mechanism on TX path |
2121 | * | 2114 | * |
2122 | * DESCRIPTION: | 2115 | * DESCRIPTION: |
2123 | * This routine sets the TX coalescing interrupt mechanism parameter. | 2116 | * This routine sets the TX coalescing interrupt mechanism parameter. |
@@ -2138,7 +2131,7 @@ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp, | |||
2138 | * The interrupt coalescing value set in the gigE port. | 2131 | * The interrupt coalescing value set in the gigE port. |
2139 | * | 2132 | * |
2140 | */ | 2133 | */ |
2141 | static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp, | 2134 | static unsigned int set_tx_coal(struct mv643xx_private *mp, |
2142 | unsigned int delay) | 2135 | unsigned int delay) |
2143 | { | 2136 | { |
2144 | unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64; | 2137 | unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64; |
@@ -2150,7 +2143,7 @@ static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp, | |||
2150 | } | 2143 | } |
2151 | 2144 | ||
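The conversion above turns a microsecond delay into hardware ticks of 64 t_clk cycles: t_clk / 1000000 is the clock rate in cycles per microsecond, multiplying by the delay gives cycles, and dividing by 64 gives the register value. A worked example, assuming a 133 MHz t_clk (the real rate comes from the shared platform data):

	coal = ((133000000 / 1000000) * 500) / 64	/* delay = 500 usec */
	     = (133 * 500) / 64
	     = 66500 / 64
	     = 1039					/* integer division */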
2152 | /* | 2145 | /* |
2153 | * eth_port_init - Initialize the Ethernet port driver | 2146 | * port_init - Initialize the Ethernet port driver |
2154 | * | 2147 | * |
2155 | * DESCRIPTION: | 2148 | * DESCRIPTION: |
2156 | * This function prepares the ethernet port to start its activity: | 2149 | * This function prepares the ethernet port to start its activity: |
@@ -2160,7 +2153,7 @@ static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp, | |||
2160 | * 3) Enable SDMA access to all four DRAM banks as well as internal SRAM. | 2153 | * 3) Enable SDMA access to all four DRAM banks as well as internal SRAM. |
2161 | * 4) Clean MAC tables. The reset status of those tables is unknown. | 2154 | * 4) Clean MAC tables. The reset status of those tables is unknown. |
2162 | * 5) Set PHY address. | 2155 | * 5) Set PHY address. |
2163 | * Note: Call this routine prior to the eth_port_start routine and after | 2156 | * Note: Call this routine prior to the port_start routine and after
2164 | * setting user values in the user fields of Ethernet port control | 2157 | * setting user values in the user fields of Ethernet port control |
2165 | * struct. | 2158 | * struct. |
2166 | * | 2159 | * |
@@ -2173,13 +2166,13 @@ static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp, | |||
2173 | * RETURN: | 2166 | * RETURN: |
2174 | * None. | 2167 | * None. |
2175 | */ | 2168 | */ |
2176 | static void eth_port_init(struct mv643xx_private *mp) | 2169 | static void port_init(struct mv643xx_private *mp) |
2177 | { | 2170 | { |
2178 | mp->rx_resource_err = 0; | 2171 | mp->rx_resource_err = 0; |
2179 | 2172 | ||
2180 | eth_port_reset(mp); | 2173 | port_reset(mp); |
2181 | 2174 | ||
2182 | eth_port_init_mac_tables(mp); | 2175 | init_mac_tables(mp); |
2183 | } | 2176 | } |
2184 | 2177 | ||
2185 | /* | 2178 | /* |
@@ -2215,7 +2208,7 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
2215 | return -EAGAIN; | 2208 | return -EAGAIN; |
2216 | } | 2209 | } |
2217 | 2210 | ||
2218 | eth_port_init(mp); | 2211 | port_init(mp); |
2219 | 2212 | ||
2220 | memset(&mp->timeout, 0, sizeof(struct timer_list)); | 2213 | memset(&mp->timeout, 0, sizeof(struct timer_list)); |
2221 | mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper; | 2214 | mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper; |
@@ -2239,7 +2232,7 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
2239 | 2232 | ||
2240 | /* Allocate TX ring */ | 2233 | /* Allocate TX ring */ |
2241 | mp->tx_desc_count = 0; | 2234 | mp->tx_desc_count = 0; |
2242 | size = mp->tx_ring_size * sizeof(struct eth_tx_desc); | 2235 | size = mp->tx_ring_size * sizeof(struct tx_desc); |
2243 | mp->tx_desc_area_size = size; | 2236 | mp->tx_desc_area_size = size; |
2244 | 2237 | ||
2245 | if (mp->tx_sram_size) { | 2238 | if (mp->tx_sram_size) { |
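The allocation itself falls between hunks here. A sketch of the two paths the surrounding code implies, placing the ring either in on-chip SRAM or in coherent DMA memory; the p_tx_desc_area and tx_sram_addr field names are assumptions, as they do not appear in this excerpt:

	/* Sketch: descriptor ring lives in SRAM when available,
	 * otherwise in coherent DMA memory.
	 */
	if (mp->tx_sram_size) {
		mp->p_tx_desc_area = ioremap(mp->tx_sram_addr, size);
		mp->tx_desc_dma = mp->tx_sram_addr;
	} else {
		mp->p_tx_desc_area = dma_alloc_coherent(NULL, size,
							&mp->tx_desc_dma,
							GFP_KERNEL);
	}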
@@ -2264,7 +2257,7 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
2264 | 2257 | ||
2265 | /* Allocate RX ring */ | 2258 | /* Allocate RX ring */ |
2266 | mp->rx_desc_count = 0; | 2259 | mp->rx_desc_count = 0; |
2267 | size = mp->rx_ring_size * sizeof(struct eth_rx_desc); | 2260 | size = mp->rx_ring_size * sizeof(struct rx_desc); |
2268 | mp->rx_desc_area_size = size; | 2261 | mp->rx_desc_area_size = size; |
2269 | 2262 | ||
2270 | if (mp->rx_sram_size) { | 2263 | if (mp->rx_sram_size) { |
@@ -2299,17 +2292,15 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
2299 | napi_enable(&mp->napi); | 2292 | napi_enable(&mp->napi); |
2300 | #endif | 2293 | #endif |
2301 | 2294 | ||
2302 | eth_port_start(dev); | 2295 | port_start(dev); |
2303 | 2296 | ||
2304 | /* Interrupt Coalescing */ | 2297 | /* Interrupt Coalescing */ |
2305 | 2298 | ||
2306 | #ifdef MV643XX_COAL | 2299 | #ifdef MV643XX_COAL |
2307 | mp->rx_int_coal = | 2300 | mp->rx_int_coal = set_rx_coal(mp, MV643XX_RX_COAL); |
2308 | eth_port_set_rx_coal(mp, MV643XX_RX_COAL); | ||
2309 | #endif | 2301 | #endif |
2310 | 2302 | ||
2311 | mp->tx_int_coal = | 2303 | mp->tx_int_coal = set_tx_coal(mp, MV643XX_TX_COAL); |
2312 | eth_port_set_tx_coal(mp, MV643XX_TX_COAL); | ||
2313 | 2304 | ||
2314 | /* Unmask PHY and link status change interrupts */ | 2305 | /* Unmask PHY and link status change interrupts */
2315 | wrl(mp, INT_MASK_EXT(port_num), INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX); | 2306 | wrl(mp, INT_MASK_EXT(port_num), INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX); |
@@ -2330,7 +2321,7 @@ out_free_irq: | |||
2330 | } | 2321 | } |
2331 | 2322 | ||
2332 | /* | 2323 | /* |
2333 | * eth_port_reset - Reset Ethernet port | 2324 | * port_reset - Reset Ethernet port |
2334 | * | 2325 | * |
2335 | * DESCRIPTION: | 2326 | * DESCRIPTION: |
2336 | * This routine resets the chip by aborting any SDMA engine activity and | 2327 | * This routine resets the chip by aborting any SDMA engine activity and |
@@ -2347,7 +2338,7 @@ out_free_irq: | |||
2347 | * None. | 2338 | * None. |
2348 | * | 2339 | * |
2349 | */ | 2340 | */ |
2350 | static void eth_port_reset(struct mv643xx_private *mp) | 2341 | static void port_reset(struct mv643xx_private *mp) |
2351 | { | 2342 | { |
2352 | unsigned int port_num = mp->port_num; | 2343 | unsigned int port_num = mp->port_num; |
2353 | unsigned int reg_data; | 2344 | unsigned int reg_data; |
@@ -2356,7 +2347,7 @@ static void eth_port_reset(struct mv643xx_private *mp) | |||
2356 | mv643xx_eth_port_disable_rx(mp); | 2347 | mv643xx_eth_port_disable_rx(mp); |
2357 | 2348 | ||
2358 | /* Clear all MIB counters */ | 2349 | /* Clear all MIB counters */ |
2359 | eth_clear_mib_counters(mp); | 2350 | clear_mib_counters(mp); |
2360 | 2351 | ||
2361 | /* Reset the Enable bit in the Configuration Register */ | 2352 | /* Reset the Enable bit in the Configuration Register */ |
2362 | reg_data = rdl(mp, PORT_SERIAL_CONTROL(port_num)); | 2353 | reg_data = rdl(mp, PORT_SERIAL_CONTROL(port_num)); |
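The hunk ends before the write-back. The step that presumably follows clears the enable bit in the value read above and writes it back; a sketch using the bit name defined at the top of this file (the exact mask is an assumption):

	reg_data &= ~SERIAL_PORT_ENABLE;	/* (1 << 0), defined above */
	wrl(mp, PORT_SERIAL_CONTROL(port_num), reg_data);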
@@ -2392,7 +2383,7 @@ static int mv643xx_eth_stop(struct net_device *dev) | |||
2392 | netif_carrier_off(dev); | 2383 | netif_carrier_off(dev); |
2393 | netif_stop_queue(dev); | 2384 | netif_stop_queue(dev); |
2394 | 2385 | ||
2395 | eth_port_reset(mp); | 2386 | port_reset(mp); |
2396 | 2387 | ||
2397 | mv643xx_eth_free_tx_rings(dev); | 2388 | mv643xx_eth_free_tx_rings(dev); |
2398 | mv643xx_eth_free_rx_rings(dev); | 2389 | mv643xx_eth_free_rx_rings(dev); |
@@ -2456,8 +2447,8 @@ static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly) | |||
2456 | 2447 | ||
2457 | netif_stop_queue(dev); | 2448 | netif_stop_queue(dev); |
2458 | 2449 | ||
2459 | eth_port_reset(mp); | 2450 | port_reset(mp); |
2460 | eth_port_start(dev); | 2451 | port_start(dev); |
2461 | 2452 | ||
2462 | if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB) | 2453 | if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB) |
2463 | netif_wake_queue(dev); | 2454 | netif_wake_queue(dev); |
@@ -2505,14 +2496,14 @@ static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location) | |||
2505 | struct mv643xx_private *mp = netdev_priv(dev); | 2496 | struct mv643xx_private *mp = netdev_priv(dev); |
2506 | int val; | 2497 | int val; |
2507 | 2498 | ||
2508 | eth_port_read_smi_reg(mp, location, &val); | 2499 | read_smi_reg(mp, location, &val); |
2509 | return val; | 2500 | return val; |
2510 | } | 2501 | } |
2511 | 2502 | ||
2512 | static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val) | 2503 | static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val) |
2513 | { | 2504 | { |
2514 | struct mv643xx_private *mp = netdev_priv(dev); | 2505 | struct mv643xx_private *mp = netdev_priv(dev); |
2515 | eth_port_write_smi_reg(mp, location, val); | 2506 | write_smi_reg(mp, location, val); |
2516 | } | 2507 | } |
2517 | 2508 | ||
2518 | 2509 | ||
@@ -2520,7 +2511,7 @@ static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, | |||
2520 | static void mv643xx_eth_conf_mbus_windows(struct mv643xx_shared_private *msp, | 2511 | static void mv643xx_eth_conf_mbus_windows(struct mv643xx_shared_private *msp, |
2521 | struct mbus_dram_target_info *dram) | 2512 | struct mbus_dram_target_info *dram) |
2522 | { | 2513 | { |
2523 | void __iomem *base = msp->eth_base; | 2514 | void __iomem *base = msp->base; |
2524 | u32 win_enable; | 2515 | u32 win_enable; |
2525 | u32 win_protect; | 2516 | u32 win_protect; |
2526 | int i; | 2517 | int i; |
@@ -2573,8 +2564,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) | |||
2573 | goto out; | 2564 | goto out; |
2574 | memset(msp, 0, sizeof(*msp)); | 2565 | memset(msp, 0, sizeof(*msp)); |
2575 | 2566 | ||
2576 | msp->eth_base = ioremap(res->start, res->end - res->start + 1); | 2567 | msp->base = ioremap(res->start, res->end - res->start + 1); |
2577 | if (msp->eth_base == NULL) | 2568 | if (msp->base == NULL) |
2578 | goto out_free; | 2569 | goto out_free; |
2579 | 2570 | ||
2580 | spin_lock_init(&msp->phy_lock); | 2571 | spin_lock_init(&msp->phy_lock); |
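The expression res->end - res->start + 1 is the byte length of the memory resource; resource ranges are inclusive of both endpoints, hence the + 1. On trees that provide it, the resource_size() helper from <linux/ioport.h> expresses the same computation (a sketch, assuming the helper is available here):

	msp->base = ioremap(res->start, resource_size(res));
	if (msp->base == NULL)
		goto out_free;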
@@ -2600,7 +2591,7 @@ static int mv643xx_eth_shared_remove(struct platform_device *pdev) | |||
2600 | { | 2591 | { |
2601 | struct mv643xx_shared_private *msp = platform_get_drvdata(pdev); | 2592 | struct mv643xx_shared_private *msp = platform_get_drvdata(pdev); |
2602 | 2593 | ||
2603 | iounmap(msp->eth_base); | 2594 | iounmap(msp->base); |
2604 | kfree(msp); | 2595 | kfree(msp); |
2605 | 2596 | ||
2606 | return 0; | 2597 | return 0; |
@@ -2616,7 +2607,7 @@ static struct platform_driver mv643xx_eth_shared_driver = { | |||
2616 | }; | 2607 | }; |
2617 | 2608 | ||
2618 | /* | 2609 | /* |
2619 | * ethernet_phy_set - Set the ethernet port PHY address. | 2610 | * phy_addr_set - Set the ethernet port PHY address. |
2620 | * | 2611 | * |
2621 | * DESCRIPTION: | 2612 | * DESCRIPTION: |
2622 | * This routine sets the given ethernet port PHY address. | 2613 | * This routine sets the given ethernet port PHY address. |
@@ -2632,7 +2623,7 @@ static struct platform_driver mv643xx_eth_shared_driver = { | |||
2632 | * None. | 2623 | * None. |
2633 | * | 2624 | * |
2634 | */ | 2625 | */ |
2635 | static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr) | 2626 | static void phy_addr_set(struct mv643xx_private *mp, int phy_addr) |
2636 | { | 2627 | { |
2637 | u32 reg_data; | 2628 | u32 reg_data; |
2638 | int addr_shift = 5 * mp->port_num; | 2629 | int addr_shift = 5 * mp->port_num; |
@@ -2644,7 +2635,7 @@ static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr) | |||
2644 | } | 2635 | } |
2645 | 2636 | ||
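phy_addr_set() packs each port's 5-bit PHY address into a single shared register, which is why addr_shift is 5 * mp->port_num. The read-modify-write itself is not visible in this hunk; a sketch of what it presumably does (the PHY_ADDR register name and the 0x1f field mask are assumptions):

	/* Sketch: replace this port's 5-bit field, leaving the
	 * other ports' fields untouched.
	 */
	reg_data = rdl(mp, PHY_ADDR);
	reg_data &= ~(0x1f << addr_shift);
	reg_data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, reg_data);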
2646 | /* | 2637 | /* |
2647 | * ethernet_phy_get - Get the ethernet port PHY address. | 2638 | * phy_addr_get - Get the ethernet port PHY address. |
2648 | * | 2639 | * |
2649 | * DESCRIPTION: | 2640 | * DESCRIPTION: |
2650 | * This routine returns the given ethernet port PHY address. | 2641 | * This routine returns the given ethernet port PHY address. |
@@ -2659,7 +2650,7 @@ static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr) | |||
2659 | * PHY address. | 2650 | * PHY address. |
2660 | * | 2651 | * |
2661 | */ | 2652 | */ |
2662 | static int ethernet_phy_get(struct mv643xx_private *mp) | 2653 | static int phy_addr_get(struct mv643xx_private *mp) |
2663 | { | 2654 | { |
2664 | unsigned int reg_data; | 2655 | unsigned int reg_data; |
2665 | 2656 | ||
@@ -2669,7 +2660,7 @@ static int ethernet_phy_get(struct mv643xx_private *mp) | |||
2669 | } | 2660 | } |
2670 | 2661 | ||
2671 | /* | 2662 | /* |
2672 | * ethernet_phy_detect - Detect whether a phy is present | 2663 | * phy_detect - Detect whether a phy is present |
2673 | * | 2664 | * |
2674 | * DESCRIPTION: | 2665 | * DESCRIPTION: |
2675 | * This function tests whether there is a PHY present on | 2666 | * This function tests whether there is a PHY present on |
@@ -2686,22 +2677,22 @@ static int ethernet_phy_get(struct mv643xx_private *mp) | |||
2686 | * -ENODEV on failure | 2677 | * -ENODEV on failure |
2687 | * | 2678 | * |
2688 | */ | 2679 | */ |
2689 | static int ethernet_phy_detect(struct mv643xx_private *mp) | 2680 | static int phy_detect(struct mv643xx_private *mp) |
2690 | { | 2681 | { |
2691 | unsigned int phy_reg_data0; | 2682 | unsigned int phy_reg_data0; |
2692 | int auto_neg; | 2683 | int auto_neg; |
2693 | 2684 | ||
2694 | eth_port_read_smi_reg(mp, 0, &phy_reg_data0); | 2685 | read_smi_reg(mp, 0, &phy_reg_data0); |
2695 | auto_neg = phy_reg_data0 & 0x1000; | 2686 | auto_neg = phy_reg_data0 & 0x1000; |
2696 | phy_reg_data0 ^= 0x1000; /* invert auto_neg */ | 2687 | phy_reg_data0 ^= 0x1000; /* invert auto_neg */ |
2697 | eth_port_write_smi_reg(mp, 0, phy_reg_data0); | 2688 | write_smi_reg(mp, 0, phy_reg_data0); |
2698 | 2689 | ||
2699 | eth_port_read_smi_reg(mp, 0, &phy_reg_data0); | 2690 | read_smi_reg(mp, 0, &phy_reg_data0); |
2700 | if ((phy_reg_data0 & 0x1000) == auto_neg) | 2691 | if ((phy_reg_data0 & 0x1000) == auto_neg) |
2701 | return -ENODEV; /* change didn't take */ | 2692 | return -ENODEV; /* change didn't take */ |
2702 | 2693 | ||
2703 | phy_reg_data0 ^= 0x1000; | 2694 | phy_reg_data0 ^= 0x1000; |
2704 | eth_port_write_smi_reg(mp, 0, phy_reg_data0); | 2695 | write_smi_reg(mp, 0, phy_reg_data0); |
2705 | return 0; | 2696 | return 0; |
2706 | } | 2697 | } |
2707 | 2698 | ||
@@ -2831,15 +2822,15 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
2831 | mp->shared_smi = platform_get_drvdata(pd->shared_smi); | 2822 | mp->shared_smi = platform_get_drvdata(pd->shared_smi); |
2832 | 2823 | ||
2833 | /* set default config values */ | 2824 | /* set default config values */ |
2834 | eth_port_uc_addr_get(mp, dev->dev_addr); | 2825 | uc_addr_get(mp, dev->dev_addr); |
2835 | mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE; | 2826 | mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE; |
2836 | mp->tx_ring_size = PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; | 2827 | mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE; |
2837 | 2828 | ||
2838 | if (is_valid_ether_addr(pd->mac_addr)) | 2829 | if (is_valid_ether_addr(pd->mac_addr)) |
2839 | memcpy(dev->dev_addr, pd->mac_addr, 6); | 2830 | memcpy(dev->dev_addr, pd->mac_addr, 6); |
2840 | 2831 | ||
2841 | if (pd->phy_addr || pd->force_phy_addr) | 2832 | if (pd->phy_addr || pd->force_phy_addr) |
2842 | ethernet_phy_set(mp, pd->phy_addr); | 2833 | phy_addr_set(mp, pd->phy_addr); |
2843 | 2834 | ||
2844 | if (pd->rx_queue_size) | 2835 | if (pd->rx_queue_size) |
2845 | mp->rx_ring_size = pd->rx_queue_size; | 2836 | mp->rx_ring_size = pd->rx_queue_size; |
@@ -2864,18 +2855,18 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
2864 | mp->mii.dev = dev; | 2855 | mp->mii.dev = dev; |
2865 | mp->mii.mdio_read = mv643xx_mdio_read; | 2856 | mp->mii.mdio_read = mv643xx_mdio_read; |
2866 | mp->mii.mdio_write = mv643xx_mdio_write; | 2857 | mp->mii.mdio_write = mv643xx_mdio_write; |
2867 | mp->mii.phy_id = ethernet_phy_get(mp); | 2858 | mp->mii.phy_id = phy_addr_get(mp); |
2868 | mp->mii.phy_id_mask = 0x3f; | 2859 | mp->mii.phy_id_mask = 0x3f; |
2869 | mp->mii.reg_num_mask = 0x1f; | 2860 | mp->mii.reg_num_mask = 0x1f; |
2870 | 2861 | ||
2871 | err = ethernet_phy_detect(mp); | 2862 | err = phy_detect(mp); |
2872 | if (err) { | 2863 | if (err) { |
2873 | pr_debug("%s: No PHY detected at addr %d\n", | 2864 | pr_debug("%s: No PHY detected at addr %d\n", |
2874 | dev->name, ethernet_phy_get(mp)); | 2865 | dev->name, phy_addr_get(mp)); |
2875 | goto out; | 2866 | goto out; |
2876 | } | 2867 | } |
2877 | 2868 | ||
2878 | ethernet_phy_reset(mp); | 2869 | phy_reset(mp); |
2879 | mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii); | 2870 | mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii); |
2880 | mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd); | 2871 | mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd); |
2881 | mv643xx_eth_update_pscr(dev, &cmd); | 2872 | mv643xx_eth_update_pscr(dev, &cmd); |
@@ -2944,7 +2935,7 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev) | |||
2944 | wrl(mp, INT_MASK(port_num), 0); | 2935 | wrl(mp, INT_MASK(port_num), 0); |
2945 | rdl(mp, INT_MASK(port_num)); | 2936 | rdl(mp, INT_MASK(port_num)); |
2946 | 2937 | ||
2947 | eth_port_reset(mp); | 2938 | port_reset(mp); |
2948 | } | 2939 | } |
2949 | 2940 | ||
2950 | static struct platform_driver mv643xx_eth_driver = { | 2941 | static struct platform_driver mv643xx_eth_driver = { |