author		Lennert Buytenhek <buytenh@wantstofly.org>	2008-06-01 14:51:22 -0400
committer	Lennert Buytenhek <buytenh@wantstofly.org>	2008-06-12 02:40:34 -0400
commit		13d6428538feaefa6c796f76b9918de165ae69f8 (patch)
tree		5b9d1b5c791bd91696916ec44b24b440730efae9 /drivers/net/mv643xx_eth.c
parent		8a578111e343350ff8fa75fc3630d4bba5475cae (diff)
mv643xx_eth: split out tx queue state
Split all TX queue related state into 'struct tx_queue', in
preparation for multiple TX queue support.
Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Acked-by: Dale Farnsworth <dale@farnsworth.org>
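
The heart of the patch is a mechanical state split: every per-queue TX field (ring size, descriptor indices, descriptor area, skb array) moves out of 'struct mv643xx_eth_private' into a new 'struct tx_queue', and queue helpers recover the owning port via container_of(). A minimal sketch of the resulting layout, taken from the hunks below with unrelated members of mv643xx_eth_private omitted (kernel types as used by the driver):

struct tx_queue {
	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	struct tx_desc *tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;
};

struct mv643xx_eth_private {
	/* ... port-wide state (registers, lock, MIB counters, rxq, ...) ... */

	int default_tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	struct tx_queue txq[1];	/* one-element array for now; multi-queue support comes later */
};

/* Map a queue back to its port; works because txq[] is embedded in the private struct. */
static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[0]);
}

All former mp->tx_* accesses become txq->tx_* (or mp->txq-> where port-level code needs the queue), which accounts for most of the 510 changed lines below.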
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--	drivers/net/mv643xx_eth.c | 510
1 file changed, 254 insertions, 256 deletions
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 5bd4b38e953d..2ef71c48ef47 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -297,38 +297,30 @@ struct rx_queue {
297 | struct timer_list rx_oom; | 297 | struct timer_list rx_oom; |
298 | }; | 298 | }; |
299 | 299 | ||
300 | struct mv643xx_eth_private { | 300 | struct tx_queue { |
301 | struct mv643xx_eth_shared_private *shared; | 301 | int tx_ring_size; |
302 | int port_num; /* User Ethernet port number */ | ||
303 | |||
304 | struct mv643xx_eth_shared_private *shared_smi; | ||
305 | |||
306 | u32 tx_sram_addr; /* Base address of tx sram area */ | ||
307 | u32 tx_sram_size; /* Size of tx sram area */ | ||
308 | |||
309 | /* Tx/Rx rings managment indexes fields. For driver use */ | ||
310 | |||
311 | /* Next available and first returning Tx resource */ | ||
312 | int tx_curr_desc, tx_used_desc; | ||
313 | 302 | ||
314 | #ifdef MV643XX_ETH_TX_FAST_REFILL | 303 | int tx_desc_count; |
315 | u32 tx_clean_threshold; | 304 | int tx_curr_desc; |
316 | #endif | 305 | int tx_used_desc; |
317 | 306 | ||
318 | struct tx_desc *tx_desc_area; | 307 | struct tx_desc *tx_desc_area; |
319 | dma_addr_t tx_desc_dma; | 308 | dma_addr_t tx_desc_dma; |
320 | int tx_desc_area_size; | 309 | int tx_desc_area_size; |
321 | struct sk_buff **tx_skb; | 310 | struct sk_buff **tx_skb; |
311 | }; | ||
312 | |||
313 | struct mv643xx_eth_private { | ||
314 | struct mv643xx_eth_shared_private *shared; | ||
315 | int port_num; /* User Ethernet port number */ | ||
316 | |||
317 | struct mv643xx_eth_shared_private *shared_smi; | ||
322 | 318 | ||
323 | struct work_struct tx_timeout_task; | 319 | struct work_struct tx_timeout_task; |
324 | 320 | ||
325 | struct net_device *dev; | 321 | struct net_device *dev; |
326 | struct mib_counters mib_counters; | 322 | struct mib_counters mib_counters; |
327 | spinlock_t lock; | 323 | spinlock_t lock; |
328 | /* Size of Tx Ring per queue */ | ||
329 | int tx_ring_size; | ||
330 | /* Number of tx descriptors in use */ | ||
331 | int tx_desc_count; | ||
332 | 324 | ||
333 | u32 rx_int_coal; | 325 | u32 rx_int_coal; |
334 | u32 tx_int_coal; | 326 | u32 tx_int_coal; |
@@ -342,6 +334,17 @@ struct mv643xx_eth_private {
342 | int rx_desc_sram_size; | 334 | int rx_desc_sram_size; |
343 | struct napi_struct napi; | 335 | struct napi_struct napi; |
344 | struct rx_queue rxq[1]; | 336 | struct rx_queue rxq[1]; |
337 | |||
338 | /* | ||
339 | * TX state. | ||
340 | */ | ||
341 | int default_tx_ring_size; | ||
342 | unsigned long tx_desc_sram_addr; | ||
343 | int tx_desc_sram_size; | ||
344 | struct tx_queue txq[1]; | ||
345 | #ifdef MV643XX_ETH_TX_FAST_REFILL | ||
346 | int tx_clean_threshold; | ||
347 | #endif | ||
345 | }; | 348 | }; |
346 | 349 | ||
347 | 350 | ||
@@ -363,6 +366,11 @@ static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
363 | return container_of(rxq, struct mv643xx_eth_private, rxq[0]); | 366 | return container_of(rxq, struct mv643xx_eth_private, rxq[0]); |
364 | } | 367 | } |
365 | 368 | ||
369 | static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) | ||
370 | { | ||
371 | return container_of(txq, struct mv643xx_eth_private, txq[0]); | ||
372 | } | ||
373 | |||
366 | static void rxq_enable(struct rx_queue *rxq) | 374 | static void rxq_enable(struct rx_queue *rxq) |
367 | { | 375 | { |
368 | struct mv643xx_eth_private *mp = rxq_to_mp(rxq); | 376 | struct mv643xx_eth_private *mp = rxq_to_mp(rxq); |
@@ -379,39 +387,33 @@ static void rxq_disable(struct rx_queue *rxq)
379 | udelay(10); | 387 | udelay(10); |
380 | } | 388 | } |
381 | 389 | ||
382 | static void mv643xx_eth_port_enable_tx(struct mv643xx_eth_private *mp, | 390 | static void txq_enable(struct tx_queue *txq) |
383 | unsigned int queues) | ||
384 | { | 391 | { |
385 | wrl(mp, TXQ_COMMAND(mp->port_num), queues); | 392 | struct mv643xx_eth_private *mp = txq_to_mp(txq); |
393 | wrl(mp, TXQ_COMMAND(mp->port_num), 1); | ||
386 | } | 394 | } |
387 | 395 | ||
388 | static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_eth_private *mp) | 396 | static void txq_disable(struct tx_queue *txq) |
389 | { | 397 | { |
390 | unsigned int port_num = mp->port_num; | 398 | struct mv643xx_eth_private *mp = txq_to_mp(txq); |
391 | u32 queues; | 399 | u8 mask = 1; |
392 | |||
393 | /* Stop Tx port activity. Check port Tx activity. */ | ||
394 | queues = rdl(mp, TXQ_COMMAND(port_num)) & 0xFF; | ||
395 | if (queues) { | ||
396 | /* Issue stop command for active queues only */ | ||
397 | wrl(mp, TXQ_COMMAND(port_num), (queues << 8)); | ||
398 | |||
399 | /* Wait for all Tx activity to terminate. */ | ||
400 | /* Check port cause register that all Tx queues are stopped */ | ||
401 | while (rdl(mp, TXQ_COMMAND(port_num)) & 0xFF) | ||
402 | udelay(10); | ||
403 | |||
404 | /* Wait for Tx FIFO to empty */ | ||
405 | while (rdl(mp, PORT_STATUS(port_num)) & TX_FIFO_EMPTY) | ||
406 | udelay(10); | ||
407 | } | ||
408 | 400 | ||
409 | return queues; | 401 | wrl(mp, TXQ_COMMAND(mp->port_num), mask << 8); |
402 | while (rdl(mp, TXQ_COMMAND(mp->port_num)) & mask) | ||
403 | udelay(10); | ||
404 | } | ||
405 | |||
406 | static void __txq_maybe_wake(struct tx_queue *txq) | ||
407 | { | ||
408 | struct mv643xx_eth_private *mp = txq_to_mp(txq); | ||
409 | |||
410 | if (txq->tx_ring_size - txq->tx_desc_count >= MAX_DESCS_PER_SKB) | ||
411 | netif_wake_queue(mp->dev); | ||
410 | } | 412 | } |
411 | 413 | ||
412 | 414 | ||
413 | /* rx ***********************************************************************/ | 415 | /* rx ***********************************************************************/ |
414 | static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev); | 416 | static void txq_reclaim(struct tx_queue *txq, int force); |
415 | 417 | ||
416 | static void rxq_refill(struct rx_queue *rxq) | 418 | static void rxq_refill(struct rx_queue *rxq) |
417 | { | 419 | { |
@@ -571,7 +573,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
571 | 573 | ||
572 | #ifdef MV643XX_ETH_TX_FAST_REFILL | 574 | #ifdef MV643XX_ETH_TX_FAST_REFILL |
573 | if (++mp->tx_clean_threshold > 5) { | 575 | if (++mp->tx_clean_threshold > 5) { |
574 | mv643xx_eth_free_completed_tx_descs(mp->dev); | 576 | txq_reclaim(mp->txq, 0); |
575 | mp->tx_clean_threshold = 0; | 577 | mp->tx_clean_threshold = 0; |
576 | } | 578 | } |
577 | #endif | 579 | #endif |
@@ -593,55 +595,59 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
593 | /* tx ***********************************************************************/ | 595 | /* tx ***********************************************************************/ |
594 | static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) | 596 | static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) |
595 | { | 597 | { |
596 | unsigned int frag; | 598 | int frag; |
597 | skb_frag_t *fragp; | ||
598 | 599 | ||
599 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | 600 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { |
600 | fragp = &skb_shinfo(skb)->frags[frag]; | 601 | skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; |
601 | if (fragp->size <= 8 && fragp->page_offset & 0x7) | 602 | if (fragp->size <= 8 && fragp->page_offset & 7) |
602 | return 1; | 603 | return 1; |
603 | } | 604 | } |
605 | |||
604 | return 0; | 606 | return 0; |
605 | } | 607 | } |
606 | 608 | ||
607 | static int alloc_tx_desc_index(struct mv643xx_eth_private *mp) | 609 | static int txq_alloc_desc_index(struct tx_queue *txq) |
608 | { | 610 | { |
609 | int tx_desc_curr; | 611 | int tx_desc_curr; |
610 | 612 | ||
611 | BUG_ON(mp->tx_desc_count >= mp->tx_ring_size); | 613 | BUG_ON(txq->tx_desc_count >= txq->tx_ring_size); |
612 | 614 | ||
613 | tx_desc_curr = mp->tx_curr_desc; | 615 | tx_desc_curr = txq->tx_curr_desc; |
614 | mp->tx_curr_desc = (tx_desc_curr + 1) % mp->tx_ring_size; | 616 | txq->tx_curr_desc = (tx_desc_curr + 1) % txq->tx_ring_size; |
615 | 617 | ||
616 | BUG_ON(mp->tx_curr_desc == mp->tx_used_desc); | 618 | BUG_ON(txq->tx_curr_desc == txq->tx_used_desc); |
617 | 619 | ||
618 | return tx_desc_curr; | 620 | return tx_desc_curr; |
619 | } | 621 | } |
620 | 622 | ||
621 | static void tx_fill_frag_descs(struct mv643xx_eth_private *mp, | 623 | static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) |
622 | struct sk_buff *skb) | ||
623 | { | 624 | { |
625 | int nr_frags = skb_shinfo(skb)->nr_frags; | ||
624 | int frag; | 626 | int frag; |
625 | int tx_index; | ||
626 | struct tx_desc *desc; | ||
627 | 627 | ||
628 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | 628 | for (frag = 0; frag < nr_frags; frag++) { |
629 | skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; | 629 | skb_frag_t *this_frag; |
630 | 630 | int tx_index; | |
631 | tx_index = alloc_tx_desc_index(mp); | 631 | struct tx_desc *desc; |
632 | desc = &mp->tx_desc_area[tx_index]; | 632 | |
633 | 633 | this_frag = &skb_shinfo(skb)->frags[frag]; | |
634 | desc->cmd_sts = BUFFER_OWNED_BY_DMA; | 634 | tx_index = txq_alloc_desc_index(txq); |
635 | /* Last Frag enables interrupt and frees the skb */ | 635 | desc = &txq->tx_desc_area[tx_index]; |
636 | if (frag == (skb_shinfo(skb)->nr_frags - 1)) { | 636 | |
637 | desc->cmd_sts |= ZERO_PADDING | | 637 | /* |
638 | TX_LAST_DESC | | 638 | * The last fragment will generate an interrupt |
639 | TX_ENABLE_INTERRUPT; | 639 | * which will free the skb on TX completion. |
640 | mp->tx_skb[tx_index] = skb; | 640 | */ |
641 | } else | 641 | if (frag == nr_frags - 1) { |
642 | mp->tx_skb[tx_index] = NULL; | 642 | desc->cmd_sts = BUFFER_OWNED_BY_DMA | |
643 | 643 | ZERO_PADDING | TX_LAST_DESC | | |
644 | desc = &mp->tx_desc_area[tx_index]; | 644 | TX_ENABLE_INTERRUPT; |
645 | txq->tx_skb[tx_index] = skb; | ||
646 | } else { | ||
647 | desc->cmd_sts = BUFFER_OWNED_BY_DMA; | ||
648 | txq->tx_skb[tx_index] = NULL; | ||
649 | } | ||
650 | |||
645 | desc->l4i_chk = 0; | 651 | desc->l4i_chk = 0; |
646 | desc->byte_cnt = this_frag->size; | 652 | desc->byte_cnt = this_frag->size; |
647 | desc->buf_ptr = dma_map_page(NULL, this_frag->page, | 653 | desc->buf_ptr = dma_map_page(NULL, this_frag->page, |
@@ -656,29 +662,28 @@ static inline __be16 sum16_as_be(__sum16 sum)
656 | return (__force __be16)sum; | 662 | return (__force __be16)sum; |
657 | } | 663 | } |
658 | 664 | ||
659 | static void tx_submit_descs_for_skb(struct mv643xx_eth_private *mp, | 665 | static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb) |
660 | struct sk_buff *skb) | ||
661 | { | 666 | { |
667 | int nr_frags = skb_shinfo(skb)->nr_frags; | ||
662 | int tx_index; | 668 | int tx_index; |
663 | struct tx_desc *desc; | 669 | struct tx_desc *desc; |
664 | u32 cmd_sts; | 670 | u32 cmd_sts; |
665 | int length; | 671 | int length; |
666 | int nr_frags = skb_shinfo(skb)->nr_frags; | ||
667 | 672 | ||
668 | cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA; | 673 | cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA; |
669 | 674 | ||
670 | tx_index = alloc_tx_desc_index(mp); | 675 | tx_index = txq_alloc_desc_index(txq); |
671 | desc = &mp->tx_desc_area[tx_index]; | 676 | desc = &txq->tx_desc_area[tx_index]; |
672 | 677 | ||
673 | if (nr_frags) { | 678 | if (nr_frags) { |
674 | tx_fill_frag_descs(mp, skb); | 679 | txq_submit_frag_skb(txq, skb); |
675 | 680 | ||
676 | length = skb_headlen(skb); | 681 | length = skb_headlen(skb); |
677 | mp->tx_skb[tx_index] = NULL; | 682 | txq->tx_skb[tx_index] = NULL; |
678 | } else { | 683 | } else { |
679 | cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT; | 684 | cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT; |
680 | length = skb->len; | 685 | length = skb->len; |
681 | mp->tx_skb[tx_index] = skb; | 686 | txq->tx_skb[tx_index] = skb; |
682 | } | 687 | } |
683 | 688 | ||
684 | desc->byte_cnt = length; | 689 | desc->byte_cnt = length; |
@@ -714,15 +719,16 @@ static void tx_submit_descs_for_skb(struct mv643xx_eth_private *mp,
714 | 719 | ||
715 | /* ensure all descriptors are written before poking hardware */ | 720 | /* ensure all descriptors are written before poking hardware */ |
716 | wmb(); | 721 | wmb(); |
717 | mv643xx_eth_port_enable_tx(mp, 1); | 722 | txq_enable(txq); |
718 | 723 | ||
719 | mp->tx_desc_count += nr_frags + 1; | 724 | txq->tx_desc_count += nr_frags + 1; |
720 | } | 725 | } |
721 | 726 | ||
722 | static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) | 727 | static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) |
723 | { | 728 | { |
724 | struct mv643xx_eth_private *mp = netdev_priv(dev); | 729 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
725 | struct net_device_stats *stats = &dev->stats; | 730 | struct net_device_stats *stats = &dev->stats; |
731 | struct tx_queue *txq; | ||
726 | unsigned long flags; | 732 | unsigned long flags; |
727 | 733 | ||
728 | BUG_ON(netif_queue_stopped(dev)); | 734 | BUG_ON(netif_queue_stopped(dev)); |
@@ -736,19 +742,21 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
736 | 742 | ||
737 | spin_lock_irqsave(&mp->lock, flags); | 743 | spin_lock_irqsave(&mp->lock, flags); |
738 | 744 | ||
739 | if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) { | 745 | txq = mp->txq; |
746 | |||
747 | if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) { | ||
740 | printk(KERN_ERR "%s: transmit with queue full\n", dev->name); | 748 | printk(KERN_ERR "%s: transmit with queue full\n", dev->name); |
741 | netif_stop_queue(dev); | 749 | netif_stop_queue(dev); |
742 | spin_unlock_irqrestore(&mp->lock, flags); | 750 | spin_unlock_irqrestore(&mp->lock, flags); |
743 | return NETDEV_TX_BUSY; | 751 | return NETDEV_TX_BUSY; |
744 | } | 752 | } |
745 | 753 | ||
746 | tx_submit_descs_for_skb(mp, skb); | 754 | txq_submit_skb(txq, skb); |
747 | stats->tx_bytes += skb->len; | 755 | stats->tx_bytes += skb->len; |
748 | stats->tx_packets++; | 756 | stats->tx_packets++; |
749 | dev->trans_start = jiffies; | 757 | dev->trans_start = jiffies; |
750 | 758 | ||
751 | if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) | 759 | if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) |
752 | netif_stop_queue(dev); | 760 | netif_stop_queue(dev); |
753 | 761 | ||
754 | spin_unlock_irqrestore(&mp->lock, flags); | 762 | spin_unlock_irqrestore(&mp->lock, flags); |
@@ -1348,69 +1356,106 @@ static void rxq_deinit(struct rx_queue *rxq)
1348 | kfree(rxq->rx_skb); | 1356 | kfree(rxq->rx_skb); |
1349 | } | 1357 | } |
1350 | 1358 | ||
1351 | static void ether_init_tx_desc_ring(struct mv643xx_eth_private *mp) | 1359 | static int txq_init(struct mv643xx_eth_private *mp) |
1352 | { | 1360 | { |
1353 | int tx_desc_num = mp->tx_ring_size; | 1361 | struct tx_queue *txq = mp->txq; |
1354 | struct tx_desc *p_tx_desc; | 1362 | struct tx_desc *tx_desc; |
1363 | int size; | ||
1355 | int i; | 1364 | int i; |
1356 | 1365 | ||
1357 | /* Initialize the next_desc_ptr links in the Tx descriptors ring */ | 1366 | txq->tx_ring_size = mp->default_tx_ring_size; |
1358 | p_tx_desc = (struct tx_desc *)mp->tx_desc_area; | 1367 | |
1359 | for (i = 0; i < tx_desc_num; i++) { | 1368 | txq->tx_desc_count = 0; |
1360 | p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma + | 1369 | txq->tx_curr_desc = 0; |
1361 | ((i + 1) % tx_desc_num) * sizeof(struct tx_desc); | 1370 | txq->tx_used_desc = 0; |
1371 | |||
1372 | size = txq->tx_ring_size * sizeof(struct tx_desc); | ||
1373 | |||
1374 | if (size <= mp->tx_desc_sram_size) { | ||
1375 | txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, | ||
1376 | mp->tx_desc_sram_size); | ||
1377 | txq->tx_desc_dma = mp->tx_desc_sram_addr; | ||
1378 | } else { | ||
1379 | txq->tx_desc_area = dma_alloc_coherent(NULL, size, | ||
1380 | &txq->tx_desc_dma, | ||
1381 | GFP_KERNEL); | ||
1382 | } | ||
1383 | |||
1384 | if (txq->tx_desc_area == NULL) { | ||
1385 | dev_printk(KERN_ERR, &mp->dev->dev, | ||
1386 | "can't allocate tx ring (%d bytes)\n", size); | ||
1387 | goto out; | ||
1362 | } | 1388 | } |
1389 | memset(txq->tx_desc_area, 0, size); | ||
1390 | |||
1391 | txq->tx_desc_area_size = size; | ||
1392 | txq->tx_skb = kmalloc(txq->tx_ring_size * sizeof(*txq->tx_skb), | ||
1393 | GFP_KERNEL); | ||
1394 | if (txq->tx_skb == NULL) { | ||
1395 | dev_printk(KERN_ERR, &mp->dev->dev, | ||
1396 | "can't allocate tx skb ring\n"); | ||
1397 | goto out_free; | ||
1398 | } | ||
1399 | |||
1400 | tx_desc = (struct tx_desc *)txq->tx_desc_area; | ||
1401 | for (i = 0; i < txq->tx_ring_size; i++) { | ||
1402 | int nexti = (i + 1) % txq->tx_ring_size; | ||
1403 | tx_desc[i].next_desc_ptr = txq->tx_desc_dma + | ||
1404 | nexti * sizeof(struct tx_desc); | ||
1405 | } | ||
1406 | |||
1407 | return 0; | ||
1408 | |||
1363 | 1409 | ||
1364 | mp->tx_curr_desc = 0; | 1410 | out_free: |
1365 | mp->tx_used_desc = 0; | 1411 | if (size <= mp->tx_desc_sram_size) |
1412 | iounmap(txq->tx_desc_area); | ||
1413 | else | ||
1414 | dma_free_coherent(NULL, size, | ||
1415 | txq->tx_desc_area, | ||
1416 | txq->tx_desc_dma); | ||
1366 | 1417 | ||
1367 | mp->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc); | 1418 | out: |
1419 | return -ENOMEM; | ||
1368 | } | 1420 | } |
1369 | 1421 | ||
1370 | static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force) | 1422 | static void txq_reclaim(struct tx_queue *txq, int force) |
1371 | { | 1423 | { |
1372 | struct mv643xx_eth_private *mp = netdev_priv(dev); | 1424 | struct mv643xx_eth_private *mp = txq_to_mp(txq); |
1373 | struct tx_desc *desc; | ||
1374 | u32 cmd_sts; | ||
1375 | struct sk_buff *skb; | ||
1376 | unsigned long flags; | 1425 | unsigned long flags; |
1377 | int tx_index; | ||
1378 | dma_addr_t addr; | ||
1379 | int count; | ||
1380 | int released = 0; | ||
1381 | 1426 | ||
1382 | while (mp->tx_desc_count > 0) { | 1427 | spin_lock_irqsave(&mp->lock, flags); |
1383 | spin_lock_irqsave(&mp->lock, flags); | 1428 | while (txq->tx_desc_count > 0) { |
1384 | 1429 | int tx_index; | |
1385 | /* tx_desc_count might have changed before acquiring the lock */ | 1430 | struct tx_desc *desc; |
1386 | if (mp->tx_desc_count <= 0) { | 1431 | u32 cmd_sts; |
1387 | spin_unlock_irqrestore(&mp->lock, flags); | 1432 | struct sk_buff *skb; |
1388 | return released; | 1433 | dma_addr_t addr; |
1389 | } | 1434 | int count; |
1390 | 1435 | ||
1391 | tx_index = mp->tx_used_desc; | 1436 | tx_index = txq->tx_used_desc; |
1392 | desc = &mp->tx_desc_area[tx_index]; | 1437 | desc = &txq->tx_desc_area[tx_index]; |
1393 | cmd_sts = desc->cmd_sts; | 1438 | cmd_sts = desc->cmd_sts; |
1394 | 1439 | ||
1395 | if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA)) { | 1440 | if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA)) |
1396 | spin_unlock_irqrestore(&mp->lock, flags); | 1441 | break; |
1397 | return released; | ||
1398 | } | ||
1399 | 1442 | ||
1400 | mp->tx_used_desc = (tx_index + 1) % mp->tx_ring_size; | 1443 | txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size; |
1401 | mp->tx_desc_count--; | 1444 | txq->tx_desc_count--; |
1402 | 1445 | ||
1403 | addr = desc->buf_ptr; | 1446 | addr = desc->buf_ptr; |
1404 | count = desc->byte_cnt; | 1447 | count = desc->byte_cnt; |
1405 | skb = mp->tx_skb[tx_index]; | 1448 | skb = txq->tx_skb[tx_index]; |
1406 | if (skb) | 1449 | txq->tx_skb[tx_index] = NULL; |
1407 | mp->tx_skb[tx_index] = NULL; | ||
1408 | 1450 | ||
1409 | if (cmd_sts & ERROR_SUMMARY) { | 1451 | if (cmd_sts & ERROR_SUMMARY) { |
1410 | printk("%s: Error in TX\n", dev->name); | 1452 | dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n"); |
1411 | dev->stats.tx_errors++; | 1453 | mp->dev->stats.tx_errors++; |
1412 | } | 1454 | } |
1413 | 1455 | ||
1456 | /* | ||
1457 | * Drop mp->lock while we free the skb. | ||
1458 | */ | ||
1414 | spin_unlock_irqrestore(&mp->lock, flags); | 1459 | spin_unlock_irqrestore(&mp->lock, flags); |
1415 | 1460 | ||
1416 | if (cmd_sts & TX_FIRST_DESC) | 1461 | if (cmd_sts & TX_FIRST_DESC) |
@@ -1421,91 +1466,68 @@ static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
1421 | if (skb) | 1466 | if (skb) |
1422 | dev_kfree_skb_irq(skb); | 1467 | dev_kfree_skb_irq(skb); |
1423 | 1468 | ||
1424 | released = 1; | 1469 | spin_lock_irqsave(&mp->lock, flags); |
1425 | } | 1470 | } |
1426 | 1471 | spin_unlock_irqrestore(&mp->lock, flags); | |
1427 | return released; | ||
1428 | } | ||
1429 | |||
1430 | static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev) | ||
1431 | { | ||
1432 | struct mv643xx_eth_private *mp = netdev_priv(dev); | ||
1433 | |||
1434 | if (mv643xx_eth_free_tx_descs(dev, 0) && | ||
1435 | mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB) | ||
1436 | netif_wake_queue(dev); | ||
1437 | } | ||
1438 | |||
1439 | static void mv643xx_eth_free_all_tx_descs(struct net_device *dev) | ||
1440 | { | ||
1441 | mv643xx_eth_free_tx_descs(dev, 1); | ||
1442 | } | 1472 | } |
1443 | 1473 | ||
1444 | static void mv643xx_eth_free_tx_rings(struct net_device *dev) | 1474 | static void txq_deinit(struct tx_queue *txq) |
1445 | { | 1475 | { |
1446 | struct mv643xx_eth_private *mp = netdev_priv(dev); | 1476 | struct mv643xx_eth_private *mp = txq_to_mp(txq); |
1447 | |||
1448 | /* Stop Tx Queues */ | ||
1449 | mv643xx_eth_port_disable_tx(mp); | ||
1450 | 1477 | ||
1451 | /* Free outstanding skb's on TX ring */ | 1478 | txq_disable(txq); |
1452 | mv643xx_eth_free_all_tx_descs(dev); | 1479 | txq_reclaim(txq, 1); |
1453 | 1480 | ||
1454 | BUG_ON(mp->tx_used_desc != mp->tx_curr_desc); | 1481 | BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); |
1455 | 1482 | ||
1456 | /* Free TX ring */ | 1483 | if (txq->tx_desc_area_size <= mp->tx_desc_sram_size) |
1457 | if (mp->tx_sram_size) | 1484 | iounmap(txq->tx_desc_area); |
1458 | iounmap(mp->tx_desc_area); | ||
1459 | else | 1485 | else |
1460 | dma_free_coherent(NULL, mp->tx_desc_area_size, | 1486 | dma_free_coherent(NULL, txq->tx_desc_area_size, |
1461 | mp->tx_desc_area, mp->tx_desc_dma); | 1487 | txq->tx_desc_area, txq->tx_desc_dma); |
1488 | |||
1489 | kfree(txq->tx_skb); | ||
1462 | } | 1490 | } |
1463 | 1491 | ||
1464 | 1492 | ||
1465 | /* netdev ops and related ***************************************************/ | 1493 | /* netdev ops and related ***************************************************/ |
1466 | static void port_reset(struct mv643xx_eth_private *mp); | 1494 | static void port_reset(struct mv643xx_eth_private *mp); |
1467 | 1495 | ||
1468 | static void mv643xx_eth_update_pscr(struct net_device *dev, | 1496 | static void mv643xx_eth_update_pscr(struct mv643xx_eth_private *mp, |
1469 | struct ethtool_cmd *ecmd) | 1497 | struct ethtool_cmd *ecmd) |
1470 | { | 1498 | { |
1471 | struct mv643xx_eth_private *mp = netdev_priv(dev); | 1499 | u32 pscr_o; |
1472 | int port_num = mp->port_num; | 1500 | u32 pscr_n; |
1473 | u32 o_pscr, n_pscr; | ||
1474 | unsigned int queues; | ||
1475 | 1501 | ||
1476 | o_pscr = rdl(mp, PORT_SERIAL_CONTROL(port_num)); | 1502 | pscr_o = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num)); |
1477 | n_pscr = o_pscr; | ||
1478 | 1503 | ||
1479 | /* clear speed, duplex and rx buffer size fields */ | 1504 | /* clear speed, duplex and rx buffer size fields */ |
1480 | n_pscr &= ~(SET_MII_SPEED_TO_100 | | 1505 | pscr_n = pscr_o & ~(SET_MII_SPEED_TO_100 | |
1481 | SET_GMII_SPEED_TO_1000 | | 1506 | SET_GMII_SPEED_TO_1000 | |
1482 | SET_FULL_DUPLEX_MODE | | 1507 | SET_FULL_DUPLEX_MODE | |
1483 | MAX_RX_PACKET_MASK); | 1508 | MAX_RX_PACKET_MASK); |
1484 | |||
1485 | if (ecmd->duplex == DUPLEX_FULL) | ||
1486 | n_pscr |= SET_FULL_DUPLEX_MODE; | ||
1487 | 1509 | ||
1488 | if (ecmd->speed == SPEED_1000) | 1510 | if (ecmd->speed == SPEED_1000) { |
1489 | n_pscr |= SET_GMII_SPEED_TO_1000 | | 1511 | pscr_n |= SET_GMII_SPEED_TO_1000 | MAX_RX_PACKET_9700BYTE; |
1490 | MAX_RX_PACKET_9700BYTE; | 1512 | } else { |
1491 | else { | ||
1492 | if (ecmd->speed == SPEED_100) | 1513 | if (ecmd->speed == SPEED_100) |
1493 | n_pscr |= SET_MII_SPEED_TO_100; | 1514 | pscr_n |= SET_MII_SPEED_TO_100; |
1494 | n_pscr |= MAX_RX_PACKET_1522BYTE; | 1515 | pscr_n |= MAX_RX_PACKET_1522BYTE; |
1495 | } | 1516 | } |
1496 | 1517 | ||
1497 | if (n_pscr != o_pscr) { | 1518 | if (ecmd->duplex == DUPLEX_FULL) |
1498 | if ((o_pscr & SERIAL_PORT_ENABLE) == 0) | 1519 | pscr_n |= SET_FULL_DUPLEX_MODE; |
1499 | wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr); | 1520 | |
1521 | if (pscr_n != pscr_o) { | ||
1522 | if ((pscr_o & SERIAL_PORT_ENABLE) == 0) | ||
1523 | wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n); | ||
1500 | else { | 1524 | else { |
1501 | queues = mv643xx_eth_port_disable_tx(mp); | 1525 | txq_disable(mp->txq); |
1502 | 1526 | pscr_o &= ~SERIAL_PORT_ENABLE; | |
1503 | o_pscr &= ~SERIAL_PORT_ENABLE; | 1527 | wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_o); |
1504 | wrl(mp, PORT_SERIAL_CONTROL(port_num), o_pscr); | 1528 | wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n); |
1505 | wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr); | 1529 | wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n); |
1506 | wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr); | 1530 | txq_enable(mp->txq); |
1507 | if (queues) | ||
1508 | mv643xx_eth_port_enable_tx(mp, queues); | ||
1509 | } | 1531 | } |
1510 | } | 1532 | } |
1511 | } | 1533 | } |
@@ -1515,29 +1537,26 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
1515 | struct net_device *dev = (struct net_device *)dev_id; | 1537 | struct net_device *dev = (struct net_device *)dev_id; |
1516 | struct mv643xx_eth_private *mp = netdev_priv(dev); | 1538 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
1517 | u32 int_cause, int_cause_ext = 0; | 1539 | u32 int_cause, int_cause_ext = 0; |
1518 | unsigned int port_num = mp->port_num; | ||
1519 | 1540 | ||
1520 | /* Read interrupt cause registers */ | 1541 | /* Read interrupt cause registers */ |
1521 | int_cause = rdl(mp, INT_CAUSE(port_num)) & (INT_RX | INT_EXT); | 1542 | int_cause = rdl(mp, INT_CAUSE(mp->port_num)) & (INT_RX | INT_EXT); |
1522 | if (int_cause & INT_EXT) { | 1543 | if (int_cause & INT_EXT) { |
1523 | int_cause_ext = rdl(mp, INT_CAUSE_EXT(port_num)) | 1544 | int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num)) |
1524 | & (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX); | 1545 | & (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX); |
1525 | wrl(mp, INT_CAUSE_EXT(port_num), ~int_cause_ext); | 1546 | wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext); |
1526 | } | 1547 | } |
1527 | 1548 | ||
1528 | /* PHY status changed */ | 1549 | /* PHY status changed */ |
1529 | if (int_cause_ext & (INT_EXT_LINK | INT_EXT_PHY)) { | 1550 | if (int_cause_ext & (INT_EXT_LINK | INT_EXT_PHY)) { |
1530 | struct ethtool_cmd cmd; | ||
1531 | |||
1532 | if (mii_link_ok(&mp->mii)) { | 1551 | if (mii_link_ok(&mp->mii)) { |
1552 | struct ethtool_cmd cmd; | ||
1553 | |||
1533 | mii_ethtool_gset(&mp->mii, &cmd); | 1554 | mii_ethtool_gset(&mp->mii, &cmd); |
1534 | mv643xx_eth_update_pscr(dev, &cmd); | 1555 | mv643xx_eth_update_pscr(mp, &cmd); |
1535 | mv643xx_eth_port_enable_tx(mp, 1); | 1556 | txq_enable(mp->txq); |
1536 | if (!netif_carrier_ok(dev)) { | 1557 | if (!netif_carrier_ok(dev)) { |
1537 | netif_carrier_on(dev); | 1558 | netif_carrier_on(dev); |
1538 | if (mp->tx_ring_size - mp->tx_desc_count >= | 1559 | __txq_maybe_wake(mp->txq); |
1539 | MAX_DESCS_PER_SKB) | ||
1540 | netif_wake_queue(dev); | ||
1541 | } | 1560 | } |
1542 | } else if (netif_carrier_ok(dev)) { | 1561 | } else if (netif_carrier_ok(dev)) { |
1543 | netif_stop_queue(dev); | 1562 | netif_stop_queue(dev); |
@@ -1548,10 +1567,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
1548 | #ifdef MV643XX_ETH_NAPI | 1567 | #ifdef MV643XX_ETH_NAPI |
1549 | if (int_cause & INT_RX) { | 1568 | if (int_cause & INT_RX) { |
1550 | /* schedule the NAPI poll routine to maintain port */ | 1569 | /* schedule the NAPI poll routine to maintain port */ |
1551 | wrl(mp, INT_MASK(port_num), 0x00000000); | 1570 | wrl(mp, INT_MASK(mp->port_num), 0x00000000); |
1552 | 1571 | ||
1553 | /* wait for previous write to complete */ | 1572 | /* wait for previous write to complete */ |
1554 | rdl(mp, INT_MASK(port_num)); | 1573 | rdl(mp, INT_MASK(mp->port_num)); |
1555 | 1574 | ||
1556 | netif_rx_schedule(dev, &mp->napi); | 1575 | netif_rx_schedule(dev, &mp->napi); |
1557 | } | 1576 | } |
@@ -1559,8 +1578,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
1559 | if (int_cause & INT_RX) | 1578 | if (int_cause & INT_RX) |
1560 | rxq_process(mp->rxq, INT_MAX); | 1579 | rxq_process(mp->rxq, INT_MAX); |
1561 | #endif | 1580 | #endif |
1562 | if (int_cause_ext & INT_EXT_TX) | 1581 | if (int_cause_ext & INT_EXT_TX) { |
1563 | mv643xx_eth_free_completed_tx_descs(dev); | 1582 | txq_reclaim(mp->txq, 0); |
1583 | __txq_maybe_wake(mp->txq); | ||
1584 | } | ||
1564 | 1585 | ||
1565 | /* | 1586 | /* |
1566 | * If no real interrupt occured, exit. | 1587 | * If no real interrupt occured, exit. |
@@ -1616,6 +1637,20 @@ static void port_start(struct net_device *dev)
1616 | phy_reset(mp); | 1637 | phy_reset(mp); |
1617 | mv643xx_eth_set_settings(dev, ðtool_cmd); | 1638 | mv643xx_eth_set_settings(dev, ðtool_cmd); |
1618 | 1639 | ||
1640 | /* | ||
1641 | * Configure TX path and queues. | ||
1642 | */ | ||
1643 | wrl(mp, TX_BW_MTU(mp->port_num), 0); | ||
1644 | for (i = 0; i < 1; i++) { | ||
1645 | struct tx_queue *txq = mp->txq; | ||
1646 | int off = TXQ_CURRENT_DESC_PTR(mp->port_num); | ||
1647 | u32 addr; | ||
1648 | |||
1649 | addr = (u32)txq->tx_desc_dma; | ||
1650 | addr += txq->tx_curr_desc * sizeof(struct tx_desc); | ||
1651 | wrl(mp, off, addr); | ||
1652 | } | ||
1653 | |||
1619 | /* Add the assigned Ethernet address to the port's address table */ | 1654 | /* Add the assigned Ethernet address to the port's address table */ |
1620 | uc_addr_set(mp, dev->dev_addr); | 1655 | uc_addr_set(mp, dev->dev_addr); |
1621 | 1656 | ||
@@ -1644,13 +1679,6 @@ static void port_start(struct net_device *dev)
1644 | 1679 | ||
1645 | rxq_enable(rxq); | 1680 | rxq_enable(rxq); |
1646 | } | 1681 | } |
1647 | |||
1648 | |||
1649 | wrl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num), | ||
1650 | (u32)((struct tx_desc *)mp->tx_desc_dma + mp->tx_curr_desc)); | ||
1651 | |||
1652 | /* Disable port bandwidth limits by clearing MTU register */ | ||
1653 | wrl(mp, TX_BW_MTU(mp->port_num), 0); | ||
1654 | } | 1682 | } |
1655 | 1683 | ||
1656 | #ifdef MV643XX_ETH_COAL | 1684 | #ifdef MV643XX_ETH_COAL |
@@ -1692,7 +1720,6 @@ static int mv643xx_eth_open(struct net_device *dev)
1692 | { | 1720 | { |
1693 | struct mv643xx_eth_private *mp = netdev_priv(dev); | 1721 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
1694 | unsigned int port_num = mp->port_num; | 1722 | unsigned int port_num = mp->port_num; |
1695 | unsigned int size; | ||
1696 | int err; | 1723 | int err; |
1697 | 1724 | ||
1698 | /* Clear any pending ethernet port interrupts */ | 1725 | /* Clear any pending ethernet port interrupts */ |
@@ -1715,38 +1742,9 @@ static int mv643xx_eth_open(struct net_device *dev)
1715 | goto out_free_irq; | 1742 | goto out_free_irq; |
1716 | rxq_refill(mp->rxq); | 1743 | rxq_refill(mp->rxq); |
1717 | 1744 | ||
1718 | mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size, | 1745 | err = txq_init(mp); |
1719 | GFP_KERNEL); | 1746 | if (err) |
1720 | if (!mp->tx_skb) { | ||
1721 | printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name); | ||
1722 | err = -ENOMEM; | ||
1723 | goto out_free_rx_skb; | 1747 | goto out_free_rx_skb; |
1724 | } | ||
1725 | |||
1726 | /* Allocate TX ring */ | ||
1727 | mp->tx_desc_count = 0; | ||
1728 | size = mp->tx_ring_size * sizeof(struct tx_desc); | ||
1729 | mp->tx_desc_area_size = size; | ||
1730 | |||
1731 | if (mp->tx_sram_size) { | ||
1732 | mp->tx_desc_area = ioremap(mp->tx_sram_addr, | ||
1733 | mp->tx_sram_size); | ||
1734 | mp->tx_desc_dma = mp->tx_sram_addr; | ||
1735 | } else | ||
1736 | mp->tx_desc_area = dma_alloc_coherent(NULL, size, | ||
1737 | &mp->tx_desc_dma, | ||
1738 | GFP_KERNEL); | ||
1739 | |||
1740 | if (!mp->tx_desc_area) { | ||
1741 | printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n", | ||
1742 | dev->name, size); | ||
1743 | err = -ENOMEM; | ||
1744 | goto out_free_tx_skb; | ||
1745 | } | ||
1746 | BUG_ON((u32) mp->tx_desc_area & 0xf); /* check 16-byte alignment */ | ||
1747 | memset((void *)mp->tx_desc_area, 0, mp->tx_desc_area_size); | ||
1748 | |||
1749 | ether_init_tx_desc_ring(mp); | ||
1750 | 1748 | ||
1751 | #ifdef MV643XX_ETH_NAPI | 1749 | #ifdef MV643XX_ETH_NAPI |
1752 | napi_enable(&mp->napi); | 1750 | napi_enable(&mp->napi); |
@@ -1770,8 +1768,7 @@ static int mv643xx_eth_open(struct net_device *dev)
1770 | 1768 | ||
1771 | return 0; | 1769 | return 0; |
1772 | 1770 | ||
1773 | out_free_tx_skb: | 1771 | |
1774 | kfree(mp->tx_skb); | ||
1775 | out_free_rx_skb: | 1772 | out_free_rx_skb: |
1776 | rxq_deinit(mp->rxq); | 1773 | rxq_deinit(mp->rxq); |
1777 | out_free_irq: | 1774 | out_free_irq: |
@@ -1785,8 +1782,10 @@ static void port_reset(struct mv643xx_eth_private *mp)
1785 | unsigned int port_num = mp->port_num; | 1782 | unsigned int port_num = mp->port_num; |
1786 | unsigned int reg_data; | 1783 | unsigned int reg_data; |
1787 | 1784 | ||
1788 | mv643xx_eth_port_disable_tx(mp); | 1785 | txq_disable(mp->txq); |
1789 | rxq_disable(mp->rxq); | 1786 | rxq_disable(mp->rxq); |
1787 | while (!(rdl(mp, PORT_STATUS(mp->port_num)) & TX_FIFO_EMPTY)) | ||
1788 | udelay(10); | ||
1790 | 1789 | ||
1791 | /* Clear all MIB counters */ | 1790 | /* Clear all MIB counters */ |
1792 | clear_mib_counters(mp); | 1791 | clear_mib_counters(mp); |
@@ -1817,7 +1816,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
1817 | 1816 | ||
1818 | port_reset(mp); | 1817 | port_reset(mp); |
1819 | 1818 | ||
1820 | mv643xx_eth_free_tx_rings(dev); | 1819 | txq_deinit(mp->txq); |
1821 | rxq_deinit(mp->rxq); | 1820 | rxq_deinit(mp->rxq); |
1822 | 1821 | ||
1823 | free_irq(dev->irq, dev); | 1822 | free_irq(dev->irq, dev); |
@@ -1870,8 +1869,7 @@ static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
1870 | port_reset(mp); | 1869 | port_reset(mp); |
1871 | port_start(dev); | 1870 | port_start(dev); |
1872 | 1871 | ||
1873 | if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB) | 1872 | __txq_maybe_wake(mp->txq); |
1874 | netif_wake_queue(dev); | ||
1875 | } | 1873 | } |
1876 | 1874 | ||
1877 | static void mv643xx_eth_tx_timeout(struct net_device *dev) | 1875 | static void mv643xx_eth_tx_timeout(struct net_device *dev) |
@@ -2171,7 +2169,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2171 | 2169 | ||
2172 | /* set default config values */ | 2170 | /* set default config values */ |
2173 | uc_addr_get(mp, dev->dev_addr); | 2171 | uc_addr_get(mp, dev->dev_addr); |
2174 | mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE; | ||
2175 | 2172 | ||
2176 | if (is_valid_ether_addr(pd->mac_addr)) | 2173 | if (is_valid_ether_addr(pd->mac_addr)) |
2177 | memcpy(dev->dev_addr, pd->mac_addr, 6); | 2174 | memcpy(dev->dev_addr, pd->mac_addr, 6); |
@@ -2183,12 +2180,13 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2183 | if (pd->rx_queue_size) | 2180 | if (pd->rx_queue_size) |
2184 | mp->default_rx_ring_size = pd->rx_queue_size; | 2181 | mp->default_rx_ring_size = pd->rx_queue_size; |
2185 | 2182 | ||
2183 | mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE; | ||
2186 | if (pd->tx_queue_size) | 2184 | if (pd->tx_queue_size) |
2187 | mp->tx_ring_size = pd->tx_queue_size; | 2185 | mp->default_tx_ring_size = pd->tx_queue_size; |
2188 | 2186 | ||
2189 | if (pd->tx_sram_size) { | 2187 | if (pd->tx_sram_size) { |
2190 | mp->tx_sram_size = pd->tx_sram_size; | 2188 | mp->tx_desc_sram_size = pd->tx_sram_size; |
2191 | mp->tx_sram_addr = pd->tx_sram_addr; | 2189 | mp->tx_desc_sram_addr = pd->tx_sram_addr; |
2192 | } | 2190 | } |
2193 | 2191 | ||
2194 | if (pd->rx_sram_size) { | 2192 | if (pd->rx_sram_size) { |
@@ -2217,7 +2215,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2217 | phy_reset(mp); | 2215 | phy_reset(mp); |
2218 | mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii); | 2216 | mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii); |
2219 | mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd); | 2217 | mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd); |
2220 | mv643xx_eth_update_pscr(dev, &cmd); | 2218 | mv643xx_eth_update_pscr(mp, &cmd); |
2221 | mv643xx_eth_set_settings(dev, &cmd); | 2219 | mv643xx_eth_set_settings(dev, &cmd); |
2222 | 2220 | ||
2223 | SET_NETDEV_DEV(dev, &pdev->dev); | 2221 | SET_NETDEV_DEV(dev, &pdev->dev); |
@@ -2250,7 +2248,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2250 | printk(KERN_NOTICE "%s: RX NAPI Enabled \n", dev->name); | 2248 | printk(KERN_NOTICE "%s: RX NAPI Enabled \n", dev->name); |
2251 | #endif | 2249 | #endif |
2252 | 2250 | ||
2253 | if (mp->tx_sram_size > 0) | 2251 | if (mp->tx_desc_sram_size > 0) |
2254 | printk(KERN_NOTICE "%s: Using SRAM\n", dev->name); | 2252 | printk(KERN_NOTICE "%s: Using SRAM\n", dev->name); |
2255 | 2253 | ||
2256 | return 0; | 2254 | return 0; |