author    Lennert Buytenhek <buytenh@wantstofly.org>    2008-06-01 04:07:49 -0400
committer Lennert Buytenhek <buytenh@wantstofly.org>    2008-06-12 02:40:32 -0400
commit    de34f225e112ddfabb56043a2be64bf7f69c1885 (patch)
tree      a9bccd4cd11cc3be0e0d074cea12bf1dc0f4f718 /drivers/net/mv643xx_eth.c
parent    9658766152c0e3759f61cc4e75aef6d8450bd5bb (diff)
mv643xx_eth: move rx_return_buff() into its only caller
rx_return_buff() is also a remnant of the HAL layering that the original
mv643xx_eth driver used.  Moving it into its caller kills the last
reference to FUNC_RET_STATUS/pkt_info.

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Acked-by: Dale Farnsworth <dale@farnsworth.org>
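In short, the refactor drops the indirection through struct pkt_info: instead of packing buffer address, size and skb pointer into a pkt_info and handing it to rx_return_buff() to copy into the next free descriptor, the refill loop now writes the ring descriptor directly. The following is a minimal standalone sketch of that before/after shape; the struct and ring definitions are simplified stand-ins for illustration (the real driver uses mv643xx_eth_private, rx_desc_area[], rx_skb[] and rx_used_desc), and the DMA-ownership handoff (wmb() plus cmd_sts) is omitted for brevity.

/* Simplified stand-ins for the driver's receive ring state. */
struct rx_desc {
        unsigned int buf_ptr;
        unsigned short buf_size;
        unsigned int cmd_sts;
};

struct rx_ring {
        struct rx_desc desc[64];        /* rx_desc_area in the driver */
        void *skb[64];                  /* rx_skb[] in the driver     */
        int used;                       /* rx_used_desc               */
        int size;                       /* rx_ring_size               */
};

/* Before: the caller fills a pkt_info and a helper copies it into the ring. */
struct pkt_info {
        unsigned int buf_ptr;
        unsigned short byte_cnt;
        void *return_info;
};

static void old_style_return_buff(struct rx_ring *ring, struct pkt_info *pi)
{
        struct rx_desc *d = &ring->desc[ring->used];

        d->buf_ptr = pi->buf_ptr;
        d->buf_size = pi->byte_cnt;
        ring->skb[ring->used] = pi->return_info;
        ring->used = (ring->used + 1) % ring->size;
}

/* After: the refill loop writes the next free descriptor in place. */
static void new_style_refill_one(struct rx_ring *ring, unsigned int dma_addr,
                                 unsigned short len, void *skb)
{
        int rx = ring->used;

        ring->used = (rx + 1) % ring->size;
        ring->desc[rx].buf_ptr = dma_addr;
        ring->desc[rx].buf_size = len;
        ring->skb[rx] = skb;
}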
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--    drivers/net/mv643xx_eth.c    80
1 file changed, 28 insertions(+), 52 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 58c1c1b0aa54..0e4babbe37ae 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -443,74 +443,50 @@ static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_eth_private *mp)
 /* rx ***********************************************************************/
 static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev);
 
-static FUNC_RET_STATUS rx_return_buff(struct mv643xx_eth_private *mp,
-                                      struct pkt_info *pkt_info)
+static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
 {
-        int used_rx_desc;       /* Where to return Rx resource */
-        volatile struct rx_desc *rx_desc;
+        struct mv643xx_eth_private *mp = netdev_priv(dev);
         unsigned long flags;
 
         spin_lock_irqsave(&mp->lock, flags);
 
-        /* Get 'used' Rx descriptor */
-        used_rx_desc = mp->rx_used_desc;
-        rx_desc = &mp->rx_desc_area[used_rx_desc];
-
-        rx_desc->buf_ptr = pkt_info->buf_ptr;
-        rx_desc->buf_size = pkt_info->byte_cnt;
-        mp->rx_skb[used_rx_desc] = pkt_info->return_info;
-
-        /* Flush the write pipe */
-
-        /* Return the descriptor to DMA ownership */
-        wmb();
-        rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
-        wmb();
-
-        /* Move the used descriptor pointer to the next descriptor */
-        mp->rx_used_desc = (used_rx_desc + 1) % mp->rx_ring_size;
-
-        spin_unlock_irqrestore(&mp->lock, flags);
-
-        return ETH_OK;
-}
-
-static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
-{
-        struct mv643xx_eth_private *mp = netdev_priv(dev);
-        struct pkt_info pkt_info;
-        struct sk_buff *skb;
-        int unaligned;
-
         while (mp->rx_desc_count < mp->rx_ring_size) {
+                struct sk_buff *skb;
+                int unaligned;
+                int rx;
+
                 skb = dev_alloc_skb(ETH_RX_SKB_SIZE + dma_get_cache_alignment());
-                if (!skb)
+                if (skb == NULL)
                         break;
-                mp->rx_desc_count++;
+
                 unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
                 if (unaligned)
                         skb_reserve(skb, dma_get_cache_alignment() - unaligned);
-                pkt_info.cmd_sts = RX_ENABLE_INTERRUPT;
-                pkt_info.byte_cnt = ETH_RX_SKB_SIZE;
-                pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
-                                        ETH_RX_SKB_SIZE, DMA_FROM_DEVICE);
-                pkt_info.return_info = skb;
-                if (rx_return_buff(mp, &pkt_info) != ETH_OK) {
-                        printk(KERN_ERR
-                                "%s: Error allocating RX Ring\n", dev->name);
-                        break;
-                }
+
+                mp->rx_desc_count++;
+                rx = mp->rx_used_desc;
+                mp->rx_used_desc = (rx + 1) % mp->rx_ring_size;
+
+                mp->rx_desc_area[rx].buf_ptr = dma_map_single(NULL,
+                                                        skb->data,
+                                                        ETH_RX_SKB_SIZE,
+                                                        DMA_FROM_DEVICE);
+                mp->rx_desc_area[rx].buf_size = ETH_RX_SKB_SIZE;
+                mp->rx_skb[rx] = skb;
+                wmb();
+                mp->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
+                                                RX_ENABLE_INTERRUPT;
+                wmb();
+
                 skb_reserve(skb, ETH_HW_IP_ALIGN);
         }
-        /*
-         * If RX ring is empty of SKB, set a timer to try allocating
-         * again at a later time.
-         */
+
         if (mp->rx_desc_count == 0) {
-                printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
-                mp->timeout.expires = jiffies + (HZ / 10);      /* 100 mSec */
+                mp->timeout.expires = jiffies + (HZ / 10);
                 add_timer(&mp->timeout);
         }
+
+        spin_unlock_irqrestore(&mp->lock, flags);
 }
 
 static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)