diff options
author | Lennert Buytenhek <buytenh@wantstofly.org> | 2008-06-01 04:31:56 -0400 |
---|---|---|
committer | Lennert Buytenhek <buytenh@wantstofly.org> | 2008-06-12 02:40:32 -0400 |
commit | 9658766152c0e3759f61cc4e75aef6d8450bd5bb (patch) | |
tree | 7ea31a36fa86180d48bb55b105a1972de939cca0 /drivers/net/mv643xx_eth.c | |
parent | 5daffe945a60376b626896c371ef78f24698b988 (diff) |
mv643xx_eth: move port_receive() into its only caller
The port_receive() function is a remnant of the original mv643xx_eth
HAL split. This patch moves port_receive() into its caller, so that
the top and the bottom half of RX processing no longer communicate
via the HAL FUNC_RET_STATUS/pkt_info mechanism abstraction.
Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Acked-by: Dale Farnsworth <dale@farnsworth.org>
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r-- | drivers/net/mv643xx_eth.c | 91 |
1 files changed, 32 insertions, 59 deletions
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index ac7fc7a53678..58c1c1b0aa54 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -518,64 +518,38 @@ static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data) | |||
518 | mv643xx_eth_rx_refill_descs((struct net_device *)data); | 518 | mv643xx_eth_rx_refill_descs((struct net_device *)data); |
519 | } | 519 | } |
520 | 520 | ||
521 | static FUNC_RET_STATUS port_receive(struct mv643xx_eth_private *mp, | 521 | static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) |
522 | struct pkt_info *pkt_info) | ||
523 | { | 522 | { |
524 | int rx_next_curr_desc, rx_curr_desc, rx_used_desc; | 523 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
525 | volatile struct rx_desc *rx_desc; | 524 | struct net_device_stats *stats = &dev->stats; |
526 | unsigned int command_status; | 525 | unsigned int received_packets = 0; |
527 | unsigned long flags; | ||
528 | |||
529 | spin_lock_irqsave(&mp->lock, flags); | ||
530 | |||
531 | /* Get the Rx Desc ring 'curr and 'used' indexes */ | ||
532 | rx_curr_desc = mp->rx_curr_desc; | ||
533 | rx_used_desc = mp->rx_used_desc; | ||
534 | |||
535 | rx_desc = &mp->rx_desc_area[rx_curr_desc]; | ||
536 | |||
537 | /* The following parameters are used to save readings from memory */ | ||
538 | command_status = rx_desc->cmd_sts; | ||
539 | rmb(); | ||
540 | 526 | ||
541 | /* Nothing to receive... */ | 527 | while (budget-- > 0) { |
542 | if (command_status & BUFFER_OWNED_BY_DMA) { | 528 | struct sk_buff *skb; |
543 | spin_unlock_irqrestore(&mp->lock, flags); | 529 | volatile struct rx_desc *rx_desc; |
544 | return ETH_END_OF_JOB; | 530 | unsigned int cmd_sts; |
545 | } | 531 | unsigned long flags; |
546 | 532 | ||
547 | pkt_info->byte_cnt = rx_desc->byte_cnt - ETH_HW_IP_ALIGN; | 533 | spin_lock_irqsave(&mp->lock, flags); |
548 | pkt_info->cmd_sts = command_status; | ||
549 | pkt_info->buf_ptr = rx_desc->buf_ptr + ETH_HW_IP_ALIGN; | ||
550 | pkt_info->return_info = mp->rx_skb[rx_curr_desc]; | ||
551 | pkt_info->l4i_chk = rx_desc->buf_size; | ||
552 | 534 | ||
553 | /* | 535 | rx_desc = &mp->rx_desc_area[mp->rx_curr_desc]; |
554 | * Clean the return info field to indicate that the | ||
555 | * packet has been moved to the upper layers | ||
556 | */ | ||
557 | mp->rx_skb[rx_curr_desc] = NULL; | ||
558 | 536 | ||
559 | /* Update current index in data structure */ | 537 | cmd_sts = rx_desc->cmd_sts; |
560 | rx_next_curr_desc = (rx_curr_desc + 1) % mp->rx_ring_size; | 538 | if (cmd_sts & BUFFER_OWNED_BY_DMA) { |
561 | mp->rx_curr_desc = rx_next_curr_desc; | 539 | spin_unlock_irqrestore(&mp->lock, flags); |
540 | break; | ||
541 | } | ||
542 | rmb(); | ||
562 | 543 | ||
563 | spin_unlock_irqrestore(&mp->lock, flags); | 544 | skb = mp->rx_skb[mp->rx_curr_desc]; |
545 | mp->rx_skb[mp->rx_curr_desc] = NULL; | ||
564 | 546 | ||
565 | return ETH_OK; | 547 | mp->rx_curr_desc = (mp->rx_curr_desc + 1) % mp->rx_ring_size; |
566 | } | ||
567 | 548 | ||
568 | static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) | 549 | spin_unlock_irqrestore(&mp->lock, flags); |
569 | { | ||
570 | struct mv643xx_eth_private *mp = netdev_priv(dev); | ||
571 | struct net_device_stats *stats = &dev->stats; | ||
572 | unsigned int received_packets = 0; | ||
573 | struct sk_buff *skb; | ||
574 | struct pkt_info pkt_info; | ||
575 | 550 | ||
576 | while (budget-- > 0 && port_receive(mp, &pkt_info) == ETH_OK) { | 551 | dma_unmap_single(NULL, rx_desc->buf_ptr + ETH_HW_IP_ALIGN, |
577 | dma_unmap_single(NULL, pkt_info.buf_ptr, ETH_RX_SKB_SIZE, | 552 | ETH_RX_SKB_SIZE, DMA_FROM_DEVICE); |
578 | DMA_FROM_DEVICE); | ||
579 | mp->rx_desc_count--; | 553 | mp->rx_desc_count--; |
580 | received_packets++; | 554 | received_packets++; |
581 | 555 | ||
@@ -584,18 +558,17 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) | |||
584 | * Note byte count includes 4 byte CRC count | 558 | * Note byte count includes 4 byte CRC count |
585 | */ | 559 | */ |
586 | stats->rx_packets++; | 560 | stats->rx_packets++; |
587 | stats->rx_bytes += pkt_info.byte_cnt; | 561 | stats->rx_bytes += rx_desc->byte_cnt - ETH_HW_IP_ALIGN; |
588 | skb = pkt_info.return_info; | 562 | |
589 | /* | 563 | /* |
590 | * In case received a packet without first / last bits on OR | 564 | * In case received a packet without first / last bits on OR |
591 | * the error summary bit is on, the packets needs to be dropeed. | 565 | * the error summary bit is on, the packets needs to be dropeed. |
592 | */ | 566 | */ |
593 | if (((pkt_info.cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != | 567 | if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != |
594 | (RX_FIRST_DESC | RX_LAST_DESC)) | 568 | (RX_FIRST_DESC | RX_LAST_DESC)) |
595 | || (pkt_info.cmd_sts & ERROR_SUMMARY)) { | 569 | || (cmd_sts & ERROR_SUMMARY)) { |
596 | stats->rx_dropped++; | 570 | stats->rx_dropped++; |
597 | if ((pkt_info.cmd_sts & (RX_FIRST_DESC | | 571 | if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != |
598 | RX_LAST_DESC)) != | ||
599 | (RX_FIRST_DESC | RX_LAST_DESC)) { | 572 | (RX_FIRST_DESC | RX_LAST_DESC)) { |
600 | if (net_ratelimit()) | 573 | if (net_ratelimit()) |
601 | printk(KERN_ERR | 574 | printk(KERN_ERR |
@@ -603,7 +576,7 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) | |||
603 | "on multiple descriptors\n", | 576 | "on multiple descriptors\n", |
604 | dev->name); | 577 | dev->name); |
605 | } | 578 | } |
606 | if (pkt_info.cmd_sts & ERROR_SUMMARY) | 579 | if (cmd_sts & ERROR_SUMMARY) |
607 | stats->rx_errors++; | 580 | stats->rx_errors++; |
608 | 581 | ||
609 | dev_kfree_skb_irq(skb); | 582 | dev_kfree_skb_irq(skb); |
@@ -612,12 +585,12 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) | |||
612 | * The -4 is for the CRC in the trailer of the | 585 | * The -4 is for the CRC in the trailer of the |
613 | * received packet | 586 | * received packet |
614 | */ | 587 | */ |
615 | skb_put(skb, pkt_info.byte_cnt - 4); | 588 | skb_put(skb, rx_desc->byte_cnt - ETH_HW_IP_ALIGN - 4); |
616 | 589 | ||
617 | if (pkt_info.cmd_sts & LAYER_4_CHECKSUM_OK) { | 590 | if (cmd_sts & LAYER_4_CHECKSUM_OK) { |
618 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 591 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
619 | skb->csum = htons( | 592 | skb->csum = htons( |
620 | (pkt_info.cmd_sts & 0x0007fff8) >> 3); | 593 | (cmd_sts & 0x0007fff8) >> 3); |
621 | } | 594 | } |
622 | skb->protocol = eth_type_trans(skb, dev); | 595 | skb->protocol = eth_type_trans(skb, dev); |
623 | #ifdef MV643XX_ETH_NAPI | 596 | #ifdef MV643XX_ETH_NAPI |