author    Lennert Buytenhek <buytenh@marvell.com>    2008-11-20 06:58:09 -0500
committer David S. Miller <davem@davemloft.net>      2008-11-20 06:58:09 -0500
commit    66823b928d746df21485deeff6744c77702abf29 (patch)
tree      e6125581bc11445456236696900c2a4af767a87b /drivers/net/mv643xx_eth.c
parent    37a6084f4b4693a408ac2fb229843af9f9f301ce (diff)
mv643xx_eth: inline txq_alloc_desc_index()
Since txq_alloc_desc_index() is a very simple function, and since the
descriptor ring index handling for transmit reclaim, receive processing,
and receive refill is already done inline, inline txq_alloc_desc_index()
into its two call sites as well.
Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
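
For illustration, here is a minimal standalone sketch (not actual driver
code) of the wrap-around descriptor-index pattern the patch inlines: take
the current slot, advance the cursor, and wrap it to zero at the end of
the ring. The field names mirror struct tx_queue in mv643xx_eth.c; the
program around them is hypothetical.

/*
 * Minimal sketch of the wrap-around descriptor-index pattern that
 * this patch inlines at both call sites.  The field names mirror
 * struct tx_queue in mv643xx_eth.c; the rest is illustrative.
 */
#include <stdio.h>

struct tx_queue {
	int tx_curr_desc;	/* next descriptor slot to hand out */
	int tx_ring_size;	/* total number of slots in the ring */
};

static int alloc_desc_index(struct tx_queue *txq)
{
	int tx_index = txq->tx_curr_desc++;	/* take the current slot */

	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;		/* wrap at end of ring */

	return tx_index;
}

int main(void)
{
	struct tx_queue txq = { .tx_curr_desc = 0, .tx_ring_size = 4 };
	int i;

	/* Six allocations on a 4-entry ring print: 0 1 2 3 0 1 */
	for (i = 0; i < 6; i++)
		printf("%d ", alloc_desc_index(&txq));
	printf("\n");

	return 0;
}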
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--  drivers/net/mv643xx_eth.c  23
1 file changed, 6 insertions(+), 17 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 0f73d85a6e9d..3326587d8ce3 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -651,21 +651,6 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
 	return 0;
 }
 
-static int txq_alloc_desc_index(struct tx_queue *txq)
-{
-	int tx_desc_curr;
-
-	BUG_ON(txq->tx_desc_count >= txq->tx_ring_size);
-
-	tx_desc_curr = txq->tx_curr_desc++;
-	if (txq->tx_curr_desc == txq->tx_ring_size)
-		txq->tx_curr_desc = 0;
-
-	BUG_ON(txq->tx_curr_desc == txq->tx_used_desc);
-
-	return tx_desc_curr;
-}
-
 static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 {
 	int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -677,7 +662,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 		struct tx_desc *desc;
 
 		this_frag = &skb_shinfo(skb)->frags[frag];
-		tx_index = txq_alloc_desc_index(txq);
+		tx_index = txq->tx_curr_desc++;
+		if (txq->tx_curr_desc == txq->tx_ring_size)
+			txq->tx_curr_desc = 0;
 		desc = &txq->tx_desc_area[tx_index];
 
 		/*
@@ -759,7 +746,9 @@ no_csum:
 		cmd_sts |= 5 << TX_IHL_SHIFT;
 	}
 
-	tx_index = txq_alloc_desc_index(txq);
+	tx_index = txq->tx_curr_desc++;
+	if (txq->tx_curr_desc == txq->tx_ring_size)
+		txq->tx_curr_desc = 0;
 	desc = &txq->tx_desc_area[tx_index];
 
 	if (nr_frags) {