author    | Lennert Buytenhek <buytenh@wantstofly.org> | 2008-08-23 17:45:28 -0400
committer | Lennert Buytenhek <buytenh@marvell.com>    | 2008-09-05 00:33:57 -0400
commit    | 9da7874575468ad3b126d1b9197b6ae387950bb4 (patch)
tree      | 9fd86fe5907a59cd77ea445c7d1d9a5fb1f1f4b5 /drivers/net/mv643xx_eth.c
parent    | 2a1867a76fc13499521af1f0dbcf08ddb3ef78ba (diff)
mv643xx_eth: get rid of modulo operations
Get rid of the modulo operations that are currently used for
computing successive TX/RX descriptor ring indexes.
Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
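For context, here is a minimal standalone sketch of the two indexing idioms this patch swaps. The names next_mod, next_wrap, and RING_SIZE are chosen for illustration only; this is not the driver's code. A modulo by a runtime ring size generally compiles to a division (or a library call on CPUs without a hardware divide instruction), while the increment-and-compare form costs only one well-predicted branch; both walk the ring identically.

/* Illustrative sketch only; names are hypothetical, not from the driver. */
#include <stdio.h>

#define RING_SIZE 8	/* any size works; need not be a power of two */

/* Old idiom: modulo. With a non-constant ring size this typically
 * costs a divide, or a libgcc call on CPUs without a hardware divider. */
static unsigned int next_mod(unsigned int idx, unsigned int ring_size)
{
	return (idx + 1) % ring_size;
}

/* New idiom: increment, then wrap with a compare. No division. */
static unsigned int next_wrap(unsigned int idx, unsigned int ring_size)
{
	idx++;
	if (idx == ring_size)
		idx = 0;
	return idx;
}

int main(void)
{
	unsigned int a = 0, b = 0;

	/* Both idioms visit the same sequence of ring indexes. */
	for (int i = 0; i < 2 * RING_SIZE; i++) {
		a = next_mod(a, RING_SIZE);
		b = next_wrap(b, RING_SIZE);
		printf("%u %u\n", a, b);
	}
	return 0;
}

Note that the compare-and-wrap form loses no generality over the modulo form: it handles ring sizes that are not powers of two just as well.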
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r-- | drivers/net/mv643xx_eth.c | 32
1 file changed, 24 insertions(+), 8 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 8c812c3d1b7d..2f6cec4e8499 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -497,8 +497,10 @@ static void rxq_refill(struct rx_queue *rxq)
 		skb_reserve(skb, dma_get_cache_alignment() - unaligned);
 
 		rxq->rx_desc_count++;
-		rx = rxq->rx_used_desc;
-		rxq->rx_used_desc = (rx + 1) % rxq->rx_ring_size;
+
+		rx = rxq->rx_used_desc++;
+		if (rxq->rx_used_desc == rxq->rx_ring_size)
+			rxq->rx_used_desc = 0;
 
 		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
 						skb_size, DMA_FROM_DEVICE);
@@ -555,7 +557,9 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 		skb = rxq->rx_skb[rxq->rx_curr_desc];
 		rxq->rx_skb[rxq->rx_curr_desc] = NULL;
 
-		rxq->rx_curr_desc = (rxq->rx_curr_desc + 1) % rxq->rx_ring_size;
+		rxq->rx_curr_desc++;
+		if (rxq->rx_curr_desc == rxq->rx_ring_size)
+			rxq->rx_curr_desc = 0;
 
 		spin_unlock_irqrestore(&mp->lock, flags);
 
@@ -684,8 +688,9 @@ static int txq_alloc_desc_index(struct tx_queue *txq)
 
 	BUG_ON(txq->tx_desc_count >= txq->tx_ring_size);
 
-	tx_desc_curr = txq->tx_curr_desc;
-	txq->tx_curr_desc = (tx_desc_curr + 1) % txq->tx_ring_size;
+	tx_desc_curr = txq->tx_curr_desc++;
+	if (txq->tx_curr_desc == txq->tx_ring_size)
+		txq->tx_curr_desc = 0;
 
 	BUG_ON(txq->tx_curr_desc == txq->tx_used_desc);
 
@@ -1515,7 +1520,12 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
 
 	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
 	for (i = 0; i < rxq->rx_ring_size; i++) {
-		int nexti = (i + 1) % rxq->rx_ring_size;
+		int nexti;
+
+		nexti = i + 1;
+		if (nexti == rxq->rx_ring_size)
+			nexti = 0;
+
 		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
 					nexti * sizeof(struct rx_desc);
 	}
@@ -1617,7 +1627,11 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 	tx_desc = (struct tx_desc *)txq->tx_desc_area;
 	for (i = 0; i < txq->tx_ring_size; i++) {
 		struct tx_desc *txd = tx_desc + i;
-		int nexti = (i + 1) % txq->tx_ring_size;
+		int nexti;
+
+		nexti = i + 1;
+		if (nexti == txq->tx_ring_size)
+			nexti = 0;
 
 		txd->cmd_sts = 0;
 		txd->next_desc_ptr = txq->tx_desc_dma +
@@ -1663,7 +1677,9 @@ static void txq_reclaim(struct tx_queue *txq, int force)
 		desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
 	}
 
-	txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size;
+	txq->tx_used_desc = tx_index + 1;
+	if (txq->tx_used_desc == txq->tx_ring_size)
+		txq->tx_used_desc = 0;
 	txq->tx_desc_count--;
 
 	addr = desc->buf_ptr;