author    Daniel Hellstrom <daniel@gaisler.com>    2011-01-13 22:02:39 -0500
committer David S. Miller <davem@davemloft.net>   2011-01-14 15:45:53 -0500
commit    2a2bc012b98729ce9a39386faed28d11ee021683 (patch)
tree      0f7de3c1caf167444abfe9f6f449759f94a094d4 /drivers/net/greth.c
parent    bbe9e637330abe55442aebe799425e224086959f (diff)
GRETH: GBit transmit descriptor handling optimization
It is safe to enable all fragment descriptors before enabling the first
descriptor; this way the descriptors do not have to be processed twice.
An extra memory barrier is added between writing the descriptors and
enabling the first one.
Signed-off-by: Daniel Hellstrom <daniel@gaisler.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
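For illustration, below is a minimal standalone C sketch of the "enable the
first descriptor last" scheme this patch adopts. All names here (BD_EN,
RING_SZ, post_chain, wmb_sketch) are illustrative stand-ins, not the driver's
API; the real driver goes through its greth_read_bd()/greth_write_bd()
accessors, uses the kernel's wmb(), and kicks the MAC with greth_enable_tx().

/*
 * Sketch only, not driver code: fragment descriptors are written with
 * the enable bit already set, a write barrier makes them visible, and
 * only then is the head descriptor enabled, handing the whole chain to
 * the (simulated) MAC in one step.
 */
#include <stdint.h>
#include <stdio.h>

#define BD_EN   0x800u          /* assumed enable bit, mirrors GRETH_BD_EN */
#define RING_SZ 8u

struct bd { uint32_t stat; uint32_t addr; };

static volatile struct bd ring[RING_SZ];

/* Compiler barrier standing in for the kernel's wmb() in this sketch */
#define wmb_sketch() __asm__ __volatile__("" ::: "memory")

static void post_chain(unsigned int head, unsigned int nr_frags)
{
	unsigned int i;

	/* Fragments get BD_EN at setup time; the hardware will not
	 * look at them until the head descriptor is enabled. */
	for (i = 1; i <= nr_frags; i++)
		ring[(head + i) % RING_SZ].stat = BD_EN | 64 /* length */;

	wmb_sketch();           /* fragment writes must be visible first */

	/* Enable the chain by enabling the first descriptor only */
	ring[head].stat |= BD_EN;

	wmb_sketch();           /* ring state visible before starting TX */
	/* the driver would call greth_enable_tx() here */
}

int main(void)
{
	post_chain(0, 3);
	for (unsigned int i = 0; i < 5; i++)
		printf("bd[%u].stat = 0x%03x\n", i, (unsigned int)ring[i].stat);
	return 0;
}

The design point is the same as in the patch: because no descriptor is
visible to the MAC until the head is enabled, each fragment descriptor is
written exactly once instead of being revisited in a second enabling pass.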
Diffstat (limited to 'drivers/net/greth.c')
-rw-r--r--  drivers/net/greth.c  19
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index b307696e0f63..869e38d6f41b 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -503,7 +503,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 		greth->tx_skbuff[curr_tx] = NULL;
 		bdp = greth->tx_bd_base + curr_tx;
 
-		status = GRETH_TXBD_CSALL;
+		status = GRETH_TXBD_CSALL | GRETH_BD_EN;
 		status |= frag->size & GRETH_BD_LEN;
 
 		/* Wrap around descriptor ring */
@@ -540,26 +540,27 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 
 	wmb();
 
-	/* Enable the descriptors that we configured ... */
-	for (i = 0; i < nr_frags + 1; i++) {
-		bdp = greth->tx_bd_base + greth->tx_next;
-		greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
-		greth->tx_next = NEXT_TX(greth->tx_next);
-		greth->tx_free--;
-	}
+	/* Enable the descriptor chain by enabling the first descriptor */
+	bdp = greth->tx_bd_base + greth->tx_next;
+	greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
+	greth->tx_next = curr_tx;
+	greth->tx_free -= nr_frags + 1;
+
+	wmb();
 
 	greth_enable_tx(greth);
 
 	return NETDEV_TX_OK;
 
 frag_map_error:
-	/* Unmap SKB mappings that succeeded */
+	/* Unmap SKB mappings that succeeded and disable descriptor */
 	for (i = 0; greth->tx_next + i != curr_tx; i++) {
 		bdp = greth->tx_bd_base + greth->tx_next + i;
 		dma_unmap_single(greth->dev,
 				 greth_read_bd(&bdp->addr),
 				 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
 				 DMA_TO_DEVICE);
+		greth_write_bd(&bdp->stat, 0);
 	}
 map_error:
 	if (net_ratelimit())