path: root/drivers/net/mv643xx_eth.c
author	Lennert Buytenhek <buytenh@wantstofly.org>	2009-05-05 23:01:22 -0400
committer	David S. Miller <davem@davemloft.net>	2009-05-06 18:33:39 -0400
commit	7fd96ce47ff83fc17ab78d465d8e067467a7f51e (patch)
tree	8267f14b3d6fe21feafe6b8dfd5768fc6010f32f /drivers/net/mv643xx_eth.c
parent	becfad979d1875aca15ef2a1eda68782e7ac7769 (diff)
mv643xx_eth: rework receive skb cache alignment
On the platforms that mv643xx_eth is used on, the manual skb->data alignment logic in mv643xx_eth can be simplified, as the only case we need to handle is where NET_SKB_PAD is not a multiple of the cache line size. If this is the case, the extra padding we need can be computed at compile time, while if NET_SKB_PAD _is_ a multiple of the cache line size, the code can be optimised out entirely.

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
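As an illustration of the compile-time arithmetic described above, the following standalone C sketch evaluates a SKB_DMA_REALIGN-style expression. The constants (4 KiB pages, 64-byte cache lines, NET_SKB_PAD of 32) are assumed example values for demonstration only, not taken from the patch or any particular platform.

/*
 * Standalone sketch of the compile-time padding computation; the
 * constants below are assumed example values, not kernel defaults.
 */
#include <stdio.h>

#define PAGE_SIZE	4096
#define SMP_CACHE_BYTES	64
#define NET_SKB_PAD	32

/*
 * PAGE_SIZE is always a multiple of the cache line size, so this is
 * just (-NET_SKB_PAD) mod SMP_CACHE_BYTES: the number of bytes needed
 * to push skb->data forward onto the next cache line boundary.
 */
#define SKB_DMA_REALIGN	((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)

int main(void)
{
	/* Prints 32 with the values above; if NET_SKB_PAD were itself a
	 * multiple of SMP_CACHE_BYTES, the macro would evaluate to 0 and
	 * the "if (SKB_DMA_REALIGN)" test in rxq_refill() would let the
	 * compiler drop the skb_reserve() call entirely. */
	printf("SKB_DMA_REALIGN = %d\n", SKB_DMA_REALIGN);
	return 0;
}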
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--	drivers/net/mv643xx_eth.c	21
1 files changed, 13 insertions, 8 deletions
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index cc16f3e4d89c..05bb1c55da66 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -174,6 +174,7 @@ static char mv643xx_eth_driver_version[] = "1.4";
  */
 #define DEFAULT_RX_QUEUE_SIZE	128
 #define DEFAULT_TX_QUEUE_SIZE	256
+#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
 
 
 /*
@@ -649,23 +650,20 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
 	refilled = 0;
 	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
 		struct sk_buff *skb;
-		int unaligned;
 		int rx;
 		struct rx_desc *rx_desc;
 
 		skb = __skb_dequeue(&mp->rx_recycle);
 		if (skb == NULL)
-			skb = dev_alloc_skb(mp->skb_size +
-					    dma_get_cache_alignment() - 1);
+			skb = dev_alloc_skb(mp->skb_size);
 
 		if (skb == NULL) {
 			mp->oom = 1;
 			goto oom;
 		}
 
-		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
-		if (unaligned)
-			skb_reserve(skb, dma_get_cache_alignment() - unaligned);
+		if (SKB_DMA_REALIGN)
+			skb_reserve(skb, SKB_DMA_REALIGN);
 
 		refilled++;
 		rxq->rx_desc_count++;
@@ -964,8 +962,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 		if (skb != NULL) {
 			if (skb_queue_len(&mp->rx_recycle) <
 					mp->rx_ring_size &&
-			    skb_recycle_check(skb, mp->skb_size +
-					      dma_get_cache_alignment() - 1))
+			    skb_recycle_check(skb, mp->skb_size))
 				__skb_queue_head(&mp->rx_recycle, skb);
 			else
 				dev_kfree_skb(skb);
@@ -2336,6 +2333,14 @@ static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
 	 * size field are ignored by the hardware.
 	 */
 	mp->skb_size = (skb_size + 7) & ~7;
+
+	/*
+	 * If NET_SKB_PAD is smaller than a cache line,
+	 * netdev_alloc_skb() will cause skb->data to be misaligned
+	 * to a cache line boundary.  If this is the case, include
+	 * some extra space to allow re-aligning the data area.
+	 */
+	mp->skb_size += SKB_DMA_REALIGN;
 }
 
 static int mv643xx_eth_open(struct net_device *dev)
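To see what the skb_reserve() in rxq_refill() accomplishes, here is a minimal user-space sketch of the realignment, again assuming 64-byte cache lines and NET_SKB_PAD of 32; skb_head and data are hypothetical stand-ins for the allocator's buffer and skb->data, not kernel API.

#include <stdint.h>
#include <stdio.h>

#define SMP_CACHE_BYTES	64
#define NET_SKB_PAD	32
#define SKB_DMA_REALIGN	((4096 - NET_SKB_PAD) % SMP_CACHE_BYTES)

int main(void)
{
	/* Stand-in for the cache-line-aligned head of a freshly
	 * allocated skb data buffer. */
	static uint8_t skb_head[2048] __attribute__((aligned(SMP_CACHE_BYTES)));

	/* dev_alloc_skb()/netdev_alloc_skb() leave the data pointer
	 * NET_SKB_PAD bytes past the head of the buffer. */
	uint8_t *data = skb_head + NET_SKB_PAD;

	printf("before realign: data %% cache line = %lu\n",
	       (unsigned long)((uintptr_t)data % SMP_CACHE_BYTES));

	/* Mirror of the patched rxq_refill(): only pad when NET_SKB_PAD
	 * is not already a multiple of the cache line size. */
	if (SKB_DMA_REALIGN)
		data += SKB_DMA_REALIGN;

	printf("after realign:  data %% cache line = %lu\n",
	       (unsigned long)((uintptr_t)data % SMP_CACHE_BYTES));
	return 0;
}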