diff options
author | Eran Liberty <liberty@extricom.com> | 2010-07-07 18:54:54 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-07-07 18:54:54 -0400 |
commit | acbc0f039ff4b93da737c91937b7c70018ded39f (patch) | |
tree | 8139b01aaaaf6301f246b0c3b468ad8d4e8f55df /drivers/net/gianfar.c | |
parent | f29a3d040727a80c3307a2bea057206be049c305 (diff) |
gianfar: code cleanup
This patch relates to "[PATCH] gainfar.c : skb_over_panic
(kernel-2.6.32.15)"
While in 2.6.32.15 it actually fixed a bug, here it merely cleans up
the previous attempts to fix the bug with more coherent code.
Currently, before queuing an skb into the rx_recycle pool, it is
"un-skb_reserve"-ed so that when it is taken out in gfar_new_skb() it
won't be reserved twice.
This patch makes sure the alignment skb_reserve is done once, upon
allocating the skb and not when it is taken out of the rx_recycle
pool. This eliminates the need to undo anything before queuing the skb
back into the pool.
NOTE: This patch will compile and is fairly straightforward, but I do
not have an environment to test it as I did with the 2.6.32.15 fix.
Signed-off-by: Eran Liberty <liberty@extricom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/gianfar.c')
-rw-r--r-- | drivers/net/gianfar.c | 54 |
1 files changed, 27 insertions, 27 deletions
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index fccb7a371cc8..746a776a1653 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -2420,6 +2420,15 @@ static void gfar_timeout(struct net_device *dev) | |||
2420 | schedule_work(&priv->reset_task); | 2420 | schedule_work(&priv->reset_task); |
2421 | } | 2421 | } |
2422 | 2422 | ||
2423 | static void gfar_align_skb(struct sk_buff *skb) | ||
2424 | { | ||
2425 | /* We need the data buffer to be aligned properly. We will reserve | ||
2426 | * as many bytes as needed to align the data properly | ||
2427 | */ | ||
2428 | skb_reserve(skb, RXBUF_ALIGNMENT - | ||
2429 | (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); | ||
2430 | } | ||
2431 | |||
2423 | /* Interrupt Handler for Transmit complete */ | 2432 | /* Interrupt Handler for Transmit complete */ |
2424 | static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) | 2433 | static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) |
2425 | { | 2434 | { |
@@ -2504,9 +2513,10 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) | |||
2504 | */ | 2513 | */ |
2505 | if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && | 2514 | if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && |
2506 | skb_recycle_check(skb, priv->rx_buffer_size + | 2515 | skb_recycle_check(skb, priv->rx_buffer_size + |
2507 | RXBUF_ALIGNMENT)) | 2516 | RXBUF_ALIGNMENT)) { |
2517 | gfar_align_skb(skb); | ||
2508 | __skb_queue_head(&priv->rx_recycle, skb); | 2518 | __skb_queue_head(&priv->rx_recycle, skb); |
2509 | else | 2519 | } else |
2510 | dev_kfree_skb_any(skb); | 2520 | dev_kfree_skb_any(skb); |
2511 | 2521 | ||
2512 | tx_queue->tx_skbuff[skb_dirtytx] = NULL; | 2522 | tx_queue->tx_skbuff[skb_dirtytx] = NULL; |
@@ -2569,29 +2579,28 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, | |||
2569 | gfar_init_rxbdp(rx_queue, bdp, buf); | 2579 | gfar_init_rxbdp(rx_queue, bdp, buf); |
2570 | } | 2580 | } |
2571 | 2581 | ||
2572 | 2582 | static struct sk_buff * gfar_alloc_skb(struct net_device *dev) | |
2573 | struct sk_buff * gfar_new_skb(struct net_device *dev) | ||
2574 | { | 2583 | { |
2575 | unsigned int alignamount; | ||
2576 | struct gfar_private *priv = netdev_priv(dev); | 2584 | struct gfar_private *priv = netdev_priv(dev); |
2577 | struct sk_buff *skb = NULL; | 2585 | struct sk_buff *skb = NULL; |
2578 | 2586 | ||
2579 | skb = __skb_dequeue(&priv->rx_recycle); | 2587 | skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); |
2580 | if (!skb) | ||
2581 | skb = netdev_alloc_skb(dev, | ||
2582 | priv->rx_buffer_size + RXBUF_ALIGNMENT); | ||
2583 | |||
2584 | if (!skb) | 2588 | if (!skb) |
2585 | return NULL; | 2589 | return NULL; |
2586 | 2590 | ||
2587 | alignamount = RXBUF_ALIGNMENT - | 2591 | gfar_align_skb(skb); |
2588 | (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)); | ||
2589 | 2592 | ||
2590 | /* We need the data buffer to be aligned properly. We will reserve | 2593 | return skb; |
2591 | * as many bytes as needed to align the data properly | 2594 | } |
2592 | */ | 2595 | |
2593 | skb_reserve(skb, alignamount); | 2596 | struct sk_buff * gfar_new_skb(struct net_device *dev) |
2594 | GFAR_CB(skb)->alignamount = alignamount; | 2597 | { |
2598 | struct gfar_private *priv = netdev_priv(dev); | ||
2599 | struct sk_buff *skb = NULL; | ||
2600 | |||
2601 | skb = __skb_dequeue(&priv->rx_recycle); | ||
2602 | if (!skb) | ||
2603 | skb = gfar_alloc_skb(dev); | ||
2595 | 2604 | ||
2596 | return skb; | 2605 | return skb; |
2597 | } | 2606 | } |
@@ -2744,17 +2753,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) | |||
2744 | 2753 | ||
2745 | if (unlikely(!newskb)) | 2754 | if (unlikely(!newskb)) |
2746 | newskb = skb; | 2755 | newskb = skb; |
2747 | else if (skb) { | 2756 | else if (skb) |
2748 | /* | ||
2749 | * We need to un-reserve() the skb to what it | ||
2750 | * was before gfar_new_skb() re-aligned | ||
2751 | * it to an RXBUF_ALIGNMENT boundary | ||
2752 | * before we put the skb back on the | ||
2753 | * recycle list. | ||
2754 | */ | ||
2755 | skb_reserve(skb, -GFAR_CB(skb)->alignamount); | ||
2756 | __skb_queue_head(&priv->rx_recycle, skb); | 2757 | __skb_queue_head(&priv->rx_recycle, skb); |
2757 | } | ||
2758 | } else { | 2758 | } else { |
2759 | /* Increment the number of packets */ | 2759 | /* Increment the number of packets */ |
2760 | rx_queue->stats.rx_packets++; | 2760 | rx_queue->stats.rx_packets++; |