about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorEric W. Biederman <ebiederm@xmission.com>2014-03-15 20:54:27 -0400
committerEric W. Biederman <ebiederm@xmission.com>2014-03-25 00:19:08 -0400
commit1616566c4fa590d4b2011458408a8b39cd46bcc1 (patch)
tree9ac2e16ab9a651f4c03120232a1f08332dfcc4b6
parent6956d73aaf28d1449bc1222a0b2e997273cbf520 (diff)
forcedeth: Call dev_kfree_skb_any instead of kfree_skb.
Replace kfree_skb with dev_kfree_skb_any in functions that can be called in hard irq and other contexts. Every location changed is a drop, making dev_kfree_skb_any appropriate. Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c8
1 file changed, 4 insertions, 4 deletions
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 811be0bccd14..fddb464aeab3 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2231,7 +2231,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2231 if (pci_dma_mapping_error(np->pci_dev, 2231 if (pci_dma_mapping_error(np->pci_dev,
2232 np->put_tx_ctx->dma)) { 2232 np->put_tx_ctx->dma)) {
2233 /* on DMA mapping error - drop the packet */ 2233 /* on DMA mapping error - drop the packet */
2234 kfree_skb(skb); 2234 dev_kfree_skb_any(skb);
2235 u64_stats_update_begin(&np->swstats_tx_syncp); 2235 u64_stats_update_begin(&np->swstats_tx_syncp);
2236 np->stat_tx_dropped++; 2236 np->stat_tx_dropped++;
2237 u64_stats_update_end(&np->swstats_tx_syncp); 2237 u64_stats_update_end(&np->swstats_tx_syncp);
@@ -2277,7 +2277,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2277 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) 2277 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2278 tmp_tx_ctx = np->first_tx_ctx; 2278 tmp_tx_ctx = np->first_tx_ctx;
2279 } while (tmp_tx_ctx != np->put_tx_ctx); 2279 } while (tmp_tx_ctx != np->put_tx_ctx);
2280 kfree_skb(skb); 2280 dev_kfree_skb_any(skb);
2281 np->put_tx_ctx = start_tx_ctx; 2281 np->put_tx_ctx = start_tx_ctx;
2282 u64_stats_update_begin(&np->swstats_tx_syncp); 2282 u64_stats_update_begin(&np->swstats_tx_syncp);
2283 np->stat_tx_dropped++; 2283 np->stat_tx_dropped++;
@@ -2380,7 +2380,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2380 if (pci_dma_mapping_error(np->pci_dev, 2380 if (pci_dma_mapping_error(np->pci_dev,
2381 np->put_tx_ctx->dma)) { 2381 np->put_tx_ctx->dma)) {
2382 /* on DMA mapping error - drop the packet */ 2382 /* on DMA mapping error - drop the packet */
2383 kfree_skb(skb); 2383 dev_kfree_skb_any(skb);
2384 u64_stats_update_begin(&np->swstats_tx_syncp); 2384 u64_stats_update_begin(&np->swstats_tx_syncp);
2385 np->stat_tx_dropped++; 2385 np->stat_tx_dropped++;
2386 u64_stats_update_end(&np->swstats_tx_syncp); 2386 u64_stats_update_end(&np->swstats_tx_syncp);
@@ -2427,7 +2427,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2427 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) 2427 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2428 tmp_tx_ctx = np->first_tx_ctx; 2428 tmp_tx_ctx = np->first_tx_ctx;
2429 } while (tmp_tx_ctx != np->put_tx_ctx); 2429 } while (tmp_tx_ctx != np->put_tx_ctx);
2430 kfree_skb(skb); 2430 dev_kfree_skb_any(skb);
2431 np->put_tx_ctx = start_tx_ctx; 2431 np->put_tx_ctx = start_tx_ctx;
2432 u64_stats_update_begin(&np->swstats_tx_syncp); 2432 u64_stats_update_begin(&np->swstats_tx_syncp);
2433 np->stat_tx_dropped++; 2433 np->stat_tx_dropped++;