author	Eric W. Biederman <ebiederm@xmission.com>	2014-03-15 19:06:40 -0400
committer	Eric W. Biederman <ebiederm@xmission.com>	2014-03-25 00:18:55 -0400
commit	27400df8e92b0e2934ef9de8eb7a08e7e490b784 (patch)
tree	298894f5340213a9ca79354a282158b9daafb865 /drivers/net/ethernet/brocade
parent	07641c8fa45774d5e99f4bdc8c37a7d174a2e973 (diff)
bnad: Call dev_kfree_skb_any instead of dev_kfree_skb.
Replace dev_kfree_skb with dev_kfree_skb_any in bnad_start_xmit, which can be called in hard irq and other contexts. dev_kfree_skb_any is used because bnad_start_xmit only frees skbs when dropping them; normally transmitted packets are handled elsewhere.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
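For illustration only (not part of the commit): a minimal sketch of the drop path in a .ndo_start_xmit handler, showing why dev_kfree_skb_any() is the right call there. The function name bnad_xmit_sketch and the single length check are hypothetical; the point is that .ndo_start_xmit may run with interrupts disabled (e.g. via netpoll), and dev_kfree_skb_any() defers to dev_kfree_skb_irq() in that case and to dev_kfree_skb() otherwise.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Hypothetical drop path in a .ndo_start_xmit handler (sketch only). */
static netdev_tx_t bnad_xmit_sketch(struct sk_buff *skb,
				    struct net_device *netdev)
{
	if (unlikely(skb->len <= ETH_HLEN)) {
		/*
		 * dev_kfree_skb_any() is safe in process, softirq and
		 * hard-irq context; plain dev_kfree_skb() is not safe
		 * when interrupts are disabled.
		 */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;	/* drop, but report the skb as consumed */
	}

	/* ... map the skb and hand it to the hardware queue ... */
	return NETDEV_TX_OK;
}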
Diffstat (limited to 'drivers/net/ethernet/brocade')
-rw-r--r--	drivers/net/ethernet/brocade/bna/bnad.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index cb7625366ec2..a881e982a084 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2946,17 +2946,17 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	/* Sanity checks for the skb */
 
 	if (unlikely(skb->len <= ETH_HLEN)) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
 		return NETDEV_TX_OK;
 	}
 	if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
 		return NETDEV_TX_OK;
 	}
 	if (unlikely(len == 0)) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
 		return NETDEV_TX_OK;
 	}
@@ -2968,7 +2968,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	 * and the netif_tx_stop_all_queues() call.
 	 */
 	if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
 		return NETDEV_TX_OK;
 	}
@@ -2981,7 +2981,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
 
 	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
 		return NETDEV_TX_OK;
 	}
@@ -3021,7 +3021,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	/* Program the opcode, flags, frame_len, num_vectors in WI */
 	if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 	txqent->hdr.wi.reserved = 0;
@@ -3047,7 +3047,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 			/* Undo the changes starting at tcb->producer_index */
 			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
 					   tcb->producer_index);
-			dev_kfree_skb(skb);
+			dev_kfree_skb_any(skb);
 			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
 			return NETDEV_TX_OK;
 		}
@@ -3076,7 +3076,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	if (unlikely(len != skb->len)) {
 		/* Undo the changes starting at tcb->producer_index */
 		bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
 		return NETDEV_TX_OK;
 	}