about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--drivers/net/bonding/bond_main.c10
1 files changed, 5 insertions, 5 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e717db301d46..cbadd6dccb2b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2957,7 +2957,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
2957 fk->ports = 0; 2957 fk->ports = 0;
2958 noff = skb_network_offset(skb); 2958 noff = skb_network_offset(skb);
2959 if (skb->protocol == htons(ETH_P_IP)) { 2959 if (skb->protocol == htons(ETH_P_IP)) {
2960 if (!pskb_may_pull(skb, noff + sizeof(*iph))) 2960 if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
2961 return false; 2961 return false;
2962 iph = ip_hdr(skb); 2962 iph = ip_hdr(skb);
2963 fk->src = iph->saddr; 2963 fk->src = iph->saddr;
@@ -2966,7 +2966,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
2966 if (!ip_is_fragment(iph)) 2966 if (!ip_is_fragment(iph))
2967 proto = iph->protocol; 2967 proto = iph->protocol;
2968 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2968 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2969 if (!pskb_may_pull(skb, noff + sizeof(*iph6))) 2969 if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
2970 return false; 2970 return false;
2971 iph6 = ipv6_hdr(skb); 2971 iph6 = ipv6_hdr(skb);
2972 fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr); 2972 fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
@@ -3656,8 +3656,8 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
3656 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 3656 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
3657 3657
3658 if (!skb2) { 3658 if (!skb2) {
3659 pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n", 3659 net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
3660 bond_dev->name); 3660 bond_dev->name, __func__);
3661 continue; 3661 continue;
3662 } 3662 }
3663 /* bond_dev_queue_xmit always returns 0 */ 3663 /* bond_dev_queue_xmit always returns 0 */
@@ -3768,7 +3768,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
3768 * If we risk deadlock from transmitting this in the 3768 * If we risk deadlock from transmitting this in the
3769 * netpoll path, tell netpoll to queue the frame for later tx 3769 * netpoll path, tell netpoll to queue the frame for later tx
3770 */ 3770 */
3771 if (is_netpoll_tx_blocked(dev)) 3771 if (unlikely(is_netpoll_tx_blocked(dev)))
3772 return NETDEV_TX_BUSY; 3772 return NETDEV_TX_BUSY;
3773 3773
3774 rcu_read_lock(); 3774 rcu_read_lock();