author     Linus Torvalds <torvalds@linux-foundation.org>   2014-01-10 18:37:11 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-01-10 18:37:11 -0500
commit     228fdc083b017eaf90e578fa86fb1ecfd5ffae87 (patch)
tree       459719a7583fe6c756873ae7c0d17e498d118eb0
parent     e2bc44706faa1852471cc101f3c5cdd757dddcd0 (diff)
parent     d6e9c89a8d3cf0a5184badbcd50169179af27721 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "Famous last words: "final pull request" :-)

  I'm sending this because Jason Wang's fixes are pretty important

  1) Add missing per-cpu stats initialization to ip6_vti.  Otherwise
     lockdep spits out a call trace.  From Li RongQing.

  2) Fix NULL oops in wireless hwsim, from Javier Lopez

  3) TIPC deferred packet queue unlink must NULL out skb->next to avoid
     crashes.  From Erik Hugne

  4) Fix access to uninitialized buffer in nf_nat netfilter code, from
     Daniel Borkmann

  5) Fix lifetime of ipv6 loopback and SIT tunnel addresses, otherwise
     they basically timeout immediately.  From Hannes Frederic Sowa

  6) Fix DMA unmapping of TSO packets in bnx2x driver, from Michal
     Schmidt

  7) Do not allow L2 forwarding offload via macvtap device, the way
     things are now it will not end up being forwarded at all.  From
     Jason Wang

  8) Fix transmit queue selection via ndo_dfwd_start_xmit(), fixing
     things like applying NETIF_F_LLTX to the wrong device (!!) and
     eliding the proper transmit watchdog handling

  9) qlcnic driver was not updating tx statistics at all, from Manish
     Chopra"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  qlcnic: Fix ethtool statistics length calculation
  qlcnic: Fix bug in TX statistics
  net: core: explicitly select a txq before doing l2 forwarding
  macvlan: forbid L2 fowarding offload for macvtap
  bnx2x: fix DMA unmapping of TSO split BDs
  ipv6: add link-local, sit and loopback address with INFINITY_LIFE_TIME
  bnx2x: prevent WARN during driver unload
  tipc: correctly unlink packets from deferred packet queue
  ipv6: pcpu_tstats.syncp should be initialised in ip6_vti.c
  netfilter: only warn once on wrong seqadj usage
  netfilter: nf_nat: fix access to uninitialized buffer in IRC NAT helper
  NFC: Fix target mode p2p link establishment
  iwlwifi: add new devices for 7265 series
  mac80211: move "bufferable MMPDU" check to fix AP mode scan
  mac80211_hwsim: Fix NULL pointer dereference
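For fix 8 above, the interface change running through most of these diffs is that dev_hard_start_xmit() loses its accel_priv argument: macvlan now submits offloaded frames through dev_queue_xmit_accel(), and the station pointer reaches the driver via the new three-argument ndo_select_queue(). Below is a minimal sketch of a driver callback using the new signature, modelled on the ixgbe hunk further down; struct example_fwd and its tx_base field are illustrative placeholders, not part of this series.

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Illustrative per-station state; not a structure from this series. */
    struct example_fwd {
            unsigned int tx_base;   /* first hw queue reserved for the station */
    };

    static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
                                    void *accel_priv)
    {
            struct example_fwd *fwd = accel_priv;

            /* Frames submitted via dev_queue_xmit_accel() carry the station's
             * private data, so they can be steered onto the queue range the
             * driver reserved in ndo_dfwd_add_station().
             */
            if (fwd)
                    return skb->queue_mapping + fwd->tx_base;

            /* All other traffic keeps the stack's default queue selection. */
            return __netdev_pick_tx(dev, skb);
    }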
-rw-r--r--  drivers/net/bonding/bond_main.c                      |  3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h          | 44
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c      | 28
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h      |  3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c        | 33
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c                   |  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c           |  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h         |  3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h          |  1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c  | 41
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c     |  3
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c                   |  3
-rw-r--r--  drivers/net/macvlan.c                                | 14
-rw-r--r--  drivers/net/team/team.c                              |  3
-rw-r--r--  drivers/net/tun.c                                    |  3
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c              | 10
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c                |  2
-rw-r--r--  drivers/net/wireless/mwifiex/main.c                  |  3
-rw-r--r--  drivers/staging/bcm/Bcmnet.c                         |  3
-rw-r--r--  drivers/staging/netlogic/xlr_net.c                   |  3
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/os_intfs.c          |  3
-rw-r--r--  include/linux/netdevice.h                            | 12
-rw-r--r--  net/core/dev.c                                       | 29
-rw-r--r--  net/core/flow_dissector.c                            | 10
-rw-r--r--  net/core/netpoll.c                                   |  2
-rw-r--r--  net/ipv6/addrconf.c                                  |  6
-rw-r--r--  net/ipv6/ip6_vti.c                                   |  6
-rw-r--r--  net/mac80211/iface.c                                 |  6
-rw-r--r--  net/mac80211/tx.c                                    | 23
-rw-r--r--  net/netfilter/nf_conntrack_seqadj.c                  |  2
-rw-r--r--  net/netfilter/nf_nat_irc.c                           | 32
-rw-r--r--  net/nfc/core.c                                       |  2
-rw-r--r--  net/sched/sch_generic.c                              |  2
-rw-r--r--  net/tipc/link.c                                      |  1
34 files changed, 219 insertions(+), 126 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 398e299ee1bd..4b8c58b0ec24 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3732,7 +3732,8 @@ static inline int bond_slave_override(struct bonding *bond,
 }
 
 
-static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
+                             void *accel_priv)
 {
         /*
          * This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 2d5fce4c9751..ec6119089b82 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -520,10 +520,12 @@ struct bnx2x_fastpath {
 #define BNX2X_FP_STATE_IDLE                   0
 #define BNX2X_FP_STATE_NAPI                   (1 << 0) /* NAPI owns this FP */
 #define BNX2X_FP_STATE_POLL                   (1 << 1) /* poll owns this FP */
-#define BNX2X_FP_STATE_NAPI_YIELD             (1 << 2) /* NAPI yielded this FP */
-#define BNX2X_FP_STATE_POLL_YIELD             (1 << 3) /* poll yielded this FP */
+#define BNX2X_FP_STATE_DISABLED               (1 << 2)
+#define BNX2X_FP_STATE_NAPI_YIELD             (1 << 3) /* NAPI yielded this FP */
+#define BNX2X_FP_STATE_POLL_YIELD             (1 << 4) /* poll yielded this FP */
+#define BNX2X_FP_OWNED  (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
 #define BNX2X_FP_YIELD  (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
-#define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
 #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
         /* protect state */
         spinlock_t lock;
@@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 {
         bool rc = true;
 
-        spin_lock(&fp->lock);
+        spin_lock_bh(&fp->lock);
         if (fp->state & BNX2X_FP_LOCKED) {
                 WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
                 fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
@@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
                 /* we don't care if someone yielded */
                 fp->state = BNX2X_FP_STATE_NAPI;
         }
-        spin_unlock(&fp->lock);
+        spin_unlock_bh(&fp->lock);
         return rc;
 }
 
@@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
 {
         bool rc = false;
 
-        spin_lock(&fp->lock);
+        spin_lock_bh(&fp->lock);
         WARN_ON(fp->state &
                 (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
 
         if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
                 rc = true;
-        fp->state = BNX2X_FP_STATE_IDLE;
-        spin_unlock(&fp->lock);
+
+        /* state ==> idle, unless currently disabled */
+        fp->state &= BNX2X_FP_STATE_DISABLED;
+        spin_unlock_bh(&fp->lock);
         return rc;
 }
 
@@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 
         if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
                 rc = true;
-        fp->state = BNX2X_FP_STATE_IDLE;
+
+        /* state ==> idle, unless currently disabled */
+        fp->state &= BNX2X_FP_STATE_DISABLED;
         spin_unlock_bh(&fp->lock);
         return rc;
 }
@@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 /* true if a socket is polling, even if it did not get the lock */
 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
-        WARN_ON(!(fp->state & BNX2X_FP_LOCKED));
+        WARN_ON(!(fp->state & BNX2X_FP_OWNED));
         return fp->state & BNX2X_FP_USER_PEND;
 }
+
+/* false if fp is currently owned */
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+        int rc = true;
+
+        spin_lock_bh(&fp->lock);
+        if (fp->state & BNX2X_FP_OWNED)
+                rc = false;
+        fp->state |= BNX2X_FP_STATE_DISABLED;
+        spin_unlock_bh(&fp->lock);
+
+        return rc;
+}
 #else
 static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
 {
@@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
         return false;
 }
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+        return true;
+}
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 /* Use 2500 as a mini-jumbo MTU for FCoE */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ec96130533cc..bf811565ee24 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -160,6 +160,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
         struct sk_buff *skb = tx_buf->skb;
         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
         int nbd;
+        u16 split_bd_len = 0;
 
         /* prefetch skb end pointer to speedup dev_kfree_skb() */
         prefetch(&skb->end);
@@ -167,10 +168,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
         DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
            txdata->txq_index, idx, tx_buf, skb);
 
-        /* unmap first bd */
         tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
-        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
-                         BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 
         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 #ifdef BNX2X_STOP_ON_ERROR
@@ -188,12 +186,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
         --nbd;
         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 
-        /* ...and the TSO split header bd since they have no mapping */
+        /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+                split_bd_len = BD_UNMAP_LEN(tx_data_bd);
                 --nbd;
                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
         }
 
+        /* unmap first bd */
+        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+                         BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
+                         DMA_TO_DEVICE);
+
         /* now free frags */
         while (nbd > 0) {
 
@@ -1790,26 +1795,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
 {
         int i;
 
-        local_bh_disable();
         for_each_rx_queue_cnic(bp, i) {
                 napi_disable(&bnx2x_fp(bp, i, napi));
-                while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-                        mdelay(1);
+                while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+                        usleep_range(1000, 2000);
         }
-        local_bh_enable();
 }
 
 static void bnx2x_napi_disable(struct bnx2x *bp)
 {
         int i;
 
-        local_bh_disable();
         for_each_eth_queue(bp, i) {
                 napi_disable(&bnx2x_fp(bp, i, napi));
-                while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-                        mdelay(1);
+                while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+                        usleep_range(1000, 2000);
         }
-        local_bh_enable();
 }
 
 void bnx2x_netif_start(struct bnx2x *bp)
@@ -1832,7 +1833,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
                 bnx2x_napi_disable_cnic(bp);
 }
 
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+                       void *accel_priv)
 {
         struct bnx2x *bp = netdev_priv(dev);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index da8fcaa74495..41f3ca5ad972 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -524,7 +524,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
 int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
 
 /* select_queue callback */
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+                       void *accel_priv);
 
 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                         struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cc06854296a3..5bcc870f8367 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6827,12 +6827,20 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
         return __ixgbe_maybe_stop_tx(tx_ring, size);
 }
 
-#ifdef IXGBE_FCOE
-static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
+                              void *accel_priv)
 {
+        struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
+#ifdef IXGBE_FCOE
         struct ixgbe_adapter *adapter;
         struct ixgbe_ring_feature *f;
         int txq;
+#endif
+
+        if (fwd_adapter)
+                return skb->queue_mapping + fwd_adapter->tx_base_queue;
+
+#ifdef IXGBE_FCOE
 
         /*
          * only execute the code below if protocol is FCoE
@@ -6858,9 +6866,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
                 txq -= f->indices;
 
         return txq + f->offset;
+#else
+        return __netdev_pick_tx(dev, skb);
+#endif
 }
 
-#endif
 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                           struct ixgbe_adapter *adapter,
                           struct ixgbe_ring *tx_ring)
@@ -7629,27 +7639,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
         kfree(fwd_adapter);
 }
 
-static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
-                                  struct net_device *dev,
-                                  void *priv)
-{
-        struct ixgbe_fwd_adapter *fwd_adapter = priv;
-        unsigned int queue;
-        struct ixgbe_ring *tx_ring;
-
-        queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
-        tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
-
-        return __ixgbe_xmit_frame(skb, dev, tx_ring);
-}
-
 static const struct net_device_ops ixgbe_netdev_ops = {
         .ndo_open               = ixgbe_open,
         .ndo_stop               = ixgbe_close,
         .ndo_start_xmit         = ixgbe_xmit_frame,
-#ifdef IXGBE_FCOE
         .ndo_select_queue       = ixgbe_select_queue,
-#endif
         .ndo_set_rx_mode        = ixgbe_set_rx_mode,
         .ndo_validate_addr      = eth_validate_addr,
         .ndo_set_mac_address    = ixgbe_set_mac,
@@ -7689,7 +7683,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
         .ndo_bridge_getlink     = ixgbe_ndo_bridge_getlink,
         .ndo_dfwd_add_station   = ixgbe_fwd_add,
         .ndo_dfwd_del_station   = ixgbe_fwd_del,
-        .ndo_dfwd_start_xmit    = ixgbe_fwd_xmit,
 };
 
 /**
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6a6c1f76d8e0..ec94a20d7099 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -619,7 +619,8 @@ ltq_etop_set_multicast_list(struct net_device *dev)
 }
 
 static u16
-ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb)
+ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
+                      void *accel_priv)
 {
         /* we are currently only using the first queue */
         return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index f54ebd5a1702..a7fcd593b2db 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -592,7 +592,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
         }
 }
 
-u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+                         void *accel_priv)
 {
         struct mlx4_en_priv *priv = netdev_priv(dev);
         u16 rings_p_up = priv->num_tx_rings_p_up;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f3758de59c05..d5758adceaa2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -714,7 +714,8 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
-u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+                         void *accel_priv);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index ff80cd8f6d2b..f2a7c7166e24 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1711,6 +1711,7 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
 void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
 void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
 void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
+void qlcnic_update_stats(struct qlcnic_adapter *);
 
 /* Adapter hardware abstraction */
 struct qlcnic_hardware_ops {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index e3be2760665c..6b08194aa0d4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -167,27 +167,35 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
 
 #define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
 
-static inline int qlcnic_82xx_statistics(void)
+static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter)
 {
-        return ARRAY_SIZE(qlcnic_device_gstrings_stats) +
-               ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+        return ARRAY_SIZE(qlcnic_gstrings_stats) +
+               ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+               QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
 }
 
-static inline int qlcnic_83xx_statistics(void)
+static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter)
 {
-        return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
+        return ARRAY_SIZE(qlcnic_gstrings_stats) +
+               ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
                ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
-               ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+               ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) +
+               QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
 }
 
 static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
 {
-        if (qlcnic_82xx_check(adapter))
-                return qlcnic_82xx_statistics();
-        else if (qlcnic_83xx_check(adapter))
-                return qlcnic_83xx_statistics();
-        else
-                return -1;
+        int len = -1;
+
+        if (qlcnic_82xx_check(adapter)) {
+                len = qlcnic_82xx_statistics(adapter);
+                if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+                        len += ARRAY_SIZE(qlcnic_device_gstrings_stats);
+        } else if (qlcnic_83xx_check(adapter)) {
+                len = qlcnic_83xx_statistics(adapter);
+        }
+
+        return len;
 }
 
 #define QLCNIC_TX_INTR_NOT_CONFIGURED   0X78563412
@@ -920,18 +928,13 @@ static int qlcnic_eeprom_test(struct net_device *dev)
 
 static int qlcnic_get_sset_count(struct net_device *dev, int sset)
 {
-        int len;
 
         struct qlcnic_adapter *adapter = netdev_priv(dev);
         switch (sset) {
         case ETH_SS_TEST:
                 return QLCNIC_TEST_LEN;
         case ETH_SS_STATS:
-                len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN;
-                if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
-                    qlcnic_83xx_check(adapter))
-                        return len;
-                return qlcnic_82xx_statistics();
+                return qlcnic_dev_statistics_len(adapter);
         default:
                 return -EOPNOTSUPP;
         }
@@ -1267,7 +1270,7 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
         return data;
 }
 
-static void qlcnic_update_stats(struct qlcnic_adapter *adapter)
+void qlcnic_update_stats(struct qlcnic_adapter *adapter)
 {
         struct qlcnic_host_tx_ring *tx_ring;
         int ring;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index b8a245a79de3..550791b8fbae 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2780,6 +2780,9 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
         struct qlcnic_adapter *adapter = netdev_priv(netdev);
         struct net_device_stats *stats = &netdev->stats;
 
+        if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+                qlcnic_update_stats(adapter);
+
         stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
         stats->tx_packets = adapter->stats.xmitfinished;
         stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 628b736e5ae7..0e9fb3301b11 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2080,7 +2080,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 }
 
 /* Return subqueue id on this core (one per core). */
-static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
+                                 void *accel_priv)
 {
         return smp_processor_id();
 }
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 60406b01f9eb..bc8faaec33f5 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -299,7 +299,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
 
         if (vlan->fwd_priv) {
                 skb->dev = vlan->lowerdev;
-                ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv);
+                ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
         } else {
                 ret = macvlan_queue_xmit(skb, dev);
         }
@@ -338,6 +338,8 @@ static const struct header_ops macvlan_hard_header_ops = {
         .cache_update   = eth_header_cache_update,
 };
 
+static struct rtnl_link_ops macvlan_link_ops;
+
 static int macvlan_open(struct net_device *dev)
 {
         struct macvlan_dev *vlan = netdev_priv(dev);
@@ -353,7 +355,8 @@ static int macvlan_open(struct net_device *dev)
                 goto hash_add;
         }
 
-        if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
+        if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD &&
+            dev->rtnl_link_ops == &macvlan_link_ops) {
                 vlan->fwd_priv =
                       lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
 
@@ -362,10 +365,8 @@ static int macvlan_open(struct net_device *dev)
                  */
                 if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
                         vlan->fwd_priv = NULL;
-                } else {
-                        dev->features &= ~NETIF_F_LLTX;
+                } else
                         return 0;
-                }
         }
 
         err = -EBUSY;
@@ -699,8 +700,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
         features = netdev_increment_features(vlan->lowerdev->features,
                                              features,
                                              mask);
-        if (!vlan->fwd_priv)
-                features |= NETIF_F_LLTX;
+        features |= NETIF_F_LLTX;
 
         return features;
 }
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 736050d6b451..b75ae5bde673 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1647,7 +1647,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
         return NETDEV_TX_OK;
 }
 
-static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
+                             void *accel_priv)
 {
         /*
          * This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7c8343a4f918..ecec8029c5e8 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -348,7 +348,8 @@ unlock:
  * different rxq no. here. If we could not get rxhash, then we would
  * hope the rxq no. may help here.
  */
-static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
+                            void *accel_priv)
 {
         struct tun_struct *tun = netdev_priv(dev);
         struct tun_flow_entry *e;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 86605027c41d..e6272546395a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -357,21 +357,27 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
         {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
         {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
         {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
-        {IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)},
-        {IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)},
+        {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+        {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
         {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+        {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
         {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
         {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
         {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
         {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
         {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
         {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+        {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
         {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
+        {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
+        {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
         {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
         {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
         {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
         {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
         {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
+        {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
+        {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
         {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
         {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c72438bb2faf..a1b32ee9594a 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2011,7 +2011,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
             (hwsim_flags & HWSIM_TX_STAT_ACK)) {
                 if (skb->len >= 16) {
                         hdr = (struct ieee80211_hdr *) skb->data;
-                        mac80211_hwsim_monitor_ack(txi->rate_driver_data[0],
+                        mac80211_hwsim_monitor_ack(data2->channel,
                                                    hdr->addr2);
                 }
                 txi->flags |= IEEE80211_TX_STAT_ACK;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 78e8a6666cc6..8bb8988c435c 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -746,7 +746,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
 }
 
 static u16
-mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb)
+mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
+                                void *accel_priv)
 {
         skb->priority = cfg80211_classify8021d(skb);
         return mwifiex_1d_to_wmm_queue[skb->priority];
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index 53fee2f9a498..8dfdd2732bdc 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -39,7 +39,8 @@ static INT bcm_close(struct net_device *dev)
         return 0;
 }
 
-static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
+                            void *accel_priv)
 {
         return ClassifyPacket(netdev_priv(dev), skb);
 }
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 235d2b1ec593..eedffed17e39 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -306,7 +306,8 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
         return NETDEV_TX_OK;
 }
 
-static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb)
+static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
+                                void *accel_priv)
 {
         return (u16)smp_processor_id();
 }
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 17659bb04bef..dd69e344e409 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -652,7 +652,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
         return dscp >> 5;
 }
 
-static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
+                            void *accel_priv)
 {
         struct adapter *padapter = rtw_netdev_priv(dev);
         struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5faaadb0c74f..ce2a1f5f9a1e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -769,7 +769,8 @@ struct netdev_phys_port_id {
  *      (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
  *      Required can not be NULL.
  *
- * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
+ * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
+ *                         void *accel_priv);
  *      Called to decide which queue to when device supports multiple
  *      transmit queues.
  *
@@ -990,7 +991,8 @@ struct net_device_ops {
         netdev_tx_t             (*ndo_start_xmit) (struct sk_buff *skb,
                                                    struct net_device *dev);
         u16                     (*ndo_select_queue)(struct net_device *dev,
-                                                    struct sk_buff *skb);
+                                                    struct sk_buff *skb,
+                                                    void *accel_priv);
         void                    (*ndo_change_rx_flags)(struct net_device *dev,
                                                        int flags);
         void                    (*ndo_set_rx_mode)(struct net_device *dev);
@@ -1529,7 +1531,8 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 }
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-                                    struct sk_buff *skb);
+                                    struct sk_buff *skb,
+                                    void *accel_priv);
 u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
 
 /*
@@ -1819,6 +1822,7 @@ int dev_close(struct net_device *dev);
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct sk_buff *newskb);
 int dev_queue_xmit(struct sk_buff *skb);
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
 void unregister_netdevice_many(struct list_head *head);
@@ -2426,7 +2430,7 @@ int dev_change_carrier(struct net_device *, bool new_carrier);
 int dev_get_phys_port_id(struct net_device *dev,
                          struct netdev_phys_port_id *ppid);
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-                        struct netdev_queue *txq, void *accel_priv);
+                        struct netdev_queue *txq);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 
 extern int netdev_budget;
diff --git a/net/core/dev.c b/net/core/dev.c
index 4fc17221545d..0ce469e5ec80 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2539,7 +2539,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb,
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-                        struct netdev_queue *txq, void *accel_priv)
+                        struct netdev_queue *txq)
 {
         const struct net_device_ops *ops = dev->netdev_ops;
         int rc = NETDEV_TX_OK;
@@ -2605,13 +2605,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                         dev_queue_xmit_nit(skb, dev);
 
                 skb_len = skb->len;
-                if (accel_priv)
-                        rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
-                else
-                        rc = ops->ndo_start_xmit(skb, dev);
+                rc = ops->ndo_start_xmit(skb, dev);
 
                 trace_net_dev_xmit(skb, rc, dev, skb_len);
-                if (rc == NETDEV_TX_OK && txq)
+                if (rc == NETDEV_TX_OK)
                         txq_trans_update(txq);
                 return rc;
         }
@@ -2627,10 +2624,7 @@ gso:
                         dev_queue_xmit_nit(nskb, dev);
 
                 skb_len = nskb->len;
-                if (accel_priv)
-                        rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
-                else
-                        rc = ops->ndo_start_xmit(nskb, dev);
+                rc = ops->ndo_start_xmit(nskb, dev);
                 trace_net_dev_xmit(nskb, rc, dev, skb_len);
                 if (unlikely(rc != NETDEV_TX_OK)) {
                         if (rc & ~NETDEV_TX_MASK)
@@ -2811,7 +2805,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
  *      the BH enable code must have IRQs enabled so that it will not deadlock.
  *          --BLG
  */
-int dev_queue_xmit(struct sk_buff *skb)
+int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 {
         struct net_device *dev = skb->dev;
         struct netdev_queue *txq;
@@ -2827,7 +2821,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
         skb_update_prio(skb);
 
-        txq = netdev_pick_tx(dev, skb);
+        txq = netdev_pick_tx(dev, skb, accel_priv);
         q = rcu_dereference_bh(txq->qdisc);
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -2863,7 +2857,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
                         if (!netif_xmit_stopped(txq)) {
                                 __this_cpu_inc(xmit_recursion);
-                                rc = dev_hard_start_xmit(skb, dev, txq, NULL);
+                                rc = dev_hard_start_xmit(skb, dev, txq);
                                 __this_cpu_dec(xmit_recursion);
                                 if (dev_xmit_complete(rc)) {
                                         HARD_TX_UNLOCK(dev, txq);
@@ -2892,8 +2886,19 @@ out:
         rcu_read_unlock_bh();
         return rc;
 }
+
+int dev_queue_xmit(struct sk_buff *skb)
+{
+        return __dev_queue_xmit(skb, NULL);
+}
 EXPORT_SYMBOL(dev_queue_xmit);
 
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+{
+        return __dev_queue_xmit(skb, accel_priv);
+}
+EXPORT_SYMBOL(dev_queue_xmit_accel);
+
 
 /*=======================================================================
                         Receiver routines
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index d6ef17322500..2fc5beaf5783 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -395,17 +395,21 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 EXPORT_SYMBOL(__netdev_pick_tx);
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-                                    struct sk_buff *skb)
+                                    struct sk_buff *skb,
+                                    void *accel_priv)
 {
         int queue_index = 0;
 
         if (dev->real_num_tx_queues != 1) {
                 const struct net_device_ops *ops = dev->netdev_ops;
                 if (ops->ndo_select_queue)
-                        queue_index = ops->ndo_select_queue(dev, skb);
+                        queue_index = ops->ndo_select_queue(dev, skb,
+                                                            accel_priv);
                 else
                         queue_index = __netdev_pick_tx(dev, skb);
-                queue_index = dev_cap_txqueue(dev, queue_index);
+
+                if (!accel_priv)
+                        queue_index = dev_cap_txqueue(dev, queue_index);
         }
 
         skb_set_queue_mapping(skb, queue_index);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 303097874633..19fe9c717ced 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -375,7 +375,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
         if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                 struct netdev_queue *txq;
 
-                txq = netdev_pick_tx(dev, skb);
+                txq = netdev_pick_tx(dev, skb, NULL);
 
                 /* try until next clock tick */
                 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f62c72b59f8e..abe46a4228ce 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2509,7 +2509,8 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
         struct inet6_ifaddr *ifp;
 
         ifp = ipv6_add_addr(idev, addr, NULL, plen,
-                            scope, IFA_F_PERMANENT, 0, 0);
+                            scope, IFA_F_PERMANENT,
+                            INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
         if (!IS_ERR(ifp)) {
                 spin_lock_bh(&ifp->lock);
                 ifp->flags &= ~IFA_F_TENTATIVE;
@@ -2637,7 +2638,8 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
 #endif
 
 
-        ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0);
+        ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags,
+                            INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
         if (!IS_ERR(ifp)) {
                 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
                 addrconf_dad_start(ifp);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index a4564b05c47b..7b42d5ef868d 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -732,12 +732,18 @@ static void vti6_dev_setup(struct net_device *dev)
 static inline int vti6_dev_init_gen(struct net_device *dev)
 {
         struct ip6_tnl *t = netdev_priv(dev);
+        int i;
 
         t->dev = dev;
         t->net = dev_net(dev);
         dev->tstats = alloc_percpu(struct pcpu_tstats);
         if (!dev->tstats)
                 return -ENOMEM;
+        for_each_possible_cpu(i) {
+                struct pcpu_tstats *stats;
+                stats = per_cpu_ptr(dev->tstats, i);
+                u64_stats_init(&stats->syncp);
+        }
         return 0;
 }
 
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 36c3a4cbcabf..a0757913046e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1061,7 +1061,8 @@ static void ieee80211_uninit(struct net_device *dev)
 }
 
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
-                                         struct sk_buff *skb)
+                                         struct sk_buff *skb,
+                                         void *accel_priv)
 {
         return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }
@@ -1078,7 +1079,8 @@ static const struct net_device_ops ieee80211_dataif_ops = {
 };
 
 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
-                                          struct sk_buff *skb)
+                                          struct sk_buff *skb,
+                                          void *accel_priv)
 {
         struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
         struct ieee80211_local *local = sdata->local;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index c558b246ef00..ca7fa7f0613d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -463,7 +463,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 {
         struct sta_info *sta = tx->sta;
         struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
         struct ieee80211_local *local = tx->local;
 
         if (unlikely(!sta))
@@ -474,15 +473,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
                      !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
                 int ac = skb_get_queue_mapping(tx->skb);
 
-                /* only deauth, disassoc and action are bufferable MMPDUs */
-                if (ieee80211_is_mgmt(hdr->frame_control) &&
-                    !ieee80211_is_deauth(hdr->frame_control) &&
-                    !ieee80211_is_disassoc(hdr->frame_control) &&
-                    !ieee80211_is_action(hdr->frame_control)) {
-                        info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
-                        return TX_CONTINUE;
-                }
-
                 ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
                        sta->sta.addr, sta->sta.aid, ac);
                 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -525,9 +515,22 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
 {
+        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
+
         if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
                 return TX_CONTINUE;
 
+        /* only deauth, disassoc and action are bufferable MMPDUs */
+        if (ieee80211_is_mgmt(hdr->frame_control) &&
+            !ieee80211_is_deauth(hdr->frame_control) &&
+            !ieee80211_is_disassoc(hdr->frame_control) &&
+            !ieee80211_is_action(hdr->frame_control)) {
+                if (tx->flags & IEEE80211_TX_UNICAST)
+                        info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+                return TX_CONTINUE;
+        }
+
         if (tx->flags & IEEE80211_TX_UNICAST)
                 return ieee80211_tx_h_unicast_ps_buf(tx);
         else
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index b2d38da67822..f6e2ae91a80b 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -37,7 +37,7 @@ int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
                 return 0;
 
         if (unlikely(!seqadj)) {
-                WARN(1, "Wrong seqadj usage, missing nfct_seqadj_ext_add()\n");
+                WARN_ONCE(1, "Missing nfct_seqadj_ext_add() setup call\n");
                 return 0;
         }
 
diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c
index f02b3605823e..1fb2258c3535 100644
--- a/net/netfilter/nf_nat_irc.c
+++ b/net/netfilter/nf_nat_irc.c
@@ -34,10 +34,14 @@ static unsigned int help(struct sk_buff *skb,
                          struct nf_conntrack_expect *exp)
 {
         char buffer[sizeof("4294967296 65635")];
+        struct nf_conn *ct = exp->master;
+        union nf_inet_addr newaddr;
         u_int16_t port;
         unsigned int ret;
 
         /* Reply comes from server. */
+        newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3;
+
         exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
         exp->dir = IP_CT_DIR_REPLY;
         exp->expectfn = nf_nat_follow_master;
@@ -57,17 +61,35 @@ static unsigned int help(struct sk_buff *skb,
         }
 
         if (port == 0) {
-                nf_ct_helper_log(skb, exp->master, "all ports in use");
+                nf_ct_helper_log(skb, ct, "all ports in use");
                 return NF_DROP;
         }
 
-        ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo,
-                                       protoff, matchoff, matchlen, buffer,
-                                       strlen(buffer));
+        /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27
+         * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28
+         * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26
+         * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26
+         * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27
+         *
+         * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits,
+         *                        255.255.255.255==4294967296, 10 digits)
+         * P:         bound port (min 1 d, max 5d (65635))
+         * F:         filename   (min 1 d )
+         * S:         size       (min 1 d )
+         * 0x01, \n:  terminators
+         */
+        /* AAA = "us", ie. where server normally talks to. */
+        snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port);
+        pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n",
+                 buffer, &newaddr.ip, port);
+
+        ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
+                                       matchlen, buffer, strlen(buffer));
         if (ret != NF_ACCEPT) {
-                nf_ct_helper_log(skb, exp->master, "cannot mangle packet");
+                nf_ct_helper_log(skb, ct, "cannot mangle packet");
                 nf_ct_unexpect_related(exp);
         }
+
         return ret;
 }
 
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 872529105abc..83b9927e7d19 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -384,7 +384,7 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
 {
         dev->dep_link_up = true;
 
-        if (!dev->active_target) {
+        if (!dev->active_target && rf_mode == NFC_RF_INITIATOR) {
                 struct nfc_target *target;
 
                 target = nfc_find_target(dev, target_idx);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 922a09406ba7..7fc899a943a8 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -126,7 +126,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 
         HARD_TX_LOCK(dev, txq, smp_processor_id());
         if (!netif_xmit_frozen_or_stopped(txq))
-                ret = dev_hard_start_xmit(skb, dev, txq, NULL);
+                ret = dev_hard_start_xmit(skb, dev, txq);
 
         HARD_TX_UNLOCK(dev, txq);
 
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 69cd9bf3f561..13b987745820 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1498,6 +1498,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
                 int type;
 
                 head = head->next;
+                buf->next = NULL;
 
                 /* Ensure bearer is still enabled */
                 if (unlikely(!b_ptr->active))