author     David S. Miller <davem@davemloft.net>    2014-01-14 17:37:09 -0500
committer  David S. Miller <davem@davemloft.net>    2014-01-14 17:42:42 -0500
commit     0a379e21c503b2ff66b44d588df9f231e9b0b9ca (patch)
tree       22b875fcf4b67fcd007726f00c5fc1748ce985d0 /net
parent     a49da8811e71c5355b52c65ee32976741d5834cd (diff)
parent     fdc3452cd2c7b2bfe0f378f92123f4f9a98fa2bd (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c                        29
-rw-r--r--  net/core/flow_dissector.c             10
-rw-r--r--  net/core/netpoll.c                     2
-rw-r--r--  net/ipv4/inet_diag.c                   5
-rw-r--r--  net/ipv6/addrconf.c                    6
-rw-r--r--  net/ipv6/ip6_vti.c                     6
-rw-r--r--  net/mac80211/iface.c                   6
-rw-r--r--  net/mac80211/tx.c                     23
-rw-r--r--  net/netfilter/nf_conntrack_seqadj.c    2
-rw-r--r--  net/netfilter/nf_nat_irc.c            32
-rw-r--r--  net/nfc/core.c                         2
-rw-r--r--  net/sched/sch_generic.c                2
-rw-r--r--  net/tipc/link.c                        1
13 files changed, 87 insertions, 39 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 87312dcf0aa8..2bee80591f9a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2530,7 +2530,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 EXPORT_SYMBOL(netif_skb_features);
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			struct netdev_queue *txq, void *accel_priv)
+			struct netdev_queue *txq)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	int rc = NETDEV_TX_OK;
@@ -2596,13 +2596,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 		dev_queue_xmit_nit(skb, dev);
 
 	skb_len = skb->len;
-	if (accel_priv)
-		rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
-	else
-		rc = ops->ndo_start_xmit(skb, dev);
+	rc = ops->ndo_start_xmit(skb, dev);
 
 	trace_net_dev_xmit(skb, rc, dev, skb_len);
-	if (rc == NETDEV_TX_OK && txq)
+	if (rc == NETDEV_TX_OK)
 		txq_trans_update(txq);
 	return rc;
 }
@@ -2618,10 +2615,7 @@ gso:
 		dev_queue_xmit_nit(nskb, dev);
 
 		skb_len = nskb->len;
-		if (accel_priv)
-			rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
-		else
-			rc = ops->ndo_start_xmit(nskb, dev);
+		rc = ops->ndo_start_xmit(nskb, dev);
 		trace_net_dev_xmit(nskb, rc, dev, skb_len);
 		if (unlikely(rc != NETDEV_TX_OK)) {
 			if (rc & ~NETDEV_TX_MASK)
@@ -2802,7 +2796,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
  *	the BH enable code must have IRQs enabled so that it will not deadlock.
  *          --BLG
  */
-int dev_queue_xmit(struct sk_buff *skb)
+int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 {
 	struct net_device *dev = skb->dev;
 	struct netdev_queue *txq;
@@ -2818,7 +2812,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
 	skb_update_prio(skb);
 
-	txq = netdev_pick_tx(dev, skb);
+	txq = netdev_pick_tx(dev, skb, accel_priv);
 	q = rcu_dereference_bh(txq->qdisc);
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -2854,7 +2848,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
 		if (!netif_xmit_stopped(txq)) {
 			__this_cpu_inc(xmit_recursion);
-			rc = dev_hard_start_xmit(skb, dev, txq, NULL);
+			rc = dev_hard_start_xmit(skb, dev, txq);
 			__this_cpu_dec(xmit_recursion);
 			if (dev_xmit_complete(rc)) {
 				HARD_TX_UNLOCK(dev, txq);
@@ -2883,8 +2877,19 @@ out:
 	rcu_read_unlock_bh();
 	return rc;
 }
+
+int dev_queue_xmit(struct sk_buff *skb)
+{
+	return __dev_queue_xmit(skb, NULL);
+}
 EXPORT_SYMBOL(dev_queue_xmit);
 
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+{
+	return __dev_queue_xmit(skb, accel_priv);
+}
+EXPORT_SYMBOL(dev_queue_xmit_accel);
+
 
 /*=======================================================================
 			Receiver routines
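
The net/core/dev.c hunks drop the accel_priv argument from dev_hard_start_xmit() and instead thread it through a new __dev_queue_xmit() core, exposed via the dev_queue_xmit() and dev_queue_xmit_accel() wrappers, so the acceleration cookie now influences queue selection rather than the final transmit call. A hedged sketch of how a forwarding driver might use the new entry point; struct example_priv and its fwd_priv field are hypothetical, not taken from any real driver:

/*
 * Illustrative sketch only.  The cookie would typically come from
 * ndo_dfwd_add_station(); everything named example_* is invented.
 */
struct example_priv {
	void *fwd_priv;		/* lower-device forwarding cookie, or NULL */
};

static int example_fwd_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	if (priv->fwd_priv)
		/* queue selection keyed on accel_priv now happens in
		 * __dev_queue_xmit()/netdev_pick_tx(), not in
		 * dev_hard_start_xmit() */
		return dev_queue_xmit_accel(skb, priv->fwd_priv);

	return dev_queue_xmit(skb);
}
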
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index b324bfa3485c..87577d447554 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -395,17 +395,21 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 EXPORT_SYMBOL(__netdev_pick_tx);
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-				    struct sk_buff *skb)
+				    struct sk_buff *skb,
+				    void *accel_priv)
 {
 	int queue_index = 0;
 
 	if (dev->real_num_tx_queues != 1) {
 		const struct net_device_ops *ops = dev->netdev_ops;
 		if (ops->ndo_select_queue)
-			queue_index = ops->ndo_select_queue(dev, skb);
+			queue_index = ops->ndo_select_queue(dev, skb,
+							    accel_priv);
 		else
 			queue_index = __netdev_pick_tx(dev, skb);
-		queue_index = dev_cap_txqueue(dev, queue_index);
+
+		if (!accel_priv)
+			queue_index = dev_cap_txqueue(dev, queue_index);
 	}
 
 	skb_set_queue_mapping(skb, queue_index);
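
netdev_pick_tx() now forwards the acceleration cookie to the driver's ndo_select_queue() and skips dev_cap_txqueue() when accel_priv is set, so an accelerated transmit may target a queue the driver reserved beyond real_num_tx_queues. A minimal sketch of the new callback shape, assuming a hypothetical example_fwd structure for the per-lower-device queue bookkeeping:

/*
 * Sketch of the three-argument ->ndo_select_queue().  example_fwd and
 * base_queue are made up to show why the core no longer caps the index
 * on the accelerated path.
 */
struct example_fwd {
	u16 base_queue;		/* first tx queue reserved for this lower dev */
};

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				void *accel_priv)
{
	if (accel_priv) {
		struct example_fwd *fwd = accel_priv;

		return fwd->base_queue;	/* not clamped by dev_cap_txqueue() */
	}

	return __netdev_pick_tx(dev, skb);
}
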
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 303097874633..19fe9c717ced 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -375,7 +375,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
 		struct netdev_queue *txq;
 
-		txq = netdev_pick_tx(dev, skb);
+		txq = netdev_pick_tx(dev, skb, NULL);
 
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index a0f52dac8940..e34dccbc4d70 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -930,12 +930,15 @@ skip_listen_ht:
 		spin_lock_bh(lock);
 		sk_nulls_for_each(sk, node, &head->chain) {
 			int res;
+			int state;
 
 			if (!net_eq(sock_net(sk), net))
 				continue;
 			if (num < s_num)
 				goto next_normal;
-			if (!(r->idiag_states & (1 << sk->sk_state)))
+			state = (sk->sk_state == TCP_TIME_WAIT) ?
+				inet_twsk(sk)->tw_substate : sk->sk_state;
+			if (!(r->idiag_states & (1 << state)))
 				goto next_normal;
 			if (r->sdiag_family != AF_UNSPEC &&
 			    sk->sk_family != r->sdiag_family)
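
With this hunk, inet_diag dump filtering compares TIME_WAIT sockets by their tw_substate, so a request for, say, FIN_WAIT2 also returns timewait entries that are logically in FIN_WAIT2 instead of silently skipping them. The state that gets tested against r->idiag_states reduces to the following sketch (helper name invented):

/*
 * Sketch: the value now matched against the idiag_states bitmask.
 */
static int example_diag_state(const struct sock *sk)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk(sk)->tw_substate;

	return sk->sk_state;
}
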
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a9fa6c1feed5..b0cd122e5fb4 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2529,7 +2529,8 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
 	struct inet6_ifaddr *ifp;
 
 	ifp = ipv6_add_addr(idev, addr, NULL, plen,
-			    scope, IFA_F_PERMANENT, 0, 0);
+			    scope, IFA_F_PERMANENT,
+			    INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
 	if (!IS_ERR(ifp)) {
 		spin_lock_bh(&ifp->lock);
 		ifp->flags &= ~IFA_F_TENTATIVE;
@@ -2657,7 +2658,8 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
 #endif
 
 
-	ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0);
+	ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags,
+			    INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
 	if (!IS_ERR(ifp)) {
 		addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
 		addrconf_dad_start(ifp);
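
Both addrconf hunks replace the trailing 0, 0 lifetime arguments to ipv6_add_addr() with INFINITY_LIFE_TIME, so these kernel-generated IFA_F_PERMANENT addresses no longer carry zero valid/preferred lifetimes. A small illustrative check of the resulting invariant; the helper name is hypothetical:

/*
 * Sketch: after the change, such addresses report "forever" lifetimes
 * (e.g. in "ip -6 addr" output) rather than looking already expired.
 */
static bool example_lft_is_forever(const struct inet6_ifaddr *ifp)
{
	return ifp->valid_lft == INFINITY_LIFE_TIME &&
	       ifp->prefered_lft == INFINITY_LIFE_TIME;
}
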
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index b50acd5e75d2..2d19272b8cee 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -731,12 +731,18 @@ static void vti6_dev_setup(struct net_device *dev)
 static inline int vti6_dev_init_gen(struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
+	int i;
 
 	t->dev = dev;
 	t->net = dev_net(dev);
 	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
 	if (!dev->tstats)
 		return -ENOMEM;
+	for_each_possible_cpu(i) {
+		struct pcpu_sw_netstats *stats;
+		stats = per_cpu_ptr(dev->tstats, i);
+		u64_stats_init(&stats->syncp);
+	}
 	return 0;
 }
 
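
The ip6_vti hunk applies the usual rule for per-CPU software stats: after alloc_percpu(), each CPU's u64_stats syncp must be initialized with u64_stats_init(), since on 32-bit and lockdep-enabled builds the seqcount is real rather than a no-op. A generic sketch of the same init pattern for any net device; the example_ name is illustrative:

/*
 * Sketch of the generic pattern: allocate per-CPU sw_netstats and
 * initialize every CPU's u64_stats seqcount before first use.
 */
static int example_init_tstats(struct net_device *dev)
{
	int i;

	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct pcpu_sw_netstats *stats = per_cpu_ptr(dev->tstats, i);

		u64_stats_init(&stats->syncp);
	}

	return 0;
}
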
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index b2c83c0f06d0..3dfd20a453ab 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1045,7 +1045,8 @@ static void ieee80211_uninit(struct net_device *dev)
 }
 
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
-					 struct sk_buff *skb)
+					 struct sk_buff *skb,
+					 void *accel_priv)
 {
 	return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }
@@ -1062,7 +1063,8 @@ static const struct net_device_ops ieee80211_dataif_ops = {
 };
 
 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
-					  struct sk_buff *skb)
+					  struct sk_buff *skb,
+					  void *accel_priv)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 377cf974d97d..ef3555e16cf9 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -464,7 +464,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 {
 	struct sta_info *sta = tx->sta;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 	struct ieee80211_local *local = tx->local;
 
 	if (unlikely(!sta))
@@ -475,15 +474,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 		     !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
 		int ac = skb_get_queue_mapping(tx->skb);
 
-		/* only deauth, disassoc and action are bufferable MMPDUs */
-		if (ieee80211_is_mgmt(hdr->frame_control) &&
-		    !ieee80211_is_deauth(hdr->frame_control) &&
-		    !ieee80211_is_disassoc(hdr->frame_control) &&
-		    !ieee80211_is_action(hdr->frame_control)) {
-			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
-			return TX_CONTINUE;
-		}
-
 		ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
 		       sta->sta.addr, sta->sta.aid, ac);
 		if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -526,9 +516,22 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
 {
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
+
 	if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
 		return TX_CONTINUE;
 
+	/* only deauth, disassoc and action are bufferable MMPDUs */
+	if (ieee80211_is_mgmt(hdr->frame_control) &&
+	    !ieee80211_is_deauth(hdr->frame_control) &&
+	    !ieee80211_is_disassoc(hdr->frame_control) &&
+	    !ieee80211_is_action(hdr->frame_control)) {
+		if (tx->flags & IEEE80211_TX_UNICAST)
+			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+		return TX_CONTINUE;
+	}
+
 	if (tx->flags & IEEE80211_TX_UNICAST)
 		return ieee80211_tx_h_unicast_ps_buf(tx);
 	else
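
The mac80211 change moves the bufferable-MMPDU test from ieee80211_tx_h_unicast_ps_buf() up into ieee80211_tx_h_ps_buf(), so non-bufferable management frames now bypass both the unicast and the multicast power-save buffering paths instead of only the unicast one. The classification itself is unchanged and can be read as the following predicate (helper name invented):

/*
 * Sketch: only deauth, disassoc and action frames are bufferable
 * MMPDUs; any other management frame must not be parked in the PS
 * buffers.  Non-management frames buffer as usual.
 */
static bool example_is_bufferable_mmpdu(__le16 fc)
{
	if (!ieee80211_is_mgmt(fc))
		return true;

	return ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc) ||
	       ieee80211_is_action(fc);
}
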
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index b2d38da67822..f6e2ae91a80b 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -37,7 +37,7 @@ int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
 		return 0;
 
 	if (unlikely(!seqadj)) {
-		WARN(1, "Wrong seqadj usage, missing nfct_seqadj_ext_add()\n");
+		WARN_ONCE(1, "Missing nfct_seqadj_ext_add() setup call\n");
 		return 0;
 	}
 
diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c
index f02b3605823e..1fb2258c3535 100644
--- a/net/netfilter/nf_nat_irc.c
+++ b/net/netfilter/nf_nat_irc.c
@@ -34,10 +34,14 @@ static unsigned int help(struct sk_buff *skb,
 			 struct nf_conntrack_expect *exp)
 {
 	char buffer[sizeof("4294967296 65635")];
+	struct nf_conn *ct = exp->master;
+	union nf_inet_addr newaddr;
 	u_int16_t port;
 	unsigned int ret;
 
 	/* Reply comes from server. */
+	newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3;
+
 	exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
 	exp->dir = IP_CT_DIR_REPLY;
 	exp->expectfn = nf_nat_follow_master;
@@ -57,17 +61,35 @@ static unsigned int help(struct sk_buff *skb,
 	}
 
 	if (port == 0) {
-		nf_ct_helper_log(skb, exp->master, "all ports in use");
+		nf_ct_helper_log(skb, ct, "all ports in use");
 		return NF_DROP;
 	}
 
-	ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo,
-				       protoff, matchoff, matchlen, buffer,
-				       strlen(buffer));
+	/* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27
+	 * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28
+	 * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26
+	 * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26
+	 * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27
+	 *
+	 * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits,
+	 *            255.255.255.255==4294967296, 10 digits)
+	 * P:         bound port (min 1 d, max 5d (65635))
+	 * F:         filename   (min 1 d )
+	 * S:         size       (min 1 d )
+	 * 0x01, \n:  terminators
+	 */
+	/* AAA = "us", ie. where server normally talks to. */
+	snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port);
+	pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n",
+		 buffer, &newaddr.ip, port);
+
+	ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
+				       matchlen, buffer, strlen(buffer));
 	if (ret != NF_ACCEPT) {
-		nf_ct_helper_log(skb, exp->master, "cannot mangle packet");
+		nf_ct_helper_log(skb, ct, "cannot mangle packet");
 		nf_ct_unexpect_related(exp);
 	}
+
 	return ret;
 }
 
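
The IRC NAT helper now formats the replacement DCC argument only after the NATed port is known, using the reply-direction address taken from the conntrack entry (ct) rather than re-deriving it from exp->master. The formatting step itself is just the host-order address followed by the port, bounded by the widest possible string; a hedged sketch with an invented wrapper name:

/*
 * Sketch: format the NATed "address port" pair exactly as the helper
 * does, e.g. 192.168.0.1 port 6667 becomes "3232235521 6667".  The
 * caller's buffer bound is sizeof("4294967296 65635").
 */
static void example_format_dcc(char *buffer, size_t len,
			       const union nf_inet_addr *newaddr,
			       u_int16_t port)
{
	snprintf(buffer, len, "%u %u", ntohl(newaddr->ip), port);
}
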
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 02ab34132157..b675fa4a6f19 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -382,7 +382,7 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
 {
 	dev->dep_link_up = true;
 
-	if (!dev->active_target) {
+	if (!dev->active_target && rf_mode == NFC_RF_INITIATOR) {
 		struct nfc_target *target;
 
 		target = nfc_find_target(dev, target_idx);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 32bb942d2faa..e82e43b69c33 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -126,7 +126,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_xmit_frozen_or_stopped(txq))
-		ret = dev_hard_start_xmit(skb, dev, txq, NULL);
+		ret = dev_hard_start_xmit(skb, dev, txq);
 
 	HARD_TX_UNLOCK(dev, txq);
 
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 471973ff134f..d4b5de41b682 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1438,6 +1438,7 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
 		int type;
 
 		head = head->next;
+		buf->next = NULL;
 
 		/* Ensure bearer is still enabled */
 		if (unlikely(!b_ptr->active))
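
In tipc_rcv() incoming buffers arrive chained via skb->next; clearing buf->next once the head has been detached keeps later code (including an eventual kfree_skb() of that buffer) from reaching buffers that are still on the chain. The detach step is the usual singly-linked unlink, sketched here with an invented helper name:

/*
 * Sketch of the unlink pattern the one-line fix completes: take the
 * first buffer off the chain and sever its link before processing it.
 */
static struct sk_buff *example_detach_head(struct sk_buff **head)
{
	struct sk_buff *buf = *head;

	if (buf) {
		*head = buf->next;
		buf->next = NULL;
	}

	return buf;
}
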