author    Linus Torvalds <torvalds@linux-foundation.org>  2012-09-02 14:28:00 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-09-02 14:28:00 -0400
commit    0b1a34c992853ecb47daa5be598d7ed2930342dc (patch)
tree      d21e98ea17bb5f1d77a78b8b56c1c453a0fe65ef /net
parent    4cbe5a555fa58a79b6ecbb6c531b8bab0650778d (diff)
parent    5002200599429e83fc13e0d9a2d4788b79515b0c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) NLA_PUT* --> nla_put_* conversion got one case wrong in nfnetlink_log,
    fix from Patrick McHardy.

 2) Missed error return check in ipw2100 driver, from Julia Lawall.

 3) PMTU updates in ipv4 were setting the expiry time incorrectly, fix
    from Eric Dumazet.

 4) SFC driver erroneously reversed src and dst when reporting filters
    via ethtool.

 5) Memory leak in CAN protocol and wrong setting of IRQF_SHARED in
    sja1000 can platform driver, from Alexey Khoroshilov and Sven Schmitt.

 6) Fix multicast traffic scaling regression in ipv4_dst_destroy, only
    take the lock when we really need to.  From Eric Dumazet.

 7) Fix non-root process spoofing in netlink, from Pablo Neira Ayuso.

 8) CWND reduction in TCP is done incorrectly during non-SACK recovery,
    fix from Yuchung Cheng.

 9) Revert netpoll change, and fix what was actually a driver specific
    problem.  From Amerigo Wang.  This should cure bootup hangs with
    netconsole some people reported.

10) Fix xen-netfront invoking __skb_fill_page_desc() with a NULL page
    pointer.  From Ian Campbell.

11) SIP NAT fix for expectation creation, from Pablo Neira Ayuso.

12) __ip_rt_update_pmtu() needs RCU locking, from Eric Dumazet.

13) Fix usbnet deadlock on resume, can't use GFP_KERNEL in this
    situation.  From Oliver Neukum.

14) The davinci ethernet driver triggers an OOPS on removal because it
    frees an MDIO object before unregistering it.  Fix from Bin Liu.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (41 commits)
  net: qmi_wwan: add several new Gobi devices
  fddi: 64 bit bug in smt_add_para()
  net: ethernet: fix kernel OOPS when remove davinci_mdio module
  net/xfrm/xfrm_state.c: fix error return code
  net: ipv6: fix error return code
  net: qmi_wwan: new device: Foxconn/Novatel E396
  usbnet: fix deadlock in resume
  cs89x0 : packet reception not working
  netfilter: nf_conntrack: fix racy timer handling with reliable events
  bnx2x: Correct the ndo_poll_controller call
  bnx2x: Move netif_napi_add to the open call
  ipv4: must use rcu protection while calling fib_lookup
  bnx2x: fix 57840_MF pci id
  net: ipv4: ipmr_expire_timer causes crash when removing net namespace
  e1000e: DoS while TSO enabled caused by link partner with small MSS
  l2tp: avoid to use synchronize_rcu in tunnel free function
  gianfar: fix default tx vlan offload feature flag
  netfilter: nf_nat_sip: fix incorrect handling of EBUSY for RTCP expectation
  xen-netfront: use __pskb_pull_tail to ensure linear area is big enough on RX
  netfilter: nfnetlink_log: fix error return code in init path
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/core/netpoll.c                        10
-rw-r--r--  net/ipv4/ipmr.c                           14
-rw-r--r--  net/ipv4/netfilter/nf_nat_sip.c            5
-rw-r--r--  net/ipv4/route.c                           6
-rw-r--r--  net/ipv4/tcp_input.c                      15
-rw-r--r--  net/ipv6/esp6.c                            6
-rw-r--r--  net/l2tp/l2tp_core.c                       3
-rw-r--r--  net/l2tp/l2tp_core.h                       1
-rw-r--r--  net/mac80211/tx.c                         38
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c             4
-rw-r--r--  net/netfilter/nf_conntrack_core.c         16
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c       3
-rw-r--r--  net/netfilter/nfnetlink_log.c              6
-rw-r--r--  net/netlink/af_netlink.c                   4
-rw-r--r--  net/packet/af_packet.c                     2
-rw-r--r--  net/xfrm/xfrm_state.c                      4
16 files changed, 76 insertions, 61 deletions
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 346b1eb83a1f..e4ba3e70c174 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -168,24 +168,16 @@ static void poll_napi(struct net_device *dev)
 	struct napi_struct *napi;
 	int budget = 16;
 
-	WARN_ON_ONCE(!irqs_disabled());
-
 	list_for_each_entry(napi, &dev->napi_list, dev_list) {
-		local_irq_enable();
 		if (napi->poll_owner != smp_processor_id() &&
 		    spin_trylock(&napi->poll_lock)) {
-			rcu_read_lock_bh();
 			budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
 					       napi, budget);
-			rcu_read_unlock_bh();
 			spin_unlock(&napi->poll_lock);
 
-			if (!budget) {
-				local_irq_disable();
+			if (!budget)
 				break;
-			}
 		}
-		local_irq_disable();
 	}
 }
 
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 8eec8f4a0536..ebdf06f938bf 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
 static struct kmem_cache *mrt_cachep __read_mostly;
 
 static struct mr_table *ipmr_new_table(struct net *net, u32 id);
+static void ipmr_free_table(struct mr_table *mrt);
+
 static int ip_mr_forward(struct net *net, struct mr_table *mrt,
 			  struct sk_buff *skb, struct mfc_cache *cache,
 			  int local);
@@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
 			      struct sk_buff *pkt, vifi_t vifi, int assert);
 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 			      struct mfc_cache *c, struct rtmsg *rtm);
+static void mroute_clean_tables(struct mr_table *mrt);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)
 
 	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
 		list_del(&mrt->list);
-		kfree(mrt);
+		ipmr_free_table(mrt);
 	}
 	fib_rules_unregister(net->ipv4.mr_rules_ops);
 }
@@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net)
 
 static void __net_exit ipmr_rules_exit(struct net *net)
 {
-	kfree(net->ipv4.mrt);
+	ipmr_free_table(net->ipv4.mrt);
 }
 #endif
 
@@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
 	return mrt;
 }
 
+static void ipmr_free_table(struct mr_table *mrt)
+{
+	del_timer_sync(&mrt->ipmr_expire_timer);
+	mroute_clean_tables(mrt);
+	kfree(mrt);
+}
+
 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
 
 static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 4ad9cf173992..9c87cde28ff8 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -502,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
 		ret = nf_ct_expect_related(rtcp_exp);
 		if (ret == 0)
 			break;
-		else if (ret != -EBUSY) {
+		else if (ret == -EBUSY) {
+			nf_ct_unexpect_related(rtp_exp);
+			continue;
+		} else if (ret < 0) {
 			nf_ct_unexpect_related(rtp_exp);
 			port = 0;
 			break;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index fd9ecb52c66b..82cf2a722b23 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -934,12 +934,14 @@ static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
 	if (mtu < ip_rt_min_pmtu)
 		mtu = ip_rt_min_pmtu;
 
+	rcu_read_lock();
 	if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
 		struct fib_nh *nh = &FIB_RES_NH(res);
 
 		update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
 				      jiffies + ip_rt_mtu_expires);
 	}
+	rcu_read_unlock();
 	return mtu;
 }
 
@@ -956,7 +958,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
 		dst->obsolete = DST_OBSOLETE_KILL;
 	} else {
 		rt->rt_pmtu = mtu;
-		dst_set_expires(&rt->dst, ip_rt_mtu_expires);
+		rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires);
 	}
 }
 
@@ -1263,7 +1265,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
 {
 	struct rtable *rt = (struct rtable *) dst;
 
-	if (dst->flags & DST_NOCACHE) {
+	if (!list_empty(&rt->rt_uncached)) {
 		spin_lock_bh(&rt_uncached_lock);
 		list_del(&rt->rt_uncached);
 		spin_unlock_bh(&rt_uncached_lock);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 85308b90df80..6e38c6c23caa 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2926,13 +2926,14 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
  * tcp_xmit_retransmit_queue().
  */
 static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
-				  int newly_acked_sacked, bool is_dupack,
+				  int prior_sacked, bool is_dupack,
 				  int flag)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
 				    (tcp_fackets_out(tp) > tp->reordering));
+	int newly_acked_sacked = 0;
 	int fast_rexmit = 0;
 
 	if (WARN_ON(!tp->packets_out && tp->sacked_out))
@@ -2992,6 +2993,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
 			tcp_add_reno_sack(sk);
 		} else
 			do_lost = tcp_try_undo_partial(sk, pkts_acked);
+		newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
 		break;
 	case TCP_CA_Loss:
 		if (flag & FLAG_DATA_ACKED)
@@ -3013,6 +3015,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
 		if (is_dupack)
 			tcp_add_reno_sack(sk);
 	}
+	newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
 
 	if (icsk->icsk_ca_state <= TCP_CA_Disorder)
 		tcp_try_undo_dsack(sk);
@@ -3590,7 +3593,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	int prior_packets;
 	int prior_sacked = tp->sacked_out;
 	int pkts_acked = 0;
-	int newly_acked_sacked = 0;
 	bool frto_cwnd = false;
 
 	/* If the ack is older than previous acks
@@ -3666,8 +3668,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
 
 	pkts_acked = prior_packets - tp->packets_out;
-	newly_acked_sacked = (prior_packets - prior_sacked) -
-			     (tp->packets_out - tp->sacked_out);
 
 	if (tp->frto_counter)
 		frto_cwnd = tcp_process_frto(sk, flag);
@@ -3681,7 +3681,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		    tcp_may_raise_cwnd(sk, flag))
 			tcp_cong_avoid(sk, ack, prior_in_flight);
 		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
-		tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+		tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
 				      is_dupack, flag);
 	} else {
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
@@ -3698,7 +3698,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 no_queue:
 	/* If data was DSACKed, see if we can undo a cwnd reduction. */
 	if (flag & FLAG_DSACKING_ACK)
-		tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+		tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
 				      is_dupack, flag);
 	/* If this ack opens up a zero window, clear backoff.  It was
 	 * being used to time the probes, and is probably far higher than
@@ -3718,8 +3718,7 @@ old_ack:
 	 */
 	if (TCP_SKB_CB(skb)->sacked) {
 		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
-		newly_acked_sacked = tp->sacked_out - prior_sacked;
-		tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+		tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
 				      is_dupack, flag);
 	}
 
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 6dc7fd353ef5..282f3723ee19 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -167,8 +167,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 	struct esp_data *esp = x->data;
 
 	/* skb is pure payload to encrypt */
-	err = -ENOMEM;
-
 	aead = esp->aead;
 	alen = crypto_aead_authsize(aead);
 
@@ -203,8 +201,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 	}
 
 	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
-	if (!tmp)
+	if (!tmp) {
+		err = -ENOMEM;
 		goto error;
+	}
 
 	seqhi = esp_tmp_seqhi(tmp);
 	iv = esp_tmp_iv(aead, tmp, seqhilen);
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 393355d37b47..513cab08a986 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1347,11 +1347,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
 	/* Remove from tunnel list */
 	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
 	list_del_rcu(&tunnel->list);
+	kfree_rcu(tunnel, rcu);
 	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-	synchronize_rcu();
 
 	atomic_dec(&l2tp_tunnel_count);
-	kfree(tunnel);
 }
 
 /* Create a socket for the tunnel, if one isn't set up by
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a38ec6cdeee1..56d583e083a7 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -163,6 +163,7 @@ struct l2tp_tunnel_cfg {
 
 struct l2tp_tunnel {
 	int			magic;		/* Should be L2TP_TUNNEL_MAGIC */
+	struct rcu_head rcu;
 	rwlock_t		hlist_lock;	/* protect session_hlist */
 	struct hlist_head	session_hlist[L2TP_HASH_SIZE];
 						/* hashed list of sessions,
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index acf712ffb5e6..c5e8c9c31f76 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1811,37 +1811,31 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 			meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
 					sdata, NULL, NULL);
 		} else {
-			int is_mesh_mcast = 1;
-			const u8 *mesh_da;
+			/* DS -> MBSS (802.11-2012 13.11.3.3).
+			 * For unicast with unknown forwarding information,
+			 * destination might be in the MBSS or if that fails
+			 * forwarded to another mesh gate. In either case
+			 * resolution will be handled in ieee80211_xmit(), so
+			 * leave the original DA. This also works for mcast */
+			const u8 *mesh_da = skb->data;
+
+			if (mppath)
+				mesh_da = mppath->mpp;
+			else if (mpath)
+				mesh_da = mpath->dst;
+			rcu_read_unlock();
 
-			if (is_multicast_ether_addr(skb->data))
-				/* DA TA mSA AE:SA */
-				mesh_da = skb->data;
-			else {
-				static const u8 bcast[ETH_ALEN] =
-					{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-				if (mppath) {
-					/* RA TA mDA mSA AE:DA SA */
-					mesh_da = mppath->mpp;
-					is_mesh_mcast = 0;
-				} else if (mpath) {
-					mesh_da = mpath->dst;
-					is_mesh_mcast = 0;
-				} else {
-					/* DA TA mSA AE:SA */
-					mesh_da = bcast;
-				}
-			}
 			hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
 					mesh_da, sdata->vif.addr);
-			rcu_read_unlock();
-			if (is_mesh_mcast)
+			if (is_multicast_ether_addr(mesh_da))
+				/* DA TA mSA AE:SA */
 				meshhdrlen =
 					ieee80211_new_mesh_header(&mesh_hdr,
 							sdata,
 							skb->data + ETH_ALEN,
 							NULL);
 			else
+				/* RA TA mDA mSA AE:DA SA */
 				meshhdrlen =
 					ieee80211_new_mesh_header(&mesh_hdr,
 							sdata,
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 72bf32a84874..f51013c07b9f 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1171,8 +1171,10 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 		goto out_err;
 	}
 	svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
-	if (!svc->stats.cpustats)
+	if (!svc->stats.cpustats) {
+		ret = -ENOMEM;
 		goto out_err;
+	}
 
 	/* I'm the first user of the service */
 	atomic_set(&svc->usecnt, 0);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index cf4875565d67..2ceec64b19f9 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -249,12 +249,15 @@ static void death_by_event(unsigned long ul_conntrack)
 {
 	struct nf_conn *ct = (void *)ul_conntrack;
 	struct net *net = nf_ct_net(ct);
+	struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+	BUG_ON(ecache == NULL);
 
 	if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
 		/* bad luck, let's retry again */
-		ct->timeout.expires = jiffies +
+		ecache->timeout.expires = jiffies +
 			(random32() % net->ct.sysctl_events_retry_timeout);
-		add_timer(&ct->timeout);
+		add_timer(&ecache->timeout);
 		return;
 	}
 	/* we've got the event delivered, now it's dying */
@@ -268,6 +271,9 @@ static void death_by_event(unsigned long ul_conntrack)
 void nf_ct_insert_dying_list(struct nf_conn *ct)
 {
 	struct net *net = nf_ct_net(ct);
+	struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+	BUG_ON(ecache == NULL);
 
 	/* add this conntrack to the dying list */
 	spin_lock_bh(&nf_conntrack_lock);
@@ -275,10 +281,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
 		       &net->ct.dying);
 	spin_unlock_bh(&nf_conntrack_lock);
 	/* set a new timer to retry event delivery */
-	setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
-	ct->timeout.expires = jiffies +
+	setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
+	ecache->timeout.expires = jiffies +
 		(random32() % net->ct.sysctl_events_retry_timeout);
-	add_timer(&ct->timeout);
+	add_timer(&ecache->timeout);
 }
 EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
 
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index da4fc37a8578..9807f3278fcb 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2790,7 +2790,8 @@ static int __init ctnetlink_init(void)
 		goto err_unreg_subsys;
 	}
 
-	if (register_pernet_subsys(&ctnetlink_net_ops)) {
+	ret = register_pernet_subsys(&ctnetlink_net_ops);
+	if (ret < 0) {
 		pr_err("ctnetlink_init: cannot register pernet operations\n");
 		goto err_unreg_exp_subsys;
 	}
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 169ab59ed9d4..14e2f3903142 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -480,7 +480,7 @@ __build_packet_message(struct nfulnl_instance *inst,
 	}
 
 	if (indev && skb_mac_header_was_set(skb)) {
-		if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
+		if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
 		    nla_put_be16(inst->skb, NFULA_HWLEN,
 				 htons(skb->dev->hard_header_len)) ||
 		    nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
@@ -996,8 +996,10 @@ static int __init nfnetlink_log_init(void)
 
 #ifdef CONFIG_PROC_FS
 	if (!proc_create("nfnetlink_log", 0440,
-			 proc_net_netfilter, &nful_file_ops))
+			 proc_net_netfilter, &nful_file_ops)) {
+		status = -ENOMEM;
 		goto cleanup_logger;
+	}
 #endif
 	return status;
 
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1445d73533ed..527023823b5c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1373,7 +1373,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		dst_pid = addr->nl_pid;
 		dst_group = ffs(addr->nl_groups);
 		err = -EPERM;
-		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
+		if ((dst_group || dst_pid) &&
+		    !netlink_capable(sock, NL_NONROOT_SEND))
 			goto out;
 	} else {
 		dst_pid = nlk->dst_pid;
@@ -2147,6 +2148,7 @@ static void __init netlink_add_usersock_entry(void)
 	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
 	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
 	nl_table[NETLINK_USERSOCK].registered = 1;
+	nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
 
 	netlink_table_ungrab();
 }
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index aee7196aac36..c5c9e2a54218 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1273,7 +1273,7 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
 	spin_unlock(&f->lock);
 }
 
-bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
+static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
 {
 	if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
 		return true;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 87cd0e4d4282..210be48d8ae3 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1994,8 +1994,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
 		goto error;
 
 	x->outer_mode = xfrm_get_mode(x->props.mode, family);
-	if (x->outer_mode == NULL)
+	if (x->outer_mode == NULL) {
+		err = -EPROTONOSUPPORT;
 		goto error;
+	}
 
 	if (init_replay) {
 		err = xfrm_init_replay(x);