author		Linus Torvalds <torvalds@linux-foundation.org>	2015-03-09 21:17:21 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-03-09 21:17:21 -0400
commit		36bef88380037288d5b575ed2029de694533b1ec (patch)
tree		b1e657eec07b8049ff5c966db208f0a3241e963f /net
parent		e93df634aac6b6dccaa2c23a5a5a504ed502b97e (diff)
parent		e6441bae326271090755e1707196ad05aa1dc703 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) nft_compat accidentally truncates ethernet protocol to 8-bits, from
    Arturo Borrero.

 2) Memory leak in ip_vs_proc_conn(), from Julian Anastasov.

 3) Don't allow the space required for nftables rules to exceed the
    maximum value representable in the dlen field.  From Patrick McHardy.

 4) bcm63xx_enet can accidentally leave interrupts permanently disabled
    due to errors in the NAPI polling exit logic.  Fix from Nicolas
    Schichan.

 5) Fix OOPSes triggerable by the ping protocol module, due to missing
    address family validations etc.  From Lorenzo Colitti.

 6) Don't use RCU locking in sleepable context in team driver, from
    Jiri Pirko.

 7) xen-netback miscalculates statistic offset pointers when reporting
    the stats to userspace.  From David Vrabel.

 8) Fix a leak of up to 256 pages per VIF destroy in xen-netback, also
    from David Vrabel.

 9) ip_check_defrag() cannot assume that skb_network_offset() is zero,
    particularly when it is used by the AF_PACKET fanout defrag code.
    From Alexander Drozdov.

10) gianfar driver doesn't query OF node names properly when trying to
    determine the number of hw queues available.  Fix it to explicitly
    check for OF nodes named queue-group.  From Tobias Waldekranz.

11) MID field in macb driver should be 12 bits, not 16.  From Punnaiah
    Choudary Kalluri.

12) Fix unintentional regression in traceroute due to timestamp socket
    option changes.  Empty ICMP payloads should be allowed in
    non-timestamp cases.  From Willem de Bruijn.

13) When a device is unregistered, we have to get rid of the AF_PACKET
    multicast list entries that point to it via ifindex.  Fix from
    Francesco Ruggeri.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (38 commits)
  tipc: fix bug in link failover handling
  net: delete stale packet_mclist entries
  net: macb: constify macb configuration data
  MAINTAINERS: add Marc Kleine-Budde as co maintainer for CAN networking layer
  MAINTAINERS: linux-can moved to github
  can: kvaser_usb: Read all messages in a bulk-in URB buffer
  can: kvaser_usb: Avoid double free on URB submission failures
  can: peak_usb: fix missing ctrlmode_ init for every dev
  can: add missing initialisations in CAN related skbuffs
  ip: fix error queue empty skb handling
  bgmac: Clean warning messages
  tcp: align tcp_xmit_size_goal() on tcp_tso_autosize()
  net: fec: fix unbalanced clk disable on driver unbind
  net: macb: Correct the MID field length value
  net: gianfar: correctly determine the number of queue groups
  ipv4: ip_check_defrag should not assume that skb_network_offset is zero
  net: bcmgenet: properly disable password matching
  net: eth: xgene: fix booting with devicetree
  bnx2x: Force fundamental reset for EEH recovery
  xen-netback: refactor xenvif_handle_frag_list()
  ...
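Fix 1 above is easy to reproduce outside the kernel. The minimal userspace sketch below (not kernel code; everything here is illustrative) shows why an 8-bit variable cannot carry an ethernet protocol number, which is what the nft_compat hunks further down widen from u8 to u16:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t ethproto = 0x86DD;	/* ETH_P_IPV6 */
	uint8_t truncated = (uint8_t)ethproto;	/* the old u8 parameter */

	/* prints "full: 0x86DD  truncated: 0xDD" */
	printf("full: 0x%04X  truncated: 0x%02X\n", ethproto, truncated);
	return 0;
}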
Diffstat (limited to 'net')
-rw-r--r--  net/can/af_can.c                 |  3
-rw-r--r--  net/ipv4/ip_fragment.c           | 11
-rw-r--r--  net/ipv4/ip_sockglue.c           | 33
-rw-r--r--  net/ipv4/ping.c                  | 12
-rw-r--r--  net/ipv4/tcp.c                   | 10
-rw-r--r--  net/ipv6/datagram.c              | 39
-rw-r--r--  net/ipv6/ping.c                  |  5
-rw-r--r--  net/netfilter/ipvs/ip_vs_sync.c  |  3
-rw-r--r--  net/netfilter/nf_tables_api.c    | 61
-rw-r--r--  net/netfilter/nft_compat.c       | 14
-rw-r--r--  net/packet/af_packet.c           | 22
-rw-r--r--  net/rxrpc/ar-error.c             |  4
-rw-r--r--  net/tipc/link.c                  |  7
13 files changed, 143 insertions(+), 81 deletions(-)
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 66e08040ced7..32d710eaf1fc 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -259,6 +259,9 @@ int can_send(struct sk_buff *skb, int loop)
 		goto inval_skb;
 	}
 
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
 	skb_reset_transport_header(skb);
 
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 2c8d98e728c0..145a50c4d566 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -659,27 +659,30 @@ EXPORT_SYMBOL(ip_defrag);
 struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
 {
 	struct iphdr iph;
+	int netoff;
 	u32 len;
 
 	if (skb->protocol != htons(ETH_P_IP))
 		return skb;
 
-	if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0)
+	netoff = skb_network_offset(skb);
+
+	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
 		return skb;
 
 	if (iph.ihl < 5 || iph.version != 4)
 		return skb;
 
 	len = ntohs(iph.tot_len);
-	if (skb->len < len || len < (iph.ihl * 4))
+	if (skb->len < netoff + len || len < (iph.ihl * 4))
 		return skb;
 
 	if (ip_is_fragment(&iph)) {
 		skb = skb_share_check(skb, GFP_ATOMIC);
 		if (skb) {
-			if (!pskb_may_pull(skb, iph.ihl*4))
+			if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
 				return skb;
-			if (pskb_trim_rcsum(skb, len))
+			if (pskb_trim_rcsum(skb, netoff + len))
 				return skb;
 			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
 			if (ip_defrag(skb, user))
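The ip_check_defrag() hunk above boils down to one rule: once the IPv4 header can start at a non-zero offset inside the frame (as with AF_PACKET fanout, where the link-layer header is still in front), every length check must add that offset. A hypothetical standalone sketch of the same bounds check, not kernel code:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Does the buffer hold the whole IPv4 packet whose header starts at netoff? */
static int holds_whole_packet(const uint8_t *frame, size_t frame_len,
			      size_t netoff)
{
	uint16_t tot_len;

	if (frame_len < netoff + 20)		/* minimal IPv4 header */
		return 0;
	/* iph.tot_len is bytes 2-3 of the header, network byte order */
	tot_len = (uint16_t)((frame[netoff + 2] << 8) | frame[netoff + 3]);
	/* Buggy form: frame_len < tot_len.  Fixed form adds netoff. */
	return frame_len >= netoff + tot_len;
}

int main(void)
{
	uint8_t frame[54] = {0};		/* 14-byte Ethernet + 40-byte IP */

	frame[14] = 0x45;			/* version 4, ihl 5 */
	frame[16] = 0x00; frame[17] = 40;	/* tot_len = 40 */
	printf("%d\n", holds_whole_packet(frame, sizeof(frame), 14));	/* 1 */
	printf("%d\n", holds_whole_packet(frame, 50, 14));		/* 0 */
	return 0;
}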
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 31d8c71986b4..5cd99271d3a6 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -432,17 +432,32 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
 	kfree_skb(skb);
 }
 
-static bool ipv4_pktinfo_prepare_errqueue(const struct sock *sk,
-					   const struct sk_buff *skb,
-					   int ee_origin)
+/* IPv4 supports cmsg on all imcp errors and some timestamps
+ *
+ * Timestamp code paths do not initialize the fields expected by cmsg:
+ * the PKTINFO fields in skb->cb[]. Fill those in here.
+ */
+static bool ipv4_datagram_support_cmsg(const struct sock *sk,
+				       struct sk_buff *skb,
+				       int ee_origin)
 {
-	struct in_pktinfo *info = PKTINFO_SKB_CB(skb);
+	struct in_pktinfo *info;
+
+	if (ee_origin == SO_EE_ORIGIN_ICMP)
+		return true;
 
-	if ((ee_origin != SO_EE_ORIGIN_TIMESTAMPING) ||
-	    (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
+	if (ee_origin == SO_EE_ORIGIN_LOCAL)
+		return false;
+
+	/* Support IP_PKTINFO on tstamp packets if requested, to correlate
+	 * timestamp with egress dev. Not possible for packets without dev
+	 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
+	 */
+	if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
 	    (!skb->dev))
 		return false;
 
+	info = PKTINFO_SKB_CB(skb);
 	info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
 	info->ipi_ifindex = skb->dev->ifindex;
 	return true;
@@ -483,7 +498,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 
 	serr = SKB_EXT_ERR(skb);
 
-	if (sin && skb->len) {
+	if (sin && serr->port) {
 		sin->sin_family = AF_INET;
 		sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
 						   serr->addr_offset);
@@ -496,9 +511,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 	sin = &errhdr.offender;
 	memset(sin, 0, sizeof(*sin));
 
-	if (skb->len &&
-	    (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
-	     ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin))) {
+	if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
 		sin->sin_family = AF_INET;
 		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
 		if (inet_sk(sk)->cmsg_flags)
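The ip_recv_error() changes above matter to userspace consumers of the socket error queue, such as traceroute. A hedged sketch of that userspace side using the standard IP_RECVERR/MSG_ERRQUEUE API (the socket is assumed to already have IP_RECVERR enabled via setsockopt(fd, SOL_IP, IP_RECVERR, ...)):

#include <netinet/in.h>
#include <linux/errqueue.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Drain one entry from the error queue and report any ICMP error in it. */
static void drain_errqueue(int fd)
{
	char data[512], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
		return;		/* nothing queued */

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_IP && cm->cmsg_type == IP_RECVERR) {
			struct sock_extended_err ee;

			memcpy(&ee, CMSG_DATA(cm), sizeof(ee));
			if (ee.ee_origin == SO_EE_ORIGIN_ICMP)
				printf("ICMP type %d code %d\n",
				       ee.ee_type, ee.ee_code);
		}
	}
}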
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index e9f66e1cda50..208d5439e59b 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -259,6 +259,9 @@ int ping_init_sock(struct sock *sk)
 	kgid_t low, high;
 	int ret = 0;
 
+	if (sk->sk_family == AF_INET6)
+		sk->sk_ipv6only = 1;
+
 	inet_get_ping_group_range_net(net, &low, &high);
 	if (gid_lte(low, group) && gid_lte(group, high))
 		return 0;
@@ -305,6 +308,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
 		if (addr_len < sizeof(*addr))
 			return -EINVAL;
 
+		if (addr->sin_family != AF_INET &&
+		    !(addr->sin_family == AF_UNSPEC &&
+		      addr->sin_addr.s_addr == htonl(INADDR_ANY)))
+			return -EAFNOSUPPORT;
+
 		pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
 			 sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
 
@@ -330,7 +338,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
 			return -EINVAL;
 
 		if (addr->sin6_family != AF_INET6)
-			return -EINVAL;
+			return -EAFNOSUPPORT;
 
 		pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
 			 sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
@@ -716,7 +724,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 		if (msg->msg_namelen < sizeof(*usin))
 			return -EINVAL;
 		if (usin->sin_family != AF_INET)
-			return -EINVAL;
+			return -EAFNOSUPPORT;
 		daddr = usin->sin_addr.s_addr;
 		/* no remote port */
 	} else {
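The EAFNOSUPPORT changes above are visible from userspace through the unprivileged ICMP ("ping") socket. A rough sketch, assuming net.ipv4.ping_group_range permits the caller; after this patch a mismatched address family on sendto should be reported as EAFNOSUPPORT rather than EINVAL:

#include <errno.h>
#include <netinet/in.h>
#include <netinet/ip_icmp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
	struct sockaddr_in6 wrong = { .sin6_family = AF_INET6 };
	struct icmphdr icmp = { .type = ICMP_ECHO };

	if (fd < 0) {
		perror("socket");	/* likely EACCES without the group range */
		return 1;
	}
	/* AF_INET ping socket, AF_INET6 destination: family check fails first */
	if (sendto(fd, &icmp, sizeof(icmp), 0,
		   (struct sockaddr *)&wrong, sizeof(wrong)) < 0)
		printf("sendto: %s\n", strerror(errno));
	return 0;
}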
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9d72a0fcd928..995a2259bcfc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -835,17 +835,13 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 				       int large_allowed)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	u32 new_size_goal, size_goal, hlen;
+	u32 new_size_goal, size_goal;
 
 	if (!large_allowed || !sk_can_gso(sk))
 		return mss_now;
 
-	/* Maybe we should/could use sk->sk_prot->max_header here ? */
-	hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
-	       inet_csk(sk)->icsk_ext_hdr_len +
-	       tp->tcp_header_len;
-
-	new_size_goal = sk->sk_gso_max_size - 1 - hlen;
+	/* Note : tcp_tso_autosize() will eventually split this later */
+	new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
 	new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);
 
 	/* We try hard to avoid divides here */
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index c215be70cac0..ace8daca5c83 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -325,14 +325,34 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
 	kfree_skb(skb);
 }
 
-static void ip6_datagram_prepare_pktinfo_errqueue(struct sk_buff *skb)
+/* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL.
+ *
+ * At one point, excluding local errors was a quick test to identify icmp/icmp6
+ * errors. This is no longer true, but the test remained, so the v6 stack,
+ * unlike v4, also honors cmsg requests on all wifi and timestamp errors.
+ *
+ * Timestamp code paths do not initialize the fields expected by cmsg:
+ * the PKTINFO fields in skb->cb[]. Fill those in here.
+ */
+static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
+				      struct sock_exterr_skb *serr)
 {
-	int ifindex = skb->dev ? skb->dev->ifindex : -1;
+	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
+	    serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6)
+		return true;
+
+	if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
+		return false;
+
+	if (!skb->dev)
+		return false;
 
 	if (skb->protocol == htons(ETH_P_IPV6))
-		IP6CB(skb)->iif = ifindex;
+		IP6CB(skb)->iif = skb->dev->ifindex;
 	else
-		PKTINFO_SKB_CB(skb)->ipi_ifindex = ifindex;
+		PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
+
+	return true;
 }
 
 /*
@@ -369,7 +389,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 
 	serr = SKB_EXT_ERR(skb);
 
-	if (sin && skb->len) {
+	if (sin && serr->port) {
 		const unsigned char *nh = skb_network_header(skb);
 		sin->sin6_family = AF_INET6;
 		sin->sin6_flowinfo = 0;
@@ -394,14 +414,11 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
 	sin = &errhdr.offender;
 	memset(sin, 0, sizeof(*sin));
-	if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL && skb->len) {
+
+	if (ip6_datagram_support_cmsg(skb, serr)) {
 		sin->sin6_family = AF_INET6;
-		if (np->rxopt.all) {
-			if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP &&
-			    serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6)
-				ip6_datagram_prepare_pktinfo_errqueue(skb);
+		if (np->rxopt.all)
 			ip6_datagram_recv_common_ctl(sk, msg, skb);
-		}
 		if (skb->protocol == htons(ETH_P_IPV6)) {
 			sin->sin6_addr = ipv6_hdr(skb)->saddr;
 			if (np->rxopt.all)
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index bd46f736f61d..a2dfff6ff227 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -102,9 +102,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
 	if (msg->msg_name) {
 		DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name);
-		if (msg->msg_namelen < sizeof(struct sockaddr_in6) ||
-		    u->sin6_family != AF_INET6) {
+		if (msg->msg_namelen < sizeof(*u))
 			return -EINVAL;
+		if (u->sin6_family != AF_INET6) {
+			return -EAFNOSUPPORT;
 		}
 		if (sk->sk_bound_dev_if &&
 		    sk->sk_bound_dev_if != u->sin6_scope_id) {
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index c47ffd7a0a70..d93ceeb3ef04 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -896,6 +896,8 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
 			IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
 			return;
 		}
+		if (!(flags & IP_VS_CONN_F_TEMPLATE))
+			kfree(param->pe_data);
 	}
 
 	if (opt)
@@ -1169,6 +1171,7 @@ static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end)
 				      (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
 				      );
 #endif
+	ip_vs_pe_put(param.pe);
 	return 0;
 	/* Error exit */
 out:
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 199fd0f27b0e..6ab777912237 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -227,7 +227,7 @@ nft_rule_deactivate_next(struct net *net, struct nft_rule *rule)
 
 static inline void nft_rule_clear(struct net *net, struct nft_rule *rule)
 {
-	rule->genmask = 0;
+	rule->genmask &= ~(1 << gencursor_next(net));
 }
 
 static int
@@ -1711,9 +1711,12 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
 	}
 	nla_nest_end(skb, list);
 
-	if (rule->ulen &&
-	    nla_put(skb, NFTA_RULE_USERDATA, rule->ulen, nft_userdata(rule)))
-		goto nla_put_failure;
+	if (rule->udata) {
+		struct nft_userdata *udata = nft_userdata(rule);
+		if (nla_put(skb, NFTA_RULE_USERDATA, udata->len + 1,
+			    udata->data) < 0)
+			goto nla_put_failure;
+	}
 
 	nlmsg_end(skb, nlh);
 	return 0;
@@ -1896,11 +1899,12 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 	struct nft_table *table;
 	struct nft_chain *chain;
 	struct nft_rule *rule, *old_rule = NULL;
+	struct nft_userdata *udata;
 	struct nft_trans *trans = NULL;
 	struct nft_expr *expr;
 	struct nft_ctx ctx;
 	struct nlattr *tmp;
-	unsigned int size, i, n, ulen = 0;
+	unsigned int size, i, n, ulen = 0, usize = 0;
 	int err, rem;
 	bool create;
 	u64 handle, pos_handle;
@@ -1968,12 +1972,19 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 			n++;
 		}
 	}
+	/* Check for overflow of dlen field */
+	err = -EFBIG;
+	if (size >= 1 << 12)
+		goto err1;
 
-	if (nla[NFTA_RULE_USERDATA])
+	if (nla[NFTA_RULE_USERDATA]) {
 		ulen = nla_len(nla[NFTA_RULE_USERDATA]);
+		if (ulen > 0)
+			usize = sizeof(struct nft_userdata) + ulen;
+	}
 
 	err = -ENOMEM;
-	rule = kzalloc(sizeof(*rule) + size + ulen, GFP_KERNEL);
+	rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL);
 	if (rule == NULL)
 		goto err1;
 
@@ -1981,10 +1992,13 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 
 	rule->handle = handle;
 	rule->dlen = size;
-	rule->ulen = ulen;
+	rule->udata = ulen ? 1 : 0;
 
-	if (ulen)
-		nla_memcpy(nft_userdata(rule), nla[NFTA_RULE_USERDATA], ulen);
+	if (ulen) {
+		udata = nft_userdata(rule);
+		udata->len = ulen - 1;
+		nla_memcpy(udata->data, nla[NFTA_RULE_USERDATA], ulen);
+	}
 
 	expr = nft_expr_first(rule);
 	for (i = 0; i < n; i++) {
@@ -2031,12 +2045,6 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 
 err3:
 	list_del_rcu(&rule->list);
-	if (trans) {
-		list_del_rcu(&nft_trans_rule(trans)->list);
-		nft_rule_clear(net, nft_trans_rule(trans));
-		nft_trans_destroy(trans);
-		chain->use++;
-	}
err2:
 	nf_tables_rule_destroy(&ctx, rule);
err1:
@@ -3612,12 +3620,11 @@ static int nf_tables_commit(struct sk_buff *skb)
 							 &te->elem,
 							 NFT_MSG_DELSETELEM, 0);
 			te->set->ops->get(te->set, &te->elem);
-			te->set->ops->remove(te->set, &te->elem);
 			nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
-			if (te->elem.flags & NFT_SET_MAP) {
-				nft_data_uninit(&te->elem.data,
-						te->set->dtype);
-			}
+			if (te->set->flags & NFT_SET_MAP &&
+			    !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
+				nft_data_uninit(&te->elem.data, te->set->dtype);
+			te->set->ops->remove(te->set, &te->elem);
 			nft_trans_destroy(trans);
 			break;
 		}
@@ -3658,7 +3665,7 @@ static int nf_tables_abort(struct sk_buff *skb)
 {
 	struct net *net = sock_net(skb->sk);
 	struct nft_trans *trans, *next;
-	struct nft_set *set;
+	struct nft_trans_elem *te;
 
 	list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
 		switch (trans->msg_type) {
@@ -3719,9 +3726,13 @@ static int nf_tables_abort(struct sk_buff *skb)
 			break;
 		case NFT_MSG_NEWSETELEM:
 			nft_trans_elem_set(trans)->nelems--;
-			set = nft_trans_elem_set(trans);
-			set->ops->get(set, &nft_trans_elem(trans));
-			set->ops->remove(set, &nft_trans_elem(trans));
+			te = (struct nft_trans_elem *)trans->data;
+			te->set->ops->get(te->set, &te->elem);
+			nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
+			if (te->set->flags & NFT_SET_MAP &&
+			    !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
+				nft_data_uninit(&te->elem.data, te->set->dtype);
+			te->set->ops->remove(te->set, &te->elem);
 			nft_trans_destroy(trans);
 			break;
 		case NFT_MSG_DELSETELEM:
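Two size constraints drive the nf_tables hunks above: rule->dlen is a 12-bit field, so expression data of 4096 bytes or more must be rejected (-EFBIG), and the new struct nft_userdata stores its length as len - 1, so a single byte can describe 1..256 bytes of userdata. An illustrative standalone sketch with made-up type names, not the actual kernel layout:

#include <stdint.h>
#include <stdio.h>

struct toy_rule {
	unsigned int dlen : 12;	/* expression data length, 0..4095 */
	unsigned int udata : 1;	/* userdata present? */
};

struct toy_userdata {
	uint8_t len;		/* stored as real length - 1 */
	uint8_t data[];
};

static int store_dlen(struct toy_rule *r, unsigned int size)
{
	if (size >= 1u << 12)	/* would overflow the 12-bit field */
		return -1;	/* -EFBIG in the kernel patch */
	r->dlen = size;
	return 0;
}

int main(void)
{
	struct toy_rule r = {0};

	printf("%d\n", store_dlen(&r, 4095));	/* fits: 0 */
	printf("%d\n", store_dlen(&r, 4096));	/* rejected: -1 */
	printf("max userdata with len-1 encoding: %d bytes\n", 255 + 1);
	return 0;
}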
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 1279cd85663e..213584cf04b3 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -123,7 +123,7 @@ static void
 nft_target_set_tgchk_param(struct xt_tgchk_param *par,
 			   const struct nft_ctx *ctx,
 			   struct xt_target *target, void *info,
-			   union nft_entry *entry, u8 proto, bool inv)
+			   union nft_entry *entry, u16 proto, bool inv)
 {
 	par->net = ctx->net;
 	par->table = ctx->table->name;
@@ -137,7 +137,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
 		entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
 		break;
 	case NFPROTO_BRIDGE:
-		entry->ebt.ethproto = proto;
+		entry->ebt.ethproto = (__force __be16)proto;
 		entry->ebt.invflags = inv ? EBT_IPROTO : 0;
 		break;
 	}
@@ -171,7 +171,7 @@ static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1]
 	[NFTA_RULE_COMPAT_FLAGS] = { .type = NLA_U32 },
 };
 
-static int nft_parse_compat(const struct nlattr *attr, u8 *proto, bool *inv)
+static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
 {
 	struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
 	u32 flags;
@@ -203,7 +203,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 	struct xt_target *target = expr->ops->data;
 	struct xt_tgchk_param par;
 	size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
-	u8 proto = 0;
+	u16 proto = 0;
 	bool inv = false;
 	union nft_entry e = {};
 	int ret;
@@ -334,7 +334,7 @@ static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
 static void
 nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
 			  struct xt_match *match, void *info,
-			  union nft_entry *entry, u8 proto, bool inv)
+			  union nft_entry *entry, u16 proto, bool inv)
 {
 	par->net = ctx->net;
 	par->table = ctx->table->name;
@@ -348,7 +348,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
 		entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
 		break;
 	case NFPROTO_BRIDGE:
-		entry->ebt.ethproto = proto;
+		entry->ebt.ethproto = (__force __be16)proto;
 		entry->ebt.invflags = inv ? EBT_IPROTO : 0;
 		break;
 	}
@@ -385,7 +385,7 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 	struct xt_match *match = expr->ops->data;
 	struct xt_mtchk_param par;
 	size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
-	u8 proto = 0;
+	u16 proto = 0;
 	bool inv = false;
 	union nft_entry e = {};
 	int ret;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 5bf1e968a728..f8db7064d81c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3123,11 +3123,18 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
 	return 0;
 }
 
-static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
+static void packet_dev_mclist_delete(struct net_device *dev,
+				     struct packet_mclist **mlp)
 {
-	for ( ; i; i = i->next) {
-		if (i->ifindex == dev->ifindex)
-			packet_dev_mc(dev, i, what);
+	struct packet_mclist *ml;
+
+	while ((ml = *mlp) != NULL) {
+		if (ml->ifindex == dev->ifindex) {
+			packet_dev_mc(dev, ml, -1);
+			*mlp = ml->next;
+			kfree(ml);
+		} else
+			mlp = &ml->next;
 	}
 }
 
@@ -3204,12 +3211,11 @@ static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
 					packet_dev_mc(dev, ml, -1);
 				kfree(ml);
 			}
-			rtnl_unlock();
-			return 0;
+			break;
 		}
 	}
 	rtnl_unlock();
-	return -EADDRNOTAVAIL;
+	return 0;
 }
 
 static void packet_flush_mclist(struct sock *sk)
@@ -3559,7 +3565,7 @@ static int packet_notifier(struct notifier_block *this,
 	switch (msg) {
 	case NETDEV_UNREGISTER:
 		if (po->mclist)
-			packet_dev_mclist(dev, po->mclist, -1);
+			packet_dev_mclist_delete(dev, &po->mclist);
 		/* fallthrough */
 
 	case NETDEV_DOWN:
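The new packet_dev_mclist_delete() above uses the classic pointer-to-pointer idiom: walking the list through the "next" slot itself lets matching entries be unlinked and freed in one pass, without a separate "prev" pointer. A self-contained illustration of the same idiom with made-up types:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int ifindex;
	struct node *next;
};

static void delete_matching(struct node **head, int ifindex)
{
	struct node **pp = head, *n;

	while ((n = *pp) != NULL) {
		if (n->ifindex == ifindex) {
			*pp = n->next;	/* unlink */
			free(n);
		} else {
			pp = &n->next;	/* advance the slot we may rewrite */
		}
	}
}

int main(void)
{
	struct node *head = NULL;
	int vals[] = { 3, 7, 3, 9 };

	for (int i = 3; i >= 0; i--) {	/* build 3 -> 7 -> 3 -> 9 */
		struct node *n = malloc(sizeof(*n));
		n->ifindex = vals[i];
		n->next = head;
		head = n;
	}
	delete_matching(&head, 3);
	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->ifindex);	/* prints: 7 9 */
	printf("\n");
	return 0;
}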
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
index 5394b6be46ec..0610efa83d72 100644
--- a/net/rxrpc/ar-error.c
+++ b/net/rxrpc/ar-error.c
@@ -42,7 +42,8 @@ void rxrpc_UDP_error_report(struct sock *sk)
 		_leave("UDP socket errqueue empty");
 		return;
 	}
-	if (!skb->len) {
+	serr = SKB_EXT_ERR(skb);
+	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
 		_leave("UDP empty message");
 		kfree_skb(skb);
 		return;
@@ -50,7 +51,6 @@ void rxrpc_UDP_error_report(struct sock *sk)
 
 	rxrpc_new_skb(skb);
 
-	serr = SKB_EXT_ERR(skb);
 	addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset);
 	port = serr->port;
 
diff --git a/net/tipc/link.c b/net/tipc/link.c
index a4cf364316de..14f09b3cb87c 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -464,10 +464,11 @@ void tipc_link_reset(struct tipc_link *l_ptr)
 	/* Clean up all queues, except inputq: */
 	__skb_queue_purge(&l_ptr->outqueue);
 	__skb_queue_purge(&l_ptr->deferred_queue);
-	skb_queue_splice_init(&l_ptr->wakeupq, &l_ptr->inputq);
-	if (!skb_queue_empty(&l_ptr->inputq))
+	if (!owner->inputq)
+		owner->inputq = &l_ptr->inputq;
+	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
+	if (!skb_queue_empty(owner->inputq))
 		owner->action_flags |= TIPC_MSG_EVT;
-	owner->inputq = &l_ptr->inputq;
 	l_ptr->next_out = NULL;
 	l_ptr->unacked_window = 0;
 	l_ptr->checkpoint = 1;