author    Linus Torvalds <torvalds@linux-foundation.org>  2016-12-02 14:45:27 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-12-02 14:45:27 -0500
commit    8bca927f13bc1cebe23a3709af6ce3016400f7ac (patch)
tree      d602bde61c8dfd26d2716b1c68f04d04a9a6b7d8 /net
parent    ed8d747fd2b9d9204762ca6ab8c843c72c42cc41 (diff)
parent    b98b0bc8c431e3ceb4b26b0dfc8db509518fb290 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Lots more phydev and probe error path leaks in various drivers by
    Johan Hovold.

 2) Fix race in packet_set_ring(), from Philip Pettersson.

 3) Use after free in dccp_invalid_packet(), from Eric Dumazet.

 4) Signedness overflow in SO_{SND,RCV}BUFFORCE, also from Eric Dumazet.

 5) When tunneling between ipv4 and ipv6 we can be left with the wrong
    skb->protocol value as we enter the IPSEC engine and this causes all
    kinds of problems. Set it before the output path does any
    dst_output() calls, from Eli Cooper.

 6) bcmgenet uses wrong device struct pointer in DMA API calls, fix from
    Florian Fainelli.

 7) Various netfilter nat bug fixes from Florian Westphal.

 8) Fix memory leak in ipvlan_link_new(), from Gao Feng.

 9) Locking fixes, particularly wrt. socket lookups, in l2tp from
    Guillaume Nault.

10) Avoid invoking rhash teardowns in atomic context by moving netlink
    cb->done() dump completion to a worker thread. Fix from Herbert Xu.

11) Buffer refcount problems in tun and macvtap on errors, from Jason
    Wang.

12) We don't set the Kconfig symbol DEFAULT_TCP_CONG properly when the
    user selects BBR. Fix from Julian Wollrath.

13) Fix deadlock in transmit path on altera TSE driver, from Lino
    Sanfilippo.

14) Fix unbalanced reference counting in dsa_switch_tree, from Nikita
    Yushchenko.

15) tc_tunnel_key needs to be properly exported to userspace via uapi,
    fix from Roi Dayan.

16) rds_tcp_init_net() doesn't unregister notifier in error path, fix
    from Sowmini Varadhan.

17) Stale packet header pointer access after pskb_expand_head() in
    geneve driver, fix from Sabrina Dubroca.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (103 commits)
  net: avoid signed overflows for SO_{SND|RCV}BUFFORCE
  geneve: avoid use-after-free of skb->data
  tipc: check minimum bearer MTU
  net: renesas: ravb: unintialized return value
  sh_eth: remove unchecked interrupts for RZ/A1
  net: bcmgenet: Utilize correct struct device for all DMA operations
  NET: usb: qmi_wwan: add support for Telit LE922A PID 0x1040
  cdc_ether: Fix handling connection notification
  ip6_offload: check segs for NULL in ipv6_gso_segment.
  RDS: TCP: unregister_netdevice_notifier() in error path of rds_tcp_init_net
  Revert: "ip6_tunnel: Update skb->protocol to ETH_P_IPV6 in ip6_tnl_xmit()"
  ipv6: Set skb->protocol properly for local output
  ipv4: Set skb->protocol properly for local output
  packet: fix race condition in packet_set_ring
  net: ethernet: altera: TSE: do not use tx queue lock in tx completion handler
  net: ethernet: altera: TSE: Remove unneeded dma sync for tx buffers
  net: ethernet: stmmac: fix of-node and fixed-link-phydev leaks
  net: ethernet: stmmac: platform: fix outdated function header
  net: ethernet: stmmac: dwmac-meson8b: fix probe error path
  net: ethernet: stmmac: dwmac-generic: fix probe error path
  ...
Diffstat (limited to 'net')
-rw-r--r-- net/core/flow.c 6
-rw-r--r-- net/core/rtnetlink.c 4
-rw-r--r-- net/core/sock.c 4
-rw-r--r-- net/dccp/ipv4.c 12
-rw-r--r-- net/dsa/dsa.c 13
-rw-r--r-- net/dsa/dsa2.c 4
-rw-r--r-- net/dsa/slave.c 19
-rw-r--r-- net/ipv4/Kconfig 1
-rw-r--r-- net/ipv4/af_inet.c 2
-rw-r--r-- net/ipv4/esp4.c 2
-rw-r--r-- net/ipv4/ip_output.c 2
-rw-r--r-- net/ipv4/netfilter.c 5
-rw-r--r-- net/ipv4/netfilter/arp_tables.c 4
-rw-r--r-- net/ipv6/datagram.c 4
-rw-r--r-- net/ipv6/esp6.c 2
-rw-r--r-- net/ipv6/icmp.c 6
-rw-r--r-- net/ipv6/ip6_offload.c 2
-rw-r--r-- net/ipv6/ip6_tunnel.c 1
-rw-r--r-- net/ipv6/ip6_vti.c 31
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_reasm.c 4
-rw-r--r-- net/ipv6/netfilter/nf_defrag_ipv6_hooks.c 2
-rw-r--r-- net/ipv6/netfilter/nf_reject_ipv6.c 1
-rw-r--r-- net/ipv6/output_core.c 2
-rw-r--r-- net/l2tp/l2tp_ip.c 63
-rw-r--r-- net/l2tp/l2tp_ip6.c 79
-rw-r--r-- net/netfilter/nf_nat_core.c 49
-rw-r--r-- net/netfilter/nf_tables_api.c 14
-rw-r--r-- net/netfilter/nft_hash.c 7
-rw-r--r-- net/netfilter/nft_range.c 6
-rw-r--r-- net/netlink/af_netlink.c 27
-rw-r--r-- net/netlink/af_netlink.h 2
-rw-r--r-- net/openvswitch/conntrack.c 5
-rw-r--r-- net/packet/af_packet.c 18
-rw-r--r-- net/rds/tcp.c 2
-rw-r--r-- net/sched/act_pedit.c 24
-rw-r--r-- net/sched/cls_basic.c 4
-rw-r--r-- net/sched/cls_bpf.c 4
-rw-r--r-- net/sched/cls_cgroup.c 7
-rw-r--r-- net/sched/cls_flow.c 1
-rw-r--r-- net/sched/cls_flower.c 41
-rw-r--r-- net/sched/cls_matchall.c 1
-rw-r--r-- net/sched/cls_rsvp.h 3
-rw-r--r-- net/sched/cls_tcindex.c 1
-rw-r--r-- net/tipc/bearer.c 11
-rw-r--r-- net/tipc/bearer.h 13
-rw-r--r-- net/tipc/link.c 35
-rw-r--r-- net/tipc/udp_media.c 5
-rw-r--r-- net/xfrm/xfrm_policy.c 10
-rw-r--r-- net/xfrm/xfrm_user.c 2
49 files changed, 372 insertions, 195 deletions
diff --git a/net/core/flow.c b/net/core/flow.c
index 3937b1b68d5b..18e8893d4be5 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -95,7 +95,6 @@ static void flow_cache_gc_task(struct work_struct *work)
     list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
         flow_entry_kill(fce, xfrm);
         atomic_dec(&xfrm->flow_cache_gc_count);
-        WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0);
     }
 }
 
@@ -236,9 +235,8 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
     if (fcp->hash_count > fc->high_watermark)
         flow_cache_shrink(fc, fcp);
 
-    if (fcp->hash_count > 2 * fc->high_watermark ||
-        atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) {
-        atomic_inc(&net->xfrm.flow_cache_genid);
+    if (atomic_read(&net->xfrm.flow_cache_gc_count) >
+        2 * num_online_cpus() * fc->high_watermark) {
         flo = ERR_PTR(-ENOBUFS);
         goto ret_object;
     }
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index deb35acbefd0..a6196cf844f6 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -931,8 +931,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
        + nla_total_size(4) /* IFLA_PROMISCUITY */
        + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
        + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
-       + nla_total_size(4) /* IFLA_MAX_GSO_SEGS */
-       + nla_total_size(4) /* IFLA_MAX_GSO_SIZE */
+       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
+       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
        + nla_total_size(1) /* IFLA_OPERSTATE */
        + nla_total_size(1) /* IFLA_LINKMODE */
        + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
diff --git a/net/core/sock.c b/net/core/sock.c
index 5e3ca414357e..00a074dbfe9b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -715,7 +715,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
         val = min_t(u32, val, sysctl_wmem_max);
 set_sndbuf:
         sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-        sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
+        sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
         /* Wake up sending tasks if we upped the value. */
         sk->sk_write_space(sk);
         break;
@@ -751,7 +751,7 @@ set_rcvbuf:
          * returning the value we actually used in getsockopt
          * is the most desirable behavior.
          */
-        sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
+        sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
         break;
 
     case SO_RCVBUFFORCE:
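For context on the two sock.c hunks above: SO_{SND,RCV}BUFFORCE accepts values up to INT_MAX, so val * 2 can wrap, and with an unsigned max_t the wrapped value still beats the minimum and a negative number lands in the int-typed sk_sndbuf. A minimal userspace sketch of the difference (not kernel code; the SOCK_MIN_SNDBUF value below is illustrative, and the unsigned arithmetic mimics the kernel's -fno-strict-overflow build):

    #include <stdio.h>

    #define SOCK_MIN_SNDBUF 4608    /* illustrative value only */

    static int clamp_u32(int val)   /* old max_t(u32, val * 2, min) */
    {
        unsigned int v = (unsigned int)val * 2u;
        return (int)(v > SOCK_MIN_SNDBUF ? v : SOCK_MIN_SNDBUF);
    }

    static int clamp_int(int val)   /* new max_t(int, val * 2, min) */
    {
        int v = (int)((unsigned int)val * 2u); /* wraps negative */
        return v > SOCK_MIN_SNDBUF ? v : SOCK_MIN_SNDBUF;
    }

    int main(void)
    {
        int val = 0x7fffffff;  /* SO_SNDBUFFORCE is not capped by wmem_max */

        printf("u32 clamp: %d\n", clamp_u32(val)); /* -2: negative buffer size */
        printf("int clamp: %d\n", clamp_int(val)); /* 4608: clamped to minimum */
        return 0;
    }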
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index b567c8725aea..edbe59d203ef 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -700,6 +700,7 @@ int dccp_invalid_packet(struct sk_buff *skb)
 {
     const struct dccp_hdr *dh;
     unsigned int cscov;
+    u8 dccph_doff;
 
     if (skb->pkt_type != PACKET_HOST)
         return 1;
@@ -721,18 +722,19 @@ int dccp_invalid_packet(struct sk_buff *skb)
     /*
      * If P.Data Offset is too small for packet type, drop packet and return
      */
-    if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
-        DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff);
+    dccph_doff = dh->dccph_doff;
+    if (dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
+        DCCP_WARN("P.Data Offset(%u) too small\n", dccph_doff);
         return 1;
     }
     /*
      * If P.Data Offset is too too large for packet, drop packet and return
      */
-    if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
-        DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff);
+    if (!pskb_may_pull(skb, dccph_doff * sizeof(u32))) {
+        DCCP_WARN("P.Data Offset(%u) too large\n", dccph_doff);
         return 1;
     }
-
+    dh = dccp_hdr(skb);
     /*
      * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
      * has short sequence numbers), drop packet and return
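The dccp hunk above fixes a use-after-free: pskb_may_pull() may call pskb_expand_head() and reallocate the packet data, leaving dh pointing at freed memory. So the fix caches the needed field first and re-derives the header pointer after the pull. A userspace analogue of the pattern (realloc standing in for pskb_expand_head(); the packet layout is hypothetical):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct pkt {
        unsigned char *data;
        size_t len;
    };

    /* stand-in for pskb_may_pull(): may move pkt->data */
    static int pull(struct pkt *p, size_t need)
    {
        unsigned char *n;

        if (need <= p->len)
            return 1;
        n = realloc(p->data, need); /* old pointers into data now dangle */
        if (!n)
            return 0;
        memset(n + p->len, 0, need - p->len);
        p->data = n;
        p->len = need;
        return 1;
    }

    int main(void)
    {
        struct pkt p = { calloc(16, 1), 16 };
        unsigned char *hdr = p.data;  /* like dh = dccp_hdr(skb) */
        unsigned char doff = hdr[4];  /* cache the field BEFORE the pull */

        if (!pull(&p, (size_t)doff * 4 + 64))
            return 1;
        hdr = p.data;                 /* re-derive the pointer AFTER it */
        printf("doff=%u first=%u\n", doff, hdr[0]);
        free(p.data);
        return 0;
    }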
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index a6902c1e2f28..7899919cd9f0 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -233,6 +233,8 @@ int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev,
         genphy_read_status(phydev);
         if (ds->ops->adjust_link)
             ds->ops->adjust_link(ds, port, phydev);
+
+        put_device(&phydev->mdio.dev);
     }
 
     return 0;
@@ -504,15 +506,8 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
 
 void dsa_cpu_dsa_destroy(struct device_node *port_dn)
 {
-    struct phy_device *phydev;
-
-    if (of_phy_is_fixed_link(port_dn)) {
-        phydev = of_phy_find_device(port_dn);
-        if (phydev) {
-            phy_device_free(phydev);
-            fixed_phy_unregister(phydev);
-        }
-    }
+    if (of_phy_is_fixed_link(port_dn))
+        of_phy_deregister_fixed_link(port_dn);
 }
 
 static void dsa_switch_destroy(struct dsa_switch *ds)
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index f8a7d9aab437..5fff951a0a49 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -28,8 +28,10 @@ static struct dsa_switch_tree *dsa_get_dst(u32 tree)
     struct dsa_switch_tree *dst;
 
     list_for_each_entry(dst, &dsa_switch_trees, list)
-        if (dst->tree == tree)
+        if (dst->tree == tree) {
+            kref_get(&dst->refcount);
             return dst;
+        }
     return NULL;
 }
 
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 6b1282c006b1..30e2e21d7619 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1125,7 +1125,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
     p->phy_interface = mode;
 
     phy_dn = of_parse_phandle(port_dn, "phy-handle", 0);
-    if (of_phy_is_fixed_link(port_dn)) {
+    if (!phy_dn && of_phy_is_fixed_link(port_dn)) {
         /* In the case of a fixed PHY, the DT node associated
          * to the fixed PHY is the Port DT node
          */
@@ -1135,7 +1135,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
             return ret;
         }
         phy_is_fixed = true;
-        phy_dn = port_dn;
+        phy_dn = of_node_get(port_dn);
     }
 
     if (ds->ops->get_phy_flags)
@@ -1154,6 +1154,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
             ret = dsa_slave_phy_connect(p, slave_dev, phy_id);
             if (ret) {
                 netdev_err(slave_dev, "failed to connect to phy%d: %d\n", phy_id, ret);
+                of_node_put(phy_dn);
                 return ret;
             }
         } else {
@@ -1162,6 +1163,8 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
                           phy_flags,
                           p->phy_interface);
         }
+
+        of_node_put(phy_dn);
     }
 
     if (p->phy && phy_is_fixed)
@@ -1174,6 +1177,8 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
         ret = dsa_slave_phy_connect(p, slave_dev, p->port);
         if (ret) {
             netdev_err(slave_dev, "failed to connect to port %d: %d\n", p->port, ret);
+            if (phy_is_fixed)
+                of_phy_deregister_fixed_link(port_dn);
             return ret;
         }
     }
@@ -1289,10 +1294,18 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
 void dsa_slave_destroy(struct net_device *slave_dev)
 {
     struct dsa_slave_priv *p = netdev_priv(slave_dev);
+    struct dsa_switch *ds = p->parent;
+    struct device_node *port_dn;
+
+    port_dn = ds->ports[p->port].dn;
 
     netif_carrier_off(slave_dev);
-    if (p->phy)
+    if (p->phy) {
         phy_disconnect(p->phy);
+
+        if (of_phy_is_fixed_link(port_dn))
+            of_phy_deregister_fixed_link(port_dn);
+    }
     unregister_netdev(slave_dev);
     free_netdev(slave_dev);
 }
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 300b06888fdf..b54b3ca939db 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -715,6 +715,7 @@ config DEFAULT_TCP_CONG
     default "reno" if DEFAULT_RENO
     default "dctcp" if DEFAULT_DCTCP
     default "cdg" if DEFAULT_CDG
+    default "bbr" if DEFAULT_BBR
     default "cubic"
 
 config TCP_MD5SIG
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 5ddf5cda07f4..215143246e4b 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1233,7 +1233,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
         fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
 
         /* fixed ID is invalid if DF bit is not set */
-        if (fixedid && !(iph->frag_off & htons(IP_DF)))
+        if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF)))
             goto out;
     }
 
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index d95631d09248..20fb25e3027b 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -476,7 +476,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
         esph = (void *)skb_push(skb, 4);
         *seqhi = esph->spi;
         esph->spi = esph->seq_no;
-        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi);
+        esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
         aead_request_set_callback(req, 0, esp_input_done_esn, skb);
     }
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 105908d841a3..877bdb02e887 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -107,6 +107,8 @@ int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
     if (unlikely(!skb))
         return 0;
 
+    skb->protocol = htons(ETH_P_IP);
+
     return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
                    net, sk, skb, NULL, skb_dst(skb)->dev,
                    dst_output);
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index c3776ff6749f..b3cc1335adbc 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -24,10 +24,11 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
     struct flowi4 fl4 = {};
     __be32 saddr = iph->saddr;
     __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
+    struct net_device *dev = skb_dst(skb)->dev;
     unsigned int hh_len;
 
     if (addr_type == RTN_UNSPEC)
-        addr_type = inet_addr_type(net, saddr);
+        addr_type = inet_addr_type_dev_table(net, dev, saddr);
     if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
         flags |= FLOWI_FLAG_ANYSRC;
     else
@@ -40,6 +41,8 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
     fl4.saddr = saddr;
     fl4.flowi4_tos = RT_TOS(iph->tos);
     fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
+    if (!fl4.flowi4_oif)
+        fl4.flowi4_oif = l3mdev_master_ifindex(dev);
     fl4.flowi4_mark = skb->mark;
     fl4.flowi4_flags = flags;
     rt = ip_route_output_key(net, &fl4);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index b31df597fd37..697538464e6e 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1201,8 +1201,8 @@ static int translate_compat_table(struct xt_table_info **pinfo,
 
     newinfo->number = compatr->num_entries;
     for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
-        newinfo->hook_entry[i] = info->hook_entry[i];
-        newinfo->underflow[i] = info->underflow[i];
+        newinfo->hook_entry[i] = compatr->hook_entry[i];
+        newinfo->underflow[i] = compatr->underflow[i];
     }
     entry1 = newinfo->entries;
     pos = entry1;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 37874e2f30ed..ccf40550c475 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -139,7 +139,8 @@ void ip6_datagram_release_cb(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(ip6_datagram_release_cb);
 
-static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
+                           int addr_len)
 {
     struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
     struct inet_sock *inet = inet_sk(sk);
@@ -252,6 +253,7 @@ ipv4_connected:
 out:
     return err;
 }
+EXPORT_SYMBOL_GPL(__ip6_datagram_connect);
 
 int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 060a60b2f8a6..111ba55fd512 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -418,7 +418,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
         esph = (void *)skb_push(skb, 4);
         *seqhi = esph->spi;
         esph->spi = esph->seq_no;
-        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi);
+        esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
         aead_request_set_callback(req, 0, esp_input_done_esn, skb);
     }
 
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 7370ad2e693a..2772004ba5a1 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -447,8 +447,10 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
 
     if (__ipv6_addr_needs_scope_id(addr_type))
         iif = skb->dev->ifindex;
-    else
-        iif = l3mdev_master_ifindex(skb_dst(skb)->dev);
+    else {
+        dst = skb_dst(skb);
+        iif = l3mdev_master_ifindex(dst ? dst->dev : skb->dev);
+    }
 
     /*
      * Must not send error if the source does not uniquely
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 1fcf61f1cbc3..89c59e656f44 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -99,7 +99,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
         segs = ops->callbacks.gso_segment(skb, features);
     }
 
-    if (IS_ERR(segs))
+    if (IS_ERR_OR_NULL(segs))
         goto out;
 
     gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0a4759b89da2..d76674efe523 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1181,7 +1181,6 @@ route_lookup:
     if (err)
         return err;
 
-    skb->protocol = htons(ETH_P_IPV6);
     skb_push(skb, sizeof(struct ipv6hdr));
     skb_reset_network_header(skb);
     ipv6h = ipv6_hdr(skb);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 8a02ca8a11af..c299c1e2bbf0 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -1138,6 +1138,33 @@ static struct xfrm6_protocol vti_ipcomp6_protocol __read_mostly = {
     .priority = 100,
 };
 
+static bool is_vti6_tunnel(const struct net_device *dev)
+{
+    return dev->netdev_ops == &vti6_netdev_ops;
+}
+
+static int vti6_device_event(struct notifier_block *unused,
+                             unsigned long event, void *ptr)
+{
+    struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+    struct ip6_tnl *t = netdev_priv(dev);
+
+    if (!is_vti6_tunnel(dev))
+        return NOTIFY_DONE;
+
+    switch (event) {
+    case NETDEV_DOWN:
+        if (!net_eq(t->net, dev_net(dev)))
+            xfrm_garbage_collect(t->net);
+        break;
+    }
+    return NOTIFY_DONE;
+}
+
+static struct notifier_block vti6_notifier_block __read_mostly = {
+    .notifier_call = vti6_device_event,
+};
+
 /**
  * vti6_tunnel_init - register protocol and reserve needed resources
  *
@@ -1148,6 +1175,8 @@ static int __init vti6_tunnel_init(void)
     const char *msg;
     int err;
 
+    register_netdevice_notifier(&vti6_notifier_block);
+
     msg = "tunnel device";
     err = register_pernet_device(&vti6_net_ops);
     if (err < 0)
@@ -1180,6 +1209,7 @@ xfrm_proto_ah_failed:
 xfrm_proto_esp_failed:
     unregister_pernet_device(&vti6_net_ops);
 pernet_dev_failed:
+    unregister_netdevice_notifier(&vti6_notifier_block);
     pr_err("vti6 init: failed to register %s\n", msg);
     return err;
 }
@@ -1194,6 +1224,7 @@ static void __exit vti6_tunnel_cleanup(void)
     xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
     xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
     unregister_pernet_device(&vti6_net_ops);
+    unregister_netdevice_notifier(&vti6_notifier_block);
 }
 
 module_init(vti6_tunnel_init);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index e4347aeb2e65..9948b5ce52da 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -576,11 +576,11 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
     /* Jumbo payload inhibits frag. header */
     if (ipv6_hdr(skb)->payload_len == 0) {
         pr_debug("payload len = 0\n");
-        return -EINVAL;
+        return 0;
     }
 
     if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
-        return -EINVAL;
+        return 0;
 
     if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
         return -ENOMEM;
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index f7aab5ab93a5..f06b0471f39f 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -69,7 +69,7 @@ static unsigned int ipv6_defrag(void *priv,
     if (err == -EINPROGRESS)
         return NF_STOLEN;
 
-    return NF_ACCEPT;
+    return err == 0 ? NF_ACCEPT : NF_DROP;
 }
 
 static struct nf_hook_ops ipv6_defrag_ops[] = {
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index a5400223fd74..10090400c72f 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -156,6 +156,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
     fl6.daddr = oip6h->saddr;
     fl6.fl6_sport = otcph->dest;
     fl6.fl6_dport = otcph->source;
+    fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
     security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
     dst = ip6_route_output(net, NULL, &fl6);
     if (dst->error) {
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 7cca8ac66fe9..cd4252346a32 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -155,6 +155,8 @@ int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
     if (unlikely(!skb))
         return 0;
 
+    skb->protocol = htons(ETH_P_IPV6);
+
     return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
                    net, sk, skb, NULL, skb_dst(skb)->dev,
                    dst_output);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 982f6c44ea01..8938b6ba57a0 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -61,7 +61,8 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif
         if ((l2tp->conn_id == tunnel_id) &&
             net_eq(sock_net(sk), net) &&
             !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
-            !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+            (!sk->sk_bound_dev_if || !dif ||
+             sk->sk_bound_dev_if == dif))
             goto found;
     }
 
@@ -182,15 +183,17 @@ pass_up:
         struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
 
         read_lock_bh(&l2tp_ip_lock);
-        sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id);
+        sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb),
+                                   tunnel_id);
+        if (!sk) {
+            read_unlock_bh(&l2tp_ip_lock);
+            goto discard;
+        }
+
+        sock_hold(sk);
         read_unlock_bh(&l2tp_ip_lock);
     }
 
-    if (sk == NULL)
-        goto discard;
-
-    sock_hold(sk);
-
     if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
         goto discard_put;
 
@@ -256,15 +259,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
     if (addr->l2tp_family != AF_INET)
         return -EINVAL;
 
-    ret = -EADDRINUSE;
-    read_lock_bh(&l2tp_ip_lock);
-    if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
-                              sk->sk_bound_dev_if, addr->l2tp_conn_id))
-        goto out_in_use;
-
-    read_unlock_bh(&l2tp_ip_lock);
-
     lock_sock(sk);
+
+    ret = -EINVAL;
     if (!sock_flag(sk, SOCK_ZAPPED))
         goto out;
 
@@ -281,14 +278,22 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
     inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
     if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
         inet->inet_saddr = 0;  /* Use device */
-    sk_dst_reset(sk);
 
+    write_lock_bh(&l2tp_ip_lock);
+    if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
+                              sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
+        write_unlock_bh(&l2tp_ip_lock);
+        ret = -EADDRINUSE;
+        goto out;
+    }
+
+    sk_dst_reset(sk);
     l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
 
-    write_lock_bh(&l2tp_ip_lock);
     sk_add_bind_node(sk, &l2tp_ip_bind_table);
     sk_del_node_init(sk);
     write_unlock_bh(&l2tp_ip_lock);
+
     ret = 0;
     sock_reset_flag(sk, SOCK_ZAPPED);
 
@@ -296,11 +301,6 @@ out:
     release_sock(sk);
 
     return ret;
-
-out_in_use:
-    read_unlock_bh(&l2tp_ip_lock);
-
-    return ret;
 }
 
 static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
@@ -308,21 +308,24 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
     struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
     int rc;
 
-    if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
-        return -EINVAL;
-
     if (addr_len < sizeof(*lsa))
         return -EINVAL;
 
     if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
         return -EINVAL;
 
-    rc = ip4_datagram_connect(sk, uaddr, addr_len);
-    if (rc < 0)
-        return rc;
-
     lock_sock(sk);
 
+    /* Must bind first - autobinding does not work */
+    if (sock_flag(sk, SOCK_ZAPPED)) {
+        rc = -EINVAL;
+        goto out_sk;
+    }
+
+    rc = __ip4_datagram_connect(sk, uaddr, addr_len);
+    if (rc < 0)
+        goto out_sk;
+
     l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
 
     write_lock_bh(&l2tp_ip_lock);
@@ -330,7 +333,9 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
     sk_add_bind_node(sk, &l2tp_ip_bind_table);
     write_unlock_bh(&l2tp_ip_lock);
 
+out_sk:
     release_sock(sk);
+
     return rc;
 }
 
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 9978d01ba0ba..aa821cb639e5 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -72,8 +72,9 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
 
         if ((l2tp->conn_id == tunnel_id) &&
             net_eq(sock_net(sk), net) &&
-            !(addr && ipv6_addr_equal(addr, laddr)) &&
-            !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+            (!addr || ipv6_addr_equal(addr, laddr)) &&
+            (!sk->sk_bound_dev_if || !dif ||
+             sk->sk_bound_dev_if == dif))
             goto found;
     }
 
@@ -196,16 +197,17 @@ pass_up:
         struct ipv6hdr *iph = ipv6_hdr(skb);
 
         read_lock_bh(&l2tp_ip6_lock);
-        sk = __l2tp_ip6_bind_lookup(net, &iph->daddr,
-                                    0, tunnel_id);
+        sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, inet6_iif(skb),
+                                    tunnel_id);
+        if (!sk) {
+            read_unlock_bh(&l2tp_ip6_lock);
+            goto discard;
+        }
+
+        sock_hold(sk);
         read_unlock_bh(&l2tp_ip6_lock);
     }
 
-    if (sk == NULL)
-        goto discard;
-
-    sock_hold(sk);
-
     if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
         goto discard_put;
 
@@ -266,6 +268,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
     struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
     struct net *net = sock_net(sk);
     __be32 v4addr = 0;
+    int bound_dev_if;
     int addr_type;
     int err;
 
@@ -284,13 +287,6 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
     if (addr_type & IPV6_ADDR_MULTICAST)
         return -EADDRNOTAVAIL;
 
-    err = -EADDRINUSE;
-    read_lock_bh(&l2tp_ip6_lock);
-    if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr,
-                               sk->sk_bound_dev_if, addr->l2tp_conn_id))
-        goto out_in_use;
-    read_unlock_bh(&l2tp_ip6_lock);
-
     lock_sock(sk);
 
     err = -EINVAL;
@@ -300,28 +296,25 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
     if (sk->sk_state != TCP_CLOSE)
         goto out_unlock;
 
+    bound_dev_if = sk->sk_bound_dev_if;
+
     /* Check if the address belongs to the host. */
     rcu_read_lock();
     if (addr_type != IPV6_ADDR_ANY) {
         struct net_device *dev = NULL;
 
         if (addr_type & IPV6_ADDR_LINKLOCAL) {
-            if (addr_len >= sizeof(struct sockaddr_in6) &&
-                addr->l2tp_scope_id) {
-                /* Override any existing binding, if another
-                 * one is supplied by user.
-                 */
-                sk->sk_bound_dev_if = addr->l2tp_scope_id;
-            }
+            if (addr->l2tp_scope_id)
+                bound_dev_if = addr->l2tp_scope_id;
 
             /* Binding to link-local address requires an
-               interface */
-            if (!sk->sk_bound_dev_if)
+             * interface.
+             */
+            if (!bound_dev_if)
                 goto out_unlock_rcu;
 
             err = -ENODEV;
-            dev = dev_get_by_index_rcu(sock_net(sk),
-                                       sk->sk_bound_dev_if);
+            dev = dev_get_by_index_rcu(sock_net(sk), bound_dev_if);
             if (!dev)
                 goto out_unlock_rcu;
         }
@@ -336,13 +329,22 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
     }
     rcu_read_unlock();
 
-    inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
+    write_lock_bh(&l2tp_ip6_lock);
+    if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if,
+                               addr->l2tp_conn_id)) {
+        write_unlock_bh(&l2tp_ip6_lock);
+        err = -EADDRINUSE;
+        goto out_unlock;
+    }
+
+    inet->inet_saddr = v4addr;
+    inet->inet_rcv_saddr = v4addr;
+    sk->sk_bound_dev_if = bound_dev_if;
     sk->sk_v6_rcv_saddr = addr->l2tp_addr;
     np->saddr = addr->l2tp_addr;
 
     l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
 
-    write_lock_bh(&l2tp_ip6_lock);
     sk_add_bind_node(sk, &l2tp_ip6_bind_table);
     sk_del_node_init(sk);
     write_unlock_bh(&l2tp_ip6_lock);
@@ -355,10 +357,7 @@ out_unlock_rcu:
     rcu_read_unlock();
 out_unlock:
     release_sock(sk);
-    return err;
 
-out_in_use:
-    read_unlock_bh(&l2tp_ip6_lock);
     return err;
 }
 
@@ -371,9 +370,6 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
     int addr_type;
     int rc;
 
-    if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
-        return -EINVAL;
-
     if (addr_len < sizeof(*lsa))
         return -EINVAL;
 
@@ -390,10 +386,18 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
         return -EINVAL;
     }
 
-    rc = ip6_datagram_connect(sk, uaddr, addr_len);
-
     lock_sock(sk);
 
+    /* Must bind first - autobinding does not work */
+    if (sock_flag(sk, SOCK_ZAPPED)) {
+        rc = -EINVAL;
+        goto out_sk;
+    }
+
+    rc = __ip6_datagram_connect(sk, uaddr, addr_len);
+    if (rc < 0)
+        goto out_sk;
+
     l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
 
     write_lock_bh(&l2tp_ip6_lock);
@@ -401,6 +405,7 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
     sk_add_bind_node(sk, &l2tp_ip6_bind_table);
     write_unlock_bh(&l2tp_ip6_lock);
 
+out_sk:
     release_sock(sk);
 
     return rc;
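Both l2tp files above fix the same check-then-act race: the conflicting-bind lookup ran under the read lock, while the insert into the bind table happened later, so two sockets could both pass the check. The fix performs lookup and insert under a single write lock. A simplified, self-contained userspace sketch of that pattern (identifiers hypothetical, not the kernel API):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_BINDS 16

    static pthread_rwlock_t bind_lock = PTHREAD_RWLOCK_INITIALIZER;
    static unsigned int bound_ids[MAX_BINDS];
    static int nr_bound;

    static bool conflict_locked(unsigned int conn_id) /* caller holds bind_lock */
    {
        int i;

        for (i = 0; i < nr_bound; i++)
            if (bound_ids[i] == conn_id)
                return true;
        return false;
    }

    static int bind_conn_id(unsigned int conn_id)
    {
        pthread_rwlock_wrlock(&bind_lock);
        if (conflict_locked(conn_id)) {      /* check ... */
            pthread_rwlock_unlock(&bind_lock);
            return -1;                       /* -EADDRINUSE */
        }
        bound_ids[nr_bound++] = conn_id;     /* ... and insert, atomically */
        pthread_rwlock_unlock(&bind_lock);
        return 0;
    }

    int main(void)
    {
        int a = bind_conn_id(42);
        int b = bind_conn_id(42);

        printf("%d %d\n", a, b);             /* 0 -1 */
        return 0;
    }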
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index bbb8f3df79f7..5b9c884a452e 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -42,7 +42,7 @@ struct nf_nat_conn_key {
     const struct nf_conntrack_zone *zone;
 };
 
-static struct rhashtable nf_nat_bysource_table;
+static struct rhltable nf_nat_bysource_table;
 
 inline const struct nf_nat_l3proto *
 __nf_nat_l3proto_find(u8 family)
@@ -193,9 +193,12 @@ static int nf_nat_bysource_cmp(struct rhashtable_compare_arg *arg,
     const struct nf_nat_conn_key *key = arg->key;
     const struct nf_conn *ct = obj;
 
-    return same_src(ct, key->tuple) &&
-           net_eq(nf_ct_net(ct), key->net) &&
-           nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL);
+    if (!same_src(ct, key->tuple) ||
+        !net_eq(nf_ct_net(ct), key->net) ||
+        !nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL))
+        return 1;
+
+    return 0;
 }
 
@@ -204,7 +207,6 @@ static struct rhashtable_params nf_nat_bysource_params = {
     .obj_cmpfn = nf_nat_bysource_cmp,
     .nelem_hint = 256,
     .min_size = 1024,
-    .nulls_base = (1U << RHT_BASE_SHIFT),
 };
 
 /* Only called for SRC manip */
@@ -223,12 +225,15 @@ find_appropriate_src(struct net *net,
         .tuple = tuple,
         .zone = zone
     };
+    struct rhlist_head *hl;
 
-    ct = rhashtable_lookup_fast(&nf_nat_bysource_table, &key,
-                                nf_nat_bysource_params);
-    if (!ct)
+    hl = rhltable_lookup(&nf_nat_bysource_table, &key,
+                         nf_nat_bysource_params);
+    if (!hl)
         return 0;
 
+    ct = container_of(hl, typeof(*ct), nat_bysource);
+
     nf_ct_invert_tuplepr(result,
                          &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
     result->dst = tuple->dst;
@@ -446,11 +451,17 @@ nf_nat_setup_info(struct nf_conn *ct,
     }
 
     if (maniptype == NF_NAT_MANIP_SRC) {
+        struct nf_nat_conn_key key = {
+            .net = nf_ct_net(ct),
+            .tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+            .zone = nf_ct_zone(ct),
+        };
         int err;
 
-        err = rhashtable_insert_fast(&nf_nat_bysource_table,
-                                     &ct->nat_bysource,
-                                     nf_nat_bysource_params);
+        err = rhltable_insert_key(&nf_nat_bysource_table,
+                                  &key,
+                                  &ct->nat_bysource,
+                                  nf_nat_bysource_params);
         if (err)
             return NF_DROP;
     }
@@ -567,8 +578,8 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
      * will delete entry from already-freed table.
      */
     ct->status &= ~IPS_NAT_DONE_MASK;
-    rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource,
-                           nf_nat_bysource_params);
+    rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
+                    nf_nat_bysource_params);
 
     /* don't delete conntrack. Although that would make things a lot
      * simpler, we'd end up flushing all conntracks on nat rmmod.
@@ -698,8 +709,8 @@ static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
     if (!nat)
         return;
 
-    rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource,
-                           nf_nat_bysource_params);
+    rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
+                    nf_nat_bysource_params);
 }
 
 static struct nf_ct_ext_type nat_extend __read_mostly = {
@@ -834,13 +845,13 @@ static int __init nf_nat_init(void)
 {
     int ret;
 
-    ret = rhashtable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
+    ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
     if (ret)
         return ret;
 
     ret = nf_ct_extend_register(&nat_extend);
     if (ret < 0) {
-        rhashtable_destroy(&nf_nat_bysource_table);
+        rhltable_destroy(&nf_nat_bysource_table);
         printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
         return ret;
     }
@@ -864,7 +875,7 @@ static int __init nf_nat_init(void)
     return 0;
 
  cleanup_extend:
-    rhashtable_destroy(&nf_nat_bysource_table);
+    rhltable_destroy(&nf_nat_bysource_table);
     nf_ct_extend_unregister(&nat_extend);
     return ret;
 }
@@ -883,7 +894,7 @@ static void __exit nf_nat_cleanup(void)
     for (i = 0; i < NFPROTO_NUMPROTO; i++)
         kfree(nf_nat_l4protos[i]);
 
-    rhashtable_destroy(&nf_nat_bysource_table);
+    rhltable_destroy(&nf_nat_bysource_table);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 026581b04ea8..e5194f6f906c 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2570,7 +2570,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
     }
 
     if (set->timeout &&
-        nla_put_be64(skb, NFTA_SET_TIMEOUT, cpu_to_be64(set->timeout),
+        nla_put_be64(skb, NFTA_SET_TIMEOUT,
+                     cpu_to_be64(jiffies_to_msecs(set->timeout)),
                      NFTA_SET_PAD))
         goto nla_put_failure;
     if (set->gc_int &&
@@ -2859,7 +2860,8 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
     if (nla[NFTA_SET_TIMEOUT] != NULL) {
         if (!(flags & NFT_SET_TIMEOUT))
             return -EINVAL;
-        timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_TIMEOUT]));
+        timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
+                                        nla[NFTA_SET_TIMEOUT])));
     }
     gc_int = 0;
     if (nla[NFTA_SET_GC_INTERVAL] != NULL) {
@@ -3178,7 +3180,8 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
 
     if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
         nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
-                     cpu_to_be64(*nft_set_ext_timeout(ext)),
+                     cpu_to_be64(jiffies_to_msecs(
+                                        *nft_set_ext_timeout(ext))),
                      NFTA_SET_ELEM_PAD))
         goto nla_put_failure;
 
@@ -3447,7 +3450,7 @@ void *nft_set_elem_init(const struct nft_set *set,
     memcpy(nft_set_ext_data(ext), data, set->dlen);
     if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION))
         *nft_set_ext_expiration(ext) =
-            jiffies + msecs_to_jiffies(timeout);
+            jiffies + timeout;
     if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT))
         *nft_set_ext_timeout(ext) = timeout;
 
@@ -3535,7 +3538,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
     if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) {
         if (!(set->flags & NFT_SET_TIMEOUT))
             return -EINVAL;
-        timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_ELEM_TIMEOUT]));
+        timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
+                                        nla[NFTA_SET_ELEM_TIMEOUT])));
     } else if (set->flags & NFT_SET_TIMEOUT) {
         timeout = set->timeout;
     }
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index baf694de3935..d5447a22275c 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -53,6 +53,7 @@ static int nft_hash_init(const struct nft_ctx *ctx,
 {
     struct nft_hash *priv = nft_expr_priv(expr);
     u32 len;
+    int err;
 
     if (!tb[NFTA_HASH_SREG] ||
         !tb[NFTA_HASH_DREG] ||
@@ -67,8 +68,10 @@ static int nft_hash_init(const struct nft_ctx *ctx,
     priv->sreg = nft_parse_register(tb[NFTA_HASH_SREG]);
     priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
 
-    len = ntohl(nla_get_be32(tb[NFTA_HASH_LEN]));
-    if (len == 0 || len > U8_MAX)
+    err = nft_parse_u32_check(tb[NFTA_HASH_LEN], U8_MAX, &len);
+    if (err < 0)
+        return err;
+    if (len == 0)
         return -ERANGE;
 
     priv->len = len;
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
index fbc88009ca2e..8f0aaaea1376 100644
--- a/net/netfilter/nft_range.c
+++ b/net/netfilter/nft_range.c
@@ -59,6 +59,12 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
     int err;
     u32 op;
 
+    if (!tb[NFTA_RANGE_SREG] ||
+        !tb[NFTA_RANGE_OP] ||
+        !tb[NFTA_RANGE_FROM_DATA] ||
+        !tb[NFTA_RANGE_TO_DATA])
+        return -EINVAL;
+
     err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from),
                         &desc_from, tb[NFTA_RANGE_FROM_DATA]);
     if (err < 0)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 62bea4591054..602e5ebe9db3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -322,14 +322,11 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
     sk_mem_charge(sk, skb->truesize);
 }
 
-static void netlink_sock_destruct(struct sock *sk)
+static void __netlink_sock_destruct(struct sock *sk)
 {
     struct netlink_sock *nlk = nlk_sk(sk);
 
     if (nlk->cb_running) {
-        if (nlk->cb.done)
-            nlk->cb.done(&nlk->cb);
-
         module_put(nlk->cb.module);
         kfree_skb(nlk->cb.skb);
     }
@@ -346,6 +343,28 @@ static void netlink_sock_destruct(struct sock *sk)
     WARN_ON(nlk_sk(sk)->groups);
 }
 
+static void netlink_sock_destruct_work(struct work_struct *work)
+{
+    struct netlink_sock *nlk = container_of(work, struct netlink_sock,
+                                            work);
+
+    nlk->cb.done(&nlk->cb);
+    __netlink_sock_destruct(&nlk->sk);
+}
+
+static void netlink_sock_destruct(struct sock *sk)
+{
+    struct netlink_sock *nlk = nlk_sk(sk);
+
+    if (nlk->cb_running && nlk->cb.done) {
+        INIT_WORK(&nlk->work, netlink_sock_destruct_work);
+        schedule_work(&nlk->work);
+        return;
+    }
+
+    __netlink_sock_destruct(sk);
+}
+
 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
  * SMP. Look, when several writers sleep and reader wakes them up, all but one
  * immediately hit write lock and grab all the cpus. Exclusive sleep solves
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index 3cfd6cc60504..4fdb38318977 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -3,6 +3,7 @@
 
 #include <linux/rhashtable.h>
 #include <linux/atomic.h>
+#include <linux/workqueue.h>
 #include <net/sock.h>
 
 #define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
@@ -33,6 +34,7 @@ struct netlink_sock {
 
     struct rhash_head node;
     struct rcu_head rcu;
+    struct work_struct work;
 };
 
 static inline struct netlink_sock *nlk_sk(struct sock *sk)
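The netlink change above defers cb->done() to a workqueue because the socket destructor can be invoked from atomic (RCU callback) context while the done callback, which tears down an rhashtable walk, may sleep. A rough userspace analogue of the deferral, with a pthread standing in for schedule_work() and all identifiers hypothetical:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct nlsock {
        bool cb_running;
        void (*done)(struct nlsock *s);
    };

    static pthread_t worker;
    static bool deferred;

    static void heavy_done(struct nlsock *s)      /* may sleep, like rhash teardown */
    {
        printf("dump done callback for %p\n", (void *)s);
    }

    static void finish_destruct(struct nlsock *s) /* the non-sleeping part */
    {
        printf("final teardown of %p\n", (void *)s);
    }

    static void *destruct_work(void *arg)         /* plays the work item */
    {
        struct nlsock *s = arg;

        s->done(s);              /* safe here: worker context may sleep */
        finish_destruct(s);
        return NULL;
    }

    static void sock_destruct(struct nlsock *s)   /* imagine: atomic context */
    {
        if (s->cb_running && s->done) {
            pthread_create(&worker, NULL, destruct_work, s);
            deferred = true;     /* like INIT_WORK + schedule_work */
            return;
        }
        finish_destruct(s);
    }

    int main(void)
    {
        static struct nlsock s = { .cb_running = true, .done = heavy_done };

        sock_destruct(&s);
        if (deferred)
            pthread_join(worker, NULL);
        return 0;
    }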
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 31045ef44a82..fecefa2dc94e 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -370,8 +370,11 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
         skb_orphan(skb);
         memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
         err = nf_ct_frag6_gather(net, skb, user);
-        if (err)
+        if (err) {
+            if (err != -EINPROGRESS)
+                kfree_skb(skb);
             return err;
+        }
 
         key->ip.proto = ipv6_hdr(skb)->nexthdr;
         ovs_cb.mru = IP6CB(skb)->frag_max_size;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d2238b204691..dd2332390c45 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3648,19 +3648,25 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3648 3648
3649 if (optlen != sizeof(val)) 3649 if (optlen != sizeof(val))
3650 return -EINVAL; 3650 return -EINVAL;
3651 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3652 return -EBUSY;
3653 if (copy_from_user(&val, optval, sizeof(val))) 3651 if (copy_from_user(&val, optval, sizeof(val)))
3654 return -EFAULT; 3652 return -EFAULT;
3655 switch (val) { 3653 switch (val) {
3656 case TPACKET_V1: 3654 case TPACKET_V1:
3657 case TPACKET_V2: 3655 case TPACKET_V2:
3658 case TPACKET_V3: 3656 case TPACKET_V3:
3659 po->tp_version = val; 3657 break;
3660 return 0;
3661 default: 3658 default:
3662 return -EINVAL; 3659 return -EINVAL;
3663 } 3660 }
3661 lock_sock(sk);
3662 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3663 ret = -EBUSY;
3664 } else {
3665 po->tp_version = val;
3666 ret = 0;
3667 }
3668 release_sock(sk);
3669 return ret;
3664 } 3670 }
3665 case PACKET_RESERVE: 3671 case PACKET_RESERVE:
3666 { 3672 {
@@ -4164,6 +4170,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4164 /* Added to avoid minimal code churn */ 4170 /* Added to avoid minimal code churn */
4165 struct tpacket_req *req = &req_u->req; 4171 struct tpacket_req *req = &req_u->req;
4166 4172
4173 lock_sock(sk);
4167 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ 4174 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
4168 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { 4175 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
4169 net_warn_ratelimited("Tx-ring is not supported.\n"); 4176 net_warn_ratelimited("Tx-ring is not supported.\n");
@@ -4245,7 +4252,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 		goto out;
 	}
 
-	lock_sock(sk);
 
 	/* Detach socket from network */
 	spin_lock(&po->bind_lock);
@@ -4294,11 +4300,11 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 		if (!tx_ring)
 			prb_shutdown_retire_blk_timer(po, rb_queue);
 	}
-	release_sock(sk);
 
 	if (pg_vec)
 		free_pg_vec(pg_vec, order, req->tp_block_nr);
 out:
+	release_sock(sk);
 	return err;
 }
 
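The race fixed above: PACKET_VERSION could flip po->tp_version while packet_set_ring() was still sizing and validating the ring against the old value. Both paths now run under the socket lock, so the version is stable for the whole ring setup. A reduced sketch of the setter side, kernel context assumed (abbreviated, not the exact tree code):

    static int packet_set_version(struct sock *sk, int val)
    {
    	struct packet_sock *po = pkt_sk(sk);
    	int ret;
    
    	lock_sock(sk);			/* packet_set_ring() holds this too */
    	if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
    		ret = -EBUSY;		/* ring mapped: layout already fixed */
    	else {
    		po->tp_version = val;
    		ret = 0;
    	}
    	release_sock(sk);
    	return ret;
    }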
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index fcddacc92e01..20e2923dc827 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -659,6 +659,8 @@ out_recv:
 out_pernet:
 	unregister_pernet_subsys(&rds_tcp_net_ops);
 out_slab:
+	if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
+		pr_warn("could not unregister rds_tcp_dev_notifier\n");
 	kmem_cache_destroy(rds_tcp_conn_slab);
 out:
 	return ret;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index b54d56d4959b..cf9b2fe8eac6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -108,6 +108,17 @@ static void tcf_pedit_cleanup(struct tc_action *a, int bind)
 	kfree(keys);
 }
 
+static bool offset_valid(struct sk_buff *skb, int offset)
+{
+	if (offset > 0 && offset > skb->len)
+		return false;
+
+	if (offset < 0 && -offset > skb_headroom(skb))
+		return false;
+
+	return true;
+}
+
 static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
 		     struct tcf_result *res)
 {
@@ -134,6 +145,11 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
 		if (tkey->offmask) {
 			char *d, _d;
 
+			if (!offset_valid(skb, off + tkey->at)) {
+				pr_info("tc filter pedit 'at' offset %d out of bounds\n",
+					off + tkey->at);
+				goto bad;
+			}
 			d = skb_header_pointer(skb, off + tkey->at, 1,
 					       &_d);
 			if (!d)
@@ -146,10 +162,10 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
146 " offset must be on 32 bit boundaries\n"); 162 " offset must be on 32 bit boundaries\n");
147 goto bad; 163 goto bad;
148 } 164 }
149 if (offset > 0 && offset > skb->len) { 165
150 pr_info("tc filter pedit" 166 if (!offset_valid(skb, off + offset)) {
151 " offset %d can't exceed pkt length %d\n", 167 pr_info("tc filter pedit offset %d out of bounds\n",
152 offset, skb->len); 168 offset);
153 goto bad; 169 goto bad;
154 } 170 }
155 171
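Worked example of the new pedit bounds check, for a hypothetical linear skb with skb->len == 100 and skb_headroom(skb) == 32:

    /* offset_valid(skb, 0)    -> true   (start of packet data)
     * offset_valid(skb, 100)  -> true   (offset == skb->len passes the check)
     * offset_valid(skb, 101)  -> false  (past the end of the packet)
     * offset_valid(skb, -32)  -> true   (still within the headroom)
     * offset_valid(skb, -33)  -> false  (in front of the buffer)
     */

Both the 'at' lookup and the key offset now go through this single check before skb_header_pointer() is called.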
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index eb219b78cd49..5877f6061b57 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -62,9 +62,6 @@ static unsigned long basic_get(struct tcf_proto *tp, u32 handle)
 	struct basic_head *head = rtnl_dereference(tp->root);
 	struct basic_filter *f;
 
-	if (head == NULL)
-		return 0UL;
-
 	list_for_each_entry(f, &head->flist, link) {
 		if (f->handle == handle) {
 			l = (unsigned long) f;
@@ -109,7 +106,6 @@ static bool basic_destroy(struct tcf_proto *tp, bool force)
 		tcf_unbind_filter(tp, &f->res);
 		call_rcu(&f->rcu, basic_delete_filter);
 	}
-	RCU_INIT_POINTER(tp->root, NULL);
 	kfree_rcu(head, rcu);
 	return true;
 }
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index bb1d5a487081..0a47ba5e6109 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -292,7 +292,6 @@ static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
 		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
 	}
 
-	RCU_INIT_POINTER(tp->root, NULL);
 	kfree_rcu(head, rcu);
 	return true;
 }
@@ -303,9 +302,6 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
 	struct cls_bpf_prog *prog;
 	unsigned long ret = 0UL;
 
-	if (head == NULL)
-		return 0UL;
-
 	list_for_each_entry(prog, &head->plist, link) {
 		if (prog->handle == handle) {
 			ret = (unsigned long) prog;
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 85233c470035..c1f20077837f 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -137,11 +137,10 @@ static bool cls_cgroup_destroy(struct tcf_proto *tp, bool force)
 
 	if (!force)
 		return false;
-
-	if (head) {
-		RCU_INIT_POINTER(tp->root, NULL);
+	/* Head can still be NULL due to cls_cgroup_init(). */
+	if (head)
 		call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
-	}
+
 	return true;
 }
 
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index e39672394c7b..6575aba87630 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -596,7 +596,6 @@ static bool flow_destroy(struct tcf_proto *tp, bool force)
 		list_del_rcu(&f->list);
 		call_rcu(&f->rcu, flow_destroy_filter);
 	}
-	RCU_INIT_POINTER(tp->root, NULL);
 	kfree_rcu(head, rcu);
 	return true;
 }
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index f6f40fba599b..904442421db3 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/rhashtable.h>
+#include <linux/workqueue.h>
 
 #include <linux/if_ether.h>
 #include <linux/in6.h>
@@ -64,7 +65,10 @@ struct cls_fl_head {
 	bool mask_assigned;
 	struct list_head filters;
 	struct rhashtable_params ht_params;
-	struct rcu_head rcu;
+	union {
+		struct work_struct work;
+		struct rcu_head rcu;
+	};
 };
 
 struct cls_fl_filter {
@@ -269,6 +273,24 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
 	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
 }
 
+static void fl_destroy_sleepable(struct work_struct *work)
+{
+	struct cls_fl_head *head = container_of(work, struct cls_fl_head,
+						work);
+	if (head->mask_assigned)
+		rhashtable_destroy(&head->ht);
+	kfree(head);
+	module_put(THIS_MODULE);
+}
+
+static void fl_destroy_rcu(struct rcu_head *rcu)
+{
+	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);
+
+	INIT_WORK(&head->work, fl_destroy_sleepable);
+	schedule_work(&head->work);
+}
+
 static bool fl_destroy(struct tcf_proto *tp, bool force)
 {
 	struct cls_fl_head *head = rtnl_dereference(tp->root);
@@ -282,10 +304,9 @@ static bool fl_destroy(struct tcf_proto *tp, bool force)
 		list_del_rcu(&f->list);
 		call_rcu(&f->rcu, fl_destroy_filter);
 	}
-	RCU_INIT_POINTER(tp->root, NULL);
-	if (head->mask_assigned)
-		rhashtable_destroy(&head->ht);
-	kfree_rcu(head, rcu);
+
+	__module_get(THIS_MODULE);
+	call_rcu(&head->rcu, fl_destroy_rcu);
 	return true;
 }
 
@@ -711,8 +732,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 		goto errout;
 
 	if (fold) {
-		rhashtable_remove_fast(&head->ht, &fold->ht_node,
-				       head->ht_params);
+		if (!tc_skip_sw(fold->flags))
+			rhashtable_remove_fast(&head->ht, &fold->ht_node,
+					       head->ht_params);
 		fl_hw_destroy_filter(tp, (unsigned long)fold);
 	}
 
@@ -739,8 +761,9 @@ static int fl_delete(struct tcf_proto *tp, unsigned long arg)
 	struct cls_fl_head *head = rtnl_dereference(tp->root);
 	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;
 
-	rhashtable_remove_fast(&head->ht, &f->ht_node,
-			       head->ht_params);
+	if (!tc_skip_sw(f->flags))
+		rhashtable_remove_fast(&head->ht, &f->ht_node,
+				       head->ht_params);
 	list_del_rcu(&f->list);
 	fl_hw_destroy_filter(tp, (unsigned long)f);
 	tcf_unbind_filter(tp, &f->res);
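
The two-stage teardown added above exists because call_rcu() callbacks run in softirq context, where sleeping is forbidden, while rhashtable_destroy() may sleep. The RCU callback therefore only schedules a work item, and __module_get()/module_put() pin the module until the deferred work has actually run. A condensed restatement of the pattern (kernel context assumed, mirrors the code above):

    static void fl_head_destroy_work(struct work_struct *work)
    {
    	struct cls_fl_head *head = container_of(work, struct cls_fl_head, work);
    
    	rhashtable_destroy(&head->ht);	/* may sleep: fine in a workqueue */
    	kfree(head);
    	module_put(THIS_MODULE);	/* teardown finished, unpin module */
    }
    
    static void fl_head_destroy_rcu(struct rcu_head *rcu)
    {
    	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);
    
    	INIT_WORK(&head->work, fl_head_destroy_work);
    	schedule_work(&head->work);	/* softirq: defer the sleepable part */
    }

The union of work_struct and rcu_head in cls_fl_head is safe because the two are never live at the same time: the rcu_head is consumed before the work_struct is initialized.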
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 25927b6c4436..f935429bd5ef 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -114,7 +114,6 @@ static bool mall_destroy(struct tcf_proto *tp, bool force)
 
 		call_rcu(&f->rcu, mall_destroy_filter);
 	}
-	RCU_INIT_POINTER(tp->root, NULL);
 	kfree_rcu(head, rcu);
 	return true;
 }
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 4f05a19fb073..322438fb3ffc 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -152,7 +152,8 @@ static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 		return -1;
 	nhptr = ip_hdr(skb);
 #endif
-
+	if (unlikely(!head))
+		return -1;
 restart:
 
 #if RSVP_DST_LEN == 4
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 96144bdf30db..0751245a6ace 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -543,7 +543,6 @@ static bool tcindex_destroy(struct tcf_proto *tp, bool force)
 	walker.fn = tcindex_destroy_element;
 	tcindex_walk(tp, &walker);
 
-	RCU_INIT_POINTER(tp->root, NULL);
 	call_rcu(&p->rcu, __tcindex_destroy);
 	return true;
 }
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 975dbeb60ab0..52d74760fb68 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -421,6 +421,10 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
 	dev = dev_get_by_name(net, driver_name);
 	if (!dev)
 		return -ENODEV;
+	if (tipc_mtu_bad(dev, 0)) {
+		dev_put(dev);
+		return -EINVAL;
+	}
 
 	/* Associate TIPC bearer with L2 bearer */
 	rcu_assign_pointer(b->media_ptr, dev);
@@ -610,8 +614,6 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
 	if (!b)
 		return NOTIFY_DONE;
 
-	b->mtu = dev->mtu;
-
 	switch (evt) {
 	case NETDEV_CHANGE:
 		if (netif_carrier_ok(dev))
@@ -624,6 +626,11 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
 		tipc_reset_bearer(net, b);
 		break;
 	case NETDEV_CHANGEMTU:
+		if (tipc_mtu_bad(dev, 0)) {
+			bearer_disable(net, b);
+			break;
+		}
+		b->mtu = dev->mtu;
 		tipc_reset_bearer(net, b);
 		break;
 	case NETDEV_CHANGEADDR:
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 78892e2f53e3..278ff7f616f9 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -39,6 +39,7 @@
 
 #include "netlink.h"
 #include "core.h"
+#include "msg.h"
 #include <net/genetlink.h>
 
 #define MAX_MEDIA	3
@@ -59,6 +60,9 @@
 #define TIPC_MEDIA_TYPE_IB	2
 #define TIPC_MEDIA_TYPE_UDP	3
 
+/* minimum bearer MTU */
+#define TIPC_MIN_BEARER_MTU	(MAX_H_SIZE + INT_H_SIZE)
+
 /**
  * struct tipc_media_addr - destination address used by TIPC bearers
  * @value: address info (format defined by media)
@@ -215,4 +219,13 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id,
 void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
 			 struct sk_buff_head *xmitq);
 
+/* check if device MTU is too low for tipc headers */
+static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve)
+{
+	if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve)
+		return false;
+	netdev_warn(dev, "MTU too low for tipc bearer\n");
+	return true;
+}
+
 #endif	/* _TIPC_BEARER_H */
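
The reserve argument to tipc_mtu_bad() is the per-media encapsulation overhead on top of TIPC's own headers: 0 for plain L2 bearers, the IP plus UDP header sizes for UDP media (matching how udp_media.c later computes b->mtu). A hypothetical caller sketch, kernel context assumed:

    #include <linux/ip.h>
    #include <linux/udp.h>
    
    /* Reject a UDP bearer whose device MTU cannot carry a maximum TIPC
     * header once the IP and UDP encapsulation is taken out.
     */
    static int check_udp_bearer_mtu(struct net_device *dev)
    {
    	if (tipc_mtu_bad(dev, sizeof(struct iphdr) + sizeof(struct udphdr)))
    		return -EINVAL;
    	return 0;
    }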
diff --git a/net/tipc/link.c b/net/tipc/link.c
index ecc12411155e..bda89bf9f4ff 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -47,8 +47,8 @@
 #include <linux/pkt_sched.h>
 
 struct tipc_stats {
-	u32 sent_info;		/* used in counting # sent packets */
-	u32 recv_info;		/* used in counting # recv'd packets */
+	u32 sent_pkts;
+	u32 recv_pkts;
 	u32 sent_states;
 	u32 recv_states;
 	u32 sent_probes;
@@ -857,7 +857,6 @@ void tipc_link_reset(struct tipc_link *l)
 	l->acked = 0;
 	l->silent_intv_cnt = 0;
 	l->rst_cnt = 0;
-	l->stats.recv_info = 0;
 	l->stale_count = 0;
 	l->bc_peer_is_up = false;
 	memset(&l->mon_state, 0, sizeof(l->mon_state));
@@ -888,6 +887,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 	struct sk_buff_head *transmq = &l->transmq;
 	struct sk_buff_head *backlogq = &l->backlogq;
 	struct sk_buff *skb, *_skb, *bskb;
+	int pkt_cnt = skb_queue_len(list);
 
 	/* Match msg importance against this and all higher backlog limits: */
 	if (!skb_queue_empty(backlogq)) {
@@ -901,6 +901,11 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 		return -EMSGSIZE;
 	}
 
+	if (pkt_cnt > 1) {
+		l->stats.sent_fragmented++;
+		l->stats.sent_fragments += pkt_cnt;
+	}
+
 	/* Prepare each packet for sending, and add to relevant queue: */
 	while (skb_queue_len(list)) {
 		skb = skb_peek(list);
@@ -920,6 +925,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 			__skb_queue_tail(xmitq, _skb);
 			TIPC_SKB_CB(skb)->ackers = l->ackers;
 			l->rcv_unacked = 0;
+			l->stats.sent_pkts++;
 			seqno++;
 			continue;
 		}
@@ -968,6 +974,7 @@ void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
 		msg_set_ack(hdr, ack);
 		msg_set_bcast_ack(hdr, bc_ack);
 		l->rcv_unacked = 0;
+		l->stats.sent_pkts++;
 		seqno++;
 	}
 	l->snd_nxt = seqno;
@@ -1260,7 +1267,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
 
 		/* Deliver packet */
 		l->rcv_nxt++;
-		l->stats.recv_info++;
+		l->stats.recv_pkts++;
 		if (!tipc_data_input(l, skb, l->inputq))
 			rc |= tipc_link_input(l, skb, l->inputq);
 		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
@@ -1800,10 +1807,6 @@ void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
 void tipc_link_reset_stats(struct tipc_link *l)
 {
 	memset(&l->stats, 0, sizeof(l->stats));
-	if (!link_is_bc_sndlink(l)) {
-		l->stats.sent_info = l->snd_nxt;
-		l->stats.recv_info = l->rcv_nxt;
-	}
 }
 
 static void link_print(struct tipc_link *l, const char *str)
@@ -1867,12 +1870,12 @@ static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
 	};
 
 	struct nla_map map[] = {
-		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
+		{TIPC_NLA_STATS_RX_INFO, 0},
 		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
 		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
 		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
 		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
-		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
+		{TIPC_NLA_STATS_TX_INFO, 0},
 		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
 		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
 		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
@@ -1947,9 +1950,9 @@ int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
 		goto attr_msg_full;
 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
 		goto attr_msg_full;
-	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
 		goto attr_msg_full;
-	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
 		goto attr_msg_full;
 
 	if (tipc_link_is_up(link))
@@ -2004,12 +2007,12 @@ static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
 	};
 
 	struct nla_map map[] = {
-		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
+		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
 		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
 		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
 		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
 		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
-		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
+		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
 		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
 		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
 		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
@@ -2076,9 +2079,9 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
 		goto attr_msg_full;
 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
 		goto attr_msg_full;
-	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
 		goto attr_msg_full;
-	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
 		goto attr_msg_full;
 
 	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 78cab9c5a445..b58dc95f3d35 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -697,6 +697,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
 		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
 		udp_conf.use_udp_checksums = false;
 		ub->ifindex = dev->ifindex;
+		if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
+				 sizeof(struct udphdr))) {
+			err = -EINVAL;
+			goto err;
+		}
 		b->mtu = dev->mtu - sizeof(struct iphdr)
 			- sizeof(struct udphdr);
 #if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index fd6986634e6f..5bf7e1bfeac7 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1268,12 +1268,14 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
 			err = security_xfrm_policy_lookup(pol->security,
 						      fl->flowi_secid,
 						      policy_to_flow_dir(dir));
-			if (!err && !xfrm_pol_hold_rcu(pol))
-				goto again;
-			else if (err == -ESRCH)
+			if (!err) {
+				if (!xfrm_pol_hold_rcu(pol))
+					goto again;
+			} else if (err == -ESRCH) {
 				pol = NULL;
-			else
+			} else {
 				pol = ERR_PTR(err);
+			}
 		} else
 			pol = NULL;
 	}
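
The restructuring above makes the lookup-then-hold retry under RCU explicit: xfrm_pol_hold_rcu() is a refcount_inc_not_zero()-style attempt, and when it fails the policy's refcount has already hit zero, so the only safe move under rcu_read_lock() is to redo the lookup rather than return a dying object. A reduced sketch of the pattern (helper shape is illustrative, kernel context assumed):

    static struct xfrm_policy *sk_policy_lookup_hold(const struct sock *sk, int dir)
    {
    	struct xfrm_policy *pol;
    
    again:
    	pol = rcu_dereference(sk->sk_policy[dir]);
    	if (pol && !xfrm_pol_hold_rcu(pol))
    		goto again;	/* refcount hit zero under us: retry */
    	return pol;		/* NULL, or a policy we now hold a ref on */
    }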
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 08892091cfe3..671a1d0333f0 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2450,7 +2450,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
 #ifdef CONFIG_COMPAT
 	if (in_compat_syscall())
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 #endif
 
 	type = nlh->nlmsg_type;