Diffstat (limited to 'net')
-rw-r--r--  net/802/mrp.c | 4
-rw-r--r--  net/8021q/vlan.c | 14
-rw-r--r--  net/9p/trans_virtio.c | 2
-rw-r--r--  net/atm/common.c | 2
-rw-r--r--  net/ax25/af_ax25.c | 1
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 6
-rw-r--r--  net/bluetooth/af_bluetooth.c | 4
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 1
-rw-r--r--  net/bluetooth/sco.c | 1
-rw-r--r--  net/bridge/br_device.c | 2
-rw-r--r--  net/bridge/br_fdb.c | 2
-rw-r--r--  net/bridge/br_if.c | 3
-rw-r--r--  net/bridge/br_input.c | 2
-rw-r--r--  net/bridge/br_mdb.c | 4
-rw-r--r--  net/bridge/br_multicast.c | 3
-rw-r--r--  net/bridge/br_netlink.c | 2
-rw-r--r--  net/bridge/br_private.h | 5
-rw-r--r--  net/bridge/br_stp_if.c | 1
-rw-r--r--  net/caif/caif_socket.c | 2
-rw-r--r--  net/can/gw.c | 6
-rw-r--r--  net/ceph/osdmap.c | 42
-rw-r--r--  net/core/dev.c | 12
-rw-r--r--  net/core/dev_addr_lists.c | 6
-rw-r--r--  net/core/flow.c | 2
-rw-r--r--  net/core/flow_dissector.c | 2
-rw-r--r--  net/core/rtnetlink.c | 11
-rw-r--r--  net/core/scm.c | 4
-rw-r--r--  net/dcb/dcbnl.c | 8
-rw-r--r--  net/ieee802154/6lowpan.h | 2
-rw-r--r--  net/ipv4/af_inet.c | 3
-rw-r--r--  net/ipv4/devinet.c | 66
-rw-r--r--  net/ipv4/esp4.c | 6
-rw-r--r--  net/ipv4/inet_connection_sock.c | 1
-rw-r--r--  net/ipv4/inet_fragment.c | 20
-rw-r--r--  net/ipv4/ip_fragment.c | 25
-rw-r--r--  net/ipv4/ip_gre.c | 5
-rw-r--r--  net/ipv4/ip_options.c | 5
-rw-r--r--  net/ipv4/ipconfig.c | 3
-rw-r--r--  net/ipv4/netfilter/Kconfig | 13
-rw-r--r--  net/ipv4/syncookies.c | 4
-rw-r--r--  net/ipv4/tcp.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 7
-rw-r--r--  net/ipv4/tcp_ipv4.c | 14
-rw-r--r--  net/ipv4/tcp_output.c | 17
-rw-r--r--  net/ipv4/udp.c | 7
-rw-r--r--  net/ipv6/addrconf.c | 77
-rw-r--r--  net/ipv6/addrconf_core.c | 19
-rw-r--r--  net/ipv6/ip6_input.c | 15
-rw-r--r--  net/ipv6/netfilter/ip6t_NPT.c | 4
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 12
-rw-r--r--  net/ipv6/reassembly.c | 20
-rw-r--r--  net/ipv6/tcp_ipv6.c | 8
-rw-r--r--  net/ipv6/udp.c | 8
-rw-r--r--  net/irda/af_irda.c | 8
-rw-r--r--  net/iucv/af_iucv.c | 36
-rw-r--r--  net/key/af_key.c | 9
-rw-r--r--  net/l2tp/l2tp_core.c | 206
-rw-r--r--  net/l2tp/l2tp_core.h | 22
-rw-r--r--  net/l2tp/l2tp_debugfs.c | 28
-rw-r--r--  net/l2tp/l2tp_ip.c | 6
-rw-r--r--  net/l2tp/l2tp_ip6.c | 8
-rw-r--r--  net/l2tp/l2tp_netlink.c | 72
-rw-r--r--  net/l2tp/l2tp_ppp.c | 111
-rw-r--r--  net/llc/af_llc.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipportnet.c | 18
-rw-r--r--  net/netfilter/ipset/ip_set_hash_net.c | 22
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netiface.c | 22
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netport.c | 18
-rw-r--r--  net/netfilter/ipset/ip_set_list_set.c | 10
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 14
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 7
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c | 16
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 11
-rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c | 12
-rw-r--r--  net/netfilter/nf_conntrack_proto_gre.c | 12
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c | 12
-rw-r--r--  net/netfilter/nf_conntrack_proto_udplite.c | 12
-rw-r--r--  net/netfilter/nf_conntrack_sip.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 1
-rw-r--r--  net/netfilter/nf_nat_core.c | 40
-rw-r--r--  net/netfilter/nfnetlink.c | 7
-rw-r--r--  net/netfilter/nfnetlink_acct.c | 2
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c | 6
-rw-r--r--  net/netfilter/xt_AUDIT.c | 3
-rw-r--r--  net/netlabel/netlabel_unlabeled.c | 27
-rw-r--r--  net/netlink/genetlink.c | 1
-rw-r--r--  net/netrom/af_netrom.c | 1
-rw-r--r--  net/nfc/llcp/sock.c | 3
-rw-r--r--  net/openvswitch/actions.c | 4
-rw-r--r--  net/openvswitch/datapath.c | 33
-rw-r--r--  net/openvswitch/flow.c | 8
-rw-r--r--  net/openvswitch/vport-netdev.c | 3
-rw-r--r--  net/openvswitch/vport.c | 3
-rw-r--r--  net/rds/stats.c | 1
-rw-r--r--  net/rose/af_rose.c | 1
-rw-r--r--  net/sched/sch_cbq.c | 5
-rw-r--r--  net/sched/sch_fq_codel.c | 2
-rw-r--r--  net/sched/sch_generic.c | 2
-rw-r--r--  net/sctp/associola.c | 2
-rw-r--r--  net/sctp/sm_statefuns.c | 2
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 12
-rw-r--r--  net/sunrpc/clnt.c | 11
-rw-r--r--  net/sunrpc/rpc_pipe.c | 5
-rw-r--r--  net/sunrpc/sched.c | 9
-rw-r--r--  net/sunrpc/xprtsock.c | 15
-rw-r--r--  net/tipc/socket.c | 7
-rw-r--r--  net/unix/af_unix.c | 9
-rw-r--r--  net/vmw_vsock/af_vsock.c | 8
-rw-r--r--  net/vmw_vsock/vmci_transport.c | 34
-rw-r--r--  net/vmw_vsock/vsock_addr.c | 10
-rw-r--r--  net/vmw_vsock/vsock_addr.h | 2
-rw-r--r--  net/xfrm/xfrm_replay.c | 66
112 files changed, 929 insertions, 633 deletions
diff --git a/net/802/mrp.c b/net/802/mrp.c
index a4cc3229952a..e085bcc754f6 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -870,8 +870,12 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
 	 * all pending messages before the applicant is gone.
 	 */
 	del_timer_sync(&app->join_timer);
+
+	spin_lock(&app->lock);
 	mrp_mad_event(app, MRP_EVENT_TX);
 	mrp_pdu_queue(app);
+	spin_unlock(&app->lock);
+
 	mrp_queue_xmit(app);
 
 	dev_mc_del(dev, appl->group_address);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a18714469bf7..85addcd9372b 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -86,13 +86,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 
 	grp = &vlan_info->grp;
 
-	/* Take it out of our own structures, but be sure to interlock with
-	 * HW accelerating devices or SW vlan input packet processing if
-	 * VLAN is not 0 (leave it there for 802.1p).
-	 */
-	if (vlan_id)
-		vlan_vid_del(real_dev, vlan_id);
-
 	grp->nr_vlan_devs--;
 
 	if (vlan->flags & VLAN_FLAG_MVRP)
@@ -114,6 +107,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 			vlan_gvrp_uninit_applicant(real_dev);
 	}
 
+	/* Take it out of our own structures, but be sure to interlock with
+	 * HW accelerating devices or SW vlan input packet processing if
+	 * VLAN is not 0 (leave it there for 802.1p).
+	 */
+	if (vlan_id)
+		vlan_vid_del(real_dev, vlan_id);
+
 	/* Get rid of the vlan's reference to real_dev */
 	dev_put(real_dev);
 }
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 74dea377fe5b..de2e950a0a7a 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -655,7 +655,7 @@ static struct p9_trans_module p9_virtio_trans = {
 	.create = p9_virtio_create,
 	.close = p9_virtio_close,
 	.request = p9_virtio_request,
-	//.zc_request = p9_virtio_zc_request,
+	.zc_request = p9_virtio_zc_request,
 	.cancel = p9_virtio_cancel,
 	/*
 	 * We leave one entry for input and one entry for response
diff --git a/net/atm/common.c b/net/atm/common.c
index 7b491006eaf4..737bef59ce89 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -531,6 +531,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 	struct sk_buff *skb;
 	int copied, error = -EINVAL;
 
+	msg->msg_namelen = 0;
+
 	if (sock->state != SS_CONNECTED)
 		return -ENOTCONN;
 
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 7b11f8bc5071..e277e38f736b 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1642,6 +1642,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
 		ax25_address src;
 		const unsigned char *mac = skb_mac_header(skb);
 
+		memset(sax, 0, sizeof(struct full_sockaddr_ax25));
 		ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
 				&digi, NULL, NULL);
 		sax->sax25_family = AF_AX25;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index a0b253ecadaf..a5bb0a769eb9 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1288,7 +1288,8 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
 	batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
 
 	/* unpack the aggregated packets and process them one by one */
-	do {
+	while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
+					 batadv_ogm_packet->tt_num_changes)) {
 		tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN;
 
 		batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff,
@@ -1299,8 +1300,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
 
 		packet_pos = packet_buff + buff_pos;
 		batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
-	} while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
-					   batadv_ogm_packet->tt_num_changes));
+	}
 
 	kfree_skb(skb);
 	return NET_RX_SUCCESS;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index d3ee69b35a78..0d1b08cc76e1 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -230,6 +230,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (flags & (MSG_OOB))
 		return -EOPNOTSUPP;
 
+	msg->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, noblock, &err);
 	if (!skb) {
 		if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -237,8 +239,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		return err;
 	}
 
-	msg->msg_namelen = 0;
-
 	copied = skb->len;
 	if (len < copied) {
 		msg->msg_flags |= MSG_TRUNC;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index c23bae86263b..7c9224bcce17 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -608,6 +608,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
 	if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
 		rfcomm_dlc_accept(d);
+		msg->msg_namelen = 0;
 		return 0;
 	}
 
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index fad0302bdb32..fb6192c9812e 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
 		hci_conn_accept(pi->conn->hcon, 0);
 		sk->sk_state = BT_CONFIG;
+		msg->msg_namelen = 0;
 
 		release_sock(sk);
 		return 0;
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index d5f1d3fd4b28..314c73ed418f 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -66,7 +66,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 			goto out;
 		}
 
-		mdst = br_mdb_get(br, skb);
+		mdst = br_mdb_get(br, skb, vid);
 		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
 			br_multicast_deliver(mdst, skb);
 		else
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index b0812c91c0f0..bab338e6270d 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -423,7 +423,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 			return 0;
 		br_warn(br, "adding interface %s with same address "
 			"as a received packet\n",
-			source->dev->name);
+			source ? source->dev->name : br->dev->name);
 		fdb_delete(br, fdb);
 	}
 
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ef1b91431c6b..459dab22b3f6 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -67,7 +67,8 @@ void br_port_carrier_check(struct net_bridge_port *p)
 	struct net_device *dev = p->dev;
 	struct net_bridge *br = p->br;
 
-	if (netif_running(dev) && netif_oper_up(dev))
+	if (!(p->flags & BR_ADMIN_COST) &&
+	    netif_running(dev) && netif_oper_up(dev))
 		p->path_cost = port_cost(dev);
 
 	if (!netif_running(br->dev))
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 480330151898..828e2bcc1f52 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -97,7 +97,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	if (is_broadcast_ether_addr(dest))
 		skb2 = skb;
 	else if (is_multicast_ether_addr(dest)) {
-		mdst = br_mdb_get(br, skb);
+		mdst = br_mdb_get(br, skb, vid);
 		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
 			if ((mdst && mdst->mglist) ||
 			    br_multicast_is_router(br))
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 9f97b850fc65..ee79f3f20383 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -80,6 +80,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
 		port = p->port;
 		if (port) {
 			struct br_mdb_entry e;
+			memset(&e, 0, sizeof(e));
 			e.ifindex = port->dev->ifindex;
 			e.state = p->state;
 			if (p->addr.proto == htons(ETH_P_IP))
@@ -136,6 +137,7 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
 			break;
 
 		bpm = nlmsg_data(nlh);
+		memset(bpm, 0, sizeof(*bpm));
 		bpm->ifindex = dev->ifindex;
 		if (br_mdb_fill_info(skb, cb, dev) < 0)
 			goto out;
@@ -171,6 +173,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
 		return -EMSGSIZE;
 
 	bpm = nlmsg_data(nlh);
+	memset(bpm, 0, sizeof(*bpm));
 	bpm->family = AF_BRIDGE;
 	bpm->ifindex = dev->ifindex;
 	nest = nla_nest_start(skb, MDBA_MDB);
@@ -228,6 +231,7 @@ void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
 {
 	struct br_mdb_entry entry;
 
+	memset(&entry, 0, sizeof(entry));
 	entry.ifindex = port->dev->ifindex;
 	entry.addr.proto = group->proto;
 	entry.addr.u.ip4 = group->u.ip4;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 10e6fce1bb62..923fbeaf7afd 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -132,7 +132,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(
 #endif
 
 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
-					struct sk_buff *skb)
+					struct sk_buff *skb, u16 vid)
 {
 	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
 	struct br_ip ip;
@@ -144,6 +144,7 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
 		return NULL;
 
 	ip.proto = skb->protocol;
+	ip.vid = vid;
 
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 27aa3ee517ce..299fc5f40a26 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -29,6 +29,7 @@ static inline size_t br_port_info_size(void)
 		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
 		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
 		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
+		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
 		+ 0;
 }
 
@@ -329,6 +330,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
 	br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
 	br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
 	br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
+	br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
 
 	if (tb[IFLA_BRPORT_COST]) {
 		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 6d314c4e6bcb..d2c043a857b6 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -156,6 +156,7 @@ struct net_bridge_port
 #define BR_BPDU_GUARD		0x00000002
 #define BR_ROOT_BLOCK		0x00000004
 #define BR_MULTICAST_FAST_LEAVE	0x00000008
+#define BR_ADMIN_COST		0x00000010
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 	u32				multicast_startup_queries_sent;
@@ -442,7 +443,7 @@ extern int br_multicast_rcv(struct net_bridge *br,
 			    struct net_bridge_port *port,
 			    struct sk_buff *skb);
 extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
-					       struct sk_buff *skb);
+					       struct sk_buff *skb, u16 vid);
 extern void br_multicast_add_port(struct net_bridge_port *port);
 extern void br_multicast_del_port(struct net_bridge_port *port);
 extern void br_multicast_enable_port(struct net_bridge_port *port);
@@ -504,7 +505,7 @@ static inline int br_multicast_rcv(struct net_bridge *br,
 }
 
 static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
-						      struct sk_buff *skb)
+						      struct sk_buff *skb, u16 vid)
 {
 	return NULL;
 }
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 0bdb4ebd362b..d45e760141bb 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -288,6 +288,7 @@ int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost)
 	    path_cost > BR_MAX_PATH_COST)
 		return -ERANGE;
 
+	p->flags |= BR_ADMIN_COST;
 	p->path_cost = path_cost;
 	br_configuration_update(p->br);
 	br_port_state_selection(p->br);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 095259f83902..ff2ff3ce6965 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
diff --git a/net/can/gw.c b/net/can/gw.c
index 2d117dc5ebea..117814a7e73c 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -466,7 +466,7 @@ static int cgw_notifier(struct notifier_block *nb,
 		if (gwj->src.dev == dev || gwj->dst.dev == dev) {
 			hlist_del(&gwj->list);
 			cgw_unregister_filter(gwj);
-			kfree(gwj);
+			kmem_cache_free(cgw_cache, gwj);
 		}
 	}
 }
@@ -864,7 +864,7 @@ static void cgw_remove_all_jobs(void)
 	hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
 		hlist_del(&gwj->list);
 		cgw_unregister_filter(gwj);
-		kfree(gwj);
+		kmem_cache_free(cgw_cache, gwj);
 	}
 }
 
@@ -920,7 +920,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 
 		hlist_del(&gwj->list);
 		cgw_unregister_filter(gwj);
-		kfree(gwj);
+		kmem_cache_free(cgw_cache, gwj);
 		err = 0;
 		break;
 	}
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 69bc4bf89e3e..4543b9aba40c 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -654,6 +654,24 @@ static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
 	return 0;
 }
 
+static int __decode_pgid(void **p, void *end, struct ceph_pg *pg)
+{
+	u8 v;
+
+	ceph_decode_need(p, end, 1+8+4+4, bad);
+	v = ceph_decode_8(p);
+	if (v != 1)
+		goto bad;
+	pg->pool = ceph_decode_64(p);
+	pg->seed = ceph_decode_32(p);
+	*p += 4; /* skip preferred */
+	return 0;
+
+bad:
+	dout("error decoding pgid\n");
+	return -EINVAL;
+}
+
 /*
  * decode a full map.
  */
@@ -745,13 +763,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 	for (i = 0; i < len; i++) {
 		int n, j;
 		struct ceph_pg pgid;
-		struct ceph_pg_v1 pgid_v1;
 		struct ceph_pg_mapping *pg;
 
-		ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
-		ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1));
-		pgid.pool = le32_to_cpu(pgid_v1.pool);
-		pgid.seed = le16_to_cpu(pgid_v1.ps);
+		err = __decode_pgid(p, end, &pgid);
+		if (err)
+			goto bad;
+		ceph_decode_need(p, end, sizeof(u32), bad);
 		n = ceph_decode_32(p);
 		err = -EINVAL;
 		if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
@@ -818,8 +835,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 	u16 version;
 
 	ceph_decode_16_safe(p, end, version, bad);
-	if (version > 6) {
-		pr_warning("got unknown v %d > %d of inc osdmap\n", version, 6);
+	if (version != 6) {
+		pr_warning("got unknown v %d != 6 of inc osdmap\n", version);
 		goto bad;
 	}
 
@@ -963,15 +980,14 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 	while (len--) {
 		struct ceph_pg_mapping *pg;
 		int j;
-		struct ceph_pg_v1 pgid_v1;
 		struct ceph_pg pgid;
 		u32 pglen;
-		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
-		ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1));
-		pgid.pool = le32_to_cpu(pgid_v1.pool);
-		pgid.seed = le16_to_cpu(pgid_v1.ps);
-		pglen = ceph_decode_32(p);
 
+		err = __decode_pgid(p, end, &pgid);
+		if (err)
+			goto bad;
+		ceph_decode_need(p, end, sizeof(u32), bad);
+		pglen = ceph_decode_32(p);
 		if (pglen) {
 			ceph_decode_need(p, end, pglen*sizeof(u32), bad);
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 8f152f904f70..e7d68ed8aafe 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1545,7 +1545,6 @@ void net_enable_timestamp(void)
 		return;
 	}
 #endif
-	WARN_ON(in_interrupt());
 	static_key_slow_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
@@ -1625,7 +1624,6 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 	}
 
 	skb_orphan(skb);
-	nf_reset(skb);
 
 	if (unlikely(!is_skb_forwardable(dev, skb))) {
 		atomic_long_inc(&dev->rx_dropped);
@@ -1641,6 +1639,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 	skb->mark = 0;
 	secpath_reset(skb);
 	nf_reset(skb);
+	nf_reset_trace(skb);
 	return netif_rx(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -2219,9 +2218,9 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
 	struct packet_offload *ptype;
 	__be16 type = skb->protocol;
+	int vlan_depth = ETH_HLEN;
 
 	while (type == htons(ETH_P_8021Q)) {
-		int vlan_depth = ETH_HLEN;
 		struct vlan_hdr *vh;
 
 		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
@@ -3315,6 +3314,7 @@ int netdev_rx_handler_register(struct net_device *dev,
 	if (dev->rx_handler)
 		return -EBUSY;
 
+	/* Note: rx_handler_data must be set before rx_handler */
 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
 	rcu_assign_pointer(dev->rx_handler, rx_handler);
 
@@ -3335,6 +3335,11 @@ void netdev_rx_handler_unregister(struct net_device *dev)
 
 	ASSERT_RTNL();
 	RCU_INIT_POINTER(dev->rx_handler, NULL);
+	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
+	 * section has a guarantee to see a non NULL rx_handler_data
+	 * as well.
+	 */
+	synchronize_net();
 	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
@@ -3444,6 +3449,7 @@ ncls:
 	}
 	switch (rx_handler(&skb)) {
 	case RX_HANDLER_CONSUMED:
+		ret = NET_RX_SUCCESS;
 		goto unlock;
 	case RX_HANDLER_ANOTHER:
 		goto another_round;
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index bd2eb9d3e369..abdc9e6ef33e 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -37,7 +37,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
 	ha->type = addr_type;
 	ha->refcount = 1;
 	ha->global_use = global;
-	ha->synced = false;
+	ha->synced = 0;
 	list_add_tail_rcu(&ha->list, &list->list);
 	list->count++;
 
@@ -165,7 +165,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
 					   addr_len, ha->type);
 			if (err)
 				break;
-			ha->synced = true;
+			ha->synced++;
 			ha->refcount++;
 		} else if (ha->refcount == 1) {
 			__hw_addr_del(to_list, ha->addr, addr_len, ha->type);
@@ -186,7 +186,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
 		if (ha->synced) {
 			__hw_addr_del(to_list, ha->addr,
 				      addr_len, ha->type);
-			ha->synced = false;
+			ha->synced--;
 			__hw_addr_del(from_list, ha->addr,
 				      addr_len, ha->type);
 		}
diff --git a/net/core/flow.c b/net/core/flow.c
index c56ea6f7f6c7..2bfd081c59f7 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -328,7 +328,7 @@ static void flow_cache_flush_per_cpu(void *data)
 	struct flow_flush_info *info = data;
 	struct tasklet_struct *tasklet;
 
-	tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet);
+	tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
 	tasklet->data = (unsigned long)info;
 	tasklet_schedule(tasklet);
 }
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 9d4c7201400d..e187bf06d673 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -140,6 +140,8 @@ ipv6:
 		flow->ports = *ports;
 	}
 
+	flow->thoff = (u16) nhoff;
+
 	return true;
 }
 EXPORT_SYMBOL(skb_flow_dissect);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b376410ff259..23854b51a259 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -496,8 +496,10 @@ static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
 	}
 	if (ops->fill_info) {
 		data = nla_nest_start(skb, IFLA_INFO_DATA);
-		if (data == NULL)
+		if (data == NULL) {
+			err = -EMSGSIZE;
 			goto err_cancel_link;
+		}
 		err = ops->fill_info(skb, dev);
 		if (err < 0)
 			goto err_cancel_data;
@@ -979,6 +981,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 			 * report anything.
 			 */
 			ivi.spoofchk = -1;
+			memset(ivi.mac, 0, sizeof(ivi.mac));
 			if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
 				break;
 			vf_mac.vf =
@@ -1069,7 +1072,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	rcu_read_lock();
 	cb->seq = net->dev_base_seq;
 
-	if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+	if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
 			ifla_policy) >= 0) {
 
 		if (tb[IFLA_EXT_MASK])
@@ -1919,7 +1922,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
 	u32 ext_filter_mask = 0;
 	u16 min_ifinfo_dump_size = 0;
 
-	if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+	if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
 			ifla_policy) >= 0) {
 		if (tb[IFLA_EXT_MASK])
 			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
@@ -2620,7 +2623,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
 
 		while (RTA_OK(attr, attrlen)) {
-			unsigned int flavor = attr->rta_type;
+			unsigned int flavor = attr->rta_type & NLA_TYPE_MASK;
 			if (flavor) {
 				if (flavor > rta_max[sz_idx])
 					return -EINVAL;
diff --git a/net/core/scm.c b/net/core/scm.c
index 905dcc6ad1e3..2dc6cdaaae8a 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -24,6 +24,7 @@
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/security.h>
+#include <linux/pid_namespace.h>
 #include <linux/pid.h>
 #include <linux/nsproxy.h>
 #include <linux/slab.h>
@@ -52,7 +53,8 @@ static __inline__ int scm_check_creds(struct ucred *creds)
 	if (!uid_valid(uid) || !gid_valid(gid))
 		return -EINVAL;
 
-	if ((creds->pid == task_tgid_vnr(current) || nsown_capable(CAP_SYS_ADMIN)) &&
+	if ((creds->pid == task_tgid_vnr(current) ||
+	     ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
 	    ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
 	      uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
 	    ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 1b588e23cf80..21291f1abcd6 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -284,6 +284,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
 	if (!netdev->dcbnl_ops->getpermhwaddr)
 		return -EOPNOTSUPP;
 
+	memset(perm_addr, 0, sizeof(perm_addr));
 	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
 
 	return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
@@ -1042,6 +1043,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 
 	if (ops->ieee_getets) {
 		struct ieee_ets ets;
+		memset(&ets, 0, sizeof(ets));
 		err = ops->ieee_getets(netdev, &ets);
 		if (!err &&
 		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
@@ -1050,6 +1052,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 
 	if (ops->ieee_getmaxrate) {
 		struct ieee_maxrate maxrate;
+		memset(&maxrate, 0, sizeof(maxrate));
 		err = ops->ieee_getmaxrate(netdev, &maxrate);
 		if (!err) {
 			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
@@ -1061,6 +1064,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 
 	if (ops->ieee_getpfc) {
 		struct ieee_pfc pfc;
+		memset(&pfc, 0, sizeof(pfc));
 		err = ops->ieee_getpfc(netdev, &pfc);
 		if (!err &&
 		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
@@ -1094,6 +1098,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 	/* get peer info if available */
 	if (ops->ieee_peer_getets) {
 		struct ieee_ets ets;
+		memset(&ets, 0, sizeof(ets));
 		err = ops->ieee_peer_getets(netdev, &ets);
 		if (!err &&
 		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
@@ -1102,6 +1107,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 
 	if (ops->ieee_peer_getpfc) {
 		struct ieee_pfc pfc;
+		memset(&pfc, 0, sizeof(pfc));
 		err = ops->ieee_peer_getpfc(netdev, &pfc);
 		if (!err &&
 		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
@@ -1280,6 +1286,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
 	/* peer info if available */
 	if (ops->cee_peer_getpg) {
 		struct cee_pg pg;
+		memset(&pg, 0, sizeof(pg));
 		err = ops->cee_peer_getpg(netdev, &pg);
 		if (!err &&
 		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
@@ -1288,6 +1295,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
 
 	if (ops->cee_peer_getpfc) {
 		struct cee_pfc pfc;
+		memset(&pfc, 0, sizeof(pfc));
 		err = ops->cee_peer_getpfc(netdev, &pfc);
 		if (!err &&
 		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index 8c2251fb0a3f..bba5f8336317 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -84,7 +84,7 @@
 	(memcmp(addr1, addr2, length >> 3) == 0)
 
 /* local link, i.e. FE80::/10 */
-#define is_addr_link_local(a) (((a)->s6_addr16[0]) == 0x80FE)
+#define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80))
 
 /*
  * check whether we can compress the IID to 16 bits,
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 68f6a94f7661..c929d9c1c4b6 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1333,8 +1333,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 			iph->frag_off |= htons(IP_MF);
 		offset += (skb->len - skb->mac_len - iph->ihl * 4);
 	} else {
-		if (!(iph->frag_off & htons(IP_DF)))
-			iph->id = htons(id++);
+		iph->id = htons(id++);
 	}
 	iph->tot_len = htons(skb->len - skb->mac_len);
 	iph->check = 0;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index f678507bc829..c6287cd978c2 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -587,13 +587,16 @@ static void check_lifetime(struct work_struct *work)
 {
 	unsigned long now, next, next_sec, next_sched;
 	struct in_ifaddr *ifa;
+	struct hlist_node *n;
 	int i;
 
 	now = jiffies;
 	next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
 
-	rcu_read_lock();
 	for (i = 0; i < IN4_ADDR_HSIZE; i++) {
+		bool change_needed = false;
+
+		rcu_read_lock();
 		hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
 			unsigned long age;
 
@@ -606,16 +609,7 @@ static void check_lifetime(struct work_struct *work)
 
 			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
 			    age >= ifa->ifa_valid_lft) {
-				struct in_ifaddr **ifap ;
-
-				rtnl_lock();
-				for (ifap = &ifa->ifa_dev->ifa_list;
-				     *ifap != NULL; ifap = &ifa->ifa_next) {
-					if (*ifap == ifa)
-						inet_del_ifa(ifa->ifa_dev,
-							     ifap, 1);
-				}
-				rtnl_unlock();
+				change_needed = true;
 			} else if (ifa->ifa_preferred_lft ==
 				   INFINITY_LIFE_TIME) {
 				continue;
@@ -625,10 +619,8 @@ static void check_lifetime(struct work_struct *work)
 					next = ifa->ifa_tstamp +
 					       ifa->ifa_valid_lft * HZ;
 
-				if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) {
-					ifa->ifa_flags |= IFA_F_DEPRECATED;
-					rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
-				}
+				if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
+					change_needed = true;
 			} else if (time_before(ifa->ifa_tstamp +
 					       ifa->ifa_preferred_lft * HZ,
 					       next)) {
@@ -636,8 +628,42 @@ static void check_lifetime(struct work_struct *work)
 					ifa->ifa_preferred_lft * HZ;
 			}
 		}
+		rcu_read_unlock();
+		if (!change_needed)
+			continue;
+		rtnl_lock();
+		hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
+			unsigned long age;
+
+			if (ifa->ifa_flags & IFA_F_PERMANENT)
+				continue;
+
+			/* We try to batch several events at once. */
+			age = (now - ifa->ifa_tstamp +
+			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
+
+			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
+			    age >= ifa->ifa_valid_lft) {
+				struct in_ifaddr **ifap;
+
+				for (ifap = &ifa->ifa_dev->ifa_list;
+				     *ifap != NULL; ifap = &(*ifap)->ifa_next) {
+					if (*ifap == ifa) {
+						inet_del_ifa(ifa->ifa_dev,
+							     ifap, 1);
+						break;
+					}
+				}
+			} else if (ifa->ifa_preferred_lft !=
+				   INFINITY_LIFE_TIME &&
+				   age >= ifa->ifa_preferred_lft &&
+				   !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
+				ifa->ifa_flags |= IFA_F_DEPRECATED;
+				rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
+			}
+		}
+		rtnl_unlock();
 	}
-	rcu_read_unlock();
 
 	next_sec = round_jiffies_up(next);
 	next_sched = next;
@@ -802,8 +828,12 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
 		if (nlh->nlmsg_flags & NLM_F_EXCL ||
 		    !(nlh->nlmsg_flags & NLM_F_REPLACE))
 			return -EEXIST;
-
-		set_ifa_lifetime(ifa_existing, valid_lft, prefered_lft);
+		ifa = ifa_existing;
+		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
+		cancel_delayed_work(&check_lifetime_work);
+		schedule_delayed_work(&check_lifetime_work, 0);
+		rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
+		blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
 	}
 	return 0;
 }
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 3b4f0cd2e63e..4cfe34d4cc96 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -139,8 +139,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 
 	/* skb is pure payload to encrypt */
 
-	err = -ENOMEM;
-
 	esp = x->data;
 	aead = esp->aead;
 	alen = crypto_aead_authsize(aead);
@@ -176,8 +174,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	}
 
 	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
-	if (!tmp)
+	if (!tmp) {
+		err = -ENOMEM;
 		goto error;
+	}
 
 	seqhi = esp_tmp_seqhi(tmp);
 	iv = esp_tmp_iv(aead, tmp, seqhilen);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 7d1874be1df3..786d97aee751 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -735,6 +735,7 @@ EXPORT_SYMBOL(inet_csk_destroy_sock);
  * tcp/dccp_create_openreq_child().
  */
 void inet_csk_prepare_forced_close(struct sock *sk)
+	__releases(&sk->sk_lock.slock)
 {
 	/* sk_clone_lock locked the socket and set refcnt to 2 */
 	bh_unlock_sock(sk);
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 245ae078a07f..f4fd23de9b13 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -21,6 +21,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/slab.h>
 
+#include <net/sock.h>
 #include <net/inet_frag.h>
 
 static void inet_frag_secret_rebuild(unsigned long dummy)
@@ -277,6 +278,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 	__releases(&f->lock)
 {
 	struct inet_frag_queue *q;
+	int depth = 0;
 
 	hlist_for_each_entry(q, &f->hash[hash], list) {
 		if (q->net == nf && f->match(q, key)) {
@@ -284,9 +286,25 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 			read_unlock(&f->lock);
 			return q;
 		}
+		depth++;
 	}
 	read_unlock(&f->lock);
 
-	return inet_frag_create(nf, f, key);
+	if (depth <= INETFRAGS_MAXDEPTH)
+		return inet_frag_create(nf, f, key);
+	else
+		return ERR_PTR(-ENOBUFS);
 }
 EXPORT_SYMBOL(inet_frag_find);
+
+void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
+				   const char *prefix)
+{
+	static const char msg[] = "inet_frag_find: Fragment hash bucket"
+		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
+		". Dropping fragment.\n";
+
+	if (PTR_ERR(q) == -ENOBUFS)
+		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
+}
+EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b6d30acb600c..52c273ea05c3 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -248,8 +248,7 @@ static void ip_expire(unsigned long arg)
 	if (!head->dev)
 		goto out_rcu_unlock;
 
-	/* skb dst is stale, drop it, and perform route lookup again */
-	skb_dst_drop(head);
+	/* skb has no dst, perform route lookup again */
 	iph = ip_hdr(head);
 	err = ip_route_input_noref(head, iph->daddr, iph->saddr,
 				   iph->tos, head->dev);
@@ -292,14 +291,11 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
 	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 
 	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
-	if (q == NULL)
-		goto out_nomem;
-
+	if (IS_ERR_OR_NULL(q)) {
+		inet_frag_maybe_warn_overflow(q, pr_fmt());
+		return NULL;
+	}
 	return container_of(q, struct ipq, q);
-
-out_nomem:
-	LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n"));
-	return NULL;
 }
 
 /* Is the fragment too far ahead to be part of ipq? */
@@ -526,9 +522,16 @@ found:
 		qp->q.max_size = skb->len + ihl;
 
 	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
-	    qp->q.meat == qp->q.len)
-		return ip_frag_reasm(qp, prev, dev);
+	    qp->q.meat == qp->q.len) {
+		unsigned long orefdst = skb->_skb_refdst;
+
+		skb->_skb_refdst = 0UL;
+		err = ip_frag_reasm(qp, prev, dev);
+		skb->_skb_refdst = orefdst;
+		return err;
+	}
 
+	skb_dst_drop(skb);
 	inet_frag_lru_move(&qp->q);
 	return -EINPROGRESS;
 
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d0ef0e674ec5..91d66dbde9c0 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -798,10 +798,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 
 	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
 		gre_hlen = 0;
-		if (skb->protocol == htons(ETH_P_IP))
-			tiph = (const struct iphdr *)skb->data;
-		else
-			tiph = &tunnel->parms.iph;
+		tiph = (const struct iphdr *)skb->data;
 	} else {
 		gre_hlen = tunnel->hlen;
 		tiph = &tunnel->parms.iph;
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 310a3647c83d..ec7264514a82 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -370,7 +370,6 @@ int ip_options_compile(struct net *net,
 			}
 			switch (optptr[3]&0xF) {
 			      case IPOPT_TS_TSONLY:
-				opt->ts = optptr - iph;
 				if (skb)
 					timeptr = &optptr[optptr[2]-1];
 				opt->ts_needtime = 1;
@@ -381,7 +380,6 @@ int ip_options_compile(struct net *net,
 					pp_ptr = optptr + 2;
 					goto error;
 				}
-				opt->ts = optptr - iph;
 				if (rt)  {
 					spec_dst_fill(&spec_dst, skb);
 					memcpy(&optptr[optptr[2]-1], &spec_dst, 4);
@@ -396,7 +394,6 @@ int ip_options_compile(struct net *net,
 					pp_ptr = optptr + 2;
 					goto error;
 				}
-				opt->ts = optptr - iph;
 				{
 					__be32 addr;
 					memcpy(&addr, &optptr[optptr[2]-1], 4);
@@ -429,12 +426,12 @@ int ip_options_compile(struct net *net,
 						pp_ptr = optptr + 3;
 						goto error;
 					}
-					opt->ts = optptr - iph;
 					if (skb) {
 						optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4);
 						opt->is_changed = 1;
 					}
 				}
+				opt->ts = optptr - iph;
 				break;
 			      case IPOPT_RA:
 				if (optlen < 4) {
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 98cbc6877019..bf6c5cf31aed 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1522,7 +1522,8 @@ static int __init ip_auto_config(void)
 	}
 	for (i++; i < CONF_NAMESERVERS_MAX; i++)
 		if (ic_nameservers[i] != NONE)
-			pr_cont(", nameserver%u=%pI4\n", i, &ic_nameservers[i]);
+			pr_cont(", nameserver%u=%pI4", i, &ic_nameservers[i]);
+	pr_cont("\n");
 #endif /* !SILENT */
 
 	return 0;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index ce2d43e1f09f..0d755c50994b 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -36,19 +36,6 @@ config NF_CONNTRACK_PROC_COMPAT
 
 	  If unsure, say Y.
 
-config IP_NF_QUEUE
-	tristate "IP Userspace queueing via NETLINK (OBSOLETE)"
-	depends on NETFILTER_ADVANCED
-	help
-	  Netfilter has the ability to queue packets to user space: the
-	  netlink device can be used to access them using this driver.
-
-	  This option enables the old IPv4-only "ip_queue" implementation
-	  which has been obsoleted by the new "nfnetlink_queue" code (see
-	  CONFIG_NETFILTER_NETLINK_QUEUE).
-
-	  To compile it as a module, choose M here. If unsure, say N.
-
 config IP_NF_IPTABLES
 	tristate "IP tables support (required for filtering/masq/NAT)"
 	default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index ef54377fb11c..397e0f69435f 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -349,8 +349,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	 * hasn't changed since we received the original syn, but I see
 	 * no easy way to do this.
 	 */
-	flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
-			   RT_SCOPE_UNIVERSE, IPPROTO_TCP,
+	flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
+			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
 			   inet_sk_flowi_flags(sk),
 			   (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
 			   ireq->loc_addr, th->source, th->dest);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 47e854fcae24..e22020790709 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -775,7 +775,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 		 * Make sure that we have exactly size bytes
 		 * available to the caller, no more, no less.
 		 */
-		skb->avail_size = size;
+		skb->reserved_tailroom = skb->end - skb->tail - size;
 		return skb;
 	}
 	__kfree_skb(skb);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0d9bdacce99f..3bd55bad230a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2059,11 +2059,8 @@ void tcp_enter_loss(struct sock *sk, int how)
 	if (tcp_is_reno(tp))
 		tcp_reset_reno_sack(tp);
 
-	if (!how) {
-		/* Push undo marker, if it was plain RTO and nothing
-		 * was retransmitted. */
-		tp->undo_marker = tp->snd_una;
-	} else {
+	tp->undo_marker = tp->snd_una;
+	if (how) {
 		tp->sacked_out = 0;
 		tp->fackets_out = 0;
 	}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 4a8ec457310f..d09203c63264 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -274,13 +274,6 @@ static void tcp_v4_mtu_reduced(struct sock *sk)
274 struct inet_sock *inet = inet_sk(sk); 274 struct inet_sock *inet = inet_sk(sk);
275 u32 mtu = tcp_sk(sk)->mtu_info; 275 u32 mtu = tcp_sk(sk)->mtu_info;
276 276
277 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
278 * send out by Linux are always <576bytes so they should go through
279 * unfragmented).
280 */
281 if (sk->sk_state == TCP_LISTEN)
282 return;
283
284 dst = inet_csk_update_pmtu(sk, mtu); 277 dst = inet_csk_update_pmtu(sk, mtu);
285 if (!dst) 278 if (!dst)
286 return; 279 return;
@@ -408,6 +401,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
408 goto out; 401 goto out;
409 402
410 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ 403 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
404 /* We are not interested in TCP_LISTEN and open_requests
405 * (SYN-ACKs send out by Linux are always <576bytes so
406 * they should go through unfragmented).
407 */
408 if (sk->sk_state == TCP_LISTEN)
409 goto out;
410
411 tp->mtu_info = info; 411 tp->mtu_info = info;
412 if (!sock_owned_by_user(sk)) { 412 if (!sock_owned_by_user(sk)) {
413 tcp_v4_mtu_reduced(sk); 413 tcp_v4_mtu_reduced(sk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e2b4461074da..509912a5ff98 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1298,7 +1298,6 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
1298 eat = min_t(int, len, skb_headlen(skb)); 1298 eat = min_t(int, len, skb_headlen(skb));
1299 if (eat) { 1299 if (eat) {
1300 __skb_pull(skb, eat); 1300 __skb_pull(skb, eat);
1301 skb->avail_size -= eat;
1302 len -= eat; 1301 len -= eat;
1303 if (!len) 1302 if (!len)
1304 return; 1303 return;
@@ -1810,8 +1809,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1810 goto send_now; 1809 goto send_now;
1811 } 1810 }
1812 1811
1813 /* Ok, it looks like it is advisable to defer. */ 1812 /* Ok, it looks like it is advisable to defer.
1814 tp->tso_deferred = 1 | (jiffies << 1); 1813 * Do not rearm the timer if already set to not break TCP ACK clocking.
1814 */
1815 if (!tp->tso_deferred)
1816 tp->tso_deferred = 1 | (jiffies << 1);
1815 1817
1816 return true; 1818 return true;
1817 1819
@@ -2386,8 +2388,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2386 */ 2388 */
2387 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2389 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2388 2390
2389 /* make sure skb->data is aligned on arches that require it */ 2391 /* make sure skb->data is aligned on arches that require it
2390 if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) { 2392 * and check if ack-trimming & collapsing extended the headroom
2393 * beyond what csum_start can cover.
2394 */
2395 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
2396 skb_headroom(skb) >= 0xFFFF)) {
2391 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, 2397 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
2392 GFP_ATOMIC); 2398 GFP_ATOMIC);
2393 return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : 2399 return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
@@ -2707,6 +2713,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2707 skb_reserve(skb, MAX_TCP_HEADER); 2713 skb_reserve(skb, MAX_TCP_HEADER);
2708 2714
2709 skb_dst_set(skb, dst); 2715 skb_dst_set(skb, dst);
2716 security_skb_owned_by(skb, sk);
2710 2717
2711 mss = dst_metric_advmss(dst); 2718 mss = dst_metric_advmss(dst);
2712 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) 2719 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
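The tso_deferred hunk above records the deferral timestamp only once, so repeated passes through tcp_tso_should_defer() cannot keep pushing the deadline out and stall ACK clocking. A small sketch of the deferral tag arithmetic, assuming bit 0 means "deferral active" and the upper bits carry a truncated jiffies value (the helper names are illustrative, not kernel API):

	static inline u32 tso_defer_tag(unsigned long now)
	{
		return 1 | ((u32)now << 1);	/* never zero, so "unset" stays unambiguous */
	}

	static inline u32 tso_defer_age(u32 tag, unsigned long now)
	{
		/* ticks elapsed since the deferral was first recorded */
		return (((u32)now << 1) >> 1) - (tag >> 1);
	}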
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 265c42cf963c..0a073a263720 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1762,9 +1762,16 @@ int udp_rcv(struct sk_buff *skb)
1762 1762
1763void udp_destroy_sock(struct sock *sk) 1763void udp_destroy_sock(struct sock *sk)
1764{ 1764{
1765 struct udp_sock *up = udp_sk(sk);
1765 bool slow = lock_sock_fast(sk); 1766 bool slow = lock_sock_fast(sk);
1766 udp_flush_pending_frames(sk); 1767 udp_flush_pending_frames(sk);
1767 unlock_sock_fast(sk, slow); 1768 unlock_sock_fast(sk, slow);
1769 if (static_key_false(&udp_encap_needed) && up->encap_type) {
1770 void (*encap_destroy)(struct sock *sk);
1771 encap_destroy = ACCESS_ONCE(up->encap_destroy);
1772 if (encap_destroy)
1773 encap_destroy(sk);
1774 }
1768} 1775}
1769 1776
1770/* 1777/*
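The udp.c hunk wires the new encap_destroy hook into UDP socket teardown so that an encapsulation user (L2TP in this series) gets a chance to release its tunnel state before the socket goes away. A minimal sketch of how a module would plug in, using only the udp_sock fields touched in this patch set; the function names are placeholders:

	static int my_encap_rcv(struct sock *sk, struct sk_buff *skb)
	{
		return 1;	/* not ours: let normal UDP delivery handle it */
	}

	static void my_encap_destroy(struct sock *sk)
	{
		/* tear down per-tunnel state bound to this socket */
	}

	static void my_encap_setup(struct sock *sk)
	{
		udp_sk(sk)->encap_type    = UDP_ENCAP_L2TPINUDP;	/* or the module's own type */
		udp_sk(sk)->encap_rcv     = my_encap_rcv;
		udp_sk(sk)->encap_destroy = my_encap_destroy;
	}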
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f2c7e615f902..dae802c0af7c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -168,8 +168,6 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
168static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, 168static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
169 struct net_device *dev); 169 struct net_device *dev);
170 170
171static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
172
173static struct ipv6_devconf ipv6_devconf __read_mostly = { 171static struct ipv6_devconf ipv6_devconf __read_mostly = {
174 .forwarding = 0, 172 .forwarding = 0,
175 .hop_limit = IPV6_DEFAULT_HOPLIMIT, 173 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
@@ -837,7 +835,7 @@ out2:
837 rcu_read_unlock_bh(); 835 rcu_read_unlock_bh();
838 836
839 if (likely(err == 0)) 837 if (likely(err == 0))
840 atomic_notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa); 838 inet6addr_notifier_call_chain(NETDEV_UP, ifa);
841 else { 839 else {
842 kfree(ifa); 840 kfree(ifa);
843 ifa = ERR_PTR(err); 841 ifa = ERR_PTR(err);
@@ -927,7 +925,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
927 925
928 ipv6_ifa_notify(RTM_DELADDR, ifp); 926 ipv6_ifa_notify(RTM_DELADDR, ifp);
929 927
930 atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifp); 928 inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
931 929
932 /* 930 /*
933 * Purge or update corresponding prefix 931 * Purge or update corresponding prefix
@@ -2529,6 +2527,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2529static void init_loopback(struct net_device *dev) 2527static void init_loopback(struct net_device *dev)
2530{ 2528{
2531 struct inet6_dev *idev; 2529 struct inet6_dev *idev;
2530 struct net_device *sp_dev;
2531 struct inet6_ifaddr *sp_ifa;
2532 struct rt6_info *sp_rt;
2532 2533
2533 /* ::1 */ 2534 /* ::1 */
2534 2535
@@ -2540,6 +2541,30 @@ static void init_loopback(struct net_device *dev)
2540 } 2541 }
2541 2542
2542 add_addr(idev, &in6addr_loopback, 128, IFA_HOST); 2543 add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
2544
2545 /* Add routes to other interface's IPv6 addresses */
2546 for_each_netdev(dev_net(dev), sp_dev) {
2547 if (!strcmp(sp_dev->name, dev->name))
2548 continue;
2549
2550 idev = __in6_dev_get(sp_dev);
2551 if (!idev)
2552 continue;
2553
2554 read_lock_bh(&idev->lock);
2555 list_for_each_entry(sp_ifa, &idev->addr_list, if_list) {
2556
2557 if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
2558 continue;
2559
2560 sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
2561
2562 /* Failure cases are ignored */
2563 if (!IS_ERR(sp_rt))
2564 ip6_ins_rt(sp_rt);
2565 }
2566 read_unlock_bh(&idev->lock);
2567 }
2543} 2568}
2544 2569
2545static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) 2570static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
@@ -2961,7 +2986,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2961 2986
2962 if (state != INET6_IFADDR_STATE_DEAD) { 2987 if (state != INET6_IFADDR_STATE_DEAD) {
2963 __ipv6_ifa_notify(RTM_DELADDR, ifa); 2988 __ipv6_ifa_notify(RTM_DELADDR, ifa);
2964 atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); 2989 inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
2965 } 2990 }
2966 in6_ifa_put(ifa); 2991 in6_ifa_put(ifa);
2967 2992
@@ -4784,26 +4809,20 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)
4784 4809
4785static int __net_init addrconf_init_net(struct net *net) 4810static int __net_init addrconf_init_net(struct net *net)
4786{ 4811{
4787 int err; 4812 int err = -ENOMEM;
4788 struct ipv6_devconf *all, *dflt; 4813 struct ipv6_devconf *all, *dflt;
4789 4814
4790 err = -ENOMEM; 4815 all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
4791 all = &ipv6_devconf; 4816 if (all == NULL)
4792 dflt = &ipv6_devconf_dflt; 4817 goto err_alloc_all;
4793 4818
4794 if (!net_eq(net, &init_net)) { 4819 dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
4795 all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL); 4820 if (dflt == NULL)
4796 if (all == NULL) 4821 goto err_alloc_dflt;
4797 goto err_alloc_all;
4798 4822
4799 dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); 4823 /* these will be inherited by all namespaces */
4800 if (dflt == NULL) 4824 dflt->autoconf = ipv6_defaults.autoconf;
4801 goto err_alloc_dflt; 4825 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
4802 } else {
4803 /* these will be inherited by all namespaces */
4804 dflt->autoconf = ipv6_defaults.autoconf;
4805 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
4806 }
4807 4826
4808 net->ipv6.devconf_all = all; 4827 net->ipv6.devconf_all = all;
4809 net->ipv6.devconf_dflt = dflt; 4828 net->ipv6.devconf_dflt = dflt;
@@ -4848,22 +4867,6 @@ static struct pernet_operations addrconf_ops = {
4848 .exit = addrconf_exit_net, 4867 .exit = addrconf_exit_net,
4849}; 4868};
4850 4869
4851/*
4852 * Device notifier
4853 */
4854
4855int register_inet6addr_notifier(struct notifier_block *nb)
4856{
4857 return atomic_notifier_chain_register(&inet6addr_chain, nb);
4858}
4859EXPORT_SYMBOL(register_inet6addr_notifier);
4860
4861int unregister_inet6addr_notifier(struct notifier_block *nb)
4862{
4863 return atomic_notifier_chain_unregister(&inet6addr_chain, nb);
4864}
4865EXPORT_SYMBOL(unregister_inet6addr_notifier);
4866
4867static struct rtnl_af_ops inet6_ops = { 4870static struct rtnl_af_ops inet6_ops = {
4868 .family = AF_INET6, 4871 .family = AF_INET6,
4869 .fill_link_af = inet6_fill_link_af, 4872 .fill_link_af = inet6_fill_link_af,
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index d051e5f4bf34..72104562c864 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -78,3 +78,22 @@ int __ipv6_addr_type(const struct in6_addr *addr)
78} 78}
79EXPORT_SYMBOL(__ipv6_addr_type); 79EXPORT_SYMBOL(__ipv6_addr_type);
80 80
81static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
82
83int register_inet6addr_notifier(struct notifier_block *nb)
84{
85 return atomic_notifier_chain_register(&inet6addr_chain, nb);
86}
87EXPORT_SYMBOL(register_inet6addr_notifier);
88
89int unregister_inet6addr_notifier(struct notifier_block *nb)
90{
91 return atomic_notifier_chain_unregister(&inet6addr_chain, nb);
92}
93EXPORT_SYMBOL(unregister_inet6addr_notifier);
94
95int inet6addr_notifier_call_chain(unsigned long val, void *v)
96{
97 return atomic_notifier_call_chain(&inet6addr_chain, val, v);
98}
99EXPORT_SYMBOL(inet6addr_notifier_call_chain);
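With the inet6addr notifier chain now living in addrconf_core.c, other code can subscribe to IPv6 address add/remove events without depending on the full addrconf implementation. A minimal consumer sketch, assuming the usual notifier_block convention where the data pointer is the affected struct inet6_ifaddr (the names below are hypothetical):

	static int my_inet6addr_event(struct notifier_block *nb,
				      unsigned long event, void *ptr)
	{
		struct inet6_ifaddr *ifa = ptr;

		if (event == NETDEV_UP)
			pr_info("IPv6 address added on %s\n", ifa->idev->dev->name);
		return NOTIFY_DONE;
	}

	static struct notifier_block my_inet6addr_nb = {
		.notifier_call = my_inet6addr_event,
	};

	/* register_inet6addr_notifier(&my_inet6addr_nb) at init time,
	 * unregister_inet6addr_notifier(&my_inet6addr_nb) on exit.    */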
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index b1876e52091e..2bab2aa59745 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -118,6 +118,18 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
118 ipv6_addr_loopback(&hdr->daddr)) 118 ipv6_addr_loopback(&hdr->daddr))
119 goto err; 119 goto err;
120 120
121 /* RFC4291 Errata ID: 3480
122 * Interface-Local scope spans only a single interface on a
123 * node and is useful only for loopback transmission of
124 * multicast. Packets with interface-local scope received
125 * from another node must be discarded.
126 */
127 if (!(skb->pkt_type == PACKET_LOOPBACK ||
128 dev->flags & IFF_LOOPBACK) &&
129 ipv6_addr_is_multicast(&hdr->daddr) &&
130 IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
131 goto err;
132
121 /* RFC4291 2.7 133 /* RFC4291 2.7
122 * Nodes must not originate a packet to a multicast address whose scope 134 * Nodes must not originate a packet to a multicast address whose scope
123 * field contains the reserved value 0; if such a packet is received, it 135 * field contains the reserved value 0; if such a packet is received, it
@@ -281,7 +293,8 @@ int ip6_mc_input(struct sk_buff *skb)
281 * IPv6 multicast router mode is now supported ;) 293 * IPv6 multicast router mode is now supported ;)
282 */ 294 */
283 if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && 295 if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
284 !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) && 296 !(ipv6_addr_type(&hdr->daddr) &
297 (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) &&
285 likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { 298 likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
286 /* 299 /*
287 * Okay, we try to forward - split and duplicate 300 * Okay, we try to forward - split and duplicate
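The first ip6_input.c hunk implements RFC 4291 errata 3480: interface-local multicast (scope value 1, i.e. ff01::/16) is only meaningful for loopback delivery, so such packets arriving from another node are dropped. A standalone restatement of the check, using only the helpers visible in the hunk (the wrapper name is illustrative):

	static bool example_iface_local_mcast(const struct in6_addr *daddr)
	{
		/* e.g. ff01::1 (interface-local all-nodes) has scope nibble 1 */
		return ipv6_addr_is_multicast(daddr) &&
		       IPV6_ADDR_MC_SCOPE(daddr) == 1;
	}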
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
index 83acc1405a18..cb631143721c 100644
--- a/net/ipv6/netfilter/ip6t_NPT.c
+++ b/net/ipv6/netfilter/ip6t_NPT.c
@@ -57,7 +57,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
57 if (pfx_len - i >= 32) 57 if (pfx_len - i >= 32)
58 mask = 0; 58 mask = 0;
59 else 59 else
60 mask = htonl(~((1 << (pfx_len - i)) - 1)); 60 mask = htonl((1 << (i - pfx_len + 32)) - 1);
61 61
62 idx = i / 32; 62 idx = i / 32;
63 addr->s6_addr32[idx] &= mask; 63 addr->s6_addr32[idx] &= mask;
@@ -114,6 +114,7 @@ ip6t_dnpt_tg(struct sk_buff *skb, const struct xt_action_param *par)
114static struct xt_target ip6t_npt_target_reg[] __read_mostly = { 114static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
115 { 115 {
116 .name = "SNPT", 116 .name = "SNPT",
117 .table = "mangle",
117 .target = ip6t_snpt_tg, 118 .target = ip6t_snpt_tg,
118 .targetsize = sizeof(struct ip6t_npt_tginfo), 119 .targetsize = sizeof(struct ip6t_npt_tginfo),
119 .checkentry = ip6t_npt_checkentry, 120 .checkentry = ip6t_npt_checkentry,
@@ -124,6 +125,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
124 }, 125 },
125 { 126 {
126 .name = "DNPT", 127 .name = "DNPT",
128 .table = "mangle",
127 .target = ip6t_dnpt_tg, 129 .target = ip6t_dnpt_tg,
128 .targetsize = sizeof(struct ip6t_npt_tginfo), 130 .targetsize = sizeof(struct ip6t_npt_tginfo),
129 .checkentry = ip6t_npt_checkentry, 131 .checkentry = ip6t_npt_checkentry,
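The ip6t_npt_map_pfx() mask fix is easiest to check with concrete numbers. The mask must keep the host bits of the 32-bit word that straddles the prefix boundary; a worked example with illustrative values:

	/* pfx_len = 56, i = 32: 24 bits of this word are prefix, the low
	 * 8 bits are host bits and must survive the rewrite.
	 *
	 *   new: (1 << (i - pfx_len + 32)) - 1 = (1 << 8) - 1 = 0x000000ff
	 *   old: ~((1 << (pfx_len - i)) - 1)   = ~0x00ffffff  = 0xff000000
	 *
	 * Only the new expression preserves exactly the host bits (in host
	 * byte order, before the htonl()) while the translated prefix is
	 * written into the rest of the word.
	 */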
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 54087e96d7b8..6700069949dd 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -14,6 +14,8 @@
14 * 2 of the License, or (at your option) any later version. 14 * 2 of the License, or (at your option) any later version.
15 */ 15 */
16 16
17#define pr_fmt(fmt) "IPv6-nf: " fmt
18
17#include <linux/errno.h> 19#include <linux/errno.h>
18#include <linux/types.h> 20#include <linux/types.h>
19#include <linux/string.h> 21#include <linux/string.h>
@@ -180,13 +182,11 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
180 182
181 q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); 183 q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
182 local_bh_enable(); 184 local_bh_enable();
183 if (q == NULL) 185 if (IS_ERR_OR_NULL(q)) {
184 goto oom; 186 inet_frag_maybe_warn_overflow(q, pr_fmt());
185 187 return NULL;
188 }
186 return container_of(q, struct frag_queue, q); 189 return container_of(q, struct frag_queue, q);
187
188oom:
189 return NULL;
190} 190}
191 191
192 192
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 3c6a77290c6e..0ba10e53a629 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -26,6 +26,9 @@
26 * YOSHIFUJI,H. @USAGI Always remove fragment header to 26 * YOSHIFUJI,H. @USAGI Always remove fragment header to
27 * calculate ICV correctly. 27 * calculate ICV correctly.
28 */ 28 */
29
30#define pr_fmt(fmt) "IPv6: " fmt
31
29#include <linux/errno.h> 32#include <linux/errno.h>
30#include <linux/types.h> 33#include <linux/types.h>
31#include <linux/string.h> 34#include <linux/string.h>
@@ -185,9 +188,10 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6
185 hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd); 188 hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
186 189
187 q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); 190 q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
188 if (q == NULL) 191 if (IS_ERR_OR_NULL(q)) {
192 inet_frag_maybe_warn_overflow(q, pr_fmt());
189 return NULL; 193 return NULL;
190 194 }
191 return container_of(q, struct frag_queue, q); 195 return container_of(q, struct frag_queue, q);
192} 196}
193 197
@@ -326,9 +330,17 @@ found:
326 } 330 }
327 331
328 if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && 332 if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
329 fq->q.meat == fq->q.len) 333 fq->q.meat == fq->q.len) {
330 return ip6_frag_reasm(fq, prev, dev); 334 int res;
335 unsigned long orefdst = skb->_skb_refdst;
336
337 skb->_skb_refdst = 0UL;
338 res = ip6_frag_reasm(fq, prev, dev);
339 skb->_skb_refdst = orefdst;
340 return res;
341 }
331 342
343 skb_dst_drop(skb);
332 inet_frag_lru_move(&fq->q); 344 inet_frag_lru_move(&fq->q);
333 return -1; 345 return -1;
334 346
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 9b6460055df5..46a5be85be87 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -386,9 +386,17 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
386 386
387 if (dst) 387 if (dst)
388 dst->ops->redirect(dst, sk, skb); 388 dst->ops->redirect(dst, sk, skb);
389 goto out;
389 } 390 }
390 391
391 if (type == ICMPV6_PKT_TOOBIG) { 392 if (type == ICMPV6_PKT_TOOBIG) {
393 /* We are not interested in TCP_LISTEN and open_requests
394 * (SYN-ACKs send out by Linux are always <576bytes so
395 * they should go through unfragmented).
396 */
397 if (sk->sk_state == TCP_LISTEN)
398 goto out;
399
392 tp->mtu_info = ntohl(info); 400 tp->mtu_info = ntohl(info);
393 if (!sock_owned_by_user(sk)) 401 if (!sock_owned_by_user(sk))
394 tcp_v6_mtu_reduced(sk); 402 tcp_v6_mtu_reduced(sk);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 599e1ba6d1ce..d8e5e852fc7a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1285,10 +1285,18 @@ do_confirm:
1285 1285
1286void udpv6_destroy_sock(struct sock *sk) 1286void udpv6_destroy_sock(struct sock *sk)
1287{ 1287{
1288 struct udp_sock *up = udp_sk(sk);
1288 lock_sock(sk); 1289 lock_sock(sk);
1289 udp_v6_flush_pending_frames(sk); 1290 udp_v6_flush_pending_frames(sk);
1290 release_sock(sk); 1291 release_sock(sk);
1291 1292
1293 if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
1294 void (*encap_destroy)(struct sock *sk);
1295 encap_destroy = ACCESS_ONCE(up->encap_destroy);
1296 if (encap_destroy)
1297 encap_destroy(sk);
1298 }
1299
1292 inet6_destroy_sock(sk); 1300 inet6_destroy_sock(sk);
1293} 1301}
1294 1302
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index d07e3a626446..e493b3397ae3 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
1386 1386
1387 IRDA_DEBUG(4, "%s()\n", __func__); 1387 IRDA_DEBUG(4, "%s()\n", __func__);
1388 1388
1389 msg->msg_namelen = 0;
1390
1389 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, 1391 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
1390 flags & MSG_DONTWAIT, &err); 1392 flags & MSG_DONTWAIT, &err);
1391 if (!skb) 1393 if (!skb)
@@ -2583,8 +2585,10 @@ bed:
2583 NULL, NULL, NULL); 2585 NULL, NULL, NULL);
2584 2586
2585 /* Check if the we got some results */ 2587 /* Check if the we got some results */
2586 if (!self->cachedaddr) 2588 if (!self->cachedaddr) {
2587 return -EAGAIN; /* Didn't find any devices */ 2589 err = -EAGAIN; /* Didn't find any devices */
2590 goto out;
2591 }
2588 daddr = self->cachedaddr; 2592 daddr = self->cachedaddr;
2589 /* Cleanup */ 2593 /* Cleanup */
2590 self->cachedaddr = 0; 2594 self->cachedaddr = 0;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a7d11ffe4284..206ce6db2c36 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -49,12 +49,6 @@ static const u8 iprm_shutdown[8] =
49 49
50#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class)) 50#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class))
51 51
52/* macros to set/get socket control buffer at correct offset */
53#define CB_TAG(skb) ((skb)->cb) /* iucv message tag */
54#define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag))
55#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
56#define CB_TRGCLS_LEN (TRGCLS_SIZE)
57
58#define __iucv_sock_wait(sk, condition, timeo, ret) \ 52#define __iucv_sock_wait(sk, condition, timeo, ret) \
59do { \ 53do { \
60 DEFINE_WAIT(__wait); \ 54 DEFINE_WAIT(__wait); \
@@ -1141,7 +1135,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
1141 1135
1142 /* increment and save iucv message tag for msg_completion cbk */ 1136 /* increment and save iucv message tag for msg_completion cbk */
1143 txmsg.tag = iucv->send_tag++; 1137 txmsg.tag = iucv->send_tag++;
1144 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); 1138 IUCV_SKB_CB(skb)->tag = txmsg.tag;
1145 1139
1146 if (iucv->transport == AF_IUCV_TRANS_HIPER) { 1140 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1147 atomic_inc(&iucv->msg_sent); 1141 atomic_inc(&iucv->msg_sent);
@@ -1224,7 +1218,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
1224 return -ENOMEM; 1218 return -ENOMEM;
1225 1219
1226 /* copy target class to control buffer of new skb */ 1220 /* copy target class to control buffer of new skb */
1227 memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN); 1221 IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
1228 1222
1229 /* copy data fragment */ 1223 /* copy data fragment */
1230 memcpy(nskb->data, skb->data + copied, size); 1224 memcpy(nskb->data, skb->data + copied, size);
@@ -1256,7 +1250,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1256 1250
1257 /* store msg target class in the second 4 bytes of skb ctrl buffer */ 1251 /* store msg target class in the second 4 bytes of skb ctrl buffer */
1258 /* Note: the first 4 bytes are reserved for msg tag */ 1252 /* Note: the first 4 bytes are reserved for msg tag */
1259 memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN); 1253 IUCV_SKB_CB(skb)->class = msg->class;
1260 1254
1261 /* check for special IPRM messages (e.g. iucv_sock_shutdown) */ 1255 /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
1262 if ((msg->flags & IUCV_IPRMDATA) && len > 7) { 1256 if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
@@ -1292,6 +1286,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1292 } 1286 }
1293 } 1287 }
1294 1288
1289 IUCV_SKB_CB(skb)->offset = 0;
1295 if (sock_queue_rcv_skb(sk, skb)) 1290 if (sock_queue_rcv_skb(sk, skb))
1296 skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); 1291 skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
1297} 1292}
@@ -1327,6 +1322,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1327 unsigned int copied, rlen; 1322 unsigned int copied, rlen;
1328 struct sk_buff *skb, *rskb, *cskb; 1323 struct sk_buff *skb, *rskb, *cskb;
1329 int err = 0; 1324 int err = 0;
1325 u32 offset;
1326
1327 msg->msg_namelen = 0;
1330 1328
1331 if ((sk->sk_state == IUCV_DISCONN) && 1329 if ((sk->sk_state == IUCV_DISCONN) &&
1332 skb_queue_empty(&iucv->backlog_skb_q) && 1330 skb_queue_empty(&iucv->backlog_skb_q) &&
@@ -1346,13 +1344,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1346 return err; 1344 return err;
1347 } 1345 }
1348 1346
1349 rlen = skb->len; /* real length of skb */ 1347 offset = IUCV_SKB_CB(skb)->offset;
1348 rlen = skb->len - offset; /* real length of skb */
1350 copied = min_t(unsigned int, rlen, len); 1349 copied = min_t(unsigned int, rlen, len);
1351 if (!rlen) 1350 if (!rlen)
1352 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; 1351 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
1353 1352
1354 cskb = skb; 1353 cskb = skb;
1355 if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) { 1354 if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {
1356 if (!(flags & MSG_PEEK)) 1355 if (!(flags & MSG_PEEK))
1357 skb_queue_head(&sk->sk_receive_queue, skb); 1356 skb_queue_head(&sk->sk_receive_queue, skb);
1358 return -EFAULT; 1357 return -EFAULT;
@@ -1370,7 +1369,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1370 * get the trgcls from the control buffer of the skb due to 1369 * get the trgcls from the control buffer of the skb due to
1371 * fragmentation of original iucv message. */ 1370 * fragmentation of original iucv message. */
1372 err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, 1371 err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
1373 CB_TRGCLS_LEN, CB_TRGCLS(skb)); 1372 sizeof(IUCV_SKB_CB(skb)->class),
1373 (void *)&IUCV_SKB_CB(skb)->class);
1374 if (err) { 1374 if (err) {
1375 if (!(flags & MSG_PEEK)) 1375 if (!(flags & MSG_PEEK))
1376 skb_queue_head(&sk->sk_receive_queue, skb); 1376 skb_queue_head(&sk->sk_receive_queue, skb);
@@ -1382,9 +1382,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1382 1382
1383 /* SOCK_STREAM: re-queue skb if it contains unreceived data */ 1383 /* SOCK_STREAM: re-queue skb if it contains unreceived data */
1384 if (sk->sk_type == SOCK_STREAM) { 1384 if (sk->sk_type == SOCK_STREAM) {
1385 skb_pull(skb, copied); 1385 if (copied < rlen) {
1386 if (skb->len) { 1386 IUCV_SKB_CB(skb)->offset = offset + copied;
1387 skb_queue_head(&sk->sk_receive_queue, skb);
1388 goto done; 1387 goto done;
1389 } 1388 }
1390 } 1389 }
@@ -1403,6 +1402,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1403 spin_lock_bh(&iucv->message_q.lock); 1402 spin_lock_bh(&iucv->message_q.lock);
1404 rskb = skb_dequeue(&iucv->backlog_skb_q); 1403 rskb = skb_dequeue(&iucv->backlog_skb_q);
1405 while (rskb) { 1404 while (rskb) {
1405 IUCV_SKB_CB(rskb)->offset = 0;
1406 if (sock_queue_rcv_skb(sk, rskb)) { 1406 if (sock_queue_rcv_skb(sk, rskb)) {
1407 skb_queue_head(&iucv->backlog_skb_q, 1407 skb_queue_head(&iucv->backlog_skb_q,
1408 rskb); 1408 rskb);
@@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
1830 spin_lock_irqsave(&list->lock, flags); 1830 spin_lock_irqsave(&list->lock, flags);
1831 1831
1832 while (list_skb != (struct sk_buff *)list) { 1832 while (list_skb != (struct sk_buff *)list) {
 1833 if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) { 1833 if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
1834 this = list_skb; 1834 this = list_skb;
1835 break; 1835 break;
1836 } 1836 }
@@ -2091,6 +2091,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2091 skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); 2091 skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2092 skb_reset_transport_header(skb); 2092 skb_reset_transport_header(skb);
2093 skb_reset_network_header(skb); 2093 skb_reset_network_header(skb);
2094 IUCV_SKB_CB(skb)->offset = 0;
2094 spin_lock(&iucv->message_q.lock); 2095 spin_lock(&iucv->message_q.lock);
2095 if (skb_queue_empty(&iucv->backlog_skb_q)) { 2096 if (skb_queue_empty(&iucv->backlog_skb_q)) {
2096 if (sock_queue_rcv_skb(sk, skb)) { 2097 if (sock_queue_rcv_skb(sk, skb)) {
@@ -2195,8 +2196,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2195 /* fall through and receive zero length data */ 2196 /* fall through and receive zero length data */
2196 case 0: 2197 case 0:
2197 /* plain data frame */ 2198 /* plain data frame */
2198 memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class, 2199 IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
2199 CB_TRGCLS_LEN);
2200 err = afiucv_hs_callback_rx(sk, skb); 2200 err = afiucv_hs_callback_rx(sk, skb);
2201 break; 2201 break;
2202 default: 2202 default:
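The af_iucv.c changes replace the byte-offset CB_* macros with a typed control block overlaid on skb->cb, and track a per-skb read offset so partial SOCK_STREAM reads no longer skb_pull() a buffer that gets requeued. A rough sketch of the pattern; the struct name and layout below are inferred from the accessors in the hunks, not the real af_iucv header:

	struct example_iucv_skb_cb {
		u32	class;	/* IUCV message target class         */
		u32	tag;	/* tag matched on send completion    */
		u32	offset;	/* bytes already copied to userspace */
	};
	#define EXAMPLE_IUCV_SKB_CB(skb) ((struct example_iucv_skb_cb *)&((skb)->cb[0]))

	/* recvmsg side: consume from 'offset' instead of pulling the skb;
	 * if only part of the payload was copied, remember how far we got
	 * and leave the skb at the head of the receive queue.
	 */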
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 556fdafdd1ea..5b1e5af25713 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2201,7 +2201,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
2201 XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW); 2201 XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW);
2202 xp->priority = pol->sadb_x_policy_priority; 2202 xp->priority = pol->sadb_x_policy_priority;
2203 2203
2204 sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], 2204 sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];
2205 xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr); 2205 xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr);
2206 if (!xp->family) { 2206 if (!xp->family) {
2207 err = -EINVAL; 2207 err = -EINVAL;
@@ -2214,7 +2214,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
2214 if (xp->selector.sport) 2214 if (xp->selector.sport)
2215 xp->selector.sport_mask = htons(0xffff); 2215 xp->selector.sport_mask = htons(0xffff);
2216 2216
2217 sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], 2217 sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1];
2218 pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr); 2218 pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr);
2219 xp->selector.prefixlen_d = sa->sadb_address_prefixlen; 2219 xp->selector.prefixlen_d = sa->sadb_address_prefixlen;
2220 2220
@@ -2315,7 +2315,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
2315 2315
2316 memset(&sel, 0, sizeof(sel)); 2316 memset(&sel, 0, sizeof(sel));
2317 2317
2318 sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], 2318 sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];
2319 sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr); 2319 sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr);
2320 sel.prefixlen_s = sa->sadb_address_prefixlen; 2320 sel.prefixlen_s = sa->sadb_address_prefixlen;
2321 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); 2321 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
@@ -2323,7 +2323,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
2323 if (sel.sport) 2323 if (sel.sport)
2324 sel.sport_mask = htons(0xffff); 2324 sel.sport_mask = htons(0xffff);
2325 2325
2326 sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], 2326 sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1];
2327 pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr); 2327 pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr);
2328 sel.prefixlen_d = sa->sadb_address_prefixlen; 2328 sel.prefixlen_d = sa->sadb_address_prefixlen;
2329 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); 2329 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
@@ -2693,6 +2693,7 @@ static int key_notify_policy_flush(const struct km_event *c)
2693 hdr->sadb_msg_pid = c->portid; 2693 hdr->sadb_msg_pid = c->portid;
2694 hdr->sadb_msg_version = PF_KEY_V2; 2694 hdr->sadb_msg_version = PF_KEY_V2;
2695 hdr->sadb_msg_errno = (uint8_t) 0; 2695 hdr->sadb_msg_errno = (uint8_t) 0;
2696 hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
2696 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); 2697 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
2697 pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); 2698 pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
2698 return 0; 2699 return 0;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index d36875f3427e..8aecf5df6656 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -114,7 +114,6 @@ struct l2tp_net {
114 114
115static void l2tp_session_set_header_len(struct l2tp_session *session, int version); 115static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
116static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); 116static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
117static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
118 117
119static inline struct l2tp_net *l2tp_pernet(struct net *net) 118static inline struct l2tp_net *l2tp_pernet(struct net *net)
120{ 119{
@@ -192,6 +191,7 @@ struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
192 } else { 191 } else {
193 /* Socket is owned by kernelspace */ 192 /* Socket is owned by kernelspace */
194 sk = tunnel->sock; 193 sk = tunnel->sock;
194 sock_hold(sk);
195 } 195 }
196 196
197out: 197out:
@@ -210,6 +210,7 @@ void l2tp_tunnel_sock_put(struct sock *sk)
210 } 210 }
211 sock_put(sk); 211 sock_put(sk);
212 } 212 }
213 sock_put(sk);
213} 214}
214EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); 215EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
215 216
@@ -373,10 +374,8 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
373 struct sk_buff *skbp; 374 struct sk_buff *skbp;
374 struct sk_buff *tmp; 375 struct sk_buff *tmp;
375 u32 ns = L2TP_SKB_CB(skb)->ns; 376 u32 ns = L2TP_SKB_CB(skb)->ns;
376 struct l2tp_stats *sstats;
377 377
378 spin_lock_bh(&session->reorder_q.lock); 378 spin_lock_bh(&session->reorder_q.lock);
379 sstats = &session->stats;
380 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { 379 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
381 if (L2TP_SKB_CB(skbp)->ns > ns) { 380 if (L2TP_SKB_CB(skbp)->ns > ns) {
382 __skb_queue_before(&session->reorder_q, skbp, skb); 381 __skb_queue_before(&session->reorder_q, skbp, skb);
@@ -384,9 +383,7 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
384 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", 383 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
385 session->name, ns, L2TP_SKB_CB(skbp)->ns, 384 session->name, ns, L2TP_SKB_CB(skbp)->ns,
386 skb_queue_len(&session->reorder_q)); 385 skb_queue_len(&session->reorder_q));
387 u64_stats_update_begin(&sstats->syncp); 386 atomic_long_inc(&session->stats.rx_oos_packets);
388 sstats->rx_oos_packets++;
389 u64_stats_update_end(&sstats->syncp);
390 goto out; 387 goto out;
391 } 388 }
392 } 389 }
@@ -403,23 +400,16 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *
403{ 400{
404 struct l2tp_tunnel *tunnel = session->tunnel; 401 struct l2tp_tunnel *tunnel = session->tunnel;
405 int length = L2TP_SKB_CB(skb)->length; 402 int length = L2TP_SKB_CB(skb)->length;
406 struct l2tp_stats *tstats, *sstats;
407 403
408 /* We're about to requeue the skb, so return resources 404 /* We're about to requeue the skb, so return resources
409 * to its current owner (a socket receive buffer). 405 * to its current owner (a socket receive buffer).
410 */ 406 */
411 skb_orphan(skb); 407 skb_orphan(skb);
412 408
413 tstats = &tunnel->stats; 409 atomic_long_inc(&tunnel->stats.rx_packets);
414 u64_stats_update_begin(&tstats->syncp); 410 atomic_long_add(length, &tunnel->stats.rx_bytes);
415 sstats = &session->stats; 411 atomic_long_inc(&session->stats.rx_packets);
416 u64_stats_update_begin(&sstats->syncp); 412 atomic_long_add(length, &session->stats.rx_bytes);
417 tstats->rx_packets++;
418 tstats->rx_bytes += length;
419 sstats->rx_packets++;
420 sstats->rx_bytes += length;
421 u64_stats_update_end(&tstats->syncp);
422 u64_stats_update_end(&sstats->syncp);
423 413
424 if (L2TP_SKB_CB(skb)->has_seq) { 414 if (L2TP_SKB_CB(skb)->has_seq) {
425 /* Bump our Nr */ 415 /* Bump our Nr */
@@ -450,7 +440,6 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
450{ 440{
451 struct sk_buff *skb; 441 struct sk_buff *skb;
452 struct sk_buff *tmp; 442 struct sk_buff *tmp;
453 struct l2tp_stats *sstats;
454 443
455 /* If the pkt at the head of the queue has the nr that we 444 /* If the pkt at the head of the queue has the nr that we
456 * expect to send up next, dequeue it and any other 445 * expect to send up next, dequeue it and any other
@@ -458,13 +447,10 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
458 */ 447 */
459start: 448start:
460 spin_lock_bh(&session->reorder_q.lock); 449 spin_lock_bh(&session->reorder_q.lock);
461 sstats = &session->stats;
462 skb_queue_walk_safe(&session->reorder_q, skb, tmp) { 450 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
463 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { 451 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
464 u64_stats_update_begin(&sstats->syncp); 452 atomic_long_inc(&session->stats.rx_seq_discards);
465 sstats->rx_seq_discards++; 453 atomic_long_inc(&session->stats.rx_errors);
466 sstats->rx_errors++;
467 u64_stats_update_end(&sstats->syncp);
468 l2tp_dbg(session, L2TP_MSG_SEQ, 454 l2tp_dbg(session, L2TP_MSG_SEQ,
469 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n", 455 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
470 session->name, L2TP_SKB_CB(skb)->ns, 456 session->name, L2TP_SKB_CB(skb)->ns,
@@ -623,7 +609,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
623 struct l2tp_tunnel *tunnel = session->tunnel; 609 struct l2tp_tunnel *tunnel = session->tunnel;
624 int offset; 610 int offset;
625 u32 ns, nr; 611 u32 ns, nr;
626 struct l2tp_stats *sstats = &session->stats;
627 612
628 /* The ref count is increased since we now hold a pointer to 613 /* The ref count is increased since we now hold a pointer to
629 * the session. Take care to decrement the refcnt when exiting 614 * the session. Take care to decrement the refcnt when exiting
@@ -640,9 +625,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
640 "%s: cookie mismatch (%u/%u). Discarding.\n", 625 "%s: cookie mismatch (%u/%u). Discarding.\n",
641 tunnel->name, tunnel->tunnel_id, 626 tunnel->name, tunnel->tunnel_id,
642 session->session_id); 627 session->session_id);
643 u64_stats_update_begin(&sstats->syncp); 628 atomic_long_inc(&session->stats.rx_cookie_discards);
644 sstats->rx_cookie_discards++;
645 u64_stats_update_end(&sstats->syncp);
646 goto discard; 629 goto discard;
647 } 630 }
648 ptr += session->peer_cookie_len; 631 ptr += session->peer_cookie_len;
@@ -711,9 +694,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
711 l2tp_warn(session, L2TP_MSG_SEQ, 694 l2tp_warn(session, L2TP_MSG_SEQ,
712 "%s: recv data has no seq numbers when required. Discarding.\n", 695 "%s: recv data has no seq numbers when required. Discarding.\n",
713 session->name); 696 session->name);
714 u64_stats_update_begin(&sstats->syncp); 697 atomic_long_inc(&session->stats.rx_seq_discards);
715 sstats->rx_seq_discards++;
716 u64_stats_update_end(&sstats->syncp);
717 goto discard; 698 goto discard;
718 } 699 }
719 700
@@ -732,9 +713,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
732 l2tp_warn(session, L2TP_MSG_SEQ, 713 l2tp_warn(session, L2TP_MSG_SEQ,
733 "%s: recv data has no seq numbers when required. Discarding.\n", 714 "%s: recv data has no seq numbers when required. Discarding.\n",
734 session->name); 715 session->name);
735 u64_stats_update_begin(&sstats->syncp); 716 atomic_long_inc(&session->stats.rx_seq_discards);
736 sstats->rx_seq_discards++;
737 u64_stats_update_end(&sstats->syncp);
738 goto discard; 717 goto discard;
739 } 718 }
740 } 719 }
@@ -788,9 +767,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
788 * packets 767 * packets
789 */ 768 */
790 if (L2TP_SKB_CB(skb)->ns != session->nr) { 769 if (L2TP_SKB_CB(skb)->ns != session->nr) {
791 u64_stats_update_begin(&sstats->syncp); 770 atomic_long_inc(&session->stats.rx_seq_discards);
792 sstats->rx_seq_discards++;
793 u64_stats_update_end(&sstats->syncp);
794 l2tp_dbg(session, L2TP_MSG_SEQ, 771 l2tp_dbg(session, L2TP_MSG_SEQ,
795 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n", 772 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
796 session->name, L2TP_SKB_CB(skb)->ns, 773 session->name, L2TP_SKB_CB(skb)->ns,
@@ -816,9 +793,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
816 return; 793 return;
817 794
818discard: 795discard:
819 u64_stats_update_begin(&sstats->syncp); 796 atomic_long_inc(&session->stats.rx_errors);
820 sstats->rx_errors++;
821 u64_stats_update_end(&sstats->syncp);
822 kfree_skb(skb); 797 kfree_skb(skb);
823 798
824 if (session->deref) 799 if (session->deref)
@@ -828,6 +803,23 @@ discard:
828} 803}
829EXPORT_SYMBOL(l2tp_recv_common); 804EXPORT_SYMBOL(l2tp_recv_common);
830 805
806/* Drop skbs from the session's reorder_q
807 */
808int l2tp_session_queue_purge(struct l2tp_session *session)
809{
810 struct sk_buff *skb = NULL;
811 BUG_ON(!session);
812 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
813 while ((skb = skb_dequeue(&session->reorder_q))) {
814 atomic_long_inc(&session->stats.rx_errors);
815 kfree_skb(skb);
816 if (session->deref)
817 (*session->deref)(session);
818 }
819 return 0;
820}
821EXPORT_SYMBOL_GPL(l2tp_session_queue_purge);
822
831/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame 823/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
832 * here. The skb is not on a list when we get here. 824 * here. The skb is not on a list when we get here.
833 * Returns 0 if the packet was a data packet and was successfully passed on. 825 * Returns 0 if the packet was a data packet and was successfully passed on.
@@ -843,7 +835,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
843 u32 tunnel_id, session_id; 835 u32 tunnel_id, session_id;
844 u16 version; 836 u16 version;
845 int length; 837 int length;
846 struct l2tp_stats *tstats;
847 838
848 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) 839 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
849 goto discard_bad_csum; 840 goto discard_bad_csum;
@@ -932,10 +923,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
932discard_bad_csum: 923discard_bad_csum:
933 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); 924 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
934 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); 925 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
935 tstats = &tunnel->stats; 926 atomic_long_inc(&tunnel->stats.rx_errors);
936 u64_stats_update_begin(&tstats->syncp);
937 tstats->rx_errors++;
938 u64_stats_update_end(&tstats->syncp);
939 kfree_skb(skb); 927 kfree_skb(skb);
940 928
941 return 0; 929 return 0;
@@ -1062,7 +1050,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1062 struct l2tp_tunnel *tunnel = session->tunnel; 1050 struct l2tp_tunnel *tunnel = session->tunnel;
1063 unsigned int len = skb->len; 1051 unsigned int len = skb->len;
1064 int error; 1052 int error;
1065 struct l2tp_stats *tstats, *sstats;
1066 1053
1067 /* Debug */ 1054 /* Debug */
1068 if (session->send_seq) 1055 if (session->send_seq)
@@ -1091,21 +1078,15 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1091 error = ip_queue_xmit(skb, fl); 1078 error = ip_queue_xmit(skb, fl);
1092 1079
1093 /* Update stats */ 1080 /* Update stats */
1094 tstats = &tunnel->stats;
1095 u64_stats_update_begin(&tstats->syncp);
1096 sstats = &session->stats;
1097 u64_stats_update_begin(&sstats->syncp);
1098 if (error >= 0) { 1081 if (error >= 0) {
1099 tstats->tx_packets++; 1082 atomic_long_inc(&tunnel->stats.tx_packets);
1100 tstats->tx_bytes += len; 1083 atomic_long_add(len, &tunnel->stats.tx_bytes);
1101 sstats->tx_packets++; 1084 atomic_long_inc(&session->stats.tx_packets);
1102 sstats->tx_bytes += len; 1085 atomic_long_add(len, &session->stats.tx_bytes);
1103 } else { 1086 } else {
1104 tstats->tx_errors++; 1087 atomic_long_inc(&tunnel->stats.tx_errors);
1105 sstats->tx_errors++; 1088 atomic_long_inc(&session->stats.tx_errors);
1106 } 1089 }
1107 u64_stats_update_end(&tstats->syncp);
1108 u64_stats_update_end(&sstats->syncp);
1109 1090
1110 return 0; 1091 return 0;
1111} 1092}
@@ -1282,6 +1263,7 @@ static void l2tp_tunnel_destruct(struct sock *sk)
1282 /* No longer an encapsulation socket. See net/ipv4/udp.c */ 1263 /* No longer an encapsulation socket. See net/ipv4/udp.c */
1283 (udp_sk(sk))->encap_type = 0; 1264 (udp_sk(sk))->encap_type = 0;
1284 (udp_sk(sk))->encap_rcv = NULL; 1265 (udp_sk(sk))->encap_rcv = NULL;
1266 (udp_sk(sk))->encap_destroy = NULL;
1285 break; 1267 break;
1286 case L2TP_ENCAPTYPE_IP: 1268 case L2TP_ENCAPTYPE_IP:
1287 break; 1269 break;
@@ -1311,7 +1293,7 @@ end:
1311 1293
1312/* When the tunnel is closed, all the attached sessions need to go too. 1294/* When the tunnel is closed, all the attached sessions need to go too.
1313 */ 1295 */
1314static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) 1296void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1315{ 1297{
1316 int hash; 1298 int hash;
1317 struct hlist_node *walk; 1299 struct hlist_node *walk;
@@ -1334,25 +1316,13 @@ again:
1334 1316
1335 hlist_del_init(&session->hlist); 1317 hlist_del_init(&session->hlist);
1336 1318
1337 /* Since we should hold the sock lock while
1338 * doing any unbinding, we need to release the
1339 * lock we're holding before taking that lock.
1340 * Hold a reference to the sock so it doesn't
1341 * disappear as we're jumping between locks.
1342 */
1343 if (session->ref != NULL) 1319 if (session->ref != NULL)
1344 (*session->ref)(session); 1320 (*session->ref)(session);
1345 1321
1346 write_unlock_bh(&tunnel->hlist_lock); 1322 write_unlock_bh(&tunnel->hlist_lock);
1347 1323
1348 if (tunnel->version != L2TP_HDR_VER_2) { 1324 __l2tp_session_unhash(session);
1349 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); 1325 l2tp_session_queue_purge(session);
1350
1351 spin_lock_bh(&pn->l2tp_session_hlist_lock);
1352 hlist_del_init_rcu(&session->global_hlist);
1353 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1354 synchronize_rcu();
1355 }
1356 1326
1357 if (session->session_close != NULL) 1327 if (session->session_close != NULL)
1358 (*session->session_close)(session); 1328 (*session->session_close)(session);
@@ -1360,6 +1330,8 @@ again:
1360 if (session->deref != NULL) 1330 if (session->deref != NULL)
1361 (*session->deref)(session); 1331 (*session->deref)(session);
1362 1332
1333 l2tp_session_dec_refcount(session);
1334
1363 write_lock_bh(&tunnel->hlist_lock); 1335 write_lock_bh(&tunnel->hlist_lock);
1364 1336
1365 /* Now restart from the beginning of this hash 1337 /* Now restart from the beginning of this hash
@@ -1372,6 +1344,17 @@ again:
1372 } 1344 }
1373 write_unlock_bh(&tunnel->hlist_lock); 1345 write_unlock_bh(&tunnel->hlist_lock);
1374} 1346}
1347EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
1348
1349/* Tunnel socket destroy hook for UDP encapsulation */
1350static void l2tp_udp_encap_destroy(struct sock *sk)
1351{
1352 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
1353 if (tunnel) {
1354 l2tp_tunnel_closeall(tunnel);
1355 sock_put(sk);
1356 }
1357}
1375 1358
1376/* Really kill the tunnel. 1359/* Really kill the tunnel.
1377 * Come here only when all sessions have been cleared from the tunnel. 1360 * Come here only when all sessions have been cleared from the tunnel.
@@ -1397,19 +1380,21 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
1397 return; 1380 return;
1398 1381
1399 sock = sk->sk_socket; 1382 sock = sk->sk_socket;
1400 BUG_ON(!sock);
1401 1383
1402 /* If the tunnel socket was created directly by the kernel, use the 1384 /* If the tunnel socket was created by userspace, then go through the
1403 * sk_* API to release the socket now. Otherwise go through the 1385 * inet layer to shut the socket down, and let userspace close it.
1404 * inet_* layer to shut the socket down, and let userspace close it. 1386 * Otherwise, if we created the socket directly within the kernel, use
1387 * the sk API to release it here.
1405 * In either case the tunnel resources are freed in the socket 1388 * In either case the tunnel resources are freed in the socket
1406 * destructor when the tunnel socket goes away. 1389 * destructor when the tunnel socket goes away.
1407 */ 1390 */
1408 if (sock->file == NULL) { 1391 if (tunnel->fd >= 0) {
1409 kernel_sock_shutdown(sock, SHUT_RDWR); 1392 if (sock)
1410 sk_release_kernel(sk); 1393 inet_shutdown(sock, 2);
1411 } else { 1394 } else {
1412 inet_shutdown(sock, 2); 1395 if (sock)
1396 kernel_sock_shutdown(sock, SHUT_RDWR);
1397 sk_release_kernel(sk);
1413 } 1398 }
1414 1399
1415 l2tp_tunnel_sock_put(sk); 1400 l2tp_tunnel_sock_put(sk);
@@ -1668,6 +1653,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1668 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ 1653 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1669 udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; 1654 udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
1670 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; 1655 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
1656 udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
1671#if IS_ENABLED(CONFIG_IPV6) 1657#if IS_ENABLED(CONFIG_IPV6)
1672 if (sk->sk_family == PF_INET6) 1658 if (sk->sk_family == PF_INET6)
1673 udpv6_encap_enable(); 1659 udpv6_encap_enable();
@@ -1723,6 +1709,7 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1723 */ 1709 */
1724int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) 1710int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1725{ 1711{
1712 l2tp_tunnel_closeall(tunnel);
1726 return (false == queue_work(l2tp_wq, &tunnel->del_work)); 1713 return (false == queue_work(l2tp_wq, &tunnel->del_work));
1727} 1714}
1728EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); 1715EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
@@ -1731,62 +1718,71 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1731 */ 1718 */
1732void l2tp_session_free(struct l2tp_session *session) 1719void l2tp_session_free(struct l2tp_session *session)
1733{ 1720{
1734 struct l2tp_tunnel *tunnel; 1721 struct l2tp_tunnel *tunnel = session->tunnel;
1735 1722
1736 BUG_ON(atomic_read(&session->ref_count) != 0); 1723 BUG_ON(atomic_read(&session->ref_count) != 0);
1737 1724
1738 tunnel = session->tunnel; 1725 if (tunnel) {
1739 if (tunnel != NULL) {
1740 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); 1726 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
1727 if (session->session_id != 0)
1728 atomic_dec(&l2tp_session_count);
1729 sock_put(tunnel->sock);
1730 session->tunnel = NULL;
1731 l2tp_tunnel_dec_refcount(tunnel);
1732 }
1733
1734 kfree(session);
1741 1735
1742 /* Delete the session from the hash */ 1736 return;
1737}
1738EXPORT_SYMBOL_GPL(l2tp_session_free);
1739
1740/* Remove an l2tp session from l2tp_core's hash lists.
1741 * Provides a tidyup interface for pseudowire code which can't just route all
1742 * shutdown via. l2tp_session_delete and a pseudowire-specific session_close
1743 * callback.
1744 */
1745void __l2tp_session_unhash(struct l2tp_session *session)
1746{
1747 struct l2tp_tunnel *tunnel = session->tunnel;
1748
1749 /* Remove the session from core hashes */
1750 if (tunnel) {
1751 /* Remove from the per-tunnel hash */
1743 write_lock_bh(&tunnel->hlist_lock); 1752 write_lock_bh(&tunnel->hlist_lock);
1744 hlist_del_init(&session->hlist); 1753 hlist_del_init(&session->hlist);
1745 write_unlock_bh(&tunnel->hlist_lock); 1754 write_unlock_bh(&tunnel->hlist_lock);
1746 1755
1747 /* Unlink from the global hash if not L2TPv2 */ 1756 /* For L2TPv3 we have a per-net hash: remove from there, too */
1748 if (tunnel->version != L2TP_HDR_VER_2) { 1757 if (tunnel->version != L2TP_HDR_VER_2) {
1749 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); 1758 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1750
1751 spin_lock_bh(&pn->l2tp_session_hlist_lock); 1759 spin_lock_bh(&pn->l2tp_session_hlist_lock);
1752 hlist_del_init_rcu(&session->global_hlist); 1760 hlist_del_init_rcu(&session->global_hlist);
1753 spin_unlock_bh(&pn->l2tp_session_hlist_lock); 1761 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1754 synchronize_rcu(); 1762 synchronize_rcu();
1755 } 1763 }
1756
1757 if (session->session_id != 0)
1758 atomic_dec(&l2tp_session_count);
1759
1760 sock_put(tunnel->sock);
1761
1762 /* This will delete the tunnel context if this
1763 * is the last session on the tunnel.
1764 */
1765 session->tunnel = NULL;
1766 l2tp_tunnel_dec_refcount(tunnel);
1767 } 1764 }
1768
1769 kfree(session);
1770
1771 return;
1772} 1765}
1773EXPORT_SYMBOL_GPL(l2tp_session_free); 1766EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
1774 1767
1775/* This function is used by the netlink SESSION_DELETE command and by 1768/* This function is used by the netlink SESSION_DELETE command and by
1776 pseudowire modules. 1769 pseudowire modules.
1777 */ 1770 */
1778int l2tp_session_delete(struct l2tp_session *session) 1771int l2tp_session_delete(struct l2tp_session *session)
1779{ 1772{
1773 if (session->ref)
1774 (*session->ref)(session);
1775 __l2tp_session_unhash(session);
1776 l2tp_session_queue_purge(session);
1780 if (session->session_close != NULL) 1777 if (session->session_close != NULL)
1781 (*session->session_close)(session); 1778 (*session->session_close)(session);
1782 1779 if (session->deref)
1780 (*session->ref)(session);
1783 l2tp_session_dec_refcount(session); 1781 l2tp_session_dec_refcount(session);
1784
1785 return 0; 1782 return 0;
1786} 1783}
1787EXPORT_SYMBOL_GPL(l2tp_session_delete); 1784EXPORT_SYMBOL_GPL(l2tp_session_delete);
1788 1785
1789
1790/* We come here whenever a session's send_seq, cookie_len or 1786/* We come here whenever a session's send_seq, cookie_len or
1791 * l2specific_len parameters are set. 1787 * l2specific_len parameters are set.
1792 */ 1788 */
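The reworked l2tp_tunnel_del_work() above decides how to shut the tunnel socket down based on who created it. Summarised as a comment, following the reasoning already given in the hunk:

	/* tunnel->fd >= 0 : socket came from userspace; inet_shutdown() only,
	 *                   userspace is responsible for the final close().
	 * tunnel->fd <  0 : socket was created in-kernel; kernel_sock_shutdown()
	 *                   followed by sk_release_kernel() frees it here.
	 */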
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 8eb8f1d47f3a..485a490fd990 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -36,16 +36,15 @@ enum {
36struct sk_buff; 36struct sk_buff;
37 37
38struct l2tp_stats { 38struct l2tp_stats {
39 u64 tx_packets; 39 atomic_long_t tx_packets;
40 u64 tx_bytes; 40 atomic_long_t tx_bytes;
41 u64 tx_errors; 41 atomic_long_t tx_errors;
42 u64 rx_packets; 42 atomic_long_t rx_packets;
43 u64 rx_bytes; 43 atomic_long_t rx_bytes;
44 u64 rx_seq_discards; 44 atomic_long_t rx_seq_discards;
45 u64 rx_oos_packets; 45 atomic_long_t rx_oos_packets;
46 u64 rx_errors; 46 atomic_long_t rx_errors;
47 u64 rx_cookie_discards; 47 atomic_long_t rx_cookie_discards;
48 struct u64_stats_sync syncp;
49}; 48};
50 49
51struct l2tp_tunnel; 50struct l2tp_tunnel;
@@ -240,11 +239,14 @@ extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
240extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); 239extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
241 240
242extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp); 241extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp);
242extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
243extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); 243extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
244extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); 244extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
245extern void __l2tp_session_unhash(struct l2tp_session *session);
245extern int l2tp_session_delete(struct l2tp_session *session); 246extern int l2tp_session_delete(struct l2tp_session *session);
246extern void l2tp_session_free(struct l2tp_session *session); 247extern void l2tp_session_free(struct l2tp_session *session);
247extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb)); 248extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
249extern int l2tp_session_queue_purge(struct l2tp_session *session);
248extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); 250extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
249 251
250extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len); 252extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
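The l2tp_stats conversion above trades u64 counters guarded by u64_stats_sync for plain atomic_long_t fields. u64_stats_sync expects a single serialized writer (it is really meant for per-CPU stats), while these counters sit in shared tunnel/session structures and are bumped from several contexts, so per-counter atomics are the simpler safe option, at the cost of long-sized rather than guaranteed 64-bit counters on 32-bit builds. The resulting update/read pattern, as used in the hunks above and in l2tp_debugfs.c below:

	/* writer side, any context */
	atomic_long_inc(&session->stats.rx_packets);
	atomic_long_add(length, &session->stats.rx_bytes);

	/* reader side (debugfs/netlink dumps) */
	seq_printf(m, "rx %ld\n", atomic_long_read(&session->stats.rx_packets));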
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index c3813bc84552..072d7202e182 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -146,14 +146,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
  tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0,
  atomic_read(&tunnel->ref_count));
 
- seq_printf(m, " %08x rx %llu/%llu/%llu rx %llu/%llu/%llu\n",
+ seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n",
  tunnel->debug,
- (unsigned long long)tunnel->stats.tx_packets,
- (unsigned long long)tunnel->stats.tx_bytes,
- (unsigned long long)tunnel->stats.tx_errors,
- (unsigned long long)tunnel->stats.rx_packets,
- (unsigned long long)tunnel->stats.rx_bytes,
- (unsigned long long)tunnel->stats.rx_errors);
+ atomic_long_read(&tunnel->stats.tx_packets),
+ atomic_long_read(&tunnel->stats.tx_bytes),
+ atomic_long_read(&tunnel->stats.tx_errors),
+ atomic_long_read(&tunnel->stats.rx_packets),
+ atomic_long_read(&tunnel->stats.rx_bytes),
+ atomic_long_read(&tunnel->stats.rx_errors));
 
  if (tunnel->show != NULL)
  tunnel->show(m, tunnel);
@@ -203,14 +203,14 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
  seq_printf(m, "\n");
  }
 
- seq_printf(m, " %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n",
+ seq_printf(m, " %hu/%hu tx %ld/%ld/%ld rx %ld/%ld/%ld\n",
  session->nr, session->ns,
- (unsigned long long)session->stats.tx_packets,
- (unsigned long long)session->stats.tx_bytes,
- (unsigned long long)session->stats.tx_errors,
- (unsigned long long)session->stats.rx_packets,
- (unsigned long long)session->stats.rx_bytes,
- (unsigned long long)session->stats.rx_errors);
+ atomic_long_read(&session->stats.tx_packets),
+ atomic_long_read(&session->stats.tx_bytes),
+ atomic_long_read(&session->stats.tx_errors),
+ atomic_long_read(&session->stats.rx_packets),
+ atomic_long_read(&session->stats.rx_bytes),
+ atomic_long_read(&session->stats.rx_errors));
 
  if (session->show != NULL)
  session->show(m, session);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 7f41b7051269..571db8dd2292 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -228,10 +228,16 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
 static void l2tp_ip_destroy_sock(struct sock *sk)
 {
  struct sk_buff *skb;
+ struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
 
  while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
  kfree_skb(skb);
 
+ if (tunnel) {
+ l2tp_tunnel_closeall(tunnel);
+ sock_put(sk);
+ }
+
  sk_refcnt_debug_dec(sk);
 }
 
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 41f2f8126ebc..b8a6039314e8 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -241,10 +241,17 @@ static void l2tp_ip6_close(struct sock *sk, long timeout)
 
 static void l2tp_ip6_destroy_sock(struct sock *sk)
 {
+ struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+
  lock_sock(sk);
  ip6_flush_pending_frames(sk);
  release_sock(sk);
 
+ if (tunnel) {
+ l2tp_tunnel_closeall(tunnel);
+ sock_put(sk);
+ }
+
  inet6_destroy_sock(sk);
 }
 
@@ -683,6 +690,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
  lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
  lsa->l2tp_flowinfo = 0;
  lsa->l2tp_scope_id = 0;
+ lsa->l2tp_conn_id = 0;
  if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
  lsa->l2tp_scope_id = IP6CB(skb)->iif;
  }
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index c1bab22db85e..0825ff26e113 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -246,8 +246,6 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
 #if IS_ENABLED(CONFIG_IPV6)
  struct ipv6_pinfo *np = NULL;
 #endif
- struct l2tp_stats stats;
- unsigned int start;
 
  hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags,
  L2TP_CMD_TUNNEL_GET);
@@ -265,28 +263,22 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
  if (nest == NULL)
  goto nla_put_failure;
 
- do {
- start = u64_stats_fetch_begin(&tunnel->stats.syncp);
- stats.tx_packets = tunnel->stats.tx_packets;
- stats.tx_bytes = tunnel->stats.tx_bytes;
- stats.tx_errors = tunnel->stats.tx_errors;
- stats.rx_packets = tunnel->stats.rx_packets;
- stats.rx_bytes = tunnel->stats.rx_bytes;
- stats.rx_errors = tunnel->stats.rx_errors;
- stats.rx_seq_discards = tunnel->stats.rx_seq_discards;
- stats.rx_oos_packets = tunnel->stats.rx_oos_packets;
- } while (u64_stats_fetch_retry(&tunnel->stats.syncp, start));
-
- if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
- nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
- nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
- nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
- nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
+ if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
+ atomic_long_read(&tunnel->stats.tx_packets)) ||
+ nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
+ atomic_long_read(&tunnel->stats.tx_bytes)) ||
+ nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
+ atomic_long_read(&tunnel->stats.tx_errors)) ||
+ nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
+ atomic_long_read(&tunnel->stats.rx_packets)) ||
+ nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
+ atomic_long_read(&tunnel->stats.rx_bytes)) ||
  nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
- stats.rx_seq_discards) ||
+ atomic_long_read(&tunnel->stats.rx_seq_discards)) ||
  nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
- stats.rx_oos_packets) ||
- nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
+ atomic_long_read(&tunnel->stats.rx_oos_packets)) ||
+ nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
+ atomic_long_read(&tunnel->stats.rx_errors)))
  goto nla_put_failure;
  nla_nest_end(skb, nest);
 
@@ -612,8 +604,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
  struct nlattr *nest;
  struct l2tp_tunnel *tunnel = session->tunnel;
  struct sock *sk = NULL;
- struct l2tp_stats stats;
- unsigned int start;
 
  sk = tunnel->sock;
 
@@ -656,28 +646,22 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
  if (nest == NULL)
  goto nla_put_failure;
 
- do {
- start = u64_stats_fetch_begin(&session->stats.syncp);
- stats.tx_packets = session->stats.tx_packets;
- stats.tx_bytes = session->stats.tx_bytes;
- stats.tx_errors = session->stats.tx_errors;
- stats.rx_packets = session->stats.rx_packets;
- stats.rx_bytes = session->stats.rx_bytes;
- stats.rx_errors = session->stats.rx_errors;
- stats.rx_seq_discards = session->stats.rx_seq_discards;
- stats.rx_oos_packets = session->stats.rx_oos_packets;
- } while (u64_stats_fetch_retry(&session->stats.syncp, start));
-
- if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
- nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
- nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
- nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
- nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
+ if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
+ atomic_long_read(&session->stats.tx_packets)) ||
+ nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
+ atomic_long_read(&session->stats.tx_bytes)) ||
+ nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
+ atomic_long_read(&session->stats.tx_errors)) ||
+ nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
+ atomic_long_read(&session->stats.rx_packets)) ||
+ nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
+ atomic_long_read(&session->stats.rx_bytes)) ||
  nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
- stats.rx_seq_discards) ||
+ atomic_long_read(&session->stats.rx_seq_discards)) ||
  nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
- stats.rx_oos_packets) ||
- nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
+ atomic_long_read(&session->stats.rx_oos_packets)) ||
+ nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
+ atomic_long_read(&session->stats.rx_errors)))
  goto nla_put_failure;
  nla_nest_end(skb, nest);
 
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 6a53371dba1f..637a341c1e2d 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -97,6 +97,7 @@
 #include <net/ip.h>
 #include <net/udp.h>
 #include <net/xfrm.h>
+#include <net/inet_common.h>
 
 #include <asm/byteorder.h>
 #include <linux/atomic.h>
@@ -259,7 +260,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
  session->name);
 
  /* Not bound. Nothing we can do, so discard. */
- session->stats.rx_errors++;
+ atomic_long_inc(&session->stats.rx_errors);
  kfree_skb(skb);
  }
 
@@ -447,34 +448,16 @@ static void pppol2tp_session_close(struct l2tp_session *session)
 {
  struct pppol2tp_session *ps = l2tp_session_priv(session);
  struct sock *sk = ps->sock;
- struct sk_buff *skb;
+ struct socket *sock = sk->sk_socket;
 
  BUG_ON(session->magic != L2TP_SESSION_MAGIC);
 
- if (session->session_id == 0)
- goto out;
-
- if (sk != NULL) {
- lock_sock(sk);
-
- if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
- pppox_unbind_sock(sk);
- sk->sk_state = PPPOX_DEAD;
- sk->sk_state_change(sk);
- }
-
- /* Purge any queued data */
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
- while ((skb = skb_dequeue(&session->reorder_q))) {
- kfree_skb(skb);
- sock_put(sk);
- }
 
- release_sock(sk);
+ if (sock) {
+ inet_shutdown(sock, 2);
+ /* Don't let the session go away before our socket does */
+ l2tp_session_inc_refcount(session);
  }
-
-out:
  return;
 }
 
@@ -483,19 +466,12 @@ out:
  */
 static void pppol2tp_session_destruct(struct sock *sk)
 {
- struct l2tp_session *session;
-
- if (sk->sk_user_data != NULL) {
- session = sk->sk_user_data;
- if (session == NULL)
- goto out;
-
+ struct l2tp_session *session = sk->sk_user_data;
+ if (session) {
  sk->sk_user_data = NULL;
  BUG_ON(session->magic != L2TP_SESSION_MAGIC);
  l2tp_session_dec_refcount(session);
  }
-
-out:
  return;
 }
 
@@ -525,16 +501,13 @@ static int pppol2tp_release(struct socket *sock)
  session = pppol2tp_sock_to_session(sk);
 
  /* Purge any queued data */
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
  if (session != NULL) {
- struct sk_buff *skb;
- while ((skb = skb_dequeue(&session->reorder_q))) {
- kfree_skb(skb);
- sock_put(sk);
- }
+ __l2tp_session_unhash(session);
+ l2tp_session_queue_purge(session);
  sock_put(sk);
  }
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
 
  release_sock(sk);
 
@@ -880,18 +853,6 @@ out:
  return error;
 }
 
-/* Called when deleting sessions via the netlink interface.
- */
-static int pppol2tp_session_delete(struct l2tp_session *session)
-{
- struct pppol2tp_session *ps = l2tp_session_priv(session);
-
- if (ps->sock == NULL)
- l2tp_session_dec_refcount(session);
-
- return 0;
-}
-
 #endif /* CONFIG_L2TP_V3 */
 
 /* getname() support.
@@ -1025,14 +986,14 @@ end:
 static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest,
  struct l2tp_stats *stats)
 {
- dest->tx_packets = stats->tx_packets;
- dest->tx_bytes = stats->tx_bytes;
- dest->tx_errors = stats->tx_errors;
- dest->rx_packets = stats->rx_packets;
- dest->rx_bytes = stats->rx_bytes;
- dest->rx_seq_discards = stats->rx_seq_discards;
- dest->rx_oos_packets = stats->rx_oos_packets;
- dest->rx_errors = stats->rx_errors;
+ dest->tx_packets = atomic_long_read(&stats->tx_packets);
+ dest->tx_bytes = atomic_long_read(&stats->tx_bytes);
+ dest->tx_errors = atomic_long_read(&stats->tx_errors);
+ dest->rx_packets = atomic_long_read(&stats->rx_packets);
+ dest->rx_bytes = atomic_long_read(&stats->rx_bytes);
+ dest->rx_seq_discards = atomic_long_read(&stats->rx_seq_discards);
+ dest->rx_oos_packets = atomic_long_read(&stats->rx_oos_packets);
+ dest->rx_errors = atomic_long_read(&stats->rx_errors);
 }
 
 /* Session ioctl helper.
@@ -1666,14 +1627,14 @@ static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
  tunnel->name,
  (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
  atomic_read(&tunnel->ref_count) - 1);
- seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
+ seq_printf(m, " %08x %ld/%ld/%ld %ld/%ld/%ld\n",
  tunnel->debug,
- (unsigned long long)tunnel->stats.tx_packets,
- (unsigned long long)tunnel->stats.tx_bytes,
- (unsigned long long)tunnel->stats.tx_errors,
- (unsigned long long)tunnel->stats.rx_packets,
- (unsigned long long)tunnel->stats.rx_bytes,
- (unsigned long long)tunnel->stats.rx_errors);
+ atomic_long_read(&tunnel->stats.tx_packets),
+ atomic_long_read(&tunnel->stats.tx_bytes),
+ atomic_long_read(&tunnel->stats.tx_errors),
+ atomic_long_read(&tunnel->stats.rx_packets),
+ atomic_long_read(&tunnel->stats.rx_bytes),
+ atomic_long_read(&tunnel->stats.rx_errors));
 }
 
 static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
@@ -1708,14 +1669,14 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
  session->lns_mode ? "LNS" : "LAC",
  session->debug,
  jiffies_to_msecs(session->reorder_timeout));
- seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
+ seq_printf(m, " %hu/%hu %ld/%ld/%ld %ld/%ld/%ld\n",
  session->nr, session->ns,
- (unsigned long long)session->stats.tx_packets,
- (unsigned long long)session->stats.tx_bytes,
- (unsigned long long)session->stats.tx_errors,
- (unsigned long long)session->stats.rx_packets,
- (unsigned long long)session->stats.rx_bytes,
- (unsigned long long)session->stats.rx_errors);
+ atomic_long_read(&session->stats.tx_packets),
+ atomic_long_read(&session->stats.tx_bytes),
+ atomic_long_read(&session->stats.tx_errors),
+ atomic_long_read(&session->stats.rx_packets),
+ atomic_long_read(&session->stats.rx_bytes),
+ atomic_long_read(&session->stats.rx_errors));
 
  if (po)
  seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
@@ -1839,7 +1800,7 @@ static const struct pppox_proto pppol2tp_proto = {
 
 static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = {
  .session_create = pppol2tp_session_create,
- .session_delete = pppol2tp_session_delete,
+ .session_delete = l2tp_session_delete,
 };
 
 #endif /* CONFIG_L2TP_V3 */
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 88709882c464..48aaa89253e0 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -720,6 +720,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
  int target; /* Read at least this many bytes */
  long timeo;
 
+ msg->msg_namelen = 0;
+
  lock_sock(sk);
  copied = -ENOTCONN;
  if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index f2627226a087..10a30b4fc7db 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -104,6 +104,15 @@ hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags)
  dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
+static inline void
+hash_ipportnet4_data_reset_flags(struct hash_ipportnet4_elem *dst, u32 *flags)
+{
+ if (dst->nomatch) {
+ *flags = IPSET_FLAG_NOMATCH;
+ dst->nomatch = 0;
+ }
+}
+
 static inline int
 hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem)
 {
@@ -414,6 +423,15 @@ hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags)
  dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
+static inline void
+hash_ipportnet6_data_reset_flags(struct hash_ipportnet6_elem *dst, u32 *flags)
+{
+ if (dst->nomatch) {
+ *flags = IPSET_FLAG_NOMATCH;
+ dst->nomatch = 0;
+ }
+}
+
 static inline int
 hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem)
 {
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 4b677cf6bf7d..d6a59154d710 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -87,7 +87,16 @@ hash_net4_data_copy(struct hash_net4_elem *dst,
 static inline void
 hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags)
 {
- dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+ dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+}
+
+static inline void
+hash_net4_data_reset_flags(struct hash_net4_elem *dst, u32 *flags)
+{
+ if (dst->nomatch) {
+ *flags = IPSET_FLAG_NOMATCH;
+ dst->nomatch = 0;
+ }
 }
 
 static inline int
@@ -308,7 +317,16 @@ hash_net6_data_copy(struct hash_net6_elem *dst,
 static inline void
 hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags)
 {
- dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+ dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+}
+
+static inline void
+hash_net6_data_reset_flags(struct hash_net6_elem *dst, u32 *flags)
+{
+ if (dst->nomatch) {
+ *flags = IPSET_FLAG_NOMATCH;
+ dst->nomatch = 0;
+ }
 }
 
 static inline int
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 6ba985f1c96f..f2b0a3c30130 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -198,7 +198,16 @@ hash_netiface4_data_copy(struct hash_netiface4_elem *dst,
 static inline void
 hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags)
 {
- dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+ dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+}
+
+static inline void
+hash_netiface4_data_reset_flags(struct hash_netiface4_elem *dst, u32 *flags)
+{
+ if (dst->nomatch) {
+ *flags = IPSET_FLAG_NOMATCH;
+ dst->nomatch = 0;
+ }
 }
 
 static inline int
@@ -494,7 +503,7 @@ hash_netiface6_data_copy(struct hash_netiface6_elem *dst,
 static inline void
 hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags)
 {
- dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+ dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
 static inline int
@@ -504,6 +513,15 @@ hash_netiface6_data_match(const struct hash_netiface6_elem *elem)
 }
 
 static inline void
+hash_netiface6_data_reset_flags(struct hash_netiface6_elem *dst, u32 *flags)
+{
+ if (dst->nomatch) {
+ *flags = IPSET_FLAG_NOMATCH;
+ dst->nomatch = 0;
+ }
+}
+
+static inline void
 hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem)
 {
  elem->elem = 0;
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index af20c0c5ced2..349deb672a2d 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -104,6 +104,15 @@ hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags)
  dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
+static inline void
+hash_netport4_data_reset_flags(struct hash_netport4_elem *dst, u32 *flags)
+{
+ if (dst->nomatch) {
+ *flags = IPSET_FLAG_NOMATCH;
+ dst->nomatch = 0;
+ }
+}
+
 static inline int
 hash_netport4_data_match(const struct hash_netport4_elem *elem)
 {
@@ -375,6 +384,15 @@ hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags)
  dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
+static inline void
+hash_netport6_data_reset_flags(struct hash_netport6_elem *dst, u32 *flags)
+{
+ if (dst->nomatch) {
+ *flags = IPSET_FLAG_NOMATCH;
+ dst->nomatch = 0;
+ }
+}
+
 static inline int
 hash_netport6_data_match(const struct hash_netport6_elem *elem)
 {
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 8371c2bac2e4..09c744aa8982 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -174,9 +174,13 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
 {
  const struct set_elem *e = list_set_elem(map, i);
 
- if (i == map->size - 1 && e->id != IPSET_INVALID_ID)
- /* Last element replaced: e.g. add new,before,last */
- ip_set_put_byindex(e->id);
+ if (e->id != IPSET_INVALID_ID) {
+ const struct set_elem *x = list_set_elem(map, map->size - 1);
+
+ /* Last element replaced or pushed off */
+ if (x->id != IPSET_INVALID_ID)
+ ip_set_put_byindex(x->id);
+ }
  if (with_timeout(map->timeout))
  list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
  else
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 47edf5a40a59..61f49d241712 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1394,10 +1394,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
  skb_reset_network_header(skb);
  IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
  &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
- rcu_read_lock();
  ipv4_update_pmtu(skb, dev_net(skb->dev),
  mtu, 0, 0, 0, 0);
- rcu_read_unlock();
  /* Client uses PMTUD? */
  if (!(cih->frag_off & htons(IP_DF)))
  goto ignore_ipip;
@@ -1577,7 +1575,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
  }
  /* ipvs enabled in this netns ? */
  net = skb_net(skb);
- if (!net_ipvs(net)->enable)
+ ipvs = net_ipvs(net);
+ if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
  return NF_ACCEPT;
 
  ip_vs_fill_iph_skb(af, skb, &iph);
@@ -1654,7 +1653,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
  }
 
  IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
- ipvs = net_ipvs(net);
  /* Check the server status */
  if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
  /* the destination server is not available */
@@ -1815,13 +1813,15 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
 {
  int r;
  struct net *net;
+ struct netns_ipvs *ipvs;
 
  if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
  return NF_ACCEPT;
 
  /* ipvs enabled in this netns ? */
  net = skb_net(skb);
- if (!net_ipvs(net)->enable)
+ ipvs = net_ipvs(net);
+ if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
  return NF_ACCEPT;
 
  return ip_vs_in_icmp(skb, &r, hooknum);
@@ -1835,6 +1835,7 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
 {
  int r;
  struct net *net;
+ struct netns_ipvs *ipvs;
  struct ip_vs_iphdr iphdr;
 
  ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr);
@@ -1843,7 +1844,8 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
 
  /* ipvs enabled in this netns ? */
  net = skb_net(skb);
- if (!net_ipvs(net)->enable)
+ ipvs = net_ipvs(net);
+ if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
  return NF_ACCEPT;
 
  return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c68198bf9128..9e2d1cccd1eb 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1808,6 +1808,12 @@ static struct ctl_table vs_vars[] = {
  .mode = 0644,
  .proc_handler = proc_dointvec,
  },
+ {
+ .procname = "backup_only",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
 #ifdef CONFIG_IP_VS_DEBUG
  {
  .procname = "debug_level",
@@ -3741,6 +3747,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
  tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
  ipvs->sysctl_pmtu_disc = 1;
  tbl[idx++].data = &ipvs->sysctl_pmtu_disc;
+ tbl[idx++].data = &ipvs->sysctl_backup_only;
 
 
  ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index ae8ec6f27688..cd1d7298f7ba 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -906,7 +906,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
  sctp_chunkhdr_t _sctpch, *sch;
  unsigned char chunk_type;
  int event, next_state;
- int ihl;
+ int ihl, cofs;
 
 #ifdef CONFIG_IP_VS_IPV6
  ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
@@ -914,8 +914,8 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
  ihl = ip_hdrlen(skb);
 #endif
 
- sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t),
- sizeof(_sctpch), &_sctpch);
+ cofs = ihl + sizeof(sctp_sctphdr_t);
+ sch = skb_header_pointer(skb, cofs, sizeof(_sctpch), &_sctpch);
  if (sch == NULL)
  return;
 
@@ -933,10 +933,12 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
  */
  if ((sch->type == SCTP_CID_COOKIE_ECHO) ||
  (sch->type == SCTP_CID_COOKIE_ACK)) {
- sch = skb_header_pointer(skb, (ihl + sizeof(sctp_sctphdr_t) +
- sch->length), sizeof(_sctpch), &_sctpch);
- if (sch) {
- if (sch->type == SCTP_CID_ABORT)
+ int clen = ntohs(sch->length);
+
+ if (clen >= sizeof(sctp_chunkhdr_t)) {
+ sch = skb_header_pointer(skb, cofs + ALIGN(clen, 4),
+ sizeof(_sctpch), &_sctpch);
+ if (sch && sch->type == SCTP_CID_ABORT)
  chunk_type = sch->type;
  }
  }
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index a9740bd6fe54..94b4b9853f60 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -339,6 +339,13 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
 {
  const struct nf_conn_help *help;
  const struct nf_conntrack_helper *helper;
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
 
  /* Called from the helper function, this call never fails */
  help = nfct_help(ct);
@@ -347,7 +354,9 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
  helper = rcu_dereference(help->helper);
 
  nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL,
- "nf_ct_%s: dropping packet: %s ", helper->name, fmt);
+ "nf_ct_%s: dropping packet: %pV ", helper->name, &vaf);
+
+ va_end(args);
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_log);
 
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 432f95780003..ba65b2041eb4 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -969,6 +969,10 @@ static int __init nf_conntrack_proto_dccp_init(void)
 {
  int ret;
 
+ ret = register_pernet_subsys(&dccp_net_ops);
+ if (ret < 0)
+ goto out_pernet;
+
  ret = nf_ct_l4proto_register(&dccp_proto4);
  if (ret < 0)
  goto out_dccp4;
@@ -977,16 +981,12 @@ static int __init nf_conntrack_proto_dccp_init(void)
  if (ret < 0)
  goto out_dccp6;
 
- ret = register_pernet_subsys(&dccp_net_ops);
- if (ret < 0)
- goto out_pernet;
-
  return 0;
-out_pernet:
- nf_ct_l4proto_unregister(&dccp_proto6);
 out_dccp6:
  nf_ct_l4proto_unregister(&dccp_proto4);
 out_dccp4:
+ unregister_pernet_subsys(&dccp_net_ops);
+out_pernet:
  return ret;
 }
 
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index bd7d01d9c7e7..155ce9f8a0db 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -420,18 +420,18 @@ static int __init nf_ct_proto_gre_init(void)
 {
  int ret;
 
- ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4);
- if (ret < 0)
- goto out_gre4;
-
  ret = register_pernet_subsys(&proto_gre_net_ops);
  if (ret < 0)
  goto out_pernet;
 
+ ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4);
+ if (ret < 0)
+ goto out_gre4;
+
  return 0;
-out_pernet:
- nf_ct_l4proto_unregister(&nf_conntrack_l4proto_gre4);
 out_gre4:
+ unregister_pernet_subsys(&proto_gre_net_ops);
+out_pernet:
  return ret;
 }
 
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 480f616d5936..ec83536def9a 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -888,6 +888,10 @@ static int __init nf_conntrack_proto_sctp_init(void)
 {
  int ret;
 
+ ret = register_pernet_subsys(&sctp_net_ops);
+ if (ret < 0)
+ goto out_pernet;
+
  ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp4);
  if (ret < 0)
  goto out_sctp4;
@@ -896,16 +900,12 @@ static int __init nf_conntrack_proto_sctp_init(void)
  if (ret < 0)
  goto out_sctp6;
 
- ret = register_pernet_subsys(&sctp_net_ops);
- if (ret < 0)
- goto out_pernet;
-
  return 0;
-out_pernet:
- nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp6);
 out_sctp6:
  nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
 out_sctp4:
+ unregister_pernet_subsys(&sctp_net_ops);
+out_pernet:
  return ret;
 }
 
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 157489581c31..ca969f6273f7 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -371,6 +371,10 @@ static int __init nf_conntrack_proto_udplite_init(void)
 {
  int ret;
 
+ ret = register_pernet_subsys(&udplite_net_ops);
+ if (ret < 0)
+ goto out_pernet;
+
  ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4);
  if (ret < 0)
  goto out_udplite4;
@@ -379,16 +383,12 @@ static int __init nf_conntrack_proto_udplite_init(void)
  if (ret < 0)
  goto out_udplite6;
 
- ret = register_pernet_subsys(&udplite_net_ops);
- if (ret < 0)
- goto out_pernet;
-
  return 0;
-out_pernet:
- nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite6);
 out_udplite6:
  nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
 out_udplite4:
+ unregister_pernet_subsys(&udplite_net_ops);
+out_pernet:
  return ret;
 }
 
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 0e7d423324c3..e0c4373b4747 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1593,10 +1593,8 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
  end += strlen("\r\n\r\n") + clen;
 
  msglen = origlen = end - dptr;
- if (msglen > datalen) {
- nf_ct_helper_log(skb, ct, "incomplete/bad SIP message");
- return NF_DROP;
- }
+ if (msglen > datalen)
+ return NF_ACCEPT;
 
  ret = process_sip_msg(skb, ct, protoff, dataoff,
  &dptr, &msglen);
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 6bcce401fd1c..fedee3943661 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -568,6 +568,7 @@ static int __init nf_conntrack_standalone_init(void)
  register_net_sysctl(&init_net, "net", nf_ct_netfilter_table);
  if (!nf_ct_netfilter_header) {
  pr_err("nf_conntrack: can't register to sysctl.\n");
+ ret = -ENOMEM;
  goto out_sysctl;
  }
 #endif
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 8d5769c6d16e..ad24be070e53 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -467,33 +467,22 @@ EXPORT_SYMBOL_GPL(nf_nat_packet);
 struct nf_nat_proto_clean {
  u8 l3proto;
  u8 l4proto;
- bool hash;
 };
 
-/* Clear NAT section of all conntracks, in case we're loaded again. */
-static int nf_nat_proto_clean(struct nf_conn *i, void *data)
+/* kill conntracks with affected NAT section */
+static int nf_nat_proto_remove(struct nf_conn *i, void *data)
 {
  const struct nf_nat_proto_clean *clean = data;
  struct nf_conn_nat *nat = nfct_nat(i);
 
  if (!nat)
  return 0;
- if (!(i->status & IPS_SRC_NAT_DONE))
- return 0;
+
  if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
  (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
  return 0;
 
- if (clean->hash) {
- spin_lock_bh(&nf_nat_lock);
- hlist_del_rcu(&nat->bysource);
- spin_unlock_bh(&nf_nat_lock);
- } else {
- memset(nat, 0, sizeof(*nat));
- i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK |
- IPS_SEQ_ADJUST);
- }
- return 0;
+ return i->status & IPS_NAT_MASK ? 1 : 0;
 }
 
 static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
@@ -505,16 +494,8 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
  struct net *net;
 
  rtnl_lock();
- /* Step 1 - remove from bysource hash */
- clean.hash = true;
  for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
- synchronize_rcu();
-
- /* Step 2 - clean NAT section */
- clean.hash = false;
- for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+ nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
  rtnl_unlock();
 }
 
@@ -526,16 +507,9 @@ static void nf_nat_l3proto_clean(u8 l3proto)
  struct net *net;
 
  rtnl_lock();
- /* Step 1 - remove from bysource hash */
- clean.hash = true;
- for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
- synchronize_rcu();
 
- /* Step 2 - clean NAT section */
- clean.hash = false;
  for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+ nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
  rtnl_unlock();
 }
 
@@ -773,7 +747,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 {
  struct nf_nat_proto_clean clean = {};
 
- nf_ct_iterate_cleanup(net, &nf_nat_proto_clean, &clean);
+ nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);
  synchronize_rcu();
  nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
 }
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index d578ec251712..0b1b32cda307 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -62,11 +62,6 @@ void nfnl_unlock(__u8 subsys_id)
 }
 EXPORT_SYMBOL_GPL(nfnl_unlock);
 
-static struct mutex *nfnl_get_lock(__u8 subsys_id)
-{
- return &table[subsys_id].mutex;
-}
-
 int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
 {
  nfnl_lock(n->subsys_id);
@@ -199,7 +194,7 @@ replay:
  rcu_read_unlock();
  nfnl_lock(subsys_id);
  if (rcu_dereference_protected(table[subsys_id].subsys,
- lockdep_is_held(nfnl_get_lock(subsys_id))) != ss ||
+ lockdep_is_held(&table[subsys_id].mutex)) != ss ||
  nfnetlink_find_client(type, ss) != nc)
  err = -EAGAIN;
  else if (nc->call)
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 589d686f0b4c..dc3fd5d44464 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -49,6 +49,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
  return -EINVAL;
 
  acct_name = nla_data(tb[NFACCT_NAME]);
+ if (strlen(acct_name) == 0)
+ return -EINVAL;
 
  list_for_each_entry(nfacct, &nfnl_acct_list, head) {
  if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 858fd52c1040..42680b2baa11 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -112,7 +112,7 @@ instance_create(u_int16_t queue_num, int portid)
  inst->queue_num = queue_num;
  inst->peer_portid = portid;
  inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
- inst->copy_range = 0xfffff;
+ inst->copy_range = 0xffff;
  inst->copy_mode = NFQNL_COPY_NONE;
  spin_lock_init(&inst->lock);
  INIT_LIST_HEAD(&inst->queue_list);
@@ -1062,8 +1062,10 @@ static int __init nfnetlink_queue_init(void)
 
 #ifdef CONFIG_PROC_FS
  if (!proc_create("nfnetlink_queue", 0440,
- proc_net_netfilter, &nfqnl_file_ops))
+ proc_net_netfilter, &nfqnl_file_ops)) {
+ status = -ENOMEM;
  goto cleanup_subsys;
+ }
 #endif
 
  register_netdevice_notifier(&nfqnl_dev_notifier);
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c
index ba92824086f3..3228d7f24eb4 100644
--- a/net/netfilter/xt_AUDIT.c
+++ b/net/netfilter/xt_AUDIT.c
@@ -124,6 +124,9 @@ audit_tg(struct sk_buff *skb, const struct xt_action_param *par)
  const struct xt_audit_info *info = par->targinfo;
  struct audit_buffer *ab;
 
+ if (audit_enabled == 0)
+ goto errout;
+
  ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
  if (ab == NULL)
  goto errout;
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 847d495cd4de..8a6c6ea466d8 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1189,8 +1189,6 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
  struct netlbl_unlhsh_walk_arg cb_arg;
  u32 skip_bkt = cb->args[0];
  u32 skip_chain = cb->args[1];
- u32 skip_addr4 = cb->args[2];
- u32 skip_addr6 = cb->args[3];
  u32 iter_bkt;
  u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0;
  struct netlbl_unlhsh_iface *iface;
@@ -1215,7 +1213,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
  continue;
  netlbl_af4list_foreach_rcu(addr4,
  &iface->addr4_list) {
- if (iter_addr4++ < skip_addr4)
+ if (iter_addr4++ < cb->args[2])
  continue;
  if (netlbl_unlabel_staticlist_gen(
  NLBL_UNLABEL_C_STATICLIST,
@@ -1231,7 +1229,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
 #if IS_ENABLED(CONFIG_IPV6)
  netlbl_af6list_foreach_rcu(addr6,
  &iface->addr6_list) {
- if (iter_addr6++ < skip_addr6)
+ if (iter_addr6++ < cb->args[3])
  continue;
  if (netlbl_unlabel_staticlist_gen(
  NLBL_UNLABEL_C_STATICLIST,
@@ -1250,10 +1248,10 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
 
 unlabel_staticlist_return:
  rcu_read_unlock();
- cb->args[0] = skip_bkt;
- cb->args[1] = skip_chain;
- cb->args[2] = skip_addr4;
- cb->args[3] = skip_addr6;
+ cb->args[0] = iter_bkt;
+ cb->args[1] = iter_chain;
+ cb->args[2] = iter_addr4;
+ cb->args[3] = iter_addr6;
  return skb->len;
 }
 
@@ -1273,12 +1271,9 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
 {
  struct netlbl_unlhsh_walk_arg cb_arg;
  struct netlbl_unlhsh_iface *iface;
- u32 skip_addr4 = cb->args[0];
- u32 skip_addr6 = cb->args[1];
- u32 iter_addr4 = 0;
+ u32 iter_addr4 = 0, iter_addr6 = 0;
  struct netlbl_af4list *addr4;
 #if IS_ENABLED(CONFIG_IPV6)
- u32 iter_addr6 = 0;
  struct netlbl_af6list *addr6;
 #endif
 
@@ -1292,7 +1287,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
  goto unlabel_staticlistdef_return;
 
  netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) {
- if (iter_addr4++ < skip_addr4)
+ if (iter_addr4++ < cb->args[0])
  continue;
  if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
  iface,
@@ -1305,7 +1300,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
  }
 #if IS_ENABLED(CONFIG_IPV6)
  netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) {
- if (iter_addr6++ < skip_addr6)
+ if (iter_addr6++ < cb->args[1])
  continue;
  if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
  iface,
@@ -1320,8 +1315,8 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
 
 unlabel_staticlistdef_return:
  rcu_read_unlock();
- cb->args[0] = skip_addr4;
- cb->args[1] = skip_addr6;
+ cb->args[0] = iter_addr4;
+ cb->args[1] = iter_addr6;
  return skb->len;
 }
 
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index f2aabb6f4105..5a55be3f17a5 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -142,6 +142,7 @@ int genl_register_mc_group(struct genl_family *family,
  int err = 0;
 
  BUG_ON(grp->name[0] == '\0');
+ BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL);
 
  genl_lock();
 
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index d1fa1d9ffd2e..103bd704b5fc 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1173,6 +1173,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
  }
 
  if (sax != NULL) {
+ memset(sax, 0, sizeof(*sax));
  sax->sax25_family = AF_NETROM;
  skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
  AX25_ADDR_LEN);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 8f025746f337..6c94447ec414 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -646,6 +646,8 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
  pr_debug("%p %zu\n", sk, len);
 
+ msg->msg_namelen = 0;
+
  lock_sock(sk);
 
  if (sk->sk_state == LLCP_CLOSED &&
@@ -691,6 +693,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
  pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap);
 
+ memset(sockaddr, 0, sizeof(*sockaddr));
  sockaddr->sa_family = AF_NFC;
  sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP;
  sockaddr->dsap = ui_cb->dsap;
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index ac2defeeba83..d4d5363c7ba7 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -58,7 +58,7 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
 
  if (skb->ip_summed == CHECKSUM_COMPLETE)
  skb->csum = csum_sub(skb->csum, csum_partial(skb->data
- + ETH_HLEN, VLAN_HLEN, 0));
+ + (2 * ETH_ALEN), VLAN_HLEN, 0));
 
  vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
  *current_tci = vhdr->h_vlan_TCI;
@@ -115,7 +115,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla
 
  if (skb->ip_summed == CHECKSUM_COMPLETE)
  skb->csum = csum_add(skb->csum, csum_partial(skb->data
- + ETH_HLEN, VLAN_HLEN, 0));
+ + (2 * ETH_ALEN), VLAN_HLEN, 0));
 
  }
  __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e87a26506dba..6980c3e6f066 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -394,6 +394,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
394 394
395 skb_copy_and_csum_dev(skb, nla_data(nla)); 395 skb_copy_and_csum_dev(skb, nla_data(nla));
396 396
397 genlmsg_end(user_skb, upcall);
397 err = genlmsg_unicast(net, user_skb, upcall_info->portid); 398 err = genlmsg_unicast(net, user_skb, upcall_info->portid);
398 399
399out: 400out:
@@ -1592,10 +1593,8 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
1592 return ERR_PTR(-ENOMEM); 1593 return ERR_PTR(-ENOMEM);
1593 1594
1594 retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd); 1595 retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
1595 if (retval < 0) { 1596 BUG_ON(retval < 0);
1596 kfree_skb(skb); 1597
1597 return ERR_PTR(retval);
1598 }
1599 return skb; 1598 return skb;
1600} 1599}
1601 1600
@@ -1690,6 +1689,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1690 if (IS_ERR(vport)) 1689 if (IS_ERR(vport))
1691 goto exit_unlock; 1690 goto exit_unlock;
1692 1691
1692 err = 0;
1693 reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, 1693 reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
1694 OVS_VPORT_CMD_NEW); 1694 OVS_VPORT_CMD_NEW);
1695 if (IS_ERR(reply)) { 1695 if (IS_ERR(reply)) {
@@ -1724,24 +1724,32 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
 		err = -EINVAL;
 
+	reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!reply) {
+		err = -ENOMEM;
+		goto exit_unlock;
+	}
+
 	if (!err && a[OVS_VPORT_ATTR_OPTIONS])
 		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
 	if (err)
-		goto exit_unlock;
+		goto exit_free;
+
 	if (a[OVS_VPORT_ATTR_UPCALL_PID])
 		vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
 
-	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
-					 OVS_VPORT_CMD_NEW);
-	if (IS_ERR(reply)) {
-		netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
-				ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
-		goto exit_unlock;
-	}
+	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
+				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
+	BUG_ON(err < 0);
 
 	genl_notify(reply, genl_info_net(info), info->snd_portid,
 		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
 
+	rtnl_unlock();
+	return 0;
+
+exit_free:
+	kfree_skb(reply);
 exit_unlock:
 	rtnl_unlock();
 	return err;
@@ -1771,6 +1779,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	if (IS_ERR(reply))
 		goto exit_unlock;
 
+	err = 0;
 	ovs_dp_detach_port(vport);
 
 	genl_notify(reply, genl_info_net(info), info->snd_portid,
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 20605ecf100b..67a2b783fe70 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -482,7 +482,11 @@ static __be16 parse_ethertype(struct sk_buff *skb)
 		return htons(ETH_P_802_2);
 
 	__skb_pull(skb, sizeof(struct llc_snap_hdr));
-	return llc->ethertype;
+
+	if (ntohs(llc->ethertype) >= 1536)
+		return llc->ethertype;
+
+	return htons(ETH_P_802_2);
 }
 
 static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
@@ -791,9 +795,9 @@ void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
 
 void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
 {
+	BUG_ON(table->count == 0);
 	hlist_del_rcu(&flow->hash_node[table->node_ver]);
 	table->count--;
-	BUG_ON(table->count < 0);
 }
 
 /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
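
The parse_ethertype() hunk above encodes the usual framing rule: values in the LLC/SNAP type field below 1536 (0x600) are 802.3 lengths rather than EtherTypes, so the flow key must fall back to the raw 802.2 protocol for them instead of trusting the embedded value. A minimal userspace sketch of that check, assuming ETH_P_802_2 is 0x0004 as in the kernel's if_ether.h:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define ETH_P_802_2 0x0004	/* assumed Linux value for "raw" 802.2 frames */

/* Treat sub-1536 values as 802.3 lengths, not protocol numbers. */
static uint16_t resolve_ethertype(uint16_t llc_ethertype_net)
{
	if (ntohs(llc_ethertype_net) >= 1536)
		return llc_ethertype_net;
	return htons(ETH_P_802_2);
}

int main(void)
{
	/* 0x0800 (IPv4) is kept; 0x0100 is a length and maps to 802.2. */
	printf("0x0800 -> 0x%04x\n", ntohs(resolve_ethertype(htons(0x0800))));
	printf("0x0100 -> 0x%04x\n", ntohs(resolve_ethertype(htons(0x0100))));
	return 0;
}
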
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 670cbc3518de..2130d61c384a 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -43,8 +43,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
 
 	/* Make our own copy of the packet. Otherwise we will mangle the
 	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
-	 * (No one comes after us, since we tell handle_bridge() that we took
-	 * the packet.) */
+	 */
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(!skb))
 		return;
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index ba717cc038b3..f6b8132ce4cb 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -325,8 +325,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
  * @skb: skb that was received
  *
  * Must be called with rcu_read_lock. The packet cannot be shared and
- * skb->data should point to the Ethernet header. The caller must have already
- * called compute_ip_summed() to initialize the checksumming fields.
+ * skb->data should point to the Ethernet header.
  */
 void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
 {
diff --git a/net/rds/stats.c b/net/rds/stats.c
index 7be790d60b90..73be187d389e 100644
--- a/net/rds/stats.c
+++ b/net/rds/stats.c
@@ -87,6 +87,7 @@ void rds_stats_info_copy(struct rds_info_iterator *iter,
 	for (i = 0; i < nr; i++) {
 		BUG_ON(strlen(names[i]) >= sizeof(ctr.name));
 		strncpy(ctr.name, names[i], sizeof(ctr.name) - 1);
+		ctr.name[sizeof(ctr.name) - 1] = '\0';
 		ctr.value = values[i];
 
 		rds_info_copy(iter, &ctr, sizeof(ctr));
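
The added line in rds_stats_info_copy() guards against strncpy()'s sharp edge: when the source is at least as long as the count, strncpy() copies exactly count bytes and writes no terminating NUL. A small standalone illustration with a hypothetical buffer and counter name, not RDS code:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[8];

	/* Source longer than the count: strncpy() leaves no NUL behind. */
	strncpy(name, "very_long_counter_name", sizeof(name) - 1);
	name[sizeof(name) - 1] = '\0';	/* explicit termination, as in the patch */

	printf("%s\n", name);	/* prints "very_lo" */
	return 0;
}
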
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index cf68e6e4054a..9c8347451597 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
 	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 
 	if (srose != NULL) {
+		memset(srose, 0, msg->msg_namelen);
 		srose->srose_family = AF_ROSE;
 		srose->srose_addr = rose->dest_addr;
 		srose->srose_call = rose->dest_call;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 13aa47aa2ffb..1bc210ffcba2 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -962,8 +962,11 @@ cbq_dequeue(struct Qdisc *sch)
 		cbq_update(q);
 		if ((incr -= incr2) < 0)
 			incr = 0;
+		q->now += incr;
+	} else {
+		if (now > q->now)
+			q->now = now;
 	}
-	q->now += incr;
 	q->now_rt = now;
 
 	for (;;) {
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 4e606fcb2534..55786283a3df 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -195,7 +195,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		flow->deficit = q->quantum;
 		flow->dropped = 0;
 	}
-	if (++sch->q.qlen < sch->limit)
+	if (++sch->q.qlen <= sch->limit)
 		return NET_XMIT_SUCCESS;
 
 	q->drop_overlimit++;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ffad48109a22..eac7e0ee23c1 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -904,7 +904,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate)
 	u64 mult;
 	int shift;
 
-	r->rate_bps = rate << 3;
+	r->rate_bps = (u64)rate << 3;
 	r->shift = 0;
 	r->mult = 1;
 	/*
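
The cast in psched_ratecfg_precompute() matters because rate is a u32: without it the shift is performed in 32-bit arithmetic, so for rates at or above 2^29 bytes/s (roughly 4.3 Gbit/s) the high bits are lost before the result is widened to u64. A quick demonstration with a made-up rate value:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint32_t rate = 600000000u;	/* hypothetical 600 MB/s, i.e. 4.8 Gbit/s */

	uint64_t truncated = rate << 3;			/* 32-bit shift wraps around */
	uint64_t correct = (uint64_t)rate << 3;		/* widened first, as in the patch */

	printf("truncated = %" PRIu64 "\n", truncated);	/* 505032704 */
	printf("correct   = %" PRIu64 "\n", correct);	/* 4800000000 */
	return 0;
}
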
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 43cd0dd9149d..d2709e2b7be6 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1079,7 +1079,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
 			transports) {
 
 		if (transport == active)
-			break;
+			continue;
 		list_for_each_entry(chunk, &transport->transmitted,
 				    transmitted_list) {
 			if (key == chunk->subh.data_hdr->tsn) {
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 5131fcfedb03..de1a0138317f 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2082,7 +2082,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
 	}
 
 	/* Delete the tempory new association. */
-	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
 	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
 	/* Restore association pointer to provide SCTP command interpeter
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index f7d34e7b6f81..5ead60550895 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -447,17 +447,21 @@ static int rsc_parse(struct cache_detail *cd,
 	else {
 		int N, i;
 
+		/*
+		 * NOTE: we skip uid_valid()/gid_valid() checks here:
+		 * instead, * -1 id's are later mapped to the
+		 * (export-specific) anonymous id by nfsd_setuser.
+		 *
+		 * (But supplementary gid's get no such special
+		 * treatment so are checked for validity here.)
+		 */
 		/* uid */
 		rsci.cred.cr_uid = make_kuid(&init_user_ns, id);
-		if (!uid_valid(rsci.cred.cr_uid))
-			goto out;
 
 		/* gid */
 		if (get_int(&mesg, &id))
 			goto out;
 		rsci.cred.cr_gid = make_kgid(&init_user_ns, id);
-		if (!gid_valid(rsci.cred.cr_gid))
-			goto out;
 
 		/* number of additional gid's */
 		if (get_int(&mesg, &N))
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index dcc446e7fbf6..d5f35f15af98 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -304,10 +304,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
 	err = rpciod_up();
 	if (err)
 		goto out_no_rpciod;
-	err = -EINVAL;
-	if (!xprt)
-		goto out_no_xprt;
 
+	err = -EINVAL;
 	if (args->version >= program->nrvers)
 		goto out_err;
 	version = program->version[args->version];
@@ -382,10 +380,9 @@ out_no_principal:
 out_no_stats:
 	kfree(clnt);
 out_err:
-	xprt_put(xprt);
-out_no_xprt:
 	rpciod_down();
 out_no_rpciod:
+	xprt_put(xprt);
 	return ERR_PTR(err);
 }
 
@@ -512,7 +509,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
 	new = rpc_new_client(args, xprt);
 	if (IS_ERR(new)) {
 		err = PTR_ERR(new);
-		goto out_put;
+		goto out_err;
 	}
 
 	atomic_inc(&clnt->cl_count);
@@ -525,8 +522,6 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
 	new->cl_chatty = clnt->cl_chatty;
 	return new;
 
-out_put:
-	xprt_put(xprt);
 out_err:
 	dprintk("RPC: %s: returned error %d\n", __func__, err);
 	return ERR_PTR(err);
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 7b9b40224a27..a9129f8d7070 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1174,6 +1174,8 @@ static struct file_system_type rpc_pipe_fs_type = {
 	.mount = rpc_mount,
 	.kill_sb = rpc_kill_sb,
 };
+MODULE_ALIAS_FS("rpc_pipefs");
+MODULE_ALIAS("rpc_pipefs");
 
 static void
 init_once(void *foo)
@@ -1218,6 +1220,3 @@ void unregister_rpc_pipefs(void)
 	kmem_cache_destroy(rpc_inode_cachep);
 	unregister_filesystem(&rpc_pipe_fs_type);
 }
-
-/* Make 'mount -t rpc_pipefs ...' autoload this module. */
-MODULE_ALIAS("rpc_pipefs");
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index fb20f25ddec9..f8529fc8e542 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -180,6 +180,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
 		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
 	task->tk_waitqueue = queue;
 	queue->qlen++;
+	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
+	smp_wmb();
 	rpc_set_queued(task);
 
 	dprintk("RPC: %5u added to queue %p \"%s\"\n",
@@ -430,8 +432,11 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
  */
 static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
 {
-	if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
-		__rpc_do_wake_up_task(queue, task);
+	if (RPC_IS_QUEUED(task)) {
+		smp_rmb();
+		if (task->tk_waitqueue == queue)
+			__rpc_do_wake_up_task(queue, task);
+	}
 }
 
 /*
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index c1d8476b7692..3d02130828da 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -849,6 +849,14 @@ static void xs_tcp_close(struct rpc_xprt *xprt)
 	xs_tcp_shutdown(xprt);
 }
 
+static void xs_local_destroy(struct rpc_xprt *xprt)
+{
+	xs_close(xprt);
+	xs_free_peer_addresses(xprt);
+	xprt_free(xprt);
+	module_put(THIS_MODULE);
+}
+
 /**
  * xs_destroy - prepare to shutdown a transport
  * @xprt: doomed transport
@@ -862,10 +870,7 @@ static void xs_destroy(struct rpc_xprt *xprt)
 
 	cancel_delayed_work_sync(&transport->connect_worker);
 
-	xs_close(xprt);
-	xs_free_peer_addresses(xprt);
-	xprt_free(xprt);
-	module_put(THIS_MODULE);
+	xs_local_destroy(xprt);
 }
 
 static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
@@ -2482,7 +2487,7 @@ static struct rpc_xprt_ops xs_local_ops = {
 	.send_request = xs_local_send_request,
 	.set_retrans_timeout = xprt_set_retrans_timeout_def,
 	.close = xs_close,
-	.destroy = xs_destroy,
+	.destroy = xs_local_destroy,
 	.print_stats = xs_local_print_stats,
 };
 
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index a9622b6cd916..515ce38e4f4c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -790,6 +790,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
 	if (addr) {
 		addr->family = AF_TIPC;
 		addr->addrtype = TIPC_ADDR_ID;
+		memset(&addr->addr, 0, sizeof(addr->addr));
 		addr->addr.id.ref = msg_origport(msg);
 		addr->addr.id.node = msg_orignode(msg);
 		addr->addr.name.domain = 0; /* could leave uninitialized */
@@ -904,6 +905,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
 		goto exit;
 	}
 
+	/* will be updated in set_orig_addr() if needed */
+	m->msg_namelen = 0;
+
 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 restart:
 
@@ -1013,6 +1017,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
 		goto exit;
 	}
 
+	/* will be updated in set_orig_addr() if needed */
+	m->msg_namelen = 0;
+
 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 51be64f163ec..2db702d82e7d 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -382,7 +382,7 @@ static void unix_sock_destructor(struct sock *sk)
 #endif
 }
 
-static int unix_release_sock(struct sock *sk, int embrion)
+static void unix_release_sock(struct sock *sk, int embrion)
 {
 	struct unix_sock *u = unix_sk(sk);
 	struct path path;
@@ -451,8 +451,6 @@ static int unix_release_sock(struct sock *sk, int embrion)
 
 	if (unix_tot_inflight)
 		unix_gc(); /* Garbage collect fds */
-
-	return 0;
 }
 
 static void init_peercred(struct sock *sk)
@@ -699,9 +697,10 @@ static int unix_release(struct socket *sock)
 	if (!sk)
 		return 0;
 
+	unix_release_sock(sk, 0);
 	sock->sk = NULL;
 
-	return unix_release_sock(sk, 0);
+	return 0;
 }
 
 static int unix_autobind(struct socket *sock)
@@ -1994,7 +1993,7 @@ again:
 			if ((UNIXCB(skb).pid != siocb->scm->pid) ||
 			    (UNIXCB(skb).cred != siocb->scm->cred))
 				break;
-		} else {
+		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
 			/* Copy credentials */
 			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
 			check_creds = 1;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index ca511c4f388a..7f93e2a42d7a 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -207,7 +207,7 @@ static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
 	struct vsock_sock *vsk;
 
 	list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table)
-		if (vsock_addr_equals_addr_any(addr, &vsk->local_addr))
+		if (addr->svm_port == vsk->local_addr.svm_port)
 			return sk_vsock(vsk);
 
 	return NULL;
@@ -220,8 +220,8 @@ static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
 
 	list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
 			    connected_table) {
-		if (vsock_addr_equals_addr(src, &vsk->remote_addr)
-		    && vsock_addr_equals_addr(dst, &vsk->local_addr)) {
+		if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
+		    dst->svm_port == vsk->local_addr.svm_port) {
 			return sk_vsock(vsk);
 		}
 	}
@@ -1670,6 +1670,8 @@ vsock_stream_recvmsg(struct kiocb *kiocb,
 	vsk = vsock_sk(sk);
 	err = 0;
 
+	msg->msg_namelen = 0;
+
 	lock_sock(sk);
 
 	if (sk->sk_state != SS_CONNECTED) {
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index a70ace83a153..5e04d3d96285 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -464,19 +464,16 @@ static struct sock *vmci_transport_get_pending(
 	struct vsock_sock *vlistener;
 	struct vsock_sock *vpending;
 	struct sock *pending;
+	struct sockaddr_vm src;
+
+	vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
 
 	vlistener = vsock_sk(listener);
 
 	list_for_each_entry(vpending, &vlistener->pending_links,
 			    pending_links) {
-		struct sockaddr_vm src;
-		struct sockaddr_vm dst;
-
-		vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
-		vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);
-
 		if (vsock_addr_equals_addr(&src, &vpending->remote_addr) &&
-		    vsock_addr_equals_addr(&dst, &vpending->local_addr)) {
+		    pkt->dst_port == vpending->local_addr.svm_port) {
 			pending = sk_vsock(vpending);
 			sock_hold(pending);
 			goto found;
@@ -739,10 +736,15 @@ static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)
 	 */
 	bh_lock_sock(sk);
 
-	if (!sock_owned_by_user(sk) && sk->sk_state == SS_CONNECTED)
-		vmci_trans(vsk)->notify_ops->handle_notify_pkt(
-				sk, pkt, true, &dst, &src,
-				&bh_process_pkt);
+	if (!sock_owned_by_user(sk)) {
+		/* The local context ID may be out of date, update it. */
+		vsk->local_addr.svm_cid = dst.svm_cid;
+
+		if (sk->sk_state == SS_CONNECTED)
+			vmci_trans(vsk)->notify_ops->handle_notify_pkt(
+					sk, pkt, true, &dst, &src,
+					&bh_process_pkt);
+	}
 
 	bh_unlock_sock(sk);
 
@@ -902,6 +904,9 @@ static void vmci_transport_recv_pkt_work(struct work_struct *work)
 
 	lock_sock(sk);
 
+	/* The local context ID may be out of date. */
+	vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context;
+
 	switch (sk->sk_state) {
 	case SS_LISTEN:
 		vmci_transport_recv_listen(sk, pkt);
@@ -958,6 +963,10 @@ static int vmci_transport_recv_listen(struct sock *sk,
 	pending = vmci_transport_get_pending(sk, pkt);
 	if (pending) {
 		lock_sock(pending);
+
+		/* The local context ID may be out of date. */
+		vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context;
+
 		switch (pending->sk_state) {
 		case SS_CONNECTING:
 			err = vmci_transport_recv_connecting_server(sk,
@@ -1727,6 +1736,8 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
 	if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
 		return -EOPNOTSUPP;
 
+	msg->msg_namelen = 0;
+
 	/* Retrieve the head sk_buff from the socket's receive queue. */
 	err = 0;
 	skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
@@ -1759,7 +1770,6 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
 	if (err)
 		goto out;
 
-	msg->msg_namelen = 0;
 	if (msg->msg_name) {
 		struct sockaddr_vm *vm_addr;
 
diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c
index b7df1aea7c59..ec2611b4ea0e 100644
--- a/net/vmw_vsock/vsock_addr.c
+++ b/net/vmw_vsock/vsock_addr.c
@@ -64,16 +64,6 @@ bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
 }
 EXPORT_SYMBOL_GPL(vsock_addr_equals_addr);
 
-bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr,
-				const struct sockaddr_vm *other)
-{
-	return (addr->svm_cid == VMADDR_CID_ANY ||
-		other->svm_cid == VMADDR_CID_ANY ||
-		addr->svm_cid == other->svm_cid) &&
-	       addr->svm_port == other->svm_port;
-}
-EXPORT_SYMBOL_GPL(vsock_addr_equals_addr_any);
-
 int vsock_addr_cast(const struct sockaddr *addr,
 		    size_t len, struct sockaddr_vm **out_addr)
 {
diff --git a/net/vmw_vsock/vsock_addr.h b/net/vmw_vsock/vsock_addr.h
index cdfbcefdf843..9ccd5316eac0 100644
--- a/net/vmw_vsock/vsock_addr.h
+++ b/net/vmw_vsock/vsock_addr.h
@@ -24,8 +24,6 @@ bool vsock_addr_bound(const struct sockaddr_vm *addr);
 void vsock_addr_unbind(struct sockaddr_vm *addr);
 bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
 			    const struct sockaddr_vm *other);
-bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr,
-				const struct sockaddr_vm *other);
 int vsock_addr_cast(const struct sockaddr *addr, size_t len,
 		    struct sockaddr_vm **out_addr);
 
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 35754cc8a9e5..8dafe6d3c6e4 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -334,6 +334,70 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
 	x->xflags &= ~XFRM_TIME_DEFER;
 }
 
+static void xfrm_replay_notify_esn(struct xfrm_state *x, int event)
+{
+	u32 seq_diff, oseq_diff;
+	struct km_event c;
+	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+	struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn;
+
+	/* we send notify messages in case
+	 *  1. we updated on of the sequence numbers, and the seqno difference
+	 *     is at least x->replay_maxdiff, in this case we also update the
+	 *     timeout of our timer function
+	 *  2. if x->replay_maxage has elapsed since last update,
+	 *     and there were changes
+	 *
+	 *  The state structure must be locked!
+	 */
+
+	switch (event) {
+	case XFRM_REPLAY_UPDATE:
+		if (!x->replay_maxdiff)
+			break;
+
+		if (replay_esn->seq_hi == preplay_esn->seq_hi)
+			seq_diff = replay_esn->seq - preplay_esn->seq;
+		else
+			seq_diff = ~preplay_esn->seq + replay_esn->seq + 1;
+
+		if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
+			oseq_diff = replay_esn->oseq - preplay_esn->oseq;
+		else
+			oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1;
+
+		if (seq_diff < x->replay_maxdiff &&
+		    oseq_diff < x->replay_maxdiff) {
+
+			if (x->xflags & XFRM_TIME_DEFER)
+				event = XFRM_REPLAY_TIMEOUT;
+			else
+				return;
+		}
+
+		break;
+
+	case XFRM_REPLAY_TIMEOUT:
+		if (memcmp(x->replay_esn, x->preplay_esn,
+			   xfrm_replay_state_esn_len(replay_esn)) == 0) {
+			x->xflags |= XFRM_TIME_DEFER;
+			return;
+		}
+
+		break;
+	}
+
+	memcpy(x->preplay_esn, x->replay_esn,
+	       xfrm_replay_state_esn_len(replay_esn));
+	c.event = XFRM_MSG_NEWAE;
+	c.data.aevent = event;
+	km_state_notify(x, &c);
+
+	if (x->replay_maxage &&
+	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
+		x->xflags &= ~XFRM_TIME_DEFER;
+}
+
 static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err = 0;
@@ -510,7 +574,7 @@ static struct xfrm_replay xfrm_replay_esn = {
 	.advance = xfrm_replay_advance_esn,
 	.check = xfrm_replay_check_esn,
 	.recheck = xfrm_replay_recheck_esn,
-	.notify = xfrm_replay_notify_bmp,
+	.notify = xfrm_replay_notify_esn,
 	.overflow = xfrm_replay_overflow_esn,
 };
 
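
In xfrm_replay_notify_esn() above, the `~old + new + 1` form is the two's-complement way of computing `new - old` modulo 2^32, used on the branch where the seq_hi words differ, i.e. where the low 32-bit counter has wrapped between samples. A small check of that identity, with arbitrary example values rather than real XFRM state:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint32_t old_seq = 0xfffffff0u;	/* just before the 32-bit wrap */
	uint32_t new_seq = 0x00000010u;	/* shortly after it */

	uint32_t diff_sub = new_seq - old_seq;		/* modular subtraction */
	uint32_t diff_esn = ~old_seq + new_seq + 1;	/* form used in the patch */

	printf("diff_sub = 0x%08" PRIx32 "\n", diff_sub);	/* 0x00000020 */
	printf("diff_esn = 0x%08" PRIx32 "\n", diff_esn);	/* 0x00000020 */
	return 0;
}
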