author     Ingo Molnar <mingo@kernel.org>    2013-11-01 03:10:58 -0400
committer  Ingo Molnar <mingo@kernel.org>    2013-11-01 03:24:41 -0400
commit     fb10d5b7efbcc0aa9e46a9aa5ad86772c7bacb9a (patch)
tree       ea284fe7b9c17a85b8d3c4ba999d6e26d51a12f6 /net
parent     f9f9ffc237dd924f048204e8799da74f9ecf40cf (diff)
parent     52469b4fcd4fc433ffc78cec4cf94368e9052890 (diff)
Merge branch 'linus' into sched/core

Resolve cherry-picking conflicts:

Conflicts:
	mm/huge_memory.c
	mm/memory.c
	mm/mprotect.c

See this upstream merge commit for more details:

  52469b4fcd4f Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_netlink.c | 2
-rw-r--r--  net/batman-adv/main.c | 5
-rw-r--r--  net/batman-adv/network-coding.c | 28
-rw-r--r--  net/batman-adv/network-coding.h | 14
-rw-r--r--  net/bridge/br_fdb.c | 4
-rw-r--r--  net/bridge/br_mdb.c | 2
-rw-r--r--  net/bridge/br_multicast.c | 38
-rw-r--r--  net/bridge/br_netlink.c | 2
-rw-r--r--  net/bridge/br_private.h | 5
-rw-r--r--  net/bridge/br_stp_if.c | 2
-rw-r--r--  net/bridge/br_vlan.c | 125
-rw-r--r--  net/compat.c | 2
-rw-r--r--  net/core/dev.c | 3
-rw-r--r--  net/core/filter.c | 8
-rw-r--r--  net/core/secure_seq.c | 2
-rw-r--r--  net/core/sock.c | 1
-rw-r--r--  net/ieee802154/6lowpan.c | 5
-rw-r--r--  net/ipv4/inet_hashtables.c | 2
-rw-r--r--  net/ipv4/ip_output.c | 13
-rw-r--r--  net/ipv4/ip_vti.c | 14
-rw-r--r--  net/ipv4/route.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 9
-rw-r--r--  net/ipv4/tcp_output.c | 14
-rw-r--r--  net/ipv4/xfrm4_policy.c | 1
-rw-r--r--  net/ipv6/ah6.c | 3
-rw-r--r--  net/ipv6/esp6.c | 3
-rw-r--r--  net/ipv6/inet6_hashtables.c | 2
-rw-r--r--  net/ipv6/ip6_gre.c | 6
-rw-r--r--  net/ipv6/ip6_output.c | 29
-rw-r--r--  net/ipv6/ip6_tunnel.c | 12
-rw-r--r--  net/ipv6/ipcomp6.c | 3
-rw-r--r--  net/ipv6/route.c | 46
-rw-r--r--  net/ipv6/udp.c | 5
-rw-r--r--  net/ipv6/xfrm6_policy.c | 1
-rw-r--r--  net/key/af_key.c | 3
-rw-r--r--  net/l2tp/l2tp_core.c | 36
-rw-r--r--  net/l2tp/l2tp_core.h | 3
-rw-r--r--  net/l2tp/l2tp_ppp.c | 4
-rw-r--r--  net/mac80211/cfg.c | 2
-rw-r--r--  net/mac80211/ieee80211_i.h | 3
-rw-r--r--  net/mac80211/offchannel.c | 2
-rw-r--r--  net/mac80211/rx.c | 3
-rw-r--r--  net/mac80211/scan.c | 19
-rw-r--r--  net/mac80211/status.c | 3
-rw-r--r--  net/mac80211/tx.c | 3
-rw-r--r--  net/mac80211/util.c | 9
-rw-r--r--  net/netfilter/nf_conntrack_h323_main.c | 4
-rw-r--r--  net/sched/sch_fq.c | 22
-rw-r--r--  net/sched/sch_netem.c | 17
-rw-r--r--  net/sctp/output.c | 3
-rw-r--r--  net/socket.c | 24
-rw-r--r--  net/unix/af_unix.c | 10
-rw-r--r--  net/unix/diag.c | 1
-rw-r--r--  net/wireless/core.c | 23
-rw-r--r--  net/wireless/core.h | 3
-rw-r--r--  net/wireless/ibss.c | 3
-rw-r--r--  net/wireless/nl80211.c | 4
-rw-r--r--  net/wireless/radiotap.c | 7
-rw-r--r--  net/xfrm/xfrm_policy.c | 28
-rw-r--r--  net/xfrm/xfrm_replay.c | 54
-rw-r--r--  net/xfrm/xfrm_user.c | 5
61 files changed, 475 insertions(+), 236 deletions(-)
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 309129732285..c7e634af8516 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -171,7 +171,7 @@ static size_t vlan_get_size(const struct net_device *dev)
171 171
172 return nla_total_size(2) + /* IFLA_VLAN_PROTOCOL */ 172 return nla_total_size(2) + /* IFLA_VLAN_PROTOCOL */
173 nla_total_size(2) + /* IFLA_VLAN_ID */ 173 nla_total_size(2) + /* IFLA_VLAN_ID */
174 sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */ 174 nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
175 vlan_qos_map_size(vlan->nr_ingress_mappings) + 175 vlan_qos_map_size(vlan->nr_ingress_mappings) +
176 vlan_qos_map_size(vlan->nr_egress_mappings); 176 vlan_qos_map_size(vlan->nr_egress_mappings);
177} 177}
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index c72d1bcdcf49..1356af660b5b 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -65,6 +65,7 @@ static int __init batadv_init(void)
65 batadv_recv_handler_init(); 65 batadv_recv_handler_init();
66 66
67 batadv_iv_init(); 67 batadv_iv_init();
68 batadv_nc_init();
68 69
69 batadv_event_workqueue = create_singlethread_workqueue("bat_events"); 70 batadv_event_workqueue = create_singlethread_workqueue("bat_events");
70 71
@@ -142,7 +143,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
142 if (ret < 0) 143 if (ret < 0)
143 goto err; 144 goto err;
144 145
145 ret = batadv_nc_init(bat_priv); 146 ret = batadv_nc_mesh_init(bat_priv);
146 if (ret < 0) 147 if (ret < 0)
147 goto err; 148 goto err;
148 149
@@ -167,7 +168,7 @@ void batadv_mesh_free(struct net_device *soft_iface)
167 batadv_vis_quit(bat_priv); 168 batadv_vis_quit(bat_priv);
168 169
169 batadv_gw_node_purge(bat_priv); 170 batadv_gw_node_purge(bat_priv);
170 batadv_nc_free(bat_priv); 171 batadv_nc_mesh_free(bat_priv);
171 batadv_dat_free(bat_priv); 172 batadv_dat_free(bat_priv);
172 batadv_bla_free(bat_priv); 173 batadv_bla_free(bat_priv);
173 174
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index a487d46e0aec..4ecc0b6bf8ab 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -35,6 +35,20 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
35 struct batadv_hard_iface *recv_if); 35 struct batadv_hard_iface *recv_if);
36 36
37/** 37/**
38 * batadv_nc_init - one-time initialization for network coding
39 */
40int __init batadv_nc_init(void)
41{
42 int ret;
43
44 /* Register our packet type */
45 ret = batadv_recv_handler_register(BATADV_CODED,
46 batadv_nc_recv_coded_packet);
47
48 return ret;
49}
50
51/**
38 * batadv_nc_start_timer - initialise the nc periodic worker 52 * batadv_nc_start_timer - initialise the nc periodic worker
39 * @bat_priv: the bat priv with all the soft interface information 53 * @bat_priv: the bat priv with all the soft interface information
40 */ 54 */
@@ -45,10 +59,10 @@ static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
45} 59}
46 60
47/** 61/**
48 * batadv_nc_init - initialise coding hash table and start house keeping 62 * batadv_nc_mesh_init - initialise coding hash table and start house keeping
49 * @bat_priv: the bat priv with all the soft interface information 63 * @bat_priv: the bat priv with all the soft interface information
50 */ 64 */
51int batadv_nc_init(struct batadv_priv *bat_priv) 65int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
52{ 66{
53 bat_priv->nc.timestamp_fwd_flush = jiffies; 67 bat_priv->nc.timestamp_fwd_flush = jiffies;
54 bat_priv->nc.timestamp_sniffed_purge = jiffies; 68 bat_priv->nc.timestamp_sniffed_purge = jiffies;
@@ -70,11 +84,6 @@ int batadv_nc_init(struct batadv_priv *bat_priv)
70 batadv_hash_set_lock_class(bat_priv->nc.coding_hash, 84 batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
71 &batadv_nc_decoding_hash_lock_class_key); 85 &batadv_nc_decoding_hash_lock_class_key);
72 86
73 /* Register our packet type */
74 if (batadv_recv_handler_register(BATADV_CODED,
75 batadv_nc_recv_coded_packet) < 0)
76 goto err;
77
78 INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker); 87 INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
79 batadv_nc_start_timer(bat_priv); 88 batadv_nc_start_timer(bat_priv);
80 89
@@ -1721,12 +1730,11 @@ free_nc_packet:
1721} 1730}
1722 1731
1723/** 1732/**
1724 * batadv_nc_free - clean up network coding memory 1733 * batadv_nc_mesh_free - clean up network coding memory
1725 * @bat_priv: the bat priv with all the soft interface information 1734 * @bat_priv: the bat priv with all the soft interface information
1726 */ 1735 */
1727void batadv_nc_free(struct batadv_priv *bat_priv) 1736void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
1728{ 1737{
1729 batadv_recv_handler_unregister(BATADV_CODED);
1730 cancel_delayed_work_sync(&bat_priv->nc.work); 1738 cancel_delayed_work_sync(&bat_priv->nc.work);
1731 1739
1732 batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL); 1740 batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL);
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index 85a4ec81ad50..ddfa618e80bf 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
@@ -22,8 +22,9 @@
22 22
23#ifdef CONFIG_BATMAN_ADV_NC 23#ifdef CONFIG_BATMAN_ADV_NC
24 24
25int batadv_nc_init(struct batadv_priv *bat_priv); 25int batadv_nc_init(void);
26void batadv_nc_free(struct batadv_priv *bat_priv); 26int batadv_nc_mesh_init(struct batadv_priv *bat_priv);
27void batadv_nc_mesh_free(struct batadv_priv *bat_priv);
27void batadv_nc_update_nc_node(struct batadv_priv *bat_priv, 28void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
28 struct batadv_orig_node *orig_node, 29 struct batadv_orig_node *orig_node,
29 struct batadv_orig_node *orig_neigh_node, 30 struct batadv_orig_node *orig_neigh_node,
@@ -46,12 +47,17 @@ int batadv_nc_init_debugfs(struct batadv_priv *bat_priv);
46 47
47#else /* ifdef CONFIG_BATMAN_ADV_NC */ 48#else /* ifdef CONFIG_BATMAN_ADV_NC */
48 49
49static inline int batadv_nc_init(struct batadv_priv *bat_priv) 50static inline int batadv_nc_init(void)
50{ 51{
51 return 0; 52 return 0;
52} 53}
53 54
54static inline void batadv_nc_free(struct batadv_priv *bat_priv) 55static inline int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
56{
57 return 0;
58}
59
60static inline void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
55{ 61{
56 return; 62 return;
57} 63}
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index ffd5874f2592..33e8f23acddd 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -700,7 +700,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
700 700
701 vid = nla_get_u16(tb[NDA_VLAN]); 701 vid = nla_get_u16(tb[NDA_VLAN]);
702 702
703 if (vid >= VLAN_N_VID) { 703 if (!vid || vid >= VLAN_VID_MASK) {
704 pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n", 704 pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
705 vid); 705 vid);
706 return -EINVAL; 706 return -EINVAL;
@@ -794,7 +794,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
794 794
795 vid = nla_get_u16(tb[NDA_VLAN]); 795 vid = nla_get_u16(tb[NDA_VLAN]);
796 796
797 if (vid >= VLAN_N_VID) { 797 if (!vid || vid >= VLAN_VID_MASK) {
798 pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n", 798 pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
799 vid); 799 vid);
800 return -EINVAL; 800 return -EINVAL;
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 85a09bb5ca51..b7b1914dfa25 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -453,7 +453,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
453 call_rcu_bh(&p->rcu, br_multicast_free_pg); 453 call_rcu_bh(&p->rcu, br_multicast_free_pg);
454 err = 0; 454 err = 0;
455 455
456 if (!mp->ports && !mp->mglist && mp->timer_armed && 456 if (!mp->ports && !mp->mglist &&
457 netif_running(br->dev)) 457 netif_running(br->dev))
458 mod_timer(&mp->timer, jiffies); 458 mod_timer(&mp->timer, jiffies);
459 break; 459 break;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index d1c578630678..8b0b610ca2c9 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -272,7 +272,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
272 del_timer(&p->timer); 272 del_timer(&p->timer);
273 call_rcu_bh(&p->rcu, br_multicast_free_pg); 273 call_rcu_bh(&p->rcu, br_multicast_free_pg);
274 274
275 if (!mp->ports && !mp->mglist && mp->timer_armed && 275 if (!mp->ports && !mp->mglist &&
276 netif_running(br->dev)) 276 netif_running(br->dev))
277 mod_timer(&mp->timer, jiffies); 277 mod_timer(&mp->timer, jiffies);
278 278
@@ -620,7 +620,6 @@ rehash:
620 620
621 mp->br = br; 621 mp->br = br;
622 mp->addr = *group; 622 mp->addr = *group;
623
624 setup_timer(&mp->timer, br_multicast_group_expired, 623 setup_timer(&mp->timer, br_multicast_group_expired,
625 (unsigned long)mp); 624 (unsigned long)mp);
626 625
@@ -660,6 +659,7 @@ static int br_multicast_add_group(struct net_bridge *br,
660 struct net_bridge_mdb_entry *mp; 659 struct net_bridge_mdb_entry *mp;
661 struct net_bridge_port_group *p; 660 struct net_bridge_port_group *p;
662 struct net_bridge_port_group __rcu **pp; 661 struct net_bridge_port_group __rcu **pp;
662 unsigned long now = jiffies;
663 int err; 663 int err;
664 664
665 spin_lock(&br->multicast_lock); 665 spin_lock(&br->multicast_lock);
@@ -674,6 +674,7 @@ static int br_multicast_add_group(struct net_bridge *br,
674 674
675 if (!port) { 675 if (!port) {
676 mp->mglist = true; 676 mp->mglist = true;
677 mod_timer(&mp->timer, now + br->multicast_membership_interval);
677 goto out; 678 goto out;
678 } 679 }
679 680
@@ -681,7 +682,7 @@ static int br_multicast_add_group(struct net_bridge *br,
681 (p = mlock_dereference(*pp, br)) != NULL; 682 (p = mlock_dereference(*pp, br)) != NULL;
682 pp = &p->next) { 683 pp = &p->next) {
683 if (p->port == port) 684 if (p->port == port)
684 goto out; 685 goto found;
685 if ((unsigned long)p->port < (unsigned long)port) 686 if ((unsigned long)p->port < (unsigned long)port)
686 break; 687 break;
687 } 688 }
@@ -692,6 +693,8 @@ static int br_multicast_add_group(struct net_bridge *br,
692 rcu_assign_pointer(*pp, p); 693 rcu_assign_pointer(*pp, p);
693 br_mdb_notify(br->dev, port, group, RTM_NEWMDB); 694 br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
694 695
696found:
697 mod_timer(&p->timer, now + br->multicast_membership_interval);
695out: 698out:
696 err = 0; 699 err = 0;
697 700
@@ -1191,9 +1194,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1191 if (!mp) 1194 if (!mp)
1192 goto out; 1195 goto out;
1193 1196
1194 mod_timer(&mp->timer, now + br->multicast_membership_interval);
1195 mp->timer_armed = true;
1196
1197 max_delay *= br->multicast_last_member_count; 1197 max_delay *= br->multicast_last_member_count;
1198 1198
1199 if (mp->mglist && 1199 if (mp->mglist &&
@@ -1270,9 +1270,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1270 if (!mp) 1270 if (!mp)
1271 goto out; 1271 goto out;
1272 1272
1273 mod_timer(&mp->timer, now + br->multicast_membership_interval);
1274 mp->timer_armed = true;
1275
1276 max_delay *= br->multicast_last_member_count; 1273 max_delay *= br->multicast_last_member_count;
1277 if (mp->mglist && 1274 if (mp->mglist &&
1278 (timer_pending(&mp->timer) ? 1275 (timer_pending(&mp->timer) ?
@@ -1358,7 +1355,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
1358 call_rcu_bh(&p->rcu, br_multicast_free_pg); 1355 call_rcu_bh(&p->rcu, br_multicast_free_pg);
1359 br_mdb_notify(br->dev, port, group, RTM_DELMDB); 1356 br_mdb_notify(br->dev, port, group, RTM_DELMDB);
1360 1357
1361 if (!mp->ports && !mp->mglist && mp->timer_armed && 1358 if (!mp->ports && !mp->mglist &&
1362 netif_running(br->dev)) 1359 netif_running(br->dev))
1363 mod_timer(&mp->timer, jiffies); 1360 mod_timer(&mp->timer, jiffies);
1364 } 1361 }
@@ -1370,12 +1367,30 @@ static void br_multicast_leave_group(struct net_bridge *br,
1370 br->multicast_last_member_interval; 1367 br->multicast_last_member_interval;
1371 1368
1372 if (!port) { 1369 if (!port) {
1373 if (mp->mglist && mp->timer_armed && 1370 if (mp->mglist &&
1374 (timer_pending(&mp->timer) ? 1371 (timer_pending(&mp->timer) ?
1375 time_after(mp->timer.expires, time) : 1372 time_after(mp->timer.expires, time) :
1376 try_to_del_timer_sync(&mp->timer) >= 0)) { 1373 try_to_del_timer_sync(&mp->timer) >= 0)) {
1377 mod_timer(&mp->timer, time); 1374 mod_timer(&mp->timer, time);
1378 } 1375 }
1376
1377 goto out;
1378 }
1379
1380 for (p = mlock_dereference(mp->ports, br);
1381 p != NULL;
1382 p = mlock_dereference(p->next, br)) {
1383 if (p->port != port)
1384 continue;
1385
1386 if (!hlist_unhashed(&p->mglist) &&
1387 (timer_pending(&p->timer) ?
1388 time_after(p->timer.expires, time) :
1389 try_to_del_timer_sync(&p->timer) >= 0)) {
1390 mod_timer(&p->timer, time);
1391 }
1392
1393 break;
1379 } 1394 }
1380out: 1395out:
1381 spin_unlock(&br->multicast_lock); 1396 spin_unlock(&br->multicast_lock);
@@ -1798,7 +1813,6 @@ void br_multicast_stop(struct net_bridge *br)
1798 hlist_for_each_entry_safe(mp, n, &mdb->mhash[i], 1813 hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
1799 hlist[ver]) { 1814 hlist[ver]) {
1800 del_timer(&mp->timer); 1815 del_timer(&mp->timer);
1801 mp->timer_armed = false;
1802 call_rcu_bh(&mp->rcu, br_multicast_free_group); 1816 call_rcu_bh(&mp->rcu, br_multicast_free_group);
1803 } 1817 }
1804 } 1818 }
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e74ddc1c29a8..f75d92e4f96b 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -243,7 +243,7 @@ static int br_afspec(struct net_bridge *br,
243 243
244 vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]); 244 vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
245 245
246 if (vinfo->vid >= VLAN_N_VID) 246 if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
247 return -EINVAL; 247 return -EINVAL;
248 248
249 switch (cmd) { 249 switch (cmd) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index efb57d911569..e14c33b42f75 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -126,7 +126,6 @@ struct net_bridge_mdb_entry
126 struct timer_list timer; 126 struct timer_list timer;
127 struct br_ip addr; 127 struct br_ip addr;
128 bool mglist; 128 bool mglist;
129 bool timer_armed;
130}; 129};
131 130
132struct net_bridge_mdb_htable 131struct net_bridge_mdb_htable
@@ -643,9 +642,7 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
643 * vid wasn't set 642 * vid wasn't set
644 */ 643 */
645 smp_rmb(); 644 smp_rmb();
646 return (v->pvid & VLAN_TAG_PRESENT) ? 645 return v->pvid ?: VLAN_N_VID;
647 (v->pvid & ~VLAN_TAG_PRESENT) :
648 VLAN_N_VID;
649} 646}
650 647
651#else 648#else
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 108084a04671..656a6f3e40de 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -134,7 +134,7 @@ static void br_stp_start(struct net_bridge *br)
134 134
135 if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY) 135 if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY)
136 __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY); 136 __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY);
137 else if (br->bridge_forward_delay < BR_MAX_FORWARD_DELAY) 137 else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY)
138 __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY); 138 __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
139 139
140 if (r == 0) { 140 if (r == 0) {
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 9a9ffe7e4019..53f0990eab58 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -45,37 +45,34 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
45 return 0; 45 return 0;
46 } 46 }
47 47
48 if (vid) { 48 if (v->port_idx) {
49 if (v->port_idx) { 49 p = v->parent.port;
50 p = v->parent.port; 50 br = p->br;
51 br = p->br; 51 dev = p->dev;
52 dev = p->dev; 52 } else {
53 } else { 53 br = v->parent.br;
54 br = v->parent.br; 54 dev = br->dev;
55 dev = br->dev; 55 }
56 } 56 ops = dev->netdev_ops;
57 ops = dev->netdev_ops; 57
58 58 if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
59 if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) { 59 /* Add VLAN to the device filter if it is supported.
60 /* Add VLAN to the device filter if it is supported. 60 * Stricly speaking, this is not necessary now, since
61 * Stricly speaking, this is not necessary now, since 61 * devices are made promiscuous by the bridge, but if
62 * devices are made promiscuous by the bridge, but if 62 * that ever changes this code will allow tagged
63 * that ever changes this code will allow tagged 63 * traffic to enter the bridge.
64 * traffic to enter the bridge. 64 */
65 */ 65 err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q),
66 err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q), 66 vid);
67 vid); 67 if (err)
68 if (err) 68 return err;
69 return err; 69 }
70 }
71
72 err = br_fdb_insert(br, p, dev->dev_addr, vid);
73 if (err) {
74 br_err(br, "failed insert local address into bridge "
75 "forwarding table\n");
76 goto out_filt;
77 }
78 70
71 err = br_fdb_insert(br, p, dev->dev_addr, vid);
72 if (err) {
73 br_err(br, "failed insert local address into bridge "
74 "forwarding table\n");
75 goto out_filt;
79 } 76 }
80 77
81 set_bit(vid, v->vlan_bitmap); 78 set_bit(vid, v->vlan_bitmap);
@@ -98,7 +95,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
98 __vlan_delete_pvid(v, vid); 95 __vlan_delete_pvid(v, vid);
99 clear_bit(vid, v->untagged_bitmap); 96 clear_bit(vid, v->untagged_bitmap);
100 97
101 if (v->port_idx && vid) { 98 if (v->port_idx) {
102 struct net_device *dev = v->parent.port->dev; 99 struct net_device *dev = v->parent.port->dev;
103 const struct net_device_ops *ops = dev->netdev_ops; 100 const struct net_device_ops *ops = dev->netdev_ops;
104 101
@@ -192,6 +189,8 @@ out:
192bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, 189bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
193 struct sk_buff *skb, u16 *vid) 190 struct sk_buff *skb, u16 *vid)
194{ 191{
192 int err;
193
195 /* If VLAN filtering is disabled on the bridge, all packets are 194 /* If VLAN filtering is disabled on the bridge, all packets are
196 * permitted. 195 * permitted.
197 */ 196 */
@@ -204,20 +203,32 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
204 if (!v) 203 if (!v)
205 return false; 204 return false;
206 205
207 if (br_vlan_get_tag(skb, vid)) { 206 err = br_vlan_get_tag(skb, vid);
207 if (!*vid) {
208 u16 pvid = br_get_pvid(v); 208 u16 pvid = br_get_pvid(v);
209 209
210 /* Frame did not have a tag. See if pvid is set 210 /* Frame had a tag with VID 0 or did not have a tag.
211 * on this port. That tells us which vlan untagged 211 * See if pvid is set on this port. That tells us which
212 * traffic belongs to. 212 * vlan untagged or priority-tagged traffic belongs to.
213 */ 213 */
214 if (pvid == VLAN_N_VID) 214 if (pvid == VLAN_N_VID)
215 return false; 215 return false;
216 216
217 /* PVID is set on this port. Any untagged ingress 217 /* PVID is set on this port. Any untagged or priority-tagged
218 * frame is considered to belong to this vlan. 218 * ingress frame is considered to belong to this vlan.
219 */ 219 */
220 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid); 220 *vid = pvid;
221 if (likely(err))
222 /* Untagged Frame. */
223 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid);
224 else
225 /* Priority-tagged Frame.
226 * At this point, We know that skb->vlan_tci had
227 * VLAN_TAG_PRESENT bit and its VID field was 0x000.
228 * We update only VID field and preserve PCP field.
229 */
230 skb->vlan_tci |= pvid;
231
221 return true; 232 return true;
222 } 233 }
223 234
@@ -248,7 +259,9 @@ bool br_allowed_egress(struct net_bridge *br,
248 return false; 259 return false;
249} 260}
250 261
251/* Must be protected by RTNL */ 262/* Must be protected by RTNL.
263 * Must be called with vid in range from 1 to 4094 inclusive.
264 */
252int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags) 265int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
253{ 266{
254 struct net_port_vlans *pv = NULL; 267 struct net_port_vlans *pv = NULL;
@@ -278,7 +291,9 @@ out:
278 return err; 291 return err;
279} 292}
280 293
281/* Must be protected by RTNL */ 294/* Must be protected by RTNL.
295 * Must be called with vid in range from 1 to 4094 inclusive.
296 */
282int br_vlan_delete(struct net_bridge *br, u16 vid) 297int br_vlan_delete(struct net_bridge *br, u16 vid)
283{ 298{
284 struct net_port_vlans *pv; 299 struct net_port_vlans *pv;
@@ -289,14 +304,9 @@ int br_vlan_delete(struct net_bridge *br, u16 vid)
289 if (!pv) 304 if (!pv)
290 return -EINVAL; 305 return -EINVAL;
291 306
292 if (vid) { 307 spin_lock_bh(&br->hash_lock);
293 /* If the VID !=0 remove fdb for this vid. VID 0 is special 308 fdb_delete_by_addr(br, br->dev->dev_addr, vid);
294 * in that it's the default and is always there in the fdb. 309 spin_unlock_bh(&br->hash_lock);
295 */
296 spin_lock_bh(&br->hash_lock);
297 fdb_delete_by_addr(br, br->dev->dev_addr, vid);
298 spin_unlock_bh(&br->hash_lock);
299 }
300 310
301 __vlan_del(pv, vid); 311 __vlan_del(pv, vid);
302 return 0; 312 return 0;
@@ -329,7 +339,9 @@ unlock:
329 return 0; 339 return 0;
330} 340}
331 341
332/* Must be protected by RTNL */ 342/* Must be protected by RTNL.
343 * Must be called with vid in range from 1 to 4094 inclusive.
344 */
333int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags) 345int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
334{ 346{
335 struct net_port_vlans *pv = NULL; 347 struct net_port_vlans *pv = NULL;
@@ -363,7 +375,9 @@ clean_up:
363 return err; 375 return err;
364} 376}
365 377
366/* Must be protected by RTNL */ 378/* Must be protected by RTNL.
379 * Must be called with vid in range from 1 to 4094 inclusive.
380 */
367int nbp_vlan_delete(struct net_bridge_port *port, u16 vid) 381int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
368{ 382{
369 struct net_port_vlans *pv; 383 struct net_port_vlans *pv;
@@ -374,14 +388,9 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
374 if (!pv) 388 if (!pv)
375 return -EINVAL; 389 return -EINVAL;
376 390
377 if (vid) { 391 spin_lock_bh(&port->br->hash_lock);
378 /* If the VID !=0 remove fdb for this vid. VID 0 is special 392 fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
379 * in that it's the default and is always there in the fdb. 393 spin_unlock_bh(&port->br->hash_lock);
380 */
381 spin_lock_bh(&port->br->hash_lock);
382 fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
383 spin_unlock_bh(&port->br->hash_lock);
384 }
385 394
386 return __vlan_del(pv, vid); 395 return __vlan_del(pv, vid);
387} 396}
diff --git a/net/compat.c b/net/compat.c
index f0a1ba6c8086..89032580bd1d 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -71,6 +71,8 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
71 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || 71 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
72 __get_user(kmsg->msg_flags, &umsg->msg_flags)) 72 __get_user(kmsg->msg_flags, &umsg->msg_flags))
73 return -EFAULT; 73 return -EFAULT;
74 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
75 return -EINVAL;
74 kmsg->msg_name = compat_ptr(tmp1); 76 kmsg->msg_name = compat_ptr(tmp1);
75 kmsg->msg_iov = compat_ptr(tmp2); 77 kmsg->msg_iov = compat_ptr(tmp2);
76 kmsg->msg_control = compat_ptr(tmp3); 78 kmsg->msg_control = compat_ptr(tmp3);
diff --git a/net/core/dev.c b/net/core/dev.c
index 65f829cfd928..3430b1ed12e5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1917,7 +1917,8 @@ static struct xps_map *expand_xps_map(struct xps_map *map,
1917 return new_map; 1917 return new_map;
1918} 1918}
1919 1919
1920int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index) 1920int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
1921 u16 index)
1921{ 1922{
1922 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL; 1923 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
1923 struct xps_map *map, *new_map; 1924 struct xps_map *map, *new_map;
diff --git a/net/core/filter.c b/net/core/filter.c
index 6438f29ff266..01b780856db2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -644,7 +644,6 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
644 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); 644 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
645 645
646 bpf_jit_free(fp); 646 bpf_jit_free(fp);
647 kfree(fp);
648} 647}
649EXPORT_SYMBOL(sk_filter_release_rcu); 648EXPORT_SYMBOL(sk_filter_release_rcu);
650 649
@@ -683,7 +682,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
683 if (fprog->filter == NULL) 682 if (fprog->filter == NULL)
684 return -EINVAL; 683 return -EINVAL;
685 684
686 fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL); 685 fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
687 if (!fp) 686 if (!fp)
688 return -ENOMEM; 687 return -ENOMEM;
689 memcpy(fp->insns, fprog->filter, fsize); 688 memcpy(fp->insns, fprog->filter, fsize);
@@ -723,6 +722,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
723{ 722{
724 struct sk_filter *fp, *old_fp; 723 struct sk_filter *fp, *old_fp;
725 unsigned int fsize = sizeof(struct sock_filter) * fprog->len; 724 unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
725 unsigned int sk_fsize = sk_filter_size(fprog->len);
726 int err; 726 int err;
727 727
728 if (sock_flag(sk, SOCK_FILTER_LOCKED)) 728 if (sock_flag(sk, SOCK_FILTER_LOCKED))
@@ -732,11 +732,11 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
732 if (fprog->filter == NULL) 732 if (fprog->filter == NULL)
733 return -EINVAL; 733 return -EINVAL;
734 734
735 fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL); 735 fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
736 if (!fp) 736 if (!fp)
737 return -ENOMEM; 737 return -ENOMEM;
738 if (copy_from_user(fp->insns, fprog->filter, fsize)) { 738 if (copy_from_user(fp->insns, fprog->filter, fsize)) {
739 sock_kfree_s(sk, fp, fsize+sizeof(*fp)); 739 sock_kfree_s(sk, fp, sk_fsize);
740 return -EFAULT; 740 return -EFAULT;
741 } 741 }
742 742
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 3f1ec1586ae1..8d9d05edd2eb 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -10,6 +10,7 @@
10 10
11#include <net/secure_seq.h> 11#include <net/secure_seq.h>
12 12
13#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET)
13#define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4) 14#define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4)
14 15
15static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned; 16static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned;
@@ -29,6 +30,7 @@ static void net_secret_init(void)
29 cmpxchg(&net_secret[--i], 0, tmp); 30 cmpxchg(&net_secret[--i], 0, tmp);
30 } 31 }
31} 32}
33#endif
32 34
33#ifdef CONFIG_INET 35#ifdef CONFIG_INET
34static u32 seq_scale(u32 seq) 36static u32 seq_scale(u32 seq)
diff --git a/net/core/sock.c b/net/core/sock.c
index 5b6beba494a3..0b39e7ae4383 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2319,6 +2319,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
2319 sk->sk_ll_usec = sysctl_net_busy_read; 2319 sk->sk_ll_usec = sysctl_net_busy_read;
2320#endif 2320#endif
2321 2321
2322 sk->sk_pacing_rate = ~0U;
2322 /* 2323 /*
2323 * Before updating sk_refcnt, we must commit prior changes to memory 2324 * Before updating sk_refcnt, we must commit prior changes to memory
2324 * (Documentation/RCU/rculist_nulls.txt for details) 2325 * (Documentation/RCU/rculist_nulls.txt for details)
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index c85e71e0c7ff..ff41b4d60d30 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -1372,6 +1372,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
1372 real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); 1372 real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
1373 if (!real_dev) 1373 if (!real_dev)
1374 return -ENODEV; 1374 return -ENODEV;
1375 if (real_dev->type != ARPHRD_IEEE802154)
1376 return -EINVAL;
1375 1377
1376 lowpan_dev_info(dev)->real_dev = real_dev; 1378 lowpan_dev_info(dev)->real_dev = real_dev;
1377 lowpan_dev_info(dev)->fragment_tag = 0; 1379 lowpan_dev_info(dev)->fragment_tag = 0;
@@ -1386,6 +1388,9 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
1386 1388
1387 entry->ldev = dev; 1389 entry->ldev = dev;
1388 1390
1391 /* Set the lowpan harware address to the wpan hardware address. */
1392 memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
1393
1389 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); 1394 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
1390 INIT_LIST_HEAD(&entry->list); 1395 INIT_LIST_HEAD(&entry->list);
1391 list_add_tail(&entry->list, &lowpan_devices); 1396 list_add_tail(&entry->list, &lowpan_devices);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 7bd8983dbfcf..96da9c77deca 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -287,7 +287,7 @@ begintw:
287 if (unlikely(!INET_TW_MATCH(sk, net, acookie, 287 if (unlikely(!INET_TW_MATCH(sk, net, acookie,
288 saddr, daddr, ports, 288 saddr, daddr, ports,
289 dif))) { 289 dif))) {
290 sock_put(sk); 290 inet_twsk_put(inet_twsk(sk));
291 goto begintw; 291 goto begintw;
292 } 292 }
293 goto out; 293 goto out;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index a04d872c54f9..3982eabf61e1 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
772 /* initialize protocol header pointer */ 772 /* initialize protocol header pointer */
773 skb->transport_header = skb->network_header + fragheaderlen; 773 skb->transport_header = skb->network_header + fragheaderlen;
774 774
775 skb->ip_summed = CHECKSUM_PARTIAL;
776 skb->csum = 0; 775 skb->csum = 0;
777 776
778 /* specify the length of each IP datagram fragment */ 777
779 skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
780 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
781 __skb_queue_tail(queue, skb); 778 __skb_queue_tail(queue, skb);
779 } else if (skb_is_gso(skb)) {
780 goto append;
782 } 781 }
783 782
783 skb->ip_summed = CHECKSUM_PARTIAL;
784 /* specify the length of each IP datagram fragment */
785 skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
786 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
787
788append:
784 return skb_append_datato_frags(sk, skb, getfrag, from, 789 return skb_append_datato_frags(sk, skb, getfrag, from,
785 (length - transhdrlen)); 790 (length - transhdrlen));
786} 791}
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index e805e7b3030e..6e87f853d033 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -125,8 +125,17 @@ static int vti_rcv(struct sk_buff *skb)
125 iph->saddr, iph->daddr, 0); 125 iph->saddr, iph->daddr, 0);
126 if (tunnel != NULL) { 126 if (tunnel != NULL) {
127 struct pcpu_tstats *tstats; 127 struct pcpu_tstats *tstats;
128 u32 oldmark = skb->mark;
129 int ret;
128 130
129 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 131
132 /* temporarily mark the skb with the tunnel o_key, to
133 * only match policies with this mark.
134 */
135 skb->mark = be32_to_cpu(tunnel->parms.o_key);
136 ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb);
137 skb->mark = oldmark;
138 if (!ret)
130 return -1; 139 return -1;
131 140
132 tstats = this_cpu_ptr(tunnel->dev->tstats); 141 tstats = this_cpu_ptr(tunnel->dev->tstats);
@@ -135,7 +144,6 @@ static int vti_rcv(struct sk_buff *skb)
135 tstats->rx_bytes += skb->len; 144 tstats->rx_bytes += skb->len;
136 u64_stats_update_end(&tstats->syncp); 145 u64_stats_update_end(&tstats->syncp);
137 146
138 skb->mark = 0;
139 secpath_reset(skb); 147 secpath_reset(skb);
140 skb->dev = tunnel->dev; 148 skb->dev = tunnel->dev;
141 return 1; 149 return 1;
@@ -167,7 +175,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
167 175
168 memset(&fl4, 0, sizeof(fl4)); 176 memset(&fl4, 0, sizeof(fl4));
169 flowi4_init_output(&fl4, tunnel->parms.link, 177 flowi4_init_output(&fl4, tunnel->parms.link,
170 be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos), 178 be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos),
171 RT_SCOPE_UNIVERSE, 179 RT_SCOPE_UNIVERSE,
172 IPPROTO_IPIP, 0, 180 IPPROTO_IPIP, 0,
173 dst, tiph->saddr, 0, 0); 181 dst, tiph->saddr, 0, 0);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 727f4365bcdf..6011615e810d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2072,7 +2072,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2072 RT_SCOPE_LINK); 2072 RT_SCOPE_LINK);
2073 goto make_route; 2073 goto make_route;
2074 } 2074 }
2075 if (fl4->saddr) { 2075 if (!fl4->saddr) {
2076 if (ipv4_is_multicast(fl4->daddr)) 2076 if (ipv4_is_multicast(fl4->daddr))
2077 fl4->saddr = inet_select_addr(dev_out, 0, 2077 fl4->saddr = inet_select_addr(dev_out, 0,
2078 fl4->flowi4_scope); 2078 fl4->flowi4_scope);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 25a89eaa669d..a16b01b537ba 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1284,7 +1284,10 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1284 tp->lost_cnt_hint -= tcp_skb_pcount(prev); 1284 tp->lost_cnt_hint -= tcp_skb_pcount(prev);
1285 } 1285 }
1286 1286
1287 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags; 1287 TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1288 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1289 TCP_SKB_CB(prev)->end_seq++;
1290
1288 if (skb == tcp_highest_sack(sk)) 1291 if (skb == tcp_highest_sack(sk))
1289 tcp_advance_highest_sack(sk, skb); 1292 tcp_advance_highest_sack(sk, skb);
1290 1293
@@ -3288,7 +3291,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
3288 tcp_init_cwnd_reduction(sk, true); 3291 tcp_init_cwnd_reduction(sk, true);
3289 tcp_set_ca_state(sk, TCP_CA_CWR); 3292 tcp_set_ca_state(sk, TCP_CA_CWR);
3290 tcp_end_cwnd_reduction(sk); 3293 tcp_end_cwnd_reduction(sk);
3291 tcp_set_ca_state(sk, TCP_CA_Open); 3294 tcp_try_keep_open(sk);
3292 NET_INC_STATS_BH(sock_net(sk), 3295 NET_INC_STATS_BH(sock_net(sk),
3293 LINUX_MIB_TCPLOSSPROBERECOVERY); 3296 LINUX_MIB_TCPLOSSPROBERECOVERY);
3294 } 3297 }
@@ -5709,6 +5712,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5709 } else 5712 } else
5710 tcp_init_metrics(sk); 5713 tcp_init_metrics(sk);
5711 5714
5715 tcp_update_pacing_rate(sk);
5716
5712 /* Prevent spurious tcp_cwnd_restart() on first data packet */ 5717 /* Prevent spurious tcp_cwnd_restart() on first data packet */
5713 tp->lsndtime = tcp_time_stamp; 5718 tp->lsndtime = tcp_time_stamp;
5714 5719
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e6bb8256e59f..d46f2143305c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -637,6 +637,8 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
637 unsigned int size = 0; 637 unsigned int size = 0;
638 unsigned int eff_sacks; 638 unsigned int eff_sacks;
639 639
640 opts->options = 0;
641
640#ifdef CONFIG_TCP_MD5SIG 642#ifdef CONFIG_TCP_MD5SIG
641 *md5 = tp->af_specific->md5_lookup(sk, sk); 643 *md5 = tp->af_specific->md5_lookup(sk, sk);
642 if (unlikely(*md5)) { 644 if (unlikely(*md5)) {
@@ -984,8 +986,10 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
984static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, 986static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
985 unsigned int mss_now) 987 unsigned int mss_now)
986{ 988{
987 if (skb->len <= mss_now || !sk_can_gso(sk) || 989 /* Make sure we own this skb before messing gso_size/gso_segs */
988 skb->ip_summed == CHECKSUM_NONE) { 990 WARN_ON_ONCE(skb_cloned(skb));
991
992 if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
989 /* Avoid the costly divide in the normal 993 /* Avoid the costly divide in the normal
990 * non-TSO case. 994 * non-TSO case.
991 */ 995 */
@@ -1065,9 +1069,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1065 if (nsize < 0) 1069 if (nsize < 0)
1066 nsize = 0; 1070 nsize = 0;
1067 1071
1068 if (skb_cloned(skb) && 1072 if (skb_unclone(skb, GFP_ATOMIC))
1069 skb_is_nonlinear(skb) &&
1070 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
1071 return -ENOMEM; 1073 return -ENOMEM;
1072 1074
1073 /* Get a new skb... force flag on. */ 1075 /* Get a new skb... force flag on. */
@@ -2342,6 +2344,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2342 int oldpcount = tcp_skb_pcount(skb); 2344 int oldpcount = tcp_skb_pcount(skb);
2343 2345
2344 if (unlikely(oldpcount > 1)) { 2346 if (unlikely(oldpcount > 1)) {
2347 if (skb_unclone(skb, GFP_ATOMIC))
2348 return -ENOMEM;
2345 tcp_init_tso_segs(sk, skb, cur_mss); 2349 tcp_init_tso_segs(sk, skb, cur_mss);
2346 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); 2350 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
2347 } 2351 }
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 9a459be24af7..ccde54248c8c 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -107,6 +107,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
107 107
108 memset(fl4, 0, sizeof(struct flowi4)); 108 memset(fl4, 0, sizeof(struct flowi4));
109 fl4->flowi4_mark = skb->mark; 109 fl4->flowi4_mark = skb->mark;
110 fl4->flowi4_oif = skb_dst(skb)->dev->ifindex;
110 111
111 if (!ip_is_fragment(iph)) { 112 if (!ip_is_fragment(iph)) {
112 switch (iph->protocol) { 113 switch (iph->protocol) {
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 73784c3d4642..82e1da3a40b9 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -618,8 +618,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
618 struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset); 618 struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset);
619 struct xfrm_state *x; 619 struct xfrm_state *x;
620 620
621 if (type != ICMPV6_DEST_UNREACH && 621 if (type != ICMPV6_PKT_TOOBIG &&
622 type != ICMPV6_PKT_TOOBIG &&
623 type != NDISC_REDIRECT) 622 type != NDISC_REDIRECT)
624 return; 623 return;
625 624
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index d3618a78fcac..e67e63f9858d 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -436,8 +436,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
436 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset); 436 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
437 struct xfrm_state *x; 437 struct xfrm_state *x;
438 438
439 if (type != ICMPV6_DEST_UNREACH && 439 if (type != ICMPV6_PKT_TOOBIG &&
440 type != ICMPV6_PKT_TOOBIG &&
441 type != NDISC_REDIRECT) 440 type != NDISC_REDIRECT)
442 return; 441 return;
443 442
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 32b4a1675d82..066640e0ba8e 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -116,7 +116,7 @@ begintw:
116 } 116 }
117 if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr, 117 if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr,
118 ports, dif))) { 118 ports, dif))) {
119 sock_put(sk); 119 inet_twsk_put(inet_twsk(sk));
120 goto begintw; 120 goto begintw;
121 } 121 }
122 goto out; 122 goto out;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 7bb5446b9d73..bf4a9a084de5 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -976,6 +976,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
976 if (t->parms.o_flags&GRE_SEQ) 976 if (t->parms.o_flags&GRE_SEQ)
977 addend += 4; 977 addend += 4;
978 } 978 }
979 t->hlen = addend;
979 980
980 if (p->flags & IP6_TNL_F_CAP_XMIT) { 981 if (p->flags & IP6_TNL_F_CAP_XMIT) {
981 int strict = (ipv6_addr_type(&p->raddr) & 982 int strict = (ipv6_addr_type(&p->raddr) &
@@ -1002,8 +1003,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
1002 } 1003 }
1003 ip6_rt_put(rt); 1004 ip6_rt_put(rt);
1004 } 1005 }
1005
1006 t->hlen = addend;
1007} 1006}
1008 1007
1009static int ip6gre_tnl_change(struct ip6_tnl *t, 1008static int ip6gre_tnl_change(struct ip6_tnl *t,
@@ -1173,9 +1172,8 @@ done:
1173 1172
1174static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu) 1173static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1175{ 1174{
1176 struct ip6_tnl *tunnel = netdev_priv(dev);
1177 if (new_mtu < 68 || 1175 if (new_mtu < 68 ||
1178 new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen) 1176 new_mtu > 0xFFF8 - dev->hard_header_len)
1179 return -EINVAL; 1177 return -EINVAL;
1180 dev->mtu = new_mtu; 1178 dev->mtu = new_mtu;
1181 return 0; 1179 return 0;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index a54c45ce4a48..91fb4e8212f5 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -105,7 +105,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
105 } 105 }
106 106
107 rcu_read_lock_bh(); 107 rcu_read_lock_bh();
108 nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); 108 nexthop = rt6_nexthop((struct rt6_info *)dst);
109 neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); 109 neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
110 if (unlikely(!neigh)) 110 if (unlikely(!neigh))
111 neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); 111 neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
@@ -874,7 +874,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
874 */ 874 */
875 rt = (struct rt6_info *) *dst; 875 rt = (struct rt6_info *) *dst;
876 rcu_read_lock_bh(); 876 rcu_read_lock_bh();
877 n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr)); 877 n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
878 err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0; 878 err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
879 rcu_read_unlock_bh(); 879 rcu_read_unlock_bh();
880 880
@@ -1008,6 +1008,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1008 1008
1009{ 1009{
1010 struct sk_buff *skb; 1010 struct sk_buff *skb;
1011 struct frag_hdr fhdr;
1011 int err; 1012 int err;
1012 1013
1013 /* There is support for UDP large send offload by network 1014 /* There is support for UDP large send offload by network
@@ -1015,8 +1016,6 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1015 * udp datagram 1016 * udp datagram
1016 */ 1017 */
1017 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { 1018 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
1018 struct frag_hdr fhdr;
1019
1020 skb = sock_alloc_send_skb(sk, 1019 skb = sock_alloc_send_skb(sk,
1021 hh_len + fragheaderlen + transhdrlen + 20, 1020 hh_len + fragheaderlen + transhdrlen + 20,
1022 (flags & MSG_DONTWAIT), &err); 1021 (flags & MSG_DONTWAIT), &err);
@@ -1036,20 +1035,24 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1036 skb->transport_header = skb->network_header + fragheaderlen; 1035 skb->transport_header = skb->network_header + fragheaderlen;
1037 1036
1038 skb->protocol = htons(ETH_P_IPV6); 1037 skb->protocol = htons(ETH_P_IPV6);
1039 skb->ip_summed = CHECKSUM_PARTIAL;
1040 skb->csum = 0; 1038 skb->csum = 0;
1041 1039
1042 /* Specify the length of each IPv6 datagram fragment.
1043 * It has to be a multiple of 8.
1044 */
1045 skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1046 sizeof(struct frag_hdr)) & ~7;
1047 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1048 ipv6_select_ident(&fhdr, rt);
1049 skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
1050 __skb_queue_tail(&sk->sk_write_queue, skb); 1040 __skb_queue_tail(&sk->sk_write_queue, skb);
1041 } else if (skb_is_gso(skb)) {
1042 goto append;
1051 } 1043 }
1052 1044
1045 skb->ip_summed = CHECKSUM_PARTIAL;
1046 /* Specify the length of each IPv6 datagram fragment.
1047 * It has to be a multiple of 8.
1048 */
1049 skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1050 sizeof(struct frag_hdr)) & ~7;
1051 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1052 ipv6_select_ident(&fhdr, rt);
1053 skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
1054
1055append:
1053 return skb_append_datato_frags(sk, skb, getfrag, from, 1056 return skb_append_datato_frags(sk, skb, getfrag, from,
1054 (length - transhdrlen)); 1057 (length - transhdrlen));
1055} 1058}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index a791552e0422..583b77e2f69b 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1430,9 +1430,17 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1430static int 1430static int
1431ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) 1431ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1432{ 1432{
1433 if (new_mtu < IPV6_MIN_MTU) { 1433 struct ip6_tnl *tnl = netdev_priv(dev);
1434 return -EINVAL; 1434
1435 if (tnl->parms.proto == IPPROTO_IPIP) {
1436 if (new_mtu < 68)
1437 return -EINVAL;
1438 } else {
1439 if (new_mtu < IPV6_MIN_MTU)
1440 return -EINVAL;
1435 } 1441 }
1442 if (new_mtu > 0xFFF8 - dev->hard_header_len)
1443 return -EINVAL;
1436 dev->mtu = new_mtu; 1444 dev->mtu = new_mtu;
1437 return 0; 1445 return 0;
1438} 1446}
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 5636a912074a..ce507d9e1c90 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -64,8 +64,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
64 (struct ip_comp_hdr *)(skb->data + offset); 64 (struct ip_comp_hdr *)(skb->data + offset);
65 struct xfrm_state *x; 65 struct xfrm_state *x;
66 66
67 if (type != ICMPV6_DEST_UNREACH && 67 if (type != ICMPV6_PKT_TOOBIG &&
68 type != ICMPV6_PKT_TOOBIG &&
69 type != NDISC_REDIRECT) 68 type != NDISC_REDIRECT)
70 return; 69 return;
71 70
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c979dd96d82a..f54e3a101098 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -476,6 +476,24 @@ out:
476} 476}
477 477
478#ifdef CONFIG_IPV6_ROUTER_PREF 478#ifdef CONFIG_IPV6_ROUTER_PREF
479struct __rt6_probe_work {
480 struct work_struct work;
481 struct in6_addr target;
482 struct net_device *dev;
483};
484
485static void rt6_probe_deferred(struct work_struct *w)
486{
487 struct in6_addr mcaddr;
488 struct __rt6_probe_work *work =
489 container_of(w, struct __rt6_probe_work, work);
490
491 addrconf_addr_solict_mult(&work->target, &mcaddr);
492 ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
493 dev_put(work->dev);
494 kfree(w);
495}
496
479static void rt6_probe(struct rt6_info *rt) 497static void rt6_probe(struct rt6_info *rt)
480{ 498{
481 struct neighbour *neigh; 499 struct neighbour *neigh;
@@ -499,17 +517,23 @@ static void rt6_probe(struct rt6_info *rt)
499 517
500 if (!neigh || 518 if (!neigh ||
501 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) { 519 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
502 struct in6_addr mcaddr; 520 struct __rt6_probe_work *work;
503 struct in6_addr *target;
504 521
505 if (neigh) { 522 work = kmalloc(sizeof(*work), GFP_ATOMIC);
523
524 if (neigh && work)
506 neigh->updated = jiffies; 525 neigh->updated = jiffies;
526
527 if (neigh)
507 write_unlock(&neigh->lock); 528 write_unlock(&neigh->lock);
508 }
509 529
510 target = (struct in6_addr *)&rt->rt6i_gateway; 530 if (work) {
511 addrconf_addr_solict_mult(target, &mcaddr); 531 INIT_WORK(&work->work, rt6_probe_deferred);
512 ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL); 532 work->target = rt->rt6i_gateway;
533 dev_hold(rt->dst.dev);
534 work->dev = rt->dst.dev;
535 schedule_work(&work->work);
536 }
513 } else { 537 } else {
514out: 538out:
515 write_unlock(&neigh->lock); 539 write_unlock(&neigh->lock);
@@ -851,7 +875,6 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
851 if (ort->rt6i_dst.plen != 128 && 875 if (ort->rt6i_dst.plen != 128 &&
852 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr)) 876 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
853 rt->rt6i_flags |= RTF_ANYCAST; 877 rt->rt6i_flags |= RTF_ANYCAST;
854 rt->rt6i_gateway = *daddr;
855 } 878 }
856 879
857 rt->rt6i_flags |= RTF_CACHE; 880 rt->rt6i_flags |= RTF_CACHE;
@@ -1338,6 +1361,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1338 rt->dst.flags |= DST_HOST; 1361 rt->dst.flags |= DST_HOST;
1339 rt->dst.output = ip6_output; 1362 rt->dst.output = ip6_output;
1340 atomic_set(&rt->dst.__refcnt, 1); 1363 atomic_set(&rt->dst.__refcnt, 1);
1364 rt->rt6i_gateway = fl6->daddr;
1341 rt->rt6i_dst.addr = fl6->daddr; 1365 rt->rt6i_dst.addr = fl6->daddr;
1342 rt->rt6i_dst.plen = 128; 1366 rt->rt6i_dst.plen = 128;
1343 rt->rt6i_idev = idev; 1367 rt->rt6i_idev = idev;
@@ -1873,7 +1897,10 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1873 in6_dev_hold(rt->rt6i_idev); 1897 in6_dev_hold(rt->rt6i_idev);
1874 rt->dst.lastuse = jiffies; 1898 rt->dst.lastuse = jiffies;
1875 1899
1876 rt->rt6i_gateway = ort->rt6i_gateway; 1900 if (ort->rt6i_flags & RTF_GATEWAY)
1901 rt->rt6i_gateway = ort->rt6i_gateway;
1902 else
1903 rt->rt6i_gateway = *dest;
1877 rt->rt6i_flags = ort->rt6i_flags; 1904 rt->rt6i_flags = ort->rt6i_flags;
1878 if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) == 1905 if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
1879 (RTF_DEFAULT | RTF_ADDRCONF)) 1906 (RTF_DEFAULT | RTF_ADDRCONF))
@@ -2160,6 +2187,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2160 else 2187 else
2161 rt->rt6i_flags |= RTF_LOCAL; 2188 rt->rt6i_flags |= RTF_LOCAL;
2162 2189
2190 rt->rt6i_gateway = *addr;
2163 rt->rt6i_dst.addr = *addr; 2191 rt->rt6i_dst.addr = *addr;
2164 rt->rt6i_dst.plen = 128; 2192 rt->rt6i_dst.plen = 128;
2165 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); 2193 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 72b7eaaf3ca0..18786098fd41 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1225,9 +1225,6 @@ do_udp_sendmsg:
1225 if (tclass < 0) 1225 if (tclass < 0)
1226 tclass = np->tclass; 1226 tclass = np->tclass;
1227 1227
1228 if (dontfrag < 0)
1229 dontfrag = np->dontfrag;
1230
1231 if (msg->msg_flags&MSG_CONFIRM) 1228 if (msg->msg_flags&MSG_CONFIRM)
1232 goto do_confirm; 1229 goto do_confirm;
1233back_from_confirm: 1230back_from_confirm:
@@ -1246,6 +1243,8 @@ back_from_confirm:
1246 up->pending = AF_INET6; 1243 up->pending = AF_INET6;
1247 1244
1248do_append_data: 1245do_append_data:
1246 if (dontfrag < 0)
1247 dontfrag = np->dontfrag;
1249 up->len += ulen; 1248 up->len += ulen;
1250 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; 1249 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
1251 err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen, 1250 err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 23ed03d786c8..08ed2772b7aa 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -138,6 +138,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
138 138
139 memset(fl6, 0, sizeof(struct flowi6)); 139 memset(fl6, 0, sizeof(struct flowi6));
140 fl6->flowi6_mark = skb->mark; 140 fl6->flowi6_mark = skb->mark;
141 fl6->flowi6_oif = skb_dst(skb)->dev->ifindex;
141 142
142 fl6->daddr = reverse ? hdr->saddr : hdr->daddr; 143 fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
143 fl6->saddr = reverse ? hdr->daddr : hdr->saddr; 144 fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 9d585370c5b4..911ef03bf8fb 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1098,7 +1098,8 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1098 1098
1099 x->id.proto = proto; 1099 x->id.proto = proto;
1100 x->id.spi = sa->sadb_sa_spi; 1100 x->id.spi = sa->sadb_sa_spi;
1101 x->props.replay_window = sa->sadb_sa_replay; 1101 x->props.replay_window = min_t(unsigned int, sa->sadb_sa_replay,
1102 (sizeof(x->replay.bitmap) * 8));
1102 if (sa->sadb_sa_flags & SADB_SAFLAGS_NOECN) 1103 if (sa->sadb_sa_flags & SADB_SAFLAGS_NOECN)
1103 x->props.flags |= XFRM_STATE_NOECN; 1104 x->props.flags |= XFRM_STATE_NOECN;
1104 if (sa->sadb_sa_flags & SADB_SAFLAGS_DECAP_DSCP) 1105 if (sa->sadb_sa_flags & SADB_SAFLAGS_DECAP_DSCP)
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index feae495a0a30..b076e8309bc2 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -115,6 +115,11 @@ struct l2tp_net {
115static void l2tp_session_set_header_len(struct l2tp_session *session, int version); 115static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
116static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); 116static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
117 117
118static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
119{
120 return sk->sk_user_data;
121}
122
118static inline struct l2tp_net *l2tp_pernet(struct net *net) 123static inline struct l2tp_net *l2tp_pernet(struct net *net)
119{ 124{
120 BUG_ON(!net); 125 BUG_ON(!net);
@@ -504,7 +509,7 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk,
504 return 0; 509 return 0;
505 510
506#if IS_ENABLED(CONFIG_IPV6) 511#if IS_ENABLED(CONFIG_IPV6)
507 if (sk->sk_family == PF_INET6) { 512 if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) {
508 if (!uh->check) { 513 if (!uh->check) {
509 LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n"); 514 LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
510 return 1; 515 return 1;
@@ -1128,7 +1133,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1128 /* Queue the packet to IP for output */ 1133 /* Queue the packet to IP for output */
1129 skb->local_df = 1; 1134 skb->local_df = 1;
1130#if IS_ENABLED(CONFIG_IPV6) 1135#if IS_ENABLED(CONFIG_IPV6)
1131 if (skb->sk->sk_family == PF_INET6) 1136 if (skb->sk->sk_family == PF_INET6 && !tunnel->v4mapped)
1132 error = inet6_csk_xmit(skb, NULL); 1137 error = inet6_csk_xmit(skb, NULL);
1133 else 1138 else
1134#endif 1139#endif
@@ -1255,7 +1260,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1255 1260
1256 /* Calculate UDP checksum if configured to do so */ 1261 /* Calculate UDP checksum if configured to do so */
1257#if IS_ENABLED(CONFIG_IPV6) 1262#if IS_ENABLED(CONFIG_IPV6)
1258 if (sk->sk_family == PF_INET6) 1263 if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
1259 l2tp_xmit_ipv6_csum(sk, skb, udp_len); 1264 l2tp_xmit_ipv6_csum(sk, skb, udp_len);
1260 else 1265 else
1261#endif 1266#endif
@@ -1304,10 +1309,9 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1304 */ 1309 */
1305static void l2tp_tunnel_destruct(struct sock *sk) 1310static void l2tp_tunnel_destruct(struct sock *sk)
1306{ 1311{
1307 struct l2tp_tunnel *tunnel; 1312 struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
1308 struct l2tp_net *pn; 1313 struct l2tp_net *pn;
1309 1314
1310 tunnel = sk->sk_user_data;
1311 if (tunnel == NULL) 1315 if (tunnel == NULL)
1312 goto end; 1316 goto end;
1313 1317
@@ -1675,7 +1679,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1675 } 1679 }
1676 1680
1677 /* Check if this socket has already been prepped */ 1681 /* Check if this socket has already been prepped */
1678 tunnel = (struct l2tp_tunnel *)sk->sk_user_data; 1682 tunnel = l2tp_tunnel(sk);
1679 if (tunnel != NULL) { 1683 if (tunnel != NULL) {
1680 /* This socket has already been prepped */ 1684 /* This socket has already been prepped */
1681 err = -EBUSY; 1685 err = -EBUSY;
@@ -1704,6 +1708,24 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1704 if (cfg != NULL) 1708 if (cfg != NULL)
1705 tunnel->debug = cfg->debug; 1709 tunnel->debug = cfg->debug;
1706 1710
1711#if IS_ENABLED(CONFIG_IPV6)
1712 if (sk->sk_family == PF_INET6) {
1713 struct ipv6_pinfo *np = inet6_sk(sk);
1714
1715 if (ipv6_addr_v4mapped(&np->saddr) &&
1716 ipv6_addr_v4mapped(&np->daddr)) {
1717 struct inet_sock *inet = inet_sk(sk);
1718
1719 tunnel->v4mapped = true;
1720 inet->inet_saddr = np->saddr.s6_addr32[3];
1721 inet->inet_rcv_saddr = np->rcv_saddr.s6_addr32[3];
1722 inet->inet_daddr = np->daddr.s6_addr32[3];
1723 } else {
1724 tunnel->v4mapped = false;
1725 }
1726 }
1727#endif
1728
1707 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ 1729 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1708 tunnel->encap = encap; 1730 tunnel->encap = encap;
1709 if (encap == L2TP_ENCAPTYPE_UDP) { 1731 if (encap == L2TP_ENCAPTYPE_UDP) {
@@ -1712,7 +1734,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1712 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; 1734 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
1713 udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy; 1735 udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
1714#if IS_ENABLED(CONFIG_IPV6) 1736#if IS_ENABLED(CONFIG_IPV6)
1715 if (sk->sk_family == PF_INET6) 1737 if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
1716 udpv6_encap_enable(); 1738 udpv6_encap_enable();
1717 else 1739 else
1718#endif 1740#endif
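The l2tp_core.c hunks above teach the tunnel code about IPv6 sockets whose endpoints are really v4-mapped IPv4 addresses: at tunnel creation both addresses are tested with ipv6_addr_v4mapped(), the embedded IPv4 addresses are copied into the inet_sock fields, and tunnel->v4mapped is set so the UDP checksum and transmit paths fall back to the IPv4 handling instead of the IPv6-only helpers. A rough user-space sketch of the v4-mapped test and address extraction, using the standard socket headers (the tunnel_addrs struct is invented for illustration):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the per-tunnel state the hunk fills in. */
struct tunnel_addrs {
        bool v4mapped;
        struct in_addr saddr;
        struct in_addr daddr;
};

static void detect_v4mapped(const struct in6_addr *src,
                            const struct in6_addr *dst,
                            struct tunnel_addrs *t)
{
        if (IN6_IS_ADDR_V4MAPPED(src) && IN6_IS_ADDR_V4MAPPED(dst)) {
                t->v4mapped = true;
                /* A v4-mapped address keeps the IPv4 address in its last 4 bytes. */
                memcpy(&t->saddr, &src->s6_addr[12], 4);
                memcpy(&t->daddr, &dst->s6_addr[12], 4);
        } else {
                t->v4mapped = false;
        }
}

int main(void)
{
        struct in6_addr src, dst;
        struct tunnel_addrs t;
        char buf[INET_ADDRSTRLEN];

        inet_pton(AF_INET6, "::ffff:192.0.2.1", &src);
        inet_pton(AF_INET6, "::ffff:198.51.100.2", &dst);
        detect_v4mapped(&src, &dst, &t);
        printf("v4mapped=%d saddr=%s\n", t.v4mapped,
               inet_ntop(AF_INET, &t.saddr, buf, sizeof(buf)));
        return 0;
}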
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 66a559b104b6..6f251cbc2ed7 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -194,6 +194,9 @@ struct l2tp_tunnel {
194 struct sock *sock; /* Parent socket */ 194 struct sock *sock; /* Parent socket */
195 int fd; /* Parent fd, if tunnel socket 195 int fd; /* Parent fd, if tunnel socket
196 * was created by userspace */ 196 * was created by userspace */
197#if IS_ENABLED(CONFIG_IPV6)
198 bool v4mapped;
199#endif
197 200
198 struct work_struct del_work; 201 struct work_struct del_work;
199 202
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 5ebee2ded9e9..8c46b271064a 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -353,7 +353,9 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
353 goto error_put_sess_tun; 353 goto error_put_sess_tun;
354 } 354 }
355 355
356 local_bh_disable();
356 l2tp_xmit_skb(session, skb, session->hdr_len); 357 l2tp_xmit_skb(session, skb, session->hdr_len);
358 local_bh_enable();
357 359
358 sock_put(ps->tunnel_sock); 360 sock_put(ps->tunnel_sock);
359 sock_put(sk); 361 sock_put(sk);
@@ -422,7 +424,9 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
422 skb->data[0] = ppph[0]; 424 skb->data[0] = ppph[0];
423 skb->data[1] = ppph[1]; 425 skb->data[1] = ppph[1];
424 426
427 local_bh_disable();
425 l2tp_xmit_skb(session, skb, session->hdr_len); 428 l2tp_xmit_skb(session, skb, session->hdr_len);
429 local_bh_enable();
426 430
427 sock_put(sk_tun); 431 sock_put(sk_tun);
428 sock_put(sk); 432 sock_put(sk);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 2e7855a1b10d..629dee7ec9bf 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3518,7 +3518,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
3518 return -EINVAL; 3518 return -EINVAL;
3519 } 3519 }
3520 band = chanctx_conf->def.chan->band; 3520 band = chanctx_conf->def.chan->band;
3521 sta = sta_info_get(sdata, peer); 3521 sta = sta_info_get_bss(sdata, peer);
3522 if (sta) { 3522 if (sta) {
3523 qos = test_sta_flag(sta, WLAN_STA_WME); 3523 qos = test_sta_flag(sta, WLAN_STA_WME);
3524 } else { 3524 } else {
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index b6186517ec56..611abfcfb5eb 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -893,6 +893,8 @@ struct tpt_led_trigger {
893 * that the scan completed. 893 * that the scan completed.
894 * @SCAN_ABORTED: Set for our scan work function when the driver reported 894 * @SCAN_ABORTED: Set for our scan work function when the driver reported
895 * a scan complete for an aborted scan. 895 * a scan complete for an aborted scan.
896 * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
897 * cancelled.
896 */ 898 */
897enum { 899enum {
898 SCAN_SW_SCANNING, 900 SCAN_SW_SCANNING,
@@ -900,6 +902,7 @@ enum {
900 SCAN_ONCHANNEL_SCANNING, 902 SCAN_ONCHANNEL_SCANNING,
901 SCAN_COMPLETED, 903 SCAN_COMPLETED,
902 SCAN_ABORTED, 904 SCAN_ABORTED,
905 SCAN_HW_CANCELLED,
903}; 906};
904 907
905/** 908/**
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index acd1f71adc03..0c2a29484c07 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -394,6 +394,8 @@ void ieee80211_sw_roc_work(struct work_struct *work)
394 394
395 if (started) 395 if (started)
396 ieee80211_start_next_roc(local); 396 ieee80211_start_next_roc(local);
397 else if (list_empty(&local->roc_list))
398 ieee80211_run_deferred_scan(local);
397 } 399 }
398 400
399 out_unlock: 401 out_unlock:
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 54395d7583ba..674eac1f996c 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3056,6 +3056,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
3056 case NL80211_IFTYPE_ADHOC: 3056 case NL80211_IFTYPE_ADHOC:
3057 if (!bssid) 3057 if (!bssid)
3058 return 0; 3058 return 0;
3059 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
3060 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
3061 return 0;
3059 if (ieee80211_is_beacon(hdr->frame_control)) { 3062 if (ieee80211_is_beacon(hdr->frame_control)) {
3060 return 1; 3063 return 1;
3061 } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) { 3064 } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 08afe74b98f4..d2d17a449224 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -238,6 +238,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
238 enum ieee80211_band band; 238 enum ieee80211_band band;
239 int i, ielen, n_chans; 239 int i, ielen, n_chans;
240 240
241 if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
242 return false;
243
241 do { 244 do {
242 if (local->hw_scan_band == IEEE80211_NUM_BANDS) 245 if (local->hw_scan_band == IEEE80211_NUM_BANDS)
243 return false; 246 return false;
@@ -940,7 +943,23 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
940 if (!local->scan_req) 943 if (!local->scan_req)
941 goto out; 944 goto out;
942 945
946 /*
947 * We have a scan running and the driver already reported completion,
948 * but the worker hasn't run yet or is stuck on the mutex - mark it as
949 * cancelled.
950 */
951 if (test_bit(SCAN_HW_SCANNING, &local->scanning) &&
952 test_bit(SCAN_COMPLETED, &local->scanning)) {
953 set_bit(SCAN_HW_CANCELLED, &local->scanning);
954 goto out;
955 }
956
943 if (test_bit(SCAN_HW_SCANNING, &local->scanning)) { 957 if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
958 /*
959 * Make sure that __ieee80211_scan_completed doesn't trigger a
960 * scan on another band.
961 */
962 set_bit(SCAN_HW_CANCELLED, &local->scanning);
944 if (local->ops->cancel_hw_scan) 963 if (local->ops->cancel_hw_scan)
945 drv_cancel_hw_scan(local, 964 drv_cancel_hw_scan(local,
946 rcu_dereference_protected(local->scan_sdata, 965 rcu_dereference_protected(local->scan_sdata,
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 368837fe3b80..78dc2e99027e 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -180,6 +180,9 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
180 struct ieee80211_local *local = sta->local; 180 struct ieee80211_local *local = sta->local;
181 struct ieee80211_sub_if_data *sdata = sta->sdata; 181 struct ieee80211_sub_if_data *sdata = sta->sdata;
182 182
183 if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
184 sta->last_rx = jiffies;
185
183 if (ieee80211_is_data_qos(mgmt->frame_control)) { 186 if (ieee80211_is_data_qos(mgmt->frame_control)) {
184 struct ieee80211_hdr *hdr = (void *) skb->data; 187 struct ieee80211_hdr *hdr = (void *) skb->data;
185 u8 *qc = ieee80211_get_qos_ctl(hdr); 188 u8 *qc = ieee80211_get_qos_ctl(hdr);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 3456c0486b48..70b5a05c0a4e 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1120,7 +1120,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1120 tx->sta = rcu_dereference(sdata->u.vlan.sta); 1120 tx->sta = rcu_dereference(sdata->u.vlan.sta);
1121 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr) 1121 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
1122 return TX_DROP; 1122 return TX_DROP;
1123 } else if (info->flags & IEEE80211_TX_CTL_INJECTED || 1123 } else if (info->flags & (IEEE80211_TX_CTL_INJECTED |
1124 IEEE80211_TX_INTFL_NL80211_FRAME_TX) ||
1124 tx->sdata->control_port_protocol == tx->skb->protocol) { 1125 tx->sdata->control_port_protocol == tx->skb->protocol) {
1125 tx->sta = sta_info_get_bss(sdata, hdr->addr1); 1126 tx->sta = sta_info_get_bss(sdata, hdr->addr1);
1126 } 1127 }
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index e1b34a18b243..69e4ef5348a0 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2103,7 +2103,7 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
2103{ 2103{
2104 struct ieee80211_local *local = sdata->local; 2104 struct ieee80211_local *local = sdata->local;
2105 struct ieee80211_supported_band *sband; 2105 struct ieee80211_supported_band *sband;
2106 int rate, skip, shift; 2106 int rate, shift;
2107 u8 i, exrates, *pos; 2107 u8 i, exrates, *pos;
2108 u32 basic_rates = sdata->vif.bss_conf.basic_rates; 2108 u32 basic_rates = sdata->vif.bss_conf.basic_rates;
2109 u32 rate_flags; 2109 u32 rate_flags;
@@ -2131,14 +2131,11 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
2131 pos = skb_put(skb, exrates + 2); 2131 pos = skb_put(skb, exrates + 2);
2132 *pos++ = WLAN_EID_EXT_SUPP_RATES; 2132 *pos++ = WLAN_EID_EXT_SUPP_RATES;
2133 *pos++ = exrates; 2133 *pos++ = exrates;
2134 skip = 0;
2135 for (i = 8; i < sband->n_bitrates; i++) { 2134 for (i = 8; i < sband->n_bitrates; i++) {
2136 u8 basic = 0; 2135 u8 basic = 0;
2137 if ((rate_flags & sband->bitrates[i].flags) 2136 if ((rate_flags & sband->bitrates[i].flags)
2138 != rate_flags) 2137 != rate_flags)
2139 continue; 2138 continue;
2140 if (skip++ < 8)
2141 continue;
2142 if (need_basic && basic_rates & BIT(i)) 2139 if (need_basic && basic_rates & BIT(i))
2143 basic = 0x80; 2140 basic = 0x80;
2144 rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 2141 rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
@@ -2241,6 +2238,10 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
2241 } 2238 }
2242 2239
2243 rate = cfg80211_calculate_bitrate(&ri); 2240 rate = cfg80211_calculate_bitrate(&ri);
2241 if (WARN_ONCE(!rate,
2242 "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n",
2243 status->flag, status->rate_idx, status->vht_nss))
2244 return 0;
2244 2245
2245 /* rewind from end of MPDU */ 2246 /* rewind from end of MPDU */
2246 if (status->flag & RX_FLAG_MACTIME_END) 2247 if (status->flag & RX_FLAG_MACTIME_END)
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index bdebd03bc8cd..70866d192efc 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -778,8 +778,8 @@ static int callforward_do_filter(const union nf_inet_addr *src,
778 flowi6_to_flowi(&fl1), false)) { 778 flowi6_to_flowi(&fl1), false)) {
779 if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, 779 if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
780 flowi6_to_flowi(&fl2), false)) { 780 flowi6_to_flowi(&fl2), false)) {
781 if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, 781 if (ipv6_addr_equal(rt6_nexthop(rt1),
782 sizeof(rt1->rt6i_gateway)) && 782 rt6_nexthop(rt2)) &&
783 rt1->dst.dev == rt2->dst.dev) 783 rt1->dst.dev == rt2->dst.dev)
784 ret = 1; 784 ret = 1;
785 dst_release(&rt2->dst); 785 dst_release(&rt2->dst);
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index a2fef8b10b96..a9dfdda9ed1d 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -472,20 +472,16 @@ begin:
472 if (f->credit > 0 || !q->rate_enable) 472 if (f->credit > 0 || !q->rate_enable)
473 goto out; 473 goto out;
474 474
475 if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) { 475 rate = q->flow_max_rate;
476 rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate; 476 if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
477 rate = min(skb->sk->sk_pacing_rate, rate);
477 478
478 rate = min(rate, q->flow_max_rate); 479 if (rate != ~0U) {
479 } else {
480 rate = q->flow_max_rate;
481 if (rate == ~0U)
482 goto out;
483 }
484 if (rate) {
485 u32 plen = max(qdisc_pkt_len(skb), q->quantum); 480 u32 plen = max(qdisc_pkt_len(skb), q->quantum);
486 u64 len = (u64)plen * NSEC_PER_SEC; 481 u64 len = (u64)plen * NSEC_PER_SEC;
487 482
488 do_div(len, rate); 483 if (likely(rate))
484 do_div(len, rate);
489 /* Since socket rate can change later, 485 /* Since socket rate can change later,
490 * clamp the delay to 125 ms. 486 * clamp the delay to 125 ms.
491 * TODO: maybe segment the too big skb, as in commit 487 * TODO: maybe segment the too big skb, as in commit
@@ -656,7 +652,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
656 q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); 652 q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
657 653
658 if (tb[TCA_FQ_INITIAL_QUANTUM]) 654 if (tb[TCA_FQ_INITIAL_QUANTUM])
659 q->quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); 655 q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
660 656
661 if (tb[TCA_FQ_FLOW_DEFAULT_RATE]) 657 if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
662 q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]); 658 q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]);
@@ -735,12 +731,14 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
735 if (opts == NULL) 731 if (opts == NULL)
736 goto nla_put_failure; 732 goto nla_put_failure;
737 733
734 /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore,
735 * do not bother giving its value
736 */
738 if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) || 737 if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
739 nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) || 738 nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
740 nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) || 739 nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
741 nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) || 740 nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
742 nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) || 741 nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
743 nla_put_u32(skb, TCA_FQ_FLOW_DEFAULT_RATE, q->flow_default_rate) ||
744 nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) || 742 nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
745 nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log)) 743 nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
746 goto nla_put_failure; 744 goto nla_put_failure;
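In the sch_fq hunk above, the per-flow rate now starts from q->flow_max_rate, is lowered to the socket's sk_pacing_rate when one is attached, and do_div() only runs when the rate is non-zero; the resulting gap is the packet length scaled to nanoseconds divided by the rate, clamped to 125 ms as the comment in the hunk notes. A small user-space sketch of that delay arithmetic (names are illustrative, not the qdisc's):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC  1000000000ULL
#define MAX_DELAY_NS  (125ULL * 1000 * 1000)  /* 125 ms clamp, as in the hunk */

/* Pacing gap in ns for a plen-byte packet sent at rate bytes/sec. */
static uint64_t pacing_delay_ns(uint32_t plen, uint32_t rate)
{
        uint64_t len = (uint64_t)plen * NSEC_PER_SEC;

        if (rate)
                len /= rate;            /* skip the division when rate is 0 */
        if (len > MAX_DELAY_NS)
                len = MAX_DELAY_NS;     /* the clamp also bounds the rate==0 case */
        return len;
}

int main(void)
{
        /* 1500-byte packet paced at 1 MB/s: 1.5 ms between packets. */
        printf("%llu ns\n", (unsigned long long)pacing_delay_ns(1500, 1000000));
        return 0;
}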
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a6d788d45216..b87e83d07478 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -358,6 +358,21 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
358 return PSCHED_NS2TICKS(ticks); 358 return PSCHED_NS2TICKS(ticks);
359} 359}
360 360
361static void tfifo_reset(struct Qdisc *sch)
362{
363 struct netem_sched_data *q = qdisc_priv(sch);
364 struct rb_node *p;
365
366 while ((p = rb_first(&q->t_root))) {
367 struct sk_buff *skb = netem_rb_to_skb(p);
368
369 rb_erase(p, &q->t_root);
370 skb->next = NULL;
371 skb->prev = NULL;
372 kfree_skb(skb);
373 }
374}
375
361static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) 376static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
362{ 377{
363 struct netem_sched_data *q = qdisc_priv(sch); 378 struct netem_sched_data *q = qdisc_priv(sch);
@@ -520,6 +535,7 @@ static unsigned int netem_drop(struct Qdisc *sch)
520 skb->next = NULL; 535 skb->next = NULL;
521 skb->prev = NULL; 536 skb->prev = NULL;
522 len = qdisc_pkt_len(skb); 537 len = qdisc_pkt_len(skb);
538 sch->qstats.backlog -= len;
523 kfree_skb(skb); 539 kfree_skb(skb);
524 } 540 }
525 } 541 }
@@ -609,6 +625,7 @@ static void netem_reset(struct Qdisc *sch)
609 struct netem_sched_data *q = qdisc_priv(sch); 625 struct netem_sched_data *q = qdisc_priv(sch);
610 626
611 qdisc_reset_queue(sch); 627 qdisc_reset_queue(sch);
628 tfifo_reset(sch);
612 if (q->qdisc) 629 if (q->qdisc)
613 qdisc_reset(q->qdisc); 630 qdisc_reset(q->qdisc);
614 qdisc_watchdog_cancel(&q->watchdog); 631 qdisc_watchdog_cancel(&q->watchdog);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 0ac3a65daccb..319137340d15 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -536,7 +536,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
536 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. 536 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
537 */ 537 */
538 if (!sctp_checksum_disable) { 538 if (!sctp_checksum_disable) {
539 if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) { 539 if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
540 (dst_xfrm(dst) != NULL) || packet->ipfragok) {
540 __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); 541 __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
541 542
542 /* 3) Put the resultant value into the checksum field in the 543 /* 3) Put the resultant value into the checksum field in the
diff --git a/net/socket.c b/net/socket.c
index ebed4b68f768..c226aceee65b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1964,6 +1964,16 @@ struct used_address {
1964 unsigned int name_len; 1964 unsigned int name_len;
1965}; 1965};
1966 1966
1967static int copy_msghdr_from_user(struct msghdr *kmsg,
1968 struct msghdr __user *umsg)
1969{
1970 if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
1971 return -EFAULT;
1972 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
1973 return -EINVAL;
1974 return 0;
1975}
1976
1967static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, 1977static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1968 struct msghdr *msg_sys, unsigned int flags, 1978 struct msghdr *msg_sys, unsigned int flags,
1969 struct used_address *used_address) 1979 struct used_address *used_address)
@@ -1982,8 +1992,11 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1982 if (MSG_CMSG_COMPAT & flags) { 1992 if (MSG_CMSG_COMPAT & flags) {
1983 if (get_compat_msghdr(msg_sys, msg_compat)) 1993 if (get_compat_msghdr(msg_sys, msg_compat))
1984 return -EFAULT; 1994 return -EFAULT;
1985 } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) 1995 } else {
1986 return -EFAULT; 1996 err = copy_msghdr_from_user(msg_sys, msg);
1997 if (err)
1998 return err;
1999 }
1987 2000
1988 if (msg_sys->msg_iovlen > UIO_FASTIOV) { 2001 if (msg_sys->msg_iovlen > UIO_FASTIOV) {
1989 err = -EMSGSIZE; 2002 err = -EMSGSIZE;
@@ -2191,8 +2204,11 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
2191 if (MSG_CMSG_COMPAT & flags) { 2204 if (MSG_CMSG_COMPAT & flags) {
2192 if (get_compat_msghdr(msg_sys, msg_compat)) 2205 if (get_compat_msghdr(msg_sys, msg_compat))
2193 return -EFAULT; 2206 return -EFAULT;
2194 } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) 2207 } else {
2195 return -EFAULT; 2208 err = copy_msghdr_from_user(msg_sys, msg);
2209 if (err)
2210 return err;
2211 }
2196 2212
2197 if (msg_sys->msg_iovlen > UIO_FASTIOV) { 2213 if (msg_sys->msg_iovlen > UIO_FASTIOV) {
2198 err = -EMSGSIZE; 2214 err = -EMSGSIZE;
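The socket.c change above pulls the msghdr copy into a copy_msghdr_from_user() helper shared by the sendmsg and recvmsg paths, and the helper rejects any msg_namelen larger than sizeof(struct sockaddr_storage) before that length can be used against a sockaddr_storage-sized buffer. A user-space sketch of the same validate-before-use pattern (the source here is plain memory rather than a __user pointer):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Copy a msghdr-like descriptor and refuse oversized address lengths up front. */
static int copy_msghdr_checked(struct msghdr *dst, const struct msghdr *src)
{
        memcpy(dst, src, sizeof(*dst));
        if (dst->msg_namelen > sizeof(struct sockaddr_storage))
                return -EINVAL;
        return 0;
}

int main(void)
{
        struct msghdr bogus = { .msg_namelen = 4096 };  /* larger than sockaddr_storage */
        struct msghdr copy;

        printf("%d\n", copy_msghdr_checked(&copy, &bogus));  /* -22, i.e. -EINVAL */
        return 0;
}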
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 86de99ad2976..c1f403bed683 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1246,6 +1246,15 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
1246 return 0; 1246 return 0;
1247} 1247}
1248 1248
1249static void unix_sock_inherit_flags(const struct socket *old,
1250 struct socket *new)
1251{
1252 if (test_bit(SOCK_PASSCRED, &old->flags))
1253 set_bit(SOCK_PASSCRED, &new->flags);
1254 if (test_bit(SOCK_PASSSEC, &old->flags))
1255 set_bit(SOCK_PASSSEC, &new->flags);
1256}
1257
1249static int unix_accept(struct socket *sock, struct socket *newsock, int flags) 1258static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1250{ 1259{
1251 struct sock *sk = sock->sk; 1260 struct sock *sk = sock->sk;
@@ -1280,6 +1289,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1280 /* attach accepted sock to socket */ 1289 /* attach accepted sock to socket */
1281 unix_state_lock(tsk); 1290 unix_state_lock(tsk);
1282 newsock->state = SS_CONNECTED; 1291 newsock->state = SS_CONNECTED;
1292 unix_sock_inherit_flags(sock, newsock);
1283 sock_graft(tsk, newsock); 1293 sock_graft(tsk, newsock);
1284 unix_state_unlock(tsk); 1294 unix_state_unlock(tsk);
1285 return 0; 1295 return 0;
diff --git a/net/unix/diag.c b/net/unix/diag.c
index d591091603bf..86fa0f3b2caf 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -124,6 +124,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
124 rep->udiag_family = AF_UNIX; 124 rep->udiag_family = AF_UNIX;
125 rep->udiag_type = sk->sk_type; 125 rep->udiag_type = sk->sk_type;
126 rep->udiag_state = sk->sk_state; 126 rep->udiag_state = sk->sk_state;
127 rep->pad = 0;
127 rep->udiag_ino = sk_ino; 128 rep->udiag_ino = sk_ino;
128 sock_diag_save_cookie(sk, rep->udiag_cookie); 129 sock_diag_save_cookie(sk, rep->udiag_cookie);
129 130
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 67153964aad2..aff959e5a1b3 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -566,18 +566,13 @@ int wiphy_register(struct wiphy *wiphy)
566 /* check and set up bitrates */ 566 /* check and set up bitrates */
567 ieee80211_set_bitrate_flags(wiphy); 567 ieee80211_set_bitrate_flags(wiphy);
568 568
569 569 rtnl_lock();
570 res = device_add(&rdev->wiphy.dev); 570 res = device_add(&rdev->wiphy.dev);
571 if (res)
572 return res;
573
574 res = rfkill_register(rdev->rfkill);
575 if (res) { 571 if (res) {
576 device_del(&rdev->wiphy.dev); 572 rtnl_unlock();
577 return res; 573 return res;
578 } 574 }
579 575
580 rtnl_lock();
581 /* set up regulatory info */ 576 /* set up regulatory info */
582 wiphy_regulatory_register(wiphy); 577 wiphy_regulatory_register(wiphy);
583 578
@@ -606,6 +601,15 @@ int wiphy_register(struct wiphy *wiphy)
606 601
607 rdev->wiphy.registered = true; 602 rdev->wiphy.registered = true;
608 rtnl_unlock(); 603 rtnl_unlock();
604
605 res = rfkill_register(rdev->rfkill);
606 if (res) {
607 rfkill_destroy(rdev->rfkill);
608 rdev->rfkill = NULL;
609 wiphy_unregister(&rdev->wiphy);
610 return res;
611 }
612
609 return 0; 613 return 0;
610} 614}
611EXPORT_SYMBOL(wiphy_register); 615EXPORT_SYMBOL(wiphy_register);
@@ -640,7 +644,8 @@ void wiphy_unregister(struct wiphy *wiphy)
640 rtnl_unlock(); 644 rtnl_unlock();
641 __count == 0; })); 645 __count == 0; }));
642 646
643 rfkill_unregister(rdev->rfkill); 647 if (rdev->rfkill)
648 rfkill_unregister(rdev->rfkill);
644 649
645 rtnl_lock(); 650 rtnl_lock();
646 rdev->wiphy.registered = false; 651 rdev->wiphy.registered = false;
@@ -953,8 +958,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
953 case NETDEV_PRE_UP: 958 case NETDEV_PRE_UP:
954 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) 959 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
955 return notifier_from_errno(-EOPNOTSUPP); 960 return notifier_from_errno(-EOPNOTSUPP);
956 if (rfkill_blocked(rdev->rfkill))
957 return notifier_from_errno(-ERFKILL);
958 ret = cfg80211_can_add_interface(rdev, wdev->iftype); 961 ret = cfg80211_can_add_interface(rdev, wdev->iftype);
959 if (ret) 962 if (ret)
960 return notifier_from_errno(ret); 963 return notifier_from_errno(ret);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 9ad43c619c54..3159e9c284c5 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -411,6 +411,9 @@ static inline int
411cfg80211_can_add_interface(struct cfg80211_registered_device *rdev, 411cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
412 enum nl80211_iftype iftype) 412 enum nl80211_iftype iftype)
413{ 413{
414 if (rfkill_blocked(rdev->rfkill))
415 return -ERFKILL;
416
414 return cfg80211_can_change_interface(rdev, NULL, iftype); 417 return cfg80211_can_change_interface(rdev, NULL, iftype);
415} 418}
416 419
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 39bff7d36768..403fe29c024d 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -263,6 +263,8 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
263 if (chan->flags & IEEE80211_CHAN_DISABLED) 263 if (chan->flags & IEEE80211_CHAN_DISABLED)
264 continue; 264 continue;
265 wdev->wext.ibss.chandef.chan = chan; 265 wdev->wext.ibss.chandef.chan = chan;
266 wdev->wext.ibss.chandef.center_freq1 =
267 chan->center_freq;
266 break; 268 break;
267 } 269 }
268 270
@@ -347,6 +349,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
347 if (chan) { 349 if (chan) {
348 wdev->wext.ibss.chandef.chan = chan; 350 wdev->wext.ibss.chandef.chan = chan;
349 wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT; 351 wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
352 wdev->wext.ibss.chandef.center_freq1 = freq;
350 wdev->wext.ibss.channel_fixed = true; 353 wdev->wext.ibss.channel_fixed = true;
351 } else { 354 } else {
352 /* cfg80211_ibss_wext_join will pick one if needed */ 355 /* cfg80211_ibss_wext_join will pick one if needed */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index af8d84a4a5b2..626dc3b5fd8d 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2421,7 +2421,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
2421 change = true; 2421 change = true;
2422 } 2422 }
2423 2423
2424 if (flags && (*flags & NL80211_MNTR_FLAG_ACTIVE) && 2424 if (flags && (*flags & MONITOR_FLAG_ACTIVE) &&
2425 !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) 2425 !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
2426 return -EOPNOTSUPP; 2426 return -EOPNOTSUPP;
2427 2427
@@ -2483,7 +2483,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
2483 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, 2483 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
2484 &flags); 2484 &flags);
2485 2485
2486 if (!err && (flags & NL80211_MNTR_FLAG_ACTIVE) && 2486 if (!err && (flags & MONITOR_FLAG_ACTIVE) &&
2487 !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) 2487 !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
2488 return -EOPNOTSUPP; 2488 return -EOPNOTSUPP;
2489 2489
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index 7d604c06c3dc..a271c27fac77 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -97,6 +97,10 @@ int ieee80211_radiotap_iterator_init(
97 struct ieee80211_radiotap_header *radiotap_header, 97 struct ieee80211_radiotap_header *radiotap_header,
98 int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns) 98 int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
99{ 99{
100 /* check the radiotap header can actually be present */
101 if (max_length < sizeof(struct ieee80211_radiotap_header))
102 return -EINVAL;
103
100 /* Linux only supports version 0 radiotap format */ 104 /* Linux only supports version 0 radiotap format */
101 if (radiotap_header->it_version) 105 if (radiotap_header->it_version)
102 return -EINVAL; 106 return -EINVAL;
@@ -131,7 +135,8 @@ int ieee80211_radiotap_iterator_init(
131 */ 135 */
132 136
133 if ((unsigned long)iterator->_arg - 137 if ((unsigned long)iterator->_arg -
134 (unsigned long)iterator->_rtheader > 138 (unsigned long)iterator->_rtheader +
139 sizeof(uint32_t) >
135 (unsigned long)iterator->_max_length) 140 (unsigned long)iterator->_max_length)
136 return -EINVAL; 141 return -EINVAL;
137 } 142 }
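The radiotap.c change above makes the iterator refuse to start when the caller's buffer is shorter than the fixed radiotap header, and the check that walks the chain of it_present words now adds sizeof(uint32_t) before comparing against the buffer end, so the next 32-bit read cannot run past it. A simplified sketch of the check-the-fixed-header-fits idea (the wire_header layout is a stand-in, not the real radiotap header):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for a fixed-size on-the-wire header. */
struct wire_header {
        uint8_t  version;
        uint8_t  pad;
        uint16_t len;
        uint32_t present;
};

/* Parse only if the buffer can actually hold the fixed header. */
static int parse_header(const uint8_t *buf, size_t buflen, struct wire_header *out)
{
        if (buflen < sizeof(struct wire_header))
                return -1;      /* too short: reading it would overrun the buffer */
        memcpy(out, buf, sizeof(*out));
        if (out->version != 0)
                return -1;      /* only version 0 is understood */
        return 0;
}

int main(void)
{
        uint8_t short_buf[4] = { 0 };
        struct wire_header h;

        printf("%d\n", parse_header(short_buf, sizeof(short_buf), &h));  /* -1 */
        return 0;
}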
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ed38d5d81f9e..76e1873811d4 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -334,7 +334,8 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
334 334
335 atomic_inc(&policy->genid); 335 atomic_inc(&policy->genid);
336 336
337 del_timer(&policy->polq.hold_timer); 337 if (del_timer(&policy->polq.hold_timer))
338 xfrm_pol_put(policy);
338 xfrm_queue_purge(&policy->polq.hold_queue); 339 xfrm_queue_purge(&policy->polq.hold_queue);
339 340
340 if (del_timer(&policy->timer)) 341 if (del_timer(&policy->timer))
@@ -589,7 +590,8 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
589 590
590 spin_lock_bh(&pq->hold_queue.lock); 591 spin_lock_bh(&pq->hold_queue.lock);
591 skb_queue_splice_init(&pq->hold_queue, &list); 592 skb_queue_splice_init(&pq->hold_queue, &list);
592 del_timer(&pq->hold_timer); 593 if (del_timer(&pq->hold_timer))
594 xfrm_pol_put(old);
593 spin_unlock_bh(&pq->hold_queue.lock); 595 spin_unlock_bh(&pq->hold_queue.lock);
594 596
595 if (skb_queue_empty(&list)) 597 if (skb_queue_empty(&list))
@@ -600,7 +602,8 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
600 spin_lock_bh(&pq->hold_queue.lock); 602 spin_lock_bh(&pq->hold_queue.lock);
601 skb_queue_splice(&list, &pq->hold_queue); 603 skb_queue_splice(&list, &pq->hold_queue);
602 pq->timeout = XFRM_QUEUE_TMO_MIN; 604 pq->timeout = XFRM_QUEUE_TMO_MIN;
603 mod_timer(&pq->hold_timer, jiffies); 605 if (!mod_timer(&pq->hold_timer, jiffies))
606 xfrm_pol_hold(new);
604 spin_unlock_bh(&pq->hold_queue.lock); 607 spin_unlock_bh(&pq->hold_queue.lock);
605} 608}
606 609
@@ -1769,6 +1772,10 @@ static void xfrm_policy_queue_process(unsigned long arg)
1769 1772
1770 spin_lock(&pq->hold_queue.lock); 1773 spin_lock(&pq->hold_queue.lock);
1771 skb = skb_peek(&pq->hold_queue); 1774 skb = skb_peek(&pq->hold_queue);
1775 if (!skb) {
1776 spin_unlock(&pq->hold_queue.lock);
1777 goto out;
1778 }
1772 dst = skb_dst(skb); 1779 dst = skb_dst(skb);
1773 sk = skb->sk; 1780 sk = skb->sk;
1774 xfrm_decode_session(skb, &fl, dst->ops->family); 1781 xfrm_decode_session(skb, &fl, dst->ops->family);
@@ -1787,8 +1794,9 @@ static void xfrm_policy_queue_process(unsigned long arg)
1787 goto purge_queue; 1794 goto purge_queue;
1788 1795
1789 pq->timeout = pq->timeout << 1; 1796 pq->timeout = pq->timeout << 1;
1790 mod_timer(&pq->hold_timer, jiffies + pq->timeout); 1797 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
1791 return; 1798 xfrm_pol_hold(pol);
1799 goto out;
1792 } 1800 }
1793 1801
1794 dst_release(dst); 1802 dst_release(dst);
@@ -1819,11 +1827,14 @@ static void xfrm_policy_queue_process(unsigned long arg)
1819 err = dst_output(skb); 1827 err = dst_output(skb);
1820 } 1828 }
1821 1829
1830out:
1831 xfrm_pol_put(pol);
1822 return; 1832 return;
1823 1833
1824purge_queue: 1834purge_queue:
1825 pq->timeout = 0; 1835 pq->timeout = 0;
1826 xfrm_queue_purge(&pq->hold_queue); 1836 xfrm_queue_purge(&pq->hold_queue);
1837 xfrm_pol_put(pol);
1827} 1838}
1828 1839
1829static int xdst_queue_output(struct sk_buff *skb) 1840static int xdst_queue_output(struct sk_buff *skb)
@@ -1831,7 +1842,8 @@ static int xdst_queue_output(struct sk_buff *skb)
1831 unsigned long sched_next; 1842 unsigned long sched_next;
1832 struct dst_entry *dst = skb_dst(skb); 1843 struct dst_entry *dst = skb_dst(skb);
1833 struct xfrm_dst *xdst = (struct xfrm_dst *) dst; 1844 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1834 struct xfrm_policy_queue *pq = &xdst->pols[0]->polq; 1845 struct xfrm_policy *pol = xdst->pols[0];
1846 struct xfrm_policy_queue *pq = &pol->polq;
1835 1847
1836 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) { 1848 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
1837 kfree_skb(skb); 1849 kfree_skb(skb);
@@ -1850,10 +1862,12 @@ static int xdst_queue_output(struct sk_buff *skb)
1850 if (del_timer(&pq->hold_timer)) { 1862 if (del_timer(&pq->hold_timer)) {
1851 if (time_before(pq->hold_timer.expires, sched_next)) 1863 if (time_before(pq->hold_timer.expires, sched_next))
1852 sched_next = pq->hold_timer.expires; 1864 sched_next = pq->hold_timer.expires;
1865 xfrm_pol_put(pol);
1853 } 1866 }
1854 1867
1855 __skb_queue_tail(&pq->hold_queue, skb); 1868 __skb_queue_tail(&pq->hold_queue, skb);
1856 mod_timer(&pq->hold_timer, sched_next); 1869 if (!mod_timer(&pq->hold_timer, sched_next))
1870 xfrm_pol_hold(pol);
1857 1871
1858 spin_unlock_bh(&pq->hold_queue.lock); 1872 spin_unlock_bh(&pq->hold_queue.lock);
1859 1873
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 8dafe6d3c6e4..dab57daae408 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -61,9 +61,9 @@ static void xfrm_replay_notify(struct xfrm_state *x, int event)
61 61
62 switch (event) { 62 switch (event) {
63 case XFRM_REPLAY_UPDATE: 63 case XFRM_REPLAY_UPDATE:
64 if (x->replay_maxdiff && 64 if (!x->replay_maxdiff ||
65 (x->replay.seq - x->preplay.seq < x->replay_maxdiff) && 65 ((x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
66 (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) { 66 (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff))) {
67 if (x->xflags & XFRM_TIME_DEFER) 67 if (x->xflags & XFRM_TIME_DEFER)
68 event = XFRM_REPLAY_TIMEOUT; 68 event = XFRM_REPLAY_TIMEOUT;
69 else 69 else
@@ -129,8 +129,7 @@ static int xfrm_replay_check(struct xfrm_state *x,
129 return 0; 129 return 0;
130 130
131 diff = x->replay.seq - seq; 131 diff = x->replay.seq - seq;
132 if (diff >= min_t(unsigned int, x->props.replay_window, 132 if (diff >= x->props.replay_window) {
133 sizeof(x->replay.bitmap) * 8)) {
134 x->stats.replay_window++; 133 x->stats.replay_window++;
135 goto err; 134 goto err;
136 } 135 }
@@ -302,9 +301,10 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
302 301
303 switch (event) { 302 switch (event) {
304 case XFRM_REPLAY_UPDATE: 303 case XFRM_REPLAY_UPDATE:
305 if (x->replay_maxdiff && 304 if (!x->replay_maxdiff ||
306 (replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) && 305 ((replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) &&
307 (replay_esn->oseq - preplay_esn->oseq < x->replay_maxdiff)) { 306 (replay_esn->oseq - preplay_esn->oseq
307 < x->replay_maxdiff))) {
308 if (x->xflags & XFRM_TIME_DEFER) 308 if (x->xflags & XFRM_TIME_DEFER)
309 event = XFRM_REPLAY_TIMEOUT; 309 event = XFRM_REPLAY_TIMEOUT;
310 else 310 else
@@ -353,28 +353,30 @@ static void xfrm_replay_notify_esn(struct xfrm_state *x, int event)
353 353
354 switch (event) { 354 switch (event) {
355 case XFRM_REPLAY_UPDATE: 355 case XFRM_REPLAY_UPDATE:
356 if (!x->replay_maxdiff) 356 if (x->replay_maxdiff) {
357 break; 357 if (replay_esn->seq_hi == preplay_esn->seq_hi)
358 358 seq_diff = replay_esn->seq - preplay_esn->seq;
359 if (replay_esn->seq_hi == preplay_esn->seq_hi) 359 else
360 seq_diff = replay_esn->seq - preplay_esn->seq; 360 seq_diff = ~preplay_esn->seq + replay_esn->seq
361 else 361 + 1;
362 seq_diff = ~preplay_esn->seq + replay_esn->seq + 1;
363
364 if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
365 oseq_diff = replay_esn->oseq - preplay_esn->oseq;
366 else
367 oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1;
368
369 if (seq_diff < x->replay_maxdiff &&
370 oseq_diff < x->replay_maxdiff) {
371 362
372 if (x->xflags & XFRM_TIME_DEFER) 363 if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
373 event = XFRM_REPLAY_TIMEOUT; 364 oseq_diff = replay_esn->oseq
365 - preplay_esn->oseq;
374 else 366 else
375 return; 367 oseq_diff = ~preplay_esn->oseq
368 + replay_esn->oseq + 1;
369
370 if (seq_diff >= x->replay_maxdiff ||
371 oseq_diff >= x->replay_maxdiff)
372 break;
376 } 373 }
377 374
375 if (x->xflags & XFRM_TIME_DEFER)
376 event = XFRM_REPLAY_TIMEOUT;
377 else
378 return;
379
378 break; 380 break;
379 381
380 case XFRM_REPLAY_TIMEOUT: 382 case XFRM_REPLAY_TIMEOUT:
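The reworked xfrm_replay_notify_esn() above keeps the ESN distance computation: when the high halves match, the distance is a plain 32-bit subtraction, and when the low word has wrapped it is ~old + new + 1, which is exactly (new - old) modulo 2^32. A short demonstration of that arithmetic with fixed-width types (the function name is illustrative):

#include <stdint.h>
#include <stdio.h>

/*
 * Forward distance from old to cur across a 32-bit wraparound:
 * ~old + cur + 1 equals (cur - old) mod 2^32.
 */
static uint32_t seq_distance(uint32_t old, uint32_t cur)
{
        return ~old + cur + 1;
}

int main(void)
{
        printf("%u\n", seq_distance(10, 15));           /* 5 */
        printf("%u\n", seq_distance(0xfffffffeu, 3));   /* 5: wrapped past zero */
        return 0;
}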
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 3f565e495ac6..f964d4c00ffb 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -446,7 +446,8 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
446 memcpy(&x->sel, &p->sel, sizeof(x->sel)); 446 memcpy(&x->sel, &p->sel, sizeof(x->sel));
447 memcpy(&x->lft, &p->lft, sizeof(x->lft)); 447 memcpy(&x->lft, &p->lft, sizeof(x->lft));
448 x->props.mode = p->mode; 448 x->props.mode = p->mode;
449 x->props.replay_window = p->replay_window; 449 x->props.replay_window = min_t(unsigned int, p->replay_window,
450 sizeof(x->replay.bitmap) * 8);
450 x->props.reqid = p->reqid; 451 x->props.reqid = p->reqid;
451 x->props.family = p->family; 452 x->props.family = p->family;
452 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr)); 453 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
@@ -1856,7 +1857,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1856 if (x->km.state != XFRM_STATE_VALID) 1857 if (x->km.state != XFRM_STATE_VALID)
1857 goto out; 1858 goto out;
1858 1859
1859 err = xfrm_replay_verify_len(x->replay_esn, rp); 1860 err = xfrm_replay_verify_len(x->replay_esn, re);
1860 if (err) 1861 if (err)
1861 goto out; 1862 goto out;
1862 1863