aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2013-09-05 14:58:52 -0400
committerDavid S. Miller <davem@davemloft.net>2013-09-05 14:58:52 -0400
commit06c54055bebf919249aa1eb68312887c3cfe77b4 (patch)
tree223a49c09e5d26516ed0161b8a52d08454ae028e /net
parent1a5bbfc3d6b700178b75743a2ba1fd2e58a8f36f (diff)
parente2e5c4c07caf810d7849658dca42f598b3938e21 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts: drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c net/bridge/br_multicast.c net/ipv6/sit.c The conflicts were minor: 1) sit.c changes overlap with change to ip_tunnel_xmit() signature. 2) br_multicast.c had an overlap between computing max_delay using msecs_to_jiffies and turning MLDV2_MRC() into an inline function with a name using lowercase instead of uppercase letters. 3) stmmac had two overlapping changes, one which conditionally allocated and hooked up a dma_cfg based upon the presence of the pbl OF property, and another one handling store-and-forward DMA mode. The latter of which should not go into the new of_find_property() basic block. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/bridge/br_device.c2
-rw-r--r--net/bridge/br_input.c2
-rw-r--r--net/bridge/br_mdb.c14
-rw-r--r--net/bridge/br_multicast.c258
-rw-r--r--net/bridge/br_private.h57
-rw-r--r--net/caif/cfctrl.c3
-rw-r--r--net/core/flow_dissector.c11
-rw-r--r--net/core/scm.c2
-rw-r--r--net/ipv4/ip_output.c8
-rw-r--r--net/ipv4/ipip.c5
-rw-r--r--net/ipv4/raw.c3
-rw-r--r--net/ipv4/tcp.c3
-rw-r--r--net/ipv4/tcp_input.c9
-rw-r--r--net/ipv4/tcp_output.c4
-rw-r--r--net/ipv4/xfrm4_output.c16
-rw-r--r--net/ipv4/xfrm4_state.c1
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/addrlabel.c48
-rw-r--r--net/ipv6/icmp.c10
-rw-r--r--net/ipv6/ip6_gre.c5
-rw-r--r--net/ipv6/ip6_output.c3
-rw-r--r--net/ipv6/ip6_tunnel.c6
-rw-r--r--net/ipv6/ndisc.c14
-rw-r--r--net/ipv6/raw.c1
-rw-r--r--net/ipv6/sit.c11
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/ipv6/xfrm6_output.c21
-rw-r--r--net/ipv6/xfrm6_state.c1
-rw-r--r--net/netlink/genetlink.c67
-rw-r--r--net/sunrpc/xdr.c9
-rw-r--r--net/tipc/socket.c4
-rw-r--r--net/xfrm/xfrm_output.c21
-rw-r--r--net/xfrm/xfrm_policy.c9
-rw-r--r--net/xfrm/xfrm_state.c7
34 files changed, 439 insertions, 200 deletions
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 0feaaa0d37d1..ca04163635da 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -71,7 +71,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
71 71
72 mdst = br_mdb_get(br, skb, vid); 72 mdst = br_mdb_get(br, skb, vid);
73 if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && 73 if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
74 br_multicast_querier_exists(br)) 74 br_multicast_querier_exists(br, eth_hdr(skb)))
75 br_multicast_deliver(mdst, skb); 75 br_multicast_deliver(mdst, skb);
76 else 76 else
77 br_flood_deliver(br, skb, false); 77 br_flood_deliver(br, skb, false);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 8c561c0aa636..a2fd37ec35f7 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -102,7 +102,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
102 } else if (is_multicast_ether_addr(dest)) { 102 } else if (is_multicast_ether_addr(dest)) {
103 mdst = br_mdb_get(br, skb, vid); 103 mdst = br_mdb_get(br, skb, vid);
104 if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && 104 if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
105 br_multicast_querier_exists(br)) { 105 br_multicast_querier_exists(br, eth_hdr(skb))) {
106 if ((mdst && mdst->mglist) || 106 if ((mdst && mdst->mglist) ||
107 br_multicast_is_router(br)) 107 br_multicast_is_router(br))
108 skb2 = skb; 108 skb2 = skb;
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index de818d95c476..85a09bb5ca51 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -416,16 +416,20 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
416 if (!netif_running(br->dev) || br->multicast_disabled) 416 if (!netif_running(br->dev) || br->multicast_disabled)
417 return -EINVAL; 417 return -EINVAL;
418 418
419 if (timer_pending(&br->multicast_querier_timer))
420 return -EBUSY;
421
422 ip.proto = entry->addr.proto; 419 ip.proto = entry->addr.proto;
423 if (ip.proto == htons(ETH_P_IP)) 420 if (ip.proto == htons(ETH_P_IP)) {
421 if (timer_pending(&br->ip4_querier.timer))
422 return -EBUSY;
423
424 ip.u.ip4 = entry->addr.u.ip4; 424 ip.u.ip4 = entry->addr.u.ip4;
425#if IS_ENABLED(CONFIG_IPV6) 425#if IS_ENABLED(CONFIG_IPV6)
426 else 426 } else {
427 if (timer_pending(&br->ip6_querier.timer))
428 return -EBUSY;
429
427 ip.u.ip6 = entry->addr.u.ip6; 430 ip.u.ip6 = entry->addr.u.ip6;
428#endif 431#endif
432 }
429 433
430 spin_lock_bh(&br->multicast_lock); 434 spin_lock_bh(&br->multicast_lock);
431 mdb = mlock_dereference(br->mdb, br); 435 mdb = mlock_dereference(br->mdb, br);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 23531471f16a..d1c578630678 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -34,7 +34,8 @@
34 34
35#include "br_private.h" 35#include "br_private.h"
36 36
37static void br_multicast_start_querier(struct net_bridge *br); 37static void br_multicast_start_querier(struct net_bridge *br,
38 struct bridge_mcast_query *query);
38unsigned int br_mdb_rehash_seq; 39unsigned int br_mdb_rehash_seq;
39 40
40static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) 41static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -756,20 +757,35 @@ static void br_multicast_local_router_expired(unsigned long data)
756{ 757{
757} 758}
758 759
759static void br_multicast_querier_expired(unsigned long data) 760static void br_multicast_querier_expired(struct net_bridge *br,
761 struct bridge_mcast_query *query)
760{ 762{
761 struct net_bridge *br = (void *)data;
762
763 spin_lock(&br->multicast_lock); 763 spin_lock(&br->multicast_lock);
764 if (!netif_running(br->dev) || br->multicast_disabled) 764 if (!netif_running(br->dev) || br->multicast_disabled)
765 goto out; 765 goto out;
766 766
767 br_multicast_start_querier(br); 767 br_multicast_start_querier(br, query);
768 768
769out: 769out:
770 spin_unlock(&br->multicast_lock); 770 spin_unlock(&br->multicast_lock);
771} 771}
772 772
773static void br_ip4_multicast_querier_expired(unsigned long data)
774{
775 struct net_bridge *br = (void *)data;
776
777 br_multicast_querier_expired(br, &br->ip4_query);
778}
779
780#if IS_ENABLED(CONFIG_IPV6)
781static void br_ip6_multicast_querier_expired(unsigned long data)
782{
783 struct net_bridge *br = (void *)data;
784
785 br_multicast_querier_expired(br, &br->ip6_query);
786}
787#endif
788
773static void __br_multicast_send_query(struct net_bridge *br, 789static void __br_multicast_send_query(struct net_bridge *br,
774 struct net_bridge_port *port, 790 struct net_bridge_port *port,
775 struct br_ip *ip) 791 struct br_ip *ip)
@@ -790,37 +806,45 @@ static void __br_multicast_send_query(struct net_bridge *br,
790} 806}
791 807
792static void br_multicast_send_query(struct net_bridge *br, 808static void br_multicast_send_query(struct net_bridge *br,
793 struct net_bridge_port *port, u32 sent) 809 struct net_bridge_port *port,
810 struct bridge_mcast_query *query)
794{ 811{
795 unsigned long time; 812 unsigned long time;
796 struct br_ip br_group; 813 struct br_ip br_group;
814 struct bridge_mcast_querier *querier = NULL;
797 815
798 if (!netif_running(br->dev) || br->multicast_disabled || 816 if (!netif_running(br->dev) || br->multicast_disabled ||
799 !br->multicast_querier || 817 !br->multicast_querier)
800 timer_pending(&br->multicast_querier_timer))
801 return; 818 return;
802 819
803 memset(&br_group.u, 0, sizeof(br_group.u)); 820 memset(&br_group.u, 0, sizeof(br_group.u));
804 821
805 br_group.proto = htons(ETH_P_IP); 822 if (port ? (query == &port->ip4_query) :
806 __br_multicast_send_query(br, port, &br_group); 823 (query == &br->ip4_query)) {
807 824 querier = &br->ip4_querier;
825 br_group.proto = htons(ETH_P_IP);
808#if IS_ENABLED(CONFIG_IPV6) 826#if IS_ENABLED(CONFIG_IPV6)
809 br_group.proto = htons(ETH_P_IPV6); 827 } else {
810 __br_multicast_send_query(br, port, &br_group); 828 querier = &br->ip6_querier;
829 br_group.proto = htons(ETH_P_IPV6);
811#endif 830#endif
831 }
832
833 if (!querier || timer_pending(&querier->timer))
834 return;
835
836 __br_multicast_send_query(br, port, &br_group);
812 837
813 time = jiffies; 838 time = jiffies;
814 time += sent < br->multicast_startup_query_count ? 839 time += query->startup_sent < br->multicast_startup_query_count ?
815 br->multicast_startup_query_interval : 840 br->multicast_startup_query_interval :
816 br->multicast_query_interval; 841 br->multicast_query_interval;
817 mod_timer(port ? &port->multicast_query_timer : 842 mod_timer(&query->timer, time);
818 &br->multicast_query_timer, time);
819} 843}
820 844
821static void br_multicast_port_query_expired(unsigned long data) 845static void br_multicast_port_query_expired(struct net_bridge_port *port,
846 struct bridge_mcast_query *query)
822{ 847{
823 struct net_bridge_port *port = (void *)data;
824 struct net_bridge *br = port->br; 848 struct net_bridge *br = port->br;
825 849
826 spin_lock(&br->multicast_lock); 850 spin_lock(&br->multicast_lock);
@@ -828,25 +852,43 @@ static void br_multicast_port_query_expired(unsigned long data)
828 port->state == BR_STATE_BLOCKING) 852 port->state == BR_STATE_BLOCKING)
829 goto out; 853 goto out;
830 854
831 if (port->multicast_startup_queries_sent < 855 if (query->startup_sent < br->multicast_startup_query_count)
832 br->multicast_startup_query_count) 856 query->startup_sent++;
833 port->multicast_startup_queries_sent++;
834 857
835 br_multicast_send_query(port->br, port, 858 br_multicast_send_query(port->br, port, query);
836 port->multicast_startup_queries_sent);
837 859
838out: 860out:
839 spin_unlock(&br->multicast_lock); 861 spin_unlock(&br->multicast_lock);
840} 862}
841 863
864static void br_ip4_multicast_port_query_expired(unsigned long data)
865{
866 struct net_bridge_port *port = (void *)data;
867
868 br_multicast_port_query_expired(port, &port->ip4_query);
869}
870
871#if IS_ENABLED(CONFIG_IPV6)
872static void br_ip6_multicast_port_query_expired(unsigned long data)
873{
874 struct net_bridge_port *port = (void *)data;
875
876 br_multicast_port_query_expired(port, &port->ip6_query);
877}
878#endif
879
842void br_multicast_add_port(struct net_bridge_port *port) 880void br_multicast_add_port(struct net_bridge_port *port)
843{ 881{
844 port->multicast_router = 1; 882 port->multicast_router = 1;
845 883
846 setup_timer(&port->multicast_router_timer, br_multicast_router_expired, 884 setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
847 (unsigned long)port); 885 (unsigned long)port);
848 setup_timer(&port->multicast_query_timer, 886 setup_timer(&port->ip4_query.timer, br_ip4_multicast_port_query_expired,
849 br_multicast_port_query_expired, (unsigned long)port); 887 (unsigned long)port);
888#if IS_ENABLED(CONFIG_IPV6)
889 setup_timer(&port->ip6_query.timer, br_ip6_multicast_port_query_expired,
890 (unsigned long)port);
891#endif
850} 892}
851 893
852void br_multicast_del_port(struct net_bridge_port *port) 894void br_multicast_del_port(struct net_bridge_port *port)
@@ -854,13 +896,13 @@ void br_multicast_del_port(struct net_bridge_port *port)
854 del_timer_sync(&port->multicast_router_timer); 896 del_timer_sync(&port->multicast_router_timer);
855} 897}
856 898
857static void __br_multicast_enable_port(struct net_bridge_port *port) 899static void br_multicast_enable(struct bridge_mcast_query *query)
858{ 900{
859 port->multicast_startup_queries_sent = 0; 901 query->startup_sent = 0;
860 902
861 if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 || 903 if (try_to_del_timer_sync(&query->timer) >= 0 ||
862 del_timer(&port->multicast_query_timer)) 904 del_timer(&query->timer))
863 mod_timer(&port->multicast_query_timer, jiffies); 905 mod_timer(&query->timer, jiffies);
864} 906}
865 907
866void br_multicast_enable_port(struct net_bridge_port *port) 908void br_multicast_enable_port(struct net_bridge_port *port)
@@ -871,7 +913,10 @@ void br_multicast_enable_port(struct net_bridge_port *port)
871 if (br->multicast_disabled || !netif_running(br->dev)) 913 if (br->multicast_disabled || !netif_running(br->dev))
872 goto out; 914 goto out;
873 915
874 __br_multicast_enable_port(port); 916 br_multicast_enable(&port->ip4_query);
917#if IS_ENABLED(CONFIG_IPV6)
918 br_multicast_enable(&port->ip6_query);
919#endif
875 920
876out: 921out:
877 spin_unlock(&br->multicast_lock); 922 spin_unlock(&br->multicast_lock);
@@ -890,7 +935,10 @@ void br_multicast_disable_port(struct net_bridge_port *port)
890 if (!hlist_unhashed(&port->rlist)) 935 if (!hlist_unhashed(&port->rlist))
891 hlist_del_init_rcu(&port->rlist); 936 hlist_del_init_rcu(&port->rlist);
892 del_timer(&port->multicast_router_timer); 937 del_timer(&port->multicast_router_timer);
893 del_timer(&port->multicast_query_timer); 938 del_timer(&port->ip4_query.timer);
939#if IS_ENABLED(CONFIG_IPV6)
940 del_timer(&port->ip6_query.timer);
941#endif
894 spin_unlock(&br->multicast_lock); 942 spin_unlock(&br->multicast_lock);
895} 943}
896 944
@@ -1015,14 +1063,15 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1015} 1063}
1016#endif 1064#endif
1017 1065
1018static void br_multicast_update_querier_timer(struct net_bridge *br, 1066static void
1019 unsigned long max_delay) 1067br_multicast_update_querier_timer(struct net_bridge *br,
1068 struct bridge_mcast_querier *querier,
1069 unsigned long max_delay)
1020{ 1070{
1021 if (!timer_pending(&br->multicast_querier_timer)) 1071 if (!timer_pending(&querier->timer))
1022 br->multicast_querier_delay_time = jiffies + max_delay; 1072 querier->delay_time = jiffies + max_delay;
1023 1073
1024 mod_timer(&br->multicast_querier_timer, 1074 mod_timer(&querier->timer, jiffies + br->multicast_querier_interval);
1025 jiffies + br->multicast_querier_interval);
1026} 1075}
1027 1076
1028/* 1077/*
@@ -1075,12 +1124,13 @@ timer:
1075 1124
1076static void br_multicast_query_received(struct net_bridge *br, 1125static void br_multicast_query_received(struct net_bridge *br,
1077 struct net_bridge_port *port, 1126 struct net_bridge_port *port,
1127 struct bridge_mcast_querier *querier,
1078 int saddr, 1128 int saddr,
1079 unsigned long max_delay) 1129 unsigned long max_delay)
1080{ 1130{
1081 if (saddr) 1131 if (saddr)
1082 br_multicast_update_querier_timer(br, max_delay); 1132 br_multicast_update_querier_timer(br, querier, max_delay);
1083 else if (timer_pending(&br->multicast_querier_timer)) 1133 else if (timer_pending(&querier->timer))
1084 return; 1134 return;
1085 1135
1086 br_multicast_mark_router(br, port); 1136 br_multicast_mark_router(br, port);
@@ -1130,7 +1180,8 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1130 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; 1180 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
1131 } 1181 }
1132 1182
1133 br_multicast_query_received(br, port, !!iph->saddr, max_delay); 1183 br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
1184 max_delay);
1134 1185
1135 if (!group) 1186 if (!group)
1136 goto out; 1187 goto out;
@@ -1208,8 +1259,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1208 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL); 1259 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
1209 } 1260 }
1210 1261
1211 br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr), 1262 br_multicast_query_received(br, port, &br->ip6_querier,
1212 max_delay); 1263 !ipv6_addr_any(&ip6h->saddr), max_delay);
1213 1264
1214 if (!group) 1265 if (!group)
1215 goto out; 1266 goto out;
@@ -1246,7 +1297,9 @@ out:
1246 1297
1247static void br_multicast_leave_group(struct net_bridge *br, 1298static void br_multicast_leave_group(struct net_bridge *br,
1248 struct net_bridge_port *port, 1299 struct net_bridge_port *port,
1249 struct br_ip *group) 1300 struct br_ip *group,
1301 struct bridge_mcast_querier *querier,
1302 struct bridge_mcast_query *query)
1250{ 1303{
1251 struct net_bridge_mdb_htable *mdb; 1304 struct net_bridge_mdb_htable *mdb;
1252 struct net_bridge_mdb_entry *mp; 1305 struct net_bridge_mdb_entry *mp;
@@ -1257,7 +1310,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
1257 spin_lock(&br->multicast_lock); 1310 spin_lock(&br->multicast_lock);
1258 if (!netif_running(br->dev) || 1311 if (!netif_running(br->dev) ||
1259 (port && port->state == BR_STATE_DISABLED) || 1312 (port && port->state == BR_STATE_DISABLED) ||
1260 timer_pending(&br->multicast_querier_timer)) 1313 timer_pending(&querier->timer))
1261 goto out; 1314 goto out;
1262 1315
1263 mdb = mlock_dereference(br->mdb, br); 1316 mdb = mlock_dereference(br->mdb, br);
@@ -1265,14 +1318,13 @@ static void br_multicast_leave_group(struct net_bridge *br,
1265 if (!mp) 1318 if (!mp)
1266 goto out; 1319 goto out;
1267 1320
1268 if (br->multicast_querier && 1321 if (br->multicast_querier) {
1269 !timer_pending(&br->multicast_querier_timer)) {
1270 __br_multicast_send_query(br, port, &mp->addr); 1322 __br_multicast_send_query(br, port, &mp->addr);
1271 1323
1272 time = jiffies + br->multicast_last_member_count * 1324 time = jiffies + br->multicast_last_member_count *
1273 br->multicast_last_member_interval; 1325 br->multicast_last_member_interval;
1274 mod_timer(port ? &port->multicast_query_timer : 1326
1275 &br->multicast_query_timer, time); 1327 mod_timer(&query->timer, time);
1276 1328
1277 for (p = mlock_dereference(mp->ports, br); 1329 for (p = mlock_dereference(mp->ports, br);
1278 p != NULL; 1330 p != NULL;
@@ -1325,7 +1377,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
1325 mod_timer(&mp->timer, time); 1377 mod_timer(&mp->timer, time);
1326 } 1378 }
1327 } 1379 }
1328
1329out: 1380out:
1330 spin_unlock(&br->multicast_lock); 1381 spin_unlock(&br->multicast_lock);
1331} 1382}
@@ -1336,6 +1387,8 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
1336 __u16 vid) 1387 __u16 vid)
1337{ 1388{
1338 struct br_ip br_group; 1389 struct br_ip br_group;
1390 struct bridge_mcast_query *query = port ? &port->ip4_query :
1391 &br->ip4_query;
1339 1392
1340 if (ipv4_is_local_multicast(group)) 1393 if (ipv4_is_local_multicast(group))
1341 return; 1394 return;
@@ -1344,7 +1397,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
1344 br_group.proto = htons(ETH_P_IP); 1397 br_group.proto = htons(ETH_P_IP);
1345 br_group.vid = vid; 1398 br_group.vid = vid;
1346 1399
1347 br_multicast_leave_group(br, port, &br_group); 1400 br_multicast_leave_group(br, port, &br_group, &br->ip4_querier, query);
1348} 1401}
1349 1402
1350#if IS_ENABLED(CONFIG_IPV6) 1403#if IS_ENABLED(CONFIG_IPV6)
@@ -1354,6 +1407,9 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
1354 __u16 vid) 1407 __u16 vid)
1355{ 1408{
1356 struct br_ip br_group; 1409 struct br_ip br_group;
1410 struct bridge_mcast_query *query = port ? &port->ip6_query :
1411 &br->ip6_query;
1412
1357 1413
1358 if (ipv6_addr_is_ll_all_nodes(group)) 1414 if (ipv6_addr_is_ll_all_nodes(group))
1359 return; 1415 return;
@@ -1362,7 +1418,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
1362 br_group.proto = htons(ETH_P_IPV6); 1418 br_group.proto = htons(ETH_P_IPV6);
1363 br_group.vid = vid; 1419 br_group.vid = vid;
1364 1420
1365 br_multicast_leave_group(br, port, &br_group); 1421 br_multicast_leave_group(br, port, &br_group, &br->ip6_querier, query);
1366} 1422}
1367#endif 1423#endif
1368 1424
@@ -1630,19 +1686,32 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1630 return 0; 1686 return 0;
1631} 1687}
1632 1688
1633static void br_multicast_query_expired(unsigned long data) 1689static void br_multicast_query_expired(struct net_bridge *br,
1690 struct bridge_mcast_query *query)
1691{
1692 spin_lock(&br->multicast_lock);
1693 if (query->startup_sent < br->multicast_startup_query_count)
1694 query->startup_sent++;
1695
1696 br_multicast_send_query(br, NULL, query);
1697 spin_unlock(&br->multicast_lock);
1698}
1699
1700static void br_ip4_multicast_query_expired(unsigned long data)
1634{ 1701{
1635 struct net_bridge *br = (void *)data; 1702 struct net_bridge *br = (void *)data;
1636 1703
1637 spin_lock(&br->multicast_lock); 1704 br_multicast_query_expired(br, &br->ip4_query);
1638 if (br->multicast_startup_queries_sent < 1705}
1639 br->multicast_startup_query_count)
1640 br->multicast_startup_queries_sent++;
1641 1706
1642 br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent); 1707#if IS_ENABLED(CONFIG_IPV6)
1708static void br_ip6_multicast_query_expired(unsigned long data)
1709{
1710 struct net_bridge *br = (void *)data;
1643 1711
1644 spin_unlock(&br->multicast_lock); 1712 br_multicast_query_expired(br, &br->ip6_query);
1645} 1713}
1714#endif
1646 1715
1647void br_multicast_init(struct net_bridge *br) 1716void br_multicast_init(struct net_bridge *br)
1648{ 1717{
@@ -1662,25 +1731,43 @@ void br_multicast_init(struct net_bridge *br)
1662 br->multicast_querier_interval = 255 * HZ; 1731 br->multicast_querier_interval = 255 * HZ;
1663 br->multicast_membership_interval = 260 * HZ; 1732 br->multicast_membership_interval = 260 * HZ;
1664 1733
1665 br->multicast_querier_delay_time = 0; 1734 br->ip4_querier.delay_time = 0;
1735#if IS_ENABLED(CONFIG_IPV6)
1736 br->ip6_querier.delay_time = 0;
1737#endif
1666 1738
1667 spin_lock_init(&br->multicast_lock); 1739 spin_lock_init(&br->multicast_lock);
1668 setup_timer(&br->multicast_router_timer, 1740 setup_timer(&br->multicast_router_timer,
1669 br_multicast_local_router_expired, 0); 1741 br_multicast_local_router_expired, 0);
1670 setup_timer(&br->multicast_querier_timer, 1742 setup_timer(&br->ip4_querier.timer, br_ip4_multicast_querier_expired,
1671 br_multicast_querier_expired, (unsigned long)br); 1743 (unsigned long)br);
1672 setup_timer(&br->multicast_query_timer, br_multicast_query_expired, 1744 setup_timer(&br->ip4_query.timer, br_ip4_multicast_query_expired,
1673 (unsigned long)br); 1745 (unsigned long)br);
1746#if IS_ENABLED(CONFIG_IPV6)
1747 setup_timer(&br->ip6_querier.timer, br_ip6_multicast_querier_expired,
1748 (unsigned long)br);
1749 setup_timer(&br->ip6_query.timer, br_ip6_multicast_query_expired,
1750 (unsigned long)br);
1751#endif
1674} 1752}
1675 1753
1676void br_multicast_open(struct net_bridge *br) 1754static void __br_multicast_open(struct net_bridge *br,
1755 struct bridge_mcast_query *query)
1677{ 1756{
1678 br->multicast_startup_queries_sent = 0; 1757 query->startup_sent = 0;
1679 1758
1680 if (br->multicast_disabled) 1759 if (br->multicast_disabled)
1681 return; 1760 return;
1682 1761
1683 mod_timer(&br->multicast_query_timer, jiffies); 1762 mod_timer(&query->timer, jiffies);
1763}
1764
1765void br_multicast_open(struct net_bridge *br)
1766{
1767 __br_multicast_open(br, &br->ip4_query);
1768#if IS_ENABLED(CONFIG_IPV6)
1769 __br_multicast_open(br, &br->ip6_query);
1770#endif
1684} 1771}
1685 1772
1686void br_multicast_stop(struct net_bridge *br) 1773void br_multicast_stop(struct net_bridge *br)
@@ -1692,8 +1779,12 @@ void br_multicast_stop(struct net_bridge *br)
1692 int i; 1779 int i;
1693 1780
1694 del_timer_sync(&br->multicast_router_timer); 1781 del_timer_sync(&br->multicast_router_timer);
1695 del_timer_sync(&br->multicast_querier_timer); 1782 del_timer_sync(&br->ip4_querier.timer);
1696 del_timer_sync(&br->multicast_query_timer); 1783 del_timer_sync(&br->ip4_query.timer);
1784#if IS_ENABLED(CONFIG_IPV6)
1785 del_timer_sync(&br->ip6_querier.timer);
1786 del_timer_sync(&br->ip6_query.timer);
1787#endif
1697 1788
1698 spin_lock_bh(&br->multicast_lock); 1789 spin_lock_bh(&br->multicast_lock);
1699 mdb = mlock_dereference(br->mdb, br); 1790 mdb = mlock_dereference(br->mdb, br);
@@ -1796,18 +1887,24 @@ unlock:
1796 return err; 1887 return err;
1797} 1888}
1798 1889
1799static void br_multicast_start_querier(struct net_bridge *br) 1890static void br_multicast_start_querier(struct net_bridge *br,
1891 struct bridge_mcast_query *query)
1800{ 1892{
1801 struct net_bridge_port *port; 1893 struct net_bridge_port *port;
1802 1894
1803 br_multicast_open(br); 1895 __br_multicast_open(br, query);
1804 1896
1805 list_for_each_entry(port, &br->port_list, list) { 1897 list_for_each_entry(port, &br->port_list, list) {
1806 if (port->state == BR_STATE_DISABLED || 1898 if (port->state == BR_STATE_DISABLED ||
1807 port->state == BR_STATE_BLOCKING) 1899 port->state == BR_STATE_BLOCKING)
1808 continue; 1900 continue;
1809 1901
1810 __br_multicast_enable_port(port); 1902 if (query == &br->ip4_query)
1903 br_multicast_enable(&port->ip4_query);
1904#if IS_ENABLED(CONFIG_IPV6)
1905 else
1906 br_multicast_enable(&port->ip6_query);
1907#endif
1811 } 1908 }
1812} 1909}
1813 1910
@@ -1842,7 +1939,10 @@ rollback:
1842 goto rollback; 1939 goto rollback;
1843 } 1940 }
1844 1941
1845 br_multicast_start_querier(br); 1942 br_multicast_start_querier(br, &br->ip4_query);
1943#if IS_ENABLED(CONFIG_IPV6)
1944 br_multicast_start_querier(br, &br->ip6_query);
1945#endif
1846 1946
1847unlock: 1947unlock:
1848 spin_unlock_bh(&br->multicast_lock); 1948 spin_unlock_bh(&br->multicast_lock);
@@ -1865,10 +1965,18 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
1865 goto unlock; 1965 goto unlock;
1866 1966
1867 max_delay = br->multicast_query_response_interval; 1967 max_delay = br->multicast_query_response_interval;
1868 if (!timer_pending(&br->multicast_querier_timer))
1869 br->multicast_querier_delay_time = jiffies + max_delay;
1870 1968
1871 br_multicast_start_querier(br); 1969 if (!timer_pending(&br->ip4_querier.timer))
1970 br->ip4_querier.delay_time = jiffies + max_delay;
1971
1972 br_multicast_start_querier(br, &br->ip4_query);
1973
1974#if IS_ENABLED(CONFIG_IPV6)
1975 if (!timer_pending(&br->ip6_querier.timer))
1976 br->ip6_querier.delay_time = jiffies + max_delay;
1977
1978 br_multicast_start_querier(br, &br->ip6_query);
1979#endif
1872 1980
1873unlock: 1981unlock:
1874 spin_unlock_bh(&br->multicast_lock); 1982 spin_unlock_bh(&br->multicast_lock);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 89b2949be02f..598cb0b333c6 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -66,6 +66,20 @@ struct br_ip
66 __u16 vid; 66 __u16 vid;
67}; 67};
68 68
69#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
70/* our own querier */
71struct bridge_mcast_query {
72 struct timer_list timer;
73 u32 startup_sent;
74};
75
76/* other querier */
77struct bridge_mcast_querier {
78 struct timer_list timer;
79 unsigned long delay_time;
80};
81#endif
82
69struct net_port_vlans { 83struct net_port_vlans {
70 u16 port_idx; 84 u16 port_idx;
71 u16 pvid; 85 u16 pvid;
@@ -162,10 +176,12 @@ struct net_bridge_port
162#define BR_FLOOD 0x00000040 176#define BR_FLOOD 0x00000040
163 177
164#ifdef CONFIG_BRIDGE_IGMP_SNOOPING 178#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
165 u32 multicast_startup_queries_sent; 179 struct bridge_mcast_query ip4_query;
180#if IS_ENABLED(CONFIG_IPV6)
181 struct bridge_mcast_query ip6_query;
182#endif /* IS_ENABLED(CONFIG_IPV6) */
166 unsigned char multicast_router; 183 unsigned char multicast_router;
167 struct timer_list multicast_router_timer; 184 struct timer_list multicast_router_timer;
168 struct timer_list multicast_query_timer;
169 struct hlist_head mglist; 185 struct hlist_head mglist;
170 struct hlist_node rlist; 186 struct hlist_node rlist;
171#endif 187#endif
@@ -258,7 +274,6 @@ struct net_bridge
258 u32 hash_max; 274 u32 hash_max;
259 275
260 u32 multicast_last_member_count; 276 u32 multicast_last_member_count;
261 u32 multicast_startup_queries_sent;
262 u32 multicast_startup_query_count; 277 u32 multicast_startup_query_count;
263 278
264 unsigned long multicast_last_member_interval; 279 unsigned long multicast_last_member_interval;
@@ -267,15 +282,18 @@ struct net_bridge
267 unsigned long multicast_query_interval; 282 unsigned long multicast_query_interval;
268 unsigned long multicast_query_response_interval; 283 unsigned long multicast_query_response_interval;
269 unsigned long multicast_startup_query_interval; 284 unsigned long multicast_startup_query_interval;
270 unsigned long multicast_querier_delay_time;
271 285
272 spinlock_t multicast_lock; 286 spinlock_t multicast_lock;
273 struct net_bridge_mdb_htable __rcu *mdb; 287 struct net_bridge_mdb_htable __rcu *mdb;
274 struct hlist_head router_list; 288 struct hlist_head router_list;
275 289
276 struct timer_list multicast_router_timer; 290 struct timer_list multicast_router_timer;
277 struct timer_list multicast_querier_timer; 291 struct bridge_mcast_querier ip4_querier;
278 struct timer_list multicast_query_timer; 292 struct bridge_mcast_query ip4_query;
293#if IS_ENABLED(CONFIG_IPV6)
294 struct bridge_mcast_querier ip6_querier;
295 struct bridge_mcast_query ip6_query;
296#endif /* IS_ENABLED(CONFIG_IPV6) */
279#endif 297#endif
280 298
281 struct timer_list hello_timer; 299 struct timer_list hello_timer;
@@ -483,11 +501,27 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
483 timer_pending(&br->multicast_router_timer)); 501 timer_pending(&br->multicast_router_timer));
484} 502}
485 503
486static inline bool br_multicast_querier_exists(struct net_bridge *br) 504static inline bool
505__br_multicast_querier_exists(struct net_bridge *br,
506 struct bridge_mcast_querier *querier)
507{
508 return time_is_before_jiffies(querier->delay_time) &&
509 (br->multicast_querier || timer_pending(&querier->timer));
510}
511
512static inline bool br_multicast_querier_exists(struct net_bridge *br,
513 struct ethhdr *eth)
487{ 514{
488 return time_is_before_jiffies(br->multicast_querier_delay_time) && 515 switch (eth->h_proto) {
489 (br->multicast_querier || 516 case (htons(ETH_P_IP)):
490 timer_pending(&br->multicast_querier_timer)); 517 return __br_multicast_querier_exists(br, &br->ip4_querier);
518#if IS_ENABLED(CONFIG_IPV6)
519 case (htons(ETH_P_IPV6)):
520 return __br_multicast_querier_exists(br, &br->ip6_querier);
521#endif
522 default:
523 return false;
524 }
491} 525}
492#else 526#else
493static inline int br_multicast_rcv(struct net_bridge *br, 527static inline int br_multicast_rcv(struct net_bridge *br,
@@ -545,7 +579,8 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
545{ 579{
546 return 0; 580 return 0;
547} 581}
548static inline bool br_multicast_querier_exists(struct net_bridge *br) 582static inline bool br_multicast_querier_exists(struct net_bridge *br,
583 struct ethhdr *eth)
549{ 584{
550 return false; 585 return false;
551} 586}
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 2bd4b58f4372..0f455227da83 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -293,9 +293,10 @@ int cfctrl_linkup_request(struct cflayer *layer,
293 293
294 count = cfctrl_cancel_req(&cfctrl->serv.layer, 294 count = cfctrl_cancel_req(&cfctrl->serv.layer,
295 user_layer); 295 user_layer);
296 if (count != 1) 296 if (count != 1) {
297 pr_err("Could not remove request (%d)", count); 297 pr_err("Could not remove request (%d)", count);
298 return -ENODEV; 298 return -ENODEV;
299 }
299 } 300 }
300 return 0; 301 return 0;
301} 302}
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 159737cac76c..0ff42f029ace 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -350,14 +350,9 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
350 if (new_index < 0) 350 if (new_index < 0)
351 new_index = skb_tx_hash(dev, skb); 351 new_index = skb_tx_hash(dev, skb);
352 352
353 if (queue_index != new_index && sk) { 353 if (queue_index != new_index && sk &&
354 struct dst_entry *dst = 354 rcu_access_pointer(sk->sk_dst_cache))
355 rcu_dereference_check(sk->sk_dst_cache, 1); 355 sk_tx_queue_set(sk, queue_index);
356
357 if (dst && skb_dst(skb) == dst)
358 sk_tx_queue_set(sk, queue_index);
359
360 }
361 356
362 queue_index = new_index; 357 queue_index = new_index;
363 } 358 }
diff --git a/net/core/scm.c b/net/core/scm.c
index 03795d0147f2..b4da80b1cc07 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -54,7 +54,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
54 return -EINVAL; 54 return -EINVAL;
55 55
56 if ((creds->pid == task_tgid_vnr(current) || 56 if ((creds->pid == task_tgid_vnr(current) ||
57 ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) && 57 ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
58 ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || 58 ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
59 uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && 59 uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
60 ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || 60 ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 4bcabf3ab4ca..9ee17e3d11c3 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -211,14 +211,6 @@ static inline int ip_finish_output2(struct sk_buff *skb)
211 return -EINVAL; 211 return -EINVAL;
212} 212}
213 213
214static inline int ip_skb_dst_mtu(struct sk_buff *skb)
215{
216 struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
217
218 return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
219 skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
220}
221
222static int ip_finish_output(struct sk_buff *skb) 214static int ip_finish_output(struct sk_buff *skb)
223{ 215{
224#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) 216#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 87bd2952c733..7f80fb4b82d3 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -190,15 +190,14 @@ static int ipip_rcv(struct sk_buff *skb)
190 struct ip_tunnel *tunnel; 190 struct ip_tunnel *tunnel;
191 const struct iphdr *iph; 191 const struct iphdr *iph;
192 192
193 if (iptunnel_pull_header(skb, 0, tpi.proto))
194 goto drop;
195
196 iph = ip_hdr(skb); 193 iph = ip_hdr(skb);
197 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 194 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
198 iph->saddr, iph->daddr, 0); 195 iph->saddr, iph->daddr, 0);
199 if (tunnel) { 196 if (tunnel) {
200 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 197 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
201 goto drop; 198 goto drop;
199 if (iptunnel_pull_header(skb, 0, tpi.proto))
200 goto drop;
202 return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); 201 return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
203 } 202 }
204 203
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 41d84505a922..a86c7ae71881 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -571,7 +571,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
571 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, 571 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
572 RT_SCOPE_UNIVERSE, 572 RT_SCOPE_UNIVERSE,
573 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, 573 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
574 inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP, 574 inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
575 (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
575 daddr, saddr, 0, 0); 576 daddr, saddr, 0, 0);
576 577
577 if (!inet->hdrincl) { 578 if (!inet->hdrincl) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index fdf74090a001..6e5617b9f9db 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2468,10 +2468,11 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2468 case TCP_THIN_DUPACK: 2468 case TCP_THIN_DUPACK:
2469 if (val < 0 || val > 1) 2469 if (val < 0 || val > 1)
2470 err = -EINVAL; 2470 err = -EINVAL;
2471 else 2471 else {
2472 tp->thin_dupack = val; 2472 tp->thin_dupack = val;
2473 if (tp->thin_dupack) 2473 if (tp->thin_dupack)
2474 tcp_disable_early_retrans(tp); 2474 tcp_disable_early_retrans(tp);
2475 }
2475 break; 2476 break;
2476 2477
2477 case TCP_REPAIR: 2478 case TCP_REPAIR:
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 93d7e9de4143..1969e16d936d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3574,7 +3574,10 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
3574 ++ptr; 3574 ++ptr;
3575 tp->rx_opt.rcv_tsval = ntohl(*ptr); 3575 tp->rx_opt.rcv_tsval = ntohl(*ptr);
3576 ++ptr; 3576 ++ptr;
3577 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; 3577 if (*ptr)
3578 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
3579 else
3580 tp->rx_opt.rcv_tsecr = 0;
3578 return true; 3581 return true;
3579 } 3582 }
3580 return false; 3583 return false;
@@ -3599,7 +3602,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
3599 } 3602 }
3600 3603
3601 tcp_parse_options(skb, &tp->rx_opt, 1, NULL); 3604 tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
3602 if (tp->rx_opt.saw_tstamp) 3605 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
3603 tp->rx_opt.rcv_tsecr -= tp->tsoffset; 3606 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
3604 3607
3605 return true; 3608 return true;
@@ -5354,7 +5357,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5354 int saved_clamp = tp->rx_opt.mss_clamp; 5357 int saved_clamp = tp->rx_opt.mss_clamp;
5355 5358
5356 tcp_parse_options(skb, &tp->rx_opt, 0, &foc); 5359 tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
5357 if (tp->rx_opt.saw_tstamp) 5360 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
5358 tp->rx_opt.rcv_tsecr -= tp->tsoffset; 5361 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
5359 5362
5360 if (th->ack) { 5363 if (th->ack) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e63ae4c9691d..7c83cb8bf137 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2673,7 +2673,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2673 int tcp_header_size; 2673 int tcp_header_size;
2674 int mss; 2674 int mss;
2675 2675
2676 skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC)); 2676 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
2677 if (unlikely(!skb)) { 2677 if (unlikely(!skb)) {
2678 dst_release(dst); 2678 dst_release(dst);
2679 return NULL; 2679 return NULL;
@@ -2817,6 +2817,8 @@ void tcp_connect_init(struct sock *sk)
2817 2817
2818 if (likely(!tp->repair)) 2818 if (likely(!tp->repair))
2819 tp->rcv_nxt = 0; 2819 tp->rcv_nxt = 0;
2820 else
2821 tp->rcv_tstamp = tcp_time_stamp;
2820 tp->rcv_wup = tp->rcv_nxt; 2822 tp->rcv_wup = tp->rcv_nxt;
2821 tp->copied_seq = tp->rcv_nxt; 2823 tp->copied_seq = tp->rcv_nxt;
2822 2824
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 327a617d594c..baa0f63731fd 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -21,7 +21,6 @@
21static int xfrm4_tunnel_check_size(struct sk_buff *skb) 21static int xfrm4_tunnel_check_size(struct sk_buff *skb)
22{ 22{
23 int mtu, ret = 0; 23 int mtu, ret = 0;
24 struct dst_entry *dst;
25 24
26 if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE) 25 if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
27 goto out; 26 goto out;
@@ -29,12 +28,10 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
29 if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) 28 if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
30 goto out; 29 goto out;
31 30
32 dst = skb_dst(skb); 31 mtu = dst_mtu(skb_dst(skb));
33 mtu = dst_mtu(dst);
34 if (skb->len > mtu) { 32 if (skb->len > mtu) {
35 if (skb->sk) 33 if (skb->sk)
36 ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr, 34 xfrm_local_error(skb, mtu);
37 inet_sk(skb->sk)->inet_dport, mtu);
38 else 35 else
39 icmp_send(skb, ICMP_DEST_UNREACH, 36 icmp_send(skb, ICMP_DEST_UNREACH,
40 ICMP_FRAG_NEEDED, htonl(mtu)); 37 ICMP_FRAG_NEEDED, htonl(mtu));
@@ -99,3 +96,12 @@ int xfrm4_output(struct sk_buff *skb)
99 x->outer_mode->afinfo->output_finish, 96 x->outer_mode->afinfo->output_finish,
100 !(IPCB(skb)->flags & IPSKB_REROUTED)); 97 !(IPCB(skb)->flags & IPSKB_REROUTED));
101} 98}
99
100void xfrm4_local_error(struct sk_buff *skb, u32 mtu)
101{
102 struct iphdr *hdr;
103
104 hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
105 ip_local_error(skb->sk, EMSGSIZE, hdr->daddr,
106 inet_sk(skb->sk)->inet_dport, mtu);
107}
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 9258e751baba..0b2a0641526a 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -83,6 +83,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
83 .extract_input = xfrm4_extract_input, 83 .extract_input = xfrm4_extract_input,
84 .extract_output = xfrm4_extract_output, 84 .extract_output = xfrm4_extract_output,
85 .transport_finish = xfrm4_transport_finish, 85 .transport_finish = xfrm4_transport_finish,
86 .local_error = xfrm4_local_error,
86}; 87};
87 88
88void __init xfrm4_state_init(void) 89void __init xfrm4_state_init(void)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2a66eaad047f..d6ff12617f36 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1022,7 +1022,6 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
1022 unsigned long regen_advance; 1022 unsigned long regen_advance;
1023 int tmp_plen; 1023 int tmp_plen;
1024 int ret = 0; 1024 int ret = 0;
1025 int max_addresses;
1026 u32 addr_flags; 1025 u32 addr_flags;
1027 unsigned long now = jiffies; 1026 unsigned long now = jiffies;
1028 1027
@@ -1068,7 +1067,6 @@ retry:
1068 idev->cnf.temp_prefered_lft + age - 1067 idev->cnf.temp_prefered_lft + age -
1069 idev->cnf.max_desync_factor); 1068 idev->cnf.max_desync_factor);
1070 tmp_plen = ifp->prefix_len; 1069 tmp_plen = ifp->prefix_len;
1071 max_addresses = idev->cnf.max_addresses;
1072 tmp_tstamp = ifp->tstamp; 1070 tmp_tstamp = ifp->tstamp;
1073 spin_unlock_bh(&ifp->lock); 1071 spin_unlock_bh(&ifp->lock);
1074 1072
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index f083a583a05c..b30ad3741b46 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -251,38 +251,36 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(struct net *net,
251/* add a label */ 251/* add a label */
252static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace) 252static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace)
253{ 253{
254 struct hlist_node *n;
255 struct ip6addrlbl_entry *last = NULL, *p = NULL;
254 int ret = 0; 256 int ret = 0;
255 257
256 ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n", 258 ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n", __func__, newp,
257 __func__, 259 replace);
258 newp, replace);
259 260
260 if (hlist_empty(&ip6addrlbl_table.head)) { 261 hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
261 hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head); 262 if (p->prefixlen == newp->prefixlen &&
262 } else { 263 net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
263 struct hlist_node *n; 264 p->ifindex == newp->ifindex &&
264 struct ip6addrlbl_entry *p = NULL; 265 ipv6_addr_equal(&p->prefix, &newp->prefix)) {
265 hlist_for_each_entry_safe(p, n, 266 if (!replace) {
266 &ip6addrlbl_table.head, list) { 267 ret = -EEXIST;
267 if (p->prefixlen == newp->prefixlen &&
268 net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
269 p->ifindex == newp->ifindex &&
270 ipv6_addr_equal(&p->prefix, &newp->prefix)) {
271 if (!replace) {
272 ret = -EEXIST;
273 goto out;
274 }
275 hlist_replace_rcu(&p->list, &newp->list);
276 ip6addrlbl_put(p);
277 goto out;
278 } else if ((p->prefixlen == newp->prefixlen && !p->ifindex) ||
279 (p->prefixlen < newp->prefixlen)) {
280 hlist_add_before_rcu(&newp->list, &p->list);
281 goto out; 268 goto out;
282 } 269 }
270 hlist_replace_rcu(&p->list, &newp->list);
271 ip6addrlbl_put(p);
272 goto out;
273 } else if ((p->prefixlen == newp->prefixlen && !p->ifindex) ||
274 (p->prefixlen < newp->prefixlen)) {
275 hlist_add_before_rcu(&newp->list, &p->list);
276 goto out;
283 } 277 }
284 hlist_add_after_rcu(&p->list, &newp->list); 278 last = p;
285 } 279 }
280 if (last)
281 hlist_add_after_rcu(&last->list, &newp->list);
282 else
283 hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head);
286out: 284out:
287 if (!ret) 285 if (!ret)
288 ip6addrlbl_table.seq++; 286 ip6addrlbl_table.seq++;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 73681c227453..eef8d945b362 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -940,6 +940,14 @@ static const struct icmp6_err {
940 .err = ECONNREFUSED, 940 .err = ECONNREFUSED,
941 .fatal = 1, 941 .fatal = 1,
942 }, 942 },
943 { /* POLICY_FAIL */
944 .err = EACCES,
945 .fatal = 1,
946 },
947 { /* REJECT_ROUTE */
948 .err = EACCES,
949 .fatal = 1,
950 },
943}; 951};
944 952
945int icmpv6_err_convert(u8 type, u8 code, int *err) 953int icmpv6_err_convert(u8 type, u8 code, int *err)
@@ -951,7 +959,7 @@ int icmpv6_err_convert(u8 type, u8 code, int *err)
951 switch (type) { 959 switch (type) {
952 case ICMPV6_DEST_UNREACH: 960 case ICMPV6_DEST_UNREACH:
953 fatal = 1; 961 fatal = 1;
954 if (code <= ICMPV6_PORT_UNREACH) { 962 if (code < ARRAY_SIZE(tab_unreach)) {
955 *err = tab_unreach[code].err; 963 *err = tab_unreach[code].err;
956 fatal = tab_unreach[code].fatal; 964 fatal = tab_unreach[code].fatal;
957 } 965 }
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index db992a373011..6b26e9feafb9 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -722,6 +722,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
722 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); 722 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
723 } 723 }
724 724
725 if (likely(!skb->encapsulation)) {
726 skb_reset_inner_headers(skb);
727 skb->encapsulation = 1;
728 }
729
725 skb_push(skb, gre_hlen); 730 skb_push(skb, gre_hlen);
726 skb_reset_network_header(skb); 731 skb_reset_network_header(skb);
727 skb_set_transport_header(skb, sizeof(*ipv6h)); 732 skb_set_transport_header(skb, sizeof(*ipv6h));
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index dd08cfd8999e..3a692d529163 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -213,6 +213,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
213 hdr->saddr = fl6->saddr; 213 hdr->saddr = fl6->saddr;
214 hdr->daddr = *first_hop; 214 hdr->daddr = *first_hop;
215 215
216 skb->protocol = htons(ETH_P_IPV6);
216 skb->priority = sk->sk_priority; 217 skb->priority = sk->sk_priority;
217 skb->mark = sk->sk_mark; 218 skb->mark = sk->sk_mark;
218 219
@@ -1032,6 +1033,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1032 /* initialize protocol header pointer */ 1033 /* initialize protocol header pointer */
1033 skb->transport_header = skb->network_header + fragheaderlen; 1034 skb->transport_header = skb->network_header + fragheaderlen;
1034 1035
1036 skb->protocol = htons(ETH_P_IPV6);
1035 skb->ip_summed = CHECKSUM_PARTIAL; 1037 skb->ip_summed = CHECKSUM_PARTIAL;
1036 skb->csum = 0; 1038 skb->csum = 0;
1037 } 1039 }
@@ -1334,6 +1336,7 @@ alloc_new_skb:
1334 /* 1336 /*
1335 * Fill in the control structures 1337 * Fill in the control structures
1336 */ 1338 */
1339 skb->protocol = htons(ETH_P_IPV6);
1337 skb->ip_summed = CHECKSUM_NONE; 1340 skb->ip_summed = CHECKSUM_NONE;
1338 skb->csum = 0; 1341 skb->csum = 0;
1339 /* reserve for fragmentation and ipsec header */ 1342 /* reserve for fragmentation and ipsec header */
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 55999d923f26..61355f7f4da5 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1028,6 +1028,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
1028 init_tel_txopt(&opt, encap_limit); 1028 init_tel_txopt(&opt, encap_limit);
1029 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); 1029 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
1030 } 1030 }
1031
1032 if (likely(!skb->encapsulation)) {
1033 skb_reset_inner_headers(skb);
1034 skb->encapsulation = 1;
1035 }
1036
1031 skb_push(skb, sizeof(struct ipv6hdr)); 1037 skb_push(skb, sizeof(struct ipv6hdr));
1032 skb_reset_network_header(skb); 1038 skb_reset_network_header(skb);
1033 ipv6h = ipv6_hdr(skb); 1039 ipv6h = ipv6_hdr(skb);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index c4bc7a35cd56..12179457b2cd 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -372,14 +372,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
372 int tlen = dev->needed_tailroom; 372 int tlen = dev->needed_tailroom;
373 struct sock *sk = dev_net(dev)->ipv6.ndisc_sk; 373 struct sock *sk = dev_net(dev)->ipv6.ndisc_sk;
374 struct sk_buff *skb; 374 struct sk_buff *skb;
375 int err;
376 375
377 skb = sock_alloc_send_skb(sk, 376 skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC);
378 hlen + sizeof(struct ipv6hdr) + len + tlen,
379 1, &err);
380 if (!skb) { 377 if (!skb) {
381 ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb, err=%d\n", 378 ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb\n",
382 __func__, err); 379 __func__);
383 return NULL; 380 return NULL;
384 } 381 }
385 382
@@ -389,6 +386,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
389 skb_reserve(skb, hlen + sizeof(struct ipv6hdr)); 386 skb_reserve(skb, hlen + sizeof(struct ipv6hdr));
390 skb_reset_transport_header(skb); 387 skb_reset_transport_header(skb);
391 388
389 /* Manually assign socket ownership as we avoid calling
390 * sock_alloc_send_pskb() to bypass wmem buffer limits
391 */
392 skb_set_owner_w(skb, sk);
393
392 return skb; 394 return skb;
393} 395}
394 396
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index c1e533498203..58916bbb1728 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -633,6 +633,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
633 goto error; 633 goto error;
634 skb_reserve(skb, hlen); 634 skb_reserve(skb, hlen);
635 635
636 skb->protocol = htons(ETH_P_IPV6);
636 skb->priority = sk->sk_priority; 637 skb->priority = sk->sk_priority;
637 skb->mark = sk->sk_mark; 638 skb->mark = sk->sk_mark;
638 skb_dst_set(skb, &rt->dst); 639 skb_dst_set(skb, &rt->dst);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 19abcc9d6a1a..7ee5cb96db34 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -641,11 +641,7 @@ static int ipip_rcv(struct sk_buff *skb)
641 const struct iphdr *iph; 641 const struct iphdr *iph;
642 struct ip_tunnel *tunnel; 642 struct ip_tunnel *tunnel;
643 643
644 if (iptunnel_pull_header(skb, 0, tpi.proto))
645 goto drop;
646
647 iph = ip_hdr(skb); 644 iph = ip_hdr(skb);
648
649 tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, 645 tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
650 iph->saddr, iph->daddr); 646 iph->saddr, iph->daddr);
651 if (tunnel != NULL) { 647 if (tunnel != NULL) {
@@ -655,6 +651,8 @@ static int ipip_rcv(struct sk_buff *skb)
655 651
656 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 652 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
657 goto drop; 653 goto drop;
654 if (iptunnel_pull_header(skb, 0, tpi.proto))
655 goto drop;
658 return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); 656 return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
659 } 657 }
660 658
@@ -881,6 +879,11 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
881 ttl = iph6->hop_limit; 879 ttl = iph6->hop_limit;
882 tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); 880 tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
883 881
882 if (likely(!skb->encapsulation)) {
883 skb_reset_inner_headers(skb);
884 skb->encapsulation = 1;
885 }
886
884 err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos, 887 err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
885 ttl, df, !net_eq(tunnel->net, dev_net(dev))); 888 ttl, df, !net_eq(tunnel->net, dev_net(dev)));
886 iptunnel_xmit_stats(err, &dev->stats, dev->tstats); 889 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 9acdcedf9a14..5c71501fc917 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1425,7 +1425,7 @@ ipv6_pktoptions:
1425 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) 1425 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1426 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit; 1426 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1427 if (np->rxopt.bits.rxtclass) 1427 if (np->rxopt.bits.rxtclass)
1428 np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb)); 1428 np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(opt_skb));
1429 if (ipv6_opt_accepted(sk, opt_skb)) { 1429 if (ipv6_opt_accepted(sk, opt_skb)) {
1430 skb_set_owner_r(opt_skb, sk); 1430 skb_set_owner_r(opt_skb, sk);
1431 opt_skb = xchg(&np->pktoptions, opt_skb); 1431 opt_skb = xchg(&np->pktoptions, opt_skb);
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 8755a3079d0f..6cd625e37706 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -34,8 +34,10 @@ static int xfrm6_local_dontfrag(struct sk_buff *skb)
34 struct sock *sk = skb->sk; 34 struct sock *sk = skb->sk;
35 35
36 if (sk) { 36 if (sk) {
37 proto = sk->sk_protocol; 37 if (sk->sk_family != AF_INET6)
38 return 0;
38 39
40 proto = sk->sk_protocol;
39 if (proto == IPPROTO_UDP || proto == IPPROTO_RAW) 41 if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
40 return inet6_sk(sk)->dontfrag; 42 return inet6_sk(sk)->dontfrag;
41 } 43 }
@@ -54,13 +56,15 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
54 ipv6_local_rxpmtu(sk, &fl6, mtu); 56 ipv6_local_rxpmtu(sk, &fl6, mtu);
55} 57}
56 58
57static void xfrm6_local_error(struct sk_buff *skb, u32 mtu) 59void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
58{ 60{
59 struct flowi6 fl6; 61 struct flowi6 fl6;
62 const struct ipv6hdr *hdr;
60 struct sock *sk = skb->sk; 63 struct sock *sk = skb->sk;
61 64
65 hdr = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
62 fl6.fl6_dport = inet_sk(sk)->inet_dport; 66 fl6.fl6_dport = inet_sk(sk)->inet_dport;
63 fl6.daddr = ipv6_hdr(skb)->daddr; 67 fl6.daddr = hdr->daddr;
64 68
65 ipv6_local_error(sk, EMSGSIZE, &fl6, mtu); 69 ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
66} 70}
@@ -80,7 +84,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
80 if (xfrm6_local_dontfrag(skb)) 84 if (xfrm6_local_dontfrag(skb))
81 xfrm6_local_rxpmtu(skb, mtu); 85 xfrm6_local_rxpmtu(skb, mtu);
82 else if (skb->sk) 86 else if (skb->sk)
83 xfrm6_local_error(skb, mtu); 87 xfrm_local_error(skb, mtu);
84 else 88 else
85 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 89 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
86 ret = -EMSGSIZE; 90 ret = -EMSGSIZE;
@@ -136,13 +140,18 @@ static int __xfrm6_output(struct sk_buff *skb)
136{ 140{
137 struct dst_entry *dst = skb_dst(skb); 141 struct dst_entry *dst = skb_dst(skb);
138 struct xfrm_state *x = dst->xfrm; 142 struct xfrm_state *x = dst->xfrm;
139 int mtu = ip6_skb_dst_mtu(skb); 143 int mtu;
144
145 if (skb->protocol == htons(ETH_P_IPV6))
146 mtu = ip6_skb_dst_mtu(skb);
147 else
148 mtu = dst_mtu(skb_dst(skb));
140 149
141 if (skb->len > mtu && xfrm6_local_dontfrag(skb)) { 150 if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
142 xfrm6_local_rxpmtu(skb, mtu); 151 xfrm6_local_rxpmtu(skb, mtu);
143 return -EMSGSIZE; 152 return -EMSGSIZE;
144 } else if (!skb->local_df && skb->len > mtu && skb->sk) { 153 } else if (!skb->local_df && skb->len > mtu && skb->sk) {
145 xfrm6_local_error(skb, mtu); 154 xfrm_local_error(skb, mtu);
146 return -EMSGSIZE; 155 return -EMSGSIZE;
147 } 156 }
148 157
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index d8c70b8efc24..3fc970135fc6 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -183,6 +183,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
183 .extract_input = xfrm6_extract_input, 183 .extract_input = xfrm6_extract_input,
184 .extract_output = xfrm6_extract_output, 184 .extract_output = xfrm6_extract_output,
185 .transport_finish = xfrm6_transport_finish, 185 .transport_finish = xfrm6_transport_finish,
186 .local_error = xfrm6_local_error,
186}; 187};
187 188
188int __init xfrm6_state_init(void) 189int __init xfrm6_state_init(void)
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 512718adb0d5..0c741cec4d0d 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -364,7 +364,7 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
364EXPORT_SYMBOL(genl_unregister_ops); 364EXPORT_SYMBOL(genl_unregister_ops);
365 365
366/** 366/**
367 * genl_register_family - register a generic netlink family 367 * __genl_register_family - register a generic netlink family
368 * @family: generic netlink family 368 * @family: generic netlink family
369 * 369 *
370 * Registers the specified family after validating it first. Only one 370 * Registers the specified family after validating it first. Only one
@@ -374,7 +374,7 @@ EXPORT_SYMBOL(genl_unregister_ops);
374 * 374 *
375 * Return 0 on success or a negative error code. 375 * Return 0 on success or a negative error code.
376 */ 376 */
377int genl_register_family(struct genl_family *family) 377int __genl_register_family(struct genl_family *family)
378{ 378{
379 int err = -EINVAL; 379 int err = -EINVAL;
380 380
@@ -430,10 +430,10 @@ errout_locked:
430errout: 430errout:
431 return err; 431 return err;
432} 432}
433EXPORT_SYMBOL(genl_register_family); 433EXPORT_SYMBOL(__genl_register_family);
434 434
435/** 435/**
436 * genl_register_family_with_ops - register a generic netlink family 436 * __genl_register_family_with_ops - register a generic netlink family
437 * @family: generic netlink family 437 * @family: generic netlink family
438 * @ops: operations to be registered 438 * @ops: operations to be registered
439 * @n_ops: number of elements to register 439 * @n_ops: number of elements to register
@@ -457,12 +457,12 @@ EXPORT_SYMBOL(genl_register_family);
457 * 457 *
458 * Return 0 on success or a negative error code. 458 * Return 0 on success or a negative error code.
459 */ 459 */
460int genl_register_family_with_ops(struct genl_family *family, 460int __genl_register_family_with_ops(struct genl_family *family,
461 struct genl_ops *ops, size_t n_ops) 461 struct genl_ops *ops, size_t n_ops)
462{ 462{
463 int err, i; 463 int err, i;
464 464
465 err = genl_register_family(family); 465 err = __genl_register_family(family);
466 if (err) 466 if (err)
467 return err; 467 return err;
468 468
@@ -476,7 +476,7 @@ err_out:
476 genl_unregister_family(family); 476 genl_unregister_family(family);
477 return err; 477 return err;
478} 478}
479EXPORT_SYMBOL(genl_register_family_with_ops); 479EXPORT_SYMBOL(__genl_register_family_with_ops);
480 480
481/** 481/**
482 * genl_unregister_family - unregister generic netlink family 482 * genl_unregister_family - unregister generic netlink family
@@ -544,6 +544,30 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
544} 544}
545EXPORT_SYMBOL(genlmsg_put); 545EXPORT_SYMBOL(genlmsg_put);
546 546
547static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
548{
549 struct genl_ops *ops = cb->data;
550 int rc;
551
552 genl_lock();
553 rc = ops->dumpit(skb, cb);
554 genl_unlock();
555 return rc;
556}
557
558static int genl_lock_done(struct netlink_callback *cb)
559{
560 struct genl_ops *ops = cb->data;
561 int rc = 0;
562
563 if (ops->done) {
564 genl_lock();
565 rc = ops->done(cb);
566 genl_unlock();
567 }
568 return rc;
569}
570
547static int genl_family_rcv_msg(struct genl_family *family, 571static int genl_family_rcv_msg(struct genl_family *family,
548 struct sk_buff *skb, 572 struct sk_buff *skb,
549 struct nlmsghdr *nlh) 573 struct nlmsghdr *nlh)
@@ -572,15 +596,34 @@ static int genl_family_rcv_msg(struct genl_family *family,
572 return -EPERM; 596 return -EPERM;
573 597
574 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { 598 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
575 struct netlink_dump_control c = { 599 int rc;
576 .dump = ops->dumpit,
577 .done = ops->done,
578 };
579 600
580 if (ops->dumpit == NULL) 601 if (ops->dumpit == NULL)
581 return -EOPNOTSUPP; 602 return -EOPNOTSUPP;
582 603
583 return netlink_dump_start(net->genl_sock, skb, nlh, &c); 604 if (!family->parallel_ops) {
605 struct netlink_dump_control c = {
606 .module = family->module,
607 .data = ops,
608 .dump = genl_lock_dumpit,
609 .done = genl_lock_done,
610 };
611
612 genl_unlock();
613 rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
614 genl_lock();
615
616 } else {
617 struct netlink_dump_control c = {
618 .module = family->module,
619 .dump = ops->dumpit,
620 .done = ops->done,
621 };
622
623 rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
624 }
625
626 return rc;
584 } 627 }
585 628
586 if (ops->doit == NULL) 629 if (ops->doit == NULL)
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 75edcfad6e26..1504bb11e4f3 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -207,10 +207,13 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
207 pgfrom_base -= copy; 207 pgfrom_base -= copy;
208 208
209 vto = kmap_atomic(*pgto); 209 vto = kmap_atomic(*pgto);
210 vfrom = kmap_atomic(*pgfrom); 210 if (*pgto != *pgfrom) {
211 memmove(vto + pgto_base, vfrom + pgfrom_base, copy); 211 vfrom = kmap_atomic(*pgfrom);
212 memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
213 kunmap_atomic(vfrom);
214 } else
215 memmove(vto + pgto_base, vto + pgfrom_base, copy);
212 flush_dcache_page(*pgto); 216 flush_dcache_page(*pgto);
213 kunmap_atomic(vfrom);
214 kunmap_atomic(vto); 217 kunmap_atomic(vto);
215 218
216 } while ((len -= copy) != 0); 219 } while ((len -= copy) != 0);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index ce8249c76827..6cc7ddd2fb7c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1257,7 +1257,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
1257 /* Accept only ACK or NACK message */ 1257 /* Accept only ACK or NACK message */
1258 if (unlikely(msg_errcode(msg))) { 1258 if (unlikely(msg_errcode(msg))) {
1259 sock->state = SS_DISCONNECTING; 1259 sock->state = SS_DISCONNECTING;
1260 sk->sk_err = -ECONNREFUSED; 1260 sk->sk_err = ECONNREFUSED;
1261 retval = TIPC_OK; 1261 retval = TIPC_OK;
1262 break; 1262 break;
1263 } 1263 }
@@ -1268,7 +1268,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
1268 res = auto_connect(sock, msg); 1268 res = auto_connect(sock, msg);
1269 if (res) { 1269 if (res) {
1270 sock->state = SS_DISCONNECTING; 1270 sock->state = SS_DISCONNECTING;
1271 sk->sk_err = res; 1271 sk->sk_err = -res;
1272 retval = TIPC_OK; 1272 retval = TIPC_OK;
1273 break; 1273 break;
1274 } 1274 }
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index eb4a84288648..3bb2cdc13b46 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -214,5 +214,26 @@ int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
214 return inner_mode->afinfo->extract_output(x, skb); 214 return inner_mode->afinfo->extract_output(x, skb);
215} 215}
216 216
217void xfrm_local_error(struct sk_buff *skb, int mtu)
218{
219 unsigned int proto;
220 struct xfrm_state_afinfo *afinfo;
221
222 if (skb->protocol == htons(ETH_P_IP))
223 proto = AF_INET;
224 else if (skb->protocol == htons(ETH_P_IPV6))
225 proto = AF_INET6;
226 else
227 return;
228
229 afinfo = xfrm_state_get_afinfo(proto);
230 if (!afinfo)
231 return;
232
233 afinfo->local_error(skb, mtu);
234 xfrm_state_put_afinfo(afinfo);
235}
236
217EXPORT_SYMBOL_GPL(xfrm_output); 237EXPORT_SYMBOL_GPL(xfrm_output);
218EXPORT_SYMBOL_GPL(xfrm_inner_extract_output); 238EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);
239EXPORT_SYMBOL_GPL(xfrm_local_error);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ad8cc7bcf065..ed38d5d81f9e 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -320,10 +320,8 @@ static void xfrm_queue_purge(struct sk_buff_head *list)
320{ 320{
321 struct sk_buff *skb; 321 struct sk_buff *skb;
322 322
323 while ((skb = skb_dequeue(list)) != NULL) { 323 while ((skb = skb_dequeue(list)) != NULL)
324 dev_put(skb->dev);
325 kfree_skb(skb); 324 kfree_skb(skb);
326 }
327} 325}
328 326
329/* Rule must be locked. Release descentant resources, announce 327/* Rule must be locked. Release descentant resources, announce
@@ -1764,7 +1762,6 @@ static void xfrm_policy_queue_process(unsigned long arg)
1764 struct sk_buff *skb; 1762 struct sk_buff *skb;
1765 struct sock *sk; 1763 struct sock *sk;
1766 struct dst_entry *dst; 1764 struct dst_entry *dst;
1767 struct net_device *dev;
1768 struct xfrm_policy *pol = (struct xfrm_policy *)arg; 1765 struct xfrm_policy *pol = (struct xfrm_policy *)arg;
1769 struct xfrm_policy_queue *pq = &pol->polq; 1766 struct xfrm_policy_queue *pq = &pol->polq;
1770 struct flowi fl; 1767 struct flowi fl;
@@ -1811,7 +1808,6 @@ static void xfrm_policy_queue_process(unsigned long arg)
1811 dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path, 1808 dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
1812 &fl, skb->sk, 0); 1809 &fl, skb->sk, 0);
1813 if (IS_ERR(dst)) { 1810 if (IS_ERR(dst)) {
1814 dev_put(skb->dev);
1815 kfree_skb(skb); 1811 kfree_skb(skb);
1816 continue; 1812 continue;
1817 } 1813 }
@@ -1820,9 +1816,7 @@ static void xfrm_policy_queue_process(unsigned long arg)
1820 skb_dst_drop(skb); 1816 skb_dst_drop(skb);
1821 skb_dst_set(skb, dst); 1817 skb_dst_set(skb, dst);
1822 1818
1823 dev = skb->dev;
1824 err = dst_output(skb); 1819 err = dst_output(skb);
1825 dev_put(dev);
1826 } 1820 }
1827 1821
1828 return; 1822 return;
@@ -1845,7 +1839,6 @@ static int xdst_queue_output(struct sk_buff *skb)
1845 } 1839 }
1846 1840
1847 skb_dst_force(skb); 1841 skb_dst_force(skb);
1848 dev_hold(skb->dev);
1849 1842
1850 spin_lock_bh(&pq->hold_queue.lock); 1843 spin_lock_bh(&pq->hold_queue.lock);
1851 1844
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 4f8ace855864..b9c3f9e943a9 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -39,9 +39,6 @@ static DEFINE_SPINLOCK(xfrm_state_lock);
39 39
40static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; 40static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
41 41
42static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
43static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
44
45static inline unsigned int xfrm_dst_hash(struct net *net, 42static inline unsigned int xfrm_dst_hash(struct net *net,
46 const xfrm_address_t *daddr, 43 const xfrm_address_t *daddr,
47 const xfrm_address_t *saddr, 44 const xfrm_address_t *saddr,
@@ -1863,7 +1860,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1863} 1860}
1864EXPORT_SYMBOL(xfrm_state_unregister_afinfo); 1861EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1865 1862
1866static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) 1863struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
1867{ 1864{
1868 struct xfrm_state_afinfo *afinfo; 1865 struct xfrm_state_afinfo *afinfo;
1869 if (unlikely(family >= NPROTO)) 1866 if (unlikely(family >= NPROTO))
@@ -1875,7 +1872,7 @@ static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
1875 return afinfo; 1872 return afinfo;
1876} 1873}
1877 1874
1878static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo) 1875void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1879{ 1876{
1880 rcu_read_unlock(); 1877 rcu_read_unlock();
1881} 1878}