author     Takashi Iwai <tiwai@suse.de>    2013-09-11 06:38:45 -0400
committer  Takashi Iwai <tiwai@suse.de>    2013-09-11 06:38:45 -0400
commit     3d0049e8529adaa36c38a7b400792f6c37b66c92 (patch)
tree       7d2e30a5e3f12207305ffc76d80d310c0a95fdcc /net
parent     be8cf44526d8972c2dbf6e561162dad924a712a5 (diff)
parent     2ae2caff8348378c9e464353e9860ee97583c00b (diff)
Merge tag 'asoc-v3.12-4' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Fixes for v3.12
A few small fixes, nothing with any broad impact but all useful for the
affected systems. The Kirkwood compatible string change is fixing up a
string just added in the merge window so that we don't get any changes
in released kernels.
Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_device.c             |   2
-rw-r--r--  net/bridge/br_input.c              |   2
-rw-r--r--  net/bridge/br_mdb.c                |  14
-rw-r--r--  net/bridge/br_multicast.c          | 261
-rw-r--r--  net/bridge/br_private.h            |  57
-rw-r--r--  net/core/flow_dissector.c          |  11
-rw-r--r--  net/core/scm.c                     |   2
-rw-r--r--  net/ipv4/ip_output.c               |   8
-rw-r--r--  net/ipv4/ipip.c                    |   5
-rw-r--r--  net/ipv4/raw.c                     |   3
-rw-r--r--  net/ipv4/tcp_input.c               |   9
-rw-r--r--  net/ipv4/tcp_output.c              |   4
-rw-r--r--  net/ipv4/xfrm4_output.c            |  16
-rw-r--r--  net/ipv4/xfrm4_state.c             |   1
-rw-r--r--  net/ipv6/ip6_gre.c                 |   5
-rw-r--r--  net/ipv6/ip6_output.c              |   3
-rw-r--r--  net/ipv6/ip6_tunnel.c              |   6
-rw-r--r--  net/ipv6/raw.c                     |   1
-rw-r--r--  net/ipv6/sit.c                     |  11
-rw-r--r--  net/ipv6/xfrm6_output.c            |  21
-rw-r--r--  net/ipv6/xfrm6_state.c             |   1
-rw-r--r--  net/mac80211/ibss.c                |  34
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c |   3
-rw-r--r--  net/netlink/genetlink.c            |  67
-rw-r--r--  net/sunrpc/xdr.c                   |   9
-rw-r--r--  net/tipc/socket.c                  |   4
-rw-r--r--  net/xfrm/xfrm_output.c             |  21
-rw-r--r--  net/xfrm/xfrm_policy.c             |   9
-rw-r--r--  net/xfrm/xfrm_state.c              |   7
29 files changed, 429 insertions, 168 deletions
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 69363bd37f64..89659d4ed1f9 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -71,7 +71,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	mdst = br_mdb_get(br, skb, vid);
 	if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
-	    br_multicast_querier_exists(br))
+	    br_multicast_querier_exists(br, eth_hdr(skb)))
 		br_multicast_deliver(mdst, skb);
 	else
 		br_flood_deliver(br, skb, false);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 8c561c0aa636..a2fd37ec35f7 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -102,7 +102,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	} else if (is_multicast_ether_addr(dest)) {
 		mdst = br_mdb_get(br, skb, vid);
 		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
-		    br_multicast_querier_exists(br)) {
+		    br_multicast_querier_exists(br, eth_hdr(skb))) {
 			if ((mdst && mdst->mglist) ||
 			    br_multicast_is_router(br))
 				skb2 = skb;
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 0daae3ec2355..6319c4333c39 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -414,16 +414,20 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
 	if (!netif_running(br->dev) || br->multicast_disabled)
 		return -EINVAL;
 
-	if (timer_pending(&br->multicast_querier_timer))
-		return -EBUSY;
-
 	ip.proto = entry->addr.proto;
-	if (ip.proto == htons(ETH_P_IP))
+	if (ip.proto == htons(ETH_P_IP)) {
+		if (timer_pending(&br->ip4_querier.timer))
+			return -EBUSY;
+
 		ip.u.ip4 = entry->addr.u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
-	else
+	} else {
+		if (timer_pending(&br->ip6_querier.timer))
+			return -EBUSY;
+
 		ip.u.ip6 = entry->addr.u.ip6;
 #endif
+	}
 
 	spin_lock_bh(&br->multicast_lock);
 	mdb = mlock_dereference(br->mdb, br);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 08e576ada0b2..bbcb43582496 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -33,7 +33,8 @@
 
 #include "br_private.h"
 
-static void br_multicast_start_querier(struct net_bridge *br);
+static void br_multicast_start_querier(struct net_bridge *br,
+				       struct bridge_mcast_query *query);
 unsigned int br_mdb_rehash_seq;
 
 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -755,20 +756,35 @@ static void br_multicast_local_router_expired(unsigned long data)
 {
 }
 
-static void br_multicast_querier_expired(unsigned long data)
+static void br_multicast_querier_expired(struct net_bridge *br,
+					 struct bridge_mcast_query *query)
 {
-	struct net_bridge *br = (void *)data;
-
 	spin_lock(&br->multicast_lock);
 	if (!netif_running(br->dev) || br->multicast_disabled)
 		goto out;
 
-	br_multicast_start_querier(br);
+	br_multicast_start_querier(br, query);
 
 out:
 	spin_unlock(&br->multicast_lock);
 }
 
+static void br_ip4_multicast_querier_expired(unsigned long data)
+{
+	struct net_bridge *br = (void *)data;
+
+	br_multicast_querier_expired(br, &br->ip4_query);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_querier_expired(unsigned long data)
+{
+	struct net_bridge *br = (void *)data;
+
+	br_multicast_querier_expired(br, &br->ip6_query);
+}
+#endif
+
 static void __br_multicast_send_query(struct net_bridge *br,
 				      struct net_bridge_port *port,
 				      struct br_ip *ip)
@@ -789,37 +805,45 @@ static void __br_multicast_send_query(struct net_bridge *br,
 }
 
 static void br_multicast_send_query(struct net_bridge *br,
-				    struct net_bridge_port *port, u32 sent)
+				    struct net_bridge_port *port,
+				    struct bridge_mcast_query *query)
 {
 	unsigned long time;
 	struct br_ip br_group;
+	struct bridge_mcast_querier *querier = NULL;
 
 	if (!netif_running(br->dev) || br->multicast_disabled ||
-	    !br->multicast_querier ||
-	    timer_pending(&br->multicast_querier_timer))
+	    !br->multicast_querier)
 		return;
 
 	memset(&br_group.u, 0, sizeof(br_group.u));
 
-	br_group.proto = htons(ETH_P_IP);
-	__br_multicast_send_query(br, port, &br_group);
-
+	if (port ? (query == &port->ip4_query) :
+		   (query == &br->ip4_query)) {
+		querier = &br->ip4_querier;
+		br_group.proto = htons(ETH_P_IP);
 #if IS_ENABLED(CONFIG_IPV6)
-	br_group.proto = htons(ETH_P_IPV6);
-	__br_multicast_send_query(br, port, &br_group);
+	} else {
+		querier = &br->ip6_querier;
+		br_group.proto = htons(ETH_P_IPV6);
 #endif
+	}
+
+	if (!querier || timer_pending(&querier->timer))
+		return;
+
+	__br_multicast_send_query(br, port, &br_group);
 
 	time = jiffies;
-	time += sent < br->multicast_startup_query_count ?
+	time += query->startup_sent < br->multicast_startup_query_count ?
 		br->multicast_startup_query_interval :
 		br->multicast_query_interval;
-	mod_timer(port ? &port->multicast_query_timer :
-		  &br->multicast_query_timer, time);
+	mod_timer(&query->timer, time);
 }
 
-static void br_multicast_port_query_expired(unsigned long data)
+static void br_multicast_port_query_expired(struct net_bridge_port *port,
+					    struct bridge_mcast_query *query)
 {
-	struct net_bridge_port *port = (void *)data;
 	struct net_bridge *br = port->br;
 
 	spin_lock(&br->multicast_lock);
@@ -827,25 +851,43 @@ static void br_multicast_port_query_expired(unsigned long data)
 	    port->state == BR_STATE_BLOCKING)
 		goto out;
 
-	if (port->multicast_startup_queries_sent <
-	    br->multicast_startup_query_count)
-		port->multicast_startup_queries_sent++;
+	if (query->startup_sent < br->multicast_startup_query_count)
+		query->startup_sent++;
 
-	br_multicast_send_query(port->br, port,
-				port->multicast_startup_queries_sent);
+	br_multicast_send_query(port->br, port, query);
 
 out:
 	spin_unlock(&br->multicast_lock);
 }
 
+static void br_ip4_multicast_port_query_expired(unsigned long data)
+{
+	struct net_bridge_port *port = (void *)data;
+
+	br_multicast_port_query_expired(port, &port->ip4_query);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_port_query_expired(unsigned long data)
+{
+	struct net_bridge_port *port = (void *)data;
+
+	br_multicast_port_query_expired(port, &port->ip6_query);
+}
+#endif
+
 void br_multicast_add_port(struct net_bridge_port *port)
 {
 	port->multicast_router = 1;
 
 	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
 		    (unsigned long)port);
-	setup_timer(&port->multicast_query_timer,
-		    br_multicast_port_query_expired, (unsigned long)port);
+	setup_timer(&port->ip4_query.timer, br_ip4_multicast_port_query_expired,
+		    (unsigned long)port);
+#if IS_ENABLED(CONFIG_IPV6)
+	setup_timer(&port->ip6_query.timer, br_ip6_multicast_port_query_expired,
+		    (unsigned long)port);
+#endif
 }
 
 void br_multicast_del_port(struct net_bridge_port *port)
@@ -853,13 +895,13 @@ void br_multicast_del_port(struct net_bridge_port *port)
 	del_timer_sync(&port->multicast_router_timer);
 }
 
-static void __br_multicast_enable_port(struct net_bridge_port *port)
+static void br_multicast_enable(struct bridge_mcast_query *query)
 {
-	port->multicast_startup_queries_sent = 0;
+	query->startup_sent = 0;
 
-	if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
-	    del_timer(&port->multicast_query_timer))
-		mod_timer(&port->multicast_query_timer, jiffies);
+	if (try_to_del_timer_sync(&query->timer) >= 0 ||
+	    del_timer(&query->timer))
+		mod_timer(&query->timer, jiffies);
 }
 
 void br_multicast_enable_port(struct net_bridge_port *port)
@@ -870,7 +912,10 @@ void br_multicast_enable_port(struct net_bridge_port *port)
 	if (br->multicast_disabled || !netif_running(br->dev))
 		goto out;
 
-	__br_multicast_enable_port(port);
+	br_multicast_enable(&port->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+	br_multicast_enable(&port->ip6_query);
+#endif
 
 out:
 	spin_unlock(&br->multicast_lock);
@@ -889,7 +934,10 @@ void br_multicast_disable_port(struct net_bridge_port *port)
 	if (!hlist_unhashed(&port->rlist))
 		hlist_del_init_rcu(&port->rlist);
 	del_timer(&port->multicast_router_timer);
-	del_timer(&port->multicast_query_timer);
+	del_timer(&port->ip4_query.timer);
+#if IS_ENABLED(CONFIG_IPV6)
+	del_timer(&port->ip6_query.timer);
+#endif
 	spin_unlock(&br->multicast_lock);
 }
 
@@ -1014,14 +1062,15 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 }
 #endif
 
-static void br_multicast_update_querier_timer(struct net_bridge *br,
-					      unsigned long max_delay)
+static void
+br_multicast_update_querier_timer(struct net_bridge *br,
+				  struct bridge_mcast_querier *querier,
+				  unsigned long max_delay)
 {
-	if (!timer_pending(&br->multicast_querier_timer))
-		br->multicast_querier_delay_time = jiffies + max_delay;
+	if (!timer_pending(&querier->timer))
+		querier->delay_time = jiffies + max_delay;
 
-	mod_timer(&br->multicast_querier_timer,
-		  jiffies + br->multicast_querier_interval);
+	mod_timer(&querier->timer, jiffies + br->multicast_querier_interval);
 }
 
 /*
@@ -1074,12 +1123,13 @@ timer:
 
 static void br_multicast_query_received(struct net_bridge *br,
 					struct net_bridge_port *port,
+					struct bridge_mcast_querier *querier,
 					int saddr,
 					unsigned long max_delay)
 {
 	if (saddr)
-		br_multicast_update_querier_timer(br, max_delay);
-	else if (timer_pending(&br->multicast_querier_timer))
+		br_multicast_update_querier_timer(br, querier, max_delay);
+	else if (timer_pending(&querier->timer))
 		return;
 
 	br_multicast_mark_router(br, port);
@@ -1129,7 +1179,8 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 			  IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
 	}
 
-	br_multicast_query_received(br, port, !!iph->saddr, max_delay);
+	br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
+				    max_delay);
 
 	if (!group)
 		goto out;
@@ -1203,11 +1254,12 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 		mld2q = (struct mld2_query *)icmp6_hdr(skb);
 		if (!mld2q->mld2q_nsrcs)
 			group = &mld2q->mld2q_mca;
-		max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
+
+		max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL);
 	}
 
-	br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr),
-				    max_delay);
+	br_multicast_query_received(br, port, &br->ip6_querier,
+				    !ipv6_addr_any(&ip6h->saddr), max_delay);
 
 	if (!group)
 		goto out;
@@ -1244,7 +1296,9 @@ out:
 
 static void br_multicast_leave_group(struct net_bridge *br,
 				     struct net_bridge_port *port,
-				     struct br_ip *group)
+				     struct br_ip *group,
+				     struct bridge_mcast_querier *querier,
+				     struct bridge_mcast_query *query)
 {
 	struct net_bridge_mdb_htable *mdb;
 	struct net_bridge_mdb_entry *mp;
@@ -1255,7 +1309,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
 	spin_lock(&br->multicast_lock);
 	if (!netif_running(br->dev) ||
 	    (port && port->state == BR_STATE_DISABLED) ||
-	    timer_pending(&br->multicast_querier_timer))
+	    timer_pending(&querier->timer))
 		goto out;
 
 	mdb = mlock_dereference(br->mdb, br);
@@ -1263,14 +1317,13 @@ static void br_multicast_leave_group(struct net_bridge *br,
 	if (!mp)
 		goto out;
 
-	if (br->multicast_querier &&
-	    !timer_pending(&br->multicast_querier_timer)) {
+	if (br->multicast_querier) {
 		__br_multicast_send_query(br, port, &mp->addr);
 
 		time = jiffies + br->multicast_last_member_count *
 			br->multicast_last_member_interval;
-		mod_timer(port ? &port->multicast_query_timer :
-			  &br->multicast_query_timer, time);
+
+		mod_timer(&query->timer, time);
 
 		for (p = mlock_dereference(mp->ports, br);
 		     p != NULL;
@@ -1323,7 +1376,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
 			mod_timer(&mp->timer, time);
 		}
 	}
-
 out:
 	spin_unlock(&br->multicast_lock);
 }
@@ -1334,6 +1386,8 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
 					 __u16 vid)
 {
 	struct br_ip br_group;
+	struct bridge_mcast_query *query = port ? &port->ip4_query :
+						  &br->ip4_query;
 
 	if (ipv4_is_local_multicast(group))
 		return;
@@ -1342,7 +1396,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
 	br_group.proto = htons(ETH_P_IP);
 	br_group.vid = vid;
 
-	br_multicast_leave_group(br, port, &br_group);
+	br_multicast_leave_group(br, port, &br_group, &br->ip4_querier, query);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1352,6 +1406,9 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 					 __u16 vid)
 {
 	struct br_ip br_group;
+	struct bridge_mcast_query *query = port ? &port->ip6_query :
+						  &br->ip6_query;
+
 
 	if (!ipv6_is_transient_multicast(group))
 		return;
@@ -1360,7 +1417,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 	br_group.proto = htons(ETH_P_IPV6);
 	br_group.vid = vid;
 
-	br_multicast_leave_group(br, port, &br_group);
+	br_multicast_leave_group(br, port, &br_group, &br->ip6_querier, query);
 }
 #endif
 
@@ -1622,19 +1679,32 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
 	return 0;
 }
 
-static void br_multicast_query_expired(unsigned long data)
+static void br_multicast_query_expired(struct net_bridge *br,
+				       struct bridge_mcast_query *query)
+{
+	spin_lock(&br->multicast_lock);
+	if (query->startup_sent < br->multicast_startup_query_count)
+		query->startup_sent++;
+
+	br_multicast_send_query(br, NULL, query);
+	spin_unlock(&br->multicast_lock);
+}
+
+static void br_ip4_multicast_query_expired(unsigned long data)
 {
 	struct net_bridge *br = (void *)data;
 
-	spin_lock(&br->multicast_lock);
-	if (br->multicast_startup_queries_sent <
-	    br->multicast_startup_query_count)
-		br->multicast_startup_queries_sent++;
+	br_multicast_query_expired(br, &br->ip4_query);
+}
 
-	br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_query_expired(unsigned long data)
+{
+	struct net_bridge *br = (void *)data;
 
-	spin_unlock(&br->multicast_lock);
+	br_multicast_query_expired(br, &br->ip6_query);
 }
+#endif
 
 void br_multicast_init(struct net_bridge *br)
 {
@@ -1654,25 +1724,43 @@ void br_multicast_init(struct net_bridge *br)
 	br->multicast_querier_interval = 255 * HZ;
 	br->multicast_membership_interval = 260 * HZ;
 
-	br->multicast_querier_delay_time = 0;
+	br->ip4_querier.delay_time = 0;
+#if IS_ENABLED(CONFIG_IPV6)
+	br->ip6_querier.delay_time = 0;
+#endif
 
 	spin_lock_init(&br->multicast_lock);
 	setup_timer(&br->multicast_router_timer,
 		    br_multicast_local_router_expired, 0);
-	setup_timer(&br->multicast_querier_timer,
-		    br_multicast_querier_expired, (unsigned long)br);
-	setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
+	setup_timer(&br->ip4_querier.timer, br_ip4_multicast_querier_expired,
+		    (unsigned long)br);
+	setup_timer(&br->ip4_query.timer, br_ip4_multicast_query_expired,
 		    (unsigned long)br);
+#if IS_ENABLED(CONFIG_IPV6)
+	setup_timer(&br->ip6_querier.timer, br_ip6_multicast_querier_expired,
+		    (unsigned long)br);
+	setup_timer(&br->ip6_query.timer, br_ip6_multicast_query_expired,
+		    (unsigned long)br);
+#endif
 }
 
-void br_multicast_open(struct net_bridge *br)
+static void __br_multicast_open(struct net_bridge *br,
+				struct bridge_mcast_query *query)
 {
-	br->multicast_startup_queries_sent = 0;
+	query->startup_sent = 0;
 
 	if (br->multicast_disabled)
 		return;
 
-	mod_timer(&br->multicast_query_timer, jiffies);
+	mod_timer(&query->timer, jiffies);
+}
+
+void br_multicast_open(struct net_bridge *br)
+{
+	__br_multicast_open(br, &br->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+	__br_multicast_open(br, &br->ip6_query);
+#endif
 }
 
 void br_multicast_stop(struct net_bridge *br)
@@ -1684,8 +1772,12 @@ void br_multicast_stop(struct net_bridge *br)
 	int i;
 
 	del_timer_sync(&br->multicast_router_timer);
-	del_timer_sync(&br->multicast_querier_timer);
-	del_timer_sync(&br->multicast_query_timer);
+	del_timer_sync(&br->ip4_querier.timer);
+	del_timer_sync(&br->ip4_query.timer);
+#if IS_ENABLED(CONFIG_IPV6)
+	del_timer_sync(&br->ip6_querier.timer);
+	del_timer_sync(&br->ip6_query.timer);
+#endif
 
 	spin_lock_bh(&br->multicast_lock);
 	mdb = mlock_dereference(br->mdb, br);
@@ -1788,18 +1880,24 @@ unlock:
 	return err;
 }
 
-static void br_multicast_start_querier(struct net_bridge *br)
+static void br_multicast_start_querier(struct net_bridge *br,
+				       struct bridge_mcast_query *query)
 {
 	struct net_bridge_port *port;
 
-	br_multicast_open(br);
+	__br_multicast_open(br, query);
 
 	list_for_each_entry(port, &br->port_list, list) {
 		if (port->state == BR_STATE_DISABLED ||
 		    port->state == BR_STATE_BLOCKING)
 			continue;
 
-		__br_multicast_enable_port(port);
+		if (query == &br->ip4_query)
+			br_multicast_enable(&port->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+		else
+			br_multicast_enable(&port->ip6_query);
+#endif
 	}
 }
 
@@ -1834,7 +1932,10 @@ rollback:
 		goto rollback;
 	}
 
-	br_multicast_start_querier(br);
+	br_multicast_start_querier(br, &br->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+	br_multicast_start_querier(br, &br->ip6_query);
+#endif
 
 unlock:
 	spin_unlock_bh(&br->multicast_lock);
@@ -1857,10 +1958,18 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
 		goto unlock;
 
 	max_delay = br->multicast_query_response_interval;
-	if (!timer_pending(&br->multicast_querier_timer))
-		br->multicast_querier_delay_time = jiffies + max_delay;
 
-	br_multicast_start_querier(br);
+	if (!timer_pending(&br->ip4_querier.timer))
+		br->ip4_querier.delay_time = jiffies + max_delay;
+
+	br_multicast_start_querier(br, &br->ip4_query);
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (!timer_pending(&br->ip6_querier.timer))
+		br->ip6_querier.delay_time = jiffies + max_delay;
+
+	br_multicast_start_querier(br, &br->ip6_query);
+#endif
 
 unlock:
 	spin_unlock_bh(&br->multicast_lock);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2f7da41851bf..263ba9034468 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -66,6 +66,20 @@ struct br_ip
 	__u16		vid;
 };
 
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+/* our own querier */
+struct bridge_mcast_query {
+	struct timer_list	timer;
+	u32			startup_sent;
+};
+
+/* other querier */
+struct bridge_mcast_querier {
+	struct timer_list		timer;
+	unsigned long			delay_time;
+};
+#endif
+
 struct net_port_vlans {
 	u16				port_idx;
 	u16				pvid;
@@ -162,10 +176,12 @@ struct net_bridge_port
 #define BR_FLOOD		0x00000040
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
-	u32				multicast_startup_queries_sent;
+	struct bridge_mcast_query	ip4_query;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct bridge_mcast_query	ip6_query;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 	unsigned char			multicast_router;
 	struct timer_list		multicast_router_timer;
-	struct timer_list		multicast_query_timer;
 	struct hlist_head		mglist;
 	struct hlist_node		rlist;
 #endif
@@ -258,7 +274,6 @@ struct net_bridge
 	u32				hash_max;
 
 	u32				multicast_last_member_count;
-	u32				multicast_startup_queries_sent;
 	u32				multicast_startup_query_count;
 
 	unsigned long			multicast_last_member_interval;
@@ -267,15 +282,18 @@ struct net_bridge
 	unsigned long			multicast_query_interval;
 	unsigned long			multicast_query_response_interval;
 	unsigned long			multicast_startup_query_interval;
-	unsigned long			multicast_querier_delay_time;
 
 	spinlock_t			multicast_lock;
 	struct net_bridge_mdb_htable __rcu *mdb;
 	struct hlist_head		router_list;
 
 	struct timer_list		multicast_router_timer;
-	struct timer_list		multicast_querier_timer;
-	struct timer_list		multicast_query_timer;
+	struct bridge_mcast_querier	ip4_querier;
+	struct bridge_mcast_query	ip4_query;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct bridge_mcast_querier	ip6_querier;
+	struct bridge_mcast_query	ip6_query;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 #endif
 
 	struct timer_list		hello_timer;
@@ -503,11 +521,27 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
 	       timer_pending(&br->multicast_router_timer));
 }
 
-static inline bool br_multicast_querier_exists(struct net_bridge *br)
+static inline bool
+__br_multicast_querier_exists(struct net_bridge *br,
+			      struct bridge_mcast_querier *querier)
+{
+	return time_is_before_jiffies(querier->delay_time) &&
+	       (br->multicast_querier || timer_pending(&querier->timer));
+}
+
+static inline bool br_multicast_querier_exists(struct net_bridge *br,
+					       struct ethhdr *eth)
 {
-	return time_is_before_jiffies(br->multicast_querier_delay_time) &&
-	       (br->multicast_querier ||
-		timer_pending(&br->multicast_querier_timer));
+	switch (eth->h_proto) {
+	case (htons(ETH_P_IP)):
+		return __br_multicast_querier_exists(br, &br->ip4_querier);
+#if IS_ENABLED(CONFIG_IPV6)
+	case (htons(ETH_P_IPV6)):
+		return __br_multicast_querier_exists(br, &br->ip6_querier);
+#endif
+	default:
+		return false;
+	}
 }
 #else
 static inline int br_multicast_rcv(struct net_bridge *br,
@@ -565,7 +599,8 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
 {
 	return 0;
 }
-static inline bool br_multicast_querier_exists(struct net_bridge *br)
+static inline bool br_multicast_querier_exists(struct net_bridge *br,
+					       struct ethhdr *eth)
 {
 	return false;
 }
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index b84a1b155bc1..d12e3a9a5356 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -346,14 +346,9 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 		if (new_index < 0)
 			new_index = skb_tx_hash(dev, skb);
 
-		if (queue_index != new_index && sk) {
-			struct dst_entry *dst =
-				rcu_dereference_check(sk->sk_dst_cache, 1);
-
-			if (dst && skb_dst(skb) == dst)
-				sk_tx_queue_set(sk, queue_index);
-
-		}
+		if (queue_index != new_index && sk &&
+		    rcu_access_pointer(sk->sk_dst_cache))
+			sk_tx_queue_set(sk, queue_index);
 
 		queue_index = new_index;
 	}
diff --git a/net/core/scm.c b/net/core/scm.c
index 03795d0147f2..b4da80b1cc07 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -54,7 +54,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
 		return -EINVAL;
 
 	if ((creds->pid == task_tgid_vnr(current) ||
-	     ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
+	     ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
 	    ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
 	      uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
 	    ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 4bcabf3ab4ca..9ee17e3d11c3 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -211,14 +211,6 @@ static inline int ip_finish_output2(struct sk_buff *skb)
 	return -EINVAL;
 }
 
-static inline int ip_skb_dst_mtu(struct sk_buff *skb)
-{
-	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
-
-	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
-	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
-}
-
 static int ip_finish_output(struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 51fc2a1dcdd3..b3ac3c3f6219 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -190,15 +190,14 @@ static int ipip_rcv(struct sk_buff *skb)
 	struct ip_tunnel *tunnel;
 	const struct iphdr *iph;
 
-	if (iptunnel_pull_header(skb, 0, tpi.proto))
-		goto drop;
-
 	iph = ip_hdr(skb);
 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
 			iph->saddr, iph->daddr, 0);
 	if (tunnel) {
 		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 			goto drop;
+		if (iptunnel_pull_header(skb, 0, tpi.proto))
+			goto drop;
 		return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
 	}
 
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index dd44e0ab600c..61e60d67adca 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -571,7 +571,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
 			   RT_SCOPE_UNIVERSE,
 			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
-			   inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP,
+			   inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
+			    (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
 			   daddr, saddr, 0, 0);
 
 	if (!inet->hdrincl) {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 28af45abe062..3ca2139a130b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3535,7 +3535,10 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
 		++ptr;
 		tp->rx_opt.rcv_tsval = ntohl(*ptr);
 		++ptr;
-		tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
+		if (*ptr)
+			tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
+		else
+			tp->rx_opt.rcv_tsecr = 0;
 		return true;
 	}
 	return false;
@@ -3560,7 +3563,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
 	}
 
 	tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
-	if (tp->rx_opt.saw_tstamp)
+	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
 		tp->rx_opt.rcv_tsecr -= tp->tsoffset;
 
 	return true;
@@ -5316,7 +5319,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 	int saved_clamp = tp->rx_opt.mss_clamp;
 
 	tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
-	if (tp->rx_opt.saw_tstamp)
+	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
 		tp->rx_opt.rcv_tsecr -= tp->tsoffset;
 
 	if (th->ack) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 92fde8d1aa82..170737a9d56d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2670,7 +2670,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	int tcp_header_size;
 	int mss;
 
-	skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC));
+	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
 	if (unlikely(!skb)) {
 		dst_release(dst);
 		return NULL;
@@ -2814,6 +2814,8 @@ void tcp_connect_init(struct sock *sk)
 
 	if (likely(!tp->repair))
 		tp->rcv_nxt = 0;
+	else
+		tp->rcv_tstamp = tcp_time_stamp;
 	tp->rcv_wup = tp->rcv_nxt;
 	tp->copied_seq = tp->rcv_nxt;
 
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 327a617d594c..baa0f63731fd 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -21,7 +21,6 @@
 static int xfrm4_tunnel_check_size(struct sk_buff *skb)
 {
 	int mtu, ret = 0;
-	struct dst_entry *dst;
 
 	if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
 		goto out;
@@ -29,12 +28,10 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
 	if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
 		goto out;
 
-	dst = skb_dst(skb);
-	mtu = dst_mtu(dst);
+	mtu = dst_mtu(skb_dst(skb));
 	if (skb->len > mtu) {
 		if (skb->sk)
-			ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr,
-				       inet_sk(skb->sk)->inet_dport, mtu);
+			xfrm_local_error(skb, mtu);
 		else
 			icmp_send(skb, ICMP_DEST_UNREACH,
 				  ICMP_FRAG_NEEDED, htonl(mtu));
@@ -99,3 +96,12 @@ int xfrm4_output(struct sk_buff *skb)
 			    x->outer_mode->afinfo->output_finish,
 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
+
+void xfrm4_local_error(struct sk_buff *skb, u32 mtu)
+{
+	struct iphdr *hdr;
+
+	hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+	ip_local_error(skb->sk, EMSGSIZE, hdr->daddr,
+		       inet_sk(skb->sk)->inet_dport, mtu);
+}
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 9258e751baba..0b2a0641526a 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -83,6 +83,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
 	.extract_input		= xfrm4_extract_input,
 	.extract_output		= xfrm4_extract_output,
 	.transport_finish	= xfrm4_transport_finish,
+	.local_error		= xfrm4_local_error,
 };
 
 void __init xfrm4_state_init(void)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index ecd60733e5e2..90747f1973fe 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -724,6 +724,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
 	}
 
+	if (likely(!skb->encapsulation)) {
+		skb_reset_inner_headers(skb);
+		skb->encapsulation = 1;
+	}
+
 	skb_push(skb, gre_hlen);
 	skb_reset_network_header(skb);
 	skb_set_transport_header(skb, sizeof(*ipv6h));
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 6e3ddf806ec2..e7ceb6c871d1 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -238,6 +238,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 	hdr->saddr = fl6->saddr;
 	hdr->daddr = *first_hop;
 
+	skb->protocol = htons(ETH_P_IPV6);
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
 
@@ -1057,6 +1058,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 		/* initialize protocol header pointer */
 		skb->transport_header = skb->network_header + fragheaderlen;
 
+		skb->protocol = htons(ETH_P_IPV6);
 		skb->ip_summed = CHECKSUM_PARTIAL;
 		skb->csum = 0;
 	}
@@ -1359,6 +1361,7 @@ alloc_new_skb:
 			/*
 			 *	Fill in the control structures
 			 */
+			skb->protocol = htons(ETH_P_IPV6);
 			skb->ip_summed = CHECKSUM_NONE;
 			skb->csum = 0;
 			/* reserve for fragmentation and ipsec header */
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 1e55866cead7..46ba243605a3 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1027,6 +1027,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 		init_tel_txopt(&opt, encap_limit);
 		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
 	}
+
+	if (likely(!skb->encapsulation)) {
+		skb_reset_inner_headers(skb);
+		skb->encapsulation = 1;
+	}
+
 	skb_push(skb, sizeof(struct ipv6hdr));
 	skb_reset_network_header(skb);
 	ipv6h = ipv6_hdr(skb);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index c45f7a5c36e9..cdaed47ba932 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -628,6 +628,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
 		goto error;
 	skb_reserve(skb, hlen);
 
+	skb->protocol = htons(ETH_P_IPV6);
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
 	skb_dst_set(skb, &rt->dst);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index a3437a4cd07e..21b25dd8466b 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -645,11 +645,7 @@ static int ipip_rcv(struct sk_buff *skb)
 	const struct iphdr *iph;
 	struct ip_tunnel *tunnel;
 
-	if (iptunnel_pull_header(skb, 0, tpi.proto))
-		goto drop;
-
 	iph = ip_hdr(skb);
-
 	tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
 				     iph->saddr, iph->daddr);
 	if (tunnel != NULL) {
@@ -659,6 +655,8 @@ static int ipip_rcv(struct sk_buff *skb)
 
 		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 			goto drop;
+		if (iptunnel_pull_header(skb, 0, tpi.proto))
+			goto drop;
 		return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
 	}
 
@@ -888,6 +886,11 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 		ttl = iph6->hop_limit;
 	tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
 
+	if (likely(!skb->encapsulation)) {
+		skb_reset_inner_headers(skb);
+		skb->encapsulation = 1;
+	}
+
 	err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr,
 			    IPPROTO_IPV6, tos, ttl, df);
 	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 8755a3079d0f..6cd625e37706 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -34,8 +34,10 @@ static int xfrm6_local_dontfrag(struct sk_buff *skb)
 	struct sock *sk = skb->sk;
 
 	if (sk) {
-		proto = sk->sk_protocol;
+		if (sk->sk_family != AF_INET6)
+			return 0;
 
+		proto = sk->sk_protocol;
 		if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
 			return inet6_sk(sk)->dontfrag;
 	}
@@ -54,13 +56,15 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
 	ipv6_local_rxpmtu(sk, &fl6, mtu);
 }
 
-static void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
+void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
 {
 	struct flowi6 fl6;
+	const struct ipv6hdr *hdr;
 	struct sock *sk = skb->sk;
 
+	hdr = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
 	fl6.fl6_dport = inet_sk(sk)->inet_dport;
-	fl6.daddr = ipv6_hdr(skb)->daddr;
+	fl6.daddr = hdr->daddr;
 
 	ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
 }
@@ -80,7 +84,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
 		if (xfrm6_local_dontfrag(skb))
 			xfrm6_local_rxpmtu(skb, mtu);
 		else if (skb->sk)
-			xfrm6_local_error(skb, mtu);
+			xfrm_local_error(skb, mtu);
 		else
 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		ret = -EMSGSIZE;
@@ -136,13 +140,18 @@ static int __xfrm6_output(struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct xfrm_state *x = dst->xfrm;
-	int mtu = ip6_skb_dst_mtu(skb);
+	int mtu;
+
+	if (skb->protocol == htons(ETH_P_IPV6))
+		mtu = ip6_skb_dst_mtu(skb);
+	else
+		mtu = dst_mtu(skb_dst(skb));
 
 	if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
 		xfrm6_local_rxpmtu(skb, mtu);
 		return -EMSGSIZE;
 	} else if (!skb->local_df && skb->len > mtu && skb->sk) {
-		xfrm6_local_error(skb, mtu);
+		xfrm_local_error(skb, mtu);
 		return -EMSGSIZE;
 	}
 
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index d8c70b8efc24..3fc970135fc6 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -183,6 +183,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
 	.extract_input		= xfrm6_extract_input,
 	.extract_output		= xfrm6_extract_output,
 	.transport_finish	= xfrm6_transport_finish,
+	.local_error		= xfrm6_local_error,
 };
 
 int __init xfrm6_state_init(void)
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index ea7b9c2c7e66..2d45643c964e 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -36,7 +36,7 @@
 
 static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 				      const u8 *bssid, const int beacon_int,
-				      struct ieee80211_channel *chan,
+				      struct cfg80211_chan_def *req_chandef,
 				      const u32 basic_rates,
 				      const u16 capability, u64 tsf,
 				      bool creator)
@@ -51,6 +51,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 	u32 bss_change;
 	u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
 	struct cfg80211_chan_def chandef;
+	struct ieee80211_channel *chan;
 	struct beacon_data *presp;
 	int frame_len;
 
@@ -81,7 +82,9 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 
 	sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
 
-	chandef = ifibss->chandef;
+	/* make a copy of the chandef, it could be modified below. */
+	chandef = *req_chandef;
+	chan = chandef.chan;
 	if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
 		chandef.width = NL80211_CHAN_WIDTH_20;
 		chandef.center_freq1 = chan->center_freq;
@@ -259,10 +262,12 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 	struct cfg80211_bss *cbss =
 		container_of((void *)bss, struct cfg80211_bss, priv);
 	struct ieee80211_supported_band *sband;
+	struct cfg80211_chan_def chandef;
 	u32 basic_rates;
 	int i, j;
 	u16 beacon_int = cbss->beacon_interval;
 	const struct cfg80211_bss_ies *ies;
+	enum nl80211_channel_type chan_type;
 	u64 tsf;
 
 	sdata_assert_lock(sdata);
@@ -270,6 +275,26 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 	if (beacon_int < 10)
 		beacon_int = 10;
 
+	switch (sdata->u.ibss.chandef.width) {
+	case NL80211_CHAN_WIDTH_20_NOHT:
+	case NL80211_CHAN_WIDTH_20:
+	case NL80211_CHAN_WIDTH_40:
+		chan_type = cfg80211_get_chandef_type(&sdata->u.ibss.chandef);
+		cfg80211_chandef_create(&chandef, cbss->channel, chan_type);
+		break;
+	case NL80211_CHAN_WIDTH_5:
+	case NL80211_CHAN_WIDTH_10:
+		cfg80211_chandef_create(&chandef, cbss->channel,
+					NL80211_CHAN_WIDTH_20_NOHT);
+		chandef.width = sdata->u.ibss.chandef.width;
+		break;
+	default:
+		/* fall back to 20 MHz for unsupported modes */
+		cfg80211_chandef_create(&chandef, cbss->channel,
+					NL80211_CHAN_WIDTH_20_NOHT);
+		break;
+	}
+
 	sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
 
 	basic_rates = 0;
@@ -294,7 +319,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 
 	__ieee80211_sta_join_ibss(sdata, cbss->bssid,
 				  beacon_int,
-				  cbss->channel,
+				  &chandef,
 				  basic_rates,
 				  cbss->capability,
 				  tsf, false);
@@ -736,7 +761,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
 	sdata->drop_unencrypted = 0;
 
 	__ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
-				  ifibss->chandef.chan, ifibss->basic_rates,
+				  &ifibss->chandef, ifibss->basic_rates,
740 | capability, 0, true); | 765 | capability, 0, true); |
741 | } | 766 | } |
742 | 767 | ||
@@ -1138,6 +1163,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) | |||
1138 | clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); | 1163 | clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); |
1139 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | | 1164 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | |
1140 | BSS_CHANGED_IBSS); | 1165 | BSS_CHANGED_IBSS); |
1166 | ieee80211_vif_release_channel(sdata); | ||
1141 | synchronize_rcu(); | 1167 | synchronize_rcu(); |
1142 | kfree(presp); | 1168 | kfree(presp); |
1143 | 1169 | ||
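
Editor's note: the ibss.c hunks change __ieee80211_sta_join_ibss() to take a full cfg80211_chan_def rather than a bare channel, copy it locally before possibly downgrading the width, build the chandef for a discovered BSS from the configured width (falling back to 20 MHz no-HT for unsupported widths such as 80 MHz), and release the channel context in ieee80211_ibss_leave(). The sketch below models only the width-selection switch; the enum values are local stand-ins for the nl80211 constants and the 20/40 MHz cases collapse the HT-type lookup the real code performs.

	/* Stand-alone model of the width selection in ieee80211_sta_join_ibss(). */
	#include <stdio.h>

	enum width_model { W_20_NOHT, W_20, W_40, W_80, W_5, W_10 };

	static enum width_model ibss_join_width(enum width_model configured)
	{
		switch (configured) {
		case W_20_NOHT:
		case W_20:
		case W_40:
		case W_5:
		case W_10:
			return configured;   /* keep the configured width */
		default:
			return W_20_NOHT;    /* unsupported (e.g. 80 MHz): fall back */
		}
	}

	int main(void)
	{
		printf("configured 40 MHz -> join with width %d\n", ibss_join_width(W_40));
		printf("configured 80 MHz -> join with width %d\n", ibss_join_width(W_80));
		return 0;
	}
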
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index f5aed963b22e..f3bbea1eb9e7 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c | |||
@@ -828,6 +828,9 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, | |||
828 | if (sband->band != IEEE80211_BAND_2GHZ) | 828 | if (sband->band != IEEE80211_BAND_2GHZ) |
829 | return; | 829 | return; |
830 | 830 | ||
831 | if (!(mp->hw->flags & IEEE80211_HW_SUPPORTS_HT_CCK_RATES)) | ||
832 | return; | ||
833 | |||
831 | mi->cck_supported = 0; | 834 | mi->cck_supported = 0; |
832 | mi->cck_supported_short = 0; | 835 | mi->cck_supported_short = 0; |
833 | for (i = 0; i < 4; i++) { | 836 | for (i = 0; i < 4; i++) { |
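
Editor's note: the minstrel_ht hunk makes the rate-control code skip CCK (legacy 802.11b) rate setup unless the driver advertises IEEE80211_HW_SUPPORTS_HT_CCK_RATES. The gate is a plain hardware-flag check; a minimal stand-alone model is shown below, with an illustrative flag value rather than the real mac80211 constant.

	/* Model of the capability gate added to minstrel_ht_update_cck(). */
	#include <stdio.h>

	#define HW_SUPPORTS_HT_CCK_RATES_MODEL (1u << 0)

	struct hw_model {
		unsigned int flags;
	};

	static void update_cck_model(const struct hw_model *hw,
				     unsigned int *cck_supported)
	{
		if (!(hw->flags & HW_SUPPORTS_HT_CCK_RATES_MODEL))
			return;            /* driver cannot mix CCK with HT: bail out */
		*cck_supported = 0x0f;     /* pretend all four CCK rates are usable */
	}

	int main(void)
	{
		struct hw_model with_flag = { HW_SUPPORTS_HT_CCK_RATES_MODEL };
		struct hw_model without_flag = { 0 };
		unsigned int a = 0, b = 0;

		update_cck_model(&with_flag, &a);
		update_cck_model(&without_flag, &b);
		printf("with flag: cck_supported=0x%x, without: 0x%x\n", a, b);
		return 0;
	}
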
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 512718adb0d5..0c741cec4d0d 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -364,7 +364,7 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops) | |||
364 | EXPORT_SYMBOL(genl_unregister_ops); | 364 | EXPORT_SYMBOL(genl_unregister_ops); |
365 | 365 | ||
366 | /** | 366 | /** |
367 | * genl_register_family - register a generic netlink family | 367 | * __genl_register_family - register a generic netlink family |
368 | * @family: generic netlink family | 368 | * @family: generic netlink family |
369 | * | 369 | * |
370 | * Registers the specified family after validating it first. Only one | 370 | * Registers the specified family after validating it first. Only one |
@@ -374,7 +374,7 @@ EXPORT_SYMBOL(genl_unregister_ops); | |||
374 | * | 374 | * |
375 | * Return 0 on success or a negative error code. | 375 | * Return 0 on success or a negative error code. |
376 | */ | 376 | */ |
377 | int genl_register_family(struct genl_family *family) | 377 | int __genl_register_family(struct genl_family *family) |
378 | { | 378 | { |
379 | int err = -EINVAL; | 379 | int err = -EINVAL; |
380 | 380 | ||
@@ -430,10 +430,10 @@ errout_locked: | |||
430 | errout: | 430 | errout: |
431 | return err; | 431 | return err; |
432 | } | 432 | } |
433 | EXPORT_SYMBOL(genl_register_family); | 433 | EXPORT_SYMBOL(__genl_register_family); |
434 | 434 | ||
435 | /** | 435 | /** |
436 | * genl_register_family_with_ops - register a generic netlink family | 436 | * __genl_register_family_with_ops - register a generic netlink family |
437 | * @family: generic netlink family | 437 | * @family: generic netlink family |
438 | * @ops: operations to be registered | 438 | * @ops: operations to be registered |
439 | * @n_ops: number of elements to register | 439 | * @n_ops: number of elements to register |
@@ -457,12 +457,12 @@ EXPORT_SYMBOL(genl_register_family); | |||
457 | * | 457 | * |
458 | * Return 0 on success or a negative error code. | 458 | * Return 0 on success or a negative error code. |
459 | */ | 459 | */ |
460 | int genl_register_family_with_ops(struct genl_family *family, | 460 | int __genl_register_family_with_ops(struct genl_family *family, |
461 | struct genl_ops *ops, size_t n_ops) | 461 | struct genl_ops *ops, size_t n_ops) |
462 | { | 462 | { |
463 | int err, i; | 463 | int err, i; |
464 | 464 | ||
465 | err = genl_register_family(family); | 465 | err = __genl_register_family(family); |
466 | if (err) | 466 | if (err) |
467 | return err; | 467 | return err; |
468 | 468 | ||
@@ -476,7 +476,7 @@ err_out: | |||
476 | genl_unregister_family(family); | 476 | genl_unregister_family(family); |
477 | return err; | 477 | return err; |
478 | } | 478 | } |
479 | EXPORT_SYMBOL(genl_register_family_with_ops); | 479 | EXPORT_SYMBOL(__genl_register_family_with_ops); |
480 | 480 | ||
481 | /** | 481 | /** |
482 | * genl_unregister_family - unregister generic netlink family | 482 | * genl_unregister_family - unregister generic netlink family |
@@ -544,6 +544,30 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, | |||
544 | } | 544 | } |
545 | EXPORT_SYMBOL(genlmsg_put); | 545 | EXPORT_SYMBOL(genlmsg_put); |
546 | 546 | ||
547 | static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb) | ||
548 | { | ||
549 | struct genl_ops *ops = cb->data; | ||
550 | int rc; | ||
551 | |||
552 | genl_lock(); | ||
553 | rc = ops->dumpit(skb, cb); | ||
554 | genl_unlock(); | ||
555 | return rc; | ||
556 | } | ||
557 | |||
558 | static int genl_lock_done(struct netlink_callback *cb) | ||
559 | { | ||
560 | struct genl_ops *ops = cb->data; | ||
561 | int rc = 0; | ||
562 | |||
563 | if (ops->done) { | ||
564 | genl_lock(); | ||
565 | rc = ops->done(cb); | ||
566 | genl_unlock(); | ||
567 | } | ||
568 | return rc; | ||
569 | } | ||
570 | |||
547 | static int genl_family_rcv_msg(struct genl_family *family, | 571 | static int genl_family_rcv_msg(struct genl_family *family, |
548 | struct sk_buff *skb, | 572 | struct sk_buff *skb, |
549 | struct nlmsghdr *nlh) | 573 | struct nlmsghdr *nlh) |
@@ -572,15 +596,34 @@ static int genl_family_rcv_msg(struct genl_family *family, | |||
572 | return -EPERM; | 596 | return -EPERM; |
573 | 597 | ||
574 | if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { | 598 | if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { |
575 | struct netlink_dump_control c = { | 599 | int rc; |
576 | .dump = ops->dumpit, | ||
577 | .done = ops->done, | ||
578 | }; | ||
579 | 600 | ||
580 | if (ops->dumpit == NULL) | 601 | if (ops->dumpit == NULL) |
581 | return -EOPNOTSUPP; | 602 | return -EOPNOTSUPP; |
582 | 603 | ||
583 | return netlink_dump_start(net->genl_sock, skb, nlh, &c); | 604 | if (!family->parallel_ops) { |
605 | struct netlink_dump_control c = { | ||
606 | .module = family->module, | ||
607 | .data = ops, | ||
608 | .dump = genl_lock_dumpit, | ||
609 | .done = genl_lock_done, | ||
610 | }; | ||
611 | |||
612 | genl_unlock(); | ||
613 | rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c); | ||
614 | genl_lock(); | ||
615 | |||
616 | } else { | ||
617 | struct netlink_dump_control c = { | ||
618 | .module = family->module, | ||
619 | .dump = ops->dumpit, | ||
620 | .done = ops->done, | ||
621 | }; | ||
622 | |||
623 | rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c); | ||
624 | } | ||
625 | |||
626 | return rc; | ||
584 | } | 627 | } |
585 | 628 | ||
586 | if (ops->doit == NULL) | 629 | if (ops->doit == NULL) |
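
Editor's note: the genetlink hunks rename the register functions to double-underscore internals and rework dump handling: for families without parallel_ops, the real dumpit/done callbacks are stashed in the dump control's data pointer and invoked through genl_lock_dumpit()/genl_lock_done(), so dumps still run under the genl mutex even though netlink drives them asynchronously; the dump is started with the mutex temporarily dropped, while parallel families keep their callbacks unwrapped. Below is a user-space sketch of that wrap-the-callback-with-a-lock pattern (build with -pthread); all names are stand-ins for the kernel types.

	/* Sketch of the locked-dump wrapper scheme added to genl_family_rcv_msg(). */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t genl_mutex_model = PTHREAD_MUTEX_INITIALIZER;

	struct ops_model {
		int (*dumpit)(void *ctx);
		int (*done)(void *ctx);
	};

	struct dump_control_model {
		void *data;                       /* carries the original ops */
		int (*dump)(void *ctx, void *data);
	};

	static int locked_dumpit(void *ctx, void *data)
	{
		struct ops_model *ops = data;
		int rc;

		pthread_mutex_lock(&genl_mutex_model);
		rc = ops->dumpit(ctx);            /* real callback runs under the lock */
		pthread_mutex_unlock(&genl_mutex_model);
		return rc;
	}

	static int example_dumpit(void *ctx)
	{
		(void)ctx;
		printf("dumping one batch under the lock\n");
		return 0;
	}

	int main(void)
	{
		struct ops_model ops = { .dumpit = example_dumpit, .done = NULL };
		struct dump_control_model c = { .data = &ops, .dump = locked_dumpit };

		/* the netlink core would call c.dump repeatedly; once is enough here */
		return c.dump(NULL, c.data);
	}
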
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 75edcfad6e26..1504bb11e4f3 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
@@ -207,10 +207,13 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base, | |||
207 | pgfrom_base -= copy; | 207 | pgfrom_base -= copy; |
208 | 208 | ||
209 | vto = kmap_atomic(*pgto); | 209 | vto = kmap_atomic(*pgto); |
210 | vfrom = kmap_atomic(*pgfrom); | 210 | if (*pgto != *pgfrom) { |
211 | memmove(vto + pgto_base, vfrom + pgfrom_base, copy); | 211 | vfrom = kmap_atomic(*pgfrom); |
212 | memcpy(vto + pgto_base, vfrom + pgfrom_base, copy); | ||
213 | kunmap_atomic(vfrom); | ||
214 | } else | ||
215 | memmove(vto + pgto_base, vto + pgfrom_base, copy); | ||
212 | flush_dcache_page(*pgto); | 216 | flush_dcache_page(*pgto); |
213 | kunmap_atomic(vfrom); | ||
214 | kunmap_atomic(vto); | 217 | kunmap_atomic(vto); |
215 | 218 | ||
216 | } while ((len -= copy) != 0); | 219 | } while ((len -= copy) != 0); |
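
Editor's note: the xdr.c fix handles the case where source and destination of the shift land in the same page. The old code mapped the page twice with kmap_atomic(); the new code maps it once and uses memmove() for the potentially overlapping in-page case, keeping memcpy() plus the second mapping only when the pages differ. The user-space snippet below just illustrates why memmove() is required for the overlapping case; it does not model the page mapping.

	/* Why _shift_data_right_pages() needs memmove within a single page:
	 * the source and destination ranges can overlap, and memcpy has
	 * undefined behaviour on overlap. */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char page[16] = "abcdefgh";
		char dst[16];

		/* shift "abcdefgh" right by 2 within the same buffer: overlapping */
		memmove(page + 2, page, 8);
		page[10] = '\0';
		printf("after overlapping shift: %s\n", page); /* ababcdefgh */

		/* across two distinct buffers (distinct pages in the kernel case)
		 * memcpy is safe */
		memcpy(dst, page, sizeof(page));
		printf("copy to separate buffer: %s\n", dst);
		return 0;
	}
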
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index ce8249c76827..6cc7ddd2fb7c 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -1257,7 +1257,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf) | |||
1257 | /* Accept only ACK or NACK message */ | 1257 | /* Accept only ACK or NACK message */ |
1258 | if (unlikely(msg_errcode(msg))) { | 1258 | if (unlikely(msg_errcode(msg))) { |
1259 | sock->state = SS_DISCONNECTING; | 1259 | sock->state = SS_DISCONNECTING; |
1260 | sk->sk_err = -ECONNREFUSED; | 1260 | sk->sk_err = ECONNREFUSED; |
1261 | retval = TIPC_OK; | 1261 | retval = TIPC_OK; |
1262 | break; | 1262 | break; |
1263 | } | 1263 | } |
@@ -1268,7 +1268,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf) | |||
1268 | res = auto_connect(sock, msg); | 1268 | res = auto_connect(sock, msg); |
1269 | if (res) { | 1269 | if (res) { |
1270 | sock->state = SS_DISCONNECTING; | 1270 | sock->state = SS_DISCONNECTING; |
1271 | sk->sk_err = res; | 1271 | sk->sk_err = -res; |
1272 | retval = TIPC_OK; | 1272 | retval = TIPC_OK; |
1273 | break; | 1273 | break; |
1274 | } | 1274 | } |
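
Editor's note: both TIPC hunks fix the sign stored in sk->sk_err. Kernel functions return negative errno values, but sk_err (and the value user space later reads via SO_ERROR) is expected to hold the positive errno, so the code now stores ECONNREFUSED directly and negates the negative result of auto_connect(). A tiny demonstration of that convention, with auto_connect_model() as a hypothetical stand-in:

	/* sk_err holds the positive errno even though callees return -errno. */
	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	static int auto_connect_model(void)
	{
		return -ECONNREFUSED;   /* kernel-style negative return */
	}

	int main(void)
	{
		int res = auto_connect_model();
		int sk_err = -res;      /* store the positive value, as the fix does */

		printf("stored sk_err=%d (%s)\n", sk_err, strerror(sk_err));
		return 0;
	}
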
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index eb4a84288648..3bb2cdc13b46 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
@@ -214,5 +214,26 @@ int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb) | |||
214 | return inner_mode->afinfo->extract_output(x, skb); | 214 | return inner_mode->afinfo->extract_output(x, skb); |
215 | } | 215 | } |
216 | 216 | ||
217 | void xfrm_local_error(struct sk_buff *skb, int mtu) | ||
218 | { | ||
219 | unsigned int proto; | ||
220 | struct xfrm_state_afinfo *afinfo; | ||
221 | |||
222 | if (skb->protocol == htons(ETH_P_IP)) | ||
223 | proto = AF_INET; | ||
224 | else if (skb->protocol == htons(ETH_P_IPV6)) | ||
225 | proto = AF_INET6; | ||
226 | else | ||
227 | return; | ||
228 | |||
229 | afinfo = xfrm_state_get_afinfo(proto); | ||
230 | if (!afinfo) | ||
231 | return; | ||
232 | |||
233 | afinfo->local_error(skb, mtu); | ||
234 | xfrm_state_put_afinfo(afinfo); | ||
235 | } | ||
236 | |||
217 | EXPORT_SYMBOL_GPL(xfrm_output); | 237 | EXPORT_SYMBOL_GPL(xfrm_output); |
218 | EXPORT_SYMBOL_GPL(xfrm_inner_extract_output); | 238 | EXPORT_SYMBOL_GPL(xfrm_inner_extract_output); |
239 | EXPORT_SYMBOL_GPL(xfrm_local_error); | ||
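
Editor's note: the new xfrm_local_error() maps the packet's ethertype to an address family, looks up that family's state afinfo, calls its local_error hook, and drops the reference again, so IPv4 and IPv6 error reporting share one entry point. The sketch below models that dispatch in plain C; the table and refcounting are simplified stand-ins for xfrm_state_get_afinfo()/xfrm_state_put_afinfo().

	/* Dispatch-by-family model of xfrm_local_error(). */
	#include <stdio.h>

	enum { FAM_INET, FAM_INET6, FAM_MAX };

	struct afinfo_model {
		void (*local_error)(int mtu);
		int refs;
	};

	static void v4_err(int mtu) { printf("IPv4 local error, mtu=%d\n", mtu); }
	static void v6_err(int mtu) { printf("IPv6 local error, mtu=%d\n", mtu); }

	static struct afinfo_model table[FAM_MAX] = {
		[FAM_INET]  = { .local_error = v4_err },
		[FAM_INET6] = { .local_error = v6_err },
	};

	static struct afinfo_model *get_afinfo(int family)
	{
		if (family < 0 || family >= FAM_MAX)
			return NULL;
		table[family].refs++;   /* stands in for the RCU read-side hold */
		return &table[family];
	}

	static void put_afinfo(struct afinfo_model *a) { a->refs--; }

	static void local_error_model(int family, int mtu)
	{
		struct afinfo_model *afinfo = get_afinfo(family);

		if (!afinfo)
			return;         /* unknown family: nothing to report */
		afinfo->local_error(mtu);
		put_afinfo(afinfo);
	}

	int main(void)
	{
		local_error_model(FAM_INET6, 1280);
		return 0;
	}
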
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index e52cab3591dd..f77c371ea72b 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -320,10 +320,8 @@ static void xfrm_queue_purge(struct sk_buff_head *list) | |||
320 | { | 320 | { |
321 | struct sk_buff *skb; | 321 | struct sk_buff *skb; |
322 | 322 | ||
323 | while ((skb = skb_dequeue(list)) != NULL) { | 323 | while ((skb = skb_dequeue(list)) != NULL) |
324 | dev_put(skb->dev); | ||
325 | kfree_skb(skb); | 324 | kfree_skb(skb); |
326 | } | ||
327 | } | 325 | } |
328 | 326 | ||
329 | /* Rule must be locked. Release descentant resources, announce | 327 | /* Rule must be locked. Release descentant resources, announce |
@@ -1758,7 +1756,6 @@ static void xfrm_policy_queue_process(unsigned long arg) | |||
1758 | struct sk_buff *skb; | 1756 | struct sk_buff *skb; |
1759 | struct sock *sk; | 1757 | struct sock *sk; |
1760 | struct dst_entry *dst; | 1758 | struct dst_entry *dst; |
1761 | struct net_device *dev; | ||
1762 | struct xfrm_policy *pol = (struct xfrm_policy *)arg; | 1759 | struct xfrm_policy *pol = (struct xfrm_policy *)arg; |
1763 | struct xfrm_policy_queue *pq = &pol->polq; | 1760 | struct xfrm_policy_queue *pq = &pol->polq; |
1764 | struct flowi fl; | 1761 | struct flowi fl; |
@@ -1805,7 +1802,6 @@ static void xfrm_policy_queue_process(unsigned long arg) | |||
1805 | dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path, | 1802 | dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path, |
1806 | &fl, skb->sk, 0); | 1803 | &fl, skb->sk, 0); |
1807 | if (IS_ERR(dst)) { | 1804 | if (IS_ERR(dst)) { |
1808 | dev_put(skb->dev); | ||
1809 | kfree_skb(skb); | 1805 | kfree_skb(skb); |
1810 | continue; | 1806 | continue; |
1811 | } | 1807 | } |
@@ -1814,9 +1810,7 @@ static void xfrm_policy_queue_process(unsigned long arg) | |||
1814 | skb_dst_drop(skb); | 1810 | skb_dst_drop(skb); |
1815 | skb_dst_set(skb, dst); | 1811 | skb_dst_set(skb, dst); |
1816 | 1812 | ||
1817 | dev = skb->dev; | ||
1818 | err = dst_output(skb); | 1813 | err = dst_output(skb); |
1819 | dev_put(dev); | ||
1820 | } | 1814 | } |
1821 | 1815 | ||
1822 | return; | 1816 | return; |
@@ -1839,7 +1833,6 @@ static int xdst_queue_output(struct sk_buff *skb) | |||
1839 | } | 1833 | } |
1840 | 1834 | ||
1841 | skb_dst_force(skb); | 1835 | skb_dst_force(skb); |
1842 | dev_hold(skb->dev); | ||
1843 | 1836 | ||
1844 | spin_lock_bh(&pq->hold_queue.lock); | 1837 | spin_lock_bh(&pq->hold_queue.lock); |
1845 | 1838 | ||
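
Editor's note: the xfrm_policy.c hunks stop taking an extra dev_hold()/dev_put() on skb->dev for packets parked on the policy hold queue; the remaining skb_dst_force() already pins the destination for as long as the skb is queued, so every exit path now frees the skb without a separate device release. The generic sketch below only illustrates the refcount discipline (one owned reference, released on exactly one path); all names are invented for the example.

	/* One reference owned per queued item, dropped once wherever it leaves. */
	#include <stdio.h>
	#include <stdlib.h>

	struct resource { int refs; };

	struct item {
		struct resource *res;   /* the single pinned reference */
	};

	static void res_put(struct resource *r) { r->refs--; }

	static void free_item(struct item *it)
	{
		res_put(it->res);       /* error path and success path both end here */
		free(it);
	}

	int main(void)
	{
		struct resource route = { .refs = 1 };
		struct item *it = malloc(sizeof(*it));

		if (!it)
			return 1;
		it->res = &route;
		free_item(it);
		printf("remaining refs: %d\n", route.refs);
		return 0;
	}
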
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 78f66fa92449..54c0acd29468 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -39,9 +39,6 @@ static DEFINE_SPINLOCK(xfrm_state_lock); | |||
39 | 39 | ||
40 | static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; | 40 | static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; |
41 | 41 | ||
42 | static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); | ||
43 | static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); | ||
44 | |||
45 | static inline unsigned int xfrm_dst_hash(struct net *net, | 42 | static inline unsigned int xfrm_dst_hash(struct net *net, |
46 | const xfrm_address_t *daddr, | 43 | const xfrm_address_t *daddr, |
47 | const xfrm_address_t *saddr, | 44 | const xfrm_address_t *saddr, |
@@ -1860,7 +1857,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo) | |||
1860 | } | 1857 | } |
1861 | EXPORT_SYMBOL(xfrm_state_unregister_afinfo); | 1858 | EXPORT_SYMBOL(xfrm_state_unregister_afinfo); |
1862 | 1859 | ||
1863 | static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) | 1860 | struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) |
1864 | { | 1861 | { |
1865 | struct xfrm_state_afinfo *afinfo; | 1862 | struct xfrm_state_afinfo *afinfo; |
1866 | if (unlikely(family >= NPROTO)) | 1863 | if (unlikely(family >= NPROTO)) |
@@ -1872,7 +1869,7 @@ static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) | |||
1872 | return afinfo; | 1869 | return afinfo; |
1873 | } | 1870 | } |
1874 | 1871 | ||
1875 | static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo) | 1872 | void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo) |
1876 | { | 1873 | { |
1877 | rcu_read_unlock(); | 1874 | rcu_read_unlock(); |
1878 | } | 1875 | } |
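
Editor's note: dropping the static qualifiers (and the forward declarations) makes xfrm_state_get_afinfo()/xfrm_state_put_afinfo() usable from xfrm_output.c for the new xfrm_local_error(). The contract shown in the remaining lines is that get returns the entry with the RCU read side held and put releases it, so every successful get must be paired with a put. A small user-space model of that get/put pairing, with a rwlock standing in for RCU and all names invented for the example:

	/* get returns the entry with a read lock held; put releases it. */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
	static const char *families[2] = { "ipv4", "ipv6" };

	static const char *get_afinfo_model(unsigned int family)
	{
		if (family >= 2)
			return NULL;
		pthread_rwlock_rdlock(&table_lock);  /* stands in for rcu_read_lock() */
		if (!families[family]) {
			pthread_rwlock_unlock(&table_lock);
			return NULL;
		}
		return families[family];             /* returned with the lock held */
	}

	static void put_afinfo_model(void)
	{
		pthread_rwlock_unlock(&table_lock);  /* stands in for rcu_read_unlock() */
	}

	int main(void)
	{
		const char *af = get_afinfo_model(1);

		if (af) {
			printf("using %s handlers\n", af);
			put_afinfo_model();
		}
		return 0;
	}
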