diff options
author | Dave Airlie <airlied@redhat.com> | 2018-03-28 00:30:41 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2018-03-28 00:30:41 -0400 |
commit | 2b4f44eec2be2688511c2b617d0e1b4f94c45ba4 (patch) | |
tree | 533c03602f4ae6d6404db6fa56c88e6f83e1bebe /net | |
parent | 33d009cd889490838c5db9b9339856c9e3d3facc (diff) | |
parent | 3eb2ce825ea1ad89d20f7a3b5780df850e4be274 (diff) |
Backmerge tag 'v4.16-rc7' into drm-next
Linux 4.16-rc7
This was requested by Daniel, and things were getting
a bit hard to reconcile, most of the conflicts were
trivial though.
Diffstat (limited to 'net')
199 files changed, 1540 insertions, 1088 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 64aa9f755e1d..45c9bf5ff3a0 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c | |||
@@ -48,8 +48,8 @@ bool vlan_do_receive(struct sk_buff **skbp) | |||
48 | * original position later | 48 | * original position later |
49 | */ | 49 | */ |
50 | skb_push(skb, offset); | 50 | skb_push(skb, offset); |
51 | skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto, | 51 | skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto, |
52 | skb->vlan_tci); | 52 | skb->vlan_tci, skb->mac_len); |
53 | if (!skb) | 53 | if (!skb) |
54 | return false; | 54 | return false; |
55 | skb_pull(skb, offset + VLAN_HLEN); | 55 | skb_pull(skb, offset + VLAN_HLEN); |
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index f3a4efcf1456..3aa5a93ad107 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c | |||
@@ -160,7 +160,8 @@ static void req_done(struct virtqueue *vq) | |||
160 | spin_unlock_irqrestore(&chan->lock, flags); | 160 | spin_unlock_irqrestore(&chan->lock, flags); |
161 | /* Wakeup if anyone waiting for VirtIO ring space. */ | 161 | /* Wakeup if anyone waiting for VirtIO ring space. */ |
162 | wake_up(chan->vc_wq); | 162 | wake_up(chan->vc_wq); |
163 | p9_client_cb(chan->client, req, REQ_STATUS_RCVD); | 163 | if (len) |
164 | p9_client_cb(chan->client, req, REQ_STATUS_RCVD); | ||
164 | } | 165 | } |
165 | } | 166 | } |
166 | 167 | ||
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 79e326383726..99abeadf416e 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c | |||
@@ -157,7 +157,7 @@ static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node) | |||
157 | * Return: 0 on success, a negative error code otherwise. | 157 | * Return: 0 on success, a negative error code otherwise. |
158 | */ | 158 | */ |
159 | static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node, | 159 | static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node, |
160 | int max_if_num) | 160 | unsigned int max_if_num) |
161 | { | 161 | { |
162 | void *data_ptr; | 162 | void *data_ptr; |
163 | size_t old_size; | 163 | size_t old_size; |
@@ -201,7 +201,8 @@ unlock: | |||
201 | */ | 201 | */ |
202 | static void | 202 | static void |
203 | batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node, | 203 | batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node, |
204 | int max_if_num, int del_if_num) | 204 | unsigned int max_if_num, |
205 | unsigned int del_if_num) | ||
205 | { | 206 | { |
206 | size_t chunk_size; | 207 | size_t chunk_size; |
207 | size_t if_offset; | 208 | size_t if_offset; |
@@ -239,7 +240,8 @@ batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node, | |||
239 | */ | 240 | */ |
240 | static void | 241 | static void |
241 | batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node, | 242 | batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node, |
242 | int max_if_num, int del_if_num) | 243 | unsigned int max_if_num, |
244 | unsigned int del_if_num) | ||
243 | { | 245 | { |
244 | size_t if_offset; | 246 | size_t if_offset; |
245 | void *data_ptr; | 247 | void *data_ptr; |
@@ -276,7 +278,8 @@ batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node, | |||
276 | * Return: 0 on success, a negative error code otherwise. | 278 | * Return: 0 on success, a negative error code otherwise. |
277 | */ | 279 | */ |
278 | static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node, | 280 | static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node, |
279 | int max_if_num, int del_if_num) | 281 | unsigned int max_if_num, |
282 | unsigned int del_if_num) | ||
280 | { | 283 | { |
281 | spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); | 284 | spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); |
282 | 285 | ||
@@ -311,7 +314,8 @@ static struct batadv_orig_node * | |||
311 | batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) | 314 | batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) |
312 | { | 315 | { |
313 | struct batadv_orig_node *orig_node; | 316 | struct batadv_orig_node *orig_node; |
314 | int size, hash_added; | 317 | int hash_added; |
318 | size_t size; | ||
315 | 319 | ||
316 | orig_node = batadv_orig_hash_find(bat_priv, addr); | 320 | orig_node = batadv_orig_hash_find(bat_priv, addr); |
317 | if (orig_node) | 321 | if (orig_node) |
@@ -893,7 +897,7 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface) | |||
893 | u32 i; | 897 | u32 i; |
894 | size_t word_index; | 898 | size_t word_index; |
895 | u8 *w; | 899 | u8 *w; |
896 | int if_num; | 900 | unsigned int if_num; |
897 | 901 | ||
898 | for (i = 0; i < hash->size; i++) { | 902 | for (i = 0; i < hash->size; i++) { |
899 | head = &hash->table[i]; | 903 | head = &hash->table[i]; |
@@ -1023,7 +1027,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, | |||
1023 | struct batadv_neigh_node *tmp_neigh_node = NULL; | 1027 | struct batadv_neigh_node *tmp_neigh_node = NULL; |
1024 | struct batadv_neigh_node *router = NULL; | 1028 | struct batadv_neigh_node *router = NULL; |
1025 | struct batadv_orig_node *orig_node_tmp; | 1029 | struct batadv_orig_node *orig_node_tmp; |
1026 | int if_num; | 1030 | unsigned int if_num; |
1027 | u8 sum_orig, sum_neigh; | 1031 | u8 sum_orig, sum_neigh; |
1028 | u8 *neigh_addr; | 1032 | u8 *neigh_addr; |
1029 | u8 tq_avg; | 1033 | u8 tq_avg; |
@@ -1182,7 +1186,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, | |||
1182 | u8 total_count; | 1186 | u8 total_count; |
1183 | u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own; | 1187 | u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own; |
1184 | unsigned int neigh_rq_inv_cube, neigh_rq_max_cube; | 1188 | unsigned int neigh_rq_inv_cube, neigh_rq_max_cube; |
1185 | int if_num; | 1189 | unsigned int if_num; |
1186 | unsigned int tq_asym_penalty, inv_asym_penalty; | 1190 | unsigned int tq_asym_penalty, inv_asym_penalty; |
1187 | unsigned int combined_tq; | 1191 | unsigned int combined_tq; |
1188 | unsigned int tq_iface_penalty; | 1192 | unsigned int tq_iface_penalty; |
@@ -1702,9 +1706,9 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset, | |||
1702 | 1706 | ||
1703 | if (is_my_orig) { | 1707 | if (is_my_orig) { |
1704 | unsigned long *word; | 1708 | unsigned long *word; |
1705 | int offset; | 1709 | size_t offset; |
1706 | s32 bit_pos; | 1710 | s32 bit_pos; |
1707 | s16 if_num; | 1711 | unsigned int if_num; |
1708 | u8 *weight; | 1712 | u8 *weight; |
1709 | 1713 | ||
1710 | orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv, | 1714 | orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv, |
@@ -2729,7 +2733,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, | |||
2729 | struct batadv_neigh_ifinfo *router_ifinfo = NULL; | 2733 | struct batadv_neigh_ifinfo *router_ifinfo = NULL; |
2730 | struct batadv_neigh_node *router; | 2734 | struct batadv_neigh_node *router; |
2731 | struct batadv_gw_node *curr_gw; | 2735 | struct batadv_gw_node *curr_gw; |
2732 | int ret = -EINVAL; | 2736 | int ret = 0; |
2733 | void *hdr; | 2737 | void *hdr; |
2734 | 2738 | ||
2735 | router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); | 2739 | router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); |
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c index 27e165ac9302..c74f81341dab 100644 --- a/net/batman-adv/bat_v.c +++ b/net/batman-adv/bat_v.c | |||
@@ -928,7 +928,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, | |||
928 | struct batadv_neigh_ifinfo *router_ifinfo = NULL; | 928 | struct batadv_neigh_ifinfo *router_ifinfo = NULL; |
929 | struct batadv_neigh_node *router; | 929 | struct batadv_neigh_node *router; |
930 | struct batadv_gw_node *curr_gw; | 930 | struct batadv_gw_node *curr_gw; |
931 | int ret = -EINVAL; | 931 | int ret = 0; |
932 | void *hdr; | 932 | void *hdr; |
933 | 933 | ||
934 | router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); | 934 | router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); |
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index fad47853ad3c..b1a08374088b 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c | |||
@@ -2161,22 +2161,25 @@ batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, | |||
2161 | { | 2161 | { |
2162 | struct batadv_bla_claim *claim; | 2162 | struct batadv_bla_claim *claim; |
2163 | int idx = 0; | 2163 | int idx = 0; |
2164 | int ret = 0; | ||
2164 | 2165 | ||
2165 | rcu_read_lock(); | 2166 | rcu_read_lock(); |
2166 | hlist_for_each_entry_rcu(claim, head, hash_entry) { | 2167 | hlist_for_each_entry_rcu(claim, head, hash_entry) { |
2167 | if (idx++ < *idx_skip) | 2168 | if (idx++ < *idx_skip) |
2168 | continue; | 2169 | continue; |
2169 | if (batadv_bla_claim_dump_entry(msg, portid, seq, | 2170 | |
2170 | primary_if, claim)) { | 2171 | ret = batadv_bla_claim_dump_entry(msg, portid, seq, |
2172 | primary_if, claim); | ||
2173 | if (ret) { | ||
2171 | *idx_skip = idx - 1; | 2174 | *idx_skip = idx - 1; |
2172 | goto unlock; | 2175 | goto unlock; |
2173 | } | 2176 | } |
2174 | } | 2177 | } |
2175 | 2178 | ||
2176 | *idx_skip = idx; | 2179 | *idx_skip = 0; |
2177 | unlock: | 2180 | unlock: |
2178 | rcu_read_unlock(); | 2181 | rcu_read_unlock(); |
2179 | return 0; | 2182 | return ret; |
2180 | } | 2183 | } |
2181 | 2184 | ||
2182 | /** | 2185 | /** |
@@ -2391,22 +2394,25 @@ batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, | |||
2391 | { | 2394 | { |
2392 | struct batadv_bla_backbone_gw *backbone_gw; | 2395 | struct batadv_bla_backbone_gw *backbone_gw; |
2393 | int idx = 0; | 2396 | int idx = 0; |
2397 | int ret = 0; | ||
2394 | 2398 | ||
2395 | rcu_read_lock(); | 2399 | rcu_read_lock(); |
2396 | hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { | 2400 | hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { |
2397 | if (idx++ < *idx_skip) | 2401 | if (idx++ < *idx_skip) |
2398 | continue; | 2402 | continue; |
2399 | if (batadv_bla_backbone_dump_entry(msg, portid, seq, | 2403 | |
2400 | primary_if, backbone_gw)) { | 2404 | ret = batadv_bla_backbone_dump_entry(msg, portid, seq, |
2405 | primary_if, backbone_gw); | ||
2406 | if (ret) { | ||
2401 | *idx_skip = idx - 1; | 2407 | *idx_skip = idx - 1; |
2402 | goto unlock; | 2408 | goto unlock; |
2403 | } | 2409 | } |
2404 | } | 2410 | } |
2405 | 2411 | ||
2406 | *idx_skip = idx; | 2412 | *idx_skip = 0; |
2407 | unlock: | 2413 | unlock: |
2408 | rcu_read_unlock(); | 2414 | rcu_read_unlock(); |
2409 | return 0; | 2415 | return ret; |
2410 | } | 2416 | } |
2411 | 2417 | ||
2412 | /** | 2418 | /** |
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 9703c791ffc5..87cd962d28d5 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c | |||
@@ -393,7 +393,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb, | |||
393 | batadv_arp_hw_src(skb, hdr_size), &ip_src, | 393 | batadv_arp_hw_src(skb, hdr_size), &ip_src, |
394 | batadv_arp_hw_dst(skb, hdr_size), &ip_dst); | 394 | batadv_arp_hw_dst(skb, hdr_size), &ip_dst); |
395 | 395 | ||
396 | if (hdr_size == 0) | 396 | if (hdr_size < sizeof(struct batadv_unicast_packet)) |
397 | return; | 397 | return; |
398 | 398 | ||
399 | unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; | 399 | unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; |
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 22dde42fd80e..5afe641ee4b0 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c | |||
@@ -288,7 +288,8 @@ batadv_frag_merge_packets(struct hlist_head *chain) | |||
288 | /* Move the existing MAC header to just before the payload. (Override | 288 | /* Move the existing MAC header to just before the payload. (Override |
289 | * the fragment header.) | 289 | * the fragment header.) |
290 | */ | 290 | */ |
291 | skb_pull_rcsum(skb_out, hdr_size); | 291 | skb_pull(skb_out, hdr_size); |
292 | skb_out->ip_summed = CHECKSUM_NONE; | ||
292 | memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN); | 293 | memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN); |
293 | skb_set_mac_header(skb_out, -ETH_HLEN); | 294 | skb_set_mac_header(skb_out, -ETH_HLEN); |
294 | skb_reset_network_header(skb_out); | 295 | skb_reset_network_header(skb_out); |
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 5f186bff284a..68b54a39c51d 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c | |||
@@ -763,6 +763,11 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, | |||
763 | hard_iface->soft_iface = soft_iface; | 763 | hard_iface->soft_iface = soft_iface; |
764 | bat_priv = netdev_priv(hard_iface->soft_iface); | 764 | bat_priv = netdev_priv(hard_iface->soft_iface); |
765 | 765 | ||
766 | if (bat_priv->num_ifaces >= UINT_MAX) { | ||
767 | ret = -ENOSPC; | ||
768 | goto err_dev; | ||
769 | } | ||
770 | |||
766 | ret = netdev_master_upper_dev_link(hard_iface->net_dev, | 771 | ret = netdev_master_upper_dev_link(hard_iface->net_dev, |
767 | soft_iface, NULL, NULL, NULL); | 772 | soft_iface, NULL, NULL, NULL); |
768 | if (ret) | 773 | if (ret) |
@@ -876,7 +881,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface, | |||
876 | batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface); | 881 | batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface); |
877 | 882 | ||
878 | /* nobody uses this interface anymore */ | 883 | /* nobody uses this interface anymore */ |
879 | if (!bat_priv->num_ifaces) { | 884 | if (bat_priv->num_ifaces == 0) { |
880 | batadv_gw_check_client_stop(bat_priv); | 885 | batadv_gw_check_client_stop(bat_priv); |
881 | 886 | ||
882 | if (autodel == BATADV_IF_CLEANUP_AUTO) | 887 | if (autodel == BATADV_IF_CLEANUP_AUTO) |
@@ -912,7 +917,7 @@ batadv_hardif_add_interface(struct net_device *net_dev) | |||
912 | if (ret) | 917 | if (ret) |
913 | goto free_if; | 918 | goto free_if; |
914 | 919 | ||
915 | hard_iface->if_num = -1; | 920 | hard_iface->if_num = 0; |
916 | hard_iface->net_dev = net_dev; | 921 | hard_iface->net_dev = net_dev; |
917 | hard_iface->soft_iface = NULL; | 922 | hard_iface->soft_iface = NULL; |
918 | hard_iface->if_status = BATADV_IF_NOT_IN_USE; | 923 | hard_iface->if_status = BATADV_IF_NOT_IN_USE; |
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c index e91f29c7c638..5daa3d50da17 100644 --- a/net/batman-adv/icmp_socket.c +++ b/net/batman-adv/icmp_socket.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/debugfs.h> | 24 | #include <linux/debugfs.h> |
25 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
26 | #include <linux/etherdevice.h> | 26 | #include <linux/etherdevice.h> |
27 | #include <linux/eventpoll.h> | ||
27 | #include <linux/export.h> | 28 | #include <linux/export.h> |
28 | #include <linux/fcntl.h> | 29 | #include <linux/fcntl.h> |
29 | #include <linux/fs.h> | 30 | #include <linux/fs.h> |
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c index dc9fa37ddd14..cdbe0e5e208b 100644 --- a/net/batman-adv/log.c +++ b/net/batman-adv/log.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/compiler.h> | 22 | #include <linux/compiler.h> |
23 | #include <linux/debugfs.h> | 23 | #include <linux/debugfs.h> |
24 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
25 | #include <linux/eventpoll.h> | ||
25 | #include <linux/export.h> | 26 | #include <linux/export.h> |
26 | #include <linux/fcntl.h> | 27 | #include <linux/fcntl.h> |
27 | #include <linux/fs.h> | 28 | #include <linux/fs.h> |
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index cbdeb47ec3f6..d70640135e3a 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c | |||
@@ -543,8 +543,8 @@ update: | |||
543 | bat_priv->mcast.enabled = true; | 543 | bat_priv->mcast.enabled = true; |
544 | } | 544 | } |
545 | 545 | ||
546 | return !(mcast_data.flags & | 546 | return !(mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV4 && |
547 | (BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6)); | 547 | mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV6); |
548 | } | 548 | } |
549 | 549 | ||
550 | /** | 550 | /** |
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 58a7d9274435..74782426bb77 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c | |||
@@ -1569,7 +1569,7 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb) | |||
1569 | * Return: 0 on success or negative error number in case of failure | 1569 | * Return: 0 on success or negative error number in case of failure |
1570 | */ | 1570 | */ |
1571 | int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, | 1571 | int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, |
1572 | int max_if_num) | 1572 | unsigned int max_if_num) |
1573 | { | 1573 | { |
1574 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); | 1574 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
1575 | struct batadv_algo_ops *bao = bat_priv->algo_ops; | 1575 | struct batadv_algo_ops *bao = bat_priv->algo_ops; |
@@ -1611,7 +1611,7 @@ err: | |||
1611 | * Return: 0 on success or negative error number in case of failure | 1611 | * Return: 0 on success or negative error number in case of failure |
1612 | */ | 1612 | */ |
1613 | int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, | 1613 | int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, |
1614 | int max_if_num) | 1614 | unsigned int max_if_num) |
1615 | { | 1615 | { |
1616 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); | 1616 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
1617 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 1617 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index 8e543a3cdc6c..15d896b2de6f 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h | |||
@@ -73,9 +73,9 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset); | |||
73 | int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb); | 73 | int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb); |
74 | int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset); | 74 | int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset); |
75 | int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, | 75 | int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, |
76 | int max_if_num); | 76 | unsigned int max_if_num); |
77 | int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, | 77 | int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, |
78 | int max_if_num); | 78 | unsigned int max_if_num); |
79 | struct batadv_orig_node_vlan * | 79 | struct batadv_orig_node_vlan * |
80 | batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node, | 80 | batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node, |
81 | unsigned short vid); | 81 | unsigned short vid); |
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index b6891e8b741c..e61dc1293bb5 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
@@ -759,6 +759,7 @@ free_skb: | |||
759 | /** | 759 | /** |
760 | * batadv_reroute_unicast_packet() - update the unicast header for re-routing | 760 | * batadv_reroute_unicast_packet() - update the unicast header for re-routing |
761 | * @bat_priv: the bat priv with all the soft interface information | 761 | * @bat_priv: the bat priv with all the soft interface information |
762 | * @skb: unicast packet to process | ||
762 | * @unicast_packet: the unicast header to be updated | 763 | * @unicast_packet: the unicast header to be updated |
763 | * @dst_addr: the payload destination | 764 | * @dst_addr: the payload destination |
764 | * @vid: VLAN identifier | 765 | * @vid: VLAN identifier |
@@ -770,7 +771,7 @@ free_skb: | |||
770 | * Return: true if the packet header has been updated, false otherwise | 771 | * Return: true if the packet header has been updated, false otherwise |
771 | */ | 772 | */ |
772 | static bool | 773 | static bool |
773 | batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, | 774 | batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, |
774 | struct batadv_unicast_packet *unicast_packet, | 775 | struct batadv_unicast_packet *unicast_packet, |
775 | u8 *dst_addr, unsigned short vid) | 776 | u8 *dst_addr, unsigned short vid) |
776 | { | 777 | { |
@@ -799,8 +800,10 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, | |||
799 | } | 800 | } |
800 | 801 | ||
801 | /* update the packet header */ | 802 | /* update the packet header */ |
803 | skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet)); | ||
802 | ether_addr_copy(unicast_packet->dest, orig_addr); | 804 | ether_addr_copy(unicast_packet->dest, orig_addr); |
803 | unicast_packet->ttvn = orig_ttvn; | 805 | unicast_packet->ttvn = orig_ttvn; |
806 | skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet)); | ||
804 | 807 | ||
805 | ret = true; | 808 | ret = true; |
806 | out: | 809 | out: |
@@ -841,7 +844,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, | |||
841 | * the packet to | 844 | * the packet to |
842 | */ | 845 | */ |
843 | if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) { | 846 | if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) { |
844 | if (batadv_reroute_unicast_packet(bat_priv, unicast_packet, | 847 | if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet, |
845 | ethhdr->h_dest, vid)) | 848 | ethhdr->h_dest, vid)) |
846 | batadv_dbg_ratelimited(BATADV_DBG_TT, | 849 | batadv_dbg_ratelimited(BATADV_DBG_TT, |
847 | bat_priv, | 850 | bat_priv, |
@@ -887,7 +890,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, | |||
887 | * destination can possibly be updated and forwarded towards the new | 890 | * destination can possibly be updated and forwarded towards the new |
888 | * target host | 891 | * target host |
889 | */ | 892 | */ |
890 | if (batadv_reroute_unicast_packet(bat_priv, unicast_packet, | 893 | if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet, |
891 | ethhdr->h_dest, vid)) { | 894 | ethhdr->h_dest, vid)) { |
892 | batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv, | 895 | batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv, |
893 | "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n", | 896 | "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n", |
@@ -910,12 +913,14 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, | |||
910 | if (!primary_if) | 913 | if (!primary_if) |
911 | return false; | 914 | return false; |
912 | 915 | ||
916 | /* update the packet header */ | ||
917 | skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet)); | ||
913 | ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr); | 918 | ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr); |
919 | unicast_packet->ttvn = curr_ttvn; | ||
920 | skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet)); | ||
914 | 921 | ||
915 | batadv_hardif_put(primary_if); | 922 | batadv_hardif_put(primary_if); |
916 | 923 | ||
917 | unicast_packet->ttvn = curr_ttvn; | ||
918 | |||
919 | return true; | 924 | return true; |
920 | } | 925 | } |
921 | 926 | ||
@@ -968,14 +973,10 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, | |||
968 | struct batadv_orig_node *orig_node = NULL, *orig_node_gw = NULL; | 973 | struct batadv_orig_node *orig_node = NULL, *orig_node_gw = NULL; |
969 | int check, hdr_size = sizeof(*unicast_packet); | 974 | int check, hdr_size = sizeof(*unicast_packet); |
970 | enum batadv_subtype subtype; | 975 | enum batadv_subtype subtype; |
971 | struct ethhdr *ethhdr; | ||
972 | int ret = NET_RX_DROP; | 976 | int ret = NET_RX_DROP; |
973 | bool is4addr, is_gw; | 977 | bool is4addr, is_gw; |
974 | 978 | ||
975 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 979 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
976 | unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; | ||
977 | ethhdr = eth_hdr(skb); | ||
978 | |||
979 | is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR; | 980 | is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR; |
980 | /* the caller function should have already pulled 2 bytes */ | 981 | /* the caller function should have already pulled 2 bytes */ |
981 | if (is4addr) | 982 | if (is4addr) |
@@ -995,12 +996,14 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, | |||
995 | if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size)) | 996 | if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size)) |
996 | goto free_skb; | 997 | goto free_skb; |
997 | 998 | ||
999 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | ||
1000 | |||
998 | /* packet for me */ | 1001 | /* packet for me */ |
999 | if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { | 1002 | if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { |
1000 | /* If this is a unicast packet from another backgone gw, | 1003 | /* If this is a unicast packet from another backgone gw, |
1001 | * drop it. | 1004 | * drop it. |
1002 | */ | 1005 | */ |
1003 | orig_addr_gw = ethhdr->h_source; | 1006 | orig_addr_gw = eth_hdr(skb)->h_source; |
1004 | orig_node_gw = batadv_orig_hash_find(bat_priv, orig_addr_gw); | 1007 | orig_node_gw = batadv_orig_hash_find(bat_priv, orig_addr_gw); |
1005 | if (orig_node_gw) { | 1008 | if (orig_node_gw) { |
1006 | is_gw = batadv_bla_is_backbone_gw(skb, orig_node_gw, | 1009 | is_gw = batadv_bla_is_backbone_gw(skb, orig_node_gw, |
@@ -1015,6 +1018,8 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, | |||
1015 | } | 1018 | } |
1016 | 1019 | ||
1017 | if (is4addr) { | 1020 | if (is4addr) { |
1021 | unicast_4addr_packet = | ||
1022 | (struct batadv_unicast_4addr_packet *)skb->data; | ||
1018 | subtype = unicast_4addr_packet->subtype; | 1023 | subtype = unicast_4addr_packet->subtype; |
1019 | batadv_dat_inc_counter(bat_priv, subtype); | 1024 | batadv_dat_inc_counter(bat_priv, subtype); |
1020 | 1025 | ||
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 900c5ce21cd4..367a81fb785f 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -459,13 +459,7 @@ void batadv_interface_rx(struct net_device *soft_iface, | |||
459 | 459 | ||
460 | /* skb->dev & skb->pkt_type are set here */ | 460 | /* skb->dev & skb->pkt_type are set here */ |
461 | skb->protocol = eth_type_trans(skb, soft_iface); | 461 | skb->protocol = eth_type_trans(skb, soft_iface); |
462 | 462 | skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); | |
463 | /* should not be necessary anymore as we use skb_pull_rcsum() | ||
464 | * TODO: please verify this and remove this TODO | ||
465 | * -- Dec 21st 2009, Simon Wunderlich | ||
466 | */ | ||
467 | |||
468 | /* skb->ip_summed = CHECKSUM_UNNECESSARY; */ | ||
469 | 463 | ||
470 | batadv_inc_counter(bat_priv, BATADV_CNT_RX); | 464 | batadv_inc_counter(bat_priv, BATADV_CNT_RX); |
471 | batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, | 465 | batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, |
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index bb1578410e0c..a5aa6d61f4e2 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h | |||
@@ -167,7 +167,7 @@ struct batadv_hard_iface { | |||
167 | struct list_head list; | 167 | struct list_head list; |
168 | 168 | ||
169 | /** @if_num: identificator of the interface */ | 169 | /** @if_num: identificator of the interface */ |
170 | s16 if_num; | 170 | unsigned int if_num; |
171 | 171 | ||
172 | /** @if_status: status of the interface for batman-adv */ | 172 | /** @if_status: status of the interface for batman-adv */ |
173 | char if_status; | 173 | char if_status; |
@@ -1596,7 +1596,7 @@ struct batadv_priv { | |||
1596 | atomic_t batman_queue_left; | 1596 | atomic_t batman_queue_left; |
1597 | 1597 | ||
1598 | /** @num_ifaces: number of interfaces assigned to this mesh interface */ | 1598 | /** @num_ifaces: number of interfaces assigned to this mesh interface */ |
1599 | char num_ifaces; | 1599 | unsigned int num_ifaces; |
1600 | 1600 | ||
1601 | /** @mesh_obj: kobject for sysfs mesh subdirectory */ | 1601 | /** @mesh_obj: kobject for sysfs mesh subdirectory */ |
1602 | struct kobject *mesh_obj; | 1602 | struct kobject *mesh_obj; |
@@ -2186,15 +2186,16 @@ struct batadv_algo_orig_ops { | |||
2186 | * orig_node due to a new hard-interface being added into the mesh | 2186 | * orig_node due to a new hard-interface being added into the mesh |
2187 | * (optional) | 2187 | * (optional) |
2188 | */ | 2188 | */ |
2189 | int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num); | 2189 | int (*add_if)(struct batadv_orig_node *orig_node, |
2190 | unsigned int max_if_num); | ||
2190 | 2191 | ||
2191 | /** | 2192 | /** |
2192 | * @del_if: ask the routing algorithm to apply the needed changes to the | 2193 | * @del_if: ask the routing algorithm to apply the needed changes to the |
2193 | * orig_node due to an hard-interface being removed from the mesh | 2194 | * orig_node due to an hard-interface being removed from the mesh |
2194 | * (optional) | 2195 | * (optional) |
2195 | */ | 2196 | */ |
2196 | int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num, | 2197 | int (*del_if)(struct batadv_orig_node *orig_node, |
2197 | int del_if_num); | 2198 | unsigned int max_if_num, unsigned int del_if_num); |
2198 | 2199 | ||
2199 | #ifdef CONFIG_BATMAN_ADV_DEBUGFS | 2200 | #ifdef CONFIG_BATMAN_ADV_DEBUGFS |
2200 | /** @print: print the originator table (optional) */ | 2201 | /** @print: print the originator table (optional) */ |
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 01117ae84f1d..a2ddae2f37d7 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c | |||
@@ -2296,8 +2296,14 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) | |||
2296 | else | 2296 | else |
2297 | sec_level = authreq_to_seclevel(auth); | 2297 | sec_level = authreq_to_seclevel(auth); |
2298 | 2298 | ||
2299 | if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) | 2299 | if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) { |
2300 | /* If link is already encrypted with sufficient security we | ||
2301 | * still need refresh encryption as per Core Spec 5.0 Vol 3, | ||
2302 | * Part H 2.4.6 | ||
2303 | */ | ||
2304 | smp_ltk_encrypt(conn, hcon->sec_level); | ||
2300 | return 0; | 2305 | return 0; |
2306 | } | ||
2301 | 2307 | ||
2302 | if (sec_level > hcon->pending_sec_level) | 2308 | if (sec_level > hcon->pending_sec_level) |
2303 | hcon->pending_sec_level = sec_level; | 2309 | hcon->pending_sec_level = sec_level; |
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 27f1d4f2114a..9b16eaf33819 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c | |||
@@ -214,7 +214,7 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb) | |||
214 | 214 | ||
215 | iph = ip_hdr(skb); | 215 | iph = ip_hdr(skb); |
216 | if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) | 216 | if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) |
217 | goto inhdr_error; | 217 | goto csum_error; |
218 | 218 | ||
219 | len = ntohs(iph->tot_len); | 219 | len = ntohs(iph->tot_len); |
220 | if (skb->len < len) { | 220 | if (skb->len < len) { |
@@ -236,6 +236,8 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb) | |||
236 | */ | 236 | */ |
237 | return 0; | 237 | return 0; |
238 | 238 | ||
239 | csum_error: | ||
240 | __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS); | ||
239 | inhdr_error: | 241 | inhdr_error: |
240 | __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS); | 242 | __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS); |
241 | drop: | 243 | drop: |
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index 0254c35b2bf0..126a8ea73c96 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c | |||
@@ -255,6 +255,9 @@ static ssize_t brport_show(struct kobject *kobj, | |||
255 | struct brport_attribute *brport_attr = to_brport_attr(attr); | 255 | struct brport_attribute *brport_attr = to_brport_attr(attr); |
256 | struct net_bridge_port *p = to_brport(kobj); | 256 | struct net_bridge_port *p = to_brport(kobj); |
257 | 257 | ||
258 | if (!brport_attr->show) | ||
259 | return -EINVAL; | ||
260 | |||
258 | return brport_attr->show(p, buf); | 261 | return brport_attr->show(p, buf); |
259 | } | 262 | } |
260 | 263 | ||
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 51935270c651..9896f4975353 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c | |||
@@ -168,6 +168,8 @@ static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid | |||
168 | masterv = br_vlan_find(vg, vid); | 168 | masterv = br_vlan_find(vg, vid); |
169 | if (WARN_ON(!masterv)) | 169 | if (WARN_ON(!masterv)) |
170 | return NULL; | 170 | return NULL; |
171 | refcount_set(&masterv->refcnt, 1); | ||
172 | return masterv; | ||
171 | } | 173 | } |
172 | refcount_inc(&masterv->refcnt); | 174 | refcount_inc(&masterv->refcnt); |
173 | 175 | ||
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c index 279527f8b1fe..620e54f08296 100644 --- a/net/bridge/netfilter/ebt_among.c +++ b/net/bridge/netfilter/ebt_among.c | |||
@@ -172,32 +172,83 @@ ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
172 | return true; | 172 | return true; |
173 | } | 173 | } |
174 | 174 | ||
175 | static bool poolsize_invalid(const struct ebt_mac_wormhash *w) | ||
176 | { | ||
177 | return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple)); | ||
178 | } | ||
179 | |||
180 | static bool wormhash_offset_invalid(int off, unsigned int len) | ||
181 | { | ||
182 | if (off == 0) /* not present */ | ||
183 | return false; | ||
184 | |||
185 | if (off < (int)sizeof(struct ebt_among_info) || | ||
186 | off % __alignof__(struct ebt_mac_wormhash)) | ||
187 | return true; | ||
188 | |||
189 | off += sizeof(struct ebt_mac_wormhash); | ||
190 | |||
191 | return off > len; | ||
192 | } | ||
193 | |||
194 | static bool wormhash_sizes_valid(const struct ebt_mac_wormhash *wh, int a, int b) | ||
195 | { | ||
196 | if (a == 0) | ||
197 | a = sizeof(struct ebt_among_info); | ||
198 | |||
199 | return ebt_mac_wormhash_size(wh) + a == b; | ||
200 | } | ||
201 | |||
175 | static int ebt_among_mt_check(const struct xt_mtchk_param *par) | 202 | static int ebt_among_mt_check(const struct xt_mtchk_param *par) |
176 | { | 203 | { |
177 | const struct ebt_among_info *info = par->matchinfo; | 204 | const struct ebt_among_info *info = par->matchinfo; |
178 | const struct ebt_entry_match *em = | 205 | const struct ebt_entry_match *em = |
179 | container_of(par->matchinfo, const struct ebt_entry_match, data); | 206 | container_of(par->matchinfo, const struct ebt_entry_match, data); |
180 | int expected_length = sizeof(struct ebt_among_info); | 207 | unsigned int expected_length = sizeof(struct ebt_among_info); |
181 | const struct ebt_mac_wormhash *wh_dst, *wh_src; | 208 | const struct ebt_mac_wormhash *wh_dst, *wh_src; |
182 | int err; | 209 | int err; |
183 | 210 | ||
211 | if (expected_length > em->match_size) | ||
212 | return -EINVAL; | ||
213 | |||
214 | if (wormhash_offset_invalid(info->wh_dst_ofs, em->match_size) || | ||
215 | wormhash_offset_invalid(info->wh_src_ofs, em->match_size)) | ||
216 | return -EINVAL; | ||
217 | |||
184 | wh_dst = ebt_among_wh_dst(info); | 218 | wh_dst = ebt_among_wh_dst(info); |
185 | wh_src = ebt_among_wh_src(info); | 219 | if (poolsize_invalid(wh_dst)) |
220 | return -EINVAL; | ||
221 | |||
186 | expected_length += ebt_mac_wormhash_size(wh_dst); | 222 | expected_length += ebt_mac_wormhash_size(wh_dst); |
223 | if (expected_length > em->match_size) | ||
224 | return -EINVAL; | ||
225 | |||
226 | wh_src = ebt_among_wh_src(info); | ||
227 | if (poolsize_invalid(wh_src)) | ||
228 | return -EINVAL; | ||
229 | |||
230 | if (info->wh_src_ofs < info->wh_dst_ofs) { | ||
231 | if (!wormhash_sizes_valid(wh_src, info->wh_src_ofs, info->wh_dst_ofs)) | ||
232 | return -EINVAL; | ||
233 | } else { | ||
234 | if (!wormhash_sizes_valid(wh_dst, info->wh_dst_ofs, info->wh_src_ofs)) | ||
235 | return -EINVAL; | ||
236 | } | ||
237 | |||
187 | expected_length += ebt_mac_wormhash_size(wh_src); | 238 | expected_length += ebt_mac_wormhash_size(wh_src); |
188 | 239 | ||
189 | if (em->match_size != EBT_ALIGN(expected_length)) { | 240 | if (em->match_size != EBT_ALIGN(expected_length)) { |
190 | pr_info("wrong size: %d against expected %d, rounded to %zd\n", | 241 | pr_err_ratelimited("wrong size: %d against expected %d, rounded to %zd\n", |
191 | em->match_size, expected_length, | 242 | em->match_size, expected_length, |
192 | EBT_ALIGN(expected_length)); | 243 | EBT_ALIGN(expected_length)); |
193 | return -EINVAL; | 244 | return -EINVAL; |
194 | } | 245 | } |
195 | if (wh_dst && (err = ebt_mac_wormhash_check_integrity(wh_dst))) { | 246 | if (wh_dst && (err = ebt_mac_wormhash_check_integrity(wh_dst))) { |
196 | pr_info("dst integrity fail: %x\n", -err); | 247 | pr_err_ratelimited("dst integrity fail: %x\n", -err); |
197 | return -EINVAL; | 248 | return -EINVAL; |
198 | } | 249 | } |
199 | if (wh_src && (err = ebt_mac_wormhash_check_integrity(wh_src))) { | 250 | if (wh_src && (err = ebt_mac_wormhash_check_integrity(wh_src))) { |
200 | pr_info("src integrity fail: %x\n", -err); | 251 | pr_err_ratelimited("src integrity fail: %x\n", -err); |
201 | return -EINVAL; | 252 | return -EINVAL; |
202 | } | 253 | } |
203 | return 0; | 254 | return 0; |
diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c index 61a9f1be1263..165b9d678cf1 100644 --- a/net/bridge/netfilter/ebt_limit.c +++ b/net/bridge/netfilter/ebt_limit.c | |||
@@ -72,8 +72,8 @@ static int ebt_limit_mt_check(const struct xt_mtchk_param *par) | |||
72 | /* Check for overflow. */ | 72 | /* Check for overflow. */ |
73 | if (info->burst == 0 || | 73 | if (info->burst == 0 || |
74 | user2credits(info->avg * info->burst) < user2credits(info->avg)) { | 74 | user2credits(info->avg * info->burst) < user2credits(info->avg)) { |
75 | pr_info("overflow, try lower: %u/%u\n", | 75 | pr_info_ratelimited("overflow, try lower: %u/%u\n", |
76 | info->avg, info->burst); | 76 | info->avg, info->burst); |
77 | return -EINVAL; | 77 | return -EINVAL; |
78 | } | 78 | } |
79 | 79 | ||
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 02c4b409d317..a94d23b0a9af 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -1641,7 +1641,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr, | |||
1641 | int off = ebt_compat_match_offset(match, m->match_size); | 1641 | int off = ebt_compat_match_offset(match, m->match_size); |
1642 | compat_uint_t msize = m->match_size - off; | 1642 | compat_uint_t msize = m->match_size - off; |
1643 | 1643 | ||
1644 | BUG_ON(off >= m->match_size); | 1644 | if (WARN_ON(off >= m->match_size)) |
1645 | return -EINVAL; | ||
1645 | 1646 | ||
1646 | if (copy_to_user(cm->u.name, match->name, | 1647 | if (copy_to_user(cm->u.name, match->name, |
1647 | strlen(match->name) + 1) || put_user(msize, &cm->match_size)) | 1648 | strlen(match->name) + 1) || put_user(msize, &cm->match_size)) |
@@ -1671,7 +1672,8 @@ static int compat_target_to_user(struct ebt_entry_target *t, | |||
1671 | int off = xt_compat_target_offset(target); | 1672 | int off = xt_compat_target_offset(target); |
1672 | compat_uint_t tsize = t->target_size - off; | 1673 | compat_uint_t tsize = t->target_size - off; |
1673 | 1674 | ||
1674 | BUG_ON(off >= t->target_size); | 1675 | if (WARN_ON(off >= t->target_size)) |
1676 | return -EINVAL; | ||
1675 | 1677 | ||
1676 | if (copy_to_user(cm->u.name, target->name, | 1678 | if (copy_to_user(cm->u.name, target->name, |
1677 | strlen(target->name) + 1) || put_user(tsize, &cm->match_size)) | 1679 | strlen(target->name) + 1) || put_user(tsize, &cm->match_size)) |
@@ -1902,7 +1904,8 @@ static int ebt_buf_add(struct ebt_entries_buf_state *state, | |||
1902 | if (state->buf_kern_start == NULL) | 1904 | if (state->buf_kern_start == NULL) |
1903 | goto count_only; | 1905 | goto count_only; |
1904 | 1906 | ||
1905 | BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len); | 1907 | if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len)) |
1908 | return -EINVAL; | ||
1906 | 1909 | ||
1907 | memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz); | 1910 | memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz); |
1908 | 1911 | ||
@@ -1915,7 +1918,8 @@ static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz) | |||
1915 | { | 1918 | { |
1916 | char *b = state->buf_kern_start; | 1919 | char *b = state->buf_kern_start; |
1917 | 1920 | ||
1918 | BUG_ON(b && state->buf_kern_offset > state->buf_kern_len); | 1921 | if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len)) |
1922 | return -EINVAL; | ||
1919 | 1923 | ||
1920 | if (b != NULL && sz > 0) | 1924 | if (b != NULL && sz > 0) |
1921 | memset(b + state->buf_kern_offset, 0, sz); | 1925 | memset(b + state->buf_kern_offset, 0, sz); |
@@ -1992,8 +1996,10 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, | |||
1992 | pad = XT_ALIGN(size_kern) - size_kern; | 1996 | pad = XT_ALIGN(size_kern) - size_kern; |
1993 | 1997 | ||
1994 | if (pad > 0 && dst) { | 1998 | if (pad > 0 && dst) { |
1995 | BUG_ON(state->buf_kern_len <= pad); | 1999 | if (WARN_ON(state->buf_kern_len <= pad)) |
1996 | BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad); | 2000 | return -EINVAL; |
2001 | if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad)) | ||
2002 | return -EINVAL; | ||
1997 | memset(dst + size_kern, 0, pad); | 2003 | memset(dst + size_kern, 0, pad); |
1998 | } | 2004 | } |
1999 | return off + match_size; | 2005 | return off + match_size; |
@@ -2043,7 +2049,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, | |||
2043 | if (ret < 0) | 2049 | if (ret < 0) |
2044 | return ret; | 2050 | return ret; |
2045 | 2051 | ||
2046 | BUG_ON(ret < match32->match_size); | 2052 | if (WARN_ON(ret < match32->match_size)) |
2053 | return -EINVAL; | ||
2047 | growth += ret - match32->match_size; | 2054 | growth += ret - match32->match_size; |
2048 | growth += ebt_compat_entry_padsize(); | 2055 | growth += ebt_compat_entry_padsize(); |
2049 | 2056 | ||
@@ -2053,7 +2060,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, | |||
2053 | if (match_kern) | 2060 | if (match_kern) |
2054 | match_kern->match_size = ret; | 2061 | match_kern->match_size = ret; |
2055 | 2062 | ||
2056 | WARN_ON(type == EBT_COMPAT_TARGET && size_left); | 2063 | if (WARN_ON(type == EBT_COMPAT_TARGET && size_left)) |
2064 | return -EINVAL; | ||
2065 | |||
2057 | match32 = (struct compat_ebt_entry_mwt *) buf; | 2066 | match32 = (struct compat_ebt_entry_mwt *) buf; |
2058 | } | 2067 | } |
2059 | 2068 | ||
@@ -2109,6 +2118,19 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, | |||
2109 | * | 2118 | * |
2110 | * offsets are relative to beginning of struct ebt_entry (i.e., 0). | 2119 | * offsets are relative to beginning of struct ebt_entry (i.e., 0). |
2111 | */ | 2120 | */ |
2121 | for (i = 0; i < 4 ; ++i) { | ||
2122 | if (offsets[i] > *total) | ||
2123 | return -EINVAL; | ||
2124 | |||
2125 | if (i < 3 && offsets[i] == *total) | ||
2126 | return -EINVAL; | ||
2127 | |||
2128 | if (i == 0) | ||
2129 | continue; | ||
2130 | if (offsets[i-1] > offsets[i]) | ||
2131 | return -EINVAL; | ||
2132 | } | ||
2133 | |||
2112 | for (i = 0, j = 1 ; j < 4 ; j++, i++) { | 2134 | for (i = 0, j = 1 ; j < 4 ; j++, i++) { |
2113 | struct compat_ebt_entry_mwt *match32; | 2135 | struct compat_ebt_entry_mwt *match32; |
2114 | unsigned int size; | 2136 | unsigned int size; |
@@ -2140,7 +2162,8 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, | |||
2140 | 2162 | ||
2141 | startoff = state->buf_user_offset - startoff; | 2163 | startoff = state->buf_user_offset - startoff; |
2142 | 2164 | ||
2143 | BUG_ON(*total < startoff); | 2165 | if (WARN_ON(*total < startoff)) |
2166 | return -EINVAL; | ||
2144 | *total -= startoff; | 2167 | *total -= startoff; |
2145 | return 0; | 2168 | return 0; |
2146 | } | 2169 | } |
@@ -2267,7 +2290,8 @@ static int compat_do_replace(struct net *net, void __user *user, | |||
2267 | state.buf_kern_len = size64; | 2290 | state.buf_kern_len = size64; |
2268 | 2291 | ||
2269 | ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); | 2292 | ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); |
2270 | BUG_ON(ret < 0); /* parses same data again */ | 2293 | if (WARN_ON(ret < 0)) |
2294 | goto out_unlock; | ||
2271 | 2295 | ||
2272 | vfree(entries_tmp); | 2296 | vfree(entries_tmp); |
2273 | tmp.entries_size = size64; | 2297 | tmp.entries_size = size64; |
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 1e492ef2a33d..4d4c82229e9e 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c | |||
@@ -418,6 +418,7 @@ ceph_parse_options(char *options, const char *dev_name, | |||
418 | opt->flags |= CEPH_OPT_FSID; | 418 | opt->flags |= CEPH_OPT_FSID; |
419 | break; | 419 | break; |
420 | case Opt_name: | 420 | case Opt_name: |
421 | kfree(opt->name); | ||
421 | opt->name = kstrndup(argstr[0].from, | 422 | opt->name = kstrndup(argstr[0].from, |
422 | argstr[0].to-argstr[0].from, | 423 | argstr[0].to-argstr[0].from, |
423 | GFP_KERNEL); | 424 | GFP_KERNEL); |
@@ -427,6 +428,9 @@ ceph_parse_options(char *options, const char *dev_name, | |||
427 | } | 428 | } |
428 | break; | 429 | break; |
429 | case Opt_secret: | 430 | case Opt_secret: |
431 | ceph_crypto_key_destroy(opt->key); | ||
432 | kfree(opt->key); | ||
433 | |||
430 | opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); | 434 | opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); |
431 | if (!opt->key) { | 435 | if (!opt->key) { |
432 | err = -ENOMEM; | 436 | err = -ENOMEM; |
@@ -437,6 +441,9 @@ ceph_parse_options(char *options, const char *dev_name, | |||
437 | goto out; | 441 | goto out; |
438 | break; | 442 | break; |
439 | case Opt_key: | 443 | case Opt_key: |
444 | ceph_crypto_key_destroy(opt->key); | ||
445 | kfree(opt->key); | ||
446 | |||
440 | opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); | 447 | opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); |
441 | if (!opt->key) { | 448 | if (!opt->key) { |
442 | err = -ENOMEM; | 449 | err = -ENOMEM; |
diff --git a/net/core/dev.c b/net/core/dev.c index dda9d7b9a840..12be20535714 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2382,8 +2382,11 @@ EXPORT_SYMBOL(netdev_set_num_tc); | |||
2382 | */ | 2382 | */ |
2383 | int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) | 2383 | int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) |
2384 | { | 2384 | { |
2385 | bool disabling; | ||
2385 | int rc; | 2386 | int rc; |
2386 | 2387 | ||
2388 | disabling = txq < dev->real_num_tx_queues; | ||
2389 | |||
2387 | if (txq < 1 || txq > dev->num_tx_queues) | 2390 | if (txq < 1 || txq > dev->num_tx_queues) |
2388 | return -EINVAL; | 2391 | return -EINVAL; |
2389 | 2392 | ||
@@ -2399,15 +2402,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) | |||
2399 | if (dev->num_tc) | 2402 | if (dev->num_tc) |
2400 | netif_setup_tc(dev, txq); | 2403 | netif_setup_tc(dev, txq); |
2401 | 2404 | ||
2402 | if (txq < dev->real_num_tx_queues) { | 2405 | dev->real_num_tx_queues = txq; |
2406 | |||
2407 | if (disabling) { | ||
2408 | synchronize_net(); | ||
2403 | qdisc_reset_all_tx_gt(dev, txq); | 2409 | qdisc_reset_all_tx_gt(dev, txq); |
2404 | #ifdef CONFIG_XPS | 2410 | #ifdef CONFIG_XPS |
2405 | netif_reset_xps_queues_gt(dev, txq); | 2411 | netif_reset_xps_queues_gt(dev, txq); |
2406 | #endif | 2412 | #endif |
2407 | } | 2413 | } |
2414 | } else { | ||
2415 | dev->real_num_tx_queues = txq; | ||
2408 | } | 2416 | } |
2409 | 2417 | ||
2410 | dev->real_num_tx_queues = txq; | ||
2411 | return 0; | 2418 | return 0; |
2412 | } | 2419 | } |
2413 | EXPORT_SYMBOL(netif_set_real_num_tx_queues); | 2420 | EXPORT_SYMBOL(netif_set_real_num_tx_queues); |
@@ -3271,15 +3278,23 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, | |||
3271 | #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) | 3278 | #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) |
3272 | static void skb_update_prio(struct sk_buff *skb) | 3279 | static void skb_update_prio(struct sk_buff *skb) |
3273 | { | 3280 | { |
3274 | struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); | 3281 | const struct netprio_map *map; |
3282 | const struct sock *sk; | ||
3283 | unsigned int prioidx; | ||
3275 | 3284 | ||
3276 | if (!skb->priority && skb->sk && map) { | 3285 | if (skb->priority) |
3277 | unsigned int prioidx = | 3286 | return; |
3278 | sock_cgroup_prioidx(&skb->sk->sk_cgrp_data); | 3287 | map = rcu_dereference_bh(skb->dev->priomap); |
3288 | if (!map) | ||
3289 | return; | ||
3290 | sk = skb_to_full_sk(skb); | ||
3291 | if (!sk) | ||
3292 | return; | ||
3279 | 3293 | ||
3280 | if (prioidx < map->priomap_len) | 3294 | prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data); |
3281 | skb->priority = map->priomap[prioidx]; | 3295 | |
3282 | } | 3296 | if (prioidx < map->priomap_len) |
3297 | skb->priority = map->priomap[prioidx]; | ||
3283 | } | 3298 | } |
3284 | #else | 3299 | #else |
3285 | #define skb_update_prio(skb) | 3300 | #define skb_update_prio(skb) |
@@ -6389,6 +6404,7 @@ static int __netdev_upper_dev_link(struct net_device *dev, | |||
6389 | .linking = true, | 6404 | .linking = true, |
6390 | .upper_info = upper_info, | 6405 | .upper_info = upper_info, |
6391 | }; | 6406 | }; |
6407 | struct net_device *master_dev; | ||
6392 | int ret = 0; | 6408 | int ret = 0; |
6393 | 6409 | ||
6394 | ASSERT_RTNL(); | 6410 | ASSERT_RTNL(); |
@@ -6400,11 +6416,14 @@ static int __netdev_upper_dev_link(struct net_device *dev, | |||
6400 | if (netdev_has_upper_dev(upper_dev, dev)) | 6416 | if (netdev_has_upper_dev(upper_dev, dev)) |
6401 | return -EBUSY; | 6417 | return -EBUSY; |
6402 | 6418 | ||
6403 | if (netdev_has_upper_dev(dev, upper_dev)) | 6419 | if (!master) { |
6404 | return -EEXIST; | 6420 | if (netdev_has_upper_dev(dev, upper_dev)) |
6405 | 6421 | return -EEXIST; | |
6406 | if (master && netdev_master_upper_dev_get(dev)) | 6422 | } else { |
6407 | return -EBUSY; | 6423 | master_dev = netdev_master_upper_dev_get(dev); |
6424 | if (master_dev) | ||
6425 | return master_dev == upper_dev ? -EEXIST : -EBUSY; | ||
6426 | } | ||
6408 | 6427 | ||
6409 | ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, | 6428 | ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, |
6410 | &changeupper_info.info); | 6429 | &changeupper_info.info); |
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 0ab1af04296c..a04e1e88bf3a 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c | |||
@@ -402,8 +402,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c | |||
402 | if (colon) | 402 | if (colon) |
403 | *colon = 0; | 403 | *colon = 0; |
404 | 404 | ||
405 | dev_load(net, ifr->ifr_name); | ||
406 | |||
407 | /* | 405 | /* |
408 | * See which interface the caller is talking about. | 406 | * See which interface the caller is talking about. |
409 | */ | 407 | */ |
@@ -423,6 +421,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c | |||
423 | case SIOCGIFMAP: | 421 | case SIOCGIFMAP: |
424 | case SIOCGIFINDEX: | 422 | case SIOCGIFINDEX: |
425 | case SIOCGIFTXQLEN: | 423 | case SIOCGIFTXQLEN: |
424 | dev_load(net, ifr->ifr_name); | ||
426 | rcu_read_lock(); | 425 | rcu_read_lock(); |
427 | ret = dev_ifsioc_locked(net, ifr, cmd); | 426 | ret = dev_ifsioc_locked(net, ifr, cmd); |
428 | rcu_read_unlock(); | 427 | rcu_read_unlock(); |
@@ -431,6 +430,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c | |||
431 | return ret; | 430 | return ret; |
432 | 431 | ||
433 | case SIOCETHTOOL: | 432 | case SIOCETHTOOL: |
433 | dev_load(net, ifr->ifr_name); | ||
434 | rtnl_lock(); | 434 | rtnl_lock(); |
435 | ret = dev_ethtool(net, ifr); | 435 | ret = dev_ethtool(net, ifr); |
436 | rtnl_unlock(); | 436 | rtnl_unlock(); |
@@ -447,6 +447,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c | |||
447 | case SIOCGMIIPHY: | 447 | case SIOCGMIIPHY: |
448 | case SIOCGMIIREG: | 448 | case SIOCGMIIREG: |
449 | case SIOCSIFNAME: | 449 | case SIOCSIFNAME: |
450 | dev_load(net, ifr->ifr_name); | ||
450 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) | 451 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
451 | return -EPERM; | 452 | return -EPERM; |
452 | rtnl_lock(); | 453 | rtnl_lock(); |
@@ -494,6 +495,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c | |||
494 | /* fall through */ | 495 | /* fall through */ |
495 | case SIOCBONDSLAVEINFOQUERY: | 496 | case SIOCBONDSLAVEINFOQUERY: |
496 | case SIOCBONDINFOQUERY: | 497 | case SIOCBONDINFOQUERY: |
498 | dev_load(net, ifr->ifr_name); | ||
497 | rtnl_lock(); | 499 | rtnl_lock(); |
498 | ret = dev_ifsioc(net, ifr, cmd); | 500 | ret = dev_ifsioc(net, ifr, cmd); |
499 | rtnl_unlock(); | 501 | rtnl_unlock(); |
@@ -518,6 +520,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c | |||
518 | cmd == SIOCGHWTSTAMP || | 520 | cmd == SIOCGHWTSTAMP || |
519 | (cmd >= SIOCDEVPRIVATE && | 521 | (cmd >= SIOCDEVPRIVATE && |
520 | cmd <= SIOCDEVPRIVATE + 15)) { | 522 | cmd <= SIOCDEVPRIVATE + 15)) { |
523 | dev_load(net, ifr->ifr_name); | ||
521 | rtnl_lock(); | 524 | rtnl_lock(); |
522 | ret = dev_ifsioc(net, ifr, cmd); | 525 | ret = dev_ifsioc(net, ifr, cmd); |
523 | rtnl_unlock(); | 526 | rtnl_unlock(); |
diff --git a/net/core/devlink.c b/net/core/devlink.c index 18d385ed8237..effd4848c2b4 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c | |||
@@ -1695,10 +1695,11 @@ static int devlink_dpipe_table_put(struct sk_buff *skb, | |||
1695 | goto nla_put_failure; | 1695 | goto nla_put_failure; |
1696 | 1696 | ||
1697 | if (table->resource_valid) { | 1697 | if (table->resource_valid) { |
1698 | nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID, | 1698 | if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID, |
1699 | table->resource_id, DEVLINK_ATTR_PAD); | 1699 | table->resource_id, DEVLINK_ATTR_PAD) || |
1700 | nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS, | 1700 | nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS, |
1701 | table->resource_units, DEVLINK_ATTR_PAD); | 1701 | table->resource_units, DEVLINK_ATTR_PAD)) |
1702 | goto nla_put_failure; | ||
1702 | } | 1703 | } |
1703 | if (devlink_dpipe_matches_put(table, skb)) | 1704 | if (devlink_dpipe_matches_put(table, skb)) |
1704 | goto nla_put_failure; | 1705 | goto nla_put_failure; |
@@ -1797,7 +1798,7 @@ send_done: | |||
1797 | if (!nlh) { | 1798 | if (!nlh) { |
1798 | err = devlink_dpipe_send_and_alloc_skb(&skb, info); | 1799 | err = devlink_dpipe_send_and_alloc_skb(&skb, info); |
1799 | if (err) | 1800 | if (err) |
1800 | goto err_skb_send_alloc; | 1801 | return err; |
1801 | goto send_done; | 1802 | goto send_done; |
1802 | } | 1803 | } |
1803 | 1804 | ||
@@ -1806,7 +1807,6 @@ send_done: | |||
1806 | nla_put_failure: | 1807 | nla_put_failure: |
1807 | err = -EMSGSIZE; | 1808 | err = -EMSGSIZE; |
1808 | err_table_put: | 1809 | err_table_put: |
1809 | err_skb_send_alloc: | ||
1810 | genlmsg_cancel(skb, hdr); | 1810 | genlmsg_cancel(skb, hdr); |
1811 | nlmsg_free(skb); | 1811 | nlmsg_free(skb); |
1812 | return err; | 1812 | return err; |
@@ -2072,7 +2072,7 @@ static int devlink_dpipe_entries_fill(struct genl_info *info, | |||
2072 | table->counters_enabled, | 2072 | table->counters_enabled, |
2073 | &dump_ctx); | 2073 | &dump_ctx); |
2074 | if (err) | 2074 | if (err) |
2075 | goto err_entries_dump; | 2075 | return err; |
2076 | 2076 | ||
2077 | send_done: | 2077 | send_done: |
2078 | nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq, | 2078 | nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq, |
@@ -2080,16 +2080,10 @@ send_done: | |||
2080 | if (!nlh) { | 2080 | if (!nlh) { |
2081 | err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info); | 2081 | err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info); |
2082 | if (err) | 2082 | if (err) |
2083 | goto err_skb_send_alloc; | 2083 | return err; |
2084 | goto send_done; | 2084 | goto send_done; |
2085 | } | 2085 | } |
2086 | return genlmsg_reply(dump_ctx.skb, info); | 2086 | return genlmsg_reply(dump_ctx.skb, info); |
2087 | |||
2088 | err_entries_dump: | ||
2089 | err_skb_send_alloc: | ||
2090 | genlmsg_cancel(dump_ctx.skb, dump_ctx.hdr); | ||
2091 | nlmsg_free(dump_ctx.skb); | ||
2092 | return err; | ||
2093 | } | 2087 | } |
2094 | 2088 | ||
2095 | static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb, | 2089 | static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb, |
@@ -2228,7 +2222,7 @@ send_done: | |||
2228 | if (!nlh) { | 2222 | if (!nlh) { |
2229 | err = devlink_dpipe_send_and_alloc_skb(&skb, info); | 2223 | err = devlink_dpipe_send_and_alloc_skb(&skb, info); |
2230 | if (err) | 2224 | if (err) |
2231 | goto err_skb_send_alloc; | 2225 | return err; |
2232 | goto send_done; | 2226 | goto send_done; |
2233 | } | 2227 | } |
2234 | return genlmsg_reply(skb, info); | 2228 | return genlmsg_reply(skb, info); |
@@ -2236,7 +2230,6 @@ send_done: | |||
2236 | nla_put_failure: | 2230 | nla_put_failure: |
2237 | err = -EMSGSIZE; | 2231 | err = -EMSGSIZE; |
2238 | err_table_put: | 2232 | err_table_put: |
2239 | err_skb_send_alloc: | ||
2240 | genlmsg_cancel(skb, hdr); | 2233 | genlmsg_cancel(skb, hdr); |
2241 | nlmsg_free(skb); | 2234 | nlmsg_free(skb); |
2242 | return err; | 2235 | return err; |
@@ -2332,7 +2325,7 @@ devlink_resource_validate_children(struct devlink_resource *resource) | |||
2332 | list_for_each_entry(child_resource, &resource->resource_list, list) | 2325 | list_for_each_entry(child_resource, &resource->resource_list, list) |
2333 | parts_size += child_resource->size_new; | 2326 | parts_size += child_resource->size_new; |
2334 | 2327 | ||
2335 | if (parts_size > resource->size) | 2328 | if (parts_size > resource->size_new) |
2336 | size_valid = false; | 2329 | size_valid = false; |
2337 | out: | 2330 | out: |
2338 | resource->size_valid = size_valid; | 2331 | resource->size_valid = size_valid; |
@@ -2372,20 +2365,22 @@ static int devlink_nl_cmd_resource_set(struct sk_buff *skb, | |||
2372 | return 0; | 2365 | return 0; |
2373 | } | 2366 | } |
2374 | 2367 | ||
2375 | static void | 2368 | static int |
2376 | devlink_resource_size_params_put(struct devlink_resource *resource, | 2369 | devlink_resource_size_params_put(struct devlink_resource *resource, |
2377 | struct sk_buff *skb) | 2370 | struct sk_buff *skb) |
2378 | { | 2371 | { |
2379 | struct devlink_resource_size_params *size_params; | 2372 | struct devlink_resource_size_params *size_params; |
2380 | 2373 | ||
2381 | size_params = resource->size_params; | 2374 | size_params = &resource->size_params; |
2382 | nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN, | 2375 | if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN, |
2383 | size_params->size_granularity, DEVLINK_ATTR_PAD); | 2376 | size_params->size_granularity, DEVLINK_ATTR_PAD) || |
2384 | nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX, | 2377 | nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX, |
2385 | size_params->size_max, DEVLINK_ATTR_PAD); | 2378 | size_params->size_max, DEVLINK_ATTR_PAD) || |
2386 | nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN, | 2379 | nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN, |
2387 | size_params->size_min, DEVLINK_ATTR_PAD); | 2380 | size_params->size_min, DEVLINK_ATTR_PAD) || |
2388 | nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit); | 2381 | nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit)) |
2382 | return -EMSGSIZE; | ||
2383 | return 0; | ||
2389 | } | 2384 | } |
2390 | 2385 | ||
2391 | static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb, | 2386 | static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb, |
@@ -2409,10 +2404,12 @@ static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb, | |||
2409 | nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW, | 2404 | nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW, |
2410 | resource->size_new, DEVLINK_ATTR_PAD); | 2405 | resource->size_new, DEVLINK_ATTR_PAD); |
2411 | if (resource->resource_ops && resource->resource_ops->occ_get) | 2406 | if (resource->resource_ops && resource->resource_ops->occ_get) |
2412 | nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC, | 2407 | if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC, |
2413 | resource->resource_ops->occ_get(devlink), | 2408 | resource->resource_ops->occ_get(devlink), |
2414 | DEVLINK_ATTR_PAD); | 2409 | DEVLINK_ATTR_PAD)) |
2415 | devlink_resource_size_params_put(resource, skb); | 2410 | goto nla_put_failure; |
2411 | if (devlink_resource_size_params_put(resource, skb)) | ||
2412 | goto nla_put_failure; | ||
2416 | if (list_empty(&resource->resource_list)) | 2413 | if (list_empty(&resource->resource_list)) |
2417 | goto out; | 2414 | goto out; |
2418 | 2415 | ||
@@ -3151,7 +3148,7 @@ int devlink_resource_register(struct devlink *devlink, | |||
3151 | u64 resource_size, | 3148 | u64 resource_size, |
3152 | u64 resource_id, | 3149 | u64 resource_id, |
3153 | u64 parent_resource_id, | 3150 | u64 parent_resource_id, |
3154 | struct devlink_resource_size_params *size_params, | 3151 | const struct devlink_resource_size_params *size_params, |
3155 | const struct devlink_resource_ops *resource_ops) | 3152 | const struct devlink_resource_ops *resource_ops) |
3156 | { | 3153 | { |
3157 | struct devlink_resource *resource; | 3154 | struct devlink_resource *resource; |
@@ -3194,7 +3191,8 @@ int devlink_resource_register(struct devlink *devlink, | |||
3194 | resource->id = resource_id; | 3191 | resource->id = resource_id; |
3195 | resource->resource_ops = resource_ops; | 3192 | resource->resource_ops = resource_ops; |
3196 | resource->size_valid = true; | 3193 | resource->size_valid = true; |
3197 | resource->size_params = size_params; | 3194 | memcpy(&resource->size_params, size_params, |
3195 | sizeof(resource->size_params)); | ||
3198 | INIT_LIST_HEAD(&resource->resource_list); | 3196 | INIT_LIST_HEAD(&resource->resource_list); |
3199 | list_add_tail(&resource->list, resource_list); | 3197 | list_add_tail(&resource->list, resource_list); |
3200 | out: | 3198 | out: |
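The devlink hunks stop ignoring the nla_put_u64_64bit()/nla_put_u8() return values: the individual puts are ORed together so any failure unwinds through nla_put_failure with -EMSGSIZE, the now-dead err_skb_send_alloc label is removed, and the size_params are embedded by value via memcpy() instead of keeping a pointer to caller-owned storage. A self-contained model of the 'check every put, unwind once' fill pattern (struct msg and put_u64() are stand-ins for the netlink helpers):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* struct msg and put_u64() are stand-ins for the netlink skb helpers. */
struct msg {
    unsigned char buf[32];
    size_t len;
};

/* Models nla_put_u64_64bit(): fails once the message runs out of room. */
static int put_u64(struct msg *m, uint64_t v)
{
    if (m->len + sizeof(v) > sizeof(m->buf))
        return -EMSGSIZE;
    memcpy(m->buf + m->len, &v, sizeof(v));
    m->len += sizeof(v);
    return 0;
}

/*
 * The pattern the hunks adopt: OR the individual put results together
 * and unwind once, instead of ignoring their return values.
 */
static int fill_table(struct msg *m, uint64_t id, uint64_t units)
{
    if (put_u64(m, id) ||
        put_u64(m, units))
        goto nla_put_failure;
    return 0;

nla_put_failure:
    return -EMSGSIZE;
}

int main(void)
{
    struct msg m = { .len = 0 };
    int err = 0;

    /* Five 8-byte attributes cannot fit in a 32-byte message. */
    for (int i = 0; i < 5 && !err; i++)
        err = fill_table(&m, i, i);
    printf("used %zu bytes, last err %d\n", m.len, err);
    return 0;
}

The caller can then cancel and free the partially built message in one place, which is exactly what the simplified error paths above now do.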
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 494e6a5d7306..3f89c76d5c24 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -2520,11 +2520,14 @@ static int set_phy_tunable(struct net_device *dev, void __user *useraddr) | |||
2520 | static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr) | 2520 | static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr) |
2521 | { | 2521 | { |
2522 | struct ethtool_fecparam fecparam = { ETHTOOL_GFECPARAM }; | 2522 | struct ethtool_fecparam fecparam = { ETHTOOL_GFECPARAM }; |
2523 | int rc; | ||
2523 | 2524 | ||
2524 | if (!dev->ethtool_ops->get_fecparam) | 2525 | if (!dev->ethtool_ops->get_fecparam) |
2525 | return -EOPNOTSUPP; | 2526 | return -EOPNOTSUPP; |
2526 | 2527 | ||
2527 | dev->ethtool_ops->get_fecparam(dev, &fecparam); | 2528 | rc = dev->ethtool_ops->get_fecparam(dev, &fecparam); |
2529 | if (rc) | ||
2530 | return rc; | ||
2528 | 2531 | ||
2529 | if (copy_to_user(useraddr, &fecparam, sizeof(fecparam))) | 2532 | if (copy_to_user(useraddr, &fecparam, sizeof(fecparam))) |
2530 | return -EFAULT; | 2533 | return -EFAULT; |
diff --git a/net/core/filter.c b/net/core/filter.c index 08ab4c65a998..48aa7c7320db 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -2087,6 +2087,10 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb) | |||
2087 | u32 off = skb_mac_header_len(skb); | 2087 | u32 off = skb_mac_header_len(skb); |
2088 | int ret; | 2088 | int ret; |
2089 | 2089 | ||
2090 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ | ||
2091 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | ||
2092 | return -ENOTSUPP; | ||
2093 | |||
2090 | ret = skb_cow(skb, len_diff); | 2094 | ret = skb_cow(skb, len_diff); |
2091 | if (unlikely(ret < 0)) | 2095 | if (unlikely(ret < 0)) |
2092 | return ret; | 2096 | return ret; |
@@ -2096,19 +2100,21 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb) | |||
2096 | return ret; | 2100 | return ret; |
2097 | 2101 | ||
2098 | if (skb_is_gso(skb)) { | 2102 | if (skb_is_gso(skb)) { |
2103 | struct skb_shared_info *shinfo = skb_shinfo(skb); | ||
2104 | |||
2099 | /* SKB_GSO_TCPV4 needs to be changed into | 2105 | /* SKB_GSO_TCPV4 needs to be changed into |
2100 | * SKB_GSO_TCPV6. | 2106 | * SKB_GSO_TCPV6. |
2101 | */ | 2107 | */ |
2102 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { | 2108 | if (shinfo->gso_type & SKB_GSO_TCPV4) { |
2103 | skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4; | 2109 | shinfo->gso_type &= ~SKB_GSO_TCPV4; |
2104 | skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; | 2110 | shinfo->gso_type |= SKB_GSO_TCPV6; |
2105 | } | 2111 | } |
2106 | 2112 | ||
2107 | /* Due to IPv6 header, MSS needs to be downgraded. */ | 2113 | /* Due to IPv6 header, MSS needs to be downgraded. */ |
2108 | skb_shinfo(skb)->gso_size -= len_diff; | 2114 | skb_decrease_gso_size(shinfo, len_diff); |
2109 | /* Header must be checked, and gso_segs recomputed. */ | 2115 | /* Header must be checked, and gso_segs recomputed. */ |
2110 | skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; | 2116 | shinfo->gso_type |= SKB_GSO_DODGY; |
2111 | skb_shinfo(skb)->gso_segs = 0; | 2117 | shinfo->gso_segs = 0; |
2112 | } | 2118 | } |
2113 | 2119 | ||
2114 | skb->protocol = htons(ETH_P_IPV6); | 2120 | skb->protocol = htons(ETH_P_IPV6); |
@@ -2123,6 +2129,10 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb) | |||
2123 | u32 off = skb_mac_header_len(skb); | 2129 | u32 off = skb_mac_header_len(skb); |
2124 | int ret; | 2130 | int ret; |
2125 | 2131 | ||
2132 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ | ||
2133 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | ||
2134 | return -ENOTSUPP; | ||
2135 | |||
2126 | ret = skb_unclone(skb, GFP_ATOMIC); | 2136 | ret = skb_unclone(skb, GFP_ATOMIC); |
2127 | if (unlikely(ret < 0)) | 2137 | if (unlikely(ret < 0)) |
2128 | return ret; | 2138 | return ret; |
@@ -2132,19 +2142,21 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb) | |||
2132 | return ret; | 2142 | return ret; |
2133 | 2143 | ||
2134 | if (skb_is_gso(skb)) { | 2144 | if (skb_is_gso(skb)) { |
2145 | struct skb_shared_info *shinfo = skb_shinfo(skb); | ||
2146 | |||
2135 | /* SKB_GSO_TCPV6 needs to be changed into | 2147 | /* SKB_GSO_TCPV6 needs to be changed into |
2136 | * SKB_GSO_TCPV4. | 2148 | * SKB_GSO_TCPV4. |
2137 | */ | 2149 | */ |
2138 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { | 2150 | if (shinfo->gso_type & SKB_GSO_TCPV6) { |
2139 | skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6; | 2151 | shinfo->gso_type &= ~SKB_GSO_TCPV6; |
2140 | skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; | 2152 | shinfo->gso_type |= SKB_GSO_TCPV4; |
2141 | } | 2153 | } |
2142 | 2154 | ||
2143 | /* Due to IPv4 header, MSS can be upgraded. */ | 2155 | /* Due to IPv4 header, MSS can be upgraded. */ |
2144 | skb_shinfo(skb)->gso_size += len_diff; | 2156 | skb_increase_gso_size(shinfo, len_diff); |
2145 | /* Header must be checked, and gso_segs recomputed. */ | 2157 | /* Header must be checked, and gso_segs recomputed. */ |
2146 | skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; | 2158 | shinfo->gso_type |= SKB_GSO_DODGY; |
2147 | skb_shinfo(skb)->gso_segs = 0; | 2159 | shinfo->gso_segs = 0; |
2148 | } | 2160 | } |
2149 | 2161 | ||
2150 | skb->protocol = htons(ETH_P_IP); | 2162 | skb->protocol = htons(ETH_P_IP); |
@@ -2243,6 +2255,10 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff) | |||
2243 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); | 2255 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); |
2244 | int ret; | 2256 | int ret; |
2245 | 2257 | ||
2258 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ | ||
2259 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | ||
2260 | return -ENOTSUPP; | ||
2261 | |||
2246 | ret = skb_cow(skb, len_diff); | 2262 | ret = skb_cow(skb, len_diff); |
2247 | if (unlikely(ret < 0)) | 2263 | if (unlikely(ret < 0)) |
2248 | return ret; | 2264 | return ret; |
@@ -2252,11 +2268,13 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff) | |||
2252 | return ret; | 2268 | return ret; |
2253 | 2269 | ||
2254 | if (skb_is_gso(skb)) { | 2270 | if (skb_is_gso(skb)) { |
2271 | struct skb_shared_info *shinfo = skb_shinfo(skb); | ||
2272 | |||
2255 | /* Due to header grow, MSS needs to be downgraded. */ | 2273 | /* Due to header grow, MSS needs to be downgraded. */ |
2256 | skb_shinfo(skb)->gso_size -= len_diff; | 2274 | skb_decrease_gso_size(shinfo, len_diff); |
2257 | /* Header must be checked, and gso_segs recomputed. */ | 2275 | /* Header must be checked, and gso_segs recomputed. */ |
2258 | skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; | 2276 | shinfo->gso_type |= SKB_GSO_DODGY; |
2259 | skb_shinfo(skb)->gso_segs = 0; | 2277 | shinfo->gso_segs = 0; |
2260 | } | 2278 | } |
2261 | 2279 | ||
2262 | return 0; | 2280 | return 0; |
@@ -2267,6 +2285,10 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff) | |||
2267 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); | 2285 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); |
2268 | int ret; | 2286 | int ret; |
2269 | 2287 | ||
2288 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ | ||
2289 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | ||
2290 | return -ENOTSUPP; | ||
2291 | |||
2270 | ret = skb_unclone(skb, GFP_ATOMIC); | 2292 | ret = skb_unclone(skb, GFP_ATOMIC); |
2271 | if (unlikely(ret < 0)) | 2293 | if (unlikely(ret < 0)) |
2272 | return ret; | 2294 | return ret; |
@@ -2276,11 +2298,13 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff) | |||
2276 | return ret; | 2298 | return ret; |
2277 | 2299 | ||
2278 | if (skb_is_gso(skb)) { | 2300 | if (skb_is_gso(skb)) { |
2301 | struct skb_shared_info *shinfo = skb_shinfo(skb); | ||
2302 | |||
2279 | /* Due to header shrink, MSS can be upgraded. */ | 2303 | /* Due to header shrink, MSS can be upgraded. */ |
2280 | skb_shinfo(skb)->gso_size += len_diff; | 2304 | skb_increase_gso_size(shinfo, len_diff); |
2281 | /* Header must be checked, and gso_segs recomputed. */ | 2305 | /* Header must be checked, and gso_segs recomputed. */ |
2282 | skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; | 2306 | shinfo->gso_type |= SKB_GSO_DODGY; |
2283 | skb_shinfo(skb)->gso_segs = 0; | 2307 | shinfo->gso_segs = 0; |
2284 | } | 2308 | } |
2285 | 2309 | ||
2286 | return 0; | 2310 | return 0; |
@@ -3381,17 +3405,13 @@ BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock, | |||
3381 | struct sock *sk = bpf_sock->sk; | 3405 | struct sock *sk = bpf_sock->sk; |
3382 | int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS; | 3406 | int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS; |
3383 | 3407 | ||
3384 | if (!sk_fullsock(sk)) | 3408 | if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk)) |
3385 | return -EINVAL; | 3409 | return -EINVAL; |
3386 | 3410 | ||
3387 | #ifdef CONFIG_INET | ||
3388 | if (val) | 3411 | if (val) |
3389 | tcp_sk(sk)->bpf_sock_ops_cb_flags = val; | 3412 | tcp_sk(sk)->bpf_sock_ops_cb_flags = val; |
3390 | 3413 | ||
3391 | return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS); | 3414 | return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS); |
3392 | #else | ||
3393 | return -EINVAL; | ||
3394 | #endif | ||
3395 | } | 3415 | } |
3396 | 3416 | ||
3397 | static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = { | 3417 | static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = { |
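The filter.c hunks make the BPF header-adjustment helpers refuse GSO SCTP packets up front, because SCTP GSO carries the GSO_BY_FRAGS sentinel in gso_size and that value cannot simply be grown or shrunk; they also cache skb_shinfo(skb) in a local and switch to the skb_decrease_gso_size()/skb_increase_gso_size() helpers. A cut-down model of that guard (the struct is a stand-in for skb_shared_info, and 0xFFFF mirrors the kernel's GSO_BY_FRAGS value as far as I recall):

#include <errno.h>
#include <stdio.h>

#define GSO_BY_FRAGS 0xFFFF     /* sentinel value, mirroring the kernel's define */

/* Cut-down stand-in for the skb_shared_info fields touched above. */
struct shinfo {
    unsigned int gso_size;
    unsigned int gso_segs;
};

/* Models skb_decrease_gso_size() plus the new SCTP guard. */
static int decrease_gso_size(struct shinfo *s, unsigned int len_diff)
{
    if (s->gso_size == GSO_BY_FRAGS)    /* SCTP-style skb: refuse to adjust */
        return -ENOTSUP;
    s->gso_size -= len_diff;
    s->gso_segs = 0;                    /* force recomputation, like GSO_DODGY */
    return 0;
}

int main(void)
{
    struct shinfo tcp  = { .gso_size = 1448 };
    struct shinfo sctp = { .gso_size = GSO_BY_FRAGS };
    int rc;

    rc = decrease_gso_size(&tcp, 20);
    printf("tcp:  rc %d, gso_size now %u\n", rc, tcp.gso_size);

    rc = decrease_gso_size(&sctp, 20);
    printf("sctp: rc %d, gso_size still 0x%x\n", rc, sctp.gso_size);
    return 0;
}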
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index 0a3f88f08727..98fd12721221 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c | |||
@@ -66,6 +66,7 @@ struct net_rate_estimator { | |||
66 | static void est_fetch_counters(struct net_rate_estimator *e, | 66 | static void est_fetch_counters(struct net_rate_estimator *e, |
67 | struct gnet_stats_basic_packed *b) | 67 | struct gnet_stats_basic_packed *b) |
68 | { | 68 | { |
69 | memset(b, 0, sizeof(*b)); | ||
69 | if (e->stats_lock) | 70 | if (e->stats_lock) |
70 | spin_lock(e->stats_lock); | 71 | spin_lock(e->stats_lock); |
71 | 72 | ||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 09bd89c90a71..1e7acdc30732 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -4179,7 +4179,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) | |||
4179 | 4179 | ||
4180 | skb_queue_tail(&sk->sk_error_queue, skb); | 4180 | skb_queue_tail(&sk->sk_error_queue, skb); |
4181 | if (!sock_flag(sk, SOCK_DEAD)) | 4181 | if (!sock_flag(sk, SOCK_DEAD)) |
4182 | sk->sk_data_ready(sk); | 4182 | sk->sk_error_report(sk); |
4183 | return 0; | 4183 | return 0; |
4184 | } | 4184 | } |
4185 | EXPORT_SYMBOL(sock_queue_err_skb); | 4185 | EXPORT_SYMBOL(sock_queue_err_skb); |
@@ -4891,7 +4891,7 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet); | |||
4891 | * | 4891 | * |
4892 | * The MAC/L2 or network (IP, IPv6) headers are not accounted for. | 4892 | * The MAC/L2 or network (IP, IPv6) headers are not accounted for. |
4893 | */ | 4893 | */ |
4894 | unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) | 4894 | static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) |
4895 | { | 4895 | { |
4896 | const struct skb_shared_info *shinfo = skb_shinfo(skb); | 4896 | const struct skb_shared_info *shinfo = skb_shinfo(skb); |
4897 | unsigned int thlen = 0; | 4897 | unsigned int thlen = 0; |
@@ -4904,7 +4904,7 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) | |||
4904 | thlen += inner_tcp_hdrlen(skb); | 4904 | thlen += inner_tcp_hdrlen(skb); |
4905 | } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { | 4905 | } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { |
4906 | thlen = tcp_hdrlen(skb); | 4906 | thlen = tcp_hdrlen(skb); |
4907 | } else if (unlikely(shinfo->gso_type & SKB_GSO_SCTP)) { | 4907 | } else if (unlikely(skb_is_gso_sctp(skb))) { |
4908 | thlen = sizeof(struct sctphdr); | 4908 | thlen = sizeof(struct sctphdr); |
4909 | } | 4909 | } |
4910 | /* UFO sets gso_size to the size of the fragmentation | 4910 | /* UFO sets gso_size to the size of the fragmentation |
@@ -4913,7 +4913,40 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) | |||
4913 | */ | 4913 | */ |
4914 | return thlen + shinfo->gso_size; | 4914 | return thlen + shinfo->gso_size; |
4915 | } | 4915 | } |
4916 | EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); | 4916 | |
4917 | /** | ||
4918 | * skb_gso_network_seglen - Return length of individual segments of a gso packet | ||
4919 | * | ||
4920 | * @skb: GSO skb | ||
4921 | * | ||
4922 | * skb_gso_network_seglen is used to determine the real size of the | ||
4923 | * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). | ||
4924 | * | ||
4925 | * The MAC/L2 header is not accounted for. | ||
4926 | */ | ||
4927 | static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) | ||
4928 | { | ||
4929 | unsigned int hdr_len = skb_transport_header(skb) - | ||
4930 | skb_network_header(skb); | ||
4931 | |||
4932 | return hdr_len + skb_gso_transport_seglen(skb); | ||
4933 | } | ||
4934 | |||
4935 | /** | ||
4936 | * skb_gso_mac_seglen - Return length of individual segments of a gso packet | ||
4937 | * | ||
4938 | * @skb: GSO skb | ||
4939 | * | ||
4940 | * skb_gso_mac_seglen is used to determine the real size of the | ||
4941 | * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 | ||
4942 | * headers (TCP/UDP). | ||
4943 | */ | ||
4944 | static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) | ||
4945 | { | ||
4946 | unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); | ||
4947 | |||
4948 | return hdr_len + skb_gso_transport_seglen(skb); | ||
4949 | } | ||
4917 | 4950 | ||
4918 | /** | 4951 | /** |
4919 | * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS | 4952 | * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS |
@@ -4955,19 +4988,20 @@ static inline bool skb_gso_size_check(const struct sk_buff *skb, | |||
4955 | } | 4988 | } |
4956 | 4989 | ||
4957 | /** | 4990 | /** |
4958 | * skb_gso_validate_mtu - Return in case such skb fits a given MTU | 4991 | * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU? |
4959 | * | 4992 | * |
4960 | * @skb: GSO skb | 4993 | * @skb: GSO skb |
4961 | * @mtu: MTU to validate against | 4994 | * @mtu: MTU to validate against |
4962 | * | 4995 | * |
4963 | * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU | 4996 | * skb_gso_validate_network_len validates if a given skb will fit a |
4964 | * once split. | 4997 | * wanted MTU once split. It considers L3 headers, L4 headers, and the |
4998 | * payload. | ||
4965 | */ | 4999 | */ |
4966 | bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu) | 5000 | bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) |
4967 | { | 5001 | { |
4968 | return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); | 5002 | return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); |
4969 | } | 5003 | } |
4970 | EXPORT_SYMBOL_GPL(skb_gso_validate_mtu); | 5004 | EXPORT_SYMBOL_GPL(skb_gso_validate_network_len); |
4971 | 5005 | ||
4972 | /** | 5006 | /** |
4973 | * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? | 5007 | * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? |
@@ -4986,13 +5020,16 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len); | |||
4986 | 5020 | ||
4987 | static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) | 5021 | static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) |
4988 | { | 5022 | { |
5023 | int mac_len; | ||
5024 | |||
4989 | if (skb_cow(skb, skb_headroom(skb)) < 0) { | 5025 | if (skb_cow(skb, skb_headroom(skb)) < 0) { |
4990 | kfree_skb(skb); | 5026 | kfree_skb(skb); |
4991 | return NULL; | 5027 | return NULL; |
4992 | } | 5028 | } |
4993 | 5029 | ||
4994 | memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN, | 5030 | mac_len = skb->data - skb_mac_header(skb); |
4995 | 2 * ETH_ALEN); | 5031 | memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), |
5032 | mac_len - VLAN_HLEN - ETH_TLEN); | ||
4996 | skb->mac_header += VLAN_HLEN; | 5033 | skb->mac_header += VLAN_HLEN; |
4997 | return skb; | 5034 | return skb; |
4998 | } | 5035 | } |
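The skbuff.c hunks make skb_gso_transport_seglen() static, move skb_gso_network_seglen() and skb_gso_mac_seglen() beside it, rename skb_gso_validate_mtu() to skb_gso_validate_network_len(), and compute the VLAN reorder offset from skb_mac_header() instead of assuming an Ethernet-sized header. The seglen helpers all share one piece of arithmetic, headers in front of the payload plus gso_size; here is a toy model of that layering (it ignores GSO_BY_FRAGS, tunnels and UFO, and struct pkt is a stand-in for sk_buff):

#include <stdbool.h>
#include <stdio.h>

/* struct pkt is a stand-in for sk_buff; offsets are from the start of the frame. */
struct pkt {
    unsigned int mac_off;       /* L2 header offset                */
    unsigned int net_off;       /* L3 (IP/IPv6) header offset      */
    unsigned int trans_off;     /* L4 (TCP/UDP/SCTP) header offset */
    unsigned int l4_hdr_len;    /* transport header length         */
    unsigned int gso_size;      /* payload carried per segment     */
};

static unsigned int transport_seglen(const struct pkt *p)
{
    return p->l4_hdr_len + p->gso_size;
}

/* L3 + L4 headers + payload, as in skb_gso_network_seglen(). */
static unsigned int network_seglen(const struct pkt *p)
{
    return (p->trans_off - p->net_off) + transport_seglen(p);
}

/* L2 + L3 + L4 headers + payload, as in skb_gso_mac_seglen(). */
static unsigned int mac_seglen(const struct pkt *p)
{
    return (p->trans_off - p->mac_off) + transport_seglen(p);
}

/* Models skb_gso_validate_network_len(): will each split segment fit the MTU? */
static bool validate_network_len(const struct pkt *p, unsigned int mtu)
{
    return network_seglen(p) <= mtu;
}

int main(void)
{
    struct pkt p = { 0, 14, 34, 20, 1448 };     /* Ethernet + IPv4 + TCP */

    printf("network seglen %u, mac seglen %u, fits MTU 1500? %d\n",
           network_seglen(&p), mac_seglen(&p), validate_network_len(&p, 1500));
    return 0;
}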
diff --git a/net/core/sock.c b/net/core/sock.c index c501499a04fe..85b0b64e7f9d 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -3261,6 +3261,27 @@ void proto_unregister(struct proto *prot) | |||
3261 | } | 3261 | } |
3262 | EXPORT_SYMBOL(proto_unregister); | 3262 | EXPORT_SYMBOL(proto_unregister); |
3263 | 3263 | ||
3264 | int sock_load_diag_module(int family, int protocol) | ||
3265 | { | ||
3266 | if (!protocol) { | ||
3267 | if (!sock_is_registered(family)) | ||
3268 | return -ENOENT; | ||
3269 | |||
3270 | return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, | ||
3271 | NETLINK_SOCK_DIAG, family); | ||
3272 | } | ||
3273 | |||
3274 | #ifdef CONFIG_INET | ||
3275 | if (family == AF_INET && | ||
3276 | !rcu_access_pointer(inet_protos[protocol])) | ||
3277 | return -ENOENT; | ||
3278 | #endif | ||
3279 | |||
3280 | return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK, | ||
3281 | NETLINK_SOCK_DIAG, family, protocol); | ||
3282 | } | ||
3283 | EXPORT_SYMBOL(sock_load_diag_module); | ||
3284 | |||
3264 | #ifdef CONFIG_PROC_FS | 3285 | #ifdef CONFIG_PROC_FS |
3265 | static void *proto_seq_start(struct seq_file *seq, loff_t *pos) | 3286 | static void *proto_seq_start(struct seq_file *seq, loff_t *pos) |
3266 | __acquires(proto_list_mutex) | 3287 | __acquires(proto_list_mutex) |
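The new sock_load_diag_module() centralizes the request_module() aliases the diag front ends used to format by hand, and skips the module loader entirely when the address family or protocol is not even registered. A userspace sketch of the two alias shapes it requests (the numeric defines mirror the uapi values as far as I recall):

#include <stdio.h>

/* These defines mirror the uapi values as far as I recall. */
#define PF_NETLINK        16
#define NETLINK_SOCK_DIAG 4
#define AF_INET           2
#define IPPROTO_TCP       6

/*
 * Models the two module aliases sock_load_diag_module() requests:
 * a family-level handler when protocol is 0, a protocol-level one otherwise.
 */
static void load_diag_module(int family, int protocol)
{
    char alias[64];

    if (!protocol)
        snprintf(alias, sizeof(alias), "net-pf-%d-proto-%d-type-%d",
                 PF_NETLINK, NETLINK_SOCK_DIAG, family);
    else
        snprintf(alias, sizeof(alias), "net-pf-%d-proto-%d-type-%d-%d",
                 PF_NETLINK, NETLINK_SOCK_DIAG, family, protocol);

    printf("request_module(\"%s\")\n", alias);  /* the kernel would autoload this */
}

int main(void)
{
    load_diag_module(AF_INET, 0);           /* what sock_diag_bind() needs         */
    load_diag_module(AF_INET, IPPROTO_TCP); /* what inet_diag_lock_handler() needs */
    return 0;
}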
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index 146b50e30659..c37b5be7c5e4 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c | |||
@@ -220,8 +220,7 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
220 | return -EINVAL; | 220 | return -EINVAL; |
221 | 221 | ||
222 | if (sock_diag_handlers[req->sdiag_family] == NULL) | 222 | if (sock_diag_handlers[req->sdiag_family] == NULL) |
223 | request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, | 223 | sock_load_diag_module(req->sdiag_family, 0); |
224 | NETLINK_SOCK_DIAG, req->sdiag_family); | ||
225 | 224 | ||
226 | mutex_lock(&sock_diag_table_mutex); | 225 | mutex_lock(&sock_diag_table_mutex); |
227 | hndl = sock_diag_handlers[req->sdiag_family]; | 226 | hndl = sock_diag_handlers[req->sdiag_family]; |
@@ -247,8 +246,7 @@ static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
247 | case TCPDIAG_GETSOCK: | 246 | case TCPDIAG_GETSOCK: |
248 | case DCCPDIAG_GETSOCK: | 247 | case DCCPDIAG_GETSOCK: |
249 | if (inet_rcv_compat == NULL) | 248 | if (inet_rcv_compat == NULL) |
250 | request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, | 249 | sock_load_diag_module(AF_INET, 0); |
251 | NETLINK_SOCK_DIAG, AF_INET); | ||
252 | 250 | ||
253 | mutex_lock(&sock_diag_table_mutex); | 251 | mutex_lock(&sock_diag_table_mutex); |
254 | if (inet_rcv_compat != NULL) | 252 | if (inet_rcv_compat != NULL) |
@@ -281,14 +279,12 @@ static int sock_diag_bind(struct net *net, int group) | |||
281 | case SKNLGRP_INET_TCP_DESTROY: | 279 | case SKNLGRP_INET_TCP_DESTROY: |
282 | case SKNLGRP_INET_UDP_DESTROY: | 280 | case SKNLGRP_INET_UDP_DESTROY: |
283 | if (!sock_diag_handlers[AF_INET]) | 281 | if (!sock_diag_handlers[AF_INET]) |
284 | request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, | 282 | sock_load_diag_module(AF_INET, 0); |
285 | NETLINK_SOCK_DIAG, AF_INET); | ||
286 | break; | 283 | break; |
287 | case SKNLGRP_INET6_TCP_DESTROY: | 284 | case SKNLGRP_INET6_TCP_DESTROY: |
288 | case SKNLGRP_INET6_UDP_DESTROY: | 285 | case SKNLGRP_INET6_UDP_DESTROY: |
289 | if (!sock_diag_handlers[AF_INET6]) | 286 | if (!sock_diag_handlers[AF_INET6]) |
290 | request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, | 287 | sock_load_diag_module(AF_INET6, 0); |
291 | NETLINK_SOCK_DIAG, AF_INET6); | ||
292 | break; | 288 | break; |
293 | } | 289 | } |
294 | return 0; | 290 | return 0; |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 15bdc002d90c..84cd4e3fd01b 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -794,6 +794,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
794 | if (skb == NULL) | 794 | if (skb == NULL) |
795 | goto out_release; | 795 | goto out_release; |
796 | 796 | ||
797 | if (sk->sk_state == DCCP_CLOSED) { | ||
798 | rc = -ENOTCONN; | ||
799 | goto out_discard; | ||
800 | } | ||
801 | |||
797 | skb_reserve(skb, sk->sk_prot->max_header); | 802 | skb_reserve(skb, sk->sk_prot->max_header); |
798 | rc = memcpy_from_msg(skb_put(skb, len), msg, len); | 803 | rc = memcpy_from_msg(skb_put(skb, len), msg, len); |
799 | if (rc != 0) | 804 | if (rc != 0) |
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 91dd09f79808..791aff68af88 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
@@ -1338,6 +1338,12 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use | |||
1338 | lock_sock(sk); | 1338 | lock_sock(sk); |
1339 | err = __dn_setsockopt(sock, level, optname, optval, optlen, 0); | 1339 | err = __dn_setsockopt(sock, level, optname, optval, optlen, 0); |
1340 | release_sock(sk); | 1340 | release_sock(sk); |
1341 | #ifdef CONFIG_NETFILTER | ||
1342 | /* we need to exclude all possible ENOPROTOOPTs except default case */ | ||
1343 | if (err == -ENOPROTOOPT && optname != DSO_LINKINFO && | ||
1344 | optname != DSO_STREAM && optname != DSO_SEQPACKET) | ||
1345 | err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen); | ||
1346 | #endif | ||
1341 | 1347 | ||
1342 | return err; | 1348 | return err; |
1343 | } | 1349 | } |
@@ -1445,15 +1451,6 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us | |||
1445 | dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation); | 1451 | dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation); |
1446 | break; | 1452 | break; |
1447 | 1453 | ||
1448 | default: | ||
1449 | #ifdef CONFIG_NETFILTER | ||
1450 | return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen); | ||
1451 | #endif | ||
1452 | case DSO_LINKINFO: | ||
1453 | case DSO_STREAM: | ||
1454 | case DSO_SEQPACKET: | ||
1455 | return -ENOPROTOOPT; | ||
1456 | |||
1457 | case DSO_MAXWINDOW: | 1454 | case DSO_MAXWINDOW: |
1458 | if (optlen != sizeof(unsigned long)) | 1455 | if (optlen != sizeof(unsigned long)) |
1459 | return -EINVAL; | 1456 | return -EINVAL; |
@@ -1501,6 +1498,12 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us | |||
1501 | return -EINVAL; | 1498 | return -EINVAL; |
1502 | scp->info_loc = u.info; | 1499 | scp->info_loc = u.info; |
1503 | break; | 1500 | break; |
1501 | |||
1502 | case DSO_LINKINFO: | ||
1503 | case DSO_STREAM: | ||
1504 | case DSO_SEQPACKET: | ||
1505 | default: | ||
1506 | return -ENOPROTOOPT; | ||
1504 | } | 1507 | } |
1505 | 1508 | ||
1506 | return 0; | 1509 | return 0; |
@@ -1514,6 +1517,20 @@ static int dn_getsockopt(struct socket *sock, int level, int optname, char __use | |||
1514 | lock_sock(sk); | 1517 | lock_sock(sk); |
1515 | err = __dn_getsockopt(sock, level, optname, optval, optlen, 0); | 1518 | err = __dn_getsockopt(sock, level, optname, optval, optlen, 0); |
1516 | release_sock(sk); | 1519 | release_sock(sk); |
1520 | #ifdef CONFIG_NETFILTER | ||
1521 | if (err == -ENOPROTOOPT && optname != DSO_STREAM && | ||
1522 | optname != DSO_SEQPACKET && optname != DSO_CONACCEPT && | ||
1523 | optname != DSO_CONREJECT) { | ||
1524 | int len; | ||
1525 | |||
1526 | if (get_user(len, optlen)) | ||
1527 | return -EFAULT; | ||
1528 | |||
1529 | err = nf_getsockopt(sk, PF_DECnet, optname, optval, &len); | ||
1530 | if (err >= 0) | ||
1531 | err = put_user(len, optlen); | ||
1532 | } | ||
1533 | #endif | ||
1517 | 1534 | ||
1518 | return err; | 1535 | return err; |
1519 | } | 1536 | } |
@@ -1579,26 +1596,6 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us | |||
1579 | r_data = &link; | 1596 | r_data = &link; |
1580 | break; | 1597 | break; |
1581 | 1598 | ||
1582 | default: | ||
1583 | #ifdef CONFIG_NETFILTER | ||
1584 | { | ||
1585 | int ret, len; | ||
1586 | |||
1587 | if (get_user(len, optlen)) | ||
1588 | return -EFAULT; | ||
1589 | |||
1590 | ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len); | ||
1591 | if (ret >= 0) | ||
1592 | ret = put_user(len, optlen); | ||
1593 | return ret; | ||
1594 | } | ||
1595 | #endif | ||
1596 | case DSO_STREAM: | ||
1597 | case DSO_SEQPACKET: | ||
1598 | case DSO_CONACCEPT: | ||
1599 | case DSO_CONREJECT: | ||
1600 | return -ENOPROTOOPT; | ||
1601 | |||
1602 | case DSO_MAXWINDOW: | 1599 | case DSO_MAXWINDOW: |
1603 | if (r_len > sizeof(unsigned long)) | 1600 | if (r_len > sizeof(unsigned long)) |
1604 | r_len = sizeof(unsigned long); | 1601 | r_len = sizeof(unsigned long); |
@@ -1630,6 +1627,13 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us | |||
1630 | r_len = sizeof(unsigned char); | 1627 | r_len = sizeof(unsigned char); |
1631 | r_data = &scp->info_rem; | 1628 | r_data = &scp->info_rem; |
1632 | break; | 1629 | break; |
1630 | |||
1631 | case DSO_STREAM: | ||
1632 | case DSO_SEQPACKET: | ||
1633 | case DSO_CONACCEPT: | ||
1634 | case DSO_CONREJECT: | ||
1635 | default: | ||
1636 | return -ENOPROTOOPT; | ||
1633 | } | 1637 | } |
1634 | 1638 | ||
1635 | if (r_data) { | 1639 | if (r_data) { |
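The DECnet hunks stop calling nf_setsockopt()/nf_getsockopt() from inside __dn_setsockopt()/__dn_getsockopt(), which run under lock_sock(); unknown options now return -ENOPROTOOPT and the outer dn_setsockopt()/dn_getsockopt() forward that case to netfilter only after release_sock(), excluding the options that legitimately return -ENOPROTOOPT themselves. A small sketch of that 'core handler first, fallback outside the lock' shape, with a pthread mutex standing in for the socket lock (none of these names are kernel symbols):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;  /* stands in for lock_sock() */

/* Core handler: knows only its own options, like __dn_setsockopt(). */
static int core_setsockopt(int optname, int val)
{
    switch (optname) {
    case 1:                             /* a DSO_MAXWINDOW-style option */
        printf("core handled opt %d = %d\n", optname, val);
        return 0;
    default:
        return -ENOPROTOOPT;
    }
}

/* Fallback that must not run under the socket lock, like nf_setsockopt(). */
static int fallback_setsockopt(int optname, int val)
{
    printf("netfilter fallback handled opt %d = %d\n", optname, val);
    return 0;
}

static int do_setsockopt(int optname, int val)
{
    int err;

    pthread_mutex_lock(&sock_lock);
    err = core_setsockopt(optname, val);
    pthread_mutex_unlock(&sock_lock);   /* drop the lock first ...        */

    if (err == -ENOPROTOOPT)            /* ... then try the fallback path */
        err = fallback_setsockopt(optname, val);
    return err;
}

int main(void)
{
    printf("-> %d\n", do_setsockopt(1, 7));
    printf("-> %d\n", do_setsockopt(99, 7));
    return 0;
}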
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c index cb54b81d0bd9..42a7b85b84e1 100644 --- a/net/dsa/legacy.c +++ b/net/dsa/legacy.c | |||
@@ -194,7 +194,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, | |||
194 | ds->ports[i].dn = cd->port_dn[i]; | 194 | ds->ports[i].dn = cd->port_dn[i]; |
195 | ds->ports[i].cpu_dp = dst->cpu_dp; | 195 | ds->ports[i].cpu_dp = dst->cpu_dp; |
196 | 196 | ||
197 | if (dsa_is_user_port(ds, i)) | 197 | if (!dsa_is_user_port(ds, i)) |
198 | continue; | 198 | continue; |
199 | 199 | ||
200 | ret = dsa_slave_create(&ds->ports[i]); | 200 | ret = dsa_slave_create(&ds->ports[i]); |
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index 974765b7d92a..e9f0489e4229 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c | |||
@@ -206,9 +206,13 @@ static inline void lowpan_netlink_fini(void) | |||
206 | static int lowpan_device_event(struct notifier_block *unused, | 206 | static int lowpan_device_event(struct notifier_block *unused, |
207 | unsigned long event, void *ptr) | 207 | unsigned long event, void *ptr) |
208 | { | 208 | { |
209 | struct net_device *wdev = netdev_notifier_info_to_dev(ptr); | 209 | struct net_device *ndev = netdev_notifier_info_to_dev(ptr); |
210 | struct wpan_dev *wpan_dev; | ||
210 | 211 | ||
211 | if (wdev->type != ARPHRD_IEEE802154) | 212 | if (ndev->type != ARPHRD_IEEE802154) |
213 | return NOTIFY_DONE; | ||
214 | wpan_dev = ndev->ieee802154_ptr; | ||
215 | if (!wpan_dev) | ||
212 | return NOTIFY_DONE; | 216 | return NOTIFY_DONE; |
213 | 217 | ||
214 | switch (event) { | 218 | switch (event) { |
@@ -217,8 +221,8 @@ static int lowpan_device_event(struct notifier_block *unused, | |||
217 | * also delete possible lowpan interfaces which belongs | 221 | * also delete possible lowpan interfaces which belongs |
218 | * to the wpan interface. | 222 | * to the wpan interface. |
219 | */ | 223 | */ |
220 | if (wdev->ieee802154_ptr->lowpan_dev) | 224 | if (wpan_dev->lowpan_dev) |
221 | lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL); | 225 | lowpan_dellink(wpan_dev->lowpan_dev, NULL); |
222 | break; | 226 | break; |
223 | default: | 227 | default: |
224 | return NOTIFY_DONE; | 228 | return NOTIFY_DONE; |
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index c586597da20d..7d36a950d961 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c | |||
@@ -646,6 +646,11 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi, | |||
646 | fi->fib_nh, cfg, extack)) | 646 | fi->fib_nh, cfg, extack)) |
647 | return 1; | 647 | return 1; |
648 | } | 648 | } |
649 | #ifdef CONFIG_IP_ROUTE_CLASSID | ||
650 | if (cfg->fc_flow && | ||
651 | cfg->fc_flow != fi->fib_nh->nh_tclassid) | ||
652 | return 1; | ||
653 | #endif | ||
649 | if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) && | 654 | if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) && |
650 | (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw)) | 655 | (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw)) |
651 | return 0; | 656 | return 0; |
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index a383f299ce24..4e5bc4b2f14e 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -53,8 +53,7 @@ static DEFINE_MUTEX(inet_diag_table_mutex); | |||
53 | static const struct inet_diag_handler *inet_diag_lock_handler(int proto) | 53 | static const struct inet_diag_handler *inet_diag_lock_handler(int proto) |
54 | { | 54 | { |
55 | if (!inet_diag_table[proto]) | 55 | if (!inet_diag_table[proto]) |
56 | request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK, | 56 | sock_load_diag_module(AF_INET, proto); |
57 | NETLINK_SOCK_DIAG, AF_INET, proto); | ||
58 | 57 | ||
59 | mutex_lock(&inet_diag_table_mutex); | 58 | mutex_lock(&inet_diag_table_mutex); |
60 | if (!inet_diag_table[proto]) | 59 | if (!inet_diag_table[proto]) |
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 26a3d0315728..e8ec28999f5c 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c | |||
@@ -119,6 +119,9 @@ out: | |||
119 | 119 | ||
120 | static bool inet_fragq_should_evict(const struct inet_frag_queue *q) | 120 | static bool inet_fragq_should_evict(const struct inet_frag_queue *q) |
121 | { | 121 | { |
122 | if (!hlist_unhashed(&q->list_evictor)) | ||
123 | return false; | ||
124 | |||
122 | return q->net->low_thresh == 0 || | 125 | return q->net->low_thresh == 0 || |
123 | frag_mem_limit(q->net) >= q->net->low_thresh; | 126 | frag_mem_limit(q->net) >= q->net->low_thresh; |
124 | } | 127 | } |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 2dd21c3281a1..b54b948b0596 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -55,7 +55,7 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) | |||
55 | if (skb->ignore_df) | 55 | if (skb->ignore_df) |
56 | return false; | 56 | return false; |
57 | 57 | ||
58 | if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) | 58 | if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) |
59 | return false; | 59 | return false; |
60 | 60 | ||
61 | return true; | 61 | return true; |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 45d97e9b2759..0901de42ed85 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -970,9 +970,6 @@ static void __gre_tunnel_init(struct net_device *dev) | |||
970 | 970 | ||
971 | t_hlen = tunnel->hlen + sizeof(struct iphdr); | 971 | t_hlen = tunnel->hlen + sizeof(struct iphdr); |
972 | 972 | ||
973 | dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; | ||
974 | dev->mtu = ETH_DATA_LEN - t_hlen - 4; | ||
975 | |||
976 | dev->features |= GRE_FEATURES; | 973 | dev->features |= GRE_FEATURES; |
977 | dev->hw_features |= GRE_FEATURES; | 974 | dev->hw_features |= GRE_FEATURES; |
978 | 975 | ||
@@ -1290,8 +1287,6 @@ static int erspan_tunnel_init(struct net_device *dev) | |||
1290 | erspan_hdr_len(tunnel->erspan_ver); | 1287 | erspan_hdr_len(tunnel->erspan_ver); |
1291 | t_hlen = tunnel->hlen + sizeof(struct iphdr); | 1288 | t_hlen = tunnel->hlen + sizeof(struct iphdr); |
1292 | 1289 | ||
1293 | dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; | ||
1294 | dev->mtu = ETH_DATA_LEN - t_hlen - 4; | ||
1295 | dev->features |= GRE_FEATURES; | 1290 | dev->features |= GRE_FEATURES; |
1296 | dev->hw_features |= GRE_FEATURES; | 1291 | dev->hw_features |= GRE_FEATURES; |
1297 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | 1292 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index e8e675be60ec..66340ab750e6 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -248,7 +248,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk, | |||
248 | 248 | ||
249 | /* common case: seglen is <= mtu | 249 | /* common case: seglen is <= mtu |
250 | */ | 250 | */ |
251 | if (skb_gso_validate_mtu(skb, mtu)) | 251 | if (skb_gso_validate_network_len(skb, mtu)) |
252 | return ip_finish_output2(net, sk, skb); | 252 | return ip_finish_output2(net, sk, skb); |
253 | 253 | ||
254 | /* Slowpath - GSO segment length exceeds the egress MTU. | 254 | /* Slowpath - GSO segment length exceeds the egress MTU. |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 008be04ac1cc..74c962b9b09c 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -258,7 +258,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, | |||
258 | src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg); | 258 | src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg); |
259 | if (!ipv6_addr_v4mapped(&src_info->ipi6_addr)) | 259 | if (!ipv6_addr_v4mapped(&src_info->ipi6_addr)) |
260 | return -EINVAL; | 260 | return -EINVAL; |
261 | ipc->oif = src_info->ipi6_ifindex; | 261 | if (src_info->ipi6_ifindex) |
262 | ipc->oif = src_info->ipi6_ifindex; | ||
262 | ipc->addr = src_info->ipi6_addr.s6_addr32[3]; | 263 | ipc->addr = src_info->ipi6_addr.s6_addr32[3]; |
263 | continue; | 264 | continue; |
264 | } | 265 | } |
@@ -288,7 +289,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, | |||
288 | if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo))) | 289 | if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo))) |
289 | return -EINVAL; | 290 | return -EINVAL; |
290 | info = (struct in_pktinfo *)CMSG_DATA(cmsg); | 291 | info = (struct in_pktinfo *)CMSG_DATA(cmsg); |
291 | ipc->oif = info->ipi_ifindex; | 292 | if (info->ipi_ifindex) |
293 | ipc->oif = info->ipi_ifindex; | ||
292 | ipc->addr = info->ipi_spec_dst.s_addr; | 294 | ipc->addr = info->ipi_spec_dst.s_addr; |
293 | break; | 295 | break; |
294 | } | 296 | } |
@@ -1567,10 +1569,7 @@ int ip_getsockopt(struct sock *sk, int level, | |||
1567 | if (get_user(len, optlen)) | 1569 | if (get_user(len, optlen)) |
1568 | return -EFAULT; | 1570 | return -EFAULT; |
1569 | 1571 | ||
1570 | lock_sock(sk); | 1572 | err = nf_getsockopt(sk, PF_INET, optname, optval, &len); |
1571 | err = nf_getsockopt(sk, PF_INET, optname, optval, | ||
1572 | &len); | ||
1573 | release_sock(sk); | ||
1574 | if (err >= 0) | 1573 | if (err >= 0) |
1575 | err = put_user(len, optlen); | 1574 | err = put_user(len, optlen); |
1576 | return err; | 1575 | return err; |
@@ -1602,9 +1601,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1602 | if (get_user(len, optlen)) | 1601 | if (get_user(len, optlen)) |
1603 | return -EFAULT; | 1602 | return -EFAULT; |
1604 | 1603 | ||
1605 | lock_sock(sk); | ||
1606 | err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len); | 1604 | err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len); |
1607 | release_sock(sk); | ||
1608 | if (err >= 0) | 1605 | if (err >= 0) |
1609 | err = put_user(len, optlen); | 1606 | err = put_user(len, optlen); |
1610 | return err; | 1607 | return err; |
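In ip_cmsg_send(), a pktinfo ancillary message whose ifindex is zero now leaves ipc->oif untouched instead of clearing it, so an unspecified index no longer overrides an output interface the socket already chose; the getsockopt paths also stop wrapping nf_getsockopt()/compat_nf_getsockopt() in lock_sock(). A minimal model of the 'zero means unspecified, do not clobber' rule (struct cookie and the field names are stand-ins):

#include <stdio.h>

/* struct cookie and the field names are stand-ins for ipcm_cookie. */
struct cookie {
    int oif;                    /* chosen output interface index  */
    unsigned int addr;          /* chosen source/destination hint */
};

/*
 * Models the ip_cmsg_send() change: a zero ifindex in the control message
 * means "unspecified" and must not clobber an oif the socket already chose.
 */
static void apply_pktinfo(struct cookie *ipc, int ipi_ifindex, unsigned int spec_dst)
{
    if (ipi_ifindex)            /* only override when the caller named a device */
        ipc->oif = ipi_ifindex;
    ipc->addr = spec_dst;
}

int main(void)
{
    struct cookie ipc = { .oif = 3 };   /* oif already set, e.g. by an earlier bind */

    apply_pktinfo(&ipc, 0, 0x7f000001); /* zero ifindex: oif stays 3  */
    printf("after zero ifindex: oif %d\n", ipc.oif);

    apply_pktinfo(&ipc, 5, 0x7f000001); /* explicit ifindex: oif -> 5 */
    printf("after ifindex 5:    oif %d\n", ipc.oif);
    return 0;
}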
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index d786a8441bce..6d21068f9b55 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
@@ -710,16 +710,9 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
710 | } | 710 | } |
711 | } | 711 | } |
712 | 712 | ||
713 | if (tunnel->fwmark) { | 713 | init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr, |
714 | init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr, | 714 | tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link, |
715 | tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link, | 715 | tunnel->fwmark); |
716 | tunnel->fwmark); | ||
717 | } | ||
718 | else { | ||
719 | init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr, | ||
720 | tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link, | ||
721 | skb->mark); | ||
722 | } | ||
723 | 716 | ||
724 | if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) | 717 | if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) |
725 | goto tx_error; | 718 | goto tx_error; |
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 4ffe302f9b82..e3e420f3ba7b 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -252,6 +252,10 @@ unsigned int arpt_do_table(struct sk_buff *skb, | |||
252 | } | 252 | } |
253 | if (table_base + v | 253 | if (table_base + v |
254 | != arpt_next_entry(e)) { | 254 | != arpt_next_entry(e)) { |
255 | if (unlikely(stackidx >= private->stacksize)) { | ||
256 | verdict = NF_DROP; | ||
257 | break; | ||
258 | } | ||
255 | jumpstack[stackidx++] = e; | 259 | jumpstack[stackidx++] = e; |
256 | } | 260 | } |
257 | 261 | ||
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 9a71f3149507..e38395a8dcf2 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -330,8 +330,13 @@ ipt_do_table(struct sk_buff *skb, | |||
330 | continue; | 330 | continue; |
331 | } | 331 | } |
332 | if (table_base + v != ipt_next_entry(e) && | 332 | if (table_base + v != ipt_next_entry(e) && |
333 | !(e->ip.flags & IPT_F_GOTO)) | 333 | !(e->ip.flags & IPT_F_GOTO)) { |
334 | if (unlikely(stackidx >= private->stacksize)) { | ||
335 | verdict = NF_DROP; | ||
336 | break; | ||
337 | } | ||
334 | jumpstack[stackidx++] = e; | 338 | jumpstack[stackidx++] = e; |
339 | } | ||
335 | 340 | ||
336 | e = get_entry(table_base, v); | 341 | e = get_entry(table_base, v); |
337 | continue; | 342 | continue; |
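Both arpt_do_table() and ipt_do_table() now check stackidx against private->stacksize before pushing the current rule onto the jump stack, turning a would-be out-of-bounds write into an NF_DROP verdict for that packet. A compact model of that bounded push:

#include <stdio.h>

#define STACKSIZE        4      /* stands in for private->stacksize */
#define VERDICT_CONTINUE 0
#define VERDICT_DROP     1

/*
 * Models the new guard in arpt_do_table()/ipt_do_table(): the jump stack
 * is a fixed-size array, so a push beyond its end must fail the packet
 * instead of writing past the array.
 */
static int push_rule(const void **stack, unsigned int *idx, const void *rule)
{
    if (*idx >= STACKSIZE)              /* would overflow: drop the packet */
        return VERDICT_DROP;
    stack[(*idx)++] = rule;
    return VERDICT_CONTINUE;
}

int main(void)
{
    const void *jumpstack[STACKSIZE];
    unsigned int stackidx = 0;
    int rule = 42;
    int verdict = VERDICT_CONTINUE;

    for (int i = 0; i < 6 && verdict == VERDICT_CONTINUE; i++)
        verdict = push_rule(jumpstack, &stackidx, &rule);

    printf("pushed %u entries, final verdict %s\n",
           stackidx, verdict == VERDICT_DROP ? "DROP" : "CONTINUE");
    return 0;
}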
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 3a84a60f6b39..8a8ae61cea71 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
@@ -107,12 +107,6 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c) | |||
107 | 107 | ||
108 | local_bh_disable(); | 108 | local_bh_disable(); |
109 | if (refcount_dec_and_lock(&c->entries, &cn->lock)) { | 109 | if (refcount_dec_and_lock(&c->entries, &cn->lock)) { |
110 | list_del_rcu(&c->list); | ||
111 | spin_unlock(&cn->lock); | ||
112 | local_bh_enable(); | ||
113 | |||
114 | unregister_netdevice_notifier(&c->notifier); | ||
115 | |||
116 | /* In case anyone still accesses the file, the open/close | 110 | /* In case anyone still accesses the file, the open/close |
117 | * functions are also incrementing the refcount on their own, | 111 | * functions are also incrementing the refcount on their own, |
118 | * so it's safe to remove the entry even if it's in use. */ | 112 | * so it's safe to remove the entry even if it's in use. */ |
@@ -120,6 +114,12 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c) | |||
120 | if (cn->procdir) | 114 | if (cn->procdir) |
121 | proc_remove(c->pde); | 115 | proc_remove(c->pde); |
122 | #endif | 116 | #endif |
117 | list_del_rcu(&c->list); | ||
118 | spin_unlock(&cn->lock); | ||
119 | local_bh_enable(); | ||
120 | |||
121 | unregister_netdevice_notifier(&c->notifier); | ||
122 | |||
123 | return; | 123 | return; |
124 | } | 124 | } |
125 | local_bh_enable(); | 125 | local_bh_enable(); |
@@ -154,8 +154,12 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry) | |||
154 | #endif | 154 | #endif |
155 | if (unlikely(!refcount_inc_not_zero(&c->refcount))) | 155 | if (unlikely(!refcount_inc_not_zero(&c->refcount))) |
156 | c = NULL; | 156 | c = NULL; |
157 | else if (entry) | 157 | else if (entry) { |
158 | refcount_inc(&c->entries); | 158 | if (unlikely(!refcount_inc_not_zero(&c->entries))) { |
159 | clusterip_config_put(c); | ||
160 | c = NULL; | ||
161 | } | ||
162 | } | ||
159 | } | 163 | } |
160 | rcu_read_unlock_bh(); | 164 | rcu_read_unlock_bh(); |
161 | 165 | ||
@@ -228,7 +232,6 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i, | |||
228 | c->hash_mode = i->hash_mode; | 232 | c->hash_mode = i->hash_mode; |
229 | c->hash_initval = i->hash_initval; | 233 | c->hash_initval = i->hash_initval; |
230 | refcount_set(&c->refcount, 1); | 234 | refcount_set(&c->refcount, 1); |
231 | refcount_set(&c->entries, 1); | ||
232 | 235 | ||
233 | spin_lock_bh(&cn->lock); | 236 | spin_lock_bh(&cn->lock); |
234 | if (__clusterip_config_find(net, ip)) { | 237 | if (__clusterip_config_find(net, ip)) { |
@@ -259,8 +262,10 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i, | |||
259 | 262 | ||
260 | c->notifier.notifier_call = clusterip_netdev_event; | 263 | c->notifier.notifier_call = clusterip_netdev_event; |
261 | err = register_netdevice_notifier(&c->notifier); | 264 | err = register_netdevice_notifier(&c->notifier); |
262 | if (!err) | 265 | if (!err) { |
266 | refcount_set(&c->entries, 1); | ||
263 | return c; | 267 | return c; |
268 | } | ||
264 | 269 | ||
265 | #ifdef CONFIG_PROC_FS | 270 | #ifdef CONFIG_PROC_FS |
266 | proc_remove(c->pde); | 271 | proc_remove(c->pde); |
@@ -269,7 +274,7 @@ err: | |||
269 | spin_lock_bh(&cn->lock); | 274 | spin_lock_bh(&cn->lock); |
270 | list_del_rcu(&c->list); | 275 | list_del_rcu(&c->list); |
271 | spin_unlock_bh(&cn->lock); | 276 | spin_unlock_bh(&cn->lock); |
272 | kfree(c); | 277 | clusterip_config_put(c); |
273 | 278 | ||
274 | return ERR_PTR(err); | 279 | return ERR_PTR(err); |
275 | } | 280 | } |
@@ -492,12 +497,15 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par) | |||
492 | return PTR_ERR(config); | 497 | return PTR_ERR(config); |
493 | } | 498 | } |
494 | } | 499 | } |
495 | cipinfo->config = config; | ||
496 | 500 | ||
497 | ret = nf_ct_netns_get(par->net, par->family); | 501 | ret = nf_ct_netns_get(par->net, par->family); |
498 | if (ret < 0) | 502 | if (ret < 0) { |
499 | pr_info("cannot load conntrack support for proto=%u\n", | 503 | pr_info("cannot load conntrack support for proto=%u\n", |
500 | par->family); | 504 | par->family); |
505 | clusterip_config_entry_put(par->net, config); | ||
506 | clusterip_config_put(config); | ||
507 | return ret; | ||
508 | } | ||
501 | 509 | ||
502 | if (!par->net->xt.clusterip_deprecated_warning) { | 510 | if (!par->net->xt.clusterip_deprecated_warning) { |
503 | pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, " | 511 | pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, " |
@@ -505,6 +513,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par) | |||
505 | par->net->xt.clusterip_deprecated_warning = true; | 513 | par->net->xt.clusterip_deprecated_warning = true; |
506 | } | 514 | } |
507 | 515 | ||
516 | cipinfo->config = config; | ||
508 | return ret; | 517 | return ret; |
509 | } | 518 | } |
510 | 519 | ||
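The CLUSTERIP hunks unlink the config and unregister its notifier only after the proc entry is removed, defer initializing the entries refcount until register_netdevice_notifier() has succeeded, take the entries reference in clusterip_config_find_get() with refcount_inc_not_zero() so a dying config is never handed out, and release the config properly in the checkentry error path. A self-contained model of that 'only reference live objects' lookup idiom, using C11 atomics in place of refcount_t:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* atomic_int stands in for the kernel's refcount_t. */
struct config {
    atomic_int refcount;
};

/* Models refcount_inc_not_zero(): only take a reference on a live object. */
static bool get_if_live(atomic_int *ref)
{
    int old = atomic_load(ref);

    while (old != 0)
        if (atomic_compare_exchange_weak(ref, &old, old + 1))
            return true;        /* reference taken                       */
    return false;               /* object already dying: do not hand out */
}

int main(void)
{
    struct config live  = { 1 };        /* still referenced elsewhere   */
    struct config dying = { 0 };        /* last reference already gone  */

    printf("live lookup:  %s\n", get_if_live(&live.refcount)  ? "got ref" : "miss");
    printf("dying lookup: %s\n", get_if_live(&dying.refcount) ? "got ref" : "miss");
    return 0;
}

Looking up with a conditional increment is what lets the teardown path above safely drop the last entries reference while concurrent readers are still walking the RCU list.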
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c index 270765236f5e..aaaf9a81fbc9 100644 --- a/net/ipv4/netfilter/ipt_ECN.c +++ b/net/ipv4/netfilter/ipt_ECN.c | |||
@@ -98,17 +98,15 @@ static int ecn_tg_check(const struct xt_tgchk_param *par) | |||
98 | const struct ipt_ECN_info *einfo = par->targinfo; | 98 | const struct ipt_ECN_info *einfo = par->targinfo; |
99 | const struct ipt_entry *e = par->entryinfo; | 99 | const struct ipt_entry *e = par->entryinfo; |
100 | 100 | ||
101 | if (einfo->operation & IPT_ECN_OP_MASK) { | 101 | if (einfo->operation & IPT_ECN_OP_MASK) |
102 | pr_info("unsupported ECN operation %x\n", einfo->operation); | ||
103 | return -EINVAL; | 102 | return -EINVAL; |
104 | } | 103 | |
105 | if (einfo->ip_ect & ~IPT_ECN_IP_MASK) { | 104 | if (einfo->ip_ect & ~IPT_ECN_IP_MASK) |
106 | pr_info("new ECT codepoint %x out of mask\n", einfo->ip_ect); | ||
107 | return -EINVAL; | 105 | return -EINVAL; |
108 | } | 106 | |
109 | if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) && | 107 | if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) && |
110 | (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) { | 108 | (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) { |
111 | pr_info("cannot use TCP operations on a non-tcp rule\n"); | 109 | pr_info_ratelimited("cannot use operation on non-tcp rule\n"); |
112 | return -EINVAL; | 110 | return -EINVAL; |
113 | } | 111 | } |
114 | return 0; | 112 | return 0; |
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index 8bd0d7b26632..e8bed3390e58 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c | |||
@@ -74,13 +74,13 @@ static int reject_tg_check(const struct xt_tgchk_param *par) | |||
74 | const struct ipt_entry *e = par->entryinfo; | 74 | const struct ipt_entry *e = par->entryinfo; |
75 | 75 | ||
76 | if (rejinfo->with == IPT_ICMP_ECHOREPLY) { | 76 | if (rejinfo->with == IPT_ICMP_ECHOREPLY) { |
77 | pr_info("ECHOREPLY no longer supported.\n"); | 77 | pr_info_ratelimited("ECHOREPLY no longer supported.\n"); |
78 | return -EINVAL; | 78 | return -EINVAL; |
79 | } else if (rejinfo->with == IPT_TCP_RESET) { | 79 | } else if (rejinfo->with == IPT_TCP_RESET) { |
80 | /* Must specify that it's a TCP packet */ | 80 | /* Must specify that it's a TCP packet */ |
81 | if (e->ip.proto != IPPROTO_TCP || | 81 | if (e->ip.proto != IPPROTO_TCP || |
82 | (e->ip.invflags & XT_INV_PROTO)) { | 82 | (e->ip.invflags & XT_INV_PROTO)) { |
83 | pr_info("TCP_RESET invalid for non-tcp\n"); | 83 | pr_info_ratelimited("TCP_RESET invalid for non-tcp\n"); |
84 | return -EINVAL; | 84 | return -EINVAL; |
85 | } | 85 | } |
86 | } | 86 | } |
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c index 37fb9552e858..fd01f13c896a 100644 --- a/net/ipv4/netfilter/ipt_rpfilter.c +++ b/net/ipv4/netfilter/ipt_rpfilter.c | |||
@@ -105,14 +105,14 @@ static int rpfilter_check(const struct xt_mtchk_param *par) | |||
105 | const struct xt_rpfilter_info *info = par->matchinfo; | 105 | const struct xt_rpfilter_info *info = par->matchinfo; |
106 | unsigned int options = ~XT_RPFILTER_OPTION_MASK; | 106 | unsigned int options = ~XT_RPFILTER_OPTION_MASK; |
107 | if (info->flags & options) { | 107 | if (info->flags & options) { |
108 | pr_info("unknown options encountered"); | 108 | pr_info_ratelimited("unknown options\n"); |
109 | return -EINVAL; | 109 | return -EINVAL; |
110 | } | 110 | } |
111 | 111 | ||
112 | if (strcmp(par->table, "mangle") != 0 && | 112 | if (strcmp(par->table, "mangle") != 0 && |
113 | strcmp(par->table, "raw") != 0) { | 113 | strcmp(par->table, "raw") != 0) { |
114 | pr_info("match only valid in the \'raw\' " | 114 | pr_info_ratelimited("only valid in \'raw\' or \'mangle\' table, not \'%s\'\n", |
115 | "or \'mangle\' tables, not \'%s\'.\n", par->table); | 115 | par->table); |
116 | return -EINVAL; | 116 | return -EINVAL; |
117 | } | 117 | } |
118 | 118 | ||
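The ipt_ECN, ipt_REJECT and ipt_rpfilter checkentry messages move from pr_info() to pr_info_ratelimited(), since these errors can be provoked repeatedly by rule loads and should not be allowed to flood the log. A rough userspace model of the burst-plus-interval throttling such a helper applies (the burst and interval numbers here are made up, not the kernel's defaults):

#include <stdio.h>
#include <time.h>

/*
 * Rough model of the throttling pr_info_ratelimited() applies: allow a
 * small burst, then stay quiet until the interval expires.
 */
static int ratelimit(void)
{
    static time_t window_start;
    static int printed;
    const int burst = 3, interval = 5;  /* messages, seconds (illustrative) */
    time_t now = time(NULL);

    if (now - window_start >= interval) {
        window_start = now;
        printed = 0;
    }
    return printed++ < burst;
}

int main(void)
{
    /* Ten back-to-back failures only produce three log lines. */
    for (int i = 0; i < 10; i++)
        if (ratelimit())
            printf("rpfilter: unknown options\n");
    return 0;
}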
diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c index 25d2975da156..0cd46bffa469 100644 --- a/net/ipv4/netfilter/nf_flow_table_ipv4.c +++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c | |||
@@ -111,6 +111,7 @@ static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb, | |||
111 | default: | 111 | default: |
112 | return -1; | 112 | return -1; |
113 | } | 113 | } |
114 | csum_replace4(&iph->check, addr, new_addr); | ||
114 | 115 | ||
115 | return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr); | 116 | return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr); |
116 | } | 117 | } |
@@ -185,7 +186,7 @@ static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) | |||
185 | if ((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) | 186 | if ((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) |
186 | return false; | 187 | return false; |
187 | 188 | ||
188 | if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) | 189 | if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) |
189 | return false; | 190 | return false; |
190 | 191 | ||
191 | return true; | 192 | return true; |
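The nf_flow_dnat_ip() hunk adds a csum_replace4() call so the IPv4 header checksum is patched incrementally after the destination address is rewritten, rather than being left stale; the MTU check in the same file also moves to skb_gso_validate_network_len(). A standalone model of that RFC 1624-style incremental update (the function mirrors only the arithmetic; the kernel's csum_replace4() patches a __sum16 in place, and the sample checksum value below is illustrative):

#include <stdint.h>
#include <stdio.h>

/*
 * RFC 1624-style incremental checksum update: fold "minus old word, plus
 * new word" into the existing 16-bit ones'-complement checksum instead of
 * recomputing the whole header.
 */
static uint16_t checksum_replace4(uint16_t check, uint32_t old_word, uint32_t new_word)
{
    uint64_t sum = (uint16_t)~check;

    sum += (~old_word & 0xffff) + ((~old_word >> 16) & 0xffff);
    sum += (new_word & 0xffff) + (new_word >> 16);
    while (sum >> 16)                   /* end-around carry */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    /* Rewrite 192.168.0.1 -> 10.0.0.1; the starting checksum is illustrative. */
    uint32_t old_addr = 0xc0a80001, new_addr = 0x0a000001;
    uint16_t check = 0xb861;

    printf("old check 0x%04x, new check 0x%04x\n",
           check, checksum_replace4(check, old_addr, new_addr));
    return 0;
}

Patching only the affected word keeps the fast path from re-summing the whole IPv4 header on every translated packet, which is why the flow-offload code uses it for both the L3 and L4 checksums.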
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 49cc1c1df1ba..299e247b2032 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -128,10 +128,11 @@ static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1)); | |||
128 | static int ip_rt_error_cost __read_mostly = HZ; | 128 | static int ip_rt_error_cost __read_mostly = HZ; |
129 | static int ip_rt_error_burst __read_mostly = 5 * HZ; | 129 | static int ip_rt_error_burst __read_mostly = 5 * HZ; |
130 | static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; | 130 | static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; |
131 | static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; | 131 | static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; |
132 | static int ip_rt_min_advmss __read_mostly = 256; | 132 | static int ip_rt_min_advmss __read_mostly = 256; |
133 | 133 | ||
134 | static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; | 134 | static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; |
135 | |||
135 | /* | 136 | /* |
136 | * Interface to generic destination cache. | 137 | * Interface to generic destination cache. |
137 | */ | 138 | */ |
@@ -633,6 +634,7 @@ static inline u32 fnhe_hashfun(__be32 daddr) | |||
633 | static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) | 634 | static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) |
634 | { | 635 | { |
635 | rt->rt_pmtu = fnhe->fnhe_pmtu; | 636 | rt->rt_pmtu = fnhe->fnhe_pmtu; |
637 | rt->rt_mtu_locked = fnhe->fnhe_mtu_locked; | ||
636 | rt->dst.expires = fnhe->fnhe_expires; | 638 | rt->dst.expires = fnhe->fnhe_expires; |
637 | 639 | ||
638 | if (fnhe->fnhe_gw) { | 640 | if (fnhe->fnhe_gw) { |
@@ -643,7 +645,7 @@ static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnh | |||
643 | } | 645 | } |
644 | 646 | ||
645 | static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, | 647 | static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, |
646 | u32 pmtu, unsigned long expires) | 648 | u32 pmtu, bool lock, unsigned long expires) |
647 | { | 649 | { |
648 | struct fnhe_hash_bucket *hash; | 650 | struct fnhe_hash_bucket *hash; |
649 | struct fib_nh_exception *fnhe; | 651 | struct fib_nh_exception *fnhe; |
@@ -680,8 +682,10 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, | |||
680 | fnhe->fnhe_genid = genid; | 682 | fnhe->fnhe_genid = genid; |
681 | if (gw) | 683 | if (gw) |
682 | fnhe->fnhe_gw = gw; | 684 | fnhe->fnhe_gw = gw; |
683 | if (pmtu) | 685 | if (pmtu) { |
684 | fnhe->fnhe_pmtu = pmtu; | 686 | fnhe->fnhe_pmtu = pmtu; |
687 | fnhe->fnhe_mtu_locked = lock; | ||
688 | } | ||
685 | fnhe->fnhe_expires = max(1UL, expires); | 689 | fnhe->fnhe_expires = max(1UL, expires); |
686 | /* Update all cached dsts too */ | 690 | /* Update all cached dsts too */ |
687 | rt = rcu_dereference(fnhe->fnhe_rth_input); | 691 | rt = rcu_dereference(fnhe->fnhe_rth_input); |
@@ -705,6 +709,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, | |||
705 | fnhe->fnhe_daddr = daddr; | 709 | fnhe->fnhe_daddr = daddr; |
706 | fnhe->fnhe_gw = gw; | 710 | fnhe->fnhe_gw = gw; |
707 | fnhe->fnhe_pmtu = pmtu; | 711 | fnhe->fnhe_pmtu = pmtu; |
712 | fnhe->fnhe_mtu_locked = lock; | ||
708 | fnhe->fnhe_expires = expires; | 713 | fnhe->fnhe_expires = expires; |
709 | 714 | ||
710 | /* Exception created; mark the cached routes for the nexthop | 715 | /* Exception created; mark the cached routes for the nexthop |
@@ -786,7 +791,8 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow | |||
786 | struct fib_nh *nh = &FIB_RES_NH(res); | 791 | struct fib_nh *nh = &FIB_RES_NH(res); |
787 | 792 | ||
788 | update_or_create_fnhe(nh, fl4->daddr, new_gw, | 793 | update_or_create_fnhe(nh, fl4->daddr, new_gw, |
789 | 0, jiffies + ip_rt_gc_timeout); | 794 | 0, false, |
795 | jiffies + ip_rt_gc_timeout); | ||
790 | } | 796 | } |
791 | if (kill_route) | 797 | if (kill_route) |
792 | rt->dst.obsolete = DST_OBSOLETE_KILL; | 798 | rt->dst.obsolete = DST_OBSOLETE_KILL; |
@@ -930,14 +936,23 @@ out_put_peer: | |||
930 | 936 | ||
931 | static int ip_error(struct sk_buff *skb) | 937 | static int ip_error(struct sk_buff *skb) |
932 | { | 938 | { |
933 | struct in_device *in_dev = __in_dev_get_rcu(skb->dev); | ||
934 | struct rtable *rt = skb_rtable(skb); | 939 | struct rtable *rt = skb_rtable(skb); |
940 | struct net_device *dev = skb->dev; | ||
941 | struct in_device *in_dev; | ||
935 | struct inet_peer *peer; | 942 | struct inet_peer *peer; |
936 | unsigned long now; | 943 | unsigned long now; |
937 | struct net *net; | 944 | struct net *net; |
938 | bool send; | 945 | bool send; |
939 | int code; | 946 | int code; |
940 | 947 | ||
948 | if (netif_is_l3_master(skb->dev)) { | ||
949 | dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif); | ||
950 | if (!dev) | ||
951 | goto out; | ||
952 | } | ||
953 | |||
954 | in_dev = __in_dev_get_rcu(dev); | ||
955 | |||
941 | /* IP on this device is disabled. */ | 956 | /* IP on this device is disabled. */ |
942 | if (!in_dev) | 957 | if (!in_dev) |
943 | goto out; | 958 | goto out; |
@@ -999,15 +1014,18 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) | |||
999 | { | 1014 | { |
1000 | struct dst_entry *dst = &rt->dst; | 1015 | struct dst_entry *dst = &rt->dst; |
1001 | struct fib_result res; | 1016 | struct fib_result res; |
1017 | bool lock = false; | ||
1002 | 1018 | ||
1003 | if (dst_metric_locked(dst, RTAX_MTU)) | 1019 | if (ip_mtu_locked(dst)) |
1004 | return; | 1020 | return; |
1005 | 1021 | ||
1006 | if (ipv4_mtu(dst) < mtu) | 1022 | if (ipv4_mtu(dst) < mtu) |
1007 | return; | 1023 | return; |
1008 | 1024 | ||
1009 | if (mtu < ip_rt_min_pmtu) | 1025 | if (mtu < ip_rt_min_pmtu) { |
1026 | lock = true; | ||
1010 | mtu = ip_rt_min_pmtu; | 1027 | mtu = ip_rt_min_pmtu; |
1028 | } | ||
1011 | 1029 | ||
1012 | if (rt->rt_pmtu == mtu && | 1030 | if (rt->rt_pmtu == mtu && |
1013 | time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2)) | 1031 | time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2)) |
@@ -1017,7 +1035,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) | |||
1017 | if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) { | 1035 | if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) { |
1018 | struct fib_nh *nh = &FIB_RES_NH(res); | 1036 | struct fib_nh *nh = &FIB_RES_NH(res); |
1019 | 1037 | ||
1020 | update_or_create_fnhe(nh, fl4->daddr, 0, mtu, | 1038 | update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock, |
1021 | jiffies + ip_rt_mtu_expires); | 1039 | jiffies + ip_rt_mtu_expires); |
1022 | } | 1040 | } |
1023 | rcu_read_unlock(); | 1041 | rcu_read_unlock(); |
@@ -1270,7 +1288,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) | |||
1270 | 1288 | ||
1271 | mtu = READ_ONCE(dst->dev->mtu); | 1289 | mtu = READ_ONCE(dst->dev->mtu); |
1272 | 1290 | ||
1273 | if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { | 1291 | if (unlikely(ip_mtu_locked(dst))) { |
1274 | if (rt->rt_uses_gateway && mtu > 576) | 1292 | if (rt->rt_uses_gateway && mtu > 576) |
1275 | mtu = 576; | 1293 | mtu = 576; |
1276 | } | 1294 | } |
@@ -1383,7 +1401,7 @@ struct uncached_list { | |||
1383 | 1401 | ||
1384 | static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list); | 1402 | static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list); |
1385 | 1403 | ||
1386 | static void rt_add_uncached_list(struct rtable *rt) | 1404 | void rt_add_uncached_list(struct rtable *rt) |
1387 | { | 1405 | { |
1388 | struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list); | 1406 | struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list); |
1389 | 1407 | ||
@@ -1394,14 +1412,8 @@ static void rt_add_uncached_list(struct rtable *rt) | |||
1394 | spin_unlock_bh(&ul->lock); | 1412 | spin_unlock_bh(&ul->lock); |
1395 | } | 1413 | } |
1396 | 1414 | ||
1397 | static void ipv4_dst_destroy(struct dst_entry *dst) | 1415 | void rt_del_uncached_list(struct rtable *rt) |
1398 | { | 1416 | { |
1399 | struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); | ||
1400 | struct rtable *rt = (struct rtable *) dst; | ||
1401 | |||
1402 | if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt)) | ||
1403 | kfree(p); | ||
1404 | |||
1405 | if (!list_empty(&rt->rt_uncached)) { | 1417 | if (!list_empty(&rt->rt_uncached)) { |
1406 | struct uncached_list *ul = rt->rt_uncached_list; | 1418 | struct uncached_list *ul = rt->rt_uncached_list; |
1407 | 1419 | ||
@@ -1411,6 +1423,17 @@ static void ipv4_dst_destroy(struct dst_entry *dst) | |||
1411 | } | 1423 | } |
1412 | } | 1424 | } |
1413 | 1425 | ||
1426 | static void ipv4_dst_destroy(struct dst_entry *dst) | ||
1427 | { | ||
1428 | struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); | ||
1429 | struct rtable *rt = (struct rtable *)dst; | ||
1430 | |||
1431 | if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt)) | ||
1432 | kfree(p); | ||
1433 | |||
1434 | rt_del_uncached_list(rt); | ||
1435 | } | ||
1436 | |||
1414 | void rt_flush_dev(struct net_device *dev) | 1437 | void rt_flush_dev(struct net_device *dev) |
1415 | { | 1438 | { |
1416 | struct net *net = dev_net(dev); | 1439 | struct net *net = dev_net(dev); |
@@ -1506,6 +1529,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev, | |||
1506 | rt->rt_is_input = 0; | 1529 | rt->rt_is_input = 0; |
1507 | rt->rt_iif = 0; | 1530 | rt->rt_iif = 0; |
1508 | rt->rt_pmtu = 0; | 1531 | rt->rt_pmtu = 0; |
1532 | rt->rt_mtu_locked = 0; | ||
1509 | rt->rt_gateway = 0; | 1533 | rt->rt_gateway = 0; |
1510 | rt->rt_uses_gateway = 0; | 1534 | rt->rt_uses_gateway = 0; |
1511 | rt->rt_table_id = 0; | 1535 | rt->rt_table_id = 0; |
@@ -1826,6 +1850,8 @@ int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4, | |||
1826 | return skb_get_hash_raw(skb) >> 1; | 1850 | return skb_get_hash_raw(skb) >> 1; |
1827 | memset(&hash_keys, 0, sizeof(hash_keys)); | 1851 | memset(&hash_keys, 0, sizeof(hash_keys)); |
1828 | skb_flow_dissect_flow_keys(skb, &keys, flag); | 1852 | skb_flow_dissect_flow_keys(skb, &keys, flag); |
1853 | |||
1854 | hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; | ||
1829 | hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src; | 1855 | hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src; |
1830 | hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst; | 1856 | hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst; |
1831 | hash_keys.ports.src = keys.ports.src; | 1857 | hash_keys.ports.src = keys.ports.src; |
@@ -2529,6 +2555,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or | |||
2529 | rt->rt_is_input = ort->rt_is_input; | 2555 | rt->rt_is_input = ort->rt_is_input; |
2530 | rt->rt_iif = ort->rt_iif; | 2556 | rt->rt_iif = ort->rt_iif; |
2531 | rt->rt_pmtu = ort->rt_pmtu; | 2557 | rt->rt_pmtu = ort->rt_pmtu; |
2558 | rt->rt_mtu_locked = ort->rt_mtu_locked; | ||
2532 | 2559 | ||
2533 | rt->rt_genid = rt_genid_ipv4(net); | 2560 | rt->rt_genid = rt_genid_ipv4(net); |
2534 | rt->rt_flags = ort->rt_flags; | 2561 | rt->rt_flags = ort->rt_flags; |
@@ -2631,6 +2658,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id, | |||
2631 | memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); | 2658 | memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); |
2632 | if (rt->rt_pmtu && expires) | 2659 | if (rt->rt_pmtu && expires) |
2633 | metrics[RTAX_MTU - 1] = rt->rt_pmtu; | 2660 | metrics[RTAX_MTU - 1] = rt->rt_pmtu; |
2661 | if (rt->rt_mtu_locked && expires) | ||
2662 | metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU); | ||
2634 | if (rtnetlink_put_metrics(skb, metrics) < 0) | 2663 | if (rtnetlink_put_metrics(skb, metrics) < 0) |
2635 | goto nla_put_failure; | 2664 | goto nla_put_failure; |
2636 | 2665 | ||
@@ -2816,6 +2845,7 @@ void ip_rt_multicast_event(struct in_device *in_dev) | |||
2816 | static int ip_rt_gc_interval __read_mostly = 60 * HZ; | 2845 | static int ip_rt_gc_interval __read_mostly = 60 * HZ; |
2817 | static int ip_rt_gc_min_interval __read_mostly = HZ / 2; | 2846 | static int ip_rt_gc_min_interval __read_mostly = HZ / 2; |
2818 | static int ip_rt_gc_elasticity __read_mostly = 8; | 2847 | static int ip_rt_gc_elasticity __read_mostly = 8; |
2848 | static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU; | ||
2819 | 2849 | ||
2820 | static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write, | 2850 | static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write, |
2821 | void __user *buffer, | 2851 | void __user *buffer, |
@@ -2931,7 +2961,8 @@ static struct ctl_table ipv4_route_table[] = { | |||
2931 | .data = &ip_rt_min_pmtu, | 2961 | .data = &ip_rt_min_pmtu, |
2932 | .maxlen = sizeof(int), | 2962 | .maxlen = sizeof(int), |
2933 | .mode = 0644, | 2963 | .mode = 0644, |
2934 | .proc_handler = proc_dointvec, | 2964 | .proc_handler = proc_dointvec_minmax, |
2965 | .extra1 = &ip_min_valid_pmtu, | ||
2935 | }, | 2966 | }, |
2936 | { | 2967 | { |
2937 | .procname = "min_adv_mss", | 2968 | .procname = "min_adv_mss", |
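The route.c changes above thread an rt_mtu_locked / fnhe_mtu_locked flag through the PMTU cache: a reported PMTU below ip_rt_min_pmtu is clamped to the floor and marked locked (and reported via RTAX_LOCK of RTAX_MTU), while the sysctl itself is now bounded below by IPV4_MIN_MTU through proc_dointvec_minmax. A compact model of the clamp-and-lock step, using invented names rather than the kernel's structures:

	#include <stdbool.h>

	struct pmtu_cache {
		unsigned int pmtu;	/* cached path MTU */
		bool locked;		/* clamped to the floor; do not lower further */
	};

	/* Mirrors the shape of __ip_rt_update_pmtu() after the change above. */
	static void pmtu_update(struct pmtu_cache *c, unsigned int reported_mtu,
				unsigned int min_pmtu)
	{
		bool lock = false;

		if (c->locked)				/* ip_mtu_locked(): ignore further reports */
			return;
		if (c->pmtu && c->pmtu < reported_mtu)	/* never raise the cached PMTU here */
			return;
		if (reported_mtu < min_pmtu) {		/* suspicious ICMP: clamp and lock */
			lock = true;
			reported_mtu = min_pmtu;
		}
		c->pmtu = reported_mtu;
		c->locked = lock;
	}

	int main(void)
	{
		struct pmtu_cache c = { .pmtu = 1500, .locked = false };

		pmtu_update(&c, 300, 552);	/* below the floor: clamps to 552 and locks */
		return c.pmtu == 552 && c.locked ? 0 : 1;
	}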
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 48636aee23c3..8b8059b7af4d 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -3566,6 +3566,7 @@ int tcp_abort(struct sock *sk, int err) | |||
3566 | 3566 | ||
3567 | bh_unlock_sock(sk); | 3567 | bh_unlock_sock(sk); |
3568 | local_bh_enable(); | 3568 | local_bh_enable(); |
3569 | tcp_write_queue_purge(sk); | ||
3569 | release_sock(sk); | 3570 | release_sock(sk); |
3570 | return 0; | 3571 | return 0; |
3571 | } | 3572 | } |
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index 7c843578f233..faddf4f9a707 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * The algorithm is described in: | 6 | * The algorithm is described in: |
7 | * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm | 7 | * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm |
8 | * for High-Speed Networks" | 8 | * for High-Speed Networks" |
9 | * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf | 9 | * http://tamerbasar.csl.illinois.edu/LiuBasarSrikantPerfEvalArtJun2008.pdf |
10 | * | 10 | * |
11 | * Implemented from description in paper and ns-2 simulation. | 11 | * Implemented from description in paper and ns-2 simulation. |
12 | * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org> | 12 | * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org> |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 575d3c1fb6e8..9a1b3c1c1c14 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -1971,11 +1971,6 @@ void tcp_enter_loss(struct sock *sk) | |||
1971 | /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous | 1971 | /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous |
1972 | * loss recovery is underway except recurring timeout(s) on | 1972 | * loss recovery is underway except recurring timeout(s) on |
1973 | * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing | 1973 | * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing |
1974 | * | ||
1975 | * In theory F-RTO can be used repeatedly during loss recovery. | ||
1976 | * In practice this interacts badly with broken middle-boxes that | ||
1977 | * falsely raise the receive window, which results in repeated | ||
1978 | * timeouts and stop-and-go behavior. | ||
1979 | */ | 1974 | */ |
1980 | tp->frto = net->ipv4.sysctl_tcp_frto && | 1975 | tp->frto = net->ipv4.sysctl_tcp_frto && |
1981 | (new_recovery || icsk->icsk_retransmits) && | 1976 | (new_recovery || icsk->icsk_retransmits) && |
@@ -2631,18 +2626,14 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack, | |||
2631 | tcp_try_undo_loss(sk, false)) | 2626 | tcp_try_undo_loss(sk, false)) |
2632 | return; | 2627 | return; |
2633 | 2628 | ||
2634 | /* The ACK (s)acks some never-retransmitted data meaning not all | ||
2635 | * the data packets before the timeout were lost. Therefore we | ||
2636 | * undo the congestion window and state. This is essentially | ||
2637 | * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since | ||
2638 | * a retransmitted skb is permantly marked, we can apply such an | ||
2639 | * operation even if F-RTO was not used. | ||
2640 | */ | ||
2641 | if ((flag & FLAG_ORIG_SACK_ACKED) && | ||
2642 | tcp_try_undo_loss(sk, tp->undo_marker)) | ||
2643 | return; | ||
2644 | |||
2645 | if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ | 2629 | if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ |
2630 | /* Step 3.b. A timeout is spurious if not all data are | ||
2631 | * lost, i.e., never-retransmitted data are (s)acked. | ||
2632 | */ | ||
2633 | if ((flag & FLAG_ORIG_SACK_ACKED) && | ||
2634 | tcp_try_undo_loss(sk, true)) | ||
2635 | return; | ||
2636 | |||
2646 | if (after(tp->snd_nxt, tp->high_seq)) { | 2637 | if (after(tp->snd_nxt, tp->high_seq)) { |
2647 | if (flag & FLAG_DATA_SACKED || is_dupack) | 2638 | if (flag & FLAG_DATA_SACKED || is_dupack) |
2648 | tp->frto = 0; /* Step 3.a. loss was real */ | 2639 | tp->frto = 0; /* Step 3.a. loss was real */ |
@@ -4001,6 +3992,7 @@ void tcp_reset(struct sock *sk) | |||
4001 | /* This barrier is coupled with smp_rmb() in tcp_poll() */ | 3992 | /* This barrier is coupled with smp_rmb() in tcp_poll() */ |
4002 | smp_wmb(); | 3993 | smp_wmb(); |
4003 | 3994 | ||
3995 | tcp_write_queue_purge(sk); | ||
4004 | tcp_done(sk); | 3996 | tcp_done(sk); |
4005 | 3997 | ||
4006 | if (!sock_flag(sk, SOCK_DEAD)) | 3998 | if (!sock_flag(sk, SOCK_DEAD)) |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e9f985e42405..6818042cd8a9 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -1730,7 +1730,7 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, | |||
1730 | */ | 1730 | */ |
1731 | segs = max_t(u32, bytes / mss_now, min_tso_segs); | 1731 | segs = max_t(u32, bytes / mss_now, min_tso_segs); |
1732 | 1732 | ||
1733 | return min_t(u32, segs, sk->sk_gso_max_segs); | 1733 | return segs; |
1734 | } | 1734 | } |
1735 | EXPORT_SYMBOL(tcp_tso_autosize); | 1735 | EXPORT_SYMBOL(tcp_tso_autosize); |
1736 | 1736 | ||
@@ -1742,9 +1742,10 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now) | |||
1742 | const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; | 1742 | const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; |
1743 | u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0; | 1743 | u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0; |
1744 | 1744 | ||
1745 | return tso_segs ? : | 1745 | if (!tso_segs) |
1746 | tcp_tso_autosize(sk, mss_now, | 1746 | tso_segs = tcp_tso_autosize(sk, mss_now, |
1747 | sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs); | 1747 | sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs); |
1748 | return min_t(u32, tso_segs, sk->sk_gso_max_segs); | ||
1748 | } | 1749 | } |
1749 | 1750 | ||
1750 | /* Returns the portion of skb which can be sent right away */ | 1751 | /* Returns the portion of skb which can be sent right away */ |
@@ -2027,6 +2028,24 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk) | |||
2027 | } | 2028 | } |
2028 | } | 2029 | } |
2029 | 2030 | ||
2031 | static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len) | ||
2032 | { | ||
2033 | struct sk_buff *skb, *next; | ||
2034 | |||
2035 | skb = tcp_send_head(sk); | ||
2036 | tcp_for_write_queue_from_safe(skb, next, sk) { | ||
2037 | if (len <= skb->len) | ||
2038 | break; | ||
2039 | |||
2040 | if (unlikely(TCP_SKB_CB(skb)->eor)) | ||
2041 | return false; | ||
2042 | |||
2043 | len -= skb->len; | ||
2044 | } | ||
2045 | |||
2046 | return true; | ||
2047 | } | ||
2048 | |||
2030 | /* Create a new MTU probe if we are ready. | 2049 | /* Create a new MTU probe if we are ready. |
2031 | * MTU probe is regularly attempting to increase the path MTU by | 2050 | * MTU probe is regularly attempting to increase the path MTU by |
2032 | * deliberately sending larger packets. This discovers routing | 2051 | * deliberately sending larger packets. This discovers routing |
@@ -2099,6 +2118,9 @@ static int tcp_mtu_probe(struct sock *sk) | |||
2099 | return 0; | 2118 | return 0; |
2100 | } | 2119 | } |
2101 | 2120 | ||
2121 | if (!tcp_can_coalesce_send_queue_head(sk, probe_size)) | ||
2122 | return -1; | ||
2123 | |||
2102 | /* We're allowed to probe. Build it now. */ | 2124 | /* We're allowed to probe. Build it now. */ |
2103 | nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); | 2125 | nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); |
2104 | if (!nskb) | 2126 | if (!nskb) |
@@ -2134,6 +2156,10 @@ static int tcp_mtu_probe(struct sock *sk) | |||
2134 | /* We've eaten all the data from this skb. | 2156 | /* We've eaten all the data from this skb. |
2135 | * Throw it away. */ | 2157 | * Throw it away. */ |
2136 | TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; | 2158 | TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; |
2159 | /* If this is the last SKB we copy and eor is set | ||
2160 | * we need to propagate it to the new skb. | ||
2161 | */ | ||
2162 | TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor; | ||
2137 | tcp_unlink_write_queue(skb, sk); | 2163 | tcp_unlink_write_queue(skb, sk); |
2138 | sk_wmem_free_skb(sk, skb); | 2164 | sk_wmem_free_skb(sk, skb); |
2139 | } else { | 2165 | } else { |
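tcp_mtu_probe() now refuses to build a probe that would coalesce data across an EOR (end-of-record) boundary, and propagates the EOR bit when the last merged skb carried it. A small standalone model of the new tcp_can_coalesce_send_queue_head() walk, with made-up segment types in place of skbs:

	#include <stdbool.h>
	#include <stddef.h>

	struct seg {
		size_t len;
		bool eor;		/* end-of-record flag on this segment */
		struct seg *next;
	};

	/* Can 'len' bytes be taken from the head of the queue and merged into
	 * one probe segment without crossing an EOR boundary?
	 */
	static bool can_coalesce_head(const struct seg *head, size_t len)
	{
		for (const struct seg *s = head; s; s = s->next) {
			if (len <= s->len)
				return true;	/* probe ends inside this segment */
			if (s->eor)
				return false;	/* merging would cross a record boundary */
			len -= s->len;
		}
		return true;
	}

	int main(void)
	{
		struct seg c = { .len = 800, .eor = false, .next = NULL };
		struct seg b = { .len = 400, .eor = true,  .next = &c };
		struct seg a = { .len = 600, .eor = false, .next = &b };

		/* 900 fits within a+b without passing b's EOR; 1200 would cross it. */
		return can_coalesce_head(&a, 900) && !can_coalesce_head(&a, 1200) ? 0 : 1;
	}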
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 71fc60f1b326..f7d944855f8e 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -34,6 +34,7 @@ static void tcp_write_err(struct sock *sk) | |||
34 | sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; | 34 | sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; |
35 | sk->sk_error_report(sk); | 35 | sk->sk_error_report(sk); |
36 | 36 | ||
37 | tcp_write_queue_purge(sk); | ||
37 | tcp_done(sk); | 38 | tcp_done(sk); |
38 | __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT); | 39 | __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT); |
39 | } | 40 | } |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index bfaefe560b5c..e5ef7c38c934 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -2024,6 +2024,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, | |||
2024 | err = udplite_checksum_init(skb, uh); | 2024 | err = udplite_checksum_init(skb, uh); |
2025 | if (err) | 2025 | if (err) |
2026 | return err; | 2026 | return err; |
2027 | |||
2028 | if (UDP_SKB_CB(skb)->partial_cov) { | ||
2029 | skb->csum = inet_compute_pseudo(skb, proto); | ||
2030 | return 0; | ||
2031 | } | ||
2027 | } | 2032 | } |
2028 | 2033 | ||
2029 | /* Note, we are only interested in != 0 or == 0, thus the | 2034 | /* Note, we are only interested in != 0 or == 0, thus the |
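For UDP-Lite with partial checksum coverage, a hardware-computed full checksum cannot be reused, so udp4_csum_init() now seeds skb->csum with just the pseudo-header sum and lets the partially covered bytes be verified in software. A rough host-order illustration of the IPv4 pseudo-header contribution; field handling is simplified and the names are invented, not kernel code:

	#include <stdint.h>
	#include <stdio.h>

	/* Unfolded pseudo-header sum for IPv4 + UDP(-Lite), host-order values. */
	static uint32_t ipv4_pseudo_sum(uint32_t saddr, uint32_t daddr,
					uint8_t proto, uint16_t length)
	{
		uint32_t sum = 0;

		sum += (saddr >> 16) + (saddr & 0xffff);
		sum += (daddr >> 16) + (daddr & 0xffff);
		sum += proto;		/* zero byte + protocol number */
		sum += length;		/* length of the datagram */
		return sum;		/* caller continues the sum over the covered bytes */
	}

	int main(void)
	{
		/* 192.168.0.1 -> 10.0.0.7, UDP-Lite (protocol 136), length 8 */
		printf("pseudo-header sum: %#x\n",
		       ipv4_pseudo_sum(0xc0a80001, 0x0a000007, 136, 8));
		return 0;
	}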
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index 63faeee989a9..2a9764bd1719 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c | |||
@@ -92,7 +92,8 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) | |||
92 | 92 | ||
93 | skb_reset_network_header(skb); | 93 | skb_reset_network_header(skb); |
94 | skb_mac_header_rebuild(skb); | 94 | skb_mac_header_rebuild(skb); |
95 | eth_hdr(skb)->h_proto = skb->protocol; | 95 | if (skb->mac_len) |
96 | eth_hdr(skb)->h_proto = skb->protocol; | ||
96 | 97 | ||
97 | err = 0; | 98 | err = 0; |
98 | 99 | ||
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 94b8702603bc..be980c195fc5 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
@@ -30,7 +30,8 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb) | |||
30 | 30 | ||
31 | mtu = dst_mtu(skb_dst(skb)); | 31 | mtu = dst_mtu(skb_dst(skb)); |
32 | if ((!skb_is_gso(skb) && skb->len > mtu) || | 32 | if ((!skb_is_gso(skb) && skb->len > mtu) || |
33 | (skb_is_gso(skb) && skb_gso_network_seglen(skb) > ip_skb_dst_mtu(skb->sk, skb))) { | 33 | (skb_is_gso(skb) && |
34 | !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) { | ||
34 | skb->protocol = htons(ETH_P_IP); | 35 | skb->protocol = htons(ETH_P_IP); |
35 | 36 | ||
36 | if (skb->sk) | 37 | if (skb->sk) |
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 05017e2c849c..fbebda67ac1b 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -100,8 +100,10 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, | |||
100 | xdst->u.rt.rt_gateway = rt->rt_gateway; | 100 | xdst->u.rt.rt_gateway = rt->rt_gateway; |
101 | xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway; | 101 | xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway; |
102 | xdst->u.rt.rt_pmtu = rt->rt_pmtu; | 102 | xdst->u.rt.rt_pmtu = rt->rt_pmtu; |
103 | xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked; | ||
103 | xdst->u.rt.rt_table_id = rt->rt_table_id; | 104 | xdst->u.rt.rt_table_id = rt->rt_table_id; |
104 | INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); | 105 | INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); |
106 | rt_add_uncached_list(&xdst->u.rt); | ||
105 | 107 | ||
106 | return 0; | 108 | return 0; |
107 | } | 109 | } |
@@ -241,7 +243,8 @@ static void xfrm4_dst_destroy(struct dst_entry *dst) | |||
241 | struct xfrm_dst *xdst = (struct xfrm_dst *)dst; | 243 | struct xfrm_dst *xdst = (struct xfrm_dst *)dst; |
242 | 244 | ||
243 | dst_destroy_metrics_generic(dst); | 245 | dst_destroy_metrics_generic(dst); |
244 | 246 | if (xdst->u.rt.rt_uncached_list) | |
247 | rt_del_uncached_list(&xdst->u.rt); | ||
245 | xfrm_dst_destroy(xdst); | 248 | xfrm_dst_destroy(xdst); |
246 | } | 249 | } |
247 | 250 | ||
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index fbf08ce3f5ab..a9f7eca0b6a3 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -146,10 +146,12 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, | |||
146 | struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; | 146 | struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; |
147 | struct inet_sock *inet = inet_sk(sk); | 147 | struct inet_sock *inet = inet_sk(sk); |
148 | struct ipv6_pinfo *np = inet6_sk(sk); | 148 | struct ipv6_pinfo *np = inet6_sk(sk); |
149 | struct in6_addr *daddr; | 149 | struct in6_addr *daddr, old_daddr; |
150 | __be32 fl6_flowlabel = 0; | ||
151 | __be32 old_fl6_flowlabel; | ||
152 | __be16 old_dport; | ||
150 | int addr_type; | 153 | int addr_type; |
151 | int err; | 154 | int err; |
152 | __be32 fl6_flowlabel = 0; | ||
153 | 155 | ||
154 | if (usin->sin6_family == AF_INET) { | 156 | if (usin->sin6_family == AF_INET) { |
155 | if (__ipv6_only_sock(sk)) | 157 | if (__ipv6_only_sock(sk)) |
@@ -238,9 +240,13 @@ ipv4_connected: | |||
238 | } | 240 | } |
239 | } | 241 | } |
240 | 242 | ||
243 | /* save the current peer information before updating it */ | ||
244 | old_daddr = sk->sk_v6_daddr; | ||
245 | old_fl6_flowlabel = np->flow_label; | ||
246 | old_dport = inet->inet_dport; | ||
247 | |||
241 | sk->sk_v6_daddr = *daddr; | 248 | sk->sk_v6_daddr = *daddr; |
242 | np->flow_label = fl6_flowlabel; | 249 | np->flow_label = fl6_flowlabel; |
243 | |||
244 | inet->inet_dport = usin->sin6_port; | 250 | inet->inet_dport = usin->sin6_port; |
245 | 251 | ||
246 | /* | 252 | /* |
@@ -250,11 +256,12 @@ ipv4_connected: | |||
250 | 256 | ||
251 | err = ip6_datagram_dst_update(sk, true); | 257 | err = ip6_datagram_dst_update(sk, true); |
252 | if (err) { | 258 | if (err) { |
253 | /* Reset daddr and dport so that udp_v6_early_demux() | 259 | /* Restore the socket peer info, to keep it consistent with |
254 | * fails to find this socket | 260 | * the old socket state |
255 | */ | 261 | */ |
256 | memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr)); | 262 | sk->sk_v6_daddr = old_daddr; |
257 | inet->inet_dport = 0; | 263 | np->flow_label = old_fl6_flowlabel; |
264 | inet->inet_dport = old_dport; | ||
258 | goto out; | 265 | goto out; |
259 | } | 266 | } |
260 | 267 | ||
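__ip6_datagram_connect() now snapshots the old peer address, flow label and port before overwriting them, and restores that snapshot if the route update fails, instead of zeroing the fields and leaving the socket half-connected. The generic shape of that rollback, with invented types standing in for the socket state:

	#include <stdint.h>

	struct peer {
		uint32_t daddr;
		uint32_t flowlabel;
		uint16_t dport;
	};

	static int fail_update(const struct peer *p) { (void)p; return -1; }

	/* Snapshot-and-restore: on failure the caller's state is left untouched. */
	static int connect_peer(struct peer *cur, const struct peer *next,
				int (*dst_update)(const struct peer *))
	{
		struct peer old = *cur;		/* save before touching anything */
		int err;

		*cur = *next;
		err = dst_update(cur);
		if (err)
			*cur = old;		/* roll back to a consistent state */
		return err;
	}

	int main(void)
	{
		struct peer cur  = { .daddr = 1, .flowlabel = 2, .dport = 3 };
		struct peer next = { .daddr = 9, .flowlabel = 8, .dport = 7 };

		connect_peer(&cur, &next, fail_update);
		return cur.daddr == 1 && cur.dport == 3 ? 0 : 1;
	}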
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c index ec43d18b5ff9..547515e8450a 100644 --- a/net/ipv6/ip6_checksum.c +++ b/net/ipv6/ip6_checksum.c | |||
@@ -73,6 +73,11 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto) | |||
73 | err = udplite_checksum_init(skb, uh); | 73 | err = udplite_checksum_init(skb, uh); |
74 | if (err) | 74 | if (err) |
75 | return err; | 75 | return err; |
76 | |||
77 | if (UDP_SKB_CB(skb)->partial_cov) { | ||
78 | skb->csum = ip6_compute_pseudo(skb, proto); | ||
79 | return 0; | ||
80 | } | ||
76 | } | 81 | } |
77 | 82 | ||
78 | /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels) | 83 | /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels) |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 3c353125546d..1bbd0930063e 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -126,7 +126,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, | |||
126 | struct ip6_tnl *t, *cand = NULL; | 126 | struct ip6_tnl *t, *cand = NULL; |
127 | struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); | 127 | struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); |
128 | int dev_type = (gre_proto == htons(ETH_P_TEB) || | 128 | int dev_type = (gre_proto == htons(ETH_P_TEB) || |
129 | gre_proto == htons(ETH_P_ERSPAN)) ? | 129 | gre_proto == htons(ETH_P_ERSPAN) || |
130 | gre_proto == htons(ETH_P_ERSPAN2)) ? | ||
130 | ARPHRD_ETHER : ARPHRD_IP6GRE; | 131 | ARPHRD_ETHER : ARPHRD_IP6GRE; |
131 | int score, cand_score = 4; | 132 | int score, cand_score = 4; |
132 | 133 | ||
@@ -902,6 +903,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, | |||
902 | truncate = true; | 903 | truncate = true; |
903 | } | 904 | } |
904 | 905 | ||
906 | if (skb_cow_head(skb, dev->needed_headroom)) | ||
907 | goto tx_err; | ||
908 | |||
905 | t->parms.o_flags &= ~TUNNEL_KEY; | 909 | t->parms.o_flags &= ~TUNNEL_KEY; |
906 | IPCB(skb)->flags = 0; | 910 | IPCB(skb)->flags = 0; |
907 | 911 | ||
@@ -944,6 +948,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, | |||
944 | md->u.md2.dir, | 948 | md->u.md2.dir, |
945 | get_hwid(&md->u.md2), | 949 | get_hwid(&md->u.md2), |
946 | truncate, false); | 950 | truncate, false); |
951 | } else { | ||
952 | goto tx_err; | ||
947 | } | 953 | } |
948 | } else { | 954 | } else { |
949 | switch (skb->protocol) { | 955 | switch (skb->protocol) { |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 997c7f19ad62..a8a919520090 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -412,7 +412,7 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) | |||
412 | if (skb->ignore_df) | 412 | if (skb->ignore_df) |
413 | return false; | 413 | return false; |
414 | 414 | ||
415 | if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) | 415 | if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) |
416 | return false; | 416 | return false; |
417 | 417 | ||
418 | return true; | 418 | return true; |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 4b15fe928278..6e0f21eed88a 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1982,14 +1982,14 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev, | |||
1982 | { | 1982 | { |
1983 | struct net *net = dev_net(dev); | 1983 | struct net *net = dev_net(dev); |
1984 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); | 1984 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); |
1985 | struct ip6_tnl *nt, *t; | ||
1986 | struct ip_tunnel_encap ipencap; | 1985 | struct ip_tunnel_encap ipencap; |
1986 | struct ip6_tnl *nt, *t; | ||
1987 | int err; | ||
1987 | 1988 | ||
1988 | nt = netdev_priv(dev); | 1989 | nt = netdev_priv(dev); |
1989 | 1990 | ||
1990 | if (ip6_tnl_netlink_encap_parms(data, &ipencap)) { | 1991 | if (ip6_tnl_netlink_encap_parms(data, &ipencap)) { |
1991 | int err = ip6_tnl_encap_setup(nt, &ipencap); | 1992 | err = ip6_tnl_encap_setup(nt, &ipencap); |
1992 | |||
1993 | if (err < 0) | 1993 | if (err < 0) |
1994 | return err; | 1994 | return err; |
1995 | } | 1995 | } |
@@ -2005,7 +2005,11 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev, | |||
2005 | return -EEXIST; | 2005 | return -EEXIST; |
2006 | } | 2006 | } |
2007 | 2007 | ||
2008 | return ip6_tnl_create2(dev); | 2008 | err = ip6_tnl_create2(dev); |
2009 | if (!err && tb[IFLA_MTU]) | ||
2010 | ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU])); | ||
2011 | |||
2012 | return err; | ||
2009 | } | 2013 | } |
2010 | 2014 | ||
2011 | static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], | 2015 | static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index d78d41fc4b1a..24535169663d 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -1367,10 +1367,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1367 | if (get_user(len, optlen)) | 1367 | if (get_user(len, optlen)) |
1368 | return -EFAULT; | 1368 | return -EFAULT; |
1369 | 1369 | ||
1370 | lock_sock(sk); | 1370 | err = nf_getsockopt(sk, PF_INET6, optname, optval, &len); |
1371 | err = nf_getsockopt(sk, PF_INET6, optname, optval, | ||
1372 | &len); | ||
1373 | release_sock(sk); | ||
1374 | if (err >= 0) | 1371 | if (err >= 0) |
1375 | err = put_user(len, optlen); | 1372 | err = put_user(len, optlen); |
1376 | } | 1373 | } |
@@ -1409,10 +1406,7 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1409 | if (get_user(len, optlen)) | 1406 | if (get_user(len, optlen)) |
1410 | return -EFAULT; | 1407 | return -EFAULT; |
1411 | 1408 | ||
1412 | lock_sock(sk); | 1409 | err = compat_nf_getsockopt(sk, PF_INET6, optname, optval, &len); |
1413 | err = compat_nf_getsockopt(sk, PF_INET6, | ||
1414 | optname, optval, &len); | ||
1415 | release_sock(sk); | ||
1416 | if (err >= 0) | 1410 | if (err >= 0) |
1417 | err = put_user(len, optlen); | 1411 | err = put_user(len, optlen); |
1418 | } | 1412 | } |
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index f61a5b613b52..ba5e04c6ae17 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -1554,7 +1554,8 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb, | |||
1554 | *(opt++) = (rd_len >> 3); | 1554 | *(opt++) = (rd_len >> 3); |
1555 | opt += 6; | 1555 | opt += 6; |
1556 | 1556 | ||
1557 | memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8); | 1557 | skb_copy_bits(orig_skb, skb_network_offset(orig_skb), opt, |
1558 | rd_len - 8); | ||
1558 | } | 1559 | } |
1559 | 1560 | ||
1560 | void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) | 1561 | void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) |
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index d95ceca7ff8f..531d6957af36 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c | |||
@@ -21,18 +21,19 @@ | |||
21 | int ip6_route_me_harder(struct net *net, struct sk_buff *skb) | 21 | int ip6_route_me_harder(struct net *net, struct sk_buff *skb) |
22 | { | 22 | { |
23 | const struct ipv6hdr *iph = ipv6_hdr(skb); | 23 | const struct ipv6hdr *iph = ipv6_hdr(skb); |
24 | struct sock *sk = sk_to_full_sk(skb->sk); | ||
24 | unsigned int hh_len; | 25 | unsigned int hh_len; |
25 | struct dst_entry *dst; | 26 | struct dst_entry *dst; |
26 | struct flowi6 fl6 = { | 27 | struct flowi6 fl6 = { |
27 | .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0, | 28 | .flowi6_oif = sk ? sk->sk_bound_dev_if : 0, |
28 | .flowi6_mark = skb->mark, | 29 | .flowi6_mark = skb->mark, |
29 | .flowi6_uid = sock_net_uid(net, skb->sk), | 30 | .flowi6_uid = sock_net_uid(net, sk), |
30 | .daddr = iph->daddr, | 31 | .daddr = iph->daddr, |
31 | .saddr = iph->saddr, | 32 | .saddr = iph->saddr, |
32 | }; | 33 | }; |
33 | int err; | 34 | int err; |
34 | 35 | ||
35 | dst = ip6_route_output(net, skb->sk, &fl6); | 36 | dst = ip6_route_output(net, sk, &fl6); |
36 | err = dst->error; | 37 | err = dst->error; |
37 | if (err) { | 38 | if (err) { |
38 | IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); | 39 | IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); |
@@ -50,7 +51,7 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb) | |||
50 | if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && | 51 | if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && |
51 | xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) { | 52 | xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) { |
52 | skb_dst_set(skb, NULL); | 53 | skb_dst_set(skb, NULL); |
53 | dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0); | 54 | dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0); |
54 | if (IS_ERR(dst)) | 55 | if (IS_ERR(dst)) |
55 | return PTR_ERR(dst); | 56 | return PTR_ERR(dst); |
56 | skb_dst_set(skb, dst); | 57 | skb_dst_set(skb, dst); |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index af4c917e0836..62358b93bbac 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -352,6 +352,10 @@ ip6t_do_table(struct sk_buff *skb, | |||
352 | } | 352 | } |
353 | if (table_base + v != ip6t_next_entry(e) && | 353 | if (table_base + v != ip6t_next_entry(e) && |
354 | !(e->ipv6.flags & IP6T_F_GOTO)) { | 354 | !(e->ipv6.flags & IP6T_F_GOTO)) { |
355 | if (unlikely(stackidx >= private->stacksize)) { | ||
356 | verdict = NF_DROP; | ||
357 | break; | ||
358 | } | ||
355 | jumpstack[stackidx++] = e; | 359 | jumpstack[stackidx++] = e; |
356 | } | 360 | } |
357 | 361 | ||
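ip6t_do_table() now bounds-checks the jump stack index before pushing and drops the packet on overflow rather than writing past the per-CPU stack. The check reduced to its essentials, outside any kernel context:

	#include <stdbool.h>

	/* Push with an explicit bound; the caller maps 'false' to NF_DROP. */
	static bool jumpstack_push(const void **stack, unsigned int size,
				   unsigned int *idx, const void *entry)
	{
		if (*idx >= size)
			return false;	/* would overflow: refuse instead of corrupting */
		stack[(*idx)++] = entry;
		return true;
	}

	int main(void)
	{
		const void *stack[2];
		unsigned int idx = 0;
		int dummy;

		jumpstack_push(stack, 2, &idx, &dummy);
		jumpstack_push(stack, 2, &idx, &dummy);
		return jumpstack_push(stack, 2, &idx, &dummy) ? 1 : 0;	/* third push refused */
	}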
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index fa51a205918d..38dea8ff680f 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c | |||
@@ -85,14 +85,14 @@ static int reject_tg6_check(const struct xt_tgchk_param *par) | |||
85 | const struct ip6t_entry *e = par->entryinfo; | 85 | const struct ip6t_entry *e = par->entryinfo; |
86 | 86 | ||
87 | if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) { | 87 | if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) { |
88 | pr_info("ECHOREPLY is not supported.\n"); | 88 | pr_info_ratelimited("ECHOREPLY is not supported\n"); |
89 | return -EINVAL; | 89 | return -EINVAL; |
90 | } else if (rejinfo->with == IP6T_TCP_RESET) { | 90 | } else if (rejinfo->with == IP6T_TCP_RESET) { |
91 | /* Must specify that it's a TCP packet */ | 91 | /* Must specify that it's a TCP packet */ |
92 | if (!(e->ipv6.flags & IP6T_F_PROTO) || | 92 | if (!(e->ipv6.flags & IP6T_F_PROTO) || |
93 | e->ipv6.proto != IPPROTO_TCP || | 93 | e->ipv6.proto != IPPROTO_TCP || |
94 | (e->ipv6.invflags & XT_INV_PROTO)) { | 94 | (e->ipv6.invflags & XT_INV_PROTO)) { |
95 | pr_info("TCP_RESET illegal for non-tcp\n"); | 95 | pr_info_ratelimited("TCP_RESET illegal for non-tcp\n"); |
96 | return -EINVAL; | 96 | return -EINVAL; |
97 | } | 97 | } |
98 | } | 98 | } |
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c index b12e61b7b16c..91ed25a24b79 100644 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c | |||
@@ -48,10 +48,6 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb, | |||
48 | } | 48 | } |
49 | 49 | ||
50 | fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; | 50 | fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; |
51 | if ((flags & XT_RPFILTER_LOOSE) == 0) { | ||
52 | fl6.flowi6_oif = dev->ifindex; | ||
53 | lookup_flags |= RT6_LOOKUP_F_IFACE; | ||
54 | } | ||
55 | 51 | ||
56 | rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags); | 52 | rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags); |
57 | if (rt->dst.error) | 53 | if (rt->dst.error) |
@@ -103,14 +99,14 @@ static int rpfilter_check(const struct xt_mtchk_param *par) | |||
103 | unsigned int options = ~XT_RPFILTER_OPTION_MASK; | 99 | unsigned int options = ~XT_RPFILTER_OPTION_MASK; |
104 | 100 | ||
105 | if (info->flags & options) { | 101 | if (info->flags & options) { |
106 | pr_info("unknown options encountered"); | 102 | pr_info_ratelimited("unknown options\n"); |
107 | return -EINVAL; | 103 | return -EINVAL; |
108 | } | 104 | } |
109 | 105 | ||
110 | if (strcmp(par->table, "mangle") != 0 && | 106 | if (strcmp(par->table, "mangle") != 0 && |
111 | strcmp(par->table, "raw") != 0) { | 107 | strcmp(par->table, "raw") != 0) { |
112 | pr_info("match only valid in the \'raw\' " | 108 | pr_info_ratelimited("only valid in \'raw\' or \'mangle\' table, not \'%s\'\n", |
113 | "or \'mangle\' tables, not \'%s\'.\n", par->table); | 109 | par->table); |
114 | return -EINVAL; | 110 | return -EINVAL; |
115 | } | 111 | } |
116 | 112 | ||
diff --git a/net/ipv6/netfilter/ip6t_srh.c b/net/ipv6/netfilter/ip6t_srh.c index 9642164107ce..33719d5560c8 100644 --- a/net/ipv6/netfilter/ip6t_srh.c +++ b/net/ipv6/netfilter/ip6t_srh.c | |||
@@ -122,12 +122,14 @@ static int srh_mt6_check(const struct xt_mtchk_param *par) | |||
122 | const struct ip6t_srh *srhinfo = par->matchinfo; | 122 | const struct ip6t_srh *srhinfo = par->matchinfo; |
123 | 123 | ||
124 | if (srhinfo->mt_flags & ~IP6T_SRH_MASK) { | 124 | if (srhinfo->mt_flags & ~IP6T_SRH_MASK) { |
125 | pr_err("unknown srh match flags %X\n", srhinfo->mt_flags); | 125 | pr_info_ratelimited("unknown srh match flags %X\n", |
126 | srhinfo->mt_flags); | ||
126 | return -EINVAL; | 127 | return -EINVAL; |
127 | } | 128 | } |
128 | 129 | ||
129 | if (srhinfo->mt_invflags & ~IP6T_SRH_INV_MASK) { | 130 | if (srhinfo->mt_invflags & ~IP6T_SRH_INV_MASK) { |
130 | pr_err("unknown srh invflags %X\n", srhinfo->mt_invflags); | 131 | pr_info_ratelimited("unknown srh invflags %X\n", |
132 | srhinfo->mt_invflags); | ||
131 | return -EINVAL; | 133 | return -EINVAL; |
132 | } | 134 | } |
133 | 135 | ||
diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c index d346705d6ee6..207cb35569b1 100644 --- a/net/ipv6/netfilter/nf_flow_table_ipv6.c +++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c | |||
@@ -178,7 +178,7 @@ static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) | |||
178 | if (skb->len <= mtu) | 178 | if (skb->len <= mtu) |
179 | return false; | 179 | return false; |
180 | 180 | ||
181 | if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) | 181 | if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) |
182 | return false; | 182 | return false; |
183 | 183 | ||
184 | return true; | 184 | return true; |
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c index bed57ee65f7b..6b7f075f811f 100644 --- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c | |||
@@ -99,6 +99,10 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb, | |||
99 | !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff, | 99 | !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff, |
100 | target, maniptype)) | 100 | target, maniptype)) |
101 | return false; | 101 | return false; |
102 | |||
103 | /* must reload, offset might have changed */ | ||
104 | ipv6h = (void *)skb->data + iphdroff; | ||
105 | |||
102 | manip_addr: | 106 | manip_addr: |
103 | if (maniptype == NF_NAT_MANIP_SRC) | 107 | if (maniptype == NF_NAT_MANIP_SRC) |
104 | ipv6h->saddr = target->src.u3.in6; | 108 | ipv6h->saddr = target->src.u3.in6; |
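The fix above reloads ipv6h after the l4proto manip step because mangling the packet may expand or relinearize the skb and move the header behind the cached pointer. The same hazard shown in plain C, with realloc() as the stand-in for the buffer possibly moving:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct hdr { unsigned char src[16]; unsigned char dst[16]; };

	int main(void)
	{
		size_t len = sizeof(struct hdr);
		unsigned char *pkt = calloc(1, len);
		unsigned char *grown;
		struct hdr *h;

		if (!pkt)
			return 1;
		h = (struct hdr *)pkt;		/* points into the old buffer */

		/* Growing the buffer may move it -- the analogue of expanding
		 * the skb while mangling the transport header. */
		grown = realloc(pkt, len + 1024);
		if (!grown) {
			free(pkt);
			return 1;
		}
		pkt = grown;

		h = (struct hdr *)pkt;		/* must reload before writing */
		memset(h->dst, 0xff, sizeof(h->dst));
		printf("dst rewritten at offset %zu\n",
		       (size_t)((unsigned char *)h->dst - pkt));
		free(pkt);
		return 0;
	}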
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c index cc5174c7254c..62fc84d7bdff 100644 --- a/net/ipv6/netfilter/nft_fib_ipv6.c +++ b/net/ipv6/netfilter/nft_fib_ipv6.c | |||
@@ -180,7 +180,6 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, | |||
180 | } | 180 | } |
181 | 181 | ||
182 | *dest = 0; | 182 | *dest = 0; |
183 | again: | ||
184 | rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, lookup_flags); | 183 | rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, lookup_flags); |
185 | if (rt->dst.error) | 184 | if (rt->dst.error) |
186 | goto put_rt_err; | 185 | goto put_rt_err; |
@@ -189,15 +188,8 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, | |||
189 | if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL)) | 188 | if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL)) |
190 | goto put_rt_err; | 189 | goto put_rt_err; |
191 | 190 | ||
192 | if (oif && oif != rt->rt6i_idev->dev) { | 191 | if (oif && oif != rt->rt6i_idev->dev) |
193 | /* multipath route? Try again with F_IFACE */ | 192 | goto put_rt_err; |
194 | if ((lookup_flags & RT6_LOOKUP_F_IFACE) == 0) { | ||
195 | lookup_flags |= RT6_LOOKUP_F_IFACE; | ||
196 | fl6.flowi6_oif = oif->ifindex; | ||
197 | ip6_rt_put(rt); | ||
198 | goto again; | ||
199 | } | ||
200 | } | ||
201 | 193 | ||
202 | switch (priv->result) { | 194 | switch (priv->result) { |
203 | case NFT_FIB_RESULT_OIF: | 195 | case NFT_FIB_RESULT_OIF: |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 9dcfadddd800..b0d5c64e1978 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -128,7 +128,7 @@ struct uncached_list { | |||
128 | 128 | ||
129 | static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list); | 129 | static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list); |
130 | 130 | ||
131 | static void rt6_uncached_list_add(struct rt6_info *rt) | 131 | void rt6_uncached_list_add(struct rt6_info *rt) |
132 | { | 132 | { |
133 | struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list); | 133 | struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list); |
134 | 134 | ||
@@ -139,7 +139,7 @@ static void rt6_uncached_list_add(struct rt6_info *rt) | |||
139 | spin_unlock_bh(&ul->lock); | 139 | spin_unlock_bh(&ul->lock); |
140 | } | 140 | } |
141 | 141 | ||
142 | static void rt6_uncached_list_del(struct rt6_info *rt) | 142 | void rt6_uncached_list_del(struct rt6_info *rt) |
143 | { | 143 | { |
144 | if (!list_empty(&rt->rt6i_uncached)) { | 144 | if (!list_empty(&rt->rt6i_uncached)) { |
145 | struct uncached_list *ul = rt->rt6i_uncached_list; | 145 | struct uncached_list *ul = rt->rt6i_uncached_list; |
@@ -1509,7 +1509,30 @@ static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt) | |||
1509 | } | 1509 | } |
1510 | } | 1510 | } |
1511 | 1511 | ||
1512 | static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu) | 1512 | static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev, |
1513 | struct rt6_info *rt, int mtu) | ||
1514 | { | ||
1515 | /* If the new MTU is lower than the route PMTU, this new MTU will be the | ||
1516 | * lowest MTU in the path: always allow updating the route PMTU to | ||
1517 | * reflect PMTU decreases. | ||
1518 | * | ||
1519 | * If the new MTU is higher, and the route PMTU is equal to the local | ||
1520 | * MTU, this means the old MTU is the lowest in the path, so allow | ||
1521 | * updating it: if other nodes now have lower MTUs, PMTU discovery will | ||
1522 | * handle this. | ||
1523 | */ | ||
1524 | |||
1525 | if (dst_mtu(&rt->dst) >= mtu) | ||
1526 | return true; | ||
1527 | |||
1528 | if (dst_mtu(&rt->dst) == idev->cnf.mtu6) | ||
1529 | return true; | ||
1530 | |||
1531 | return false; | ||
1532 | } | ||
1533 | |||
1534 | static void rt6_exceptions_update_pmtu(struct inet6_dev *idev, | ||
1535 | struct rt6_info *rt, int mtu) | ||
1513 | { | 1536 | { |
1514 | struct rt6_exception_bucket *bucket; | 1537 | struct rt6_exception_bucket *bucket; |
1515 | struct rt6_exception *rt6_ex; | 1538 | struct rt6_exception *rt6_ex; |
@@ -1518,20 +1541,22 @@ static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu) | |||
1518 | bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, | 1541 | bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, |
1519 | lockdep_is_held(&rt6_exception_lock)); | 1542 | lockdep_is_held(&rt6_exception_lock)); |
1520 | 1543 | ||
1521 | if (bucket) { | 1544 | if (!bucket) |
1522 | for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { | 1545 | return; |
1523 | hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { | 1546 | |
1524 | struct rt6_info *entry = rt6_ex->rt6i; | 1547 | for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { |
1525 | /* For RTF_CACHE with rt6i_pmtu == 0 | 1548 | hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { |
1526 | * (i.e. a redirected route), | 1549 | struct rt6_info *entry = rt6_ex->rt6i; |
1527 | * the metrics of its rt->dst.from has already | 1550 | |
1528 | * been updated. | 1551 | /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected |
1529 | */ | 1552 | * route), the metrics of its rt->dst.from have already |
1530 | if (entry->rt6i_pmtu && entry->rt6i_pmtu > mtu) | 1553 | * been updated. |
1531 | entry->rt6i_pmtu = mtu; | 1554 | */ |
1532 | } | 1555 | if (entry->rt6i_pmtu && |
1533 | bucket++; | 1556 | rt6_mtu_change_route_allowed(idev, entry, mtu)) |
1557 | entry->rt6i_pmtu = mtu; | ||
1534 | } | 1558 | } |
1559 | bucket++; | ||
1535 | } | 1560 | } |
1536 | } | 1561 | } |
1537 | 1562 | ||
@@ -3809,25 +3834,13 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) | |||
3809 | Since RFC 1981 doesn't include administrative MTU increase | 3834 | Since RFC 1981 doesn't include administrative MTU increase |
3810 | update PMTU increase is a MUST. (i.e. jumbo frame) | 3835 | update PMTU increase is a MUST. (i.e. jumbo frame) |
3811 | */ | 3836 | */ |
3812 | /* | ||
3813 | If new MTU is less than route PMTU, this new MTU will be the | ||
3814 | lowest MTU in the path, update the route PMTU to reflect PMTU | ||
3815 | decreases; if new MTU is greater than route PMTU, and the | ||
3816 | old MTU is the lowest MTU in the path, update the route PMTU | ||
3817 | to reflect the increase. In this case if the other nodes' MTU | ||
3818 | also have the lowest MTU, TOO BIG MESSAGE will be lead to | ||
3819 | PMTU discovery. | ||
3820 | */ | ||
3821 | if (rt->dst.dev == arg->dev && | 3837 | if (rt->dst.dev == arg->dev && |
3822 | dst_metric_raw(&rt->dst, RTAX_MTU) && | ||
3823 | !dst_metric_locked(&rt->dst, RTAX_MTU)) { | 3838 | !dst_metric_locked(&rt->dst, RTAX_MTU)) { |
3824 | spin_lock_bh(&rt6_exception_lock); | 3839 | spin_lock_bh(&rt6_exception_lock); |
3825 | if (dst_mtu(&rt->dst) >= arg->mtu || | 3840 | if (dst_metric_raw(&rt->dst, RTAX_MTU) && |
3826 | (dst_mtu(&rt->dst) < arg->mtu && | 3841 | rt6_mtu_change_route_allowed(idev, rt, arg->mtu)) |
3827 | dst_mtu(&rt->dst) == idev->cnf.mtu6)) { | ||
3828 | dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu); | 3842 | dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu); |
3829 | } | 3843 | rt6_exceptions_update_pmtu(idev, rt, arg->mtu); |
3830 | rt6_exceptions_update_pmtu(rt, arg->mtu); | ||
3831 | spin_unlock_bh(&rt6_exception_lock); | 3844 | spin_unlock_bh(&rt6_exception_lock); |
3832 | } | 3845 | } |
3833 | return 0; | 3846 | return 0; |
@@ -4099,6 +4112,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, | |||
4099 | r_cfg.fc_encap_type = nla_get_u16(nla); | 4112 | r_cfg.fc_encap_type = nla_get_u16(nla); |
4100 | } | 4113 | } |
4101 | 4114 | ||
4115 | r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK); | ||
4102 | rt = ip6_route_info_create(&r_cfg, extack); | 4116 | rt = ip6_route_info_create(&r_cfg, extack); |
4103 | if (IS_ERR(rt)) { | 4117 | if (IS_ERR(rt)) { |
4104 | err = PTR_ERR(rt); | 4118 | err = PTR_ERR(rt); |
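The new rt6_mtu_change_route_allowed() centralises the policy that both the main routes and the cached exceptions now follow when a device MTU changes. Reduced to a standalone predicate with illustrative parameter names:

	#include <stdbool.h>

	/* A device MTU change may touch a route's cached PMTU if it lowers it,
	 * or if the old PMTU equalled the link MTU (this hop was the path
	 * bottleneck, so raising it is safe and PMTU discovery will correct
	 * it again if another hop is smaller).
	 */
	static bool mtu_update_allowed(unsigned int route_mtu, unsigned int link_mtu,
				       unsigned int new_mtu)
	{
		if (route_mtu >= new_mtu)
			return true;
		if (route_mtu == link_mtu)
			return true;
		return false;
	}

	int main(void)
	{
		/* lowering: allowed; raising past a smaller bottleneck elsewhere: refused */
		return mtu_update_allowed(1500, 1500, 1280) &&
		       !mtu_update_allowed(1280, 1500, 9000) ? 0 : 1;
	}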
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index bd6cc688bd19..7a78dcfda68a 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c | |||
@@ -93,7 +93,8 @@ static void set_tun_src(struct net *net, struct net_device *dev, | |||
93 | /* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */ | 93 | /* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */ |
94 | int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) | 94 | int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) |
95 | { | 95 | { |
96 | struct net *net = dev_net(skb_dst(skb)->dev); | 96 | struct dst_entry *dst = skb_dst(skb); |
97 | struct net *net = dev_net(dst->dev); | ||
97 | struct ipv6hdr *hdr, *inner_hdr; | 98 | struct ipv6hdr *hdr, *inner_hdr; |
98 | struct ipv6_sr_hdr *isrh; | 99 | struct ipv6_sr_hdr *isrh; |
99 | int hdrlen, tot_len, err; | 100 | int hdrlen, tot_len, err; |
@@ -134,7 +135,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) | |||
134 | isrh->nexthdr = proto; | 135 | isrh->nexthdr = proto; |
135 | 136 | ||
136 | hdr->daddr = isrh->segments[isrh->first_segment]; | 137 | hdr->daddr = isrh->segments[isrh->first_segment]; |
137 | set_tun_src(net, skb->dev, &hdr->daddr, &hdr->saddr); | 138 | set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr); |
138 | 139 | ||
139 | #ifdef CONFIG_IPV6_SEG6_HMAC | 140 | #ifdef CONFIG_IPV6_SEG6_HMAC |
140 | if (sr_has_hmac(isrh)) { | 141 | if (sr_has_hmac(isrh)) { |
@@ -418,7 +419,7 @@ static int seg6_build_state(struct nlattr *nla, | |||
418 | 419 | ||
419 | slwt = seg6_lwt_lwtunnel(newts); | 420 | slwt = seg6_lwt_lwtunnel(newts); |
420 | 421 | ||
421 | err = dst_cache_init(&slwt->cache, GFP_KERNEL); | 422 | err = dst_cache_init(&slwt->cache, GFP_ATOMIC); |
422 | if (err) { | 423 | if (err) { |
423 | kfree(newts); | 424 | kfree(newts); |
424 | return err; | 425 | return err; |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 3873d3877135..0195598f7bb5 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -182,7 +182,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn) | |||
182 | #ifdef CONFIG_IPV6_SIT_6RD | 182 | #ifdef CONFIG_IPV6_SIT_6RD |
183 | struct ip_tunnel *t = netdev_priv(dev); | 183 | struct ip_tunnel *t = netdev_priv(dev); |
184 | 184 | ||
185 | if (t->dev == sitn->fb_tunnel_dev) { | 185 | if (dev == sitn->fb_tunnel_dev) { |
186 | ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0); | 186 | ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0); |
187 | t->ip6rd.relay_prefix = 0; | 187 | t->ip6rd.relay_prefix = 0; |
188 | t->ip6rd.prefixlen = 16; | 188 | t->ip6rd.prefixlen = 16; |
@@ -1578,6 +1578,13 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev, | |||
1578 | if (err < 0) | 1578 | if (err < 0) |
1579 | return err; | 1579 | return err; |
1580 | 1580 | ||
1581 | if (tb[IFLA_MTU]) { | ||
1582 | u32 mtu = nla_get_u32(tb[IFLA_MTU]); | ||
1583 | |||
1584 | if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len) | ||
1585 | dev->mtu = mtu; | ||
1586 | } | ||
1587 | |||
1581 | #ifdef CONFIG_IPV6_SIT_6RD | 1588 | #ifdef CONFIG_IPV6_SIT_6RD |
1582 | if (ipip6_netlink_6rd_parms(data, &ip6rd)) | 1589 | if (ipip6_netlink_6rd_parms(data, &ip6rd)) |
1583 | err = ipip6_tunnel_update_6rd(nt, &ip6rd); | 1590 | err = ipip6_tunnel_update_6rd(nt, &ip6rd); |
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c index bb935a3b7fea..de1b0b8c53b0 100644 --- a/net/ipv6/xfrm6_mode_tunnel.c +++ b/net/ipv6/xfrm6_mode_tunnel.c | |||
@@ -92,7 +92,8 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) | |||
92 | 92 | ||
93 | skb_reset_network_header(skb); | 93 | skb_reset_network_header(skb); |
94 | skb_mac_header_rebuild(skb); | 94 | skb_mac_header_rebuild(skb); |
95 | eth_hdr(skb)->h_proto = skb->protocol; | 95 | if (skb->mac_len) |
96 | eth_hdr(skb)->h_proto = skb->protocol; | ||
96 | 97 | ||
97 | err = 0; | 98 | err = 0; |
98 | 99 | ||
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 8ae87d4ec5ff..5959ce9620eb 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c | |||
@@ -82,7 +82,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb) | |||
82 | 82 | ||
83 | if ((!skb_is_gso(skb) && skb->len > mtu) || | 83 | if ((!skb_is_gso(skb) && skb->len > mtu) || |
84 | (skb_is_gso(skb) && | 84 | (skb_is_gso(skb) && |
85 | skb_gso_network_seglen(skb) > ip6_skb_dst_mtu(skb))) { | 85 | !skb_gso_validate_network_len(skb, ip6_skb_dst_mtu(skb)))) { |
86 | skb->dev = dst->dev; | 86 | skb->dev = dst->dev; |
87 | skb->protocol = htons(ETH_P_IPV6); | 87 | skb->protocol = htons(ETH_P_IPV6); |
88 | 88 | ||
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 09fb44ee3b45..416fe67271a9 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -113,6 +113,9 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, | |||
113 | xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway; | 113 | xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway; |
114 | xdst->u.rt6.rt6i_dst = rt->rt6i_dst; | 114 | xdst->u.rt6.rt6i_dst = rt->rt6i_dst; |
115 | xdst->u.rt6.rt6i_src = rt->rt6i_src; | 115 | xdst->u.rt6.rt6i_src = rt->rt6i_src; |
116 | INIT_LIST_HEAD(&xdst->u.rt6.rt6i_uncached); | ||
117 | rt6_uncached_list_add(&xdst->u.rt6); | ||
118 | atomic_inc(&dev_net(dev)->ipv6.rt6_stats->fib_rt_uncache); | ||
116 | 119 | ||
117 | return 0; | 120 | return 0; |
118 | } | 121 | } |
@@ -244,6 +247,8 @@ static void xfrm6_dst_destroy(struct dst_entry *dst) | |||
244 | if (likely(xdst->u.rt6.rt6i_idev)) | 247 | if (likely(xdst->u.rt6.rt6i_idev)) |
245 | in6_dev_put(xdst->u.rt6.rt6i_idev); | 248 | in6_dev_put(xdst->u.rt6.rt6i_idev); |
246 | dst_destroy_metrics_generic(dst); | 249 | dst_destroy_metrics_generic(dst); |
250 | if (xdst->u.rt6.rt6i_uncached_list) | ||
251 | rt6_uncached_list_del(&xdst->u.rt6); | ||
247 | xfrm_dst_destroy(xdst); | 252 | xfrm_dst_destroy(xdst); |
248 | } | 253 | } |
249 | 254 | ||
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 1e8cc7bcbca3..9e2643ab4ccb 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -2433,9 +2433,11 @@ static int afiucv_iucv_init(void) | |||
2433 | af_iucv_dev->driver = &af_iucv_driver; | 2433 | af_iucv_dev->driver = &af_iucv_driver; |
2434 | err = device_register(af_iucv_dev); | 2434 | err = device_register(af_iucv_dev); |
2435 | if (err) | 2435 | if (err) |
2436 | goto out_driver; | 2436 | goto out_iucv_dev; |
2437 | return 0; | 2437 | return 0; |
2438 | 2438 | ||
2439 | out_iucv_dev: | ||
2440 | put_device(af_iucv_dev); | ||
2439 | out_driver: | 2441 | out_driver: |
2440 | driver_unregister(&af_iucv_driver); | 2442 | driver_unregister(&af_iucv_driver); |
2441 | out_iucv: | 2443 | out_iucv: |
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index f297d53a11aa..34355fd19f27 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c | |||
@@ -1381,24 +1381,32 @@ static int kcm_attach(struct socket *sock, struct socket *csock, | |||
1381 | .parse_msg = kcm_parse_func_strparser, | 1381 | .parse_msg = kcm_parse_func_strparser, |
1382 | .read_sock_done = kcm_read_sock_done, | 1382 | .read_sock_done = kcm_read_sock_done, |
1383 | }; | 1383 | }; |
1384 | int err; | 1384 | int err = 0; |
1385 | 1385 | ||
1386 | csk = csock->sk; | 1386 | csk = csock->sk; |
1387 | if (!csk) | 1387 | if (!csk) |
1388 | return -EINVAL; | 1388 | return -EINVAL; |
1389 | 1389 | ||
1390 | lock_sock(csk); | ||
1391 | |||
1390 | /* Only allow TCP sockets to be attached for now */ | 1392 | /* Only allow TCP sockets to be attached for now */ |
1391 | if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) || | 1393 | if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) || |
1392 | csk->sk_protocol != IPPROTO_TCP) | 1394 | csk->sk_protocol != IPPROTO_TCP) { |
1393 | return -EOPNOTSUPP; | 1395 | err = -EOPNOTSUPP; |
1396 | goto out; | ||
1397 | } | ||
1394 | 1398 | ||
1395 | /* Don't allow listeners or closed sockets */ | 1399 | /* Don't allow listeners or closed sockets */ |
1396 | if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) | 1400 | if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) { |
1397 | return -EOPNOTSUPP; | 1401 | err = -EOPNOTSUPP; |
1402 | goto out; | ||
1403 | } | ||
1398 | 1404 | ||
1399 | psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL); | 1405 | psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL); |
1400 | if (!psock) | 1406 | if (!psock) { |
1401 | return -ENOMEM; | 1407 | err = -ENOMEM; |
1408 | goto out; | ||
1409 | } | ||
1402 | 1410 | ||
1403 | psock->mux = mux; | 1411 | psock->mux = mux; |
1404 | psock->sk = csk; | 1412 | psock->sk = csk; |
@@ -1407,7 +1415,7 @@ static int kcm_attach(struct socket *sock, struct socket *csock, | |||
1407 | err = strp_init(&psock->strp, csk, &cb); | 1415 | err = strp_init(&psock->strp, csk, &cb); |
1408 | if (err) { | 1416 | if (err) { |
1409 | kmem_cache_free(kcm_psockp, psock); | 1417 | kmem_cache_free(kcm_psockp, psock); |
1410 | return err; | 1418 | goto out; |
1411 | } | 1419 | } |
1412 | 1420 | ||
1413 | write_lock_bh(&csk->sk_callback_lock); | 1421 | write_lock_bh(&csk->sk_callback_lock); |
@@ -1419,7 +1427,8 @@ static int kcm_attach(struct socket *sock, struct socket *csock, | |||
1419 | write_unlock_bh(&csk->sk_callback_lock); | 1427 | write_unlock_bh(&csk->sk_callback_lock); |
1420 | strp_done(&psock->strp); | 1428 | strp_done(&psock->strp); |
1421 | kmem_cache_free(kcm_psockp, psock); | 1429 | kmem_cache_free(kcm_psockp, psock); |
1422 | return -EALREADY; | 1430 | err = -EALREADY; |
1431 | goto out; | ||
1423 | } | 1432 | } |
1424 | 1433 | ||
1425 | psock->save_data_ready = csk->sk_data_ready; | 1434 | psock->save_data_ready = csk->sk_data_ready; |
@@ -1455,7 +1464,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock, | |||
1455 | /* Schedule RX work in case there are already bytes queued */ | 1464 | /* Schedule RX work in case there are already bytes queued */ |
1456 | strp_check_rcv(&psock->strp); | 1465 | strp_check_rcv(&psock->strp); |
1457 | 1466 | ||
1458 | return 0; | 1467 | out: |
1468 | release_sock(csk); | ||
1469 | |||
1470 | return err; | ||
1459 | } | 1471 | } |
1460 | 1472 | ||
1461 | static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info) | 1473 | static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info) |
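The kcm_attach() hunks above convert the early returns into a single unlock-and-return path so that the newly added lock_sock(csk) is always paired with release_sock(csk). A small standalone sketch of the same idiom with a pthread mutex; the function and parameter names here are illustrative only, not the KCM code:

#include <errno.h>
#include <pthread.h>
#include <sys/socket.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Every failure path jumps to "out" so the lock taken at the top is
 * released exactly once, mirroring the lock_sock()/release_sock()
 * pairing added to kcm_attach(). */
static int attach_example(int family, int established)
{
	int err = 0;

	pthread_mutex_lock(&lock);

	if (family != AF_INET && family != AF_INET6) {
		err = -EOPNOTSUPP;
		goto out;
	}
	if (!established) {
		err = -EOPNOTSUPP;
		goto out;
	}
	/* ... the real attach work would go here ... */
out:
	pthread_mutex_unlock(&lock);
	return err;
}

int main(void)
{
	return attach_example(AF_INET, 1) ? 1 : 0;
}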
@@ -1507,6 +1519,7 @@ static void kcm_unattach(struct kcm_psock *psock) | |||
1507 | 1519 | ||
1508 | if (WARN_ON(psock->rx_kcm)) { | 1520 | if (WARN_ON(psock->rx_kcm)) { |
1509 | write_unlock_bh(&csk->sk_callback_lock); | 1521 | write_unlock_bh(&csk->sk_callback_lock); |
1522 | release_sock(csk); | ||
1510 | return; | 1523 | return; |
1511 | } | 1524 | } |
1512 | 1525 | ||
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 194a7483bb93..14b67dfacc4b 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -111,6 +111,13 @@ struct l2tp_net { | |||
111 | spinlock_t l2tp_session_hlist_lock; | 111 | spinlock_t l2tp_session_hlist_lock; |
112 | }; | 112 | }; |
113 | 113 | ||
114 | #if IS_ENABLED(CONFIG_IPV6) | ||
115 | static bool l2tp_sk_is_v6(struct sock *sk) | ||
116 | { | ||
117 | return sk->sk_family == PF_INET6 && | ||
118 | !ipv6_addr_v4mapped(&sk->sk_v6_daddr); | ||
119 | } | ||
120 | #endif | ||
114 | 121 | ||
115 | static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) | 122 | static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) |
116 | { | 123 | { |
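The new l2tp_sk_is_v6() helper treats a PF_INET6 tunnel socket whose peer address is IPv4-mapped (::ffff:a.b.c.d) as IPv4, which is what the removed tunnel->v4mapped flag used to track. A standalone illustration of the v4-mapped test using the standard IN6_IS_ADDR_V4MAPPED() macro from userspace headers:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a);
	printf("v4-mapped: %d\n", IN6_IS_ADDR_V4MAPPED(&a)); /* 1 */

	inet_pton(AF_INET6, "2001:db8::1", &a);
	printf("v4-mapped: %d\n", IN6_IS_ADDR_V4MAPPED(&a)); /* 0 */
	return 0;
}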
@@ -136,51 +143,6 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id) | |||
136 | 143 | ||
137 | } | 144 | } |
138 | 145 | ||
139 | /* Lookup the tunnel socket, possibly involving the fs code if the socket is | ||
140 | * owned by userspace. A struct sock returned from this function must be | ||
141 | * released using l2tp_tunnel_sock_put once you're done with it. | ||
142 | */ | ||
143 | static struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel) | ||
144 | { | ||
145 | int err = 0; | ||
146 | struct socket *sock = NULL; | ||
147 | struct sock *sk = NULL; | ||
148 | |||
149 | if (!tunnel) | ||
150 | goto out; | ||
151 | |||
152 | if (tunnel->fd >= 0) { | ||
153 | /* Socket is owned by userspace, who might be in the process | ||
154 | * of closing it. Look the socket up using the fd to ensure | ||
155 | * consistency. | ||
156 | */ | ||
157 | sock = sockfd_lookup(tunnel->fd, &err); | ||
158 | if (sock) | ||
159 | sk = sock->sk; | ||
160 | } else { | ||
161 | /* Socket is owned by kernelspace */ | ||
162 | sk = tunnel->sock; | ||
163 | sock_hold(sk); | ||
164 | } | ||
165 | |||
166 | out: | ||
167 | return sk; | ||
168 | } | ||
169 | |||
170 | /* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */ | ||
171 | static void l2tp_tunnel_sock_put(struct sock *sk) | ||
172 | { | ||
173 | struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); | ||
174 | if (tunnel) { | ||
175 | if (tunnel->fd >= 0) { | ||
176 | /* Socket is owned by userspace */ | ||
177 | sockfd_put(sk->sk_socket); | ||
178 | } | ||
179 | sock_put(sk); | ||
180 | } | ||
181 | sock_put(sk); | ||
182 | } | ||
183 | |||
184 | /* Session hash list. | 146 | /* Session hash list. |
185 | * The session_id SHOULD be random according to RFC2661, but several | 147 | * The session_id SHOULD be random according to RFC2661, but several |
186 | * L2TP implementations (Cisco and Microsoft) use incrementing | 148 | * L2TP implementations (Cisco and Microsoft) use incrementing |
@@ -193,6 +155,13 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id) | |||
193 | return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)]; | 155 | return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)]; |
194 | } | 156 | } |
195 | 157 | ||
158 | void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) | ||
159 | { | ||
160 | sock_put(tunnel->sock); | ||
161 | /* the tunnel is freed in the socket destructor */ | ||
162 | } | ||
163 | EXPORT_SYMBOL(l2tp_tunnel_free); | ||
164 | |||
196 | /* Lookup a tunnel. A new reference is held on the returned tunnel. */ | 165 | /* Lookup a tunnel. A new reference is held on the returned tunnel. */ |
197 | struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id) | 166 | struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id) |
198 | { | 167 | { |
@@ -345,13 +314,11 @@ int l2tp_session_register(struct l2tp_session *session, | |||
345 | } | 314 | } |
346 | 315 | ||
347 | l2tp_tunnel_inc_refcount(tunnel); | 316 | l2tp_tunnel_inc_refcount(tunnel); |
348 | sock_hold(tunnel->sock); | ||
349 | hlist_add_head_rcu(&session->global_hlist, g_head); | 317 | hlist_add_head_rcu(&session->global_hlist, g_head); |
350 | 318 | ||
351 | spin_unlock_bh(&pn->l2tp_session_hlist_lock); | 319 | spin_unlock_bh(&pn->l2tp_session_hlist_lock); |
352 | } else { | 320 | } else { |
353 | l2tp_tunnel_inc_refcount(tunnel); | 321 | l2tp_tunnel_inc_refcount(tunnel); |
354 | sock_hold(tunnel->sock); | ||
355 | } | 322 | } |
356 | 323 | ||
357 | hlist_add_head(&session->hlist, head); | 324 | hlist_add_head(&session->hlist, head); |
@@ -969,7 +936,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) | |||
969 | { | 936 | { |
970 | struct l2tp_tunnel *tunnel; | 937 | struct l2tp_tunnel *tunnel; |
971 | 938 | ||
972 | tunnel = l2tp_sock_to_tunnel(sk); | 939 | tunnel = l2tp_tunnel(sk); |
973 | if (tunnel == NULL) | 940 | if (tunnel == NULL) |
974 | goto pass_up; | 941 | goto pass_up; |
975 | 942 | ||
@@ -977,13 +944,10 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) | |||
977 | tunnel->name, skb->len); | 944 | tunnel->name, skb->len); |
978 | 945 | ||
979 | if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook)) | 946 | if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook)) |
980 | goto pass_up_put; | 947 | goto pass_up; |
981 | 948 | ||
982 | sock_put(sk); | ||
983 | return 0; | 949 | return 0; |
984 | 950 | ||
985 | pass_up_put: | ||
986 | sock_put(sk); | ||
987 | pass_up: | 951 | pass_up: |
988 | return 1; | 952 | return 1; |
989 | } | 953 | } |
@@ -1092,7 +1056,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, | |||
1092 | /* Queue the packet to IP for output */ | 1056 | /* Queue the packet to IP for output */ |
1093 | skb->ignore_df = 1; | 1057 | skb->ignore_df = 1; |
1094 | #if IS_ENABLED(CONFIG_IPV6) | 1058 | #if IS_ENABLED(CONFIG_IPV6) |
1095 | if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped) | 1059 | if (l2tp_sk_is_v6(tunnel->sock)) |
1096 | error = inet6_csk_xmit(tunnel->sock, skb, NULL); | 1060 | error = inet6_csk_xmit(tunnel->sock, skb, NULL); |
1097 | else | 1061 | else |
1098 | #endif | 1062 | #endif |
@@ -1155,6 +1119,15 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len | |||
1155 | goto out_unlock; | 1119 | goto out_unlock; |
1156 | } | 1120 | } |
1157 | 1121 | ||
1122 | /* The user-space may change the connection status for the user-space | ||
1123 | * provided socket at run time: we must check it under the socket lock | ||
1124 | */ | ||
1125 | if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) { | ||
1126 | kfree_skb(skb); | ||
1127 | ret = NET_XMIT_DROP; | ||
1128 | goto out_unlock; | ||
1129 | } | ||
1130 | |||
1158 | /* Get routing info from the tunnel socket */ | 1131 | /* Get routing info from the tunnel socket */ |
1159 | skb_dst_drop(skb); | 1132 | skb_dst_drop(skb); |
1160 | skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0))); | 1133 | skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0))); |
@@ -1174,7 +1147,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len | |||
1174 | 1147 | ||
1175 | /* Calculate UDP checksum if configured to do so */ | 1148 | /* Calculate UDP checksum if configured to do so */ |
1176 | #if IS_ENABLED(CONFIG_IPV6) | 1149 | #if IS_ENABLED(CONFIG_IPV6) |
1177 | if (sk->sk_family == PF_INET6 && !tunnel->v4mapped) | 1150 | if (l2tp_sk_is_v6(sk)) |
1178 | udp6_set_csum(udp_get_no_check6_tx(sk), | 1151 | udp6_set_csum(udp_get_no_check6_tx(sk), |
1179 | skb, &inet6_sk(sk)->saddr, | 1152 | skb, &inet6_sk(sk)->saddr, |
1180 | &sk->sk_v6_daddr, udp_len); | 1153 | &sk->sk_v6_daddr, udp_len); |
@@ -1207,14 +1180,12 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb); | |||
1207 | static void l2tp_tunnel_destruct(struct sock *sk) | 1180 | static void l2tp_tunnel_destruct(struct sock *sk) |
1208 | { | 1181 | { |
1209 | struct l2tp_tunnel *tunnel = l2tp_tunnel(sk); | 1182 | struct l2tp_tunnel *tunnel = l2tp_tunnel(sk); |
1210 | struct l2tp_net *pn; | ||
1211 | 1183 | ||
1212 | if (tunnel == NULL) | 1184 | if (tunnel == NULL) |
1213 | goto end; | 1185 | goto end; |
1214 | 1186 | ||
1215 | l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name); | 1187 | l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name); |
1216 | 1188 | ||
1217 | |||
1218 | /* Disable udp encapsulation */ | 1189 | /* Disable udp encapsulation */ |
1219 | switch (tunnel->encap) { | 1190 | switch (tunnel->encap) { |
1220 | case L2TP_ENCAPTYPE_UDP: | 1191 | case L2TP_ENCAPTYPE_UDP: |
@@ -1231,18 +1202,11 @@ static void l2tp_tunnel_destruct(struct sock *sk) | |||
1231 | sk->sk_destruct = tunnel->old_sk_destruct; | 1202 | sk->sk_destruct = tunnel->old_sk_destruct; |
1232 | sk->sk_user_data = NULL; | 1203 | sk->sk_user_data = NULL; |
1233 | 1204 | ||
1234 | /* Remove the tunnel struct from the tunnel list */ | ||
1235 | pn = l2tp_pernet(tunnel->l2tp_net); | ||
1236 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); | ||
1237 | list_del_rcu(&tunnel->list); | ||
1238 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); | ||
1239 | |||
1240 | tunnel->sock = NULL; | ||
1241 | l2tp_tunnel_dec_refcount(tunnel); | ||
1242 | |||
1243 | /* Call the original destructor */ | 1205 | /* Call the original destructor */ |
1244 | if (sk->sk_destruct) | 1206 | if (sk->sk_destruct) |
1245 | (*sk->sk_destruct)(sk); | 1207 | (*sk->sk_destruct)(sk); |
1208 | |||
1209 | kfree_rcu(tunnel, rcu); | ||
1246 | end: | 1210 | end: |
1247 | return; | 1211 | return; |
1248 | } | 1212 | } |
@@ -1303,49 +1267,43 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall); | |||
1303 | /* Tunnel socket destroy hook for UDP encapsulation */ | 1267 | /* Tunnel socket destroy hook for UDP encapsulation */ |
1304 | static void l2tp_udp_encap_destroy(struct sock *sk) | 1268 | static void l2tp_udp_encap_destroy(struct sock *sk) |
1305 | { | 1269 | { |
1306 | struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); | 1270 | struct l2tp_tunnel *tunnel = l2tp_tunnel(sk); |
1307 | if (tunnel) { | 1271 | |
1308 | l2tp_tunnel_closeall(tunnel); | 1272 | if (tunnel) |
1309 | sock_put(sk); | 1273 | l2tp_tunnel_delete(tunnel); |
1310 | } | ||
1311 | } | 1274 | } |
1312 | 1275 | ||
1313 | /* Workqueue tunnel deletion function */ | 1276 | /* Workqueue tunnel deletion function */ |
1314 | static void l2tp_tunnel_del_work(struct work_struct *work) | 1277 | static void l2tp_tunnel_del_work(struct work_struct *work) |
1315 | { | 1278 | { |
1316 | struct l2tp_tunnel *tunnel = NULL; | 1279 | struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel, |
1317 | struct socket *sock = NULL; | 1280 | del_work); |
1318 | struct sock *sk = NULL; | 1281 | struct sock *sk = tunnel->sock; |
1319 | 1282 | struct socket *sock = sk->sk_socket; | |
1320 | tunnel = container_of(work, struct l2tp_tunnel, del_work); | 1283 | struct l2tp_net *pn; |
1321 | 1284 | ||
1322 | l2tp_tunnel_closeall(tunnel); | 1285 | l2tp_tunnel_closeall(tunnel); |
1323 | 1286 | ||
1324 | sk = l2tp_tunnel_sock_lookup(tunnel); | 1287 | /* If the tunnel socket was created within the kernel, use |
1325 | if (!sk) | ||
1326 | goto out; | ||
1327 | |||
1328 | sock = sk->sk_socket; | ||
1329 | |||
1330 | /* If the tunnel socket was created by userspace, then go through the | ||
1331 | * inet layer to shut the socket down, and let userspace close it. | ||
1332 | * Otherwise, if we created the socket directly within the kernel, use | ||
1333 | * the sk API to release it here. | 1288 | * the sk API to release it here. |
1334 | * In either case the tunnel resources are freed in the socket | ||
1335 | * destructor when the tunnel socket goes away. | ||
1336 | */ | 1289 | */ |
1337 | if (tunnel->fd >= 0) { | 1290 | if (tunnel->fd < 0) { |
1338 | if (sock) | ||
1339 | inet_shutdown(sock, 2); | ||
1340 | } else { | ||
1341 | if (sock) { | 1291 | if (sock) { |
1342 | kernel_sock_shutdown(sock, SHUT_RDWR); | 1292 | kernel_sock_shutdown(sock, SHUT_RDWR); |
1343 | sock_release(sock); | 1293 | sock_release(sock); |
1344 | } | 1294 | } |
1345 | } | 1295 | } |
1346 | 1296 | ||
1347 | l2tp_tunnel_sock_put(sk); | 1297 | /* Remove the tunnel struct from the tunnel list */ |
1348 | out: | 1298 | pn = l2tp_pernet(tunnel->l2tp_net); |
1299 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); | ||
1300 | list_del_rcu(&tunnel->list); | ||
1301 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); | ||
1302 | |||
1303 | /* drop initial ref */ | ||
1304 | l2tp_tunnel_dec_refcount(tunnel); | ||
1305 | |||
1306 | /* drop workqueue ref */ | ||
1349 | l2tp_tunnel_dec_refcount(tunnel); | 1307 | l2tp_tunnel_dec_refcount(tunnel); |
1350 | } | 1308 | } |
1351 | 1309 | ||
@@ -1515,9 +1473,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 | |||
1515 | encap = cfg->encap; | 1473 | encap = cfg->encap; |
1516 | 1474 | ||
1517 | /* Quick sanity checks */ | 1475 | /* Quick sanity checks */ |
1476 | err = -EPROTONOSUPPORT; | ||
1477 | if (sk->sk_type != SOCK_DGRAM) { | ||
1478 | pr_debug("tunl %hu: fd %d wrong socket type\n", | ||
1479 | tunnel_id, fd); | ||
1480 | goto err; | ||
1481 | } | ||
1518 | switch (encap) { | 1482 | switch (encap) { |
1519 | case L2TP_ENCAPTYPE_UDP: | 1483 | case L2TP_ENCAPTYPE_UDP: |
1520 | err = -EPROTONOSUPPORT; | ||
1521 | if (sk->sk_protocol != IPPROTO_UDP) { | 1484 | if (sk->sk_protocol != IPPROTO_UDP) { |
1522 | pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", | 1485 | pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", |
1523 | tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP); | 1486 | tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP); |
@@ -1525,7 +1488,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 | |||
1525 | } | 1488 | } |
1526 | break; | 1489 | break; |
1527 | case L2TP_ENCAPTYPE_IP: | 1490 | case L2TP_ENCAPTYPE_IP: |
1528 | err = -EPROTONOSUPPORT; | ||
1529 | if (sk->sk_protocol != IPPROTO_L2TP) { | 1491 | if (sk->sk_protocol != IPPROTO_L2TP) { |
1530 | pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", | 1492 | pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", |
1531 | tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP); | 1493 | tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP); |
@@ -1565,24 +1527,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 | |||
1565 | if (cfg != NULL) | 1527 | if (cfg != NULL) |
1566 | tunnel->debug = cfg->debug; | 1528 | tunnel->debug = cfg->debug; |
1567 | 1529 | ||
1568 | #if IS_ENABLED(CONFIG_IPV6) | ||
1569 | if (sk->sk_family == PF_INET6) { | ||
1570 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
1571 | |||
1572 | if (ipv6_addr_v4mapped(&np->saddr) && | ||
1573 | ipv6_addr_v4mapped(&sk->sk_v6_daddr)) { | ||
1574 | struct inet_sock *inet = inet_sk(sk); | ||
1575 | |||
1576 | tunnel->v4mapped = true; | ||
1577 | inet->inet_saddr = np->saddr.s6_addr32[3]; | ||
1578 | inet->inet_rcv_saddr = sk->sk_v6_rcv_saddr.s6_addr32[3]; | ||
1579 | inet->inet_daddr = sk->sk_v6_daddr.s6_addr32[3]; | ||
1580 | } else { | ||
1581 | tunnel->v4mapped = false; | ||
1582 | } | ||
1583 | } | ||
1584 | #endif | ||
1585 | |||
1586 | /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ | 1530 | /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ |
1587 | tunnel->encap = encap; | 1531 | tunnel->encap = encap; |
1588 | if (encap == L2TP_ENCAPTYPE_UDP) { | 1532 | if (encap == L2TP_ENCAPTYPE_UDP) { |
@@ -1598,13 +1542,22 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 | |||
1598 | sk->sk_user_data = tunnel; | 1542 | sk->sk_user_data = tunnel; |
1599 | } | 1543 | } |
1600 | 1544 | ||
1545 | /* Bump the reference count. The tunnel context is deleted | ||
1546 | * only when this drops to zero. A reference is also held on | ||
1547 | * the tunnel socket to ensure that it is not released while | ||
1548 | * the tunnel is extant. Must be done before sk_destruct is | ||
1549 | * set. | ||
1550 | */ | ||
1551 | refcount_set(&tunnel->ref_count, 1); | ||
1552 | sock_hold(sk); | ||
1553 | tunnel->sock = sk; | ||
1554 | tunnel->fd = fd; | ||
1555 | |||
1601 | /* Hook on the tunnel socket destructor so that we can cleanup | 1556 | /* Hook on the tunnel socket destructor so that we can cleanup |
1602 | * if the tunnel socket goes away. | 1557 | * if the tunnel socket goes away. |
1603 | */ | 1558 | */ |
1604 | tunnel->old_sk_destruct = sk->sk_destruct; | 1559 | tunnel->old_sk_destruct = sk->sk_destruct; |
1605 | sk->sk_destruct = &l2tp_tunnel_destruct; | 1560 | sk->sk_destruct = &l2tp_tunnel_destruct; |
1606 | tunnel->sock = sk; | ||
1607 | tunnel->fd = fd; | ||
1608 | lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); | 1561 | lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); |
1609 | 1562 | ||
1610 | sk->sk_allocation = GFP_ATOMIC; | 1563 | sk->sk_allocation = GFP_ATOMIC; |
@@ -1614,11 +1567,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 | |||
1614 | 1567 | ||
1615 | /* Add tunnel to our list */ | 1568 | /* Add tunnel to our list */ |
1616 | INIT_LIST_HEAD(&tunnel->list); | 1569 | INIT_LIST_HEAD(&tunnel->list); |
1617 | |||
1618 | /* Bump the reference count. The tunnel context is deleted | ||
1619 | * only when this drops to zero. Must be done before list insertion | ||
1620 | */ | ||
1621 | refcount_set(&tunnel->ref_count, 1); | ||
1622 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); | 1570 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); |
1623 | list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list); | 1571 | list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list); |
1624 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); | 1572 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); |
@@ -1659,8 +1607,6 @@ void l2tp_session_free(struct l2tp_session *session) | |||
1659 | 1607 | ||
1660 | if (tunnel) { | 1608 | if (tunnel) { |
1661 | BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); | 1609 | BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); |
1662 | sock_put(tunnel->sock); | ||
1663 | session->tunnel = NULL; | ||
1664 | l2tp_tunnel_dec_refcount(tunnel); | 1610 | l2tp_tunnel_dec_refcount(tunnel); |
1665 | } | 1611 | } |
1666 | 1612 | ||
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 9bbee90e9963..2718d0b284d0 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
@@ -188,9 +188,6 @@ struct l2tp_tunnel { | |||
188 | struct sock *sock; /* Parent socket */ | 188 | struct sock *sock; /* Parent socket */ |
189 | int fd; /* Parent fd, if tunnel socket | 189 | int fd; /* Parent fd, if tunnel socket |
190 | * was created by userspace */ | 190 | * was created by userspace */ |
191 | #if IS_ENABLED(CONFIG_IPV6) | ||
192 | bool v4mapped; | ||
193 | #endif | ||
194 | 191 | ||
195 | struct work_struct del_work; | 192 | struct work_struct del_work; |
196 | 193 | ||
@@ -214,27 +211,8 @@ static inline void *l2tp_session_priv(struct l2tp_session *session) | |||
214 | return &session->priv[0]; | 211 | return &session->priv[0]; |
215 | } | 212 | } |
216 | 213 | ||
217 | static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk) | ||
218 | { | ||
219 | struct l2tp_tunnel *tunnel; | ||
220 | |||
221 | if (sk == NULL) | ||
222 | return NULL; | ||
223 | |||
224 | sock_hold(sk); | ||
225 | tunnel = (struct l2tp_tunnel *)(sk->sk_user_data); | ||
226 | if (tunnel == NULL) { | ||
227 | sock_put(sk); | ||
228 | goto out; | ||
229 | } | ||
230 | |||
231 | BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); | ||
232 | |||
233 | out: | ||
234 | return tunnel; | ||
235 | } | ||
236 | |||
237 | struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); | 214 | struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); |
215 | void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); | ||
238 | 216 | ||
239 | struct l2tp_session *l2tp_session_get(const struct net *net, | 217 | struct l2tp_session *l2tp_session_get(const struct net *net, |
240 | struct l2tp_tunnel *tunnel, | 218 | struct l2tp_tunnel *tunnel, |
@@ -283,7 +261,7 @@ static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel) | |||
283 | static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel) | 261 | static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel) |
284 | { | 262 | { |
285 | if (refcount_dec_and_test(&tunnel->ref_count)) | 263 | if (refcount_dec_and_test(&tunnel->ref_count)) |
286 | kfree_rcu(tunnel, rcu); | 264 | l2tp_tunnel_free(tunnel); |
287 | } | 265 | } |
288 | 266 | ||
289 | /* Session reference counts. Incremented when code obtains a reference | 267 | /* Session reference counts. Incremented when code obtains a reference |
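With this header change, dropping the last tunnel reference calls l2tp_tunnel_free(), which only releases the socket reference; the tunnel memory itself is freed later from the socket destructor (the kfree_rcu now in l2tp_tunnel_destruct). A toy refcount_dec_and_test()-style model using C11 atomics, purely to illustrate the "last reference starts teardown" semantics rather than the kernel refcount_t API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int ref;
};

/* Returns true only for the caller that drops the final reference,
 * which is then responsible for starting teardown. */
static bool ref_dec_and_test(struct obj *o)
{
	return atomic_fetch_sub(&o->ref, 1) == 1;
}

int main(void)
{
	struct obj tunnel = { .ref = 2 };	/* e.g. initial + workqueue ref */

	printf("%d\n", ref_dec_and_test(&tunnel));	/* 0: still referenced */
	printf("%d\n", ref_dec_and_test(&tunnel));	/* 1: start teardown   */
	return 0;
}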
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index ff61124fdf59..3428fba6f2b7 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c | |||
@@ -234,17 +234,13 @@ static void l2tp_ip_close(struct sock *sk, long timeout) | |||
234 | static void l2tp_ip_destroy_sock(struct sock *sk) | 234 | static void l2tp_ip_destroy_sock(struct sock *sk) |
235 | { | 235 | { |
236 | struct sk_buff *skb; | 236 | struct sk_buff *skb; |
237 | struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); | 237 | struct l2tp_tunnel *tunnel = sk->sk_user_data; |
238 | 238 | ||
239 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) | 239 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) |
240 | kfree_skb(skb); | 240 | kfree_skb(skb); |
241 | 241 | ||
242 | if (tunnel) { | 242 | if (tunnel) |
243 | l2tp_tunnel_closeall(tunnel); | 243 | l2tp_tunnel_delete(tunnel); |
244 | sock_put(sk); | ||
245 | } | ||
246 | |||
247 | sk_refcnt_debug_dec(sk); | ||
248 | } | 244 | } |
249 | 245 | ||
250 | static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | 246 | static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 192344688c06..6f009eaa5fbe 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -248,16 +248,14 @@ static void l2tp_ip6_close(struct sock *sk, long timeout) | |||
248 | 248 | ||
249 | static void l2tp_ip6_destroy_sock(struct sock *sk) | 249 | static void l2tp_ip6_destroy_sock(struct sock *sk) |
250 | { | 250 | { |
251 | struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); | 251 | struct l2tp_tunnel *tunnel = sk->sk_user_data; |
252 | 252 | ||
253 | lock_sock(sk); | 253 | lock_sock(sk); |
254 | ip6_flush_pending_frames(sk); | 254 | ip6_flush_pending_frames(sk); |
255 | release_sock(sk); | 255 | release_sock(sk); |
256 | 256 | ||
257 | if (tunnel) { | 257 | if (tunnel) |
258 | l2tp_tunnel_closeall(tunnel); | 258 | l2tp_tunnel_delete(tunnel); |
259 | sock_put(sk); | ||
260 | } | ||
261 | 259 | ||
262 | inet6_destroy_sock(sk); | 260 | inet6_destroy_sock(sk); |
263 | } | 261 | } |
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 59f246d7b290..3b02f24ea9ec 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c | |||
@@ -416,20 +416,28 @@ abort: | |||
416 | * Session (and tunnel control) socket create/destroy. | 416 | * Session (and tunnel control) socket create/destroy. |
417 | *****************************************************************************/ | 417 | *****************************************************************************/ |
418 | 418 | ||
419 | static void pppol2tp_put_sk(struct rcu_head *head) | ||
420 | { | ||
421 | struct pppol2tp_session *ps; | ||
422 | |||
423 | ps = container_of(head, typeof(*ps), rcu); | ||
424 | sock_put(ps->__sk); | ||
425 | } | ||
426 | |||
419 | /* Called by l2tp_core when a session socket is being closed. | 427 | /* Called by l2tp_core when a session socket is being closed. |
420 | */ | 428 | */ |
421 | static void pppol2tp_session_close(struct l2tp_session *session) | 429 | static void pppol2tp_session_close(struct l2tp_session *session) |
422 | { | 430 | { |
423 | struct sock *sk; | 431 | struct pppol2tp_session *ps; |
424 | |||
425 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | ||
426 | 432 | ||
427 | sk = pppol2tp_session_get_sock(session); | 433 | ps = l2tp_session_priv(session); |
428 | if (sk) { | 434 | mutex_lock(&ps->sk_lock); |
429 | if (sk->sk_socket) | 435 | ps->__sk = rcu_dereference_protected(ps->sk, |
430 | inet_shutdown(sk->sk_socket, SEND_SHUTDOWN); | 436 | lockdep_is_held(&ps->sk_lock)); |
431 | sock_put(sk); | 437 | RCU_INIT_POINTER(ps->sk, NULL); |
432 | } | 438 | if (ps->__sk) |
439 | call_rcu(&ps->rcu, pppol2tp_put_sk); | ||
440 | mutex_unlock(&ps->sk_lock); | ||
433 | } | 441 | } |
434 | 442 | ||
435 | /* Really kill the session socket. (Called from sock_put() if | 443 | /* Really kill the session socket. (Called from sock_put() if |
@@ -449,14 +457,6 @@ static void pppol2tp_session_destruct(struct sock *sk) | |||
449 | } | 457 | } |
450 | } | 458 | } |
451 | 459 | ||
452 | static void pppol2tp_put_sk(struct rcu_head *head) | ||
453 | { | ||
454 | struct pppol2tp_session *ps; | ||
455 | |||
456 | ps = container_of(head, typeof(*ps), rcu); | ||
457 | sock_put(ps->__sk); | ||
458 | } | ||
459 | |||
460 | /* Called when the PPPoX socket (session) is closed. | 460 | /* Called when the PPPoX socket (session) is closed. |
461 | */ | 461 | */ |
462 | static int pppol2tp_release(struct socket *sock) | 462 | static int pppol2tp_release(struct socket *sock) |
@@ -480,26 +480,17 @@ static int pppol2tp_release(struct socket *sock) | |||
480 | sock_orphan(sk); | 480 | sock_orphan(sk); |
481 | sock->sk = NULL; | 481 | sock->sk = NULL; |
482 | 482 | ||
483 | /* If the socket is associated with a session, | ||
484 | * l2tp_session_delete will call pppol2tp_session_close which | ||
485 | * will drop the session's ref on the socket. | ||
486 | */ | ||
483 | session = pppol2tp_sock_to_session(sk); | 487 | session = pppol2tp_sock_to_session(sk); |
484 | 488 | if (session) { | |
485 | if (session != NULL) { | ||
486 | struct pppol2tp_session *ps; | ||
487 | |||
488 | l2tp_session_delete(session); | 489 | l2tp_session_delete(session); |
489 | 490 | /* drop the ref obtained by pppol2tp_sock_to_session */ | |
490 | ps = l2tp_session_priv(session); | 491 | sock_put(sk); |
491 | mutex_lock(&ps->sk_lock); | ||
492 | ps->__sk = rcu_dereference_protected(ps->sk, | ||
493 | lockdep_is_held(&ps->sk_lock)); | ||
494 | RCU_INIT_POINTER(ps->sk, NULL); | ||
495 | mutex_unlock(&ps->sk_lock); | ||
496 | call_rcu(&ps->rcu, pppol2tp_put_sk); | ||
497 | |||
498 | /* Rely on the sock_put() call at the end of the function for | ||
499 | * dropping the reference held by pppol2tp_sock_to_session(). | ||
500 | * The last reference will be dropped by pppol2tp_put_sk(). | ||
501 | */ | ||
502 | } | 492 | } |
493 | |||
503 | release_sock(sk); | 494 | release_sock(sk); |
504 | 495 | ||
505 | /* This will delete the session context via | 496 | /* This will delete the session context via |
@@ -796,6 +787,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
796 | 787 | ||
797 | out_no_ppp: | 788 | out_no_ppp: |
798 | /* This is how we get the session context from the socket. */ | 789 | /* This is how we get the session context from the socket. */ |
790 | sock_hold(sk); | ||
799 | sk->sk_user_data = session; | 791 | sk->sk_user_data = session; |
800 | rcu_assign_pointer(ps->sk, sk); | 792 | rcu_assign_pointer(ps->sk, sk); |
801 | mutex_unlock(&ps->sk_lock); | 793 | mutex_unlock(&ps->sk_lock); |
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index a8b1616cec41..1f3188d03840 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | 8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> |
9 | * Copyright 2007-2010, Intel Corporation | 9 | * Copyright 2007-2010, Intel Corporation |
10 | * Copyright(c) 2015-2017 Intel Deutschland GmbH | 10 | * Copyright(c) 2015-2017 Intel Deutschland GmbH |
11 | * Copyright (C) 2018 Intel Corporation | ||
11 | * | 12 | * |
12 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License version 2 as | 14 | * it under the terms of the GNU General Public License version 2 as |
@@ -304,9 +305,6 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta, | |||
304 | * driver so reject the timeout update. | 305 | * driver so reject the timeout update. |
305 | */ | 306 | */ |
306 | status = WLAN_STATUS_REQUEST_DECLINED; | 307 | status = WLAN_STATUS_REQUEST_DECLINED; |
307 | ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, | ||
308 | tid, dialog_token, status, | ||
309 | 1, buf_size, timeout); | ||
310 | goto end; | 308 | goto end; |
311 | } | 309 | } |
312 | 310 | ||
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 46028e12e216..f4195a0f0279 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -2892,7 +2892,7 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon) | |||
2892 | } | 2892 | } |
2893 | if (beacon->probe_resp_len) { | 2893 | if (beacon->probe_resp_len) { |
2894 | new_beacon->probe_resp_len = beacon->probe_resp_len; | 2894 | new_beacon->probe_resp_len = beacon->probe_resp_len; |
2895 | beacon->probe_resp = pos; | 2895 | new_beacon->probe_resp = pos; |
2896 | memcpy(pos, beacon->probe_resp, beacon->probe_resp_len); | 2896 | memcpy(pos, beacon->probe_resp, beacon->probe_resp_len); |
2897 | pos += beacon->probe_resp_len; | 2897 | pos += beacon->probe_resp_len; |
2898 | } | 2898 | } |
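The one-character cfg.c fix above (beacon->probe_resp becomes new_beacon->probe_resp) is a classic duplicate-function slip: the copy routine wrote the freshly allocated buffer pointer back into the source object instead of the copy. A tiny standalone sketch of the corrected pattern, with invented struct and field names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct beacon {
	char *probe_resp;
	size_t probe_resp_len;
};

/* Correct dup: the new buffer is assigned to the *copy*. The bug fixed
 * above assigned it to the source, leaving the copy's pointer stale and
 * corrupting the original. */
static struct beacon *beacon_dup(const struct beacon *src)
{
	struct beacon *new_beacon = calloc(1, sizeof(*new_beacon));
	char *pos;

	if (!new_beacon)
		return NULL;
	pos = malloc(src->probe_resp_len);
	if (!pos) {
		free(new_beacon);
		return NULL;
	}
	new_beacon->probe_resp_len = src->probe_resp_len;
	new_beacon->probe_resp = pos;	/* not src->probe_resp = pos */
	memcpy(pos, src->probe_resp, src->probe_resp_len);
	return new_beacon;
}

int main(void)
{
	struct beacon src = { .probe_resp = "probe", .probe_resp_len = 6 };
	struct beacon *copy = beacon_dup(&src);

	if (copy) {
		printf("%s / %s\n", src.probe_resp, copy->probe_resp);
		free(copy->probe_resp);
		free(copy);
	}
	return 0;
}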
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 1f466d12a6bc..94c7ee9df33b 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c | |||
@@ -212,6 +212,7 @@ static const char *hw_flag_names[] = { | |||
212 | FLAG(REPORTS_LOW_ACK), | 212 | FLAG(REPORTS_LOW_ACK), |
213 | FLAG(SUPPORTS_TX_FRAG), | 213 | FLAG(SUPPORTS_TX_FRAG), |
214 | FLAG(SUPPORTS_TDLS_BUFFER_STA), | 214 | FLAG(SUPPORTS_TDLS_BUFFER_STA), |
215 | FLAG(DOESNT_SUPPORT_QOS_NDP), | ||
215 | #undef FLAG | 216 | #undef FLAG |
216 | }; | 217 | }; |
217 | 218 | ||
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 26900025de2f..ae9c33cd8ada 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -1467,7 +1467,7 @@ struct ieee802_11_elems { | |||
1467 | const struct ieee80211_timeout_interval_ie *timeout_int; | 1467 | const struct ieee80211_timeout_interval_ie *timeout_int; |
1468 | const u8 *opmode_notif; | 1468 | const u8 *opmode_notif; |
1469 | const struct ieee80211_sec_chan_offs_ie *sec_chan_offs; | 1469 | const struct ieee80211_sec_chan_offs_ie *sec_chan_offs; |
1470 | const struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie; | 1470 | struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie; |
1471 | const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie; | 1471 | const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie; |
1472 | 1472 | ||
1473 | /* length of them, respectively */ | 1473 | /* length of them, respectively */ |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 73ac607beb5d..6a381cbe1e33 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -1255,13 +1255,12 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata, | |||
1255 | } | 1255 | } |
1256 | 1256 | ||
1257 | static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata, | 1257 | static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata, |
1258 | struct ieee80211_mgmt *mgmt, size_t len) | 1258 | struct ieee80211_mgmt *mgmt, size_t len, |
1259 | struct ieee802_11_elems *elems) | ||
1259 | { | 1260 | { |
1260 | struct ieee80211_mgmt *mgmt_fwd; | 1261 | struct ieee80211_mgmt *mgmt_fwd; |
1261 | struct sk_buff *skb; | 1262 | struct sk_buff *skb; |
1262 | struct ieee80211_local *local = sdata->local; | 1263 | struct ieee80211_local *local = sdata->local; |
1263 | u8 *pos = mgmt->u.action.u.chan_switch.variable; | ||
1264 | size_t offset_ttl; | ||
1265 | 1264 | ||
1266 | skb = dev_alloc_skb(local->tx_headroom + len); | 1265 | skb = dev_alloc_skb(local->tx_headroom + len); |
1267 | if (!skb) | 1266 | if (!skb) |
@@ -1269,13 +1268,9 @@ static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata, | |||
1269 | skb_reserve(skb, local->tx_headroom); | 1268 | skb_reserve(skb, local->tx_headroom); |
1270 | mgmt_fwd = skb_put(skb, len); | 1269 | mgmt_fwd = skb_put(skb, len); |
1271 | 1270 | ||
1272 | /* offset_ttl is based on whether the secondary channel | 1271 | elems->mesh_chansw_params_ie->mesh_ttl--; |
1273 | * offset is available or not. Subtract 1 from the mesh TTL | 1272 | elems->mesh_chansw_params_ie->mesh_flags &= |
1274 | * and disable the initiator flag before forwarding. | 1273 | ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; |
1275 | */ | ||
1276 | offset_ttl = (len < 42) ? 7 : 10; | ||
1277 | *(pos + offset_ttl) -= 1; | ||
1278 | *(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; | ||
1279 | 1274 | ||
1280 | memcpy(mgmt_fwd, mgmt, len); | 1275 | memcpy(mgmt_fwd, mgmt, len); |
1281 | eth_broadcast_addr(mgmt_fwd->da); | 1276 | eth_broadcast_addr(mgmt_fwd->da); |
@@ -1323,7 +1318,7 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata, | |||
1323 | 1318 | ||
1324 | /* forward or re-broadcast the CSA frame */ | 1319 | /* forward or re-broadcast the CSA frame */ |
1325 | if (fwd_csa) { | 1320 | if (fwd_csa) { |
1326 | if (mesh_fwd_csa_frame(sdata, mgmt, len) < 0) | 1321 | if (mesh_fwd_csa_frame(sdata, mgmt, len, &elems) < 0) |
1327 | mcsa_dbg(sdata, "Failed to forward the CSA frame"); | 1322 | mcsa_dbg(sdata, "Failed to forward the CSA frame"); |
1328 | } | 1323 | } |
1329 | } | 1324 | } |
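The mesh_fwd_csa_frame() change replaces hand-computed byte offsets into the frame ((len < 42) ? 7 : 10) with direct access to the element structure already parsed into elems. A small sketch of why the struct-based form is less fragile; the layout below is invented for illustration and is not the real ieee80211_mesh_chansw_params_ie:

#include <stdint.h>
#include <stdio.h>

struct chansw_params {
	uint8_t ttl;
	uint8_t flags;
};

#define FLAG_INITIATOR 0x01

/* Struct-based update keeps the intent explicit and survives layout
 * changes elsewhere in the frame, unlike "*(pos + offset_ttl) -= 1". */
static void fwd_update(struct chansw_params *p)
{
	p->ttl--;
	p->flags &= ~FLAG_INITIATOR;
}

int main(void)
{
	struct chansw_params p = { .ttl = 5, .flags = FLAG_INITIATOR };

	fwd_update(&p);
	printf("ttl=%u flags=%u\n", p.ttl, p.flags);
	return 0;
}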
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 39b660b9a908..5f303abac5ad 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -896,7 +896,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local, | |||
896 | struct ieee80211_hdr_3addr *nullfunc; | 896 | struct ieee80211_hdr_3addr *nullfunc; |
897 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 897 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
898 | 898 | ||
899 | skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, true); | 899 | skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, |
900 | !ieee80211_hw_check(&local->hw, DOESNT_SUPPORT_QOS_NDP)); | ||
900 | if (!skb) | 901 | if (!skb) |
901 | return; | 902 | return; |
902 | 903 | ||
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index fd580614085b..56fe16b07538 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -3921,7 +3921,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, | |||
3921 | if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | | 3921 | if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | |
3922 | IEEE80211_FCTL_TODS)) != | 3922 | IEEE80211_FCTL_TODS)) != |
3923 | fast_rx->expected_ds_bits) | 3923 | fast_rx->expected_ds_bits) |
3924 | goto drop; | 3924 | return false; |
3925 | 3925 | ||
3926 | /* assign the key to drop unencrypted frames (later) | 3926 | /* assign the key to drop unencrypted frames (later) |
3927 | * and strip the IV/MIC if necessary | 3927 | * and strip the IV/MIC if necessary |
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c index ee0181778a42..029334835747 100644 --- a/net/mac80211/spectmgmt.c +++ b/net/mac80211/spectmgmt.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | 8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> |
9 | * Copyright 2007-2008, Intel Corporation | 9 | * Copyright 2007-2008, Intel Corporation |
10 | * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> | 10 | * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> |
11 | * Copyright (C) 2018 Intel Corporation | ||
11 | * | 12 | * |
12 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License version 2 as | 14 | * it under the terms of the GNU General Public License version 2 as |
@@ -27,7 +28,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, | |||
27 | u32 sta_flags, u8 *bssid, | 28 | u32 sta_flags, u8 *bssid, |
28 | struct ieee80211_csa_ie *csa_ie) | 29 | struct ieee80211_csa_ie *csa_ie) |
29 | { | 30 | { |
30 | enum nl80211_band new_band; | 31 | enum nl80211_band new_band = current_band; |
31 | int new_freq; | 32 | int new_freq; |
32 | u8 new_chan_no; | 33 | u8 new_chan_no; |
33 | struct ieee80211_channel *new_chan; | 34 | struct ieee80211_channel *new_chan; |
@@ -55,15 +56,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, | |||
55 | elems->ext_chansw_ie->new_operating_class, | 56 | elems->ext_chansw_ie->new_operating_class, |
56 | &new_band)) { | 57 | &new_band)) { |
57 | sdata_info(sdata, | 58 | sdata_info(sdata, |
58 | "cannot understand ECSA IE operating class %d, disconnecting\n", | 59 | "cannot understand ECSA IE operating class, %d, ignoring\n", |
59 | elems->ext_chansw_ie->new_operating_class); | 60 | elems->ext_chansw_ie->new_operating_class); |
60 | return -EINVAL; | ||
61 | } | 61 | } |
62 | new_chan_no = elems->ext_chansw_ie->new_ch_num; | 62 | new_chan_no = elems->ext_chansw_ie->new_ch_num; |
63 | csa_ie->count = elems->ext_chansw_ie->count; | 63 | csa_ie->count = elems->ext_chansw_ie->count; |
64 | csa_ie->mode = elems->ext_chansw_ie->mode; | 64 | csa_ie->mode = elems->ext_chansw_ie->mode; |
65 | } else if (elems->ch_switch_ie) { | 65 | } else if (elems->ch_switch_ie) { |
66 | new_band = current_band; | ||
67 | new_chan_no = elems->ch_switch_ie->new_ch_num; | 66 | new_chan_no = elems->ch_switch_ie->new_ch_num; |
68 | csa_ie->count = elems->ch_switch_ie->count; | 67 | csa_ie->count = elems->ch_switch_ie->count; |
69 | csa_ie->mode = elems->ch_switch_ie->mode; | 68 | csa_ie->mode = elems->ch_switch_ie->mode; |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 0c5627f8a104..af0b608ee8ed 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -314,7 +314,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
314 | 314 | ||
315 | if (ieee80211_hw_check(hw, USES_RSS)) { | 315 | if (ieee80211_hw_check(hw, USES_RSS)) { |
316 | sta->pcpu_rx_stats = | 316 | sta->pcpu_rx_stats = |
317 | alloc_percpu(struct ieee80211_sta_rx_stats); | 317 | alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp); |
318 | if (!sta->pcpu_rx_stats) | 318 | if (!sta->pcpu_rx_stats) |
319 | goto free; | 319 | goto free; |
320 | } | 320 | } |
@@ -433,6 +433,7 @@ free_txq: | |||
433 | if (sta->sta.txq[0]) | 433 | if (sta->sta.txq[0]) |
434 | kfree(to_txq_info(sta->sta.txq[0])); | 434 | kfree(to_txq_info(sta->sta.txq[0])); |
435 | free: | 435 | free: |
436 | free_percpu(sta->pcpu_rx_stats); | ||
436 | #ifdef CONFIG_MAC80211_MESH | 437 | #ifdef CONFIG_MAC80211_MESH |
437 | kfree(sta->mesh); | 438 | kfree(sta->mesh); |
438 | #endif | 439 | #endif |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 25904af38839..69722504e3e1 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -3574,6 +3574,14 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
3574 | if (!IS_ERR_OR_NULL(sta)) { | 3574 | if (!IS_ERR_OR_NULL(sta)) { |
3575 | struct ieee80211_fast_tx *fast_tx; | 3575 | struct ieee80211_fast_tx *fast_tx; |
3576 | 3576 | ||
3577 | /* We need a bit of data queued to build aggregates properly, so | ||
3578 | * instruct the TCP stack to allow more than a single ms of data | ||
3579 | * to be queued in the stack. The value is a bit-shift of 1 | ||
3580 | * second, so 8 is ~4ms of queued data. Only affects local TCP | ||
3581 | * sockets. | ||
3582 | */ | ||
3583 | sk_pacing_shift_update(skb->sk, 8); | ||
3584 | |||
3577 | fast_tx = rcu_dereference(sta->fast_tx); | 3585 | fast_tx = rcu_dereference(sta->fast_tx); |
3578 | 3586 | ||
3579 | if (fast_tx && | 3587 | if (fast_tx && |
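The new comment in the tx.c hunk describes sk_pacing_shift as a bit-shift of one second, so "8 is ~4 ms" is simply 1 s / 2^8. A quick check of that arithmetic, alongside 10, which is believed to be the TCP stack's usual default shift; nothing here is mac80211-specific:

#include <stdio.h>

int main(void)
{
	for (unsigned int shift = 7; shift <= 10; shift++)
		printf("shift %2u -> %.3f ms of queued data\n",
		       shift, 1000.0 / (1u << shift));
	/* shift  8 -> 3.906 ms   (the value set above for aggregation)
	 * shift 10 -> 0.977 ms   (commonly the stack default)          */
	return 0;
}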
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index e545a3c9365f..7a4de6d618b1 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c | |||
@@ -122,7 +122,7 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) | |||
122 | if (skb->len <= mtu) | 122 | if (skb->len <= mtu) |
123 | return false; | 123 | return false; |
124 | 124 | ||
125 | if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) | 125 | if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) |
126 | return false; | 126 | return false; |
127 | 127 | ||
128 | return true; | 128 | return true; |
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 3e17d32b629d..58d5d05aec24 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c | |||
@@ -260,7 +260,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
260 | buf_len = strlen(buf); | 260 | buf_len = strlen(buf); |
261 | 261 | ||
262 | ct = nf_ct_get(skb, &ctinfo); | 262 | ct = nf_ct_get(skb, &ctinfo); |
263 | if (ct && (ct->status & IPS_NAT_MASK)) { | 263 | if (ct) { |
264 | bool mangled; | 264 | bool mangled; |
265 | 265 | ||
266 | /* If mangling fails this function will return 0 | 266 | /* If mangling fails this function will return 0 |
diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c index fbce552a796e..7d7466dbf663 100644 --- a/net/netfilter/nf_nat_proto_common.c +++ b/net/netfilter/nf_nat_proto_common.c | |||
@@ -41,7 +41,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto, | |||
41 | const struct nf_conn *ct, | 41 | const struct nf_conn *ct, |
42 | u16 *rover) | 42 | u16 *rover) |
43 | { | 43 | { |
44 | unsigned int range_size, min, i; | 44 | unsigned int range_size, min, max, i; |
45 | __be16 *portptr; | 45 | __be16 *portptr; |
46 | u_int16_t off; | 46 | u_int16_t off; |
47 | 47 | ||
@@ -71,7 +71,10 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto, | |||
71 | } | 71 | } |
72 | } else { | 72 | } else { |
73 | min = ntohs(range->min_proto.all); | 73 | min = ntohs(range->min_proto.all); |
74 | range_size = ntohs(range->max_proto.all) - min + 1; | 74 | max = ntohs(range->max_proto.all); |
75 | if (unlikely(max < min)) | ||
76 | swap(max, min); | ||
77 | range_size = max - min + 1; | ||
75 | } | 78 | } |
76 | 79 | ||
77 | if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) { | 80 | if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) { |
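The added swap in nf_nat_l4proto_unique_tuple() matters because range_size is unsigned: with an inverted range (max < min), max - min + 1 wraps around to a huge value instead of failing. A standalone demonstration of the wraparound and the swap fix:

#include <stdio.h>

int main(void)
{
	unsigned int min = 2000, max = 1000, range_size;

	range_size = max - min + 1;	/* wraps to 4294966297 */
	printf("without swap: %u\n", range_size);

	if (max < min) {
		unsigned int tmp = max;

		max = min;
		min = tmp;
	}
	range_size = max - min + 1;	/* sane: 1001 */
	printf("with swap:    %u\n", range_size);
	return 0;
}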
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 8b9fe30de0cd..c4acc7340eb1 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -5037,9 +5037,9 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk, | |||
5037 | { | 5037 | { |
5038 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 5038 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
5039 | const struct nf_flowtable_type *type; | 5039 | const struct nf_flowtable_type *type; |
5040 | struct nft_flowtable *flowtable, *ft; | ||
5040 | u8 genmask = nft_genmask_next(net); | 5041 | u8 genmask = nft_genmask_next(net); |
5041 | int family = nfmsg->nfgen_family; | 5042 | int family = nfmsg->nfgen_family; |
5042 | struct nft_flowtable *flowtable; | ||
5043 | struct nft_table *table; | 5043 | struct nft_table *table; |
5044 | struct nft_ctx ctx; | 5044 | struct nft_ctx ctx; |
5045 | int err, i, k; | 5045 | int err, i, k; |
@@ -5099,6 +5099,22 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk, | |||
5099 | goto err3; | 5099 | goto err3; |
5100 | 5100 | ||
5101 | for (i = 0; i < flowtable->ops_len; i++) { | 5101 | for (i = 0; i < flowtable->ops_len; i++) { |
5102 | if (!flowtable->ops[i].dev) | ||
5103 | continue; | ||
5104 | |||
5105 | list_for_each_entry(ft, &table->flowtables, list) { | ||
5106 | for (k = 0; k < ft->ops_len; k++) { | ||
5107 | if (!ft->ops[k].dev) | ||
5108 | continue; | ||
5109 | |||
5110 | if (flowtable->ops[i].dev == ft->ops[k].dev && | ||
5111 | flowtable->ops[i].pf == ft->ops[k].pf) { | ||
5112 | err = -EBUSY; | ||
5113 | goto err4; | ||
5114 | } | ||
5115 | } | ||
5116 | } | ||
5117 | |||
5102 | err = nf_register_net_hook(net, &flowtable->ops[i]); | 5118 | err = nf_register_net_hook(net, &flowtable->ops[i]); |
5103 | if (err < 0) | 5119 | if (err < 0) |
5104 | goto err4; | 5120 | goto err4; |
@@ -5120,7 +5136,7 @@ err5: | |||
5120 | i = flowtable->ops_len; | 5136 | i = flowtable->ops_len; |
5121 | err4: | 5137 | err4: |
5122 | for (k = i - 1; k >= 0; k--) | 5138 | for (k = i - 1; k >= 0; k--) |
5123 | nf_unregister_net_hook(net, &flowtable->ops[i]); | 5139 | nf_unregister_net_hook(net, &flowtable->ops[k]); |
5124 | 5140 | ||
5125 | kfree(flowtable->ops); | 5141 | kfree(flowtable->ops); |
5126 | err3: | 5142 | err3: |
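The err4 change above (ops[i] becomes ops[k]) fixes a common unwind-loop slip: iterating with k but indexing with the outer counter i, so the rollback tears down the same entry repeatedly instead of the ones already registered. A minimal standalone illustration; unregister_hook() is a stand-in, not the nf_hook API:

#include <stdio.h>

static void unregister_hook(int idx)
{
	printf("unregister %d\n", idx);
}

int main(void)
{
	int i, k;

	/* Pretend registration failed at i == 2; roll back 1, then 0. */
	i = 2;
	for (k = i - 1; k >= 0; k--)
		unregister_hook(k);	/* buggy version passed i (always 2) */
	return 0;
}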
@@ -5145,6 +5161,11 @@ static int nf_tables_delflowtable(struct net *net, struct sock *nlsk, | |||
5145 | struct nft_table *table; | 5161 | struct nft_table *table; |
5146 | struct nft_ctx ctx; | 5162 | struct nft_ctx ctx; |
5147 | 5163 | ||
5164 | if (!nla[NFTA_FLOWTABLE_TABLE] || | ||
5165 | (!nla[NFTA_FLOWTABLE_NAME] && | ||
5166 | !nla[NFTA_FLOWTABLE_HANDLE])) | ||
5167 | return -EINVAL; | ||
5168 | |||
5148 | table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], | 5169 | table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], |
5149 | family, genmask); | 5170 | family, genmask); |
5150 | if (IS_ERR(table)) | 5171 | if (IS_ERR(table)) |
@@ -5402,6 +5423,7 @@ err: | |||
5402 | static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable) | 5423 | static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable) |
5403 | { | 5424 | { |
5404 | cancel_delayed_work_sync(&flowtable->data.gc_work); | 5425 | cancel_delayed_work_sync(&flowtable->data.gc_work); |
5426 | kfree(flowtable->ops); | ||
5405 | kfree(flowtable->name); | 5427 | kfree(flowtable->name); |
5406 | flowtable->data.type->free(&flowtable->data); | 5428 | flowtable->data.type->free(&flowtable->data); |
5407 | rhashtable_destroy(&flowtable->data.rhashtable); | 5429 | rhashtable_destroy(&flowtable->data.rhashtable); |
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 3f1624ee056f..d40591fe1b2f 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c | |||
@@ -674,7 +674,7 @@ static const struct nft_set_ops * | |||
674 | nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc, | 674 | nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc, |
675 | u32 flags) | 675 | u32 flags) |
676 | { | 676 | { |
677 | if (desc->size) { | 677 | if (desc->size && !(flags & NFT_SET_TIMEOUT)) { |
678 | switch (desc->klen) { | 678 | switch (desc->klen) { |
679 | case 4: | 679 | case 4: |
680 | return &nft_hash_fast_ops; | 680 | return &nft_hash_fast_ops; |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 2f685ee1f9c8..4aa01c90e9d1 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -423,6 +423,36 @@ textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto) | |||
423 | return buf; | 423 | return buf; |
424 | } | 424 | } |
425 | 425 | ||
426 | /** | ||
427 | * xt_check_proc_name - check that name is suitable for /proc file creation | ||
428 | * | ||
429 | * @name: file name candidate | ||
430 | * @size: length of buffer | ||
431 | * | ||
432 | * some x_tables modules wish to create a file in /proc. | ||
433 | * This function makes sure that the name is suitable for this | ||
434 | * purpose, it checks that name is NUL terminated and isn't a 'special' | ||
435 | * name, like "..". | ||
436 | * | ||
437 | * returns negative number on error or 0 if name is useable. | ||
438 | */ | ||
439 | int xt_check_proc_name(const char *name, unsigned int size) | ||
440 | { | ||
441 | if (name[0] == '\0') | ||
442 | return -EINVAL; | ||
443 | |||
444 | if (strnlen(name, size) == size) | ||
445 | return -ENAMETOOLONG; | ||
446 | |||
447 | if (strcmp(name, ".") == 0 || | ||
448 | strcmp(name, "..") == 0 || | ||
449 | strchr(name, '/')) | ||
450 | return -EINVAL; | ||
451 | |||
452 | return 0; | ||
453 | } | ||
454 | EXPORT_SYMBOL(xt_check_proc_name); | ||
455 | |||
426 | int xt_check_match(struct xt_mtchk_param *par, | 456 | int xt_check_match(struct xt_mtchk_param *par, |
427 | unsigned int size, u_int8_t proto, bool inv_proto) | 457 | unsigned int size, u_int8_t proto, bool inv_proto) |
428 | { | 458 | { |
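The new xt_check_proc_name() helper above rejects empty, unterminated, "." / ".." and slash-containing names before they are used to create /proc entries. A userspace restatement of the same checks with a few sample inputs; this mirrors the logic shown in the hunk and is not the exported kernel symbol:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int check_proc_name(const char *name, unsigned int size)
{
	if (name[0] == '\0')
		return -EINVAL;
	if (strnlen(name, size) == size)	/* no NUL within the buffer */
		return -ENAMETOOLONG;
	if (strcmp(name, ".") == 0 ||
	    strcmp(name, "..") == 0 ||
	    strchr(name, '/'))
		return -EINVAL;
	return 0;
}

int main(void)
{
	const char *names[] = { "myset", "..", "a/b", "" };

	for (unsigned int i = 0; i < 4; i++)
		printf("%-6s -> %d\n", names[i], check_proc_name(names[i], 32));
	return 0;
}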
@@ -434,36 +464,35 @@ int xt_check_match(struct xt_mtchk_param *par, | |||
434 | * ebt_among is exempt from centralized matchsize checking | 464 | * ebt_among is exempt from centralized matchsize checking |
435 | * because it uses a dynamic-size data set. | 465 | * because it uses a dynamic-size data set. |
436 | */ | 466 | */ |
437 | pr_err("%s_tables: %s.%u match: invalid size " | 467 | pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n", |
438 | "%u (kernel) != (user) %u\n", | 468 | xt_prefix[par->family], par->match->name, |
439 | xt_prefix[par->family], par->match->name, | 469 | par->match->revision, |
440 | par->match->revision, | 470 | XT_ALIGN(par->match->matchsize), size); |
441 | XT_ALIGN(par->match->matchsize), size); | ||
442 | return -EINVAL; | 471 | return -EINVAL; |
443 | } | 472 | } |
444 | if (par->match->table != NULL && | 473 | if (par->match->table != NULL && |
445 | strcmp(par->match->table, par->table) != 0) { | 474 | strcmp(par->match->table, par->table) != 0) { |
446 | pr_err("%s_tables: %s match: only valid in %s table, not %s\n", | 475 | pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n", |
447 | xt_prefix[par->family], par->match->name, | 476 | xt_prefix[par->family], par->match->name, |
448 | par->match->table, par->table); | 477 | par->match->table, par->table); |
449 | return -EINVAL; | 478 | return -EINVAL; |
450 | } | 479 | } |
451 | if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) { | 480 | if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) { |
452 | char used[64], allow[64]; | 481 | char used[64], allow[64]; |
453 | 482 | ||
454 | pr_err("%s_tables: %s match: used from hooks %s, but only " | 483 | pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n", |
455 | "valid from %s\n", | 484 | xt_prefix[par->family], par->match->name, |
456 | xt_prefix[par->family], par->match->name, | 485 | textify_hooks(used, sizeof(used), |
457 | textify_hooks(used, sizeof(used), par->hook_mask, | 486 | par->hook_mask, par->family), |
458 | par->family), | 487 | textify_hooks(allow, sizeof(allow), |
459 | textify_hooks(allow, sizeof(allow), par->match->hooks, | 488 | par->match->hooks, |
460 | par->family)); | 489 | par->family)); |
461 | return -EINVAL; | 490 | return -EINVAL; |
462 | } | 491 | } |
463 | if (par->match->proto && (par->match->proto != proto || inv_proto)) { | 492 | if (par->match->proto && (par->match->proto != proto || inv_proto)) { |
464 | pr_err("%s_tables: %s match: only valid for protocol %u\n", | 493 | pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n", |
465 | xt_prefix[par->family], par->match->name, | 494 | xt_prefix[par->family], par->match->name, |
466 | par->match->proto); | 495 | par->match->proto); |
467 | return -EINVAL; | 496 | return -EINVAL; |
468 | } | 497 | } |
469 | if (par->match->checkentry != NULL) { | 498 | if (par->match->checkentry != NULL) { |
@@ -814,36 +843,35 @@ int xt_check_target(struct xt_tgchk_param *par, | |||
814 | int ret; | 843 | int ret; |
815 | 844 | ||
816 | if (XT_ALIGN(par->target->targetsize) != size) { | 845 | if (XT_ALIGN(par->target->targetsize) != size) { |
817 | pr_err("%s_tables: %s.%u target: invalid size " | 846 | pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n", |
818 | "%u (kernel) != (user) %u\n", | 847 | xt_prefix[par->family], par->target->name, |
819 | xt_prefix[par->family], par->target->name, | 848 | par->target->revision, |
820 | par->target->revision, | 849 | XT_ALIGN(par->target->targetsize), size); |
821 | XT_ALIGN(par->target->targetsize), size); | ||
822 | return -EINVAL; | 850 | return -EINVAL; |
823 | } | 851 | } |
824 | if (par->target->table != NULL && | 852 | if (par->target->table != NULL && |
825 | strcmp(par->target->table, par->table) != 0) { | 853 | strcmp(par->target->table, par->table) != 0) { |
826 | pr_err("%s_tables: %s target: only valid in %s table, not %s\n", | 854 | pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n", |
827 | xt_prefix[par->family], par->target->name, | 855 | xt_prefix[par->family], par->target->name, |
828 | par->target->table, par->table); | 856 | par->target->table, par->table); |
829 | return -EINVAL; | 857 | return -EINVAL; |
830 | } | 858 | } |
831 | if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) { | 859 | if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) { |
832 | char used[64], allow[64]; | 860 | char used[64], allow[64]; |
833 | 861 | ||
834 | pr_err("%s_tables: %s target: used from hooks %s, but only " | 862 | pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n", |
835 | "usable from %s\n", | 863 | xt_prefix[par->family], par->target->name, |
836 | xt_prefix[par->family], par->target->name, | 864 | textify_hooks(used, sizeof(used), |
837 | textify_hooks(used, sizeof(used), par->hook_mask, | 865 | par->hook_mask, par->family), |
838 | par->family), | 866 | textify_hooks(allow, sizeof(allow), |
839 | textify_hooks(allow, sizeof(allow), par->target->hooks, | 867 | par->target->hooks, |
840 | par->family)); | 868 | par->family)); |
841 | return -EINVAL; | 869 | return -EINVAL; |
842 | } | 870 | } |
843 | if (par->target->proto && (par->target->proto != proto || inv_proto)) { | 871 | if (par->target->proto && (par->target->proto != proto || inv_proto)) { |
844 | pr_err("%s_tables: %s target: only valid for protocol %u\n", | 872 | pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n", |
845 | xt_prefix[par->family], par->target->name, | 873 | xt_prefix[par->family], par->target->name, |
846 | par->target->proto); | 874 | par->target->proto); |
847 | return -EINVAL; | 875 | return -EINVAL; |
848 | } | 876 | } |
849 | if (par->target->checkentry != NULL) { | 877 | if (par->target->checkentry != NULL) { |
@@ -1004,10 +1032,6 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size) | |||
1004 | if (sz < sizeof(*info)) | 1032 | if (sz < sizeof(*info)) |
1005 | return NULL; | 1033 | return NULL; |
1006 | 1034 | ||
1007 | /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */ | ||
1008 | if ((size >> PAGE_SHIFT) + 2 > totalram_pages) | ||
1009 | return NULL; | ||
1010 | |||
1011 | /* __GFP_NORETRY is not fully supported by kvmalloc but it should | 1035 | /* __GFP_NORETRY is not fully supported by kvmalloc but it should |
1012 | * work reasonably well if sz is too large and bail out rather | 1036 | * work reasonably well if sz is too large and bail out rather |
1013 | * than shoot all processes down before realizing there is nothing | 1037 | * than shoot all processes down before realizing there is nothing |
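
The xt_check_match() and xt_check_target() hunks above switch every validation failure that userspace can trigger from pr_err()/pr_info() to the rate-limited printk variants, so a buggy or hostile ruleset cannot flood the kernel log, and xt_alloc_table_info() no longer pre-checks the request size against totalram_pages. A minimal sketch of the logging pattern, using a hypothetical example_tg_check() that is not part of this diff:

#include <linux/printk.h>
#include <linux/errno.h>

/* Hypothetical checkentry routine; the names and limit are illustrative only. */
static int example_tg_check(unsigned int mode, unsigned int max_mode)
{
	if (mode > max_mode) {
		/* _ratelimited: repeated bad rules cannot spam the kernel log */
		pr_info_ratelimited("invalid mode %u, maximum is %u\n",
				    mode, max_mode);
		return -EINVAL;
	}
	return 0;
}
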
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c index c502419d6306..f368ee6741db 100644 --- a/net/netfilter/xt_AUDIT.c +++ b/net/netfilter/xt_AUDIT.c | |||
@@ -120,8 +120,8 @@ static int audit_tg_check(const struct xt_tgchk_param *par) | |||
120 | const struct xt_audit_info *info = par->targinfo; | 120 | const struct xt_audit_info *info = par->targinfo; |
121 | 121 | ||
122 | if (info->type > XT_AUDIT_TYPE_MAX) { | 122 | if (info->type > XT_AUDIT_TYPE_MAX) { |
123 | pr_info("Audit type out of range (valid range: 0..%hhu)\n", | 123 | pr_info_ratelimited("Audit type out of range (valid range: 0..%hhu)\n", |
124 | XT_AUDIT_TYPE_MAX); | 124 | XT_AUDIT_TYPE_MAX); |
125 | return -ERANGE; | 125 | return -ERANGE; |
126 | } | 126 | } |
127 | 127 | ||
diff --git a/net/netfilter/xt_CHECKSUM.c b/net/netfilter/xt_CHECKSUM.c index 0f642ef8cd26..9f4151ec3e06 100644 --- a/net/netfilter/xt_CHECKSUM.c +++ b/net/netfilter/xt_CHECKSUM.c | |||
@@ -36,13 +36,13 @@ static int checksum_tg_check(const struct xt_tgchk_param *par) | |||
36 | const struct xt_CHECKSUM_info *einfo = par->targinfo; | 36 | const struct xt_CHECKSUM_info *einfo = par->targinfo; |
37 | 37 | ||
38 | if (einfo->operation & ~XT_CHECKSUM_OP_FILL) { | 38 | if (einfo->operation & ~XT_CHECKSUM_OP_FILL) { |
39 | pr_info("unsupported CHECKSUM operation %x\n", einfo->operation); | 39 | pr_info_ratelimited("unsupported CHECKSUM operation %x\n", |
40 | einfo->operation); | ||
40 | return -EINVAL; | 41 | return -EINVAL; |
41 | } | 42 | } |
42 | if (!einfo->operation) { | 43 | if (!einfo->operation) |
43 | pr_info("no CHECKSUM operation enabled\n"); | ||
44 | return -EINVAL; | 44 | return -EINVAL; |
45 | } | 45 | |
46 | return 0; | 46 | return 0; |
47 | } | 47 | } |
48 | 48 | ||
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c index da56c06a443c..f3f1caac949b 100644 --- a/net/netfilter/xt_CONNSECMARK.c +++ b/net/netfilter/xt_CONNSECMARK.c | |||
@@ -91,8 +91,8 @@ static int connsecmark_tg_check(const struct xt_tgchk_param *par) | |||
91 | 91 | ||
92 | if (strcmp(par->table, "mangle") != 0 && | 92 | if (strcmp(par->table, "mangle") != 0 && |
93 | strcmp(par->table, "security") != 0) { | 93 | strcmp(par->table, "security") != 0) { |
94 | pr_info("target only valid in the \'mangle\' " | 94 | pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n", |
95 | "or \'security\' tables, not \'%s\'.\n", par->table); | 95 | par->table); |
96 | return -EINVAL; | 96 | return -EINVAL; |
97 | } | 97 | } |
98 | 98 | ||
@@ -102,14 +102,14 @@ static int connsecmark_tg_check(const struct xt_tgchk_param *par) | |||
102 | break; | 102 | break; |
103 | 103 | ||
104 | default: | 104 | default: |
105 | pr_info("invalid mode: %hu\n", info->mode); | 105 | pr_info_ratelimited("invalid mode: %hu\n", info->mode); |
106 | return -EINVAL; | 106 | return -EINVAL; |
107 | } | 107 | } |
108 | 108 | ||
109 | ret = nf_ct_netns_get(par->net, par->family); | 109 | ret = nf_ct_netns_get(par->net, par->family); |
110 | if (ret < 0) | 110 | if (ret < 0) |
111 | pr_info("cannot load conntrack support for proto=%u\n", | 111 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", |
112 | par->family); | 112 | par->family); |
113 | return ret; | 113 | return ret; |
114 | } | 114 | } |
115 | 115 | ||
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 5a152e2acfd5..8790190c6feb 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c | |||
@@ -82,15 +82,14 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name, | |||
82 | 82 | ||
83 | proto = xt_ct_find_proto(par); | 83 | proto = xt_ct_find_proto(par); |
84 | if (!proto) { | 84 | if (!proto) { |
85 | pr_info("You must specify a L4 protocol, and not use " | 85 | pr_info_ratelimited("You must specify a L4 protocol and not use inversions on it\n"); |
86 | "inversions on it.\n"); | ||
87 | return -ENOENT; | 86 | return -ENOENT; |
88 | } | 87 | } |
89 | 88 | ||
90 | helper = nf_conntrack_helper_try_module_get(helper_name, par->family, | 89 | helper = nf_conntrack_helper_try_module_get(helper_name, par->family, |
91 | proto); | 90 | proto); |
92 | if (helper == NULL) { | 91 | if (helper == NULL) { |
93 | pr_info("No such helper \"%s\"\n", helper_name); | 92 | pr_info_ratelimited("No such helper \"%s\"\n", helper_name); |
94 | return -ENOENT; | 93 | return -ENOENT; |
95 | } | 94 | } |
96 | 95 | ||
@@ -124,6 +123,7 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par, | |||
124 | const struct nf_conntrack_l4proto *l4proto; | 123 | const struct nf_conntrack_l4proto *l4proto; |
125 | struct ctnl_timeout *timeout; | 124 | struct ctnl_timeout *timeout; |
126 | struct nf_conn_timeout *timeout_ext; | 125 | struct nf_conn_timeout *timeout_ext; |
126 | const char *errmsg = NULL; | ||
127 | int ret = 0; | 127 | int ret = 0; |
128 | u8 proto; | 128 | u8 proto; |
129 | 129 | ||
@@ -131,29 +131,29 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par, | |||
131 | timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook); | 131 | timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook); |
132 | if (timeout_find_get == NULL) { | 132 | if (timeout_find_get == NULL) { |
133 | ret = -ENOENT; | 133 | ret = -ENOENT; |
134 | pr_info("Timeout policy base is empty\n"); | 134 | errmsg = "Timeout policy base is empty"; |
135 | goto out; | 135 | goto out; |
136 | } | 136 | } |
137 | 137 | ||
138 | proto = xt_ct_find_proto(par); | 138 | proto = xt_ct_find_proto(par); |
139 | if (!proto) { | 139 | if (!proto) { |
140 | ret = -EINVAL; | 140 | ret = -EINVAL; |
141 | pr_info("You must specify a L4 protocol, and not use " | 141 | errmsg = "You must specify a L4 protocol and not use inversions on it"; |
142 | "inversions on it.\n"); | ||
143 | goto out; | 142 | goto out; |
144 | } | 143 | } |
145 | 144 | ||
146 | timeout = timeout_find_get(par->net, timeout_name); | 145 | timeout = timeout_find_get(par->net, timeout_name); |
147 | if (timeout == NULL) { | 146 | if (timeout == NULL) { |
148 | ret = -ENOENT; | 147 | ret = -ENOENT; |
149 | pr_info("No such timeout policy \"%s\"\n", timeout_name); | 148 | pr_info_ratelimited("No such timeout policy \"%s\"\n", |
149 | timeout_name); | ||
150 | goto out; | 150 | goto out; |
151 | } | 151 | } |
152 | 152 | ||
153 | if (timeout->l3num != par->family) { | 153 | if (timeout->l3num != par->family) { |
154 | ret = -EINVAL; | 154 | ret = -EINVAL; |
155 | pr_info("Timeout policy `%s' can only be used by L3 protocol " | 155 | pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n", |
156 | "number %d\n", timeout_name, timeout->l3num); | 156 | timeout_name, 3, timeout->l3num); |
157 | goto err_put_timeout; | 157 | goto err_put_timeout; |
158 | } | 158 | } |
159 | /* Make sure the timeout policy matches any existing protocol tracker, | 159 | /* Make sure the timeout policy matches any existing protocol tracker, |
@@ -162,9 +162,8 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par, | |||
162 | l4proto = __nf_ct_l4proto_find(par->family, proto); | 162 | l4proto = __nf_ct_l4proto_find(par->family, proto); |
163 | if (timeout->l4proto->l4proto != l4proto->l4proto) { | 163 | if (timeout->l4proto->l4proto != l4proto->l4proto) { |
164 | ret = -EINVAL; | 164 | ret = -EINVAL; |
165 | pr_info("Timeout policy `%s' can only be used by L4 protocol " | 165 | pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n", |
166 | "number %d\n", | 166 | timeout_name, 4, timeout->l4proto->l4proto); |
167 | timeout_name, timeout->l4proto->l4proto); | ||
168 | goto err_put_timeout; | 167 | goto err_put_timeout; |
169 | } | 168 | } |
170 | timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC); | 169 | timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC); |
@@ -180,6 +179,8 @@ err_put_timeout: | |||
180 | __xt_ct_tg_timeout_put(timeout); | 179 | __xt_ct_tg_timeout_put(timeout); |
181 | out: | 180 | out: |
182 | rcu_read_unlock(); | 181 | rcu_read_unlock(); |
182 | if (errmsg) | ||
183 | pr_info_ratelimited("%s\n", errmsg); | ||
183 | return ret; | 184 | return ret; |
184 | #else | 185 | #else |
185 | return -EOPNOTSUPP; | 186 | return -EOPNOTSUPP; |
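
xt_ct_set_timeout() now records the failure text in a local errmsg pointer and prints it once, rate-limited, at the out: label after rcu_read_unlock(), rather than calling printk from each error branch under the RCU read lock. A rough sketch of that shape, with simplified boolean conditions standing in for the real hook and protocol lookups:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

/* Simplified stand-in for xt_ct_set_timeout(): defer the message until
 * after rcu_read_unlock(), then emit it exactly once. */
static int set_timeout_sketch(bool have_timeout_hook, bool have_l4proto)
{
	const char *errmsg = NULL;
	int ret = 0;

	rcu_read_lock();
	if (!have_timeout_hook) {
		ret = -ENOENT;
		errmsg = "Timeout policy base is empty";
		goto out;
	}
	if (!have_l4proto) {
		ret = -EINVAL;
		errmsg = "You must specify a L4 protocol and not use inversions on it";
		goto out;
	}
out:
	rcu_read_unlock();
	if (errmsg)
		pr_info_ratelimited("%s\n", errmsg);
	return ret;
}
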
diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c index 3f83d38c4e5b..098ed851b7a7 100644 --- a/net/netfilter/xt_DSCP.c +++ b/net/netfilter/xt_DSCP.c | |||
@@ -66,10 +66,8 @@ static int dscp_tg_check(const struct xt_tgchk_param *par) | |||
66 | { | 66 | { |
67 | const struct xt_DSCP_info *info = par->targinfo; | 67 | const struct xt_DSCP_info *info = par->targinfo; |
68 | 68 | ||
69 | if (info->dscp > XT_DSCP_MAX) { | 69 | if (info->dscp > XT_DSCP_MAX) |
70 | pr_info("dscp %x out of range\n", info->dscp); | ||
71 | return -EDOM; | 70 | return -EDOM; |
72 | } | ||
73 | return 0; | 71 | return 0; |
74 | } | 72 | } |
75 | 73 | ||
diff --git a/net/netfilter/xt_HL.c b/net/netfilter/xt_HL.c index 1535e87ed9bd..4653b071bed4 100644 --- a/net/netfilter/xt_HL.c +++ b/net/netfilter/xt_HL.c | |||
@@ -105,10 +105,8 @@ static int ttl_tg_check(const struct xt_tgchk_param *par) | |||
105 | { | 105 | { |
106 | const struct ipt_TTL_info *info = par->targinfo; | 106 | const struct ipt_TTL_info *info = par->targinfo; |
107 | 107 | ||
108 | if (info->mode > IPT_TTL_MAXMODE) { | 108 | if (info->mode > IPT_TTL_MAXMODE) |
109 | pr_info("TTL: invalid or unknown mode %u\n", info->mode); | ||
110 | return -EINVAL; | 109 | return -EINVAL; |
111 | } | ||
112 | if (info->mode != IPT_TTL_SET && info->ttl == 0) | 110 | if (info->mode != IPT_TTL_SET && info->ttl == 0) |
113 | return -EINVAL; | 111 | return -EINVAL; |
114 | return 0; | 112 | return 0; |
@@ -118,15 +116,10 @@ static int hl_tg6_check(const struct xt_tgchk_param *par) | |||
118 | { | 116 | { |
119 | const struct ip6t_HL_info *info = par->targinfo; | 117 | const struct ip6t_HL_info *info = par->targinfo; |
120 | 118 | ||
121 | if (info->mode > IP6T_HL_MAXMODE) { | 119 | if (info->mode > IP6T_HL_MAXMODE) |
122 | pr_info("invalid or unknown mode %u\n", info->mode); | ||
123 | return -EINVAL; | 120 | return -EINVAL; |
124 | } | 121 | if (info->mode != IP6T_HL_SET && info->hop_limit == 0) |
125 | if (info->mode != IP6T_HL_SET && info->hop_limit == 0) { | ||
126 | pr_info("increment/decrement does not " | ||
127 | "make sense with value 0\n"); | ||
128 | return -EINVAL; | 122 | return -EINVAL; |
129 | } | ||
130 | return 0; | 123 | return 0; |
131 | } | 124 | } |
132 | 125 | ||
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c index 60e6dbe12460..9c75f419cd80 100644 --- a/net/netfilter/xt_HMARK.c +++ b/net/netfilter/xt_HMARK.c | |||
@@ -9,6 +9,8 @@ | |||
9 | * the Free Software Foundation. | 9 | * the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
12 | #include <linux/module.h> | 14 | #include <linux/module.h> |
13 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
14 | #include <linux/icmp.h> | 16 | #include <linux/icmp.h> |
@@ -312,29 +314,30 @@ hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par) | |||
312 | static int hmark_tg_check(const struct xt_tgchk_param *par) | 314 | static int hmark_tg_check(const struct xt_tgchk_param *par) |
313 | { | 315 | { |
314 | const struct xt_hmark_info *info = par->targinfo; | 316 | const struct xt_hmark_info *info = par->targinfo; |
317 | const char *errmsg = "proto mask must be zero with L3 mode"; | ||
315 | 318 | ||
316 | if (!info->hmodulus) { | 319 | if (!info->hmodulus) |
317 | pr_info("xt_HMARK: hash modulus can't be zero\n"); | ||
318 | return -EINVAL; | 320 | return -EINVAL; |
319 | } | 321 | |
320 | if (info->proto_mask && | 322 | if (info->proto_mask && |
321 | (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))) { | 323 | (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))) |
322 | pr_info("xt_HMARK: proto mask must be zero with L3 mode\n"); | 324 | goto err; |
323 | return -EINVAL; | 325 | |
324 | } | ||
325 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) && | 326 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) && |
326 | (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) | | 327 | (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) | |
327 | XT_HMARK_FLAG(XT_HMARK_DPORT_MASK)))) { | 328 | XT_HMARK_FLAG(XT_HMARK_DPORT_MASK)))) |
328 | pr_info("xt_HMARK: spi-mask and port-mask can't be combined\n"); | ||
329 | return -EINVAL; | 329 | return -EINVAL; |
330 | } | 330 | |
331 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) && | 331 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) && |
332 | (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) | | 332 | (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) | |
333 | XT_HMARK_FLAG(XT_HMARK_DPORT)))) { | 333 | XT_HMARK_FLAG(XT_HMARK_DPORT)))) { |
334 | pr_info("xt_HMARK: spi-set and port-set can't be combined\n"); | 334 | errmsg = "spi-set and port-set can't be combined"; |
335 | return -EINVAL; | 335 | goto err; |
336 | } | 336 | } |
337 | return 0; | 337 | return 0; |
338 | err: | ||
339 | pr_info_ratelimited("%s\n", errmsg); | ||
340 | return -EINVAL; | ||
338 | } | 341 | } |
339 | 342 | ||
340 | static struct xt_target hmark_tg_reg[] __read_mostly = { | 343 | static struct xt_target hmark_tg_reg[] __read_mostly = { |
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c index 6c2482b709b1..1ac6600bfafd 100644 --- a/net/netfilter/xt_IDLETIMER.c +++ b/net/netfilter/xt_IDLETIMER.c | |||
@@ -146,11 +146,11 @@ static int idletimer_tg_create(struct idletimer_tg_info *info) | |||
146 | timer_setup(&info->timer->timer, idletimer_tg_expired, 0); | 146 | timer_setup(&info->timer->timer, idletimer_tg_expired, 0); |
147 | info->timer->refcnt = 1; | 147 | info->timer->refcnt = 1; |
148 | 148 | ||
149 | INIT_WORK(&info->timer->work, idletimer_tg_work); | ||
150 | |||
149 | mod_timer(&info->timer->timer, | 151 | mod_timer(&info->timer->timer, |
150 | msecs_to_jiffies(info->timeout * 1000) + jiffies); | 152 | msecs_to_jiffies(info->timeout * 1000) + jiffies); |
151 | 153 | ||
152 | INIT_WORK(&info->timer->work, idletimer_tg_work); | ||
153 | |||
154 | return 0; | 154 | return 0; |
155 | 155 | ||
156 | out_free_attr: | 156 | out_free_attr: |
@@ -191,7 +191,10 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par) | |||
191 | pr_debug("timeout value is zero\n"); | 191 | pr_debug("timeout value is zero\n"); |
192 | return -EINVAL; | 192 | return -EINVAL; |
193 | } | 193 | } |
194 | 194 | if (info->timeout >= INT_MAX / 1000) { | |
195 | pr_debug("timeout value is too big\n"); | ||
196 | return -EINVAL; | ||
197 | } | ||
195 | if (info->label[0] == '\0' || | 198 | if (info->label[0] == '\0' || |
196 | strnlen(info->label, | 199 | strnlen(info->label, |
197 | MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) { | 200 | MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) { |
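
The added idletimer check bounds info->timeout because the timer is armed with msecs_to_jiffies(info->timeout * 1000): with 32-bit arithmetic the product exceeds INT_MAX once the timeout reaches INT_MAX / 1000 (about 2147483 seconds), so such values are now rejected up front. A standalone plain-C sketch of the guard; the function names are illustrative, not from the kernel:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirror of the added bound: timeout (seconds) must convert to
 * milliseconds without overflowing a 32-bit signed int. */
static bool idletimer_timeout_ok(unsigned int timeout)
{
	return timeout < INT_MAX / 1000;  /* keeps timeout * 1000 below INT_MAX */
}

int main(void)
{
	/* 300 s is fine; 3,000,000 s would overflow the millisecond value */
	printf("%d %d\n", idletimer_timeout_ok(300), idletimer_timeout_ok(3000000));
	return 0;
}
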
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c index 1dcad893df78..19846445504d 100644 --- a/net/netfilter/xt_LED.c +++ b/net/netfilter/xt_LED.c | |||
@@ -111,10 +111,8 @@ static int led_tg_check(const struct xt_tgchk_param *par) | |||
111 | struct xt_led_info_internal *ledinternal; | 111 | struct xt_led_info_internal *ledinternal; |
112 | int err; | 112 | int err; |
113 | 113 | ||
114 | if (ledinfo->id[0] == '\0') { | 114 | if (ledinfo->id[0] == '\0') |
115 | pr_info("No 'id' parameter given.\n"); | ||
116 | return -EINVAL; | 115 | return -EINVAL; |
117 | } | ||
118 | 116 | ||
119 | mutex_lock(&xt_led_mutex); | 117 | mutex_lock(&xt_led_mutex); |
120 | 118 | ||
@@ -138,13 +136,14 @@ static int led_tg_check(const struct xt_tgchk_param *par) | |||
138 | 136 | ||
139 | err = led_trigger_register(&ledinternal->netfilter_led_trigger); | 137 | err = led_trigger_register(&ledinternal->netfilter_led_trigger); |
140 | if (err) { | 138 | if (err) { |
141 | pr_err("Trigger name is already in use.\n"); | 139 | pr_info_ratelimited("Trigger name is already in use.\n"); |
142 | goto exit_alloc; | 140 | goto exit_alloc; |
143 | } | 141 | } |
144 | 142 | ||
145 | /* See if we need to set up a timer */ | 143 | /* Since the ledinternal timer can be shared between multiple targets,
146 | if (ledinfo->delay > 0) | 144 | * always set it up, even if the current target does not need it |
147 | timer_setup(&ledinternal->timer, led_timeout_callback, 0); | 145 | */ |
146 | timer_setup(&ledinternal->timer, led_timeout_callback, 0); | ||
148 | 147 | ||
149 | list_add_tail(&ledinternal->list, &xt_led_triggers); | 148 | list_add_tail(&ledinternal->list, &xt_led_triggers); |
150 | 149 | ||
@@ -181,8 +180,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par) | |||
181 | 180 | ||
182 | list_del(&ledinternal->list); | 181 | list_del(&ledinternal->list); |
183 | 182 | ||
184 | if (ledinfo->delay > 0) | 183 | del_timer_sync(&ledinternal->timer); |
185 | del_timer_sync(&ledinternal->timer); | ||
186 | 184 | ||
187 | led_trigger_unregister(&ledinternal->netfilter_led_trigger); | 185 | led_trigger_unregister(&ledinternal->netfilter_led_trigger); |
188 | 186 | ||
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c index a360b99a958a..a9aca80a32ae 100644 --- a/net/netfilter/xt_NFQUEUE.c +++ b/net/netfilter/xt_NFQUEUE.c | |||
@@ -8,6 +8,8 @@ | |||
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
12 | |||
11 | #include <linux/module.h> | 13 | #include <linux/module.h> |
12 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
13 | 15 | ||
@@ -67,13 +69,13 @@ static int nfqueue_tg_check(const struct xt_tgchk_param *par) | |||
67 | init_hashrandom(&jhash_initval); | 69 | init_hashrandom(&jhash_initval); |
68 | 70 | ||
69 | if (info->queues_total == 0) { | 71 | if (info->queues_total == 0) { |
70 | pr_err("NFQUEUE: number of total queues is 0\n"); | 72 | pr_info_ratelimited("number of total queues is 0\n"); |
71 | return -EINVAL; | 73 | return -EINVAL; |
72 | } | 74 | } |
73 | maxid = info->queues_total - 1 + info->queuenum; | 75 | maxid = info->queues_total - 1 + info->queuenum; |
74 | if (maxid > 0xffff) { | 76 | if (maxid > 0xffff) { |
75 | pr_err("NFQUEUE: number of queues (%u) out of range (got %u)\n", | 77 | pr_info_ratelimited("number of queues (%u) out of range (got %u)\n", |
76 | info->queues_total, maxid); | 78 | info->queues_total, maxid); |
77 | return -ERANGE; | 79 | return -ERANGE; |
78 | } | 80 | } |
79 | if (par->target->revision == 2 && info->flags > 1) | 81 | if (par->target->revision == 2 && info->flags > 1) |
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c index 9faf5e050b79..4ad5fe27e08b 100644 --- a/net/netfilter/xt_SECMARK.c +++ b/net/netfilter/xt_SECMARK.c | |||
@@ -60,18 +60,20 @@ static int checkentry_lsm(struct xt_secmark_target_info *info) | |||
60 | &info->secid); | 60 | &info->secid); |
61 | if (err) { | 61 | if (err) { |
62 | if (err == -EINVAL) | 62 | if (err == -EINVAL) |
63 | pr_info("invalid security context \'%s\'\n", info->secctx); | 63 | pr_info_ratelimited("invalid security context \'%s\'\n", |
64 | info->secctx); | ||
64 | return err; | 65 | return err; |
65 | } | 66 | } |
66 | 67 | ||
67 | if (!info->secid) { | 68 | if (!info->secid) { |
68 | pr_info("unable to map security context \'%s\'\n", info->secctx); | 69 | pr_info_ratelimited("unable to map security context \'%s\'\n", |
70 | info->secctx); | ||
69 | return -ENOENT; | 71 | return -ENOENT; |
70 | } | 72 | } |
71 | 73 | ||
72 | err = security_secmark_relabel_packet(info->secid); | 74 | err = security_secmark_relabel_packet(info->secid); |
73 | if (err) { | 75 | if (err) { |
74 | pr_info("unable to obtain relabeling permission\n"); | 76 | pr_info_ratelimited("unable to obtain relabeling permission\n"); |
75 | return err; | 77 | return err; |
76 | } | 78 | } |
77 | 79 | ||
@@ -86,14 +88,14 @@ static int secmark_tg_check(const struct xt_tgchk_param *par) | |||
86 | 88 | ||
87 | if (strcmp(par->table, "mangle") != 0 && | 89 | if (strcmp(par->table, "mangle") != 0 && |
88 | strcmp(par->table, "security") != 0) { | 90 | strcmp(par->table, "security") != 0) { |
89 | pr_info("target only valid in the \'mangle\' " | 91 | pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n", |
90 | "or \'security\' tables, not \'%s\'.\n", par->table); | 92 | par->table); |
91 | return -EINVAL; | 93 | return -EINVAL; |
92 | } | 94 | } |
93 | 95 | ||
94 | if (mode && mode != info->mode) { | 96 | if (mode && mode != info->mode) { |
95 | pr_info("mode already set to %hu cannot mix with " | 97 | pr_info_ratelimited("mode already set to %hu cannot mix with rules for mode %hu\n", |
96 | "rules for mode %hu\n", mode, info->mode); | 98 | mode, info->mode); |
97 | return -EINVAL; | 99 | return -EINVAL; |
98 | } | 100 | } |
99 | 101 | ||
@@ -101,7 +103,7 @@ static int secmark_tg_check(const struct xt_tgchk_param *par) | |||
101 | case SECMARK_MODE_SEL: | 103 | case SECMARK_MODE_SEL: |
102 | break; | 104 | break; |
103 | default: | 105 | default: |
104 | pr_info("invalid mode: %hu\n", info->mode); | 106 | pr_info_ratelimited("invalid mode: %hu\n", info->mode); |
105 | return -EINVAL; | 107 | return -EINVAL; |
106 | } | 108 | } |
107 | 109 | ||
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 99bb8e410f22..98efb202f8b4 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c | |||
@@ -273,8 +273,7 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par) | |||
273 | (par->hook_mask & ~((1 << NF_INET_FORWARD) | | 273 | (par->hook_mask & ~((1 << NF_INET_FORWARD) | |
274 | (1 << NF_INET_LOCAL_OUT) | | 274 | (1 << NF_INET_LOCAL_OUT) | |
275 | (1 << NF_INET_POST_ROUTING))) != 0) { | 275 | (1 << NF_INET_POST_ROUTING))) != 0) { |
276 | pr_info("path-MTU clamping only supported in " | 276 | pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n"); |
277 | "FORWARD, OUTPUT and POSTROUTING hooks\n"); | ||
278 | return -EINVAL; | 277 | return -EINVAL; |
279 | } | 278 | } |
280 | if (par->nft_compat) | 279 | if (par->nft_compat) |
@@ -283,7 +282,7 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par) | |||
283 | xt_ematch_foreach(ematch, e) | 282 | xt_ematch_foreach(ematch, e) |
284 | if (find_syn_match(ematch)) | 283 | if (find_syn_match(ematch)) |
285 | return 0; | 284 | return 0; |
286 | pr_info("Only works on TCP SYN packets\n"); | 285 | pr_info_ratelimited("Only works on TCP SYN packets\n"); |
287 | return -EINVAL; | 286 | return -EINVAL; |
288 | } | 287 | } |
289 | 288 | ||
@@ -298,8 +297,7 @@ static int tcpmss_tg6_check(const struct xt_tgchk_param *par) | |||
298 | (par->hook_mask & ~((1 << NF_INET_FORWARD) | | 297 | (par->hook_mask & ~((1 << NF_INET_FORWARD) | |
299 | (1 << NF_INET_LOCAL_OUT) | | 298 | (1 << NF_INET_LOCAL_OUT) | |
300 | (1 << NF_INET_POST_ROUTING))) != 0) { | 299 | (1 << NF_INET_POST_ROUTING))) != 0) { |
301 | pr_info("path-MTU clamping only supported in " | 300 | pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n"); |
302 | "FORWARD, OUTPUT and POSTROUTING hooks\n"); | ||
303 | return -EINVAL; | 301 | return -EINVAL; |
304 | } | 302 | } |
305 | if (par->nft_compat) | 303 | if (par->nft_compat) |
@@ -308,7 +306,7 @@ static int tcpmss_tg6_check(const struct xt_tgchk_param *par) | |||
308 | xt_ematch_foreach(ematch, e) | 306 | xt_ematch_foreach(ematch, e) |
309 | if (find_syn_match(ematch)) | 307 | if (find_syn_match(ematch)) |
310 | return 0; | 308 | return 0; |
311 | pr_info("Only works on TCP SYN packets\n"); | 309 | pr_info_ratelimited("Only works on TCP SYN packets\n"); |
312 | return -EINVAL; | 310 | return -EINVAL; |
313 | } | 311 | } |
314 | #endif | 312 | #endif |
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 17d7705e3bd4..8c89323c06af 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c | |||
@@ -540,8 +540,7 @@ static int tproxy_tg6_check(const struct xt_tgchk_param *par) | |||
540 | !(i->invflags & IP6T_INV_PROTO)) | 540 | !(i->invflags & IP6T_INV_PROTO)) |
541 | return 0; | 541 | return 0; |
542 | 542 | ||
543 | pr_info("Can be used only in combination with " | 543 | pr_info_ratelimited("Can be used only with -p tcp or -p udp\n"); |
544 | "either -p tcp or -p udp\n"); | ||
545 | return -EINVAL; | 544 | return -EINVAL; |
546 | } | 545 | } |
547 | #endif | 546 | #endif |
@@ -559,8 +558,7 @@ static int tproxy_tg4_check(const struct xt_tgchk_param *par) | |||
559 | && !(i->invflags & IPT_INV_PROTO)) | 558 | && !(i->invflags & IPT_INV_PROTO)) |
560 | return 0; | 559 | return 0; |
561 | 560 | ||
562 | pr_info("Can be used only in combination with " | 561 | pr_info_ratelimited("Can be used only with -p tcp or -p udp\n"); |
563 | "either -p tcp or -p udp\n"); | ||
564 | return -EINVAL; | 562 | return -EINVAL; |
565 | } | 563 | } |
566 | 564 | ||
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c index 911a7c0da504..89e281b3bfc2 100644 --- a/net/netfilter/xt_addrtype.c +++ b/net/netfilter/xt_addrtype.c | |||
@@ -164,48 +164,47 @@ addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par) | |||
164 | 164 | ||
165 | static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par) | 165 | static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par) |
166 | { | 166 | { |
167 | const char *errmsg = "both incoming and outgoing interface limitation cannot be selected"; | ||
167 | struct xt_addrtype_info_v1 *info = par->matchinfo; | 168 | struct xt_addrtype_info_v1 *info = par->matchinfo; |
168 | 169 | ||
169 | if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN && | 170 | if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN && |
170 | info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) { | 171 | info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) |
171 | pr_info("both incoming and outgoing " | 172 | goto err; |
172 | "interface limitation cannot be selected\n"); | ||
173 | return -EINVAL; | ||
174 | } | ||
175 | 173 | ||
176 | if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) | | 174 | if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) | |
177 | (1 << NF_INET_LOCAL_IN)) && | 175 | (1 << NF_INET_LOCAL_IN)) && |
178 | info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) { | 176 | info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) { |
179 | pr_info("output interface limitation " | 177 | errmsg = "output interface limitation not valid in PREROUTING and INPUT"; |
180 | "not valid in PREROUTING and INPUT\n"); | 178 | goto err; |
181 | return -EINVAL; | ||
182 | } | 179 | } |
183 | 180 | ||
184 | if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) | | 181 | if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) | |
185 | (1 << NF_INET_LOCAL_OUT)) && | 182 | (1 << NF_INET_LOCAL_OUT)) && |
186 | info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN) { | 183 | info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN) { |
187 | pr_info("input interface limitation " | 184 | errmsg = "input interface limitation not valid in POSTROUTING and OUTPUT"; |
188 | "not valid in POSTROUTING and OUTPUT\n"); | 185 | goto err; |
189 | return -EINVAL; | ||
190 | } | 186 | } |
191 | 187 | ||
192 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) | 188 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) |
193 | if (par->family == NFPROTO_IPV6) { | 189 | if (par->family == NFPROTO_IPV6) { |
194 | if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) { | 190 | if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) { |
195 | pr_err("ipv6 BLACKHOLE matching not supported\n"); | 191 | errmsg = "ipv6 BLACKHOLE matching not supported"; |
196 | return -EINVAL; | 192 | goto err; |
197 | } | 193 | } |
198 | if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) { | 194 | if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) { |
199 | pr_err("ipv6 PROHIBIT (THROW, NAT ..) matching not supported\n"); | 195 | errmsg = "ipv6 PROHIBIT (THROW, NAT ..) matching not supported"; |
200 | return -EINVAL; | 196 | goto err; |
201 | } | 197 | } |
202 | if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) { | 198 | if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) { |
203 | pr_err("ipv6 does not support BROADCAST matching\n"); | 199 | errmsg = "ipv6 does not support BROADCAST matching"; |
204 | return -EINVAL; | 200 | goto err; |
205 | } | 201 | } |
206 | } | 202 | } |
207 | #endif | 203 | #endif |
208 | return 0; | 204 | return 0; |
205 | err: | ||
206 | pr_info_ratelimited("%s\n", errmsg); | ||
207 | return -EINVAL; | ||
209 | } | 208 | } |
210 | 209 | ||
211 | static struct xt_match addrtype_mt_reg[] __read_mostly = { | 210 | static struct xt_match addrtype_mt_reg[] __read_mostly = { |
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c index 06b090d8e901..a2cf8a6236d6 100644 --- a/net/netfilter/xt_bpf.c +++ b/net/netfilter/xt_bpf.c | |||
@@ -7,6 +7,8 @@ | |||
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
11 | |||
10 | #include <linux/module.h> | 12 | #include <linux/module.h> |
11 | #include <linux/syscalls.h> | 13 | #include <linux/syscalls.h> |
12 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
@@ -34,7 +36,7 @@ static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len, | |||
34 | program.filter = insns; | 36 | program.filter = insns; |
35 | 37 | ||
36 | if (bpf_prog_create(ret, &program)) { | 38 | if (bpf_prog_create(ret, &program)) { |
37 | pr_info("bpf: check failed: parse error\n"); | 39 | pr_info_ratelimited("check failed: parse error\n"); |
38 | return -EINVAL; | 40 | return -EINVAL; |
39 | } | 41 | } |
40 | 42 | ||
diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c index 891f4e7e8ea7..7df2dece57d3 100644 --- a/net/netfilter/xt_cgroup.c +++ b/net/netfilter/xt_cgroup.c | |||
@@ -12,6 +12,8 @@ | |||
12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
16 | |||
15 | #include <linux/skbuff.h> | 17 | #include <linux/skbuff.h> |
16 | #include <linux/module.h> | 18 | #include <linux/module.h> |
17 | #include <linux/netfilter/x_tables.h> | 19 | #include <linux/netfilter/x_tables.h> |
@@ -48,7 +50,7 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par) | |||
48 | } | 50 | } |
49 | 51 | ||
50 | if (info->has_path && info->has_classid) { | 52 | if (info->has_path && info->has_classid) { |
51 | pr_info("xt_cgroup: both path and classid specified\n"); | 53 | pr_info_ratelimited("path and classid specified\n"); |
52 | return -EINVAL; | 54 | return -EINVAL; |
53 | } | 55 | } |
54 | 56 | ||
@@ -56,8 +58,8 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par) | |||
56 | if (info->has_path) { | 58 | if (info->has_path) { |
57 | cgrp = cgroup_get_from_path(info->path); | 59 | cgrp = cgroup_get_from_path(info->path); |
58 | if (IS_ERR(cgrp)) { | 60 | if (IS_ERR(cgrp)) { |
59 | pr_info("xt_cgroup: invalid path, errno=%ld\n", | 61 | pr_info_ratelimited("invalid path, errno=%ld\n", |
60 | PTR_ERR(cgrp)); | 62 | PTR_ERR(cgrp)); |
61 | return -EINVAL; | 63 | return -EINVAL; |
62 | } | 64 | } |
63 | info->priv = cgrp; | 65 | info->priv = cgrp; |
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c index 57ef175dfbfa..0068688995c8 100644 --- a/net/netfilter/xt_cluster.c +++ b/net/netfilter/xt_cluster.c | |||
@@ -135,14 +135,12 @@ static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) | |||
135 | struct xt_cluster_match_info *info = par->matchinfo; | 135 | struct xt_cluster_match_info *info = par->matchinfo; |
136 | 136 | ||
137 | if (info->total_nodes > XT_CLUSTER_NODES_MAX) { | 137 | if (info->total_nodes > XT_CLUSTER_NODES_MAX) { |
138 | pr_info("you have exceeded the maximum " | 138 | pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n", |
139 | "number of cluster nodes (%u > %u)\n", | 139 | info->total_nodes, XT_CLUSTER_NODES_MAX); |
140 | info->total_nodes, XT_CLUSTER_NODES_MAX); | ||
141 | return -EINVAL; | 140 | return -EINVAL; |
142 | } | 141 | } |
143 | if (info->node_mask >= (1ULL << info->total_nodes)) { | 142 | if (info->node_mask >= (1ULL << info->total_nodes)) { |
144 | pr_info("this node mask cannot be " | 143 | pr_info_ratelimited("node mask cannot exceed total number of nodes\n"); |
145 | "higher than the total number of nodes\n"); | ||
146 | return -EDOM; | 144 | return -EDOM; |
147 | } | 145 | } |
148 | return 0; | 146 | return 0; |
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c index cad0b7b5eb35..93cb018c3055 100644 --- a/net/netfilter/xt_connbytes.c +++ b/net/netfilter/xt_connbytes.c | |||
@@ -112,8 +112,8 @@ static int connbytes_mt_check(const struct xt_mtchk_param *par) | |||
112 | 112 | ||
113 | ret = nf_ct_netns_get(par->net, par->family); | 113 | ret = nf_ct_netns_get(par->net, par->family); |
114 | if (ret < 0) | 114 | if (ret < 0) |
115 | pr_info("cannot load conntrack support for proto=%u\n", | 115 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", |
116 | par->family); | 116 | par->family); |
117 | 117 | ||
118 | /* | 118 | /* |
119 | * This filter cannot function correctly unless connection tracking | 119 | * This filter cannot function correctly unless connection tracking |
diff --git a/net/netfilter/xt_connlabel.c b/net/netfilter/xt_connlabel.c index 23372879e6e3..4fa4efd24353 100644 --- a/net/netfilter/xt_connlabel.c +++ b/net/netfilter/xt_connlabel.c | |||
@@ -57,14 +57,15 @@ static int connlabel_mt_check(const struct xt_mtchk_param *par) | |||
57 | int ret; | 57 | int ret; |
58 | 58 | ||
59 | if (info->options & ~options) { | 59 | if (info->options & ~options) { |
60 | pr_err("Unknown options in mask %x\n", info->options); | 60 | pr_info_ratelimited("Unknown options in mask %x\n", |
61 | info->options); | ||
61 | return -EINVAL; | 62 | return -EINVAL; |
62 | } | 63 | } |
63 | 64 | ||
64 | ret = nf_ct_netns_get(par->net, par->family); | 65 | ret = nf_ct_netns_get(par->net, par->family); |
65 | if (ret < 0) { | 66 | if (ret < 0) { |
66 | pr_info("cannot load conntrack support for proto=%u\n", | 67 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", |
67 | par->family); | 68 | par->family); |
68 | return ret; | 69 | return ret; |
69 | } | 70 | } |
70 | 71 | ||
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index ec377cc6a369..809639ce6f5a 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c | |||
@@ -79,8 +79,8 @@ static int connmark_tg_check(const struct xt_tgchk_param *par) | |||
79 | 79 | ||
80 | ret = nf_ct_netns_get(par->net, par->family); | 80 | ret = nf_ct_netns_get(par->net, par->family); |
81 | if (ret < 0) | 81 | if (ret < 0) |
82 | pr_info("cannot load conntrack support for proto=%u\n", | 82 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", |
83 | par->family); | 83 | par->family); |
84 | return ret; | 84 | return ret; |
85 | } | 85 | } |
86 | 86 | ||
@@ -109,8 +109,8 @@ static int connmark_mt_check(const struct xt_mtchk_param *par) | |||
109 | 109 | ||
110 | ret = nf_ct_netns_get(par->net, par->family); | 110 | ret = nf_ct_netns_get(par->net, par->family); |
111 | if (ret < 0) | 111 | if (ret < 0) |
112 | pr_info("cannot load conntrack support for proto=%u\n", | 112 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", |
113 | par->family); | 113 | par->family); |
114 | return ret; | 114 | return ret; |
115 | } | 115 | } |
116 | 116 | ||
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c index 39cf1d019240..df80fe7d391c 100644 --- a/net/netfilter/xt_conntrack.c +++ b/net/netfilter/xt_conntrack.c | |||
@@ -272,8 +272,8 @@ static int conntrack_mt_check(const struct xt_mtchk_param *par) | |||
272 | 272 | ||
273 | ret = nf_ct_netns_get(par->net, par->family); | 273 | ret = nf_ct_netns_get(par->net, par->family); |
274 | if (ret < 0) | 274 | if (ret < 0) |
275 | pr_info("cannot load conntrack support for proto=%u\n", | 275 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", |
276 | par->family); | 276 | par->family); |
277 | return ret; | 277 | return ret; |
278 | } | 278 | } |
279 | 279 | ||
diff --git a/net/netfilter/xt_dscp.c b/net/netfilter/xt_dscp.c index 236ac8008909..a4c2b862f820 100644 --- a/net/netfilter/xt_dscp.c +++ b/net/netfilter/xt_dscp.c | |||
@@ -46,10 +46,8 @@ static int dscp_mt_check(const struct xt_mtchk_param *par) | |||
46 | { | 46 | { |
47 | const struct xt_dscp_info *info = par->matchinfo; | 47 | const struct xt_dscp_info *info = par->matchinfo; |
48 | 48 | ||
49 | if (info->dscp > XT_DSCP_MAX) { | 49 | if (info->dscp > XT_DSCP_MAX) |
50 | pr_info("dscp %x out of range\n", info->dscp); | ||
51 | return -EDOM; | 50 | return -EDOM; |
52 | } | ||
53 | 51 | ||
54 | return 0; | 52 | return 0; |
55 | } | 53 | } |
diff --git a/net/netfilter/xt_ecn.c b/net/netfilter/xt_ecn.c index 3c831a8efebc..c7ad4afa5fb8 100644 --- a/net/netfilter/xt_ecn.c +++ b/net/netfilter/xt_ecn.c | |||
@@ -97,7 +97,7 @@ static int ecn_mt_check4(const struct xt_mtchk_param *par) | |||
97 | 97 | ||
98 | if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && | 98 | if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && |
99 | (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) { | 99 | (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) { |
100 | pr_info("cannot match TCP bits in rule for non-tcp packets\n"); | 100 | pr_info_ratelimited("cannot match TCP bits for non-tcp packets\n"); |
101 | return -EINVAL; | 101 | return -EINVAL; |
102 | } | 102 | } |
103 | 103 | ||
@@ -139,7 +139,7 @@ static int ecn_mt_check6(const struct xt_mtchk_param *par) | |||
139 | 139 | ||
140 | if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && | 140 | if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && |
141 | (ip->proto != IPPROTO_TCP || ip->invflags & IP6T_INV_PROTO)) { | 141 | (ip->proto != IPPROTO_TCP || ip->invflags & IP6T_INV_PROTO)) { |
142 | pr_info("cannot match TCP bits in rule for non-tcp packets\n"); | 142 | pr_info_ratelimited("cannot match TCP bits for non-tcp packets\n"); |
143 | return -EINVAL; | 143 | return -EINVAL; |
144 | } | 144 | } |
145 | 145 | ||
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index ca6847403ca2..3360f13dc208 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
@@ -523,7 +523,8 @@ static u64 user2rate(u64 user) | |||
523 | if (user != 0) { | 523 | if (user != 0) { |
524 | return div64_u64(XT_HASHLIMIT_SCALE_v2, user); | 524 | return div64_u64(XT_HASHLIMIT_SCALE_v2, user); |
525 | } else { | 525 | } else { |
526 | pr_warn("invalid rate from userspace: %llu\n", user); | 526 | pr_info_ratelimited("invalid rate from userspace: %llu\n", |
527 | user); | ||
527 | return 0; | 528 | return 0; |
528 | } | 529 | } |
529 | } | 530 | } |
@@ -774,7 +775,7 @@ hashlimit_mt_common(const struct sk_buff *skb, struct xt_action_param *par, | |||
774 | if (!dh->rateinfo.prev_window && | 775 | if (!dh->rateinfo.prev_window && |
775 | (dh->rateinfo.current_rate <= dh->rateinfo.burst)) { | 776 | (dh->rateinfo.current_rate <= dh->rateinfo.burst)) { |
776 | spin_unlock(&dh->lock); | 777 | spin_unlock(&dh->lock); |
777 | rcu_read_unlock_bh(); | 778 | local_bh_enable(); |
778 | return !(cfg->mode & XT_HASHLIMIT_INVERT); | 779 | return !(cfg->mode & XT_HASHLIMIT_INVERT); |
779 | } else { | 780 | } else { |
780 | goto overlimit; | 781 | goto overlimit; |
@@ -865,33 +866,34 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par, | |||
865 | } | 866 | } |
866 | 867 | ||
867 | if (cfg->mode & ~XT_HASHLIMIT_ALL) { | 868 | if (cfg->mode & ~XT_HASHLIMIT_ALL) { |
868 | pr_info("Unknown mode mask %X, kernel too old?\n", | 869 | pr_info_ratelimited("Unknown mode mask %X, kernel too old?\n", |
869 | cfg->mode); | 870 | cfg->mode); |
870 | return -EINVAL; | 871 | return -EINVAL; |
871 | } | 872 | } |
872 | 873 | ||
873 | /* Check for overflow. */ | 874 | /* Check for overflow. */ |
874 | if (revision >= 3 && cfg->mode & XT_HASHLIMIT_RATE_MATCH) { | 875 | if (revision >= 3 && cfg->mode & XT_HASHLIMIT_RATE_MATCH) { |
875 | if (cfg->avg == 0 || cfg->avg > U32_MAX) { | 876 | if (cfg->avg == 0 || cfg->avg > U32_MAX) { |
876 | pr_info("hashlimit invalid rate\n"); | 877 | pr_info_ratelimited("invalid rate\n"); |
877 | return -ERANGE; | 878 | return -ERANGE; |
878 | } | 879 | } |
879 | 880 | ||
880 | if (cfg->interval == 0) { | 881 | if (cfg->interval == 0) { |
881 | pr_info("hashlimit invalid interval\n"); | 882 | pr_info_ratelimited("invalid interval\n"); |
882 | return -EINVAL; | 883 | return -EINVAL; |
883 | } | 884 | } |
884 | } else if (cfg->mode & XT_HASHLIMIT_BYTES) { | 885 | } else if (cfg->mode & XT_HASHLIMIT_BYTES) { |
885 | if (user2credits_byte(cfg->avg) == 0) { | 886 | if (user2credits_byte(cfg->avg) == 0) { |
886 | pr_info("overflow, rate too high: %llu\n", cfg->avg); | 887 | pr_info_ratelimited("overflow, rate too high: %llu\n", |
888 | cfg->avg); | ||
887 | return -EINVAL; | 889 | return -EINVAL; |
888 | } | 890 | } |
889 | } else if (cfg->burst == 0 || | 891 | } else if (cfg->burst == 0 || |
890 | user2credits(cfg->avg * cfg->burst, revision) < | 892 | user2credits(cfg->avg * cfg->burst, revision) < |
891 | user2credits(cfg->avg, revision)) { | 893 | user2credits(cfg->avg, revision)) { |
892 | pr_info("overflow, try lower: %llu/%llu\n", | 894 | pr_info_ratelimited("overflow, try lower: %llu/%llu\n", |
893 | cfg->avg, cfg->burst); | 895 | cfg->avg, cfg->burst); |
894 | return -ERANGE; | 896 | return -ERANGE; |
895 | } | 897 | } |
896 | 898 | ||
897 | mutex_lock(&hashlimit_mutex); | 899 | mutex_lock(&hashlimit_mutex); |
@@ -915,8 +917,9 @@ static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par) | |||
915 | struct hashlimit_cfg3 cfg = {}; | 917 | struct hashlimit_cfg3 cfg = {}; |
916 | int ret; | 918 | int ret; |
917 | 919 | ||
918 | if (info->name[sizeof(info->name) - 1] != '\0') | 920 | ret = xt_check_proc_name(info->name, sizeof(info->name)); |
919 | return -EINVAL; | 921 | if (ret) |
922 | return ret; | ||
920 | 923 | ||
921 | ret = cfg_copy(&cfg, (void *)&info->cfg, 1); | 924 | ret = cfg_copy(&cfg, (void *)&info->cfg, 1); |
922 | 925 | ||
@@ -933,8 +936,9 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par) | |||
933 | struct hashlimit_cfg3 cfg = {}; | 936 | struct hashlimit_cfg3 cfg = {}; |
934 | int ret; | 937 | int ret; |
935 | 938 | ||
936 | if (info->name[sizeof(info->name) - 1] != '\0') | 939 | ret = xt_check_proc_name(info->name, sizeof(info->name)); |
937 | return -EINVAL; | 940 | if (ret) |
941 | return ret; | ||
938 | 942 | ||
939 | ret = cfg_copy(&cfg, (void *)&info->cfg, 2); | 943 | ret = cfg_copy(&cfg, (void *)&info->cfg, 2); |
940 | 944 | ||
@@ -948,9 +952,11 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par) | |||
948 | static int hashlimit_mt_check(const struct xt_mtchk_param *par) | 952 | static int hashlimit_mt_check(const struct xt_mtchk_param *par) |
949 | { | 953 | { |
950 | struct xt_hashlimit_mtinfo3 *info = par->matchinfo; | 954 | struct xt_hashlimit_mtinfo3 *info = par->matchinfo; |
955 | int ret; | ||
951 | 956 | ||
952 | if (info->name[sizeof(info->name) - 1] != '\0') | 957 | ret = xt_check_proc_name(info->name, sizeof(info->name)); |
953 | return -EINVAL; | 958 | if (ret) |
959 | return ret; | ||
954 | 960 | ||
955 | return hashlimit_mt_check_common(par, &info->hinfo, &info->cfg, | 961 | return hashlimit_mt_check_common(par, &info->hinfo, &info->cfg, |
956 | info->name, 3); | 962 | info->name, 3); |
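
All three hashlimit revisions now funnel the user-supplied /proc entry name through xt_check_proc_name() instead of only testing for NUL termination. The helper's body is not part of the hunks shown here; presumably it also rejects empty names and path-like names such as ".", ".." or anything containing '/', since the string becomes a procfs entry. A sketch under those assumptions:

#include <linux/errno.h>
#include <linux/string.h>

/* Sketch of what a /proc name validator plausibly enforces; the real
 * xt_check_proc_name() lives in net/netfilter/x_tables.c and its body
 * is not shown in these hunks. */
static int check_proc_name_sketch(const char *name, unsigned int size)
{
	if (strnlen(name, size) == size)
		return -ENAMETOOLONG;		/* not NUL-terminated */
	if (name[0] == '\0')
		return -EINVAL;			/* empty name */
	if (strchr(name, '/') ||
	    !strcmp(name, ".") || !strcmp(name, ".."))
		return -EINVAL;			/* unsafe as a procfs entry name */
	return 0;
}
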
diff --git a/net/netfilter/xt_helper.c b/net/netfilter/xt_helper.c index 38a78151c0e9..fd077aeaaed9 100644 --- a/net/netfilter/xt_helper.c +++ b/net/netfilter/xt_helper.c | |||
@@ -61,8 +61,8 @@ static int helper_mt_check(const struct xt_mtchk_param *par) | |||
61 | 61 | ||
62 | ret = nf_ct_netns_get(par->net, par->family); | 62 | ret = nf_ct_netns_get(par->net, par->family); |
63 | if (ret < 0) { | 63 | if (ret < 0) { |
64 | pr_info("cannot load conntrack support for proto=%u\n", | 64 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", |
65 | par->family); | 65 | par->family); |
66 | return ret; | 66 | return ret; |
67 | } | 67 | } |
68 | info->name[sizeof(info->name) - 1] = '\0'; | 68 | info->name[sizeof(info->name) - 1] = '\0'; |
diff --git a/net/netfilter/xt_ipcomp.c b/net/netfilter/xt_ipcomp.c index 7ca64a50db04..57f1df575701 100644 --- a/net/netfilter/xt_ipcomp.c +++ b/net/netfilter/xt_ipcomp.c | |||
@@ -72,7 +72,7 @@ static int comp_mt_check(const struct xt_mtchk_param *par) | |||
72 | 72 | ||
73 | /* Must specify no unknown invflags */ | 73 | /* Must specify no unknown invflags */ |
74 | if (compinfo->invflags & ~XT_IPCOMP_INV_MASK) { | 74 | if (compinfo->invflags & ~XT_IPCOMP_INV_MASK) { |
75 | pr_err("unknown flags %X\n", compinfo->invflags); | 75 | pr_info_ratelimited("unknown flags %X\n", compinfo->invflags); |
76 | return -EINVAL; | 76 | return -EINVAL; |
77 | } | 77 | } |
78 | return 0; | 78 | return 0; |
diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c index 42540d26c2b8..1d950a6100af 100644 --- a/net/netfilter/xt_ipvs.c +++ b/net/netfilter/xt_ipvs.c | |||
@@ -158,7 +158,8 @@ static int ipvs_mt_check(const struct xt_mtchk_param *par) | |||
158 | && par->family != NFPROTO_IPV6 | 158 | && par->family != NFPROTO_IPV6 |
159 | #endif | 159 | #endif |
160 | ) { | 160 | ) { |
161 | pr_info("protocol family %u not supported\n", par->family); | 161 | pr_info_ratelimited("protocol family %u not supported\n", |
162 | par->family); | ||
162 | return -EINVAL; | 163 | return -EINVAL; |
163 | } | 164 | } |
164 | 165 | ||
diff --git a/net/netfilter/xt_l2tp.c b/net/netfilter/xt_l2tp.c index 8aee572771f2..c43482bf48e6 100644 --- a/net/netfilter/xt_l2tp.c +++ b/net/netfilter/xt_l2tp.c | |||
@@ -216,7 +216,7 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par) | |||
216 | /* Check for invalid flags */ | 216 | /* Check for invalid flags */ |
217 | if (info->flags & ~(XT_L2TP_TID | XT_L2TP_SID | XT_L2TP_VERSION | | 217 | if (info->flags & ~(XT_L2TP_TID | XT_L2TP_SID | XT_L2TP_VERSION | |
218 | XT_L2TP_TYPE)) { | 218 | XT_L2TP_TYPE)) { |
219 | pr_info("unknown flags: %x\n", info->flags); | 219 | pr_info_ratelimited("unknown flags: %x\n", info->flags); |
220 | return -EINVAL; | 220 | return -EINVAL; |
221 | } | 221 | } |
222 | 222 | ||
@@ -225,7 +225,8 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par) | |||
225 | (!(info->flags & XT_L2TP_SID)) && | 225 | (!(info->flags & XT_L2TP_SID)) && |
226 | ((!(info->flags & XT_L2TP_TYPE)) || | 226 | ((!(info->flags & XT_L2TP_TYPE)) || |
227 | (info->type != XT_L2TP_TYPE_CONTROL))) { | 227 | (info->type != XT_L2TP_TYPE_CONTROL))) { |
228 | pr_info("invalid flags combination: %x\n", info->flags); | 228 | pr_info_ratelimited("invalid flags combination: %x\n", |
229 | info->flags); | ||
229 | return -EINVAL; | 230 | return -EINVAL; |
230 | } | 231 | } |
231 | 232 | ||
@@ -234,19 +235,22 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par) | |||
234 | */ | 235 | */ |
235 | if (info->flags & XT_L2TP_VERSION) { | 236 | if (info->flags & XT_L2TP_VERSION) { |
236 | if ((info->version < 2) || (info->version > 3)) { | 237 | if ((info->version < 2) || (info->version > 3)) { |
237 | pr_info("wrong L2TP version: %u\n", info->version); | 238 | pr_info_ratelimited("wrong L2TP version: %u\n", |
239 | info->version); | ||
238 | return -EINVAL; | 240 | return -EINVAL; |
239 | } | 241 | } |
240 | 242 | ||
241 | if (info->version == 2) { | 243 | if (info->version == 2) { |
242 | if ((info->flags & XT_L2TP_TID) && | 244 | if ((info->flags & XT_L2TP_TID) && |
243 | (info->tid > 0xffff)) { | 245 | (info->tid > 0xffff)) { |
244 | pr_info("v2 tid > 0xffff: %u\n", info->tid); | 246 | pr_info_ratelimited("v2 tid > 0xffff: %u\n", |
247 | info->tid); | ||
245 | return -EINVAL; | 248 | return -EINVAL; |
246 | } | 249 | } |
247 | if ((info->flags & XT_L2TP_SID) && | 250 | if ((info->flags & XT_L2TP_SID) && |
248 | (info->sid > 0xffff)) { | 251 | (info->sid > 0xffff)) { |
249 | pr_info("v2 sid > 0xffff: %u\n", info->sid); | 252 | pr_info_ratelimited("v2 sid > 0xffff: %u\n", |
253 | info->sid); | ||
250 | return -EINVAL; | 254 | return -EINVAL; |
251 | } | 255 | } |
252 | } | 256 | } |
@@ -268,13 +272,13 @@ static int l2tp_mt_check4(const struct xt_mtchk_param *par) | |||
268 | 272 | ||
269 | if ((ip->proto != IPPROTO_UDP) && | 273 | if ((ip->proto != IPPROTO_UDP) && |
270 | (ip->proto != IPPROTO_L2TP)) { | 274 | (ip->proto != IPPROTO_L2TP)) { |
271 | pr_info("missing protocol rule (udp|l2tpip)\n"); | 275 | pr_info_ratelimited("missing protocol rule (udp|l2tpip)\n"); |
272 | return -EINVAL; | 276 | return -EINVAL; |
273 | } | 277 | } |
274 | 278 | ||
275 | if ((ip->proto == IPPROTO_L2TP) && | 279 | if ((ip->proto == IPPROTO_L2TP) && |
276 | (info->version == 2)) { | 280 | (info->version == 2)) { |
277 | pr_info("v2 doesn't support IP mode\n"); | 281 | pr_info_ratelimited("v2 doesn't support IP mode\n"); |
278 | return -EINVAL; | 282 | return -EINVAL; |
279 | } | 283 | } |
280 | 284 | ||
@@ -295,13 +299,13 @@ static int l2tp_mt_check6(const struct xt_mtchk_param *par) | |||
295 | 299 | ||
296 | if ((ip->proto != IPPROTO_UDP) && | 300 | if ((ip->proto != IPPROTO_UDP) && |
297 | (ip->proto != IPPROTO_L2TP)) { | 301 | (ip->proto != IPPROTO_L2TP)) { |
298 | pr_info("missing protocol rule (udp|l2tpip)\n"); | 302 | pr_info_ratelimited("missing protocol rule (udp|l2tpip)\n"); |
299 | return -EINVAL; | 303 | return -EINVAL; |
300 | } | 304 | } |
301 | 305 | ||
302 | if ((ip->proto == IPPROTO_L2TP) && | 306 | if ((ip->proto == IPPROTO_L2TP) && |
303 | (info->version == 2)) { | 307 | (info->version == 2)) { |
304 | pr_info("v2 doesn't support IP mode\n"); | 308 | pr_info_ratelimited("v2 doesn't support IP mode\n"); |
305 | return -EINVAL; | 309 | return -EINVAL; |
306 | } | 310 | } |
307 | 311 | ||
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c index 61403b77361c..55d18cd67635 100644 --- a/net/netfilter/xt_limit.c +++ b/net/netfilter/xt_limit.c | |||
@@ -106,8 +106,8 @@ static int limit_mt_check(const struct xt_mtchk_param *par) | |||
106 | /* Check for overflow. */ | 106 | /* Check for overflow. */ |
107 | if (r->burst == 0 | 107 | if (r->burst == 0 |
108 | || user2credits(r->avg * r->burst) < user2credits(r->avg)) { | 108 | || user2credits(r->avg * r->burst) < user2credits(r->avg)) { |
109 | pr_info("Overflow, try lower: %u/%u\n", | 109 | pr_info_ratelimited("Overflow, try lower: %u/%u\n", |
110 | r->avg, r->burst); | 110 | r->avg, r->burst); |
111 | return -ERANGE; | 111 | return -ERANGE; |
112 | } | 112 | } |
113 | 113 | ||
diff --git a/net/netfilter/xt_nat.c b/net/netfilter/xt_nat.c index 0fd14d1eb09d..bdb689cdc829 100644 --- a/net/netfilter/xt_nat.c +++ b/net/netfilter/xt_nat.c | |||
@@ -8,6 +8,8 @@ | |||
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
12 | |||
11 | #include <linux/module.h> | 13 | #include <linux/module.h> |
12 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
13 | #include <linux/netfilter.h> | 15 | #include <linux/netfilter.h> |
@@ -19,8 +21,7 @@ static int xt_nat_checkentry_v0(const struct xt_tgchk_param *par) | |||
19 | const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; | 21 | const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; |
20 | 22 | ||
21 | if (mr->rangesize != 1) { | 23 | if (mr->rangesize != 1) { |
22 | pr_info("%s: multiple ranges no longer supported\n", | 24 | pr_info_ratelimited("multiple ranges no longer supported\n"); |
23 | par->target->name); | ||
24 | return -EINVAL; | 25 | return -EINVAL; |
25 | } | 26 | } |
26 | return nf_ct_netns_get(par->net, par->family); | 27 | return nf_ct_netns_get(par->net, par->family); |
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c index 6f92d25590a8..c8674deed4eb 100644 --- a/net/netfilter/xt_nfacct.c +++ b/net/netfilter/xt_nfacct.c | |||
@@ -6,6 +6,8 @@ | |||
6 | * it under the terms of the GNU General Public License version 2 (or any | 6 | * it under the terms of the GNU General Public License version 2 (or any |
7 | * later at your option) as published by the Free Software Foundation. | 7 | * later at your option) as published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
10 | |||
9 | #include <linux/module.h> | 11 | #include <linux/module.h> |
10 | #include <linux/skbuff.h> | 12 | #include <linux/skbuff.h> |
11 | 13 | ||
@@ -39,8 +41,8 @@ nfacct_mt_checkentry(const struct xt_mtchk_param *par) | |||
39 | 41 | ||
40 | nfacct = nfnl_acct_find_get(par->net, info->name); | 42 | nfacct = nfnl_acct_find_get(par->net, info->name); |
41 | if (nfacct == NULL) { | 43 | if (nfacct == NULL) { |
42 | pr_info("xt_nfacct: accounting object with name `%s' " | 44 | pr_info_ratelimited("accounting object `%s' does not exists\n", |
43 | "does not exists\n", info->name); | 45 | info->name); |
44 | return -ENOENT; | 46 | return -ENOENT; |
45 | } | 47 | } |
46 | info->nfacct = nfacct; | 48 | info->nfacct = nfacct; |
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c index bb33598e4530..9d6d67b953ac 100644 --- a/net/netfilter/xt_physdev.c +++ b/net/netfilter/xt_physdev.c | |||
@@ -107,9 +107,7 @@ static int physdev_mt_check(const struct xt_mtchk_param *par) | |||
107 | info->invert & XT_PHYSDEV_OP_BRIDGED) && | 107 | info->invert & XT_PHYSDEV_OP_BRIDGED) && |
108 | par->hook_mask & ((1 << NF_INET_LOCAL_OUT) | | 108 | par->hook_mask & ((1 << NF_INET_LOCAL_OUT) | |
109 | (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) { | 109 | (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) { |
110 | pr_info("using --physdev-out and --physdev-is-out are only " | 110 | pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n"); |
111 | "supported in the FORWARD and POSTROUTING chains with " | ||
112 | "bridged traffic.\n"); | ||
113 | if (par->hook_mask & (1 << NF_INET_LOCAL_OUT)) | 111 | if (par->hook_mask & (1 << NF_INET_LOCAL_OUT)) |
114 | return -EINVAL; | 112 | return -EINVAL; |
115 | } | 113 | } |
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c index 5639fb03bdd9..13f8ccf946d6 100644 --- a/net/netfilter/xt_policy.c +++ b/net/netfilter/xt_policy.c | |||
@@ -132,26 +132,29 @@ policy_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
132 | static int policy_mt_check(const struct xt_mtchk_param *par) | 132 | static int policy_mt_check(const struct xt_mtchk_param *par) |
133 | { | 133 | { |
134 | const struct xt_policy_info *info = par->matchinfo; | 134 | const struct xt_policy_info *info = par->matchinfo; |
135 | const char *errmsg = "neither incoming nor outgoing policy selected"; | ||
136 | |||
137 | if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) | ||
138 | goto err; | ||
135 | 139 | ||
136 | if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) { | ||
137 | pr_info("neither incoming nor outgoing policy selected\n"); | ||
138 | return -EINVAL; | ||
139 | } | ||
140 | if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) | | 140 | if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) | |
141 | (1 << NF_INET_LOCAL_IN)) && info->flags & XT_POLICY_MATCH_OUT) { | 141 | (1 << NF_INET_LOCAL_IN)) && info->flags & XT_POLICY_MATCH_OUT) { |
142 | pr_info("output policy not valid in PREROUTING and INPUT\n"); | 142 | errmsg = "output policy not valid in PREROUTING and INPUT"; |
143 | return -EINVAL; | 143 | goto err; |
144 | } | 144 | } |
145 | if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) | | 145 | if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) | |
146 | (1 << NF_INET_LOCAL_OUT)) && info->flags & XT_POLICY_MATCH_IN) { | 146 | (1 << NF_INET_LOCAL_OUT)) && info->flags & XT_POLICY_MATCH_IN) { |
147 | pr_info("input policy not valid in POSTROUTING and OUTPUT\n"); | 147 | errmsg = "input policy not valid in POSTROUTING and OUTPUT"; |
148 | return -EINVAL; | 148 | goto err; |
149 | } | 149 | } |
150 | if (info->len > XT_POLICY_MAX_ELEM) { | 150 | if (info->len > XT_POLICY_MAX_ELEM) { |
151 | pr_info("too many policy elements\n"); | 151 | errmsg = "too many policy elements"; |
152 | return -EINVAL; | 152 | goto err; |
153 | } | 153 | } |
154 | return 0; | 154 | return 0; |
155 | err: | ||
156 | pr_info_ratelimited("%s\n", errmsg); | ||
157 | return -EINVAL; | ||
155 | } | 158 | } |
156 | 159 | ||
157 | static struct xt_match policy_mt_reg[] __read_mostly = { | 160 | static struct xt_match policy_mt_reg[] __read_mostly = { |
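The xt_policy conversion above collects every failure message into a single errmsg/goto err exit so the ratelimited print appears in exactly one place. A small sketch of the same shape, with invented flag semantics:

    #include <stdio.h>

    static int policy_check(unsigned int flags, unsigned int valid)
    {
        const char *errmsg = "neither incoming nor outgoing policy selected";

        if (!(flags & valid))
            goto err;
        if (flags & ~valid) {
            errmsg = "unknown policy flags";
            goto err;
        }
        return 0;
    err:
        fprintf(stderr, "%s\n", errmsg);
        return -1;
    }

    int main(void)
    {
        return policy_check(0x0, 0x3) ? 1 : 0;
    }

The default errmsg covers the first check, so that branch can jump straight to the label without assigning anything.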
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 245fa350a7a8..81ee1d6543b2 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c | |||
@@ -342,8 +342,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par, | |||
342 | net_get_random_once(&hash_rnd, sizeof(hash_rnd)); | 342 | net_get_random_once(&hash_rnd, sizeof(hash_rnd)); |
343 | 343 | ||
344 | if (info->check_set & ~XT_RECENT_VALID_FLAGS) { | 344 | if (info->check_set & ~XT_RECENT_VALID_FLAGS) { |
345 | pr_info("Unsupported user space flags (%08x)\n", | 345 | pr_info_ratelimited("Unsupported userspace flags (%08x)\n", |
346 | info->check_set); | 346 | info->check_set); |
347 | return -EINVAL; | 347 | return -EINVAL; |
348 | } | 348 | } |
349 | if (hweight8(info->check_set & | 349 | if (hweight8(info->check_set & |
@@ -357,13 +357,13 @@ static int recent_mt_check(const struct xt_mtchk_param *par, | |||
357 | if ((info->check_set & XT_RECENT_REAP) && !info->seconds) | 357 | if ((info->check_set & XT_RECENT_REAP) && !info->seconds) |
358 | return -EINVAL; | 358 | return -EINVAL; |
359 | if (info->hit_count >= XT_RECENT_MAX_NSTAMPS) { | 359 | if (info->hit_count >= XT_RECENT_MAX_NSTAMPS) { |
360 | pr_info("hitcount (%u) is larger than allowed maximum (%u)\n", | 360 | pr_info_ratelimited("hitcount (%u) is larger than allowed maximum (%u)\n", |
361 | info->hit_count, XT_RECENT_MAX_NSTAMPS - 1); | 361 | info->hit_count, XT_RECENT_MAX_NSTAMPS - 1); |
362 | return -EINVAL; | 362 | return -EINVAL; |
363 | } | 363 | } |
364 | if (info->name[0] == '\0' || | 364 | ret = xt_check_proc_name(info->name, sizeof(info->name)); |
365 | strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN) | 365 | if (ret) |
366 | return -EINVAL; | 366 | return ret; |
367 | 367 | ||
368 | if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot) | 368 | if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot) |
369 | nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1; | 369 | nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1; |
@@ -587,7 +587,7 @@ recent_mt_proc_write(struct file *file, const char __user *input, | |||
587 | add = true; | 587 | add = true; |
588 | break; | 588 | break; |
589 | default: | 589 | default: |
590 | pr_info("Need \"+ip\", \"-ip\" or \"/\"\n"); | 590 | pr_info_ratelimited("Need \"+ip\", \"-ip\" or \"/\"\n"); |
591 | return -EINVAL; | 591 | return -EINVAL; |
592 | } | 592 | } |
593 | 593 | ||
@@ -601,10 +601,8 @@ recent_mt_proc_write(struct file *file, const char __user *input, | |||
601 | succ = in4_pton(c, size, (void *)&addr, '\n', NULL); | 601 | succ = in4_pton(c, size, (void *)&addr, '\n', NULL); |
602 | } | 602 | } |
603 | 603 | ||
604 | if (!succ) { | 604 | if (!succ) |
605 | pr_info("illegal address written to procfs\n"); | ||
606 | return -EINVAL; | 605 | return -EINVAL; |
607 | } | ||
608 | 606 | ||
609 | spin_lock_bh(&recent_lock); | 607 | spin_lock_bh(&recent_lock); |
610 | e = recent_entry_lookup(t, &addr, family, 0); | 608 | e = recent_entry_lookup(t, &addr, family, 0); |
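In the xt_recent hunk the open-coded name test is replaced by xt_check_proc_name(), since the table name becomes a file under /proc and therefore needs stricter validation than just being non-empty and short enough. The exact kernel checks are not reproduced here; check_proc_name below is a made-up userspace approximation of the kind of validation involved:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int check_proc_name(const char *name, size_t size)
    {
        size_t len = strnlen(name, size);

        if (len == 0 || len == size)   /* empty, or no NUL inside the buffer */
            return -EINVAL;
        if (strchr(name, '/'))         /* must not escape the procfs directory */
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n",
               check_proc_name("goodlist", 32),
               check_proc_name("../evil", 32));   /* 0 -22 */
        return 0;
    }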
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c index 16b6b11ee83f..6f4c5217d835 100644 --- a/net/netfilter/xt_set.c +++ b/net/netfilter/xt_set.c | |||
@@ -92,12 +92,12 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par) | |||
92 | index = ip_set_nfnl_get_byindex(par->net, info->match_set.index); | 92 | index = ip_set_nfnl_get_byindex(par->net, info->match_set.index); |
93 | 93 | ||
94 | if (index == IPSET_INVALID_ID) { | 94 | if (index == IPSET_INVALID_ID) { |
95 | pr_warn("Cannot find set identified by id %u to match\n", | 95 | pr_info_ratelimited("Cannot find set identified by id %u to match\n", |
96 | info->match_set.index); | 96 | info->match_set.index); |
97 | return -ENOENT; | 97 | return -ENOENT; |
98 | } | 98 | } |
99 | if (info->match_set.u.flags[IPSET_DIM_MAX - 1] != 0) { | 99 | if (info->match_set.u.flags[IPSET_DIM_MAX - 1] != 0) { |
100 | pr_warn("Protocol error: set match dimension is over the limit!\n"); | 100 | pr_info_ratelimited("set match dimension is over the limit!\n"); |
101 | ip_set_nfnl_put(par->net, info->match_set.index); | 101 | ip_set_nfnl_put(par->net, info->match_set.index); |
102 | return -ERANGE; | 102 | return -ERANGE; |
103 | } | 103 | } |
@@ -143,12 +143,12 @@ set_match_v1_checkentry(const struct xt_mtchk_param *par) | |||
143 | index = ip_set_nfnl_get_byindex(par->net, info->match_set.index); | 143 | index = ip_set_nfnl_get_byindex(par->net, info->match_set.index); |
144 | 144 | ||
145 | if (index == IPSET_INVALID_ID) { | 145 | if (index == IPSET_INVALID_ID) { |
146 | pr_warn("Cannot find set identified by id %u to match\n", | 146 | pr_info_ratelimited("Cannot find set identified by id %u to match\n", |
147 | info->match_set.index); | 147 | info->match_set.index); |
148 | return -ENOENT; | 148 | return -ENOENT; |
149 | } | 149 | } |
150 | if (info->match_set.dim > IPSET_DIM_MAX) { | 150 | if (info->match_set.dim > IPSET_DIM_MAX) { |
151 | pr_warn("Protocol error: set match dimension is over the limit!\n"); | 151 | pr_info_ratelimited("set match dimension is over the limit!\n"); |
152 | ip_set_nfnl_put(par->net, info->match_set.index); | 152 | ip_set_nfnl_put(par->net, info->match_set.index); |
153 | return -ERANGE; | 153 | return -ERANGE; |
154 | } | 154 | } |
@@ -241,8 +241,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par) | |||
241 | if (info->add_set.index != IPSET_INVALID_ID) { | 241 | if (info->add_set.index != IPSET_INVALID_ID) { |
242 | index = ip_set_nfnl_get_byindex(par->net, info->add_set.index); | 242 | index = ip_set_nfnl_get_byindex(par->net, info->add_set.index); |
243 | if (index == IPSET_INVALID_ID) { | 243 | if (index == IPSET_INVALID_ID) { |
244 | pr_warn("Cannot find add_set index %u as target\n", | 244 | pr_info_ratelimited("Cannot find add_set index %u as target\n", |
245 | info->add_set.index); | 245 | info->add_set.index); |
246 | return -ENOENT; | 246 | return -ENOENT; |
247 | } | 247 | } |
248 | } | 248 | } |
@@ -250,8 +250,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par) | |||
250 | if (info->del_set.index != IPSET_INVALID_ID) { | 250 | if (info->del_set.index != IPSET_INVALID_ID) { |
251 | index = ip_set_nfnl_get_byindex(par->net, info->del_set.index); | 251 | index = ip_set_nfnl_get_byindex(par->net, info->del_set.index); |
252 | if (index == IPSET_INVALID_ID) { | 252 | if (index == IPSET_INVALID_ID) { |
253 | pr_warn("Cannot find del_set index %u as target\n", | 253 | pr_info_ratelimited("Cannot find del_set index %u as target\n", |
254 | info->del_set.index); | 254 | info->del_set.index); |
255 | if (info->add_set.index != IPSET_INVALID_ID) | 255 | if (info->add_set.index != IPSET_INVALID_ID) |
256 | ip_set_nfnl_put(par->net, info->add_set.index); | 256 | ip_set_nfnl_put(par->net, info->add_set.index); |
257 | return -ENOENT; | 257 | return -ENOENT; |
@@ -259,7 +259,7 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par) | |||
259 | } | 259 | } |
260 | if (info->add_set.u.flags[IPSET_DIM_MAX - 1] != 0 || | 260 | if (info->add_set.u.flags[IPSET_DIM_MAX - 1] != 0 || |
261 | info->del_set.u.flags[IPSET_DIM_MAX - 1] != 0) { | 261 | info->del_set.u.flags[IPSET_DIM_MAX - 1] != 0) { |
262 | pr_warn("Protocol error: SET target dimension is over the limit!\n"); | 262 | pr_info_ratelimited("SET target dimension over the limit!\n"); |
263 | if (info->add_set.index != IPSET_INVALID_ID) | 263 | if (info->add_set.index != IPSET_INVALID_ID) |
264 | ip_set_nfnl_put(par->net, info->add_set.index); | 264 | ip_set_nfnl_put(par->net, info->add_set.index); |
265 | if (info->del_set.index != IPSET_INVALID_ID) | 265 | if (info->del_set.index != IPSET_INVALID_ID) |
@@ -316,8 +316,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par) | |||
316 | if (info->add_set.index != IPSET_INVALID_ID) { | 316 | if (info->add_set.index != IPSET_INVALID_ID) { |
317 | index = ip_set_nfnl_get_byindex(par->net, info->add_set.index); | 317 | index = ip_set_nfnl_get_byindex(par->net, info->add_set.index); |
318 | if (index == IPSET_INVALID_ID) { | 318 | if (index == IPSET_INVALID_ID) { |
319 | pr_warn("Cannot find add_set index %u as target\n", | 319 | pr_info_ratelimited("Cannot find add_set index %u as target\n", |
320 | info->add_set.index); | 320 | info->add_set.index); |
321 | return -ENOENT; | 321 | return -ENOENT; |
322 | } | 322 | } |
323 | } | 323 | } |
@@ -325,8 +325,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par) | |||
325 | if (info->del_set.index != IPSET_INVALID_ID) { | 325 | if (info->del_set.index != IPSET_INVALID_ID) { |
326 | index = ip_set_nfnl_get_byindex(par->net, info->del_set.index); | 326 | index = ip_set_nfnl_get_byindex(par->net, info->del_set.index); |
327 | if (index == IPSET_INVALID_ID) { | 327 | if (index == IPSET_INVALID_ID) { |
328 | pr_warn("Cannot find del_set index %u as target\n", | 328 | pr_info_ratelimited("Cannot find del_set index %u as target\n", |
329 | info->del_set.index); | 329 | info->del_set.index); |
330 | if (info->add_set.index != IPSET_INVALID_ID) | 330 | if (info->add_set.index != IPSET_INVALID_ID) |
331 | ip_set_nfnl_put(par->net, info->add_set.index); | 331 | ip_set_nfnl_put(par->net, info->add_set.index); |
332 | return -ENOENT; | 332 | return -ENOENT; |
@@ -334,7 +334,7 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par) | |||
334 | } | 334 | } |
335 | if (info->add_set.dim > IPSET_DIM_MAX || | 335 | if (info->add_set.dim > IPSET_DIM_MAX || |
336 | info->del_set.dim > IPSET_DIM_MAX) { | 336 | info->del_set.dim > IPSET_DIM_MAX) { |
337 | pr_warn("Protocol error: SET target dimension is over the limit!\n"); | 337 | pr_info_ratelimited("SET target dimension over the limit!\n"); |
338 | if (info->add_set.index != IPSET_INVALID_ID) | 338 | if (info->add_set.index != IPSET_INVALID_ID) |
339 | ip_set_nfnl_put(par->net, info->add_set.index); | 339 | ip_set_nfnl_put(par->net, info->add_set.index); |
340 | if (info->del_set.index != IPSET_INVALID_ID) | 340 | if (info->del_set.index != IPSET_INVALID_ID) |
@@ -444,8 +444,8 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par) | |||
444 | index = ip_set_nfnl_get_byindex(par->net, | 444 | index = ip_set_nfnl_get_byindex(par->net, |
445 | info->add_set.index); | 445 | info->add_set.index); |
446 | if (index == IPSET_INVALID_ID) { | 446 | if (index == IPSET_INVALID_ID) { |
447 | pr_warn("Cannot find add_set index %u as target\n", | 447 | pr_info_ratelimited("Cannot find add_set index %u as target\n", |
448 | info->add_set.index); | 448 | info->add_set.index); |
449 | return -ENOENT; | 449 | return -ENOENT; |
450 | } | 450 | } |
451 | } | 451 | } |
@@ -454,8 +454,8 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par) | |||
454 | index = ip_set_nfnl_get_byindex(par->net, | 454 | index = ip_set_nfnl_get_byindex(par->net, |
455 | info->del_set.index); | 455 | info->del_set.index); |
456 | if (index == IPSET_INVALID_ID) { | 456 | if (index == IPSET_INVALID_ID) { |
457 | pr_warn("Cannot find del_set index %u as target\n", | 457 | pr_info_ratelimited("Cannot find del_set index %u as target\n", |
458 | info->del_set.index); | 458 | info->del_set.index); |
459 | if (info->add_set.index != IPSET_INVALID_ID) | 459 | if (info->add_set.index != IPSET_INVALID_ID) |
460 | ip_set_nfnl_put(par->net, | 460 | ip_set_nfnl_put(par->net, |
461 | info->add_set.index); | 461 | info->add_set.index); |
@@ -465,7 +465,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par) | |||
465 | 465 | ||
466 | if (info->map_set.index != IPSET_INVALID_ID) { | 466 | if (info->map_set.index != IPSET_INVALID_ID) { |
467 | if (strncmp(par->table, "mangle", 7)) { | 467 | if (strncmp(par->table, "mangle", 7)) { |
468 | pr_warn("--map-set only usable from mangle table\n"); | 468 | pr_info_ratelimited("--map-set only usable from mangle table\n"); |
469 | return -EINVAL; | 469 | return -EINVAL; |
470 | } | 470 | } |
471 | if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) | | 471 | if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) | |
@@ -473,14 +473,14 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par) | |||
473 | !(par->hook_mask & (1 << NF_INET_FORWARD | | 473 | !(par->hook_mask & (1 << NF_INET_FORWARD | |
474 | 1 << NF_INET_LOCAL_OUT | | 474 | 1 << NF_INET_LOCAL_OUT | |
475 | 1 << NF_INET_POST_ROUTING))) { | 475 | 1 << NF_INET_POST_ROUTING))) { |
476 | pr_warn("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n"); | 476 | pr_info_ratelimited("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n"); |
477 | return -EINVAL; | 477 | return -EINVAL; |
478 | } | 478 | } |
479 | index = ip_set_nfnl_get_byindex(par->net, | 479 | index = ip_set_nfnl_get_byindex(par->net, |
480 | info->map_set.index); | 480 | info->map_set.index); |
481 | if (index == IPSET_INVALID_ID) { | 481 | if (index == IPSET_INVALID_ID) { |
482 | pr_warn("Cannot find map_set index %u as target\n", | 482 | pr_info_ratelimited("Cannot find map_set index %u as target\n", |
483 | info->map_set.index); | 483 | info->map_set.index); |
484 | if (info->add_set.index != IPSET_INVALID_ID) | 484 | if (info->add_set.index != IPSET_INVALID_ID) |
485 | ip_set_nfnl_put(par->net, | 485 | ip_set_nfnl_put(par->net, |
486 | info->add_set.index); | 486 | info->add_set.index); |
@@ -494,7 +494,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par) | |||
494 | if (info->add_set.dim > IPSET_DIM_MAX || | 494 | if (info->add_set.dim > IPSET_DIM_MAX || |
495 | info->del_set.dim > IPSET_DIM_MAX || | 495 | info->del_set.dim > IPSET_DIM_MAX || |
496 | info->map_set.dim > IPSET_DIM_MAX) { | 496 | info->map_set.dim > IPSET_DIM_MAX) { |
497 | pr_warn("Protocol error: SET target dimension is over the limit!\n"); | 497 | pr_info_ratelimited("SET target dimension over the limit!\n"); |
498 | if (info->add_set.index != IPSET_INVALID_ID) | 498 | if (info->add_set.index != IPSET_INVALID_ID) |
499 | ip_set_nfnl_put(par->net, info->add_set.index); | 499 | ip_set_nfnl_put(par->net, info->add_set.index); |
500 | if (info->del_set.index != IPSET_INVALID_ID) | 500 | if (info->del_set.index != IPSET_INVALID_ID) |
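All of the xt_set checkentry paths above share one invariant: every ip_set reference taken earlier must be put again when a later lookup fails, otherwise the set can never be destroyed. A compact sketch of that unwind order, using made-up set_get()/set_put() helpers and fall-through error labels instead of the if-guarded puts in the real code:

    #include <stdio.h>

    static int set_get(int index)  { return index >= 0 ? 0 : -1; }
    static void set_put(int index) { (void)index; }

    static int target_check(int add_idx, int del_idx, int map_idx)
    {
        if (set_get(add_idx) < 0)
            return -1;
        if (set_get(del_idx) < 0)
            goto err_put_add;
        if (set_get(map_idx) < 0)
            goto err_put_del;
        return 0;

    err_put_del:
        set_put(del_idx);
    err_put_add:
        set_put(add_idx);
        return -1;
    }

    int main(void)
    {
        printf("%d\n", target_check(1, 2, -1));   /* map lookup fails: -1 */
        return 0;
    }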
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 575d2153e3b8..2ac7f674d19b 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c | |||
@@ -171,7 +171,8 @@ static int socket_mt_v1_check(const struct xt_mtchk_param *par) | |||
171 | return err; | 171 | return err; |
172 | 172 | ||
173 | if (info->flags & ~XT_SOCKET_FLAGS_V1) { | 173 | if (info->flags & ~XT_SOCKET_FLAGS_V1) { |
174 | pr_info("unknown flags 0x%x\n", info->flags & ~XT_SOCKET_FLAGS_V1); | 174 | pr_info_ratelimited("unknown flags 0x%x\n", |
175 | info->flags & ~XT_SOCKET_FLAGS_V1); | ||
175 | return -EINVAL; | 176 | return -EINVAL; |
176 | } | 177 | } |
177 | return 0; | 178 | return 0; |
@@ -187,7 +188,8 @@ static int socket_mt_v2_check(const struct xt_mtchk_param *par) | |||
187 | return err; | 188 | return err; |
188 | 189 | ||
189 | if (info->flags & ~XT_SOCKET_FLAGS_V2) { | 190 | if (info->flags & ~XT_SOCKET_FLAGS_V2) { |
190 | pr_info("unknown flags 0x%x\n", info->flags & ~XT_SOCKET_FLAGS_V2); | 191 | pr_info_ratelimited("unknown flags 0x%x\n", |
192 | info->flags & ~XT_SOCKET_FLAGS_V2); | ||
191 | return -EINVAL; | 193 | return -EINVAL; |
192 | } | 194 | } |
193 | return 0; | 195 | return 0; |
@@ -203,8 +205,8 @@ static int socket_mt_v3_check(const struct xt_mtchk_param *par) | |||
203 | if (err) | 205 | if (err) |
204 | return err; | 206 | return err; |
205 | if (info->flags & ~XT_SOCKET_FLAGS_V3) { | 207 | if (info->flags & ~XT_SOCKET_FLAGS_V3) { |
206 | pr_info("unknown flags 0x%x\n", | 208 | pr_info_ratelimited("unknown flags 0x%x\n", |
207 | info->flags & ~XT_SOCKET_FLAGS_V3); | 209 | info->flags & ~XT_SOCKET_FLAGS_V3); |
208 | return -EINVAL; | 210 | return -EINVAL; |
209 | } | 211 | } |
210 | return 0; | 212 | return 0; |
diff --git a/net/netfilter/xt_state.c b/net/netfilter/xt_state.c index 5fbd79194d21..0b41c0befe3c 100644 --- a/net/netfilter/xt_state.c +++ b/net/netfilter/xt_state.c | |||
@@ -44,8 +44,8 @@ static int state_mt_check(const struct xt_mtchk_param *par) | |||
44 | 44 | ||
45 | ret = nf_ct_netns_get(par->net, par->family); | 45 | ret = nf_ct_netns_get(par->net, par->family); |
46 | if (ret < 0) | 46 | if (ret < 0) |
47 | pr_info("cannot load conntrack support for proto=%u\n", | 47 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", |
48 | par->family); | 48 | par->family); |
49 | return ret; | 49 | return ret; |
50 | } | 50 | } |
51 | 51 | ||
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c index 1b01eec1fbda..0160f505e337 100644 --- a/net/netfilter/xt_time.c +++ b/net/netfilter/xt_time.c | |||
@@ -235,13 +235,13 @@ static int time_mt_check(const struct xt_mtchk_param *par) | |||
235 | 235 | ||
236 | if (info->daytime_start > XT_TIME_MAX_DAYTIME || | 236 | if (info->daytime_start > XT_TIME_MAX_DAYTIME || |
237 | info->daytime_stop > XT_TIME_MAX_DAYTIME) { | 237 | info->daytime_stop > XT_TIME_MAX_DAYTIME) { |
238 | pr_info("invalid argument - start or " | 238 | pr_info_ratelimited("invalid argument - start or stop time greater than 23:59:59\n"); |
239 | "stop time greater than 23:59:59\n"); | ||
240 | return -EDOM; | 239 | return -EDOM; |
241 | } | 240 | } |
242 | 241 | ||
243 | if (info->flags & ~XT_TIME_ALL_FLAGS) { | 242 | if (info->flags & ~XT_TIME_ALL_FLAGS) { |
244 | pr_info("unknown flags 0x%x\n", info->flags & ~XT_TIME_ALL_FLAGS); | 243 | pr_info_ratelimited("unknown flags 0x%x\n", |
244 | info->flags & ~XT_TIME_ALL_FLAGS); | ||
245 | return -EINVAL; | 245 | return -EINVAL; |
246 | } | 246 | } |
247 | 247 | ||
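xt_socket, xt_state and xt_time all validate user-supplied option bits the same way: mask off the known flags and report whatever is left. A trivial illustration with invented flag names:

    #include <stdio.h>

    #define EXAMPLE_FLAG_A 0x1          /* hypothetical flag bits */
    #define EXAMPLE_FLAG_B 0x2
    #define EXAMPLE_ALL_FLAGS (EXAMPLE_FLAG_A | EXAMPLE_FLAG_B)

    int main(void)
    {
        unsigned int flags = 0x6;       /* bit 0x4 is not a defined flag */

        if (flags & ~EXAMPLE_ALL_FLAGS)
            printf("unknown flags 0x%x\n", flags & ~EXAMPLE_ALL_FLAGS);
        return 0;
    }

Printing only flags & ~ALL_FLAGS shows the offending bits rather than the whole word, which is what the ratelimited messages above do.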
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 2ad445c1d27c..07e8478068f0 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -2308,7 +2308,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
2308 | if (cb->start) { | 2308 | if (cb->start) { |
2309 | ret = cb->start(cb); | 2309 | ret = cb->start(cb); |
2310 | if (ret) | 2310 | if (ret) |
2311 | goto error_unlock; | 2311 | goto error_put; |
2312 | } | 2312 | } |
2313 | 2313 | ||
2314 | nlk->cb_running = true; | 2314 | nlk->cb_running = true; |
@@ -2328,6 +2328,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
2328 | */ | 2328 | */ |
2329 | return -EINTR; | 2329 | return -EINTR; |
2330 | 2330 | ||
2331 | error_put: | ||
2332 | module_put(control->module); | ||
2331 | error_unlock: | 2333 | error_unlock: |
2332 | sock_put(sk); | 2334 | sock_put(sk); |
2333 | mutex_unlock(nlk->cb_mutex); | 2335 | mutex_unlock(nlk->cb_mutex); |
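The af_netlink hunk adds an error_put label because __netlink_dump_start() has already taken a reference on the calling module before cb->start() runs; if start() fails, that reference has to be dropped on the way out. A toy model of the control flow (module_get/module_put are plain counters here, and the success path is collapsed):

    #include <stdio.h>

    static int module_refs;

    static void module_get(void) { module_refs++; }
    static void module_put(void) { module_refs--; }

    static int cb_start(int fail) { return fail ? -1 : 0; }

    static int dump_start(int start_fails)
    {
        int ret;

        module_get();                   /* held for the duration of the dump */

        ret = cb_start(start_fails);
        if (ret)
            goto error_put;             /* must also drop the reference */

        module_put();                   /* in reality: dropped when the dump ends */
        return 0;

    error_put:
        module_put();
        return ret;
    }

    int main(void)
    {
        dump_start(1);
        printf("refs after failed start: %d\n", module_refs);   /* 0 */
        return 0;
    }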
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 6f02499ef007..b9ce82c9440f 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -1106,7 +1106,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group, | |||
1106 | if (!err) | 1106 | if (!err) |
1107 | delivered = true; | 1107 | delivered = true; |
1108 | else if (err != -ESRCH) | 1108 | else if (err != -ESRCH) |
1109 | goto error; | 1109 | return err; |
1110 | return delivered ? 0 : -ESRCH; | 1110 | return delivered ? 0 : -ESRCH; |
1111 | error: | 1111 | error: |
1112 | kfree_skb(skb); | 1112 | kfree_skb(skb); |
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c index 367d8c027101..2ceefa183cee 100644 --- a/net/nfc/llcp_commands.c +++ b/net/nfc/llcp_commands.c | |||
@@ -149,6 +149,10 @@ struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri, | |||
149 | 149 | ||
150 | pr_debug("uri: %s, len: %zu\n", uri, uri_len); | 150 | pr_debug("uri: %s, len: %zu\n", uri, uri_len); |
151 | 151 | ||
152 | /* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */ | ||
153 | if (WARN_ON_ONCE(uri_len > U8_MAX - 4)) | ||
154 | return NULL; | ||
155 | |||
152 | sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL); | 156 | sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL); |
153 | if (sdreq == NULL) | 157 | if (sdreq == NULL) |
154 | return NULL; | 158 | return NULL; |
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index c0b83dc9d993..f018eafc2a0d 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c | |||
@@ -61,7 +61,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = { | |||
61 | }; | 61 | }; |
62 | 62 | ||
63 | static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = { | 63 | static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = { |
64 | [NFC_SDP_ATTR_URI] = { .type = NLA_STRING }, | 64 | [NFC_SDP_ATTR_URI] = { .type = NLA_STRING, |
65 | .len = U8_MAX - 4 }, | ||
65 | [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 }, | 66 | [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 }, |
66 | }; | 67 | }; |
67 | 68 | ||
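The two NFC hunks are one fix: nfc_llcp_build_sdreq_tlv() stores uri_len plus a 3-byte TLV header plus a trailing NUL in a u8 tlv_len field, so the URI may be at most U8_MAX - 4 bytes, and the netlink policy now enforces the same bound before the string ever reaches that code. The arithmetic:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const unsigned int tlv_header = 3, trailing_nul = 1;
        unsigned int uri_len = UINT8_MAX - 4;                /* 251 */

        assert(uri_len + tlv_header + trailing_nul == UINT8_MAX);
        printf("largest uri_len that still fits a u8 tlv_len: %u\n", uri_len);
        return 0;
    }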
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c index 04b94281a30b..b891a91577f8 100644 --- a/net/openvswitch/meter.c +++ b/net/openvswitch/meter.c | |||
@@ -242,14 +242,20 @@ static struct dp_meter *dp_meter_create(struct nlattr **a) | |||
242 | 242 | ||
243 | band->type = nla_get_u32(attr[OVS_BAND_ATTR_TYPE]); | 243 | band->type = nla_get_u32(attr[OVS_BAND_ATTR_TYPE]); |
244 | band->rate = nla_get_u32(attr[OVS_BAND_ATTR_RATE]); | 244 | band->rate = nla_get_u32(attr[OVS_BAND_ATTR_RATE]); |
245 | if (band->rate == 0) { | ||
246 | err = -EINVAL; | ||
247 | goto exit_free_meter; | ||
248 | } | ||
249 | |||
245 | band->burst_size = nla_get_u32(attr[OVS_BAND_ATTR_BURST]); | 250 | band->burst_size = nla_get_u32(attr[OVS_BAND_ATTR_BURST]); |
246 | /* Figure out max delta_t that is enough to fill any bucket. | 251 | /* Figure out max delta_t that is enough to fill any bucket. |
247 | * Keep max_delta_t size to the bucket units: | 252 | * Keep max_delta_t size to the bucket units: |
248 | * pkts => 1/1000 packets, kilobits => bits. | 253 | * pkts => 1/1000 packets, kilobits => bits. |
254 | * | ||
255 | * Start with a full bucket. | ||
249 | */ | 256 | */ |
250 | band_max_delta_t = (band->burst_size + band->rate) * 1000; | 257 | band->bucket = (band->burst_size + band->rate) * 1000; |
251 | /* Start with a full bucket. */ | 258 | band_max_delta_t = band->bucket / band->rate; |
252 | band->bucket = band_max_delta_t; | ||
253 | if (band_max_delta_t > meter->max_delta_t) | 259 | if (band_max_delta_t > meter->max_delta_t) |
254 | meter->max_delta_t = band_max_delta_t; | 260 | meter->max_delta_t = band_max_delta_t; |
255 | band++; | 261 | band++; |
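The openvswitch meter hunk rejects a zero rate and then derives both values from one expression: the bucket starts full at (burst_size + rate) * 1000 bucket units, and max_delta_t is how long it takes to refill that bucket at the configured rate, which works out to milliseconds given the per-second rates and thousandth-unit buckets described in the comment. A numeric sketch with arbitrary example values (the rate == 0 check also protects the division):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t rate = 100, burst_size = 400;     /* example values only */
        uint64_t bucket;
        uint32_t band_max_delta_t;

        if (rate == 0)                             /* the new -EINVAL case */
            return 1;

        bucket = (uint64_t)(burst_size + rate) * 1000;   /* start full: 500000 */
        band_max_delta_t = bucket / rate;                /* 5000 */

        printf("bucket=%llu max_delta_t=%u\n",
               (unsigned long long)bucket, band_max_delta_t);
        return 0;
    }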
diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c index 50615d5efac1..9cf089b9754e 100644 --- a/net/qrtr/smd.c +++ b/net/qrtr/smd.c | |||
@@ -114,5 +114,6 @@ static struct rpmsg_driver qcom_smd_qrtr_driver = { | |||
114 | 114 | ||
115 | module_rpmsg_driver(qcom_smd_qrtr_driver); | 115 | module_rpmsg_driver(qcom_smd_qrtr_driver); |
116 | 116 | ||
117 | MODULE_ALIAS("rpmsg:IPCRTR"); | ||
117 | MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver"); | 118 | MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver"); |
118 | MODULE_LICENSE("GPL v2"); | 119 | MODULE_LICENSE("GPL v2"); |
diff --git a/net/rds/connection.c b/net/rds/connection.c index 94e190febfdd..2da3176bf792 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c | |||
@@ -224,7 +224,7 @@ static struct rds_connection *__rds_conn_create(struct net *net, | |||
224 | if (rds_destroy_pending(conn)) | 224 | if (rds_destroy_pending(conn)) |
225 | ret = -ENETDOWN; | 225 | ret = -ENETDOWN; |
226 | else | 226 | else |
227 | ret = trans->conn_alloc(conn, gfp); | 227 | ret = trans->conn_alloc(conn, GFP_ATOMIC); |
228 | if (ret) { | 228 | if (ret) { |
229 | rcu_read_unlock(); | 229 | rcu_read_unlock(); |
230 | kfree(conn->c_path); | 230 | kfree(conn->c_path); |
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index c061d6eb465d..22571189f21e 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006 Oracle. All rights reserved. | 2 | * Copyright (c) 2006, 2018 Oracle. All rights reserved. |
3 | * | 3 | * |
4 | * This software is available to you under a choice of one of two | 4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
@@ -142,12 +142,20 @@ int rds_tcp_accept_one(struct socket *sock) | |||
142 | if (ret) | 142 | if (ret) |
143 | goto out; | 143 | goto out; |
144 | 144 | ||
145 | new_sock->type = sock->type; | ||
146 | new_sock->ops = sock->ops; | ||
147 | ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true); | 145 | ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true); |
148 | if (ret < 0) | 146 | if (ret < 0) |
149 | goto out; | 147 | goto out; |
150 | 148 | ||
149 | /* sock_create_lite() does not get a hold on the owner module so we | ||
150 | * need to do it here. Note that sock_release() uses sock->ops to | ||
151 | * determine if it needs to decrement the reference count. So set | ||
152 | * sock->ops after calling accept() in case that fails. And there's | ||
153 | * no need to do try_module_get() as the listener should have a hold | ||
154 | * already. | ||
155 | */ | ||
156 | new_sock->ops = sock->ops; | ||
157 | __module_get(new_sock->ops->owner); | ||
158 | |||
151 | ret = rds_tcp_keepalive(new_sock); | 159 | ret = rds_tcp_keepalive(new_sock); |
152 | if (ret < 0) | 160 | if (ret < 0) |
153 | goto out; | 161 | goto out; |
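The rds_tcp_accept_one() comment above is about reference ownership: sock_release() drops a reference on new_sock->ops->owner, so whoever publishes ops on the new socket must take that reference first, and must only publish ops once accept() can no longer fail. A toy model with invented types that makes the pairing explicit:

    #include <assert.h>

    struct owner { int refcnt; };
    struct ops   { struct owner *owner; };
    struct sock_ { const struct ops *ops; };

    static void release_(struct sock_ *s)
    {
        if (s->ops)                        /* release finds the owner via ops */
            s->ops->owner->refcnt--;
    }

    int main(void)
    {
        struct owner mod = { .refcnt = 1 };    /* the listener's existing hold */
        struct ops ops = { .owner = &mod };
        struct sock_ new_sock = { 0 };

        /* accept() succeeded: publish ops and take the matching hold */
        new_sock.ops = &ops;
        mod.refcnt++;

        release_(&new_sock);
        assert(mod.refcnt == 1);           /* the new socket's hold is balanced */
        return 0;
    }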
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 42410e910aff..cf73dc006c3b 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c | |||
@@ -445,7 +445,7 @@ send_fragmentable: | |||
445 | (char *)&opt, sizeof(opt)); | 445 | (char *)&opt, sizeof(opt)); |
446 | if (ret == 0) { | 446 | if (ret == 0) { |
447 | ret = kernel_sendmsg(conn->params.local->socket, &msg, | 447 | ret = kernel_sendmsg(conn->params.local->socket, &msg, |
448 | iov, 1, iov[0].iov_len); | 448 | iov, 2, len); |
449 | 449 | ||
450 | opt = IPV6_PMTUDISC_DO; | 450 | opt = IPV6_PMTUDISC_DO; |
451 | kernel_setsockopt(conn->params.local->socket, | 451 | kernel_setsockopt(conn->params.local->socket, |
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index cc21e8db25b0..9d45d8b56744 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c | |||
@@ -517,9 +517,10 @@ try_again: | |||
517 | ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, | 517 | ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, |
518 | sizeof(unsigned int), &id32); | 518 | sizeof(unsigned int), &id32); |
519 | } else { | 519 | } else { |
520 | unsigned long idl = call->user_call_ID; | ||
521 | |||
520 | ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, | 522 | ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, |
521 | sizeof(unsigned long), | 523 | sizeof(unsigned long), &idl); |
522 | &call->user_call_ID); | ||
523 | } | 524 | } |
524 | if (ret < 0) | 525 | if (ret < 0) |
525 | goto error_unlock_call; | 526 | goto error_unlock_call; |
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index b3f2c15affa7..9d2cabf1dc7e 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c | |||
@@ -352,7 +352,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, | |||
352 | return res; | 352 | return res; |
353 | out: | 353 | out: |
354 | if (res == ACT_P_CREATED) | 354 | if (res == ACT_P_CREATED) |
355 | tcf_idr_cleanup(*act, est); | 355 | tcf_idr_release(*act, bind); |
356 | 356 | ||
357 | return ret; | 357 | return ret; |
358 | } | 358 | } |
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index b7ba9b06b147..2a5c8fd860cf 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c | |||
@@ -350,7 +350,7 @@ static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl, | |||
350 | { | 350 | { |
351 | struct sctphdr *sctph; | 351 | struct sctphdr *sctph; |
352 | 352 | ||
353 | if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) | 353 | if (skb_is_gso(skb) && skb_is_gso_sctp(skb)) |
354 | return 1; | 354 | return 1; |
355 | 355 | ||
356 | sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph)); | 356 | sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph)); |
@@ -626,7 +626,8 @@ static void tcf_csum_cleanup(struct tc_action *a) | |||
626 | struct tcf_csum_params *params; | 626 | struct tcf_csum_params *params; |
627 | 627 | ||
628 | params = rcu_dereference_protected(p->params, 1); | 628 | params = rcu_dereference_protected(p->params, 1); |
629 | kfree_rcu(params, rcu); | 629 | if (params) |
630 | kfree_rcu(params, rcu); | ||
630 | } | 631 | } |
631 | 632 | ||
632 | static int tcf_csum_walker(struct net *net, struct sk_buff *skb, | 633 | static int tcf_csum_walker(struct net *net, struct sk_buff *skb, |
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 06e380ae0928..7e06b9b62613 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c | |||
@@ -80,9 +80,12 @@ static void ipt_destroy_target(struct xt_entry_target *t) | |||
80 | static void tcf_ipt_release(struct tc_action *a) | 80 | static void tcf_ipt_release(struct tc_action *a) |
81 | { | 81 | { |
82 | struct tcf_ipt *ipt = to_ipt(a); | 82 | struct tcf_ipt *ipt = to_ipt(a); |
83 | ipt_destroy_target(ipt->tcfi_t); | 83 | |
84 | if (ipt->tcfi_t) { | ||
85 | ipt_destroy_target(ipt->tcfi_t); | ||
86 | kfree(ipt->tcfi_t); | ||
87 | } | ||
84 | kfree(ipt->tcfi_tname); | 88 | kfree(ipt->tcfi_tname); |
85 | kfree(ipt->tcfi_t); | ||
86 | } | 89 | } |
87 | 90 | ||
88 | static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = { | 91 | static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = { |
@@ -187,7 +190,7 @@ err2: | |||
187 | kfree(tname); | 190 | kfree(tname); |
188 | err1: | 191 | err1: |
189 | if (ret == ACT_P_CREATED) | 192 | if (ret == ACT_P_CREATED) |
190 | tcf_idr_cleanup(*a, est); | 193 | tcf_idr_release(*a, bind); |
191 | return err; | 194 | return err; |
192 | } | 195 | } |
193 | 196 | ||
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 349beaffb29e..fef08835f26d 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
@@ -176,7 +176,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, | |||
176 | p = to_pedit(*a); | 176 | p = to_pedit(*a); |
177 | keys = kmalloc(ksize, GFP_KERNEL); | 177 | keys = kmalloc(ksize, GFP_KERNEL); |
178 | if (keys == NULL) { | 178 | if (keys == NULL) { |
179 | tcf_idr_cleanup(*a, est); | 179 | tcf_idr_release(*a, bind); |
180 | kfree(keys_ex); | 180 | kfree(keys_ex); |
181 | return -ENOMEM; | 181 | return -ENOMEM; |
182 | } | 182 | } |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 95d3c9097b25..faebf82b99f1 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
@@ -194,7 +194,7 @@ failure: | |||
194 | qdisc_put_rtab(P_tab); | 194 | qdisc_put_rtab(P_tab); |
195 | qdisc_put_rtab(R_tab); | 195 | qdisc_put_rtab(R_tab); |
196 | if (ret == ACT_P_CREATED) | 196 | if (ret == ACT_P_CREATED) |
197 | tcf_idr_cleanup(*a, est); | 197 | tcf_idr_release(*a, bind); |
198 | return err; | 198 | return err; |
199 | } | 199 | } |
200 | 200 | ||
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 1ba0df238756..74c5d7e6a0fa 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c | |||
@@ -103,7 +103,8 @@ static void tcf_sample_cleanup(struct tc_action *a) | |||
103 | 103 | ||
104 | psample_group = rtnl_dereference(s->psample_group); | 104 | psample_group = rtnl_dereference(s->psample_group); |
105 | RCU_INIT_POINTER(s->psample_group, NULL); | 105 | RCU_INIT_POINTER(s->psample_group, NULL); |
106 | psample_group_put(psample_group); | 106 | if (psample_group) |
107 | psample_group_put(psample_group); | ||
107 | } | 108 | } |
108 | 109 | ||
109 | static bool tcf_sample_dev_ok_push(struct net_device *dev) | 110 | static bool tcf_sample_dev_ok_push(struct net_device *dev) |
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 425eac11f6da..b1f38063ada0 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c | |||
@@ -121,7 +121,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, | |||
121 | d = to_defact(*a); | 121 | d = to_defact(*a); |
122 | ret = alloc_defdata(d, defdata); | 122 | ret = alloc_defdata(d, defdata); |
123 | if (ret < 0) { | 123 | if (ret < 0) { |
124 | tcf_idr_cleanup(*a, est); | 124 | tcf_idr_release(*a, bind); |
125 | return ret; | 125 | return ret; |
126 | } | 126 | } |
127 | d->tcf_action = parm->action; | 127 | d->tcf_action = parm->action; |
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index fa975262dbac..7b0700f52b50 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c | |||
@@ -152,7 +152,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, | |||
152 | ASSERT_RTNL(); | 152 | ASSERT_RTNL(); |
153 | p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL); | 153 | p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL); |
154 | if (unlikely(!p)) { | 154 | if (unlikely(!p)) { |
155 | if (ovr) | 155 | if (ret == ACT_P_CREATED) |
156 | tcf_idr_release(*a, bind); | 156 | tcf_idr_release(*a, bind); |
157 | return -ENOMEM; | 157 | return -ENOMEM; |
158 | } | 158 | } |
@@ -190,7 +190,8 @@ static void tcf_skbmod_cleanup(struct tc_action *a) | |||
190 | struct tcf_skbmod_params *p; | 190 | struct tcf_skbmod_params *p; |
191 | 191 | ||
192 | p = rcu_dereference_protected(d->skbmod_p, 1); | 192 | p = rcu_dereference_protected(d->skbmod_p, 1); |
193 | kfree_rcu(p, rcu); | 193 | if (p) |
194 | kfree_rcu(p, rcu); | ||
194 | } | 195 | } |
195 | 196 | ||
196 | static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, | 197 | static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, |
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 0e23aac09ad6..1281ca463727 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c | |||
@@ -153,6 +153,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, | |||
153 | metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX; | 153 | metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX; |
154 | break; | 154 | break; |
155 | default: | 155 | default: |
156 | ret = -EINVAL; | ||
156 | goto err_out; | 157 | goto err_out; |
157 | } | 158 | } |
158 | 159 | ||
@@ -207,11 +208,12 @@ static void tunnel_key_release(struct tc_action *a) | |||
207 | struct tcf_tunnel_key_params *params; | 208 | struct tcf_tunnel_key_params *params; |
208 | 209 | ||
209 | params = rcu_dereference_protected(t->params, 1); | 210 | params = rcu_dereference_protected(t->params, 1); |
211 | if (params) { | ||
212 | if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) | ||
213 | dst_release(¶ms->tcft_enc_metadata->dst); | ||
210 | 214 | ||
211 | if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) | 215 | kfree_rcu(params, rcu); |
212 | dst_release(¶ms->tcft_enc_metadata->dst); | 216 | } |
213 | |||
214 | kfree_rcu(params, rcu); | ||
215 | } | 217 | } |
216 | 218 | ||
217 | static int tunnel_key_dump_addresses(struct sk_buff *skb, | 219 | static int tunnel_key_dump_addresses(struct sk_buff *skb, |
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index e1a1b3f3983a..c49cb61adedf 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c | |||
@@ -195,7 +195,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, | |||
195 | ASSERT_RTNL(); | 195 | ASSERT_RTNL(); |
196 | p = kzalloc(sizeof(*p), GFP_KERNEL); | 196 | p = kzalloc(sizeof(*p), GFP_KERNEL); |
197 | if (!p) { | 197 | if (!p) { |
198 | if (ovr) | 198 | if (ret == ACT_P_CREATED) |
199 | tcf_idr_release(*a, bind); | 199 | tcf_idr_release(*a, bind); |
200 | return -ENOMEM; | 200 | return -ENOMEM; |
201 | } | 201 | } |
@@ -225,7 +225,8 @@ static void tcf_vlan_cleanup(struct tc_action *a) | |||
225 | struct tcf_vlan_params *p; | 225 | struct tcf_vlan_params *p; |
226 | 226 | ||
227 | p = rcu_dereference_protected(v->vlan_p, 1); | 227 | p = rcu_dereference_protected(v->vlan_p, 1); |
228 | kfree_rcu(p, rcu); | 228 | if (p) |
229 | kfree_rcu(p, rcu); | ||
229 | } | 230 | } |
230 | 231 | ||
231 | static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a, | 232 | static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a, |
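Several of the act_* fixes above (bpf, ipt, pedit, police, simple, skbmod, vlan) correct the same error path: when a later allocation fails, the action index is released only if this call actually created it (ret == ACT_P_CREATED), not based on the ovr flag, and the cleanup handlers now tolerate a NULL parameter block left behind by such a failed init. A toy model of the create/overwrite bookkeeping, with the idr and refcounting reduced to a small table:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ACT_P_CREATED 1

    static int in_use[4];                          /* toy action index table */

    static void idr_release(int idx) { in_use[idx] = 0; }

    static int action_init(int idx, int ovr)
    {
        int ret = 0;
        void *params;

        if (!in_use[idx]) {
            in_use[idx] = 1;                       /* freshly created */
            ret = ACT_P_CREATED;
        } else if (!ovr) {
            return -EEXIST;                        /* exists, no overwrite asked */
        }

        params = malloc(64);                       /* some later allocation */
        if (!params) {
            if (ret == ACT_P_CREATED)              /* undo only our own create */
                idr_release(idx);
            return -ENOMEM;
        }
        free(params);
        return ret;
    }

    int main(void)
    {
        printf("%d\n", action_init(0, 0));         /* 1  (ACT_P_CREATED) */
        printf("%d\n", action_init(0, 0));         /* -17 (-EEXIST) */
        return 0;
    }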
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 2bc1bc23d42e..247b7cc20c13 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -376,17 +376,12 @@ struct tcf_net { | |||
376 | static unsigned int tcf_net_id; | 376 | static unsigned int tcf_net_id; |
377 | 377 | ||
378 | static int tcf_block_insert(struct tcf_block *block, struct net *net, | 378 | static int tcf_block_insert(struct tcf_block *block, struct net *net, |
379 | u32 block_index, struct netlink_ext_ack *extack) | 379 | struct netlink_ext_ack *extack) |
380 | { | 380 | { |
381 | struct tcf_net *tn = net_generic(net, tcf_net_id); | 381 | struct tcf_net *tn = net_generic(net, tcf_net_id); |
382 | int err; | ||
383 | 382 | ||
384 | err = idr_alloc_u32(&tn->idr, block, &block_index, block_index, | 383 | return idr_alloc_u32(&tn->idr, block, &block->index, block->index, |
385 | GFP_KERNEL); | 384 | GFP_KERNEL); |
386 | if (err) | ||
387 | return err; | ||
388 | block->index = block_index; | ||
389 | return 0; | ||
390 | } | 385 | } |
391 | 386 | ||
392 | static void tcf_block_remove(struct tcf_block *block, struct net *net) | 387 | static void tcf_block_remove(struct tcf_block *block, struct net *net) |
@@ -397,6 +392,7 @@ static void tcf_block_remove(struct tcf_block *block, struct net *net) | |||
397 | } | 392 | } |
398 | 393 | ||
399 | static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, | 394 | static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, |
395 | u32 block_index, | ||
400 | struct netlink_ext_ack *extack) | 396 | struct netlink_ext_ack *extack) |
401 | { | 397 | { |
402 | struct tcf_block *block; | 398 | struct tcf_block *block; |
@@ -419,10 +415,13 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, | |||
419 | err = -ENOMEM; | 415 | err = -ENOMEM; |
420 | goto err_chain_create; | 416 | goto err_chain_create; |
421 | } | 417 | } |
422 | block->net = qdisc_net(q); | ||
423 | block->refcnt = 1; | 418 | block->refcnt = 1; |
424 | block->net = net; | 419 | block->net = net; |
425 | block->q = q; | 420 | block->index = block_index; |
421 | |||
422 | /* Don't store q pointer for blocks which are shared */ | ||
423 | if (!tcf_block_shared(block)) | ||
424 | block->q = q; | ||
426 | return block; | 425 | return block; |
427 | 426 | ||
428 | err_chain_create: | 427 | err_chain_create: |
@@ -518,13 +517,12 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, | |||
518 | } | 517 | } |
519 | 518 | ||
520 | if (!block) { | 519 | if (!block) { |
521 | block = tcf_block_create(net, q, extack); | 520 | block = tcf_block_create(net, q, ei->block_index, extack); |
522 | if (IS_ERR(block)) | 521 | if (IS_ERR(block)) |
523 | return PTR_ERR(block); | 522 | return PTR_ERR(block); |
524 | created = true; | 523 | created = true; |
525 | if (ei->block_index) { | 524 | if (tcf_block_shared(block)) { |
526 | err = tcf_block_insert(block, net, | 525 | err = tcf_block_insert(block, net, extack); |
527 | ei->block_index, extack); | ||
528 | if (err) | 526 | if (err) |
529 | goto err_block_insert; | 527 | goto err_block_insert; |
530 | } | 528 | } |
@@ -1399,13 +1397,18 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | |||
1399 | nla_get_u32(tca[TCA_CHAIN]) != chain->index) | 1397 | nla_get_u32(tca[TCA_CHAIN]) != chain->index) |
1400 | continue; | 1398 | continue; |
1401 | if (!tcf_chain_dump(chain, q, parent, skb, cb, | 1399 | if (!tcf_chain_dump(chain, q, parent, skb, cb, |
1402 | index_start, &index)) | 1400 | index_start, &index)) { |
1401 | err = -EMSGSIZE; | ||
1403 | break; | 1402 | break; |
1403 | } | ||
1404 | } | 1404 | } |
1405 | 1405 | ||
1406 | cb->args[0] = index; | 1406 | cb->args[0] = index; |
1407 | 1407 | ||
1408 | out: | 1408 | out: |
1409 | /* If we did no progress, the error (EMSGSIZE) is real */ | ||
1410 | if (skb->len == 0 && err) | ||
1411 | return err; | ||
1409 | return skb->len; | 1412 | return skb->len; |
1410 | } | 1413 | } |
1411 | 1414 | ||
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 6c7601a530e3..ed8b6a24b9e9 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -96,7 +96,7 @@ struct tc_u_hnode { | |||
96 | 96 | ||
97 | struct tc_u_common { | 97 | struct tc_u_common { |
98 | struct tc_u_hnode __rcu *hlist; | 98 | struct tc_u_hnode __rcu *hlist; |
99 | struct tcf_block *block; | 99 | void *ptr; |
100 | int refcnt; | 100 | int refcnt; |
101 | struct idr handle_idr; | 101 | struct idr handle_idr; |
102 | struct hlist_node hnode; | 102 | struct hlist_node hnode; |
@@ -330,9 +330,25 @@ static struct hlist_head *tc_u_common_hash; | |||
330 | #define U32_HASH_SHIFT 10 | 330 | #define U32_HASH_SHIFT 10 |
331 | #define U32_HASH_SIZE (1 << U32_HASH_SHIFT) | 331 | #define U32_HASH_SIZE (1 << U32_HASH_SHIFT) |
332 | 332 | ||
333 | static void *tc_u_common_ptr(const struct tcf_proto *tp) | ||
334 | { | ||
335 | struct tcf_block *block = tp->chain->block; | ||
336 | |||
337 | /* The block sharing is currently supported only | ||
338 | * for classless qdiscs. In that case we use block | ||
339 | * for tc_u_common identification. In case the | ||
340 | * block is not shared, block->q is a valid pointer | ||
341 | * and we can use that. That works for classful qdiscs. | ||
342 | */ | ||
343 | if (tcf_block_shared(block)) | ||
344 | return block; | ||
345 | else | ||
346 | return block->q; | ||
347 | } | ||
348 | |||
333 | static unsigned int tc_u_hash(const struct tcf_proto *tp) | 349 | static unsigned int tc_u_hash(const struct tcf_proto *tp) |
334 | { | 350 | { |
335 | return hash_ptr(tp->chain->block, U32_HASH_SHIFT); | 351 | return hash_ptr(tc_u_common_ptr(tp), U32_HASH_SHIFT); |
336 | } | 352 | } |
337 | 353 | ||
338 | static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp) | 354 | static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp) |
@@ -342,7 +358,7 @@ static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp) | |||
342 | 358 | ||
343 | h = tc_u_hash(tp); | 359 | h = tc_u_hash(tp); |
344 | hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) { | 360 | hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) { |
345 | if (tc->block == tp->chain->block) | 361 | if (tc->ptr == tc_u_common_ptr(tp)) |
346 | return tc; | 362 | return tc; |
347 | } | 363 | } |
348 | return NULL; | 364 | return NULL; |
@@ -371,7 +387,7 @@ static int u32_init(struct tcf_proto *tp) | |||
371 | kfree(root_ht); | 387 | kfree(root_ht); |
372 | return -ENOBUFS; | 388 | return -ENOBUFS; |
373 | } | 389 | } |
374 | tp_c->block = tp->chain->block; | 390 | tp_c->ptr = tc_u_common_ptr(tp); |
375 | INIT_HLIST_NODE(&tp_c->hnode); | 391 | INIT_HLIST_NODE(&tp_c->hnode); |
376 | idr_init(&tp_c->handle_idr); | 392 | idr_init(&tp_c->handle_idr); |
377 | 393 | ||
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 190570f21b20..7e3fbe9cc936 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -106,6 +106,14 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q, | |||
106 | 106 | ||
107 | __skb_queue_tail(&q->skb_bad_txq, skb); | 107 | __skb_queue_tail(&q->skb_bad_txq, skb); |
108 | 108 | ||
109 | if (qdisc_is_percpu_stats(q)) { | ||
110 | qdisc_qstats_cpu_backlog_inc(q, skb); | ||
111 | qdisc_qstats_cpu_qlen_inc(q); | ||
112 | } else { | ||
113 | qdisc_qstats_backlog_inc(q, skb); | ||
114 | q->q.qlen++; | ||
115 | } | ||
116 | |||
109 | if (lock) | 117 | if (lock) |
110 | spin_unlock(lock); | 118 | spin_unlock(lock); |
111 | } | 119 | } |
@@ -196,14 +204,6 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q, | |||
196 | break; | 204 | break; |
197 | if (unlikely(skb_get_queue_mapping(nskb) != mapping)) { | 205 | if (unlikely(skb_get_queue_mapping(nskb) != mapping)) { |
198 | qdisc_enqueue_skb_bad_txq(q, nskb); | 206 | qdisc_enqueue_skb_bad_txq(q, nskb); |
199 | |||
200 | if (qdisc_is_percpu_stats(q)) { | ||
201 | qdisc_qstats_cpu_backlog_inc(q, nskb); | ||
202 | qdisc_qstats_cpu_qlen_inc(q); | ||
203 | } else { | ||
204 | qdisc_qstats_backlog_inc(q, nskb); | ||
205 | q->q.qlen++; | ||
206 | } | ||
207 | break; | 207 | break; |
208 | } | 208 | } |
209 | skb->next = nskb; | 209 | skb->next = nskb; |
@@ -628,6 +628,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, | |||
628 | int band = prio2band[skb->priority & TC_PRIO_MAX]; | 628 | int band = prio2band[skb->priority & TC_PRIO_MAX]; |
629 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); | 629 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); |
630 | struct skb_array *q = band2list(priv, band); | 630 | struct skb_array *q = band2list(priv, band); |
631 | unsigned int pkt_len = qdisc_pkt_len(skb); | ||
631 | int err; | 632 | int err; |
632 | 633 | ||
633 | err = skb_array_produce(q, skb); | 634 | err = skb_array_produce(q, skb); |
@@ -636,7 +637,10 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, | |||
636 | return qdisc_drop_cpu(skb, qdisc, to_free); | 637 | return qdisc_drop_cpu(skb, qdisc, to_free); |
637 | 638 | ||
638 | qdisc_qstats_cpu_qlen_inc(qdisc); | 639 | qdisc_qstats_cpu_qlen_inc(qdisc); |
639 | qdisc_qstats_cpu_backlog_inc(qdisc, skb); | 640 | /* Note: skb can not be used after skb_array_produce(), |
641 | * so we better not use qdisc_qstats_cpu_backlog_inc() | ||
642 | */ | ||
643 | this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len); | ||
640 | return NET_XMIT_SUCCESS; | 644 | return NET_XMIT_SUCCESS; |
641 | } | 645 | } |
642 | 646 | ||
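The pfifo_fast hunk records qdisc_pkt_len(skb) before calling skb_array_produce(): once the skb is in the array another CPU may already be consuming and freeing it, so the length has to be copied out first and the per-CPU backlog updated from that copy. The same rule in miniature, with a produce() stand-in that frees the packet immediately:

    #include <stdio.h>
    #include <stdlib.h>

    struct pkt { unsigned int len; };

    /* stand-in for handing the packet to another context */
    static int produce(struct pkt *p)
    {
        free(p);                         /* consumer may free it at once */
        return 0;
    }

    int main(void)
    {
        struct pkt *p = malloc(sizeof(*p));
        unsigned int backlog = 0, pkt_len;

        if (!p)
            return 1;
        p->len = 1500;
        pkt_len = p->len;                /* copy out before the handoff */
        if (produce(p) == 0)
            backlog += pkt_len;          /* never dereference p here */
        printf("backlog=%u\n", backlog);
        return 0;
    }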
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 7c179addebcd..7d6801fc5340 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -509,7 +509,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
509 | } | 509 | } |
510 | 510 | ||
511 | if (unlikely(sch->q.qlen >= sch->limit)) | 511 | if (unlikely(sch->q.qlen >= sch->limit)) |
512 | return qdisc_drop(skb, sch, to_free); | 512 | return qdisc_drop_all(skb, sch, to_free); |
513 | 513 | ||
514 | qdisc_qstats_backlog_inc(sch, skb); | 514 | qdisc_qstats_backlog_inc(sch, skb); |
515 | 515 | ||
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 229172d509cc..03225a8df973 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -188,7 +188,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
188 | int ret; | 188 | int ret; |
189 | 189 | ||
190 | if (qdisc_pkt_len(skb) > q->max_size) { | 190 | if (qdisc_pkt_len(skb) > q->max_size) { |
191 | if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size) | 191 | if (skb_is_gso(skb) && |
192 | skb_gso_validate_mac_len(skb, q->max_size)) | ||
192 | return tbf_segment(skb, sch, to_free); | 193 | return tbf_segment(skb, sch, to_free); |
193 | return qdisc_drop(skb, sch, to_free); | 194 | return qdisc_drop(skb, sch, to_free); |
194 | } | 195 | } |
diff --git a/net/sctp/debug.c b/net/sctp/debug.c index 291c97b07058..8f6c2e8c0953 100644 --- a/net/sctp/debug.c +++ b/net/sctp/debug.c | |||
@@ -81,6 +81,12 @@ const char *sctp_cname(const union sctp_subtype cid) | |||
81 | case SCTP_CID_RECONF: | 81 | case SCTP_CID_RECONF: |
82 | return "RECONF"; | 82 | return "RECONF"; |
83 | 83 | ||
84 | case SCTP_CID_I_DATA: | ||
85 | return "I_DATA"; | ||
86 | |||
87 | case SCTP_CID_I_FWD_TSN: | ||
88 | return "I_FWD_TSN"; | ||
89 | |||
84 | default: | 90 | default: |
85 | break; | 91 | break; |
86 | } | 92 | } |
diff --git a/net/sctp/input.c b/net/sctp/input.c index 141c9c466ec1..b381d78548ac 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -106,6 +106,7 @@ int sctp_rcv(struct sk_buff *skb) | |||
106 | int family; | 106 | int family; |
107 | struct sctp_af *af; | 107 | struct sctp_af *af; |
108 | struct net *net = dev_net(skb->dev); | 108 | struct net *net = dev_net(skb->dev); |
109 | bool is_gso = skb_is_gso(skb) && skb_is_gso_sctp(skb); | ||
109 | 110 | ||
110 | if (skb->pkt_type != PACKET_HOST) | 111 | if (skb->pkt_type != PACKET_HOST) |
111 | goto discard_it; | 112 | goto discard_it; |
@@ -123,8 +124,7 @@ int sctp_rcv(struct sk_buff *skb) | |||
123 | * it's better to just linearize it otherwise crc computing | 124 | * it's better to just linearize it otherwise crc computing |
124 | * takes longer. | 125 | * takes longer. |
125 | */ | 126 | */ |
126 | if ((!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) && | 127 | if ((!is_gso && skb_linearize(skb)) || |
127 | skb_linearize(skb)) || | ||
128 | !pskb_may_pull(skb, sizeof(struct sctphdr))) | 128 | !pskb_may_pull(skb, sizeof(struct sctphdr))) |
129 | goto discard_it; | 129 | goto discard_it; |
130 | 130 | ||
@@ -135,7 +135,7 @@ int sctp_rcv(struct sk_buff *skb) | |||
135 | if (skb_csum_unnecessary(skb)) | 135 | if (skb_csum_unnecessary(skb)) |
136 | __skb_decr_checksum_unnecessary(skb); | 136 | __skb_decr_checksum_unnecessary(skb); |
137 | else if (!sctp_checksum_disable && | 137 | else if (!sctp_checksum_disable && |
138 | !(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) && | 138 | !is_gso && |
139 | sctp_rcv_checksum(net, skb) < 0) | 139 | sctp_rcv_checksum(net, skb) < 0) |
140 | goto discard_it; | 140 | goto discard_it; |
141 | skb->csum_valid = 1; | 141 | skb->csum_valid = 1; |
@@ -897,15 +897,12 @@ int sctp_hash_transport(struct sctp_transport *t) | |||
897 | rhl_for_each_entry_rcu(transport, tmp, list, node) | 897 | rhl_for_each_entry_rcu(transport, tmp, list, node) |
898 | if (transport->asoc->ep == t->asoc->ep) { | 898 | if (transport->asoc->ep == t->asoc->ep) { |
899 | rcu_read_unlock(); | 899 | rcu_read_unlock(); |
900 | err = -EEXIST; | 900 | return -EEXIST; |
901 | goto out; | ||
902 | } | 901 | } |
903 | rcu_read_unlock(); | 902 | rcu_read_unlock(); |
904 | 903 | ||
905 | err = rhltable_insert_key(&sctp_transport_hashtable, &arg, | 904 | err = rhltable_insert_key(&sctp_transport_hashtable, &arg, |
906 | &t->node, sctp_hash_params); | 905 | &t->node, sctp_hash_params); |
907 | |||
908 | out: | ||
909 | if (err) | 906 | if (err) |
910 | pr_err_once("insert transport fail, errno %d\n", err); | 907 | pr_err_once("insert transport fail, errno %d\n", err); |
911 | 908 | ||
@@ -1221,7 +1218,7 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net, | |||
1221 | * issue as packets hitting this are mostly INIT or INIT-ACK and | 1218 | * issue as packets hitting this are mostly INIT or INIT-ACK and |
1222 | * those cannot be on GSO-style anyway. | 1219 | * those cannot be on GSO-style anyway. |
1223 | */ | 1220 | */ |
1224 | if ((skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) | 1221 | if (skb_is_gso(skb) && skb_is_gso_sctp(skb)) |
1225 | return NULL; | 1222 | return NULL; |
1226 | 1223 | ||
1227 | ch = (struct sctp_chunkhdr *)skb->data; | 1224 | ch = (struct sctp_chunkhdr *)skb->data; |
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index 48392552ee7c..23ebc5318edc 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c | |||
@@ -170,7 +170,7 @@ next_chunk: | |||
170 | 170 | ||
171 | chunk = list_entry(entry, struct sctp_chunk, list); | 171 | chunk = list_entry(entry, struct sctp_chunk, list); |
172 | 172 | ||
173 | if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) { | 173 | if (skb_is_gso(chunk->skb) && skb_is_gso_sctp(chunk->skb)) { |
174 | /* GSO-marked skbs but without frags, handle | 174 | /* GSO-marked skbs but without frags, handle |
175 | * them normally | 175 | * them normally |
176 | */ | 176 | */ |
diff --git a/net/sctp/offload.c b/net/sctp/offload.c index 35bc7106d182..123e9f2dc226 100644 --- a/net/sctp/offload.c +++ b/net/sctp/offload.c | |||
@@ -45,7 +45,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb, | |||
45 | struct sk_buff *segs = ERR_PTR(-EINVAL); | 45 | struct sk_buff *segs = ERR_PTR(-EINVAL); |
46 | struct sctphdr *sh; | 46 | struct sctphdr *sh; |
47 | 47 | ||
48 | if (!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP)) | 48 | if (!skb_is_gso_sctp(skb)) |
49 | goto out; | 49 | goto out; |
50 | 50 | ||
51 | sh = sctp_hdr(skb); | 51 | sh = sctp_hdr(skb); |
diff --git a/net/sctp/stream.c b/net/sctp/stream.c index cedf672487f9..f799043abec9 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * | 6 | * |
7 | * This file is part of the SCTP kernel implementation | 7 | * This file is part of the SCTP kernel implementation |
8 | * | 8 | * |
9 | * These functions manipulate sctp tsn mapping array. | 9 | * This file contains sctp stream manipulation primitives and helpers. |
10 | * | 10 | * |
11 | * This SCTP implementation is free software; | 11 | * This SCTP implementation is free software; |
12 | * you can redistribute it and/or modify it under the terms of | 12 | * you can redistribute it and/or modify it under the terms of |
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c index 8c7cf8f08711..d3764c181299 100644 --- a/net/sctp/stream_interleave.c +++ b/net/sctp/stream_interleave.c | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This file is part of the SCTP kernel implementation | 4 | * This file is part of the SCTP kernel implementation |
5 | * | 5 | * |
6 | * These functions manipulate sctp stream queue/scheduling. | 6 | * These functions implement sctp stream message interleaving, mostly |
7 | * including I-DATA and I-FORWARD-TSN chunks process. | ||
7 | * | 8 | * |
8 | * This SCTP implementation is free software; | 9 | * This SCTP implementation is free software; |
9 | * you can redistribute it and/or modify it under the terms of | 10 | * you can redistribute it and/or modify it under the terms of |
@@ -954,12 +955,8 @@ static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, | |||
954 | __u32 freed = 0; | 955 | __u32 freed = 0; |
955 | __u16 needed; | 956 | __u16 needed; |
956 | 957 | ||
957 | if (chunk) { | 958 | needed = ntohs(chunk->chunk_hdr->length) - |
958 | needed = ntohs(chunk->chunk_hdr->length); | 959 | sizeof(struct sctp_idata_chunk); |
959 | needed -= sizeof(struct sctp_idata_chunk); | ||
960 | } else { | ||
961 | needed = SCTP_DEFAULT_MAXWINDOW; | ||
962 | } | ||
963 | 960 | ||
964 | if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) { | 961 | if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) { |
965 | freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed); | 962 | freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed); |
@@ -971,9 +968,8 @@ static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, | |||
971 | needed); | 968 | needed); |
972 | } | 969 | } |
973 | 970 | ||
974 | if (chunk && freed >= needed) | 971 | if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0) |
975 | if (sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0) | 972 | sctp_intl_start_pd(ulpq, gfp); |
976 | sctp_intl_start_pd(ulpq, gfp); | ||
977 | 973 | ||
978 | sk_mem_reclaim(asoc->base.sk); | 974 | sk_mem_reclaim(asoc->base.sk); |
979 | } | 975 | } |
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index da1a5cdefd13..1e0d780855c3 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
@@ -978,10 +978,6 @@ out: | |||
978 | lsmc->clcsock = NULL; | 978 | lsmc->clcsock = NULL; |
979 | } | 979 | } |
980 | release_sock(lsk); | 980 | release_sock(lsk); |
981 | /* no more listening, wake up smc_close_wait_listen_clcsock and | ||
982 | * accept | ||
983 | */ | ||
984 | lsk->sk_state_change(lsk); | ||
985 | sock_put(&lsmc->sk); /* sock_hold in smc_listen */ | 981 | sock_put(&lsmc->sk); /* sock_hold in smc_listen */ |
986 | } | 982 | } |
987 | 983 | ||
@@ -1406,8 +1402,10 @@ static int smc_create(struct net *net, struct socket *sock, int protocol, | |||
1406 | smc->use_fallback = false; /* assume rdma capability first */ | 1402 | smc->use_fallback = false; /* assume rdma capability first */ |
1407 | rc = sock_create_kern(net, PF_INET, SOCK_STREAM, | 1403 | rc = sock_create_kern(net, PF_INET, SOCK_STREAM, |
1408 | IPPROTO_TCP, &smc->clcsock); | 1404 | IPPROTO_TCP, &smc->clcsock); |
1409 | if (rc) | 1405 | if (rc) { |
1410 | sk_common_release(sk); | 1406 | sk_common_release(sk); |
1407 | goto out; | ||
1408 | } | ||
1411 | smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE); | 1409 | smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE); |
1412 | smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE); | 1410 | smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE); |
1413 | 1411 | ||
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index 3cd086e5bd28..b42395d24cba 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c | |||
@@ -269,7 +269,7 @@ static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf) | |||
269 | 269 | ||
270 | if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved)) | 270 | if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved)) |
271 | return; /* short message */ | 271 | return; /* short message */ |
272 | if (cdc->len != sizeof(*cdc)) | 272 | if (cdc->len != SMC_WR_TX_SIZE) |
273 | return; /* invalid message */ | 273 | return; /* invalid message */ |
274 | smc_cdc_msg_recv(cdc, link, wc->wr_id); | 274 | smc_cdc_msg_recv(cdc, link, wc->wr_id); |
275 | } | 275 | } |
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index e339c0186dcf..fa41d9881741 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c | |||
@@ -30,27 +30,6 @@ static void smc_close_cleanup_listen(struct sock *parent) | |||
30 | smc_close_non_accepted(sk); | 30 | smc_close_non_accepted(sk); |
31 | } | 31 | } |
32 | 32 | ||
33 | static void smc_close_wait_listen_clcsock(struct smc_sock *smc) | ||
34 | { | ||
35 | DEFINE_WAIT_FUNC(wait, woken_wake_function); | ||
36 | struct sock *sk = &smc->sk; | ||
37 | signed long timeout; | ||
38 | |||
39 | timeout = SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME; | ||
40 | add_wait_queue(sk_sleep(sk), &wait); | ||
41 | do { | ||
42 | release_sock(sk); | ||
43 | if (smc->clcsock) | ||
44 | timeout = wait_woken(&wait, TASK_UNINTERRUPTIBLE, | ||
45 | timeout); | ||
46 | sched_annotate_sleep(); | ||
47 | lock_sock(sk); | ||
48 | if (!smc->clcsock) | ||
49 | break; | ||
50 | } while (timeout); | ||
51 | remove_wait_queue(sk_sleep(sk), &wait); | ||
52 | } | ||
53 | |||
54 | /* wait for sndbuf data being transmitted */ | 33 | /* wait for sndbuf data being transmitted */ |
55 | static void smc_close_stream_wait(struct smc_sock *smc, long timeout) | 34 | static void smc_close_stream_wait(struct smc_sock *smc, long timeout) |
56 | { | 35 | { |
@@ -204,9 +183,11 @@ again: | |||
204 | rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR); | 183 | rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR); |
205 | /* wake up kernel_accept of smc_tcp_listen_worker */ | 184 | /* wake up kernel_accept of smc_tcp_listen_worker */ |
206 | smc->clcsock->sk->sk_data_ready(smc->clcsock->sk); | 185 | smc->clcsock->sk->sk_data_ready(smc->clcsock->sk); |
207 | smc_close_wait_listen_clcsock(smc); | ||
208 | } | 186 | } |
209 | smc_close_cleanup_listen(sk); | 187 | smc_close_cleanup_listen(sk); |
188 | release_sock(sk); | ||
189 | flush_work(&smc->tcp_listen_work); | ||
190 | lock_sock(sk); | ||
210 | break; | 191 | break; |
211 | case SMC_ACTIVE: | 192 | case SMC_ACTIVE: |
212 | smc_close_stream_wait(smc, timeout); | 193 | smc_close_stream_wait(smc, timeout); |
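The smc_close.c change drops the hand-rolled clcsock wait loop and instead flushes the TCP listen worker directly; the sock lock is released around the flush because the worker takes lock_sock() itself. A minimal sketch of that pattern, with a hypothetical helper name:

static void drain_listen_work(struct sock *sk, struct work_struct *work)
{
        release_sock(sk);       /* the worker acquires the sock lock */
        flush_work(work);       /* wait for any queued or running instance */
        lock_sock(sk);
}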
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 2424c7100aaf..645dd226177b 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c | |||
@@ -177,6 +177,7 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr, | |||
177 | 177 | ||
178 | lnk = &lgr->lnk[SMC_SINGLE_LINK]; | 178 | lnk = &lgr->lnk[SMC_SINGLE_LINK]; |
179 | /* initialize link */ | 179 | /* initialize link */ |
180 | lnk->link_id = SMC_SINGLE_LINK; | ||
180 | lnk->smcibdev = smcibdev; | 181 | lnk->smcibdev = smcibdev; |
181 | lnk->ibport = ibport; | 182 | lnk->ibport = ibport; |
182 | lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu; | 183 | lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu; |
@@ -465,7 +466,7 @@ create: | |||
465 | rc = smc_link_determine_gid(conn->lgr); | 466 | rc = smc_link_determine_gid(conn->lgr); |
466 | } | 467 | } |
467 | conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE; | 468 | conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE; |
468 | conn->local_tx_ctrl.len = sizeof(struct smc_cdc_msg); | 469 | conn->local_tx_ctrl.len = SMC_WR_TX_SIZE; |
469 | #ifndef KERNEL_HAS_ATOMIC64 | 470 | #ifndef KERNEL_HAS_ATOMIC64 |
470 | spin_lock_init(&conn->acurs_lock); | 471 | spin_lock_init(&conn->acurs_lock); |
471 | #endif | 472 | #endif |
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index 92fe4cc8c82c..b4aa4fcedb96 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c | |||
@@ -92,7 +92,7 @@ int smc_llc_send_confirm_link(struct smc_link *link, u8 mac[], | |||
92 | memcpy(confllc->sender_mac, mac, ETH_ALEN); | 92 | memcpy(confllc->sender_mac, mac, ETH_ALEN); |
93 | memcpy(confllc->sender_gid, gid, SMC_GID_SIZE); | 93 | memcpy(confllc->sender_gid, gid, SMC_GID_SIZE); |
94 | hton24(confllc->sender_qp_num, link->roce_qp->qp_num); | 94 | hton24(confllc->sender_qp_num, link->roce_qp->qp_num); |
95 | /* confllc->link_num = SMC_SINGLE_LINK; already done by memset above */ | 95 | confllc->link_num = link->link_id; |
96 | memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE); | 96 | memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE); |
97 | confllc->max_links = SMC_LINKS_PER_LGR_MAX; | 97 | confllc->max_links = SMC_LINKS_PER_LGR_MAX; |
98 | /* send llc message */ | 98 | /* send llc message */ |
diff --git a/net/socket.c b/net/socket.c index a93c99b518ca..08847c3b8c39 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -2587,6 +2587,11 @@ void sock_unregister(int family) | |||
2587 | } | 2587 | } |
2588 | EXPORT_SYMBOL(sock_unregister); | 2588 | EXPORT_SYMBOL(sock_unregister); |
2589 | 2589 | ||
2590 | bool sock_is_registered(int family) | ||
2591 | { | ||
2592 | return family < NPROTO && rcu_access_pointer(net_families[family]); | ||
2593 | } | ||
2594 | |||
2590 | static int __init sock_init(void) | 2595 | static int __init sock_init(void) |
2591 | { | 2596 | { |
2592 | int err; | 2597 | int err; |
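sock_is_registered() gives code outside net/socket.c a cheap way to ask whether an address family currently has a protocol family registered. A hypothetical caller (illustrative only, not from this diff) might use it as an early sanity check:

static int check_family_supported(int family)
{
        if (family < 0 || !sock_is_registered(family))
                return -EAFNOSUPPORT;
        return 0;
}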
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index c8001471da6c..3e3dce3d4c63 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c | |||
@@ -813,7 +813,7 @@ err_out: | |||
813 | return err; | 813 | return err; |
814 | } | 814 | } |
815 | 815 | ||
816 | int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) | 816 | int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) |
817 | { | 817 | { |
818 | int err; | 818 | int err; |
819 | char *name; | 819 | char *name; |
@@ -835,20 +835,27 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) | |||
835 | 835 | ||
836 | name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); | 836 | name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); |
837 | 837 | ||
838 | rtnl_lock(); | ||
839 | bearer = tipc_bearer_find(net, name); | 838 | bearer = tipc_bearer_find(net, name); |
840 | if (!bearer) { | 839 | if (!bearer) |
841 | rtnl_unlock(); | ||
842 | return -EINVAL; | 840 | return -EINVAL; |
843 | } | ||
844 | 841 | ||
845 | bearer_disable(net, bearer); | 842 | bearer_disable(net, bearer); |
846 | rtnl_unlock(); | ||
847 | 843 | ||
848 | return 0; | 844 | return 0; |
849 | } | 845 | } |
850 | 846 | ||
851 | int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) | 847 | int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) |
848 | { | ||
849 | int err; | ||
850 | |||
851 | rtnl_lock(); | ||
852 | err = __tipc_nl_bearer_disable(skb, info); | ||
853 | rtnl_unlock(); | ||
854 | |||
855 | return err; | ||
856 | } | ||
857 | |||
858 | int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) | ||
852 | { | 859 | { |
853 | int err; | 860 | int err; |
854 | char *bearer; | 861 | char *bearer; |
@@ -890,15 +897,18 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) | |||
890 | prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); | 897 | prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); |
891 | } | 898 | } |
892 | 899 | ||
900 | return tipc_enable_bearer(net, bearer, domain, prio, attrs); | ||
901 | } | ||
902 | |||
903 | int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) | ||
904 | { | ||
905 | int err; | ||
906 | |||
893 | rtnl_lock(); | 907 | rtnl_lock(); |
894 | err = tipc_enable_bearer(net, bearer, domain, prio, attrs); | 908 | err = __tipc_nl_bearer_enable(skb, info); |
895 | if (err) { | ||
896 | rtnl_unlock(); | ||
897 | return err; | ||
898 | } | ||
899 | rtnl_unlock(); | 909 | rtnl_unlock(); |
900 | 910 | ||
901 | return 0; | 911 | return err; |
902 | } | 912 | } |
903 | 913 | ||
904 | int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info) | 914 | int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info) |
@@ -944,7 +954,7 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info) | |||
944 | return 0; | 954 | return 0; |
945 | } | 955 | } |
946 | 956 | ||
947 | int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) | 957 | int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) |
948 | { | 958 | { |
949 | int err; | 959 | int err; |
950 | char *name; | 960 | char *name; |
@@ -965,22 +975,17 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) | |||
965 | return -EINVAL; | 975 | return -EINVAL; |
966 | name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); | 976 | name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); |
967 | 977 | ||
968 | rtnl_lock(); | ||
969 | b = tipc_bearer_find(net, name); | 978 | b = tipc_bearer_find(net, name); |
970 | if (!b) { | 979 | if (!b) |
971 | rtnl_unlock(); | ||
972 | return -EINVAL; | 980 | return -EINVAL; |
973 | } | ||
974 | 981 | ||
975 | if (attrs[TIPC_NLA_BEARER_PROP]) { | 982 | if (attrs[TIPC_NLA_BEARER_PROP]) { |
976 | struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; | 983 | struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; |
977 | 984 | ||
978 | err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP], | 985 | err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP], |
979 | props); | 986 | props); |
980 | if (err) { | 987 | if (err) |
981 | rtnl_unlock(); | ||
982 | return err; | 988 | return err; |
983 | } | ||
984 | 989 | ||
985 | if (props[TIPC_NLA_PROP_TOL]) | 990 | if (props[TIPC_NLA_PROP_TOL]) |
986 | b->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]); | 991 | b->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]); |
@@ -989,11 +994,21 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) | |||
989 | if (props[TIPC_NLA_PROP_WIN]) | 994 | if (props[TIPC_NLA_PROP_WIN]) |
990 | b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]); | 995 | b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]); |
991 | } | 996 | } |
992 | rtnl_unlock(); | ||
993 | 997 | ||
994 | return 0; | 998 | return 0; |
995 | } | 999 | } |
996 | 1000 | ||
1001 | int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) | ||
1002 | { | ||
1003 | int err; | ||
1004 | |||
1005 | rtnl_lock(); | ||
1006 | err = __tipc_nl_bearer_set(skb, info); | ||
1007 | rtnl_unlock(); | ||
1008 | |||
1009 | return err; | ||
1010 | } | ||
1011 | |||
997 | static int __tipc_nl_add_media(struct tipc_nl_msg *msg, | 1012 | static int __tipc_nl_add_media(struct tipc_nl_msg *msg, |
998 | struct tipc_media *media, int nlflags) | 1013 | struct tipc_media *media, int nlflags) |
999 | { | 1014 | { |
@@ -1115,7 +1130,7 @@ err_out: | |||
1115 | return err; | 1130 | return err; |
1116 | } | 1131 | } |
1117 | 1132 | ||
1118 | int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) | 1133 | int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) |
1119 | { | 1134 | { |
1120 | int err; | 1135 | int err; |
1121 | char *name; | 1136 | char *name; |
@@ -1133,22 +1148,17 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) | |||
1133 | return -EINVAL; | 1148 | return -EINVAL; |
1134 | name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]); | 1149 | name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]); |
1135 | 1150 | ||
1136 | rtnl_lock(); | ||
1137 | m = tipc_media_find(name); | 1151 | m = tipc_media_find(name); |
1138 | if (!m) { | 1152 | if (!m) |
1139 | rtnl_unlock(); | ||
1140 | return -EINVAL; | 1153 | return -EINVAL; |
1141 | } | ||
1142 | 1154 | ||
1143 | if (attrs[TIPC_NLA_MEDIA_PROP]) { | 1155 | if (attrs[TIPC_NLA_MEDIA_PROP]) { |
1144 | struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; | 1156 | struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; |
1145 | 1157 | ||
1146 | err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_MEDIA_PROP], | 1158 | err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_MEDIA_PROP], |
1147 | props); | 1159 | props); |
1148 | if (err) { | 1160 | if (err) |
1149 | rtnl_unlock(); | ||
1150 | return err; | 1161 | return err; |
1151 | } | ||
1152 | 1162 | ||
1153 | if (props[TIPC_NLA_PROP_TOL]) | 1163 | if (props[TIPC_NLA_PROP_TOL]) |
1154 | m->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]); | 1164 | m->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]); |
@@ -1157,7 +1167,17 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) | |||
1157 | if (props[TIPC_NLA_PROP_WIN]) | 1167 | if (props[TIPC_NLA_PROP_WIN]) |
1158 | m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]); | 1168 | m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]); |
1159 | } | 1169 | } |
1160 | rtnl_unlock(); | ||
1161 | 1170 | ||
1162 | return 0; | 1171 | return 0; |
1163 | } | 1172 | } |
1173 | |||
1174 | int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) | ||
1175 | { | ||
1176 | int err; | ||
1177 | |||
1178 | rtnl_lock(); | ||
1179 | err = __tipc_nl_media_set(skb, info); | ||
1180 | rtnl_unlock(); | ||
1181 | |||
1182 | return err; | ||
1183 | } | ||
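The tipc_nl_bearer_*() rework above, together with the matching net.c and netlink_compat.c changes further down, follows one pattern: the real work moves into a lockless __ variant, and the exported genl handler becomes a thin wrapper that takes the RTNL around it, so the compat path can call the __ variant while already holding the lock. Schematically, with hypothetical names:

int __foo_nl_doit(struct sk_buff *skb, struct genl_info *info)
{
        /* parse attributes and update state; the caller holds the RTNL */
        return 0;
}

int foo_nl_doit(struct sk_buff *skb, struct genl_info *info)
{
        int err;

        rtnl_lock();
        err = __foo_nl_doit(skb, info);
        rtnl_unlock();

        return err;
}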
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index 42d6eeeb646d..a53613d95bc9 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h | |||
@@ -188,15 +188,19 @@ extern struct tipc_media udp_media_info; | |||
188 | #endif | 188 | #endif |
189 | 189 | ||
190 | int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info); | 190 | int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info); |
191 | int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info); | ||
191 | int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info); | 192 | int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info); |
193 | int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info); | ||
192 | int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb); | 194 | int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb); |
193 | int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info); | 195 | int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info); |
194 | int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info); | 196 | int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info); |
197 | int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info); | ||
195 | int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info); | 198 | int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info); |
196 | 199 | ||
197 | int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb); | 200 | int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb); |
198 | int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info); | 201 | int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info); |
199 | int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info); | 202 | int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info); |
203 | int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info); | ||
200 | 204 | ||
201 | int tipc_media_set_priority(const char *name, u32 new_value); | 205 | int tipc_media_set_priority(const char *name, u32 new_value); |
202 | int tipc_media_set_window(const char *name, u32 new_value); | 206 | int tipc_media_set_window(const char *name, u32 new_value); |
diff --git a/net/tipc/group.c b/net/tipc/group.c index 122162a31816..04e516d18054 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c | |||
@@ -189,6 +189,7 @@ struct tipc_group *tipc_group_create(struct net *net, u32 portid, | |||
189 | grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK; | 189 | grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK; |
190 | grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS; | 190 | grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS; |
191 | grp->open = group_is_open; | 191 | grp->open = group_is_open; |
192 | *grp->open = false; | ||
192 | filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE; | 193 | filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE; |
193 | if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0, | 194 | if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0, |
194 | filter, &grp->subid)) | 195 | filter, &grp->subid)) |
diff --git a/net/tipc/net.c b/net/tipc/net.c index 719c5924b638..1a2fde0d6f61 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
@@ -200,7 +200,7 @@ out: | |||
200 | return skb->len; | 200 | return skb->len; |
201 | } | 201 | } |
202 | 202 | ||
203 | int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) | 203 | int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) |
204 | { | 204 | { |
205 | struct net *net = sock_net(skb->sk); | 205 | struct net *net = sock_net(skb->sk); |
206 | struct tipc_net *tn = net_generic(net, tipc_net_id); | 206 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
@@ -241,10 +241,19 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) | |||
241 | if (!tipc_addr_node_valid(addr)) | 241 | if (!tipc_addr_node_valid(addr)) |
242 | return -EINVAL; | 242 | return -EINVAL; |
243 | 243 | ||
244 | rtnl_lock(); | ||
245 | tipc_net_start(net, addr); | 244 | tipc_net_start(net, addr); |
246 | rtnl_unlock(); | ||
247 | } | 245 | } |
248 | 246 | ||
249 | return 0; | 247 | return 0; |
250 | } | 248 | } |
249 | |||
250 | int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) | ||
251 | { | ||
252 | int err; | ||
253 | |||
254 | rtnl_lock(); | ||
255 | err = __tipc_nl_net_set(skb, info); | ||
256 | rtnl_unlock(); | ||
257 | |||
258 | return err; | ||
259 | } | ||
diff --git a/net/tipc/net.h b/net/tipc/net.h index c7c254902873..c0306aa2374b 100644 --- a/net/tipc/net.h +++ b/net/tipc/net.h | |||
@@ -47,5 +47,6 @@ void tipc_net_stop(struct net *net); | |||
47 | 47 | ||
48 | int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); | 48 | int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); |
49 | int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); | 49 | int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); |
50 | int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); | ||
50 | 51 | ||
51 | #endif | 52 | #endif |
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index e48f0b2c01b9..4492cda45566 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c | |||
@@ -285,10 +285,6 @@ static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd, | |||
285 | if (!trans_buf) | 285 | if (!trans_buf) |
286 | return -ENOMEM; | 286 | return -ENOMEM; |
287 | 287 | ||
288 | err = (*cmd->transcode)(cmd, trans_buf, msg); | ||
289 | if (err) | ||
290 | goto trans_out; | ||
291 | |||
292 | attrbuf = kmalloc((tipc_genl_family.maxattr + 1) * | 288 | attrbuf = kmalloc((tipc_genl_family.maxattr + 1) * |
293 | sizeof(struct nlattr *), GFP_KERNEL); | 289 | sizeof(struct nlattr *), GFP_KERNEL); |
294 | if (!attrbuf) { | 290 | if (!attrbuf) { |
@@ -296,27 +292,34 @@ static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd, | |||
296 | goto trans_out; | 292 | goto trans_out; |
297 | } | 293 | } |
298 | 294 | ||
299 | err = nla_parse(attrbuf, tipc_genl_family.maxattr, | ||
300 | (const struct nlattr *)trans_buf->data, | ||
301 | trans_buf->len, NULL, NULL); | ||
302 | if (err) | ||
303 | goto parse_out; | ||
304 | |||
305 | doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); | 295 | doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
306 | if (!doit_buf) { | 296 | if (!doit_buf) { |
307 | err = -ENOMEM; | 297 | err = -ENOMEM; |
308 | goto parse_out; | 298 | goto attrbuf_out; |
309 | } | 299 | } |
310 | 300 | ||
311 | doit_buf->sk = msg->dst_sk; | ||
312 | |||
313 | memset(&info, 0, sizeof(info)); | 301 | memset(&info, 0, sizeof(info)); |
314 | info.attrs = attrbuf; | 302 | info.attrs = attrbuf; |
315 | 303 | ||
304 | rtnl_lock(); | ||
305 | err = (*cmd->transcode)(cmd, trans_buf, msg); | ||
306 | if (err) | ||
307 | goto doit_out; | ||
308 | |||
309 | err = nla_parse(attrbuf, tipc_genl_family.maxattr, | ||
310 | (const struct nlattr *)trans_buf->data, | ||
311 | trans_buf->len, NULL, NULL); | ||
312 | if (err) | ||
313 | goto doit_out; | ||
314 | |||
315 | doit_buf->sk = msg->dst_sk; | ||
316 | |||
316 | err = (*cmd->doit)(doit_buf, &info); | 317 | err = (*cmd->doit)(doit_buf, &info); |
318 | doit_out: | ||
319 | rtnl_unlock(); | ||
317 | 320 | ||
318 | kfree_skb(doit_buf); | 321 | kfree_skb(doit_buf); |
319 | parse_out: | 322 | attrbuf_out: |
320 | kfree(attrbuf); | 323 | kfree(attrbuf); |
321 | trans_out: | 324 | trans_out: |
322 | kfree_skb(trans_buf); | 325 | kfree_skb(trans_buf); |
@@ -722,13 +725,13 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd, | |||
722 | 725 | ||
723 | media = tipc_media_find(lc->name); | 726 | media = tipc_media_find(lc->name); |
724 | if (media) { | 727 | if (media) { |
725 | cmd->doit = &tipc_nl_media_set; | 728 | cmd->doit = &__tipc_nl_media_set; |
726 | return tipc_nl_compat_media_set(skb, msg); | 729 | return tipc_nl_compat_media_set(skb, msg); |
727 | } | 730 | } |
728 | 731 | ||
729 | bearer = tipc_bearer_find(msg->net, lc->name); | 732 | bearer = tipc_bearer_find(msg->net, lc->name); |
730 | if (bearer) { | 733 | if (bearer) { |
731 | cmd->doit = &tipc_nl_bearer_set; | 734 | cmd->doit = &__tipc_nl_bearer_set; |
732 | return tipc_nl_compat_bearer_set(skb, msg); | 735 | return tipc_nl_compat_bearer_set(skb, msg); |
733 | } | 736 | } |
734 | 737 | ||
@@ -1089,12 +1092,12 @@ static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg) | |||
1089 | return tipc_nl_compat_dumpit(&dump, msg); | 1092 | return tipc_nl_compat_dumpit(&dump, msg); |
1090 | case TIPC_CMD_ENABLE_BEARER: | 1093 | case TIPC_CMD_ENABLE_BEARER: |
1091 | msg->req_type = TIPC_TLV_BEARER_CONFIG; | 1094 | msg->req_type = TIPC_TLV_BEARER_CONFIG; |
1092 | doit.doit = tipc_nl_bearer_enable; | 1095 | doit.doit = __tipc_nl_bearer_enable; |
1093 | doit.transcode = tipc_nl_compat_bearer_enable; | 1096 | doit.transcode = tipc_nl_compat_bearer_enable; |
1094 | return tipc_nl_compat_doit(&doit, msg); | 1097 | return tipc_nl_compat_doit(&doit, msg); |
1095 | case TIPC_CMD_DISABLE_BEARER: | 1098 | case TIPC_CMD_DISABLE_BEARER: |
1096 | msg->req_type = TIPC_TLV_BEARER_NAME; | 1099 | msg->req_type = TIPC_TLV_BEARER_NAME; |
1097 | doit.doit = tipc_nl_bearer_disable; | 1100 | doit.doit = __tipc_nl_bearer_disable; |
1098 | doit.transcode = tipc_nl_compat_bearer_disable; | 1101 | doit.transcode = tipc_nl_compat_bearer_disable; |
1099 | return tipc_nl_compat_doit(&doit, msg); | 1102 | return tipc_nl_compat_doit(&doit, msg); |
1100 | case TIPC_CMD_SHOW_LINK_STATS: | 1103 | case TIPC_CMD_SHOW_LINK_STATS: |
@@ -1148,12 +1151,12 @@ static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg) | |||
1148 | return tipc_nl_compat_dumpit(&dump, msg); | 1151 | return tipc_nl_compat_dumpit(&dump, msg); |
1149 | case TIPC_CMD_SET_NODE_ADDR: | 1152 | case TIPC_CMD_SET_NODE_ADDR: |
1150 | msg->req_type = TIPC_TLV_NET_ADDR; | 1153 | msg->req_type = TIPC_TLV_NET_ADDR; |
1151 | doit.doit = tipc_nl_net_set; | 1154 | doit.doit = __tipc_nl_net_set; |
1152 | doit.transcode = tipc_nl_compat_net_set; | 1155 | doit.transcode = tipc_nl_compat_net_set; |
1153 | return tipc_nl_compat_doit(&doit, msg); | 1156 | return tipc_nl_compat_doit(&doit, msg); |
1154 | case TIPC_CMD_SET_NETID: | 1157 | case TIPC_CMD_SET_NETID: |
1155 | msg->req_type = TIPC_TLV_UNSIGNED; | 1158 | msg->req_type = TIPC_TLV_UNSIGNED; |
1156 | doit.doit = tipc_nl_net_set; | 1159 | doit.doit = __tipc_nl_net_set; |
1157 | doit.transcode = tipc_nl_compat_net_set; | 1160 | doit.transcode = tipc_nl_compat_net_set; |
1158 | return tipc_nl_compat_doit(&doit, msg); | 1161 | return tipc_nl_compat_doit(&doit, msg); |
1159 | case TIPC_CMD_GET_NETID: | 1162 | case TIPC_CMD_GET_NETID: |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index b0323ec7971e..7dfa9fc99ec3 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -473,6 +473,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock, | |||
473 | sk->sk_write_space = tipc_write_space; | 473 | sk->sk_write_space = tipc_write_space; |
474 | sk->sk_destruct = tipc_sock_destruct; | 474 | sk->sk_destruct = tipc_sock_destruct; |
475 | tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; | 475 | tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; |
476 | tsk->group_is_open = true; | ||
476 | atomic_set(&tsk->dupl_rcvcnt, 0); | 477 | atomic_set(&tsk->dupl_rcvcnt, 0); |
477 | 478 | ||
478 | /* Start out with safe limits until we receive an advertised window */ | 479 | /* Start out with safe limits until we receive an advertised window */ |
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index b0d5fcea47e7..d824d548447e 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c | |||
@@ -46,16 +46,26 @@ MODULE_DESCRIPTION("Transport Layer Security Support"); | |||
46 | MODULE_LICENSE("Dual BSD/GPL"); | 46 | MODULE_LICENSE("Dual BSD/GPL"); |
47 | 47 | ||
48 | enum { | 48 | enum { |
49 | TLSV4, | ||
50 | TLSV6, | ||
51 | TLS_NUM_PROTS, | ||
52 | }; | ||
53 | |||
54 | enum { | ||
49 | TLS_BASE_TX, | 55 | TLS_BASE_TX, |
50 | TLS_SW_TX, | 56 | TLS_SW_TX, |
51 | TLS_NUM_CONFIG, | 57 | TLS_NUM_CONFIG, |
52 | }; | 58 | }; |
53 | 59 | ||
54 | static struct proto tls_prots[TLS_NUM_CONFIG]; | 60 | static struct proto *saved_tcpv6_prot; |
61 | static DEFINE_MUTEX(tcpv6_prot_mutex); | ||
62 | static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG]; | ||
55 | 63 | ||
56 | static inline void update_sk_prot(struct sock *sk, struct tls_context *ctx) | 64 | static inline void update_sk_prot(struct sock *sk, struct tls_context *ctx) |
57 | { | 65 | { |
58 | sk->sk_prot = &tls_prots[ctx->tx_conf]; | 66 | int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4; |
67 | |||
68 | sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf]; | ||
59 | } | 69 | } |
60 | 70 | ||
61 | int wait_on_pending_writer(struct sock *sk, long *timeo) | 71 | int wait_on_pending_writer(struct sock *sk, long *timeo) |
@@ -308,8 +318,11 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, | |||
308 | goto out; | 318 | goto out; |
309 | } | 319 | } |
310 | lock_sock(sk); | 320 | lock_sock(sk); |
311 | memcpy(crypto_info_aes_gcm_128->iv, ctx->iv, | 321 | memcpy(crypto_info_aes_gcm_128->iv, |
322 | ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, | ||
312 | TLS_CIPHER_AES_GCM_128_IV_SIZE); | 323 | TLS_CIPHER_AES_GCM_128_IV_SIZE); |
324 | memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->rec_seq, | ||
325 | TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE); | ||
313 | release_sock(sk); | 326 | release_sock(sk); |
314 | if (copy_to_user(optval, | 327 | if (copy_to_user(optval, |
315 | crypto_info_aes_gcm_128, | 328 | crypto_info_aes_gcm_128, |
@@ -375,7 +388,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval, | |||
375 | rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info)); | 388 | rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info)); |
376 | if (rc) { | 389 | if (rc) { |
377 | rc = -EFAULT; | 390 | rc = -EFAULT; |
378 | goto out; | 391 | goto err_crypto_info; |
379 | } | 392 | } |
380 | 393 | ||
381 | /* check version */ | 394 | /* check version */ |
@@ -450,8 +463,21 @@ static int tls_setsockopt(struct sock *sk, int level, int optname, | |||
450 | return do_tls_setsockopt(sk, optname, optval, optlen); | 463 | return do_tls_setsockopt(sk, optname, optval, optlen); |
451 | } | 464 | } |
452 | 465 | ||
466 | static void build_protos(struct proto *prot, struct proto *base) | ||
467 | { | ||
468 | prot[TLS_BASE_TX] = *base; | ||
469 | prot[TLS_BASE_TX].setsockopt = tls_setsockopt; | ||
470 | prot[TLS_BASE_TX].getsockopt = tls_getsockopt; | ||
471 | prot[TLS_BASE_TX].close = tls_sk_proto_close; | ||
472 | |||
473 | prot[TLS_SW_TX] = prot[TLS_BASE_TX]; | ||
474 | prot[TLS_SW_TX].sendmsg = tls_sw_sendmsg; | ||
475 | prot[TLS_SW_TX].sendpage = tls_sw_sendpage; | ||
476 | } | ||
477 | |||
453 | static int tls_init(struct sock *sk) | 478 | static int tls_init(struct sock *sk) |
454 | { | 479 | { |
480 | int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4; | ||
455 | struct inet_connection_sock *icsk = inet_csk(sk); | 481 | struct inet_connection_sock *icsk = inet_csk(sk); |
456 | struct tls_context *ctx; | 482 | struct tls_context *ctx; |
457 | int rc = 0; | 483 | int rc = 0; |
@@ -476,6 +502,17 @@ static int tls_init(struct sock *sk) | |||
476 | ctx->getsockopt = sk->sk_prot->getsockopt; | 502 | ctx->getsockopt = sk->sk_prot->getsockopt; |
477 | ctx->sk_proto_close = sk->sk_prot->close; | 503 | ctx->sk_proto_close = sk->sk_prot->close; |
478 | 504 | ||
505 | /* Build IPv6 TLS whenever the address of tcpv6_prot changes */ | ||
506 | if (ip_ver == TLSV6 && | ||
507 | unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) { | ||
508 | mutex_lock(&tcpv6_prot_mutex); | ||
509 | if (likely(sk->sk_prot != saved_tcpv6_prot)) { | ||
510 | build_protos(tls_prots[TLSV6], sk->sk_prot); | ||
511 | smp_store_release(&saved_tcpv6_prot, sk->sk_prot); | ||
512 | } | ||
513 | mutex_unlock(&tcpv6_prot_mutex); | ||
514 | } | ||
515 | |||
479 | ctx->tx_conf = TLS_BASE_TX; | 516 | ctx->tx_conf = TLS_BASE_TX; |
480 | update_sk_prot(sk, ctx); | 517 | update_sk_prot(sk, ctx); |
481 | out: | 518 | out: |
@@ -490,21 +527,9 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = { | |||
490 | .init = tls_init, | 527 | .init = tls_init, |
491 | }; | 528 | }; |
492 | 529 | ||
493 | static void build_protos(struct proto *prot, struct proto *base) | ||
494 | { | ||
495 | prot[TLS_BASE_TX] = *base; | ||
496 | prot[TLS_BASE_TX].setsockopt = tls_setsockopt; | ||
497 | prot[TLS_BASE_TX].getsockopt = tls_getsockopt; | ||
498 | prot[TLS_BASE_TX].close = tls_sk_proto_close; | ||
499 | |||
500 | prot[TLS_SW_TX] = prot[TLS_BASE_TX]; | ||
501 | prot[TLS_SW_TX].sendmsg = tls_sw_sendmsg; | ||
502 | prot[TLS_SW_TX].sendpage = tls_sw_sendpage; | ||
503 | } | ||
504 | |||
505 | static int __init tls_register(void) | 530 | static int __init tls_register(void) |
506 | { | 531 | { |
507 | build_protos(tls_prots, &tcp_prot); | 532 | build_protos(tls_prots[TLSV4], &tcp_prot); |
508 | 533 | ||
509 | tcp_register_ulp(&tcp_tls_ulp_ops); | 534 | tcp_register_ulp(&tcp_tls_ulp_ops); |
510 | 535 | ||
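The tls_main.c change keys the proto templates on the address family and builds the IPv6 variants lazily, presumably because tcpv6_prot may live in a module and its address is only known once an IPv6 TLS socket appears. The double-checked publish uses smp_load_acquire()/smp_store_release() on the saved base pointer; stripped to an illustrative skeleton (not the exact code above):

static struct proto *saved_base_prot;
static DEFINE_MUTEX(base_prot_mutex);
static struct proto derived_prots[TLS_NUM_CONFIG];

static void maybe_build_v6_protos(struct sock *sk)
{
        /* fast path: already built against this base proto */
        if (likely(sk->sk_prot == smp_load_acquire(&saved_base_prot)))
                return;

        mutex_lock(&base_prot_mutex);
        if (sk->sk_prot != saved_base_prot) {
                build_protos(derived_prots, sk->sk_prot);
                /* publish only after the table is fully written */
                smp_store_release(&saved_base_prot, sk->sk_prot);
        }
        mutex_unlock(&base_prot_mutex);
}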
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index d545e1d0dea2..2d465bdeccbc 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -1825,7 +1825,7 @@ out: | |||
1825 | } | 1825 | } |
1826 | 1826 | ||
1827 | /* We use paged skbs for stream sockets, and limit occupancy to 32768 | 1827 | /* We use paged skbs for stream sockets, and limit occupancy to 32768 |
1828 | * bytes, and a minimun of a full page. | 1828 | * bytes, and a minimum of a full page. |
1829 | */ | 1829 | */ |
1830 | #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768)) | 1830 | #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768)) |
1831 | 1831 | ||
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index 1abcc4fc4df1..41722046b937 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig | |||
@@ -34,9 +34,10 @@ config CFG80211 | |||
34 | 34 | ||
35 | When built as a module it will be called cfg80211. | 35 | When built as a module it will be called cfg80211. |
36 | 36 | ||
37 | if CFG80211 | ||
38 | |||
37 | config NL80211_TESTMODE | 39 | config NL80211_TESTMODE |
38 | bool "nl80211 testmode command" | 40 | bool "nl80211 testmode command" |
39 | depends on CFG80211 | ||
40 | help | 41 | help |
41 | The nl80211 testmode command helps implementing things like | 42 | The nl80211 testmode command helps implementing things like |
42 | factory calibration or validation tools for wireless chips. | 43 | factory calibration or validation tools for wireless chips. |
@@ -51,7 +52,6 @@ config NL80211_TESTMODE | |||
51 | 52 | ||
52 | config CFG80211_DEVELOPER_WARNINGS | 53 | config CFG80211_DEVELOPER_WARNINGS |
53 | bool "enable developer warnings" | 54 | bool "enable developer warnings" |
54 | depends on CFG80211 | ||
55 | default n | 55 | default n |
56 | help | 56 | help |
57 | This option enables some additional warnings that help | 57 | This option enables some additional warnings that help |
@@ -68,7 +68,7 @@ config CFG80211_DEVELOPER_WARNINGS | |||
68 | 68 | ||
69 | config CFG80211_CERTIFICATION_ONUS | 69 | config CFG80211_CERTIFICATION_ONUS |
70 | bool "cfg80211 certification onus" | 70 | bool "cfg80211 certification onus" |
71 | depends on CFG80211 && EXPERT | 71 | depends on EXPERT |
72 | default n | 72 | default n |
73 | ---help--- | 73 | ---help--- |
74 | You should disable this option unless you are both capable | 74 | You should disable this option unless you are both capable |
@@ -159,7 +159,6 @@ config CFG80211_REG_RELAX_NO_IR | |||
159 | 159 | ||
160 | config CFG80211_DEFAULT_PS | 160 | config CFG80211_DEFAULT_PS |
161 | bool "enable powersave by default" | 161 | bool "enable powersave by default" |
162 | depends on CFG80211 | ||
163 | default y | 162 | default y |
164 | help | 163 | help |
165 | This option enables powersave mode by default. | 164 | This option enables powersave mode by default. |
@@ -170,7 +169,6 @@ config CFG80211_DEFAULT_PS | |||
170 | 169 | ||
171 | config CFG80211_DEBUGFS | 170 | config CFG80211_DEBUGFS |
172 | bool "cfg80211 DebugFS entries" | 171 | bool "cfg80211 DebugFS entries" |
173 | depends on CFG80211 | ||
174 | depends on DEBUG_FS | 172 | depends on DEBUG_FS |
175 | ---help--- | 173 | ---help--- |
176 | You can enable this if you want debugfs entries for cfg80211. | 174 | You can enable this if you want debugfs entries for cfg80211. |
@@ -180,7 +178,6 @@ config CFG80211_DEBUGFS | |||
180 | config CFG80211_CRDA_SUPPORT | 178 | config CFG80211_CRDA_SUPPORT |
181 | bool "support CRDA" if EXPERT | 179 | bool "support CRDA" if EXPERT |
182 | default y | 180 | default y |
183 | depends on CFG80211 | ||
184 | help | 181 | help |
185 | You should enable this option unless you know for sure you have no | 182 | You should enable this option unless you know for sure you have no |
186 | need for it, for example when using internal regdb (above) or the | 183 | need for it, for example when using internal regdb (above) or the |
@@ -190,7 +187,6 @@ config CFG80211_CRDA_SUPPORT | |||
190 | 187 | ||
191 | config CFG80211_WEXT | 188 | config CFG80211_WEXT |
192 | bool "cfg80211 wireless extensions compatibility" if !CFG80211_WEXT_EXPORT | 189 | bool "cfg80211 wireless extensions compatibility" if !CFG80211_WEXT_EXPORT |
193 | depends on CFG80211 | ||
194 | select WEXT_CORE | 190 | select WEXT_CORE |
195 | default y if CFG80211_WEXT_EXPORT | 191 | default y if CFG80211_WEXT_EXPORT |
196 | help | 192 | help |
@@ -199,11 +195,12 @@ config CFG80211_WEXT | |||
199 | 195 | ||
200 | config CFG80211_WEXT_EXPORT | 196 | config CFG80211_WEXT_EXPORT |
201 | bool | 197 | bool |
202 | depends on CFG80211 | ||
203 | help | 198 | help |
204 | Drivers should select this option if they require cfg80211's | 199 | Drivers should select this option if they require cfg80211's |
205 | wext compatibility symbols to be exported. | 200 | wext compatibility symbols to be exported. |
206 | 201 | ||
202 | endif # CFG80211 | ||
203 | |||
207 | config LIB80211 | 204 | config LIB80211 |
208 | tristate | 205 | tristate |
209 | default n | 206 | default n |
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c index 51aa55618ef7..b12da6ef3c12 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c | |||
@@ -170,9 +170,28 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, | |||
170 | enum nl80211_bss_scan_width scan_width; | 170 | enum nl80211_bss_scan_width scan_width; |
171 | struct ieee80211_supported_band *sband = | 171 | struct ieee80211_supported_band *sband = |
172 | rdev->wiphy.bands[setup->chandef.chan->band]; | 172 | rdev->wiphy.bands[setup->chandef.chan->band]; |
173 | scan_width = cfg80211_chandef_to_scan_width(&setup->chandef); | 173 | |
174 | setup->basic_rates = ieee80211_mandatory_rates(sband, | 174 | if (setup->chandef.chan->band == NL80211_BAND_2GHZ) { |
175 | scan_width); | 175 | int i; |
176 | |||
177 | /* | ||
178 | * Older versions selected the mandatory rates for | ||
179 | * 2.4 GHz as well, but were broken in that only | ||
180 | * 1 Mbps was regarded as a mandatory rate. Keep | ||
181 | * using just 1 Mbps as the default basic rate for | ||
182 | * mesh to be interoperable with older versions. | ||
183 | */ | ||
184 | for (i = 0; i < sband->n_bitrates; i++) { | ||
185 | if (sband->bitrates[i].bitrate == 10) { | ||
186 | setup->basic_rates = BIT(i); | ||
187 | break; | ||
188 | } | ||
189 | } | ||
190 | } else { | ||
191 | scan_width = cfg80211_chandef_to_scan_width(&setup->chandef); | ||
192 | setup->basic_rates = ieee80211_mandatory_rates(sband, | ||
193 | scan_width); | ||
194 | } | ||
176 | } | 195 | } |
177 | 196 | ||
178 | err = cfg80211_chandef_dfs_required(&rdev->wiphy, | 197 | err = cfg80211_chandef_dfs_required(&rdev->wiphy, |
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index fdb3646274a5..701cfd7acc1b 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -1032,6 +1032,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, | |||
1032 | wdev->current_bss = NULL; | 1032 | wdev->current_bss = NULL; |
1033 | wdev->ssid_len = 0; | 1033 | wdev->ssid_len = 0; |
1034 | wdev->conn_owner_nlportid = 0; | 1034 | wdev->conn_owner_nlportid = 0; |
1035 | kzfree(wdev->connect_keys); | ||
1036 | wdev->connect_keys = NULL; | ||
1035 | 1037 | ||
1036 | nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); | 1038 | nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); |
1037 | 1039 | ||
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 8e70291e586a..e87d6c4dd5b6 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c | |||
@@ -217,7 +217,7 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x) | |||
217 | if (skb->len <= mtu) | 217 | if (skb->len <= mtu) |
218 | goto ok; | 218 | goto ok; |
219 | 219 | ||
220 | if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) | 220 | if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) |
221 | goto ok; | 221 | goto ok; |
222 | } | 222 | } |
223 | 223 | ||
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c index ccfdc7115a83..a00ec715aa46 100644 --- a/net/xfrm/xfrm_ipcomp.c +++ b/net/xfrm/xfrm_ipcomp.c | |||
@@ -283,7 +283,7 @@ static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name) | |||
283 | struct crypto_comp *tfm; | 283 | struct crypto_comp *tfm; |
284 | 284 | ||
285 | /* This can be any valid CPU ID so we don't need locking. */ | 285 | /* This can be any valid CPU ID so we don't need locking. */ |
286 | tfm = __this_cpu_read(*pos->tfms); | 286 | tfm = this_cpu_read(*pos->tfms); |
287 | 287 | ||
288 | if (!strcmp(crypto_comp_name(tfm), alg_name)) { | 288 | if (!strcmp(crypto_comp_name(tfm), alg_name)) { |
289 | pos->users++; | 289 | pos->users++; |
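The one-line xfrm_ipcomp.c change swaps __this_cpu_read() for this_cpu_read(): the double-underscore form requires the caller to have preemption disabled, while the plain form is safe from preemptible context, which is what the "any valid CPU ID" comment relies on. An illustrative contrast, not from the patch:

static int read_ctr_preemptible(int __percpu *ctr)
{
        return this_cpu_read(*ctr);     /* safe, protects the access itself */
}

static int read_ctr_caller_pinned(int __percpu *ctr)
{
        return __this_cpu_read(*ctr);   /* caller must have preemption off */
}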
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 7a23078132cf..625b3fca5704 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1458,10 +1458,13 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl, | |||
1458 | static int xfrm_get_tos(const struct flowi *fl, int family) | 1458 | static int xfrm_get_tos(const struct flowi *fl, int family) |
1459 | { | 1459 | { |
1460 | const struct xfrm_policy_afinfo *afinfo; | 1460 | const struct xfrm_policy_afinfo *afinfo; |
1461 | int tos = 0; | 1461 | int tos; |
1462 | 1462 | ||
1463 | afinfo = xfrm_policy_get_afinfo(family); | 1463 | afinfo = xfrm_policy_get_afinfo(family); |
1464 | tos = afinfo ? afinfo->get_tos(fl) : 0; | 1464 | if (!afinfo) |
1465 | return 0; | ||
1466 | |||
1467 | tos = afinfo->get_tos(fl); | ||
1465 | 1468 | ||
1466 | rcu_read_unlock(); | 1469 | rcu_read_unlock(); |
1467 | 1470 | ||
@@ -1891,7 +1894,7 @@ static void xfrm_policy_queue_process(struct timer_list *t) | |||
1891 | spin_unlock(&pq->hold_queue.lock); | 1894 | spin_unlock(&pq->hold_queue.lock); |
1892 | 1895 | ||
1893 | dst_hold(xfrm_dst_path(dst)); | 1896 | dst_hold(xfrm_dst_path(dst)); |
1894 | dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, 0); | 1897 | dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE); |
1895 | if (IS_ERR(dst)) | 1898 | if (IS_ERR(dst)) |
1896 | goto purge_queue; | 1899 | goto purge_queue; |
1897 | 1900 | ||
@@ -2729,14 +2732,14 @@ static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst, | |||
2729 | while (dst->xfrm) { | 2732 | while (dst->xfrm) { |
2730 | const struct xfrm_state *xfrm = dst->xfrm; | 2733 | const struct xfrm_state *xfrm = dst->xfrm; |
2731 | 2734 | ||
2735 | dst = xfrm_dst_child(dst); | ||
2736 | |||
2732 | if (xfrm->props.mode == XFRM_MODE_TRANSPORT) | 2737 | if (xfrm->props.mode == XFRM_MODE_TRANSPORT) |
2733 | continue; | 2738 | continue; |
2734 | if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR) | 2739 | if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR) |
2735 | daddr = xfrm->coaddr; | 2740 | daddr = xfrm->coaddr; |
2736 | else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR)) | 2741 | else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR)) |
2737 | daddr = &xfrm->id.daddr; | 2742 | daddr = &xfrm->id.daddr; |
2738 | |||
2739 | dst = xfrm_dst_child(dst); | ||
2740 | } | 2743 | } |
2741 | return daddr; | 2744 | return daddr; |
2742 | } | 2745 | } |
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 1d38c6acf8af..9e3a5e85f828 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c | |||
@@ -660,7 +660,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff | |||
660 | } else { | 660 | } else { |
661 | XFRM_SKB_CB(skb)->seq.output.low = oseq + 1; | 661 | XFRM_SKB_CB(skb)->seq.output.low = oseq + 1; |
662 | XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi; | 662 | XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi; |
663 | xo->seq.low = oseq = oseq + 1; | 663 | xo->seq.low = oseq + 1; |
664 | xo->seq.hi = oseq_hi; | 664 | xo->seq.hi = oseq_hi; |
665 | oseq += skb_shinfo(skb)->gso_segs; | 665 | oseq += skb_shinfo(skb)->gso_segs; |
666 | } | 666 | } |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 54e21f19d722..f9d2f2233f09 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -2056,6 +2056,11 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen | |||
2056 | struct xfrm_mgr *km; | 2056 | struct xfrm_mgr *km; |
2057 | struct xfrm_policy *pol = NULL; | 2057 | struct xfrm_policy *pol = NULL; |
2058 | 2058 | ||
2059 | #ifdef CONFIG_COMPAT | ||
2060 | if (in_compat_syscall()) | ||
2061 | return -EOPNOTSUPP; | ||
2062 | #endif | ||
2063 | |||
2059 | if (!optval && !optlen) { | 2064 | if (!optval && !optlen) { |
2060 | xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL); | 2065 | xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL); |
2061 | xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL); | 2066 | xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL); |
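The xfrm_state.c hunk refuses the optval-based policy interface for 32-bit callers on a 64-bit kernel; xfrm has no compat layer here, so the userspace and kernel structures can differ in size and would be misparsed. A sketch of the same guard wrapped as a hypothetical helper:

static bool xfrm_refuse_compat_caller(void)
{
#ifdef CONFIG_COMPAT
        /* no xfrm compat layer: 32-bit and 64-bit layouts differ */
        if (in_compat_syscall())
                return true;
#endif
        return false;
}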
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 7f52b8eb177d..080035f056d9 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -121,22 +121,17 @@ static inline int verify_replay(struct xfrm_usersa_info *p, | |||
121 | struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; | 121 | struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; |
122 | struct xfrm_replay_state_esn *rs; | 122 | struct xfrm_replay_state_esn *rs; |
123 | 123 | ||
124 | if (p->flags & XFRM_STATE_ESN) { | 124 | if (!rt) |
125 | if (!rt) | 125 | return (p->flags & XFRM_STATE_ESN) ? -EINVAL : 0; |
126 | return -EINVAL; | ||
127 | 126 | ||
128 | rs = nla_data(rt); | 127 | rs = nla_data(rt); |
129 | 128 | ||
130 | if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) | 129 | if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) |
131 | return -EINVAL; | 130 | return -EINVAL; |
132 | |||
133 | if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) && | ||
134 | nla_len(rt) != sizeof(*rs)) | ||
135 | return -EINVAL; | ||
136 | } | ||
137 | 131 | ||
138 | if (!rt) | 132 | if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) && |
139 | return 0; | 133 | nla_len(rt) != sizeof(*rs)) |
134 | return -EINVAL; | ||
140 | 135 | ||
141 | /* As only ESP and AH support ESN feature. */ | 136 | /* As only ESP and AH support ESN feature. */ |
142 | if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH)) | 137 | if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH)) |