Diffstat (limited to 'net')
70 files changed, 665 insertions, 420 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index c25eb36f1320..aecdeba052d3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2282,14 +2282,21 @@ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
         .arg2_type      = ARG_ANYTHING,
 };
 
+#define sk_msg_iter_var(var)                    \
+        do {                                    \
+                var++;                          \
+                if (var == MAX_SKB_FRAGS)       \
+                        var = 0;                \
+        } while (0)
+
 BPF_CALL_4(bpf_msg_pull_data,
            struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags)
 {
-        unsigned int len = 0, offset = 0, copy = 0;
+        unsigned int len = 0, offset = 0, copy = 0, poffset = 0;
+        int bytes = end - start, bytes_sg_total;
         struct scatterlist *sg = msg->sg_data;
         int first_sg, last_sg, i, shift;
         unsigned char *p, *to, *from;
-        int bytes = end - start;
         struct page *page;
 
         if (unlikely(flags || end <= start))
@@ -2299,21 +2306,22 @@ BPF_CALL_4(bpf_msg_pull_data,
         i = msg->sg_start;
         do {
                 len = sg[i].length;
-                offset += len;
                 if (start < offset + len)
                         break;
-                i++;
-                if (i == MAX_SKB_FRAGS)
-                        i = 0;
+                offset += len;
+                sk_msg_iter_var(i);
         } while (i != msg->sg_end);
 
         if (unlikely(start >= offset + len))
                 return -EINVAL;
 
-        if (!msg->sg_copy[i] && bytes <= len)
-                goto out;
-
         first_sg = i;
+        /* The start may point into the sg element so we need to also
+         * account for the headroom.
+         */
+        bytes_sg_total = start - offset + bytes;
+        if (!msg->sg_copy[i] && bytes_sg_total <= len)
+                goto out;
 
         /* At this point we need to linearize multiple scatterlist
          * elements or a single shared page. Either way we need to
@@ -2327,37 +2335,32 @@ BPF_CALL_4(bpf_msg_pull_data,
          */
         do {
                 copy += sg[i].length;
-                i++;
-                if (i == MAX_SKB_FRAGS)
-                        i = 0;
-                if (bytes < copy)
+                sk_msg_iter_var(i);
+                if (bytes_sg_total <= copy)
                         break;
         } while (i != msg->sg_end);
         last_sg = i;
 
-        if (unlikely(copy < end - start))
+        if (unlikely(bytes_sg_total > copy))
                 return -EINVAL;
 
         page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy));
         if (unlikely(!page))
                 return -ENOMEM;
         p = page_address(page);
-        offset = 0;
 
         i = first_sg;
         do {
                 from = sg_virt(&sg[i]);
                 len = sg[i].length;
-                to = p + offset;
+                to = p + poffset;
 
                 memcpy(to, from, len);
-                offset += len;
+                poffset += len;
                 sg[i].length = 0;
                 put_page(sg_page(&sg[i]));
 
-                i++;
-                if (i == MAX_SKB_FRAGS)
-                        i = 0;
+                sk_msg_iter_var(i);
         } while (i != last_sg);
 
         sg[first_sg].length = copy;
@@ -2367,11 +2370,15 @@ BPF_CALL_4(bpf_msg_pull_data,
          * had a single entry though we can just replace it and
          * be done. Otherwise walk the ring and shift the entries.
          */
-        shift = last_sg - first_sg - 1;
+        WARN_ON_ONCE(last_sg == first_sg);
+        shift = last_sg > first_sg ?
+                last_sg - first_sg - 1 :
+                MAX_SKB_FRAGS - first_sg + last_sg - 1;
         if (!shift)
                 goto out;
 
-        i = first_sg + 1;
+        i = first_sg;
+        sk_msg_iter_var(i);
         do {
                 int move_from;
 
@@ -2388,15 +2395,13 @@ BPF_CALL_4(bpf_msg_pull_data,
                 sg[move_from].page_link = 0;
                 sg[move_from].offset = 0;
 
-                i++;
-                if (i == MAX_SKB_FRAGS)
-                        i = 0;
+                sk_msg_iter_var(i);
         } while (1);
         msg->sg_end -= shift;
         if (msg->sg_end < 0)
                 msg->sg_end += MAX_SKB_FRAGS;
 out:
-        msg->data = sg_virt(&sg[i]) + start - offset;
+        msg->data = sg_virt(&sg[first_sg]) + start - offset;
         msg->data_end = msg->data + bytes;
 
         return 0;
@@ -7281,7 +7286,7 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
                 break;
 
         case offsetof(struct sk_reuseport_md, ip_protocol):
-                BUILD_BUG_ON(hweight_long(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
+                BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
                 SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
                                                     BPF_W, 0);
                 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
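The sk_msg_iter_var() helper added above only advances a ring index and wraps it at MAX_SKB_FRAGS, and the new bytes_sg_total counts from the offset inside the first element so the headroom is included. A minimal user-space sketch of the wrap behaviour (illustrative only, not part of the patch; the MAX_SKB_FRAGS value is an assumption for the demo):

#include <stdio.h>

#define MAX_SKB_FRAGS 17                        /* assumed value for the demo */

#define sk_msg_iter_var(var)                    \
        do {                                    \
                (var)++;                        \
                if ((var) == MAX_SKB_FRAGS)     \
                        (var) = 0;              \
        } while (0)

int main(void)
{
        int i = MAX_SKB_FRAGS - 1;              /* last slot of the ring */

        sk_msg_iter_var(i);                     /* wraps back to slot 0 */
        printf("after wrap: %d\n", i);          /* prints 0 */

        /* bytes_sg_total in the patch is start - offset + bytes, i.e. the
         * headroom of the element that "start" points into is part of the
         * amount that has to fit before the copy can be skipped.
         */
        return 0;
}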
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 24431e578310..60c928894a78 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -324,6 +324,10 @@ void rtnl_unregister_all(int protocol)
 
         rtnl_lock();
         tab = rtnl_msg_handlers[protocol];
+        if (!tab) {
+                rtnl_unlock();
+                return;
+        }
         RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
         for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
                 link = tab[msgindex];
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c996c09d095f..b2c807f67aba 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -939,9 +939,6 @@ struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
 
         WARN_ON_ONCE(!in_task());
 
-        if (!sock_flag(sk, SOCK_ZEROCOPY))
-                return NULL;
-
         skb = sock_omalloc(sk, 0, GFP_KERNEL);
         if (!skb)
                 return NULL;
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index e63c554e0623..9f3209ff7ffd 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -19,12 +19,10 @@
 #include <linux/of_mdio.h>
 #include <linux/of_platform.h>
 #include <linux/of_net.h>
-#include <linux/of_gpio.h>
 #include <linux/netdevice.h>
 #include <linux/sysfs.h>
 #include <linux/phy_fixed.h>
 #include <linux/ptp_classify.h>
-#include <linux/gpio/consumer.h>
 #include <linux/etherdevice.h>
 
 #include "dsa_priv.h"
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index cf75f8944b05..4da39446da2d 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -820,10 +820,9 @@ static void igmp_timer_expire(struct timer_list *t)
         spin_lock(&im->lock);
         im->tm_running = 0;
 
-        if (im->unsolicit_count) {
-                im->unsolicit_count--;
+        if (im->unsolicit_count && --im->unsolicit_count)
                 igmp_start_timer(im, unsolicited_report_interval(in_dev));
-        }
+
         im->reporter = 1;
         spin_unlock(&im->lock);
 
@@ -1308,6 +1307,8 @@ static void igmp_group_added(struct ip_mc_list *im)
 
         if (in_dev->dead)
                 return;
+
+        im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
         if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
                 spin_lock_bh(&im->lock);
                 igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
@@ -1391,9 +1392,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
                                 unsigned int mode)
 {
         struct ip_mc_list *im;
-#ifdef CONFIG_IP_MULTICAST
-        struct net *net = dev_net(in_dev->dev);
-#endif
 
         ASSERT_RTNL();
 
@@ -1420,7 +1418,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
         spin_lock_init(&im->lock);
 #ifdef CONFIG_IP_MULTICAST
         timer_setup(&im->timer, igmp_timer_expire, 0);
-        im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
 #endif
 
         im->next_rcu = in_dev->mc_list;
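The reworked test in igmp_timer_expire() relies on the short-circuit of && together with the pre-decrement: once unsolicit_count reaches zero it is never decremented again, and the timer is only restarted while the decremented value is still nonzero. A small stand-alone sketch of that control flow (illustrative only, with an assumed initial count):

#include <stdio.h>

/* Mirrors the new test "if (im->unsolicit_count && --im->unsolicit_count)":
 * with a count of N this branch is taken N-1 times, and the counter cannot
 * underflow because the left-hand side short-circuits once it is zero.
 */
int main(void)
{
        unsigned int unsolicit_count = 3;       /* e.g. sysctl_igmp_qrv */
        int restarts = 0;

        for (int report = 0; report < 5; report++) {
                if (unsolicit_count && --unsolicit_count)
                        restarts++;             /* igmp_start_timer(...) */
        }
        printf("restarts=%d count=%u\n", restarts, unsolicit_count);
        /* prints: restarts=2 count=0 */
        return 0;
}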
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 88281fbce88c..e7227128df2c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -599,6 +599,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
                 nextp = &fp->next;
                 fp->prev = NULL;
                 memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+                fp->sk = NULL;
                 head->data_len += fp->len;
                 head->len += fp->len;
                 if (head->ip_summed != fp->ip_summed)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 51a5d06085ac..8cce0e9ea08c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -178,6 +178,9 @@ static void ipgre_err(struct sk_buff *skb, u32 info,
 
         if (tpi->proto == htons(ETH_P_TEB))
                 itn = net_generic(net, gre_tap_net_id);
+        else if (tpi->proto == htons(ETH_P_ERSPAN) ||
+                 tpi->proto == htons(ETH_P_ERSPAN2))
+                itn = net_generic(net, erspan_net_id);
         else
                 itn = net_generic(net, ipgre_net_id);
 
@@ -328,6 +331,8 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                 ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
                 return PACKET_RCVD;
         }
+        return PACKET_REJECT;
+
 drop:
         kfree_skb(skb);
         return PACKET_RCVD;
@@ -1508,11 +1513,14 @@ nla_put_failure:
 
 static void erspan_setup(struct net_device *dev)
 {
+        struct ip_tunnel *t = netdev_priv(dev);
+
         ether_setup(dev);
         dev->netdev_ops = &erspan_netdev_ops;
         dev->priv_flags &= ~IFF_TX_SKB_SHARING;
         dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
         ip_tunnel_setup(dev, erspan_net_id);
+        t->erspan_ver = 1;
 }
 
 static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index d9504adc47b3..184bf2e0a1ed 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -106,6 +106,10 @@ config NF_NAT_IPV4
 
 if NF_NAT_IPV4
 
+config NF_NAT_MASQUERADE_IPV4
+	bool
+
+if NF_TABLES
 config NFT_CHAIN_NAT_IPV4
 	depends on NF_TABLES_IPV4
 	tristate "IPv4 nf_tables nat chain support"
@@ -115,9 +119,6 @@ config NFT_CHAIN_NAT_IPV4
 	  packet transformations such as the source, destination address and
 	  source and destination ports.
 
-config NF_NAT_MASQUERADE_IPV4
-	bool
-
 config NFT_MASQ_IPV4
 	tristate "IPv4 masquerading support for nf_tables"
 	depends on NF_TABLES_IPV4
@@ -135,6 +136,7 @@ config NFT_REDIR_IPV4
 	help
 	  This is the expression that provides IPv4 redirect support for
 	  nf_tables.
+endif # NF_TABLES
 
 config NF_NAT_SNMP_BASIC
 	tristate "Basic SNMP-ALG support"
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b8af2fec5ad5..10c6246396cc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1185,7 +1185,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 
         flags = msg->msg_flags;
 
-        if (flags & MSG_ZEROCOPY && size) {
+        if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
                 if (sk->sk_state != TCP_ESTABLISHED) {
                         err = -EINVAL;
                         goto out_err;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4c2dd9f863f7..4cf2f7bb2802 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6367,8 +6367,8 @@ static bool tcp_syn_flood_action(const struct sock *sk,
         if (!queue->synflood_warned &&
             net->ipv4.sysctl_tcp_syncookies != 2 &&
             xchg(&queue->synflood_warned, 1) == 0)
-                pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
-                        proto, ntohs(tcp_hdr(skb)->dest), msg);
+                net_info_ratelimited("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
+                                     proto, ntohs(tcp_hdr(skb)->dest), msg);
 
         return want_cookie;
 }
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 75ef332a7caf..12affb7864d9 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -184,8 +184,9 @@ kill:
                         inet_twsk_deschedule_put(tw);
                         return TCP_TW_SUCCESS;
                 }
+        } else {
+                inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
         }
-        inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 
         if (tmp_opt.saw_tstamp) {
                 tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 673bba31eb18..9a4261e50272 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -938,14 +938,14 @@ static int __init inet6_init(void)
 
         err = proto_register(&pingv6_prot, 1);
         if (err)
-                goto out_unregister_ping_proto;
+                goto out_unregister_raw_proto;
 
         /* We MUST register RAW sockets before we create the ICMP6,
          * IGMP6, or NDISC control sockets.
          */
         err = rawv6_init();
         if (err)
-                goto out_unregister_raw_proto;
+                goto out_unregister_ping_proto;
 
         /* Register the family here so that the init calls below will
          * be able to create sockets. (?? is this dangerous ??)
@@ -1113,11 +1113,11 @@ netfilter_fail:
 igmp_fail:
         ndisc_cleanup();
 ndisc_fail:
-        ip6_mr_cleanup();
+        icmpv6_cleanup();
 icmp_fail:
-        unregister_pernet_subsys(&inet6_net_ops);
+        ip6_mr_cleanup();
 ipmr_fail:
-        icmpv6_cleanup();
+        unregister_pernet_subsys(&inet6_net_ops);
 register_pernet_fail:
         sock_unregister(PF_INET6);
         rtnl_unregister_all(PF_INET6);
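The inet6_init() hunk above only reorders the error labels so that each one unwinds exactly the steps that completed before the failure. A generic sketch of that goto-unwind pattern, using hypothetical step_a/step_b/step_c helpers rather than the real IPv6 init calls:

#include <stdio.h>

/* Hypothetical init/cleanup pairs: on failure at step N, jump to the label
 * that undoes step N-1 and fall through the remaining labels in reverse
 * order of setup.
 */
static int step_a(void)  { puts("a up");   return 0; }
static void undo_a(void) { puts("a down"); }
static int step_b(void)  { puts("b up");   return 0; }
static void undo_b(void) { puts("b down"); }
static int step_c(void)  { puts("c up");   return -1; } /* pretend this fails */

int main(void)
{
        int err;

        err = step_a();
        if (err)
                goto out;
        err = step_b();
        if (err)
                goto undo_a;
        err = step_c();
        if (err)
                goto undo_b;    /* c failed: nothing of c to undo */
        return 0;

undo_b:
        undo_b();
undo_a:
        undo_a();
out:
        return err;
}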
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index c861a6d4671d..5516f55e214b 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -989,7 +989,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
                                         fib6_clean_expires(iter);
                                 else
                                         fib6_set_expires(iter, rt->expires);
-                                fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu);
+
+                                if (rt->fib6_pmtu)
+                                        fib6_metric_set(iter, RTAX_MTU,
+                                                        rt->fib6_pmtu);
                                 return -EEXIST;
                         }
                         /* If we have the same destination and the same metric,
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 18a3794b0f52..e493b041d4ac 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1778,6 +1778,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
         if (data[IFLA_GRE_COLLECT_METADATA])
                 parms->collect_md = true;
 
+        parms->erspan_ver = 1;
         if (data[IFLA_GRE_ERSPAN_VER])
                 parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
 
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 5df2a58d945c..419960b0ba16 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1188,7 +1188,15 @@ route_lookup:
                 init_tel_txopt(&opt, encap_limit);
                 ipv6_push_frag_opts(skb, &opt.ops, &proto);
         }
-        hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);
+
+        if (hop_limit == 0) {
+                if (skb->protocol == htons(ETH_P_IP))
+                        hop_limit = ip_hdr(skb)->ttl;
+                else if (skb->protocol == htons(ETH_P_IPV6))
+                        hop_limit = ipv6_hdr(skb)->hop_limit;
+                else
+                        hop_limit = ip6_dst_hoplimit(dst);
+        }
 
         /* Calculate max headroom for all the headers and adjust
          * needed_headroom if necessary.
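The ip6_tnl_xmit() change above makes a hop_limit of 0 mean "inherit": take the inner IPv4 TTL or inner IPv6 hop limit when one is present, and fall back to the route default otherwise. A reduced sketch of that selection order with stand-in parameters (the real code reads ip_hdr(skb)->ttl, ipv6_hdr(skb)->hop_limit and ip6_dst_hoplimit(dst)):

#include <stdio.h>

enum proto { PROTO_IPV4, PROTO_IPV6, PROTO_OTHER };

static unsigned char pick_hop_limit(unsigned char cfg_hop_limit,
                                    enum proto inner, unsigned char inner_ttl,
                                    unsigned char inner_hlim,
                                    unsigned char dst_default)
{
        if (cfg_hop_limit != 0)
                return cfg_hop_limit;   /* explicit tunnel setting wins */
        if (inner == PROTO_IPV4)
                return inner_ttl;       /* inherit the inner IPv4 TTL */
        if (inner == PROTO_IPV6)
                return inner_hlim;      /* inherit the inner hop limit */
        return dst_default;             /* fall back to the route default */
}

int main(void)
{
        /* hop_limit 0 + IPv4 payload with TTL 17 -> outer hop limit 17 */
        printf("%u\n", pick_hop_limit(0, PROTO_IPV4, 17, 0, 64));
        /* explicit hop_limit 8 is kept regardless of the payload */
        printf("%u\n", pick_hop_limit(8, PROTO_IPV6, 0, 42, 64));
        return 0;
}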
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 5095367c7204..eeaf7455d51e 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
         }
 
         mtu = dst_mtu(dst);
-        if (!skb->ignore_df && skb->len > mtu) {
+        if (skb->len > mtu) {
                 skb_dst_update_pmtu(skb, mtu);
 
                 if (skb->protocol == htons(ETH_P_IPV6)) {
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 2a14d8b65924..8f68a518d9db 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -445,6 +445,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
                 else if (head->ip_summed == CHECKSUM_COMPLETE)
                         head->csum = csum_add(head->csum, fp->csum);
                 head->truesize += fp->truesize;
+                fp->sk = NULL;
         }
         sub_frag_mem_limit(fq->q.net, head->truesize);
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c4ea13e8360b..18e00ce1719a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -996,7 +996,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
         rt->rt6i_src = ort->fib6_src;
 #endif
         rt->rt6i_prefsrc = ort->fib6_prefsrc;
-        rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
 }
 
 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a21d8ed0a325..e2f16a0173a9 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -351,20 +351,28 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
         memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
 
         skb->dev = iucv->hs_dev;
-        if (!skb->dev)
-                return -ENODEV;
-        if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
-                return -ENETDOWN;
+        if (!skb->dev) {
+                err = -ENODEV;
+                goto err_free;
+        }
+        if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
+                err = -ENETDOWN;
+                goto err_free;
+        }
         if (skb->len > skb->dev->mtu) {
-                if (sock->sk_type == SOCK_SEQPACKET)
-                        return -EMSGSIZE;
-                else
-                        skb_trim(skb, skb->dev->mtu);
+                if (sock->sk_type == SOCK_SEQPACKET) {
+                        err = -EMSGSIZE;
+                        goto err_free;
+                }
+                skb_trim(skb, skb->dev->mtu);
         }
         skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
         nskb = skb_clone(skb, GFP_ATOMIC);
-        if (!nskb)
-                return -ENOMEM;
+        if (!nskb) {
+                err = -ENOMEM;
+                goto err_free;
+        }
+
         skb_queue_tail(&iucv->send_skb_q, nskb);
         err = dev_queue_xmit(skb);
         if (net_xmit_eval(err)) {
@@ -375,6 +383,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                 WARN_ON(atomic_read(&iucv->msg_recv) < 0);
         }
         return net_xmit_eval(err);
+
+err_free:
+        kfree_skb(skb);
+        return err;
 }
 
 static struct sock *__iucv_get_sock_by_name(char *nm)
@@ -1167,7 +1179,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                 err = afiucv_hs_send(&txmsg, sk, skb, 0);
                 if (err) {
                         atomic_dec(&iucv->msg_sent);
-                        goto fail;
+                        goto out;
                 }
         } else { /* Classic VM IUCV transport */
                 skb_queue_tail(&iucv->send_skb_q, skb);
@@ -2155,8 +2167,8 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
         struct sock *sk;
         struct iucv_sock *iucv;
         struct af_iucv_trans_hdr *trans_hdr;
+        int err = NET_RX_SUCCESS;
         char nullstring[8];
-        int err = 0;
 
         if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
                 WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
@@ -2254,7 +2266,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
                 err = afiucv_hs_callback_rx(sk, skb);
                 break;
         default:
-                ;
+                kfree_skb(skb);
         }
 
         return err;
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 8f7ef167c45a..eb502c6290c2 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1874,7 +1874,7 @@ static void iucv_pm_complete(struct device *dev)
  * Returns 0 if there are still iucv pathes defined
  *         1 if there are no iucv pathes defined
  */
-int iucv_path_table_empty(void)
+static int iucv_path_table_empty(void)
 {
         int i;
 
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 6449a1c2283b..f0f5fedb8caa 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -947,8 +947,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
         if (len < IEEE80211_DEAUTH_FRAME_LEN)
                 return;
 
-        ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
-                 mgmt->sa, mgmt->da, mgmt->bssid, reason);
+        ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+        ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
         sta_info_destroy_addr(sdata, mgmt->sa);
 }
 
@@ -966,9 +966,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
         auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
         auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
 
-        ibss_dbg(sdata,
-                 "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
-                 mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
+        ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+        ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
+                 mgmt->bssid, auth_transaction);
 
         if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
                 return;
@@ -1175,10 +1175,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                 rx_timestamp = drv_get_tsf(local, sdata);
         }
 
-        ibss_dbg(sdata,
-                 "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
+        ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
                  mgmt->sa, mgmt->bssid,
-                 (unsigned long long)rx_timestamp,
+                 (unsigned long long)rx_timestamp);
+        ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
                  (unsigned long long)beacon_timestamp,
                  (unsigned long long)(rx_timestamp - beacon_timestamp),
                  jiffies);
@@ -1537,9 +1537,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
 
         tx_last_beacon = drv_tx_last_beacon(local);
 
-        ibss_dbg(sdata,
-                 "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
-                 mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
+        ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+        ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
+                 mgmt->bssid, tx_last_beacon);
 
         if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
                 return;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 4fb2709cb527..513627896204 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -256,8 +256,27 @@ static void ieee80211_restart_work(struct work_struct *work)
 
         flush_work(&local->radar_detected_work);
         rtnl_lock();
-        list_for_each_entry(sdata, &local->interfaces, list)
+        list_for_each_entry(sdata, &local->interfaces, list) {
+                /*
+                 * XXX: there may be more work for other vif types and even
+                 * for station mode: a good thing would be to run most of
+                 * the iface type's dependent _stop (ieee80211_mg_stop,
+                 * ieee80211_ibss_stop) etc...
+                 * For now, fix only the specific bug that was seen: race
+                 * between csa_connection_drop_work and us.
+                 */
+                if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+                        /*
+                         * This worker is scheduled from the iface worker that
+                         * runs on mac80211's workqueue, so we can't be
+                         * scheduling this worker after the cancel right here.
+                         * The exception is ieee80211_chswitch_done.
+                         * Then we can have a race...
+                         */
+                        cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
+                }
                 flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+        }
         ieee80211_scan_cancel(local);
 
         /* make sure any new ROC will consider local->in_reconfig */
@@ -471,10 +490,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
                 cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
                             IEEE80211_VHT_CAP_SHORT_GI_80 |
                             IEEE80211_VHT_CAP_SHORT_GI_160 |
-                            IEEE80211_VHT_CAP_RXSTBC_1 |
-                            IEEE80211_VHT_CAP_RXSTBC_2 |
-                            IEEE80211_VHT_CAP_RXSTBC_3 |
-                            IEEE80211_VHT_CAP_RXSTBC_4 |
+                            IEEE80211_VHT_CAP_RXSTBC_MASK |
                             IEEE80211_VHT_CAP_TXSTBC |
                             IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
                             IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
@@ -1208,6 +1224,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 #if IS_ENABLED(CONFIG_IPV6)
         unregister_inet6addr_notifier(&local->ifa6_notifier);
 #endif
+        ieee80211_txq_teardown_flows(local);
 
         rtnl_lock();
 
@@ -1236,7 +1253,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
         skb_queue_purge(&local->skb_queue);
         skb_queue_purge(&local->skb_queue_unreliable);
         skb_queue_purge(&local->skb_queue_tdls_chsw);
-        ieee80211_txq_teardown_flows(local);
 
         destroy_workqueue(local->workqueue);
         wiphy_unregister(local->hw.wiphy);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 35ad3983ae4b..daf9db3c8f24 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -572,6 +572,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
                 forward = false;
                 reply = true;
                 target_metric = 0;
+
+                if (SN_GT(target_sn, ifmsh->sn))
+                        ifmsh->sn = target_sn;
+
                 if (time_after(jiffies, ifmsh->last_sn_update +
                                         net_traversal_jiffies(sdata)) ||
                     time_before(jiffies, ifmsh->last_sn_update)) {
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 7fb9957359a3..3dbecae4be73 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1073,6 +1073,10 @@ static void ieee80211_chswitch_work(struct work_struct *work)
          */
 
         if (sdata->reserved_chanctx) {
+                struct ieee80211_supported_band *sband = NULL;
+                struct sta_info *mgd_sta = NULL;
+                enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
+
                 /*
                  * with multi-vif csa driver may call ieee80211_csa_finish()
                  * many times while waiting for other interfaces to use their
@@ -1081,6 +1085,48 @@ static void ieee80211_chswitch_work(struct work_struct *work)
                 if (sdata->reserved_ready)
                         goto out;
 
+                if (sdata->vif.bss_conf.chandef.width !=
+                    sdata->csa_chandef.width) {
+                        /*
+                         * For managed interface, we need to also update the AP
+                         * station bandwidth and align the rate scale algorithm
+                         * on the bandwidth change. Here we only consider the
+                         * bandwidth of the new channel definition (as channel
+                         * switch flow does not have the full HT/VHT/HE
+                         * information), assuming that if additional changes are
+                         * required they would be done as part of the processing
+                         * of the next beacon from the AP.
+                         */
+                        switch (sdata->csa_chandef.width) {
+                        case NL80211_CHAN_WIDTH_20_NOHT:
+                        case NL80211_CHAN_WIDTH_20:
+                        default:
+                                bw = IEEE80211_STA_RX_BW_20;
+                                break;
+                        case NL80211_CHAN_WIDTH_40:
+                                bw = IEEE80211_STA_RX_BW_40;
+                                break;
+                        case NL80211_CHAN_WIDTH_80:
+                                bw = IEEE80211_STA_RX_BW_80;
+                                break;
+                        case NL80211_CHAN_WIDTH_80P80:
+                        case NL80211_CHAN_WIDTH_160:
+                                bw = IEEE80211_STA_RX_BW_160;
+                                break;
+                        }
+
+                        mgd_sta = sta_info_get(sdata, ifmgd->bssid);
+                        sband =
+                                local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
+                }
+
+                if (sdata->vif.bss_conf.chandef.width >
+                    sdata->csa_chandef.width) {
+                        mgd_sta->sta.bandwidth = bw;
+                        rate_control_rate_update(local, sband, mgd_sta,
+                                                 IEEE80211_RC_BW_CHANGED);
+                }
+
                 ret = ieee80211_vif_use_reserved_context(sdata);
                 if (ret) {
                         sdata_info(sdata,
@@ -1091,6 +1137,13 @@ static void ieee80211_chswitch_work(struct work_struct *work)
                         goto out;
                 }
 
+                if (sdata->vif.bss_conf.chandef.width <
+                    sdata->csa_chandef.width) {
+                        mgd_sta->sta.bandwidth = bw;
+                        rate_control_rate_update(local, sband, mgd_sta,
+                                                 IEEE80211_RC_BW_CHANGED);
+                }
+
                 goto out;
         }
 
@@ -1312,6 +1365,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                                           cbss->beacon_interval));
         return;
  drop_connection:
+        /*
+         * This is just so that the disconnect flow will know that
+         * we were trying to switch channel and failed. In case the
+         * mode is 1 (we are not allowed to Tx), we will know not to
+         * send a deauthentication frame. Those two fields will be
+         * reset when the disconnection worker runs.
+         */
+        sdata->vif.csa_active = true;
+        sdata->csa_block_tx = csa_ie.mode;
+
         ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
         mutex_unlock(&local->chanctx_mtx);
         mutex_unlock(&local->mtx);
@@ -2522,6 +2585,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
         struct ieee80211_local *local = sdata->local;
         struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
         u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+        bool tx;
 
         sdata_lock(sdata);
         if (!ifmgd->associated) {
@@ -2529,6 +2593,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
                 return;
         }
 
+        tx = !sdata->csa_block_tx;
+
         /* AP is probably out of range (or not reachable for another reason) so
          * remove the bss struct for that AP.
          */
@@ -2536,7 +2602,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
 
         ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                                WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
-                               true, frame_buf);
+                               tx, frame_buf);
         mutex_lock(&local->mtx);
         sdata->vif.csa_active = false;
         ifmgd->csa_waiting_bcn = false;
@@ -2547,7 +2613,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
         }
         mutex_unlock(&local->mtx);
 
-        ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
+        ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
                                     WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
 
         sdata_unlock(sdata);
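The channel-switch hunks above derive a station RX bandwidth from the width of the target chandef, shrink the AP station's bandwidth before the reserved context is applied when the channel narrows, and widen it afterwards when it grows. A reduced sketch of the width-to-bandwidth mapping, with stand-in enums instead of nl80211_chan_width/ieee80211_sta_rx_bandwidth:

#include <stdio.h>

/* Stand-in enums only; the kernel code switches on sdata->csa_chandef.width
 * and assigns an ieee80211_sta_rx_bandwidth value along the same lines.
 */
enum chan_width { W_20_NOHT, W_20, W_40, W_80, W_80P80, W_160 };
enum sta_rx_bw { BW_20 = 20, BW_40 = 40, BW_80 = 80, BW_160 = 160 };

static enum sta_rx_bw width_to_rx_bw(enum chan_width w)
{
        switch (w) {
        case W_40:
                return BW_40;
        case W_80:
                return BW_80;
        case W_80P80:
        case W_160:
                return BW_160;  /* both map to the widest RX setting */
        case W_20_NOHT:
        case W_20:
        default:
                return BW_20;
        }
}

int main(void)
{
        printf("%d %d\n", width_to_rx_bw(W_80P80), width_to_rx_bw(W_20_NOHT));
        /* prints: 160 20 */
        return 0;
}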
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 64742f2765c4..96611d5dfadb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1728,6 +1728,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
          */
         if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
             !ieee80211_has_morefrags(hdr->frame_control) &&
+            !is_multicast_ether_addr(hdr->addr1) &&
             (ieee80211_is_mgmt(hdr->frame_control) ||
              ieee80211_is_data(hdr->frame_control)) &&
             !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index cd332e3e1134..f353d9db54bc 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3078,27 +3078,18 @@ void ieee80211_clear_fast_xmit(struct sta_info *sta)
 }
 
 static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local,
-                                        struct sk_buff *skb, int headroom,
-                                        int *subframe_len)
+                                        struct sk_buff *skb, int headroom)
 {
-        int amsdu_len = *subframe_len + sizeof(struct ethhdr);
-        int padding = (4 - amsdu_len) & 3;
-
-        if (skb_headroom(skb) < headroom || skb_tailroom(skb) < padding) {
+        if (skb_headroom(skb) < headroom) {
                 I802_DEBUG_INC(local->tx_expand_skb_head);
 
-                if (pskb_expand_head(skb, headroom, padding, GFP_ATOMIC)) {
+                if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
                         wiphy_debug(local->hw.wiphy,
                                     "failed to reallocate TX buffer\n");
                         return false;
                 }
         }
 
-        if (padding) {
-                *subframe_len += padding;
-                skb_put_zero(skb, padding);
-        }
-
         return true;
 }
 
@@ -3122,8 +3113,7 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
         if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
                 return true;
 
-        if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr),
-                                         &subframe_len))
+        if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
                 return false;
 
         data = skb_push(skb, sizeof(*amsdu_hdr));
@@ -3189,7 +3179,8 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
         void *data;
         bool ret = false;
         unsigned int orig_len;
-        int n = 1, nfrags;
+        int n = 2, nfrags, pad = 0;
+        u16 hdrlen;
 
         if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
                 return false;
@@ -3222,9 +3213,6 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
         if (skb->len + head->len > max_amsdu_len)
                 goto out;
 
-        if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
-                goto out;
-
         nfrags = 1 + skb_shinfo(skb)->nr_frags;
         nfrags += 1 + skb_shinfo(head)->nr_frags;
         frag_tail = &skb_shinfo(head)->frag_list;
@@ -3240,10 +3228,24 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
         if (max_frags && nfrags > max_frags)
                 goto out;
 
-        if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2,
-                                         &subframe_len))
+        if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
                 goto out;
 
+        /*
+         * Pad out the previous subframe to a multiple of 4 by adding the
+         * padding to the next one, that's being added. Note that head->len
+         * is the length of the full A-MSDU, but that works since each time
+         * we add a new subframe we pad out the previous one to a multiple
+         * of 4 and thus it no longer matters in the next round.
+         */
+        hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header);
+        if ((head->len - hdrlen) & 3)
+                pad = 4 - ((head->len - hdrlen) & 3);
+
+        if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) +
+                                                     2 + pad))
+                goto out_recalc;
+
         ret = true;
         data = skb_push(skb, ETH_ALEN + 2);
         memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN);
@@ -3253,15 +3255,19 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
         memcpy(data, &len, 2);
         memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header));
 
+        memset(skb_push(skb, pad), 0, pad);
+
         head->len += skb->len;
         head->data_len += skb->len;
         *frag_tail = skb;
 
-        flow->backlog += head->len - orig_len;
-        tin->backlog_bytes += head->len - orig_len;
-
-        fq_recalc_backlog(fq, tin, flow);
+out_recalc:
+        if (head->len != orig_len) {
+                flow->backlog += head->len - orig_len;
+                tin->backlog_bytes += head->len - orig_len;
 
+                fq_recalc_backlog(fq, tin, flow);
+        }
 out:
         spin_unlock_bh(&fq->lock);
 
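The A-MSDU hunks above stop padding inside the previous subframe and instead push the pad in front of the next one; the amount is computed from the running A-MSDU length minus the 802.11 header part. A small sketch of that arithmetic (the hdrlen value below is only an example; the real code uses fast_tx->hdr_len - sizeof(rfc1042_header)):

#include <stdio.h>

/* Same arithmetic as the patch: the subframe data so far is
 * (head_len - hdrlen); if it is not a multiple of 4, the next subframe is
 * prefixed with enough zero bytes to round it up.
 */
static unsigned int amsdu_pad(unsigned int head_len, unsigned int hdrlen)
{
        unsigned int pad = 0;

        if ((head_len - hdrlen) & 3)
                pad = 4 - ((head_len - hdrlen) & 3);
        return pad;
}

int main(void)
{
        /* hdrlen of 26 is just an example value for the header part */
        printf("%u %u %u %u\n",
               amsdu_pad(100, 26),      /* 74 bytes -> pad 2 */
               amsdu_pad(102, 26),      /* 76 bytes -> pad 0 */
               amsdu_pad(103, 26),      /* 77 bytes -> pad 3 */
               amsdu_pad(105, 26));     /* 79 bytes -> pad 1 */
        return 0;
}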
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 88efda7c9f8a..716cd6442d86 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1135,7 +1135,7 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
 {
         struct ieee80211_chanctx_conf *chanctx_conf;
         const struct ieee80211_reg_rule *rrule;
-        struct ieee80211_wmm_ac *wmm_ac;
+        const struct ieee80211_wmm_ac *wmm_ac;
         u16 center_freq = 0;
 
         if (sdata->vif.type != NL80211_IFTYPE_AP &&
@@ -1154,20 +1154,19 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
 
         rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq));
 
-        if (IS_ERR_OR_NULL(rrule) || !rrule->wmm_rule) {
+        if (IS_ERR_OR_NULL(rrule) || !rrule->has_wmm) {
                 rcu_read_unlock();
                 return;
         }
 
         if (sdata->vif.type == NL80211_IFTYPE_AP)
-                wmm_ac = &rrule->wmm_rule->ap[ac];
+                wmm_ac = &rrule->wmm_rule.ap[ac];
         else
-                wmm_ac = &rrule->wmm_rule->client[ac];
+                wmm_ac = &rrule->wmm_rule.client[ac];
         qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min);
         qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max);
         qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn);
-        qparam->txop = !qparam->txop ? wmm_ac->cot / 32 :
-                min_t(u16, qparam->txop, wmm_ac->cot / 32);
+        qparam->txop = min_t(u16, qparam->txop, wmm_ac->cot / 32);
         rcu_read_unlock();
 }
 
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 71709c104081..f61c306de1d0 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -771,13 +771,13 @@ config NETFILTER_XT_TARGET_CHECKSUM
 	depends on NETFILTER_ADVANCED
 	---help---
 	  This option adds a `CHECKSUM' target, which can be used in the iptables mangle
-	  table.
+	  table to work around buggy DHCP clients in virtualized environments.
 
-	  You can use this target to compute and fill in the checksum in
-	  a packet that lacks a checksum. This is particularly useful,
-	  if you need to work around old applications such as dhcp clients,
-	  that do not work well with checksum offloads, but don't want to disable
-	  checksum offload in your device.
+	  Some old DHCP clients drop packets because they are not aware
+	  that the checksum would normally be offloaded to hardware and
+	  thus should be considered valid.
+	  This target can be used to fill in the checksum using iptables
+	  when such packets are sent via a virtual network device.
 
 	  To compile it as a module, choose M here. If unsure, say N.
 
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index 9f14b0df6960..51c5d7eec0a3 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c | |||
| @@ -776,9 +776,26 @@ static const struct nf_hook_ops ipv6_conntrack_ops[] = { | |||
| 776 | }; | 776 | }; |
| 777 | #endif | 777 | #endif |
| 778 | 778 | ||
| 779 | static int nf_ct_tcp_fixup(struct nf_conn *ct, void *_nfproto) | ||
| 780 | { | ||
| 781 | u8 nfproto = (unsigned long)_nfproto; | ||
| 782 | |||
| 783 | if (nf_ct_l3num(ct) != nfproto) | ||
| 784 | return 0; | ||
| 785 | |||
| 786 | if (nf_ct_protonum(ct) == IPPROTO_TCP && | ||
| 787 | ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED) { | ||
| 788 | ct->proto.tcp.seen[0].td_maxwin = 0; | ||
| 789 | ct->proto.tcp.seen[1].td_maxwin = 0; | ||
| 790 | } | ||
| 791 | |||
| 792 | return 0; | ||
| 793 | } | ||
| 794 | |||
| 779 | static int nf_ct_netns_do_get(struct net *net, u8 nfproto) | 795 | static int nf_ct_netns_do_get(struct net *net, u8 nfproto) |
| 780 | { | 796 | { |
| 781 | struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id); | 797 | struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id); |
| 798 | bool fixup_needed = false; | ||
| 782 | int err = 0; | 799 | int err = 0; |
| 783 | 800 | ||
| 784 | mutex_lock(&nf_ct_proto_mutex); | 801 | mutex_lock(&nf_ct_proto_mutex); |
| @@ -798,6 +815,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto) | |||
| 798 | ARRAY_SIZE(ipv4_conntrack_ops)); | 815 | ARRAY_SIZE(ipv4_conntrack_ops)); |
| 799 | if (err) | 816 | if (err) |
| 800 | cnet->users4 = 0; | 817 | cnet->users4 = 0; |
| 818 | else | ||
| 819 | fixup_needed = true; | ||
| 801 | break; | 820 | break; |
| 802 | #if IS_ENABLED(CONFIG_IPV6) | 821 | #if IS_ENABLED(CONFIG_IPV6) |
| 803 | case NFPROTO_IPV6: | 822 | case NFPROTO_IPV6: |
| @@ -814,6 +833,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto) | |||
| 814 | ARRAY_SIZE(ipv6_conntrack_ops)); | 833 | ARRAY_SIZE(ipv6_conntrack_ops)); |
| 815 | if (err) | 834 | if (err) |
| 816 | cnet->users6 = 0; | 835 | cnet->users6 = 0; |
| 836 | else | ||
| 837 | fixup_needed = true; | ||
| 817 | break; | 838 | break; |
| 818 | #endif | 839 | #endif |
| 819 | default: | 840 | default: |
| @@ -822,6 +843,11 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto) | |||
| 822 | } | 843 | } |
| 823 | out_unlock: | 844 | out_unlock: |
| 824 | mutex_unlock(&nf_ct_proto_mutex); | 845 | mutex_unlock(&nf_ct_proto_mutex); |
| 846 | |||
| 847 | if (fixup_needed) | ||
| 848 | nf_ct_iterate_cleanup_net(net, nf_ct_tcp_fixup, | ||
| 849 | (void *)(unsigned long)nfproto, 0, 0); | ||
| 850 | |||
| 825 | return err; | 851 | return err; |
| 826 | } | 852 | } |
| 827 | 853 | ||
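Editor's note: the nf_ct_tcp_fixup() hunk above passes the address family to the iteration callback by packing it into the opaque void * argument — (void *)(unsigned long)nfproto on the caller side, (unsigned long)_nfproto inside the callback — instead of allocating a context structure. A minimal userspace sketch of that cookie-passing pattern, with hypothetical names (iterate_items, fixup_cb) standing in for nf_ct_iterate_cleanup_net() and its callback; this is an illustration, not kernel code:

    #include <stdio.h>

    struct item {
            unsigned char l3num;
            int td_maxwin;
    };

    /* callback: unpack the family from the cookie, skip other families */
    static int fixup_cb(struct item *it, void *data)
    {
            unsigned char wanted = (unsigned char)(unsigned long)data;

            if (it->l3num != wanted)
                    return 0;
            it->td_maxwin = 0;      /* the "fixup": reset the tracked window */
            return 0;
    }

    static void iterate_items(struct item *items, int n,
                              int (*cb)(struct item *, void *), void *data)
    {
            int i;

            for (i = 0; i < n; i++)
                    cb(&items[i], data);
    }

    int main(void)
    {
            struct item table[] = { { 2, 65535 }, { 10, 65535 } };

            /* pack the family (2 == NFPROTO_IPV4) into the void * cookie */
            iterate_items(table, 2, fixup_cb, (void *)(unsigned long)2);
            printf("%d %d\n", table[0].td_maxwin, table[1].td_maxwin); /* 0 65535 */
            return 0;
    }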
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 8c58f96b59e7..f3f91ed2c21a 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c | |||
| @@ -675,7 +675,7 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) | |||
| 675 | } | 675 | } |
| 676 | #endif | 676 | #endif |
| 677 | 677 | ||
| 678 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 678 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 679 | 679 | ||
| 680 | #include <linux/netfilter/nfnetlink.h> | 680 | #include <linux/netfilter/nfnetlink.h> |
| 681 | #include <linux/netfilter/nfnetlink_cttimeout.h> | 681 | #include <linux/netfilter/nfnetlink_cttimeout.h> |
| @@ -697,6 +697,8 @@ static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], | |||
| 697 | timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; | 697 | timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; |
| 698 | } | 698 | } |
| 699 | } | 699 | } |
| 700 | |||
| 701 | timeouts[CTA_TIMEOUT_DCCP_UNSPEC] = timeouts[CTA_TIMEOUT_DCCP_REQUEST]; | ||
| 700 | return 0; | 702 | return 0; |
| 701 | } | 703 | } |
| 702 | 704 | ||
| @@ -726,7 +728,7 @@ dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = { | |||
| 726 | [CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 }, | 728 | [CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 }, |
| 727 | [CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 }, | 729 | [CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 }, |
| 728 | }; | 730 | }; |
| 729 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 731 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 730 | 732 | ||
| 731 | #ifdef CONFIG_SYSCTL | 733 | #ifdef CONFIG_SYSCTL |
| 732 | /* template, data assigned later */ | 734 | /* template, data assigned later */ |
| @@ -827,6 +829,11 @@ static int dccp_init_net(struct net *net, u_int16_t proto) | |||
| 827 | dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ; | 829 | dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ; |
| 828 | dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ; | 830 | dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ; |
| 829 | dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL; | 831 | dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL; |
| 832 | |||
| 833 | /* timeouts[0] is unused, make it same as SYN_SENT so | ||
| 834 | * ->timeouts[0] contains 'new' timeout, like udp or icmp. | ||
| 835 | */ | ||
| 836 | dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST]; | ||
| 830 | } | 837 | } |
| 831 | 838 | ||
| 832 | return dccp_kmemdup_sysctl_table(net, pn, dn); | 839 | return dccp_kmemdup_sysctl_table(net, pn, dn); |
| @@ -856,7 +863,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = { | |||
| 856 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, | 863 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, |
| 857 | .nla_policy = nf_ct_port_nla_policy, | 864 | .nla_policy = nf_ct_port_nla_policy, |
| 858 | #endif | 865 | #endif |
| 859 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 866 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 860 | .ctnl_timeout = { | 867 | .ctnl_timeout = { |
| 861 | .nlattr_to_obj = dccp_timeout_nlattr_to_obj, | 868 | .nlattr_to_obj = dccp_timeout_nlattr_to_obj, |
| 862 | .obj_to_nlattr = dccp_timeout_obj_to_nlattr, | 869 | .obj_to_nlattr = dccp_timeout_obj_to_nlattr, |
| @@ -864,7 +871,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = { | |||
| 864 | .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, | 871 | .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, |
| 865 | .nla_policy = dccp_timeout_nla_policy, | 872 | .nla_policy = dccp_timeout_nla_policy, |
| 866 | }, | 873 | }, |
| 867 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 874 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 868 | .init_net = dccp_init_net, | 875 | .init_net = dccp_init_net, |
| 869 | .get_net_proto = dccp_get_net_proto, | 876 | .get_net_proto = dccp_get_net_proto, |
| 870 | }; | 877 | }; |
| @@ -889,7 +896,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = { | |||
| 889 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, | 896 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, |
| 890 | .nla_policy = nf_ct_port_nla_policy, | 897 | .nla_policy = nf_ct_port_nla_policy, |
| 891 | #endif | 898 | #endif |
| 892 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 899 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 893 | .ctnl_timeout = { | 900 | .ctnl_timeout = { |
| 894 | .nlattr_to_obj = dccp_timeout_nlattr_to_obj, | 901 | .nlattr_to_obj = dccp_timeout_nlattr_to_obj, |
| 895 | .obj_to_nlattr = dccp_timeout_obj_to_nlattr, | 902 | .obj_to_nlattr = dccp_timeout_obj_to_nlattr, |
| @@ -897,7 +904,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = { | |||
| 897 | .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, | 904 | .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, |
| 898 | .nla_policy = dccp_timeout_nla_policy, | 905 | .nla_policy = dccp_timeout_nla_policy, |
| 899 | }, | 906 | }, |
| 900 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 907 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 901 | .init_net = dccp_init_net, | 908 | .init_net = dccp_init_net, |
| 902 | .get_net_proto = dccp_get_net_proto, | 909 | .get_net_proto = dccp_get_net_proto, |
| 903 | }; | 910 | }; |
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c index ac4a0b296dcd..1df3244ecd07 100644 --- a/net/netfilter/nf_conntrack_proto_generic.c +++ b/net/netfilter/nf_conntrack_proto_generic.c | |||
| @@ -70,7 +70,7 @@ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb, | |||
| 70 | return ret; | 70 | return ret; |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 73 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 74 | 74 | ||
| 75 | #include <linux/netfilter/nfnetlink.h> | 75 | #include <linux/netfilter/nfnetlink.h> |
| 76 | #include <linux/netfilter/nfnetlink_cttimeout.h> | 76 | #include <linux/netfilter/nfnetlink_cttimeout.h> |
| @@ -113,7 +113,7 @@ static const struct nla_policy | |||
| 113 | generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = { | 113 | generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = { |
| 114 | [CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 }, | 114 | [CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 }, |
| 115 | }; | 115 | }; |
| 116 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 116 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 117 | 117 | ||
| 118 | #ifdef CONFIG_SYSCTL | 118 | #ifdef CONFIG_SYSCTL |
| 119 | static struct ctl_table generic_sysctl_table[] = { | 119 | static struct ctl_table generic_sysctl_table[] = { |
| @@ -164,7 +164,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic = | |||
| 164 | .pkt_to_tuple = generic_pkt_to_tuple, | 164 | .pkt_to_tuple = generic_pkt_to_tuple, |
| 165 | .packet = generic_packet, | 165 | .packet = generic_packet, |
| 166 | .new = generic_new, | 166 | .new = generic_new, |
| 167 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 167 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 168 | .ctnl_timeout = { | 168 | .ctnl_timeout = { |
| 169 | .nlattr_to_obj = generic_timeout_nlattr_to_obj, | 169 | .nlattr_to_obj = generic_timeout_nlattr_to_obj, |
| 170 | .obj_to_nlattr = generic_timeout_obj_to_nlattr, | 170 | .obj_to_nlattr = generic_timeout_obj_to_nlattr, |
| @@ -172,7 +172,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic = | |||
| 172 | .obj_size = sizeof(unsigned int), | 172 | .obj_size = sizeof(unsigned int), |
| 173 | .nla_policy = generic_timeout_nla_policy, | 173 | .nla_policy = generic_timeout_nla_policy, |
| 174 | }, | 174 | }, |
| 175 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 175 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 176 | .init_net = generic_init_net, | 176 | .init_net = generic_init_net, |
| 177 | .get_net_proto = generic_get_net_proto, | 177 | .get_net_proto = generic_get_net_proto, |
| 178 | }; | 178 | }; |
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index d1632252bf5b..650eb4fba2c5 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c | |||
| @@ -285,7 +285,7 @@ static void gre_destroy(struct nf_conn *ct) | |||
| 285 | nf_ct_gre_keymap_destroy(master); | 285 | nf_ct_gre_keymap_destroy(master); |
| 286 | } | 286 | } |
| 287 | 287 | ||
| 288 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 288 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 289 | 289 | ||
| 290 | #include <linux/netfilter/nfnetlink.h> | 290 | #include <linux/netfilter/nfnetlink.h> |
| 291 | #include <linux/netfilter/nfnetlink_cttimeout.h> | 291 | #include <linux/netfilter/nfnetlink_cttimeout.h> |
| @@ -334,7 +334,7 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = { | |||
| 334 | [CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 }, | 334 | [CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 }, |
| 335 | [CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 }, | 335 | [CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 }, |
| 336 | }; | 336 | }; |
| 337 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 337 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 338 | 338 | ||
| 339 | static int gre_init_net(struct net *net, u_int16_t proto) | 339 | static int gre_init_net(struct net *net, u_int16_t proto) |
| 340 | { | 340 | { |
| @@ -367,7 +367,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = { | |||
| 367 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, | 367 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, |
| 368 | .nla_policy = nf_ct_port_nla_policy, | 368 | .nla_policy = nf_ct_port_nla_policy, |
| 369 | #endif | 369 | #endif |
| 370 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 370 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 371 | .ctnl_timeout = { | 371 | .ctnl_timeout = { |
| 372 | .nlattr_to_obj = gre_timeout_nlattr_to_obj, | 372 | .nlattr_to_obj = gre_timeout_nlattr_to_obj, |
| 373 | .obj_to_nlattr = gre_timeout_obj_to_nlattr, | 373 | .obj_to_nlattr = gre_timeout_obj_to_nlattr, |
| @@ -375,7 +375,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = { | |||
| 375 | .obj_size = sizeof(unsigned int) * GRE_CT_MAX, | 375 | .obj_size = sizeof(unsigned int) * GRE_CT_MAX, |
| 376 | .nla_policy = gre_timeout_nla_policy, | 376 | .nla_policy = gre_timeout_nla_policy, |
| 377 | }, | 377 | }, |
| 378 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 378 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 379 | .net_id = &proto_gre_net_id, | 379 | .net_id = &proto_gre_net_id, |
| 380 | .init_net = gre_init_net, | 380 | .init_net = gre_init_net, |
| 381 | }; | 381 | }; |
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c index 036670b38282..43c7e1a217b9 100644 --- a/net/netfilter/nf_conntrack_proto_icmp.c +++ b/net/netfilter/nf_conntrack_proto_icmp.c | |||
| @@ -273,7 +273,7 @@ static unsigned int icmp_nlattr_tuple_size(void) | |||
| 273 | } | 273 | } |
| 274 | #endif | 274 | #endif |
| 275 | 275 | ||
| 276 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 276 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 277 | 277 | ||
| 278 | #include <linux/netfilter/nfnetlink.h> | 278 | #include <linux/netfilter/nfnetlink.h> |
| 279 | #include <linux/netfilter/nfnetlink_cttimeout.h> | 279 | #include <linux/netfilter/nfnetlink_cttimeout.h> |
| @@ -313,7 +313,7 @@ static const struct nla_policy | |||
| 313 | icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = { | 313 | icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = { |
| 314 | [CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 }, | 314 | [CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 }, |
| 315 | }; | 315 | }; |
| 316 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 316 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 317 | 317 | ||
| 318 | #ifdef CONFIG_SYSCTL | 318 | #ifdef CONFIG_SYSCTL |
| 319 | static struct ctl_table icmp_sysctl_table[] = { | 319 | static struct ctl_table icmp_sysctl_table[] = { |
| @@ -374,7 +374,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp = | |||
| 374 | .nlattr_to_tuple = icmp_nlattr_to_tuple, | 374 | .nlattr_to_tuple = icmp_nlattr_to_tuple, |
| 375 | .nla_policy = icmp_nla_policy, | 375 | .nla_policy = icmp_nla_policy, |
| 376 | #endif | 376 | #endif |
| 377 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 377 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 378 | .ctnl_timeout = { | 378 | .ctnl_timeout = { |
| 379 | .nlattr_to_obj = icmp_timeout_nlattr_to_obj, | 379 | .nlattr_to_obj = icmp_timeout_nlattr_to_obj, |
| 380 | .obj_to_nlattr = icmp_timeout_obj_to_nlattr, | 380 | .obj_to_nlattr = icmp_timeout_obj_to_nlattr, |
| @@ -382,7 +382,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp = | |||
| 382 | .obj_size = sizeof(unsigned int), | 382 | .obj_size = sizeof(unsigned int), |
| 383 | .nla_policy = icmp_timeout_nla_policy, | 383 | .nla_policy = icmp_timeout_nla_policy, |
| 384 | }, | 384 | }, |
| 385 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 385 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 386 | .init_net = icmp_init_net, | 386 | .init_net = icmp_init_net, |
| 387 | .get_net_proto = icmp_get_net_proto, | 387 | .get_net_proto = icmp_get_net_proto, |
| 388 | }; | 388 | }; |
diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c index bed07b998a10..97e40f77d678 100644 --- a/net/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/netfilter/nf_conntrack_proto_icmpv6.c | |||
| @@ -274,7 +274,7 @@ static unsigned int icmpv6_nlattr_tuple_size(void) | |||
| 274 | } | 274 | } |
| 275 | #endif | 275 | #endif |
| 276 | 276 | ||
| 277 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 277 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 278 | 278 | ||
| 279 | #include <linux/netfilter/nfnetlink.h> | 279 | #include <linux/netfilter/nfnetlink.h> |
| 280 | #include <linux/netfilter/nfnetlink_cttimeout.h> | 280 | #include <linux/netfilter/nfnetlink_cttimeout.h> |
| @@ -314,7 +314,7 @@ static const struct nla_policy | |||
| 314 | icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = { | 314 | icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = { |
| 315 | [CTA_TIMEOUT_ICMPV6_TIMEOUT] = { .type = NLA_U32 }, | 315 | [CTA_TIMEOUT_ICMPV6_TIMEOUT] = { .type = NLA_U32 }, |
| 316 | }; | 316 | }; |
| 317 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 317 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 318 | 318 | ||
| 319 | #ifdef CONFIG_SYSCTL | 319 | #ifdef CONFIG_SYSCTL |
| 320 | static struct ctl_table icmpv6_sysctl_table[] = { | 320 | static struct ctl_table icmpv6_sysctl_table[] = { |
| @@ -373,7 +373,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 = | |||
| 373 | .nlattr_to_tuple = icmpv6_nlattr_to_tuple, | 373 | .nlattr_to_tuple = icmpv6_nlattr_to_tuple, |
| 374 | .nla_policy = icmpv6_nla_policy, | 374 | .nla_policy = icmpv6_nla_policy, |
| 375 | #endif | 375 | #endif |
| 376 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 376 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 377 | .ctnl_timeout = { | 377 | .ctnl_timeout = { |
| 378 | .nlattr_to_obj = icmpv6_timeout_nlattr_to_obj, | 378 | .nlattr_to_obj = icmpv6_timeout_nlattr_to_obj, |
| 379 | .obj_to_nlattr = icmpv6_timeout_obj_to_nlattr, | 379 | .obj_to_nlattr = icmpv6_timeout_obj_to_nlattr, |
| @@ -381,7 +381,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 = | |||
| 381 | .obj_size = sizeof(unsigned int), | 381 | .obj_size = sizeof(unsigned int), |
| 382 | .nla_policy = icmpv6_timeout_nla_policy, | 382 | .nla_policy = icmpv6_timeout_nla_policy, |
| 383 | }, | 383 | }, |
| 384 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 384 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 385 | .init_net = icmpv6_init_net, | 385 | .init_net = icmpv6_init_net, |
| 386 | .get_net_proto = icmpv6_get_net_proto, | 386 | .get_net_proto = icmpv6_get_net_proto, |
| 387 | }; | 387 | }; |
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 8d1e085fc14a..e4d738d34cd0 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c | |||
| @@ -591,7 +591,7 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct) | |||
| 591 | } | 591 | } |
| 592 | #endif | 592 | #endif |
| 593 | 593 | ||
| 594 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 594 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 595 | 595 | ||
| 596 | #include <linux/netfilter/nfnetlink.h> | 596 | #include <linux/netfilter/nfnetlink.h> |
| 597 | #include <linux/netfilter/nfnetlink_cttimeout.h> | 597 | #include <linux/netfilter/nfnetlink_cttimeout.h> |
| @@ -613,6 +613,8 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[], | |||
| 613 | timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; | 613 | timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; |
| 614 | } | 614 | } |
| 615 | } | 615 | } |
| 616 | |||
| 617 | timeouts[CTA_TIMEOUT_SCTP_UNSPEC] = timeouts[CTA_TIMEOUT_SCTP_CLOSED]; | ||
| 616 | return 0; | 618 | return 0; |
| 617 | } | 619 | } |
| 618 | 620 | ||
| @@ -644,7 +646,7 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = { | |||
| 644 | [CTA_TIMEOUT_SCTP_HEARTBEAT_SENT] = { .type = NLA_U32 }, | 646 | [CTA_TIMEOUT_SCTP_HEARTBEAT_SENT] = { .type = NLA_U32 }, |
| 645 | [CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { .type = NLA_U32 }, | 647 | [CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { .type = NLA_U32 }, |
| 646 | }; | 648 | }; |
| 647 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 649 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 648 | 650 | ||
| 649 | 651 | ||
| 650 | #ifdef CONFIG_SYSCTL | 652 | #ifdef CONFIG_SYSCTL |
| @@ -743,6 +745,11 @@ static int sctp_init_net(struct net *net, u_int16_t proto) | |||
| 743 | 745 | ||
| 744 | for (i = 0; i < SCTP_CONNTRACK_MAX; i++) | 746 | for (i = 0; i < SCTP_CONNTRACK_MAX; i++) |
| 745 | sn->timeouts[i] = sctp_timeouts[i]; | 747 | sn->timeouts[i] = sctp_timeouts[i]; |
| 748 | |||
| 749 | /* timeouts[0] is unused, init it so ->timeouts[0] contains | ||
| 750 | * 'new' timeout, like udp or icmp. | ||
| 751 | */ | ||
| 752 | sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED]; | ||
| 746 | } | 753 | } |
| 747 | 754 | ||
| 748 | return sctp_kmemdup_sysctl_table(pn, sn); | 755 | return sctp_kmemdup_sysctl_table(pn, sn); |
| @@ -773,7 +780,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = { | |||
| 773 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, | 780 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, |
| 774 | .nla_policy = nf_ct_port_nla_policy, | 781 | .nla_policy = nf_ct_port_nla_policy, |
| 775 | #endif | 782 | #endif |
| 776 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 783 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 777 | .ctnl_timeout = { | 784 | .ctnl_timeout = { |
| 778 | .nlattr_to_obj = sctp_timeout_nlattr_to_obj, | 785 | .nlattr_to_obj = sctp_timeout_nlattr_to_obj, |
| 779 | .obj_to_nlattr = sctp_timeout_obj_to_nlattr, | 786 | .obj_to_nlattr = sctp_timeout_obj_to_nlattr, |
| @@ -781,7 +788,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = { | |||
| 781 | .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX, | 788 | .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX, |
| 782 | .nla_policy = sctp_timeout_nla_policy, | 789 | .nla_policy = sctp_timeout_nla_policy, |
| 783 | }, | 790 | }, |
| 784 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 791 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 785 | .init_net = sctp_init_net, | 792 | .init_net = sctp_init_net, |
| 786 | .get_net_proto = sctp_get_net_proto, | 793 | .get_net_proto = sctp_get_net_proto, |
| 787 | }; | 794 | }; |
| @@ -806,7 +813,8 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = { | |||
| 806 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, | 813 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, |
| 807 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, | 814 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, |
| 808 | .nla_policy = nf_ct_port_nla_policy, | 815 | .nla_policy = nf_ct_port_nla_policy, |
| 809 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 816 | #endif |
| 817 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT | ||
| 810 | .ctnl_timeout = { | 818 | .ctnl_timeout = { |
| 811 | .nlattr_to_obj = sctp_timeout_nlattr_to_obj, | 819 | .nlattr_to_obj = sctp_timeout_nlattr_to_obj, |
| 812 | .obj_to_nlattr = sctp_timeout_obj_to_nlattr, | 820 | .obj_to_nlattr = sctp_timeout_obj_to_nlattr, |
| @@ -814,8 +822,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = { | |||
| 814 | .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX, | 822 | .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX, |
| 815 | .nla_policy = sctp_timeout_nla_policy, | 823 | .nla_policy = sctp_timeout_nla_policy, |
| 816 | }, | 824 | }, |
| 817 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 825 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 818 | #endif | ||
| 819 | .init_net = sctp_init_net, | 826 | .init_net = sctp_init_net, |
| 820 | .get_net_proto = sctp_get_net_proto, | 827 | .get_net_proto = sctp_get_net_proto, |
| 821 | }; | 828 | }; |
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index d80d322b9d8b..b4bdf9eda7b7 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
| @@ -1279,7 +1279,7 @@ static unsigned int tcp_nlattr_tuple_size(void) | |||
| 1279 | } | 1279 | } |
| 1280 | #endif | 1280 | #endif |
| 1281 | 1281 | ||
| 1282 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 1282 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 1283 | 1283 | ||
| 1284 | #include <linux/netfilter/nfnetlink.h> | 1284 | #include <linux/netfilter/nfnetlink.h> |
| 1285 | #include <linux/netfilter/nfnetlink_cttimeout.h> | 1285 | #include <linux/netfilter/nfnetlink_cttimeout.h> |
| @@ -1301,6 +1301,7 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], | |||
| 1301 | timeouts[TCP_CONNTRACK_SYN_SENT] = | 1301 | timeouts[TCP_CONNTRACK_SYN_SENT] = |
| 1302 | ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ; | 1302 | ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ; |
| 1303 | } | 1303 | } |
| 1304 | |||
| 1304 | if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) { | 1305 | if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) { |
| 1305 | timeouts[TCP_CONNTRACK_SYN_RECV] = | 1306 | timeouts[TCP_CONNTRACK_SYN_RECV] = |
| 1306 | ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ; | 1307 | ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ; |
| @@ -1341,6 +1342,8 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], | |||
| 1341 | timeouts[TCP_CONNTRACK_UNACK] = | 1342 | timeouts[TCP_CONNTRACK_UNACK] = |
| 1342 | ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ; | 1343 | ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ; |
| 1343 | } | 1344 | } |
| 1345 | |||
| 1346 | timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT]; | ||
| 1344 | return 0; | 1347 | return 0; |
| 1345 | } | 1348 | } |
| 1346 | 1349 | ||
| @@ -1391,7 +1394,7 @@ static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = { | |||
| 1391 | [CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 }, | 1394 | [CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 }, |
| 1392 | [CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 }, | 1395 | [CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 }, |
| 1393 | }; | 1396 | }; |
| 1394 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 1397 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 1395 | 1398 | ||
| 1396 | #ifdef CONFIG_SYSCTL | 1399 | #ifdef CONFIG_SYSCTL |
| 1397 | static struct ctl_table tcp_sysctl_table[] = { | 1400 | static struct ctl_table tcp_sysctl_table[] = { |
| @@ -1518,6 +1521,10 @@ static int tcp_init_net(struct net *net, u_int16_t proto) | |||
| 1518 | for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++) | 1521 | for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++) |
| 1519 | tn->timeouts[i] = tcp_timeouts[i]; | 1522 | tn->timeouts[i] = tcp_timeouts[i]; |
| 1520 | 1523 | ||
| 1524 | /* timeouts[0] is unused, make it same as SYN_SENT so | ||
| 1525 | * ->timeouts[0] contains 'new' timeout, like udp or icmp. | ||
| 1526 | */ | ||
| 1527 | tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT]; | ||
| 1521 | tn->tcp_loose = nf_ct_tcp_loose; | 1528 | tn->tcp_loose = nf_ct_tcp_loose; |
| 1522 | tn->tcp_be_liberal = nf_ct_tcp_be_liberal; | 1529 | tn->tcp_be_liberal = nf_ct_tcp_be_liberal; |
| 1523 | tn->tcp_max_retrans = nf_ct_tcp_max_retrans; | 1530 | tn->tcp_max_retrans = nf_ct_tcp_max_retrans; |
| @@ -1551,7 +1558,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 = | |||
| 1551 | .nlattr_size = TCP_NLATTR_SIZE, | 1558 | .nlattr_size = TCP_NLATTR_SIZE, |
| 1552 | .nla_policy = nf_ct_port_nla_policy, | 1559 | .nla_policy = nf_ct_port_nla_policy, |
| 1553 | #endif | 1560 | #endif |
| 1554 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 1561 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 1555 | .ctnl_timeout = { | 1562 | .ctnl_timeout = { |
| 1556 | .nlattr_to_obj = tcp_timeout_nlattr_to_obj, | 1563 | .nlattr_to_obj = tcp_timeout_nlattr_to_obj, |
| 1557 | .obj_to_nlattr = tcp_timeout_obj_to_nlattr, | 1564 | .obj_to_nlattr = tcp_timeout_obj_to_nlattr, |
| @@ -1560,7 +1567,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 = | |||
| 1560 | TCP_CONNTRACK_TIMEOUT_MAX, | 1567 | TCP_CONNTRACK_TIMEOUT_MAX, |
| 1561 | .nla_policy = tcp_timeout_nla_policy, | 1568 | .nla_policy = tcp_timeout_nla_policy, |
| 1562 | }, | 1569 | }, |
| 1563 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 1570 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 1564 | .init_net = tcp_init_net, | 1571 | .init_net = tcp_init_net, |
| 1565 | .get_net_proto = tcp_get_net_proto, | 1572 | .get_net_proto = tcp_get_net_proto, |
| 1566 | }; | 1573 | }; |
| @@ -1586,7 +1593,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 = | |||
| 1586 | .nlattr_tuple_size = tcp_nlattr_tuple_size, | 1593 | .nlattr_tuple_size = tcp_nlattr_tuple_size, |
| 1587 | .nla_policy = nf_ct_port_nla_policy, | 1594 | .nla_policy = nf_ct_port_nla_policy, |
| 1588 | #endif | 1595 | #endif |
| 1589 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 1596 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 1590 | .ctnl_timeout = { | 1597 | .ctnl_timeout = { |
| 1591 | .nlattr_to_obj = tcp_timeout_nlattr_to_obj, | 1598 | .nlattr_to_obj = tcp_timeout_nlattr_to_obj, |
| 1592 | .obj_to_nlattr = tcp_timeout_obj_to_nlattr, | 1599 | .obj_to_nlattr = tcp_timeout_obj_to_nlattr, |
| @@ -1595,7 +1602,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 = | |||
| 1595 | TCP_CONNTRACK_TIMEOUT_MAX, | 1602 | TCP_CONNTRACK_TIMEOUT_MAX, |
| 1596 | .nla_policy = tcp_timeout_nla_policy, | 1603 | .nla_policy = tcp_timeout_nla_policy, |
| 1597 | }, | 1604 | }, |
| 1598 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 1605 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 1599 | .init_net = tcp_init_net, | 1606 | .init_net = tcp_init_net, |
| 1600 | .get_net_proto = tcp_get_net_proto, | 1607 | .get_net_proto = tcp_get_net_proto, |
| 1601 | }; | 1608 | }; |
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index 7a1b8988a931..3065fb8ef91b 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c | |||
| @@ -171,7 +171,7 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, | |||
| 171 | return NF_ACCEPT; | 171 | return NF_ACCEPT; |
| 172 | } | 172 | } |
| 173 | 173 | ||
| 174 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 174 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 175 | 175 | ||
| 176 | #include <linux/netfilter/nfnetlink.h> | 176 | #include <linux/netfilter/nfnetlink.h> |
| 177 | #include <linux/netfilter/nfnetlink_cttimeout.h> | 177 | #include <linux/netfilter/nfnetlink_cttimeout.h> |
| @@ -221,7 +221,7 @@ udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = { | |||
| 221 | [CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 }, | 221 | [CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 }, |
| 222 | [CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 }, | 222 | [CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 }, |
| 223 | }; | 223 | }; |
| 224 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 224 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 225 | 225 | ||
| 226 | #ifdef CONFIG_SYSCTL | 226 | #ifdef CONFIG_SYSCTL |
| 227 | static struct ctl_table udp_sysctl_table[] = { | 227 | static struct ctl_table udp_sysctl_table[] = { |
| @@ -292,7 +292,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 = | |||
| 292 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, | 292 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, |
| 293 | .nla_policy = nf_ct_port_nla_policy, | 293 | .nla_policy = nf_ct_port_nla_policy, |
| 294 | #endif | 294 | #endif |
| 295 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 295 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 296 | .ctnl_timeout = { | 296 | .ctnl_timeout = { |
| 297 | .nlattr_to_obj = udp_timeout_nlattr_to_obj, | 297 | .nlattr_to_obj = udp_timeout_nlattr_to_obj, |
| 298 | .obj_to_nlattr = udp_timeout_obj_to_nlattr, | 298 | .obj_to_nlattr = udp_timeout_obj_to_nlattr, |
| @@ -300,7 +300,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 = | |||
| 300 | .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, | 300 | .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, |
| 301 | .nla_policy = udp_timeout_nla_policy, | 301 | .nla_policy = udp_timeout_nla_policy, |
| 302 | }, | 302 | }, |
| 303 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 303 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 304 | .init_net = udp_init_net, | 304 | .init_net = udp_init_net, |
| 305 | .get_net_proto = udp_get_net_proto, | 305 | .get_net_proto = udp_get_net_proto, |
| 306 | }; | 306 | }; |
| @@ -321,7 +321,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 = | |||
| 321 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, | 321 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, |
| 322 | .nla_policy = nf_ct_port_nla_policy, | 322 | .nla_policy = nf_ct_port_nla_policy, |
| 323 | #endif | 323 | #endif |
| 324 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 324 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 325 | .ctnl_timeout = { | 325 | .ctnl_timeout = { |
| 326 | .nlattr_to_obj = udp_timeout_nlattr_to_obj, | 326 | .nlattr_to_obj = udp_timeout_nlattr_to_obj, |
| 327 | .obj_to_nlattr = udp_timeout_obj_to_nlattr, | 327 | .obj_to_nlattr = udp_timeout_obj_to_nlattr, |
| @@ -329,7 +329,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 = | |||
| 329 | .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, | 329 | .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, |
| 330 | .nla_policy = udp_timeout_nla_policy, | 330 | .nla_policy = udp_timeout_nla_policy, |
| 331 | }, | 331 | }, |
| 332 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 332 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 333 | .init_net = udp_init_net, | 333 | .init_net = udp_init_net, |
| 334 | .get_net_proto = udp_get_net_proto, | 334 | .get_net_proto = udp_get_net_proto, |
| 335 | }; | 335 | }; |
| @@ -350,7 +350,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 = | |||
| 350 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, | 350 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, |
| 351 | .nla_policy = nf_ct_port_nla_policy, | 351 | .nla_policy = nf_ct_port_nla_policy, |
| 352 | #endif | 352 | #endif |
| 353 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 353 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 354 | .ctnl_timeout = { | 354 | .ctnl_timeout = { |
| 355 | .nlattr_to_obj = udp_timeout_nlattr_to_obj, | 355 | .nlattr_to_obj = udp_timeout_nlattr_to_obj, |
| 356 | .obj_to_nlattr = udp_timeout_obj_to_nlattr, | 356 | .obj_to_nlattr = udp_timeout_obj_to_nlattr, |
| @@ -358,7 +358,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 = | |||
| 358 | .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, | 358 | .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, |
| 359 | .nla_policy = udp_timeout_nla_policy, | 359 | .nla_policy = udp_timeout_nla_policy, |
| 360 | }, | 360 | }, |
| 361 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 361 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 362 | .init_net = udp_init_net, | 362 | .init_net = udp_init_net, |
| 363 | .get_net_proto = udp_get_net_proto, | 363 | .get_net_proto = udp_get_net_proto, |
| 364 | }; | 364 | }; |
| @@ -379,7 +379,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 = | |||
| 379 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, | 379 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, |
| 380 | .nla_policy = nf_ct_port_nla_policy, | 380 | .nla_policy = nf_ct_port_nla_policy, |
| 381 | #endif | 381 | #endif |
| 382 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 382 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
| 383 | .ctnl_timeout = { | 383 | .ctnl_timeout = { |
| 384 | .nlattr_to_obj = udp_timeout_nlattr_to_obj, | 384 | .nlattr_to_obj = udp_timeout_nlattr_to_obj, |
| 385 | .obj_to_nlattr = udp_timeout_obj_to_nlattr, | 385 | .obj_to_nlattr = udp_timeout_obj_to_nlattr, |
| @@ -387,10 +387,9 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 = | |||
| 387 | .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, | 387 | .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, |
| 388 | .nla_policy = udp_timeout_nla_policy, | 388 | .nla_policy = udp_timeout_nla_policy, |
| 389 | }, | 389 | }, |
| 390 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 390 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
| 391 | .init_net = udp_init_net, | 391 | .init_net = udp_init_net, |
| 392 | .get_net_proto = udp_get_net_proto, | 392 | .get_net_proto = udp_get_net_proto, |
| 393 | }; | 393 | }; |
| 394 | EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6); | 394 | EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6); |
| 395 | #endif | 395 | #endif |
| 396 | #include <net/netfilter/nf_conntrack_timeout.h> | ||
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 1dca5683f59f..2cfb173cd0b2 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -4637,6 +4637,7 @@ static int nft_flush_set(const struct nft_ctx *ctx, | |||
| 4637 | } | 4637 | } |
| 4638 | set->ndeact++; | 4638 | set->ndeact++; |
| 4639 | 4639 | ||
| 4640 | nft_set_elem_deactivate(ctx->net, set, elem); | ||
| 4640 | nft_trans_elem_set(trans) = set; | 4641 | nft_trans_elem_set(trans) = set; |
| 4641 | nft_trans_elem(trans) = *elem; | 4642 | nft_trans_elem(trans) = *elem; |
| 4642 | list_add_tail(&trans->list, &ctx->net->nft.commit_list); | 4643 | list_add_tail(&trans->list, &ctx->net->nft.commit_list); |
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index d46a236cdf31..a30f8ba4b89a 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c | |||
| @@ -489,8 +489,8 @@ err: | |||
| 489 | return err; | 489 | return err; |
| 490 | } | 490 | } |
| 491 | 491 | ||
| 492 | static struct ctnl_timeout * | 492 | static struct nf_ct_timeout *ctnl_timeout_find_get(struct net *net, |
| 493 | ctnl_timeout_find_get(struct net *net, const char *name) | 493 | const char *name) |
| 494 | { | 494 | { |
| 495 | struct ctnl_timeout *timeout, *matching = NULL; | 495 | struct ctnl_timeout *timeout, *matching = NULL; |
| 496 | 496 | ||
| @@ -509,7 +509,7 @@ ctnl_timeout_find_get(struct net *net, const char *name) | |||
| 509 | break; | 509 | break; |
| 510 | } | 510 | } |
| 511 | err: | 511 | err: |
| 512 | return matching; | 512 | return matching ? &matching->timeout : NULL; |
| 513 | } | 513 | } |
| 514 | 514 | ||
| 515 | static void ctnl_timeout_put(struct nf_ct_timeout *t) | 515 | static void ctnl_timeout_put(struct nf_ct_timeout *t) |
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index ea4ba551abb2..d33094f4ec41 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
| @@ -233,6 +233,7 @@ static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict) | |||
| 233 | int err; | 233 | int err; |
| 234 | 234 | ||
| 235 | if (verdict == NF_ACCEPT || | 235 | if (verdict == NF_ACCEPT || |
| 236 | verdict == NF_REPEAT || | ||
| 236 | verdict == NF_STOP) { | 237 | verdict == NF_STOP) { |
| 237 | rcu_read_lock(); | 238 | rcu_read_lock(); |
| 238 | ct_hook = rcu_dereference(nf_ct_hook); | 239 | ct_hook = rcu_dereference(nf_ct_hook); |
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 26a8baebd072..5dd87748afa8 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c | |||
| @@ -799,7 +799,7 @@ err: | |||
| 799 | } | 799 | } |
| 800 | 800 | ||
| 801 | struct nft_ct_timeout_obj { | 801 | struct nft_ct_timeout_obj { |
| 802 | struct nf_conn *tmpl; | 802 | struct nf_ct_timeout *timeout; |
| 803 | u8 l4proto; | 803 | u8 l4proto; |
| 804 | }; | 804 | }; |
| 805 | 805 | ||
| @@ -809,26 +809,42 @@ static void nft_ct_timeout_obj_eval(struct nft_object *obj, | |||
| 809 | { | 809 | { |
| 810 | const struct nft_ct_timeout_obj *priv = nft_obj_data(obj); | 810 | const struct nft_ct_timeout_obj *priv = nft_obj_data(obj); |
| 811 | struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb); | 811 | struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb); |
| 812 | struct sk_buff *skb = pkt->skb; | 812 | struct nf_conn_timeout *timeout; |
| 813 | const unsigned int *values; | ||
| 814 | |||
| 815 | if (priv->l4proto != pkt->tprot) | ||
| 816 | return; | ||
| 813 | 817 | ||
| 814 | if (ct || | 818 | if (!ct || nf_ct_is_template(ct) || nf_ct_is_confirmed(ct)) |
| 815 | priv->l4proto != pkt->tprot) | ||
| 816 | return; | 819 | return; |
| 817 | 820 | ||
| 818 | nf_ct_set(skb, priv->tmpl, IP_CT_NEW); | 821 | timeout = nf_ct_timeout_find(ct); |
| 822 | if (!timeout) { | ||
| 823 | timeout = nf_ct_timeout_ext_add(ct, priv->timeout, GFP_ATOMIC); | ||
| 824 | if (!timeout) { | ||
| 825 | regs->verdict.code = NF_DROP; | ||
| 826 | return; | ||
| 827 | } | ||
| 828 | } | ||
| 829 | |||
| 830 | rcu_assign_pointer(timeout->timeout, priv->timeout); | ||
| 831 | |||
| 832 | /* adjust the timeout as per 'new' state. ct is unconfirmed, | ||
| 833 | * so the current timestamp must not be added. | ||
| 834 | */ | ||
| 835 | values = nf_ct_timeout_data(timeout); | ||
| 836 | if (values) | ||
| 837 | nf_ct_refresh(ct, pkt->skb, values[0]); | ||
| 819 | } | 838 | } |
| 820 | 839 | ||
| 821 | static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx, | 840 | static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx, |
| 822 | const struct nlattr * const tb[], | 841 | const struct nlattr * const tb[], |
| 823 | struct nft_object *obj) | 842 | struct nft_object *obj) |
| 824 | { | 843 | { |
| 825 | const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt; | ||
| 826 | struct nft_ct_timeout_obj *priv = nft_obj_data(obj); | 844 | struct nft_ct_timeout_obj *priv = nft_obj_data(obj); |
| 827 | const struct nf_conntrack_l4proto *l4proto; | 845 | const struct nf_conntrack_l4proto *l4proto; |
| 828 | struct nf_conn_timeout *timeout_ext; | ||
| 829 | struct nf_ct_timeout *timeout; | 846 | struct nf_ct_timeout *timeout; |
| 830 | int l3num = ctx->family; | 847 | int l3num = ctx->family; |
| 831 | struct nf_conn *tmpl; | ||
| 832 | __u8 l4num; | 848 | __u8 l4num; |
| 833 | int ret; | 849 | int ret; |
| 834 | 850 | ||
| @@ -863,28 +879,14 @@ static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx, | |||
| 863 | 879 | ||
| 864 | timeout->l3num = l3num; | 880 | timeout->l3num = l3num; |
| 865 | timeout->l4proto = l4proto; | 881 | timeout->l4proto = l4proto; |
| 866 | tmpl = nf_ct_tmpl_alloc(ctx->net, zone, GFP_ATOMIC); | ||
| 867 | if (!tmpl) { | ||
| 868 | ret = -ENOMEM; | ||
| 869 | goto err_free_timeout; | ||
| 870 | } | ||
| 871 | |||
| 872 | timeout_ext = nf_ct_timeout_ext_add(tmpl, timeout, GFP_ATOMIC); | ||
| 873 | if (!timeout_ext) { | ||
| 874 | ret = -ENOMEM; | ||
| 875 | goto err_free_tmpl; | ||
| 876 | } | ||
| 877 | 882 | ||
| 878 | ret = nf_ct_netns_get(ctx->net, ctx->family); | 883 | ret = nf_ct_netns_get(ctx->net, ctx->family); |
| 879 | if (ret < 0) | 884 | if (ret < 0) |
| 880 | goto err_free_tmpl; | 885 | goto err_free_timeout; |
| 881 | |||
| 882 | priv->tmpl = tmpl; | ||
| 883 | 886 | ||
| 887 | priv->timeout = timeout; | ||
| 884 | return 0; | 888 | return 0; |
| 885 | 889 | ||
| 886 | err_free_tmpl: | ||
| 887 | nf_ct_tmpl_free(tmpl); | ||
| 888 | err_free_timeout: | 890 | err_free_timeout: |
| 889 | kfree(timeout); | 891 | kfree(timeout); |
| 890 | err_proto_put: | 892 | err_proto_put: |
| @@ -896,22 +898,19 @@ static void nft_ct_timeout_obj_destroy(const struct nft_ctx *ctx, | |||
| 896 | struct nft_object *obj) | 898 | struct nft_object *obj) |
| 897 | { | 899 | { |
| 898 | struct nft_ct_timeout_obj *priv = nft_obj_data(obj); | 900 | struct nft_ct_timeout_obj *priv = nft_obj_data(obj); |
| 899 | struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl); | 901 | struct nf_ct_timeout *timeout = priv->timeout; |
| 900 | struct nf_ct_timeout *timeout; | ||
| 901 | 902 | ||
| 902 | timeout = rcu_dereference_raw(t->timeout); | ||
| 903 | nf_ct_untimeout(ctx->net, timeout); | 903 | nf_ct_untimeout(ctx->net, timeout); |
| 904 | nf_ct_l4proto_put(timeout->l4proto); | 904 | nf_ct_l4proto_put(timeout->l4proto); |
| 905 | nf_ct_netns_put(ctx->net, ctx->family); | 905 | nf_ct_netns_put(ctx->net, ctx->family); |
| 906 | nf_ct_tmpl_free(priv->tmpl); | 906 | kfree(priv->timeout); |
| 907 | } | 907 | } |
| 908 | 908 | ||
| 909 | static int nft_ct_timeout_obj_dump(struct sk_buff *skb, | 909 | static int nft_ct_timeout_obj_dump(struct sk_buff *skb, |
| 910 | struct nft_object *obj, bool reset) | 910 | struct nft_object *obj, bool reset) |
| 911 | { | 911 | { |
| 912 | const struct nft_ct_timeout_obj *priv = nft_obj_data(obj); | 912 | const struct nft_ct_timeout_obj *priv = nft_obj_data(obj); |
| 913 | const struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl); | 913 | const struct nf_ct_timeout *timeout = priv->timeout; |
| 914 | const struct nf_ct_timeout *timeout = rcu_dereference_raw(t->timeout); | ||
| 915 | struct nlattr *nest_params; | 914 | struct nlattr *nest_params; |
| 916 | int ret; | 915 | int ret; |
| 917 | 916 | ||
diff --git a/net/netfilter/xt_CHECKSUM.c b/net/netfilter/xt_CHECKSUM.c index 9f4151ec3e06..6c7aa6a0a0d2 100644 --- a/net/netfilter/xt_CHECKSUM.c +++ b/net/netfilter/xt_CHECKSUM.c | |||
| @@ -16,6 +16,9 @@ | |||
| 16 | #include <linux/netfilter/x_tables.h> | 16 | #include <linux/netfilter/x_tables.h> |
| 17 | #include <linux/netfilter/xt_CHECKSUM.h> | 17 | #include <linux/netfilter/xt_CHECKSUM.h> |
| 18 | 18 | ||
| 19 | #include <linux/netfilter_ipv4/ip_tables.h> | ||
| 20 | #include <linux/netfilter_ipv6/ip6_tables.h> | ||
| 21 | |||
| 19 | MODULE_LICENSE("GPL"); | 22 | MODULE_LICENSE("GPL"); |
| 20 | MODULE_AUTHOR("Michael S. Tsirkin <mst@redhat.com>"); | 23 | MODULE_AUTHOR("Michael S. Tsirkin <mst@redhat.com>"); |
| 21 | MODULE_DESCRIPTION("Xtables: checksum modification"); | 24 | MODULE_DESCRIPTION("Xtables: checksum modification"); |
| @@ -25,7 +28,7 @@ MODULE_ALIAS("ip6t_CHECKSUM"); | |||
| 25 | static unsigned int | 28 | static unsigned int |
| 26 | checksum_tg(struct sk_buff *skb, const struct xt_action_param *par) | 29 | checksum_tg(struct sk_buff *skb, const struct xt_action_param *par) |
| 27 | { | 30 | { |
| 28 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 31 | if (skb->ip_summed == CHECKSUM_PARTIAL && !skb_is_gso(skb)) |
| 29 | skb_checksum_help(skb); | 32 | skb_checksum_help(skb); |
| 30 | 33 | ||
| 31 | return XT_CONTINUE; | 34 | return XT_CONTINUE; |
| @@ -34,6 +37,8 @@ checksum_tg(struct sk_buff *skb, const struct xt_action_param *par) | |||
| 34 | static int checksum_tg_check(const struct xt_tgchk_param *par) | 37 | static int checksum_tg_check(const struct xt_tgchk_param *par) |
| 35 | { | 38 | { |
| 36 | const struct xt_CHECKSUM_info *einfo = par->targinfo; | 39 | const struct xt_CHECKSUM_info *einfo = par->targinfo; |
| 40 | const struct ip6t_ip6 *i6 = par->entryinfo; | ||
| 41 | const struct ipt_ip *i4 = par->entryinfo; | ||
| 37 | 42 | ||
| 38 | if (einfo->operation & ~XT_CHECKSUM_OP_FILL) { | 43 | if (einfo->operation & ~XT_CHECKSUM_OP_FILL) { |
| 39 | pr_info_ratelimited("unsupported CHECKSUM operation %x\n", | 44 | pr_info_ratelimited("unsupported CHECKSUM operation %x\n", |
| @@ -43,6 +48,21 @@ static int checksum_tg_check(const struct xt_tgchk_param *par) | |||
| 43 | if (!einfo->operation) | 48 | if (!einfo->operation) |
| 44 | return -EINVAL; | 49 | return -EINVAL; |
| 45 | 50 | ||
| 51 | switch (par->family) { | ||
| 52 | case NFPROTO_IPV4: | ||
| 53 | if (i4->proto == IPPROTO_UDP && | ||
| 54 | (i4->invflags & XT_INV_PROTO) == 0) | ||
| 55 | return 0; | ||
| 56 | break; | ||
| 57 | case NFPROTO_IPV6: | ||
| 58 | if ((i6->flags & IP6T_F_PROTO) && | ||
| 59 | i6->proto == IPPROTO_UDP && | ||
| 60 | (i6->invflags & XT_INV_PROTO) == 0) | ||
| 61 | return 0; | ||
| 62 | break; | ||
| 63 | } | ||
| 64 | |||
| 65 | pr_warn_once("CHECKSUM should be avoided. If really needed, restrict with \"-p udp\" and only use in OUTPUT\n"); | ||
| 46 | return 0; | 66 | return 0; |
| 47 | } | 67 | } |
| 48 | 68 | ||
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c index dfbdbb2fc0ed..51d0c257e7a5 100644 --- a/net/netfilter/xt_cluster.c +++ b/net/netfilter/xt_cluster.c | |||
| @@ -125,6 +125,7 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
| 125 | static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) | 125 | static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) |
| 126 | { | 126 | { |
| 127 | struct xt_cluster_match_info *info = par->matchinfo; | 127 | struct xt_cluster_match_info *info = par->matchinfo; |
| 128 | int ret; | ||
| 128 | 129 | ||
| 129 | if (info->total_nodes > XT_CLUSTER_NODES_MAX) { | 130 | if (info->total_nodes > XT_CLUSTER_NODES_MAX) { |
| 130 | pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n", | 131 | pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n", |
| @@ -135,7 +136,17 @@ static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) | |||
| 135 | pr_info_ratelimited("node mask cannot exceed total number of nodes\n"); | 136 | pr_info_ratelimited("node mask cannot exceed total number of nodes\n"); |
| 136 | return -EDOM; | 137 | return -EDOM; |
| 137 | } | 138 | } |
| 138 | return 0; | 139 | |
| 140 | ret = nf_ct_netns_get(par->net, par->family); | ||
| 141 | if (ret < 0) | ||
| 142 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", | ||
| 143 | par->family); | ||
| 144 | return ret; | ||
| 145 | } | ||
| 146 | |||
| 147 | static void xt_cluster_mt_destroy(const struct xt_mtdtor_param *par) | ||
| 148 | { | ||
| 149 | nf_ct_netns_put(par->net, par->family); | ||
| 139 | } | 150 | } |
| 140 | 151 | ||
| 141 | static struct xt_match xt_cluster_match __read_mostly = { | 152 | static struct xt_match xt_cluster_match __read_mostly = { |
| @@ -144,6 +155,7 @@ static struct xt_match xt_cluster_match __read_mostly = { | |||
| 144 | .match = xt_cluster_mt, | 155 | .match = xt_cluster_mt, |
| 145 | .checkentry = xt_cluster_mt_checkentry, | 156 | .checkentry = xt_cluster_mt_checkentry, |
| 146 | .matchsize = sizeof(struct xt_cluster_match_info), | 157 | .matchsize = sizeof(struct xt_cluster_match_info), |
| 158 | .destroy = xt_cluster_mt_destroy, | ||
| 147 | .me = THIS_MODULE, | 159 | .me = THIS_MODULE, |
| 148 | }; | 160 | }; |
| 149 | 161 | ||
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 9b16402f29af..3e7d259e5d8d 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
| @@ -1057,7 +1057,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = { | |||
| 1057 | static void *dl_seq_start(struct seq_file *s, loff_t *pos) | 1057 | static void *dl_seq_start(struct seq_file *s, loff_t *pos) |
| 1058 | __acquires(htable->lock) | 1058 | __acquires(htable->lock) |
| 1059 | { | 1059 | { |
| 1060 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); | 1060 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); |
| 1061 | unsigned int *bucket; | 1061 | unsigned int *bucket; |
| 1062 | 1062 | ||
| 1063 | spin_lock_bh(&htable->lock); | 1063 | spin_lock_bh(&htable->lock); |
| @@ -1074,7 +1074,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos) | |||
| 1074 | 1074 | ||
| 1075 | static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) | 1075 | static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) |
| 1076 | { | 1076 | { |
| 1077 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); | 1077 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); |
| 1078 | unsigned int *bucket = v; | 1078 | unsigned int *bucket = v; |
| 1079 | 1079 | ||
| 1080 | *pos = ++(*bucket); | 1080 | *pos = ++(*bucket); |
| @@ -1088,7 +1088,7 @@ static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) | |||
| 1088 | static void dl_seq_stop(struct seq_file *s, void *v) | 1088 | static void dl_seq_stop(struct seq_file *s, void *v) |
| 1089 | __releases(htable->lock) | 1089 | __releases(htable->lock) |
| 1090 | { | 1090 | { |
| 1091 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); | 1091 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); |
| 1092 | unsigned int *bucket = v; | 1092 | unsigned int *bucket = v; |
| 1093 | 1093 | ||
| 1094 | if (!IS_ERR(bucket)) | 1094 | if (!IS_ERR(bucket)) |
| @@ -1130,7 +1130,7 @@ static void dl_seq_print(struct dsthash_ent *ent, u_int8_t family, | |||
| 1130 | static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family, | 1130 | static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family, |
| 1131 | struct seq_file *s) | 1131 | struct seq_file *s) |
| 1132 | { | 1132 | { |
| 1133 | struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private)); | 1133 | struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file)); |
| 1134 | 1134 | ||
| 1135 | spin_lock(&ent->lock); | 1135 | spin_lock(&ent->lock); |
| 1136 | /* recalculate to show accurate numbers */ | 1136 | /* recalculate to show accurate numbers */ |
| @@ -1145,7 +1145,7 @@ static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family, | |||
| 1145 | static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family, | 1145 | static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family, |
| 1146 | struct seq_file *s) | 1146 | struct seq_file *s) |
| 1147 | { | 1147 | { |
| 1148 | struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private)); | 1148 | struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file)); |
| 1149 | 1149 | ||
| 1150 | spin_lock(&ent->lock); | 1150 | spin_lock(&ent->lock); |
| 1151 | /* recalculate to show accurate numbers */ | 1151 | /* recalculate to show accurate numbers */ |
| @@ -1160,7 +1160,7 @@ static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family, | |||
| 1160 | static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, | 1160 | static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, |
| 1161 | struct seq_file *s) | 1161 | struct seq_file *s) |
| 1162 | { | 1162 | { |
| 1163 | struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private)); | 1163 | struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file)); |
| 1164 | 1164 | ||
| 1165 | spin_lock(&ent->lock); | 1165 | spin_lock(&ent->lock); |
| 1166 | /* recalculate to show accurate numbers */ | 1166 | /* recalculate to show accurate numbers */ |
| @@ -1174,7 +1174,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, | |||
| 1174 | 1174 | ||
| 1175 | static int dl_seq_show_v2(struct seq_file *s, void *v) | 1175 | static int dl_seq_show_v2(struct seq_file *s, void *v) |
| 1176 | { | 1176 | { |
| 1177 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); | 1177 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); |
| 1178 | unsigned int *bucket = (unsigned int *)v; | 1178 | unsigned int *bucket = (unsigned int *)v; |
| 1179 | struct dsthash_ent *ent; | 1179 | struct dsthash_ent *ent; |
| 1180 | 1180 | ||
| @@ -1188,7 +1188,7 @@ static int dl_seq_show_v2(struct seq_file *s, void *v) | |||
| 1188 | 1188 | ||
| 1189 | static int dl_seq_show_v1(struct seq_file *s, void *v) | 1189 | static int dl_seq_show_v1(struct seq_file *s, void *v) |
| 1190 | { | 1190 | { |
| 1191 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); | 1191 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); |
| 1192 | unsigned int *bucket = v; | 1192 | unsigned int *bucket = v; |
| 1193 | struct dsthash_ent *ent; | 1193 | struct dsthash_ent *ent; |
| 1194 | 1194 | ||
| @@ -1202,7 +1202,7 @@ static int dl_seq_show_v1(struct seq_file *s, void *v) | |||
| 1202 | 1202 | ||
| 1203 | static int dl_seq_show(struct seq_file *s, void *v) | 1203 | static int dl_seq_show(struct seq_file *s, void *v) |
| 1204 | { | 1204 | { |
| 1205 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); | 1205 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); |
| 1206 | unsigned int *bucket = v; | 1206 | unsigned int *bucket = v; |
| 1207 | struct dsthash_ent *ent; | 1207 | struct dsthash_ent *ent; |
| 1208 | 1208 | ||
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 5610061e7f2e..75c92a87e7b2 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -4137,36 +4137,52 @@ static const struct vm_operations_struct packet_mmap_ops = { | |||
| 4137 | .close = packet_mm_close, | 4137 | .close = packet_mm_close, |
| 4138 | }; | 4138 | }; |
| 4139 | 4139 | ||
| 4140 | static void free_pg_vec(struct pgv *pg_vec, unsigned int len) | 4140 | static void free_pg_vec(struct pgv *pg_vec, unsigned int order, |
| 4141 | unsigned int len) | ||
| 4141 | { | 4142 | { |
| 4142 | int i; | 4143 | int i; |
| 4143 | 4144 | ||
| 4144 | for (i = 0; i < len; i++) { | 4145 | for (i = 0; i < len; i++) { |
| 4145 | if (likely(pg_vec[i].buffer)) { | 4146 | if (likely(pg_vec[i].buffer)) { |
| 4146 | kvfree(pg_vec[i].buffer); | 4147 | if (is_vmalloc_addr(pg_vec[i].buffer)) |
| 4148 | vfree(pg_vec[i].buffer); | ||
| 4149 | else | ||
| 4150 | free_pages((unsigned long)pg_vec[i].buffer, | ||
| 4151 | order); | ||
| 4147 | pg_vec[i].buffer = NULL; | 4152 | pg_vec[i].buffer = NULL; |
| 4148 | } | 4153 | } |
| 4149 | } | 4154 | } |
| 4150 | kfree(pg_vec); | 4155 | kfree(pg_vec); |
| 4151 | } | 4156 | } |
| 4152 | 4157 | ||
| 4153 | static char *alloc_one_pg_vec_page(unsigned long size) | 4158 | static char *alloc_one_pg_vec_page(unsigned long order) |
| 4154 | { | 4159 | { |
| 4155 | char *buffer; | 4160 | char *buffer; |
| 4161 | gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | | ||
| 4162 | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; | ||
| 4156 | 4163 | ||
| 4157 | buffer = kvzalloc(size, GFP_KERNEL); | 4164 | buffer = (char *) __get_free_pages(gfp_flags, order); |
| 4158 | if (buffer) | 4165 | if (buffer) |
| 4159 | return buffer; | 4166 | return buffer; |
| 4160 | 4167 | ||
| 4161 | buffer = kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); | 4168 | /* __get_free_pages failed, fall back to vmalloc */ |
| 4169 | buffer = vzalloc(array_size((1 << order), PAGE_SIZE)); | ||
| 4170 | if (buffer) | ||
| 4171 | return buffer; | ||
| 4162 | 4172 | ||
| 4163 | return buffer; | 4173 | /* vmalloc failed, lets dig into swap here */ |
| 4174 | gfp_flags &= ~__GFP_NORETRY; | ||
| 4175 | buffer = (char *) __get_free_pages(gfp_flags, order); | ||
| 4176 | if (buffer) | ||
| 4177 | return buffer; | ||
| 4178 | |||
| 4179 | /* complete and utter failure */ | ||
| 4180 | return NULL; | ||
| 4164 | } | 4181 | } |
| 4165 | 4182 | ||
| 4166 | static struct pgv *alloc_pg_vec(struct tpacket_req *req) | 4183 | static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) |
| 4167 | { | 4184 | { |
| 4168 | unsigned int block_nr = req->tp_block_nr; | 4185 | unsigned int block_nr = req->tp_block_nr; |
| 4169 | unsigned long size = req->tp_block_size; | ||
| 4170 | struct pgv *pg_vec; | 4186 | struct pgv *pg_vec; |
| 4171 | int i; | 4187 | int i; |
| 4172 | 4188 | ||
| @@ -4175,7 +4191,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req) | |||
| 4175 | goto out; | 4191 | goto out; |
| 4176 | 4192 | ||
| 4177 | for (i = 0; i < block_nr; i++) { | 4193 | for (i = 0; i < block_nr; i++) { |
| 4178 | pg_vec[i].buffer = alloc_one_pg_vec_page(size); | 4194 | pg_vec[i].buffer = alloc_one_pg_vec_page(order); |
| 4179 | if (unlikely(!pg_vec[i].buffer)) | 4195 | if (unlikely(!pg_vec[i].buffer)) |
| 4180 | goto out_free_pgvec; | 4196 | goto out_free_pgvec; |
| 4181 | } | 4197 | } |
| @@ -4184,7 +4200,7 @@ out: | |||
| 4184 | return pg_vec; | 4200 | return pg_vec; |
| 4185 | 4201 | ||
| 4186 | out_free_pgvec: | 4202 | out_free_pgvec: |
| 4187 | free_pg_vec(pg_vec, block_nr); | 4203 | free_pg_vec(pg_vec, order, block_nr); |
| 4188 | pg_vec = NULL; | 4204 | pg_vec = NULL; |
| 4189 | goto out; | 4205 | goto out; |
| 4190 | } | 4206 | } |
| @@ -4194,9 +4210,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
| 4194 | { | 4210 | { |
| 4195 | struct pgv *pg_vec = NULL; | 4211 | struct pgv *pg_vec = NULL; |
| 4196 | struct packet_sock *po = pkt_sk(sk); | 4212 | struct packet_sock *po = pkt_sk(sk); |
| 4213 | int was_running, order = 0; | ||
| 4197 | struct packet_ring_buffer *rb; | 4214 | struct packet_ring_buffer *rb; |
| 4198 | struct sk_buff_head *rb_queue; | 4215 | struct sk_buff_head *rb_queue; |
| 4199 | int was_running; | ||
| 4200 | __be16 num; | 4216 | __be16 num; |
| 4201 | int err = -EINVAL; | 4217 | int err = -EINVAL; |
| 4202 | /* Added to avoid minimal code churn */ | 4218 | /* Added to avoid minimal code churn */ |
| @@ -4258,7 +4274,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
| 4258 | goto out; | 4274 | goto out; |
| 4259 | 4275 | ||
| 4260 | err = -ENOMEM; | 4276 | err = -ENOMEM; |
| 4261 | pg_vec = alloc_pg_vec(req); | 4277 | order = get_order(req->tp_block_size); |
| 4278 | pg_vec = alloc_pg_vec(req, order); | ||
| 4262 | if (unlikely(!pg_vec)) | 4279 | if (unlikely(!pg_vec)) |
| 4263 | goto out; | 4280 | goto out; |
| 4264 | switch (po->tp_version) { | 4281 | switch (po->tp_version) { |
| @@ -4312,6 +4329,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
| 4312 | rb->frame_size = req->tp_frame_size; | 4329 | rb->frame_size = req->tp_frame_size; |
| 4313 | spin_unlock_bh(&rb_queue->lock); | 4330 | spin_unlock_bh(&rb_queue->lock); |
| 4314 | 4331 | ||
| 4332 | swap(rb->pg_vec_order, order); | ||
| 4315 | swap(rb->pg_vec_len, req->tp_block_nr); | 4333 | swap(rb->pg_vec_len, req->tp_block_nr); |
| 4316 | 4334 | ||
| 4317 | rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; | 4335 | rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; |
| @@ -4337,7 +4355,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
| 4337 | } | 4355 | } |
| 4338 | 4356 | ||
| 4339 | if (pg_vec) | 4357 | if (pg_vec) |
| 4340 | free_pg_vec(pg_vec, req->tp_block_nr); | 4358 | free_pg_vec(pg_vec, order, req->tp_block_nr); |
| 4341 | out: | 4359 | out: |
| 4342 | return err; | 4360 | return err; |
| 4343 | } | 4361 | } |
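The af_packet hunks above move the ring-block allocator back from kvzalloc() to explicit page allocations of a given order, with vzalloc() and then a blocking page allocation as fallbacks, and record that order in the ring buffer so the free path can undo whichever variant succeeded. A condensed, illustrative sketch of the allocate/free pairing (not the verbatim kernel code):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/vmalloc.h>

/* Try a cheap, non-retrying page allocation first, then vmalloc, then
 * a page allocation that is allowed to retry and dig into reclaim. */
static char *ring_block_alloc(unsigned int order)
{
	gfp_t gfp = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
		    __GFP_NOWARN | __GFP_NORETRY;
	char *buf;

	buf = (char *)__get_free_pages(gfp, order);
	if (buf)
		return buf;

	buf = vzalloc(array_size(1 << order, PAGE_SIZE));
	if (buf)
		return buf;

	gfp &= ~__GFP_NORETRY;
	return (char *)__get_free_pages(gfp, order);
}

/* The free side must match whichever allocator succeeded. */
static void ring_block_free(char *buf, unsigned int order)
{
	if (is_vmalloc_addr(buf))
		vfree(buf);
	else
		free_pages((unsigned long)buf, order);
}

As in the hunks, the order comes from get_order(req->tp_block_size) and is remembered in the ring buffer (pg_vec_order) so the teardown path can hand the same order back to free_pages().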
diff --git a/net/packet/internal.h b/net/packet/internal.h index 8f50036f62f0..3bb7c5fb3bff 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h | |||
| @@ -64,6 +64,7 @@ struct packet_ring_buffer { | |||
| 64 | unsigned int frame_size; | 64 | unsigned int frame_size; |
| 65 | unsigned int frame_max; | 65 | unsigned int frame_max; |
| 66 | 66 | ||
| 67 | unsigned int pg_vec_order; | ||
| 67 | unsigned int pg_vec_pages; | 68 | unsigned int pg_vec_pages; |
| 68 | unsigned int pg_vec_len; | 69 | unsigned int pg_vec_len; |
| 69 | 70 | ||
diff --git a/net/rds/Kconfig b/net/rds/Kconfig index 01b3bd6a3708..b9092111bc45 100644 --- a/net/rds/Kconfig +++ b/net/rds/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | 1 | ||
| 2 | config RDS | 2 | config RDS |
| 3 | tristate "The RDS Protocol" | 3 | tristate "The Reliable Datagram Sockets Protocol" |
| 4 | depends on INET | 4 | depends on INET |
| 5 | ---help--- | 5 | ---help--- |
| 6 | The RDS (Reliable Datagram Sockets) protocol provides reliable, | 6 | The RDS (Reliable Datagram Sockets) protocol provides reliable, |
diff --git a/net/rds/bind.c b/net/rds/bind.c index 3ab55784b637..762d2c6788a3 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c | |||
| @@ -76,11 +76,13 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port, | |||
| 76 | struct rds_sock *rs; | 76 | struct rds_sock *rs; |
| 77 | 77 | ||
| 78 | __rds_create_bind_key(key, addr, port, scope_id); | 78 | __rds_create_bind_key(key, addr, port, scope_id); |
| 79 | rs = rhashtable_lookup_fast(&bind_hash_table, key, ht_parms); | 79 | rcu_read_lock(); |
| 80 | rs = rhashtable_lookup(&bind_hash_table, key, ht_parms); | ||
| 80 | if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) | 81 | if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) |
| 81 | rds_sock_addref(rs); | 82 | rds_sock_addref(rs); |
| 82 | else | 83 | else |
| 83 | rs = NULL; | 84 | rs = NULL; |
| 85 | rcu_read_unlock(); | ||
| 84 | 86 | ||
| 85 | rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr, | 87 | rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr, |
| 86 | ntohs(port)); | 88 | ntohs(port)); |
| @@ -235,6 +237,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
| 235 | goto out; | 237 | goto out; |
| 236 | } | 238 | } |
| 237 | 239 | ||
| 240 | sock_set_flag(sk, SOCK_RCU_FREE); | ||
| 238 | ret = rds_add_bound(rs, binding_addr, &port, scope_id); | 241 | ret = rds_add_bound(rs, binding_addr, &port, scope_id); |
| 239 | if (ret) | 242 | if (ret) |
| 240 | goto out; | 243 | goto out; |
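The rds_find_bound() change above switches to plain rhashtable_lookup() under an explicit rcu_read_lock() and, together with SOCK_RCU_FREE set at bind time, ensures the socket memory cannot be reused while a lockless reader is still inspecting it. A hedged sketch of that lookup pattern (my_sock and its table parameters are hypothetical):

#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
#include <net/sock.h>

/* Sketch only.  The object is found, validated and reference-counted
 * entirely inside the RCU read-side section; with SOCK_RCU_FREE the
 * socket is freed through an RCU grace period, so the window between
 * lookup and sock_hold() never touches freed memory. */
struct my_sock {
	struct sock		sk;
	struct rhash_head	node;
};

static struct my_sock *my_find_bound(struct rhashtable *ht, const void *key,
				     const struct rhashtable_params params)
{
	struct my_sock *ms;

	rcu_read_lock();
	ms = rhashtable_lookup(ht, key, params);
	if (ms && !sock_flag(&ms->sk, SOCK_DEAD))
		sock_hold(&ms->sk);	/* pin before leaving the RCU section */
	else
		ms = NULL;
	rcu_read_unlock();

	return ms;
}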
diff --git a/net/rds/ib.c b/net/rds/ib.c index c1d97640c0be..eba75c1ba359 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c | |||
| @@ -341,15 +341,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn, | |||
| 341 | 341 | ||
| 342 | if (rds_conn_state(conn) == RDS_CONN_UP) { | 342 | if (rds_conn_state(conn) == RDS_CONN_UP) { |
| 343 | struct rds_ib_device *rds_ibdev; | 343 | struct rds_ib_device *rds_ibdev; |
| 344 | struct rdma_dev_addr *dev_addr; | ||
| 345 | 344 | ||
| 346 | ic = conn->c_transport_data; | 345 | ic = conn->c_transport_data; |
| 347 | dev_addr = &ic->i_cm_id->route.addr.dev_addr; | 346 | rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid, |
| 348 | rdma_addr_get_sgid(dev_addr, | 347 | (union ib_gid *)&iinfo6->dst_gid); |
| 349 | (union ib_gid *)&iinfo6->src_gid); | ||
| 350 | rdma_addr_get_dgid(dev_addr, | ||
| 351 | (union ib_gid *)&iinfo6->dst_gid); | ||
| 352 | |||
| 353 | rds_ibdev = ic->rds_ibdev; | 348 | rds_ibdev = ic->rds_ibdev; |
| 354 | iinfo6->max_send_wr = ic->i_send_ring.w_nr; | 349 | iinfo6->max_send_wr = ic->i_send_ring.w_nr; |
| 355 | iinfo6->max_recv_wr = ic->i_recv_ring.w_nr; | 350 | iinfo6->max_recv_wr = ic->i_recv_ring.w_nr; |
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index 00192a996be0..0f8465852254 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
| 21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
| 22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
| 23 | #include <linux/mod_devicetable.h> | ||
| 23 | #include <linux/rfkill.h> | 24 | #include <linux/rfkill.h> |
| 24 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
| 25 | #include <linux/clk.h> | 26 | #include <linux/clk.h> |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index db83dac1e7f4..e12f8ef7baa4 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
| @@ -662,6 +662,13 @@ int tcf_action_destroy(struct tc_action *actions[], int bind) | |||
| 662 | return ret; | 662 | return ret; |
| 663 | } | 663 | } |
| 664 | 664 | ||
| 665 | static int tcf_action_destroy_1(struct tc_action *a, int bind) | ||
| 666 | { | ||
| 667 | struct tc_action *actions[] = { a, NULL }; | ||
| 668 | |||
| 669 | return tcf_action_destroy(actions, bind); | ||
| 670 | } | ||
| 671 | |||
| 665 | static int tcf_action_put(struct tc_action *p) | 672 | static int tcf_action_put(struct tc_action *p) |
| 666 | { | 673 | { |
| 667 | return __tcf_action_put(p, false); | 674 | return __tcf_action_put(p, false); |
| @@ -881,17 +888,16 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, | |||
| 881 | if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) { | 888 | if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) { |
| 882 | err = tcf_action_goto_chain_init(a, tp); | 889 | err = tcf_action_goto_chain_init(a, tp); |
| 883 | if (err) { | 890 | if (err) { |
| 884 | struct tc_action *actions[] = { a, NULL }; | 891 | tcf_action_destroy_1(a, bind); |
| 885 | |||
| 886 | tcf_action_destroy(actions, bind); | ||
| 887 | NL_SET_ERR_MSG(extack, "Failed to init TC action chain"); | 892 | NL_SET_ERR_MSG(extack, "Failed to init TC action chain"); |
| 888 | return ERR_PTR(err); | 893 | return ERR_PTR(err); |
| 889 | } | 894 | } |
| 890 | } | 895 | } |
| 891 | 896 | ||
| 892 | if (!tcf_action_valid(a->tcfa_action)) { | 897 | if (!tcf_action_valid(a->tcfa_action)) { |
| 893 | NL_SET_ERR_MSG(extack, "invalid action value, using TC_ACT_UNSPEC instead"); | 898 | tcf_action_destroy_1(a, bind); |
| 894 | a->tcfa_action = TC_ACT_UNSPEC; | 899 | NL_SET_ERR_MSG(extack, "Invalid control action value"); |
| 900 | return ERR_PTR(-EINVAL); | ||
| 895 | } | 901 | } |
| 896 | 902 | ||
| 897 | return a; | 903 | return a; |
| @@ -1173,6 +1179,7 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[]) | |||
| 1173 | struct tcf_idrinfo *idrinfo = a->idrinfo; | 1179 | struct tcf_idrinfo *idrinfo = a->idrinfo; |
| 1174 | u32 act_index = a->tcfa_index; | 1180 | u32 act_index = a->tcfa_index; |
| 1175 | 1181 | ||
| 1182 | actions[i] = NULL; | ||
| 1176 | if (tcf_action_put(a)) { | 1183 | if (tcf_action_put(a)) { |
| 1177 | /* last reference, action was deleted concurrently */ | 1184 | /* last reference, action was deleted concurrently */ |
| 1178 | module_put(ops->owner); | 1185 | module_put(ops->owner); |
| @@ -1184,7 +1191,6 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[]) | |||
| 1184 | if (ret < 0) | 1191 | if (ret < 0) |
| 1185 | return ret; | 1192 | return ret; |
| 1186 | } | 1193 | } |
| 1187 | actions[i] = NULL; | ||
| 1188 | } | 1194 | } |
| 1189 | return 0; | 1195 | return 0; |
| 1190 | } | 1196 | } |
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 196430aefe87..06a3d4801878 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c | |||
| @@ -326,6 +326,20 @@ static int __add_metainfo(const struct tcf_meta_ops *ops, | |||
| 326 | return ret; | 326 | return ret; |
| 327 | } | 327 | } |
| 328 | 328 | ||
| 329 | static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops, | ||
| 330 | struct tcf_ife_info *ife, u32 metaid, | ||
| 331 | bool exists) | ||
| 332 | { | ||
| 333 | int ret; | ||
| 334 | |||
| 335 | if (!try_module_get(ops->owner)) | ||
| 336 | return -ENOENT; | ||
| 337 | ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists); | ||
| 338 | if (ret) | ||
| 339 | module_put(ops->owner); | ||
| 340 | return ret; | ||
| 341 | } | ||
| 342 | |||
| 329 | static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, | 343 | static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, |
| 330 | int len, bool exists) | 344 | int len, bool exists) |
| 331 | { | 345 | { |
| @@ -349,7 +363,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists) | |||
| 349 | 363 | ||
| 350 | read_lock(&ife_mod_lock); | 364 | read_lock(&ife_mod_lock); |
| 351 | list_for_each_entry(o, &ifeoplist, list) { | 365 | list_for_each_entry(o, &ifeoplist, list) { |
| 352 | rc = __add_metainfo(o, ife, o->metaid, NULL, 0, true, exists); | 366 | rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists); |
| 353 | if (rc == 0) | 367 | if (rc == 0) |
| 354 | installed += 1; | 368 | installed += 1; |
| 355 | } | 369 | } |
| @@ -400,7 +414,6 @@ static void _tcf_ife_cleanup(struct tc_action *a) | |||
| 400 | struct tcf_meta_info *e, *n; | 414 | struct tcf_meta_info *e, *n; |
| 401 | 415 | ||
| 402 | list_for_each_entry_safe(e, n, &ife->metalist, metalist) { | 416 | list_for_each_entry_safe(e, n, &ife->metalist, metalist) { |
| 403 | module_put(e->ops->owner); | ||
| 404 | list_del(&e->metalist); | 417 | list_del(&e->metalist); |
| 405 | if (e->metaval) { | 418 | if (e->metaval) { |
| 406 | if (e->ops->release) | 419 | if (e->ops->release) |
| @@ -408,6 +421,7 @@ static void _tcf_ife_cleanup(struct tc_action *a) | |||
| 408 | else | 421 | else |
| 409 | kfree(e->metaval); | 422 | kfree(e->metaval); |
| 410 | } | 423 | } |
| 424 | module_put(e->ops->owner); | ||
| 411 | kfree(e); | 425 | kfree(e); |
| 412 | } | 426 | } |
| 413 | } | 427 | } |
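In the act_ife hunks, use_all_metadata() now goes through add_metainfo_and_get_ops(), which takes a reference on the ops' owning module before installing the metadata and drops it again if the install fails; on the cleanup side, module_put() is moved after the last use of e->ops. A hedged sketch of that pairing with hypothetical stand-in types:

#include <linux/module.h>
#include <linux/slab.h>

/* Illustrative only: my_ops/my_entry/my_install() stand in for the IFE
 * metadata ops, the metainfo entry and __add_metainfo(). */
struct my_entry;

struct my_ops {
	struct module *owner;
	void (*release)(struct my_entry *e);
};

struct my_entry {
	const struct my_ops *ops;
};

static int my_install(const struct my_ops *ops)
{
	return 0;	/* stand-in for __add_metainfo() */
}

static int my_install_and_get_ops(const struct my_ops *ops)
{
	int ret;

	if (!try_module_get(ops->owner))
		return -ENOENT;

	ret = my_install(ops);
	if (ret)
		module_put(ops->owner);	/* undo the reference on failure */
	return ret;
}

static void my_cleanup(struct my_entry *e)
{
	if (e->ops->release)
		e->ops->release(e);	/* ops still pinned here */
	module_put(e->ops->owner);	/* nothing dereferences e->ops below */
	kfree(e);
}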
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 107034070019..ad99a99f11f6 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
| @@ -109,16 +109,18 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb, | |||
| 109 | { | 109 | { |
| 110 | struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX); | 110 | struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX); |
| 111 | 111 | ||
| 112 | if (!keys_start) | ||
| 113 | goto nla_failure; | ||
| 112 | for (; n > 0; n--) { | 114 | for (; n > 0; n--) { |
| 113 | struct nlattr *key_start; | 115 | struct nlattr *key_start; |
| 114 | 116 | ||
| 115 | key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX); | 117 | key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX); |
| 118 | if (!key_start) | ||
| 119 | goto nla_failure; | ||
| 116 | 120 | ||
| 117 | if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) || | 121 | if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) || |
| 118 | nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) { | 122 | nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) |
| 119 | nlmsg_trim(skb, keys_start); | 123 | goto nla_failure; |
| 120 | return -EINVAL; | ||
| 121 | } | ||
| 122 | 124 | ||
| 123 | nla_nest_end(skb, key_start); | 125 | nla_nest_end(skb, key_start); |
| 124 | 126 | ||
| @@ -128,6 +130,9 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb, | |||
| 128 | nla_nest_end(skb, keys_start); | 130 | nla_nest_end(skb, keys_start); |
| 129 | 131 | ||
| 130 | return 0; | 132 | return 0; |
| 133 | nla_failure: | ||
| 134 | nla_nest_cancel(skb, keys_start); | ||
| 135 | return -EINVAL; | ||
| 131 | } | 136 | } |
| 132 | 137 | ||
| 133 | static int tcf_pedit_init(struct net *net, struct nlattr *nla, | 138 | static int tcf_pedit_init(struct net *net, struct nlattr *nla, |
| @@ -418,7 +423,10 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, | |||
| 418 | opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind; | 423 | opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind; |
| 419 | 424 | ||
| 420 | if (p->tcfp_keys_ex) { | 425 | if (p->tcfp_keys_ex) { |
| 421 | tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys); | 426 | if (tcf_pedit_key_ex_dump(skb, |
| 427 | p->tcfp_keys_ex, | ||
| 428 | p->tcfp_nkeys)) | ||
| 429 | goto nla_put_failure; | ||
| 422 | 430 | ||
| 423 | if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt)) | 431 | if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt)) |
| 424 | goto nla_put_failure; | 432 | goto nla_put_failure; |
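The pedit dump fixes above add the missing checks on nla_nest_start() and, on any failure, rewind the partially built attribute with nla_nest_cancel(); tcf_pedit_dump() in turn now checks the helper's return value instead of ignoring it. A short, hedged sketch of the nest/cancel pattern (the TCA_MY_* ids are made up):

#include <net/netlink.h>

/* Hypothetical attribute ids, for illustration only. */
#define TCA_MY_NEST	1
#define TCA_MY_VAL	2

/* Any failure after nla_nest_start() must unwind with nla_nest_cancel()
 * so the truncated nest is trimmed from the skb rather than being sent
 * to userspace half-filled. */
static int my_dump_values(struct sk_buff *skb, const u16 *vals, int n)
{
	struct nlattr *nest = nla_nest_start(skb, TCA_MY_NEST);
	int i;

	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < n; i++)
		if (nla_put_u16(skb, TCA_MY_VAL, vals[i]))
			goto cancel;

	nla_nest_end(skb, nest);
	return 0;

cancel:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}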
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 420759153d5f..681f6f04e7da 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c | |||
| @@ -317,7 +317,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, | |||
| 317 | &metadata->u.tun_info, | 317 | &metadata->u.tun_info, |
| 318 | opts_len, extack); | 318 | opts_len, extack); |
| 319 | if (ret < 0) | 319 | if (ret < 0) |
| 320 | goto err_out; | 320 | goto release_tun_meta; |
| 321 | } | 321 | } |
| 322 | 322 | ||
| 323 | metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX; | 323 | metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX; |
| @@ -333,23 +333,24 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, | |||
| 333 | &act_tunnel_key_ops, bind, true); | 333 | &act_tunnel_key_ops, bind, true); |
| 334 | if (ret) { | 334 | if (ret) { |
| 335 | NL_SET_ERR_MSG(extack, "Cannot create TC IDR"); | 335 | NL_SET_ERR_MSG(extack, "Cannot create TC IDR"); |
| 336 | goto err_out; | 336 | goto release_tun_meta; |
| 337 | } | 337 | } |
| 338 | 338 | ||
| 339 | ret = ACT_P_CREATED; | 339 | ret = ACT_P_CREATED; |
| 340 | } else if (!ovr) { | 340 | } else if (!ovr) { |
| 341 | tcf_idr_release(*a, bind); | ||
| 342 | NL_SET_ERR_MSG(extack, "TC IDR already exists"); | 341 | NL_SET_ERR_MSG(extack, "TC IDR already exists"); |
| 343 | return -EEXIST; | 342 | ret = -EEXIST; |
| 343 | goto release_tun_meta; | ||
| 344 | } | 344 | } |
| 345 | 345 | ||
| 346 | t = to_tunnel_key(*a); | 346 | t = to_tunnel_key(*a); |
| 347 | 347 | ||
| 348 | params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); | 348 | params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); |
| 349 | if (unlikely(!params_new)) { | 349 | if (unlikely(!params_new)) { |
| 350 | tcf_idr_release(*a, bind); | ||
| 351 | NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters"); | 350 | NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters"); |
| 352 | return -ENOMEM; | 351 | ret = -ENOMEM; |
| 352 | exists = true; | ||
| 353 | goto release_tun_meta; | ||
| 353 | } | 354 | } |
| 354 | params_new->tcft_action = parm->t_action; | 355 | params_new->tcft_action = parm->t_action; |
| 355 | params_new->tcft_enc_metadata = metadata; | 356 | params_new->tcft_enc_metadata = metadata; |
| @@ -367,6 +368,9 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, | |||
| 367 | 368 | ||
| 368 | return ret; | 369 | return ret; |
| 369 | 370 | ||
| 371 | release_tun_meta: | ||
| 372 | dst_release(&metadata->dst); | ||
| 373 | |||
| 370 | err_out: | 374 | err_out: |
| 371 | if (exists) | 375 | if (exists) |
| 372 | tcf_idr_release(*a, bind); | 376 | tcf_idr_release(*a, bind); |
| @@ -408,8 +412,10 @@ static int tunnel_key_geneve_opts_dump(struct sk_buff *skb, | |||
| 408 | nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE, | 412 | nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE, |
| 409 | opt->type) || | 413 | opt->type) || |
| 410 | nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA, | 414 | nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA, |
| 411 | opt->length * 4, opt + 1)) | 415 | opt->length * 4, opt + 1)) { |
| 416 | nla_nest_cancel(skb, start); | ||
| 412 | return -EMSGSIZE; | 417 | return -EMSGSIZE; |
| 418 | } | ||
| 413 | 419 | ||
| 414 | len -= sizeof(struct geneve_opt) + opt->length * 4; | 420 | len -= sizeof(struct geneve_opt) + opt->length * 4; |
| 415 | src += sizeof(struct geneve_opt) + opt->length * 4; | 421 | src += sizeof(struct geneve_opt) + opt->length * 4; |
| @@ -423,7 +429,7 @@ static int tunnel_key_opts_dump(struct sk_buff *skb, | |||
| 423 | const struct ip_tunnel_info *info) | 429 | const struct ip_tunnel_info *info) |
| 424 | { | 430 | { |
| 425 | struct nlattr *start; | 431 | struct nlattr *start; |
| 426 | int err; | 432 | int err = -EINVAL; |
| 427 | 433 | ||
| 428 | if (!info->options_len) | 434 | if (!info->options_len) |
| 429 | return 0; | 435 | return 0; |
| @@ -435,9 +441,11 @@ static int tunnel_key_opts_dump(struct sk_buff *skb, | |||
| 435 | if (info->key.tun_flags & TUNNEL_GENEVE_OPT) { | 441 | if (info->key.tun_flags & TUNNEL_GENEVE_OPT) { |
| 436 | err = tunnel_key_geneve_opts_dump(skb, info); | 442 | err = tunnel_key_geneve_opts_dump(skb, info); |
| 437 | if (err) | 443 | if (err) |
| 438 | return err; | 444 | goto err_out; |
| 439 | } else { | 445 | } else { |
| 440 | return -EINVAL; | 446 | err_out: |
| 447 | nla_nest_cancel(skb, start); | ||
| 448 | return err; | ||
| 441 | } | 449 | } |
| 442 | 450 | ||
| 443 | nla_nest_end(skb, start); | 451 | nla_nest_end(skb, start); |
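tunnel_key_init() above gains a release_tun_meta label that drops the tunnel metadata dst before falling through to the existing err_out unwinding, so the -EEXIST, -ENOMEM and option-parsing failures all release the metadata exactly once instead of leaking it. A minimal sketch of that cascading-label style (the acquire/put names are hypothetical stand-ins, not the actual tunnel_key helpers):

/* Sketch of the error-unwinding shape only. */
static int my_get_meta(void)  { return 0; }	/* stand-in: metadata setup */
static void my_put_meta(void) { }		/* stand-in: dst_release()  */
static int my_get_idr(void)   { return 0; }	/* stand-in: IDR setup      */

static int my_init(void)
{
	int err;

	err = my_get_meta();
	if (err)
		return err;

	err = my_get_idr();
	if (err)
		goto release_meta;	/* every later failure funnels here */

	return 0;

release_meta:
	my_put_meta();			/* released exactly once per failure */
	return err;
}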
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 31bd1439cf60..1a67af8a6e8c 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
| @@ -1252,7 +1252,7 @@ replay: | |||
| 1252 | } | 1252 | } |
| 1253 | chain = tcf_chain_get(block, chain_index, true); | 1253 | chain = tcf_chain_get(block, chain_index, true); |
| 1254 | if (!chain) { | 1254 | if (!chain) { |
| 1255 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); | 1255 | NL_SET_ERR_MSG(extack, "Cannot create specified filter chain"); |
| 1256 | err = -ENOMEM; | 1256 | err = -ENOMEM; |
| 1257 | goto errout; | 1257 | goto errout; |
| 1258 | } | 1258 | } |
| @@ -1399,7 +1399,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, | |||
| 1399 | goto errout; | 1399 | goto errout; |
| 1400 | } | 1400 | } |
| 1401 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); | 1401 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); |
| 1402 | err = -EINVAL; | 1402 | err = -ENOENT; |
| 1403 | goto errout; | 1403 | goto errout; |
| 1404 | } | 1404 | } |
| 1405 | 1405 | ||
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index ef5c9a82d4e8..a644292f9faf 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
| @@ -215,7 +215,6 @@ static const struct seq_operations sctp_eps_ops = { | |||
| 215 | struct sctp_ht_iter { | 215 | struct sctp_ht_iter { |
| 216 | struct seq_net_private p; | 216 | struct seq_net_private p; |
| 217 | struct rhashtable_iter hti; | 217 | struct rhashtable_iter hti; |
| 218 | int start_fail; | ||
| 219 | }; | 218 | }; |
| 220 | 219 | ||
| 221 | static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) | 220 | static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) |
| @@ -224,7 +223,6 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) | |||
| 224 | 223 | ||
| 225 | sctp_transport_walk_start(&iter->hti); | 224 | sctp_transport_walk_start(&iter->hti); |
| 226 | 225 | ||
| 227 | iter->start_fail = 0; | ||
| 228 | return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); | 226 | return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); |
| 229 | } | 227 | } |
| 230 | 228 | ||
| @@ -232,8 +230,6 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v) | |||
| 232 | { | 230 | { |
| 233 | struct sctp_ht_iter *iter = seq->private; | 231 | struct sctp_ht_iter *iter = seq->private; |
| 234 | 232 | ||
| 235 | if (iter->start_fail) | ||
| 236 | return; | ||
| 237 | sctp_transport_walk_stop(&iter->hti); | 233 | sctp_transport_walk_stop(&iter->hti); |
| 238 | } | 234 | } |
| 239 | 235 | ||
| @@ -264,8 +260,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) | |||
| 264 | } | 260 | } |
| 265 | 261 | ||
| 266 | transport = (struct sctp_transport *)v; | 262 | transport = (struct sctp_transport *)v; |
| 267 | if (!sctp_transport_hold(transport)) | ||
| 268 | return 0; | ||
| 269 | assoc = transport->asoc; | 263 | assoc = transport->asoc; |
| 270 | epb = &assoc->base; | 264 | epb = &assoc->base; |
| 271 | sk = epb->sk; | 265 | sk = epb->sk; |
| @@ -322,8 +316,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) | |||
| 322 | } | 316 | } |
| 323 | 317 | ||
| 324 | transport = (struct sctp_transport *)v; | 318 | transport = (struct sctp_transport *)v; |
| 325 | if (!sctp_transport_hold(transport)) | ||
| 326 | return 0; | ||
| 327 | assoc = transport->asoc; | 319 | assoc = transport->asoc; |
| 328 | 320 | ||
| 329 | list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list, | 321 | list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list, |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index e96b15a66aba..f73e9d38d5ba 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -2658,20 +2658,23 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, | |||
| 2658 | } | 2658 | } |
| 2659 | 2659 | ||
| 2660 | if (params->spp_flags & SPP_IPV6_FLOWLABEL) { | 2660 | if (params->spp_flags & SPP_IPV6_FLOWLABEL) { |
| 2661 | if (trans && trans->ipaddr.sa.sa_family == AF_INET6) { | 2661 | if (trans) { |
| 2662 | trans->flowlabel = params->spp_ipv6_flowlabel & | 2662 | if (trans->ipaddr.sa.sa_family == AF_INET6) { |
| 2663 | SCTP_FLOWLABEL_VAL_MASK; | ||
| 2664 | trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; | ||
| 2665 | } else if (asoc) { | ||
| 2666 | list_for_each_entry(trans, | ||
| 2667 | &asoc->peer.transport_addr_list, | ||
| 2668 | transports) { | ||
| 2669 | if (trans->ipaddr.sa.sa_family != AF_INET6) | ||
| 2670 | continue; | ||
| 2671 | trans->flowlabel = params->spp_ipv6_flowlabel & | 2663 | trans->flowlabel = params->spp_ipv6_flowlabel & |
| 2672 | SCTP_FLOWLABEL_VAL_MASK; | 2664 | SCTP_FLOWLABEL_VAL_MASK; |
| 2673 | trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; | 2665 | trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; |
| 2674 | } | 2666 | } |
| 2667 | } else if (asoc) { | ||
| 2668 | struct sctp_transport *t; | ||
| 2669 | |||
| 2670 | list_for_each_entry(t, &asoc->peer.transport_addr_list, | ||
| 2671 | transports) { | ||
| 2672 | if (t->ipaddr.sa.sa_family != AF_INET6) | ||
| 2673 | continue; | ||
| 2674 | t->flowlabel = params->spp_ipv6_flowlabel & | ||
| 2675 | SCTP_FLOWLABEL_VAL_MASK; | ||
| 2676 | t->flowlabel |= SCTP_FLOWLABEL_SET_MASK; | ||
| 2677 | } | ||
| 2675 | asoc->flowlabel = params->spp_ipv6_flowlabel & | 2678 | asoc->flowlabel = params->spp_ipv6_flowlabel & |
| 2676 | SCTP_FLOWLABEL_VAL_MASK; | 2679 | SCTP_FLOWLABEL_VAL_MASK; |
| 2677 | asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK; | 2680 | asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK; |
| @@ -2687,12 +2690,13 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, | |||
| 2687 | trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; | 2690 | trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; |
| 2688 | trans->dscp |= SCTP_DSCP_SET_MASK; | 2691 | trans->dscp |= SCTP_DSCP_SET_MASK; |
| 2689 | } else if (asoc) { | 2692 | } else if (asoc) { |
| 2690 | list_for_each_entry(trans, | 2693 | struct sctp_transport *t; |
| 2691 | &asoc->peer.transport_addr_list, | 2694 | |
| 2695 | list_for_each_entry(t, &asoc->peer.transport_addr_list, | ||
| 2692 | transports) { | 2696 | transports) { |
| 2693 | trans->dscp = params->spp_dscp & | 2697 | t->dscp = params->spp_dscp & |
| 2694 | SCTP_DSCP_VAL_MASK; | 2698 | SCTP_DSCP_VAL_MASK; |
| 2695 | trans->dscp |= SCTP_DSCP_SET_MASK; | 2699 | t->dscp |= SCTP_DSCP_SET_MASK; |
| 2696 | } | 2700 | } |
| 2697 | asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; | 2701 | asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; |
| 2698 | asoc->dscp |= SCTP_DSCP_SET_MASK; | 2702 | asoc->dscp |= SCTP_DSCP_SET_MASK; |
| @@ -5005,9 +5009,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net, | |||
| 5005 | break; | 5009 | break; |
| 5006 | } | 5010 | } |
| 5007 | 5011 | ||
| 5012 | if (!sctp_transport_hold(t)) | ||
| 5013 | continue; | ||
| 5014 | |||
| 5008 | if (net_eq(sock_net(t->asoc->base.sk), net) && | 5015 | if (net_eq(sock_net(t->asoc->base.sk), net) && |
| 5009 | t->asoc->peer.primary_path == t) | 5016 | t->asoc->peer.primary_path == t) |
| 5010 | break; | 5017 | break; |
| 5018 | |||
| 5019 | sctp_transport_put(t); | ||
| 5011 | } | 5020 | } |
| 5012 | 5021 | ||
| 5013 | return t; | 5022 | return t; |
| @@ -5017,13 +5026,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net, | |||
| 5017 | struct rhashtable_iter *iter, | 5026 | struct rhashtable_iter *iter, |
| 5018 | int pos) | 5027 | int pos) |
| 5019 | { | 5028 | { |
| 5020 | void *obj = SEQ_START_TOKEN; | 5029 | struct sctp_transport *t; |
| 5021 | 5030 | ||
| 5022 | while (pos && (obj = sctp_transport_get_next(net, iter)) && | 5031 | if (!pos) |
| 5023 | !IS_ERR(obj)) | 5032 | return SEQ_START_TOKEN; |
| 5024 | pos--; | ||
| 5025 | 5033 | ||
| 5026 | return obj; | 5034 | while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) { |
| 5035 | if (!--pos) | ||
| 5036 | break; | ||
| 5037 | sctp_transport_put(t); | ||
| 5038 | } | ||
| 5039 | |||
| 5040 | return t; | ||
| 5027 | } | 5041 | } |
| 5028 | 5042 | ||
| 5029 | int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), | 5043 | int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), |
| @@ -5082,8 +5096,6 @@ again: | |||
| 5082 | 5096 | ||
| 5083 | tsp = sctp_transport_get_idx(net, &hti, *pos + 1); | 5097 | tsp = sctp_transport_get_idx(net, &hti, *pos + 1); |
| 5084 | for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) { | 5098 | for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) { |
| 5085 | if (!sctp_transport_hold(tsp)) | ||
| 5086 | continue; | ||
| 5087 | ret = cb(tsp, p); | 5099 | ret = cb(tsp, p); |
| 5088 | if (ret) | 5100 | if (ret) |
| 5089 | break; | 5101 | break; |
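The sctp changes take the transport reference inside sctp_transport_get_next() itself, skipping transports whose refcount has already hit zero, so the /proc iterators and sctp_for_each_transport() no longer re-check sctp_transport_hold() and simply put the reference they were handed (which is also why sctp_transport_get_idx() now puts every transport it steps over). A hedged sketch of an iterator that returns its objects with a reference held (my_* names are stand-ins):

#include <linux/err.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>

struct my_obj {
	struct rhash_head	node;
	refcount_t		refcnt;
};

static bool my_hold(struct my_obj *obj)
{
	return refcount_inc_not_zero(&obj->refcnt);
}

/* Returns the next live object with a reference held (or NULL/ERR_PTR).
 * Dying objects are skipped, so callers only ever see objects they may
 * safely use and must release with a matching put. */
static struct my_obj *my_get_next(struct rhashtable_iter *iter)
{
	struct my_obj *obj;

	while ((obj = rhashtable_walk_next(iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* table resize; keep walking */
			break;
		}
		if (my_hold(obj))
			break;			/* caller now owns a reference */
		/* refcount was already zero: skip this one */
	}
	return obj;
}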
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 9ee6cfea56dd..d8026543bf4c 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
| @@ -51,12 +51,12 @@ const char tipc_bclink_name[] = "broadcast-link"; | |||
| 51 | * struct tipc_bc_base - base structure for keeping broadcast send state | 51 | * struct tipc_bc_base - base structure for keeping broadcast send state |
| 52 | * @link: broadcast send link structure | 52 | * @link: broadcast send link structure |
| 53 | * @inputq: data input queue; will only carry SOCK_WAKEUP messages | 53 | * @inputq: data input queue; will only carry SOCK_WAKEUP messages |
| 54 | * @dest: array keeping number of reachable destinations per bearer | 54 | * @dests: array keeping number of reachable destinations per bearer |
| 55 | * @primary_bearer: a bearer having links to all broadcast destinations, if any | 55 | * @primary_bearer: a bearer having links to all broadcast destinations, if any |
| 56 | * @bcast_support: indicates if primary bearer, if any, supports broadcast | 56 | * @bcast_support: indicates if primary bearer, if any, supports broadcast |
| 57 | * @rcast_support: indicates if all peer nodes support replicast | 57 | * @rcast_support: indicates if all peer nodes support replicast |
| 58 | * @rc_ratio: dest count as percentage of cluster size where send method changes | 58 | * @rc_ratio: dest count as percentage of cluster size where send method changes |
| 59 | * @bc_threshold: calculated drom rc_ratio; if dests > threshold use broadcast | 59 | * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast |
| 60 | */ | 60 | */ |
| 61 | struct tipc_bc_base { | 61 | struct tipc_bc_base { |
| 62 | struct tipc_link *link; | 62 | struct tipc_link *link; |
diff --git a/net/tipc/diag.c b/net/tipc/diag.c index aaabb0b776dd..73137f4aeb68 100644 --- a/net/tipc/diag.c +++ b/net/tipc/diag.c | |||
| @@ -84,7 +84,9 @@ static int tipc_sock_diag_handler_dump(struct sk_buff *skb, | |||
| 84 | 84 | ||
| 85 | if (h->nlmsg_flags & NLM_F_DUMP) { | 85 | if (h->nlmsg_flags & NLM_F_DUMP) { |
| 86 | struct netlink_dump_control c = { | 86 | struct netlink_dump_control c = { |
| 87 | .start = tipc_dump_start, | ||
| 87 | .dump = tipc_diag_dump, | 88 | .dump = tipc_diag_dump, |
| 89 | .done = tipc_dump_done, | ||
| 88 | }; | 90 | }; |
| 89 | netlink_dump_start(net->diag_nlsk, skb, h, &c); | 91 | netlink_dump_start(net->diag_nlsk, skb, h, &c); |
| 90 | return 0; | 92 | return 0; |
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 88f027b502f6..66d5b2c5987a 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
| @@ -980,20 +980,17 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 980 | 980 | ||
| 981 | struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port) | 981 | struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port) |
| 982 | { | 982 | { |
| 983 | u64 value = (u64)node << 32 | port; | ||
| 984 | struct tipc_dest *dst; | 983 | struct tipc_dest *dst; |
| 985 | 984 | ||
| 986 | list_for_each_entry(dst, l, list) { | 985 | list_for_each_entry(dst, l, list) { |
| 987 | if (dst->value != value) | 986 | if (dst->node == node && dst->port == port) |
| 988 | continue; | 987 | return dst; |
| 989 | return dst; | ||
| 990 | } | 988 | } |
| 991 | return NULL; | 989 | return NULL; |
| 992 | } | 990 | } |
| 993 | 991 | ||
| 994 | bool tipc_dest_push(struct list_head *l, u32 node, u32 port) | 992 | bool tipc_dest_push(struct list_head *l, u32 node, u32 port) |
| 995 | { | 993 | { |
| 996 | u64 value = (u64)node << 32 | port; | ||
| 997 | struct tipc_dest *dst; | 994 | struct tipc_dest *dst; |
| 998 | 995 | ||
| 999 | if (tipc_dest_find(l, node, port)) | 996 | if (tipc_dest_find(l, node, port)) |
| @@ -1002,7 +999,8 @@ bool tipc_dest_push(struct list_head *l, u32 node, u32 port) | |||
| 1002 | dst = kmalloc(sizeof(*dst), GFP_ATOMIC); | 999 | dst = kmalloc(sizeof(*dst), GFP_ATOMIC); |
| 1003 | if (unlikely(!dst)) | 1000 | if (unlikely(!dst)) |
| 1004 | return false; | 1001 | return false; |
| 1005 | dst->value = value; | 1002 | dst->node = node; |
| 1003 | dst->port = port; | ||
| 1006 | list_add(&dst->list, l); | 1004 | list_add(&dst->list, l); |
| 1007 | return true; | 1005 | return true; |
| 1008 | } | 1006 | } |
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 0febba41da86..892bd750b85f 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h | |||
| @@ -133,13 +133,8 @@ void tipc_nametbl_stop(struct net *net); | |||
| 133 | 133 | ||
| 134 | struct tipc_dest { | 134 | struct tipc_dest { |
| 135 | struct list_head list; | 135 | struct list_head list; |
| 136 | union { | 136 | u32 port; |
| 137 | struct { | 137 | u32 node; |
| 138 | u32 port; | ||
| 139 | u32 node; | ||
| 140 | }; | ||
| 141 | u64 value; | ||
| 142 | }; | ||
| 143 | }; | 138 | }; |
| 144 | 139 | ||
| 145 | struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port); | 140 | struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port); |
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index 6ff2254088f6..99ee419210ba 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c | |||
| @@ -167,7 +167,9 @@ static const struct genl_ops tipc_genl_v2_ops[] = { | |||
| 167 | }, | 167 | }, |
| 168 | { | 168 | { |
| 169 | .cmd = TIPC_NL_SOCK_GET, | 169 | .cmd = TIPC_NL_SOCK_GET, |
| 170 | .start = tipc_dump_start, | ||
| 170 | .dumpit = tipc_nl_sk_dump, | 171 | .dumpit = tipc_nl_sk_dump, |
| 172 | .done = tipc_dump_done, | ||
| 171 | .policy = tipc_nl_policy, | 173 | .policy = tipc_nl_policy, |
| 172 | }, | 174 | }, |
| 173 | { | 175 | { |
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index a2f76743c73a..6376467e78f8 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c | |||
| @@ -185,6 +185,10 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, | |||
| 185 | return -ENOMEM; | 185 | return -ENOMEM; |
| 186 | 186 | ||
| 187 | buf->sk = msg->dst_sk; | 187 | buf->sk = msg->dst_sk; |
| 188 | if (__tipc_dump_start(&cb, msg->net)) { | ||
| 189 | kfree_skb(buf); | ||
| 190 | return -ENOMEM; | ||
| 191 | } | ||
| 188 | 192 | ||
| 189 | do { | 193 | do { |
| 190 | int rem; | 194 | int rem; |
| @@ -216,6 +220,7 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, | |||
| 216 | err = 0; | 220 | err = 0; |
| 217 | 221 | ||
| 218 | err_out: | 222 | err_out: |
| 223 | tipc_dump_done(&cb); | ||
| 219 | kfree_skb(buf); | 224 | kfree_skb(buf); |
| 220 | 225 | ||
| 221 | if (err == -EMSGSIZE) { | 226 | if (err == -EMSGSIZE) { |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index c1e93c9515bc..3f03ddd0e35b 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
| @@ -576,6 +576,7 @@ static int tipc_release(struct socket *sock) | |||
| 576 | sk_stop_timer(sk, &sk->sk_timer); | 576 | sk_stop_timer(sk, &sk->sk_timer); |
| 577 | tipc_sk_remove(tsk); | 577 | tipc_sk_remove(tsk); |
| 578 | 578 | ||
| 579 | sock_orphan(sk); | ||
| 579 | /* Reject any messages that accumulated in backlog queue */ | 580 | /* Reject any messages that accumulated in backlog queue */ |
| 580 | release_sock(sk); | 581 | release_sock(sk); |
| 581 | tipc_dest_list_purge(&tsk->cong_links); | 582 | tipc_dest_list_purge(&tsk->cong_links); |
| @@ -2672,6 +2673,8 @@ void tipc_sk_reinit(struct net *net) | |||
| 2672 | 2673 | ||
| 2673 | rhashtable_walk_stop(&iter); | 2674 | rhashtable_walk_stop(&iter); |
| 2674 | } while (tsk == ERR_PTR(-EAGAIN)); | 2675 | } while (tsk == ERR_PTR(-EAGAIN)); |
| 2676 | |||
| 2677 | rhashtable_walk_exit(&iter); | ||
| 2675 | } | 2678 | } |
| 2676 | 2679 | ||
| 2677 | static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) | 2680 | static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) |
| @@ -3227,45 +3230,74 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, | |||
| 3227 | struct netlink_callback *cb, | 3230 | struct netlink_callback *cb, |
| 3228 | struct tipc_sock *tsk)) | 3231 | struct tipc_sock *tsk)) |
| 3229 | { | 3232 | { |
| 3230 | struct net *net = sock_net(skb->sk); | 3233 | struct rhashtable_iter *iter = (void *)cb->args[4]; |
| 3231 | struct tipc_net *tn = tipc_net(net); | ||
| 3232 | const struct bucket_table *tbl; | ||
| 3233 | u32 prev_portid = cb->args[1]; | ||
| 3234 | u32 tbl_id = cb->args[0]; | ||
| 3235 | struct rhash_head *pos; | ||
| 3236 | struct tipc_sock *tsk; | 3234 | struct tipc_sock *tsk; |
| 3237 | int err; | 3235 | int err; |
| 3238 | 3236 | ||
| 3239 | rcu_read_lock(); | 3237 | rhashtable_walk_start(iter); |
| 3240 | tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); | 3238 | while ((tsk = rhashtable_walk_next(iter)) != NULL) { |
| 3241 | for (; tbl_id < tbl->size; tbl_id++) { | 3239 | if (IS_ERR(tsk)) { |
| 3242 | rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) { | 3240 | err = PTR_ERR(tsk); |
| 3243 | spin_lock_bh(&tsk->sk.sk_lock.slock); | 3241 | if (err == -EAGAIN) { |
| 3244 | if (prev_portid && prev_portid != tsk->portid) { | 3242 | err = 0; |
| 3245 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | ||
| 3246 | continue; | 3243 | continue; |
| 3247 | } | 3244 | } |
| 3245 | break; | ||
| 3246 | } | ||
| 3248 | 3247 | ||
| 3249 | err = skb_handler(skb, cb, tsk); | 3248 | sock_hold(&tsk->sk); |
| 3250 | if (err) { | 3249 | rhashtable_walk_stop(iter); |
| 3251 | prev_portid = tsk->portid; | 3250 | lock_sock(&tsk->sk); |
| 3252 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | 3251 | err = skb_handler(skb, cb, tsk); |
| 3253 | goto out; | 3252 | if (err) { |
| 3254 | } | 3253 | release_sock(&tsk->sk); |
| 3255 | 3254 | sock_put(&tsk->sk); | |
| 3256 | prev_portid = 0; | 3255 | goto out; |
| 3257 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | ||
| 3258 | } | 3256 | } |
| 3257 | release_sock(&tsk->sk); | ||
| 3258 | rhashtable_walk_start(iter); | ||
| 3259 | sock_put(&tsk->sk); | ||
| 3259 | } | 3260 | } |
| 3261 | rhashtable_walk_stop(iter); | ||
| 3260 | out: | 3262 | out: |
| 3261 | rcu_read_unlock(); | ||
| 3262 | cb->args[0] = tbl_id; | ||
| 3263 | cb->args[1] = prev_portid; | ||
| 3264 | |||
| 3265 | return skb->len; | 3263 | return skb->len; |
| 3266 | } | 3264 | } |
| 3267 | EXPORT_SYMBOL(tipc_nl_sk_walk); | 3265 | EXPORT_SYMBOL(tipc_nl_sk_walk); |
| 3268 | 3266 | ||
| 3267 | int tipc_dump_start(struct netlink_callback *cb) | ||
| 3268 | { | ||
| 3269 | return __tipc_dump_start(cb, sock_net(cb->skb->sk)); | ||
| 3270 | } | ||
| 3271 | EXPORT_SYMBOL(tipc_dump_start); | ||
| 3272 | |||
| 3273 | int __tipc_dump_start(struct netlink_callback *cb, struct net *net) | ||
| 3274 | { | ||
| 3275 | /* tipc_nl_name_table_dump() uses cb->args[0...3]. */ | ||
| 3276 | struct rhashtable_iter *iter = (void *)cb->args[4]; | ||
| 3277 | struct tipc_net *tn = tipc_net(net); | ||
| 3278 | |||
| 3279 | if (!iter) { | ||
| 3280 | iter = kmalloc(sizeof(*iter), GFP_KERNEL); | ||
| 3281 | if (!iter) | ||
| 3282 | return -ENOMEM; | ||
| 3283 | |||
| 3284 | cb->args[4] = (long)iter; | ||
| 3285 | } | ||
| 3286 | |||
| 3287 | rhashtable_walk_enter(&tn->sk_rht, iter); | ||
| 3288 | return 0; | ||
| 3289 | } | ||
| 3290 | |||
| 3291 | int tipc_dump_done(struct netlink_callback *cb) | ||
| 3292 | { | ||
| 3293 | struct rhashtable_iter *hti = (void *)cb->args[4]; | ||
| 3294 | |||
| 3295 | rhashtable_walk_exit(hti); | ||
| 3296 | kfree(hti); | ||
| 3297 | return 0; | ||
| 3298 | } | ||
| 3299 | EXPORT_SYMBOL(tipc_dump_done); | ||
| 3300 | |||
| 3269 | int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, | 3301 | int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, |
| 3270 | struct tipc_sock *tsk, u32 sk_filter_state, | 3302 | struct tipc_sock *tsk, u32 sk_filter_state, |
| 3271 | u64 (*tipc_diag_gen_cookie)(struct sock *sk)) | 3303 | u64 (*tipc_diag_gen_cookie)(struct sock *sk)) |
diff --git a/net/tipc/socket.h b/net/tipc/socket.h index aff9b2ae5a1f..5e575f205afe 100644 --- a/net/tipc/socket.h +++ b/net/tipc/socket.h | |||
| @@ -68,4 +68,7 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, | |||
| 68 | int (*skb_handler)(struct sk_buff *skb, | 68 | int (*skb_handler)(struct sk_buff *skb, |
| 69 | struct netlink_callback *cb, | 69 | struct netlink_callback *cb, |
| 70 | struct tipc_sock *tsk)); | 70 | struct tipc_sock *tsk)); |
| 71 | int tipc_dump_start(struct netlink_callback *cb); | ||
| 72 | int __tipc_dump_start(struct netlink_callback *cb, struct net *net); | ||
| 73 | int tipc_dump_done(struct netlink_callback *cb); | ||
| 71 | #endif | 74 | #endif |
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index c8e34ef22c30..2627b5d812e9 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c | |||
| @@ -313,8 +313,8 @@ static void tipc_conn_send_work(struct work_struct *work) | |||
| 313 | conn_put(con); | 313 | conn_put(con); |
| 314 | } | 314 | } |
| 315 | 315 | ||
| 316 | /* tipc_conn_queue_evt() - interrupt level call from a subscription instance | 316 | /* tipc_topsrv_queue_evt() - interrupt level call from a subscription instance |
| 317 | * The queued work is launched into tipc_send_work()->tipc_send_to_sock() | 317 | * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock() |
| 318 | */ | 318 | */ |
| 319 | void tipc_topsrv_queue_evt(struct net *net, int conid, | 319 | void tipc_topsrv_queue_evt(struct net *net, int conid, |
| 320 | u32 event, struct tipc_event *evt) | 320 | u32 event, struct tipc_event *evt) |
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 52fbe727d7c1..e28a6ff25d96 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c | |||
| @@ -125,6 +125,9 @@ static int alloc_encrypted_sg(struct sock *sk, int len) | |||
| 125 | &ctx->sg_encrypted_num_elem, | 125 | &ctx->sg_encrypted_num_elem, |
| 126 | &ctx->sg_encrypted_size, 0); | 126 | &ctx->sg_encrypted_size, 0); |
| 127 | 127 | ||
| 128 | if (rc == -ENOSPC) | ||
| 129 | ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data); | ||
| 130 | |||
| 128 | return rc; | 131 | return rc; |
| 129 | } | 132 | } |
| 130 | 133 | ||
| @@ -138,6 +141,9 @@ static int alloc_plaintext_sg(struct sock *sk, int len) | |||
| 138 | &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size, | 141 | &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size, |
| 139 | tls_ctx->pending_open_record_frags); | 142 | tls_ctx->pending_open_record_frags); |
| 140 | 143 | ||
| 144 | if (rc == -ENOSPC) | ||
| 145 | ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data); | ||
| 146 | |||
| 141 | return rc; | 147 | return rc; |
| 142 | } | 148 | } |
| 143 | 149 | ||
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 5fb9b7dd9831..4b8ec659e797 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
| @@ -669,13 +669,13 @@ static int nl80211_msg_put_wmm_rules(struct sk_buff *msg, | |||
| 669 | goto nla_put_failure; | 669 | goto nla_put_failure; |
| 670 | 670 | ||
| 671 | if (nla_put_u16(msg, NL80211_WMMR_CW_MIN, | 671 | if (nla_put_u16(msg, NL80211_WMMR_CW_MIN, |
| 672 | rule->wmm_rule->client[j].cw_min) || | 672 | rule->wmm_rule.client[j].cw_min) || |
| 673 | nla_put_u16(msg, NL80211_WMMR_CW_MAX, | 673 | nla_put_u16(msg, NL80211_WMMR_CW_MAX, |
| 674 | rule->wmm_rule->client[j].cw_max) || | 674 | rule->wmm_rule.client[j].cw_max) || |
| 675 | nla_put_u8(msg, NL80211_WMMR_AIFSN, | 675 | nla_put_u8(msg, NL80211_WMMR_AIFSN, |
| 676 | rule->wmm_rule->client[j].aifsn) || | 676 | rule->wmm_rule.client[j].aifsn) || |
| 677 | nla_put_u8(msg, NL80211_WMMR_TXOP, | 677 | nla_put_u16(msg, NL80211_WMMR_TXOP, |
| 678 | rule->wmm_rule->client[j].cot)) | 678 | rule->wmm_rule.client[j].cot)) |
| 679 | goto nla_put_failure; | 679 | goto nla_put_failure; |
| 680 | 680 | ||
| 681 | nla_nest_end(msg, nl_wmm_rule); | 681 | nla_nest_end(msg, nl_wmm_rule); |
| @@ -766,9 +766,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy, | |||
| 766 | 766 | ||
| 767 | if (large) { | 767 | if (large) { |
| 768 | const struct ieee80211_reg_rule *rule = | 768 | const struct ieee80211_reg_rule *rule = |
| 769 | freq_reg_info(wiphy, chan->center_freq); | 769 | freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq)); |
| 770 | 770 | ||
| 771 | if (!IS_ERR(rule) && rule->wmm_rule) { | 771 | if (!IS_ERR_OR_NULL(rule) && rule->has_wmm) { |
| 772 | if (nl80211_msg_put_wmm_rules(msg, rule)) | 772 | if (nl80211_msg_put_wmm_rules(msg, rule)) |
| 773 | goto nla_put_failure; | 773 | goto nla_put_failure; |
| 774 | } | 774 | } |
| @@ -12205,6 +12205,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info) | |||
| 12205 | return -EOPNOTSUPP; | 12205 | return -EOPNOTSUPP; |
| 12206 | 12206 | ||
| 12207 | if (!info->attrs[NL80211_ATTR_MDID] || | 12207 | if (!info->attrs[NL80211_ATTR_MDID] || |
| 12208 | !info->attrs[NL80211_ATTR_IE] || | ||
| 12208 | !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) | 12209 | !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) |
| 12209 | return -EINVAL; | 12210 | return -EINVAL; |
| 12210 | 12211 | ||
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 4fc66a117b7d..2f702adf2912 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
| @@ -425,36 +425,23 @@ static const struct ieee80211_regdomain * | |||
| 425 | reg_copy_regd(const struct ieee80211_regdomain *src_regd) | 425 | reg_copy_regd(const struct ieee80211_regdomain *src_regd) |
| 426 | { | 426 | { |
| 427 | struct ieee80211_regdomain *regd; | 427 | struct ieee80211_regdomain *regd; |
| 428 | int size_of_regd, size_of_wmms; | 428 | int size_of_regd; |
| 429 | unsigned int i; | 429 | unsigned int i; |
| 430 | struct ieee80211_wmm_rule *d_wmm, *s_wmm; | ||
| 431 | 430 | ||
| 432 | size_of_regd = | 431 | size_of_regd = |
| 433 | sizeof(struct ieee80211_regdomain) + | 432 | sizeof(struct ieee80211_regdomain) + |
| 434 | src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule); | 433 | src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule); |
| 435 | size_of_wmms = src_regd->n_wmm_rules * | ||
| 436 | sizeof(struct ieee80211_wmm_rule); | ||
| 437 | 434 | ||
| 438 | regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL); | 435 | regd = kzalloc(size_of_regd, GFP_KERNEL); |
| 439 | if (!regd) | 436 | if (!regd) |
| 440 | return ERR_PTR(-ENOMEM); | 437 | return ERR_PTR(-ENOMEM); |
| 441 | 438 | ||
| 442 | memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain)); | 439 | memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain)); |
| 443 | 440 | ||
| 444 | d_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); | 441 | for (i = 0; i < src_regd->n_reg_rules; i++) |
| 445 | s_wmm = (struct ieee80211_wmm_rule *)((u8 *)src_regd + size_of_regd); | ||
| 446 | memcpy(d_wmm, s_wmm, size_of_wmms); | ||
| 447 | |||
| 448 | for (i = 0; i < src_regd->n_reg_rules; i++) { | ||
| 449 | memcpy(®d->reg_rules[i], &src_regd->reg_rules[i], | 442 | memcpy(®d->reg_rules[i], &src_regd->reg_rules[i], |
| 450 | sizeof(struct ieee80211_reg_rule)); | 443 | sizeof(struct ieee80211_reg_rule)); |
| 451 | if (!src_regd->reg_rules[i].wmm_rule) | ||
| 452 | continue; | ||
| 453 | 444 | ||
| 454 | regd->reg_rules[i].wmm_rule = d_wmm + | ||
| 455 | (src_regd->reg_rules[i].wmm_rule - s_wmm) / | ||
| 456 | sizeof(struct ieee80211_wmm_rule); | ||
| 457 | } | ||
| 458 | return regd; | 445 | return regd; |
| 459 | } | 446 | } |
| 460 | 447 | ||
| @@ -860,9 +847,10 @@ static bool valid_regdb(const u8 *data, unsigned int size) | |||
| 860 | return true; | 847 | return true; |
| 861 | } | 848 | } |
| 862 | 849 | ||
| 863 | static void set_wmm_rule(struct ieee80211_wmm_rule *rule, | 850 | static void set_wmm_rule(struct ieee80211_reg_rule *rrule, |
| 864 | struct fwdb_wmm_rule *wmm) | 851 | struct fwdb_wmm_rule *wmm) |
| 865 | { | 852 | { |
| 853 | struct ieee80211_wmm_rule *rule = &rrule->wmm_rule; | ||
| 866 | unsigned int i; | 854 | unsigned int i; |
| 867 | 855 | ||
| 868 | for (i = 0; i < IEEE80211_NUM_ACS; i++) { | 856 | for (i = 0; i < IEEE80211_NUM_ACS; i++) { |
| @@ -876,11 +864,13 @@ static void set_wmm_rule(struct ieee80211_wmm_rule *rule, | |||
| 876 | rule->ap[i].aifsn = wmm->ap[i].aifsn; | 864 | rule->ap[i].aifsn = wmm->ap[i].aifsn; |
| 877 | rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot); | 865 | rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot); |
| 878 | } | 866 | } |
| 867 | |||
| 868 | rrule->has_wmm = true; | ||
| 879 | } | 869 | } |
| 880 | 870 | ||
| 881 | static int __regdb_query_wmm(const struct fwdb_header *db, | 871 | static int __regdb_query_wmm(const struct fwdb_header *db, |
| 882 | const struct fwdb_country *country, int freq, | 872 | const struct fwdb_country *country, int freq, |
| 883 | u32 *dbptr, struct ieee80211_wmm_rule *rule) | 873 | struct ieee80211_reg_rule *rule) |
| 884 | { | 874 | { |
| 885 | unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; | 875 | unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; |
| 886 | struct fwdb_collection *coll = (void *)((u8 *)db + ptr); | 876 | struct fwdb_collection *coll = (void *)((u8 *)db + ptr); |
| @@ -901,8 +891,6 @@ static int __regdb_query_wmm(const struct fwdb_header *db, | |||
| 901 | wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2; | 891 | wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2; |
| 902 | wmm = (void *)((u8 *)db + wmm_ptr); | 892 | wmm = (void *)((u8 *)db + wmm_ptr); |
| 903 | set_wmm_rule(rule, wmm); | 893 | set_wmm_rule(rule, wmm); |
| 904 | if (dbptr) | ||
| 905 | *dbptr = wmm_ptr; | ||
| 906 | return 0; | 894 | return 0; |
| 907 | } | 895 | } |
| 908 | } | 896 | } |
| @@ -910,8 +898,7 @@ static int __regdb_query_wmm(const struct fwdb_header *db, | |||
| 910 | return -ENODATA; | 898 | return -ENODATA; |
| 911 | } | 899 | } |
| 912 | 900 | ||
| 913 | int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr, | 901 | int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule) |
| 914 | struct ieee80211_wmm_rule *rule) | ||
| 915 | { | 902 | { |
| 916 | const struct fwdb_header *hdr = regdb; | 903 | const struct fwdb_header *hdr = regdb; |
| 917 | const struct fwdb_country *country; | 904 | const struct fwdb_country *country; |
| @@ -925,8 +912,7 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr, | |||
| 925 | country = &hdr->country[0]; | 912 | country = &hdr->country[0]; |
| 926 | while (country->coll_ptr) { | 913 | while (country->coll_ptr) { |
| 927 | if (alpha2_equal(alpha2, country->alpha2)) | 914 | if (alpha2_equal(alpha2, country->alpha2)) |
| 928 | return __regdb_query_wmm(regdb, country, freq, dbptr, | 915 | return __regdb_query_wmm(regdb, country, freq, rule); |
| 929 | rule); | ||
| 930 | 916 | ||
| 931 | country++; | 917 | country++; |
| 932 | } | 918 | } |
| @@ -935,32 +921,13 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr, | |||
| 935 | } | 921 | } |
| 936 | EXPORT_SYMBOL(reg_query_regdb_wmm); | 922 | EXPORT_SYMBOL(reg_query_regdb_wmm); |
| 937 | 923 | ||
| 938 | struct wmm_ptrs { | ||
| 939 | struct ieee80211_wmm_rule *rule; | ||
| 940 | u32 ptr; | ||
| 941 | }; | ||
| 942 | |||
| 943 | static struct ieee80211_wmm_rule *find_wmm_ptr(struct wmm_ptrs *wmm_ptrs, | ||
| 944 | u32 wmm_ptr, int n_wmms) | ||
| 945 | { | ||
| 946 | int i; | ||
| 947 | |||
| 948 | for (i = 0; i < n_wmms; i++) { | ||
| 949 | if (wmm_ptrs[i].ptr == wmm_ptr) | ||
| 950 | return wmm_ptrs[i].rule; | ||
| 951 | } | ||
| 952 | return NULL; | ||
| 953 | } | ||
| 954 | |||
| 955 | static int regdb_query_country(const struct fwdb_header *db, | 924 | static int regdb_query_country(const struct fwdb_header *db, |
| 956 | const struct fwdb_country *country) | 925 | const struct fwdb_country *country) |
| 957 | { | 926 | { |
| 958 | unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; | 927 | unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; |
| 959 | struct fwdb_collection *coll = (void *)((u8 *)db + ptr); | 928 | struct fwdb_collection *coll = (void *)((u8 *)db + ptr); |
| 960 | struct ieee80211_regdomain *regdom; | 929 | struct ieee80211_regdomain *regdom; |
| 961 | struct ieee80211_regdomain *tmp_rd; | 930 | unsigned int size_of_regd, i; |
| 962 | unsigned int size_of_regd, i, n_wmms = 0; | ||
| 963 | struct wmm_ptrs *wmm_ptrs; | ||
| 964 | 931 | ||
| 965 | size_of_regd = sizeof(struct ieee80211_regdomain) + | 932 | size_of_regd = sizeof(struct ieee80211_regdomain) + |
| 966 | coll->n_rules * sizeof(struct ieee80211_reg_rule); | 933 | coll->n_rules * sizeof(struct ieee80211_reg_rule); |
| @@ -969,12 +936,6 @@ static int regdb_query_country(const struct fwdb_header *db, | |||
| 969 | if (!regdom) | 936 | if (!regdom) |
| 970 | return -ENOMEM; | 937 | return -ENOMEM; |
| 971 | 938 | ||
| 972 | wmm_ptrs = kcalloc(coll->n_rules, sizeof(*wmm_ptrs), GFP_KERNEL); | ||
| 973 | if (!wmm_ptrs) { | ||
| 974 | kfree(regdom); | ||
| 975 | return -ENOMEM; | ||
| 976 | } | ||
| 977 | |||
| 978 | regdom->n_reg_rules = coll->n_rules; | 939 | regdom->n_reg_rules = coll->n_rules; |
| 979 | regdom->alpha2[0] = country->alpha2[0]; | 940 | regdom->alpha2[0] = country->alpha2[0]; |
| 980 | regdom->alpha2[1] = country->alpha2[1]; | 941 | regdom->alpha2[1] = country->alpha2[1]; |
| @@ -1013,37 +974,11 @@ static int regdb_query_country(const struct fwdb_header *db, | |||
| 1013 | 1000 * be16_to_cpu(rule->cac_timeout); | 974 | 1000 * be16_to_cpu(rule->cac_timeout); |
| 1014 | if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) { | 975 | if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) { |
| 1015 | u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; | 976 | u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; |
| 1016 | struct ieee80211_wmm_rule *wmm_pos = | 977 | struct fwdb_wmm_rule *wmm = (void *)((u8 *)db + wmm_ptr); |
| 1017 | find_wmm_ptr(wmm_ptrs, wmm_ptr, n_wmms); | ||
| 1018 | struct fwdb_wmm_rule *wmm; | ||
| 1019 | struct ieee80211_wmm_rule *wmm_rule; | ||
| 1020 | |||
| 1021 | if (wmm_pos) { | ||
| 1022 | rrule->wmm_rule = wmm_pos; | ||
| 1023 | continue; | ||
| 1024 | } | ||
| 1025 | wmm = (void *)((u8 *)db + wmm_ptr); | ||
| 1026 | tmp_rd = krealloc(regdom, size_of_regd + (n_wmms + 1) * | ||
| 1027 | sizeof(struct ieee80211_wmm_rule), | ||
| 1028 | GFP_KERNEL); | ||
| 1029 | |||
| 1030 | if (!tmp_rd) { | ||
| 1031 | kfree(regdom); | ||
| 1032 | kfree(wmm_ptrs); | ||
| 1033 | return -ENOMEM; | ||
| 1034 | } | ||
| 1035 | regdom = tmp_rd; | ||
| 1036 | |||
| 1037 | wmm_rule = (struct ieee80211_wmm_rule *) | ||
| 1038 | ((u8 *)regdom + size_of_regd + n_wmms * | ||
| 1039 | sizeof(struct ieee80211_wmm_rule)); | ||
| 1040 | 978 | ||
| 1041 | set_wmm_rule(wmm_rule, wmm); | 979 | set_wmm_rule(rrule, wmm); |
| 1042 | wmm_ptrs[n_wmms].ptr = wmm_ptr; | ||
| 1043 | wmm_ptrs[n_wmms++].rule = wmm_rule; | ||
| 1044 | } | 980 | } |
| 1045 | } | 981 | } |
| 1046 | kfree(wmm_ptrs); | ||
| 1047 | 982 | ||
| 1048 | return reg_schedule_apply(regdom); | 983 | return reg_schedule_apply(regdom); |
| 1049 | } | 984 | } |
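The regulatory hunks above become possible because the wmm rule is no longer a separately allocated object shared between reg rules via pointers, but is embedded in each ieee80211_reg_rule together with a has_wmm flag; copying a regdomain is then a plain memcpy() of the rules with no pointer fix-ups or krealloc() growth, which is what reg_copy_regd() and regdb_query_country() shed above. Roughly, the shape of that change (a simplified sketch, not the verbatim cfg80211 structs):

/* Before (simplified): every rule pointed into a shared wmm table, so
 * each copy of a regdomain had to relocate those pointers. */
struct reg_rule_before {
	/* ... frequency / power fields ... */
	struct ieee80211_wmm_rule *wmm_rule;	/* may be NULL */
};

/* After (simplified): the wmm data travels inside the rule and a flag
 * records whether it was populated, so rules can be copied wholesale. */
struct reg_rule_after {
	/* ... frequency / power fields ... */
	struct ieee80211_wmm_rule wmm_rule;
	bool has_wmm;
};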
diff --git a/net/wireless/util.c b/net/wireless/util.c index e0825a019e9f..959ed3acd240 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
| @@ -1456,7 +1456,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef, | |||
| 1456 | u8 *op_class) | 1456 | u8 *op_class) |
| 1457 | { | 1457 | { |
| 1458 | u8 vht_opclass; | 1458 | u8 vht_opclass; |
| 1459 | u16 freq = chandef->center_freq1; | 1459 | u32 freq = chandef->center_freq1; |
| 1460 | 1460 | ||
| 1461 | if (freq >= 2412 && freq <= 2472) { | 1461 | if (freq >= 2412 && freq <= 2472) { |
| 1462 | if (chandef->width > NL80211_CHAN_WIDTH_40) | 1462 | if (chandef->width > NL80211_CHAN_WIDTH_40) |
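The final hunk widens the local frequency variable from u16 to u32: chandef->center_freq1 is a u32, so copying it into a u16 silently drops the high bits for anything above 65535 MHz. For example, a 60 GHz centre frequency such as 69120 MHz would wrap to 69120 - 65536 = 3584, and the subsequent band-range checks would then operate on that bogus value; keeping freq as u32 avoids the truncation.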
