diff options
author | Dave Airlie <airlied@redhat.com> | 2018-09-26 21:06:46 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2018-09-26 21:06:46 -0400 |
commit | bf78296ab1cb215d0609ac6cff4e43e941e51265 (patch) | |
tree | a193615b327d9ee538e71ca5f13bbfb4f3db4e6b /net | |
parent | 18eb2f6e19d77900695987e3a2b775cccbe5b84e (diff) | |
parent | 6bf4ca7fbc85d80446ac01c0d1d77db4d91a6d84 (diff) |
BackMerge v4.19-rc5 into drm-next
Sean Paul requested an -rc5 backmerge from some sun4i fixes.
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'net')
82 files changed, 836 insertions, 526 deletions
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index ae91e2d40056..3a7b0773536b 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c | |||
@@ -83,6 +83,7 @@ enum { | |||
83 | 83 | ||
84 | struct smp_dev { | 84 | struct smp_dev { |
85 | /* Secure Connections OOB data */ | 85 | /* Secure Connections OOB data */ |
86 | bool local_oob; | ||
86 | u8 local_pk[64]; | 87 | u8 local_pk[64]; |
87 | u8 local_rand[16]; | 88 | u8 local_rand[16]; |
88 | bool debug_key; | 89 | bool debug_key; |
@@ -599,6 +600,8 @@ int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]) | |||
599 | 600 | ||
600 | memcpy(rand, smp->local_rand, 16); | 601 | memcpy(rand, smp->local_rand, 16); |
601 | 602 | ||
603 | smp->local_oob = true; | ||
604 | |||
602 | return 0; | 605 | return 0; |
603 | } | 606 | } |
604 | 607 | ||
@@ -1785,7 +1788,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) | |||
1785 | * successfully received our local OOB data - therefore set the | 1788 | * successfully received our local OOB data - therefore set the |
1786 | * flag to indicate that local OOB is in use. | 1789 | * flag to indicate that local OOB is in use. |
1787 | */ | 1790 | */ |
1788 | if (req->oob_flag == SMP_OOB_PRESENT) | 1791 | if (req->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob) |
1789 | set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); | 1792 | set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); |
1790 | 1793 | ||
1791 | /* SMP over BR/EDR requires special treatment */ | 1794 | /* SMP over BR/EDR requires special treatment */ |
@@ -1967,7 +1970,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) | |||
1967 | * successfully received our local OOB data - therefore set the | 1970 | * successfully received our local OOB data - therefore set the |
1968 | * flag to indicate that local OOB is in use. | 1971 | * flag to indicate that local OOB is in use. |
1969 | */ | 1972 | */ |
1970 | if (rsp->oob_flag == SMP_OOB_PRESENT) | 1973 | if (rsp->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob) |
1971 | set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); | 1974 | set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); |
1972 | 1975 | ||
1973 | smp->prsp[0] = SMP_CMD_PAIRING_RSP; | 1976 | smp->prsp[0] = SMP_CMD_PAIRING_RSP; |
@@ -2697,7 +2700,13 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) | |||
2697 | * key was set/generated. | 2700 | * key was set/generated. |
2698 | */ | 2701 | */ |
2699 | if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) { | 2702 | if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) { |
2700 | struct smp_dev *smp_dev = chan->data; | 2703 | struct l2cap_chan *hchan = hdev->smp_data; |
2704 | struct smp_dev *smp_dev; | ||
2705 | |||
2706 | if (!hchan || !hchan->data) | ||
2707 | return SMP_UNSPECIFIED; | ||
2708 | |||
2709 | smp_dev = hchan->data; | ||
2701 | 2710 | ||
2702 | tfm_ecdh = smp_dev->tfm_ecdh; | 2711 | tfm_ecdh = smp_dev->tfm_ecdh; |
2703 | } else { | 2712 | } else { |
@@ -3230,6 +3239,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid) | |||
3230 | return ERR_CAST(tfm_ecdh); | 3239 | return ERR_CAST(tfm_ecdh); |
3231 | } | 3240 | } |
3232 | 3241 | ||
3242 | smp->local_oob = false; | ||
3233 | smp->tfm_aes = tfm_aes; | 3243 | smp->tfm_aes = tfm_aes; |
3234 | smp->tfm_cmac = tfm_cmac; | 3244 | smp->tfm_cmac = tfm_cmac; |
3235 | smp->tfm_ecdh = tfm_ecdh; | 3245 | smp->tfm_ecdh = tfm_ecdh; |
diff --git a/net/core/filter.c b/net/core/filter.c index c25eb36f1320..5e00f2b85a56 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -2282,14 +2282,21 @@ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = { | |||
2282 | .arg2_type = ARG_ANYTHING, | 2282 | .arg2_type = ARG_ANYTHING, |
2283 | }; | 2283 | }; |
2284 | 2284 | ||
2285 | #define sk_msg_iter_var(var) \ | ||
2286 | do { \ | ||
2287 | var++; \ | ||
2288 | if (var == MAX_SKB_FRAGS) \ | ||
2289 | var = 0; \ | ||
2290 | } while (0) | ||
2291 | |||
2285 | BPF_CALL_4(bpf_msg_pull_data, | 2292 | BPF_CALL_4(bpf_msg_pull_data, |
2286 | struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags) | 2293 | struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags) |
2287 | { | 2294 | { |
2288 | unsigned int len = 0, offset = 0, copy = 0; | 2295 | unsigned int len = 0, offset = 0, copy = 0, poffset = 0; |
2296 | int bytes = end - start, bytes_sg_total; | ||
2289 | struct scatterlist *sg = msg->sg_data; | 2297 | struct scatterlist *sg = msg->sg_data; |
2290 | int first_sg, last_sg, i, shift; | 2298 | int first_sg, last_sg, i, shift; |
2291 | unsigned char *p, *to, *from; | 2299 | unsigned char *p, *to, *from; |
2292 | int bytes = end - start; | ||
2293 | struct page *page; | 2300 | struct page *page; |
2294 | 2301 | ||
2295 | if (unlikely(flags || end <= start)) | 2302 | if (unlikely(flags || end <= start)) |
@@ -2299,21 +2306,22 @@ BPF_CALL_4(bpf_msg_pull_data, | |||
2299 | i = msg->sg_start; | 2306 | i = msg->sg_start; |
2300 | do { | 2307 | do { |
2301 | len = sg[i].length; | 2308 | len = sg[i].length; |
2302 | offset += len; | ||
2303 | if (start < offset + len) | 2309 | if (start < offset + len) |
2304 | break; | 2310 | break; |
2305 | i++; | 2311 | offset += len; |
2306 | if (i == MAX_SKB_FRAGS) | 2312 | sk_msg_iter_var(i); |
2307 | i = 0; | ||
2308 | } while (i != msg->sg_end); | 2313 | } while (i != msg->sg_end); |
2309 | 2314 | ||
2310 | if (unlikely(start >= offset + len)) | 2315 | if (unlikely(start >= offset + len)) |
2311 | return -EINVAL; | 2316 | return -EINVAL; |
2312 | 2317 | ||
2313 | if (!msg->sg_copy[i] && bytes <= len) | ||
2314 | goto out; | ||
2315 | |||
2316 | first_sg = i; | 2318 | first_sg = i; |
2319 | /* The start may point into the sg element so we need to also | ||
2320 | * account for the headroom. | ||
2321 | */ | ||
2322 | bytes_sg_total = start - offset + bytes; | ||
2323 | if (!msg->sg_copy[i] && bytes_sg_total <= len) | ||
2324 | goto out; | ||
2317 | 2325 | ||
2318 | /* At this point we need to linearize multiple scatterlist | 2326 | /* At this point we need to linearize multiple scatterlist |
2319 | * elements or a single shared page. Either way we need to | 2327 | * elements or a single shared page. Either way we need to |
@@ -2327,37 +2335,33 @@ BPF_CALL_4(bpf_msg_pull_data, | |||
2327 | */ | 2335 | */ |
2328 | do { | 2336 | do { |
2329 | copy += sg[i].length; | 2337 | copy += sg[i].length; |
2330 | i++; | 2338 | sk_msg_iter_var(i); |
2331 | if (i == MAX_SKB_FRAGS) | 2339 | if (bytes_sg_total <= copy) |
2332 | i = 0; | ||
2333 | if (bytes < copy) | ||
2334 | break; | 2340 | break; |
2335 | } while (i != msg->sg_end); | 2341 | } while (i != msg->sg_end); |
2336 | last_sg = i; | 2342 | last_sg = i; |
2337 | 2343 | ||
2338 | if (unlikely(copy < end - start)) | 2344 | if (unlikely(bytes_sg_total > copy)) |
2339 | return -EINVAL; | 2345 | return -EINVAL; |
2340 | 2346 | ||
2341 | page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy)); | 2347 | page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, |
2348 | get_order(copy)); | ||
2342 | if (unlikely(!page)) | 2349 | if (unlikely(!page)) |
2343 | return -ENOMEM; | 2350 | return -ENOMEM; |
2344 | p = page_address(page); | 2351 | p = page_address(page); |
2345 | offset = 0; | ||
2346 | 2352 | ||
2347 | i = first_sg; | 2353 | i = first_sg; |
2348 | do { | 2354 | do { |
2349 | from = sg_virt(&sg[i]); | 2355 | from = sg_virt(&sg[i]); |
2350 | len = sg[i].length; | 2356 | len = sg[i].length; |
2351 | to = p + offset; | 2357 | to = p + poffset; |
2352 | 2358 | ||
2353 | memcpy(to, from, len); | 2359 | memcpy(to, from, len); |
2354 | offset += len; | 2360 | poffset += len; |
2355 | sg[i].length = 0; | 2361 | sg[i].length = 0; |
2356 | put_page(sg_page(&sg[i])); | 2362 | put_page(sg_page(&sg[i])); |
2357 | 2363 | ||
2358 | i++; | 2364 | sk_msg_iter_var(i); |
2359 | if (i == MAX_SKB_FRAGS) | ||
2360 | i = 0; | ||
2361 | } while (i != last_sg); | 2365 | } while (i != last_sg); |
2362 | 2366 | ||
2363 | sg[first_sg].length = copy; | 2367 | sg[first_sg].length = copy; |
@@ -2367,11 +2371,15 @@ BPF_CALL_4(bpf_msg_pull_data, | |||
2367 | * had a single entry though we can just replace it and | 2371 | * had a single entry though we can just replace it and |
2368 | * be done. Otherwise walk the ring and shift the entries. | 2372 | * be done. Otherwise walk the ring and shift the entries. |
2369 | */ | 2373 | */ |
2370 | shift = last_sg - first_sg - 1; | 2374 | WARN_ON_ONCE(last_sg == first_sg); |
2375 | shift = last_sg > first_sg ? | ||
2376 | last_sg - first_sg - 1 : | ||
2377 | MAX_SKB_FRAGS - first_sg + last_sg - 1; | ||
2371 | if (!shift) | 2378 | if (!shift) |
2372 | goto out; | 2379 | goto out; |
2373 | 2380 | ||
2374 | i = first_sg + 1; | 2381 | i = first_sg; |
2382 | sk_msg_iter_var(i); | ||
2375 | do { | 2383 | do { |
2376 | int move_from; | 2384 | int move_from; |
2377 | 2385 | ||
@@ -2388,15 +2396,13 @@ BPF_CALL_4(bpf_msg_pull_data, | |||
2388 | sg[move_from].page_link = 0; | 2396 | sg[move_from].page_link = 0; |
2389 | sg[move_from].offset = 0; | 2397 | sg[move_from].offset = 0; |
2390 | 2398 | ||
2391 | i++; | 2399 | sk_msg_iter_var(i); |
2392 | if (i == MAX_SKB_FRAGS) | ||
2393 | i = 0; | ||
2394 | } while (1); | 2400 | } while (1); |
2395 | msg->sg_end -= shift; | 2401 | msg->sg_end -= shift; |
2396 | if (msg->sg_end < 0) | 2402 | if (msg->sg_end < 0) |
2397 | msg->sg_end += MAX_SKB_FRAGS; | 2403 | msg->sg_end += MAX_SKB_FRAGS; |
2398 | out: | 2404 | out: |
2399 | msg->data = sg_virt(&sg[i]) + start - offset; | 2405 | msg->data = sg_virt(&sg[first_sg]) + start - offset; |
2400 | msg->data_end = msg->data + bytes; | 2406 | msg->data_end = msg->data + bytes; |
2401 | 2407 | ||
2402 | return 0; | 2408 | return 0; |
@@ -7281,7 +7287,7 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type, | |||
7281 | break; | 7287 | break; |
7282 | 7288 | ||
7283 | case offsetof(struct sk_reuseport_md, ip_protocol): | 7289 | case offsetof(struct sk_reuseport_md, ip_protocol): |
7284 | BUILD_BUG_ON(hweight_long(SK_FL_PROTO_MASK) != BITS_PER_BYTE); | 7290 | BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE); |
7285 | SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset, | 7291 | SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset, |
7286 | BPF_W, 0); | 7292 | BPF_W, 0); |
7287 | *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); | 7293 | *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index aa19d86937af..91592fceeaad 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -1180,6 +1180,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, | |||
1180 | lladdr = neigh->ha; | 1180 | lladdr = neigh->ha; |
1181 | } | 1181 | } |
1182 | 1182 | ||
1183 | /* Update confirmed timestamp for neighbour entry after we | ||
1184 | * received ARP packet even if it doesn't change IP to MAC binding. | ||
1185 | */ | ||
1186 | if (new & NUD_CONNECTED) | ||
1187 | neigh->confirmed = jiffies; | ||
1188 | |||
1183 | /* If entry was valid and address is not changed, | 1189 | /* If entry was valid and address is not changed, |
1184 | do not change entry state, if new one is STALE. | 1190 | do not change entry state, if new one is STALE. |
1185 | */ | 1191 | */ |
@@ -1201,15 +1207,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, | |||
1201 | } | 1207 | } |
1202 | } | 1208 | } |
1203 | 1209 | ||
1204 | /* Update timestamps only once we know we will make a change to the | 1210 | /* Update timestamp only once we know we will make a change to the |
1205 | * neighbour entry. Otherwise we risk to move the locktime window with | 1211 | * neighbour entry. Otherwise we risk to move the locktime window with |
1206 | * noop updates and ignore relevant ARP updates. | 1212 | * noop updates and ignore relevant ARP updates. |
1207 | */ | 1213 | */ |
1208 | if (new != old || lladdr != neigh->ha) { | 1214 | if (new != old || lladdr != neigh->ha) |
1209 | if (new & NUD_CONNECTED) | ||
1210 | neigh->confirmed = jiffies; | ||
1211 | neigh->updated = jiffies; | 1215 | neigh->updated = jiffies; |
1212 | } | ||
1213 | 1216 | ||
1214 | if (new != old) { | 1217 | if (new != old) { |
1215 | neigh_del_timer(neigh); | 1218 | neigh_del_timer(neigh); |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 24431e578310..63ce2283a456 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -324,6 +324,10 @@ void rtnl_unregister_all(int protocol) | |||
324 | 324 | ||
325 | rtnl_lock(); | 325 | rtnl_lock(); |
326 | tab = rtnl_msg_handlers[protocol]; | 326 | tab = rtnl_msg_handlers[protocol]; |
327 | if (!tab) { | ||
328 | rtnl_unlock(); | ||
329 | return; | ||
330 | } | ||
327 | RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL); | 331 | RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL); |
328 | for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) { | 332 | for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) { |
329 | link = tab[msgindex]; | 333 | link = tab[msgindex]; |
@@ -2806,7 +2810,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) | |||
2806 | } | 2810 | } |
2807 | 2811 | ||
2808 | if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { | 2812 | if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { |
2809 | __dev_notify_flags(dev, old_flags, 0U); | 2813 | __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags)); |
2810 | } else { | 2814 | } else { |
2811 | dev->rtnl_link_state = RTNL_LINK_INITIALIZED; | 2815 | dev->rtnl_link_state = RTNL_LINK_INITIALIZED; |
2812 | __dev_notify_flags(dev, old_flags, ~0U); | 2816 | __dev_notify_flags(dev, old_flags, ~0U); |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index c996c09d095f..b2c807f67aba 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -939,9 +939,6 @@ struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size) | |||
939 | 939 | ||
940 | WARN_ON_ONCE(!in_task()); | 940 | WARN_ON_ONCE(!in_task()); |
941 | 941 | ||
942 | if (!sock_flag(sk, SOCK_ZEROCOPY)) | ||
943 | return NULL; | ||
944 | |||
945 | skb = sock_omalloc(sk, 0, GFP_KERNEL); | 942 | skb = sock_omalloc(sk, 0, GFP_KERNEL); |
946 | if (!skb) | 943 | if (!skb) |
947 | return NULL; | 944 | return NULL; |
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index e63c554e0623..9f3209ff7ffd 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c | |||
@@ -19,12 +19,10 @@ | |||
19 | #include <linux/of_mdio.h> | 19 | #include <linux/of_mdio.h> |
20 | #include <linux/of_platform.h> | 20 | #include <linux/of_platform.h> |
21 | #include <linux/of_net.h> | 21 | #include <linux/of_net.h> |
22 | #include <linux/of_gpio.h> | ||
23 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
24 | #include <linux/sysfs.h> | 23 | #include <linux/sysfs.h> |
25 | #include <linux/phy_fixed.h> | 24 | #include <linux/phy_fixed.h> |
26 | #include <linux/ptp_classify.h> | 25 | #include <linux/ptp_classify.h> |
27 | #include <linux/gpio/consumer.h> | ||
28 | #include <linux/etherdevice.h> | 26 | #include <linux/etherdevice.h> |
29 | 27 | ||
30 | #include "dsa_priv.h" | 28 | #include "dsa_priv.h" |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 20fda8fb8ffd..1fbe2f815474 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -1377,6 +1377,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, | |||
1377 | if (encap) | 1377 | if (encap) |
1378 | skb_reset_inner_headers(skb); | 1378 | skb_reset_inner_headers(skb); |
1379 | skb->network_header = (u8 *)iph - skb->head; | 1379 | skb->network_header = (u8 *)iph - skb->head; |
1380 | skb_reset_mac_len(skb); | ||
1380 | } while ((skb = skb->next)); | 1381 | } while ((skb = skb->next)); |
1381 | 1382 | ||
1382 | out: | 1383 | out: |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index cf75f8944b05..4da39446da2d 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -820,10 +820,9 @@ static void igmp_timer_expire(struct timer_list *t) | |||
820 | spin_lock(&im->lock); | 820 | spin_lock(&im->lock); |
821 | im->tm_running = 0; | 821 | im->tm_running = 0; |
822 | 822 | ||
823 | if (im->unsolicit_count) { | 823 | if (im->unsolicit_count && --im->unsolicit_count) |
824 | im->unsolicit_count--; | ||
825 | igmp_start_timer(im, unsolicited_report_interval(in_dev)); | 824 | igmp_start_timer(im, unsolicited_report_interval(in_dev)); |
826 | } | 825 | |
827 | im->reporter = 1; | 826 | im->reporter = 1; |
828 | spin_unlock(&im->lock); | 827 | spin_unlock(&im->lock); |
829 | 828 | ||
@@ -1308,6 +1307,8 @@ static void igmp_group_added(struct ip_mc_list *im) | |||
1308 | 1307 | ||
1309 | if (in_dev->dead) | 1308 | if (in_dev->dead) |
1310 | return; | 1309 | return; |
1310 | |||
1311 | im->unsolicit_count = net->ipv4.sysctl_igmp_qrv; | ||
1311 | if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) { | 1312 | if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) { |
1312 | spin_lock_bh(&im->lock); | 1313 | spin_lock_bh(&im->lock); |
1313 | igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY); | 1314 | igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY); |
@@ -1391,9 +1392,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, | |||
1391 | unsigned int mode) | 1392 | unsigned int mode) |
1392 | { | 1393 | { |
1393 | struct ip_mc_list *im; | 1394 | struct ip_mc_list *im; |
1394 | #ifdef CONFIG_IP_MULTICAST | ||
1395 | struct net *net = dev_net(in_dev->dev); | ||
1396 | #endif | ||
1397 | 1395 | ||
1398 | ASSERT_RTNL(); | 1396 | ASSERT_RTNL(); |
1399 | 1397 | ||
@@ -1420,7 +1418,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, | |||
1420 | spin_lock_init(&im->lock); | 1418 | spin_lock_init(&im->lock); |
1421 | #ifdef CONFIG_IP_MULTICAST | 1419 | #ifdef CONFIG_IP_MULTICAST |
1422 | timer_setup(&im->timer, igmp_timer_expire, 0); | 1420 | timer_setup(&im->timer, igmp_timer_expire, 0); |
1423 | im->unsolicit_count = net->ipv4.sysctl_igmp_qrv; | ||
1424 | #endif | 1421 | #endif |
1425 | 1422 | ||
1426 | im->next_rcu = in_dev->mc_list; | 1423 | im->next_rcu = in_dev->mc_list; |
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 88281fbce88c..e7227128df2c 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -599,6 +599,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, | |||
599 | nextp = &fp->next; | 599 | nextp = &fp->next; |
600 | fp->prev = NULL; | 600 | fp->prev = NULL; |
601 | memset(&fp->rbnode, 0, sizeof(fp->rbnode)); | 601 | memset(&fp->rbnode, 0, sizeof(fp->rbnode)); |
602 | fp->sk = NULL; | ||
602 | head->data_len += fp->len; | 603 | head->data_len += fp->len; |
603 | head->len += fp->len; | 604 | head->len += fp->len; |
604 | if (head->ip_summed != fp->ip_summed) | 605 | if (head->ip_summed != fp->ip_summed) |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 51a5d06085ac..8cce0e9ea08c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -178,6 +178,9 @@ static void ipgre_err(struct sk_buff *skb, u32 info, | |||
178 | 178 | ||
179 | if (tpi->proto == htons(ETH_P_TEB)) | 179 | if (tpi->proto == htons(ETH_P_TEB)) |
180 | itn = net_generic(net, gre_tap_net_id); | 180 | itn = net_generic(net, gre_tap_net_id); |
181 | else if (tpi->proto == htons(ETH_P_ERSPAN) || | ||
182 | tpi->proto == htons(ETH_P_ERSPAN2)) | ||
183 | itn = net_generic(net, erspan_net_id); | ||
181 | else | 184 | else |
182 | itn = net_generic(net, ipgre_net_id); | 185 | itn = net_generic(net, ipgre_net_id); |
183 | 186 | ||
@@ -328,6 +331,8 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, | |||
328 | ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error); | 331 | ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error); |
329 | return PACKET_RCVD; | 332 | return PACKET_RCVD; |
330 | } | 333 | } |
334 | return PACKET_REJECT; | ||
335 | |||
331 | drop: | 336 | drop: |
332 | kfree_skb(skb); | 337 | kfree_skb(skb); |
333 | return PACKET_RCVD; | 338 | return PACKET_RCVD; |
@@ -1508,11 +1513,14 @@ nla_put_failure: | |||
1508 | 1513 | ||
1509 | static void erspan_setup(struct net_device *dev) | 1514 | static void erspan_setup(struct net_device *dev) |
1510 | { | 1515 | { |
1516 | struct ip_tunnel *t = netdev_priv(dev); | ||
1517 | |||
1511 | ether_setup(dev); | 1518 | ether_setup(dev); |
1512 | dev->netdev_ops = &erspan_netdev_ops; | 1519 | dev->netdev_ops = &erspan_netdev_ops; |
1513 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; | 1520 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; |
1514 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | 1521 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
1515 | ip_tunnel_setup(dev, erspan_net_id); | 1522 | ip_tunnel_setup(dev, erspan_net_id); |
1523 | t->erspan_ver = 1; | ||
1516 | } | 1524 | } |
1517 | 1525 | ||
1518 | static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = { | 1526 | static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = { |
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index d9504adc47b3..184bf2e0a1ed 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig | |||
@@ -106,6 +106,10 @@ config NF_NAT_IPV4 | |||
106 | 106 | ||
107 | if NF_NAT_IPV4 | 107 | if NF_NAT_IPV4 |
108 | 108 | ||
109 | config NF_NAT_MASQUERADE_IPV4 | ||
110 | bool | ||
111 | |||
112 | if NF_TABLES | ||
109 | config NFT_CHAIN_NAT_IPV4 | 113 | config NFT_CHAIN_NAT_IPV4 |
110 | depends on NF_TABLES_IPV4 | 114 | depends on NF_TABLES_IPV4 |
111 | tristate "IPv4 nf_tables nat chain support" | 115 | tristate "IPv4 nf_tables nat chain support" |
@@ -115,9 +119,6 @@ config NFT_CHAIN_NAT_IPV4 | |||
115 | packet transformations such as the source, destination address and | 119 | packet transformations such as the source, destination address and |
116 | source and destination ports. | 120 | source and destination ports. |
117 | 121 | ||
118 | config NF_NAT_MASQUERADE_IPV4 | ||
119 | bool | ||
120 | |||
121 | config NFT_MASQ_IPV4 | 122 | config NFT_MASQ_IPV4 |
122 | tristate "IPv4 masquerading support for nf_tables" | 123 | tristate "IPv4 masquerading support for nf_tables" |
123 | depends on NF_TABLES_IPV4 | 124 | depends on NF_TABLES_IPV4 |
@@ -135,6 +136,7 @@ config NFT_REDIR_IPV4 | |||
135 | help | 136 | help |
136 | This is the expression that provides IPv4 redirect support for | 137 | This is the expression that provides IPv4 redirect support for |
137 | nf_tables. | 138 | nf_tables. |
139 | endif # NF_TABLES | ||
138 | 140 | ||
139 | config NF_NAT_SNMP_BASIC | 141 | config NF_NAT_SNMP_BASIC |
140 | tristate "Basic SNMP-ALG support" | 142 | tristate "Basic SNMP-ALG support" |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index b8af2fec5ad5..10c6246396cc 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1185,7 +1185,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) | |||
1185 | 1185 | ||
1186 | flags = msg->msg_flags; | 1186 | flags = msg->msg_flags; |
1187 | 1187 | ||
1188 | if (flags & MSG_ZEROCOPY && size) { | 1188 | if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) { |
1189 | if (sk->sk_state != TCP_ESTABLISHED) { | 1189 | if (sk->sk_state != TCP_ESTABLISHED) { |
1190 | err = -EINVAL; | 1190 | err = -EINVAL; |
1191 | goto out_err; | 1191 | goto out_err; |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 4c2dd9f863f7..4cf2f7bb2802 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -6367,8 +6367,8 @@ static bool tcp_syn_flood_action(const struct sock *sk, | |||
6367 | if (!queue->synflood_warned && | 6367 | if (!queue->synflood_warned && |
6368 | net->ipv4.sysctl_tcp_syncookies != 2 && | 6368 | net->ipv4.sysctl_tcp_syncookies != 2 && |
6369 | xchg(&queue->synflood_warned, 1) == 0) | 6369 | xchg(&queue->synflood_warned, 1) == 0) |
6370 | pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n", | 6370 | net_info_ratelimited("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n", |
6371 | proto, ntohs(tcp_hdr(skb)->dest), msg); | 6371 | proto, ntohs(tcp_hdr(skb)->dest), msg); |
6372 | 6372 | ||
6373 | return want_cookie; | 6373 | return want_cookie; |
6374 | } | 6374 | } |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 75ef332a7caf..12affb7864d9 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -184,8 +184,9 @@ kill: | |||
184 | inet_twsk_deschedule_put(tw); | 184 | inet_twsk_deschedule_put(tw); |
185 | return TCP_TW_SUCCESS; | 185 | return TCP_TW_SUCCESS; |
186 | } | 186 | } |
187 | } else { | ||
188 | inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); | ||
187 | } | 189 | } |
188 | inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); | ||
189 | 190 | ||
190 | if (tmp_opt.saw_tstamp) { | 191 | if (tmp_opt.saw_tstamp) { |
191 | tcptw->tw_ts_recent = tmp_opt.rcv_tsval; | 192 | tcptw->tw_ts_recent = tmp_opt.rcv_tsval; |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index f4e35b2ff8b8..7d69dd6fa7e8 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -2124,6 +2124,28 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, | |||
2124 | inet_compute_pseudo); | 2124 | inet_compute_pseudo); |
2125 | } | 2125 | } |
2126 | 2126 | ||
2127 | /* wrapper for udp_queue_rcv_skb tacking care of csum conversion and | ||
2128 | * return code conversion for ip layer consumption | ||
2129 | */ | ||
2130 | static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, | ||
2131 | struct udphdr *uh) | ||
2132 | { | ||
2133 | int ret; | ||
2134 | |||
2135 | if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) | ||
2136 | skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, | ||
2137 | inet_compute_pseudo); | ||
2138 | |||
2139 | ret = udp_queue_rcv_skb(sk, skb); | ||
2140 | |||
2141 | /* a return value > 0 means to resubmit the input, but | ||
2142 | * it wants the return to be -protocol, or 0 | ||
2143 | */ | ||
2144 | if (ret > 0) | ||
2145 | return -ret; | ||
2146 | return 0; | ||
2147 | } | ||
2148 | |||
2127 | /* | 2149 | /* |
2128 | * All we need to do is get the socket, and then do a checksum. | 2150 | * All we need to do is get the socket, and then do a checksum. |
2129 | */ | 2151 | */ |
@@ -2170,14 +2192,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
2170 | if (unlikely(sk->sk_rx_dst != dst)) | 2192 | if (unlikely(sk->sk_rx_dst != dst)) |
2171 | udp_sk_rx_dst_set(sk, dst); | 2193 | udp_sk_rx_dst_set(sk, dst); |
2172 | 2194 | ||
2173 | ret = udp_queue_rcv_skb(sk, skb); | 2195 | ret = udp_unicast_rcv_skb(sk, skb, uh); |
2174 | sock_put(sk); | 2196 | sock_put(sk); |
2175 | /* a return value > 0 means to resubmit the input, but | 2197 | return ret; |
2176 | * it wants the return to be -protocol, or 0 | ||
2177 | */ | ||
2178 | if (ret > 0) | ||
2179 | return -ret; | ||
2180 | return 0; | ||
2181 | } | 2198 | } |
2182 | 2199 | ||
2183 | if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) | 2200 | if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) |
@@ -2185,22 +2202,8 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
2185 | saddr, daddr, udptable, proto); | 2202 | saddr, daddr, udptable, proto); |
2186 | 2203 | ||
2187 | sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); | 2204 | sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); |
2188 | if (sk) { | 2205 | if (sk) |
2189 | int ret; | 2206 | return udp_unicast_rcv_skb(sk, skb, uh); |
2190 | |||
2191 | if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) | ||
2192 | skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, | ||
2193 | inet_compute_pseudo); | ||
2194 | |||
2195 | ret = udp_queue_rcv_skb(sk, skb); | ||
2196 | |||
2197 | /* a return value > 0 means to resubmit the input, but | ||
2198 | * it wants the return to be -protocol, or 0 | ||
2199 | */ | ||
2200 | if (ret > 0) | ||
2201 | return -ret; | ||
2202 | return 0; | ||
2203 | } | ||
2204 | 2207 | ||
2205 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | 2208 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) |
2206 | goto drop; | 2209 | goto drop; |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 673bba31eb18..9a4261e50272 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -938,14 +938,14 @@ static int __init inet6_init(void) | |||
938 | 938 | ||
939 | err = proto_register(&pingv6_prot, 1); | 939 | err = proto_register(&pingv6_prot, 1); |
940 | if (err) | 940 | if (err) |
941 | goto out_unregister_ping_proto; | 941 | goto out_unregister_raw_proto; |
942 | 942 | ||
943 | /* We MUST register RAW sockets before we create the ICMP6, | 943 | /* We MUST register RAW sockets before we create the ICMP6, |
944 | * IGMP6, or NDISC control sockets. | 944 | * IGMP6, or NDISC control sockets. |
945 | */ | 945 | */ |
946 | err = rawv6_init(); | 946 | err = rawv6_init(); |
947 | if (err) | 947 | if (err) |
948 | goto out_unregister_raw_proto; | 948 | goto out_unregister_ping_proto; |
949 | 949 | ||
950 | /* Register the family here so that the init calls below will | 950 | /* Register the family here so that the init calls below will |
951 | * be able to create sockets. (?? is this dangerous ??) | 951 | * be able to create sockets. (?? is this dangerous ??) |
@@ -1113,11 +1113,11 @@ netfilter_fail: | |||
1113 | igmp_fail: | 1113 | igmp_fail: |
1114 | ndisc_cleanup(); | 1114 | ndisc_cleanup(); |
1115 | ndisc_fail: | 1115 | ndisc_fail: |
1116 | ip6_mr_cleanup(); | 1116 | icmpv6_cleanup(); |
1117 | icmp_fail: | 1117 | icmp_fail: |
1118 | unregister_pernet_subsys(&inet6_net_ops); | 1118 | ip6_mr_cleanup(); |
1119 | ipmr_fail: | 1119 | ipmr_fail: |
1120 | icmpv6_cleanup(); | 1120 | unregister_pernet_subsys(&inet6_net_ops); |
1121 | register_pernet_fail: | 1121 | register_pernet_fail: |
1122 | sock_unregister(PF_INET6); | 1122 | sock_unregister(PF_INET6); |
1123 | rtnl_unregister_all(PF_INET6); | 1123 | rtnl_unregister_all(PF_INET6); |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index c861a6d4671d..5516f55e214b 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -989,7 +989,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, | |||
989 | fib6_clean_expires(iter); | 989 | fib6_clean_expires(iter); |
990 | else | 990 | else |
991 | fib6_set_expires(iter, rt->expires); | 991 | fib6_set_expires(iter, rt->expires); |
992 | fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu); | 992 | |
993 | if (rt->fib6_pmtu) | ||
994 | fib6_metric_set(iter, RTAX_MTU, | ||
995 | rt->fib6_pmtu); | ||
993 | return -EEXIST; | 996 | return -EEXIST; |
994 | } | 997 | } |
995 | /* If we have the same destination and the same metric, | 998 | /* If we have the same destination and the same metric, |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 18a3794b0f52..e493b041d4ac 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -1778,6 +1778,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[], | |||
1778 | if (data[IFLA_GRE_COLLECT_METADATA]) | 1778 | if (data[IFLA_GRE_COLLECT_METADATA]) |
1779 | parms->collect_md = true; | 1779 | parms->collect_md = true; |
1780 | 1780 | ||
1781 | parms->erspan_ver = 1; | ||
1781 | if (data[IFLA_GRE_ERSPAN_VER]) | 1782 | if (data[IFLA_GRE_ERSPAN_VER]) |
1782 | parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); | 1783 | parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); |
1783 | 1784 | ||
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 37ff4805b20c..c7e495f12011 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c | |||
@@ -115,6 +115,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
115 | payload_len = skb->len - nhoff - sizeof(*ipv6h); | 115 | payload_len = skb->len - nhoff - sizeof(*ipv6h); |
116 | ipv6h->payload_len = htons(payload_len); | 116 | ipv6h->payload_len = htons(payload_len); |
117 | skb->network_header = (u8 *)ipv6h - skb->head; | 117 | skb->network_header = (u8 *)ipv6h - skb->head; |
118 | skb_reset_mac_len(skb); | ||
118 | 119 | ||
119 | if (udpfrag) { | 120 | if (udpfrag) { |
120 | int err = ip6_find_1stfragopt(skb, &prevhdr); | 121 | int err = ip6_find_1stfragopt(skb, &prevhdr); |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 16f200f06500..f9f8f554d141 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -219,12 +219,10 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, | |||
219 | kfree_skb(skb); | 219 | kfree_skb(skb); |
220 | return -ENOBUFS; | 220 | return -ENOBUFS; |
221 | } | 221 | } |
222 | if (skb->sk) | ||
223 | skb_set_owner_w(skb2, skb->sk); | ||
222 | consume_skb(skb); | 224 | consume_skb(skb); |
223 | skb = skb2; | 225 | skb = skb2; |
224 | /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically, | ||
225 | * it is safe to call in our context (socket lock not held) | ||
226 | */ | ||
227 | skb_set_owner_w(skb, (struct sock *)sk); | ||
228 | } | 226 | } |
229 | if (opt->opt_flen) | 227 | if (opt->opt_flen) |
230 | ipv6_push_frag_opts(skb, opt, &proto); | 228 | ipv6_push_frag_opts(skb, opt, &proto); |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 5df2a58d945c..419960b0ba16 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1188,7 +1188,15 @@ route_lookup: | |||
1188 | init_tel_txopt(&opt, encap_limit); | 1188 | init_tel_txopt(&opt, encap_limit); |
1189 | ipv6_push_frag_opts(skb, &opt.ops, &proto); | 1189 | ipv6_push_frag_opts(skb, &opt.ops, &proto); |
1190 | } | 1190 | } |
1191 | hop_limit = hop_limit ? : ip6_dst_hoplimit(dst); | 1191 | |
1192 | if (hop_limit == 0) { | ||
1193 | if (skb->protocol == htons(ETH_P_IP)) | ||
1194 | hop_limit = ip_hdr(skb)->ttl; | ||
1195 | else if (skb->protocol == htons(ETH_P_IPV6)) | ||
1196 | hop_limit = ipv6_hdr(skb)->hop_limit; | ||
1197 | else | ||
1198 | hop_limit = ip6_dst_hoplimit(dst); | ||
1199 | } | ||
1192 | 1200 | ||
1193 | /* Calculate max headroom for all the headers and adjust | 1201 | /* Calculate max headroom for all the headers and adjust |
1194 | * needed_headroom if necessary. | 1202 | * needed_headroom if necessary. |
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 5095367c7204..eeaf7455d51e 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c | |||
@@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) | |||
481 | } | 481 | } |
482 | 482 | ||
483 | mtu = dst_mtu(dst); | 483 | mtu = dst_mtu(dst); |
484 | if (!skb->ignore_df && skb->len > mtu) { | 484 | if (skb->len > mtu) { |
485 | skb_dst_update_pmtu(skb, mtu); | 485 | skb_dst_update_pmtu(skb, mtu); |
486 | 486 | ||
487 | if (skb->protocol == htons(ETH_P_IPV6)) { | 487 | if (skb->protocol == htons(ETH_P_IPV6)) { |
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 2a14d8b65924..8f68a518d9db 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -445,6 +445,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic | |||
445 | else if (head->ip_summed == CHECKSUM_COMPLETE) | 445 | else if (head->ip_summed == CHECKSUM_COMPLETE) |
446 | head->csum = csum_add(head->csum, fp->csum); | 446 | head->csum = csum_add(head->csum, fp->csum); |
447 | head->truesize += fp->truesize; | 447 | head->truesize += fp->truesize; |
448 | fp->sk = NULL; | ||
448 | } | 449 | } |
449 | sub_frag_mem_limit(fq->q.net, head->truesize); | 450 | sub_frag_mem_limit(fq->q.net, head->truesize); |
450 | 451 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index c4ea13e8360b..480a79f47c52 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -946,8 +946,6 @@ static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort) | |||
946 | 946 | ||
947 | static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort) | 947 | static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort) |
948 | { | 948 | { |
949 | rt->dst.flags |= fib6_info_dst_flags(ort); | ||
950 | |||
951 | if (ort->fib6_flags & RTF_REJECT) { | 949 | if (ort->fib6_flags & RTF_REJECT) { |
952 | ip6_rt_init_dst_reject(rt, ort); | 950 | ip6_rt_init_dst_reject(rt, ort); |
953 | return; | 951 | return; |
@@ -996,7 +994,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort) | |||
996 | rt->rt6i_src = ort->fib6_src; | 994 | rt->rt6i_src = ort->fib6_src; |
997 | #endif | 995 | #endif |
998 | rt->rt6i_prefsrc = ort->fib6_prefsrc; | 996 | rt->rt6i_prefsrc = ort->fib6_prefsrc; |
999 | rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate); | ||
1000 | } | 997 | } |
1001 | 998 | ||
1002 | static struct fib6_node* fib6_backtrack(struct fib6_node *fn, | 999 | static struct fib6_node* fib6_backtrack(struct fib6_node *fn, |
@@ -4671,20 +4668,31 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, | |||
4671 | int iif, int type, u32 portid, u32 seq, | 4668 | int iif, int type, u32 portid, u32 seq, |
4672 | unsigned int flags) | 4669 | unsigned int flags) |
4673 | { | 4670 | { |
4674 | struct rtmsg *rtm; | 4671 | struct rt6_info *rt6 = (struct rt6_info *)dst; |
4672 | struct rt6key *rt6_dst, *rt6_src; | ||
4673 | u32 *pmetrics, table, rt6_flags; | ||
4675 | struct nlmsghdr *nlh; | 4674 | struct nlmsghdr *nlh; |
4675 | struct rtmsg *rtm; | ||
4676 | long expires = 0; | 4676 | long expires = 0; |
4677 | u32 *pmetrics; | ||
4678 | u32 table; | ||
4679 | 4677 | ||
4680 | nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags); | 4678 | nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags); |
4681 | if (!nlh) | 4679 | if (!nlh) |
4682 | return -EMSGSIZE; | 4680 | return -EMSGSIZE; |
4683 | 4681 | ||
4682 | if (rt6) { | ||
4683 | rt6_dst = &rt6->rt6i_dst; | ||
4684 | rt6_src = &rt6->rt6i_src; | ||
4685 | rt6_flags = rt6->rt6i_flags; | ||
4686 | } else { | ||
4687 | rt6_dst = &rt->fib6_dst; | ||
4688 | rt6_src = &rt->fib6_src; | ||
4689 | rt6_flags = rt->fib6_flags; | ||
4690 | } | ||
4691 | |||
4684 | rtm = nlmsg_data(nlh); | 4692 | rtm = nlmsg_data(nlh); |
4685 | rtm->rtm_family = AF_INET6; | 4693 | rtm->rtm_family = AF_INET6; |
4686 | rtm->rtm_dst_len = rt->fib6_dst.plen; | 4694 | rtm->rtm_dst_len = rt6_dst->plen; |
4687 | rtm->rtm_src_len = rt->fib6_src.plen; | 4695 | rtm->rtm_src_len = rt6_src->plen; |
4688 | rtm->rtm_tos = 0; | 4696 | rtm->rtm_tos = 0; |
4689 | if (rt->fib6_table) | 4697 | if (rt->fib6_table) |
4690 | table = rt->fib6_table->tb6_id; | 4698 | table = rt->fib6_table->tb6_id; |
@@ -4699,7 +4707,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, | |||
4699 | rtm->rtm_scope = RT_SCOPE_UNIVERSE; | 4707 | rtm->rtm_scope = RT_SCOPE_UNIVERSE; |
4700 | rtm->rtm_protocol = rt->fib6_protocol; | 4708 | rtm->rtm_protocol = rt->fib6_protocol; |
4701 | 4709 | ||
4702 | if (rt->fib6_flags & RTF_CACHE) | 4710 | if (rt6_flags & RTF_CACHE) |
4703 | rtm->rtm_flags |= RTM_F_CLONED; | 4711 | rtm->rtm_flags |= RTM_F_CLONED; |
4704 | 4712 | ||
4705 | if (dest) { | 4713 | if (dest) { |
@@ -4707,7 +4715,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, | |||
4707 | goto nla_put_failure; | 4715 | goto nla_put_failure; |
4708 | rtm->rtm_dst_len = 128; | 4716 | rtm->rtm_dst_len = 128; |
4709 | } else if (rtm->rtm_dst_len) | 4717 | } else if (rtm->rtm_dst_len) |
4710 | if (nla_put_in6_addr(skb, RTA_DST, &rt->fib6_dst.addr)) | 4718 | if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr)) |
4711 | goto nla_put_failure; | 4719 | goto nla_put_failure; |
4712 | #ifdef CONFIG_IPV6_SUBTREES | 4720 | #ifdef CONFIG_IPV6_SUBTREES |
4713 | if (src) { | 4721 | if (src) { |
@@ -4715,12 +4723,12 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, | |||
4715 | goto nla_put_failure; | 4723 | goto nla_put_failure; |
4716 | rtm->rtm_src_len = 128; | 4724 | rtm->rtm_src_len = 128; |
4717 | } else if (rtm->rtm_src_len && | 4725 | } else if (rtm->rtm_src_len && |
4718 | nla_put_in6_addr(skb, RTA_SRC, &rt->fib6_src.addr)) | 4726 | nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr)) |
4719 | goto nla_put_failure; | 4727 | goto nla_put_failure; |
4720 | #endif | 4728 | #endif |
4721 | if (iif) { | 4729 | if (iif) { |
4722 | #ifdef CONFIG_IPV6_MROUTE | 4730 | #ifdef CONFIG_IPV6_MROUTE |
4723 | if (ipv6_addr_is_multicast(&rt->fib6_dst.addr)) { | 4731 | if (ipv6_addr_is_multicast(&rt6_dst->addr)) { |
4724 | int err = ip6mr_get_route(net, skb, rtm, portid); | 4732 | int err = ip6mr_get_route(net, skb, rtm, portid); |
4725 | 4733 | ||
4726 | if (err == 0) | 4734 | if (err == 0) |
@@ -4755,7 +4763,14 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, | |||
4755 | /* For multipath routes, walk the siblings list and add | 4763 | /* For multipath routes, walk the siblings list and add |
4756 | * each as a nexthop within RTA_MULTIPATH. | 4764 | * each as a nexthop within RTA_MULTIPATH. |
4757 | */ | 4765 | */ |
4758 | if (rt->fib6_nsiblings) { | 4766 | if (rt6) { |
4767 | if (rt6_flags & RTF_GATEWAY && | ||
4768 | nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway)) | ||
4769 | goto nla_put_failure; | ||
4770 | |||
4771 | if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex)) | ||
4772 | goto nla_put_failure; | ||
4773 | } else if (rt->fib6_nsiblings) { | ||
4759 | struct fib6_info *sibling, *next_sibling; | 4774 | struct fib6_info *sibling, *next_sibling; |
4760 | struct nlattr *mp; | 4775 | struct nlattr *mp; |
4761 | 4776 | ||
@@ -4778,7 +4793,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, | |||
4778 | goto nla_put_failure; | 4793 | goto nla_put_failure; |
4779 | } | 4794 | } |
4780 | 4795 | ||
4781 | if (rt->fib6_flags & RTF_EXPIRES) { | 4796 | if (rt6_flags & RTF_EXPIRES) { |
4782 | expires = dst ? dst->expires : rt->expires; | 4797 | expires = dst ? dst->expires : rt->expires; |
4783 | expires -= jiffies; | 4798 | expires -= jiffies; |
4784 | } | 4799 | } |
@@ -4786,7 +4801,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, | |||
4786 | if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0) | 4801 | if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0) |
4787 | goto nla_put_failure; | 4802 | goto nla_put_failure; |
4788 | 4803 | ||
4789 | if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->fib6_flags))) | 4804 | if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags))) |
4790 | goto nla_put_failure; | 4805 | goto nla_put_failure; |
4791 | 4806 | ||
4792 | 4807 | ||
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 83f4c77c79d8..28c4aa5078fc 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -752,6 +752,28 @@ static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) | |||
752 | } | 752 | } |
753 | } | 753 | } |
754 | 754 | ||
755 | /* wrapper for udp_queue_rcv_skb taking care of csum conversion and | ||
756 | * return code conversion for ip layer consumption | ||
757 | */ | ||
758 | static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, | ||
759 | struct udphdr *uh) | ||
760 | { | ||
761 | int ret; | ||
762 | |||
763 | if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) | ||
764 | skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, | ||
765 | ip6_compute_pseudo); | ||
766 | |||
767 | ret = udpv6_queue_rcv_skb(sk, skb); | ||
768 | |||
769 | /* a return value > 0 means to resubmit the input, but | ||
770 | * it wants the return to be -protocol, or 0 | ||
771 | */ | ||
772 | if (ret > 0) | ||
773 | return -ret; | ||
774 | return 0; | ||
775 | } | ||
776 | |||
755 | int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | 777 | int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, |
756 | int proto) | 778 | int proto) |
757 | { | 779 | { |
@@ -803,13 +825,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
803 | if (unlikely(sk->sk_rx_dst != dst)) | 825 | if (unlikely(sk->sk_rx_dst != dst)) |
804 | udp6_sk_rx_dst_set(sk, dst); | 826 | udp6_sk_rx_dst_set(sk, dst); |
805 | 827 | ||
806 | ret = udpv6_queue_rcv_skb(sk, skb); | 828 | if (!uh->check && !udp_sk(sk)->no_check6_rx) { |
807 | sock_put(sk); | 829 | sock_put(sk); |
830 | goto report_csum_error; | ||
831 | } | ||
808 | 832 | ||
809 | /* a return value > 0 means to resubmit the input */ | 833 | ret = udp6_unicast_rcv_skb(sk, skb, uh); |
810 | if (ret > 0) | 834 | sock_put(sk); |
811 | return ret; | 835 | return ret; |
812 | return 0; | ||
813 | } | 836 | } |
814 | 837 | ||
815 | /* | 838 | /* |
@@ -822,30 +845,13 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
822 | /* Unicast */ | 845 | /* Unicast */ |
823 | sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); | 846 | sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); |
824 | if (sk) { | 847 | if (sk) { |
825 | int ret; | 848 | if (!uh->check && !udp_sk(sk)->no_check6_rx) |
826 | 849 | goto report_csum_error; | |
827 | if (!uh->check && !udp_sk(sk)->no_check6_rx) { | 850 | return udp6_unicast_rcv_skb(sk, skb, uh); |
828 | udp6_csum_zero_error(skb); | ||
829 | goto csum_error; | ||
830 | } | ||
831 | |||
832 | if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) | ||
833 | skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, | ||
834 | ip6_compute_pseudo); | ||
835 | |||
836 | ret = udpv6_queue_rcv_skb(sk, skb); | ||
837 | |||
838 | /* a return value > 0 means to resubmit the input */ | ||
839 | if (ret > 0) | ||
840 | return ret; | ||
841 | |||
842 | return 0; | ||
843 | } | 851 | } |
844 | 852 | ||
845 | if (!uh->check) { | 853 | if (!uh->check) |
846 | udp6_csum_zero_error(skb); | 854 | goto report_csum_error; |
847 | goto csum_error; | ||
848 | } | ||
849 | 855 | ||
850 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) | 856 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) |
851 | goto discard; | 857 | goto discard; |
@@ -866,6 +872,9 @@ short_packet: | |||
866 | ulen, skb->len, | 872 | ulen, skb->len, |
867 | daddr, ntohs(uh->dest)); | 873 | daddr, ntohs(uh->dest)); |
868 | goto discard; | 874 | goto discard; |
875 | |||
876 | report_csum_error: | ||
877 | udp6_csum_zero_error(skb); | ||
869 | csum_error: | 878 | csum_error: |
870 | __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); | 879 | __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); |
871 | discard: | 880 | discard: |
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index a21d8ed0a325..e2f16a0173a9 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -351,20 +351,28 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, | |||
351 | memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message)); | 351 | memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message)); |
352 | 352 | ||
353 | skb->dev = iucv->hs_dev; | 353 | skb->dev = iucv->hs_dev; |
354 | if (!skb->dev) | 354 | if (!skb->dev) { |
355 | return -ENODEV; | 355 | err = -ENODEV; |
356 | if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) | 356 | goto err_free; |
357 | return -ENETDOWN; | 357 | } |
358 | if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) { | ||
359 | err = -ENETDOWN; | ||
360 | goto err_free; | ||
361 | } | ||
358 | if (skb->len > skb->dev->mtu) { | 362 | if (skb->len > skb->dev->mtu) { |
359 | if (sock->sk_type == SOCK_SEQPACKET) | 363 | if (sock->sk_type == SOCK_SEQPACKET) { |
360 | return -EMSGSIZE; | 364 | err = -EMSGSIZE; |
361 | else | 365 | goto err_free; |
362 | skb_trim(skb, skb->dev->mtu); | 366 | } |
367 | skb_trim(skb, skb->dev->mtu); | ||
363 | } | 368 | } |
364 | skb->protocol = cpu_to_be16(ETH_P_AF_IUCV); | 369 | skb->protocol = cpu_to_be16(ETH_P_AF_IUCV); |
365 | nskb = skb_clone(skb, GFP_ATOMIC); | 370 | nskb = skb_clone(skb, GFP_ATOMIC); |
366 | if (!nskb) | 371 | if (!nskb) { |
367 | return -ENOMEM; | 372 | err = -ENOMEM; |
373 | goto err_free; | ||
374 | } | ||
375 | |||
368 | skb_queue_tail(&iucv->send_skb_q, nskb); | 376 | skb_queue_tail(&iucv->send_skb_q, nskb); |
369 | err = dev_queue_xmit(skb); | 377 | err = dev_queue_xmit(skb); |
370 | if (net_xmit_eval(err)) { | 378 | if (net_xmit_eval(err)) { |
@@ -375,6 +383,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, | |||
375 | WARN_ON(atomic_read(&iucv->msg_recv) < 0); | 383 | WARN_ON(atomic_read(&iucv->msg_recv) < 0); |
376 | } | 384 | } |
377 | return net_xmit_eval(err); | 385 | return net_xmit_eval(err); |
386 | |||
387 | err_free: | ||
388 | kfree_skb(skb); | ||
389 | return err; | ||
378 | } | 390 | } |
379 | 391 | ||
380 | static struct sock *__iucv_get_sock_by_name(char *nm) | 392 | static struct sock *__iucv_get_sock_by_name(char *nm) |
@@ -1167,7 +1179,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg, | |||
1167 | err = afiucv_hs_send(&txmsg, sk, skb, 0); | 1179 | err = afiucv_hs_send(&txmsg, sk, skb, 0); |
1168 | if (err) { | 1180 | if (err) { |
1169 | atomic_dec(&iucv->msg_sent); | 1181 | atomic_dec(&iucv->msg_sent); |
1170 | goto fail; | 1182 | goto out; |
1171 | } | 1183 | } |
1172 | } else { /* Classic VM IUCV transport */ | 1184 | } else { /* Classic VM IUCV transport */ |
1173 | skb_queue_tail(&iucv->send_skb_q, skb); | 1185 | skb_queue_tail(&iucv->send_skb_q, skb); |
@@ -2155,8 +2167,8 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, | |||
2155 | struct sock *sk; | 2167 | struct sock *sk; |
2156 | struct iucv_sock *iucv; | 2168 | struct iucv_sock *iucv; |
2157 | struct af_iucv_trans_hdr *trans_hdr; | 2169 | struct af_iucv_trans_hdr *trans_hdr; |
2170 | int err = NET_RX_SUCCESS; | ||
2158 | char nullstring[8]; | 2171 | char nullstring[8]; |
2159 | int err = 0; | ||
2160 | 2172 | ||
2161 | if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) { | 2173 | if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) { |
2162 | WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d", | 2174 | WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d", |
@@ -2254,7 +2266,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, | |||
2254 | err = afiucv_hs_callback_rx(sk, skb); | 2266 | err = afiucv_hs_callback_rx(sk, skb); |
2255 | break; | 2267 | break; |
2256 | default: | 2268 | default: |
2257 | ; | 2269 | kfree_skb(skb); |
2258 | } | 2270 | } |
2259 | 2271 | ||
2260 | return err; | 2272 | return err; |
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 8f7ef167c45a..eb502c6290c2 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
@@ -1874,7 +1874,7 @@ static void iucv_pm_complete(struct device *dev) | |||
1874 | * Returns 0 if there are still iucv paths defined | 1874 | * Returns 0 if there are still iucv paths defined |
1875 | * 1 if there are no iucv paths defined | 1875 | * 1 if there are no iucv paths defined |
1876 | */ | 1876 | */ |
1877 | int iucv_path_table_empty(void) | 1877 | static int iucv_path_table_empty(void) |
1878 | { | 1878 | { |
1879 | int i; | 1879 | int i; |
1880 | 1880 | ||
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 6449a1c2283b..f0f5fedb8caa 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -947,8 +947,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata, | |||
947 | if (len < IEEE80211_DEAUTH_FRAME_LEN) | 947 | if (len < IEEE80211_DEAUTH_FRAME_LEN) |
948 | return; | 948 | return; |
949 | 949 | ||
950 | ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n", | 950 | ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); |
951 | mgmt->sa, mgmt->da, mgmt->bssid, reason); | 951 | ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason); |
952 | sta_info_destroy_addr(sdata, mgmt->sa); | 952 | sta_info_destroy_addr(sdata, mgmt->sa); |
953 | } | 953 | } |
954 | 954 | ||
@@ -966,9 +966,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata, | |||
966 | auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); | 966 | auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); |
967 | auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); | 967 | auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); |
968 | 968 | ||
969 | ibss_dbg(sdata, | 969 | ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); |
970 | "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n", | 970 | ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n", |
971 | mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction); | 971 | mgmt->bssid, auth_transaction); |
972 | 972 | ||
973 | if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) | 973 | if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) |
974 | return; | 974 | return; |
@@ -1175,10 +1175,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | |||
1175 | rx_timestamp = drv_get_tsf(local, sdata); | 1175 | rx_timestamp = drv_get_tsf(local, sdata); |
1176 | } | 1176 | } |
1177 | 1177 | ||
1178 | ibss_dbg(sdata, | 1178 | ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n", |
1179 | "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n", | ||
1180 | mgmt->sa, mgmt->bssid, | 1179 | mgmt->sa, mgmt->bssid, |
1181 | (unsigned long long)rx_timestamp, | 1180 | (unsigned long long)rx_timestamp); |
1181 | ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n", | ||
1182 | (unsigned long long)beacon_timestamp, | 1182 | (unsigned long long)beacon_timestamp, |
1183 | (unsigned long long)(rx_timestamp - beacon_timestamp), | 1183 | (unsigned long long)(rx_timestamp - beacon_timestamp), |
1184 | jiffies); | 1184 | jiffies); |
@@ -1537,9 +1537,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, | |||
1537 | 1537 | ||
1538 | tx_last_beacon = drv_tx_last_beacon(local); | 1538 | tx_last_beacon = drv_tx_last_beacon(local); |
1539 | 1539 | ||
1540 | ibss_dbg(sdata, | 1540 | ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); |
1541 | "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n", | 1541 | ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n", |
1542 | mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon); | 1542 | mgmt->bssid, tx_last_beacon); |
1543 | 1543 | ||
1544 | if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) | 1544 | if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) |
1545 | return; | 1545 | return; |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 4fb2709cb527..513627896204 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -256,8 +256,27 @@ static void ieee80211_restart_work(struct work_struct *work) | |||
256 | 256 | ||
257 | flush_work(&local->radar_detected_work); | 257 | flush_work(&local->radar_detected_work); |
258 | rtnl_lock(); | 258 | rtnl_lock(); |
259 | list_for_each_entry(sdata, &local->interfaces, list) | 259 | list_for_each_entry(sdata, &local->interfaces, list) { |
260 | /* | ||
261 | * XXX: there may be more work for other vif types and even | ||
262 | * for station mode: a good thing would be to run most of | ||
263 | * the iface type's dependent _stop (ieee80211_mg_stop, | ||
264 | * ieee80211_ibss_stop) etc... | ||
265 | * For now, fix only the specific bug that was seen: race | ||
266 | * between csa_connection_drop_work and us. | ||
267 | */ | ||
268 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
269 | /* | ||
270 | * This worker is scheduled from the iface worker that | ||
271 | * runs on mac80211's workqueue, so we can't be | ||
272 | * scheduling this worker after the cancel right here. | ||
273 | * The exception is ieee80211_chswitch_done. | ||
274 | * Then we can have a race... | ||
275 | */ | ||
276 | cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work); | ||
277 | } | ||
260 | flush_delayed_work(&sdata->dec_tailroom_needed_wk); | 278 | flush_delayed_work(&sdata->dec_tailroom_needed_wk); |
279 | } | ||
261 | ieee80211_scan_cancel(local); | 280 | ieee80211_scan_cancel(local); |
262 | 281 | ||
263 | /* make sure any new ROC will consider local->in_reconfig */ | 282 | /* make sure any new ROC will consider local->in_reconfig */ |
@@ -471,10 +490,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = { | |||
471 | cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC | | 490 | cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC | |
472 | IEEE80211_VHT_CAP_SHORT_GI_80 | | 491 | IEEE80211_VHT_CAP_SHORT_GI_80 | |
473 | IEEE80211_VHT_CAP_SHORT_GI_160 | | 492 | IEEE80211_VHT_CAP_SHORT_GI_160 | |
474 | IEEE80211_VHT_CAP_RXSTBC_1 | | 493 | IEEE80211_VHT_CAP_RXSTBC_MASK | |
475 | IEEE80211_VHT_CAP_RXSTBC_2 | | ||
476 | IEEE80211_VHT_CAP_RXSTBC_3 | | ||
477 | IEEE80211_VHT_CAP_RXSTBC_4 | | ||
478 | IEEE80211_VHT_CAP_TXSTBC | | 494 | IEEE80211_VHT_CAP_TXSTBC | |
479 | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | | 495 | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | |
480 | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | | 496 | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | |
@@ -1208,6 +1224,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
1208 | #if IS_ENABLED(CONFIG_IPV6) | 1224 | #if IS_ENABLED(CONFIG_IPV6) |
1209 | unregister_inet6addr_notifier(&local->ifa6_notifier); | 1225 | unregister_inet6addr_notifier(&local->ifa6_notifier); |
1210 | #endif | 1226 | #endif |
1227 | ieee80211_txq_teardown_flows(local); | ||
1211 | 1228 | ||
1212 | rtnl_lock(); | 1229 | rtnl_lock(); |
1213 | 1230 | ||
@@ -1236,7 +1253,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
1236 | skb_queue_purge(&local->skb_queue); | 1253 | skb_queue_purge(&local->skb_queue); |
1237 | skb_queue_purge(&local->skb_queue_unreliable); | 1254 | skb_queue_purge(&local->skb_queue_unreliable); |
1238 | skb_queue_purge(&local->skb_queue_tdls_chsw); | 1255 | skb_queue_purge(&local->skb_queue_tdls_chsw); |
1239 | ieee80211_txq_teardown_flows(local); | ||
1240 | 1256 | ||
1241 | destroy_workqueue(local->workqueue); | 1257 | destroy_workqueue(local->workqueue); |
1242 | wiphy_unregister(local->hw.wiphy); | 1258 | wiphy_unregister(local->hw.wiphy); |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 35ad3983ae4b..daf9db3c8f24 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c | |||
@@ -572,6 +572,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, | |||
572 | forward = false; | 572 | forward = false; |
573 | reply = true; | 573 | reply = true; |
574 | target_metric = 0; | 574 | target_metric = 0; |
575 | |||
576 | if (SN_GT(target_sn, ifmsh->sn)) | ||
577 | ifmsh->sn = target_sn; | ||
578 | |||
575 | if (time_after(jiffies, ifmsh->last_sn_update + | 579 | if (time_after(jiffies, ifmsh->last_sn_update + |
576 | net_traversal_jiffies(sdata)) || | 580 | net_traversal_jiffies(sdata)) || |
577 | time_before(jiffies, ifmsh->last_sn_update)) { | 581 | time_before(jiffies, ifmsh->last_sn_update)) { |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 7fb9957359a3..3dbecae4be73 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1073,6 +1073,10 @@ static void ieee80211_chswitch_work(struct work_struct *work) | |||
1073 | */ | 1073 | */ |
1074 | 1074 | ||
1075 | if (sdata->reserved_chanctx) { | 1075 | if (sdata->reserved_chanctx) { |
1076 | struct ieee80211_supported_band *sband = NULL; | ||
1077 | struct sta_info *mgd_sta = NULL; | ||
1078 | enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20; | ||
1079 | |||
1076 | /* | 1080 | /* |
1077 | * with multi-vif csa driver may call ieee80211_csa_finish() | 1081 | * with multi-vif csa driver may call ieee80211_csa_finish() |
1078 | * many times while waiting for other interfaces to use their | 1082 | * many times while waiting for other interfaces to use their |
@@ -1081,6 +1085,48 @@ static void ieee80211_chswitch_work(struct work_struct *work) | |||
1081 | if (sdata->reserved_ready) | 1085 | if (sdata->reserved_ready) |
1082 | goto out; | 1086 | goto out; |
1083 | 1087 | ||
1088 | if (sdata->vif.bss_conf.chandef.width != | ||
1089 | sdata->csa_chandef.width) { | ||
1090 | /* | ||
1091 | * For managed interface, we need to also update the AP | ||
1092 | * station bandwidth and align the rate scale algorithm | ||
1093 | * on the bandwidth change. Here we only consider the | ||
1094 | * bandwidth of the new channel definition (as channel | ||
1095 | * switch flow does not have the full HT/VHT/HE | ||
1096 | * information), assuming that if additional changes are | ||
1097 | * required they would be done as part of the processing | ||
1098 | * of the next beacon from the AP. | ||
1099 | */ | ||
1100 | switch (sdata->csa_chandef.width) { | ||
1101 | case NL80211_CHAN_WIDTH_20_NOHT: | ||
1102 | case NL80211_CHAN_WIDTH_20: | ||
1103 | default: | ||
1104 | bw = IEEE80211_STA_RX_BW_20; | ||
1105 | break; | ||
1106 | case NL80211_CHAN_WIDTH_40: | ||
1107 | bw = IEEE80211_STA_RX_BW_40; | ||
1108 | break; | ||
1109 | case NL80211_CHAN_WIDTH_80: | ||
1110 | bw = IEEE80211_STA_RX_BW_80; | ||
1111 | break; | ||
1112 | case NL80211_CHAN_WIDTH_80P80: | ||
1113 | case NL80211_CHAN_WIDTH_160: | ||
1114 | bw = IEEE80211_STA_RX_BW_160; | ||
1115 | break; | ||
1116 | } | ||
1117 | |||
1118 | mgd_sta = sta_info_get(sdata, ifmgd->bssid); | ||
1119 | sband = | ||
1120 | local->hw.wiphy->bands[sdata->csa_chandef.chan->band]; | ||
1121 | } | ||
1122 | |||
1123 | if (sdata->vif.bss_conf.chandef.width > | ||
1124 | sdata->csa_chandef.width) { | ||
1125 | mgd_sta->sta.bandwidth = bw; | ||
1126 | rate_control_rate_update(local, sband, mgd_sta, | ||
1127 | IEEE80211_RC_BW_CHANGED); | ||
1128 | } | ||
1129 | |||
1084 | ret = ieee80211_vif_use_reserved_context(sdata); | 1130 | ret = ieee80211_vif_use_reserved_context(sdata); |
1085 | if (ret) { | 1131 | if (ret) { |
1086 | sdata_info(sdata, | 1132 | sdata_info(sdata, |
@@ -1091,6 +1137,13 @@ static void ieee80211_chswitch_work(struct work_struct *work) | |||
1091 | goto out; | 1137 | goto out; |
1092 | } | 1138 | } |
1093 | 1139 | ||
1140 | if (sdata->vif.bss_conf.chandef.width < | ||
1141 | sdata->csa_chandef.width) { | ||
1142 | mgd_sta->sta.bandwidth = bw; | ||
1143 | rate_control_rate_update(local, sband, mgd_sta, | ||
1144 | IEEE80211_RC_BW_CHANGED); | ||
1145 | } | ||
1146 | |||
1094 | goto out; | 1147 | goto out; |
1095 | } | 1148 | } |
1096 | 1149 | ||
@@ -1312,6 +1365,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
1312 | cbss->beacon_interval)); | 1365 | cbss->beacon_interval)); |
1313 | return; | 1366 | return; |
1314 | drop_connection: | 1367 | drop_connection: |
1368 | /* | ||
1369 | * This is just so that the disconnect flow will know that | ||
1370 | * we were trying to switch channel and failed. In case the | ||
1371 | * mode is 1 (we are not allowed to Tx), we will know not to | ||
1372 | * send a deauthentication frame. Those two fields will be | ||
1373 | * reset when the disconnection worker runs. | ||
1374 | */ | ||
1375 | sdata->vif.csa_active = true; | ||
1376 | sdata->csa_block_tx = csa_ie.mode; | ||
1377 | |||
1315 | ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); | 1378 | ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); |
1316 | mutex_unlock(&local->chanctx_mtx); | 1379 | mutex_unlock(&local->chanctx_mtx); |
1317 | mutex_unlock(&local->mtx); | 1380 | mutex_unlock(&local->mtx); |
@@ -2522,6 +2585,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) | |||
2522 | struct ieee80211_local *local = sdata->local; | 2585 | struct ieee80211_local *local = sdata->local; |
2523 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2586 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
2524 | u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; | 2587 | u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; |
2588 | bool tx; | ||
2525 | 2589 | ||
2526 | sdata_lock(sdata); | 2590 | sdata_lock(sdata); |
2527 | if (!ifmgd->associated) { | 2591 | if (!ifmgd->associated) { |
@@ -2529,6 +2593,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) | |||
2529 | return; | 2593 | return; |
2530 | } | 2594 | } |
2531 | 2595 | ||
2596 | tx = !sdata->csa_block_tx; | ||
2597 | |||
2532 | /* AP is probably out of range (or not reachable for another reason) so | 2598 | /* AP is probably out of range (or not reachable for another reason) so |
2533 | * remove the bss struct for that AP. | 2599 | * remove the bss struct for that AP. |
2534 | */ | 2600 | */ |
@@ -2536,7 +2602,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) | |||
2536 | 2602 | ||
2537 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, | 2603 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, |
2538 | WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, | 2604 | WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, |
2539 | true, frame_buf); | 2605 | tx, frame_buf); |
2540 | mutex_lock(&local->mtx); | 2606 | mutex_lock(&local->mtx); |
2541 | sdata->vif.csa_active = false; | 2607 | sdata->vif.csa_active = false; |
2542 | ifmgd->csa_waiting_bcn = false; | 2608 | ifmgd->csa_waiting_bcn = false; |
@@ -2547,7 +2613,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) | |||
2547 | } | 2613 | } |
2548 | mutex_unlock(&local->mtx); | 2614 | mutex_unlock(&local->mtx); |
2549 | 2615 | ||
2550 | ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, | 2616 | ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx, |
2551 | WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); | 2617 | WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); |
2552 | 2618 | ||
2553 | sdata_unlock(sdata); | 2619 | sdata_unlock(sdata); |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 64742f2765c4..96611d5dfadb 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -1728,6 +1728,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
1728 | */ | 1728 | */ |
1729 | if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && | 1729 | if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && |
1730 | !ieee80211_has_morefrags(hdr->frame_control) && | 1730 | !ieee80211_has_morefrags(hdr->frame_control) && |
1731 | !is_multicast_ether_addr(hdr->addr1) && | ||
1731 | (ieee80211_is_mgmt(hdr->frame_control) || | 1732 | (ieee80211_is_mgmt(hdr->frame_control) || |
1732 | ieee80211_is_data(hdr->frame_control)) && | 1733 | ieee80211_is_data(hdr->frame_control)) && |
1733 | !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && | 1734 | !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index cd332e3e1134..f353d9db54bc 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -3078,27 +3078,18 @@ void ieee80211_clear_fast_xmit(struct sta_info *sta) | |||
3078 | } | 3078 | } |
3079 | 3079 | ||
3080 | static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local, | 3080 | static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local, |
3081 | struct sk_buff *skb, int headroom, | 3081 | struct sk_buff *skb, int headroom) |
3082 | int *subframe_len) | ||
3083 | { | 3082 | { |
3084 | int amsdu_len = *subframe_len + sizeof(struct ethhdr); | 3083 | if (skb_headroom(skb) < headroom) { |
3085 | int padding = (4 - amsdu_len) & 3; | ||
3086 | |||
3087 | if (skb_headroom(skb) < headroom || skb_tailroom(skb) < padding) { | ||
3088 | I802_DEBUG_INC(local->tx_expand_skb_head); | 3084 | I802_DEBUG_INC(local->tx_expand_skb_head); |
3089 | 3085 | ||
3090 | if (pskb_expand_head(skb, headroom, padding, GFP_ATOMIC)) { | 3086 | if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) { |
3091 | wiphy_debug(local->hw.wiphy, | 3087 | wiphy_debug(local->hw.wiphy, |
3092 | "failed to reallocate TX buffer\n"); | 3088 | "failed to reallocate TX buffer\n"); |
3093 | return false; | 3089 | return false; |
3094 | } | 3090 | } |
3095 | } | 3091 | } |
3096 | 3092 | ||
3097 | if (padding) { | ||
3098 | *subframe_len += padding; | ||
3099 | skb_put_zero(skb, padding); | ||
3100 | } | ||
3101 | |||
3102 | return true; | 3093 | return true; |
3103 | } | 3094 | } |
3104 | 3095 | ||
@@ -3122,8 +3113,7 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata, | |||
3122 | if (info->control.flags & IEEE80211_TX_CTRL_AMSDU) | 3113 | if (info->control.flags & IEEE80211_TX_CTRL_AMSDU) |
3123 | return true; | 3114 | return true; |
3124 | 3115 | ||
3125 | if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr), | 3116 | if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr))) |
3126 | &subframe_len)) | ||
3127 | return false; | 3117 | return false; |
3128 | 3118 | ||
3129 | data = skb_push(skb, sizeof(*amsdu_hdr)); | 3119 | data = skb_push(skb, sizeof(*amsdu_hdr)); |
@@ -3189,7 +3179,8 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
3189 | void *data; | 3179 | void *data; |
3190 | bool ret = false; | 3180 | bool ret = false; |
3191 | unsigned int orig_len; | 3181 | unsigned int orig_len; |
3192 | int n = 1, nfrags; | 3182 | int n = 2, nfrags, pad = 0; |
3183 | u16 hdrlen; | ||
3193 | 3184 | ||
3194 | if (!ieee80211_hw_check(&local->hw, TX_AMSDU)) | 3185 | if (!ieee80211_hw_check(&local->hw, TX_AMSDU)) |
3195 | return false; | 3186 | return false; |
@@ -3222,9 +3213,6 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
3222 | if (skb->len + head->len > max_amsdu_len) | 3213 | if (skb->len + head->len > max_amsdu_len) |
3223 | goto out; | 3214 | goto out; |
3224 | 3215 | ||
3225 | if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head)) | ||
3226 | goto out; | ||
3227 | |||
3228 | nfrags = 1 + skb_shinfo(skb)->nr_frags; | 3216 | nfrags = 1 + skb_shinfo(skb)->nr_frags; |
3229 | nfrags += 1 + skb_shinfo(head)->nr_frags; | 3217 | nfrags += 1 + skb_shinfo(head)->nr_frags; |
3230 | frag_tail = &skb_shinfo(head)->frag_list; | 3218 | frag_tail = &skb_shinfo(head)->frag_list; |
@@ -3240,10 +3228,24 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
3240 | if (max_frags && nfrags > max_frags) | 3228 | if (max_frags && nfrags > max_frags) |
3241 | goto out; | 3229 | goto out; |
3242 | 3230 | ||
3243 | if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2, | 3231 | if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head)) |
3244 | &subframe_len)) | ||
3245 | goto out; | 3232 | goto out; |
3246 | 3233 | ||
3234 | /* | ||
3235 | * Pad out the previous subframe to a multiple of 4 by adding the | ||
3236 | * padding to the next one, that's being added. Note that head->len | ||
3237 | * is the length of the full A-MSDU, but that works since each time | ||
3238 | * we add a new subframe we pad out the previous one to a multiple | ||
3239 | * of 4 and thus it no longer matters in the next round. | ||
3240 | */ | ||
3241 | hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header); | ||
3242 | if ((head->len - hdrlen) & 3) | ||
3243 | pad = 4 - ((head->len - hdrlen) & 3); | ||
3244 | |||
3245 | if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + | ||
3246 | 2 + pad)) | ||
3247 | goto out_recalc; | ||
3248 | |||
3247 | ret = true; | 3249 | ret = true; |
3248 | data = skb_push(skb, ETH_ALEN + 2); | 3250 | data = skb_push(skb, ETH_ALEN + 2); |
3249 | memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN); | 3251 | memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN); |
@@ -3253,15 +3255,19 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
3253 | memcpy(data, &len, 2); | 3255 | memcpy(data, &len, 2); |
3254 | memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header)); | 3256 | memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header)); |
3255 | 3257 | ||
3258 | memset(skb_push(skb, pad), 0, pad); | ||
3259 | |||
3256 | head->len += skb->len; | 3260 | head->len += skb->len; |
3257 | head->data_len += skb->len; | 3261 | head->data_len += skb->len; |
3258 | *frag_tail = skb; | 3262 | *frag_tail = skb; |
3259 | 3263 | ||
3260 | flow->backlog += head->len - orig_len; | 3264 | out_recalc: |
3261 | tin->backlog_bytes += head->len - orig_len; | 3265 | if (head->len != orig_len) { |
3262 | 3266 | flow->backlog += head->len - orig_len; | |
3263 | fq_recalc_backlog(fq, tin, flow); | 3267 | tin->backlog_bytes += head->len - orig_len; |
3264 | 3268 | ||
3269 | fq_recalc_backlog(fq, tin, flow); | ||
3270 | } | ||
3265 | out: | 3271 | out: |
3266 | spin_unlock_bh(&fq->lock); | 3272 | spin_unlock_bh(&fq->lock); |
3267 | 3273 | ||
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 88efda7c9f8a..716cd6442d86 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -1135,7 +1135,7 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, | |||
1135 | { | 1135 | { |
1136 | struct ieee80211_chanctx_conf *chanctx_conf; | 1136 | struct ieee80211_chanctx_conf *chanctx_conf; |
1137 | const struct ieee80211_reg_rule *rrule; | 1137 | const struct ieee80211_reg_rule *rrule; |
1138 | struct ieee80211_wmm_ac *wmm_ac; | 1138 | const struct ieee80211_wmm_ac *wmm_ac; |
1139 | u16 center_freq = 0; | 1139 | u16 center_freq = 0; |
1140 | 1140 | ||
1141 | if (sdata->vif.type != NL80211_IFTYPE_AP && | 1141 | if (sdata->vif.type != NL80211_IFTYPE_AP && |
@@ -1154,20 +1154,19 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, | |||
1154 | 1154 | ||
1155 | rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq)); | 1155 | rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq)); |
1156 | 1156 | ||
1157 | if (IS_ERR_OR_NULL(rrule) || !rrule->wmm_rule) { | 1157 | if (IS_ERR_OR_NULL(rrule) || !rrule->has_wmm) { |
1158 | rcu_read_unlock(); | 1158 | rcu_read_unlock(); |
1159 | return; | 1159 | return; |
1160 | } | 1160 | } |
1161 | 1161 | ||
1162 | if (sdata->vif.type == NL80211_IFTYPE_AP) | 1162 | if (sdata->vif.type == NL80211_IFTYPE_AP) |
1163 | wmm_ac = &rrule->wmm_rule->ap[ac]; | 1163 | wmm_ac = &rrule->wmm_rule.ap[ac]; |
1164 | else | 1164 | else |
1165 | wmm_ac = &rrule->wmm_rule->client[ac]; | 1165 | wmm_ac = &rrule->wmm_rule.client[ac]; |
1166 | qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min); | 1166 | qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min); |
1167 | qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max); | 1167 | qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max); |
1168 | qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn); | 1168 | qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn); |
1169 | qparam->txop = !qparam->txop ? wmm_ac->cot / 32 : | 1169 | qparam->txop = min_t(u16, qparam->txop, wmm_ac->cot / 32); |
1170 | min_t(u16, qparam->txop, wmm_ac->cot / 32); | ||
1171 | rcu_read_unlock(); | 1170 | rcu_read_unlock(); |
1172 | } | 1171 | } |
1173 | 1172 | ||
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 71709c104081..f61c306de1d0 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -771,13 +771,13 @@ config NETFILTER_XT_TARGET_CHECKSUM | |||
771 | depends on NETFILTER_ADVANCED | 771 | depends on NETFILTER_ADVANCED |
772 | ---help--- | 772 | ---help--- |
773 | This option adds a `CHECKSUM' target, which can be used in the iptables mangle | 773 | This option adds a `CHECKSUM' target, which can be used in the iptables mangle |
774 | table. | 774 | table to work around buggy DHCP clients in virtualized environments. |
775 | 775 | ||
776 | You can use this target to compute and fill in the checksum in | 776 | Some old DHCP clients drop packets because they are not aware |
777 | a packet that lacks a checksum. This is particularly useful, | 777 | that the checksum would normally be offloaded to hardware and |
778 | if you need to work around old applications such as dhcp clients, | 778 | thus should be considered valid. |
779 | that do not work well with checksum offloads, but don't want to disable | 779 | This target can be used to fill in the checksum using iptables |
780 | checksum offload in your device. | 780 | when such packets are sent via a virtual network device. |
781 | 781 | ||
782 | To compile it as a module, choose M here. If unsure, say N. | 782 | To compile it as a module, choose M here. If unsure, say N. |
783 | 783 | ||
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index 9f14b0df6960..51c5d7eec0a3 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c | |||
@@ -776,9 +776,26 @@ static const struct nf_hook_ops ipv6_conntrack_ops[] = { | |||
776 | }; | 776 | }; |
777 | #endif | 777 | #endif |
778 | 778 | ||
779 | static int nf_ct_tcp_fixup(struct nf_conn *ct, void *_nfproto) | ||
780 | { | ||
781 | u8 nfproto = (unsigned long)_nfproto; | ||
782 | |||
783 | if (nf_ct_l3num(ct) != nfproto) | ||
784 | return 0; | ||
785 | |||
786 | if (nf_ct_protonum(ct) == IPPROTO_TCP && | ||
787 | ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED) { | ||
788 | ct->proto.tcp.seen[0].td_maxwin = 0; | ||
789 | ct->proto.tcp.seen[1].td_maxwin = 0; | ||
790 | } | ||
791 | |||
792 | return 0; | ||
793 | } | ||
794 | |||
779 | static int nf_ct_netns_do_get(struct net *net, u8 nfproto) | 795 | static int nf_ct_netns_do_get(struct net *net, u8 nfproto) |
780 | { | 796 | { |
781 | struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id); | 797 | struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id); |
798 | bool fixup_needed = false; | ||
782 | int err = 0; | 799 | int err = 0; |
783 | 800 | ||
784 | mutex_lock(&nf_ct_proto_mutex); | 801 | mutex_lock(&nf_ct_proto_mutex); |
@@ -798,6 +815,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto) | |||
798 | ARRAY_SIZE(ipv4_conntrack_ops)); | 815 | ARRAY_SIZE(ipv4_conntrack_ops)); |
799 | if (err) | 816 | if (err) |
800 | cnet->users4 = 0; | 817 | cnet->users4 = 0; |
818 | else | ||
819 | fixup_needed = true; | ||
801 | break; | 820 | break; |
802 | #if IS_ENABLED(CONFIG_IPV6) | 821 | #if IS_ENABLED(CONFIG_IPV6) |
803 | case NFPROTO_IPV6: | 822 | case NFPROTO_IPV6: |
@@ -814,6 +833,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto) | |||
814 | ARRAY_SIZE(ipv6_conntrack_ops)); | 833 | ARRAY_SIZE(ipv6_conntrack_ops)); |
815 | if (err) | 834 | if (err) |
816 | cnet->users6 = 0; | 835 | cnet->users6 = 0; |
836 | else | ||
837 | fixup_needed = true; | ||
817 | break; | 838 | break; |
818 | #endif | 839 | #endif |
819 | default: | 840 | default: |
@@ -822,6 +843,11 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto) | |||
822 | } | 843 | } |
823 | out_unlock: | 844 | out_unlock: |
824 | mutex_unlock(&nf_ct_proto_mutex); | 845 | mutex_unlock(&nf_ct_proto_mutex); |
846 | |||
847 | if (fixup_needed) | ||
848 | nf_ct_iterate_cleanup_net(net, nf_ct_tcp_fixup, | ||
849 | (void *)(unsigned long)nfproto, 0, 0); | ||
850 | |||
825 | return err; | 851 | return err; |
826 | } | 852 | } |
827 | 853 | ||
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 8c58f96b59e7..f3f91ed2c21a 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c | |||
@@ -675,7 +675,7 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) | |||
675 | } | 675 | } |
676 | #endif | 676 | #endif |
677 | 677 | ||
678 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 678 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
679 | 679 | ||
680 | #include <linux/netfilter/nfnetlink.h> | 680 | #include <linux/netfilter/nfnetlink.h> |
681 | #include <linux/netfilter/nfnetlink_cttimeout.h> | 681 | #include <linux/netfilter/nfnetlink_cttimeout.h> |
@@ -697,6 +697,8 @@ static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], | |||
697 | timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; | 697 | timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; |
698 | } | 698 | } |
699 | } | 699 | } |
700 | |||
701 | timeouts[CTA_TIMEOUT_DCCP_UNSPEC] = timeouts[CTA_TIMEOUT_DCCP_REQUEST]; | ||
700 | return 0; | 702 | return 0; |
701 | } | 703 | } |
702 | 704 | ||
@@ -726,7 +728,7 @@ dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = { | |||
726 | [CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 }, | 728 | [CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 }, |
727 | [CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 }, | 729 | [CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 }, |
728 | }; | 730 | }; |
729 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 731 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
730 | 732 | ||
731 | #ifdef CONFIG_SYSCTL | 733 | #ifdef CONFIG_SYSCTL |
732 | /* template, data assigned later */ | 734 | /* template, data assigned later */ |
@@ -827,6 +829,11 @@ static int dccp_init_net(struct net *net, u_int16_t proto) | |||
827 | dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ; | 829 | dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ; |
828 | dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ; | 830 | dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ; |
829 | dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL; | 831 | dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL; |
832 | |||
833 | /* timeouts[0] is unused, make it same as SYN_SENT so | ||
834 | * ->timeouts[0] contains 'new' timeout, like udp or icmp. | ||
835 | */ | ||
836 | dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST]; | ||
830 | } | 837 | } |
831 | 838 | ||
832 | return dccp_kmemdup_sysctl_table(net, pn, dn); | 839 | return dccp_kmemdup_sysctl_table(net, pn, dn); |
@@ -856,7 +863,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = { | |||
856 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, | 863 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, |
857 | .nla_policy = nf_ct_port_nla_policy, | 864 | .nla_policy = nf_ct_port_nla_policy, |
858 | #endif | 865 | #endif |
859 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 866 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
860 | .ctnl_timeout = { | 867 | .ctnl_timeout = { |
861 | .nlattr_to_obj = dccp_timeout_nlattr_to_obj, | 868 | .nlattr_to_obj = dccp_timeout_nlattr_to_obj, |
862 | .obj_to_nlattr = dccp_timeout_obj_to_nlattr, | 869 | .obj_to_nlattr = dccp_timeout_obj_to_nlattr, |
@@ -864,7 +871,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = { | |||
864 | .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, | 871 | .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, |
865 | .nla_policy = dccp_timeout_nla_policy, | 872 | .nla_policy = dccp_timeout_nla_policy, |
866 | }, | 873 | }, |
867 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 874 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
868 | .init_net = dccp_init_net, | 875 | .init_net = dccp_init_net, |
869 | .get_net_proto = dccp_get_net_proto, | 876 | .get_net_proto = dccp_get_net_proto, |
870 | }; | 877 | }; |
@@ -889,7 +896,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = { | |||
889 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, | 896 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, |
890 | .nla_policy = nf_ct_port_nla_policy, | 897 | .nla_policy = nf_ct_port_nla_policy, |
891 | #endif | 898 | #endif |
892 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 899 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
893 | .ctnl_timeout = { | 900 | .ctnl_timeout = { |
894 | .nlattr_to_obj = dccp_timeout_nlattr_to_obj, | 901 | .nlattr_to_obj = dccp_timeout_nlattr_to_obj, |
895 | .obj_to_nlattr = dccp_timeout_obj_to_nlattr, | 902 | .obj_to_nlattr = dccp_timeout_obj_to_nlattr, |
@@ -897,7 +904,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = { | |||
897 | .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, | 904 | .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, |
898 | .nla_policy = dccp_timeout_nla_policy, | 905 | .nla_policy = dccp_timeout_nla_policy, |
899 | }, | 906 | }, |
900 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 907 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
901 | .init_net = dccp_init_net, | 908 | .init_net = dccp_init_net, |
902 | .get_net_proto = dccp_get_net_proto, | 909 | .get_net_proto = dccp_get_net_proto, |
903 | }; | 910 | }; |
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c index ac4a0b296dcd..1df3244ecd07 100644 --- a/net/netfilter/nf_conntrack_proto_generic.c +++ b/net/netfilter/nf_conntrack_proto_generic.c | |||
@@ -70,7 +70,7 @@ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb, | |||
70 | return ret; | 70 | return ret; |
71 | } | 71 | } |
72 | 72 | ||
73 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 73 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
74 | 74 | ||
75 | #include <linux/netfilter/nfnetlink.h> | 75 | #include <linux/netfilter/nfnetlink.h> |
76 | #include <linux/netfilter/nfnetlink_cttimeout.h> | 76 | #include <linux/netfilter/nfnetlink_cttimeout.h> |
@@ -113,7 +113,7 @@ static const struct nla_policy | |||
113 | generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = { | 113 | generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = { |
114 | [CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 }, | 114 | [CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 }, |
115 | }; | 115 | }; |
116 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 116 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
117 | 117 | ||
118 | #ifdef CONFIG_SYSCTL | 118 | #ifdef CONFIG_SYSCTL |
119 | static struct ctl_table generic_sysctl_table[] = { | 119 | static struct ctl_table generic_sysctl_table[] = { |
@@ -164,7 +164,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic = | |||
164 | .pkt_to_tuple = generic_pkt_to_tuple, | 164 | .pkt_to_tuple = generic_pkt_to_tuple, |
165 | .packet = generic_packet, | 165 | .packet = generic_packet, |
166 | .new = generic_new, | 166 | .new = generic_new, |
167 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 167 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
168 | .ctnl_timeout = { | 168 | .ctnl_timeout = { |
169 | .nlattr_to_obj = generic_timeout_nlattr_to_obj, | 169 | .nlattr_to_obj = generic_timeout_nlattr_to_obj, |
170 | .obj_to_nlattr = generic_timeout_obj_to_nlattr, | 170 | .obj_to_nlattr = generic_timeout_obj_to_nlattr, |
@@ -172,7 +172,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic = | |||
172 | .obj_size = sizeof(unsigned int), | 172 | .obj_size = sizeof(unsigned int), |
173 | .nla_policy = generic_timeout_nla_policy, | 173 | .nla_policy = generic_timeout_nla_policy, |
174 | }, | 174 | }, |
175 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 175 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
176 | .init_net = generic_init_net, | 176 | .init_net = generic_init_net, |
177 | .get_net_proto = generic_get_net_proto, | 177 | .get_net_proto = generic_get_net_proto, |
178 | }; | 178 | }; |
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index d1632252bf5b..650eb4fba2c5 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c | |||
@@ -285,7 +285,7 @@ static void gre_destroy(struct nf_conn *ct) | |||
285 | nf_ct_gre_keymap_destroy(master); | 285 | nf_ct_gre_keymap_destroy(master); |
286 | } | 286 | } |
287 | 287 | ||
288 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 288 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
289 | 289 | ||
290 | #include <linux/netfilter/nfnetlink.h> | 290 | #include <linux/netfilter/nfnetlink.h> |
291 | #include <linux/netfilter/nfnetlink_cttimeout.h> | 291 | #include <linux/netfilter/nfnetlink_cttimeout.h> |
@@ -334,7 +334,7 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = { | |||
334 | [CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 }, | 334 | [CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 }, |
335 | [CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 }, | 335 | [CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 }, |
336 | }; | 336 | }; |
337 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 337 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
338 | 338 | ||
339 | static int gre_init_net(struct net *net, u_int16_t proto) | 339 | static int gre_init_net(struct net *net, u_int16_t proto) |
340 | { | 340 | { |
@@ -367,7 +367,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = { | |||
367 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, | 367 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, |
368 | .nla_policy = nf_ct_port_nla_policy, | 368 | .nla_policy = nf_ct_port_nla_policy, |
369 | #endif | 369 | #endif |
370 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 370 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
371 | .ctnl_timeout = { | 371 | .ctnl_timeout = { |
372 | .nlattr_to_obj = gre_timeout_nlattr_to_obj, | 372 | .nlattr_to_obj = gre_timeout_nlattr_to_obj, |
373 | .obj_to_nlattr = gre_timeout_obj_to_nlattr, | 373 | .obj_to_nlattr = gre_timeout_obj_to_nlattr, |
@@ -375,7 +375,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = { | |||
375 | .obj_size = sizeof(unsigned int) * GRE_CT_MAX, | 375 | .obj_size = sizeof(unsigned int) * GRE_CT_MAX, |
376 | .nla_policy = gre_timeout_nla_policy, | 376 | .nla_policy = gre_timeout_nla_policy, |
377 | }, | 377 | }, |
378 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 378 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
379 | .net_id = &proto_gre_net_id, | 379 | .net_id = &proto_gre_net_id, |
380 | .init_net = gre_init_net, | 380 | .init_net = gre_init_net, |
381 | }; | 381 | }; |
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c index 036670b38282..43c7e1a217b9 100644 --- a/net/netfilter/nf_conntrack_proto_icmp.c +++ b/net/netfilter/nf_conntrack_proto_icmp.c | |||
@@ -273,7 +273,7 @@ static unsigned int icmp_nlattr_tuple_size(void) | |||
273 | } | 273 | } |
274 | #endif | 274 | #endif |
275 | 275 | ||
276 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 276 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
277 | 277 | ||
278 | #include <linux/netfilter/nfnetlink.h> | 278 | #include <linux/netfilter/nfnetlink.h> |
279 | #include <linux/netfilter/nfnetlink_cttimeout.h> | 279 | #include <linux/netfilter/nfnetlink_cttimeout.h> |
@@ -313,7 +313,7 @@ static const struct nla_policy | |||
313 | icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = { | 313 | icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = { |
314 | [CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 }, | 314 | [CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 }, |
315 | }; | 315 | }; |
316 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 316 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
317 | 317 | ||
318 | #ifdef CONFIG_SYSCTL | 318 | #ifdef CONFIG_SYSCTL |
319 | static struct ctl_table icmp_sysctl_table[] = { | 319 | static struct ctl_table icmp_sysctl_table[] = { |
@@ -374,7 +374,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp = | |||
374 | .nlattr_to_tuple = icmp_nlattr_to_tuple, | 374 | .nlattr_to_tuple = icmp_nlattr_to_tuple, |
375 | .nla_policy = icmp_nla_policy, | 375 | .nla_policy = icmp_nla_policy, |
376 | #endif | 376 | #endif |
377 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 377 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
378 | .ctnl_timeout = { | 378 | .ctnl_timeout = { |
379 | .nlattr_to_obj = icmp_timeout_nlattr_to_obj, | 379 | .nlattr_to_obj = icmp_timeout_nlattr_to_obj, |
380 | .obj_to_nlattr = icmp_timeout_obj_to_nlattr, | 380 | .obj_to_nlattr = icmp_timeout_obj_to_nlattr, |
@@ -382,7 +382,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp = | |||
382 | .obj_size = sizeof(unsigned int), | 382 | .obj_size = sizeof(unsigned int), |
383 | .nla_policy = icmp_timeout_nla_policy, | 383 | .nla_policy = icmp_timeout_nla_policy, |
384 | }, | 384 | }, |
385 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 385 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
386 | .init_net = icmp_init_net, | 386 | .init_net = icmp_init_net, |
387 | .get_net_proto = icmp_get_net_proto, | 387 | .get_net_proto = icmp_get_net_proto, |
388 | }; | 388 | }; |
diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c index bed07b998a10..97e40f77d678 100644 --- a/net/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/netfilter/nf_conntrack_proto_icmpv6.c | |||
@@ -274,7 +274,7 @@ static unsigned int icmpv6_nlattr_tuple_size(void) | |||
274 | } | 274 | } |
275 | #endif | 275 | #endif |
276 | 276 | ||
277 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 277 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
278 | 278 | ||
279 | #include <linux/netfilter/nfnetlink.h> | 279 | #include <linux/netfilter/nfnetlink.h> |
280 | #include <linux/netfilter/nfnetlink_cttimeout.h> | 280 | #include <linux/netfilter/nfnetlink_cttimeout.h> |
@@ -314,7 +314,7 @@ static const struct nla_policy | |||
314 | icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = { | 314 | icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = { |
315 | [CTA_TIMEOUT_ICMPV6_TIMEOUT] = { .type = NLA_U32 }, | 315 | [CTA_TIMEOUT_ICMPV6_TIMEOUT] = { .type = NLA_U32 }, |
316 | }; | 316 | }; |
317 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 317 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
318 | 318 | ||
319 | #ifdef CONFIG_SYSCTL | 319 | #ifdef CONFIG_SYSCTL |
320 | static struct ctl_table icmpv6_sysctl_table[] = { | 320 | static struct ctl_table icmpv6_sysctl_table[] = { |
@@ -373,7 +373,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 = | |||
373 | .nlattr_to_tuple = icmpv6_nlattr_to_tuple, | 373 | .nlattr_to_tuple = icmpv6_nlattr_to_tuple, |
374 | .nla_policy = icmpv6_nla_policy, | 374 | .nla_policy = icmpv6_nla_policy, |
375 | #endif | 375 | #endif |
376 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 376 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
377 | .ctnl_timeout = { | 377 | .ctnl_timeout = { |
378 | .nlattr_to_obj = icmpv6_timeout_nlattr_to_obj, | 378 | .nlattr_to_obj = icmpv6_timeout_nlattr_to_obj, |
379 | .obj_to_nlattr = icmpv6_timeout_obj_to_nlattr, | 379 | .obj_to_nlattr = icmpv6_timeout_obj_to_nlattr, |
@@ -381,7 +381,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 = | |||
381 | .obj_size = sizeof(unsigned int), | 381 | .obj_size = sizeof(unsigned int), |
382 | .nla_policy = icmpv6_timeout_nla_policy, | 382 | .nla_policy = icmpv6_timeout_nla_policy, |
383 | }, | 383 | }, |
384 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 384 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
385 | .init_net = icmpv6_init_net, | 385 | .init_net = icmpv6_init_net, |
386 | .get_net_proto = icmpv6_get_net_proto, | 386 | .get_net_proto = icmpv6_get_net_proto, |
387 | }; | 387 | }; |
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 8d1e085fc14a..e4d738d34cd0 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c | |||
@@ -591,7 +591,7 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct) | |||
591 | } | 591 | } |
592 | #endif | 592 | #endif |
593 | 593 | ||
594 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 594 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
595 | 595 | ||
596 | #include <linux/netfilter/nfnetlink.h> | 596 | #include <linux/netfilter/nfnetlink.h> |
597 | #include <linux/netfilter/nfnetlink_cttimeout.h> | 597 | #include <linux/netfilter/nfnetlink_cttimeout.h> |
@@ -613,6 +613,8 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[], | |||
613 | timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; | 613 | timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; |
614 | } | 614 | } |
615 | } | 615 | } |
616 | |||
617 | timeouts[CTA_TIMEOUT_SCTP_UNSPEC] = timeouts[CTA_TIMEOUT_SCTP_CLOSED]; | ||
616 | return 0; | 618 | return 0; |
617 | } | 619 | } |
618 | 620 | ||
@@ -644,7 +646,7 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = { | |||
644 | [CTA_TIMEOUT_SCTP_HEARTBEAT_SENT] = { .type = NLA_U32 }, | 646 | [CTA_TIMEOUT_SCTP_HEARTBEAT_SENT] = { .type = NLA_U32 }, |
645 | [CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { .type = NLA_U32 }, | 647 | [CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { .type = NLA_U32 }, |
646 | }; | 648 | }; |
647 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 649 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
648 | 650 | ||
649 | 651 | ||
650 | #ifdef CONFIG_SYSCTL | 652 | #ifdef CONFIG_SYSCTL |
@@ -743,6 +745,11 @@ static int sctp_init_net(struct net *net, u_int16_t proto) | |||
743 | 745 | ||
744 | for (i = 0; i < SCTP_CONNTRACK_MAX; i++) | 746 | for (i = 0; i < SCTP_CONNTRACK_MAX; i++) |
745 | sn->timeouts[i] = sctp_timeouts[i]; | 747 | sn->timeouts[i] = sctp_timeouts[i]; |
748 | |||
749 | /* timeouts[0] is unused, init it so ->timeouts[0] contains | ||
750 | * 'new' timeout, like udp or icmp. | ||
751 | */ | ||
752 | sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED]; | ||
746 | } | 753 | } |
747 | 754 | ||
748 | return sctp_kmemdup_sysctl_table(pn, sn); | 755 | return sctp_kmemdup_sysctl_table(pn, sn); |
@@ -773,7 +780,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = { | |||
773 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, | 780 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, |
774 | .nla_policy = nf_ct_port_nla_policy, | 781 | .nla_policy = nf_ct_port_nla_policy, |
775 | #endif | 782 | #endif |
776 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 783 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
777 | .ctnl_timeout = { | 784 | .ctnl_timeout = { |
778 | .nlattr_to_obj = sctp_timeout_nlattr_to_obj, | 785 | .nlattr_to_obj = sctp_timeout_nlattr_to_obj, |
779 | .obj_to_nlattr = sctp_timeout_obj_to_nlattr, | 786 | .obj_to_nlattr = sctp_timeout_obj_to_nlattr, |
@@ -781,7 +788,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = { | |||
781 | .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX, | 788 | .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX, |
782 | .nla_policy = sctp_timeout_nla_policy, | 789 | .nla_policy = sctp_timeout_nla_policy, |
783 | }, | 790 | }, |
784 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 791 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
785 | .init_net = sctp_init_net, | 792 | .init_net = sctp_init_net, |
786 | .get_net_proto = sctp_get_net_proto, | 793 | .get_net_proto = sctp_get_net_proto, |
787 | }; | 794 | }; |
@@ -806,7 +813,8 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = { | |||
806 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, | 813 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, |
807 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, | 814 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, |
808 | .nla_policy = nf_ct_port_nla_policy, | 815 | .nla_policy = nf_ct_port_nla_policy, |
809 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 816 | #endif |
817 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT | ||
810 | .ctnl_timeout = { | 818 | .ctnl_timeout = { |
811 | .nlattr_to_obj = sctp_timeout_nlattr_to_obj, | 819 | .nlattr_to_obj = sctp_timeout_nlattr_to_obj, |
812 | .obj_to_nlattr = sctp_timeout_obj_to_nlattr, | 820 | .obj_to_nlattr = sctp_timeout_obj_to_nlattr, |
@@ -814,8 +822,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = { | |||
814 | .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX, | 822 | .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX, |
815 | .nla_policy = sctp_timeout_nla_policy, | 823 | .nla_policy = sctp_timeout_nla_policy, |
816 | }, | 824 | }, |
817 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 825 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
818 | #endif | ||
819 | .init_net = sctp_init_net, | 826 | .init_net = sctp_init_net, |
820 | .get_net_proto = sctp_get_net_proto, | 827 | .get_net_proto = sctp_get_net_proto, |
821 | }; | 828 | }; |
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index d80d322b9d8b..b4bdf9eda7b7 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
@@ -1279,7 +1279,7 @@ static unsigned int tcp_nlattr_tuple_size(void) | |||
1279 | } | 1279 | } |
1280 | #endif | 1280 | #endif |
1281 | 1281 | ||
1282 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 1282 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
1283 | 1283 | ||
1284 | #include <linux/netfilter/nfnetlink.h> | 1284 | #include <linux/netfilter/nfnetlink.h> |
1285 | #include <linux/netfilter/nfnetlink_cttimeout.h> | 1285 | #include <linux/netfilter/nfnetlink_cttimeout.h> |
@@ -1301,6 +1301,7 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], | |||
1301 | timeouts[TCP_CONNTRACK_SYN_SENT] = | 1301 | timeouts[TCP_CONNTRACK_SYN_SENT] = |
1302 | ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ; | 1302 | ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ; |
1303 | } | 1303 | } |
1304 | |||
1304 | if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) { | 1305 | if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) { |
1305 | timeouts[TCP_CONNTRACK_SYN_RECV] = | 1306 | timeouts[TCP_CONNTRACK_SYN_RECV] = |
1306 | ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ; | 1307 | ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ; |
@@ -1341,6 +1342,8 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], | |||
1341 | timeouts[TCP_CONNTRACK_UNACK] = | 1342 | timeouts[TCP_CONNTRACK_UNACK] = |
1342 | ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ; | 1343 | ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ; |
1343 | } | 1344 | } |
1345 | |||
1346 | timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT]; | ||
1344 | return 0; | 1347 | return 0; |
1345 | } | 1348 | } |
1346 | 1349 | ||
@@ -1391,7 +1394,7 @@ static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = { | |||
1391 | [CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 }, | 1394 | [CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 }, |
1392 | [CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 }, | 1395 | [CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 }, |
1393 | }; | 1396 | }; |
1394 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 1397 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
1395 | 1398 | ||
1396 | #ifdef CONFIG_SYSCTL | 1399 | #ifdef CONFIG_SYSCTL |
1397 | static struct ctl_table tcp_sysctl_table[] = { | 1400 | static struct ctl_table tcp_sysctl_table[] = { |
@@ -1518,6 +1521,10 @@ static int tcp_init_net(struct net *net, u_int16_t proto) | |||
1518 | for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++) | 1521 | for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++) |
1519 | tn->timeouts[i] = tcp_timeouts[i]; | 1522 | tn->timeouts[i] = tcp_timeouts[i]; |
1520 | 1523 | ||
1524 | /* timeouts[0] is unused, make it same as SYN_SENT so | ||
1525 | * ->timeouts[0] contains 'new' timeout, like udp or icmp. | ||
1526 | */ | ||
1527 | tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT]; | ||
1521 | tn->tcp_loose = nf_ct_tcp_loose; | 1528 | tn->tcp_loose = nf_ct_tcp_loose; |
1522 | tn->tcp_be_liberal = nf_ct_tcp_be_liberal; | 1529 | tn->tcp_be_liberal = nf_ct_tcp_be_liberal; |
1523 | tn->tcp_max_retrans = nf_ct_tcp_max_retrans; | 1530 | tn->tcp_max_retrans = nf_ct_tcp_max_retrans; |
@@ -1551,7 +1558,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 = | |||
1551 | .nlattr_size = TCP_NLATTR_SIZE, | 1558 | .nlattr_size = TCP_NLATTR_SIZE, |
1552 | .nla_policy = nf_ct_port_nla_policy, | 1559 | .nla_policy = nf_ct_port_nla_policy, |
1553 | #endif | 1560 | #endif |
1554 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 1561 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
1555 | .ctnl_timeout = { | 1562 | .ctnl_timeout = { |
1556 | .nlattr_to_obj = tcp_timeout_nlattr_to_obj, | 1563 | .nlattr_to_obj = tcp_timeout_nlattr_to_obj, |
1557 | .obj_to_nlattr = tcp_timeout_obj_to_nlattr, | 1564 | .obj_to_nlattr = tcp_timeout_obj_to_nlattr, |
@@ -1560,7 +1567,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 = | |||
1560 | TCP_CONNTRACK_TIMEOUT_MAX, | 1567 | TCP_CONNTRACK_TIMEOUT_MAX, |
1561 | .nla_policy = tcp_timeout_nla_policy, | 1568 | .nla_policy = tcp_timeout_nla_policy, |
1562 | }, | 1569 | }, |
1563 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 1570 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
1564 | .init_net = tcp_init_net, | 1571 | .init_net = tcp_init_net, |
1565 | .get_net_proto = tcp_get_net_proto, | 1572 | .get_net_proto = tcp_get_net_proto, |
1566 | }; | 1573 | }; |
@@ -1586,7 +1593,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 = | |||
1586 | .nlattr_tuple_size = tcp_nlattr_tuple_size, | 1593 | .nlattr_tuple_size = tcp_nlattr_tuple_size, |
1587 | .nla_policy = nf_ct_port_nla_policy, | 1594 | .nla_policy = nf_ct_port_nla_policy, |
1588 | #endif | 1595 | #endif |
1589 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 1596 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
1590 | .ctnl_timeout = { | 1597 | .ctnl_timeout = { |
1591 | .nlattr_to_obj = tcp_timeout_nlattr_to_obj, | 1598 | .nlattr_to_obj = tcp_timeout_nlattr_to_obj, |
1592 | .obj_to_nlattr = tcp_timeout_obj_to_nlattr, | 1599 | .obj_to_nlattr = tcp_timeout_obj_to_nlattr, |
@@ -1595,7 +1602,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 = | |||
1595 | TCP_CONNTRACK_TIMEOUT_MAX, | 1602 | TCP_CONNTRACK_TIMEOUT_MAX, |
1596 | .nla_policy = tcp_timeout_nla_policy, | 1603 | .nla_policy = tcp_timeout_nla_policy, |
1597 | }, | 1604 | }, |
1598 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 1605 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
1599 | .init_net = tcp_init_net, | 1606 | .init_net = tcp_init_net, |
1600 | .get_net_proto = tcp_get_net_proto, | 1607 | .get_net_proto = tcp_get_net_proto, |
1601 | }; | 1608 | }; |
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index 7a1b8988a931..3065fb8ef91b 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c | |||
@@ -171,7 +171,7 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, | |||
171 | return NF_ACCEPT; | 171 | return NF_ACCEPT; |
172 | } | 172 | } |
173 | 173 | ||
174 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 174 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
175 | 175 | ||
176 | #include <linux/netfilter/nfnetlink.h> | 176 | #include <linux/netfilter/nfnetlink.h> |
177 | #include <linux/netfilter/nfnetlink_cttimeout.h> | 177 | #include <linux/netfilter/nfnetlink_cttimeout.h> |
@@ -221,7 +221,7 @@ udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = { | |||
221 | [CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 }, | 221 | [CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 }, |
222 | [CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 }, | 222 | [CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 }, |
223 | }; | 223 | }; |
224 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 224 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
225 | 225 | ||
226 | #ifdef CONFIG_SYSCTL | 226 | #ifdef CONFIG_SYSCTL |
227 | static struct ctl_table udp_sysctl_table[] = { | 227 | static struct ctl_table udp_sysctl_table[] = { |
@@ -292,7 +292,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 = | |||
292 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, | 292 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, |
293 | .nla_policy = nf_ct_port_nla_policy, | 293 | .nla_policy = nf_ct_port_nla_policy, |
294 | #endif | 294 | #endif |
295 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 295 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
296 | .ctnl_timeout = { | 296 | .ctnl_timeout = { |
297 | .nlattr_to_obj = udp_timeout_nlattr_to_obj, | 297 | .nlattr_to_obj = udp_timeout_nlattr_to_obj, |
298 | .obj_to_nlattr = udp_timeout_obj_to_nlattr, | 298 | .obj_to_nlattr = udp_timeout_obj_to_nlattr, |
@@ -300,7 +300,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 = | |||
300 | .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, | 300 | .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, |
301 | .nla_policy = udp_timeout_nla_policy, | 301 | .nla_policy = udp_timeout_nla_policy, |
302 | }, | 302 | }, |
303 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 303 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
304 | .init_net = udp_init_net, | 304 | .init_net = udp_init_net, |
305 | .get_net_proto = udp_get_net_proto, | 305 | .get_net_proto = udp_get_net_proto, |
306 | }; | 306 | }; |
@@ -321,7 +321,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 = | |||
321 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, | 321 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, |
322 | .nla_policy = nf_ct_port_nla_policy, | 322 | .nla_policy = nf_ct_port_nla_policy, |
323 | #endif | 323 | #endif |
324 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 324 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
325 | .ctnl_timeout = { | 325 | .ctnl_timeout = { |
326 | .nlattr_to_obj = udp_timeout_nlattr_to_obj, | 326 | .nlattr_to_obj = udp_timeout_nlattr_to_obj, |
327 | .obj_to_nlattr = udp_timeout_obj_to_nlattr, | 327 | .obj_to_nlattr = udp_timeout_obj_to_nlattr, |
@@ -329,7 +329,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 = | |||
329 | .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, | 329 | .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, |
330 | .nla_policy = udp_timeout_nla_policy, | 330 | .nla_policy = udp_timeout_nla_policy, |
331 | }, | 331 | }, |
332 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 332 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
333 | .init_net = udp_init_net, | 333 | .init_net = udp_init_net, |
334 | .get_net_proto = udp_get_net_proto, | 334 | .get_net_proto = udp_get_net_proto, |
335 | }; | 335 | }; |
@@ -350,7 +350,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 = | |||
350 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, | 350 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, |
351 | .nla_policy = nf_ct_port_nla_policy, | 351 | .nla_policy = nf_ct_port_nla_policy, |
352 | #endif | 352 | #endif |
353 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 353 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
354 | .ctnl_timeout = { | 354 | .ctnl_timeout = { |
355 | .nlattr_to_obj = udp_timeout_nlattr_to_obj, | 355 | .nlattr_to_obj = udp_timeout_nlattr_to_obj, |
356 | .obj_to_nlattr = udp_timeout_obj_to_nlattr, | 356 | .obj_to_nlattr = udp_timeout_obj_to_nlattr, |
@@ -358,7 +358,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 = | |||
358 | .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, | 358 | .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, |
359 | .nla_policy = udp_timeout_nla_policy, | 359 | .nla_policy = udp_timeout_nla_policy, |
360 | }, | 360 | }, |
361 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 361 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
362 | .init_net = udp_init_net, | 362 | .init_net = udp_init_net, |
363 | .get_net_proto = udp_get_net_proto, | 363 | .get_net_proto = udp_get_net_proto, |
364 | }; | 364 | }; |
@@ -379,7 +379,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 = | |||
379 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, | 379 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, |
380 | .nla_policy = nf_ct_port_nla_policy, | 380 | .nla_policy = nf_ct_port_nla_policy, |
381 | #endif | 381 | #endif |
382 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) | 382 | #ifdef CONFIG_NF_CONNTRACK_TIMEOUT |
383 | .ctnl_timeout = { | 383 | .ctnl_timeout = { |
384 | .nlattr_to_obj = udp_timeout_nlattr_to_obj, | 384 | .nlattr_to_obj = udp_timeout_nlattr_to_obj, |
385 | .obj_to_nlattr = udp_timeout_obj_to_nlattr, | 385 | .obj_to_nlattr = udp_timeout_obj_to_nlattr, |
@@ -387,10 +387,9 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 = | |||
387 | .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, | 387 | .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, |
388 | .nla_policy = udp_timeout_nla_policy, | 388 | .nla_policy = udp_timeout_nla_policy, |
389 | }, | 389 | }, |
390 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 390 | #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ |
391 | .init_net = udp_init_net, | 391 | .init_net = udp_init_net, |
392 | .get_net_proto = udp_get_net_proto, | 392 | .get_net_proto = udp_get_net_proto, |
393 | }; | 393 | }; |
394 | EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6); | 394 | EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6); |
395 | #endif | 395 | #endif |
396 | #include <net/netfilter/nf_conntrack_timeout.h> | ||
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 1dca5683f59f..2cfb173cd0b2 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -4637,6 +4637,7 @@ static int nft_flush_set(const struct nft_ctx *ctx, | |||
4637 | } | 4637 | } |
4638 | set->ndeact++; | 4638 | set->ndeact++; |
4639 | 4639 | ||
4640 | nft_set_elem_deactivate(ctx->net, set, elem); | ||
4640 | nft_trans_elem_set(trans) = set; | 4641 | nft_trans_elem_set(trans) = set; |
4641 | nft_trans_elem(trans) = *elem; | 4642 | nft_trans_elem(trans) = *elem; |
4642 | list_add_tail(&trans->list, &ctx->net->nft.commit_list); | 4643 | list_add_tail(&trans->list, &ctx->net->nft.commit_list); |
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index d46a236cdf31..a30f8ba4b89a 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c | |||
@@ -489,8 +489,8 @@ err: | |||
489 | return err; | 489 | return err; |
490 | } | 490 | } |
491 | 491 | ||
492 | static struct ctnl_timeout * | 492 | static struct nf_ct_timeout *ctnl_timeout_find_get(struct net *net, |
493 | ctnl_timeout_find_get(struct net *net, const char *name) | 493 | const char *name) |
494 | { | 494 | { |
495 | struct ctnl_timeout *timeout, *matching = NULL; | 495 | struct ctnl_timeout *timeout, *matching = NULL; |
496 | 496 | ||
@@ -509,7 +509,7 @@ ctnl_timeout_find_get(struct net *net, const char *name) | |||
509 | break; | 509 | break; |
510 | } | 510 | } |
511 | err: | 511 | err: |
512 | return matching; | 512 | return matching ? &matching->timeout : NULL; |
513 | } | 513 | } |
514 | 514 | ||
515 | static void ctnl_timeout_put(struct nf_ct_timeout *t) | 515 | static void ctnl_timeout_put(struct nf_ct_timeout *t) |
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index ea4ba551abb2..d33094f4ec41 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -233,6 +233,7 @@ static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict) | |||
233 | int err; | 233 | int err; |
234 | 234 | ||
235 | if (verdict == NF_ACCEPT || | 235 | if (verdict == NF_ACCEPT || |
236 | verdict == NF_REPEAT || | ||
236 | verdict == NF_STOP) { | 237 | verdict == NF_STOP) { |
237 | rcu_read_lock(); | 238 | rcu_read_lock(); |
238 | ct_hook = rcu_dereference(nf_ct_hook); | 239 | ct_hook = rcu_dereference(nf_ct_hook); |
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 26a8baebd072..5dd87748afa8 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c | |||
@@ -799,7 +799,7 @@ err: | |||
799 | } | 799 | } |
800 | 800 | ||
801 | struct nft_ct_timeout_obj { | 801 | struct nft_ct_timeout_obj { |
802 | struct nf_conn *tmpl; | 802 | struct nf_ct_timeout *timeout; |
803 | u8 l4proto; | 803 | u8 l4proto; |
804 | }; | 804 | }; |
805 | 805 | ||
@@ -809,26 +809,42 @@ static void nft_ct_timeout_obj_eval(struct nft_object *obj, | |||
809 | { | 809 | { |
810 | const struct nft_ct_timeout_obj *priv = nft_obj_data(obj); | 810 | const struct nft_ct_timeout_obj *priv = nft_obj_data(obj); |
811 | struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb); | 811 | struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb); |
812 | struct sk_buff *skb = pkt->skb; | 812 | struct nf_conn_timeout *timeout; |
813 | const unsigned int *values; | ||
814 | |||
815 | if (priv->l4proto != pkt->tprot) | ||
816 | return; | ||
813 | 817 | ||
814 | if (ct || | 818 | if (!ct || nf_ct_is_template(ct) || nf_ct_is_confirmed(ct)) |
815 | priv->l4proto != pkt->tprot) | ||
816 | return; | 819 | return; |
817 | 820 | ||
818 | nf_ct_set(skb, priv->tmpl, IP_CT_NEW); | 821 | timeout = nf_ct_timeout_find(ct); |
822 | if (!timeout) { | ||
823 | timeout = nf_ct_timeout_ext_add(ct, priv->timeout, GFP_ATOMIC); | ||
824 | if (!timeout) { | ||
825 | regs->verdict.code = NF_DROP; | ||
826 | return; | ||
827 | } | ||
828 | } | ||
829 | |||
830 | rcu_assign_pointer(timeout->timeout, priv->timeout); | ||
831 | |||
832 | /* adjust the timeout as per 'new' state. ct is unconfirmed, | ||
833 | * so the current timestamp must not be added. | ||
834 | */ | ||
835 | values = nf_ct_timeout_data(timeout); | ||
836 | if (values) | ||
837 | nf_ct_refresh(ct, pkt->skb, values[0]); | ||
819 | } | 838 | } |
820 | 839 | ||
821 | static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx, | 840 | static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx, |
822 | const struct nlattr * const tb[], | 841 | const struct nlattr * const tb[], |
823 | struct nft_object *obj) | 842 | struct nft_object *obj) |
824 | { | 843 | { |
825 | const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt; | ||
826 | struct nft_ct_timeout_obj *priv = nft_obj_data(obj); | 844 | struct nft_ct_timeout_obj *priv = nft_obj_data(obj); |
827 | const struct nf_conntrack_l4proto *l4proto; | 845 | const struct nf_conntrack_l4proto *l4proto; |
828 | struct nf_conn_timeout *timeout_ext; | ||
829 | struct nf_ct_timeout *timeout; | 846 | struct nf_ct_timeout *timeout; |
830 | int l3num = ctx->family; | 847 | int l3num = ctx->family; |
831 | struct nf_conn *tmpl; | ||
832 | __u8 l4num; | 848 | __u8 l4num; |
833 | int ret; | 849 | int ret; |
834 | 850 | ||
@@ -863,28 +879,14 @@ static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx, | |||
863 | 879 | ||
864 | timeout->l3num = l3num; | 880 | timeout->l3num = l3num; |
865 | timeout->l4proto = l4proto; | 881 | timeout->l4proto = l4proto; |
866 | tmpl = nf_ct_tmpl_alloc(ctx->net, zone, GFP_ATOMIC); | ||
867 | if (!tmpl) { | ||
868 | ret = -ENOMEM; | ||
869 | goto err_free_timeout; | ||
870 | } | ||
871 | |||
872 | timeout_ext = nf_ct_timeout_ext_add(tmpl, timeout, GFP_ATOMIC); | ||
873 | if (!timeout_ext) { | ||
874 | ret = -ENOMEM; | ||
875 | goto err_free_tmpl; | ||
876 | } | ||
877 | 882 | ||
878 | ret = nf_ct_netns_get(ctx->net, ctx->family); | 883 | ret = nf_ct_netns_get(ctx->net, ctx->family); |
879 | if (ret < 0) | 884 | if (ret < 0) |
880 | goto err_free_tmpl; | 885 | goto err_free_timeout; |
881 | |||
882 | priv->tmpl = tmpl; | ||
883 | 886 | ||
887 | priv->timeout = timeout; | ||
884 | return 0; | 888 | return 0; |
885 | 889 | ||
886 | err_free_tmpl: | ||
887 | nf_ct_tmpl_free(tmpl); | ||
888 | err_free_timeout: | 890 | err_free_timeout: |
889 | kfree(timeout); | 891 | kfree(timeout); |
890 | err_proto_put: | 892 | err_proto_put: |
@@ -896,22 +898,19 @@ static void nft_ct_timeout_obj_destroy(const struct nft_ctx *ctx, | |||
896 | struct nft_object *obj) | 898 | struct nft_object *obj) |
897 | { | 899 | { |
898 | struct nft_ct_timeout_obj *priv = nft_obj_data(obj); | 900 | struct nft_ct_timeout_obj *priv = nft_obj_data(obj); |
899 | struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl); | 901 | struct nf_ct_timeout *timeout = priv->timeout; |
900 | struct nf_ct_timeout *timeout; | ||
901 | 902 | ||
902 | timeout = rcu_dereference_raw(t->timeout); | ||
903 | nf_ct_untimeout(ctx->net, timeout); | 903 | nf_ct_untimeout(ctx->net, timeout); |
904 | nf_ct_l4proto_put(timeout->l4proto); | 904 | nf_ct_l4proto_put(timeout->l4proto); |
905 | nf_ct_netns_put(ctx->net, ctx->family); | 905 | nf_ct_netns_put(ctx->net, ctx->family); |
906 | nf_ct_tmpl_free(priv->tmpl); | 906 | kfree(priv->timeout); |
907 | } | 907 | } |
908 | 908 | ||
909 | static int nft_ct_timeout_obj_dump(struct sk_buff *skb, | 909 | static int nft_ct_timeout_obj_dump(struct sk_buff *skb, |
910 | struct nft_object *obj, bool reset) | 910 | struct nft_object *obj, bool reset) |
911 | { | 911 | { |
912 | const struct nft_ct_timeout_obj *priv = nft_obj_data(obj); | 912 | const struct nft_ct_timeout_obj *priv = nft_obj_data(obj); |
913 | const struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl); | 913 | const struct nf_ct_timeout *timeout = priv->timeout; |
914 | const struct nf_ct_timeout *timeout = rcu_dereference_raw(t->timeout); | ||
915 | struct nlattr *nest_params; | 914 | struct nlattr *nest_params; |
916 | int ret; | 915 | int ret; |
917 | 916 | ||
diff --git a/net/netfilter/xt_CHECKSUM.c b/net/netfilter/xt_CHECKSUM.c index 9f4151ec3e06..6c7aa6a0a0d2 100644 --- a/net/netfilter/xt_CHECKSUM.c +++ b/net/netfilter/xt_CHECKSUM.c | |||
@@ -16,6 +16,9 @@ | |||
16 | #include <linux/netfilter/x_tables.h> | 16 | #include <linux/netfilter/x_tables.h> |
17 | #include <linux/netfilter/xt_CHECKSUM.h> | 17 | #include <linux/netfilter/xt_CHECKSUM.h> |
18 | 18 | ||
19 | #include <linux/netfilter_ipv4/ip_tables.h> | ||
20 | #include <linux/netfilter_ipv6/ip6_tables.h> | ||
21 | |||
19 | MODULE_LICENSE("GPL"); | 22 | MODULE_LICENSE("GPL"); |
20 | MODULE_AUTHOR("Michael S. Tsirkin <mst@redhat.com>"); | 23 | MODULE_AUTHOR("Michael S. Tsirkin <mst@redhat.com>"); |
21 | MODULE_DESCRIPTION("Xtables: checksum modification"); | 24 | MODULE_DESCRIPTION("Xtables: checksum modification"); |
@@ -25,7 +28,7 @@ MODULE_ALIAS("ip6t_CHECKSUM"); | |||
25 | static unsigned int | 28 | static unsigned int |
26 | checksum_tg(struct sk_buff *skb, const struct xt_action_param *par) | 29 | checksum_tg(struct sk_buff *skb, const struct xt_action_param *par) |
27 | { | 30 | { |
28 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 31 | if (skb->ip_summed == CHECKSUM_PARTIAL && !skb_is_gso(skb)) |
29 | skb_checksum_help(skb); | 32 | skb_checksum_help(skb); |
30 | 33 | ||
31 | return XT_CONTINUE; | 34 | return XT_CONTINUE; |
@@ -34,6 +37,8 @@ checksum_tg(struct sk_buff *skb, const struct xt_action_param *par) | |||
34 | static int checksum_tg_check(const struct xt_tgchk_param *par) | 37 | static int checksum_tg_check(const struct xt_tgchk_param *par) |
35 | { | 38 | { |
36 | const struct xt_CHECKSUM_info *einfo = par->targinfo; | 39 | const struct xt_CHECKSUM_info *einfo = par->targinfo; |
40 | const struct ip6t_ip6 *i6 = par->entryinfo; | ||
41 | const struct ipt_ip *i4 = par->entryinfo; | ||
37 | 42 | ||
38 | if (einfo->operation & ~XT_CHECKSUM_OP_FILL) { | 43 | if (einfo->operation & ~XT_CHECKSUM_OP_FILL) { |
39 | pr_info_ratelimited("unsupported CHECKSUM operation %x\n", | 44 | pr_info_ratelimited("unsupported CHECKSUM operation %x\n", |
@@ -43,6 +48,21 @@ static int checksum_tg_check(const struct xt_tgchk_param *par) | |||
43 | if (!einfo->operation) | 48 | if (!einfo->operation) |
44 | return -EINVAL; | 49 | return -EINVAL; |
45 | 50 | ||
51 | switch (par->family) { | ||
52 | case NFPROTO_IPV4: | ||
53 | if (i4->proto == IPPROTO_UDP && | ||
54 | (i4->invflags & XT_INV_PROTO) == 0) | ||
55 | return 0; | ||
56 | break; | ||
57 | case NFPROTO_IPV6: | ||
58 | if ((i6->flags & IP6T_F_PROTO) && | ||
59 | i6->proto == IPPROTO_UDP && | ||
60 | (i6->invflags & XT_INV_PROTO) == 0) | ||
61 | return 0; | ||
62 | break; | ||
63 | } | ||
64 | |||
65 | pr_warn_once("CHECKSUM should be avoided. If really needed, restrict with \"-p udp\" and only use in OUTPUT\n"); | ||
46 | return 0; | 66 | return 0; |
47 | } | 67 | } |
48 | 68 | ||
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c index dfbdbb2fc0ed..51d0c257e7a5 100644 --- a/net/netfilter/xt_cluster.c +++ b/net/netfilter/xt_cluster.c | |||
@@ -125,6 +125,7 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
125 | static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) | 125 | static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) |
126 | { | 126 | { |
127 | struct xt_cluster_match_info *info = par->matchinfo; | 127 | struct xt_cluster_match_info *info = par->matchinfo; |
128 | int ret; | ||
128 | 129 | ||
129 | if (info->total_nodes > XT_CLUSTER_NODES_MAX) { | 130 | if (info->total_nodes > XT_CLUSTER_NODES_MAX) { |
130 | pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n", | 131 | pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n", |
@@ -135,7 +136,17 @@ static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) | |||
135 | pr_info_ratelimited("node mask cannot exceed total number of nodes\n"); | 136 | pr_info_ratelimited("node mask cannot exceed total number of nodes\n"); |
136 | return -EDOM; | 137 | return -EDOM; |
137 | } | 138 | } |
138 | return 0; | 139 | |
140 | ret = nf_ct_netns_get(par->net, par->family); | ||
141 | if (ret < 0) | ||
142 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", | ||
143 | par->family); | ||
144 | return ret; | ||
145 | } | ||
146 | |||
147 | static void xt_cluster_mt_destroy(const struct xt_mtdtor_param *par) | ||
148 | { | ||
149 | nf_ct_netns_put(par->net, par->family); | ||
139 | } | 150 | } |
140 | 151 | ||
141 | static struct xt_match xt_cluster_match __read_mostly = { | 152 | static struct xt_match xt_cluster_match __read_mostly = { |
@@ -144,6 +155,7 @@ static struct xt_match xt_cluster_match __read_mostly = { | |||
144 | .match = xt_cluster_mt, | 155 | .match = xt_cluster_mt, |
145 | .checkentry = xt_cluster_mt_checkentry, | 156 | .checkentry = xt_cluster_mt_checkentry, |
146 | .matchsize = sizeof(struct xt_cluster_match_info), | 157 | .matchsize = sizeof(struct xt_cluster_match_info), |
158 | .destroy = xt_cluster_mt_destroy, | ||
147 | .me = THIS_MODULE, | 159 | .me = THIS_MODULE, |
148 | }; | 160 | }; |
149 | 161 | ||
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 9b16402f29af..3e7d259e5d8d 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
@@ -1057,7 +1057,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = { | |||
1057 | static void *dl_seq_start(struct seq_file *s, loff_t *pos) | 1057 | static void *dl_seq_start(struct seq_file *s, loff_t *pos) |
1058 | __acquires(htable->lock) | 1058 | __acquires(htable->lock) |
1059 | { | 1059 | { |
1060 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); | 1060 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); |
1061 | unsigned int *bucket; | 1061 | unsigned int *bucket; |
1062 | 1062 | ||
1063 | spin_lock_bh(&htable->lock); | 1063 | spin_lock_bh(&htable->lock); |
@@ -1074,7 +1074,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos) | |||
1074 | 1074 | ||
1075 | static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) | 1075 | static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) |
1076 | { | 1076 | { |
1077 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); | 1077 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); |
1078 | unsigned int *bucket = v; | 1078 | unsigned int *bucket = v; |
1079 | 1079 | ||
1080 | *pos = ++(*bucket); | 1080 | *pos = ++(*bucket); |
@@ -1088,7 +1088,7 @@ static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) | |||
1088 | static void dl_seq_stop(struct seq_file *s, void *v) | 1088 | static void dl_seq_stop(struct seq_file *s, void *v) |
1089 | __releases(htable->lock) | 1089 | __releases(htable->lock) |
1090 | { | 1090 | { |
1091 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); | 1091 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); |
1092 | unsigned int *bucket = v; | 1092 | unsigned int *bucket = v; |
1093 | 1093 | ||
1094 | if (!IS_ERR(bucket)) | 1094 | if (!IS_ERR(bucket)) |
@@ -1130,7 +1130,7 @@ static void dl_seq_print(struct dsthash_ent *ent, u_int8_t family, | |||
1130 | static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family, | 1130 | static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family, |
1131 | struct seq_file *s) | 1131 | struct seq_file *s) |
1132 | { | 1132 | { |
1133 | struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private)); | 1133 | struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file)); |
1134 | 1134 | ||
1135 | spin_lock(&ent->lock); | 1135 | spin_lock(&ent->lock); |
1136 | /* recalculate to show accurate numbers */ | 1136 | /* recalculate to show accurate numbers */ |
@@ -1145,7 +1145,7 @@ static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family, | |||
1145 | static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family, | 1145 | static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family, |
1146 | struct seq_file *s) | 1146 | struct seq_file *s) |
1147 | { | 1147 | { |
1148 | struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private)); | 1148 | struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file)); |
1149 | 1149 | ||
1150 | spin_lock(&ent->lock); | 1150 | spin_lock(&ent->lock); |
1151 | /* recalculate to show accurate numbers */ | 1151 | /* recalculate to show accurate numbers */ |
@@ -1160,7 +1160,7 @@ static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family, | |||
1160 | static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, | 1160 | static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, |
1161 | struct seq_file *s) | 1161 | struct seq_file *s) |
1162 | { | 1162 | { |
1163 | struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private)); | 1163 | struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file)); |
1164 | 1164 | ||
1165 | spin_lock(&ent->lock); | 1165 | spin_lock(&ent->lock); |
1166 | /* recalculate to show accurate numbers */ | 1166 | /* recalculate to show accurate numbers */ |
@@ -1174,7 +1174,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, | |||
1174 | 1174 | ||
1175 | static int dl_seq_show_v2(struct seq_file *s, void *v) | 1175 | static int dl_seq_show_v2(struct seq_file *s, void *v) |
1176 | { | 1176 | { |
1177 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); | 1177 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); |
1178 | unsigned int *bucket = (unsigned int *)v; | 1178 | unsigned int *bucket = (unsigned int *)v; |
1179 | struct dsthash_ent *ent; | 1179 | struct dsthash_ent *ent; |
1180 | 1180 | ||
@@ -1188,7 +1188,7 @@ static int dl_seq_show_v2(struct seq_file *s, void *v) | |||
1188 | 1188 | ||
1189 | static int dl_seq_show_v1(struct seq_file *s, void *v) | 1189 | static int dl_seq_show_v1(struct seq_file *s, void *v) |
1190 | { | 1190 | { |
1191 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); | 1191 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); |
1192 | unsigned int *bucket = v; | 1192 | unsigned int *bucket = v; |
1193 | struct dsthash_ent *ent; | 1193 | struct dsthash_ent *ent; |
1194 | 1194 | ||
@@ -1202,7 +1202,7 @@ static int dl_seq_show_v1(struct seq_file *s, void *v) | |||
1202 | 1202 | ||
1203 | static int dl_seq_show(struct seq_file *s, void *v) | 1203 | static int dl_seq_show(struct seq_file *s, void *v) |
1204 | { | 1204 | { |
1205 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); | 1205 | struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); |
1206 | unsigned int *bucket = v; | 1206 | unsigned int *bucket = v; |
1207 | struct dsthash_ent *ent; | 1207 | struct dsthash_ent *ent; |
1208 | 1208 | ||
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 5610061e7f2e..75c92a87e7b2 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -4137,36 +4137,52 @@ static const struct vm_operations_struct packet_mmap_ops = { | |||
4137 | .close = packet_mm_close, | 4137 | .close = packet_mm_close, |
4138 | }; | 4138 | }; |
4139 | 4139 | ||
4140 | static void free_pg_vec(struct pgv *pg_vec, unsigned int len) | 4140 | static void free_pg_vec(struct pgv *pg_vec, unsigned int order, |
4141 | unsigned int len) | ||
4141 | { | 4142 | { |
4142 | int i; | 4143 | int i; |
4143 | 4144 | ||
4144 | for (i = 0; i < len; i++) { | 4145 | for (i = 0; i < len; i++) { |
4145 | if (likely(pg_vec[i].buffer)) { | 4146 | if (likely(pg_vec[i].buffer)) { |
4146 | kvfree(pg_vec[i].buffer); | 4147 | if (is_vmalloc_addr(pg_vec[i].buffer)) |
4148 | vfree(pg_vec[i].buffer); | ||
4149 | else | ||
4150 | free_pages((unsigned long)pg_vec[i].buffer, | ||
4151 | order); | ||
4147 | pg_vec[i].buffer = NULL; | 4152 | pg_vec[i].buffer = NULL; |
4148 | } | 4153 | } |
4149 | } | 4154 | } |
4150 | kfree(pg_vec); | 4155 | kfree(pg_vec); |
4151 | } | 4156 | } |
4152 | 4157 | ||
4153 | static char *alloc_one_pg_vec_page(unsigned long size) | 4158 | static char *alloc_one_pg_vec_page(unsigned long order) |
4154 | { | 4159 | { |
4155 | char *buffer; | 4160 | char *buffer; |
4161 | gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | | ||
4162 | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; | ||
4156 | 4163 | ||
4157 | buffer = kvzalloc(size, GFP_KERNEL); | 4164 | buffer = (char *) __get_free_pages(gfp_flags, order); |
4158 | if (buffer) | 4165 | if (buffer) |
4159 | return buffer; | 4166 | return buffer; |
4160 | 4167 | ||
4161 | buffer = kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); | 4168 | /* __get_free_pages failed, fall back to vmalloc */ |
4169 | buffer = vzalloc(array_size((1 << order), PAGE_SIZE)); | ||
4170 | if (buffer) | ||
4171 | return buffer; | ||
4162 | 4172 | ||
4163 | return buffer; | 4173 | /* vmalloc failed, lets dig into swap here */ |
4174 | gfp_flags &= ~__GFP_NORETRY; | ||
4175 | buffer = (char *) __get_free_pages(gfp_flags, order); | ||
4176 | if (buffer) | ||
4177 | return buffer; | ||
4178 | |||
4179 | /* complete and utter failure */ | ||
4180 | return NULL; | ||
4164 | } | 4181 | } |
4165 | 4182 | ||
4166 | static struct pgv *alloc_pg_vec(struct tpacket_req *req) | 4183 | static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) |
4167 | { | 4184 | { |
4168 | unsigned int block_nr = req->tp_block_nr; | 4185 | unsigned int block_nr = req->tp_block_nr; |
4169 | unsigned long size = req->tp_block_size; | ||
4170 | struct pgv *pg_vec; | 4186 | struct pgv *pg_vec; |
4171 | int i; | 4187 | int i; |
4172 | 4188 | ||
@@ -4175,7 +4191,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req) | |||
4175 | goto out; | 4191 | goto out; |
4176 | 4192 | ||
4177 | for (i = 0; i < block_nr; i++) { | 4193 | for (i = 0; i < block_nr; i++) { |
4178 | pg_vec[i].buffer = alloc_one_pg_vec_page(size); | 4194 | pg_vec[i].buffer = alloc_one_pg_vec_page(order); |
4179 | if (unlikely(!pg_vec[i].buffer)) | 4195 | if (unlikely(!pg_vec[i].buffer)) |
4180 | goto out_free_pgvec; | 4196 | goto out_free_pgvec; |
4181 | } | 4197 | } |
@@ -4184,7 +4200,7 @@ out: | |||
4184 | return pg_vec; | 4200 | return pg_vec; |
4185 | 4201 | ||
4186 | out_free_pgvec: | 4202 | out_free_pgvec: |
4187 | free_pg_vec(pg_vec, block_nr); | 4203 | free_pg_vec(pg_vec, order, block_nr); |
4188 | pg_vec = NULL; | 4204 | pg_vec = NULL; |
4189 | goto out; | 4205 | goto out; |
4190 | } | 4206 | } |
@@ -4194,9 +4210,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
4194 | { | 4210 | { |
4195 | struct pgv *pg_vec = NULL; | 4211 | struct pgv *pg_vec = NULL; |
4196 | struct packet_sock *po = pkt_sk(sk); | 4212 | struct packet_sock *po = pkt_sk(sk); |
4213 | int was_running, order = 0; | ||
4197 | struct packet_ring_buffer *rb; | 4214 | struct packet_ring_buffer *rb; |
4198 | struct sk_buff_head *rb_queue; | 4215 | struct sk_buff_head *rb_queue; |
4199 | int was_running; | ||
4200 | __be16 num; | 4216 | __be16 num; |
4201 | int err = -EINVAL; | 4217 | int err = -EINVAL; |
4202 | /* Added to avoid minimal code churn */ | 4218 | /* Added to avoid minimal code churn */ |
@@ -4258,7 +4274,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
4258 | goto out; | 4274 | goto out; |
4259 | 4275 | ||
4260 | err = -ENOMEM; | 4276 | err = -ENOMEM; |
4261 | pg_vec = alloc_pg_vec(req); | 4277 | order = get_order(req->tp_block_size); |
4278 | pg_vec = alloc_pg_vec(req, order); | ||
4262 | if (unlikely(!pg_vec)) | 4279 | if (unlikely(!pg_vec)) |
4263 | goto out; | 4280 | goto out; |
4264 | switch (po->tp_version) { | 4281 | switch (po->tp_version) { |
@@ -4312,6 +4329,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
4312 | rb->frame_size = req->tp_frame_size; | 4329 | rb->frame_size = req->tp_frame_size; |
4313 | spin_unlock_bh(&rb_queue->lock); | 4330 | spin_unlock_bh(&rb_queue->lock); |
4314 | 4331 | ||
4332 | swap(rb->pg_vec_order, order); | ||
4315 | swap(rb->pg_vec_len, req->tp_block_nr); | 4333 | swap(rb->pg_vec_len, req->tp_block_nr); |
4316 | 4334 | ||
4317 | rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; | 4335 | rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; |
@@ -4337,7 +4355,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
4337 | } | 4355 | } |
4338 | 4356 | ||
4339 | if (pg_vec) | 4357 | if (pg_vec) |
4340 | free_pg_vec(pg_vec, req->tp_block_nr); | 4358 | free_pg_vec(pg_vec, order, req->tp_block_nr); |
4341 | out: | 4359 | out: |
4342 | return err; | 4360 | return err; |
4343 | } | 4361 | } |
diff --git a/net/packet/internal.h b/net/packet/internal.h index 8f50036f62f0..3bb7c5fb3bff 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h | |||
@@ -64,6 +64,7 @@ struct packet_ring_buffer { | |||
64 | unsigned int frame_size; | 64 | unsigned int frame_size; |
65 | unsigned int frame_max; | 65 | unsigned int frame_max; |
66 | 66 | ||
67 | unsigned int pg_vec_order; | ||
67 | unsigned int pg_vec_pages; | 68 | unsigned int pg_vec_pages; |
68 | unsigned int pg_vec_len; | 69 | unsigned int pg_vec_len; |
69 | 70 | ||
diff --git a/net/rds/Kconfig b/net/rds/Kconfig index 01b3bd6a3708..b9092111bc45 100644 --- a/net/rds/Kconfig +++ b/net/rds/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | 1 | ||
2 | config RDS | 2 | config RDS |
3 | tristate "The RDS Protocol" | 3 | tristate "The Reliable Datagram Sockets Protocol" |
4 | depends on INET | 4 | depends on INET |
5 | ---help--- | 5 | ---help--- |
6 | The RDS (Reliable Datagram Sockets) protocol provides reliable, | 6 | The RDS (Reliable Datagram Sockets) protocol provides reliable, |
diff --git a/net/rds/bind.c b/net/rds/bind.c index 3ab55784b637..762d2c6788a3 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c | |||
@@ -76,11 +76,13 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port, | |||
76 | struct rds_sock *rs; | 76 | struct rds_sock *rs; |
77 | 77 | ||
78 | __rds_create_bind_key(key, addr, port, scope_id); | 78 | __rds_create_bind_key(key, addr, port, scope_id); |
79 | rs = rhashtable_lookup_fast(&bind_hash_table, key, ht_parms); | 79 | rcu_read_lock(); |
80 | rs = rhashtable_lookup(&bind_hash_table, key, ht_parms); | ||
80 | if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) | 81 | if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) |
81 | rds_sock_addref(rs); | 82 | rds_sock_addref(rs); |
82 | else | 83 | else |
83 | rs = NULL; | 84 | rs = NULL; |
85 | rcu_read_unlock(); | ||
84 | 86 | ||
85 | rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr, | 87 | rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr, |
86 | ntohs(port)); | 88 | ntohs(port)); |
@@ -235,6 +237,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
235 | goto out; | 237 | goto out; |
236 | } | 238 | } |
237 | 239 | ||
240 | sock_set_flag(sk, SOCK_RCU_FREE); | ||
238 | ret = rds_add_bound(rs, binding_addr, &port, scope_id); | 241 | ret = rds_add_bound(rs, binding_addr, &port, scope_id); |
239 | if (ret) | 242 | if (ret) |
240 | goto out; | 243 | goto out; |
diff --git a/net/rds/ib.c b/net/rds/ib.c index c1d97640c0be..eba75c1ba359 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c | |||
@@ -341,15 +341,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn, | |||
341 | 341 | ||
342 | if (rds_conn_state(conn) == RDS_CONN_UP) { | 342 | if (rds_conn_state(conn) == RDS_CONN_UP) { |
343 | struct rds_ib_device *rds_ibdev; | 343 | struct rds_ib_device *rds_ibdev; |
344 | struct rdma_dev_addr *dev_addr; | ||
345 | 344 | ||
346 | ic = conn->c_transport_data; | 345 | ic = conn->c_transport_data; |
347 | dev_addr = &ic->i_cm_id->route.addr.dev_addr; | 346 | rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid, |
348 | rdma_addr_get_sgid(dev_addr, | 347 | (union ib_gid *)&iinfo6->dst_gid); |
349 | (union ib_gid *)&iinfo6->src_gid); | ||
350 | rdma_addr_get_dgid(dev_addr, | ||
351 | (union ib_gid *)&iinfo6->dst_gid); | ||
352 | |||
353 | rds_ibdev = ic->rds_ibdev; | 348 | rds_ibdev = ic->rds_ibdev; |
354 | iinfo6->max_send_wr = ic->i_send_ring.w_nr; | 349 | iinfo6->max_send_wr = ic->i_send_ring.w_nr; |
355 | iinfo6->max_recv_wr = ic->i_recv_ring.w_nr; | 350 | iinfo6->max_recv_wr = ic->i_recv_ring.w_nr; |
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index 00192a996be0..0f8465852254 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/mod_devicetable.h> | ||
23 | #include <linux/rfkill.h> | 24 | #include <linux/rfkill.h> |
24 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
25 | #include <linux/clk.h> | 26 | #include <linux/clk.h> |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index db83dac1e7f4..e12f8ef7baa4 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -662,6 +662,13 @@ int tcf_action_destroy(struct tc_action *actions[], int bind) | |||
662 | return ret; | 662 | return ret; |
663 | } | 663 | } |
664 | 664 | ||
665 | static int tcf_action_destroy_1(struct tc_action *a, int bind) | ||
666 | { | ||
667 | struct tc_action *actions[] = { a, NULL }; | ||
668 | |||
669 | return tcf_action_destroy(actions, bind); | ||
670 | } | ||
671 | |||
665 | static int tcf_action_put(struct tc_action *p) | 672 | static int tcf_action_put(struct tc_action *p) |
666 | { | 673 | { |
667 | return __tcf_action_put(p, false); | 674 | return __tcf_action_put(p, false); |
@@ -881,17 +888,16 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, | |||
881 | if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) { | 888 | if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) { |
882 | err = tcf_action_goto_chain_init(a, tp); | 889 | err = tcf_action_goto_chain_init(a, tp); |
883 | if (err) { | 890 | if (err) { |
884 | struct tc_action *actions[] = { a, NULL }; | 891 | tcf_action_destroy_1(a, bind); |
885 | |||
886 | tcf_action_destroy(actions, bind); | ||
887 | NL_SET_ERR_MSG(extack, "Failed to init TC action chain"); | 892 | NL_SET_ERR_MSG(extack, "Failed to init TC action chain"); |
888 | return ERR_PTR(err); | 893 | return ERR_PTR(err); |
889 | } | 894 | } |
890 | } | 895 | } |
891 | 896 | ||
892 | if (!tcf_action_valid(a->tcfa_action)) { | 897 | if (!tcf_action_valid(a->tcfa_action)) { |
893 | NL_SET_ERR_MSG(extack, "invalid action value, using TC_ACT_UNSPEC instead"); | 898 | tcf_action_destroy_1(a, bind); |
894 | a->tcfa_action = TC_ACT_UNSPEC; | 899 | NL_SET_ERR_MSG(extack, "Invalid control action value"); |
900 | return ERR_PTR(-EINVAL); | ||
895 | } | 901 | } |
896 | 902 | ||
897 | return a; | 903 | return a; |
@@ -1173,6 +1179,7 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[]) | |||
1173 | struct tcf_idrinfo *idrinfo = a->idrinfo; | 1179 | struct tcf_idrinfo *idrinfo = a->idrinfo; |
1174 | u32 act_index = a->tcfa_index; | 1180 | u32 act_index = a->tcfa_index; |
1175 | 1181 | ||
1182 | actions[i] = NULL; | ||
1176 | if (tcf_action_put(a)) { | 1183 | if (tcf_action_put(a)) { |
1177 | /* last reference, action was deleted concurrently */ | 1184 | /* last reference, action was deleted concurrently */ |
1178 | module_put(ops->owner); | 1185 | module_put(ops->owner); |
@@ -1184,7 +1191,6 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[]) | |||
1184 | if (ret < 0) | 1191 | if (ret < 0) |
1185 | return ret; | 1192 | return ret; |
1186 | } | 1193 | } |
1187 | actions[i] = NULL; | ||
1188 | } | 1194 | } |
1189 | return 0; | 1195 | return 0; |
1190 | } | 1196 | } |
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 196430aefe87..06a3d4801878 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c | |||
@@ -326,6 +326,20 @@ static int __add_metainfo(const struct tcf_meta_ops *ops, | |||
326 | return ret; | 326 | return ret; |
327 | } | 327 | } |
328 | 328 | ||
329 | static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops, | ||
330 | struct tcf_ife_info *ife, u32 metaid, | ||
331 | bool exists) | ||
332 | { | ||
333 | int ret; | ||
334 | |||
335 | if (!try_module_get(ops->owner)) | ||
336 | return -ENOENT; | ||
337 | ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists); | ||
338 | if (ret) | ||
339 | module_put(ops->owner); | ||
340 | return ret; | ||
341 | } | ||
342 | |||
329 | static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, | 343 | static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, |
330 | int len, bool exists) | 344 | int len, bool exists) |
331 | { | 345 | { |
@@ -349,7 +363,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists) | |||
349 | 363 | ||
350 | read_lock(&ife_mod_lock); | 364 | read_lock(&ife_mod_lock); |
351 | list_for_each_entry(o, &ifeoplist, list) { | 365 | list_for_each_entry(o, &ifeoplist, list) { |
352 | rc = __add_metainfo(o, ife, o->metaid, NULL, 0, true, exists); | 366 | rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists); |
353 | if (rc == 0) | 367 | if (rc == 0) |
354 | installed += 1; | 368 | installed += 1; |
355 | } | 369 | } |
@@ -400,7 +414,6 @@ static void _tcf_ife_cleanup(struct tc_action *a) | |||
400 | struct tcf_meta_info *e, *n; | 414 | struct tcf_meta_info *e, *n; |
401 | 415 | ||
402 | list_for_each_entry_safe(e, n, &ife->metalist, metalist) { | 416 | list_for_each_entry_safe(e, n, &ife->metalist, metalist) { |
403 | module_put(e->ops->owner); | ||
404 | list_del(&e->metalist); | 417 | list_del(&e->metalist); |
405 | if (e->metaval) { | 418 | if (e->metaval) { |
406 | if (e->ops->release) | 419 | if (e->ops->release) |
@@ -408,6 +421,7 @@ static void _tcf_ife_cleanup(struct tc_action *a) | |||
408 | else | 421 | else |
409 | kfree(e->metaval); | 422 | kfree(e->metaval); |
410 | } | 423 | } |
424 | module_put(e->ops->owner); | ||
411 | kfree(e); | 425 | kfree(e); |
412 | } | 426 | } |
413 | } | 427 | } |
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 107034070019..ad99a99f11f6 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
@@ -109,16 +109,18 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb, | |||
109 | { | 109 | { |
110 | struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX); | 110 | struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX); |
111 | 111 | ||
112 | if (!keys_start) | ||
113 | goto nla_failure; | ||
112 | for (; n > 0; n--) { | 114 | for (; n > 0; n--) { |
113 | struct nlattr *key_start; | 115 | struct nlattr *key_start; |
114 | 116 | ||
115 | key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX); | 117 | key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX); |
118 | if (!key_start) | ||
119 | goto nla_failure; | ||
116 | 120 | ||
117 | if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) || | 121 | if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) || |
118 | nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) { | 122 | nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) |
119 | nlmsg_trim(skb, keys_start); | 123 | goto nla_failure; |
120 | return -EINVAL; | ||
121 | } | ||
122 | 124 | ||
123 | nla_nest_end(skb, key_start); | 125 | nla_nest_end(skb, key_start); |
124 | 126 | ||
@@ -128,6 +130,9 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb, | |||
128 | nla_nest_end(skb, keys_start); | 130 | nla_nest_end(skb, keys_start); |
129 | 131 | ||
130 | return 0; | 132 | return 0; |
133 | nla_failure: | ||
134 | nla_nest_cancel(skb, keys_start); | ||
135 | return -EINVAL; | ||
131 | } | 136 | } |
132 | 137 | ||
133 | static int tcf_pedit_init(struct net *net, struct nlattr *nla, | 138 | static int tcf_pedit_init(struct net *net, struct nlattr *nla, |
@@ -418,7 +423,10 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, | |||
418 | opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind; | 423 | opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind; |
419 | 424 | ||
420 | if (p->tcfp_keys_ex) { | 425 | if (p->tcfp_keys_ex) { |
421 | tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys); | 426 | if (tcf_pedit_key_ex_dump(skb, |
427 | p->tcfp_keys_ex, | ||
428 | p->tcfp_nkeys)) | ||
429 | goto nla_put_failure; | ||
422 | 430 | ||
423 | if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt)) | 431 | if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt)) |
424 | goto nla_put_failure; | 432 | goto nla_put_failure; |
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 44e9c00657bc..6b67aa13d2dd 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c | |||
@@ -69,7 +69,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, | |||
69 | 69 | ||
70 | if (!exists) { | 70 | if (!exists) { |
71 | ret = tcf_idr_create(tn, parm->index, est, a, | 71 | ret = tcf_idr_create(tn, parm->index, est, a, |
72 | &act_sample_ops, bind, false); | 72 | &act_sample_ops, bind, true); |
73 | if (ret) { | 73 | if (ret) { |
74 | tcf_idr_cleanup(tn, parm->index); | 74 | tcf_idr_cleanup(tn, parm->index); |
75 | return ret; | 75 | return ret; |
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 420759153d5f..681f6f04e7da 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c | |||
@@ -317,7 +317,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, | |||
317 | &metadata->u.tun_info, | 317 | &metadata->u.tun_info, |
318 | opts_len, extack); | 318 | opts_len, extack); |
319 | if (ret < 0) | 319 | if (ret < 0) |
320 | goto err_out; | 320 | goto release_tun_meta; |
321 | } | 321 | } |
322 | 322 | ||
323 | metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX; | 323 | metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX; |
@@ -333,23 +333,24 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, | |||
333 | &act_tunnel_key_ops, bind, true); | 333 | &act_tunnel_key_ops, bind, true); |
334 | if (ret) { | 334 | if (ret) { |
335 | NL_SET_ERR_MSG(extack, "Cannot create TC IDR"); | 335 | NL_SET_ERR_MSG(extack, "Cannot create TC IDR"); |
336 | goto err_out; | 336 | goto release_tun_meta; |
337 | } | 337 | } |
338 | 338 | ||
339 | ret = ACT_P_CREATED; | 339 | ret = ACT_P_CREATED; |
340 | } else if (!ovr) { | 340 | } else if (!ovr) { |
341 | tcf_idr_release(*a, bind); | ||
342 | NL_SET_ERR_MSG(extack, "TC IDR already exists"); | 341 | NL_SET_ERR_MSG(extack, "TC IDR already exists"); |
343 | return -EEXIST; | 342 | ret = -EEXIST; |
343 | goto release_tun_meta; | ||
344 | } | 344 | } |
345 | 345 | ||
346 | t = to_tunnel_key(*a); | 346 | t = to_tunnel_key(*a); |
347 | 347 | ||
348 | params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); | 348 | params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); |
349 | if (unlikely(!params_new)) { | 349 | if (unlikely(!params_new)) { |
350 | tcf_idr_release(*a, bind); | ||
351 | NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters"); | 350 | NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters"); |
352 | return -ENOMEM; | 351 | ret = -ENOMEM; |
352 | exists = true; | ||
353 | goto release_tun_meta; | ||
353 | } | 354 | } |
354 | params_new->tcft_action = parm->t_action; | 355 | params_new->tcft_action = parm->t_action; |
355 | params_new->tcft_enc_metadata = metadata; | 356 | params_new->tcft_enc_metadata = metadata; |
@@ -367,6 +368,9 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, | |||
367 | 368 | ||
368 | return ret; | 369 | return ret; |
369 | 370 | ||
371 | release_tun_meta: | ||
372 | dst_release(&metadata->dst); | ||
373 | |||
370 | err_out: | 374 | err_out: |
371 | if (exists) | 375 | if (exists) |
372 | tcf_idr_release(*a, bind); | 376 | tcf_idr_release(*a, bind); |
@@ -408,8 +412,10 @@ static int tunnel_key_geneve_opts_dump(struct sk_buff *skb, | |||
408 | nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE, | 412 | nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE, |
409 | opt->type) || | 413 | opt->type) || |
410 | nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA, | 414 | nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA, |
411 | opt->length * 4, opt + 1)) | 415 | opt->length * 4, opt + 1)) { |
416 | nla_nest_cancel(skb, start); | ||
412 | return -EMSGSIZE; | 417 | return -EMSGSIZE; |
418 | } | ||
413 | 419 | ||
414 | len -= sizeof(struct geneve_opt) + opt->length * 4; | 420 | len -= sizeof(struct geneve_opt) + opt->length * 4; |
415 | src += sizeof(struct geneve_opt) + opt->length * 4; | 421 | src += sizeof(struct geneve_opt) + opt->length * 4; |
@@ -423,7 +429,7 @@ static int tunnel_key_opts_dump(struct sk_buff *skb, | |||
423 | const struct ip_tunnel_info *info) | 429 | const struct ip_tunnel_info *info) |
424 | { | 430 | { |
425 | struct nlattr *start; | 431 | struct nlattr *start; |
426 | int err; | 432 | int err = -EINVAL; |
427 | 433 | ||
428 | if (!info->options_len) | 434 | if (!info->options_len) |
429 | return 0; | 435 | return 0; |
@@ -435,9 +441,11 @@ static int tunnel_key_opts_dump(struct sk_buff *skb, | |||
435 | if (info->key.tun_flags & TUNNEL_GENEVE_OPT) { | 441 | if (info->key.tun_flags & TUNNEL_GENEVE_OPT) { |
436 | err = tunnel_key_geneve_opts_dump(skb, info); | 442 | err = tunnel_key_geneve_opts_dump(skb, info); |
437 | if (err) | 443 | if (err) |
438 | return err; | 444 | goto err_out; |
439 | } else { | 445 | } else { |
440 | return -EINVAL; | 446 | err_out: |
447 | nla_nest_cancel(skb, start); | ||
448 | return err; | ||
441 | } | 449 | } |
442 | 450 | ||
443 | nla_nest_end(skb, start); | 451 | nla_nest_end(skb, start); |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 31bd1439cf60..0a75cb2e5e7b 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -1252,7 +1252,7 @@ replay: | |||
1252 | } | 1252 | } |
1253 | chain = tcf_chain_get(block, chain_index, true); | 1253 | chain = tcf_chain_get(block, chain_index, true); |
1254 | if (!chain) { | 1254 | if (!chain) { |
1255 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); | 1255 | NL_SET_ERR_MSG(extack, "Cannot create specified filter chain"); |
1256 | err = -ENOMEM; | 1256 | err = -ENOMEM; |
1257 | goto errout; | 1257 | goto errout; |
1258 | } | 1258 | } |
@@ -1399,7 +1399,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, | |||
1399 | goto errout; | 1399 | goto errout; |
1400 | } | 1400 | } |
1401 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); | 1401 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); |
1402 | err = -EINVAL; | 1402 | err = -ENOENT; |
1403 | goto errout; | 1403 | goto errout; |
1404 | } | 1404 | } |
1405 | 1405 | ||
@@ -1902,6 +1902,8 @@ replay: | |||
1902 | RTM_NEWCHAIN, false); | 1902 | RTM_NEWCHAIN, false); |
1903 | break; | 1903 | break; |
1904 | case RTM_DELCHAIN: | 1904 | case RTM_DELCHAIN: |
1905 | tfilter_notify_chain(net, skb, block, q, parent, n, | ||
1906 | chain, RTM_DELTFILTER); | ||
1905 | /* Flush the chain first as the user requested chain removal. */ | 1907 | /* Flush the chain first as the user requested chain removal. */ |
1906 | tcf_chain_flush(chain); | 1908 | tcf_chain_flush(chain); |
1907 | /* In case the chain was successfully deleted, put a reference | 1909 | /* In case the chain was successfully deleted, put a reference |
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index ef5c9a82d4e8..a644292f9faf 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
@@ -215,7 +215,6 @@ static const struct seq_operations sctp_eps_ops = { | |||
215 | struct sctp_ht_iter { | 215 | struct sctp_ht_iter { |
216 | struct seq_net_private p; | 216 | struct seq_net_private p; |
217 | struct rhashtable_iter hti; | 217 | struct rhashtable_iter hti; |
218 | int start_fail; | ||
219 | }; | 218 | }; |
220 | 219 | ||
221 | static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) | 220 | static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) |
@@ -224,7 +223,6 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) | |||
224 | 223 | ||
225 | sctp_transport_walk_start(&iter->hti); | 224 | sctp_transport_walk_start(&iter->hti); |
226 | 225 | ||
227 | iter->start_fail = 0; | ||
228 | return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); | 226 | return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); |
229 | } | 227 | } |
230 | 228 | ||
@@ -232,8 +230,6 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v) | |||
232 | { | 230 | { |
233 | struct sctp_ht_iter *iter = seq->private; | 231 | struct sctp_ht_iter *iter = seq->private; |
234 | 232 | ||
235 | if (iter->start_fail) | ||
236 | return; | ||
237 | sctp_transport_walk_stop(&iter->hti); | 233 | sctp_transport_walk_stop(&iter->hti); |
238 | } | 234 | } |
239 | 235 | ||
@@ -264,8 +260,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) | |||
264 | } | 260 | } |
265 | 261 | ||
266 | transport = (struct sctp_transport *)v; | 262 | transport = (struct sctp_transport *)v; |
267 | if (!sctp_transport_hold(transport)) | ||
268 | return 0; | ||
269 | assoc = transport->asoc; | 263 | assoc = transport->asoc; |
270 | epb = &assoc->base; | 264 | epb = &assoc->base; |
271 | sk = epb->sk; | 265 | sk = epb->sk; |
@@ -322,8 +316,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) | |||
322 | } | 316 | } |
323 | 317 | ||
324 | transport = (struct sctp_transport *)v; | 318 | transport = (struct sctp_transport *)v; |
325 | if (!sctp_transport_hold(transport)) | ||
326 | return 0; | ||
327 | assoc = transport->asoc; | 319 | assoc = transport->asoc; |
328 | 320 | ||
329 | list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list, | 321 | list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list, |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index e96b15a66aba..f73e9d38d5ba 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -2658,20 +2658,23 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, | |||
2658 | } | 2658 | } |
2659 | 2659 | ||
2660 | if (params->spp_flags & SPP_IPV6_FLOWLABEL) { | 2660 | if (params->spp_flags & SPP_IPV6_FLOWLABEL) { |
2661 | if (trans && trans->ipaddr.sa.sa_family == AF_INET6) { | 2661 | if (trans) { |
2662 | trans->flowlabel = params->spp_ipv6_flowlabel & | 2662 | if (trans->ipaddr.sa.sa_family == AF_INET6) { |
2663 | SCTP_FLOWLABEL_VAL_MASK; | ||
2664 | trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; | ||
2665 | } else if (asoc) { | ||
2666 | list_for_each_entry(trans, | ||
2667 | &asoc->peer.transport_addr_list, | ||
2668 | transports) { | ||
2669 | if (trans->ipaddr.sa.sa_family != AF_INET6) | ||
2670 | continue; | ||
2671 | trans->flowlabel = params->spp_ipv6_flowlabel & | 2663 | trans->flowlabel = params->spp_ipv6_flowlabel & |
2672 | SCTP_FLOWLABEL_VAL_MASK; | 2664 | SCTP_FLOWLABEL_VAL_MASK; |
2673 | trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; | 2665 | trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; |
2674 | } | 2666 | } |
2667 | } else if (asoc) { | ||
2668 | struct sctp_transport *t; | ||
2669 | |||
2670 | list_for_each_entry(t, &asoc->peer.transport_addr_list, | ||
2671 | transports) { | ||
2672 | if (t->ipaddr.sa.sa_family != AF_INET6) | ||
2673 | continue; | ||
2674 | t->flowlabel = params->spp_ipv6_flowlabel & | ||
2675 | SCTP_FLOWLABEL_VAL_MASK; | ||
2676 | t->flowlabel |= SCTP_FLOWLABEL_SET_MASK; | ||
2677 | } | ||
2675 | asoc->flowlabel = params->spp_ipv6_flowlabel & | 2678 | asoc->flowlabel = params->spp_ipv6_flowlabel & |
2676 | SCTP_FLOWLABEL_VAL_MASK; | 2679 | SCTP_FLOWLABEL_VAL_MASK; |
2677 | asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK; | 2680 | asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK; |
@@ -2687,12 +2690,13 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, | |||
2687 | trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; | 2690 | trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; |
2688 | trans->dscp |= SCTP_DSCP_SET_MASK; | 2691 | trans->dscp |= SCTP_DSCP_SET_MASK; |
2689 | } else if (asoc) { | 2692 | } else if (asoc) { |
2690 | list_for_each_entry(trans, | 2693 | struct sctp_transport *t; |
2691 | &asoc->peer.transport_addr_list, | 2694 | |
2695 | list_for_each_entry(t, &asoc->peer.transport_addr_list, | ||
2692 | transports) { | 2696 | transports) { |
2693 | trans->dscp = params->spp_dscp & | 2697 | t->dscp = params->spp_dscp & |
2694 | SCTP_DSCP_VAL_MASK; | 2698 | SCTP_DSCP_VAL_MASK; |
2695 | trans->dscp |= SCTP_DSCP_SET_MASK; | 2699 | t->dscp |= SCTP_DSCP_SET_MASK; |
2696 | } | 2700 | } |
2697 | asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; | 2701 | asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; |
2698 | asoc->dscp |= SCTP_DSCP_SET_MASK; | 2702 | asoc->dscp |= SCTP_DSCP_SET_MASK; |
@@ -5005,9 +5009,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net, | |||
5005 | break; | 5009 | break; |
5006 | } | 5010 | } |
5007 | 5011 | ||
5012 | if (!sctp_transport_hold(t)) | ||
5013 | continue; | ||
5014 | |||
5008 | if (net_eq(sock_net(t->asoc->base.sk), net) && | 5015 | if (net_eq(sock_net(t->asoc->base.sk), net) && |
5009 | t->asoc->peer.primary_path == t) | 5016 | t->asoc->peer.primary_path == t) |
5010 | break; | 5017 | break; |
5018 | |||
5019 | sctp_transport_put(t); | ||
5011 | } | 5020 | } |
5012 | 5021 | ||
5013 | return t; | 5022 | return t; |
@@ -5017,13 +5026,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net, | |||
5017 | struct rhashtable_iter *iter, | 5026 | struct rhashtable_iter *iter, |
5018 | int pos) | 5027 | int pos) |
5019 | { | 5028 | { |
5020 | void *obj = SEQ_START_TOKEN; | 5029 | struct sctp_transport *t; |
5021 | 5030 | ||
5022 | while (pos && (obj = sctp_transport_get_next(net, iter)) && | 5031 | if (!pos) |
5023 | !IS_ERR(obj)) | 5032 | return SEQ_START_TOKEN; |
5024 | pos--; | ||
5025 | 5033 | ||
5026 | return obj; | 5034 | while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) { |
5035 | if (!--pos) | ||
5036 | break; | ||
5037 | sctp_transport_put(t); | ||
5038 | } | ||
5039 | |||
5040 | return t; | ||
5027 | } | 5041 | } |
5028 | 5042 | ||
5029 | int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), | 5043 | int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), |
@@ -5082,8 +5096,6 @@ again: | |||
5082 | 5096 | ||
5083 | tsp = sctp_transport_get_idx(net, &hti, *pos + 1); | 5097 | tsp = sctp_transport_get_idx(net, &hti, *pos + 1); |
5084 | for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) { | 5098 | for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) { |
5085 | if (!sctp_transport_hold(tsp)) | ||
5086 | continue; | ||
5087 | ret = cb(tsp, p); | 5099 | ret = cb(tsp, p); |
5088 | if (ret) | 5100 | if (ret) |
5089 | break; | 5101 | break; |
diff --git a/net/socket.c b/net/socket.c index e6945e318f02..01f3f8f32d6f 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -941,7 +941,8 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *)) | |||
941 | EXPORT_SYMBOL(dlci_ioctl_set); | 941 | EXPORT_SYMBOL(dlci_ioctl_set); |
942 | 942 | ||
943 | static long sock_do_ioctl(struct net *net, struct socket *sock, | 943 | static long sock_do_ioctl(struct net *net, struct socket *sock, |
944 | unsigned int cmd, unsigned long arg) | 944 | unsigned int cmd, unsigned long arg, |
945 | unsigned int ifreq_size) | ||
945 | { | 946 | { |
946 | int err; | 947 | int err; |
947 | void __user *argp = (void __user *)arg; | 948 | void __user *argp = (void __user *)arg; |
@@ -967,11 +968,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock, | |||
967 | } else { | 968 | } else { |
968 | struct ifreq ifr; | 969 | struct ifreq ifr; |
969 | bool need_copyout; | 970 | bool need_copyout; |
970 | if (copy_from_user(&ifr, argp, sizeof(struct ifreq))) | 971 | if (copy_from_user(&ifr, argp, ifreq_size)) |
971 | return -EFAULT; | 972 | return -EFAULT; |
972 | err = dev_ioctl(net, cmd, &ifr, &need_copyout); | 973 | err = dev_ioctl(net, cmd, &ifr, &need_copyout); |
973 | if (!err && need_copyout) | 974 | if (!err && need_copyout) |
974 | if (copy_to_user(argp, &ifr, sizeof(struct ifreq))) | 975 | if (copy_to_user(argp, &ifr, ifreq_size)) |
975 | return -EFAULT; | 976 | return -EFAULT; |
976 | } | 977 | } |
977 | return err; | 978 | return err; |
@@ -1070,7 +1071,8 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
1070 | err = open_related_ns(&net->ns, get_net_ns); | 1071 | err = open_related_ns(&net->ns, get_net_ns); |
1071 | break; | 1072 | break; |
1072 | default: | 1073 | default: |
1073 | err = sock_do_ioctl(net, sock, cmd, arg); | 1074 | err = sock_do_ioctl(net, sock, cmd, arg, |
1075 | sizeof(struct ifreq)); | ||
1074 | break; | 1076 | break; |
1075 | } | 1077 | } |
1076 | return err; | 1078 | return err; |
@@ -2750,7 +2752,8 @@ static int do_siocgstamp(struct net *net, struct socket *sock, | |||
2750 | int err; | 2752 | int err; |
2751 | 2753 | ||
2752 | set_fs(KERNEL_DS); | 2754 | set_fs(KERNEL_DS); |
2753 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); | 2755 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv, |
2756 | sizeof(struct compat_ifreq)); | ||
2754 | set_fs(old_fs); | 2757 | set_fs(old_fs); |
2755 | if (!err) | 2758 | if (!err) |
2756 | err = compat_put_timeval(&ktv, up); | 2759 | err = compat_put_timeval(&ktv, up); |
@@ -2766,7 +2769,8 @@ static int do_siocgstampns(struct net *net, struct socket *sock, | |||
2766 | int err; | 2769 | int err; |
2767 | 2770 | ||
2768 | set_fs(KERNEL_DS); | 2771 | set_fs(KERNEL_DS); |
2769 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); | 2772 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts, |
2773 | sizeof(struct compat_ifreq)); | ||
2770 | set_fs(old_fs); | 2774 | set_fs(old_fs); |
2771 | if (!err) | 2775 | if (!err) |
2772 | err = compat_put_timespec(&kts, up); | 2776 | err = compat_put_timespec(&kts, up); |
@@ -3072,7 +3076,8 @@ static int routing_ioctl(struct net *net, struct socket *sock, | |||
3072 | } | 3076 | } |
3073 | 3077 | ||
3074 | set_fs(KERNEL_DS); | 3078 | set_fs(KERNEL_DS); |
3075 | ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); | 3079 | ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r, |
3080 | sizeof(struct compat_ifreq)); | ||
3076 | set_fs(old_fs); | 3081 | set_fs(old_fs); |
3077 | 3082 | ||
3078 | out: | 3083 | out: |
@@ -3185,7 +3190,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, | |||
3185 | case SIOCBONDSETHWADDR: | 3190 | case SIOCBONDSETHWADDR: |
3186 | case SIOCBONDCHANGEACTIVE: | 3191 | case SIOCBONDCHANGEACTIVE: |
3187 | case SIOCGIFNAME: | 3192 | case SIOCGIFNAME: |
3188 | return sock_do_ioctl(net, sock, cmd, arg); | 3193 | return sock_do_ioctl(net, sock, cmd, arg, |
3194 | sizeof(struct compat_ifreq)); | ||
3189 | } | 3195 | } |
3190 | 3196 | ||
3191 | return -ENOIOCTLCMD; | 3197 | return -ENOIOCTLCMD; |
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 9ee6cfea56dd..d8026543bf4c 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
@@ -51,12 +51,12 @@ const char tipc_bclink_name[] = "broadcast-link"; | |||
51 | * struct tipc_bc_base - base structure for keeping broadcast send state | 51 | * struct tipc_bc_base - base structure for keeping broadcast send state |
52 | * @link: broadcast send link structure | 52 | * @link: broadcast send link structure |
53 | * @inputq: data input queue; will only carry SOCK_WAKEUP messages | 53 | * @inputq: data input queue; will only carry SOCK_WAKEUP messages |
54 | * @dest: array keeping number of reachable destinations per bearer | 54 | * @dests: array keeping number of reachable destinations per bearer |
55 | * @primary_bearer: a bearer having links to all broadcast destinations, if any | 55 | * @primary_bearer: a bearer having links to all broadcast destinations, if any |
56 | * @bcast_support: indicates if primary bearer, if any, supports broadcast | 56 | * @bcast_support: indicates if primary bearer, if any, supports broadcast |
57 | * @rcast_support: indicates if all peer nodes support replicast | 57 | * @rcast_support: indicates if all peer nodes support replicast |
58 | * @rc_ratio: dest count as percentage of cluster size where send method changes | 58 | * @rc_ratio: dest count as percentage of cluster size where send method changes |
59 | * @bc_threshold: calculated drom rc_ratio; if dests > threshold use broadcast | 59 | * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast |
60 | */ | 60 | */ |
61 | struct tipc_bc_base { | 61 | struct tipc_bc_base { |
62 | struct tipc_link *link; | 62 | struct tipc_link *link; |
diff --git a/net/tipc/diag.c b/net/tipc/diag.c index aaabb0b776dd..73137f4aeb68 100644 --- a/net/tipc/diag.c +++ b/net/tipc/diag.c | |||
@@ -84,7 +84,9 @@ static int tipc_sock_diag_handler_dump(struct sk_buff *skb, | |||
84 | 84 | ||
85 | if (h->nlmsg_flags & NLM_F_DUMP) { | 85 | if (h->nlmsg_flags & NLM_F_DUMP) { |
86 | struct netlink_dump_control c = { | 86 | struct netlink_dump_control c = { |
87 | .start = tipc_dump_start, | ||
87 | .dump = tipc_diag_dump, | 88 | .dump = tipc_diag_dump, |
89 | .done = tipc_dump_done, | ||
88 | }; | 90 | }; |
89 | netlink_dump_start(net->diag_nlsk, skb, h, &c); | 91 | netlink_dump_start(net->diag_nlsk, skb, h, &c); |
90 | return 0; | 92 | return 0; |
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 88f027b502f6..66d5b2c5987a 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
@@ -980,20 +980,17 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
980 | 980 | ||
981 | struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port) | 981 | struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port) |
982 | { | 982 | { |
983 | u64 value = (u64)node << 32 | port; | ||
984 | struct tipc_dest *dst; | 983 | struct tipc_dest *dst; |
985 | 984 | ||
986 | list_for_each_entry(dst, l, list) { | 985 | list_for_each_entry(dst, l, list) { |
987 | if (dst->value != value) | 986 | if (dst->node == node && dst->port == port) |
988 | continue; | 987 | return dst; |
989 | return dst; | ||
990 | } | 988 | } |
991 | return NULL; | 989 | return NULL; |
992 | } | 990 | } |
993 | 991 | ||
994 | bool tipc_dest_push(struct list_head *l, u32 node, u32 port) | 992 | bool tipc_dest_push(struct list_head *l, u32 node, u32 port) |
995 | { | 993 | { |
996 | u64 value = (u64)node << 32 | port; | ||
997 | struct tipc_dest *dst; | 994 | struct tipc_dest *dst; |
998 | 995 | ||
999 | if (tipc_dest_find(l, node, port)) | 996 | if (tipc_dest_find(l, node, port)) |
@@ -1002,7 +999,8 @@ bool tipc_dest_push(struct list_head *l, u32 node, u32 port) | |||
1002 | dst = kmalloc(sizeof(*dst), GFP_ATOMIC); | 999 | dst = kmalloc(sizeof(*dst), GFP_ATOMIC); |
1003 | if (unlikely(!dst)) | 1000 | if (unlikely(!dst)) |
1004 | return false; | 1001 | return false; |
1005 | dst->value = value; | 1002 | dst->node = node; |
1003 | dst->port = port; | ||
1006 | list_add(&dst->list, l); | 1004 | list_add(&dst->list, l); |
1007 | return true; | 1005 | return true; |
1008 | } | 1006 | } |
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 0febba41da86..892bd750b85f 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h | |||
@@ -133,13 +133,8 @@ void tipc_nametbl_stop(struct net *net); | |||
133 | 133 | ||
134 | struct tipc_dest { | 134 | struct tipc_dest { |
135 | struct list_head list; | 135 | struct list_head list; |
136 | union { | 136 | u32 port; |
137 | struct { | 137 | u32 node; |
138 | u32 port; | ||
139 | u32 node; | ||
140 | }; | ||
141 | u64 value; | ||
142 | }; | ||
143 | }; | 138 | }; |
144 | 139 | ||
145 | struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port); | 140 | struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port); |
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index 6ff2254088f6..99ee419210ba 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c | |||
@@ -167,7 +167,9 @@ static const struct genl_ops tipc_genl_v2_ops[] = { | |||
167 | }, | 167 | }, |
168 | { | 168 | { |
169 | .cmd = TIPC_NL_SOCK_GET, | 169 | .cmd = TIPC_NL_SOCK_GET, |
170 | .start = tipc_dump_start, | ||
170 | .dumpit = tipc_nl_sk_dump, | 171 | .dumpit = tipc_nl_sk_dump, |
172 | .done = tipc_dump_done, | ||
171 | .policy = tipc_nl_policy, | 173 | .policy = tipc_nl_policy, |
172 | }, | 174 | }, |
173 | { | 175 | { |
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index a2f76743c73a..6376467e78f8 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c | |||
@@ -185,6 +185,10 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, | |||
185 | return -ENOMEM; | 185 | return -ENOMEM; |
186 | 186 | ||
187 | buf->sk = msg->dst_sk; | 187 | buf->sk = msg->dst_sk; |
188 | if (__tipc_dump_start(&cb, msg->net)) { | ||
189 | kfree_skb(buf); | ||
190 | return -ENOMEM; | ||
191 | } | ||
188 | 192 | ||
189 | do { | 193 | do { |
190 | int rem; | 194 | int rem; |
@@ -216,6 +220,7 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, | |||
216 | err = 0; | 220 | err = 0; |
217 | 221 | ||
218 | err_out: | 222 | err_out: |
223 | tipc_dump_done(&cb); | ||
219 | kfree_skb(buf); | 224 | kfree_skb(buf); |
220 | 225 | ||
221 | if (err == -EMSGSIZE) { | 226 | if (err == -EMSGSIZE) { |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index c1e93c9515bc..3f03ddd0e35b 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -576,6 +576,7 @@ static int tipc_release(struct socket *sock) | |||
576 | sk_stop_timer(sk, &sk->sk_timer); | 576 | sk_stop_timer(sk, &sk->sk_timer); |
577 | tipc_sk_remove(tsk); | 577 | tipc_sk_remove(tsk); |
578 | 578 | ||
579 | sock_orphan(sk); | ||
579 | /* Reject any messages that accumulated in backlog queue */ | 580 | /* Reject any messages that accumulated in backlog queue */ |
580 | release_sock(sk); | 581 | release_sock(sk); |
581 | tipc_dest_list_purge(&tsk->cong_links); | 582 | tipc_dest_list_purge(&tsk->cong_links); |
@@ -2672,6 +2673,8 @@ void tipc_sk_reinit(struct net *net) | |||
2672 | 2673 | ||
2673 | rhashtable_walk_stop(&iter); | 2674 | rhashtable_walk_stop(&iter); |
2674 | } while (tsk == ERR_PTR(-EAGAIN)); | 2675 | } while (tsk == ERR_PTR(-EAGAIN)); |
2676 | |||
2677 | rhashtable_walk_exit(&iter); | ||
2675 | } | 2678 | } |
2676 | 2679 | ||
2677 | static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) | 2680 | static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) |
@@ -3227,45 +3230,74 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, | |||
3227 | struct netlink_callback *cb, | 3230 | struct netlink_callback *cb, |
3228 | struct tipc_sock *tsk)) | 3231 | struct tipc_sock *tsk)) |
3229 | { | 3232 | { |
3230 | struct net *net = sock_net(skb->sk); | 3233 | struct rhashtable_iter *iter = (void *)cb->args[4]; |
3231 | struct tipc_net *tn = tipc_net(net); | ||
3232 | const struct bucket_table *tbl; | ||
3233 | u32 prev_portid = cb->args[1]; | ||
3234 | u32 tbl_id = cb->args[0]; | ||
3235 | struct rhash_head *pos; | ||
3236 | struct tipc_sock *tsk; | 3234 | struct tipc_sock *tsk; |
3237 | int err; | 3235 | int err; |
3238 | 3236 | ||
3239 | rcu_read_lock(); | 3237 | rhashtable_walk_start(iter); |
3240 | tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); | 3238 | while ((tsk = rhashtable_walk_next(iter)) != NULL) { |
3241 | for (; tbl_id < tbl->size; tbl_id++) { | 3239 | if (IS_ERR(tsk)) { |
3242 | rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) { | 3240 | err = PTR_ERR(tsk); |
3243 | spin_lock_bh(&tsk->sk.sk_lock.slock); | 3241 | if (err == -EAGAIN) { |
3244 | if (prev_portid && prev_portid != tsk->portid) { | 3242 | err = 0; |
3245 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | ||
3246 | continue; | 3243 | continue; |
3247 | } | 3244 | } |
3245 | break; | ||
3246 | } | ||
3248 | 3247 | ||
3249 | err = skb_handler(skb, cb, tsk); | 3248 | sock_hold(&tsk->sk); |
3250 | if (err) { | 3249 | rhashtable_walk_stop(iter); |
3251 | prev_portid = tsk->portid; | 3250 | lock_sock(&tsk->sk); |
3252 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | 3251 | err = skb_handler(skb, cb, tsk); |
3253 | goto out; | 3252 | if (err) { |
3254 | } | 3253 | release_sock(&tsk->sk); |
3255 | 3254 | sock_put(&tsk->sk); | |
3256 | prev_portid = 0; | 3255 | goto out; |
3257 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | ||
3258 | } | 3256 | } |
3257 | release_sock(&tsk->sk); | ||
3258 | rhashtable_walk_start(iter); | ||
3259 | sock_put(&tsk->sk); | ||
3259 | } | 3260 | } |
3261 | rhashtable_walk_stop(iter); | ||
3260 | out: | 3262 | out: |
3261 | rcu_read_unlock(); | ||
3262 | cb->args[0] = tbl_id; | ||
3263 | cb->args[1] = prev_portid; | ||
3264 | |||
3265 | return skb->len; | 3263 | return skb->len; |
3266 | } | 3264 | } |
3267 | EXPORT_SYMBOL(tipc_nl_sk_walk); | 3265 | EXPORT_SYMBOL(tipc_nl_sk_walk); |
3268 | 3266 | ||
3267 | int tipc_dump_start(struct netlink_callback *cb) | ||
3268 | { | ||
3269 | return __tipc_dump_start(cb, sock_net(cb->skb->sk)); | ||
3270 | } | ||
3271 | EXPORT_SYMBOL(tipc_dump_start); | ||
3272 | |||
3273 | int __tipc_dump_start(struct netlink_callback *cb, struct net *net) | ||
3274 | { | ||
3275 | /* tipc_nl_name_table_dump() uses cb->args[0...3]. */ | ||
3276 | struct rhashtable_iter *iter = (void *)cb->args[4]; | ||
3277 | struct tipc_net *tn = tipc_net(net); | ||
3278 | |||
3279 | if (!iter) { | ||
3280 | iter = kmalloc(sizeof(*iter), GFP_KERNEL); | ||
3281 | if (!iter) | ||
3282 | return -ENOMEM; | ||
3283 | |||
3284 | cb->args[4] = (long)iter; | ||
3285 | } | ||
3286 | |||
3287 | rhashtable_walk_enter(&tn->sk_rht, iter); | ||
3288 | return 0; | ||
3289 | } | ||
3290 | |||
3291 | int tipc_dump_done(struct netlink_callback *cb) | ||
3292 | { | ||
3293 | struct rhashtable_iter *hti = (void *)cb->args[4]; | ||
3294 | |||
3295 | rhashtable_walk_exit(hti); | ||
3296 | kfree(hti); | ||
3297 | return 0; | ||
3298 | } | ||
3299 | EXPORT_SYMBOL(tipc_dump_done); | ||
3300 | |||
3269 | int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, | 3301 | int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, |
3270 | struct tipc_sock *tsk, u32 sk_filter_state, | 3302 | struct tipc_sock *tsk, u32 sk_filter_state, |
3271 | u64 (*tipc_diag_gen_cookie)(struct sock *sk)) | 3303 | u64 (*tipc_diag_gen_cookie)(struct sock *sk)) |
diff --git a/net/tipc/socket.h b/net/tipc/socket.h index aff9b2ae5a1f..5e575f205afe 100644 --- a/net/tipc/socket.h +++ b/net/tipc/socket.h | |||
@@ -68,4 +68,7 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, | |||
68 | int (*skb_handler)(struct sk_buff *skb, | 68 | int (*skb_handler)(struct sk_buff *skb, |
69 | struct netlink_callback *cb, | 69 | struct netlink_callback *cb, |
70 | struct tipc_sock *tsk)); | 70 | struct tipc_sock *tsk)); |
71 | int tipc_dump_start(struct netlink_callback *cb); | ||
72 | int __tipc_dump_start(struct netlink_callback *cb, struct net *net); | ||
73 | int tipc_dump_done(struct netlink_callback *cb); | ||
71 | #endif | 74 | #endif |
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index c8e34ef22c30..2627b5d812e9 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c | |||
@@ -313,8 +313,8 @@ static void tipc_conn_send_work(struct work_struct *work) | |||
313 | conn_put(con); | 313 | conn_put(con); |
314 | } | 314 | } |
315 | 315 | ||
316 | /* tipc_conn_queue_evt() - interrupt level call from a subscription instance | 316 | /* tipc_topsrv_queue_evt() - interrupt level call from a subscription instance |
317 | * The queued work is launched into tipc_send_work()->tipc_send_to_sock() | 317 | * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock() |
318 | */ | 318 | */ |
319 | void tipc_topsrv_queue_evt(struct net *net, int conid, | 319 | void tipc_topsrv_queue_evt(struct net *net, int conid, |
320 | u32 event, struct tipc_event *evt) | 320 | u32 event, struct tipc_event *evt) |
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 292742e50bfa..961b07d4d41c 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c | |||
@@ -686,7 +686,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) | |||
686 | goto free_marker_record; | 686 | goto free_marker_record; |
687 | } | 687 | } |
688 | 688 | ||
689 | crypto_info = &ctx->crypto_send; | 689 | crypto_info = &ctx->crypto_send.info; |
690 | switch (crypto_info->cipher_type) { | 690 | switch (crypto_info->cipher_type) { |
691 | case TLS_CIPHER_AES_GCM_128: | 691 | case TLS_CIPHER_AES_GCM_128: |
692 | nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE; | 692 | nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE; |
@@ -780,7 +780,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) | |||
780 | 780 | ||
781 | ctx->priv_ctx_tx = offload_ctx; | 781 | ctx->priv_ctx_tx = offload_ctx; |
782 | rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX, | 782 | rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX, |
783 | &ctx->crypto_send, | 783 | &ctx->crypto_send.info, |
784 | tcp_sk(sk)->write_seq); | 784 | tcp_sk(sk)->write_seq); |
785 | if (rc) | 785 | if (rc) |
786 | goto release_netdev; | 786 | goto release_netdev; |
@@ -862,7 +862,7 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) | |||
862 | goto release_ctx; | 862 | goto release_ctx; |
863 | 863 | ||
864 | rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX, | 864 | rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX, |
865 | &ctx->crypto_recv, | 865 | &ctx->crypto_recv.info, |
866 | tcp_sk(sk)->copied_seq); | 866 | tcp_sk(sk)->copied_seq); |
867 | if (rc) { | 867 | if (rc) { |
868 | pr_err_ratelimited("%s: The netdev has refused to offload this socket\n", | 868 | pr_err_ratelimited("%s: The netdev has refused to offload this socket\n", |
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c index 6102169239d1..450a6dbc5a88 100644 --- a/net/tls/tls_device_fallback.c +++ b/net/tls/tls_device_fallback.c | |||
@@ -320,7 +320,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx, | |||
320 | goto free_req; | 320 | goto free_req; |
321 | 321 | ||
322 | iv = buf; | 322 | iv = buf; |
323 | memcpy(iv, tls_ctx->crypto_send_aes_gcm_128.salt, | 323 | memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt, |
324 | TLS_CIPHER_AES_GCM_128_SALT_SIZE); | 324 | TLS_CIPHER_AES_GCM_128_SALT_SIZE); |
325 | aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE + | 325 | aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE + |
326 | TLS_CIPHER_AES_GCM_128_IV_SIZE; | 326 | TLS_CIPHER_AES_GCM_128_IV_SIZE; |
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 180b6640e531..523622dc74f8 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c | |||
@@ -241,6 +241,16 @@ static void tls_write_space(struct sock *sk) | |||
241 | ctx->sk_write_space(sk); | 241 | ctx->sk_write_space(sk); |
242 | } | 242 | } |
243 | 243 | ||
244 | static void tls_ctx_free(struct tls_context *ctx) | ||
245 | { | ||
246 | if (!ctx) | ||
247 | return; | ||
248 | |||
249 | memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send)); | ||
250 | memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv)); | ||
251 | kfree(ctx); | ||
252 | } | ||
253 | |||
244 | static void tls_sk_proto_close(struct sock *sk, long timeout) | 254 | static void tls_sk_proto_close(struct sock *sk, long timeout) |
245 | { | 255 | { |
246 | struct tls_context *ctx = tls_get_ctx(sk); | 256 | struct tls_context *ctx = tls_get_ctx(sk); |
@@ -294,7 +304,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) | |||
294 | #else | 304 | #else |
295 | { | 305 | { |
296 | #endif | 306 | #endif |
297 | kfree(ctx); | 307 | tls_ctx_free(ctx); |
298 | ctx = NULL; | 308 | ctx = NULL; |
299 | } | 309 | } |
300 | 310 | ||
@@ -305,7 +315,7 @@ skip_tx_cleanup: | |||
305 | * for sk->sk_prot->unhash [tls_hw_unhash] | 315 | * for sk->sk_prot->unhash [tls_hw_unhash] |
306 | */ | 316 | */ |
307 | if (free_ctx) | 317 | if (free_ctx) |
308 | kfree(ctx); | 318 | tls_ctx_free(ctx); |
309 | } | 319 | } |
310 | 320 | ||
311 | static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, | 321 | static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, |
@@ -330,7 +340,7 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, | |||
330 | } | 340 | } |
331 | 341 | ||
332 | /* get user crypto info */ | 342 | /* get user crypto info */ |
333 | crypto_info = &ctx->crypto_send; | 343 | crypto_info = &ctx->crypto_send.info; |
334 | 344 | ||
335 | if (!TLS_CRYPTO_INFO_READY(crypto_info)) { | 345 | if (!TLS_CRYPTO_INFO_READY(crypto_info)) { |
336 | rc = -EBUSY; | 346 | rc = -EBUSY; |
@@ -417,9 +427,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, | |||
417 | } | 427 | } |
418 | 428 | ||
419 | if (tx) | 429 | if (tx) |
420 | crypto_info = &ctx->crypto_send; | 430 | crypto_info = &ctx->crypto_send.info; |
421 | else | 431 | else |
422 | crypto_info = &ctx->crypto_recv; | 432 | crypto_info = &ctx->crypto_recv.info; |
423 | 433 | ||
424 | /* Currently we don't support set crypto info more than one time */ | 434 | /* Currently we don't support set crypto info more than one time */ |
425 | if (TLS_CRYPTO_INFO_READY(crypto_info)) { | 435 | if (TLS_CRYPTO_INFO_READY(crypto_info)) { |
@@ -499,7 +509,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, | |||
499 | goto out; | 509 | goto out; |
500 | 510 | ||
501 | err_crypto_info: | 511 | err_crypto_info: |
502 | memset(crypto_info, 0, sizeof(*crypto_info)); | 512 | memzero_explicit(crypto_info, sizeof(union tls_crypto_context)); |
503 | out: | 513 | out: |
504 | return rc; | 514 | return rc; |
505 | } | 515 | } |
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 52fbe727d7c1..b9c6ecfbcfea 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c | |||
@@ -125,6 +125,9 @@ static int alloc_encrypted_sg(struct sock *sk, int len) | |||
125 | &ctx->sg_encrypted_num_elem, | 125 | &ctx->sg_encrypted_num_elem, |
126 | &ctx->sg_encrypted_size, 0); | 126 | &ctx->sg_encrypted_size, 0); |
127 | 127 | ||
128 | if (rc == -ENOSPC) | ||
129 | ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data); | ||
130 | |||
128 | return rc; | 131 | return rc; |
129 | } | 132 | } |
130 | 133 | ||
@@ -138,6 +141,9 @@ static int alloc_plaintext_sg(struct sock *sk, int len) | |||
138 | &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size, | 141 | &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size, |
139 | tls_ctx->pending_open_record_frags); | 142 | tls_ctx->pending_open_record_frags); |
140 | 143 | ||
144 | if (rc == -ENOSPC) | ||
145 | ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data); | ||
146 | |||
141 | return rc; | 147 | return rc; |
142 | } | 148 | } |
143 | 149 | ||
@@ -925,7 +931,15 @@ int tls_sw_recvmsg(struct sock *sk, | |||
925 | if (control != TLS_RECORD_TYPE_DATA) | 931 | if (control != TLS_RECORD_TYPE_DATA) |
926 | goto recv_end; | 932 | goto recv_end; |
927 | } | 933 | } |
934 | } else { | ||
935 | /* MSG_PEEK right now cannot look beyond current skb | ||
936 | * from strparser, meaning we cannot advance skb here | ||
937 | * and thus unpause strparser since we'd loose original | ||
938 | * one. | ||
939 | */ | ||
940 | break; | ||
928 | } | 941 | } |
942 | |||
929 | /* If we have a new message from strparser, continue now. */ | 943 | /* If we have a new message from strparser, continue now. */ |
930 | if (copied >= target && !ctx->recv_pkt) | 944 | if (copied >= target && !ctx->recv_pkt) |
931 | break; | 945 | break; |
@@ -1049,8 +1063,8 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb) | |||
1049 | goto read_failure; | 1063 | goto read_failure; |
1050 | } | 1064 | } |
1051 | 1065 | ||
1052 | if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) || | 1066 | if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) || |
1053 | header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) { | 1067 | header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) { |
1054 | ret = -EINVAL; | 1068 | ret = -EINVAL; |
1055 | goto read_failure; | 1069 | goto read_failure; |
1056 | } | 1070 | } |
@@ -1130,7 +1144,6 @@ void tls_sw_free_resources_rx(struct sock *sk) | |||
1130 | 1144 | ||
1131 | int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) | 1145 | int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) |
1132 | { | 1146 | { |
1133 | char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE]; | ||
1134 | struct tls_crypto_info *crypto_info; | 1147 | struct tls_crypto_info *crypto_info; |
1135 | struct tls12_crypto_info_aes_gcm_128 *gcm_128_info; | 1148 | struct tls12_crypto_info_aes_gcm_128 *gcm_128_info; |
1136 | struct tls_sw_context_tx *sw_ctx_tx = NULL; | 1149 | struct tls_sw_context_tx *sw_ctx_tx = NULL; |
@@ -1175,12 +1188,12 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) | |||
1175 | 1188 | ||
1176 | if (tx) { | 1189 | if (tx) { |
1177 | crypto_init_wait(&sw_ctx_tx->async_wait); | 1190 | crypto_init_wait(&sw_ctx_tx->async_wait); |
1178 | crypto_info = &ctx->crypto_send; | 1191 | crypto_info = &ctx->crypto_send.info; |
1179 | cctx = &ctx->tx; | 1192 | cctx = &ctx->tx; |
1180 | aead = &sw_ctx_tx->aead_send; | 1193 | aead = &sw_ctx_tx->aead_send; |
1181 | } else { | 1194 | } else { |
1182 | crypto_init_wait(&sw_ctx_rx->async_wait); | 1195 | crypto_init_wait(&sw_ctx_rx->async_wait); |
1183 | crypto_info = &ctx->crypto_recv; | 1196 | crypto_info = &ctx->crypto_recv.info; |
1184 | cctx = &ctx->rx; | 1197 | cctx = &ctx->rx; |
1185 | aead = &sw_ctx_rx->aead_recv; | 1198 | aead = &sw_ctx_rx->aead_recv; |
1186 | } | 1199 | } |
@@ -1259,9 +1272,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) | |||
1259 | 1272 | ||
1260 | ctx->push_pending_record = tls_sw_push_pending_record; | 1273 | ctx->push_pending_record = tls_sw_push_pending_record; |
1261 | 1274 | ||
1262 | memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); | 1275 | rc = crypto_aead_setkey(*aead, gcm_128_info->key, |
1263 | |||
1264 | rc = crypto_aead_setkey(*aead, keyval, | ||
1265 | TLS_CIPHER_AES_GCM_128_KEY_SIZE); | 1276 | TLS_CIPHER_AES_GCM_128_KEY_SIZE); |
1266 | if (rc) | 1277 | if (rc) |
1267 | goto free_aead; | 1278 | goto free_aead; |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 5fb9b7dd9831..4b8ec659e797 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -669,13 +669,13 @@ static int nl80211_msg_put_wmm_rules(struct sk_buff *msg, | |||
669 | goto nla_put_failure; | 669 | goto nla_put_failure; |
670 | 670 | ||
671 | if (nla_put_u16(msg, NL80211_WMMR_CW_MIN, | 671 | if (nla_put_u16(msg, NL80211_WMMR_CW_MIN, |
672 | rule->wmm_rule->client[j].cw_min) || | 672 | rule->wmm_rule.client[j].cw_min) || |
673 | nla_put_u16(msg, NL80211_WMMR_CW_MAX, | 673 | nla_put_u16(msg, NL80211_WMMR_CW_MAX, |
674 | rule->wmm_rule->client[j].cw_max) || | 674 | rule->wmm_rule.client[j].cw_max) || |
675 | nla_put_u8(msg, NL80211_WMMR_AIFSN, | 675 | nla_put_u8(msg, NL80211_WMMR_AIFSN, |
676 | rule->wmm_rule->client[j].aifsn) || | 676 | rule->wmm_rule.client[j].aifsn) || |
677 | nla_put_u8(msg, NL80211_WMMR_TXOP, | 677 | nla_put_u16(msg, NL80211_WMMR_TXOP, |
678 | rule->wmm_rule->client[j].cot)) | 678 | rule->wmm_rule.client[j].cot)) |
679 | goto nla_put_failure; | 679 | goto nla_put_failure; |
680 | 680 | ||
681 | nla_nest_end(msg, nl_wmm_rule); | 681 | nla_nest_end(msg, nl_wmm_rule); |
@@ -766,9 +766,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy, | |||
766 | 766 | ||
767 | if (large) { | 767 | if (large) { |
768 | const struct ieee80211_reg_rule *rule = | 768 | const struct ieee80211_reg_rule *rule = |
769 | freq_reg_info(wiphy, chan->center_freq); | 769 | freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq)); |
770 | 770 | ||
771 | if (!IS_ERR(rule) && rule->wmm_rule) { | 771 | if (!IS_ERR_OR_NULL(rule) && rule->has_wmm) { |
772 | if (nl80211_msg_put_wmm_rules(msg, rule)) | 772 | if (nl80211_msg_put_wmm_rules(msg, rule)) |
773 | goto nla_put_failure; | 773 | goto nla_put_failure; |
774 | } | 774 | } |
@@ -12205,6 +12205,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info) | |||
12205 | return -EOPNOTSUPP; | 12205 | return -EOPNOTSUPP; |
12206 | 12206 | ||
12207 | if (!info->attrs[NL80211_ATTR_MDID] || | 12207 | if (!info->attrs[NL80211_ATTR_MDID] || |
12208 | !info->attrs[NL80211_ATTR_IE] || | ||
12208 | !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) | 12209 | !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) |
12209 | return -EINVAL; | 12210 | return -EINVAL; |
12210 | 12211 | ||
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 4fc66a117b7d..2f702adf2912 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -425,36 +425,23 @@ static const struct ieee80211_regdomain * | |||
425 | reg_copy_regd(const struct ieee80211_regdomain *src_regd) | 425 | reg_copy_regd(const struct ieee80211_regdomain *src_regd) |
426 | { | 426 | { |
427 | struct ieee80211_regdomain *regd; | 427 | struct ieee80211_regdomain *regd; |
428 | int size_of_regd, size_of_wmms; | 428 | int size_of_regd; |
429 | unsigned int i; | 429 | unsigned int i; |
430 | struct ieee80211_wmm_rule *d_wmm, *s_wmm; | ||
431 | 430 | ||
432 | size_of_regd = | 431 | size_of_regd = |
433 | sizeof(struct ieee80211_regdomain) + | 432 | sizeof(struct ieee80211_regdomain) + |
434 | src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule); | 433 | src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule); |
435 | size_of_wmms = src_regd->n_wmm_rules * | ||
436 | sizeof(struct ieee80211_wmm_rule); | ||
437 | 434 | ||
438 | regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL); | 435 | regd = kzalloc(size_of_regd, GFP_KERNEL); |
439 | if (!regd) | 436 | if (!regd) |
440 | return ERR_PTR(-ENOMEM); | 437 | return ERR_PTR(-ENOMEM); |
441 | 438 | ||
442 | memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain)); | 439 | memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain)); |
443 | 440 | ||
444 | d_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); | 441 | for (i = 0; i < src_regd->n_reg_rules; i++) |
445 | s_wmm = (struct ieee80211_wmm_rule *)((u8 *)src_regd + size_of_regd); | ||
446 | memcpy(d_wmm, s_wmm, size_of_wmms); | ||
447 | |||
448 | for (i = 0; i < src_regd->n_reg_rules; i++) { | ||
449 | memcpy(®d->reg_rules[i], &src_regd->reg_rules[i], | 442 | memcpy(®d->reg_rules[i], &src_regd->reg_rules[i], |
450 | sizeof(struct ieee80211_reg_rule)); | 443 | sizeof(struct ieee80211_reg_rule)); |
451 | if (!src_regd->reg_rules[i].wmm_rule) | ||
452 | continue; | ||
453 | 444 | ||
454 | regd->reg_rules[i].wmm_rule = d_wmm + | ||
455 | (src_regd->reg_rules[i].wmm_rule - s_wmm) / | ||
456 | sizeof(struct ieee80211_wmm_rule); | ||
457 | } | ||
458 | return regd; | 445 | return regd; |
459 | } | 446 | } |
460 | 447 | ||
@@ -860,9 +847,10 @@ static bool valid_regdb(const u8 *data, unsigned int size) | |||
860 | return true; | 847 | return true; |
861 | } | 848 | } |
862 | 849 | ||
863 | static void set_wmm_rule(struct ieee80211_wmm_rule *rule, | 850 | static void set_wmm_rule(struct ieee80211_reg_rule *rrule, |
864 | struct fwdb_wmm_rule *wmm) | 851 | struct fwdb_wmm_rule *wmm) |
865 | { | 852 | { |
853 | struct ieee80211_wmm_rule *rule = &rrule->wmm_rule; | ||
866 | unsigned int i; | 854 | unsigned int i; |
867 | 855 | ||
868 | for (i = 0; i < IEEE80211_NUM_ACS; i++) { | 856 | for (i = 0; i < IEEE80211_NUM_ACS; i++) { |
@@ -876,11 +864,13 @@ static void set_wmm_rule(struct ieee80211_wmm_rule *rule, | |||
876 | rule->ap[i].aifsn = wmm->ap[i].aifsn; | 864 | rule->ap[i].aifsn = wmm->ap[i].aifsn; |
877 | rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot); | 865 | rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot); |
878 | } | 866 | } |
867 | |||
868 | rrule->has_wmm = true; | ||
879 | } | 869 | } |
880 | 870 | ||
881 | static int __regdb_query_wmm(const struct fwdb_header *db, | 871 | static int __regdb_query_wmm(const struct fwdb_header *db, |
882 | const struct fwdb_country *country, int freq, | 872 | const struct fwdb_country *country, int freq, |
883 | u32 *dbptr, struct ieee80211_wmm_rule *rule) | 873 | struct ieee80211_reg_rule *rule) |
884 | { | 874 | { |
885 | unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; | 875 | unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; |
886 | struct fwdb_collection *coll = (void *)((u8 *)db + ptr); | 876 | struct fwdb_collection *coll = (void *)((u8 *)db + ptr); |
@@ -901,8 +891,6 @@ static int __regdb_query_wmm(const struct fwdb_header *db, | |||
901 | wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2; | 891 | wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2; |
902 | wmm = (void *)((u8 *)db + wmm_ptr); | 892 | wmm = (void *)((u8 *)db + wmm_ptr); |
903 | set_wmm_rule(rule, wmm); | 893 | set_wmm_rule(rule, wmm); |
904 | if (dbptr) | ||
905 | *dbptr = wmm_ptr; | ||
906 | return 0; | 894 | return 0; |
907 | } | 895 | } |
908 | } | 896 | } |
@@ -910,8 +898,7 @@ static int __regdb_query_wmm(const struct fwdb_header *db, | |||
910 | return -ENODATA; | 898 | return -ENODATA; |
911 | } | 899 | } |
912 | 900 | ||
913 | int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr, | 901 | int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule) |
914 | struct ieee80211_wmm_rule *rule) | ||
915 | { | 902 | { |
916 | const struct fwdb_header *hdr = regdb; | 903 | const struct fwdb_header *hdr = regdb; |
917 | const struct fwdb_country *country; | 904 | const struct fwdb_country *country; |
@@ -925,8 +912,7 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr, | |||
925 | country = &hdr->country[0]; | 912 | country = &hdr->country[0]; |
926 | while (country->coll_ptr) { | 913 | while (country->coll_ptr) { |
927 | if (alpha2_equal(alpha2, country->alpha2)) | 914 | if (alpha2_equal(alpha2, country->alpha2)) |
928 | return __regdb_query_wmm(regdb, country, freq, dbptr, | 915 | return __regdb_query_wmm(regdb, country, freq, rule); |
929 | rule); | ||
930 | 916 | ||
931 | country++; | 917 | country++; |
932 | } | 918 | } |
@@ -935,32 +921,13 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr, | |||
935 | } | 921 | } |
936 | EXPORT_SYMBOL(reg_query_regdb_wmm); | 922 | EXPORT_SYMBOL(reg_query_regdb_wmm); |
937 | 923 | ||
938 | struct wmm_ptrs { | ||
939 | struct ieee80211_wmm_rule *rule; | ||
940 | u32 ptr; | ||
941 | }; | ||
942 | |||
943 | static struct ieee80211_wmm_rule *find_wmm_ptr(struct wmm_ptrs *wmm_ptrs, | ||
944 | u32 wmm_ptr, int n_wmms) | ||
945 | { | ||
946 | int i; | ||
947 | |||
948 | for (i = 0; i < n_wmms; i++) { | ||
949 | if (wmm_ptrs[i].ptr == wmm_ptr) | ||
950 | return wmm_ptrs[i].rule; | ||
951 | } | ||
952 | return NULL; | ||
953 | } | ||
954 | |||
955 | static int regdb_query_country(const struct fwdb_header *db, | 924 | static int regdb_query_country(const struct fwdb_header *db, |
956 | const struct fwdb_country *country) | 925 | const struct fwdb_country *country) |
957 | { | 926 | { |
958 | unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; | 927 | unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; |
959 | struct fwdb_collection *coll = (void *)((u8 *)db + ptr); | 928 | struct fwdb_collection *coll = (void *)((u8 *)db + ptr); |
960 | struct ieee80211_regdomain *regdom; | 929 | struct ieee80211_regdomain *regdom; |
961 | struct ieee80211_regdomain *tmp_rd; | 930 | unsigned int size_of_regd, i; |
962 | unsigned int size_of_regd, i, n_wmms = 0; | ||
963 | struct wmm_ptrs *wmm_ptrs; | ||
964 | 931 | ||
965 | size_of_regd = sizeof(struct ieee80211_regdomain) + | 932 | size_of_regd = sizeof(struct ieee80211_regdomain) + |
966 | coll->n_rules * sizeof(struct ieee80211_reg_rule); | 933 | coll->n_rules * sizeof(struct ieee80211_reg_rule); |
@@ -969,12 +936,6 @@ static int regdb_query_country(const struct fwdb_header *db, | |||
969 | if (!regdom) | 936 | if (!regdom) |
970 | return -ENOMEM; | 937 | return -ENOMEM; |
971 | 938 | ||
972 | wmm_ptrs = kcalloc(coll->n_rules, sizeof(*wmm_ptrs), GFP_KERNEL); | ||
973 | if (!wmm_ptrs) { | ||
974 | kfree(regdom); | ||
975 | return -ENOMEM; | ||
976 | } | ||
977 | |||
978 | regdom->n_reg_rules = coll->n_rules; | 939 | regdom->n_reg_rules = coll->n_rules; |
979 | regdom->alpha2[0] = country->alpha2[0]; | 940 | regdom->alpha2[0] = country->alpha2[0]; |
980 | regdom->alpha2[1] = country->alpha2[1]; | 941 | regdom->alpha2[1] = country->alpha2[1]; |
@@ -1013,37 +974,11 @@ static int regdb_query_country(const struct fwdb_header *db, | |||
1013 | 1000 * be16_to_cpu(rule->cac_timeout); | 974 | 1000 * be16_to_cpu(rule->cac_timeout); |
1014 | if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) { | 975 | if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) { |
1015 | u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; | 976 | u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; |
1016 | struct ieee80211_wmm_rule *wmm_pos = | 977 | struct fwdb_wmm_rule *wmm = (void *)((u8 *)db + wmm_ptr); |
1017 | find_wmm_ptr(wmm_ptrs, wmm_ptr, n_wmms); | ||
1018 | struct fwdb_wmm_rule *wmm; | ||
1019 | struct ieee80211_wmm_rule *wmm_rule; | ||
1020 | |||
1021 | if (wmm_pos) { | ||
1022 | rrule->wmm_rule = wmm_pos; | ||
1023 | continue; | ||
1024 | } | ||
1025 | wmm = (void *)((u8 *)db + wmm_ptr); | ||
1026 | tmp_rd = krealloc(regdom, size_of_regd + (n_wmms + 1) * | ||
1027 | sizeof(struct ieee80211_wmm_rule), | ||
1028 | GFP_KERNEL); | ||
1029 | |||
1030 | if (!tmp_rd) { | ||
1031 | kfree(regdom); | ||
1032 | kfree(wmm_ptrs); | ||
1033 | return -ENOMEM; | ||
1034 | } | ||
1035 | regdom = tmp_rd; | ||
1036 | |||
1037 | wmm_rule = (struct ieee80211_wmm_rule *) | ||
1038 | ((u8 *)regdom + size_of_regd + n_wmms * | ||
1039 | sizeof(struct ieee80211_wmm_rule)); | ||
1040 | 978 | ||
1041 | set_wmm_rule(wmm_rule, wmm); | 979 | set_wmm_rule(rrule, wmm); |
1042 | wmm_ptrs[n_wmms].ptr = wmm_ptr; | ||
1043 | wmm_ptrs[n_wmms++].rule = wmm_rule; | ||
1044 | } | 980 | } |
1045 | } | 981 | } |
1046 | kfree(wmm_ptrs); | ||
1047 | 982 | ||
1048 | return reg_schedule_apply(regdom); | 983 | return reg_schedule_apply(regdom); |
1049 | } | 984 | } |
diff --git a/net/wireless/util.c b/net/wireless/util.c index e0825a019e9f..959ed3acd240 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -1456,7 +1456,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef, | |||
1456 | u8 *op_class) | 1456 | u8 *op_class) |
1457 | { | 1457 | { |
1458 | u8 vht_opclass; | 1458 | u8 vht_opclass; |
1459 | u16 freq = chandef->center_freq1; | 1459 | u32 freq = chandef->center_freq1; |
1460 | 1460 | ||
1461 | if (freq >= 2412 && freq <= 2472) { | 1461 | if (freq >= 2412 && freq <= 2472) { |
1462 | if (chandef->width > NL80211_CHAN_WIDTH_40) | 1462 | if (chandef->width > NL80211_CHAN_WIDTH_40) |