diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2016-01-12 05:01:12 -0500 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2016-01-12 05:01:12 -0500 |
commit | 1f16f116b01c110db20ab808562c8b8bc3ee3d6e (patch) | |
tree | 44db563f64cf5f8d62af8f99a61e2b248c44ea3a /net | |
parent | 03724ac3d48f8f0e3caf1d30fa134f8fd96c94e2 (diff) | |
parent | f9eccf24615672896dc13251410c3f2f33a14f95 (diff) |
Merge branches 'clockevents/4.4-fixes' and 'clockevents/4.5-fixes' of http://git.linaro.org/people/daniel.lezcano/linux into timers/urgent
Pull in fixes from Daniel Lezcano:
- Fix the vt8500 timer, which could lead to a system lock-up when dealing with
too small a delta (Roman Volkov)
- Select the CLKSRC_MMIO when the fsl_ftm_timer is enabled with COMPILE_TEST
(Daniel Lezcano)
- Prevent compiling timers that use the 'iomem' API when the architecture does
not have HAS_IOMEM set (Richard Weinberger)
Diffstat (limited to 'net')
105 files changed, 964 insertions, 526 deletions
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index ae3a47f9d1d5..fbd0acf80b13 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c | |||
@@ -805,6 +805,9 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol, | |||
805 | struct sock *sk; | 805 | struct sock *sk; |
806 | ax25_cb *ax25; | 806 | ax25_cb *ax25; |
807 | 807 | ||
808 | if (protocol < 0 || protocol > SK_PROTOCOL_MAX) | ||
809 | return -EINVAL; | ||
810 | |||
808 | if (!net_eq(net, &init_net)) | 811 | if (!net_eq(net, &init_net)) |
809 | return -EAFNOSUPPORT; | 812 | return -EAFNOSUPPORT; |
810 | 813 | ||
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 83bc1aaf5800..a49c705fb86b 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c | |||
@@ -566,6 +566,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst) | |||
566 | int select; | 566 | int select; |
567 | batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key; | 567 | batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key; |
568 | struct batadv_dat_candidate *res; | 568 | struct batadv_dat_candidate *res; |
569 | struct batadv_dat_entry dat; | ||
569 | 570 | ||
570 | if (!bat_priv->orig_hash) | 571 | if (!bat_priv->orig_hash) |
571 | return NULL; | 572 | return NULL; |
@@ -575,7 +576,9 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst) | |||
575 | if (!res) | 576 | if (!res) |
576 | return NULL; | 577 | return NULL; |
577 | 578 | ||
578 | ip_key = (batadv_dat_addr_t)batadv_hash_dat(&ip_dst, | 579 | dat.ip = ip_dst; |
580 | dat.vid = 0; | ||
581 | ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat, | ||
579 | BATADV_DAT_ADDR_MAX); | 582 | BATADV_DAT_ADDR_MAX); |
580 | 583 | ||
581 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | 584 | batadv_dbg(BATADV_DBG_DAT, bat_priv, |
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 8d990b070a2e..3207667e69de 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
@@ -836,6 +836,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, | |||
836 | u8 *orig_addr; | 836 | u8 *orig_addr; |
837 | struct batadv_orig_node *orig_node = NULL; | 837 | struct batadv_orig_node *orig_node = NULL; |
838 | int check, hdr_size = sizeof(*unicast_packet); | 838 | int check, hdr_size = sizeof(*unicast_packet); |
839 | enum batadv_subtype subtype; | ||
839 | bool is4addr; | 840 | bool is4addr; |
840 | 841 | ||
841 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 842 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
@@ -863,10 +864,20 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, | |||
863 | /* packet for me */ | 864 | /* packet for me */ |
864 | if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { | 865 | if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { |
865 | if (is4addr) { | 866 | if (is4addr) { |
866 | batadv_dat_inc_counter(bat_priv, | 867 | subtype = unicast_4addr_packet->subtype; |
867 | unicast_4addr_packet->subtype); | 868 | batadv_dat_inc_counter(bat_priv, subtype); |
868 | orig_addr = unicast_4addr_packet->src; | 869 | |
869 | orig_node = batadv_orig_hash_find(bat_priv, orig_addr); | 870 | /* Only payload data should be considered for speedy |
871 | * join. For example, DAT also uses unicast 4addr | ||
872 | * types, but those packets should not be considered | ||
873 | * for speedy join, since the clients do not actually | ||
874 | * reside at the sending originator. | ||
875 | */ | ||
876 | if (subtype == BATADV_P_DATA) { | ||
877 | orig_addr = unicast_4addr_packet->src; | ||
878 | orig_node = batadv_orig_hash_find(bat_priv, | ||
879 | orig_addr); | ||
880 | } | ||
870 | } | 881 | } |
871 | 882 | ||
872 | if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb, | 883 | if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb, |
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 4228b10c47ea..76f19ba62462 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
@@ -68,13 +68,15 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv, | |||
68 | unsigned short vid, const char *message, | 68 | unsigned short vid, const char *message, |
69 | bool roaming); | 69 | bool roaming); |
70 | 70 | ||
71 | /* returns 1 if they are the same mac addr */ | 71 | /* returns 1 if they are the same mac addr and vid */ |
72 | static int batadv_compare_tt(const struct hlist_node *node, const void *data2) | 72 | static int batadv_compare_tt(const struct hlist_node *node, const void *data2) |
73 | { | 73 | { |
74 | const void *data1 = container_of(node, struct batadv_tt_common_entry, | 74 | const void *data1 = container_of(node, struct batadv_tt_common_entry, |
75 | hash_entry); | 75 | hash_entry); |
76 | const struct batadv_tt_common_entry *tt1 = data1; | ||
77 | const struct batadv_tt_common_entry *tt2 = data2; | ||
76 | 78 | ||
77 | return batadv_compare_eth(data1, data2); | 79 | return (tt1->vid == tt2->vid) && batadv_compare_eth(data1, data2); |
78 | } | 80 | } |
79 | 81 | ||
80 | /** | 82 | /** |
@@ -1427,9 +1429,15 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, | |||
1427 | } | 1429 | } |
1428 | 1430 | ||
1429 | /* if the client was temporary added before receiving the first | 1431 | /* if the client was temporary added before receiving the first |
1430 | * OGM announcing it, we have to clear the TEMP flag | 1432 | * OGM announcing it, we have to clear the TEMP flag. Also, |
1433 | * remove the previous temporary orig node and re-add it | ||
1434 | * if required. If the orig entry changed, the new one which | ||
1435 | * is a non-temporary entry is preferred. | ||
1431 | */ | 1436 | */ |
1432 | common->flags &= ~BATADV_TT_CLIENT_TEMP; | 1437 | if (common->flags & BATADV_TT_CLIENT_TEMP) { |
1438 | batadv_tt_global_del_orig_list(tt_global_entry); | ||
1439 | common->flags &= ~BATADV_TT_CLIENT_TEMP; | ||
1440 | } | ||
1433 | 1441 | ||
1434 | /* the change can carry possible "attribute" flags like the | 1442 | /* the change can carry possible "attribute" flags like the |
1435 | * TT_CLIENT_WIFI, therefore they have to be copied in the | 1443 | * TT_CLIENT_WIFI, therefore they have to be copied in the |
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index a3bffd1ec2b4..70306cc9d814 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c | |||
@@ -271,11 +271,11 @@ static long bt_sock_data_wait(struct sock *sk, long timeo) | |||
271 | if (signal_pending(current) || !timeo) | 271 | if (signal_pending(current) || !timeo) |
272 | break; | 272 | break; |
273 | 273 | ||
274 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 274 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
275 | release_sock(sk); | 275 | release_sock(sk); |
276 | timeo = schedule_timeout(timeo); | 276 | timeo = schedule_timeout(timeo); |
277 | lock_sock(sk); | 277 | lock_sock(sk); |
278 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 278 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
279 | } | 279 | } |
280 | 280 | ||
281 | __set_current_state(TASK_RUNNING); | 281 | __set_current_state(TASK_RUNNING); |
@@ -441,7 +441,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock, | |||
441 | if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk)) | 441 | if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk)) |
442 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | 442 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; |
443 | else | 443 | else |
444 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 444 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
445 | 445 | ||
446 | return mask; | 446 | return mask; |
447 | } | 447 | } |
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index fe129663bd3f..f52bcbf2e58c 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -526,6 +526,9 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, | |||
526 | if (!addr || addr->sa_family != AF_BLUETOOTH) | 526 | if (!addr || addr->sa_family != AF_BLUETOOTH) |
527 | return -EINVAL; | 527 | return -EINVAL; |
528 | 528 | ||
529 | if (addr_len < sizeof(struct sockaddr_sco)) | ||
530 | return -EINVAL; | ||
531 | |||
529 | lock_sock(sk); | 532 | lock_sock(sk); |
530 | 533 | ||
531 | if (sk->sk_state != BT_OPEN) { | 534 | if (sk->sk_state != BT_OPEN) { |
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index c91353841e40..ffed8a1d4f27 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c | |||
@@ -3027,8 +3027,13 @@ static void smp_ready_cb(struct l2cap_chan *chan) | |||
3027 | 3027 | ||
3028 | BT_DBG("chan %p", chan); | 3028 | BT_DBG("chan %p", chan); |
3029 | 3029 | ||
3030 | /* No need to call l2cap_chan_hold() here since we already own | ||
3031 | * the reference taken in smp_new_conn_cb(). This is just the | ||
3032 | * first time that we tie it to a specific pointer. The code in | ||
3033 | * l2cap_core.c ensures that there's no risk this function wont | ||
3034 | * get called if smp_new_conn_cb was previously called. | ||
3035 | */ | ||
3030 | conn->smp = chan; | 3036 | conn->smp = chan; |
3031 | l2cap_chan_hold(chan); | ||
3032 | 3037 | ||
3033 | if (hcon->type == ACL_LINK && test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) | 3038 | if (hcon->type == ACL_LINK && test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) |
3034 | bredr_pairing(chan); | 3039 | bredr_pairing(chan); |
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index cc858919108e..aa209b1066c9 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c | |||
@@ -323,7 +323,7 @@ static long caif_stream_data_wait(struct sock *sk, long timeo) | |||
323 | !timeo) | 323 | !timeo) |
324 | break; | 324 | break; |
325 | 325 | ||
326 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 326 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
327 | release_sock(sk); | 327 | release_sock(sk); |
328 | timeo = schedule_timeout(timeo); | 328 | timeo = schedule_timeout(timeo); |
329 | lock_sock(sk); | 329 | lock_sock(sk); |
@@ -331,7 +331,7 @@ static long caif_stream_data_wait(struct sock *sk, long timeo) | |||
331 | if (sock_flag(sk, SOCK_DEAD)) | 331 | if (sock_flag(sk, SOCK_DEAD)) |
332 | break; | 332 | break; |
333 | 333 | ||
334 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 334 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
335 | } | 335 | } |
336 | 336 | ||
337 | finish_wait(sk_sleep(sk), &wait); | 337 | finish_wait(sk_sleep(sk), &wait); |
diff --git a/net/core/datagram.c b/net/core/datagram.c index 617088aee21d..d62af69ad844 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c | |||
@@ -785,7 +785,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock, | |||
785 | if (sock_writeable(sk)) | 785 | if (sock_writeable(sk)) |
786 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | 786 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; |
787 | else | 787 | else |
788 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 788 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
789 | 789 | ||
790 | return mask; | 790 | return mask; |
791 | } | 791 | } |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index e6af42da28d9..f18ae91b652e 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -2215,7 +2215,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn, | |||
2215 | ndm->ndm_pad2 = 0; | 2215 | ndm->ndm_pad2 = 0; |
2216 | ndm->ndm_flags = pn->flags | NTF_PROXY; | 2216 | ndm->ndm_flags = pn->flags | NTF_PROXY; |
2217 | ndm->ndm_type = RTN_UNICAST; | 2217 | ndm->ndm_type = RTN_UNICAST; |
2218 | ndm->ndm_ifindex = pn->dev->ifindex; | 2218 | ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0; |
2219 | ndm->ndm_state = NUD_NONE; | 2219 | ndm->ndm_state = NUD_NONE; |
2220 | 2220 | ||
2221 | if (nla_put(skb, NDA_DST, tbl->key_len, pn->key)) | 2221 | if (nla_put(skb, NDA_DST, tbl->key_len, pn->key)) |
@@ -2333,7 +2333,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, | |||
2333 | if (h > s_h) | 2333 | if (h > s_h) |
2334 | s_idx = 0; | 2334 | s_idx = 0; |
2335 | for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { | 2335 | for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { |
2336 | if (dev_net(n->dev) != net) | 2336 | if (pneigh_net(n) != net) |
2337 | continue; | 2337 | continue; |
2338 | if (idx < s_idx) | 2338 | if (idx < s_idx) |
2339 | goto next; | 2339 | goto next; |
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c index 6441f47b1a8f..d9ee8d08a3a6 100644 --- a/net/core/netclassid_cgroup.c +++ b/net/core/netclassid_cgroup.c | |||
@@ -56,7 +56,7 @@ static void cgrp_css_free(struct cgroup_subsys_state *css) | |||
56 | kfree(css_cls_state(css)); | 56 | kfree(css_cls_state(css)); |
57 | } | 57 | } |
58 | 58 | ||
59 | static int update_classid(const void *v, struct file *file, unsigned n) | 59 | static int update_classid_sock(const void *v, struct file *file, unsigned n) |
60 | { | 60 | { |
61 | int err; | 61 | int err; |
62 | struct socket *sock = sock_from_file(file, &err); | 62 | struct socket *sock = sock_from_file(file, &err); |
@@ -67,18 +67,27 @@ static int update_classid(const void *v, struct file *file, unsigned n) | |||
67 | return 0; | 67 | return 0; |
68 | } | 68 | } |
69 | 69 | ||
70 | static void cgrp_attach(struct cgroup_subsys_state *css, | 70 | static void update_classid(struct cgroup_subsys_state *css, void *v) |
71 | struct cgroup_taskset *tset) | ||
72 | { | 71 | { |
73 | struct cgroup_cls_state *cs = css_cls_state(css); | 72 | struct css_task_iter it; |
74 | void *v = (void *)(unsigned long)cs->classid; | ||
75 | struct task_struct *p; | 73 | struct task_struct *p; |
76 | 74 | ||
77 | cgroup_taskset_for_each(p, tset) { | 75 | css_task_iter_start(css, &it); |
76 | while ((p = css_task_iter_next(&it))) { | ||
78 | task_lock(p); | 77 | task_lock(p); |
79 | iterate_fd(p->files, 0, update_classid, v); | 78 | iterate_fd(p->files, 0, update_classid_sock, v); |
80 | task_unlock(p); | 79 | task_unlock(p); |
81 | } | 80 | } |
81 | css_task_iter_end(&it); | ||
82 | } | ||
83 | |||
84 | static void cgrp_attach(struct cgroup_taskset *tset) | ||
85 | { | ||
86 | struct cgroup_subsys_state *css; | ||
87 | |||
88 | cgroup_taskset_first(tset, &css); | ||
89 | update_classid(css, | ||
90 | (void *)(unsigned long)css_cls_state(css)->classid); | ||
82 | } | 91 | } |
83 | 92 | ||
84 | static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft) | 93 | static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft) |
@@ -89,8 +98,11 @@ static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft) | |||
89 | static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft, | 98 | static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft, |
90 | u64 value) | 99 | u64 value) |
91 | { | 100 | { |
92 | css_cls_state(css)->classid = (u32) value; | 101 | struct cgroup_cls_state *cs = css_cls_state(css); |
102 | |||
103 | cs->classid = (u32)value; | ||
93 | 104 | ||
105 | update_classid(css, (void *)(unsigned long)cs->classid); | ||
94 | return 0; | 106 | return 0; |
95 | } | 107 | } |
96 | 108 | ||
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index cbd0a199bf52..40fd09fe06ae 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c | |||
@@ -218,13 +218,14 @@ static int update_netprio(const void *v, struct file *file, unsigned n) | |||
218 | return 0; | 218 | return 0; |
219 | } | 219 | } |
220 | 220 | ||
221 | static void net_prio_attach(struct cgroup_subsys_state *css, | 221 | static void net_prio_attach(struct cgroup_taskset *tset) |
222 | struct cgroup_taskset *tset) | ||
223 | { | 222 | { |
224 | struct task_struct *p; | 223 | struct task_struct *p; |
225 | void *v = (void *)(unsigned long)css->cgroup->id; | 224 | struct cgroup_subsys_state *css; |
225 | |||
226 | cgroup_taskset_for_each(p, css, tset) { | ||
227 | void *v = (void *)(unsigned long)css->cgroup->id; | ||
226 | 228 | ||
227 | cgroup_taskset_for_each(p, tset) { | ||
228 | task_lock(p); | 229 | task_lock(p); |
229 | iterate_fd(p->files, 0, update_netprio, v); | 230 | iterate_fd(p->files, 0, update_netprio, v); |
230 | task_unlock(p); | 231 | task_unlock(p); |
diff --git a/net/core/scm.c b/net/core/scm.c index 3b6899b7d810..8a1741b14302 100644 --- a/net/core/scm.c +++ b/net/core/scm.c | |||
@@ -305,6 +305,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) | |||
305 | err = put_user(cmlen, &cm->cmsg_len); | 305 | err = put_user(cmlen, &cm->cmsg_len); |
306 | if (!err) { | 306 | if (!err) { |
307 | cmlen = CMSG_SPACE(i*sizeof(int)); | 307 | cmlen = CMSG_SPACE(i*sizeof(int)); |
308 | if (msg->msg_controllen < cmlen) | ||
309 | cmlen = msg->msg_controllen; | ||
308 | msg->msg_control += cmlen; | 310 | msg->msg_control += cmlen; |
309 | msg->msg_controllen -= cmlen; | 311 | msg->msg_controllen -= cmlen; |
310 | } | 312 | } |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 152b9c70e252..b2df375ec9c2 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -3643,7 +3643,8 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb, | |||
3643 | serr->ee.ee_info = tstype; | 3643 | serr->ee.ee_info = tstype; |
3644 | if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { | 3644 | if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { |
3645 | serr->ee.ee_data = skb_shinfo(skb)->tskey; | 3645 | serr->ee.ee_data = skb_shinfo(skb)->tskey; |
3646 | if (sk->sk_protocol == IPPROTO_TCP) | 3646 | if (sk->sk_protocol == IPPROTO_TCP && |
3647 | sk->sk_type == SOCK_STREAM) | ||
3647 | serr->ee.ee_data -= sk->sk_tskey; | 3648 | serr->ee.ee_data -= sk->sk_tskey; |
3648 | } | 3649 | } |
3649 | 3650 | ||
@@ -4268,7 +4269,7 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) | |||
4268 | return NULL; | 4269 | return NULL; |
4269 | } | 4270 | } |
4270 | 4271 | ||
4271 | memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len, | 4272 | memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN, |
4272 | 2 * ETH_ALEN); | 4273 | 2 * ETH_ALEN); |
4273 | skb->mac_header += VLAN_HLEN; | 4274 | skb->mac_header += VLAN_HLEN; |
4274 | return skb; | 4275 | return skb; |
diff --git a/net/core/sock.c b/net/core/sock.c index 1e4dd54bfb5a..0d91f7dca751 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -433,8 +433,6 @@ static bool sock_needs_netstamp(const struct sock *sk) | |||
433 | } | 433 | } |
434 | } | 434 | } |
435 | 435 | ||
436 | #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) | ||
437 | |||
438 | static void sock_disable_timestamp(struct sock *sk, unsigned long flags) | 436 | static void sock_disable_timestamp(struct sock *sk, unsigned long flags) |
439 | { | 437 | { |
440 | if (sk->sk_flags & flags) { | 438 | if (sk->sk_flags & flags) { |
@@ -874,7 +872,8 @@ set_rcvbuf: | |||
874 | 872 | ||
875 | if (val & SOF_TIMESTAMPING_OPT_ID && | 873 | if (val & SOF_TIMESTAMPING_OPT_ID && |
876 | !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { | 874 | !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { |
877 | if (sk->sk_protocol == IPPROTO_TCP) { | 875 | if (sk->sk_protocol == IPPROTO_TCP && |
876 | sk->sk_type == SOCK_STREAM) { | ||
878 | if (sk->sk_state != TCP_ESTABLISHED) { | 877 | if (sk->sk_state != TCP_ESTABLISHED) { |
879 | ret = -EINVAL; | 878 | ret = -EINVAL; |
880 | break; | 879 | break; |
@@ -1530,7 +1529,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) | |||
1530 | skb_queue_head_init(&newsk->sk_receive_queue); | 1529 | skb_queue_head_init(&newsk->sk_receive_queue); |
1531 | skb_queue_head_init(&newsk->sk_write_queue); | 1530 | skb_queue_head_init(&newsk->sk_write_queue); |
1532 | 1531 | ||
1533 | spin_lock_init(&newsk->sk_dst_lock); | ||
1534 | rwlock_init(&newsk->sk_callback_lock); | 1532 | rwlock_init(&newsk->sk_callback_lock); |
1535 | lockdep_set_class_and_name(&newsk->sk_callback_lock, | 1533 | lockdep_set_class_and_name(&newsk->sk_callback_lock, |
1536 | af_callback_keys + newsk->sk_family, | 1534 | af_callback_keys + newsk->sk_family, |
@@ -1553,7 +1551,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) | |||
1553 | */ | 1551 | */ |
1554 | is_charged = sk_filter_charge(newsk, filter); | 1552 | is_charged = sk_filter_charge(newsk, filter); |
1555 | 1553 | ||
1556 | if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) { | 1554 | if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { |
1557 | /* It is still raw copy of parent, so invalidate | 1555 | /* It is still raw copy of parent, so invalidate |
1558 | * destructor and make plain sk_free() */ | 1556 | * destructor and make plain sk_free() */ |
1559 | newsk->sk_destruct = NULL; | 1557 | newsk->sk_destruct = NULL; |
@@ -1607,7 +1605,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) | |||
1607 | { | 1605 | { |
1608 | u32 max_segs = 1; | 1606 | u32 max_segs = 1; |
1609 | 1607 | ||
1610 | __sk_dst_set(sk, dst); | 1608 | sk_dst_set(sk, dst); |
1611 | sk->sk_route_caps = dst->dev->features; | 1609 | sk->sk_route_caps = dst->dev->features; |
1612 | if (sk->sk_route_caps & NETIF_F_GSO) | 1610 | if (sk->sk_route_caps & NETIF_F_GSO) |
1613 | sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; | 1611 | sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; |
@@ -1815,7 +1813,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo) | |||
1815 | { | 1813 | { |
1816 | DEFINE_WAIT(wait); | 1814 | DEFINE_WAIT(wait); |
1817 | 1815 | ||
1818 | clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 1816 | sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
1819 | for (;;) { | 1817 | for (;;) { |
1820 | if (!timeo) | 1818 | if (!timeo) |
1821 | break; | 1819 | break; |
@@ -1861,7 +1859,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, | |||
1861 | if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf) | 1859 | if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf) |
1862 | break; | 1860 | break; |
1863 | 1861 | ||
1864 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 1862 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
1865 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | 1863 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
1866 | err = -EAGAIN; | 1864 | err = -EAGAIN; |
1867 | if (!timeo) | 1865 | if (!timeo) |
@@ -2048,9 +2046,9 @@ int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb) | |||
2048 | DEFINE_WAIT(wait); | 2046 | DEFINE_WAIT(wait); |
2049 | 2047 | ||
2050 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 2048 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
2051 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 2049 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
2052 | rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb); | 2050 | rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb); |
2053 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 2051 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
2054 | finish_wait(sk_sleep(sk), &wait); | 2052 | finish_wait(sk_sleep(sk), &wait); |
2055 | return rc; | 2053 | return rc; |
2056 | } | 2054 | } |
@@ -2388,7 +2386,6 @@ void sock_init_data(struct socket *sock, struct sock *sk) | |||
2388 | } else | 2386 | } else |
2389 | sk->sk_wq = NULL; | 2387 | sk->sk_wq = NULL; |
2390 | 2388 | ||
2391 | spin_lock_init(&sk->sk_dst_lock); | ||
2392 | rwlock_init(&sk->sk_callback_lock); | 2389 | rwlock_init(&sk->sk_callback_lock); |
2393 | lockdep_set_class_and_name(&sk->sk_callback_lock, | 2390 | lockdep_set_class_and_name(&sk->sk_callback_lock, |
2394 | af_callback_keys + sk->sk_family, | 2391 | af_callback_keys + sk->sk_family, |
diff --git a/net/core/stream.c b/net/core/stream.c index d70f77a0c889..b96f7a79e544 100644 --- a/net/core/stream.c +++ b/net/core/stream.c | |||
@@ -39,7 +39,7 @@ void sk_stream_write_space(struct sock *sk) | |||
39 | wake_up_interruptible_poll(&wq->wait, POLLOUT | | 39 | wake_up_interruptible_poll(&wq->wait, POLLOUT | |
40 | POLLWRNORM | POLLWRBAND); | 40 | POLLWRNORM | POLLWRBAND); |
41 | if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) | 41 | if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) |
42 | sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); | 42 | sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); |
43 | rcu_read_unlock(); | 43 | rcu_read_unlock(); |
44 | } | 44 | } |
45 | } | 45 | } |
@@ -126,7 +126,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) | |||
126 | current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2; | 126 | current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2; |
127 | 127 | ||
128 | while (1) { | 128 | while (1) { |
129 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 129 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
130 | 130 | ||
131 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 131 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
132 | 132 | ||
@@ -139,7 +139,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) | |||
139 | } | 139 | } |
140 | if (signal_pending(current)) | 140 | if (signal_pending(current)) |
141 | goto do_interrupted; | 141 | goto do_interrupted; |
142 | clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 142 | sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
143 | if (sk_stream_memory_free(sk) && !vm_wait) | 143 | if (sk_stream_memory_free(sk) && !vm_wait) |
144 | break; | 144 | break; |
145 | 145 | ||
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index db5fc2440a23..9c6d0508e63a 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -202,7 +202,9 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req | |||
202 | security_req_classify_flow(req, flowi6_to_flowi(&fl6)); | 202 | security_req_classify_flow(req, flowi6_to_flowi(&fl6)); |
203 | 203 | ||
204 | 204 | ||
205 | final_p = fl6_update_dst(&fl6, np->opt, &final); | 205 | rcu_read_lock(); |
206 | final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final); | ||
207 | rcu_read_unlock(); | ||
206 | 208 | ||
207 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); | 209 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); |
208 | if (IS_ERR(dst)) { | 210 | if (IS_ERR(dst)) { |
@@ -219,7 +221,10 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req | |||
219 | &ireq->ir_v6_loc_addr, | 221 | &ireq->ir_v6_loc_addr, |
220 | &ireq->ir_v6_rmt_addr); | 222 | &ireq->ir_v6_rmt_addr); |
221 | fl6.daddr = ireq->ir_v6_rmt_addr; | 223 | fl6.daddr = ireq->ir_v6_rmt_addr; |
222 | err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass); | 224 | rcu_read_lock(); |
225 | err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt), | ||
226 | np->tclass); | ||
227 | rcu_read_unlock(); | ||
223 | err = net_xmit_eval(err); | 228 | err = net_xmit_eval(err); |
224 | } | 229 | } |
225 | 230 | ||
@@ -387,6 +392,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, | |||
387 | struct inet_request_sock *ireq = inet_rsk(req); | 392 | struct inet_request_sock *ireq = inet_rsk(req); |
388 | struct ipv6_pinfo *newnp; | 393 | struct ipv6_pinfo *newnp; |
389 | const struct ipv6_pinfo *np = inet6_sk(sk); | 394 | const struct ipv6_pinfo *np = inet6_sk(sk); |
395 | struct ipv6_txoptions *opt; | ||
390 | struct inet_sock *newinet; | 396 | struct inet_sock *newinet; |
391 | struct dccp6_sock *newdp6; | 397 | struct dccp6_sock *newdp6; |
392 | struct sock *newsk; | 398 | struct sock *newsk; |
@@ -453,7 +459,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, | |||
453 | * comment in that function for the gory details. -acme | 459 | * comment in that function for the gory details. -acme |
454 | */ | 460 | */ |
455 | 461 | ||
456 | __ip6_dst_store(newsk, dst, NULL, NULL); | 462 | ip6_dst_store(newsk, dst, NULL, NULL); |
457 | newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM | | 463 | newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM | |
458 | NETIF_F_TSO); | 464 | NETIF_F_TSO); |
459 | newdp6 = (struct dccp6_sock *)newsk; | 465 | newdp6 = (struct dccp6_sock *)newsk; |
@@ -488,13 +494,15 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, | |||
488 | * Yes, keeping reference count would be much more clever, but we make | 494 | * Yes, keeping reference count would be much more clever, but we make |
489 | * one more one thing there: reattach optmem to newsk. | 495 | * one more one thing there: reattach optmem to newsk. |
490 | */ | 496 | */ |
491 | if (np->opt != NULL) | 497 | opt = rcu_dereference(np->opt); |
492 | newnp->opt = ipv6_dup_options(newsk, np->opt); | 498 | if (opt) { |
493 | 499 | opt = ipv6_dup_options(newsk, opt); | |
500 | RCU_INIT_POINTER(newnp->opt, opt); | ||
501 | } | ||
494 | inet_csk(newsk)->icsk_ext_hdr_len = 0; | 502 | inet_csk(newsk)->icsk_ext_hdr_len = 0; |
495 | if (newnp->opt != NULL) | 503 | if (opt) |
496 | inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen + | 504 | inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen + |
497 | newnp->opt->opt_flen); | 505 | opt->opt_flen; |
498 | 506 | ||
499 | dccp_sync_mss(newsk, dst_mtu(dst)); | 507 | dccp_sync_mss(newsk, dst_mtu(dst)); |
500 | 508 | ||
@@ -757,6 +765,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
757 | struct ipv6_pinfo *np = inet6_sk(sk); | 765 | struct ipv6_pinfo *np = inet6_sk(sk); |
758 | struct dccp_sock *dp = dccp_sk(sk); | 766 | struct dccp_sock *dp = dccp_sk(sk); |
759 | struct in6_addr *saddr = NULL, *final_p, final; | 767 | struct in6_addr *saddr = NULL, *final_p, final; |
768 | struct ipv6_txoptions *opt; | ||
760 | struct flowi6 fl6; | 769 | struct flowi6 fl6; |
761 | struct dst_entry *dst; | 770 | struct dst_entry *dst; |
762 | int addr_type; | 771 | int addr_type; |
@@ -856,7 +865,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
856 | fl6.fl6_sport = inet->inet_sport; | 865 | fl6.fl6_sport = inet->inet_sport; |
857 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); | 866 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
858 | 867 | ||
859 | final_p = fl6_update_dst(&fl6, np->opt, &final); | 868 | opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); |
869 | final_p = fl6_update_dst(&fl6, opt, &final); | ||
860 | 870 | ||
861 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); | 871 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); |
862 | if (IS_ERR(dst)) { | 872 | if (IS_ERR(dst)) { |
@@ -873,12 +883,11 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
873 | np->saddr = *saddr; | 883 | np->saddr = *saddr; |
874 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; | 884 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; |
875 | 885 | ||
876 | __ip6_dst_store(sk, dst, NULL, NULL); | 886 | ip6_dst_store(sk, dst, NULL, NULL); |
877 | 887 | ||
878 | icsk->icsk_ext_hdr_len = 0; | 888 | icsk->icsk_ext_hdr_len = 0; |
879 | if (np->opt != NULL) | 889 | if (opt) |
880 | icsk->icsk_ext_hdr_len = (np->opt->opt_flen + | 890 | icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen; |
881 | np->opt->opt_nflen); | ||
882 | 891 | ||
883 | inet->inet_dport = usin->sin6_port; | 892 | inet->inet_dport = usin->sin6_port; |
884 | 893 | ||
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index b5cf13a28009..41e65804ddf5 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -339,8 +339,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock, | |||
339 | if (sk_stream_is_writeable(sk)) { | 339 | if (sk_stream_is_writeable(sk)) { |
340 | mask |= POLLOUT | POLLWRNORM; | 340 | mask |= POLLOUT | POLLWRNORM; |
341 | } else { /* send SIGIO later */ | 341 | } else { /* send SIGIO later */ |
342 | set_bit(SOCK_ASYNC_NOSPACE, | 342 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
343 | &sk->sk_socket->flags); | ||
344 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | 343 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
345 | 344 | ||
346 | /* Race breaker. If space is freed after | 345 | /* Race breaker. If space is freed after |
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 675cf94e04f8..13d6b1a6e0fc 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
@@ -678,6 +678,9 @@ static int dn_create(struct net *net, struct socket *sock, int protocol, | |||
678 | { | 678 | { |
679 | struct sock *sk; | 679 | struct sock *sk; |
680 | 680 | ||
681 | if (protocol < 0 || protocol > SK_PROTOCOL_MAX) | ||
682 | return -EINVAL; | ||
683 | |||
681 | if (!net_eq(net, &init_net)) | 684 | if (!net_eq(net, &init_net)) |
682 | return -EAFNOSUPPORT; | 685 | return -EAFNOSUPPORT; |
683 | 686 | ||
@@ -1747,9 +1750,9 @@ static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, | |||
1747 | } | 1750 | } |
1748 | 1751 | ||
1749 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 1752 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1750 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1753 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
1751 | sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target)); | 1754 | sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target)); |
1752 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1755 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
1753 | finish_wait(sk_sleep(sk), &wait); | 1756 | finish_wait(sk_sleep(sk), &wait); |
1754 | } | 1757 | } |
1755 | 1758 | ||
@@ -2004,10 +2007,10 @@ static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) | |||
2004 | } | 2007 | } |
2005 | 2008 | ||
2006 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 2009 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
2007 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 2010 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
2008 | sk_wait_event(sk, &timeo, | 2011 | sk_wait_event(sk, &timeo, |
2009 | !dn_queue_too_long(scp, queue, flags)); | 2012 | !dn_queue_too_long(scp, queue, flags)); |
2010 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 2013 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
2011 | finish_wait(sk_sleep(sk), &wait); | 2014 | finish_wait(sk_sleep(sk), &wait); |
2012 | continue; | 2015 | continue; |
2013 | } | 2016 | } |
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c index 4677b6fa6dda..ecc28cff08ab 100644 --- a/net/dns_resolver/dns_query.c +++ b/net/dns_resolver/dns_query.c | |||
@@ -67,7 +67,7 @@ | |||
67 | * Returns the size of the result on success, -ve error code otherwise. | 67 | * Returns the size of the result on success, -ve error code otherwise. |
68 | */ | 68 | */ |
69 | int dns_query(const char *type, const char *name, size_t namelen, | 69 | int dns_query(const char *type, const char *name, size_t namelen, |
70 | const char *options, char **_result, time_t *_expiry) | 70 | const char *options, char **_result, time64_t *_expiry) |
71 | { | 71 | { |
72 | struct key *rkey; | 72 | struct key *rkey; |
73 | const struct user_key_payload *upayload; | 73 | const struct user_key_payload *upayload; |
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 35a9788bb3ae..c7d1adca30d8 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c | |||
@@ -312,7 +312,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master, u8 type) | |||
312 | return; | 312 | return; |
313 | 313 | ||
314 | out: | 314 | out: |
315 | WARN_ON_ONCE("HSR: Could not send supervision frame\n"); | 315 | WARN_ONCE(1, "HSR: Could not send supervision frame\n"); |
316 | kfree_skb(skb); | 316 | kfree_skb(skb); |
317 | } | 317 | } |
318 | 318 | ||
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 11c4ca13ec3b..5c5db6636704 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -257,6 +257,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol, | |||
257 | int try_loading_module = 0; | 257 | int try_loading_module = 0; |
258 | int err; | 258 | int err; |
259 | 259 | ||
260 | if (protocol < 0 || protocol >= IPPROTO_MAX) | ||
261 | return -EINVAL; | ||
262 | |||
260 | sock->state = SS_UNCONNECTED; | 263 | sock->state = SS_UNCONNECTED; |
261 | 264 | ||
262 | /* Look for the requested type/protocol pair. */ | 265 | /* Look for the requested type/protocol pair. */ |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index cc8f3e506cde..473447593060 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -1155,6 +1155,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, | |||
1155 | static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) | 1155 | static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) |
1156 | { | 1156 | { |
1157 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); | 1157 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
1158 | struct netdev_notifier_changeupper_info *info; | ||
1158 | struct in_device *in_dev; | 1159 | struct in_device *in_dev; |
1159 | struct net *net = dev_net(dev); | 1160 | struct net *net = dev_net(dev); |
1160 | unsigned int flags; | 1161 | unsigned int flags; |
@@ -1193,6 +1194,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo | |||
1193 | case NETDEV_CHANGEMTU: | 1194 | case NETDEV_CHANGEMTU: |
1194 | rt_cache_flush(net); | 1195 | rt_cache_flush(net); |
1195 | break; | 1196 | break; |
1197 | case NETDEV_CHANGEUPPER: | ||
1198 | info = ptr; | ||
1199 | /* flush all routes if dev is linked to or unlinked from | ||
1200 | * an L3 master device (e.g., VRF) | ||
1201 | */ | ||
1202 | if (info->upper_dev && netif_is_l3_master(info->upper_dev)) | ||
1203 | fib_disable_ip(dev, NETDEV_DOWN, true); | ||
1204 | break; | ||
1196 | } | 1205 | } |
1197 | return NOTIFY_DONE; | 1206 | return NOTIFY_DONE; |
1198 | } | 1207 | } |
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index e0fcbbbcfe54..bd903fe0f750 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c | |||
@@ -24,6 +24,7 @@ struct fou { | |||
24 | u16 type; | 24 | u16 type; |
25 | struct udp_offload udp_offloads; | 25 | struct udp_offload udp_offloads; |
26 | struct list_head list; | 26 | struct list_head list; |
27 | struct rcu_head rcu; | ||
27 | }; | 28 | }; |
28 | 29 | ||
29 | #define FOU_F_REMCSUM_NOPARTIAL BIT(0) | 30 | #define FOU_F_REMCSUM_NOPARTIAL BIT(0) |
@@ -417,7 +418,7 @@ static void fou_release(struct fou *fou) | |||
417 | list_del(&fou->list); | 418 | list_del(&fou->list); |
418 | udp_tunnel_sock_release(sock); | 419 | udp_tunnel_sock_release(sock); |
419 | 420 | ||
420 | kfree(fou); | 421 | kfree_rcu(fou, rcu); |
421 | } | 422 | } |
422 | 423 | ||
423 | static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg) | 424 | static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg) |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 6baf36e11808..05e4cba14162 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -2126,7 +2126,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) | |||
2126 | ASSERT_RTNL(); | 2126 | ASSERT_RTNL(); |
2127 | 2127 | ||
2128 | in_dev = ip_mc_find_dev(net, imr); | 2128 | in_dev = ip_mc_find_dev(net, imr); |
2129 | if (!in_dev) { | 2129 | if (!imr->imr_ifindex && !imr->imr_address.s_addr && !in_dev) { |
2130 | ret = -ENODEV; | 2130 | ret = -ENODEV; |
2131 | goto out; | 2131 | goto out; |
2132 | } | 2132 | } |
@@ -2147,7 +2147,8 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) | |||
2147 | 2147 | ||
2148 | *imlp = iml->next_rcu; | 2148 | *imlp = iml->next_rcu; |
2149 | 2149 | ||
2150 | ip_mc_dec_group(in_dev, group); | 2150 | if (in_dev) |
2151 | ip_mc_dec_group(in_dev, group); | ||
2151 | 2152 | ||
2152 | /* decrease mem now to avoid the memleak warning */ | 2153 | /* decrease mem now to avoid the memleak warning */ |
2153 | atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); | 2154 | atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 92dd4b74d513..c3a38353f5dc 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -134,7 +134,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, | |||
134 | struct mfc_cache *c, struct rtmsg *rtm); | 134 | struct mfc_cache *c, struct rtmsg *rtm); |
135 | static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc, | 135 | static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc, |
136 | int cmd); | 136 | int cmd); |
137 | static void mroute_clean_tables(struct mr_table *mrt); | 137 | static void mroute_clean_tables(struct mr_table *mrt, bool all); |
138 | static void ipmr_expire_process(unsigned long arg); | 138 | static void ipmr_expire_process(unsigned long arg); |
139 | 139 | ||
140 | #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES | 140 | #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES |
@@ -350,7 +350,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id) | |||
350 | static void ipmr_free_table(struct mr_table *mrt) | 350 | static void ipmr_free_table(struct mr_table *mrt) |
351 | { | 351 | { |
352 | del_timer_sync(&mrt->ipmr_expire_timer); | 352 | del_timer_sync(&mrt->ipmr_expire_timer); |
353 | mroute_clean_tables(mrt); | 353 | mroute_clean_tables(mrt, true); |
354 | kfree(mrt); | 354 | kfree(mrt); |
355 | } | 355 | } |
356 | 356 | ||
@@ -441,10 +441,6 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v) | |||
441 | return dev; | 441 | return dev; |
442 | 442 | ||
443 | failure: | 443 | failure: |
444 | /* allow the register to be completed before unregistering. */ | ||
445 | rtnl_unlock(); | ||
446 | rtnl_lock(); | ||
447 | |||
448 | unregister_netdevice(dev); | 444 | unregister_netdevice(dev); |
449 | return NULL; | 445 | return NULL; |
450 | } | 446 | } |
@@ -540,10 +536,6 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt) | |||
540 | return dev; | 536 | return dev; |
541 | 537 | ||
542 | failure: | 538 | failure: |
543 | /* allow the register to be completed before unregistering. */ | ||
544 | rtnl_unlock(); | ||
545 | rtnl_lock(); | ||
546 | |||
547 | unregister_netdevice(dev); | 539 | unregister_netdevice(dev); |
548 | return NULL; | 540 | return NULL; |
549 | } | 541 | } |
@@ -1208,7 +1200,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, | |||
1208 | * Close the multicast socket, and clear the vif tables etc | 1200 | * Close the multicast socket, and clear the vif tables etc |
1209 | */ | 1201 | */ |
1210 | 1202 | ||
1211 | static void mroute_clean_tables(struct mr_table *mrt) | 1203 | static void mroute_clean_tables(struct mr_table *mrt, bool all) |
1212 | { | 1204 | { |
1213 | int i; | 1205 | int i; |
1214 | LIST_HEAD(list); | 1206 | LIST_HEAD(list); |
@@ -1217,8 +1209,9 @@ static void mroute_clean_tables(struct mr_table *mrt) | |||
1217 | /* Shut down all active vif entries */ | 1209 | /* Shut down all active vif entries */ |
1218 | 1210 | ||
1219 | for (i = 0; i < mrt->maxvif; i++) { | 1211 | for (i = 0; i < mrt->maxvif; i++) { |
1220 | if (!(mrt->vif_table[i].flags & VIFF_STATIC)) | 1212 | if (!all && (mrt->vif_table[i].flags & VIFF_STATIC)) |
1221 | vif_delete(mrt, i, 0, &list); | 1213 | continue; |
1214 | vif_delete(mrt, i, 0, &list); | ||
1222 | } | 1215 | } |
1223 | unregister_netdevice_many(&list); | 1216 | unregister_netdevice_many(&list); |
1224 | 1217 | ||
@@ -1226,7 +1219,7 @@ static void mroute_clean_tables(struct mr_table *mrt) | |||
1226 | 1219 | ||
1227 | for (i = 0; i < MFC_LINES; i++) { | 1220 | for (i = 0; i < MFC_LINES; i++) { |
1228 | list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { | 1221 | list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { |
1229 | if (c->mfc_flags & MFC_STATIC) | 1222 | if (!all && (c->mfc_flags & MFC_STATIC)) |
1230 | continue; | 1223 | continue; |
1231 | list_del_rcu(&c->list); | 1224 | list_del_rcu(&c->list); |
1232 | mroute_netlink_event(mrt, c, RTM_DELROUTE); | 1225 | mroute_netlink_event(mrt, c, RTM_DELROUTE); |
@@ -1261,7 +1254,7 @@ static void mrtsock_destruct(struct sock *sk) | |||
1261 | NETCONFA_IFINDEX_ALL, | 1254 | NETCONFA_IFINDEX_ALL, |
1262 | net->ipv4.devconf_all); | 1255 | net->ipv4.devconf_all); |
1263 | RCU_INIT_POINTER(mrt->mroute_sk, NULL); | 1256 | RCU_INIT_POINTER(mrt->mroute_sk, NULL); |
1264 | mroute_clean_tables(mrt); | 1257 | mroute_clean_tables(mrt, false); |
1265 | } | 1258 | } |
1266 | } | 1259 | } |
1267 | rtnl_unlock(); | 1260 | rtnl_unlock(); |
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index a35584176535..c187c60e3e0c 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig | |||
@@ -60,6 +60,7 @@ config NFT_REJECT_IPV4 | |||
60 | 60 | ||
61 | config NFT_DUP_IPV4 | 61 | config NFT_DUP_IPV4 |
62 | tristate "IPv4 nf_tables packet duplication support" | 62 | tristate "IPv4 nf_tables packet duplication support" |
63 | depends on !NF_CONNTRACK || NF_CONNTRACK | ||
63 | select NF_DUP_IPV4 | 64 | select NF_DUP_IPV4 |
64 | help | 65 | help |
65 | This module enables IPv4 packet duplication support for nf_tables. | 66 | This module enables IPv4 packet duplication support for nf_tables. |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c1728771cf89..c82cca18c90f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -517,8 +517,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
517 | if (sk_stream_is_writeable(sk)) { | 517 | if (sk_stream_is_writeable(sk)) { |
518 | mask |= POLLOUT | POLLWRNORM; | 518 | mask |= POLLOUT | POLLWRNORM; |
519 | } else { /* send SIGIO later */ | 519 | } else { /* send SIGIO later */ |
520 | set_bit(SOCK_ASYNC_NOSPACE, | 520 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
521 | &sk->sk_socket->flags); | ||
522 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | 521 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
523 | 522 | ||
524 | /* Race breaker. If space is freed after | 523 | /* Race breaker. If space is freed after |
@@ -906,7 +905,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, | |||
906 | goto out_err; | 905 | goto out_err; |
907 | } | 906 | } |
908 | 907 | ||
909 | clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 908 | sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
910 | 909 | ||
911 | mss_now = tcp_send_mss(sk, &size_goal, flags); | 910 | mss_now = tcp_send_mss(sk, &size_goal, flags); |
912 | copied = 0; | 911 | copied = 0; |
@@ -1134,7 +1133,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) | |||
1134 | } | 1133 | } |
1135 | 1134 | ||
1136 | /* This should be in poll */ | 1135 | /* This should be in poll */ |
1137 | clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 1136 | sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
1138 | 1137 | ||
1139 | mss_now = tcp_send_mss(sk, &size_goal, flags); | 1138 | mss_now = tcp_send_mss(sk, &size_goal, flags); |
1140 | 1139 | ||
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index fdd88c3803a6..2d656eef7f8e 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -4481,19 +4481,34 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int | |||
4481 | int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) | 4481 | int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) |
4482 | { | 4482 | { |
4483 | struct sk_buff *skb; | 4483 | struct sk_buff *skb; |
4484 | int err = -ENOMEM; | ||
4485 | int data_len = 0; | ||
4484 | bool fragstolen; | 4486 | bool fragstolen; |
4485 | 4487 | ||
4486 | if (size == 0) | 4488 | if (size == 0) |
4487 | return 0; | 4489 | return 0; |
4488 | 4490 | ||
4489 | skb = alloc_skb(size, sk->sk_allocation); | 4491 | if (size > PAGE_SIZE) { |
4492 | int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS); | ||
4493 | |||
4494 | data_len = npages << PAGE_SHIFT; | ||
4495 | size = data_len + (size & ~PAGE_MASK); | ||
4496 | } | ||
4497 | skb = alloc_skb_with_frags(size - data_len, data_len, | ||
4498 | PAGE_ALLOC_COSTLY_ORDER, | ||
4499 | &err, sk->sk_allocation); | ||
4490 | if (!skb) | 4500 | if (!skb) |
4491 | goto err; | 4501 | goto err; |
4492 | 4502 | ||
4503 | skb_put(skb, size - data_len); | ||
4504 | skb->data_len = data_len; | ||
4505 | skb->len = size; | ||
4506 | |||
4493 | if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) | 4507 | if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) |
4494 | goto err_free; | 4508 | goto err_free; |
4495 | 4509 | ||
4496 | if (memcpy_from_msg(skb_put(skb, size), msg, size)) | 4510 | err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); |
4511 | if (err) | ||
4497 | goto err_free; | 4512 | goto err_free; |
4498 | 4513 | ||
4499 | TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; | 4514 | TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; |
@@ -4509,7 +4524,8 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) | |||
4509 | err_free: | 4524 | err_free: |
4510 | kfree_skb(skb); | 4525 | kfree_skb(skb); |
4511 | err: | 4526 | err: |
4512 | return -ENOMEM; | 4527 | return err; |
4528 | |||
4513 | } | 4529 | } |
4514 | 4530 | ||
4515 | static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) | 4531 | static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) |
@@ -5667,6 +5683,7 @@ discard: | |||
5667 | } | 5683 | } |
5668 | 5684 | ||
5669 | tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; | 5685 | tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; |
5686 | tp->copied_seq = tp->rcv_nxt; | ||
5670 | tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; | 5687 | tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; |
5671 | 5688 | ||
5672 | /* RFC1323: The window in SYN & SYN/ACK segments is | 5689 | /* RFC1323: The window in SYN & SYN/ACK segments is |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ba09016d1bfd..d8841a2f1569 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -921,7 +921,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, | |||
921 | } | 921 | } |
922 | 922 | ||
923 | md5sig = rcu_dereference_protected(tp->md5sig_info, | 923 | md5sig = rcu_dereference_protected(tp->md5sig_info, |
924 | sock_owned_by_user(sk)); | 924 | sock_owned_by_user(sk) || |
925 | lockdep_is_held(&sk->sk_lock.slock)); | ||
925 | if (!md5sig) { | 926 | if (!md5sig) { |
926 | md5sig = kmalloc(sizeof(*md5sig), gfp); | 927 | md5sig = kmalloc(sizeof(*md5sig), gfp); |
927 | if (!md5sig) | 928 | if (!md5sig) |
@@ -1492,7 +1493,7 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) | |||
1492 | if (likely(sk->sk_rx_dst)) | 1493 | if (likely(sk->sk_rx_dst)) |
1493 | skb_dst_drop(skb); | 1494 | skb_dst_drop(skb); |
1494 | else | 1495 | else |
1495 | skb_dst_force(skb); | 1496 | skb_dst_force_safe(skb); |
1496 | 1497 | ||
1497 | __skb_queue_tail(&tp->ucopy.prequeue, skb); | 1498 | __skb_queue_tail(&tp->ucopy.prequeue, skb); |
1498 | tp->ucopy.memory += skb->truesize; | 1499 | tp->ucopy.memory += skb->truesize; |
@@ -1720,8 +1721,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) | |||
1720 | { | 1721 | { |
1721 | struct dst_entry *dst = skb_dst(skb); | 1722 | struct dst_entry *dst = skb_dst(skb); |
1722 | 1723 | ||
1723 | if (dst) { | 1724 | if (dst && dst_hold_safe(dst)) { |
1724 | dst_hold(dst); | ||
1725 | sk->sk_rx_dst = dst; | 1725 | sk->sk_rx_dst = dst; |
1726 | inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; | 1726 | inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; |
1727 | } | 1727 | } |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index cb7ca569052c..9bfc39ff2285 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -3150,7 +3150,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) | |||
3150 | { | 3150 | { |
3151 | struct tcp_sock *tp = tcp_sk(sk); | 3151 | struct tcp_sock *tp = tcp_sk(sk); |
3152 | struct tcp_fastopen_request *fo = tp->fastopen_req; | 3152 | struct tcp_fastopen_request *fo = tp->fastopen_req; |
3153 | int syn_loss = 0, space, err = 0, copied; | 3153 | int syn_loss = 0, space, err = 0; |
3154 | unsigned long last_syn_loss = 0; | 3154 | unsigned long last_syn_loss = 0; |
3155 | struct sk_buff *syn_data; | 3155 | struct sk_buff *syn_data; |
3156 | 3156 | ||
@@ -3188,17 +3188,18 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) | |||
3188 | goto fallback; | 3188 | goto fallback; |
3189 | syn_data->ip_summed = CHECKSUM_PARTIAL; | 3189 | syn_data->ip_summed = CHECKSUM_PARTIAL; |
3190 | memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); | 3190 | memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); |
3191 | copied = copy_from_iter(skb_put(syn_data, space), space, | 3191 | if (space) { |
3192 | &fo->data->msg_iter); | 3192 | int copied = copy_from_iter(skb_put(syn_data, space), space, |
3193 | if (unlikely(!copied)) { | 3193 | &fo->data->msg_iter); |
3194 | kfree_skb(syn_data); | 3194 | if (unlikely(!copied)) { |
3195 | goto fallback; | 3195 | kfree_skb(syn_data); |
3196 | } | 3196 | goto fallback; |
3197 | if (copied != space) { | 3197 | } |
3198 | skb_trim(syn_data, copied); | 3198 | if (copied != space) { |
3199 | space = copied; | 3199 | skb_trim(syn_data, copied); |
3200 | space = copied; | ||
3201 | } | ||
3200 | } | 3202 | } |
3201 | |||
3202 | /* No more data pending in inet_wait_for_connect() */ | 3203 | /* No more data pending in inet_wait_for_connect() */ |
3203 | if (space == fo->size) | 3204 | if (space == fo->size) |
3204 | fo->data = NULL; | 3205 | fo->data = NULL; |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index c9c716a483e4..193ba1fa8a9a 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -168,7 +168,7 @@ static int tcp_write_timeout(struct sock *sk) | |||
168 | dst_negative_advice(sk); | 168 | dst_negative_advice(sk); |
169 | if (tp->syn_fastopen || tp->syn_data) | 169 | if (tp->syn_fastopen || tp->syn_data) |
170 | tcp_fastopen_cache_set(sk, 0, NULL, true, 0); | 170 | tcp_fastopen_cache_set(sk, 0, NULL, true, 0); |
171 | if (tp->syn_data) | 171 | if (tp->syn_data && icsk->icsk_retransmits == 1) |
172 | NET_INC_STATS_BH(sock_net(sk), | 172 | NET_INC_STATS_BH(sock_net(sk), |
173 | LINUX_MIB_TCPFASTOPENACTIVEFAIL); | 173 | LINUX_MIB_TCPFASTOPENACTIVEFAIL); |
174 | } | 174 | } |
@@ -176,6 +176,18 @@ static int tcp_write_timeout(struct sock *sk) | |||
176 | syn_set = true; | 176 | syn_set = true; |
177 | } else { | 177 | } else { |
178 | if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) { | 178 | if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) { |
179 | /* Some middle-boxes may black-hole Fast Open _after_ | ||
180 | * the handshake. Therefore we conservatively disable | ||
181 | * Fast Open on this path on recurring timeouts with | ||
182 | * few or zero bytes acked after Fast Open. | ||
183 | */ | ||
184 | if (tp->syn_data_acked && | ||
185 | tp->bytes_acked <= tp->rx_opt.mss_clamp) { | ||
186 | tcp_fastopen_cache_set(sk, 0, NULL, true, 0); | ||
187 | if (icsk->icsk_retransmits == sysctl_tcp_retries1) | ||
188 | NET_INC_STATS_BH(sock_net(sk), | ||
189 | LINUX_MIB_TCPFASTOPENACTIVEFAIL); | ||
190 | } | ||
179 | /* Black hole detection */ | 191 | /* Black hole detection */ |
180 | tcp_mtu_probing(icsk, sk); | 192 | tcp_mtu_probing(icsk, sk); |
181 | 193 | ||
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 24ec14f9825c..0c7b0e61b917 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -100,7 +100,6 @@ | |||
100 | #include <linux/slab.h> | 100 | #include <linux/slab.h> |
101 | #include <net/tcp_states.h> | 101 | #include <net/tcp_states.h> |
102 | #include <linux/skbuff.h> | 102 | #include <linux/skbuff.h> |
103 | #include <linux/netdevice.h> | ||
104 | #include <linux/proc_fs.h> | 103 | #include <linux/proc_fs.h> |
105 | #include <linux/seq_file.h> | 104 | #include <linux/seq_file.h> |
106 | #include <net/net_namespace.h> | 105 | #include <net/net_namespace.h> |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d84742f003a9..17f8e7ea133b 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -350,6 +350,12 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) | |||
350 | setup_timer(&ndev->rs_timer, addrconf_rs_timer, | 350 | setup_timer(&ndev->rs_timer, addrconf_rs_timer, |
351 | (unsigned long)ndev); | 351 | (unsigned long)ndev); |
352 | memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); | 352 | memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); |
353 | |||
354 | if (ndev->cnf.stable_secret.initialized) | ||
355 | ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY; | ||
356 | else | ||
357 | ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64; | ||
358 | |||
353 | ndev->cnf.mtu6 = dev->mtu; | 359 | ndev->cnf.mtu6 = dev->mtu; |
354 | ndev->cnf.sysctl = NULL; | 360 | ndev->cnf.sysctl = NULL; |
355 | ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl); | 361 | ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl); |
@@ -2455,7 +2461,7 @@ ok: | |||
2455 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | 2461 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD |
2456 | if (in6_dev->cnf.optimistic_dad && | 2462 | if (in6_dev->cnf.optimistic_dad && |
2457 | !net->ipv6.devconf_all->forwarding && sllao) | 2463 | !net->ipv6.devconf_all->forwarding && sllao) |
2458 | addr_flags = IFA_F_OPTIMISTIC; | 2464 | addr_flags |= IFA_F_OPTIMISTIC; |
2459 | #endif | 2465 | #endif |
2460 | 2466 | ||
2461 | /* Do not allow to create too much of autoconfigured | 2467 | /* Do not allow to create too much of autoconfigured |
@@ -3642,7 +3648,7 @@ static void addrconf_dad_work(struct work_struct *w) | |||
3642 | 3648 | ||
3643 | /* send a neighbour solicitation for our addr */ | 3649 | /* send a neighbour solicitation for our addr */ |
3644 | addrconf_addr_solict_mult(&ifp->addr, &mcaddr); | 3650 | addrconf_addr_solict_mult(&ifp->addr, &mcaddr); |
3645 | ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any, NULL); | 3651 | ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any); |
3646 | out: | 3652 | out: |
3647 | in6_ifa_put(ifp); | 3653 | in6_ifa_put(ifp); |
3648 | rtnl_unlock(); | 3654 | rtnl_unlock(); |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 44bb66bde0e2..9f5137cd604e 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -109,6 +109,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol, | |||
109 | int try_loading_module = 0; | 109 | int try_loading_module = 0; |
110 | int err; | 110 | int err; |
111 | 111 | ||
112 | if (protocol < 0 || protocol >= IPPROTO_MAX) | ||
113 | return -EINVAL; | ||
114 | |||
112 | /* Look for the requested type/protocol pair. */ | 115 | /* Look for the requested type/protocol pair. */ |
113 | lookup_protocol: | 116 | lookup_protocol: |
114 | err = -ESOCKTNOSUPPORT; | 117 | err = -ESOCKTNOSUPPORT; |
@@ -428,9 +431,11 @@ void inet6_destroy_sock(struct sock *sk) | |||
428 | 431 | ||
429 | /* Free tx options */ | 432 | /* Free tx options */ |
430 | 433 | ||
431 | opt = xchg(&np->opt, NULL); | 434 | opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL); |
432 | if (opt) | 435 | if (opt) { |
433 | sock_kfree_s(sk, opt, opt->tot_len); | 436 | atomic_sub(opt->tot_len, &sk->sk_omem_alloc); |
437 | txopt_put(opt); | ||
438 | } | ||
434 | } | 439 | } |
435 | EXPORT_SYMBOL_GPL(inet6_destroy_sock); | 440 | EXPORT_SYMBOL_GPL(inet6_destroy_sock); |
436 | 441 | ||
@@ -659,7 +664,10 @@ int inet6_sk_rebuild_header(struct sock *sk) | |||
659 | fl6.fl6_sport = inet->inet_sport; | 664 | fl6.fl6_sport = inet->inet_sport; |
660 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); | 665 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
661 | 666 | ||
662 | final_p = fl6_update_dst(&fl6, np->opt, &final); | 667 | rcu_read_lock(); |
668 | final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), | ||
669 | &final); | ||
670 | rcu_read_unlock(); | ||
663 | 671 | ||
664 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); | 672 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); |
665 | if (IS_ERR(dst)) { | 673 | if (IS_ERR(dst)) { |
@@ -668,7 +676,7 @@ int inet6_sk_rebuild_header(struct sock *sk) | |||
668 | return PTR_ERR(dst); | 676 | return PTR_ERR(dst); |
669 | } | 677 | } |
670 | 678 | ||
671 | __ip6_dst_store(sk, dst, NULL, NULL); | 679 | ip6_dst_store(sk, dst, NULL, NULL); |
672 | } | 680 | } |
673 | 681 | ||
674 | return 0; | 682 | return 0; |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index d70b0238f468..517c55b01ba8 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -167,8 +167,10 @@ ipv4_connected: | |||
167 | 167 | ||
168 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); | 168 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
169 | 169 | ||
170 | opt = flowlabel ? flowlabel->opt : np->opt; | 170 | rcu_read_lock(); |
171 | opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt); | ||
171 | final_p = fl6_update_dst(&fl6, opt, &final); | 172 | final_p = fl6_update_dst(&fl6, opt, &final); |
173 | rcu_read_unlock(); | ||
172 | 174 | ||
173 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); | 175 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); |
174 | err = 0; | 176 | err = 0; |
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index ce203b0402be..ea7c4d64a00a 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c | |||
@@ -727,6 +727,7 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt) | |||
727 | *((char **)&opt2->dst1opt) += dif; | 727 | *((char **)&opt2->dst1opt) += dif; |
728 | if (opt2->srcrt) | 728 | if (opt2->srcrt) |
729 | *((char **)&opt2->srcrt) += dif; | 729 | *((char **)&opt2->srcrt) += dif; |
730 | atomic_set(&opt2->refcnt, 1); | ||
730 | } | 731 | } |
731 | return opt2; | 732 | return opt2; |
732 | } | 733 | } |
@@ -790,7 +791,7 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, | |||
790 | return ERR_PTR(-ENOBUFS); | 791 | return ERR_PTR(-ENOBUFS); |
791 | 792 | ||
792 | memset(opt2, 0, tot_len); | 793 | memset(opt2, 0, tot_len); |
793 | 794 | atomic_set(&opt2->refcnt, 1); | |
794 | opt2->tot_len = tot_len; | 795 | opt2->tot_len = tot_len; |
795 | p = (char *)(opt2 + 1); | 796 | p = (char *)(opt2 + 1); |
796 | 797 | ||
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 36c5a98b0472..0a37ddc7af51 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -834,11 +834,6 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6, | |||
834 | security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); | 834 | security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); |
835 | } | 835 | } |
836 | 836 | ||
837 | /* | ||
838 | * Special lock-class for __icmpv6_sk: | ||
839 | */ | ||
840 | static struct lock_class_key icmpv6_socket_sk_dst_lock_key; | ||
841 | |||
842 | static int __net_init icmpv6_sk_init(struct net *net) | 837 | static int __net_init icmpv6_sk_init(struct net *net) |
843 | { | 838 | { |
844 | struct sock *sk; | 839 | struct sock *sk; |
@@ -860,15 +855,6 @@ static int __net_init icmpv6_sk_init(struct net *net) | |||
860 | 855 | ||
861 | net->ipv6.icmp_sk[i] = sk; | 856 | net->ipv6.icmp_sk[i] = sk; |
862 | 857 | ||
863 | /* | ||
864 | * Split off their lock-class, because sk->sk_dst_lock | ||
865 | * gets used from softirqs, which is safe for | ||
866 | * __icmpv6_sk (because those never get directly used | ||
867 | * via userspace syscalls), but unsafe for normal sockets. | ||
868 | */ | ||
869 | lockdep_set_class(&sk->sk_dst_lock, | ||
870 | &icmpv6_socket_sk_dst_lock_key); | ||
871 | |||
872 | /* Enough space for 2 64K ICMP packets, including | 858 | /* Enough space for 2 64K ICMP packets, including |
873 | * sk_buff struct overhead. | 859 | * sk_buff struct overhead. |
874 | */ | 860 | */ |
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 5d1c7cee2cb2..a7ca2cde2ecb 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
@@ -78,7 +78,9 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk, | |||
78 | memset(fl6, 0, sizeof(*fl6)); | 78 | memset(fl6, 0, sizeof(*fl6)); |
79 | fl6->flowi6_proto = proto; | 79 | fl6->flowi6_proto = proto; |
80 | fl6->daddr = ireq->ir_v6_rmt_addr; | 80 | fl6->daddr = ireq->ir_v6_rmt_addr; |
81 | final_p = fl6_update_dst(fl6, np->opt, &final); | 81 | rcu_read_lock(); |
82 | final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); | ||
83 | rcu_read_unlock(); | ||
82 | fl6->saddr = ireq->ir_v6_loc_addr; | 84 | fl6->saddr = ireq->ir_v6_loc_addr; |
83 | fl6->flowi6_oif = ireq->ir_iif; | 85 | fl6->flowi6_oif = ireq->ir_iif; |
84 | fl6->flowi6_mark = ireq->ir_mark; | 86 | fl6->flowi6_mark = ireq->ir_mark; |
@@ -109,14 +111,6 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) | |||
109 | EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr); | 111 | EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr); |
110 | 112 | ||
111 | static inline | 113 | static inline |
112 | void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst, | ||
113 | const struct in6_addr *daddr, | ||
114 | const struct in6_addr *saddr) | ||
115 | { | ||
116 | __ip6_dst_store(sk, dst, daddr, saddr); | ||
117 | } | ||
118 | |||
119 | static inline | ||
120 | struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) | 114 | struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) |
121 | { | 115 | { |
122 | return __sk_dst_check(sk, cookie); | 116 | return __sk_dst_check(sk, cookie); |
@@ -142,14 +136,16 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk, | |||
142 | fl6->fl6_dport = inet->inet_dport; | 136 | fl6->fl6_dport = inet->inet_dport; |
143 | security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); | 137 | security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); |
144 | 138 | ||
145 | final_p = fl6_update_dst(fl6, np->opt, &final); | 139 | rcu_read_lock(); |
140 | final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); | ||
141 | rcu_read_unlock(); | ||
146 | 142 | ||
147 | dst = __inet6_csk_dst_check(sk, np->dst_cookie); | 143 | dst = __inet6_csk_dst_check(sk, np->dst_cookie); |
148 | if (!dst) { | 144 | if (!dst) { |
149 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); | 145 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); |
150 | 146 | ||
151 | if (!IS_ERR(dst)) | 147 | if (!IS_ERR(dst)) |
152 | __inet6_csk_dst_store(sk, dst, NULL, NULL); | 148 | ip6_dst_store(sk, dst, NULL, NULL); |
153 | } | 149 | } |
154 | return dst; | 150 | return dst; |
155 | } | 151 | } |
@@ -175,7 +171,8 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused | |||
175 | /* Restore final destination back after routing done */ | 171 | /* Restore final destination back after routing done */ |
176 | fl6.daddr = sk->sk_v6_daddr; | 172 | fl6.daddr = sk->sk_v6_daddr; |
177 | 173 | ||
178 | res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass); | 174 | res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt), |
175 | np->tclass); | ||
179 | rcu_read_unlock(); | 176 | rcu_read_unlock(); |
180 | return res; | 177 | return res; |
181 | } | 178 | } |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 3c7b9310b33f..e5ea177d34c6 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -1571,13 +1571,11 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[], | |||
1571 | return -EEXIST; | 1571 | return -EEXIST; |
1572 | } else { | 1572 | } else { |
1573 | t = nt; | 1573 | t = nt; |
1574 | |||
1575 | ip6gre_tunnel_unlink(ign, t); | ||
1576 | ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]); | ||
1577 | ip6gre_tunnel_link(ign, t); | ||
1578 | netdev_state_change(dev); | ||
1579 | } | 1574 | } |
1580 | 1575 | ||
1576 | ip6gre_tunnel_unlink(ign, t); | ||
1577 | ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]); | ||
1578 | ip6gre_tunnel_link(ign, t); | ||
1581 | return 0; | 1579 | return 0; |
1582 | } | 1580 | } |
1583 | 1581 | ||
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index eabffbb89795..137fca42aaa6 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -177,7 +177,7 @@ void ip6_tnl_dst_reset(struct ip6_tnl *t) | |||
177 | int i; | 177 | int i; |
178 | 178 | ||
179 | for_each_possible_cpu(i) | 179 | for_each_possible_cpu(i) |
180 | ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), NULL); | 180 | ip6_tnl_per_cpu_dst_set(per_cpu_ptr(t->dst_cache, i), NULL); |
181 | } | 181 | } |
182 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset); | 182 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset); |
183 | 183 | ||
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index ad19136086dd..a10e77103c88 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -118,7 +118,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc, | |||
118 | int cmd); | 118 | int cmd); |
119 | static int ip6mr_rtm_dumproute(struct sk_buff *skb, | 119 | static int ip6mr_rtm_dumproute(struct sk_buff *skb, |
120 | struct netlink_callback *cb); | 120 | struct netlink_callback *cb); |
121 | static void mroute_clean_tables(struct mr6_table *mrt); | 121 | static void mroute_clean_tables(struct mr6_table *mrt, bool all); |
122 | static void ipmr_expire_process(unsigned long arg); | 122 | static void ipmr_expire_process(unsigned long arg); |
123 | 123 | ||
124 | #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES | 124 | #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES |
@@ -334,7 +334,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id) | |||
334 | static void ip6mr_free_table(struct mr6_table *mrt) | 334 | static void ip6mr_free_table(struct mr6_table *mrt) |
335 | { | 335 | { |
336 | del_timer_sync(&mrt->ipmr_expire_timer); | 336 | del_timer_sync(&mrt->ipmr_expire_timer); |
337 | mroute_clean_tables(mrt); | 337 | mroute_clean_tables(mrt, true); |
338 | kfree(mrt); | 338 | kfree(mrt); |
339 | } | 339 | } |
340 | 340 | ||
@@ -765,10 +765,6 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt) | |||
765 | return dev; | 765 | return dev; |
766 | 766 | ||
767 | failure: | 767 | failure: |
768 | /* allow the register to be completed before unregistering. */ | ||
769 | rtnl_unlock(); | ||
770 | rtnl_lock(); | ||
771 | |||
772 | unregister_netdevice(dev); | 768 | unregister_netdevice(dev); |
773 | return NULL; | 769 | return NULL; |
774 | } | 770 | } |
@@ -1542,7 +1538,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt, | |||
1542 | * Close the multicast socket, and clear the vif tables etc | 1538 | * Close the multicast socket, and clear the vif tables etc |
1543 | */ | 1539 | */ |
1544 | 1540 | ||
1545 | static void mroute_clean_tables(struct mr6_table *mrt) | 1541 | static void mroute_clean_tables(struct mr6_table *mrt, bool all) |
1546 | { | 1542 | { |
1547 | int i; | 1543 | int i; |
1548 | LIST_HEAD(list); | 1544 | LIST_HEAD(list); |
@@ -1552,8 +1548,9 @@ static void mroute_clean_tables(struct mr6_table *mrt) | |||
1552 | * Shut down all active vif entries | 1548 | * Shut down all active vif entries |
1553 | */ | 1549 | */ |
1554 | for (i = 0; i < mrt->maxvif; i++) { | 1550 | for (i = 0; i < mrt->maxvif; i++) { |
1555 | if (!(mrt->vif6_table[i].flags & VIFF_STATIC)) | 1551 | if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC)) |
1556 | mif6_delete(mrt, i, &list); | 1552 | continue; |
1553 | mif6_delete(mrt, i, &list); | ||
1557 | } | 1554 | } |
1558 | unregister_netdevice_many(&list); | 1555 | unregister_netdevice_many(&list); |
1559 | 1556 | ||
@@ -1562,7 +1559,7 @@ static void mroute_clean_tables(struct mr6_table *mrt) | |||
1562 | */ | 1559 | */ |
1563 | for (i = 0; i < MFC6_LINES; i++) { | 1560 | for (i = 0; i < MFC6_LINES; i++) { |
1564 | list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) { | 1561 | list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) { |
1565 | if (c->mfc_flags & MFC_STATIC) | 1562 | if (!all && (c->mfc_flags & MFC_STATIC)) |
1566 | continue; | 1563 | continue; |
1567 | write_lock_bh(&mrt_lock); | 1564 | write_lock_bh(&mrt_lock); |
1568 | list_del(&c->list); | 1565 | list_del(&c->list); |
@@ -1625,7 +1622,7 @@ int ip6mr_sk_done(struct sock *sk) | |||
1625 | net->ipv6.devconf_all); | 1622 | net->ipv6.devconf_all); |
1626 | write_unlock_bh(&mrt_lock); | 1623 | write_unlock_bh(&mrt_lock); |
1627 | 1624 | ||
1628 | mroute_clean_tables(mrt); | 1625 | mroute_clean_tables(mrt, false); |
1629 | err = 0; | 1626 | err = 0; |
1630 | break; | 1627 | break; |
1631 | } | 1628 | } |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 63e6956917c9..4449ad1f8114 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -111,7 +111,8 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk, | |||
111 | icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); | 111 | icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); |
112 | } | 112 | } |
113 | } | 113 | } |
114 | opt = xchg(&inet6_sk(sk)->opt, opt); | 114 | opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt, |
115 | opt); | ||
115 | sk_dst_reset(sk); | 116 | sk_dst_reset(sk); |
116 | 117 | ||
117 | return opt; | 118 | return opt; |
@@ -231,9 +232,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
231 | sk->sk_socket->ops = &inet_dgram_ops; | 232 | sk->sk_socket->ops = &inet_dgram_ops; |
232 | sk->sk_family = PF_INET; | 233 | sk->sk_family = PF_INET; |
233 | } | 234 | } |
234 | opt = xchg(&np->opt, NULL); | 235 | opt = xchg((__force struct ipv6_txoptions **)&np->opt, |
235 | if (opt) | 236 | NULL); |
236 | sock_kfree_s(sk, opt, opt->tot_len); | 237 | if (opt) { |
238 | atomic_sub(opt->tot_len, &sk->sk_omem_alloc); | ||
239 | txopt_put(opt); | ||
240 | } | ||
237 | pktopt = xchg(&np->pktoptions, NULL); | 241 | pktopt = xchg(&np->pktoptions, NULL); |
238 | kfree_skb(pktopt); | 242 | kfree_skb(pktopt); |
239 | 243 | ||
@@ -403,7 +407,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
403 | if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) | 407 | if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) |
404 | break; | 408 | break; |
405 | 409 | ||
406 | opt = ipv6_renew_options(sk, np->opt, optname, | 410 | opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); |
411 | opt = ipv6_renew_options(sk, opt, optname, | ||
407 | (struct ipv6_opt_hdr __user *)optval, | 412 | (struct ipv6_opt_hdr __user *)optval, |
408 | optlen); | 413 | optlen); |
409 | if (IS_ERR(opt)) { | 414 | if (IS_ERR(opt)) { |
@@ -432,8 +437,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
432 | retv = 0; | 437 | retv = 0; |
433 | opt = ipv6_update_options(sk, opt); | 438 | opt = ipv6_update_options(sk, opt); |
434 | sticky_done: | 439 | sticky_done: |
435 | if (opt) | 440 | if (opt) { |
436 | sock_kfree_s(sk, opt, opt->tot_len); | 441 | atomic_sub(opt->tot_len, &sk->sk_omem_alloc); |
442 | txopt_put(opt); | ||
443 | } | ||
437 | break; | 444 | break; |
438 | } | 445 | } |
439 | 446 | ||
@@ -486,6 +493,7 @@ sticky_done: | |||
486 | break; | 493 | break; |
487 | 494 | ||
488 | memset(opt, 0, sizeof(*opt)); | 495 | memset(opt, 0, sizeof(*opt)); |
496 | atomic_set(&opt->refcnt, 1); | ||
489 | opt->tot_len = sizeof(*opt) + optlen; | 497 | opt->tot_len = sizeof(*opt) + optlen; |
490 | retv = -EFAULT; | 498 | retv = -EFAULT; |
491 | if (copy_from_user(opt+1, optval, optlen)) | 499 | if (copy_from_user(opt+1, optval, optlen)) |
@@ -502,8 +510,10 @@ update: | |||
502 | retv = 0; | 510 | retv = 0; |
503 | opt = ipv6_update_options(sk, opt); | 511 | opt = ipv6_update_options(sk, opt); |
504 | done: | 512 | done: |
505 | if (opt) | 513 | if (opt) { |
506 | sock_kfree_s(sk, opt, opt->tot_len); | 514 | atomic_sub(opt->tot_len, &sk->sk_omem_alloc); |
515 | txopt_put(opt); | ||
516 | } | ||
507 | break; | 517 | break; |
508 | } | 518 | } |
509 | case IPV6_UNICAST_HOPS: | 519 | case IPV6_UNICAST_HOPS: |
@@ -1110,10 +1120,11 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1110 | case IPV6_RTHDR: | 1120 | case IPV6_RTHDR: |
1111 | case IPV6_DSTOPTS: | 1121 | case IPV6_DSTOPTS: |
1112 | { | 1122 | { |
1123 | struct ipv6_txoptions *opt; | ||
1113 | 1124 | ||
1114 | lock_sock(sk); | 1125 | lock_sock(sk); |
1115 | len = ipv6_getsockopt_sticky(sk, np->opt, | 1126 | opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); |
1116 | optname, optval, len); | 1127 | len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len); |
1117 | release_sock(sk); | 1128 | release_sock(sk); |
1118 | /* check if ipv6_getsockopt_sticky() returns err code */ | 1129 | /* check if ipv6_getsockopt_sticky() returns err code */ |
1119 | if (len < 0) | 1130 | if (len < 0) |
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 3e0f855e1bea..d6161e1c48c8 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -556,8 +556,7 @@ static void ndisc_send_unsol_na(struct net_device *dev) | |||
556 | } | 556 | } |
557 | 557 | ||
558 | void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit, | 558 | void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit, |
559 | const struct in6_addr *daddr, const struct in6_addr *saddr, | 559 | const struct in6_addr *daddr, const struct in6_addr *saddr) |
560 | struct sk_buff *oskb) | ||
561 | { | 560 | { |
562 | struct sk_buff *skb; | 561 | struct sk_buff *skb; |
563 | struct in6_addr addr_buf; | 562 | struct in6_addr addr_buf; |
@@ -593,9 +592,6 @@ void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit, | |||
593 | ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR, | 592 | ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR, |
594 | dev->dev_addr); | 593 | dev->dev_addr); |
595 | 594 | ||
596 | if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE) && oskb) | ||
597 | skb_dst_copy(skb, oskb); | ||
598 | |||
599 | ndisc_send_skb(skb, daddr, saddr); | 595 | ndisc_send_skb(skb, daddr, saddr); |
600 | } | 596 | } |
601 | 597 | ||
@@ -682,12 +678,12 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb) | |||
682 | "%s: trying to ucast probe in NUD_INVALID: %pI6\n", | 678 | "%s: trying to ucast probe in NUD_INVALID: %pI6\n", |
683 | __func__, target); | 679 | __func__, target); |
684 | } | 680 | } |
685 | ndisc_send_ns(dev, target, target, saddr, skb); | 681 | ndisc_send_ns(dev, target, target, saddr); |
686 | } else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) { | 682 | } else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) { |
687 | neigh_app_ns(neigh); | 683 | neigh_app_ns(neigh); |
688 | } else { | 684 | } else { |
689 | addrconf_addr_solict_mult(target, &mcaddr); | 685 | addrconf_addr_solict_mult(target, &mcaddr); |
690 | ndisc_send_ns(dev, target, &mcaddr, saddr, skb); | 686 | ndisc_send_ns(dev, target, &mcaddr, saddr); |
691 | } | 687 | } |
692 | } | 688 | } |
693 | 689 | ||
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index f6a024e141e5..e10a04c9cdc7 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig | |||
@@ -49,6 +49,7 @@ config NFT_REJECT_IPV6 | |||
49 | 49 | ||
50 | config NFT_DUP_IPV6 | 50 | config NFT_DUP_IPV6 |
51 | tristate "IPv6 nf_tables packet duplication support" | 51 | tristate "IPv6 nf_tables packet duplication support" |
52 | depends on !NF_CONNTRACK || NF_CONNTRACK | ||
52 | select NF_DUP_IPV6 | 53 | select NF_DUP_IPV6 |
53 | help | 54 | help |
54 | This module enables IPv6 packet duplication support for nf_tables. | 55 | This module enables IPv6 packet duplication support for nf_tables. |
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index d5efeb87350e..bab4441ed4e4 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -190,7 +190,7 @@ static void nf_ct_frag6_expire(unsigned long data) | |||
190 | /* Creation primitives. */ | 190 | /* Creation primitives. */ |
191 | static inline struct frag_queue *fq_find(struct net *net, __be32 id, | 191 | static inline struct frag_queue *fq_find(struct net *net, __be32 id, |
192 | u32 user, struct in6_addr *src, | 192 | u32 user, struct in6_addr *src, |
193 | struct in6_addr *dst, u8 ecn) | 193 | struct in6_addr *dst, int iif, u8 ecn) |
194 | { | 194 | { |
195 | struct inet_frag_queue *q; | 195 | struct inet_frag_queue *q; |
196 | struct ip6_create_arg arg; | 196 | struct ip6_create_arg arg; |
@@ -200,6 +200,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id, | |||
200 | arg.user = user; | 200 | arg.user = user; |
201 | arg.src = src; | 201 | arg.src = src; |
202 | arg.dst = dst; | 202 | arg.dst = dst; |
203 | arg.iif = iif; | ||
203 | arg.ecn = ecn; | 204 | arg.ecn = ecn; |
204 | 205 | ||
205 | local_bh_disable(); | 206 | local_bh_disable(); |
@@ -601,7 +602,7 @@ struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 use | |||
601 | fhdr = (struct frag_hdr *)skb_transport_header(clone); | 602 | fhdr = (struct frag_hdr *)skb_transport_header(clone); |
602 | 603 | ||
603 | fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, | 604 | fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, |
604 | ip6_frag_ecn(hdr)); | 605 | skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); |
605 | if (fq == NULL) { | 606 | if (fq == NULL) { |
606 | pr_debug("Can't find and can't create new queue\n"); | 607 | pr_debug("Can't find and can't create new queue\n"); |
607 | goto ret_orig; | 608 | goto ret_orig; |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index dc65ec198f7c..99140986e887 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -733,6 +733,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd, | |||
733 | 733 | ||
734 | static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | 734 | static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) |
735 | { | 735 | { |
736 | struct ipv6_txoptions *opt_to_free = NULL; | ||
736 | struct ipv6_txoptions opt_space; | 737 | struct ipv6_txoptions opt_space; |
737 | DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); | 738 | DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); |
738 | struct in6_addr *daddr, *final_p, final; | 739 | struct in6_addr *daddr, *final_p, final; |
@@ -839,8 +840,10 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
839 | if (!(opt->opt_nflen|opt->opt_flen)) | 840 | if (!(opt->opt_nflen|opt->opt_flen)) |
840 | opt = NULL; | 841 | opt = NULL; |
841 | } | 842 | } |
842 | if (!opt) | 843 | if (!opt) { |
843 | opt = np->opt; | 844 | opt = txopt_get(np); |
845 | opt_to_free = opt; | ||
846 | } | ||
844 | if (flowlabel) | 847 | if (flowlabel) |
845 | opt = fl6_merge_options(&opt_space, flowlabel, opt); | 848 | opt = fl6_merge_options(&opt_space, flowlabel, opt); |
846 | opt = ipv6_fixup_options(&opt_space, opt); | 849 | opt = ipv6_fixup_options(&opt_space, opt); |
@@ -906,6 +909,7 @@ done: | |||
906 | dst_release(dst); | 909 | dst_release(dst); |
907 | out: | 910 | out: |
908 | fl6_sock_release(flowlabel); | 911 | fl6_sock_release(flowlabel); |
912 | txopt_put(opt_to_free); | ||
909 | return err < 0 ? err : len; | 913 | return err < 0 ? err : len; |
910 | do_confirm: | 914 | do_confirm: |
911 | dst_confirm(dst); | 915 | dst_confirm(dst); |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 44e21a03cfc3..45f5ae51de65 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -108,7 +108,10 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a) | |||
108 | return fq->id == arg->id && | 108 | return fq->id == arg->id && |
109 | fq->user == arg->user && | 109 | fq->user == arg->user && |
110 | ipv6_addr_equal(&fq->saddr, arg->src) && | 110 | ipv6_addr_equal(&fq->saddr, arg->src) && |
111 | ipv6_addr_equal(&fq->daddr, arg->dst); | 111 | ipv6_addr_equal(&fq->daddr, arg->dst) && |
112 | (arg->iif == fq->iif || | ||
113 | !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST | | ||
114 | IPV6_ADDR_LINKLOCAL))); | ||
112 | } | 115 | } |
113 | EXPORT_SYMBOL(ip6_frag_match); | 116 | EXPORT_SYMBOL(ip6_frag_match); |
114 | 117 | ||
@@ -180,7 +183,7 @@ static void ip6_frag_expire(unsigned long data) | |||
180 | 183 | ||
181 | static struct frag_queue * | 184 | static struct frag_queue * |
182 | fq_find(struct net *net, __be32 id, const struct in6_addr *src, | 185 | fq_find(struct net *net, __be32 id, const struct in6_addr *src, |
183 | const struct in6_addr *dst, u8 ecn) | 186 | const struct in6_addr *dst, int iif, u8 ecn) |
184 | { | 187 | { |
185 | struct inet_frag_queue *q; | 188 | struct inet_frag_queue *q; |
186 | struct ip6_create_arg arg; | 189 | struct ip6_create_arg arg; |
@@ -190,6 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, | |||
190 | arg.user = IP6_DEFRAG_LOCAL_DELIVER; | 193 | arg.user = IP6_DEFRAG_LOCAL_DELIVER; |
191 | arg.src = src; | 194 | arg.src = src; |
192 | arg.dst = dst; | 195 | arg.dst = dst; |
196 | arg.iif = iif; | ||
193 | arg.ecn = ecn; | 197 | arg.ecn = ecn; |
194 | 198 | ||
195 | hash = inet6_hash_frag(id, src, dst); | 199 | hash = inet6_hash_frag(id, src, dst); |
@@ -551,7 +555,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb) | |||
551 | } | 555 | } |
552 | 556 | ||
553 | fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr, | 557 | fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr, |
554 | ip6_frag_ecn(hdr)); | 558 | skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); |
555 | if (fq) { | 559 | if (fq) { |
556 | int ret; | 560 | int ret; |
557 | 561 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 6f01fe122abd..826e6aa44f8d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -523,7 +523,7 @@ static void rt6_probe_deferred(struct work_struct *w) | |||
523 | container_of(w, struct __rt6_probe_work, work); | 523 | container_of(w, struct __rt6_probe_work, work); |
524 | 524 | ||
525 | addrconf_addr_solict_mult(&work->target, &mcaddr); | 525 | addrconf_addr_solict_mult(&work->target, &mcaddr); |
526 | ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, NULL); | 526 | ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL); |
527 | dev_put(work->dev); | 527 | dev_put(work->dev); |
528 | kfree(work); | 528 | kfree(work); |
529 | } | 529 | } |
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index bb8f2fa1c7fb..eaf7ac496d50 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c | |||
@@ -222,7 +222,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
222 | memset(&fl6, 0, sizeof(fl6)); | 222 | memset(&fl6, 0, sizeof(fl6)); |
223 | fl6.flowi6_proto = IPPROTO_TCP; | 223 | fl6.flowi6_proto = IPPROTO_TCP; |
224 | fl6.daddr = ireq->ir_v6_rmt_addr; | 224 | fl6.daddr = ireq->ir_v6_rmt_addr; |
225 | final_p = fl6_update_dst(&fl6, np->opt, &final); | 225 | final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final); |
226 | fl6.saddr = ireq->ir_v6_loc_addr; | 226 | fl6.saddr = ireq->ir_v6_loc_addr; |
227 | fl6.flowi6_oif = sk->sk_bound_dev_if; | 227 | fl6.flowi6_oif = sk->sk_bound_dev_if; |
228 | fl6.flowi6_mark = ireq->ir_mark; | 228 | fl6.flowi6_mark = ireq->ir_mark; |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index c5429a636f1a..6b8a8a9091fa 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -93,10 +93,9 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) | |||
93 | { | 93 | { |
94 | struct dst_entry *dst = skb_dst(skb); | 94 | struct dst_entry *dst = skb_dst(skb); |
95 | 95 | ||
96 | if (dst) { | 96 | if (dst && dst_hold_safe(dst)) { |
97 | const struct rt6_info *rt = (const struct rt6_info *)dst; | 97 | const struct rt6_info *rt = (const struct rt6_info *)dst; |
98 | 98 | ||
99 | dst_hold(dst); | ||
100 | sk->sk_rx_dst = dst; | 99 | sk->sk_rx_dst = dst; |
101 | inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; | 100 | inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; |
102 | inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); | 101 | inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); |
@@ -120,6 +119,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
120 | struct ipv6_pinfo *np = inet6_sk(sk); | 119 | struct ipv6_pinfo *np = inet6_sk(sk); |
121 | struct tcp_sock *tp = tcp_sk(sk); | 120 | struct tcp_sock *tp = tcp_sk(sk); |
122 | struct in6_addr *saddr = NULL, *final_p, final; | 121 | struct in6_addr *saddr = NULL, *final_p, final; |
122 | struct ipv6_txoptions *opt; | ||
123 | struct flowi6 fl6; | 123 | struct flowi6 fl6; |
124 | struct dst_entry *dst; | 124 | struct dst_entry *dst; |
125 | int addr_type; | 125 | int addr_type; |
@@ -235,7 +235,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
235 | fl6.fl6_dport = usin->sin6_port; | 235 | fl6.fl6_dport = usin->sin6_port; |
236 | fl6.fl6_sport = inet->inet_sport; | 236 | fl6.fl6_sport = inet->inet_sport; |
237 | 237 | ||
238 | final_p = fl6_update_dst(&fl6, np->opt, &final); | 238 | opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); |
239 | final_p = fl6_update_dst(&fl6, opt, &final); | ||
239 | 240 | ||
240 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); | 241 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
241 | 242 | ||
@@ -255,7 +256,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
255 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; | 256 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; |
256 | 257 | ||
257 | sk->sk_gso_type = SKB_GSO_TCPV6; | 258 | sk->sk_gso_type = SKB_GSO_TCPV6; |
258 | __ip6_dst_store(sk, dst, NULL, NULL); | 259 | ip6_dst_store(sk, dst, NULL, NULL); |
259 | 260 | ||
260 | if (tcp_death_row.sysctl_tw_recycle && | 261 | if (tcp_death_row.sysctl_tw_recycle && |
261 | !tp->rx_opt.ts_recent_stamp && | 262 | !tp->rx_opt.ts_recent_stamp && |
@@ -263,9 +264,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
263 | tcp_fetch_timewait_stamp(sk, dst); | 264 | tcp_fetch_timewait_stamp(sk, dst); |
264 | 265 | ||
265 | icsk->icsk_ext_hdr_len = 0; | 266 | icsk->icsk_ext_hdr_len = 0; |
266 | if (np->opt) | 267 | if (opt) |
267 | icsk->icsk_ext_hdr_len = (np->opt->opt_flen + | 268 | icsk->icsk_ext_hdr_len = opt->opt_flen + |
268 | np->opt->opt_nflen); | 269 | opt->opt_nflen; |
269 | 270 | ||
270 | tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); | 271 | tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); |
271 | 272 | ||
@@ -461,7 +462,8 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, | |||
461 | if (np->repflow && ireq->pktopts) | 462 | if (np->repflow && ireq->pktopts) |
462 | fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts)); | 463 | fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts)); |
463 | 464 | ||
464 | err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass); | 465 | err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), |
466 | np->tclass); | ||
465 | err = net_xmit_eval(err); | 467 | err = net_xmit_eval(err); |
466 | } | 468 | } |
467 | 469 | ||
@@ -972,6 +974,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * | |||
972 | struct inet_request_sock *ireq; | 974 | struct inet_request_sock *ireq; |
973 | struct ipv6_pinfo *newnp; | 975 | struct ipv6_pinfo *newnp; |
974 | const struct ipv6_pinfo *np = inet6_sk(sk); | 976 | const struct ipv6_pinfo *np = inet6_sk(sk); |
977 | struct ipv6_txoptions *opt; | ||
975 | struct tcp6_sock *newtcp6sk; | 978 | struct tcp6_sock *newtcp6sk; |
976 | struct inet_sock *newinet; | 979 | struct inet_sock *newinet; |
977 | struct tcp_sock *newtp; | 980 | struct tcp_sock *newtp; |
@@ -1056,7 +1059,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * | |||
1056 | */ | 1059 | */ |
1057 | 1060 | ||
1058 | newsk->sk_gso_type = SKB_GSO_TCPV6; | 1061 | newsk->sk_gso_type = SKB_GSO_TCPV6; |
1059 | __ip6_dst_store(newsk, dst, NULL, NULL); | 1062 | ip6_dst_store(newsk, dst, NULL, NULL); |
1060 | inet6_sk_rx_dst_set(newsk, skb); | 1063 | inet6_sk_rx_dst_set(newsk, skb); |
1061 | 1064 | ||
1062 | newtcp6sk = (struct tcp6_sock *)newsk; | 1065 | newtcp6sk = (struct tcp6_sock *)newsk; |
@@ -1098,13 +1101,15 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * | |||
1098 | but we make one more one thing there: reattach optmem | 1101 | but we make one more one thing there: reattach optmem |
1099 | to newsk. | 1102 | to newsk. |
1100 | */ | 1103 | */ |
1101 | if (np->opt) | 1104 | opt = rcu_dereference(np->opt); |
1102 | newnp->opt = ipv6_dup_options(newsk, np->opt); | 1105 | if (opt) { |
1103 | 1106 | opt = ipv6_dup_options(newsk, opt); | |
1107 | RCU_INIT_POINTER(newnp->opt, opt); | ||
1108 | } | ||
1104 | inet_csk(newsk)->icsk_ext_hdr_len = 0; | 1109 | inet_csk(newsk)->icsk_ext_hdr_len = 0; |
1105 | if (newnp->opt) | 1110 | if (opt) |
1106 | inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen + | 1111 | inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen + |
1107 | newnp->opt->opt_flen); | 1112 | opt->opt_flen; |
1108 | 1113 | ||
1109 | tcp_ca_openreq_child(newsk, dst); | 1114 | tcp_ca_openreq_child(newsk, dst); |
1110 | 1115 | ||
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 01bcb49619ee..9da3287a3923 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -1110,6 +1110,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
1110 | DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); | 1110 | DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); |
1111 | struct in6_addr *daddr, *final_p, final; | 1111 | struct in6_addr *daddr, *final_p, final; |
1112 | struct ipv6_txoptions *opt = NULL; | 1112 | struct ipv6_txoptions *opt = NULL; |
1113 | struct ipv6_txoptions *opt_to_free = NULL; | ||
1113 | struct ip6_flowlabel *flowlabel = NULL; | 1114 | struct ip6_flowlabel *flowlabel = NULL; |
1114 | struct flowi6 fl6; | 1115 | struct flowi6 fl6; |
1115 | struct dst_entry *dst; | 1116 | struct dst_entry *dst; |
@@ -1263,8 +1264,10 @@ do_udp_sendmsg: | |||
1263 | opt = NULL; | 1264 | opt = NULL; |
1264 | connected = 0; | 1265 | connected = 0; |
1265 | } | 1266 | } |
1266 | if (!opt) | 1267 | if (!opt) { |
1267 | opt = np->opt; | 1268 | opt = txopt_get(np); |
1269 | opt_to_free = opt; | ||
1270 | } | ||
1268 | if (flowlabel) | 1271 | if (flowlabel) |
1269 | opt = fl6_merge_options(&opt_space, flowlabel, opt); | 1272 | opt = fl6_merge_options(&opt_space, flowlabel, opt); |
1270 | opt = ipv6_fixup_options(&opt_space, opt); | 1273 | opt = ipv6_fixup_options(&opt_space, opt); |
@@ -1373,6 +1376,7 @@ release_dst: | |||
1373 | out: | 1376 | out: |
1374 | dst_release(dst); | 1377 | dst_release(dst); |
1375 | fl6_sock_release(flowlabel); | 1378 | fl6_sock_release(flowlabel); |
1379 | txopt_put(opt_to_free); | ||
1376 | if (!err) | 1380 | if (!err) |
1377 | return len; | 1381 | return len; |
1378 | /* | 1382 | /* |
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index e6aa48b5395c..923abd6b3064 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
@@ -1086,6 +1086,9 @@ static int irda_create(struct net *net, struct socket *sock, int protocol, | |||
1086 | struct sock *sk; | 1086 | struct sock *sk; |
1087 | struct irda_sock *self; | 1087 | struct irda_sock *self; |
1088 | 1088 | ||
1089 | if (protocol < 0 || protocol > SK_PROTOCOL_MAX) | ||
1090 | return -EINVAL; | ||
1091 | |||
1089 | if (net != &init_net) | 1092 | if (net != &init_net) |
1090 | return -EAFNOSUPPORT; | 1093 | return -EAFNOSUPPORT; |
1091 | 1094 | ||
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index fcb2752419c6..435608c4306d 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -1483,7 +1483,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock, | |||
1483 | if (sock_writeable(sk) && iucv_below_msglim(sk)) | 1483 | if (sock_writeable(sk) && iucv_below_msglim(sk)) |
1484 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | 1484 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; |
1485 | else | 1485 | else |
1486 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 1486 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
1487 | 1487 | ||
1488 | return mask; | 1488 | return mask; |
1489 | } | 1489 | } |
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index aca38d8aed8e..a2c8747d2936 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -486,6 +486,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
486 | DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name); | 486 | DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name); |
487 | struct in6_addr *daddr, *final_p, final; | 487 | struct in6_addr *daddr, *final_p, final; |
488 | struct ipv6_pinfo *np = inet6_sk(sk); | 488 | struct ipv6_pinfo *np = inet6_sk(sk); |
489 | struct ipv6_txoptions *opt_to_free = NULL; | ||
489 | struct ipv6_txoptions *opt = NULL; | 490 | struct ipv6_txoptions *opt = NULL; |
490 | struct ip6_flowlabel *flowlabel = NULL; | 491 | struct ip6_flowlabel *flowlabel = NULL; |
491 | struct dst_entry *dst = NULL; | 492 | struct dst_entry *dst = NULL; |
@@ -575,8 +576,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
575 | opt = NULL; | 576 | opt = NULL; |
576 | } | 577 | } |
577 | 578 | ||
578 | if (opt == NULL) | 579 | if (!opt) { |
579 | opt = np->opt; | 580 | opt = txopt_get(np); |
581 | opt_to_free = opt; | ||
582 | } | ||
580 | if (flowlabel) | 583 | if (flowlabel) |
581 | opt = fl6_merge_options(&opt_space, flowlabel, opt); | 584 | opt = fl6_merge_options(&opt_space, flowlabel, opt); |
582 | opt = ipv6_fixup_options(&opt_space, opt); | 585 | opt = ipv6_fixup_options(&opt_space, opt); |
@@ -631,6 +634,7 @@ done: | |||
631 | dst_release(dst); | 634 | dst_release(dst); |
632 | out: | 635 | out: |
633 | fl6_sock_release(flowlabel); | 636 | fl6_sock_release(flowlabel); |
637 | txopt_put(opt_to_free); | ||
634 | 638 | ||
635 | return err < 0 ? err : len; | 639 | return err < 0 ? err : len; |
636 | 640 | ||
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index a758eb84e8f0..ff757181b0a8 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -500,7 +500,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) | |||
500 | /* send AddBA request */ | 500 | /* send AddBA request */ |
501 | ieee80211_send_addba_request(sdata, sta->sta.addr, tid, | 501 | ieee80211_send_addba_request(sdata, sta->sta.addr, tid, |
502 | tid_tx->dialog_token, start_seq_num, | 502 | tid_tx->dialog_token, start_seq_num, |
503 | local->hw.max_tx_aggregation_subframes, | 503 | IEEE80211_MAX_AMPDU_BUF, |
504 | tid_tx->timeout); | 504 | tid_tx->timeout); |
505 | } | 505 | } |
506 | 506 | ||
@@ -926,6 +926,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, | |||
926 | amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK; | 926 | amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK; |
927 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; | 927 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; |
928 | buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; | 928 | buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; |
929 | buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes); | ||
929 | 930 | ||
930 | mutex_lock(&sta->ampdu_mlme.mtx); | 931 | mutex_lock(&sta->ampdu_mlme.mtx); |
931 | 932 | ||
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index c2bd1b6a6922..c12f348138ac 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -1169,8 +1169,7 @@ static int sta_apply_parameters(struct ieee80211_local *local, | |||
1169 | * rc isn't initialized here yet, so ignore it | 1169 | * rc isn't initialized here yet, so ignore it |
1170 | */ | 1170 | */ |
1171 | __ieee80211_vht_handle_opmode(sdata, sta, | 1171 | __ieee80211_vht_handle_opmode(sdata, sta, |
1172 | params->opmode_notif, | 1172 | params->opmode_notif, band); |
1173 | band, false); | ||
1174 | } | 1173 | } |
1175 | 1174 | ||
1176 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 1175 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
@@ -3454,8 +3453,12 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, | |||
3454 | goto out_unlock; | 3453 | goto out_unlock; |
3455 | } | 3454 | } |
3456 | } else { | 3455 | } else { |
3457 | /* for cookie below */ | 3456 | /* Assign a dummy non-zero cookie, it's not sent to |
3458 | ack_skb = skb; | 3457 | * userspace in this case but we rely on its value |
3458 | * internally in the need_offchan case to distinguish | ||
3459 | * mgmt-tx from remain-on-channel. | ||
3460 | */ | ||
3461 | *cookie = 0xffffffff; | ||
3459 | } | 3462 | } |
3460 | 3463 | ||
3461 | if (!need_offchan) { | 3464 | if (!need_offchan) { |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index d832bd59236b..5322b4c71630 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -1709,10 +1709,10 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta); | |||
1709 | void ieee80211_sta_set_rx_nss(struct sta_info *sta); | 1709 | void ieee80211_sta_set_rx_nss(struct sta_info *sta); |
1710 | u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, | 1710 | u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, |
1711 | struct sta_info *sta, u8 opmode, | 1711 | struct sta_info *sta, u8 opmode, |
1712 | enum ieee80211_band band, bool nss_only); | 1712 | enum ieee80211_band band); |
1713 | void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, | 1713 | void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, |
1714 | struct sta_info *sta, u8 opmode, | 1714 | struct sta_info *sta, u8 opmode, |
1715 | enum ieee80211_band band, bool nss_only); | 1715 | enum ieee80211_band band); |
1716 | void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata, | 1716 | void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata, |
1717 | struct ieee80211_sta_vht_cap *vht_cap); | 1717 | struct ieee80211_sta_vht_cap *vht_cap); |
1718 | void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, | 1718 | void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index d0dc1bfaeec2..c9e325d2e120 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -76,7 +76,8 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata) | |||
76 | void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata, | 76 | void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata, |
77 | bool update_bss) | 77 | bool update_bss) |
78 | { | 78 | { |
79 | if (__ieee80211_recalc_txpower(sdata) || update_bss) | 79 | if (__ieee80211_recalc_txpower(sdata) || |
80 | (update_bss && ieee80211_sdata_running(sdata))) | ||
80 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); | 81 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); |
81 | } | 82 | } |
82 | 83 | ||
@@ -1861,6 +1862,7 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) | |||
1861 | unregister_netdevice(sdata->dev); | 1862 | unregister_netdevice(sdata->dev); |
1862 | } else { | 1863 | } else { |
1863 | cfg80211_unregister_wdev(&sdata->wdev); | 1864 | cfg80211_unregister_wdev(&sdata->wdev); |
1865 | ieee80211_teardown_sdata(sdata); | ||
1864 | kfree(sdata); | 1866 | kfree(sdata); |
1865 | } | 1867 | } |
1866 | } | 1868 | } |
@@ -1870,7 +1872,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata) | |||
1870 | if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state))) | 1872 | if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state))) |
1871 | return; | 1873 | return; |
1872 | ieee80211_do_stop(sdata, true); | 1874 | ieee80211_do_stop(sdata, true); |
1873 | ieee80211_teardown_sdata(sdata); | ||
1874 | } | 1875 | } |
1875 | 1876 | ||
1876 | void ieee80211_remove_interfaces(struct ieee80211_local *local) | 1877 | void ieee80211_remove_interfaces(struct ieee80211_local *local) |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 858f6b1cb149..175ffcf7fb06 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -541,8 +541,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, | |||
541 | NL80211_FEATURE_HT_IBSS | | 541 | NL80211_FEATURE_HT_IBSS | |
542 | NL80211_FEATURE_VIF_TXPOWER | | 542 | NL80211_FEATURE_VIF_TXPOWER | |
543 | NL80211_FEATURE_MAC_ON_CREATE | | 543 | NL80211_FEATURE_MAC_ON_CREATE | |
544 | NL80211_FEATURE_USERSPACE_MPM | | 544 | NL80211_FEATURE_USERSPACE_MPM; |
545 | NL80211_FEATURE_FULL_AP_CLIENT_STATE; | ||
546 | 545 | ||
547 | if (!ops->hw_scan) | 546 | if (!ops->hw_scan) |
548 | wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | | 547 | wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index b890e225a8f1..b3b44a5dd375 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
@@ -779,10 +779,8 @@ void mesh_plink_broken(struct sta_info *sta) | |||
779 | static void mesh_path_node_reclaim(struct rcu_head *rp) | 779 | static void mesh_path_node_reclaim(struct rcu_head *rp) |
780 | { | 780 | { |
781 | struct mpath_node *node = container_of(rp, struct mpath_node, rcu); | 781 | struct mpath_node *node = container_of(rp, struct mpath_node, rcu); |
782 | struct ieee80211_sub_if_data *sdata = node->mpath->sdata; | ||
783 | 782 | ||
784 | del_timer_sync(&node->mpath->timer); | 783 | del_timer_sync(&node->mpath->timer); |
785 | atomic_dec(&sdata->u.mesh.mpaths); | ||
786 | kfree(node->mpath); | 784 | kfree(node->mpath); |
787 | kfree(node); | 785 | kfree(node); |
788 | } | 786 | } |
@@ -790,8 +788,9 @@ static void mesh_path_node_reclaim(struct rcu_head *rp) | |||
790 | /* needs to be called with the corresponding hashwlock taken */ | 788 | /* needs to be called with the corresponding hashwlock taken */ |
791 | static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) | 789 | static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) |
792 | { | 790 | { |
793 | struct mesh_path *mpath; | 791 | struct mesh_path *mpath = node->mpath; |
794 | mpath = node->mpath; | 792 | struct ieee80211_sub_if_data *sdata = node->mpath->sdata; |
793 | |||
795 | spin_lock(&mpath->state_lock); | 794 | spin_lock(&mpath->state_lock); |
796 | mpath->flags |= MESH_PATH_RESOLVING; | 795 | mpath->flags |= MESH_PATH_RESOLVING; |
797 | if (mpath->is_gate) | 796 | if (mpath->is_gate) |
@@ -799,6 +798,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) | |||
799 | hlist_del_rcu(&node->list); | 798 | hlist_del_rcu(&node->list); |
800 | call_rcu(&node->rcu, mesh_path_node_reclaim); | 799 | call_rcu(&node->rcu, mesh_path_node_reclaim); |
801 | spin_unlock(&mpath->state_lock); | 800 | spin_unlock(&mpath->state_lock); |
801 | atomic_dec(&sdata->u.mesh.mpaths); | ||
802 | atomic_dec(&tbl->entries); | 802 | atomic_dec(&tbl->entries); |
803 | } | 803 | } |
804 | 804 | ||
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index b140cc6651f4..3aa04344942b 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1379,21 +1379,26 @@ static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, | |||
1379 | */ | 1379 | */ |
1380 | if (has_80211h_pwr && | 1380 | if (has_80211h_pwr && |
1381 | (!has_cisco_pwr || pwr_level_80211h <= pwr_level_cisco)) { | 1381 | (!has_cisco_pwr || pwr_level_80211h <= pwr_level_cisco)) { |
1382 | new_ap_level = pwr_level_80211h; | ||
1383 | |||
1384 | if (sdata->ap_power_level == new_ap_level) | ||
1385 | return 0; | ||
1386 | |||
1382 | sdata_dbg(sdata, | 1387 | sdata_dbg(sdata, |
1383 | "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n", | 1388 | "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n", |
1384 | pwr_level_80211h, chan_pwr, pwr_reduction_80211h, | 1389 | pwr_level_80211h, chan_pwr, pwr_reduction_80211h, |
1385 | sdata->u.mgd.bssid); | 1390 | sdata->u.mgd.bssid); |
1386 | new_ap_level = pwr_level_80211h; | ||
1387 | } else { /* has_cisco_pwr is always true here. */ | 1391 | } else { /* has_cisco_pwr is always true here. */ |
1392 | new_ap_level = pwr_level_cisco; | ||
1393 | |||
1394 | if (sdata->ap_power_level == new_ap_level) | ||
1395 | return 0; | ||
1396 | |||
1388 | sdata_dbg(sdata, | 1397 | sdata_dbg(sdata, |
1389 | "Limiting TX power to %d dBm as advertised by %pM\n", | 1398 | "Limiting TX power to %d dBm as advertised by %pM\n", |
1390 | pwr_level_cisco, sdata->u.mgd.bssid); | 1399 | pwr_level_cisco, sdata->u.mgd.bssid); |
1391 | new_ap_level = pwr_level_cisco; | ||
1392 | } | 1400 | } |
1393 | 1401 | ||
1394 | if (sdata->ap_power_level == new_ap_level) | ||
1395 | return 0; | ||
1396 | |||
1397 | sdata->ap_power_level = new_ap_level; | 1402 | sdata->ap_power_level = new_ap_level; |
1398 | if (__ieee80211_recalc_txpower(sdata)) | 1403 | if (__ieee80211_recalc_txpower(sdata)) |
1399 | return BSS_CHANGED_TXPOWER; | 1404 | return BSS_CHANGED_TXPOWER; |
@@ -3575,7 +3580,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
3575 | 3580 | ||
3576 | if (sta && elems.opmode_notif) | 3581 | if (sta && elems.opmode_notif) |
3577 | ieee80211_vht_handle_opmode(sdata, sta, *elems.opmode_notif, | 3582 | ieee80211_vht_handle_opmode(sdata, sta, *elems.opmode_notif, |
3578 | rx_status->band, true); | 3583 | rx_status->band); |
3579 | mutex_unlock(&local->sta_mtx); | 3584 | mutex_unlock(&local->sta_mtx); |
3580 | 3585 | ||
3581 | changed |= ieee80211_handle_pwr_constr(sdata, chan, mgmt, | 3586 | changed |= ieee80211_handle_pwr_constr(sdata, chan, mgmt, |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 8bae5de0dc44..82af407fea7a 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -2736,8 +2736,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
2736 | opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; | 2736 | opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; |
2737 | 2737 | ||
2738 | ieee80211_vht_handle_opmode(rx->sdata, rx->sta, | 2738 | ieee80211_vht_handle_opmode(rx->sdata, rx->sta, |
2739 | opmode, status->band, | 2739 | opmode, status->band); |
2740 | false); | ||
2741 | goto handled; | 2740 | goto handled; |
2742 | } | 2741 | } |
2743 | default: | 2742 | default: |
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 4aeca4b0c3cb..a413e52f7691 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c | |||
@@ -597,8 +597,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, | |||
597 | /* We need to ensure power level is at max for scanning. */ | 597 | /* We need to ensure power level is at max for scanning. */ |
598 | ieee80211_hw_config(local, 0); | 598 | ieee80211_hw_config(local, 0); |
599 | 599 | ||
600 | if ((req->channels[0]->flags & | 600 | if ((req->channels[0]->flags & (IEEE80211_CHAN_NO_IR | |
601 | IEEE80211_CHAN_NO_IR) || | 601 | IEEE80211_CHAN_RADAR)) || |
602 | !req->n_ssids) { | 602 | !req->n_ssids) { |
603 | next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; | 603 | next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; |
604 | } else { | 604 | } else { |
@@ -645,7 +645,7 @@ ieee80211_scan_get_channel_time(struct ieee80211_channel *chan) | |||
645 | * TODO: channel switching also consumes quite some time, | 645 | * TODO: channel switching also consumes quite some time, |
646 | * add that delay as well to get a better estimation | 646 | * add that delay as well to get a better estimation |
647 | */ | 647 | */ |
648 | if (chan->flags & IEEE80211_CHAN_NO_IR) | 648 | if (chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) |
649 | return IEEE80211_PASSIVE_CHANNEL_TIME; | 649 | return IEEE80211_PASSIVE_CHANNEL_TIME; |
650 | return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME; | 650 | return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME; |
651 | } | 651 | } |
@@ -777,7 +777,8 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, | |||
777 | * | 777 | * |
778 | * In any case, it is not necessary for a passive scan. | 778 | * In any case, it is not necessary for a passive scan. |
779 | */ | 779 | */ |
780 | if (chan->flags & IEEE80211_CHAN_NO_IR || !scan_req->n_ssids) { | 780 | if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) || |
781 | !scan_req->n_ssids) { | ||
781 | *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; | 782 | *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; |
782 | local->next_scan_state = SCAN_DECISION; | 783 | local->next_scan_state = SCAN_DECISION; |
783 | return; | 784 | return; |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 74058020b7d6..33344f5a66a8 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -1641,6 +1641,29 @@ void ieee80211_stop_device(struct ieee80211_local *local) | |||
1641 | drv_stop(local); | 1641 | drv_stop(local); |
1642 | } | 1642 | } |
1643 | 1643 | ||
1644 | static void ieee80211_flush_completed_scan(struct ieee80211_local *local, | ||
1645 | bool aborted) | ||
1646 | { | ||
1647 | /* It's possible that we don't handle the scan completion in | ||
1648 | * time during suspend, so if it's still marked as completed | ||
1649 | * here, queue the work and flush it to clean things up. | ||
1650 | * Instead of calling the worker function directly here, we | ||
1651 | * really queue it to avoid potential races with other flows | ||
1652 | * scheduling the same work. | ||
1653 | */ | ||
1654 | if (test_bit(SCAN_COMPLETED, &local->scanning)) { | ||
1655 | /* If coming from reconfiguration failure, abort the scan so | ||
1656 | * we don't attempt to continue a partial HW scan - which is | ||
1657 | * possible otherwise if (e.g.) the 2.4 GHz portion was the | ||
1658 | * completed scan, and a 5 GHz portion is still pending. | ||
1659 | */ | ||
1660 | if (aborted) | ||
1661 | set_bit(SCAN_ABORTED, &local->scanning); | ||
1662 | ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0); | ||
1663 | flush_delayed_work(&local->scan_work); | ||
1664 | } | ||
1665 | } | ||
1666 | |||
1644 | static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local) | 1667 | static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local) |
1645 | { | 1668 | { |
1646 | struct ieee80211_sub_if_data *sdata; | 1669 | struct ieee80211_sub_if_data *sdata; |
@@ -1660,6 +1683,8 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local) | |||
1660 | local->suspended = false; | 1683 | local->suspended = false; |
1661 | local->in_reconfig = false; | 1684 | local->in_reconfig = false; |
1662 | 1685 | ||
1686 | ieee80211_flush_completed_scan(local, true); | ||
1687 | |||
1663 | /* scheduled scan clearly can't be running any more, but tell | 1688 | /* scheduled scan clearly can't be running any more, but tell |
1664 | * cfg80211 and clear local state | 1689 | * cfg80211 and clear local state |
1665 | */ | 1690 | */ |
@@ -1698,6 +1723,27 @@ static void ieee80211_assign_chanctx(struct ieee80211_local *local, | |||
1698 | mutex_unlock(&local->chanctx_mtx); | 1723 | mutex_unlock(&local->chanctx_mtx); |
1699 | } | 1724 | } |
1700 | 1725 | ||
1726 | static void ieee80211_reconfig_stations(struct ieee80211_sub_if_data *sdata) | ||
1727 | { | ||
1728 | struct ieee80211_local *local = sdata->local; | ||
1729 | struct sta_info *sta; | ||
1730 | |||
1731 | /* add STAs back */ | ||
1732 | mutex_lock(&local->sta_mtx); | ||
1733 | list_for_each_entry(sta, &local->sta_list, list) { | ||
1734 | enum ieee80211_sta_state state; | ||
1735 | |||
1736 | if (!sta->uploaded || sta->sdata != sdata) | ||
1737 | continue; | ||
1738 | |||
1739 | for (state = IEEE80211_STA_NOTEXIST; | ||
1740 | state < sta->sta_state; state++) | ||
1741 | WARN_ON(drv_sta_state(local, sta->sdata, sta, state, | ||
1742 | state + 1)); | ||
1743 | } | ||
1744 | mutex_unlock(&local->sta_mtx); | ||
1745 | } | ||
1746 | |||
1701 | int ieee80211_reconfig(struct ieee80211_local *local) | 1747 | int ieee80211_reconfig(struct ieee80211_local *local) |
1702 | { | 1748 | { |
1703 | struct ieee80211_hw *hw = &local->hw; | 1749 | struct ieee80211_hw *hw = &local->hw; |
@@ -1833,50 +1879,11 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1833 | WARN_ON(drv_add_chanctx(local, ctx)); | 1879 | WARN_ON(drv_add_chanctx(local, ctx)); |
1834 | mutex_unlock(&local->chanctx_mtx); | 1880 | mutex_unlock(&local->chanctx_mtx); |
1835 | 1881 | ||
1836 | list_for_each_entry(sdata, &local->interfaces, list) { | ||
1837 | if (!ieee80211_sdata_running(sdata)) | ||
1838 | continue; | ||
1839 | ieee80211_assign_chanctx(local, sdata); | ||
1840 | } | ||
1841 | |||
1842 | sdata = rtnl_dereference(local->monitor_sdata); | 1882 | sdata = rtnl_dereference(local->monitor_sdata); |
1843 | if (sdata && ieee80211_sdata_running(sdata)) | 1883 | if (sdata && ieee80211_sdata_running(sdata)) |
1844 | ieee80211_assign_chanctx(local, sdata); | 1884 | ieee80211_assign_chanctx(local, sdata); |
1845 | } | 1885 | } |
1846 | 1886 | ||
1847 | /* add STAs back */ | ||
1848 | mutex_lock(&local->sta_mtx); | ||
1849 | list_for_each_entry(sta, &local->sta_list, list) { | ||
1850 | enum ieee80211_sta_state state; | ||
1851 | |||
1852 | if (!sta->uploaded) | ||
1853 | continue; | ||
1854 | |||
1855 | /* AP-mode stations will be added later */ | ||
1856 | if (sta->sdata->vif.type == NL80211_IFTYPE_AP) | ||
1857 | continue; | ||
1858 | |||
1859 | for (state = IEEE80211_STA_NOTEXIST; | ||
1860 | state < sta->sta_state; state++) | ||
1861 | WARN_ON(drv_sta_state(local, sta->sdata, sta, state, | ||
1862 | state + 1)); | ||
1863 | } | ||
1864 | mutex_unlock(&local->sta_mtx); | ||
1865 | |||
1866 | /* reconfigure tx conf */ | ||
1867 | if (hw->queues >= IEEE80211_NUM_ACS) { | ||
1868 | list_for_each_entry(sdata, &local->interfaces, list) { | ||
1869 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN || | ||
1870 | sdata->vif.type == NL80211_IFTYPE_MONITOR || | ||
1871 | !ieee80211_sdata_running(sdata)) | ||
1872 | continue; | ||
1873 | |||
1874 | for (i = 0; i < IEEE80211_NUM_ACS; i++) | ||
1875 | drv_conf_tx(local, sdata, i, | ||
1876 | &sdata->tx_conf[i]); | ||
1877 | } | ||
1878 | } | ||
1879 | |||
1880 | /* reconfigure hardware */ | 1887 | /* reconfigure hardware */ |
1881 | ieee80211_hw_config(local, ~0); | 1888 | ieee80211_hw_config(local, ~0); |
1882 | 1889 | ||
@@ -1889,6 +1896,22 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1889 | if (!ieee80211_sdata_running(sdata)) | 1896 | if (!ieee80211_sdata_running(sdata)) |
1890 | continue; | 1897 | continue; |
1891 | 1898 | ||
1899 | ieee80211_assign_chanctx(local, sdata); | ||
1900 | |||
1901 | switch (sdata->vif.type) { | ||
1902 | case NL80211_IFTYPE_AP_VLAN: | ||
1903 | case NL80211_IFTYPE_MONITOR: | ||
1904 | break; | ||
1905 | default: | ||
1906 | ieee80211_reconfig_stations(sdata); | ||
1907 | /* fall through */ | ||
1908 | case NL80211_IFTYPE_AP: /* AP stations are handled later */ | ||
1909 | for (i = 0; i < IEEE80211_NUM_ACS; i++) | ||
1910 | drv_conf_tx(local, sdata, i, | ||
1911 | &sdata->tx_conf[i]); | ||
1912 | break; | ||
1913 | } | ||
1914 | |||
1892 | /* common change flags for all interface types */ | 1915 | /* common change flags for all interface types */ |
1893 | changed = BSS_CHANGED_ERP_CTS_PROT | | 1916 | changed = BSS_CHANGED_ERP_CTS_PROT | |
1894 | BSS_CHANGED_ERP_PREAMBLE | | 1917 | BSS_CHANGED_ERP_PREAMBLE | |
@@ -2074,17 +2097,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
2074 | mb(); | 2097 | mb(); |
2075 | local->resuming = false; | 2098 | local->resuming = false; |
2076 | 2099 | ||
2077 | /* It's possible that we don't handle the scan completion in | 2100 | ieee80211_flush_completed_scan(local, false); |
2078 | * time during suspend, so if it's still marked as completed | ||
2079 | * here, queue the work and flush it to clean things up. | ||
2080 | * Instead of calling the worker function directly here, we | ||
2081 | * really queue it to avoid potential races with other flows | ||
2082 | * scheduling the same work. | ||
2083 | */ | ||
2084 | if (test_bit(SCAN_COMPLETED, &local->scanning)) { | ||
2085 | ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0); | ||
2086 | flush_delayed_work(&local->scan_work); | ||
2087 | } | ||
2088 | 2101 | ||
2089 | if (local->open_count && !reconfig_due_to_wowlan) | 2102 | if (local->open_count && !reconfig_due_to_wowlan) |
2090 | drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND); | 2103 | drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND); |
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index ff1c798921a6..c38b2f07a919 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c | |||
@@ -378,7 +378,7 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta) | |||
378 | 378 | ||
379 | u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, | 379 | u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, |
380 | struct sta_info *sta, u8 opmode, | 380 | struct sta_info *sta, u8 opmode, |
381 | enum ieee80211_band band, bool nss_only) | 381 | enum ieee80211_band band) |
382 | { | 382 | { |
383 | struct ieee80211_local *local = sdata->local; | 383 | struct ieee80211_local *local = sdata->local; |
384 | struct ieee80211_supported_band *sband; | 384 | struct ieee80211_supported_band *sband; |
@@ -401,9 +401,6 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, | |||
401 | changed |= IEEE80211_RC_NSS_CHANGED; | 401 | changed |= IEEE80211_RC_NSS_CHANGED; |
402 | } | 402 | } |
403 | 403 | ||
404 | if (nss_only) | ||
405 | return changed; | ||
406 | |||
407 | switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) { | 404 | switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) { |
408 | case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: | 405 | case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: |
409 | sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20; | 406 | sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20; |
@@ -430,13 +427,12 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, | |||
430 | 427 | ||
431 | void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, | 428 | void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, |
432 | struct sta_info *sta, u8 opmode, | 429 | struct sta_info *sta, u8 opmode, |
433 | enum ieee80211_band band, bool nss_only) | 430 | enum ieee80211_band band) |
434 | { | 431 | { |
435 | struct ieee80211_local *local = sdata->local; | 432 | struct ieee80211_local *local = sdata->local; |
436 | struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; | 433 | struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; |
437 | 434 | ||
438 | u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, | 435 | u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band); |
439 | band, nss_only); | ||
440 | 436 | ||
441 | if (changed > 0) | 437 | if (changed > 0) |
442 | rate_control_rate_update(local, sband, sta, changed); | 438 | rate_control_rate_update(local, sband, sta, changed); |
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index c70d750148b6..c32fc411a911 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c | |||
@@ -27,6 +27,8 @@ | |||
27 | */ | 27 | */ |
28 | #define MAX_MP_SELECT_LABELS 4 | 28 | #define MAX_MP_SELECT_LABELS 4 |
29 | 29 | ||
30 | #define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1) | ||
31 | |||
30 | static int zero = 0; | 32 | static int zero = 0; |
31 | static int label_limit = (1 << 20) - 1; | 33 | static int label_limit = (1 << 20) - 1; |
32 | 34 | ||
@@ -317,7 +319,13 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, | |||
317 | } | 319 | } |
318 | } | 320 | } |
319 | 321 | ||
320 | err = neigh_xmit(nh->nh_via_table, out_dev, mpls_nh_via(rt, nh), skb); | 322 | /* If via wasn't specified then send out using device address */ |
323 | if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC) | ||
324 | err = neigh_xmit(NEIGH_LINK_TABLE, out_dev, | ||
325 | out_dev->dev_addr, skb); | ||
326 | else | ||
327 | err = neigh_xmit(nh->nh_via_table, out_dev, | ||
328 | mpls_nh_via(rt, nh), skb); | ||
321 | if (err) | 329 | if (err) |
322 | net_dbg_ratelimited("%s: packet transmission failed: %d\n", | 330 | net_dbg_ratelimited("%s: packet transmission failed: %d\n", |
323 | __func__, err); | 331 | __func__, err); |
@@ -534,6 +542,10 @@ static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt, | |||
534 | if (!mpls_dev_get(dev)) | 542 | if (!mpls_dev_get(dev)) |
535 | goto errout; | 543 | goto errout; |
536 | 544 | ||
545 | if ((nh->nh_via_table == NEIGH_LINK_TABLE) && | ||
546 | (dev->addr_len != nh->nh_via_alen)) | ||
547 | goto errout; | ||
548 | |||
537 | RCU_INIT_POINTER(nh->nh_dev, dev); | 549 | RCU_INIT_POINTER(nh->nh_dev, dev); |
538 | 550 | ||
539 | return 0; | 551 | return 0; |
@@ -592,10 +604,14 @@ static int mpls_nh_build(struct net *net, struct mpls_route *rt, | |||
592 | goto errout; | 604 | goto errout; |
593 | } | 605 | } |
594 | 606 | ||
595 | err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table, | 607 | if (via) { |
596 | __mpls_nh_via(rt, nh)); | 608 | err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table, |
597 | if (err) | 609 | __mpls_nh_via(rt, nh)); |
598 | goto errout; | 610 | if (err) |
611 | goto errout; | ||
612 | } else { | ||
613 | nh->nh_via_table = MPLS_NEIGH_TABLE_UNSPEC; | ||
614 | } | ||
599 | 615 | ||
600 | err = mpls_nh_assign_dev(net, rt, nh, oif); | 616 | err = mpls_nh_assign_dev(net, rt, nh, oif); |
601 | if (err) | 617 | if (err) |
@@ -677,9 +693,6 @@ static int mpls_nh_build_multi(struct mpls_route_config *cfg, | |||
677 | nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST); | 693 | nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST); |
678 | } | 694 | } |
679 | 695 | ||
680 | if (!nla_via) | ||
681 | goto errout; | ||
682 | |||
683 | err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh, | 696 | err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh, |
684 | rtnh->rtnh_ifindex, nla_via, | 697 | rtnh->rtnh_ifindex, nla_via, |
685 | nla_newdst); | 698 | nla_newdst); |
@@ -1118,6 +1131,7 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1118 | 1131 | ||
1119 | cfg->rc_label = LABEL_NOT_SPECIFIED; | 1132 | cfg->rc_label = LABEL_NOT_SPECIFIED; |
1120 | cfg->rc_protocol = rtm->rtm_protocol; | 1133 | cfg->rc_protocol = rtm->rtm_protocol; |
1134 | cfg->rc_via_table = MPLS_NEIGH_TABLE_UNSPEC; | ||
1121 | cfg->rc_nlflags = nlh->nlmsg_flags; | 1135 | cfg->rc_nlflags = nlh->nlmsg_flags; |
1122 | cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid; | 1136 | cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid; |
1123 | cfg->rc_nlinfo.nlh = nlh; | 1137 | cfg->rc_nlinfo.nlh = nlh; |
@@ -1231,7 +1245,8 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event, | |||
1231 | nla_put_labels(skb, RTA_NEWDST, nh->nh_labels, | 1245 | nla_put_labels(skb, RTA_NEWDST, nh->nh_labels, |
1232 | nh->nh_label)) | 1246 | nh->nh_label)) |
1233 | goto nla_put_failure; | 1247 | goto nla_put_failure; |
1234 | if (nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh), | 1248 | if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC && |
1249 | nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh), | ||
1235 | nh->nh_via_alen)) | 1250 | nh->nh_via_alen)) |
1236 | goto nla_put_failure; | 1251 | goto nla_put_failure; |
1237 | dev = rtnl_dereference(nh->nh_dev); | 1252 | dev = rtnl_dereference(nh->nh_dev); |
@@ -1257,7 +1272,8 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event, | |||
1257 | nh->nh_labels, | 1272 | nh->nh_labels, |
1258 | nh->nh_label)) | 1273 | nh->nh_label)) |
1259 | goto nla_put_failure; | 1274 | goto nla_put_failure; |
1260 | if (nla_put_via(skb, nh->nh_via_table, | 1275 | if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC && |
1276 | nla_put_via(skb, nh->nh_via_table, | ||
1261 | mpls_nh_via(rt, nh), | 1277 | mpls_nh_via(rt, nh), |
1262 | nh->nh_via_alen)) | 1278 | nh->nh_via_alen)) |
1263 | goto nla_put_failure; | 1279 | goto nla_put_failure; |
@@ -1319,7 +1335,8 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt) | |||
1319 | 1335 | ||
1320 | if (nh->nh_dev) | 1336 | if (nh->nh_dev) |
1321 | payload += nla_total_size(4); /* RTA_OIF */ | 1337 | payload += nla_total_size(4); /* RTA_OIF */ |
1322 | payload += nla_total_size(2 + nh->nh_via_alen); /* RTA_VIA */ | 1338 | if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) /* RTA_VIA */ |
1339 | payload += nla_total_size(2 + nh->nh_via_alen); | ||
1323 | if (nh->nh_labels) /* RTA_NEWDST */ | 1340 | if (nh->nh_labels) /* RTA_NEWDST */ |
1324 | payload += nla_total_size(nh->nh_labels * 4); | 1341 | payload += nla_total_size(nh->nh_labels * 4); |
1325 | } else { | 1342 | } else { |
@@ -1328,7 +1345,9 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt) | |||
1328 | 1345 | ||
1329 | for_nexthops(rt) { | 1346 | for_nexthops(rt) { |
1330 | nhsize += nla_total_size(sizeof(struct rtnexthop)); | 1347 | nhsize += nla_total_size(sizeof(struct rtnexthop)); |
1331 | nhsize += nla_total_size(2 + nh->nh_via_alen); | 1348 | /* RTA_VIA */ |
1349 | if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) | ||
1350 | nhsize += nla_total_size(2 + nh->nh_via_alen); | ||
1332 | if (nh->nh_labels) | 1351 | if (nh->nh_labels) |
1333 | nhsize += nla_total_size(nh->nh_labels * 4); | 1352 | nhsize += nla_total_size(nh->nh_labels * 4); |
1334 | } endfor_nexthops(rt); | 1353 | } endfor_nexthops(rt); |
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c index 67591aef9cae..64afd3d0b144 100644 --- a/net/mpls/mpls_iptunnel.c +++ b/net/mpls/mpls_iptunnel.c | |||
@@ -54,10 +54,10 @@ int mpls_output(struct net *net, struct sock *sk, struct sk_buff *skb) | |||
54 | unsigned int ttl; | 54 | unsigned int ttl; |
55 | 55 | ||
56 | /* Obtain the ttl */ | 56 | /* Obtain the ttl */ |
57 | if (skb->protocol == htons(ETH_P_IP)) { | 57 | if (dst->ops->family == AF_INET) { |
58 | ttl = ip_hdr(skb)->ttl; | 58 | ttl = ip_hdr(skb)->ttl; |
59 | rt = (struct rtable *)dst; | 59 | rt = (struct rtable *)dst; |
60 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | 60 | } else if (dst->ops->family == AF_INET6) { |
61 | ttl = ipv6_hdr(skb)->hop_limit; | 61 | ttl = ipv6_hdr(skb)->hop_limit; |
62 | rt6 = (struct rt6_info *)dst; | 62 | rt6 = (struct rt6_info *)dst; |
63 | } else { | 63 | } else { |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 93cc4737018f..2cb429d34c03 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -89,6 +89,7 @@ nf_tables_afinfo_lookup(struct net *net, int family, bool autoload) | |||
89 | } | 89 | } |
90 | 90 | ||
91 | static void nft_ctx_init(struct nft_ctx *ctx, | 91 | static void nft_ctx_init(struct nft_ctx *ctx, |
92 | struct net *net, | ||
92 | const struct sk_buff *skb, | 93 | const struct sk_buff *skb, |
93 | const struct nlmsghdr *nlh, | 94 | const struct nlmsghdr *nlh, |
94 | struct nft_af_info *afi, | 95 | struct nft_af_info *afi, |
@@ -96,7 +97,7 @@ static void nft_ctx_init(struct nft_ctx *ctx, | |||
96 | struct nft_chain *chain, | 97 | struct nft_chain *chain, |
97 | const struct nlattr * const *nla) | 98 | const struct nlattr * const *nla) |
98 | { | 99 | { |
99 | ctx->net = sock_net(skb->sk); | 100 | ctx->net = net; |
100 | ctx->afi = afi; | 101 | ctx->afi = afi; |
101 | ctx->table = table; | 102 | ctx->table = table; |
102 | ctx->chain = chain; | 103 | ctx->chain = chain; |
@@ -672,15 +673,14 @@ err: | |||
672 | return ret; | 673 | return ret; |
673 | } | 674 | } |
674 | 675 | ||
675 | static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, | 676 | static int nf_tables_newtable(struct net *net, struct sock *nlsk, |
676 | const struct nlmsghdr *nlh, | 677 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
677 | const struct nlattr * const nla[]) | 678 | const struct nlattr * const nla[]) |
678 | { | 679 | { |
679 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 680 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
680 | const struct nlattr *name; | 681 | const struct nlattr *name; |
681 | struct nft_af_info *afi; | 682 | struct nft_af_info *afi; |
682 | struct nft_table *table; | 683 | struct nft_table *table; |
683 | struct net *net = sock_net(skb->sk); | ||
684 | int family = nfmsg->nfgen_family; | 684 | int family = nfmsg->nfgen_family; |
685 | u32 flags = 0; | 685 | u32 flags = 0; |
686 | struct nft_ctx ctx; | 686 | struct nft_ctx ctx; |
@@ -706,7 +706,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, | |||
706 | if (nlh->nlmsg_flags & NLM_F_REPLACE) | 706 | if (nlh->nlmsg_flags & NLM_F_REPLACE) |
707 | return -EOPNOTSUPP; | 707 | return -EOPNOTSUPP; |
708 | 708 | ||
709 | nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); | 709 | nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla); |
710 | return nf_tables_updtable(&ctx); | 710 | return nf_tables_updtable(&ctx); |
711 | } | 711 | } |
712 | 712 | ||
@@ -730,7 +730,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, | |||
730 | INIT_LIST_HEAD(&table->sets); | 730 | INIT_LIST_HEAD(&table->sets); |
731 | table->flags = flags; | 731 | table->flags = flags; |
732 | 732 | ||
733 | nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); | 733 | nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla); |
734 | err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE); | 734 | err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE); |
735 | if (err < 0) | 735 | if (err < 0) |
736 | goto err3; | 736 | goto err3; |
@@ -810,18 +810,17 @@ out: | |||
810 | return err; | 810 | return err; |
811 | } | 811 | } |
812 | 812 | ||
813 | static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb, | 813 | static int nf_tables_deltable(struct net *net, struct sock *nlsk, |
814 | const struct nlmsghdr *nlh, | 814 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
815 | const struct nlattr * const nla[]) | 815 | const struct nlattr * const nla[]) |
816 | { | 816 | { |
817 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 817 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
818 | struct nft_af_info *afi; | 818 | struct nft_af_info *afi; |
819 | struct nft_table *table; | 819 | struct nft_table *table; |
820 | struct net *net = sock_net(skb->sk); | ||
821 | int family = nfmsg->nfgen_family; | 820 | int family = nfmsg->nfgen_family; |
822 | struct nft_ctx ctx; | 821 | struct nft_ctx ctx; |
823 | 822 | ||
824 | nft_ctx_init(&ctx, skb, nlh, NULL, NULL, NULL, nla); | 823 | nft_ctx_init(&ctx, net, skb, nlh, NULL, NULL, NULL, nla); |
825 | if (family == AF_UNSPEC || nla[NFTA_TABLE_NAME] == NULL) | 824 | if (family == AF_UNSPEC || nla[NFTA_TABLE_NAME] == NULL) |
826 | return nft_flush(&ctx, family); | 825 | return nft_flush(&ctx, family); |
827 | 826 | ||
@@ -1221,8 +1220,8 @@ static void nf_tables_chain_destroy(struct nft_chain *chain) | |||
1221 | } | 1220 | } |
1222 | } | 1221 | } |
1223 | 1222 | ||
1224 | static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, | 1223 | static int nf_tables_newchain(struct net *net, struct sock *nlsk, |
1225 | const struct nlmsghdr *nlh, | 1224 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
1226 | const struct nlattr * const nla[]) | 1225 | const struct nlattr * const nla[]) |
1227 | { | 1226 | { |
1228 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 1227 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
@@ -1232,7 +1231,6 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, | |||
1232 | struct nft_chain *chain; | 1231 | struct nft_chain *chain; |
1233 | struct nft_base_chain *basechain = NULL; | 1232 | struct nft_base_chain *basechain = NULL; |
1234 | struct nlattr *ha[NFTA_HOOK_MAX + 1]; | 1233 | struct nlattr *ha[NFTA_HOOK_MAX + 1]; |
1235 | struct net *net = sock_net(skb->sk); | ||
1236 | int family = nfmsg->nfgen_family; | 1234 | int family = nfmsg->nfgen_family; |
1237 | struct net_device *dev = NULL; | 1235 | struct net_device *dev = NULL; |
1238 | u8 policy = NF_ACCEPT; | 1236 | u8 policy = NF_ACCEPT; |
@@ -1313,7 +1311,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, | |||
1313 | return PTR_ERR(stats); | 1311 | return PTR_ERR(stats); |
1314 | } | 1312 | } |
1315 | 1313 | ||
1316 | nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); | 1314 | nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); |
1317 | trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN, | 1315 | trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN, |
1318 | sizeof(struct nft_trans_chain)); | 1316 | sizeof(struct nft_trans_chain)); |
1319 | if (trans == NULL) { | 1317 | if (trans == NULL) { |
@@ -1461,7 +1459,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, | |||
1461 | if (err < 0) | 1459 | if (err < 0) |
1462 | goto err1; | 1460 | goto err1; |
1463 | 1461 | ||
1464 | nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); | 1462 | nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); |
1465 | err = nft_trans_chain_add(&ctx, NFT_MSG_NEWCHAIN); | 1463 | err = nft_trans_chain_add(&ctx, NFT_MSG_NEWCHAIN); |
1466 | if (err < 0) | 1464 | if (err < 0) |
1467 | goto err2; | 1465 | goto err2; |
@@ -1476,15 +1474,14 @@ err1: | |||
1476 | return err; | 1474 | return err; |
1477 | } | 1475 | } |
1478 | 1476 | ||
1479 | static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb, | 1477 | static int nf_tables_delchain(struct net *net, struct sock *nlsk, |
1480 | const struct nlmsghdr *nlh, | 1478 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
1481 | const struct nlattr * const nla[]) | 1479 | const struct nlattr * const nla[]) |
1482 | { | 1480 | { |
1483 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 1481 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
1484 | struct nft_af_info *afi; | 1482 | struct nft_af_info *afi; |
1485 | struct nft_table *table; | 1483 | struct nft_table *table; |
1486 | struct nft_chain *chain; | 1484 | struct nft_chain *chain; |
1487 | struct net *net = sock_net(skb->sk); | ||
1488 | int family = nfmsg->nfgen_family; | 1485 | int family = nfmsg->nfgen_family; |
1489 | struct nft_ctx ctx; | 1486 | struct nft_ctx ctx; |
1490 | 1487 | ||
@@ -1506,7 +1503,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb, | |||
1506 | if (chain->use > 0) | 1503 | if (chain->use > 0) |
1507 | return -EBUSY; | 1504 | return -EBUSY; |
1508 | 1505 | ||
1509 | nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); | 1506 | nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); |
1510 | 1507 | ||
1511 | return nft_delchain(&ctx); | 1508 | return nft_delchain(&ctx); |
1512 | } | 1509 | } |
@@ -2010,13 +2007,12 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, | |||
2010 | 2007 | ||
2011 | static struct nft_expr_info *info; | 2008 | static struct nft_expr_info *info; |
2012 | 2009 | ||
2013 | static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | 2010 | static int nf_tables_newrule(struct net *net, struct sock *nlsk, |
2014 | const struct nlmsghdr *nlh, | 2011 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
2015 | const struct nlattr * const nla[]) | 2012 | const struct nlattr * const nla[]) |
2016 | { | 2013 | { |
2017 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 2014 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
2018 | struct nft_af_info *afi; | 2015 | struct nft_af_info *afi; |
2019 | struct net *net = sock_net(skb->sk); | ||
2020 | struct nft_table *table; | 2016 | struct nft_table *table; |
2021 | struct nft_chain *chain; | 2017 | struct nft_chain *chain; |
2022 | struct nft_rule *rule, *old_rule = NULL; | 2018 | struct nft_rule *rule, *old_rule = NULL; |
@@ -2075,7 +2071,7 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
2075 | return PTR_ERR(old_rule); | 2071 | return PTR_ERR(old_rule); |
2076 | } | 2072 | } |
2077 | 2073 | ||
2078 | nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); | 2074 | nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); |
2079 | 2075 | ||
2080 | n = 0; | 2076 | n = 0; |
2081 | size = 0; | 2077 | size = 0; |
@@ -2176,13 +2172,12 @@ err1: | |||
2176 | return err; | 2172 | return err; |
2177 | } | 2173 | } |
2178 | 2174 | ||
2179 | static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, | 2175 | static int nf_tables_delrule(struct net *net, struct sock *nlsk, |
2180 | const struct nlmsghdr *nlh, | 2176 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
2181 | const struct nlattr * const nla[]) | 2177 | const struct nlattr * const nla[]) |
2182 | { | 2178 | { |
2183 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 2179 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
2184 | struct nft_af_info *afi; | 2180 | struct nft_af_info *afi; |
2185 | struct net *net = sock_net(skb->sk); | ||
2186 | struct nft_table *table; | 2181 | struct nft_table *table; |
2187 | struct nft_chain *chain = NULL; | 2182 | struct nft_chain *chain = NULL; |
2188 | struct nft_rule *rule; | 2183 | struct nft_rule *rule; |
@@ -2205,7 +2200,7 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, | |||
2205 | return PTR_ERR(chain); | 2200 | return PTR_ERR(chain); |
2206 | } | 2201 | } |
2207 | 2202 | ||
2208 | nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); | 2203 | nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); |
2209 | 2204 | ||
2210 | if (chain) { | 2205 | if (chain) { |
2211 | if (nla[NFTA_RULE_HANDLE]) { | 2206 | if (nla[NFTA_RULE_HANDLE]) { |
@@ -2344,12 +2339,11 @@ static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = { | |||
2344 | [NFTA_SET_DESC_SIZE] = { .type = NLA_U32 }, | 2339 | [NFTA_SET_DESC_SIZE] = { .type = NLA_U32 }, |
2345 | }; | 2340 | }; |
2346 | 2341 | ||
2347 | static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, | 2342 | static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net, |
2348 | const struct sk_buff *skb, | 2343 | const struct sk_buff *skb, |
2349 | const struct nlmsghdr *nlh, | 2344 | const struct nlmsghdr *nlh, |
2350 | const struct nlattr * const nla[]) | 2345 | const struct nlattr * const nla[]) |
2351 | { | 2346 | { |
2352 | struct net *net = sock_net(skb->sk); | ||
2353 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 2347 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
2354 | struct nft_af_info *afi = NULL; | 2348 | struct nft_af_info *afi = NULL; |
2355 | struct nft_table *table = NULL; | 2349 | struct nft_table *table = NULL; |
@@ -2371,7 +2365,7 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, | |||
2371 | return -ENOENT; | 2365 | return -ENOENT; |
2372 | } | 2366 | } |
2373 | 2367 | ||
2374 | nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla); | 2368 | nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla); |
2375 | return 0; | 2369 | return 0; |
2376 | } | 2370 | } |
2377 | 2371 | ||
@@ -2623,6 +2617,7 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb, | |||
2623 | const struct nlmsghdr *nlh, | 2617 | const struct nlmsghdr *nlh, |
2624 | const struct nlattr * const nla[]) | 2618 | const struct nlattr * const nla[]) |
2625 | { | 2619 | { |
2620 | struct net *net = sock_net(skb->sk); | ||
2626 | const struct nft_set *set; | 2621 | const struct nft_set *set; |
2627 | struct nft_ctx ctx; | 2622 | struct nft_ctx ctx; |
2628 | struct sk_buff *skb2; | 2623 | struct sk_buff *skb2; |
@@ -2630,7 +2625,7 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb, | |||
2630 | int err; | 2625 | int err; |
2631 | 2626 | ||
2632 | /* Verify existence before starting dump */ | 2627 | /* Verify existence before starting dump */ |
2633 | err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla); | 2628 | err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla); |
2634 | if (err < 0) | 2629 | if (err < 0) |
2635 | return err; | 2630 | return err; |
2636 | 2631 | ||
@@ -2693,14 +2688,13 @@ static int nf_tables_set_desc_parse(const struct nft_ctx *ctx, | |||
2693 | return 0; | 2688 | return 0; |
2694 | } | 2689 | } |
2695 | 2690 | ||
2696 | static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb, | 2691 | static int nf_tables_newset(struct net *net, struct sock *nlsk, |
2697 | const struct nlmsghdr *nlh, | 2692 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
2698 | const struct nlattr * const nla[]) | 2693 | const struct nlattr * const nla[]) |
2699 | { | 2694 | { |
2700 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 2695 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
2701 | const struct nft_set_ops *ops; | 2696 | const struct nft_set_ops *ops; |
2702 | struct nft_af_info *afi; | 2697 | struct nft_af_info *afi; |
2703 | struct net *net = sock_net(skb->sk); | ||
2704 | struct nft_table *table; | 2698 | struct nft_table *table; |
2705 | struct nft_set *set; | 2699 | struct nft_set *set; |
2706 | struct nft_ctx ctx; | 2700 | struct nft_ctx ctx; |
@@ -2798,7 +2792,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb, | |||
2798 | if (IS_ERR(table)) | 2792 | if (IS_ERR(table)) |
2799 | return PTR_ERR(table); | 2793 | return PTR_ERR(table); |
2800 | 2794 | ||
2801 | nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); | 2795 | nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla); |
2802 | 2796 | ||
2803 | set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]); | 2797 | set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]); |
2804 | if (IS_ERR(set)) { | 2798 | if (IS_ERR(set)) { |
@@ -2882,8 +2876,8 @@ static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set | |||
2882 | nft_set_destroy(set); | 2876 | nft_set_destroy(set); |
2883 | } | 2877 | } |
2884 | 2878 | ||
2885 | static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb, | 2879 | static int nf_tables_delset(struct net *net, struct sock *nlsk, |
2886 | const struct nlmsghdr *nlh, | 2880 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
2887 | const struct nlattr * const nla[]) | 2881 | const struct nlattr * const nla[]) |
2888 | { | 2882 | { |
2889 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 2883 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
@@ -2896,7 +2890,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb, | |||
2896 | if (nla[NFTA_SET_TABLE] == NULL) | 2890 | if (nla[NFTA_SET_TABLE] == NULL) |
2897 | return -EINVAL; | 2891 | return -EINVAL; |
2898 | 2892 | ||
2899 | err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla); | 2893 | err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla); |
2900 | if (err < 0) | 2894 | if (err < 0) |
2901 | return err; | 2895 | return err; |
2902 | 2896 | ||
@@ -3024,7 +3018,7 @@ static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + | |||
3024 | [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 }, | 3018 | [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 }, |
3025 | }; | 3019 | }; |
3026 | 3020 | ||
3027 | static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, | 3021 | static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net, |
3028 | const struct sk_buff *skb, | 3022 | const struct sk_buff *skb, |
3029 | const struct nlmsghdr *nlh, | 3023 | const struct nlmsghdr *nlh, |
3030 | const struct nlattr * const nla[], | 3024 | const struct nlattr * const nla[], |
@@ -3033,7 +3027,6 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, | |||
3033 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 3027 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
3034 | struct nft_af_info *afi; | 3028 | struct nft_af_info *afi; |
3035 | struct nft_table *table; | 3029 | struct nft_table *table; |
3036 | struct net *net = sock_net(skb->sk); | ||
3037 | 3030 | ||
3038 | afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false); | 3031 | afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false); |
3039 | if (IS_ERR(afi)) | 3032 | if (IS_ERR(afi)) |
@@ -3045,7 +3038,7 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, | |||
3045 | if (!trans && (table->flags & NFT_TABLE_INACTIVE)) | 3038 | if (!trans && (table->flags & NFT_TABLE_INACTIVE)) |
3046 | return -ENOENT; | 3039 | return -ENOENT; |
3047 | 3040 | ||
3048 | nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla); | 3041 | nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla); |
3049 | return 0; | 3042 | return 0; |
3050 | } | 3043 | } |
3051 | 3044 | ||
@@ -3135,6 +3128,7 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx, | |||
3135 | 3128 | ||
3136 | static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) | 3129 | static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) |
3137 | { | 3130 | { |
3131 | struct net *net = sock_net(skb->sk); | ||
3138 | const struct nft_set *set; | 3132 | const struct nft_set *set; |
3139 | struct nft_set_dump_args args; | 3133 | struct nft_set_dump_args args; |
3140 | struct nft_ctx ctx; | 3134 | struct nft_ctx ctx; |
@@ -3150,8 +3144,8 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) | |||
3150 | if (err < 0) | 3144 | if (err < 0) |
3151 | return err; | 3145 | return err; |
3152 | 3146 | ||
3153 | err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla, | 3147 | err = nft_ctx_init_from_elemattr(&ctx, net, cb->skb, cb->nlh, |
3154 | false); | 3148 | (void *)nla, false); |
3155 | if (err < 0) | 3149 | if (err < 0) |
3156 | return err; | 3150 | return err; |
3157 | 3151 | ||
@@ -3212,11 +3206,12 @@ static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb, | |||
3212 | const struct nlmsghdr *nlh, | 3206 | const struct nlmsghdr *nlh, |
3213 | const struct nlattr * const nla[]) | 3207 | const struct nlattr * const nla[]) |
3214 | { | 3208 | { |
3209 | struct net *net = sock_net(skb->sk); | ||
3215 | const struct nft_set *set; | 3210 | const struct nft_set *set; |
3216 | struct nft_ctx ctx; | 3211 | struct nft_ctx ctx; |
3217 | int err; | 3212 | int err; |
3218 | 3213 | ||
3219 | err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false); | 3214 | err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, false); |
3220 | if (err < 0) | 3215 | if (err < 0) |
3221 | return err; | 3216 | return err; |
3222 | 3217 | ||
@@ -3528,11 +3523,10 @@ err1: | |||
3528 | return err; | 3523 | return err; |
3529 | } | 3524 | } |
3530 | 3525 | ||
3531 | static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb, | 3526 | static int nf_tables_newsetelem(struct net *net, struct sock *nlsk, |
3532 | const struct nlmsghdr *nlh, | 3527 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
3533 | const struct nlattr * const nla[]) | 3528 | const struct nlattr * const nla[]) |
3534 | { | 3529 | { |
3535 | struct net *net = sock_net(skb->sk); | ||
3536 | const struct nlattr *attr; | 3530 | const struct nlattr *attr; |
3537 | struct nft_set *set; | 3531 | struct nft_set *set; |
3538 | struct nft_ctx ctx; | 3532 | struct nft_ctx ctx; |
@@ -3541,7 +3535,7 @@ static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb, | |||
3541 | if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) | 3535 | if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) |
3542 | return -EINVAL; | 3536 | return -EINVAL; |
3543 | 3537 | ||
3544 | err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, true); | 3538 | err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, true); |
3545 | if (err < 0) | 3539 | if (err < 0) |
3546 | return err; | 3540 | return err; |
3547 | 3541 | ||
@@ -3623,8 +3617,8 @@ err1: | |||
3623 | return err; | 3617 | return err; |
3624 | } | 3618 | } |
3625 | 3619 | ||
3626 | static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb, | 3620 | static int nf_tables_delsetelem(struct net *net, struct sock *nlsk, |
3627 | const struct nlmsghdr *nlh, | 3621 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
3628 | const struct nlattr * const nla[]) | 3622 | const struct nlattr * const nla[]) |
3629 | { | 3623 | { |
3630 | const struct nlattr *attr; | 3624 | const struct nlattr *attr; |
@@ -3635,7 +3629,7 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb, | |||
3635 | if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) | 3629 | if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) |
3636 | return -EINVAL; | 3630 | return -EINVAL; |
3637 | 3631 | ||
3638 | err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false); | 3632 | err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, false); |
3639 | if (err < 0) | 3633 | if (err < 0) |
3640 | return err; | 3634 | return err; |
3641 | 3635 | ||
@@ -4030,7 +4024,8 @@ static int nf_tables_abort(struct sk_buff *skb) | |||
4030 | struct nft_trans *trans, *next; | 4024 | struct nft_trans *trans, *next; |
4031 | struct nft_trans_elem *te; | 4025 | struct nft_trans_elem *te; |
4032 | 4026 | ||
4033 | list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { | 4027 | list_for_each_entry_safe_reverse(trans, next, &net->nft.commit_list, |
4028 | list) { | ||
4034 | switch (trans->msg_type) { | 4029 | switch (trans->msg_type) { |
4035 | case NFT_MSG_NEWTABLE: | 4030 | case NFT_MSG_NEWTABLE: |
4036 | if (nft_trans_table_update(trans)) { | 4031 | if (nft_trans_table_update(trans)) { |
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 46453ab318db..77afe913d03d 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c | |||
@@ -295,8 +295,6 @@ replay: | |||
295 | if (!skb) | 295 | if (!skb) |
296 | return netlink_ack(oskb, nlh, -ENOMEM); | 296 | return netlink_ack(oskb, nlh, -ENOMEM); |
297 | 297 | ||
298 | skb->sk = oskb->sk; | ||
299 | |||
300 | nfnl_lock(subsys_id); | 298 | nfnl_lock(subsys_id); |
301 | ss = rcu_dereference_protected(table[subsys_id].subsys, | 299 | ss = rcu_dereference_protected(table[subsys_id].subsys, |
302 | lockdep_is_held(&table[subsys_id].mutex)); | 300 | lockdep_is_held(&table[subsys_id].mutex)); |
@@ -381,7 +379,7 @@ replay: | |||
381 | goto ack; | 379 | goto ack; |
382 | 380 | ||
383 | if (nc->call_batch) { | 381 | if (nc->call_batch) { |
384 | err = nc->call_batch(net->nfnl, skb, nlh, | 382 | err = nc->call_batch(net, net->nfnl, skb, nlh, |
385 | (const struct nlattr **)cda); | 383 | (const struct nlattr **)cda); |
386 | } | 384 | } |
387 | 385 | ||
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 7d81d280cb4f..861c6615253b 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -365,8 +365,9 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | |||
365 | break; | 365 | break; |
366 | } | 366 | } |
367 | 367 | ||
368 | nfnl_ct = rcu_dereference(nfnl_ct_hook); | ||
369 | |||
368 | if (queue->flags & NFQA_CFG_F_CONNTRACK) { | 370 | if (queue->flags & NFQA_CFG_F_CONNTRACK) { |
369 | nfnl_ct = rcu_dereference(nfnl_ct_hook); | ||
370 | if (nfnl_ct != NULL) { | 371 | if (nfnl_ct != NULL) { |
371 | ct = nfnl_ct->get_ct(entskb, &ctinfo); | 372 | ct = nfnl_ct->get_ct(entskb, &ctinfo); |
372 | if (ct != NULL) | 373 | if (ct != NULL) |
@@ -1064,9 +1065,10 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, | |||
1064 | if (entry == NULL) | 1065 | if (entry == NULL) |
1065 | return -ENOENT; | 1066 | return -ENOENT; |
1066 | 1067 | ||
1068 | /* rcu lock already held from nfnl->call_rcu. */ | ||
1069 | nfnl_ct = rcu_dereference(nfnl_ct_hook); | ||
1070 | |||
1067 | if (nfqa[NFQA_CT]) { | 1071 | if (nfqa[NFQA_CT]) { |
1068 | /* rcu lock already held from nfnl->call_rcu. */ | ||
1069 | nfnl_ct = rcu_dereference(nfnl_ct_hook); | ||
1070 | if (nfnl_ct != NULL) | 1072 | if (nfnl_ct != NULL) |
1071 | ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo); | 1073 | ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo); |
1072 | } | 1074 | } |
@@ -1417,6 +1419,7 @@ static int __init nfnetlink_queue_init(void) | |||
1417 | 1419 | ||
1418 | cleanup_netlink_notifier: | 1420 | cleanup_netlink_notifier: |
1419 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); | 1421 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); |
1422 | unregister_pernet_subsys(&nfnl_queue_net_ops); | ||
1420 | out: | 1423 | out: |
1421 | return status; | 1424 | return status; |
1422 | } | 1425 | } |
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index b7de0da46acd..ecf0a0196f18 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c | |||
@@ -572,7 +572,7 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock, | |||
572 | if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED) | 572 | if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED) |
573 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | 573 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; |
574 | else | 574 | else |
575 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 575 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
576 | 576 | ||
577 | pr_debug("mask 0x%x\n", mask); | 577 | pr_debug("mask 0x%x\n", mask); |
578 | 578 | ||
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index c2cc11168fd5..3e8892216f94 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c | |||
@@ -53,6 +53,8 @@ struct ovs_conntrack_info { | |||
53 | struct md_labels labels; | 53 | struct md_labels labels; |
54 | }; | 54 | }; |
55 | 55 | ||
56 | static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info); | ||
57 | |||
56 | static u16 key_to_nfproto(const struct sw_flow_key *key) | 58 | static u16 key_to_nfproto(const struct sw_flow_key *key) |
57 | { | 59 | { |
58 | switch (ntohs(key->eth.type)) { | 60 | switch (ntohs(key->eth.type)) { |
@@ -141,6 +143,7 @@ static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state, | |||
141 | * previously sent the packet to conntrack via the ct action. | 143 | * previously sent the packet to conntrack via the ct action. |
142 | */ | 144 | */ |
143 | static void ovs_ct_update_key(const struct sk_buff *skb, | 145 | static void ovs_ct_update_key(const struct sk_buff *skb, |
146 | const struct ovs_conntrack_info *info, | ||
144 | struct sw_flow_key *key, bool post_ct) | 147 | struct sw_flow_key *key, bool post_ct) |
145 | { | 148 | { |
146 | const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt; | 149 | const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt; |
@@ -158,13 +161,15 @@ static void ovs_ct_update_key(const struct sk_buff *skb, | |||
158 | zone = nf_ct_zone(ct); | 161 | zone = nf_ct_zone(ct); |
159 | } else if (post_ct) { | 162 | } else if (post_ct) { |
160 | state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID; | 163 | state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID; |
164 | if (info) | ||
165 | zone = &info->zone; | ||
161 | } | 166 | } |
162 | __ovs_ct_update_key(key, state, zone, ct); | 167 | __ovs_ct_update_key(key, state, zone, ct); |
163 | } | 168 | } |
164 | 169 | ||
165 | void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) | 170 | void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) |
166 | { | 171 | { |
167 | ovs_ct_update_key(skb, key, false); | 172 | ovs_ct_update_key(skb, NULL, key, false); |
168 | } | 173 | } |
169 | 174 | ||
170 | int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb) | 175 | int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb) |
@@ -418,7 +423,7 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key, | |||
418 | } | 423 | } |
419 | } | 424 | } |
420 | 425 | ||
421 | ovs_ct_update_key(skb, key, true); | 426 | ovs_ct_update_key(skb, info, key, true); |
422 | 427 | ||
423 | return 0; | 428 | return 0; |
424 | } | 429 | } |
@@ -708,7 +713,7 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr, | |||
708 | nf_conntrack_get(&ct_info.ct->ct_general); | 713 | nf_conntrack_get(&ct_info.ct->ct_general); |
709 | return 0; | 714 | return 0; |
710 | err_free_ct: | 715 | err_free_ct: |
711 | nf_conntrack_free(ct_info.ct); | 716 | __ovs_ct_free_action(&ct_info); |
712 | return err; | 717 | return err; |
713 | } | 718 | } |
714 | 719 | ||
@@ -750,6 +755,11 @@ void ovs_ct_free_action(const struct nlattr *a) | |||
750 | { | 755 | { |
751 | struct ovs_conntrack_info *ct_info = nla_data(a); | 756 | struct ovs_conntrack_info *ct_info = nla_data(a); |
752 | 757 | ||
758 | __ovs_ct_free_action(ct_info); | ||
759 | } | ||
760 | |||
761 | static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info) | ||
762 | { | ||
753 | if (ct_info->helper) | 763 | if (ct_info->helper) |
754 | module_put(ct_info->helper->me); | 764 | module_put(ct_info->helper->me); |
755 | if (ct_info->ct) | 765 | if (ct_info->ct) |
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c index a7a80a6b77b0..653d073bae45 100644 --- a/net/openvswitch/dp_notify.c +++ b/net/openvswitch/dp_notify.c | |||
@@ -58,7 +58,7 @@ void ovs_dp_notify_wq(struct work_struct *work) | |||
58 | struct hlist_node *n; | 58 | struct hlist_node *n; |
59 | 59 | ||
60 | hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) { | 60 | hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) { |
61 | if (vport->ops->type != OVS_VPORT_TYPE_NETDEV) | 61 | if (vport->ops->type == OVS_VPORT_TYPE_INTERNAL) |
62 | continue; | 62 | continue; |
63 | 63 | ||
64 | if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH)) | 64 | if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH)) |
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c index efb736bb6855..e41cd12d9b2d 100644 --- a/net/openvswitch/vport-geneve.c +++ b/net/openvswitch/vport-geneve.c | |||
@@ -117,7 +117,6 @@ static struct vport_ops ovs_geneve_vport_ops = { | |||
117 | .destroy = ovs_netdev_tunnel_destroy, | 117 | .destroy = ovs_netdev_tunnel_destroy, |
118 | .get_options = geneve_get_options, | 118 | .get_options = geneve_get_options, |
119 | .send = dev_queue_xmit, | 119 | .send = dev_queue_xmit, |
120 | .owner = THIS_MODULE, | ||
121 | }; | 120 | }; |
122 | 121 | ||
123 | static int __init ovs_geneve_tnl_init(void) | 122 | static int __init ovs_geneve_tnl_init(void) |
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index c3257d78d3d2..7f8897f33a67 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c | |||
@@ -89,7 +89,6 @@ static struct vport_ops ovs_gre_vport_ops = { | |||
89 | .create = gre_create, | 89 | .create = gre_create, |
90 | .send = dev_queue_xmit, | 90 | .send = dev_queue_xmit, |
91 | .destroy = ovs_netdev_tunnel_destroy, | 91 | .destroy = ovs_netdev_tunnel_destroy, |
92 | .owner = THIS_MODULE, | ||
93 | }; | 92 | }; |
94 | 93 | ||
95 | static int __init ovs_gre_tnl_init(void) | 94 | static int __init ovs_gre_tnl_init(void) |
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index b327368a3848..6b0190b987ec 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c | |||
@@ -180,9 +180,13 @@ void ovs_netdev_tunnel_destroy(struct vport *vport) | |||
180 | if (vport->dev->priv_flags & IFF_OVS_DATAPATH) | 180 | if (vport->dev->priv_flags & IFF_OVS_DATAPATH) |
181 | ovs_netdev_detach_dev(vport); | 181 | ovs_netdev_detach_dev(vport); |
182 | 182 | ||
183 | /* Early release so we can unregister the device */ | 183 | /* We can be invoked by both explicit vport deletion and |
184 | * underlying netdev deregistration; delete the link only | ||
185 | * if it's not already shutting down. | ||
186 | */ | ||
187 | if (vport->dev->reg_state == NETREG_REGISTERED) | ||
188 | rtnl_delete_link(vport->dev); | ||
184 | dev_put(vport->dev); | 189 | dev_put(vport->dev); |
185 | rtnl_delete_link(vport->dev); | ||
186 | vport->dev = NULL; | 190 | vport->dev = NULL; |
187 | rtnl_unlock(); | 191 | rtnl_unlock(); |
188 | 192 | ||
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 0ac0fd004d7e..31cbc8c5c7db 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c | |||
@@ -71,7 +71,7 @@ static struct hlist_head *hash_bucket(const struct net *net, const char *name) | |||
71 | return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)]; | 71 | return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)]; |
72 | } | 72 | } |
73 | 73 | ||
74 | int ovs_vport_ops_register(struct vport_ops *ops) | 74 | int __ovs_vport_ops_register(struct vport_ops *ops) |
75 | { | 75 | { |
76 | int err = -EEXIST; | 76 | int err = -EEXIST; |
77 | struct vport_ops *o; | 77 | struct vport_ops *o; |
@@ -87,7 +87,7 @@ errout: | |||
87 | ovs_unlock(); | 87 | ovs_unlock(); |
88 | return err; | 88 | return err; |
89 | } | 89 | } |
90 | EXPORT_SYMBOL_GPL(ovs_vport_ops_register); | 90 | EXPORT_SYMBOL_GPL(__ovs_vport_ops_register); |
91 | 91 | ||
92 | void ovs_vport_ops_unregister(struct vport_ops *ops) | 92 | void ovs_vport_ops_unregister(struct vport_ops *ops) |
93 | { | 93 | { |
@@ -256,8 +256,8 @@ int ovs_vport_set_options(struct vport *vport, struct nlattr *options) | |||
256 | * | 256 | * |
257 | * @vport: vport to delete. | 257 | * @vport: vport to delete. |
258 | * | 258 | * |
259 | * Detaches @vport from its datapath and destroys it. It is possible to fail | 259 | * Detaches @vport from its datapath and destroys it. ovs_mutex must |
260 | * for reasons such as lack of memory. ovs_mutex must be held. | 260 | * be held. |
261 | */ | 261 | */ |
262 | void ovs_vport_del(struct vport *vport) | 262 | void ovs_vport_del(struct vport *vport) |
263 | { | 263 | { |
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index bdfd82a7c064..8ea3a96980ac 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h | |||
@@ -196,7 +196,13 @@ static inline const char *ovs_vport_name(struct vport *vport) | |||
196 | return vport->dev->name; | 196 | return vport->dev->name; |
197 | } | 197 | } |
198 | 198 | ||
199 | int ovs_vport_ops_register(struct vport_ops *ops); | 199 | int __ovs_vport_ops_register(struct vport_ops *ops); |
200 | #define ovs_vport_ops_register(ops) \ | ||
201 | ({ \ | ||
202 | (ops)->owner = THIS_MODULE; \ | ||
203 | __ovs_vport_ops_register(ops); \ | ||
204 | }) | ||
205 | |||
200 | void ovs_vport_ops_unregister(struct vport_ops *ops); | 206 | void ovs_vport_ops_unregister(struct vport_ops *ops); |
201 | 207 | ||
202 | static inline struct rtable *ovs_tunnel_route_lookup(struct net *net, | 208 | static inline struct rtable *ovs_tunnel_route_lookup(struct net *net, |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 1cf928fb573e..992396aa635c 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -2329,8 +2329,8 @@ static void tpacket_destruct_skb(struct sk_buff *skb) | |||
2329 | static bool ll_header_truncated(const struct net_device *dev, int len) | 2329 | static bool ll_header_truncated(const struct net_device *dev, int len) |
2330 | { | 2330 | { |
2331 | /* net device doesn't like empty head */ | 2331 | /* net device doesn't like empty head */ |
2332 | if (unlikely(len <= dev->hard_header_len)) { | 2332 | if (unlikely(len < dev->hard_header_len)) { |
2333 | net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n", | 2333 | net_warn_ratelimited("%s: packet size is too short (%d < %d)\n", |
2334 | current->comm, len, dev->hard_header_len); | 2334 | current->comm, len, dev->hard_header_len); |
2335 | return true; | 2335 | return true; |
2336 | } | 2336 | } |
diff --git a/net/rds/connection.c b/net/rds/connection.c index d4564036a339..e3b118cae81d 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c | |||
@@ -186,12 +186,6 @@ static struct rds_connection *__rds_conn_create(struct net *net, | |||
186 | } | 186 | } |
187 | } | 187 | } |
188 | 188 | ||
189 | if (trans == NULL) { | ||
190 | kmem_cache_free(rds_conn_slab, conn); | ||
191 | conn = ERR_PTR(-ENODEV); | ||
192 | goto out; | ||
193 | } | ||
194 | |||
195 | conn->c_trans = trans; | 189 | conn->c_trans = trans; |
196 | 190 | ||
197 | ret = trans->conn_alloc(conn, gfp); | 191 | ret = trans->conn_alloc(conn, gfp); |
diff --git a/net/rds/send.c b/net/rds/send.c index 827155c2ead1..c9cdb358ea88 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
@@ -1013,11 +1013,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) | |||
1013 | release_sock(sk); | 1013 | release_sock(sk); |
1014 | } | 1014 | } |
1015 | 1015 | ||
1016 | /* racing with another thread binding seems ok here */ | 1016 | lock_sock(sk); |
1017 | if (daddr == 0 || rs->rs_bound_addr == 0) { | 1017 | if (daddr == 0 || rs->rs_bound_addr == 0) { |
1018 | release_sock(sk); | ||
1018 | ret = -ENOTCONN; /* XXX not a great errno */ | 1019 | ret = -ENOTCONN; /* XXX not a great errno */ |
1019 | goto out; | 1020 | goto out; |
1020 | } | 1021 | } |
1022 | release_sock(sk); | ||
1021 | 1023 | ||
1022 | if (payload_len > rds_sk_sndbuf(rs)) { | 1024 | if (payload_len > rds_sk_sndbuf(rs)) { |
1023 | ret = -EMSGSIZE; | 1025 | ret = -EMSGSIZE; |
diff --git a/net/rfkill/core.c b/net/rfkill/core.c index b41e9ea2ffff..f53bf3b6558b 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c | |||
@@ -49,7 +49,6 @@ | |||
49 | struct rfkill { | 49 | struct rfkill { |
50 | spinlock_t lock; | 50 | spinlock_t lock; |
51 | 51 | ||
52 | const char *name; | ||
53 | enum rfkill_type type; | 52 | enum rfkill_type type; |
54 | 53 | ||
55 | unsigned long state; | 54 | unsigned long state; |
@@ -73,6 +72,7 @@ struct rfkill { | |||
73 | struct delayed_work poll_work; | 72 | struct delayed_work poll_work; |
74 | struct work_struct uevent_work; | 73 | struct work_struct uevent_work; |
75 | struct work_struct sync_work; | 74 | struct work_struct sync_work; |
75 | char name[]; | ||
76 | }; | 76 | }; |
77 | #define to_rfkill(d) container_of(d, struct rfkill, dev) | 77 | #define to_rfkill(d) container_of(d, struct rfkill, dev) |
78 | 78 | ||
@@ -876,14 +876,14 @@ struct rfkill * __must_check rfkill_alloc(const char *name, | |||
876 | if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) | 876 | if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) |
877 | return NULL; | 877 | return NULL; |
878 | 878 | ||
879 | rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); | 879 | rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL); |
880 | if (!rfkill) | 880 | if (!rfkill) |
881 | return NULL; | 881 | return NULL; |
882 | 882 | ||
883 | spin_lock_init(&rfkill->lock); | 883 | spin_lock_init(&rfkill->lock); |
884 | INIT_LIST_HEAD(&rfkill->node); | 884 | INIT_LIST_HEAD(&rfkill->node); |
885 | rfkill->type = type; | 885 | rfkill->type = type; |
886 | rfkill->name = name; | 886 | strcpy(rfkill->name, name); |
887 | rfkill->ops = ops; | 887 | rfkill->ops = ops; |
888 | rfkill->data = ops_data; | 888 | rfkill->data = ops_data; |
889 | 889 | ||
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c index e0547f521f20..adc555e0323d 100644 --- a/net/rxrpc/ar-ack.c +++ b/net/rxrpc/ar-ack.c | |||
@@ -723,8 +723,10 @@ process_further: | |||
723 | 723 | ||
724 | if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY || | 724 | if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY || |
725 | call->state == RXRPC_CALL_SERVER_AWAIT_ACK) && | 725 | call->state == RXRPC_CALL_SERVER_AWAIT_ACK) && |
726 | hard > tx) | 726 | hard > tx) { |
727 | call->acks_hard = tx; | ||
727 | goto all_acked; | 728 | goto all_acked; |
729 | } | ||
728 | 730 | ||
729 | smp_rmb(); | 731 | smp_rmb(); |
730 | rxrpc_rotate_tx_window(call, hard - 1); | 732 | rxrpc_rotate_tx_window(call, hard - 1); |
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c index a40d3afe93b7..14c4e12c47b0 100644 --- a/net/rxrpc/ar-output.c +++ b/net/rxrpc/ar-output.c | |||
@@ -531,7 +531,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, | |||
531 | timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); | 531 | timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); |
532 | 532 | ||
533 | /* this should be in poll */ | 533 | /* this should be in poll */ |
534 | clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 534 | sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
535 | 535 | ||
536 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) | 536 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) |
537 | return -EPIPE; | 537 | return -EPIPE; |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index f43c8f33f09e..b5c2cf2aa6d4 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -253,7 +253,8 @@ int qdisc_set_default(const char *name) | |||
253 | } | 253 | } |
254 | 254 | ||
255 | /* We know handle. Find qdisc among all qdisc's attached to device | 255 | /* We know handle. Find qdisc among all qdisc's attached to device |
256 | (root qdisc, all its children, children of children etc.) | 256 | * (root qdisc, all its children, children of children etc.) |
257 | * Note: caller either uses rtnl or rcu_read_lock() | ||
257 | */ | 258 | */ |
258 | 259 | ||
259 | static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) | 260 | static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) |
@@ -264,7 +265,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) | |||
264 | root->handle == handle) | 265 | root->handle == handle) |
265 | return root; | 266 | return root; |
266 | 267 | ||
267 | list_for_each_entry(q, &root->list, list) { | 268 | list_for_each_entry_rcu(q, &root->list, list) { |
268 | if (q->handle == handle) | 269 | if (q->handle == handle) |
269 | return q; | 270 | return q; |
270 | } | 271 | } |
@@ -277,15 +278,18 @@ void qdisc_list_add(struct Qdisc *q) | |||
277 | struct Qdisc *root = qdisc_dev(q)->qdisc; | 278 | struct Qdisc *root = qdisc_dev(q)->qdisc; |
278 | 279 | ||
279 | WARN_ON_ONCE(root == &noop_qdisc); | 280 | WARN_ON_ONCE(root == &noop_qdisc); |
280 | list_add_tail(&q->list, &root->list); | 281 | ASSERT_RTNL(); |
282 | list_add_tail_rcu(&q->list, &root->list); | ||
281 | } | 283 | } |
282 | } | 284 | } |
283 | EXPORT_SYMBOL(qdisc_list_add); | 285 | EXPORT_SYMBOL(qdisc_list_add); |
284 | 286 | ||
285 | void qdisc_list_del(struct Qdisc *q) | 287 | void qdisc_list_del(struct Qdisc *q) |
286 | { | 288 | { |
287 | if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) | 289 | if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { |
288 | list_del(&q->list); | 290 | ASSERT_RTNL(); |
291 | list_del_rcu(&q->list); | ||
292 | } | ||
289 | } | 293 | } |
290 | EXPORT_SYMBOL(qdisc_list_del); | 294 | EXPORT_SYMBOL(qdisc_list_del); |
291 | 295 | ||
@@ -750,14 +754,18 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) | |||
750 | if (n == 0) | 754 | if (n == 0) |
751 | return; | 755 | return; |
752 | drops = max_t(int, n, 0); | 756 | drops = max_t(int, n, 0); |
757 | rcu_read_lock(); | ||
753 | while ((parentid = sch->parent)) { | 758 | while ((parentid = sch->parent)) { |
754 | if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS)) | 759 | if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS)) |
755 | return; | 760 | break; |
756 | 761 | ||
762 | if (sch->flags & TCQ_F_NOPARENT) | ||
763 | break; | ||
764 | /* TODO: perform the search on a per txq basis */ | ||
757 | sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid)); | 765 | sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid)); |
758 | if (sch == NULL) { | 766 | if (sch == NULL) { |
759 | WARN_ON(parentid != TC_H_ROOT); | 767 | WARN_ON_ONCE(parentid != TC_H_ROOT); |
760 | return; | 768 | break; |
761 | } | 769 | } |
762 | cops = sch->ops->cl_ops; | 770 | cops = sch->ops->cl_ops; |
763 | if (cops->qlen_notify) { | 771 | if (cops->qlen_notify) { |
@@ -768,6 +776,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) | |||
768 | sch->q.qlen -= n; | 776 | sch->q.qlen -= n; |
769 | __qdisc_qstats_drop(sch, drops); | 777 | __qdisc_qstats_drop(sch, drops); |
770 | } | 778 | } |
779 | rcu_read_unlock(); | ||
771 | } | 780 | } |
772 | EXPORT_SYMBOL(qdisc_tree_decrease_qlen); | 781 | EXPORT_SYMBOL(qdisc_tree_decrease_qlen); |
773 | 782 | ||
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index cb5d4ad32946..e82a1ad80aa5 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -737,7 +737,7 @@ static void attach_one_default_qdisc(struct net_device *dev, | |||
737 | return; | 737 | return; |
738 | } | 738 | } |
739 | if (!netif_is_multiqueue(dev)) | 739 | if (!netif_is_multiqueue(dev)) |
740 | qdisc->flags |= TCQ_F_ONETXQUEUE; | 740 | qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; |
741 | dev_queue->qdisc_sleeping = qdisc; | 741 | dev_queue->qdisc_sleeping = qdisc; |
742 | } | 742 | } |
743 | 743 | ||
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index f3cbaecd283a..3e82f047caaf 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c | |||
@@ -63,7 +63,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt) | |||
63 | if (qdisc == NULL) | 63 | if (qdisc == NULL) |
64 | goto err; | 64 | goto err; |
65 | priv->qdiscs[ntx] = qdisc; | 65 | priv->qdiscs[ntx] = qdisc; |
66 | qdisc->flags |= TCQ_F_ONETXQUEUE; | 66 | qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; |
67 | } | 67 | } |
68 | 68 | ||
69 | sch->flags |= TCQ_F_MQROOT; | 69 | sch->flags |= TCQ_F_MQROOT; |
@@ -156,7 +156,7 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new, | |||
156 | 156 | ||
157 | *old = dev_graft_qdisc(dev_queue, new); | 157 | *old = dev_graft_qdisc(dev_queue, new); |
158 | if (new) | 158 | if (new) |
159 | new->flags |= TCQ_F_ONETXQUEUE; | 159 | new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; |
160 | if (dev->flags & IFF_UP) | 160 | if (dev->flags & IFF_UP) |
161 | dev_activate(dev); | 161 | dev_activate(dev); |
162 | return 0; | 162 | return 0; |
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 3811a745452c..ad70ecf57ce7 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c | |||
@@ -132,7 +132,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) | |||
132 | goto err; | 132 | goto err; |
133 | } | 133 | } |
134 | priv->qdiscs[i] = qdisc; | 134 | priv->qdiscs[i] = qdisc; |
135 | qdisc->flags |= TCQ_F_ONETXQUEUE; | 135 | qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; |
136 | } | 136 | } |
137 | 137 | ||
138 | /* If the mqprio options indicate that hardware should own | 138 | /* If the mqprio options indicate that hardware should own |
@@ -209,7 +209,7 @@ static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new, | |||
209 | *old = dev_graft_qdisc(dev_queue, new); | 209 | *old = dev_graft_qdisc(dev_queue, new); |
210 | 210 | ||
211 | if (new) | 211 | if (new) |
212 | new->flags |= TCQ_F_ONETXQUEUE; | 212 | new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; |
213 | 213 | ||
214 | if (dev->flags & IFF_UP) | 214 | if (dev->flags & IFF_UP) |
215 | dev_activate(dev); | 215 | dev_activate(dev); |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index e917d27328ea..ec529121f38a 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -209,6 +209,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) | |||
209 | struct sock *sk = skb->sk; | 209 | struct sock *sk = skb->sk; |
210 | struct ipv6_pinfo *np = inet6_sk(sk); | 210 | struct ipv6_pinfo *np = inet6_sk(sk); |
211 | struct flowi6 *fl6 = &transport->fl.u.ip6; | 211 | struct flowi6 *fl6 = &transport->fl.u.ip6; |
212 | int res; | ||
212 | 213 | ||
213 | pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb, | 214 | pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb, |
214 | skb->len, &fl6->saddr, &fl6->daddr); | 215 | skb->len, &fl6->saddr, &fl6->daddr); |
@@ -220,7 +221,10 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) | |||
220 | 221 | ||
221 | SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); | 222 | SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); |
222 | 223 | ||
223 | return ip6_xmit(sk, skb, fl6, np->opt, np->tclass); | 224 | rcu_read_lock(); |
225 | res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass); | ||
226 | rcu_read_unlock(); | ||
227 | return res; | ||
224 | } | 228 | } |
225 | 229 | ||
226 | /* Returns the dst cache entry for the given source and destination ip | 230 | /* Returns the dst cache entry for the given source and destination ip |
@@ -262,7 +266,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, | |||
262 | pr_debug("src=%pI6 - ", &fl6->saddr); | 266 | pr_debug("src=%pI6 - ", &fl6->saddr); |
263 | } | 267 | } |
264 | 268 | ||
265 | final_p = fl6_update_dst(fl6, np->opt, &final); | 269 | rcu_read_lock(); |
270 | final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); | ||
271 | rcu_read_unlock(); | ||
272 | |||
266 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); | 273 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); |
267 | if (!asoc || saddr) | 274 | if (!asoc || saddr) |
268 | goto out; | 275 | goto out; |
@@ -316,14 +323,13 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, | |||
316 | } | 323 | } |
317 | } | 324 | } |
318 | } | 325 | } |
319 | rcu_read_unlock(); | ||
320 | |||
321 | if (baddr) { | 326 | if (baddr) { |
322 | fl6->saddr = baddr->v6.sin6_addr; | 327 | fl6->saddr = baddr->v6.sin6_addr; |
323 | fl6->fl6_sport = baddr->v6.sin6_port; | 328 | fl6->fl6_sport = baddr->v6.sin6_port; |
324 | final_p = fl6_update_dst(fl6, np->opt, &final); | 329 | final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); |
325 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); | 330 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); |
326 | } | 331 | } |
332 | rcu_read_unlock(); | ||
327 | 333 | ||
328 | out: | 334 | out: |
329 | if (!IS_ERR_OR_NULL(dst)) { | 335 | if (!IS_ERR_OR_NULL(dst)) { |
@@ -635,6 +641,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk, | |||
635 | struct sock *newsk; | 641 | struct sock *newsk; |
636 | struct ipv6_pinfo *newnp, *np = inet6_sk(sk); | 642 | struct ipv6_pinfo *newnp, *np = inet6_sk(sk); |
637 | struct sctp6_sock *newsctp6sk; | 643 | struct sctp6_sock *newsctp6sk; |
644 | struct ipv6_txoptions *opt; | ||
638 | 645 | ||
639 | newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0); | 646 | newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0); |
640 | if (!newsk) | 647 | if (!newsk) |
@@ -654,6 +661,13 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk, | |||
654 | 661 | ||
655 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); | 662 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); |
656 | 663 | ||
664 | rcu_read_lock(); | ||
665 | opt = rcu_dereference(np->opt); | ||
666 | if (opt) | ||
667 | opt = ipv6_dup_options(newsk, opt); | ||
668 | RCU_INIT_POINTER(newnp->opt, opt); | ||
669 | rcu_read_unlock(); | ||
670 | |||
657 | /* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname() | 671 | /* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname() |
658 | * and getpeername(). | 672 | * and getpeername(). |
659 | */ | 673 | */ |
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 7e8f0a117106..c0380cfb16ae 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -324,6 +324,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk) | |||
324 | sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : | 324 | sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : |
325 | "illegal chunk"); | 325 | "illegal chunk"); |
326 | 326 | ||
327 | sctp_chunk_hold(chunk); | ||
327 | sctp_outq_tail_data(q, chunk); | 328 | sctp_outq_tail_data(q, chunk); |
328 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) | 329 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) |
329 | SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); | 330 | SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); |
@@ -1251,6 +1252,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) | |||
1251 | */ | 1252 | */ |
1252 | 1253 | ||
1253 | sack_a_rwnd = ntohl(sack->a_rwnd); | 1254 | sack_a_rwnd = ntohl(sack->a_rwnd); |
1255 | asoc->peer.zero_window_announced = !sack_a_rwnd; | ||
1254 | outstanding = q->outstanding_bytes; | 1256 | outstanding = q->outstanding_bytes; |
1255 | 1257 | ||
1256 | if (outstanding < sack_a_rwnd) | 1258 | if (outstanding < sack_a_rwnd) |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 763e06a55155..5d6a03fad378 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -1652,7 +1652,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, | |||
1652 | 1652 | ||
1653 | /* Set an expiration time for the cookie. */ | 1653 | /* Set an expiration time for the cookie. */ |
1654 | cookie->c.expiration = ktime_add(asoc->cookie_life, | 1654 | cookie->c.expiration = ktime_add(asoc->cookie_life, |
1655 | ktime_get()); | 1655 | ktime_get_real()); |
1656 | 1656 | ||
1657 | /* Copy the peer's init packet. */ | 1657 | /* Copy the peer's init packet. */ |
1658 | memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr, | 1658 | memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr, |
@@ -1780,7 +1780,7 @@ no_hmac: | |||
1780 | if (sock_flag(ep->base.sk, SOCK_TIMESTAMP)) | 1780 | if (sock_flag(ep->base.sk, SOCK_TIMESTAMP)) |
1781 | kt = skb_get_ktime(skb); | 1781 | kt = skb_get_ktime(skb); |
1782 | else | 1782 | else |
1783 | kt = ktime_get(); | 1783 | kt = ktime_get_real(); |
1784 | 1784 | ||
1785 | if (!asoc && ktime_before(bear_cookie->expiration, kt)) { | 1785 | if (!asoc && ktime_before(bear_cookie->expiration, kt)) { |
1786 | /* | 1786 | /* |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 6f46aa16cb76..cd34a4a34065 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -5412,7 +5412,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net, | |||
5412 | SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS); | 5412 | SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS); |
5413 | 5413 | ||
5414 | if (asoc->overall_error_count >= asoc->max_retrans) { | 5414 | if (asoc->overall_error_count >= asoc->max_retrans) { |
5415 | if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) { | 5415 | if (asoc->peer.zero_window_announced && |
5416 | asoc->state == SCTP_STATE_SHUTDOWN_PENDING) { | ||
5416 | /* | 5417 | /* |
5417 | * We are here likely because the receiver had its rwnd | 5418 | * We are here likely because the receiver had its rwnd |
5418 | * closed for a while and we have not been able to | 5419 | * closed for a while and we have not been able to |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 897c01c029ca..9b6cc6de80d8 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -972,7 +972,7 @@ static int sctp_setsockopt_bindx(struct sock *sk, | |||
972 | return -EFAULT; | 972 | return -EFAULT; |
973 | 973 | ||
974 | /* Alloc space for the address array in kernel memory. */ | 974 | /* Alloc space for the address array in kernel memory. */ |
975 | kaddrs = kmalloc(addrs_size, GFP_KERNEL); | 975 | kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN); |
976 | if (unlikely(!kaddrs)) | 976 | if (unlikely(!kaddrs)) |
977 | return -ENOMEM; | 977 | return -ENOMEM; |
978 | 978 | ||
@@ -1952,8 +1952,6 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) | |||
1952 | 1952 | ||
1953 | /* Now send the (possibly) fragmented message. */ | 1953 | /* Now send the (possibly) fragmented message. */ |
1954 | list_for_each_entry(chunk, &datamsg->chunks, frag_list) { | 1954 | list_for_each_entry(chunk, &datamsg->chunks, frag_list) { |
1955 | sctp_chunk_hold(chunk); | ||
1956 | |||
1957 | /* Do accounting for the write space. */ | 1955 | /* Do accounting for the write space. */ |
1958 | sctp_set_owner_w(chunk); | 1956 | sctp_set_owner_w(chunk); |
1959 | 1957 | ||
@@ -1966,15 +1964,13 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) | |||
1966 | * breaks. | 1964 | * breaks. |
1967 | */ | 1965 | */ |
1968 | err = sctp_primitive_SEND(net, asoc, datamsg); | 1966 | err = sctp_primitive_SEND(net, asoc, datamsg); |
1967 | sctp_datamsg_put(datamsg); | ||
1969 | /* Did the lower layer accept the chunk? */ | 1968 | /* Did the lower layer accept the chunk? */ |
1970 | if (err) { | 1969 | if (err) |
1971 | sctp_datamsg_free(datamsg); | ||
1972 | goto out_free; | 1970 | goto out_free; |
1973 | } | ||
1974 | 1971 | ||
1975 | pr_debug("%s: we sent primitively\n", __func__); | 1972 | pr_debug("%s: we sent primitively\n", __func__); |
1976 | 1973 | ||
1977 | sctp_datamsg_put(datamsg); | ||
1978 | err = msg_len; | 1974 | err = msg_len; |
1979 | 1975 | ||
1980 | if (unlikely(wait_connect)) { | 1976 | if (unlikely(wait_connect)) { |
@@ -4928,7 +4924,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, | |||
4928 | to = optval + offsetof(struct sctp_getaddrs, addrs); | 4924 | to = optval + offsetof(struct sctp_getaddrs, addrs); |
4929 | space_left = len - offsetof(struct sctp_getaddrs, addrs); | 4925 | space_left = len - offsetof(struct sctp_getaddrs, addrs); |
4930 | 4926 | ||
4931 | addrs = kmalloc(space_left, GFP_KERNEL); | 4927 | addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN); |
4932 | if (!addrs) | 4928 | if (!addrs) |
4933 | return -ENOMEM; | 4929 | return -ENOMEM; |
4934 | 4930 | ||
@@ -6458,7 +6454,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
6458 | if (sctp_writeable(sk)) { | 6454 | if (sctp_writeable(sk)) { |
6459 | mask |= POLLOUT | POLLWRNORM; | 6455 | mask |= POLLOUT | POLLWRNORM; |
6460 | } else { | 6456 | } else { |
6461 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 6457 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
6462 | /* | 6458 | /* |
6463 | * Since the socket is not locked, the buffer | 6459 | * Since the socket is not locked, the buffer |
6464 | * might be made available after the writeable check and | 6460 | * might be made available after the writeable check and |
@@ -6801,26 +6797,30 @@ no_packet: | |||
6801 | static void __sctp_write_space(struct sctp_association *asoc) | 6797 | static void __sctp_write_space(struct sctp_association *asoc) |
6802 | { | 6798 | { |
6803 | struct sock *sk = asoc->base.sk; | 6799 | struct sock *sk = asoc->base.sk; |
6804 | struct socket *sock = sk->sk_socket; | ||
6805 | 6800 | ||
6806 | if ((sctp_wspace(asoc) > 0) && sock) { | 6801 | if (sctp_wspace(asoc) <= 0) |
6807 | if (waitqueue_active(&asoc->wait)) | 6802 | return; |
6808 | wake_up_interruptible(&asoc->wait); | 6803 | |
6804 | if (waitqueue_active(&asoc->wait)) | ||
6805 | wake_up_interruptible(&asoc->wait); | ||
6809 | 6806 | ||
6810 | if (sctp_writeable(sk)) { | 6807 | if (sctp_writeable(sk)) { |
6811 | wait_queue_head_t *wq = sk_sleep(sk); | 6808 | struct socket_wq *wq; |
6812 | 6809 | ||
6813 | if (wq && waitqueue_active(wq)) | 6810 | rcu_read_lock(); |
6814 | wake_up_interruptible(wq); | 6811 | wq = rcu_dereference(sk->sk_wq); |
6812 | if (wq) { | ||
6813 | if (waitqueue_active(&wq->wait)) | ||
6814 | wake_up_interruptible(&wq->wait); | ||
6815 | 6815 | ||
6816 | /* Note that we try to include the Async I/O support | 6816 | /* Note that we try to include the Async I/O support |
6817 | * here by modeling from the current TCP/UDP code. | 6817 | * here by modeling from the current TCP/UDP code. |
6818 | * We have not tested with it yet. | 6818 | * We have not tested with it yet. |
6819 | */ | 6819 | */ |
6820 | if (!(sk->sk_shutdown & SEND_SHUTDOWN)) | 6820 | if (!(sk->sk_shutdown & SEND_SHUTDOWN)) |
6821 | sock_wake_async(sock, | 6821 | sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); |
6822 | SOCK_WAKE_SPACE, POLL_OUT); | ||
6823 | } | 6822 | } |
6823 | rcu_read_unlock(); | ||
6824 | } | 6824 | } |
6825 | } | 6825 | } |
6826 | 6826 | ||
@@ -7163,6 +7163,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, | |||
7163 | newsk->sk_type = sk->sk_type; | 7163 | newsk->sk_type = sk->sk_type; |
7164 | newsk->sk_bound_dev_if = sk->sk_bound_dev_if; | 7164 | newsk->sk_bound_dev_if = sk->sk_bound_dev_if; |
7165 | newsk->sk_flags = sk->sk_flags; | 7165 | newsk->sk_flags = sk->sk_flags; |
7166 | newsk->sk_tsflags = sk->sk_tsflags; | ||
7166 | newsk->sk_no_check_tx = sk->sk_no_check_tx; | 7167 | newsk->sk_no_check_tx = sk->sk_no_check_tx; |
7167 | newsk->sk_no_check_rx = sk->sk_no_check_rx; | 7168 | newsk->sk_no_check_rx = sk->sk_no_check_rx; |
7168 | newsk->sk_reuse = sk->sk_reuse; | 7169 | newsk->sk_reuse = sk->sk_reuse; |
@@ -7195,6 +7196,9 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, | |||
7195 | newinet->mc_ttl = 1; | 7196 | newinet->mc_ttl = 1; |
7196 | newinet->mc_index = 0; | 7197 | newinet->mc_index = 0; |
7197 | newinet->mc_list = NULL; | 7198 | newinet->mc_list = NULL; |
7199 | |||
7200 | if (newsk->sk_flags & SK_FLAGS_TIMESTAMP) | ||
7201 | net_enable_timestamp(); | ||
7198 | } | 7202 | } |
7199 | 7203 | ||
7200 | static inline void sctp_copy_descendant(struct sock *sk_to, | 7204 | static inline void sctp_copy_descendant(struct sock *sk_to, |
@@ -7375,6 +7379,13 @@ struct proto sctp_prot = { | |||
7375 | 7379 | ||
7376 | #if IS_ENABLED(CONFIG_IPV6) | 7380 | #if IS_ENABLED(CONFIG_IPV6) |
7377 | 7381 | ||
7382 | #include <net/transp_v6.h> | ||
7383 | static void sctp_v6_destroy_sock(struct sock *sk) | ||
7384 | { | ||
7385 | sctp_destroy_sock(sk); | ||
7386 | inet6_destroy_sock(sk); | ||
7387 | } | ||
7388 | |||
7378 | struct proto sctpv6_prot = { | 7389 | struct proto sctpv6_prot = { |
7379 | .name = "SCTPv6", | 7390 | .name = "SCTPv6", |
7380 | .owner = THIS_MODULE, | 7391 | .owner = THIS_MODULE, |
@@ -7384,7 +7395,7 @@ struct proto sctpv6_prot = { | |||
7384 | .accept = sctp_accept, | 7395 | .accept = sctp_accept, |
7385 | .ioctl = sctp_ioctl, | 7396 | .ioctl = sctp_ioctl, |
7386 | .init = sctp_init_sock, | 7397 | .init = sctp_init_sock, |
7387 | .destroy = sctp_destroy_sock, | 7398 | .destroy = sctp_v6_destroy_sock, |
7388 | .shutdown = sctp_shutdown, | 7399 | .shutdown = sctp_shutdown, |
7389 | .setsockopt = sctp_setsockopt, | 7400 | .setsockopt = sctp_setsockopt, |
7390 | .getsockopt = sctp_getsockopt, | 7401 | .getsockopt = sctp_getsockopt, |
diff --git a/net/socket.c b/net/socket.c index dd2c247c99e3..29822d6dd91e 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -1056,27 +1056,20 @@ static int sock_fasync(int fd, struct file *filp, int on) | |||
1056 | return 0; | 1056 | return 0; |
1057 | } | 1057 | } |
1058 | 1058 | ||
1059 | /* This function may be called only under socket lock or callback_lock or rcu_lock */ | 1059 | /* This function may be called only under rcu_lock */ |
1060 | 1060 | ||
1061 | int sock_wake_async(struct socket *sock, int how, int band) | 1061 | int sock_wake_async(struct socket_wq *wq, int how, int band) |
1062 | { | 1062 | { |
1063 | struct socket_wq *wq; | 1063 | if (!wq || !wq->fasync_list) |
1064 | |||
1065 | if (!sock) | ||
1066 | return -1; | ||
1067 | rcu_read_lock(); | ||
1068 | wq = rcu_dereference(sock->wq); | ||
1069 | if (!wq || !wq->fasync_list) { | ||
1070 | rcu_read_unlock(); | ||
1071 | return -1; | 1064 | return -1; |
1072 | } | 1065 | |
1073 | switch (how) { | 1066 | switch (how) { |
1074 | case SOCK_WAKE_WAITD: | 1067 | case SOCK_WAKE_WAITD: |
1075 | if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) | 1068 | if (test_bit(SOCKWQ_ASYNC_WAITDATA, &wq->flags)) |
1076 | break; | 1069 | break; |
1077 | goto call_kill; | 1070 | goto call_kill; |
1078 | case SOCK_WAKE_SPACE: | 1071 | case SOCK_WAKE_SPACE: |
1079 | if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags)) | 1072 | if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags)) |
1080 | break; | 1073 | break; |
1081 | /* fall through */ | 1074 | /* fall through */ |
1082 | case SOCK_WAKE_IO: | 1075 | case SOCK_WAKE_IO: |
@@ -1086,7 +1079,7 @@ call_kill: | |||
1086 | case SOCK_WAKE_URG: | 1079 | case SOCK_WAKE_URG: |
1087 | kill_fasync(&wq->fasync_list, SIGURG, band); | 1080 | kill_fasync(&wq->fasync_list, SIGURG, band); |
1088 | } | 1081 | } |
1089 | rcu_read_unlock(); | 1082 | |
1090 | return 0; | 1083 | return 0; |
1091 | } | 1084 | } |
1092 | EXPORT_SYMBOL(sock_wake_async); | 1085 | EXPORT_SYMBOL(sock_wake_async); |
@@ -1702,6 +1695,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, | |||
1702 | msg.msg_name = addr ? (struct sockaddr *)&address : NULL; | 1695 | msg.msg_name = addr ? (struct sockaddr *)&address : NULL; |
1703 | /* We assume all kernel code knows the size of sockaddr_storage */ | 1696 | /* We assume all kernel code knows the size of sockaddr_storage */ |
1704 | msg.msg_namelen = 0; | 1697 | msg.msg_namelen = 0; |
1698 | msg.msg_iocb = NULL; | ||
1705 | if (sock->file->f_flags & O_NONBLOCK) | 1699 | if (sock->file->f_flags & O_NONBLOCK) |
1706 | flags |= MSG_DONTWAIT; | 1700 | flags |= MSG_DONTWAIT; |
1707 | err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags); | 1701 | err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags); |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index f14f24ee9983..73ad57a59989 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -250,11 +250,11 @@ void rpc_destroy_wait_queue(struct rpc_wait_queue *queue) | |||
250 | } | 250 | } |
251 | EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue); | 251 | EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue); |
252 | 252 | ||
253 | static int rpc_wait_bit_killable(struct wait_bit_key *key) | 253 | static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode) |
254 | { | 254 | { |
255 | if (fatal_signal_pending(current)) | ||
256 | return -ERESTARTSYS; | ||
257 | freezable_schedule_unsafe(); | 255 | freezable_schedule_unsafe(); |
256 | if (signal_pending_state(mode, current)) | ||
257 | return -ERESTARTSYS; | ||
258 | return 0; | 258 | return 0; |
259 | } | 259 | } |
260 | 260 | ||
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index bc5b7b5032ca..cc9852897395 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -1364,6 +1364,19 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req, | |||
1364 | memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg)); | 1364 | memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg)); |
1365 | memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res)); | 1365 | memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res)); |
1366 | 1366 | ||
1367 | /* Adjust the argument buffer length */ | ||
1368 | rqstp->rq_arg.len = req->rq_private_buf.len; | ||
1369 | if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) { | ||
1370 | rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len; | ||
1371 | rqstp->rq_arg.page_len = 0; | ||
1372 | } else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len + | ||
1373 | rqstp->rq_arg.page_len) | ||
1374 | rqstp->rq_arg.page_len = rqstp->rq_arg.len - | ||
1375 | rqstp->rq_arg.head[0].iov_len; | ||
1376 | else | ||
1377 | rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len + | ||
1378 | rqstp->rq_arg.page_len; | ||
1379 | |||
1367 | /* reset result send buffer "put" position */ | 1380 | /* reset result send buffer "put" position */ |
1368 | resv->iov_len = 0; | 1381 | resv->iov_len = 0; |
1369 | 1382 | ||
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 1d1a70498910..2ffaf6a79499 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -398,7 +398,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, | |||
398 | if (unlikely(!sock)) | 398 | if (unlikely(!sock)) |
399 | return -ENOTSOCK; | 399 | return -ENOTSOCK; |
400 | 400 | ||
401 | clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); | 401 | clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags); |
402 | if (base != 0) { | 402 | if (base != 0) { |
403 | addr = NULL; | 403 | addr = NULL; |
404 | addrlen = 0; | 404 | addrlen = 0; |
@@ -442,7 +442,7 @@ static void xs_nospace_callback(struct rpc_task *task) | |||
442 | struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt); | 442 | struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt); |
443 | 443 | ||
444 | transport->inet->sk_write_pending--; | 444 | transport->inet->sk_write_pending--; |
445 | clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); | 445 | clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags); |
446 | } | 446 | } |
447 | 447 | ||
448 | /** | 448 | /** |
@@ -467,7 +467,7 @@ static int xs_nospace(struct rpc_task *task) | |||
467 | 467 | ||
468 | /* Don't race with disconnect */ | 468 | /* Don't race with disconnect */ |
469 | if (xprt_connected(xprt)) { | 469 | if (xprt_connected(xprt)) { |
470 | if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) { | 470 | if (test_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags)) { |
471 | /* | 471 | /* |
472 | * Notify TCP that we're limited by the application | 472 | * Notify TCP that we're limited by the application |
473 | * window size | 473 | * window size |
@@ -478,7 +478,7 @@ static int xs_nospace(struct rpc_task *task) | |||
478 | xprt_wait_for_buffer_space(task, xs_nospace_callback); | 478 | xprt_wait_for_buffer_space(task, xs_nospace_callback); |
479 | } | 479 | } |
480 | } else { | 480 | } else { |
481 | clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); | 481 | clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags); |
482 | ret = -ENOTCONN; | 482 | ret = -ENOTCONN; |
483 | } | 483 | } |
484 | 484 | ||
@@ -626,7 +626,7 @@ process_status: | |||
626 | case -EPERM: | 626 | case -EPERM: |
627 | /* When the server has died, an ICMP port unreachable message | 627 | /* When the server has died, an ICMP port unreachable message |
628 | * prompts ECONNREFUSED. */ | 628 | * prompts ECONNREFUSED. */ |
629 | clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); | 629 | clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags); |
630 | } | 630 | } |
631 | 631 | ||
632 | return status; | 632 | return status; |
@@ -715,7 +715,7 @@ static int xs_tcp_send_request(struct rpc_task *task) | |||
715 | case -EADDRINUSE: | 715 | case -EADDRINUSE: |
716 | case -ENOBUFS: | 716 | case -ENOBUFS: |
717 | case -EPIPE: | 717 | case -EPIPE: |
718 | clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); | 718 | clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags); |
719 | } | 719 | } |
720 | 720 | ||
721 | return status; | 721 | return status; |
@@ -1618,7 +1618,7 @@ static void xs_write_space(struct sock *sk) | |||
1618 | 1618 | ||
1619 | if (unlikely(!(xprt = xprt_from_sock(sk)))) | 1619 | if (unlikely(!(xprt = xprt_from_sock(sk)))) |
1620 | return; | 1620 | return; |
1621 | if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0) | 1621 | if (test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags) == 0) |
1622 | return; | 1622 | return; |
1623 | 1623 | ||
1624 | xprt_write_space(xprt); | 1624 | xprt_write_space(xprt); |
diff --git a/net/tipc/link.c b/net/tipc/link.c index 9efbdbde2b08..91aea071ab27 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -191,6 +191,7 @@ void tipc_link_add_bc_peer(struct tipc_link *snd_l, | |||
191 | 191 | ||
192 | snd_l->ackers++; | 192 | snd_l->ackers++; |
193 | rcv_l->acked = snd_l->snd_nxt - 1; | 193 | rcv_l->acked = snd_l->snd_nxt - 1; |
194 | snd_l->state = LINK_ESTABLISHED; | ||
194 | tipc_link_build_bc_init_msg(uc_l, xmitq); | 195 | tipc_link_build_bc_init_msg(uc_l, xmitq); |
195 | } | 196 | } |
196 | 197 | ||
@@ -206,6 +207,7 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l, | |||
206 | rcv_l->state = LINK_RESET; | 207 | rcv_l->state = LINK_RESET; |
207 | if (!snd_l->ackers) { | 208 | if (!snd_l->ackers) { |
208 | tipc_link_reset(snd_l); | 209 | tipc_link_reset(snd_l); |
210 | snd_l->state = LINK_RESET; | ||
209 | __skb_queue_purge(xmitq); | 211 | __skb_queue_purge(xmitq); |
210 | } | 212 | } |
211 | } | 213 | } |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 552dbaba9cf3..b53246fb0412 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -105,6 +105,7 @@ struct tipc_sock { | |||
105 | static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb); | 105 | static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb); |
106 | static void tipc_data_ready(struct sock *sk); | 106 | static void tipc_data_ready(struct sock *sk); |
107 | static void tipc_write_space(struct sock *sk); | 107 | static void tipc_write_space(struct sock *sk); |
108 | static void tipc_sock_destruct(struct sock *sk); | ||
108 | static int tipc_release(struct socket *sock); | 109 | static int tipc_release(struct socket *sock); |
109 | static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); | 110 | static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); |
110 | static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p); | 111 | static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p); |
@@ -381,6 +382,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock, | |||
381 | sk->sk_rcvbuf = sysctl_tipc_rmem[1]; | 382 | sk->sk_rcvbuf = sysctl_tipc_rmem[1]; |
382 | sk->sk_data_ready = tipc_data_ready; | 383 | sk->sk_data_ready = tipc_data_ready; |
383 | sk->sk_write_space = tipc_write_space; | 384 | sk->sk_write_space = tipc_write_space; |
385 | sk->sk_destruct = tipc_sock_destruct; | ||
384 | tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; | 386 | tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; |
385 | tsk->sent_unacked = 0; | 387 | tsk->sent_unacked = 0; |
386 | atomic_set(&tsk->dupl_rcvcnt, 0); | 388 | atomic_set(&tsk->dupl_rcvcnt, 0); |
@@ -470,9 +472,6 @@ static int tipc_release(struct socket *sock) | |||
470 | tipc_node_remove_conn(net, dnode, tsk->portid); | 472 | tipc_node_remove_conn(net, dnode, tsk->portid); |
471 | } | 473 | } |
472 | 474 | ||
473 | /* Discard any remaining (connection-based) messages in receive queue */ | ||
474 | __skb_queue_purge(&sk->sk_receive_queue); | ||
475 | |||
476 | /* Reject any messages that accumulated in backlog queue */ | 475 | /* Reject any messages that accumulated in backlog queue */ |
477 | sock->state = SS_DISCONNECTING; | 476 | sock->state = SS_DISCONNECTING; |
478 | release_sock(sk); | 477 | release_sock(sk); |
@@ -1515,6 +1514,11 @@ static void tipc_data_ready(struct sock *sk) | |||
1515 | rcu_read_unlock(); | 1514 | rcu_read_unlock(); |
1516 | } | 1515 | } |
1517 | 1516 | ||
1517 | static void tipc_sock_destruct(struct sock *sk) | ||
1518 | { | ||
1519 | __skb_queue_purge(&sk->sk_receive_queue); | ||
1520 | } | ||
1521 | |||
1518 | /** | 1522 | /** |
1519 | * filter_connect - Handle all incoming messages for a connection-based socket | 1523 | * filter_connect - Handle all incoming messages for a connection-based socket |
1520 | * @tsk: TIPC socket | 1524 | * @tsk: TIPC socket |
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index ad2719ad4c1b..70c03271b798 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c | |||
@@ -158,8 +158,11 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb, | |||
158 | struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value; | 158 | struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value; |
159 | struct rtable *rt; | 159 | struct rtable *rt; |
160 | 160 | ||
161 | if (skb_headroom(skb) < UDP_MIN_HEADROOM) | 161 | if (skb_headroom(skb) < UDP_MIN_HEADROOM) { |
162 | pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC); | 162 | err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC); |
163 | if (err) | ||
164 | goto tx_error; | ||
165 | } | ||
163 | 166 | ||
164 | skb_set_inner_protocol(skb, htons(ETH_P_TIPC)); | 167 | skb_set_inner_protocol(skb, htons(ETH_P_TIPC)); |
165 | ub = rcu_dereference_rtnl(b->media_ptr); | 168 | ub = rcu_dereference_rtnl(b->media_ptr); |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 955ec152cb71..a4631477cedf 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -326,6 +326,118 @@ found: | |||
326 | return s; | 326 | return s; |
327 | } | 327 | } |
328 | 328 | ||
329 | /* Support code for asymmetrically connected dgram sockets | ||
330 | * | ||
331 | * If a datagram socket is connected to a socket not itself connected | ||
332 | * to the first socket (eg, /dev/log), clients may only enqueue more | ||
333 | * messages if the present receive queue of the server socket is not | ||
334 | * "too large". This means there's a second writeability condition | ||
335 | * poll and sendmsg need to test. The dgram recv code will do a wake | ||
336 | * up on the peer_wait wait queue of a socket upon reception of a | ||
337 | * datagram which needs to be propagated to sleeping would-be writers | ||
338 | * since these might not have sent anything so far. This can't be | ||
339 | * accomplished via poll_wait because the lifetime of the server | ||
340 | * socket might be less than that of its clients if these break their | ||
341 | * association with it or if the server socket is closed while clients | ||
342 | * are still connected to it and there's no way to inform "a polling | ||
343 | * implementation" that it should let go of a certain wait queue | ||
344 | * | ||
345 | * In order to propagate a wake up, a wait_queue_t of the client | ||
346 | * socket is enqueued on the peer_wait queue of the server socket | ||
347 | * whose wake function does a wake_up on the ordinary client socket | ||
348 | * wait queue. This connection is established whenever a write (or | ||
349 | * poll for write) hit the flow control condition and broken when the | ||
350 | * association to the server socket is dissolved or after a wake up | ||
351 | * was relayed. | ||
352 | */ | ||
353 | |||
354 | static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags, | ||
355 | void *key) | ||
356 | { | ||
357 | struct unix_sock *u; | ||
358 | wait_queue_head_t *u_sleep; | ||
359 | |||
360 | u = container_of(q, struct unix_sock, peer_wake); | ||
361 | |||
362 | __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait, | ||
363 | q); | ||
364 | u->peer_wake.private = NULL; | ||
365 | |||
366 | /* relaying can only happen while the wq still exists */ | ||
367 | u_sleep = sk_sleep(&u->sk); | ||
368 | if (u_sleep) | ||
369 | wake_up_interruptible_poll(u_sleep, key); | ||
370 | |||
371 | return 0; | ||
372 | } | ||
373 | |||
374 | static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other) | ||
375 | { | ||
376 | struct unix_sock *u, *u_other; | ||
377 | int rc; | ||
378 | |||
379 | u = unix_sk(sk); | ||
380 | u_other = unix_sk(other); | ||
381 | rc = 0; | ||
382 | spin_lock(&u_other->peer_wait.lock); | ||
383 | |||
384 | if (!u->peer_wake.private) { | ||
385 | u->peer_wake.private = other; | ||
386 | __add_wait_queue(&u_other->peer_wait, &u->peer_wake); | ||
387 | |||
388 | rc = 1; | ||
389 | } | ||
390 | |||
391 | spin_unlock(&u_other->peer_wait.lock); | ||
392 | return rc; | ||
393 | } | ||
394 | |||
395 | static void unix_dgram_peer_wake_disconnect(struct sock *sk, | ||
396 | struct sock *other) | ||
397 | { | ||
398 | struct unix_sock *u, *u_other; | ||
399 | |||
400 | u = unix_sk(sk); | ||
401 | u_other = unix_sk(other); | ||
402 | spin_lock(&u_other->peer_wait.lock); | ||
403 | |||
404 | if (u->peer_wake.private == other) { | ||
405 | __remove_wait_queue(&u_other->peer_wait, &u->peer_wake); | ||
406 | u->peer_wake.private = NULL; | ||
407 | } | ||
408 | |||
409 | spin_unlock(&u_other->peer_wait.lock); | ||
410 | } | ||
411 | |||
412 | static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk, | ||
413 | struct sock *other) | ||
414 | { | ||
415 | unix_dgram_peer_wake_disconnect(sk, other); | ||
416 | wake_up_interruptible_poll(sk_sleep(sk), | ||
417 | POLLOUT | | ||
418 | POLLWRNORM | | ||
419 | POLLWRBAND); | ||
420 | } | ||
421 | |||
422 | /* preconditions: | ||
423 | * - unix_peer(sk) == other | ||
424 | * - association is stable | ||
425 | */ | ||
426 | static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other) | ||
427 | { | ||
428 | int connected; | ||
429 | |||
430 | connected = unix_dgram_peer_wake_connect(sk, other); | ||
431 | |||
432 | if (unix_recvq_full(other)) | ||
433 | return 1; | ||
434 | |||
435 | if (connected) | ||
436 | unix_dgram_peer_wake_disconnect(sk, other); | ||
437 | |||
438 | return 0; | ||
439 | } | ||
440 | |||
329 | static int unix_writable(const struct sock *sk) | 441 | static int unix_writable(const struct sock *sk) |
330 | { | 442 | { |
331 | return sk->sk_state != TCP_LISTEN && | 443 | return sk->sk_state != TCP_LISTEN && |
@@ -431,6 +543,8 @@ static void unix_release_sock(struct sock *sk, int embrion) | |||
431 | skpair->sk_state_change(skpair); | 543 | skpair->sk_state_change(skpair); |
432 | sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP); | 544 | sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP); |
433 | } | 545 | } |
546 | |||
547 | unix_dgram_peer_wake_disconnect(sk, skpair); | ||
434 | sock_put(skpair); /* It may now die */ | 548 | sock_put(skpair); /* It may now die */ |
435 | unix_peer(sk) = NULL; | 549 | unix_peer(sk) = NULL; |
436 | } | 550 | } |
@@ -666,6 +780,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern) | |||
666 | INIT_LIST_HEAD(&u->link); | 780 | INIT_LIST_HEAD(&u->link); |
667 | mutex_init(&u->readlock); /* single task reading lock */ | 781 | mutex_init(&u->readlock); /* single task reading lock */ |
668 | init_waitqueue_head(&u->peer_wait); | 782 | init_waitqueue_head(&u->peer_wait); |
783 | init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay); | ||
669 | unix_insert_socket(unix_sockets_unbound(sk), sk); | 784 | unix_insert_socket(unix_sockets_unbound(sk), sk); |
670 | out: | 785 | out: |
671 | if (sk == NULL) | 786 | if (sk == NULL) |
@@ -1033,6 +1148,8 @@ restart: | |||
1033 | if (unix_peer(sk)) { | 1148 | if (unix_peer(sk)) { |
1034 | struct sock *old_peer = unix_peer(sk); | 1149 | struct sock *old_peer = unix_peer(sk); |
1035 | unix_peer(sk) = other; | 1150 | unix_peer(sk) = other; |
1151 | unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer); | ||
1152 | |||
1036 | unix_state_double_unlock(sk, other); | 1153 | unix_state_double_unlock(sk, other); |
1037 | 1154 | ||
1038 | if (other != old_peer) | 1155 | if (other != old_peer) |
@@ -1434,6 +1551,14 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen | |||
1434 | return err; | 1551 | return err; |
1435 | } | 1552 | } |
1436 | 1553 | ||
1554 | static bool unix_passcred_enabled(const struct socket *sock, | ||
1555 | const struct sock *other) | ||
1556 | { | ||
1557 | return test_bit(SOCK_PASSCRED, &sock->flags) || | ||
1558 | !other->sk_socket || | ||
1559 | test_bit(SOCK_PASSCRED, &other->sk_socket->flags); | ||
1560 | } | ||
1561 | |||
1437 | /* | 1562 | /* |
1438 | * Some apps rely on write() giving SCM_CREDENTIALS | 1563 | * Some apps rely on write() giving SCM_CREDENTIALS |
1439 | * We include credentials if source or destination socket | 1564 | * We include credentials if source or destination socket |
@@ -1444,14 +1569,41 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock, | |||
1444 | { | 1569 | { |
1445 | if (UNIXCB(skb).pid) | 1570 | if (UNIXCB(skb).pid) |
1446 | return; | 1571 | return; |
1447 | if (test_bit(SOCK_PASSCRED, &sock->flags) || | 1572 | if (unix_passcred_enabled(sock, other)) { |
1448 | !other->sk_socket || | ||
1449 | test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) { | ||
1450 | UNIXCB(skb).pid = get_pid(task_tgid(current)); | 1573 | UNIXCB(skb).pid = get_pid(task_tgid(current)); |
1451 | current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid); | 1574 | current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid); |
1452 | } | 1575 | } |
1453 | } | 1576 | } |
1454 | 1577 | ||
1578 | static int maybe_init_creds(struct scm_cookie *scm, | ||
1579 | struct socket *socket, | ||
1580 | const struct sock *other) | ||
1581 | { | ||
1582 | int err; | ||
1583 | struct msghdr msg = { .msg_controllen = 0 }; | ||
1584 | |||
1585 | err = scm_send(socket, &msg, scm, false); | ||
1586 | if (err) | ||
1587 | return err; | ||
1588 | |||
1589 | if (unix_passcred_enabled(socket, other)) { | ||
1590 | scm->pid = get_pid(task_tgid(current)); | ||
1591 | current_uid_gid(&scm->creds.uid, &scm->creds.gid); | ||
1592 | } | ||
1593 | return err; | ||
1594 | } | ||
1595 | |||
1596 | static bool unix_skb_scm_eq(struct sk_buff *skb, | ||
1597 | struct scm_cookie *scm) | ||
1598 | { | ||
1599 | const struct unix_skb_parms *u = &UNIXCB(skb); | ||
1600 | |||
1601 | return u->pid == scm->pid && | ||
1602 | uid_eq(u->uid, scm->creds.uid) && | ||
1603 | gid_eq(u->gid, scm->creds.gid) && | ||
1604 | unix_secdata_eq(scm, skb); | ||
1605 | } | ||
1606 | |||
1455 | /* | 1607 | /* |
1456 | * Send AF_UNIX data. | 1608 | * Send AF_UNIX data. |
1457 | */ | 1609 | */ |
@@ -1472,6 +1624,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, | |||
1472 | struct scm_cookie scm; | 1624 | struct scm_cookie scm; |
1473 | int max_level; | 1625 | int max_level; |
1474 | int data_len = 0; | 1626 | int data_len = 0; |
1627 | int sk_locked; | ||
1475 | 1628 | ||
1476 | wait_for_unix_gc(); | 1629 | wait_for_unix_gc(); |
1477 | err = scm_send(sock, msg, &scm, false); | 1630 | err = scm_send(sock, msg, &scm, false); |
@@ -1550,12 +1703,14 @@ restart: | |||
1550 | goto out_free; | 1703 | goto out_free; |
1551 | } | 1704 | } |
1552 | 1705 | ||
1706 | sk_locked = 0; | ||
1553 | unix_state_lock(other); | 1707 | unix_state_lock(other); |
1708 | restart_locked: | ||
1554 | err = -EPERM; | 1709 | err = -EPERM; |
1555 | if (!unix_may_send(sk, other)) | 1710 | if (!unix_may_send(sk, other)) |
1556 | goto out_unlock; | 1711 | goto out_unlock; |
1557 | 1712 | ||
1558 | if (sock_flag(other, SOCK_DEAD)) { | 1713 | if (unlikely(sock_flag(other, SOCK_DEAD))) { |
1559 | /* | 1714 | /* |
1560 | * Check with 1003.1g - what should | 1715 | * Check with 1003.1g - what should |
1561 | * datagram error | 1716 | * datagram error |
@@ -1563,10 +1718,14 @@ restart: | |||
1563 | unix_state_unlock(other); | 1718 | unix_state_unlock(other); |
1564 | sock_put(other); | 1719 | sock_put(other); |
1565 | 1720 | ||
1721 | if (!sk_locked) | ||
1722 | unix_state_lock(sk); | ||
1723 | |||
1566 | err = 0; | 1724 | err = 0; |
1567 | unix_state_lock(sk); | ||
1568 | if (unix_peer(sk) == other) { | 1725 | if (unix_peer(sk) == other) { |
1569 | unix_peer(sk) = NULL; | 1726 | unix_peer(sk) = NULL; |
1727 | unix_dgram_peer_wake_disconnect_wakeup(sk, other); | ||
1728 | |||
1570 | unix_state_unlock(sk); | 1729 | unix_state_unlock(sk); |
1571 | 1730 | ||
1572 | unix_dgram_disconnected(sk, other); | 1731 | unix_dgram_disconnected(sk, other); |
@@ -1592,21 +1751,38 @@ restart: | |||
1592 | goto out_unlock; | 1751 | goto out_unlock; |
1593 | } | 1752 | } |
1594 | 1753 | ||
1595 | if (unix_peer(other) != sk && unix_recvq_full(other)) { | 1754 | if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { |
1596 | if (!timeo) { | 1755 | if (timeo) { |
1597 | err = -EAGAIN; | 1756 | timeo = unix_wait_for_peer(other, timeo); |
1598 | goto out_unlock; | 1757 | |
1758 | err = sock_intr_errno(timeo); | ||
1759 | if (signal_pending(current)) | ||
1760 | goto out_free; | ||
1761 | |||
1762 | goto restart; | ||
1599 | } | 1763 | } |
1600 | 1764 | ||
1601 | timeo = unix_wait_for_peer(other, timeo); | 1765 | if (!sk_locked) { |
1766 | unix_state_unlock(other); | ||
1767 | unix_state_double_lock(sk, other); | ||
1768 | } | ||
1602 | 1769 | ||
1603 | err = sock_intr_errno(timeo); | 1770 | if (unix_peer(sk) != other || |
1604 | if (signal_pending(current)) | 1771 | unix_dgram_peer_wake_me(sk, other)) { |
1605 | goto out_free; | 1772 | err = -EAGAIN; |
1773 | sk_locked = 1; | ||
1774 | goto out_unlock; | ||
1775 | } | ||
1606 | 1776 | ||
1607 | goto restart; | 1777 | if (!sk_locked) { |
1778 | sk_locked = 1; | ||
1779 | goto restart_locked; | ||
1780 | } | ||
1608 | } | 1781 | } |
1609 | 1782 | ||
1783 | if (unlikely(sk_locked)) | ||
1784 | unix_state_unlock(sk); | ||
1785 | |||
1610 | if (sock_flag(other, SOCK_RCVTSTAMP)) | 1786 | if (sock_flag(other, SOCK_RCVTSTAMP)) |
1611 | __net_timestamp(skb); | 1787 | __net_timestamp(skb); |
1612 | maybe_add_creds(skb, sock, other); | 1788 | maybe_add_creds(skb, sock, other); |
@@ -1620,6 +1796,8 @@ restart: | |||
1620 | return len; | 1796 | return len; |
1621 | 1797 | ||
1622 | out_unlock: | 1798 | out_unlock: |
1799 | if (sk_locked) | ||
1800 | unix_state_unlock(sk); | ||
1623 | unix_state_unlock(other); | 1801 | unix_state_unlock(other); |
1624 | out_free: | 1802 | out_free: |
1625 | kfree_skb(skb); | 1803 | kfree_skb(skb); |
@@ -1741,8 +1919,10 @@ out_err: | |||
1741 | static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, | 1919 | static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, |
1742 | int offset, size_t size, int flags) | 1920 | int offset, size_t size, int flags) |
1743 | { | 1921 | { |
1744 | int err = 0; | 1922 | int err; |
1745 | bool send_sigpipe = true; | 1923 | bool send_sigpipe = false; |
1924 | bool init_scm = true; | ||
1925 | struct scm_cookie scm; | ||
1746 | struct sock *other, *sk = socket->sk; | 1926 | struct sock *other, *sk = socket->sk; |
1747 | struct sk_buff *skb, *newskb = NULL, *tail = NULL; | 1927 | struct sk_buff *skb, *newskb = NULL, *tail = NULL; |
1748 | 1928 | ||
@@ -1760,7 +1940,7 @@ alloc_skb: | |||
1760 | newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT, | 1940 | newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT, |
1761 | &err, 0); | 1941 | &err, 0); |
1762 | if (!newskb) | 1942 | if (!newskb) |
1763 | return err; | 1943 | goto err; |
1764 | } | 1944 | } |
1765 | 1945 | ||
1766 | /* we must acquire readlock as we modify already present | 1946 | /* we must acquire readlock as we modify already present |
@@ -1769,12 +1949,12 @@ alloc_skb: | |||
1769 | err = mutex_lock_interruptible(&unix_sk(other)->readlock); | 1949 | err = mutex_lock_interruptible(&unix_sk(other)->readlock); |
1770 | if (err) { | 1950 | if (err) { |
1771 | err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS; | 1951 | err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS; |
1772 | send_sigpipe = false; | ||
1773 | goto err; | 1952 | goto err; |
1774 | } | 1953 | } |
1775 | 1954 | ||
1776 | if (sk->sk_shutdown & SEND_SHUTDOWN) { | 1955 | if (sk->sk_shutdown & SEND_SHUTDOWN) { |
1777 | err = -EPIPE; | 1956 | err = -EPIPE; |
1957 | send_sigpipe = true; | ||
1778 | goto err_unlock; | 1958 | goto err_unlock; |
1779 | } | 1959 | } |
1780 | 1960 | ||
@@ -1783,17 +1963,27 @@ alloc_skb: | |||
1783 | if (sock_flag(other, SOCK_DEAD) || | 1963 | if (sock_flag(other, SOCK_DEAD) || |
1784 | other->sk_shutdown & RCV_SHUTDOWN) { | 1964 | other->sk_shutdown & RCV_SHUTDOWN) { |
1785 | err = -EPIPE; | 1965 | err = -EPIPE; |
1966 | send_sigpipe = true; | ||
1786 | goto err_state_unlock; | 1967 | goto err_state_unlock; |
1787 | } | 1968 | } |
1788 | 1969 | ||
1970 | if (init_scm) { | ||
1971 | err = maybe_init_creds(&scm, socket, other); | ||
1972 | if (err) | ||
1973 | goto err_state_unlock; | ||
1974 | init_scm = false; | ||
1975 | } | ||
1976 | |||
1789 | skb = skb_peek_tail(&other->sk_receive_queue); | 1977 | skb = skb_peek_tail(&other->sk_receive_queue); |
1790 | if (tail && tail == skb) { | 1978 | if (tail && tail == skb) { |
1791 | skb = newskb; | 1979 | skb = newskb; |
1792 | } else if (!skb) { | 1980 | } else if (!skb || !unix_skb_scm_eq(skb, &scm)) { |
1793 | if (newskb) | 1981 | if (newskb) { |
1794 | skb = newskb; | 1982 | skb = newskb; |
1795 | else | 1983 | } else { |
1984 | tail = skb; | ||
1796 | goto alloc_skb; | 1985 | goto alloc_skb; |
1986 | } | ||
1797 | } else if (newskb) { | 1987 | } else if (newskb) { |
1798 | /* this is fast path, we don't necessarily need to | 1988 | /* this is fast path, we don't necessarily need to |
1799 | * call to kfree_skb even though with newskb == NULL | 1989 | * call to kfree_skb even though with newskb == NULL |
@@ -1814,6 +2004,9 @@ alloc_skb: | |||
1814 | atomic_add(size, &sk->sk_wmem_alloc); | 2004 | atomic_add(size, &sk->sk_wmem_alloc); |
1815 | 2005 | ||
1816 | if (newskb) { | 2006 | if (newskb) { |
2007 | err = unix_scm_to_skb(&scm, skb, false); | ||
2008 | if (err) | ||
2009 | goto err_state_unlock; | ||
1817 | spin_lock(&other->sk_receive_queue.lock); | 2010 | spin_lock(&other->sk_receive_queue.lock); |
1818 | __skb_queue_tail(&other->sk_receive_queue, newskb); | 2011 | __skb_queue_tail(&other->sk_receive_queue, newskb); |
1819 | spin_unlock(&other->sk_receive_queue.lock); | 2012 | spin_unlock(&other->sk_receive_queue.lock); |
@@ -1823,7 +2016,7 @@ alloc_skb: | |||
1823 | mutex_unlock(&unix_sk(other)->readlock); | 2016 | mutex_unlock(&unix_sk(other)->readlock); |
1824 | 2017 | ||
1825 | other->sk_data_ready(other); | 2018 | other->sk_data_ready(other); |
1826 | 2019 | scm_destroy(&scm); | |
1827 | return size; | 2020 | return size; |
1828 | 2021 | ||
1829 | err_state_unlock: | 2022 | err_state_unlock: |
@@ -1834,6 +2027,8 @@ err: | |||
1834 | kfree_skb(newskb); | 2027 | kfree_skb(newskb); |
1835 | if (send_sigpipe && !(flags & MSG_NOSIGNAL)) | 2028 | if (send_sigpipe && !(flags & MSG_NOSIGNAL)) |
1836 | send_sig(SIGPIPE, current, 0); | 2029 | send_sig(SIGPIPE, current, 0); |
2030 | if (!init_scm) | ||
2031 | scm_destroy(&scm); | ||
1837 | return err; | 2032 | return err; |
1838 | } | 2033 | } |
1839 | 2034 | ||
@@ -1996,7 +2191,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo, | |||
1996 | !timeo) | 2191 | !timeo) |
1997 | break; | 2192 | break; |
1998 | 2193 | ||
1999 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 2194 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
2000 | unix_state_unlock(sk); | 2195 | unix_state_unlock(sk); |
2001 | timeo = freezable_schedule_timeout(timeo); | 2196 | timeo = freezable_schedule_timeout(timeo); |
2002 | unix_state_lock(sk); | 2197 | unix_state_lock(sk); |
@@ -2004,7 +2199,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo, | |||
2004 | if (sock_flag(sk, SOCK_DEAD)) | 2199 | if (sock_flag(sk, SOCK_DEAD)) |
2005 | break; | 2200 | break; |
2006 | 2201 | ||
2007 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 2202 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
2008 | } | 2203 | } |
2009 | 2204 | ||
2010 | finish_wait(sk_sleep(sk), &wait); | 2205 | finish_wait(sk_sleep(sk), &wait); |
@@ -2061,14 +2256,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state) | |||
2061 | /* Lock the socket to prevent queue disordering | 2256 | /* Lock the socket to prevent queue disordering |
2062 | * while sleeps in memcpy_tomsg | 2257 | * while sleeps in memcpy_tomsg |
2063 | */ | 2258 | */ |
2064 | err = mutex_lock_interruptible(&u->readlock); | 2259 | mutex_lock(&u->readlock); |
2065 | if (unlikely(err)) { | ||
2066 | /* recvmsg() in non blocking mode is supposed to return -EAGAIN | ||
2067 | * sk_rcvtimeo is not honored by mutex_lock_interruptible() | ||
2068 | */ | ||
2069 | err = noblock ? -EAGAIN : -ERESTARTSYS; | ||
2070 | goto out; | ||
2071 | } | ||
2072 | 2260 | ||
2073 | if (flags & MSG_PEEK) | 2261 | if (flags & MSG_PEEK) |
2074 | skip = sk_peek_offset(sk, flags); | 2262 | skip = sk_peek_offset(sk, flags); |
@@ -2112,12 +2300,12 @@ again: | |||
2112 | timeo = unix_stream_data_wait(sk, timeo, last, | 2300 | timeo = unix_stream_data_wait(sk, timeo, last, |
2113 | last_len); | 2301 | last_len); |
2114 | 2302 | ||
2115 | if (signal_pending(current) || | 2303 | if (signal_pending(current)) { |
2116 | mutex_lock_interruptible(&u->readlock)) { | ||
2117 | err = sock_intr_errno(timeo); | 2304 | err = sock_intr_errno(timeo); |
2118 | goto out; | 2305 | goto out; |
2119 | } | 2306 | } |
2120 | 2307 | ||
2308 | mutex_lock(&u->readlock); | ||
2121 | continue; | 2309 | continue; |
2122 | unlock: | 2310 | unlock: |
2123 | unix_state_unlock(sk); | 2311 | unix_state_unlock(sk); |
@@ -2137,10 +2325,7 @@ unlock: | |||
2137 | 2325 | ||
2138 | if (check_creds) { | 2326 | if (check_creds) { |
2139 | /* Never glue messages from different writers */ | 2327 | /* Never glue messages from different writers */ |
2140 | if ((UNIXCB(skb).pid != scm.pid) || | 2328 | if (!unix_skb_scm_eq(skb, &scm)) |
2141 | !uid_eq(UNIXCB(skb).uid, scm.creds.uid) || | ||
2142 | !gid_eq(UNIXCB(skb).gid, scm.creds.gid) || | ||
2143 | !unix_secdata_eq(&scm, skb)) | ||
2144 | break; | 2329 | break; |
2145 | } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { | 2330 | } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { |
2146 | /* Copy credentials */ | 2331 | /* Copy credentials */ |
@@ -2476,20 +2661,22 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock, | |||
2476 | return mask; | 2661 | return mask; |
2477 | 2662 | ||
2478 | writable = unix_writable(sk); | 2663 | writable = unix_writable(sk); |
2479 | other = unix_peer_get(sk); | 2664 | if (writable) { |
2480 | if (other) { | 2665 | unix_state_lock(sk); |
2481 | if (unix_peer(other) != sk) { | 2666 | |
2482 | sock_poll_wait(file, &unix_sk(other)->peer_wait, wait); | 2667 | other = unix_peer(sk); |
2483 | if (unix_recvq_full(other)) | 2668 | if (other && unix_peer(other) != sk && |
2484 | writable = 0; | 2669 | unix_recvq_full(other) && |
2485 | } | 2670 | unix_dgram_peer_wake_me(sk, other)) |
2486 | sock_put(other); | 2671 | writable = 0; |
2672 | |||
2673 | unix_state_unlock(sk); | ||
2487 | } | 2674 | } |
2488 | 2675 | ||
2489 | if (writable) | 2676 | if (writable) |
2490 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | 2677 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; |
2491 | else | 2678 | else |
2492 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 2679 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
2493 | 2680 | ||
2494 | return mask; | 2681 | return mask; |
2495 | } | 2682 | } |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c71e274c810a..75b0d23ee882 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -7941,8 +7941,10 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) | |||
7941 | if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) { | 7941 | if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) { |
7942 | if (!(rdev->wiphy.features & | 7942 | if (!(rdev->wiphy.features & |
7943 | NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) || | 7943 | NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) || |
7944 | !(rdev->wiphy.features & NL80211_FEATURE_QUIET)) | 7944 | !(rdev->wiphy.features & NL80211_FEATURE_QUIET)) { |
7945 | kzfree(connkeys); | ||
7945 | return -EINVAL; | 7946 | return -EINVAL; |
7947 | } | ||
7946 | connect.flags |= ASSOC_REQ_USE_RRM; | 7948 | connect.flags |= ASSOC_REQ_USE_RRM; |
7947 | } | 7949 | } |
7948 | 7950 | ||
@@ -9503,6 +9505,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) | |||
9503 | if (new_triggers.tcp && new_triggers.tcp->sock) | 9505 | if (new_triggers.tcp && new_triggers.tcp->sock) |
9504 | sock_release(new_triggers.tcp->sock); | 9506 | sock_release(new_triggers.tcp->sock); |
9505 | kfree(new_triggers.tcp); | 9507 | kfree(new_triggers.tcp); |
9508 | kfree(new_triggers.nd_config); | ||
9506 | return err; | 9509 | return err; |
9507 | } | 9510 | } |
9508 | #endif | 9511 | #endif |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 2e8d6f39ed56..06d050da0d94 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -3029,6 +3029,7 @@ int set_regdom(const struct ieee80211_regdomain *rd, | |||
3029 | break; | 3029 | break; |
3030 | default: | 3030 | default: |
3031 | WARN(1, "invalid initiator %d\n", lr->initiator); | 3031 | WARN(1, "invalid initiator %d\n", lr->initiator); |
3032 | kfree(rd); | ||
3032 | return -EINVAL; | 3033 | return -EINVAL; |
3033 | } | 3034 | } |
3034 | 3035 | ||
@@ -3221,8 +3222,10 @@ int __init regulatory_init(void) | |||
3221 | /* We always try to get an update for the static regdomain */ | 3222 | /* We always try to get an update for the static regdomain */ |
3222 | err = regulatory_hint_core(cfg80211_world_regdom->alpha2); | 3223 | err = regulatory_hint_core(cfg80211_world_regdom->alpha2); |
3223 | if (err) { | 3224 | if (err) { |
3224 | if (err == -ENOMEM) | 3225 | if (err == -ENOMEM) { |
3226 | platform_device_unregister(reg_pdev); | ||
3225 | return err; | 3227 | return err; |
3228 | } | ||
3226 | /* | 3229 | /* |
3227 | * N.B. kobject_uevent_env() can fail mainly for when we're out | 3230 | * N.B. kobject_uevent_env() can fail mainly for when we're out |
3228 | * memory which is handled and propagated appropriately above | 3231 | * memory which is handled and propagated appropriately above |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 09bfcbac63bb..948fa5560de5 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -303,6 +303,14 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp) | |||
303 | } | 303 | } |
304 | EXPORT_SYMBOL(xfrm_policy_alloc); | 304 | EXPORT_SYMBOL(xfrm_policy_alloc); |
305 | 305 | ||
306 | static void xfrm_policy_destroy_rcu(struct rcu_head *head) | ||
307 | { | ||
308 | struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu); | ||
309 | |||
310 | security_xfrm_policy_free(policy->security); | ||
311 | kfree(policy); | ||
312 | } | ||
313 | |||
306 | /* Destroy xfrm_policy: descendant resources must be released to this moment. */ | 314 | /* Destroy xfrm_policy: descendant resources must be released to this moment. */ |
307 | 315 | ||
308 | void xfrm_policy_destroy(struct xfrm_policy *policy) | 316 | void xfrm_policy_destroy(struct xfrm_policy *policy) |
@@ -312,8 +320,7 @@ void xfrm_policy_destroy(struct xfrm_policy *policy) | |||
312 | if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer)) | 320 | if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer)) |
313 | BUG(); | 321 | BUG(); |
314 | 322 | ||
315 | security_xfrm_policy_free(policy->security); | 323 | call_rcu(&policy->rcu, xfrm_policy_destroy_rcu); |
316 | kfree(policy); | ||
317 | } | 324 | } |
318 | EXPORT_SYMBOL(xfrm_policy_destroy); | 325 | EXPORT_SYMBOL(xfrm_policy_destroy); |
319 | 326 | ||
@@ -1214,8 +1221,10 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, | |||
1214 | struct xfrm_policy *pol; | 1221 | struct xfrm_policy *pol; |
1215 | struct net *net = sock_net(sk); | 1222 | struct net *net = sock_net(sk); |
1216 | 1223 | ||
1224 | rcu_read_lock(); | ||
1217 | read_lock_bh(&net->xfrm.xfrm_policy_lock); | 1225 | read_lock_bh(&net->xfrm.xfrm_policy_lock); |
1218 | if ((pol = sk->sk_policy[dir]) != NULL) { | 1226 | pol = rcu_dereference(sk->sk_policy[dir]); |
1227 | if (pol != NULL) { | ||
1219 | bool match = xfrm_selector_match(&pol->selector, fl, | 1228 | bool match = xfrm_selector_match(&pol->selector, fl, |
1220 | sk->sk_family); | 1229 | sk->sk_family); |
1221 | int err = 0; | 1230 | int err = 0; |
@@ -1239,6 +1248,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, | |||
1239 | } | 1248 | } |
1240 | out: | 1249 | out: |
1241 | read_unlock_bh(&net->xfrm.xfrm_policy_lock); | 1250 | read_unlock_bh(&net->xfrm.xfrm_policy_lock); |
1251 | rcu_read_unlock(); | ||
1242 | return pol; | 1252 | return pol; |
1243 | } | 1253 | } |
1244 | 1254 | ||
@@ -1307,13 +1317,14 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) | |||
1307 | #endif | 1317 | #endif |
1308 | 1318 | ||
1309 | write_lock_bh(&net->xfrm.xfrm_policy_lock); | 1319 | write_lock_bh(&net->xfrm.xfrm_policy_lock); |
1310 | old_pol = sk->sk_policy[dir]; | 1320 | old_pol = rcu_dereference_protected(sk->sk_policy[dir], |
1311 | sk->sk_policy[dir] = pol; | 1321 | lockdep_is_held(&net->xfrm.xfrm_policy_lock)); |
1312 | if (pol) { | 1322 | if (pol) { |
1313 | pol->curlft.add_time = get_seconds(); | 1323 | pol->curlft.add_time = get_seconds(); |
1314 | pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0); | 1324 | pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0); |
1315 | xfrm_sk_policy_link(pol, dir); | 1325 | xfrm_sk_policy_link(pol, dir); |
1316 | } | 1326 | } |
1327 | rcu_assign_pointer(sk->sk_policy[dir], pol); | ||
1317 | if (old_pol) { | 1328 | if (old_pol) { |
1318 | if (pol) | 1329 | if (pol) |
1319 | xfrm_policy_requeue(old_pol, pol); | 1330 | xfrm_policy_requeue(old_pol, pol); |
@@ -1361,17 +1372,26 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) | |||
1361 | return newp; | 1372 | return newp; |
1362 | } | 1373 | } |
1363 | 1374 | ||
1364 | int __xfrm_sk_clone_policy(struct sock *sk) | 1375 | int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) |
1365 | { | 1376 | { |
1366 | struct xfrm_policy *p0 = sk->sk_policy[0], | 1377 | const struct xfrm_policy *p; |
1367 | *p1 = sk->sk_policy[1]; | 1378 | struct xfrm_policy *np; |
1379 | int i, ret = 0; | ||
1368 | 1380 | ||
1369 | sk->sk_policy[0] = sk->sk_policy[1] = NULL; | 1381 | rcu_read_lock(); |
1370 | if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL) | 1382 | for (i = 0; i < 2; i++) { |
1371 | return -ENOMEM; | 1383 | p = rcu_dereference(osk->sk_policy[i]); |
1372 | if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL) | 1384 | if (p) { |
1373 | return -ENOMEM; | 1385 | np = clone_policy(p, i); |
1374 | return 0; | 1386 | if (unlikely(!np)) { |
1387 | ret = -ENOMEM; | ||
1388 | break; | ||
1389 | } | ||
1390 | rcu_assign_pointer(sk->sk_policy[i], np); | ||
1391 | } | ||
1392 | } | ||
1393 | rcu_read_unlock(); | ||
1394 | return ret; | ||
1375 | } | 1395 | } |
1376 | 1396 | ||
1377 | static int | 1397 | static int |
@@ -2198,6 +2218,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, | |||
2198 | xdst = NULL; | 2218 | xdst = NULL; |
2199 | route = NULL; | 2219 | route = NULL; |
2200 | 2220 | ||
2221 | sk = sk_const_to_full_sk(sk); | ||
2201 | if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { | 2222 | if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { |
2202 | num_pols = 1; | 2223 | num_pols = 1; |
2203 | pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); | 2224 | pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); |
@@ -2477,6 +2498,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, | |||
2477 | } | 2498 | } |
2478 | 2499 | ||
2479 | pol = NULL; | 2500 | pol = NULL; |
2501 | sk = sk_to_full_sk(sk); | ||
2480 | if (sk && sk->sk_policy[dir]) { | 2502 | if (sk && sk->sk_policy[dir]) { |
2481 | pol = xfrm_sk_policy_lookup(sk, dir, &fl); | 2503 | pol = xfrm_sk_policy_lookup(sk, dir, &fl); |
2482 | if (IS_ERR(pol)) { | 2504 | if (IS_ERR(pol)) { |