Diffstat (limited to 'net')
68 files changed, 675 insertions, 484 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a18714469bf7..85addcd9372b 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -86,13 +86,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 
 	grp = &vlan_info->grp;
 
-	/* Take it out of our own structures, but be sure to interlock with
-	 * HW accelerating devices or SW vlan input packet processing if
-	 * VLAN is not 0 (leave it there for 802.1p).
-	 */
-	if (vlan_id)
-		vlan_vid_del(real_dev, vlan_id);
-
 	grp->nr_vlan_devs--;
 
 	if (vlan->flags & VLAN_FLAG_MVRP)
@@ -114,6 +107,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 		vlan_gvrp_uninit_applicant(real_dev);
 	}
 
+	/* Take it out of our own structures, but be sure to interlock with
+	 * HW accelerating devices or SW vlan input packet processing if
+	 * VLAN is not 0 (leave it there for 802.1p).
+	 */
+	if (vlan_id)
+		vlan_vid_del(real_dev, vlan_id);
+
 	/* Get rid of the vlan's reference to real_dev */
 	dev_put(real_dev);
 }
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 79d87d8d4f51..fad0302bdb32 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -359,6 +359,7 @@ static void __sco_sock_close(struct sock *sk)
 		sco_chan_del(sk, ECONNRESET);
 		break;
 
+	case BT_CONNECT2:
 	case BT_CONNECT:
 	case BT_DISCONN:
 		sco_chan_del(sk, ECONNRESET);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index b0812c91c0f0..bab338e6270d 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -423,7 +423,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 			return 0;
 		br_warn(br, "adding interface %s with same address "
 		       "as a received packet\n",
-		       source->dev->name);
+		       source ? source->dev->name : br->dev->name);
 		fdb_delete(br, fdb);
 	}
 
diff --git a/net/core/dev.c b/net/core/dev.c
index d540ced1f6c6..e7d68ed8aafe 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1545,7 +1545,6 @@ void net_enable_timestamp(void)
 		return;
 	}
 #endif
-	WARN_ON(in_interrupt());
 	static_key_slow_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
@@ -1625,7 +1624,6 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 	}
 
 	skb_orphan(skb);
-	nf_reset(skb);
 
 	if (unlikely(!is_skb_forwardable(dev, skb))) {
 		atomic_long_inc(&dev->rx_dropped);
@@ -1641,6 +1639,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 	skb->mark = 0;
 	secpath_reset(skb);
 	nf_reset(skb);
+	nf_reset_trace(skb);
 	return netif_rx(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -3315,6 +3314,7 @@ int netdev_rx_handler_register(struct net_device *dev,
 	if (dev->rx_handler)
 		return -EBUSY;
 
+	/* Note: rx_handler_data must be set before rx_handler */
 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
 	rcu_assign_pointer(dev->rx_handler, rx_handler);
 
@@ -3335,6 +3335,11 @@ void netdev_rx_handler_unregister(struct net_device *dev)
 
 	ASSERT_RTNL();
 	RCU_INIT_POINTER(dev->rx_handler, NULL);
+	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
+	 * section has a guarantee to see a non NULL rx_handler_data
+	 * as well.
+	 */
+	synchronize_net();
 	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
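The two comments added above encode an ordering rule: rx_handler_data is published before rx_handler, and only torn down after a grace period once rx_handler is gone. Below is a standalone userspace analogue of the same publication pattern, using C11 acquire/release atomics in place of RCU; it is illustrative only, and all names in it (data_ptr, handler_ptr, my_handler) are made up, not kernel APIs.

/* Userspace analogue of the ordering in netdev_rx_handler_register()/
 * _unregister(): publish the data before the handler, so a reader that
 * observes a non-NULL handler also observes its data.  C11 atomics stand
 * in for rcu_assign_pointer()/rcu_dereference(); not kernel code.
 * Build: cc -pthread demo.c
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

typedef void (*handler_fn)(int *);

static int handler_data;
static _Atomic(int *) data_ptr;
static _Atomic(handler_fn) handler_ptr;

static void my_handler(int *d) { printf("handler sees data=%d\n", *d); }

static void *reader(void *arg)
{
	(void)arg;
	for (;;) {
		handler_fn h = atomic_load_explicit(&handler_ptr, memory_order_acquire);
		if (h) {
			/* Non-NULL is guaranteed here because the writer
			 * published data_ptr before handler_ptr. */
			int *d = atomic_load_explicit(&data_ptr, memory_order_relaxed);
			h(d);
			return NULL;
		}
	}
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);
	handler_data = 42;
	/* data first ... */
	atomic_store_explicit(&data_ptr, &handler_data, memory_order_release);
	/* ... then the handler, mirroring the register() order above */
	atomic_store_explicit(&handler_ptr, my_handler, memory_order_release);
	pthread_join(t, NULL);
	return 0;
}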
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index bd2eb9d3e369..abdc9e6ef33e 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -37,7 +37,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
 	ha->type = addr_type;
 	ha->refcount = 1;
 	ha->global_use = global;
-	ha->synced = false;
+	ha->synced = 0;
 	list_add_tail_rcu(&ha->list, &list->list);
 	list->count++;
 
@@ -165,7 +165,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
 					    addr_len, ha->type);
 			if (err)
 				break;
-			ha->synced = true;
+			ha->synced++;
 			ha->refcount++;
 		} else if (ha->refcount == 1) {
 			__hw_addr_del(to_list, ha->addr, addr_len, ha->type);
@@ -186,7 +186,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
 		if (ha->synced) {
 			__hw_addr_del(to_list, ha->addr,
 				      addr_len, ha->type);
-			ha->synced = false;
+			ha->synced--;
 			__hw_addr_del(from_list, ha->addr,
 				      addr_len, ha->type);
 		}
diff --git a/net/core/flow.c b/net/core/flow.c
index c56ea6f7f6c7..2bfd081c59f7 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -328,7 +328,7 @@ static void flow_cache_flush_per_cpu(void *data)
 	struct flow_flush_info *info = data;
 	struct tasklet_struct *tasklet;
 
-	tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet);
+	tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
 	tasklet->data = (unsigned long)info;
 	tasklet_schedule(tasklet);
 }
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 9d4c7201400d..e187bf06d673 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -140,6 +140,8 @@ ipv6:
 		flow->ports = *ports;
 	}
 
+	flow->thoff = (u16) nhoff;
+
 	return true;
 }
 EXPORT_SYMBOL(skb_flow_dissect);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5fb8d7e47294..b65441da74ab 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -496,8 +496,10 @@ static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
 	}
 	if (ops->fill_info) {
 		data = nla_nest_start(skb, IFLA_INFO_DATA);
-		if (data == NULL)
+		if (data == NULL) {
+			err = -EMSGSIZE;
 			goto err_cancel_link;
+		}
 		err = ops->fill_info(skb, dev);
 		if (err < 0)
 			goto err_cancel_data;
diff --git a/net/core/scm.c b/net/core/scm.c
index 905dcc6ad1e3..2dc6cdaaae8a 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -24,6 +24,7 @@
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/security.h>
+#include <linux/pid_namespace.h>
 #include <linux/pid.h>
 #include <linux/nsproxy.h>
 #include <linux/slab.h>
@@ -52,7 +53,8 @@ static __inline__ int scm_check_creds(struct ucred *creds)
 	if (!uid_valid(uid) || !gid_valid(gid))
 		return -EINVAL;
 
-	if ((creds->pid == task_tgid_vnr(current) || nsown_capable(CAP_SYS_ADMIN)) &&
+	if ((creds->pid == task_tgid_vnr(current) ||
+	     ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
 	    ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
 	      uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
 	    ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 68f6a94f7661..c929d9c1c4b6 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1333,8 +1333,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 			iph->frag_off |= htons(IP_MF);
 		offset += (skb->len - skb->mac_len - iph->ihl * 4);
 	} else {
-		if (!(iph->frag_off & htons(IP_DF)))
-			iph->id = htons(id++);
+		iph->id = htons(id++);
 	}
 	iph->tot_len = htons(skb->len - skb->mac_len);
 	iph->check = 0;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index f678507bc829..96083b7a436b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -802,8 +802,10 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
 		if (nlh->nlmsg_flags & NLM_F_EXCL ||
 		    !(nlh->nlmsg_flags & NLM_F_REPLACE))
 			return -EEXIST;
-
-		set_ifa_lifetime(ifa_existing, valid_lft, prefered_lft);
+		ifa = ifa_existing;
+		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
+		rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
+		blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
 	}
 	return 0;
 }
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 98cbc6877019..bf6c5cf31aed 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1522,7 +1522,8 @@ static int __init ip_auto_config(void)
 	}
 	for (i++; i < CONF_NAMESERVERS_MAX; i++)
 		if (ic_nameservers[i] != NONE)
-			pr_cont(", nameserver%u=%pI4\n", i, &ic_nameservers[i]);
+			pr_cont(", nameserver%u=%pI4", i, &ic_nameservers[i]);
+	pr_cont("\n");
 #endif /* !SILENT */
 
 	return 0;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index ce2d43e1f09f..0d755c50994b 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -36,19 +36,6 @@ config NF_CONNTRACK_PROC_COMPAT
 
 	  If unsure, say Y.
 
-config IP_NF_QUEUE
-	tristate "IP Userspace queueing via NETLINK (OBSOLETE)"
-	depends on NETFILTER_ADVANCED
-	help
-	  Netfilter has the ability to queue packets to user space: the
-	  netlink device can be used to access them using this driver.
-
-	  This option enables the old IPv4-only "ip_queue" implementation
-	  which has been obsoleted by the new "nfnetlink_queue" code (see
-	  CONFIG_NETFILTER_NETLINK_QUEUE).
-
-	  To compile it as a module, choose M here.  If unsure, say N.
-
 config IP_NF_IPTABLES
 	tristate "IP tables support (required for filtering/masq/NAT)"
 	default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0d9bdacce99f..3bd55bad230a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2059,11 +2059,8 @@ void tcp_enter_loss(struct sock *sk, int how)
 	if (tcp_is_reno(tp))
 		tcp_reset_reno_sack(tp);
 
-	if (!how) {
-		/* Push undo marker, if it was plain RTO and nothing
-		 * was retransmitted. */
-		tp->undo_marker = tp->snd_una;
-	} else {
+	tp->undo_marker = tp->snd_una;
+	if (how) {
 		tp->sacked_out = 0;
 		tp->fackets_out = 0;
 	}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 817fbb396bc8..5d0b4387cba6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1809,8 +1809,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 		goto send_now;
 	}
 
-	/* Ok, it looks like it is advisable to defer. */
-	tp->tso_deferred = 1 | (jiffies << 1);
+	/* Ok, it looks like it is advisable to defer.
+	 * Do not rearm the timer if already set to not break TCP ACK clocking.
+	 */
+	if (!tp->tso_deferred)
+		tp->tso_deferred = 1 | (jiffies << 1);
 
 	return true;
 
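The guard added above only matters because tso_deferred doubles as a flag and a timestamp. A small standalone illustration of that encoding (plain C, not kernel code; the in-kernel field is a u32 fed from jiffies):

/* Standalone illustration of the tso_deferred encoding used above: the low
 * bit marks "a deferral is in progress" and keeps the value non-zero even
 * when jiffies is 0; the (top-bit-truncated) timestamp is recovered with a
 * right shift.  Not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t encode(uint32_t jiffies) { return 1 | (jiffies << 1); }
static uint32_t decode(uint32_t v)       { return v >> 1; }

int main(void)
{
	uint32_t samples[] = { 0, 1, 123456, 0x7fffffff };

	for (unsigned int i = 0; i < 4; i++) {
		uint32_t v = encode(samples[i]);
		printf("jiffies=%u -> stored=0x%08x -> decoded=%u\n",
		       samples[i], v, decode(v));
	}
	return 0;
}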
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 265c42cf963c..0a073a263720 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1762,9 +1762,16 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
+	struct udp_sock *up = udp_sk(sk);
 	bool slow = lock_sock_fast(sk);
 	udp_flush_pending_frames(sk);
 	unlock_sock_fast(sk, slow);
+	if (static_key_false(&udp_encap_needed) && up->encap_type) {
+		void (*encap_destroy)(struct sock *sk);
+		encap_destroy = ACCESS_ONCE(up->encap_destroy);
+		if (encap_destroy)
+			encap_destroy(sk);
+	}
 }
 
 /*
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f2c7e615f902..a459c4f5b769 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2529,6 +2529,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
 static void init_loopback(struct net_device *dev)
 {
 	struct inet6_dev *idev;
+	struct net_device *sp_dev;
+	struct inet6_ifaddr *sp_ifa;
+	struct rt6_info *sp_rt;
 
 	/* ::1 */
 
@@ -2540,6 +2543,30 @@ static void init_loopback(struct net_device *dev)
 	}
 
 	add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
+
+	/* Add routes to other interface's IPv6 addresses */
+	for_each_netdev(dev_net(dev), sp_dev) {
+		if (!strcmp(sp_dev->name, dev->name))
+			continue;
+
+		idev = __in6_dev_get(sp_dev);
+		if (!idev)
+			continue;
+
+		read_lock_bh(&idev->lock);
+		list_for_each_entry(sp_ifa, &idev->addr_list, if_list) {
+
+			if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
+				continue;
+
+			sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
+
+			/* Failure cases are ignored */
+			if (!IS_ERR(sp_rt))
+				ip6_ins_rt(sp_rt);
+		}
+		read_unlock_bh(&idev->lock);
+	}
 }
 
 static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
@@ -4784,26 +4811,20 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)
 
 static int __net_init addrconf_init_net(struct net *net)
 {
-	int err;
+	int err = -ENOMEM;
 	struct ipv6_devconf *all, *dflt;
 
-	err = -ENOMEM;
-	all = &ipv6_devconf;
-	dflt = &ipv6_devconf_dflt;
+	all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
+	if (all == NULL)
+		goto err_alloc_all;
 
-	if (!net_eq(net, &init_net)) {
-		all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL);
-		if (all == NULL)
-			goto err_alloc_all;
+	dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
+	if (dflt == NULL)
+		goto err_alloc_dflt;
 
-		dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
-		if (dflt == NULL)
-			goto err_alloc_dflt;
-	} else {
-		/* these will be inherited by all namespaces */
-		dflt->autoconf = ipv6_defaults.autoconf;
-		dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
-	}
+	/* these will be inherited by all namespaces */
+	dflt->autoconf = ipv6_defaults.autoconf;
+	dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
 
 	net->ipv6.devconf_all = all;
 	net->ipv6.devconf_dflt = dflt;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index e33fe0ab2568..2bab2aa59745 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -118,6 +118,18 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
 	    ipv6_addr_loopback(&hdr->daddr))
 		goto err;
 
+	/* RFC4291 Errata ID: 3480
+	 * Interface-Local scope spans only a single interface on a
+	 * node and is useful only for loopback transmission of
+	 * multicast. Packets with interface-local scope received
+	 * from another node must be discarded.
+	 */
+	if (!(skb->pkt_type == PACKET_LOOPBACK ||
+	      dev->flags & IFF_LOOPBACK) &&
+	    ipv6_addr_is_multicast(&hdr->daddr) &&
+	    IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
+		goto err;
+
 	/* RFC4291 2.7
 	 * Nodes must not originate a packet to a multicast address whose scope
 	 * field contains the reserved value 0; if such a packet is received, it
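The new check relies on IPV6_ADDR_MC_SCOPE(), which reads the multicast scope out of the low nibble of the second address byte; scope 1 is interface-local. A standalone sketch of the same test on a few multicast addresses (userspace, illustrative only):

/* Standalone illustration of the scope test used above: for a multicast
 * address (first byte 0xff) the scope is the low nibble of the second
 * byte, and the new check drops scope 1 (interface-local) traffic that
 * did not originate on the local node.  Illustration only; the kernel's
 * IPV6_ADDR_MC_SCOPE() does the same nibble extraction.
 */
#include <stdio.h>
#include <arpa/inet.h>

static unsigned int mc_scope(const struct in6_addr *a)
{
	return a->s6_addr[1] & 0x0f;
}

int main(void)
{
	struct in6_addr a;
	const char *samples[] = { "ff01::1", "ff02::1", "ff05::2" };

	for (int i = 0; i < 3; i++) {
		inet_pton(AF_INET6, samples[i], &a);
		printf("%-8s scope %u%s\n", samples[i], mc_scope(&a),
		       mc_scope(&a) == 1 ? "  (dropped unless loopback)" : "");
	}
	return 0;
}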
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
index 83acc1405a18..cb631143721c 100644
--- a/net/ipv6/netfilter/ip6t_NPT.c
+++ b/net/ipv6/netfilter/ip6t_NPT.c
@@ -57,7 +57,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
 		if (pfx_len - i >= 32)
 			mask = 0;
 		else
-			mask = htonl(~((1 << (pfx_len - i)) - 1));
+			mask = htonl((1 << (i - pfx_len + 32)) - 1);
 
 		idx = i / 32;
 		addr->s6_addr32[idx] &= mask;
@@ -114,6 +114,7 @@ ip6t_dnpt_tg(struct sk_buff *skb, const struct xt_action_param *par)
 static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
 	{
 		.name		= "SNPT",
+		.table		= "mangle",
 		.target		= ip6t_snpt_tg,
 		.targetsize	= sizeof(struct ip6t_npt_tginfo),
 		.checkentry	= ip6t_npt_checkentry,
@@ -124,6 +125,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
 	},
 	{
 		.name		= "DNPT",
+		.table		= "mangle",
 		.target		= ip6t_dnpt_tg,
 		.targetsize	= sizeof(struct ip6t_npt_tginfo),
 		.checkentry	= ip6t_npt_checkentry,
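A quick standalone check of the corrected mask expression for a prefix length that is not a multiple of 32 (here /56, second 32-bit word): the old expression kept the prefix bits and clobbered the host bits, the new one keeps only the 8 host bits. Illustrative userspace arithmetic, not part of the patch:

/* Standalone check of the mask fix above for a /56 prefix.  In the second
 * s6_addr32 word (i == 32) there are 24 prefix bits, so the mask must keep
 * only the low 8 host bits.  Values shown in host byte order; the kernel
 * applies htonl() before using them.  Not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int pfx_len = 56, i = 32;
	uint32_t old_mask = ~((1u << (pfx_len - i)) - 1);	/* 0xff000000: wrong bits kept */
	uint32_t new_mask = (1u << (i - pfx_len + 32)) - 1;	/* 0x000000ff: host bits kept */

	printf("old mask 0x%08x, new mask 0x%08x\n", old_mask, new_mask);
	return 0;
}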
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 599e1ba6d1ce..d8e5e852fc7a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1285,10 +1285,18 @@ do_confirm:
 
 void udpv6_destroy_sock(struct sock *sk)
 {
+	struct udp_sock *up = udp_sk(sk);
 	lock_sock(sk);
 	udp_v6_flush_pending_frames(sk);
 	release_sock(sk);
 
+	if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
+		void (*encap_destroy)(struct sock *sk);
+		encap_destroy = ACCESS_ONCE(up->encap_destroy);
+		if (encap_destroy)
+			encap_destroy(sk);
+	}
+
 	inet6_destroy_sock(sk);
 }
 
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index d07e3a626446..d28e7f014cc6 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -2583,8 +2583,10 @@ bed:
 					  NULL, NULL, NULL);
 
 		/* Check if the we got some results */
-		if (!self->cachedaddr)
-			return -EAGAIN;		/* Didn't find any devices */
+		if (!self->cachedaddr) {
+			err = -EAGAIN;	/* Didn't find any devices */
+			goto out;
+		}
 		daddr = self->cachedaddr;
 		/* Cleanup */
 		self->cachedaddr = 0;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 8555f331ea60..5b1e5af25713 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2693,6 +2693,7 @@ static int key_notify_policy_flush(const struct km_event *c)
 	hdr->sadb_msg_pid = c->portid;
 	hdr->sadb_msg_version = PF_KEY_V2;
 	hdr->sadb_msg_errno = (uint8_t) 0;
+	hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
 	hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
 	pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
 	return 0;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index d36875f3427e..8aecf5df6656 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -114,7 +114,6 @@ struct l2tp_net {
 
 static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
-static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
 
 static inline struct l2tp_net *l2tp_pernet(struct net *net)
 {
@@ -192,6 +191,7 @@ struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
 	} else {
 		/* Socket is owned by kernelspace */
 		sk = tunnel->sock;
+		sock_hold(sk);
 	}
 
 out:
@@ -210,6 +210,7 @@ void l2tp_tunnel_sock_put(struct sock *sk)
 		}
 		sock_put(sk);
 	}
+	sock_put(sk);
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
 
@@ -373,10 +374,8 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
 	struct sk_buff *skbp;
 	struct sk_buff *tmp;
 	u32 ns = L2TP_SKB_CB(skb)->ns;
-	struct l2tp_stats *sstats;
 
 	spin_lock_bh(&session->reorder_q.lock);
-	sstats = &session->stats;
 	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
 		if (L2TP_SKB_CB(skbp)->ns > ns) {
 			__skb_queue_before(&session->reorder_q, skbp, skb);
@@ -384,9 +383,7 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
 				 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
 				 session->name, ns, L2TP_SKB_CB(skbp)->ns,
 				 skb_queue_len(&session->reorder_q));
-			u64_stats_update_begin(&sstats->syncp);
-			sstats->rx_oos_packets++;
-			u64_stats_update_end(&sstats->syncp);
+			atomic_long_inc(&session->stats.rx_oos_packets);
 			goto out;
 		}
 	}
@@ -403,23 +400,16 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *
 {
 	struct l2tp_tunnel *tunnel = session->tunnel;
 	int length = L2TP_SKB_CB(skb)->length;
-	struct l2tp_stats *tstats, *sstats;
 
 	/* We're about to requeue the skb, so return resources
 	 * to its current owner (a socket receive buffer).
 	 */
 	skb_orphan(skb);
 
-	tstats = &tunnel->stats;
-	u64_stats_update_begin(&tstats->syncp);
-	sstats = &session->stats;
-	u64_stats_update_begin(&sstats->syncp);
-	tstats->rx_packets++;
-	tstats->rx_bytes += length;
-	sstats->rx_packets++;
-	sstats->rx_bytes += length;
-	u64_stats_update_end(&tstats->syncp);
-	u64_stats_update_end(&sstats->syncp);
+	atomic_long_inc(&tunnel->stats.rx_packets);
+	atomic_long_add(length, &tunnel->stats.rx_bytes);
+	atomic_long_inc(&session->stats.rx_packets);
+	atomic_long_add(length, &session->stats.rx_bytes);
 
 	if (L2TP_SKB_CB(skb)->has_seq) {
 		/* Bump our Nr */
@@ -450,7 +440,6 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
 {
 	struct sk_buff *skb;
 	struct sk_buff *tmp;
-	struct l2tp_stats *sstats;
 
 	/* If the pkt at the head of the queue has the nr that we
 	 * expect to send up next, dequeue it and any other
@@ -458,13 +447,10 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
 	 */
 start:
 	spin_lock_bh(&session->reorder_q.lock);
-	sstats = &session->stats;
 	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
 		if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
-			u64_stats_update_begin(&sstats->syncp);
-			sstats->rx_seq_discards++;
-			sstats->rx_errors++;
-			u64_stats_update_end(&sstats->syncp);
+			atomic_long_inc(&session->stats.rx_seq_discards);
+			atomic_long_inc(&session->stats.rx_errors);
 			l2tp_dbg(session, L2TP_MSG_SEQ,
 				 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
 				 session->name, L2TP_SKB_CB(skb)->ns,
@@ -623,7 +609,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 	struct l2tp_tunnel *tunnel = session->tunnel;
 	int offset;
 	u32 ns, nr;
-	struct l2tp_stats *sstats = &session->stats;
 
 	/* The ref count is increased since we now hold a pointer to
 	 * the session. Take care to decrement the refcnt when exiting
@@ -640,9 +625,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 			 "%s: cookie mismatch (%u/%u). Discarding.\n",
 			 tunnel->name, tunnel->tunnel_id,
 			 session->session_id);
-		u64_stats_update_begin(&sstats->syncp);
-		sstats->rx_cookie_discards++;
-		u64_stats_update_end(&sstats->syncp);
+		atomic_long_inc(&session->stats.rx_cookie_discards);
 		goto discard;
 	}
 	ptr += session->peer_cookie_len;
@@ -711,9 +694,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 			l2tp_warn(session, L2TP_MSG_SEQ,
 				  "%s: recv data has no seq numbers when required. Discarding.\n",
 				  session->name);
-			u64_stats_update_begin(&sstats->syncp);
-			sstats->rx_seq_discards++;
-			u64_stats_update_end(&sstats->syncp);
+			atomic_long_inc(&session->stats.rx_seq_discards);
 			goto discard;
 		}
 
@@ -732,9 +713,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 			l2tp_warn(session, L2TP_MSG_SEQ,
 				  "%s: recv data has no seq numbers when required. Discarding.\n",
 				  session->name);
-			u64_stats_update_begin(&sstats->syncp);
-			sstats->rx_seq_discards++;
-			u64_stats_update_end(&sstats->syncp);
+			atomic_long_inc(&session->stats.rx_seq_discards);
 			goto discard;
 		}
 	}
@@ -788,9 +767,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 			 * packets
 			 */
 			if (L2TP_SKB_CB(skb)->ns != session->nr) {
-				u64_stats_update_begin(&sstats->syncp);
-				sstats->rx_seq_discards++;
-				u64_stats_update_end(&sstats->syncp);
+				atomic_long_inc(&session->stats.rx_seq_discards);
 				l2tp_dbg(session, L2TP_MSG_SEQ,
 					 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
 					 session->name, L2TP_SKB_CB(skb)->ns,
@@ -816,9 +793,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 	return;
 
 discard:
-	u64_stats_update_begin(&sstats->syncp);
-	sstats->rx_errors++;
-	u64_stats_update_end(&sstats->syncp);
+	atomic_long_inc(&session->stats.rx_errors);
 	kfree_skb(skb);
 
 	if (session->deref)
@@ -828,6 +803,23 @@ discard:
 }
 EXPORT_SYMBOL(l2tp_recv_common);
 
+/* Drop skbs from the session's reorder_q
+ */
+int l2tp_session_queue_purge(struct l2tp_session *session)
+{
+	struct sk_buff *skb = NULL;
+	BUG_ON(!session);
+	BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+	while ((skb = skb_dequeue(&session->reorder_q))) {
+		atomic_long_inc(&session->stats.rx_errors);
+		kfree_skb(skb);
+		if (session->deref)
+			(*session->deref)(session);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_queue_purge);
+
 /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
  * here. The skb is not on a list when we get here.
  * Returns 0 if the packet was a data packet and was successfully passed on.
@@ -843,7 +835,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
 	u32 tunnel_id, session_id;
 	u16 version;
 	int length;
-	struct l2tp_stats *tstats;
 
 	if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
 		goto discard_bad_csum;
@@ -932,10 +923,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
 discard_bad_csum:
 	LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
 	UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
-	tstats = &tunnel->stats;
-	u64_stats_update_begin(&tstats->syncp);
-	tstats->rx_errors++;
-	u64_stats_update_end(&tstats->syncp);
+	atomic_long_inc(&tunnel->stats.rx_errors);
 	kfree_skb(skb);
 
 	return 0;
@@ -1062,7 +1050,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
 	struct l2tp_tunnel *tunnel = session->tunnel;
 	unsigned int len = skb->len;
 	int error;
-	struct l2tp_stats *tstats, *sstats;
 
 	/* Debug */
 	if (session->send_seq)
@@ -1091,21 +1078,15 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
 		error = ip_queue_xmit(skb, fl);
 
 	/* Update stats */
-	tstats = &tunnel->stats;
-	u64_stats_update_begin(&tstats->syncp);
-	sstats = &session->stats;
-	u64_stats_update_begin(&sstats->syncp);
 	if (error >= 0) {
-		tstats->tx_packets++;
-		tstats->tx_bytes += len;
-		sstats->tx_packets++;
-		sstats->tx_bytes += len;
+		atomic_long_inc(&tunnel->stats.tx_packets);
+		atomic_long_add(len, &tunnel->stats.tx_bytes);
+		atomic_long_inc(&session->stats.tx_packets);
+		atomic_long_add(len, &session->stats.tx_bytes);
 	} else {
-		tstats->tx_errors++;
-		sstats->tx_errors++;
+		atomic_long_inc(&tunnel->stats.tx_errors);
+		atomic_long_inc(&session->stats.tx_errors);
 	}
-	u64_stats_update_end(&tstats->syncp);
-	u64_stats_update_end(&sstats->syncp);
 
 	return 0;
 }
@@ -1282,6 +1263,7 @@ static void l2tp_tunnel_destruct(struct sock *sk)
 		/* No longer an encapsulation socket. See net/ipv4/udp.c */
 		(udp_sk(sk))->encap_type = 0;
 		(udp_sk(sk))->encap_rcv = NULL;
+		(udp_sk(sk))->encap_destroy = NULL;
 		break;
 	case L2TP_ENCAPTYPE_IP:
 		break;
@@ -1311,7 +1293,7 @@ end:
 
 /* When the tunnel is closed, all the attached sessions need to go too.
  */
-static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
+void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
 {
 	int hash;
 	struct hlist_node *walk;
@@ -1334,25 +1316,13 @@ again:
 
 			hlist_del_init(&session->hlist);
 
-			/* Since we should hold the sock lock while
-			 * doing any unbinding, we need to release the
-			 * lock we're holding before taking that lock.
-			 * Hold a reference to the sock so it doesn't
-			 * disappear as we're jumping between locks.
-			 */
 			if (session->ref != NULL)
 				(*session->ref)(session);
 
 			write_unlock_bh(&tunnel->hlist_lock);
 
-			if (tunnel->version != L2TP_HDR_VER_2) {
-				struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
-				spin_lock_bh(&pn->l2tp_session_hlist_lock);
-				hlist_del_init_rcu(&session->global_hlist);
-				spin_unlock_bh(&pn->l2tp_session_hlist_lock);
-				synchronize_rcu();
-			}
+			__l2tp_session_unhash(session);
+			l2tp_session_queue_purge(session);
 
 			if (session->session_close != NULL)
 				(*session->session_close)(session);
@@ -1360,6 +1330,8 @@ again:
 			if (session->deref != NULL)
 				(*session->deref)(session);
 
+			l2tp_session_dec_refcount(session);
+
 			write_lock_bh(&tunnel->hlist_lock);
 
 			/* Now restart from the beginning of this hash
@@ -1372,6 +1344,17 @@ again:
 	}
 	write_unlock_bh(&tunnel->hlist_lock);
 }
+EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
+
+/* Tunnel socket destroy hook for UDP encapsulation */
+static void l2tp_udp_encap_destroy(struct sock *sk)
+{
+	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+	if (tunnel) {
+		l2tp_tunnel_closeall(tunnel);
+		sock_put(sk);
+	}
+}
 
 /* Really kill the tunnel.
  * Come here only when all sessions have been cleared from the tunnel.
@@ -1397,19 +1380,21 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
 		return;
 
 	sock = sk->sk_socket;
-	BUG_ON(!sock);
 
-	/* If the tunnel socket was created directly by the kernel, use the
-	 * sk_* API to release the socket now. Otherwise go through the
-	 * inet_* layer to shut the socket down, and let userspace close it.
+	/* If the tunnel socket was created by userspace, then go through the
+	 * inet layer to shut the socket down, and let userspace close it.
+	 * Otherwise, if we created the socket directly within the kernel, use
+	 * the sk API to release it here.
 	 * In either case the tunnel resources are freed in the socket
 	 * destructor when the tunnel socket goes away.
 	 */
-	if (sock->file == NULL) {
-		kernel_sock_shutdown(sock, SHUT_RDWR);
-		sk_release_kernel(sk);
+	if (tunnel->fd >= 0) {
+		if (sock)
+			inet_shutdown(sock, 2);
 	} else {
-		inet_shutdown(sock, 2);
+		if (sock)
+			kernel_sock_shutdown(sock, SHUT_RDWR);
+		sk_release_kernel(sk);
 	}
 
 	l2tp_tunnel_sock_put(sk);
@@ -1668,6 +1653,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
 		/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
 		udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
 		udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
+		udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
 #if IS_ENABLED(CONFIG_IPV6)
 		if (sk->sk_family == PF_INET6)
 			udpv6_encap_enable();
@@ -1723,6 +1709,7 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
  */
 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
 {
+	l2tp_tunnel_closeall(tunnel);
 	return (false == queue_work(l2tp_wq, &tunnel->del_work));
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
@@ -1731,62 +1718,71 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
  */
 void l2tp_session_free(struct l2tp_session *session)
 {
-	struct l2tp_tunnel *tunnel;
+	struct l2tp_tunnel *tunnel = session->tunnel;
 
 	BUG_ON(atomic_read(&session->ref_count) != 0);
 
-	tunnel = session->tunnel;
-	if (tunnel != NULL) {
+	if (tunnel) {
 		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
+		if (session->session_id != 0)
+			atomic_dec(&l2tp_session_count);
+		sock_put(tunnel->sock);
+		session->tunnel = NULL;
+		l2tp_tunnel_dec_refcount(tunnel);
+	}
+
+	kfree(session);
 
-		/* Delete the session from the hash */
+	return;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_free);
+
+/* Remove an l2tp session from l2tp_core's hash lists.
+ * Provides a tidyup interface for pseudowire code which can't just route all
+ * shutdown via. l2tp_session_delete and a pseudowire-specific session_close
+ * callback.
+ */
+void __l2tp_session_unhash(struct l2tp_session *session)
+{
+	struct l2tp_tunnel *tunnel = session->tunnel;
+
+	/* Remove the session from core hashes */
+	if (tunnel) {
+		/* Remove from the per-tunnel hash */
 		write_lock_bh(&tunnel->hlist_lock);
 		hlist_del_init(&session->hlist);
 		write_unlock_bh(&tunnel->hlist_lock);
 
-		/* Unlink from the global hash if not L2TPv2 */
+		/* For L2TPv3 we have a per-net hash: remove from there, too */
 		if (tunnel->version != L2TP_HDR_VER_2) {
 			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
 			spin_lock_bh(&pn->l2tp_session_hlist_lock);
 			hlist_del_init_rcu(&session->global_hlist);
 			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
 			synchronize_rcu();
 		}
-
-		if (session->session_id != 0)
-			atomic_dec(&l2tp_session_count);
-
-		sock_put(tunnel->sock);
-
-		/* This will delete the tunnel context if this
-		 * is the last session on the tunnel.
-		 */
-		session->tunnel = NULL;
-		l2tp_tunnel_dec_refcount(tunnel);
 	}
-
-	kfree(session);
-
-	return;
 }
-EXPORT_SYMBOL_GPL(l2tp_session_free);
+EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
 
 /* This function is used by the netlink SESSION_DELETE command and by
    pseudowire modules.
  */
 int l2tp_session_delete(struct l2tp_session *session)
 {
+	if (session->ref)
+		(*session->ref)(session);
+	__l2tp_session_unhash(session);
+	l2tp_session_queue_purge(session);
 	if (session->session_close != NULL)
 		(*session->session_close)(session);
-
+	if (session->deref)
+		(*session->ref)(session);
 	l2tp_session_dec_refcount(session);
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(l2tp_session_delete);
 
-
 /* We come here whenever a session's send_seq, cookie_len or
  * l2specific_len parameters are set.
  */
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 8eb8f1d47f3a..485a490fd990 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -36,16 +36,15 @@ enum {
 struct sk_buff;
 
 struct l2tp_stats {
-	u64			tx_packets;
-	u64			tx_bytes;
-	u64			tx_errors;
-	u64			rx_packets;
-	u64			rx_bytes;
-	u64			rx_seq_discards;
-	u64			rx_oos_packets;
-	u64			rx_errors;
-	u64			rx_cookie_discards;
-	struct u64_stats_sync	syncp;
+	atomic_long_t		tx_packets;
+	atomic_long_t		tx_bytes;
+	atomic_long_t		tx_errors;
+	atomic_long_t		rx_packets;
+	atomic_long_t		rx_bytes;
+	atomic_long_t		rx_seq_discards;
+	atomic_long_t		rx_oos_packets;
+	atomic_long_t		rx_errors;
+	atomic_long_t		rx_cookie_discards;
 };
 
 struct l2tp_tunnel;
@@ -240,11 +239,14 @@ extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
 extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
 
 extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp);
+extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
 extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
 extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
+extern void __l2tp_session_unhash(struct l2tp_session *session);
 extern int l2tp_session_delete(struct l2tp_session *session);
 extern void l2tp_session_free(struct l2tp_session *session);
 extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
+extern int l2tp_session_queue_purge(struct l2tp_session *session);
 extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
 
 extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
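The struct change above replaces u64 counters guarded by u64_stats_sync with independent atomic_long_t counters that can be bumped from any context and read with a plain atomic read. A userspace C11 analogue of the resulting update/read pattern (illustrative only; in the kernel this is atomic_long_inc()/atomic_long_read()):

/* Userspace analogue of the l2tp_stats change: each counter is its own
 * atomic long, incremented lock-free and read directly, instead of a u64
 * protected by a u64_stats_sync sequence counter.  Illustrative only.
 */
#include <stdatomic.h>
#include <stdio.h>

struct stats {
	atomic_long tx_packets;
	atomic_long rx_errors;
};

static struct stats s;	/* static storage: counters start at 0 */

int main(void)
{
	atomic_fetch_add(&s.tx_packets, 1);	/* ~ atomic_long_inc() */
	atomic_fetch_add(&s.rx_errors, 1);

	printf("tx_packets=%ld rx_errors=%ld\n",
	       (long)atomic_load(&s.tx_packets),	/* ~ atomic_long_read() */
	       (long)atomic_load(&s.rx_errors));
	return 0;
}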
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index c3813bc84552..072d7202e182 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -146,14 +146,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
 		   tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0,
 		   atomic_read(&tunnel->ref_count));
 
-	seq_printf(m, " %08x rx %llu/%llu/%llu rx %llu/%llu/%llu\n",
+	seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n",
 		   tunnel->debug,
-		   (unsigned long long)tunnel->stats.tx_packets,
-		   (unsigned long long)tunnel->stats.tx_bytes,
-		   (unsigned long long)tunnel->stats.tx_errors,
-		   (unsigned long long)tunnel->stats.rx_packets,
-		   (unsigned long long)tunnel->stats.rx_bytes,
-		   (unsigned long long)tunnel->stats.rx_errors);
+		   atomic_long_read(&tunnel->stats.tx_packets),
+		   atomic_long_read(&tunnel->stats.tx_bytes),
+		   atomic_long_read(&tunnel->stats.tx_errors),
+		   atomic_long_read(&tunnel->stats.rx_packets),
+		   atomic_long_read(&tunnel->stats.rx_bytes),
+		   atomic_long_read(&tunnel->stats.rx_errors));
 
 	if (tunnel->show != NULL)
 		tunnel->show(m, tunnel);
@@ -203,14 +203,14 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
 		seq_printf(m, "\n");
 	}
 
-	seq_printf(m, " %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n",
+	seq_printf(m, " %hu/%hu tx %ld/%ld/%ld rx %ld/%ld/%ld\n",
 		   session->nr, session->ns,
-		   (unsigned long long)session->stats.tx_packets,
-		   (unsigned long long)session->stats.tx_bytes,
-		   (unsigned long long)session->stats.tx_errors,
-		   (unsigned long long)session->stats.rx_packets,
-		   (unsigned long long)session->stats.rx_bytes,
-		   (unsigned long long)session->stats.rx_errors);
+		   atomic_long_read(&session->stats.tx_packets),
+		   atomic_long_read(&session->stats.tx_bytes),
+		   atomic_long_read(&session->stats.tx_errors),
+		   atomic_long_read(&session->stats.rx_packets),
+		   atomic_long_read(&session->stats.rx_bytes),
+		   atomic_long_read(&session->stats.rx_errors));
 
 	if (session->show != NULL)
 		session->show(m, session);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 7f41b7051269..571db8dd2292 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -228,10 +228,16 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
 static void l2tp_ip_destroy_sock(struct sock *sk)
 {
 	struct sk_buff *skb;
+	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
 
 	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
 		kfree_skb(skb);
 
+	if (tunnel) {
+		l2tp_tunnel_closeall(tunnel);
+		sock_put(sk);
+	}
+
 	sk_refcnt_debug_dec(sk);
 }
 
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 41f2f8126ebc..c74f5a91ff6a 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -241,10 +241,17 @@ static void l2tp_ip6_close(struct sock *sk, long timeout) | |||
241 | 241 | ||
242 | static void l2tp_ip6_destroy_sock(struct sock *sk) | 242 | static void l2tp_ip6_destroy_sock(struct sock *sk) |
243 | { | 243 | { |
244 | struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); | ||
245 | |||
244 | lock_sock(sk); | 246 | lock_sock(sk); |
245 | ip6_flush_pending_frames(sk); | 247 | ip6_flush_pending_frames(sk); |
246 | release_sock(sk); | 248 | release_sock(sk); |
247 | 249 | ||
250 | if (tunnel) { | ||
251 | l2tp_tunnel_closeall(tunnel); | ||
252 | sock_put(sk); | ||
253 | } | ||
254 | |||
248 | inet6_destroy_sock(sk); | 255 | inet6_destroy_sock(sk); |
249 | } | 256 | } |
250 | 257 | ||
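Both destroy_sock hunks (IPv4 and IPv6) add the same step: resolve the tunnel bound to the socket, tear down all of its sessions, then drop the reference the lookup presumably took on the socket — hence the trailing sock_put(). Condensed into one sketch with the names used in the hunks:

	static void l2tp_ip_common_destroy(struct sock *sk)
	{
		struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);

		if (tunnel) {
			l2tp_tunnel_closeall(tunnel);	/* delete every session now   */
			sock_put(sk);			/* balance the lookup's hold  */
		}
	}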
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index c1bab22db85e..0825ff26e113 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c | |||
@@ -246,8 +246,6 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla | |||
246 | #if IS_ENABLED(CONFIG_IPV6) | 246 | #if IS_ENABLED(CONFIG_IPV6) |
247 | struct ipv6_pinfo *np = NULL; | 247 | struct ipv6_pinfo *np = NULL; |
248 | #endif | 248 | #endif |
249 | struct l2tp_stats stats; | ||
250 | unsigned int start; | ||
251 | 249 | ||
252 | hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, | 250 | hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, |
253 | L2TP_CMD_TUNNEL_GET); | 251 | L2TP_CMD_TUNNEL_GET); |
@@ -265,28 +263,22 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla | |||
265 | if (nest == NULL) | 263 | if (nest == NULL) |
266 | goto nla_put_failure; | 264 | goto nla_put_failure; |
267 | 265 | ||
268 | do { | 266 | if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, |
269 | start = u64_stats_fetch_begin(&tunnel->stats.syncp); | 267 | atomic_long_read(&tunnel->stats.tx_packets)) || |
270 | stats.tx_packets = tunnel->stats.tx_packets; | 268 | nla_put_u64(skb, L2TP_ATTR_TX_BYTES, |
271 | stats.tx_bytes = tunnel->stats.tx_bytes; | 269 | atomic_long_read(&tunnel->stats.tx_bytes)) || |
272 | stats.tx_errors = tunnel->stats.tx_errors; | 270 | nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, |
273 | stats.rx_packets = tunnel->stats.rx_packets; | 271 | atomic_long_read(&tunnel->stats.tx_errors)) || |
274 | stats.rx_bytes = tunnel->stats.rx_bytes; | 272 | nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, |
275 | stats.rx_errors = tunnel->stats.rx_errors; | 273 | atomic_long_read(&tunnel->stats.rx_packets)) || |
276 | stats.rx_seq_discards = tunnel->stats.rx_seq_discards; | 274 | nla_put_u64(skb, L2TP_ATTR_RX_BYTES, |
277 | stats.rx_oos_packets = tunnel->stats.rx_oos_packets; | 275 | atomic_long_read(&tunnel->stats.rx_bytes)) || |
278 | } while (u64_stats_fetch_retry(&tunnel->stats.syncp, start)); | ||
279 | |||
280 | if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) || | ||
281 | nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) || | ||
282 | nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) || | ||
283 | nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) || | ||
284 | nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) || | ||
285 | nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, | 276 | nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, |
286 | stats.rx_seq_discards) || | 277 | atomic_long_read(&tunnel->stats.rx_seq_discards)) || |
287 | nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, | 278 | nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, |
288 | stats.rx_oos_packets) || | 279 | atomic_long_read(&tunnel->stats.rx_oos_packets)) || |
289 | nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) | 280 | nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, |
281 | atomic_long_read(&tunnel->stats.rx_errors))) | ||
290 | goto nla_put_failure; | 282 | goto nla_put_failure; |
291 | nla_nest_end(skb, nest); | 283 | nla_nest_end(skb, nest); |
292 | 284 | ||
@@ -612,8 +604,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl | |||
612 | struct nlattr *nest; | 604 | struct nlattr *nest; |
613 | struct l2tp_tunnel *tunnel = session->tunnel; | 605 | struct l2tp_tunnel *tunnel = session->tunnel; |
614 | struct sock *sk = NULL; | 606 | struct sock *sk = NULL; |
615 | struct l2tp_stats stats; | ||
616 | unsigned int start; | ||
617 | 607 | ||
618 | sk = tunnel->sock; | 608 | sk = tunnel->sock; |
619 | 609 | ||
@@ -656,28 +646,22 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl | |||
656 | if (nest == NULL) | 646 | if (nest == NULL) |
657 | goto nla_put_failure; | 647 | goto nla_put_failure; |
658 | 648 | ||
659 | do { | 649 | if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, |
660 | start = u64_stats_fetch_begin(&session->stats.syncp); | 650 | atomic_long_read(&session->stats.tx_packets)) || |
661 | stats.tx_packets = session->stats.tx_packets; | 651 | nla_put_u64(skb, L2TP_ATTR_TX_BYTES, |
662 | stats.tx_bytes = session->stats.tx_bytes; | 652 | atomic_long_read(&session->stats.tx_bytes)) || |
663 | stats.tx_errors = session->stats.tx_errors; | 653 | nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, |
664 | stats.rx_packets = session->stats.rx_packets; | 654 | atomic_long_read(&session->stats.tx_errors)) || |
665 | stats.rx_bytes = session->stats.rx_bytes; | 655 | nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, |
666 | stats.rx_errors = session->stats.rx_errors; | 656 | atomic_long_read(&session->stats.rx_packets)) || |
667 | stats.rx_seq_discards = session->stats.rx_seq_discards; | 657 | nla_put_u64(skb, L2TP_ATTR_RX_BYTES, |
668 | stats.rx_oos_packets = session->stats.rx_oos_packets; | 658 | atomic_long_read(&session->stats.rx_bytes)) || |
669 | } while (u64_stats_fetch_retry(&session->stats.syncp, start)); | ||
670 | |||
671 | if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) || | ||
672 | nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) || | ||
673 | nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) || | ||
674 | nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) || | ||
675 | nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) || | ||
676 | nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, | 659 | nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, |
677 | stats.rx_seq_discards) || | 660 | atomic_long_read(&session->stats.rx_seq_discards)) || |
678 | nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, | 661 | nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, |
679 | stats.rx_oos_packets) || | 662 | atomic_long_read(&session->stats.rx_oos_packets)) || |
680 | nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) | 663 | nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, |
664 | atomic_long_read(&session->stats.rx_errors))) | ||
681 | goto nla_put_failure; | 665 | goto nla_put_failure; |
682 | nla_nest_end(skb, nest); | 666 | nla_nest_end(skb, nest); |
683 | 667 | ||
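The dump paths above drop the u64_stats_fetch_begin()/retry() snapshot loop because the counters are no longer u64 fields guarded by a seqcount; they are individual atomic_long_t fields that a reader samples one at a time with atomic_long_read(). The trade-off is that the eight values may come from slightly different instants, which is acceptable for statistics. The writer side is not part of this diff, but under that conversion it reduces to plain atomic operations, roughly:

	/* Hedged sketch of RX accounting after the conversion; the real
	 * update sites live in l2tp_core.c and are not shown in this hunk.
	 */
	static void l2tp_stats_rx_ok(struct l2tp_session *session, unsigned int len)
	{
		atomic_long_inc(&session->stats.rx_packets);
		atomic_long_add(len, &session->stats.rx_bytes);
	}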
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 6a53371dba1f..637a341c1e2d 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c | |||
@@ -97,6 +97,7 @@ | |||
97 | #include <net/ip.h> | 97 | #include <net/ip.h> |
98 | #include <net/udp.h> | 98 | #include <net/udp.h> |
99 | #include <net/xfrm.h> | 99 | #include <net/xfrm.h> |
100 | #include <net/inet_common.h> | ||
100 | 101 | ||
101 | #include <asm/byteorder.h> | 102 | #include <asm/byteorder.h> |
102 | #include <linux/atomic.h> | 103 | #include <linux/atomic.h> |
@@ -259,7 +260,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int | |||
259 | session->name); | 260 | session->name); |
260 | 261 | ||
261 | /* Not bound. Nothing we can do, so discard. */ | 262 | /* Not bound. Nothing we can do, so discard. */ |
262 | session->stats.rx_errors++; | 263 | atomic_long_inc(&session->stats.rx_errors); |
263 | kfree_skb(skb); | 264 | kfree_skb(skb); |
264 | } | 265 | } |
265 | 266 | ||
@@ -447,34 +448,16 @@ static void pppol2tp_session_close(struct l2tp_session *session) | |||
447 | { | 448 | { |
448 | struct pppol2tp_session *ps = l2tp_session_priv(session); | 449 | struct pppol2tp_session *ps = l2tp_session_priv(session); |
449 | struct sock *sk = ps->sock; | 450 | struct sock *sk = ps->sock; |
450 | struct sk_buff *skb; | 451 | struct socket *sock = sk->sk_socket; |
451 | 452 | ||
452 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | 453 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); |
453 | 454 | ||
454 | if (session->session_id == 0) | ||
455 | goto out; | ||
456 | |||
457 | if (sk != NULL) { | ||
458 | lock_sock(sk); | ||
459 | |||
460 | if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { | ||
461 | pppox_unbind_sock(sk); | ||
462 | sk->sk_state = PPPOX_DEAD; | ||
463 | sk->sk_state_change(sk); | ||
464 | } | ||
465 | |||
466 | /* Purge any queued data */ | ||
467 | skb_queue_purge(&sk->sk_receive_queue); | ||
468 | skb_queue_purge(&sk->sk_write_queue); | ||
469 | while ((skb = skb_dequeue(&session->reorder_q))) { | ||
470 | kfree_skb(skb); | ||
471 | sock_put(sk); | ||
472 | } | ||
473 | 455 | ||
474 | release_sock(sk); | 456 | if (sock) { |
457 | inet_shutdown(sock, 2); | ||
458 | /* Don't let the session go away before our socket does */ | ||
459 | l2tp_session_inc_refcount(session); | ||
475 | } | 460 | } |
476 | |||
477 | out: | ||
478 | return; | 461 | return; |
479 | } | 462 | } |
480 | 463 | ||
@@ -483,19 +466,12 @@ out: | |||
483 | */ | 466 | */ |
484 | static void pppol2tp_session_destruct(struct sock *sk) | 467 | static void pppol2tp_session_destruct(struct sock *sk) |
485 | { | 468 | { |
486 | struct l2tp_session *session; | 469 | struct l2tp_session *session = sk->sk_user_data; |
487 | 470 | if (session) { | |
488 | if (sk->sk_user_data != NULL) { | ||
489 | session = sk->sk_user_data; | ||
490 | if (session == NULL) | ||
491 | goto out; | ||
492 | |||
493 | sk->sk_user_data = NULL; | 471 | sk->sk_user_data = NULL; |
494 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | 472 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); |
495 | l2tp_session_dec_refcount(session); | 473 | l2tp_session_dec_refcount(session); |
496 | } | 474 | } |
497 | |||
498 | out: | ||
499 | return; | 475 | return; |
500 | } | 476 | } |
501 | 477 | ||
@@ -525,16 +501,13 @@ static int pppol2tp_release(struct socket *sock) | |||
525 | session = pppol2tp_sock_to_session(sk); | 501 | session = pppol2tp_sock_to_session(sk); |
526 | 502 | ||
527 | /* Purge any queued data */ | 503 | /* Purge any queued data */ |
528 | skb_queue_purge(&sk->sk_receive_queue); | ||
529 | skb_queue_purge(&sk->sk_write_queue); | ||
530 | if (session != NULL) { | 504 | if (session != NULL) { |
531 | struct sk_buff *skb; | 505 | __l2tp_session_unhash(session); |
532 | while ((skb = skb_dequeue(&session->reorder_q))) { | 506 | l2tp_session_queue_purge(session); |
533 | kfree_skb(skb); | ||
534 | sock_put(sk); | ||
535 | } | ||
536 | sock_put(sk); | 507 | sock_put(sk); |
537 | } | 508 | } |
509 | skb_queue_purge(&sk->sk_receive_queue); | ||
510 | skb_queue_purge(&sk->sk_write_queue); | ||
538 | 511 | ||
539 | release_sock(sk); | 512 | release_sock(sk); |
540 | 513 | ||
@@ -880,18 +853,6 @@ out: | |||
880 | return error; | 853 | return error; |
881 | } | 854 | } |
882 | 855 | ||
883 | /* Called when deleting sessions via the netlink interface. | ||
884 | */ | ||
885 | static int pppol2tp_session_delete(struct l2tp_session *session) | ||
886 | { | ||
887 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
888 | |||
889 | if (ps->sock == NULL) | ||
890 | l2tp_session_dec_refcount(session); | ||
891 | |||
892 | return 0; | ||
893 | } | ||
894 | |||
895 | #endif /* CONFIG_L2TP_V3 */ | 856 | #endif /* CONFIG_L2TP_V3 */ |
896 | 857 | ||
897 | /* getname() support. | 858 | /* getname() support. |
@@ -1025,14 +986,14 @@ end: | |||
1025 | static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest, | 986 | static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest, |
1026 | struct l2tp_stats *stats) | 987 | struct l2tp_stats *stats) |
1027 | { | 988 | { |
1028 | dest->tx_packets = stats->tx_packets; | 989 | dest->tx_packets = atomic_long_read(&stats->tx_packets); |
1029 | dest->tx_bytes = stats->tx_bytes; | 990 | dest->tx_bytes = atomic_long_read(&stats->tx_bytes); |
1030 | dest->tx_errors = stats->tx_errors; | 991 | dest->tx_errors = atomic_long_read(&stats->tx_errors); |
1031 | dest->rx_packets = stats->rx_packets; | 992 | dest->rx_packets = atomic_long_read(&stats->rx_packets); |
1032 | dest->rx_bytes = stats->rx_bytes; | 993 | dest->rx_bytes = atomic_long_read(&stats->rx_bytes); |
1033 | dest->rx_seq_discards = stats->rx_seq_discards; | 994 | dest->rx_seq_discards = atomic_long_read(&stats->rx_seq_discards); |
1034 | dest->rx_oos_packets = stats->rx_oos_packets; | 995 | dest->rx_oos_packets = atomic_long_read(&stats->rx_oos_packets); |
1035 | dest->rx_errors = stats->rx_errors; | 996 | dest->rx_errors = atomic_long_read(&stats->rx_errors); |
1036 | } | 997 | } |
1037 | 998 | ||
1038 | /* Session ioctl helper. | 999 | /* Session ioctl helper. |
@@ -1666,14 +1627,14 @@ static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v) | |||
1666 | tunnel->name, | 1627 | tunnel->name, |
1667 | (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N', | 1628 | (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N', |
1668 | atomic_read(&tunnel->ref_count) - 1); | 1629 | atomic_read(&tunnel->ref_count) - 1); |
1669 | seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n", | 1630 | seq_printf(m, " %08x %ld/%ld/%ld %ld/%ld/%ld\n", |
1670 | tunnel->debug, | 1631 | tunnel->debug, |
1671 | (unsigned long long)tunnel->stats.tx_packets, | 1632 | atomic_long_read(&tunnel->stats.tx_packets), |
1672 | (unsigned long long)tunnel->stats.tx_bytes, | 1633 | atomic_long_read(&tunnel->stats.tx_bytes), |
1673 | (unsigned long long)tunnel->stats.tx_errors, | 1634 | atomic_long_read(&tunnel->stats.tx_errors), |
1674 | (unsigned long long)tunnel->stats.rx_packets, | 1635 | atomic_long_read(&tunnel->stats.rx_packets), |
1675 | (unsigned long long)tunnel->stats.rx_bytes, | 1636 | atomic_long_read(&tunnel->stats.rx_bytes), |
1676 | (unsigned long long)tunnel->stats.rx_errors); | 1637 | atomic_long_read(&tunnel->stats.rx_errors)); |
1677 | } | 1638 | } |
1678 | 1639 | ||
1679 | static void pppol2tp_seq_session_show(struct seq_file *m, void *v) | 1640 | static void pppol2tp_seq_session_show(struct seq_file *m, void *v) |
@@ -1708,14 +1669,14 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) | |||
1708 | session->lns_mode ? "LNS" : "LAC", | 1669 | session->lns_mode ? "LNS" : "LAC", |
1709 | session->debug, | 1670 | session->debug, |
1710 | jiffies_to_msecs(session->reorder_timeout)); | 1671 | jiffies_to_msecs(session->reorder_timeout)); |
1711 | seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n", | 1672 | seq_printf(m, " %hu/%hu %ld/%ld/%ld %ld/%ld/%ld\n", |
1712 | session->nr, session->ns, | 1673 | session->nr, session->ns, |
1713 | (unsigned long long)session->stats.tx_packets, | 1674 | atomic_long_read(&session->stats.tx_packets), |
1714 | (unsigned long long)session->stats.tx_bytes, | 1675 | atomic_long_read(&session->stats.tx_bytes), |
1715 | (unsigned long long)session->stats.tx_errors, | 1676 | atomic_long_read(&session->stats.tx_errors), |
1716 | (unsigned long long)session->stats.rx_packets, | 1677 | atomic_long_read(&session->stats.rx_packets), |
1717 | (unsigned long long)session->stats.rx_bytes, | 1678 | atomic_long_read(&session->stats.rx_bytes), |
1718 | (unsigned long long)session->stats.rx_errors); | 1679 | atomic_long_read(&session->stats.rx_errors)); |
1719 | 1680 | ||
1720 | if (po) | 1681 | if (po) |
1721 | seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); | 1682 | seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); |
@@ -1839,7 +1800,7 @@ static const struct pppox_proto pppol2tp_proto = { | |||
1839 | 1800 | ||
1840 | static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = { | 1801 | static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = { |
1841 | .session_create = pppol2tp_session_create, | 1802 | .session_create = pppol2tp_session_create, |
1842 | .session_delete = pppol2tp_session_delete, | 1803 | .session_delete = l2tp_session_delete, |
1843 | }; | 1804 | }; |
1844 | 1805 | ||
1845 | #endif /* CONFIG_L2TP_V3 */ | 1806 | #endif /* CONFIG_L2TP_V3 */ |
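Taken together, the pppol2tp hunks move session teardown into the l2tp core: pppol2tp_session_close() now only shuts the socket down and pins the session, pppol2tp_release() unhashes and purges the session through the new helpers before flushing the socket's own queues, and the netlink delete op is simply l2tp_session_delete(). The resulting release ordering, compressed into one sketch (locking and error paths omitted):

	static void pppol2tp_release_order(struct sock *sk)
	{
		struct l2tp_session *session = pppol2tp_sock_to_session(sk);

		if (session) {
			__l2tp_session_unhash(session);		/* stop new lookups  */
			l2tp_session_queue_purge(session);	/* drop queued skbs  */
			sock_put(sk);				/* ref from lookup   */
		}
		skb_queue_purge(&sk->sk_receive_queue);		/* only then flush   */
		skb_queue_purge(&sk->sk_write_queue);		/* the socket itself */
	}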
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index fb306814576a..a6893602f87a 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -2582,7 +2582,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, | |||
2582 | list_del(&dep->list); | 2582 | list_del(&dep->list); |
2583 | mutex_unlock(&local->mtx); | 2583 | mutex_unlock(&local->mtx); |
2584 | 2584 | ||
2585 | ieee80211_roc_notify_destroy(dep); | 2585 | ieee80211_roc_notify_destroy(dep, true); |
2586 | return 0; | 2586 | return 0; |
2587 | } | 2587 | } |
2588 | 2588 | ||
@@ -2622,7 +2622,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, | |||
2622 | ieee80211_start_next_roc(local); | 2622 | ieee80211_start_next_roc(local); |
2623 | mutex_unlock(&local->mtx); | 2623 | mutex_unlock(&local->mtx); |
2624 | 2624 | ||
2625 | ieee80211_roc_notify_destroy(found); | 2625 | ieee80211_roc_notify_destroy(found, true); |
2626 | } else { | 2626 | } else { |
2627 | /* work may be pending so use it all the time */ | 2627 | /* work may be pending so use it all the time */ |
2628 | found->abort = true; | 2628 | found->abort = true; |
@@ -2632,6 +2632,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, | |||
2632 | 2632 | ||
2633 | /* work will clean up etc */ | 2633 | /* work will clean up etc */ |
2634 | flush_delayed_work(&found->work); | 2634 | flush_delayed_work(&found->work); |
2635 | WARN_ON(!found->to_be_freed); | ||
2636 | kfree(found); | ||
2635 | } | 2637 | } |
2636 | 2638 | ||
2637 | return 0; | 2639 | return 0; |
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 78c0d90dd641..931be419ab5a 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c | |||
@@ -63,6 +63,7 @@ ieee80211_new_chanctx(struct ieee80211_local *local, | |||
63 | enum ieee80211_chanctx_mode mode) | 63 | enum ieee80211_chanctx_mode mode) |
64 | { | 64 | { |
65 | struct ieee80211_chanctx *ctx; | 65 | struct ieee80211_chanctx *ctx; |
66 | u32 changed; | ||
66 | int err; | 67 | int err; |
67 | 68 | ||
68 | lockdep_assert_held(&local->chanctx_mtx); | 69 | lockdep_assert_held(&local->chanctx_mtx); |
@@ -76,6 +77,13 @@ ieee80211_new_chanctx(struct ieee80211_local *local, | |||
76 | ctx->conf.rx_chains_dynamic = 1; | 77 | ctx->conf.rx_chains_dynamic = 1; |
77 | ctx->mode = mode; | 78 | ctx->mode = mode; |
78 | 79 | ||
80 | /* acquire mutex to prevent idle from changing */ | ||
81 | mutex_lock(&local->mtx); | ||
82 | /* turn idle off *before* setting channel -- some drivers need that */ | ||
83 | changed = ieee80211_idle_off(local); | ||
84 | if (changed) | ||
85 | ieee80211_hw_config(local, changed); | ||
86 | |||
79 | if (!local->use_chanctx) { | 87 | if (!local->use_chanctx) { |
80 | local->_oper_channel_type = | 88 | local->_oper_channel_type = |
81 | cfg80211_get_chandef_type(chandef); | 89 | cfg80211_get_chandef_type(chandef); |
@@ -85,14 +93,17 @@ ieee80211_new_chanctx(struct ieee80211_local *local, | |||
85 | err = drv_add_chanctx(local, ctx); | 93 | err = drv_add_chanctx(local, ctx); |
86 | if (err) { | 94 | if (err) { |
87 | kfree(ctx); | 95 | kfree(ctx); |
88 | return ERR_PTR(err); | 96 | ctx = ERR_PTR(err); |
97 | |||
98 | ieee80211_recalc_idle(local); | ||
99 | goto out; | ||
89 | } | 100 | } |
90 | } | 101 | } |
91 | 102 | ||
103 | /* and keep the mutex held until the new chanctx is on the list */ | ||
92 | list_add_rcu(&ctx->list, &local->chanctx_list); | 104 | list_add_rcu(&ctx->list, &local->chanctx_list); |
93 | 105 | ||
94 | mutex_lock(&local->mtx); | 106 | out: |
95 | ieee80211_recalc_idle(local); | ||
96 | mutex_unlock(&local->mtx); | 107 | mutex_unlock(&local->mtx); |
97 | 108 | ||
98 | return ctx; | 109 | return ctx; |
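Linearised, the reworked ieee80211_new_chanctx() proceeds in this order: take local->mtx so the idle state cannot change underneath it, force idle off before the first channel is programmed (the comment notes some drivers need that), add the context in the driver, and release the mutex only once the context is on chanctx_list; if the driver rejects the context, idle is re-evaluated instead of being left off. Simplified flow, with allocation and the !use_chanctx branch omitted:

	mutex_lock(&local->mtx);

	changed = ieee80211_idle_off(local);	/* before setting the channel */
	if (changed)
		ieee80211_hw_config(local, changed);

	err = drv_add_chanctx(local, ctx);
	if (err) {
		kfree(ctx);
		ctx = ERR_PTR(err);
		ieee80211_recalc_idle(local);	/* may legitimately go idle again */
		goto out;
	}

	list_add_rcu(&ctx->list, &local->chanctx_list);
 out:
	mutex_unlock(&local->mtx);
	return ctx;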
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 388580a1bada..5672533a0832 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -309,6 +309,7 @@ struct ieee80211_roc_work { | |||
309 | struct ieee80211_channel *chan; | 309 | struct ieee80211_channel *chan; |
310 | 310 | ||
311 | bool started, abort, hw_begun, notified; | 311 | bool started, abort, hw_begun, notified; |
312 | bool to_be_freed; | ||
312 | 313 | ||
313 | unsigned long hw_start_time; | 314 | unsigned long hw_start_time; |
314 | 315 | ||
@@ -1347,7 +1348,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local); | |||
1347 | void ieee80211_roc_setup(struct ieee80211_local *local); | 1348 | void ieee80211_roc_setup(struct ieee80211_local *local); |
1348 | void ieee80211_start_next_roc(struct ieee80211_local *local); | 1349 | void ieee80211_start_next_roc(struct ieee80211_local *local); |
1349 | void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata); | 1350 | void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata); |
1350 | void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc); | 1351 | void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free); |
1351 | void ieee80211_sw_roc_work(struct work_struct *work); | 1352 | void ieee80211_sw_roc_work(struct work_struct *work); |
1352 | void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc); | 1353 | void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc); |
1353 | 1354 | ||
@@ -1361,6 +1362,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, | |||
1361 | enum nl80211_iftype type); | 1362 | enum nl80211_iftype type); |
1362 | void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); | 1363 | void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); |
1363 | void ieee80211_remove_interfaces(struct ieee80211_local *local); | 1364 | void ieee80211_remove_interfaces(struct ieee80211_local *local); |
1365 | u32 ieee80211_idle_off(struct ieee80211_local *local); | ||
1364 | void ieee80211_recalc_idle(struct ieee80211_local *local); | 1366 | void ieee80211_recalc_idle(struct ieee80211_local *local); |
1365 | void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, | 1367 | void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, |
1366 | const int offset); | 1368 | const int offset); |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index baaa8608e52d..58150f877ec3 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -78,7 +78,7 @@ void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata) | |||
78 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); | 78 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); |
79 | } | 79 | } |
80 | 80 | ||
81 | static u32 ieee80211_idle_off(struct ieee80211_local *local) | 81 | u32 ieee80211_idle_off(struct ieee80211_local *local) |
82 | { | 82 | { |
83 | if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) | 83 | if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) |
84 | return 0; | 84 | return 0; |
@@ -349,21 +349,19 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata) | |||
349 | static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) | 349 | static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) |
350 | { | 350 | { |
351 | struct ieee80211_sub_if_data *sdata; | 351 | struct ieee80211_sub_if_data *sdata; |
352 | int ret = 0; | 352 | int ret; |
353 | 353 | ||
354 | if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) | 354 | if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) |
355 | return 0; | 355 | return 0; |
356 | 356 | ||
357 | mutex_lock(&local->iflist_mtx); | 357 | ASSERT_RTNL(); |
358 | 358 | ||
359 | if (local->monitor_sdata) | 359 | if (local->monitor_sdata) |
360 | goto out_unlock; | 360 | return 0; |
361 | 361 | ||
362 | sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); | 362 | sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); |
363 | if (!sdata) { | 363 | if (!sdata) |
364 | ret = -ENOMEM; | 364 | return -ENOMEM; |
365 | goto out_unlock; | ||
366 | } | ||
367 | 365 | ||
368 | /* set up data */ | 366 | /* set up data */ |
369 | sdata->local = local; | 367 | sdata->local = local; |
@@ -377,13 +375,13 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) | |||
377 | if (WARN_ON(ret)) { | 375 | if (WARN_ON(ret)) { |
378 | /* ok .. stupid driver, it asked for this! */ | 376 | /* ok .. stupid driver, it asked for this! */ |
379 | kfree(sdata); | 377 | kfree(sdata); |
380 | goto out_unlock; | 378 | return ret; |
381 | } | 379 | } |
382 | 380 | ||
383 | ret = ieee80211_check_queues(sdata); | 381 | ret = ieee80211_check_queues(sdata); |
384 | if (ret) { | 382 | if (ret) { |
385 | kfree(sdata); | 383 | kfree(sdata); |
386 | goto out_unlock; | 384 | return ret; |
387 | } | 385 | } |
388 | 386 | ||
389 | ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, | 387 | ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, |
@@ -391,13 +389,14 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) | |||
391 | if (ret) { | 389 | if (ret) { |
392 | drv_remove_interface(local, sdata); | 390 | drv_remove_interface(local, sdata); |
393 | kfree(sdata); | 391 | kfree(sdata); |
394 | goto out_unlock; | 392 | return ret; |
395 | } | 393 | } |
396 | 394 | ||
395 | mutex_lock(&local->iflist_mtx); | ||
397 | rcu_assign_pointer(local->monitor_sdata, sdata); | 396 | rcu_assign_pointer(local->monitor_sdata, sdata); |
398 | out_unlock: | ||
399 | mutex_unlock(&local->iflist_mtx); | 397 | mutex_unlock(&local->iflist_mtx); |
400 | return ret; | 398 | |
399 | return 0; | ||
401 | } | 400 | } |
402 | 401 | ||
403 | static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) | 402 | static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) |
@@ -407,14 +406,20 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) | |||
407 | if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) | 406 | if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) |
408 | return; | 407 | return; |
409 | 408 | ||
409 | ASSERT_RTNL(); | ||
410 | |||
410 | mutex_lock(&local->iflist_mtx); | 411 | mutex_lock(&local->iflist_mtx); |
411 | 412 | ||
412 | sdata = rcu_dereference_protected(local->monitor_sdata, | 413 | sdata = rcu_dereference_protected(local->monitor_sdata, |
413 | lockdep_is_held(&local->iflist_mtx)); | 414 | lockdep_is_held(&local->iflist_mtx)); |
414 | if (!sdata) | 415 | if (!sdata) { |
415 | goto out_unlock; | 416 | mutex_unlock(&local->iflist_mtx); |
417 | return; | ||
418 | } | ||
416 | 419 | ||
417 | rcu_assign_pointer(local->monitor_sdata, NULL); | 420 | rcu_assign_pointer(local->monitor_sdata, NULL); |
421 | mutex_unlock(&local->iflist_mtx); | ||
422 | |||
418 | synchronize_net(); | 423 | synchronize_net(); |
419 | 424 | ||
420 | ieee80211_vif_release_channel(sdata); | 425 | ieee80211_vif_release_channel(sdata); |
@@ -422,8 +427,6 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) | |||
422 | drv_remove_interface(local, sdata); | 427 | drv_remove_interface(local, sdata); |
423 | 428 | ||
424 | kfree(sdata); | 429 | kfree(sdata); |
425 | out_unlock: | ||
426 | mutex_unlock(&local->iflist_mtx); | ||
427 | } | 430 | } |
428 | 431 | ||
429 | /* | 432 | /* |
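The virtual-monitor hunks narrow iflist_mtx to the monitor_sdata pointer itself and rely on RTNL (now asserted) to serialise add against delete, so driver callbacks are no longer made with iflist_mtx held. The delete side then follows the usual RCU unpublish pattern: clear the pointer under the mutex, wait for readers, and only afterwards call into the driver and free. Sketched linearly:

	ASSERT_RTNL();

	mutex_lock(&local->iflist_mtx);
	sdata = rcu_dereference_protected(local->monitor_sdata,
					  lockdep_is_held(&local->iflist_mtx));
	if (!sdata) {
		mutex_unlock(&local->iflist_mtx);
		return;
	}
	rcu_assign_pointer(local->monitor_sdata, NULL);
	mutex_unlock(&local->iflist_mtx);

	synchronize_net();			/* no reader still sees sdata */

	ieee80211_vif_release_channel(sdata);
	drv_remove_interface(local, sdata);
	kfree(sdata);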
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 29ce2aa87e7b..4749b3858695 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -1060,7 +1060,8 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) | |||
1060 | 1060 | ||
1061 | rcu_read_lock(); | 1061 | rcu_read_lock(); |
1062 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | 1062 | list_for_each_entry_rcu(sdata, &local->interfaces, list) |
1063 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 1063 | if (ieee80211_vif_is_mesh(&sdata->vif) && |
1064 | ieee80211_sdata_running(sdata)) | ||
1064 | ieee80211_queue_work(&local->hw, &sdata->work); | 1065 | ieee80211_queue_work(&local->hw, &sdata->work); |
1065 | rcu_read_unlock(); | 1066 | rcu_read_unlock(); |
1066 | } | 1067 | } |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 141577412d84..82cc30318a86 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -3608,8 +3608,10 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local) | |||
3608 | 3608 | ||
3609 | /* Restart STA timers */ | 3609 | /* Restart STA timers */ |
3610 | rcu_read_lock(); | 3610 | rcu_read_lock(); |
3611 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | 3611 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
3612 | ieee80211_restart_sta_timer(sdata); | 3612 | if (ieee80211_sdata_running(sdata)) |
3613 | ieee80211_restart_sta_timer(sdata); | ||
3614 | } | ||
3613 | rcu_read_unlock(); | 3615 | rcu_read_unlock(); |
3614 | } | 3616 | } |
3615 | 3617 | ||
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index cc79b4a2e821..430bd254e496 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c | |||
@@ -297,10 +297,13 @@ void ieee80211_start_next_roc(struct ieee80211_local *local) | |||
297 | } | 297 | } |
298 | } | 298 | } |
299 | 299 | ||
300 | void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc) | 300 | void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free) |
301 | { | 301 | { |
302 | struct ieee80211_roc_work *dep, *tmp; | 302 | struct ieee80211_roc_work *dep, *tmp; |
303 | 303 | ||
304 | if (WARN_ON(roc->to_be_freed)) | ||
305 | return; | ||
306 | |||
304 | /* was never transmitted */ | 307 | /* was never transmitted */ |
305 | if (roc->frame) { | 308 | if (roc->frame) { |
306 | cfg80211_mgmt_tx_status(&roc->sdata->wdev, | 309 | cfg80211_mgmt_tx_status(&roc->sdata->wdev, |
@@ -316,9 +319,12 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc) | |||
316 | GFP_KERNEL); | 319 | GFP_KERNEL); |
317 | 320 | ||
318 | list_for_each_entry_safe(dep, tmp, &roc->dependents, list) | 321 | list_for_each_entry_safe(dep, tmp, &roc->dependents, list) |
319 | ieee80211_roc_notify_destroy(dep); | 322 | ieee80211_roc_notify_destroy(dep, true); |
320 | 323 | ||
321 | kfree(roc); | 324 | if (free) |
325 | kfree(roc); | ||
326 | else | ||
327 | roc->to_be_freed = true; | ||
322 | } | 328 | } |
323 | 329 | ||
324 | void ieee80211_sw_roc_work(struct work_struct *work) | 330 | void ieee80211_sw_roc_work(struct work_struct *work) |
@@ -331,6 +337,9 @@ void ieee80211_sw_roc_work(struct work_struct *work) | |||
331 | 337 | ||
332 | mutex_lock(&local->mtx); | 338 | mutex_lock(&local->mtx); |
333 | 339 | ||
340 | if (roc->to_be_freed) | ||
341 | goto out_unlock; | ||
342 | |||
334 | if (roc->abort) | 343 | if (roc->abort) |
335 | goto finish; | 344 | goto finish; |
336 | 345 | ||
@@ -370,7 +379,7 @@ void ieee80211_sw_roc_work(struct work_struct *work) | |||
370 | finish: | 379 | finish: |
371 | list_del(&roc->list); | 380 | list_del(&roc->list); |
372 | started = roc->started; | 381 | started = roc->started; |
373 | ieee80211_roc_notify_destroy(roc); | 382 | ieee80211_roc_notify_destroy(roc, !roc->abort); |
374 | 383 | ||
375 | if (started) { | 384 | if (started) { |
376 | drv_flush(local, false); | 385 | drv_flush(local, false); |
@@ -410,7 +419,7 @@ static void ieee80211_hw_roc_done(struct work_struct *work) | |||
410 | 419 | ||
411 | list_del(&roc->list); | 420 | list_del(&roc->list); |
412 | 421 | ||
413 | ieee80211_roc_notify_destroy(roc); | 422 | ieee80211_roc_notify_destroy(roc, true); |
414 | 423 | ||
415 | /* if there's another roc, start it now */ | 424 | /* if there's another roc, start it now */ |
416 | ieee80211_start_next_roc(local); | 425 | ieee80211_start_next_roc(local); |
@@ -460,12 +469,14 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata) | |||
460 | list_for_each_entry_safe(roc, tmp, &tmp_list, list) { | 469 | list_for_each_entry_safe(roc, tmp, &tmp_list, list) { |
461 | if (local->ops->remain_on_channel) { | 470 | if (local->ops->remain_on_channel) { |
462 | list_del(&roc->list); | 471 | list_del(&roc->list); |
463 | ieee80211_roc_notify_destroy(roc); | 472 | ieee80211_roc_notify_destroy(roc, true); |
464 | } else { | 473 | } else { |
465 | ieee80211_queue_delayed_work(&local->hw, &roc->work, 0); | 474 | ieee80211_queue_delayed_work(&local->hw, &roc->work, 0); |
466 | 475 | ||
467 | /* work will clean up etc */ | 476 | /* work will clean up etc */ |
468 | flush_delayed_work(&roc->work); | 477 | flush_delayed_work(&roc->work); |
478 | WARN_ON(!roc->to_be_freed); | ||
479 | kfree(roc); | ||
469 | } | 480 | } |
470 | } | 481 | } |
471 | 482 | ||
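The new to_be_freed flag exists because the software-ROC work item cannot safely kfree() the roc while another thread is about to flush_delayed_work() on that same roc — the flusher would then touch freed memory. So the destroy path frees only when the caller says it is safe; otherwise it merely marks the roc, and whoever flushed the work performs the final kfree() afterwards. The two halves of the handshake:

	/* in ieee80211_roc_notify_destroy(roc, free) */
	if (free)
		kfree(roc);
	else
		roc->to_be_freed = true;	/* flusher will free it */

	/* in the canceller, after flush_delayed_work(&roc->work) */
	WARN_ON(!roc->to_be_freed);
	kfree(roc);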
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bb73ed2d20b9..c6844ad080be 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -2675,7 +2675,19 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) | |||
2675 | 2675 | ||
2676 | memset(nskb->cb, 0, sizeof(nskb->cb)); | 2676 | memset(nskb->cb, 0, sizeof(nskb->cb)); |
2677 | 2677 | ||
2678 | ieee80211_tx_skb(rx->sdata, nskb); | 2678 | if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { |
2679 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); | ||
2680 | |||
2681 | info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | | ||
2682 | IEEE80211_TX_INTFL_OFFCHAN_TX_OK | | ||
2683 | IEEE80211_TX_CTL_NO_CCK_RATE; | ||
2684 | if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) | ||
2685 | info->hw_queue = | ||
2686 | local->hw.offchannel_tx_hw_queue; | ||
2687 | } | ||
2688 | |||
2689 | __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, | ||
2690 | status->band); | ||
2679 | } | 2691 | } |
2680 | dev_kfree_skb(rx->skb); | 2692 | dev_kfree_skb(rx->skb); |
2681 | return RX_QUEUED; | 2693 | return RX_QUEUED; |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index a79ce820cb50..238a0cca320e 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -766,6 +766,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta) | |||
766 | struct ieee80211_local *local; | 766 | struct ieee80211_local *local; |
767 | struct ieee80211_sub_if_data *sdata; | 767 | struct ieee80211_sub_if_data *sdata; |
768 | int ret, i; | 768 | int ret, i; |
769 | bool have_key = false; | ||
769 | 770 | ||
770 | might_sleep(); | 771 | might_sleep(); |
771 | 772 | ||
@@ -793,12 +794,19 @@ int __must_check __sta_info_destroy(struct sta_info *sta) | |||
793 | list_del_rcu(&sta->list); | 794 | list_del_rcu(&sta->list); |
794 | 795 | ||
795 | mutex_lock(&local->key_mtx); | 796 | mutex_lock(&local->key_mtx); |
796 | for (i = 0; i < NUM_DEFAULT_KEYS; i++) | 797 | for (i = 0; i < NUM_DEFAULT_KEYS; i++) { |
797 | __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i])); | 798 | __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i])); |
798 | if (sta->ptk) | 799 | have_key = true; |
800 | } | ||
801 | if (sta->ptk) { | ||
799 | __ieee80211_key_free(key_mtx_dereference(local, sta->ptk)); | 802 | __ieee80211_key_free(key_mtx_dereference(local, sta->ptk)); |
803 | have_key = true; | ||
804 | } | ||
800 | mutex_unlock(&local->key_mtx); | 805 | mutex_unlock(&local->key_mtx); |
801 | 806 | ||
807 | if (!have_key) | ||
808 | synchronize_net(); | ||
809 | |||
802 | sta->dead = true; | 810 | sta->dead = true; |
803 | 811 | ||
804 | local->num_sta--; | 812 | local->num_sta--; |
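The have_key bookkeeping makes the grace-period requirement explicit: the station was just removed from the RCU-visible list, and something must wait for concurrent lookups to finish before teardown proceeds. The hunk implies that freeing a key already provides that wait; when the station held no keys at all, the code now calls synchronize_net() itself. In outline (free_sta_keys() is an illustrative stand-in, not a real mac80211 function):

	list_del_rcu(&sta->list);

	have_key = free_sta_keys(local, sta);	/* key removal implies an RCU wait */
	if (!have_key)
		synchronize_net();		/* otherwise wait explicitly       */

	sta->dead = true;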
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 47edf5a40a59..61f49d241712 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -1394,10 +1394,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) | |||
1394 | skb_reset_network_header(skb); | 1394 | skb_reset_network_header(skb); |
1395 | IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n", | 1395 | IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n", |
1396 | &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu); | 1396 | &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu); |
1397 | rcu_read_lock(); | ||
1398 | ipv4_update_pmtu(skb, dev_net(skb->dev), | 1397 | ipv4_update_pmtu(skb, dev_net(skb->dev), |
1399 | mtu, 0, 0, 0, 0); | 1398 | mtu, 0, 0, 0, 0); |
1400 | rcu_read_unlock(); | ||
1401 | /* Client uses PMTUD? */ | 1399 | /* Client uses PMTUD? */ |
1402 | if (!(cih->frag_off & htons(IP_DF))) | 1400 | if (!(cih->frag_off & htons(IP_DF))) |
1403 | goto ignore_ipip; | 1401 | goto ignore_ipip; |
@@ -1577,7 +1575,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1577 | } | 1575 | } |
1578 | /* ipvs enabled in this netns ? */ | 1576 | /* ipvs enabled in this netns ? */ |
1579 | net = skb_net(skb); | 1577 | net = skb_net(skb); |
1580 | if (!net_ipvs(net)->enable) | 1578 | ipvs = net_ipvs(net); |
1579 | if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable)) | ||
1581 | return NF_ACCEPT; | 1580 | return NF_ACCEPT; |
1582 | 1581 | ||
1583 | ip_vs_fill_iph_skb(af, skb, &iph); | 1582 | ip_vs_fill_iph_skb(af, skb, &iph); |
@@ -1654,7 +1653,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1654 | } | 1653 | } |
1655 | 1654 | ||
1656 | IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); | 1655 | IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); |
1657 | ipvs = net_ipvs(net); | ||
1658 | /* Check the server status */ | 1656 | /* Check the server status */ |
1659 | if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { | 1657 | if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { |
1660 | /* the destination server is not available */ | 1658 | /* the destination server is not available */ |
@@ -1815,13 +1813,15 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb, | |||
1815 | { | 1813 | { |
1816 | int r; | 1814 | int r; |
1817 | struct net *net; | 1815 | struct net *net; |
1816 | struct netns_ipvs *ipvs; | ||
1818 | 1817 | ||
1819 | if (ip_hdr(skb)->protocol != IPPROTO_ICMP) | 1818 | if (ip_hdr(skb)->protocol != IPPROTO_ICMP) |
1820 | return NF_ACCEPT; | 1819 | return NF_ACCEPT; |
1821 | 1820 | ||
1822 | /* ipvs enabled in this netns ? */ | 1821 | /* ipvs enabled in this netns ? */ |
1823 | net = skb_net(skb); | 1822 | net = skb_net(skb); |
1824 | if (!net_ipvs(net)->enable) | 1823 | ipvs = net_ipvs(net); |
1824 | if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable)) | ||
1825 | return NF_ACCEPT; | 1825 | return NF_ACCEPT; |
1826 | 1826 | ||
1827 | return ip_vs_in_icmp(skb, &r, hooknum); | 1827 | return ip_vs_in_icmp(skb, &r, hooknum); |
@@ -1835,6 +1835,7 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb, | |||
1835 | { | 1835 | { |
1836 | int r; | 1836 | int r; |
1837 | struct net *net; | 1837 | struct net *net; |
1838 | struct netns_ipvs *ipvs; | ||
1838 | struct ip_vs_iphdr iphdr; | 1839 | struct ip_vs_iphdr iphdr; |
1839 | 1840 | ||
1840 | ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr); | 1841 | ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr); |
@@ -1843,7 +1844,8 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb, | |||
1843 | 1844 | ||
1844 | /* ipvs enabled in this netns ? */ | 1845 | /* ipvs enabled in this netns ? */ |
1845 | net = skb_net(skb); | 1846 | net = skb_net(skb); |
1846 | if (!net_ipvs(net)->enable) | 1847 | ipvs = net_ipvs(net); |
1848 | if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable)) | ||
1847 | return NF_ACCEPT; | 1849 | return NF_ACCEPT; |
1848 | 1850 | ||
1849 | return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr); | 1851 | return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr); |
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index c68198bf9128..9e2d1cccd1eb 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -1808,6 +1808,12 @@ static struct ctl_table vs_vars[] = { | |||
1808 | .mode = 0644, | 1808 | .mode = 0644, |
1809 | .proc_handler = proc_dointvec, | 1809 | .proc_handler = proc_dointvec, |
1810 | }, | 1810 | }, |
1811 | { | ||
1812 | .procname = "backup_only", | ||
1813 | .maxlen = sizeof(int), | ||
1814 | .mode = 0644, | ||
1815 | .proc_handler = proc_dointvec, | ||
1816 | }, | ||
1811 | #ifdef CONFIG_IP_VS_DEBUG | 1817 | #ifdef CONFIG_IP_VS_DEBUG |
1812 | { | 1818 | { |
1813 | .procname = "debug_level", | 1819 | .procname = "debug_level", |
@@ -3741,6 +3747,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net) | |||
3741 | tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; | 3747 | tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; |
3742 | ipvs->sysctl_pmtu_disc = 1; | 3748 | ipvs->sysctl_pmtu_disc = 1; |
3743 | tbl[idx++].data = &ipvs->sysctl_pmtu_disc; | 3749 | tbl[idx++].data = &ipvs->sysctl_pmtu_disc; |
3750 | tbl[idx++].data = &ipvs->sysctl_backup_only; | ||
3744 | 3751 | ||
3745 | 3752 | ||
3746 | ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl); | 3753 | ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl); |
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index ae8ec6f27688..cd1d7298f7ba 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c | |||
@@ -906,7 +906,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, | |||
906 | sctp_chunkhdr_t _sctpch, *sch; | 906 | sctp_chunkhdr_t _sctpch, *sch; |
907 | unsigned char chunk_type; | 907 | unsigned char chunk_type; |
908 | int event, next_state; | 908 | int event, next_state; |
909 | int ihl; | 909 | int ihl, cofs; |
910 | 910 | ||
911 | #ifdef CONFIG_IP_VS_IPV6 | 911 | #ifdef CONFIG_IP_VS_IPV6 |
912 | ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr); | 912 | ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr); |
@@ -914,8 +914,8 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, | |||
914 | ihl = ip_hdrlen(skb); | 914 | ihl = ip_hdrlen(skb); |
915 | #endif | 915 | #endif |
916 | 916 | ||
917 | sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t), | 917 | cofs = ihl + sizeof(sctp_sctphdr_t); |
918 | sizeof(_sctpch), &_sctpch); | 918 | sch = skb_header_pointer(skb, cofs, sizeof(_sctpch), &_sctpch); |
919 | if (sch == NULL) | 919 | if (sch == NULL) |
920 | return; | 920 | return; |
921 | 921 | ||
@@ -933,10 +933,12 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, | |||
933 | */ | 933 | */ |
934 | if ((sch->type == SCTP_CID_COOKIE_ECHO) || | 934 | if ((sch->type == SCTP_CID_COOKIE_ECHO) || |
935 | (sch->type == SCTP_CID_COOKIE_ACK)) { | 935 | (sch->type == SCTP_CID_COOKIE_ACK)) { |
936 | sch = skb_header_pointer(skb, (ihl + sizeof(sctp_sctphdr_t) + | 936 | int clen = ntohs(sch->length); |
937 | sch->length), sizeof(_sctpch), &_sctpch); | 937 | |
938 | if (sch) { | 938 | if (clen >= sizeof(sctp_chunkhdr_t)) { |
939 | if (sch->type == SCTP_CID_ABORT) | 939 | sch = skb_header_pointer(skb, cofs + ALIGN(clen, 4), |
940 | sizeof(_sctpch), &_sctpch); | ||
941 | if (sch && sch->type == SCTP_CID_ABORT) | ||
940 | chunk_type = sch->type; | 942 | chunk_type = sch->type; |
941 | } | 943 | } |
942 | } | 944 | } |
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 432f95780003..ba65b2041eb4 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c | |||
@@ -969,6 +969,10 @@ static int __init nf_conntrack_proto_dccp_init(void) | |||
969 | { | 969 | { |
970 | int ret; | 970 | int ret; |
971 | 971 | ||
972 | ret = register_pernet_subsys(&dccp_net_ops); | ||
973 | if (ret < 0) | ||
974 | goto out_pernet; | ||
975 | |||
972 | ret = nf_ct_l4proto_register(&dccp_proto4); | 976 | ret = nf_ct_l4proto_register(&dccp_proto4); |
973 | if (ret < 0) | 977 | if (ret < 0) |
974 | goto out_dccp4; | 978 | goto out_dccp4; |
@@ -977,16 +981,12 @@ static int __init nf_conntrack_proto_dccp_init(void) | |||
977 | if (ret < 0) | 981 | if (ret < 0) |
978 | goto out_dccp6; | 982 | goto out_dccp6; |
979 | 983 | ||
980 | ret = register_pernet_subsys(&dccp_net_ops); | ||
981 | if (ret < 0) | ||
982 | goto out_pernet; | ||
983 | |||
984 | return 0; | 984 | return 0; |
985 | out_pernet: | ||
986 | nf_ct_l4proto_unregister(&dccp_proto6); | ||
987 | out_dccp6: | 985 | out_dccp6: |
988 | nf_ct_l4proto_unregister(&dccp_proto4); | 986 | nf_ct_l4proto_unregister(&dccp_proto4); |
989 | out_dccp4: | 987 | out_dccp4: |
988 | unregister_pernet_subsys(&dccp_net_ops); | ||
989 | out_pernet: | ||
990 | return ret; | 990 | return ret; |
991 | } | 991 | } |
992 | 992 | ||
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index bd7d01d9c7e7..155ce9f8a0db 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c | |||
@@ -420,18 +420,18 @@ static int __init nf_ct_proto_gre_init(void) | |||
420 | { | 420 | { |
421 | int ret; | 421 | int ret; |
422 | 422 | ||
423 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4); | ||
424 | if (ret < 0) | ||
425 | goto out_gre4; | ||
426 | |||
427 | ret = register_pernet_subsys(&proto_gre_net_ops); | 423 | ret = register_pernet_subsys(&proto_gre_net_ops); |
428 | if (ret < 0) | 424 | if (ret < 0) |
429 | goto out_pernet; | 425 | goto out_pernet; |
430 | 426 | ||
427 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4); | ||
428 | if (ret < 0) | ||
429 | goto out_gre4; | ||
430 | |||
431 | return 0; | 431 | return 0; |
432 | out_pernet: | ||
433 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_gre4); | ||
434 | out_gre4: | 432 | out_gre4: |
433 | unregister_pernet_subsys(&proto_gre_net_ops); | ||
434 | out_pernet: | ||
435 | return ret; | 435 | return ret; |
436 | } | 436 | } |
437 | 437 | ||
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 480f616d5936..ec83536def9a 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c | |||
@@ -888,6 +888,10 @@ static int __init nf_conntrack_proto_sctp_init(void) | |||
888 | { | 888 | { |
889 | int ret; | 889 | int ret; |
890 | 890 | ||
891 | ret = register_pernet_subsys(&sctp_net_ops); | ||
892 | if (ret < 0) | ||
893 | goto out_pernet; | ||
894 | |||
891 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp4); | 895 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp4); |
892 | if (ret < 0) | 896 | if (ret < 0) |
893 | goto out_sctp4; | 897 | goto out_sctp4; |
@@ -896,16 +900,12 @@ static int __init nf_conntrack_proto_sctp_init(void) | |||
896 | if (ret < 0) | 900 | if (ret < 0) |
897 | goto out_sctp6; | 901 | goto out_sctp6; |
898 | 902 | ||
899 | ret = register_pernet_subsys(&sctp_net_ops); | ||
900 | if (ret < 0) | ||
901 | goto out_pernet; | ||
902 | |||
903 | return 0; | 903 | return 0; |
904 | out_pernet: | ||
905 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp6); | ||
906 | out_sctp6: | 904 | out_sctp6: |
907 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4); | 905 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4); |
908 | out_sctp4: | 906 | out_sctp4: |
907 | unregister_pernet_subsys(&sctp_net_ops); | ||
908 | out_pernet: | ||
909 | return ret; | 909 | return ret; |
910 | } | 910 | } |
911 | 911 | ||
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c index 157489581c31..ca969f6273f7 100644 --- a/net/netfilter/nf_conntrack_proto_udplite.c +++ b/net/netfilter/nf_conntrack_proto_udplite.c | |||
@@ -371,6 +371,10 @@ static int __init nf_conntrack_proto_udplite_init(void) | |||
371 | { | 371 | { |
372 | int ret; | 372 | int ret; |
373 | 373 | ||
374 | ret = register_pernet_subsys(&udplite_net_ops); | ||
375 | if (ret < 0) | ||
376 | goto out_pernet; | ||
377 | |||
374 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4); | 378 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4); |
375 | if (ret < 0) | 379 | if (ret < 0) |
376 | goto out_udplite4; | 380 | goto out_udplite4; |
@@ -379,16 +383,12 @@ static int __init nf_conntrack_proto_udplite_init(void) | |||
379 | if (ret < 0) | 383 | if (ret < 0) |
380 | goto out_udplite6; | 384 | goto out_udplite6; |
381 | 385 | ||
382 | ret = register_pernet_subsys(&udplite_net_ops); | ||
383 | if (ret < 0) | ||
384 | goto out_pernet; | ||
385 | |||
386 | return 0; | 386 | return 0; |
387 | out_pernet: | ||
388 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite6); | ||
389 | out_udplite6: | 387 | out_udplite6: |
390 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4); | 388 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4); |
391 | out_udplite4: | 389 | out_udplite4: |
390 | unregister_pernet_subsys(&udplite_net_ops); | ||
391 | out_pernet: | ||
392 | return ret; | 392 | return ret; |
393 | } | 393 | } |
394 | 394 | ||
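The dccp, gre, sctp and udplite changes are all the same fix: the pernet state has to be registered before the l4proto trackers, presumably because a tracker can be exercised as soon as it is registered and expects its per-net data to already exist, and the error unwind has to run in the opposite order. The shared shape, with placeholder names:

	static int __init proto_init(void)
	{
		int ret;

		ret = register_pernet_subsys(&proto_net_ops);	/* pernet data first */
		if (ret < 0)
			goto out_pernet;

		ret = nf_ct_l4proto_register(&proto4);		/* then the tracker  */
		if (ret < 0)
			goto out_proto4;

		return 0;

	out_proto4:
		unregister_pernet_subsys(&proto_net_ops);	/* reverse order     */
	out_pernet:
		return ret;
	}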
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 6bcce401fd1c..fedee3943661 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c | |||
@@ -568,6 +568,7 @@ static int __init nf_conntrack_standalone_init(void) | |||
568 | register_net_sysctl(&init_net, "net", nf_ct_netfilter_table); | 568 | register_net_sysctl(&init_net, "net", nf_ct_netfilter_table); |
569 | if (!nf_ct_netfilter_header) { | 569 | if (!nf_ct_netfilter_header) { |
570 | pr_err("nf_conntrack: can't register to sysctl.\n"); | 570 | pr_err("nf_conntrack: can't register to sysctl.\n"); |
571 | ret = -ENOMEM; | ||
571 | goto out_sysctl; | 572 | goto out_sysctl; |
572 | } | 573 | } |
573 | #endif | 574 | #endif |
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index 589d686f0b4c..dc3fd5d44464 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c | |||
@@ -49,6 +49,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb, | |||
49 | return -EINVAL; | 49 | return -EINVAL; |
50 | 50 | ||
51 | acct_name = nla_data(tb[NFACCT_NAME]); | 51 | acct_name = nla_data(tb[NFACCT_NAME]); |
52 | if (strlen(acct_name) == 0) | ||
53 | return -EINVAL; | ||
52 | 54 | ||
53 | list_for_each_entry(nfacct, &nfnl_acct_list, head) { | 55 | list_for_each_entry(nfacct, &nfnl_acct_list, head) { |
54 | if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0) | 56 | if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0) |
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 858fd52c1040..42680b2baa11 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c | |||
@@ -112,7 +112,7 @@ instance_create(u_int16_t queue_num, int portid) | |||
112 | inst->queue_num = queue_num; | 112 | inst->queue_num = queue_num; |
113 | inst->peer_portid = portid; | 113 | inst->peer_portid = portid; |
114 | inst->queue_maxlen = NFQNL_QMAX_DEFAULT; | 114 | inst->queue_maxlen = NFQNL_QMAX_DEFAULT; |
115 | inst->copy_range = 0xfffff; | 115 | inst->copy_range = 0xffff; |
116 | inst->copy_mode = NFQNL_COPY_NONE; | 116 | inst->copy_mode = NFQNL_COPY_NONE; |
117 | spin_lock_init(&inst->lock); | 117 | spin_lock_init(&inst->lock); |
118 | INIT_LIST_HEAD(&inst->queue_list); | 118 | INIT_LIST_HEAD(&inst->queue_list); |
@@ -1062,8 +1062,10 @@ static int __init nfnetlink_queue_init(void) | |||
1062 | 1062 | ||
1063 | #ifdef CONFIG_PROC_FS | 1063 | #ifdef CONFIG_PROC_FS |
1064 | if (!proc_create("nfnetlink_queue", 0440, | 1064 | if (!proc_create("nfnetlink_queue", 0440, |
1065 | proc_net_netfilter, &nfqnl_file_ops)) | 1065 | proc_net_netfilter, &nfqnl_file_ops)) { |
1066 | status = -ENOMEM; | ||
1066 | goto cleanup_subsys; | 1067 | goto cleanup_subsys; |
1068 | } | ||
1067 | #endif | 1069 | #endif |
1068 | 1070 | ||
1069 | register_netdevice_notifier(&nfqnl_dev_notifier); | 1071 | register_netdevice_notifier(&nfqnl_dev_notifier); |
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index f2aabb6f4105..5a55be3f17a5 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -142,6 +142,7 @@ int genl_register_mc_group(struct genl_family *family, | |||
142 | int err = 0; | 142 | int err = 0; |
143 | 143 | ||
144 | BUG_ON(grp->name[0] == '\0'); | 144 | BUG_ON(grp->name[0] == '\0'); |
145 | BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL); | ||
145 | 146 | ||
146 | genl_lock(); | 147 | genl_lock(); |
147 | 148 | ||
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index b530afadd76c..ee25f25f0cd6 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c | |||
@@ -107,8 +107,6 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen, | |||
107 | accept_sk->sk_state_change(sk); | 107 | accept_sk->sk_state_change(sk); |
108 | 108 | ||
109 | bh_unlock_sock(accept_sk); | 109 | bh_unlock_sock(accept_sk); |
110 | |||
111 | sock_orphan(accept_sk); | ||
112 | } | 110 | } |
113 | 111 | ||
114 | if (listen == true) { | 112 | if (listen == true) { |
@@ -134,8 +132,6 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen, | |||
134 | 132 | ||
135 | bh_unlock_sock(sk); | 133 | bh_unlock_sock(sk); |
136 | 134 | ||
137 | sock_orphan(sk); | ||
138 | |||
139 | sk_del_node_init(sk); | 135 | sk_del_node_init(sk); |
140 | } | 136 | } |
141 | 137 | ||
@@ -164,8 +160,6 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen, | |||
164 | 160 | ||
165 | bh_unlock_sock(sk); | 161 | bh_unlock_sock(sk); |
166 | 162 | ||
167 | sock_orphan(sk); | ||
168 | |||
169 | sk_del_node_init(sk); | 163 | sk_del_node_init(sk); |
170 | } | 164 | } |
171 | 165 | ||
@@ -827,7 +821,6 @@ static void nfc_llcp_recv_ui(struct nfc_llcp_local *local, | |||
827 | skb_get(skb); | 821 | skb_get(skb); |
828 | } else { | 822 | } else { |
829 | pr_err("Receive queue is full\n"); | 823 | pr_err("Receive queue is full\n"); |
830 | kfree_skb(skb); | ||
831 | } | 824 | } |
832 | 825 | ||
833 | nfc_llcp_sock_put(llcp_sock); | 826 | nfc_llcp_sock_put(llcp_sock); |
@@ -1028,7 +1021,6 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local, | |||
1028 | skb_get(skb); | 1021 | skb_get(skb); |
1029 | } else { | 1022 | } else { |
1030 | pr_err("Receive queue is full\n"); | 1023 | pr_err("Receive queue is full\n"); |
1031 | kfree_skb(skb); | ||
1032 | } | 1024 | } |
1033 | } | 1025 | } |
1034 | 1026 | ||
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c index 5c7cdf3f2a83..8f025746f337 100644 --- a/net/nfc/llcp/sock.c +++ b/net/nfc/llcp/sock.c | |||
@@ -270,7 +270,9 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent, | |||
270 | } | 270 | } |
271 | 271 | ||
272 | if (sk->sk_state == LLCP_CONNECTED || !newsock) { | 272 | if (sk->sk_state == LLCP_CONNECTED || !newsock) { |
273 | nfc_llcp_accept_unlink(sk); | 273 | list_del_init(&lsk->accept_queue); |
274 | sock_put(sk); | ||
275 | |||
274 | if (newsock) | 276 | if (newsock) |
275 | sock_graft(sk, newsock); | 277 | sock_graft(sk, newsock); |
276 | 278 | ||
@@ -464,8 +466,6 @@ static int llcp_sock_release(struct socket *sock) | |||
464 | nfc_llcp_accept_unlink(accept_sk); | 466 | nfc_llcp_accept_unlink(accept_sk); |
465 | 467 | ||
466 | release_sock(accept_sk); | 468 | release_sock(accept_sk); |
467 | |||
468 | sock_orphan(accept_sk); | ||
469 | } | 469 | } |
470 | } | 470 | } |
471 | 471 | ||
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 13aa47aa2ffb..1bc210ffcba2 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -962,8 +962,11 @@ cbq_dequeue(struct Qdisc *sch) | |||
962 | cbq_update(q); | 962 | cbq_update(q); |
963 | if ((incr -= incr2) < 0) | 963 | if ((incr -= incr2) < 0) |
964 | incr = 0; | 964 | incr = 0; |
965 | q->now += incr; | ||
966 | } else { | ||
967 | if (now > q->now) | ||
968 | q->now = now; | ||
965 | } | 969 | } |
966 | q->now += incr; | ||
967 | q->now_rt = now; | 970 | q->now_rt = now; |
968 | 971 | ||
969 | for (;;) { | 972 | for (;;) { |
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 4e606fcb2534..55786283a3df 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c | |||
@@ -195,7 +195,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
195 | flow->deficit = q->quantum; | 195 | flow->deficit = q->quantum; |
196 | flow->dropped = 0; | 196 | flow->dropped = 0; |
197 | } | 197 | } |
198 | if (++sch->q.qlen < sch->limit) | 198 | if (++sch->q.qlen <= sch->limit) |
199 | return NET_XMIT_SUCCESS; | 199 | return NET_XMIT_SUCCESS; |
200 | 200 | ||
201 | q->drop_overlimit++; | 201 | q->drop_overlimit++; |
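
The fq_codel hunk above changes the post-increment limit test from '<' to '<='. A standalone sketch of just that counter arithmetic, assuming nothing beyond the hunk itself (the helper below is illustrative, not the kernel's drop path), shows why the old test capped occupancy at limit - 1:

/* With limit = 3 and '<', the third packet is refused even though only
 * two are queued; '<=' accepts packets up to the configured limit.
 */
#include <stdio.h>

static int enqueue(unsigned int *qlen, unsigned int limit, int use_le)
{
	++*qlen;					/* packet has been added */
	if (use_le ? (*qlen <= limit) : (*qlen < limit))
		return 0;				/* NET_XMIT_SUCCESS */
	--*qlen;					/* pretend we dropped it again */
	return 1;					/* over limit */
}

int main(void)
{
	unsigned int qlen;
	int i, dropped;

	for (qlen = 0, dropped = 0, i = 0; i < 3; i++)
		dropped += enqueue(&qlen, 3, 0);
	printf("with '<' : held %u, dropped %d\n", qlen, dropped);	/* 2, 1 */

	for (qlen = 0, dropped = 0, i = 0; i < 3; i++)
		dropped += enqueue(&qlen, 3, 1);
	printf("with '<=': held %u, dropped %d\n", qlen, dropped);	/* 3, 0 */
	return 0;
}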
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index ffad48109a22..eac7e0ee23c1 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -904,7 +904,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate) | |||
904 | u64 mult; | 904 | u64 mult; |
905 | int shift; | 905 | int shift; |
906 | 906 | ||
907 | r->rate_bps = rate << 3; | 907 | r->rate_bps = (u64)rate << 3; |
908 | r->shift = 0; | 908 | r->shift = 0; |
909 | r->mult = 1; | 909 | r->mult = 1; |
910 | /* | 910 | /* |
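
The sch_generic.c one-liner widens 'rate' before the shift. A standalone sketch, assuming only that the shift converts a 32-bit byte rate into a bit rate (as the hunk suggests), shows the wrap the (u64) cast avoids once the rate reaches 2^29 bytes/s (roughly 4.3 Gbit/s):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rate = 600000000u;		/* 600 Mbyte/s ~= 4.8 Gbit/s */
	uint64_t wrong = rate << 3;		/* shift performed in 32 bits, wraps */
	uint64_t right = (uint64_t)rate << 3;	/* widened first, as in the fix */

	printf("without cast: %llu bps\n", (unsigned long long)wrong);
	printf("with cast   : %llu bps\n", (unsigned long long)right);
	return 0;
}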
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index fb20f25ddec9..f8529fc8e542 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -180,6 +180,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, | |||
180 | list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); | 180 | list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); |
181 | task->tk_waitqueue = queue; | 181 | task->tk_waitqueue = queue; |
182 | queue->qlen++; | 182 | queue->qlen++; |
183 | /* barrier matches the read in rpc_wake_up_task_queue_locked() */ | ||
184 | smp_wmb(); | ||
183 | rpc_set_queued(task); | 185 | rpc_set_queued(task); |
184 | 186 | ||
185 | dprintk("RPC: %5u added to queue %p \"%s\"\n", | 187 | dprintk("RPC: %5u added to queue %p \"%s\"\n", |
@@ -430,8 +432,11 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task | |||
430 | */ | 432 | */ |
431 | static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task) | 433 | static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task) |
432 | { | 434 | { |
433 | if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue) | 435 | if (RPC_IS_QUEUED(task)) { |
434 | __rpc_do_wake_up_task(queue, task); | 436 | smp_rmb(); |
437 | if (task->tk_waitqueue == queue) | ||
438 | __rpc_do_wake_up_task(queue, task); | ||
439 | } | ||
435 | } | 440 | } |
436 | 441 | ||
437 | /* | 442 | /* |
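
The two sched.c hunks pair a write barrier with a read barrier: the enqueue side publishes tk_waitqueue before setting the queued flag, and the wake-up side tests the flag before trusting tk_waitqueue. A minimal standalone sketch of that publish/observe pattern, with C11 fences standing in for smp_wmb()/smp_rmb() and simplified types in place of struct rpc_task:

#include <stdatomic.h>
#include <stdio.h>

struct task {
	void *waitqueue;		/* data published by the writer */
	atomic_int queued;		/* plays the role of RPC_TASK_QUEUED */
};

static void add_to_queue(struct task *t, void *q)
{
	t->waitqueue = q;				/* 1: publish the data */
	atomic_thread_fence(memory_order_release);	/* 2: smp_wmb() analogue */
	atomic_store_explicit(&t->queued, 1, memory_order_relaxed);
}

static void try_wake(struct task *t, void *q)
{
	if (!atomic_load_explicit(&t->queued, memory_order_relaxed))
		return;					/* not queued, nothing to do */
	atomic_thread_fence(memory_order_acquire);	/* smp_rmb() analogue */
	if (t->waitqueue == q)				/* waitqueue is now stable */
		printf("waking task on queue %p\n", q);
}

int main(void)
{
	struct task t = { 0 };
	int dummy;

	add_to_queue(&t, &dummy);
	try_wake(&t, &dummy);
	return 0;
}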
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 51be64f163ec..2db702d82e7d 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -382,7 +382,7 @@ static void unix_sock_destructor(struct sock *sk) | |||
382 | #endif | 382 | #endif |
383 | } | 383 | } |
384 | 384 | ||
385 | static int unix_release_sock(struct sock *sk, int embrion) | 385 | static void unix_release_sock(struct sock *sk, int embrion) |
386 | { | 386 | { |
387 | struct unix_sock *u = unix_sk(sk); | 387 | struct unix_sock *u = unix_sk(sk); |
388 | struct path path; | 388 | struct path path; |
@@ -451,8 +451,6 @@ static int unix_release_sock(struct sock *sk, int embrion) | |||
451 | 451 | ||
452 | if (unix_tot_inflight) | 452 | if (unix_tot_inflight) |
453 | unix_gc(); /* Garbage collect fds */ | 453 | unix_gc(); /* Garbage collect fds */ |
454 | |||
455 | return 0; | ||
456 | } | 454 | } |
457 | 455 | ||
458 | static void init_peercred(struct sock *sk) | 456 | static void init_peercred(struct sock *sk) |
@@ -699,9 +697,10 @@ static int unix_release(struct socket *sock) | |||
699 | if (!sk) | 697 | if (!sk) |
700 | return 0; | 698 | return 0; |
701 | 699 | ||
700 | unix_release_sock(sk, 0); | ||
702 | sock->sk = NULL; | 701 | sock->sk = NULL; |
703 | 702 | ||
704 | return unix_release_sock(sk, 0); | 703 | return 0; |
705 | } | 704 | } |
706 | 705 | ||
707 | static int unix_autobind(struct socket *sock) | 706 | static int unix_autobind(struct socket *sock) |
@@ -1994,7 +1993,7 @@ again: | |||
1994 | if ((UNIXCB(skb).pid != siocb->scm->pid) || | 1993 | if ((UNIXCB(skb).pid != siocb->scm->pid) || |
1995 | (UNIXCB(skb).cred != siocb->scm->cred)) | 1994 | (UNIXCB(skb).cred != siocb->scm->cred)) |
1996 | break; | 1995 | break; |
1997 | } else { | 1996 | } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { |
1998 | /* Copy credentials */ | 1997 | /* Copy credentials */ |
1999 | scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); | 1998 | scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); |
2000 | check_creds = 1; | 1999 | check_creds = 1; |
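
The last af_unix.c hunk makes the credential copy conditional on SOCK_PASSCRED, so sender credentials are only gathered for receivers that asked for them. From userspace that request is the SO_PASSCRED socket option; a minimal sketch (error handling omitted, not part of the patch):

#include <sys/socket.h>
#include <stdio.h>

int main(void)
{
	int sv[2];
	int on = 1;

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);

	/* Without this option the receiving side of this socket no longer
	 * has per-skb sender credentials copied for it. */
	setsockopt(sv[0], SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));

	printf("SO_PASSCRED enabled on fd %d\n", sv[0]);
	return 0;
}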
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index ca511c4f388a..d8079daf1bde 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c | |||
@@ -207,7 +207,7 @@ static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr) | |||
207 | struct vsock_sock *vsk; | 207 | struct vsock_sock *vsk; |
208 | 208 | ||
209 | list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) | 209 | list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) |
210 | if (vsock_addr_equals_addr_any(addr, &vsk->local_addr)) | 210 | if (addr->svm_port == vsk->local_addr.svm_port) |
211 | return sk_vsock(vsk); | 211 | return sk_vsock(vsk); |
212 | 212 | ||
213 | return NULL; | 213 | return NULL; |
@@ -220,8 +220,8 @@ static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src, | |||
220 | 220 | ||
221 | list_for_each_entry(vsk, vsock_connected_sockets(src, dst), | 221 | list_for_each_entry(vsk, vsock_connected_sockets(src, dst), |
222 | connected_table) { | 222 | connected_table) { |
223 | if (vsock_addr_equals_addr(src, &vsk->remote_addr) | 223 | if (vsock_addr_equals_addr(src, &vsk->remote_addr) && |
224 | && vsock_addr_equals_addr(dst, &vsk->local_addr)) { | 224 | dst->svm_port == vsk->local_addr.svm_port) { |
225 | return sk_vsock(vsk); | 225 | return sk_vsock(vsk); |
226 | } | 226 | } |
227 | } | 227 | } |
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index a70ace83a153..1f6508e249ae 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c | |||
@@ -464,19 +464,16 @@ static struct sock *vmci_transport_get_pending( | |||
464 | struct vsock_sock *vlistener; | 464 | struct vsock_sock *vlistener; |
465 | struct vsock_sock *vpending; | 465 | struct vsock_sock *vpending; |
466 | struct sock *pending; | 466 | struct sock *pending; |
467 | struct sockaddr_vm src; | ||
468 | |||
469 | vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port); | ||
467 | 470 | ||
468 | vlistener = vsock_sk(listener); | 471 | vlistener = vsock_sk(listener); |
469 | 472 | ||
470 | list_for_each_entry(vpending, &vlistener->pending_links, | 473 | list_for_each_entry(vpending, &vlistener->pending_links, |
471 | pending_links) { | 474 | pending_links) { |
472 | struct sockaddr_vm src; | ||
473 | struct sockaddr_vm dst; | ||
474 | |||
475 | vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port); | ||
476 | vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port); | ||
477 | |||
478 | if (vsock_addr_equals_addr(&src, &vpending->remote_addr) && | 475 | if (vsock_addr_equals_addr(&src, &vpending->remote_addr) && |
479 | vsock_addr_equals_addr(&dst, &vpending->local_addr)) { | 476 | pkt->dst_port == vpending->local_addr.svm_port) { |
480 | pending = sk_vsock(vpending); | 477 | pending = sk_vsock(vpending); |
481 | sock_hold(pending); | 478 | sock_hold(pending); |
482 | goto found; | 479 | goto found; |
@@ -739,10 +736,15 @@ static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg) | |||
739 | */ | 736 | */ |
740 | bh_lock_sock(sk); | 737 | bh_lock_sock(sk); |
741 | 738 | ||
742 | if (!sock_owned_by_user(sk) && sk->sk_state == SS_CONNECTED) | 739 | if (!sock_owned_by_user(sk)) { |
743 | vmci_trans(vsk)->notify_ops->handle_notify_pkt( | 740 | /* The local context ID may be out of date, update it. */ |
744 | sk, pkt, true, &dst, &src, | 741 | vsk->local_addr.svm_cid = dst.svm_cid; |
745 | &bh_process_pkt); | 742 | |
743 | if (sk->sk_state == SS_CONNECTED) | ||
744 | vmci_trans(vsk)->notify_ops->handle_notify_pkt( | ||
745 | sk, pkt, true, &dst, &src, | ||
746 | &bh_process_pkt); | ||
747 | } | ||
746 | 748 | ||
747 | bh_unlock_sock(sk); | 749 | bh_unlock_sock(sk); |
748 | 750 | ||
@@ -902,6 +904,9 @@ static void vmci_transport_recv_pkt_work(struct work_struct *work) | |||
902 | 904 | ||
903 | lock_sock(sk); | 905 | lock_sock(sk); |
904 | 906 | ||
907 | /* The local context ID may be out of date. */ | ||
908 | vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context; | ||
909 | |||
905 | switch (sk->sk_state) { | 910 | switch (sk->sk_state) { |
906 | case SS_LISTEN: | 911 | case SS_LISTEN: |
907 | vmci_transport_recv_listen(sk, pkt); | 912 | vmci_transport_recv_listen(sk, pkt); |
@@ -958,6 +963,10 @@ static int vmci_transport_recv_listen(struct sock *sk, | |||
958 | pending = vmci_transport_get_pending(sk, pkt); | 963 | pending = vmci_transport_get_pending(sk, pkt); |
959 | if (pending) { | 964 | if (pending) { |
960 | lock_sock(pending); | 965 | lock_sock(pending); |
966 | |||
967 | /* The local context ID may be out of date. */ | ||
968 | vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context; | ||
969 | |||
961 | switch (pending->sk_state) { | 970 | switch (pending->sk_state) { |
962 | case SS_CONNECTING: | 971 | case SS_CONNECTING: |
963 | err = vmci_transport_recv_connecting_server(sk, | 972 | err = vmci_transport_recv_connecting_server(sk, |
diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c index b7df1aea7c59..ec2611b4ea0e 100644 --- a/net/vmw_vsock/vsock_addr.c +++ b/net/vmw_vsock/vsock_addr.c | |||
@@ -64,16 +64,6 @@ bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, | |||
64 | } | 64 | } |
65 | EXPORT_SYMBOL_GPL(vsock_addr_equals_addr); | 65 | EXPORT_SYMBOL_GPL(vsock_addr_equals_addr); |
66 | 66 | ||
67 | bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr, | ||
68 | const struct sockaddr_vm *other) | ||
69 | { | ||
70 | return (addr->svm_cid == VMADDR_CID_ANY || | ||
71 | other->svm_cid == VMADDR_CID_ANY || | ||
72 | addr->svm_cid == other->svm_cid) && | ||
73 | addr->svm_port == other->svm_port; | ||
74 | } | ||
75 | EXPORT_SYMBOL_GPL(vsock_addr_equals_addr_any); | ||
76 | |||
77 | int vsock_addr_cast(const struct sockaddr *addr, | 67 | int vsock_addr_cast(const struct sockaddr *addr, |
78 | size_t len, struct sockaddr_vm **out_addr) | 68 | size_t len, struct sockaddr_vm **out_addr) |
79 | { | 69 | { |
diff --git a/net/vmw_vsock/vsock_addr.h b/net/vmw_vsock/vsock_addr.h index cdfbcefdf843..9ccd5316eac0 100644 --- a/net/vmw_vsock/vsock_addr.h +++ b/net/vmw_vsock/vsock_addr.h | |||
@@ -24,8 +24,6 @@ bool vsock_addr_bound(const struct sockaddr_vm *addr); | |||
24 | void vsock_addr_unbind(struct sockaddr_vm *addr); | 24 | void vsock_addr_unbind(struct sockaddr_vm *addr); |
25 | bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, | 25 | bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, |
26 | const struct sockaddr_vm *other); | 26 | const struct sockaddr_vm *other); |
27 | bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr, | ||
28 | const struct sockaddr_vm *other); | ||
29 | int vsock_addr_cast(const struct sockaddr *addr, size_t len, | 27 | int vsock_addr_cast(const struct sockaddr *addr, size_t len, |
30 | struct sockaddr_vm **out_addr); | 28 | struct sockaddr_vm **out_addr); |
31 | 29 | ||
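
Across the vmw_vsock files the helper vsock_addr_equals_addr_any() is dropped and lookups compare only svm_port on the local side, while the transport refreshes local_addr.svm_cid from each incoming packet ("the local context ID may be out of date"). A hedged standalone sketch of that matching rule, using simplified stand-in types rather than the kernel's struct sockaddr_vm:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vm_addr {
	uint32_t cid;	/* context ID: can go stale underneath the socket */
	uint32_t port;	/* stable key actually used for the lookup */
};

static bool local_addr_matches(const struct vm_addr *pkt_dst,
			       const struct vm_addr *sock_local)
{
	return pkt_dst->port == sock_local->port;	/* CID intentionally ignored */
}

int main(void)
{
	struct vm_addr dst = { .cid = 7, .port = 1024 };
	struct vm_addr local = { .cid = 3 /* stale */, .port = 1024 };

	printf("match: %d\n", local_addr_matches(&dst, &local));	/* 1 */
	return 0;
}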
diff --git a/net/wireless/core.c b/net/wireless/core.c index ea4155fe9733..6ddf74f0ae1e 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -212,6 +212,39 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data) | |||
212 | rdev_rfkill_poll(rdev); | 212 | rdev_rfkill_poll(rdev); |
213 | } | 213 | } |
214 | 214 | ||
215 | void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, | ||
216 | struct wireless_dev *wdev) | ||
217 | { | ||
218 | lockdep_assert_held(&rdev->devlist_mtx); | ||
219 | lockdep_assert_held(&rdev->sched_scan_mtx); | ||
220 | |||
221 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)) | ||
222 | return; | ||
223 | |||
224 | if (!wdev->p2p_started) | ||
225 | return; | ||
226 | |||
227 | rdev_stop_p2p_device(rdev, wdev); | ||
228 | wdev->p2p_started = false; | ||
229 | |||
230 | rdev->opencount--; | ||
231 | |||
232 | if (rdev->scan_req && rdev->scan_req->wdev == wdev) { | ||
233 | bool busy = work_busy(&rdev->scan_done_wk); | ||
234 | |||
235 | /* | ||
236 | * If the work isn't pending or running (in which case it would | ||
237 | * be waiting for the lock we hold) the driver didn't properly | ||
238 | * cancel the scan when the interface was removed. In this case | ||
239 | * warn and leak the scan request object to not crash later. | ||
240 | */ | ||
241 | WARN_ON(!busy); | ||
242 | |||
243 | rdev->scan_req->aborted = true; | ||
244 | ___cfg80211_scan_done(rdev, !busy); | ||
245 | } | ||
246 | } | ||
247 | |||
215 | static int cfg80211_rfkill_set_block(void *data, bool blocked) | 248 | static int cfg80211_rfkill_set_block(void *data, bool blocked) |
216 | { | 249 | { |
217 | struct cfg80211_registered_device *rdev = data; | 250 | struct cfg80211_registered_device *rdev = data; |
@@ -221,7 +254,8 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked) | |||
221 | return 0; | 254 | return 0; |
222 | 255 | ||
223 | rtnl_lock(); | 256 | rtnl_lock(); |
224 | mutex_lock(&rdev->devlist_mtx); | 257 | |
258 | /* read-only iteration need not hold the devlist_mtx */ | ||
225 | 259 | ||
226 | list_for_each_entry(wdev, &rdev->wdev_list, list) { | 260 | list_for_each_entry(wdev, &rdev->wdev_list, list) { |
227 | if (wdev->netdev) { | 261 | if (wdev->netdev) { |
@@ -231,18 +265,18 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked) | |||
231 | /* otherwise, check iftype */ | 265 | /* otherwise, check iftype */ |
232 | switch (wdev->iftype) { | 266 | switch (wdev->iftype) { |
233 | case NL80211_IFTYPE_P2P_DEVICE: | 267 | case NL80211_IFTYPE_P2P_DEVICE: |
234 | if (!wdev->p2p_started) | 268 | /* but this requires it */ |
235 | break; | 269 | mutex_lock(&rdev->devlist_mtx); |
236 | rdev_stop_p2p_device(rdev, wdev); | 270 | mutex_lock(&rdev->sched_scan_mtx); |
237 | wdev->p2p_started = false; | 271 | cfg80211_stop_p2p_device(rdev, wdev); |
238 | rdev->opencount--; | 272 | mutex_unlock(&rdev->sched_scan_mtx); |
273 | mutex_unlock(&rdev->devlist_mtx); | ||
239 | break; | 274 | break; |
240 | default: | 275 | default: |
241 | break; | 276 | break; |
242 | } | 277 | } |
243 | } | 278 | } |
244 | 279 | ||
245 | mutex_unlock(&rdev->devlist_mtx); | ||
246 | rtnl_unlock(); | 280 | rtnl_unlock(); |
247 | 281 | ||
248 | return 0; | 282 | return 0; |
@@ -745,17 +779,13 @@ static void wdev_cleanup_work(struct work_struct *work) | |||
745 | wdev = container_of(work, struct wireless_dev, cleanup_work); | 779 | wdev = container_of(work, struct wireless_dev, cleanup_work); |
746 | rdev = wiphy_to_dev(wdev->wiphy); | 780 | rdev = wiphy_to_dev(wdev->wiphy); |
747 | 781 | ||
748 | cfg80211_lock_rdev(rdev); | 782 | mutex_lock(&rdev->sched_scan_mtx); |
749 | 783 | ||
750 | if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { | 784 | if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { |
751 | rdev->scan_req->aborted = true; | 785 | rdev->scan_req->aborted = true; |
752 | ___cfg80211_scan_done(rdev, true); | 786 | ___cfg80211_scan_done(rdev, true); |
753 | } | 787 | } |
754 | 788 | ||
755 | cfg80211_unlock_rdev(rdev); | ||
756 | |||
757 | mutex_lock(&rdev->sched_scan_mtx); | ||
758 | |||
759 | if (WARN_ON(rdev->sched_scan_req && | 789 | if (WARN_ON(rdev->sched_scan_req && |
760 | rdev->sched_scan_req->dev == wdev->netdev)) { | 790 | rdev->sched_scan_req->dev == wdev->netdev)) { |
761 | __cfg80211_stop_sched_scan(rdev, false); | 791 | __cfg80211_stop_sched_scan(rdev, false); |
@@ -781,21 +811,19 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev) | |||
781 | return; | 811 | return; |
782 | 812 | ||
783 | mutex_lock(&rdev->devlist_mtx); | 813 | mutex_lock(&rdev->devlist_mtx); |
814 | mutex_lock(&rdev->sched_scan_mtx); | ||
784 | list_del_rcu(&wdev->list); | 815 | list_del_rcu(&wdev->list); |
785 | rdev->devlist_generation++; | 816 | rdev->devlist_generation++; |
786 | 817 | ||
787 | switch (wdev->iftype) { | 818 | switch (wdev->iftype) { |
788 | case NL80211_IFTYPE_P2P_DEVICE: | 819 | case NL80211_IFTYPE_P2P_DEVICE: |
789 | if (!wdev->p2p_started) | 820 | cfg80211_stop_p2p_device(rdev, wdev); |
790 | break; | ||
791 | rdev_stop_p2p_device(rdev, wdev); | ||
792 | wdev->p2p_started = false; | ||
793 | rdev->opencount--; | ||
794 | break; | 821 | break; |
795 | default: | 822 | default: |
796 | WARN_ON_ONCE(1); | 823 | WARN_ON_ONCE(1); |
797 | break; | 824 | break; |
798 | } | 825 | } |
826 | mutex_unlock(&rdev->sched_scan_mtx); | ||
799 | mutex_unlock(&rdev->devlist_mtx); | 827 | mutex_unlock(&rdev->devlist_mtx); |
800 | } | 828 | } |
801 | EXPORT_SYMBOL(cfg80211_unregister_wdev); | 829 | EXPORT_SYMBOL(cfg80211_unregister_wdev); |
@@ -936,6 +964,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, | |||
936 | cfg80211_update_iface_num(rdev, wdev->iftype, 1); | 964 | cfg80211_update_iface_num(rdev, wdev->iftype, 1); |
937 | cfg80211_lock_rdev(rdev); | 965 | cfg80211_lock_rdev(rdev); |
938 | mutex_lock(&rdev->devlist_mtx); | 966 | mutex_lock(&rdev->devlist_mtx); |
967 | mutex_lock(&rdev->sched_scan_mtx); | ||
939 | wdev_lock(wdev); | 968 | wdev_lock(wdev); |
940 | switch (wdev->iftype) { | 969 | switch (wdev->iftype) { |
941 | #ifdef CONFIG_CFG80211_WEXT | 970 | #ifdef CONFIG_CFG80211_WEXT |
@@ -967,6 +996,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, | |||
967 | break; | 996 | break; |
968 | } | 997 | } |
969 | wdev_unlock(wdev); | 998 | wdev_unlock(wdev); |
999 | mutex_unlock(&rdev->sched_scan_mtx); | ||
970 | rdev->opencount++; | 1000 | rdev->opencount++; |
971 | mutex_unlock(&rdev->devlist_mtx); | 1001 | mutex_unlock(&rdev->devlist_mtx); |
972 | cfg80211_unlock_rdev(rdev); | 1002 | cfg80211_unlock_rdev(rdev); |
diff --git a/net/wireless/core.h b/net/wireless/core.h index 3aec0e429d8a..5845c2b37aa8 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -503,6 +503,9 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, | |||
503 | void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, | 503 | void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, |
504 | enum nl80211_iftype iftype, int num); | 504 | enum nl80211_iftype iftype, int num); |
505 | 505 | ||
506 | void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, | ||
507 | struct wireless_dev *wdev); | ||
508 | |||
506 | #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 | 509 | #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 |
507 | 510 | ||
508 | #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS | 511 | #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d44ab216c0ec..58e13a8c95f9 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -4702,14 +4702,19 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
4702 | if (!rdev->ops->scan) | 4702 | if (!rdev->ops->scan) |
4703 | return -EOPNOTSUPP; | 4703 | return -EOPNOTSUPP; |
4704 | 4704 | ||
4705 | if (rdev->scan_req) | 4705 | mutex_lock(&rdev->sched_scan_mtx); |
4706 | return -EBUSY; | 4706 | if (rdev->scan_req) { |
4707 | err = -EBUSY; | ||
4708 | goto unlock; | ||
4709 | } | ||
4707 | 4710 | ||
4708 | if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { | 4711 | if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { |
4709 | n_channels = validate_scan_freqs( | 4712 | n_channels = validate_scan_freqs( |
4710 | info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); | 4713 | info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); |
4711 | if (!n_channels) | 4714 | if (!n_channels) { |
4712 | return -EINVAL; | 4715 | err = -EINVAL; |
4716 | goto unlock; | ||
4717 | } | ||
4713 | } else { | 4718 | } else { |
4714 | enum ieee80211_band band; | 4719 | enum ieee80211_band band; |
4715 | n_channels = 0; | 4720 | n_channels = 0; |
@@ -4723,23 +4728,29 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
4723 | nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) | 4728 | nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) |
4724 | n_ssids++; | 4729 | n_ssids++; |
4725 | 4730 | ||
4726 | if (n_ssids > wiphy->max_scan_ssids) | 4731 | if (n_ssids > wiphy->max_scan_ssids) { |
4727 | return -EINVAL; | 4732 | err = -EINVAL; |
4733 | goto unlock; | ||
4734 | } | ||
4728 | 4735 | ||
4729 | if (info->attrs[NL80211_ATTR_IE]) | 4736 | if (info->attrs[NL80211_ATTR_IE]) |
4730 | ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); | 4737 | ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); |
4731 | else | 4738 | else |
4732 | ie_len = 0; | 4739 | ie_len = 0; |
4733 | 4740 | ||
4734 | if (ie_len > wiphy->max_scan_ie_len) | 4741 | if (ie_len > wiphy->max_scan_ie_len) { |
4735 | return -EINVAL; | 4742 | err = -EINVAL; |
4743 | goto unlock; | ||
4744 | } | ||
4736 | 4745 | ||
4737 | request = kzalloc(sizeof(*request) | 4746 | request = kzalloc(sizeof(*request) |
4738 | + sizeof(*request->ssids) * n_ssids | 4747 | + sizeof(*request->ssids) * n_ssids |
4739 | + sizeof(*request->channels) * n_channels | 4748 | + sizeof(*request->channels) * n_channels |
4740 | + ie_len, GFP_KERNEL); | 4749 | + ie_len, GFP_KERNEL); |
4741 | if (!request) | 4750 | if (!request) { |
4742 | return -ENOMEM; | 4751 | err = -ENOMEM; |
4752 | goto unlock; | ||
4753 | } | ||
4743 | 4754 | ||
4744 | if (n_ssids) | 4755 | if (n_ssids) |
4745 | request->ssids = (void *)&request->channels[n_channels]; | 4756 | request->ssids = (void *)&request->channels[n_channels]; |
@@ -4876,6 +4887,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
4876 | kfree(request); | 4887 | kfree(request); |
4877 | } | 4888 | } |
4878 | 4889 | ||
4890 | unlock: | ||
4891 | mutex_unlock(&rdev->sched_scan_mtx); | ||
4879 | return err; | 4892 | return err; |
4880 | } | 4893 | } |
4881 | 4894 | ||
@@ -7749,20 +7762,9 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info) | |||
7749 | if (!rdev->ops->stop_p2p_device) | 7762 | if (!rdev->ops->stop_p2p_device) |
7750 | return -EOPNOTSUPP; | 7763 | return -EOPNOTSUPP; |
7751 | 7764 | ||
7752 | if (!wdev->p2p_started) | 7765 | mutex_lock(&rdev->sched_scan_mtx); |
7753 | return 0; | 7766 | cfg80211_stop_p2p_device(rdev, wdev); |
7754 | 7767 | mutex_unlock(&rdev->sched_scan_mtx); | |
7755 | rdev_stop_p2p_device(rdev, wdev); | ||
7756 | wdev->p2p_started = false; | ||
7757 | |||
7758 | mutex_lock(&rdev->devlist_mtx); | ||
7759 | rdev->opencount--; | ||
7760 | mutex_unlock(&rdev->devlist_mtx); | ||
7761 | |||
7762 | if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { | ||
7763 | rdev->scan_req->aborted = true; | ||
7764 | ___cfg80211_scan_done(rdev, true); | ||
7765 | } | ||
7766 | 7768 | ||
7767 | return 0; | 7769 | return 0; |
7768 | } | 7770 | } |
@@ -8486,7 +8488,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg, | |||
8486 | struct nlattr *nest; | 8488 | struct nlattr *nest; |
8487 | int i; | 8489 | int i; |
8488 | 8490 | ||
8489 | ASSERT_RDEV_LOCK(rdev); | 8491 | lockdep_assert_held(&rdev->sched_scan_mtx); |
8490 | 8492 | ||
8491 | if (WARN_ON(!req)) | 8493 | if (WARN_ON(!req)) |
8492 | return 0; | 8494 | return 0; |
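
The nl80211_trigger_scan() hunks take rdev->sched_scan_mtx up front and convert every early return into goto unlock, so the mutex has a single release point. A standalone sketch of that error-path shape, with a pthread mutex standing in for sched_scan_mtx and the function below being illustrative only:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t scan_mtx = PTHREAD_MUTEX_INITIALIZER;
static int scan_busy;

static int trigger_scan(int n_channels)
{
	int err = 0;

	pthread_mutex_lock(&scan_mtx);

	if (scan_busy) {
		err = -16;		/* -EBUSY */
		goto unlock;
	}
	if (n_channels == 0) {
		err = -22;		/* -EINVAL */
		goto unlock;
	}

	scan_busy = 1;			/* ... the scan would start here ... */

unlock:
	pthread_mutex_unlock(&scan_mtx);
	return err;
}

int main(void)
{
	printf("%d\n", trigger_scan(0));	/* -22 */
	printf("%d\n", trigger_scan(4));	/* 0 */
	printf("%d\n", trigger_scan(4));	/* -16 */
	return 0;
}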
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 674aadca0079..fd99ea495b7e 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -169,7 +169,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) | |||
169 | union iwreq_data wrqu; | 169 | union iwreq_data wrqu; |
170 | #endif | 170 | #endif |
171 | 171 | ||
172 | ASSERT_RDEV_LOCK(rdev); | 172 | lockdep_assert_held(&rdev->sched_scan_mtx); |
173 | 173 | ||
174 | request = rdev->scan_req; | 174 | request = rdev->scan_req; |
175 | 175 | ||
@@ -230,9 +230,9 @@ void __cfg80211_scan_done(struct work_struct *wk) | |||
230 | rdev = container_of(wk, struct cfg80211_registered_device, | 230 | rdev = container_of(wk, struct cfg80211_registered_device, |
231 | scan_done_wk); | 231 | scan_done_wk); |
232 | 232 | ||
233 | cfg80211_lock_rdev(rdev); | 233 | mutex_lock(&rdev->sched_scan_mtx); |
234 | ___cfg80211_scan_done(rdev, false); | 234 | ___cfg80211_scan_done(rdev, false); |
235 | cfg80211_unlock_rdev(rdev); | 235 | mutex_unlock(&rdev->sched_scan_mtx); |
236 | } | 236 | } |
237 | 237 | ||
238 | void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) | 238 | void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) |
@@ -698,11 +698,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
698 | found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR); | 698 | found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR); |
699 | 699 | ||
700 | if (found) { | 700 | if (found) { |
701 | found->pub.beacon_interval = tmp->pub.beacon_interval; | ||
702 | found->pub.signal = tmp->pub.signal; | ||
703 | found->pub.capability = tmp->pub.capability; | ||
704 | found->ts = tmp->ts; | ||
705 | |||
706 | /* Update IEs */ | 701 | /* Update IEs */ |
707 | if (rcu_access_pointer(tmp->pub.proberesp_ies)) { | 702 | if (rcu_access_pointer(tmp->pub.proberesp_ies)) { |
708 | const struct cfg80211_bss_ies *old; | 703 | const struct cfg80211_bss_ies *old; |
@@ -723,6 +718,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
723 | 718 | ||
724 | if (found->pub.hidden_beacon_bss && | 719 | if (found->pub.hidden_beacon_bss && |
725 | !list_empty(&found->hidden_list)) { | 720 | !list_empty(&found->hidden_list)) { |
721 | const struct cfg80211_bss_ies *f; | ||
722 | |||
726 | /* | 723 | /* |
727 | * The found BSS struct is one of the probe | 724 | * The found BSS struct is one of the probe |
728 | * response members of a group, but we're | 725 | * response members of a group, but we're |
@@ -732,6 +729,10 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
732 | * SSID to showing it, which is confusing so | 729 | * SSID to showing it, which is confusing so |
733 | * drop this information. | 730 | * drop this information. |
734 | */ | 731 | */ |
732 | |||
733 | f = rcu_access_pointer(tmp->pub.beacon_ies); | ||
734 | kfree_rcu((struct cfg80211_bss_ies *)f, | ||
735 | rcu_head); | ||
735 | goto drop; | 736 | goto drop; |
736 | } | 737 | } |
737 | 738 | ||
@@ -761,6 +762,11 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
761 | kfree_rcu((struct cfg80211_bss_ies *)old, | 762 | kfree_rcu((struct cfg80211_bss_ies *)old, |
762 | rcu_head); | 763 | rcu_head); |
763 | } | 764 | } |
765 | |||
766 | found->pub.beacon_interval = tmp->pub.beacon_interval; | ||
767 | found->pub.signal = tmp->pub.signal; | ||
768 | found->pub.capability = tmp->pub.capability; | ||
769 | found->ts = tmp->ts; | ||
764 | } else { | 770 | } else { |
765 | struct cfg80211_internal_bss *new; | 771 | struct cfg80211_internal_bss *new; |
766 | struct cfg80211_internal_bss *hidden; | 772 | struct cfg80211_internal_bss *hidden; |
@@ -1056,6 +1062,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, | |||
1056 | if (IS_ERR(rdev)) | 1062 | if (IS_ERR(rdev)) |
1057 | return PTR_ERR(rdev); | 1063 | return PTR_ERR(rdev); |
1058 | 1064 | ||
1065 | mutex_lock(&rdev->sched_scan_mtx); | ||
1059 | if (rdev->scan_req) { | 1066 | if (rdev->scan_req) { |
1060 | err = -EBUSY; | 1067 | err = -EBUSY; |
1061 | goto out; | 1068 | goto out; |
@@ -1162,6 +1169,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, | |||
1162 | dev_hold(dev); | 1169 | dev_hold(dev); |
1163 | } | 1170 | } |
1164 | out: | 1171 | out: |
1172 | mutex_unlock(&rdev->sched_scan_mtx); | ||
1165 | kfree(creq); | 1173 | kfree(creq); |
1166 | cfg80211_unlock_rdev(rdev); | 1174 | cfg80211_unlock_rdev(rdev); |
1167 | return err; | 1175 | return err; |
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index f432bd3755b1..09d994d192ff 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -85,6 +85,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) | |||
85 | ASSERT_RTNL(); | 85 | ASSERT_RTNL(); |
86 | ASSERT_RDEV_LOCK(rdev); | 86 | ASSERT_RDEV_LOCK(rdev); |
87 | ASSERT_WDEV_LOCK(wdev); | 87 | ASSERT_WDEV_LOCK(wdev); |
88 | lockdep_assert_held(&rdev->sched_scan_mtx); | ||
88 | 89 | ||
89 | if (rdev->scan_req) | 90 | if (rdev->scan_req) |
90 | return -EBUSY; | 91 | return -EBUSY; |
@@ -320,11 +321,9 @@ void cfg80211_sme_scan_done(struct net_device *dev) | |||
320 | { | 321 | { |
321 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 322 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
322 | 323 | ||
323 | mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); | ||
324 | wdev_lock(wdev); | 324 | wdev_lock(wdev); |
325 | __cfg80211_sme_scan_done(dev); | 325 | __cfg80211_sme_scan_done(dev); |
326 | wdev_unlock(wdev); | 326 | wdev_unlock(wdev); |
327 | mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); | ||
328 | } | 327 | } |
329 | 328 | ||
330 | void cfg80211_sme_rx_auth(struct net_device *dev, | 329 | void cfg80211_sme_rx_auth(struct net_device *dev, |
@@ -924,9 +923,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, | |||
924 | int err; | 923 | int err; |
925 | 924 | ||
926 | mutex_lock(&rdev->devlist_mtx); | 925 | mutex_lock(&rdev->devlist_mtx); |
926 | /* might request scan - scan_mtx -> wdev_mtx dependency */ | ||
927 | mutex_lock(&rdev->sched_scan_mtx); | ||
927 | wdev_lock(dev->ieee80211_ptr); | 928 | wdev_lock(dev->ieee80211_ptr); |
928 | err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL); | 929 | err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL); |
929 | wdev_unlock(dev->ieee80211_ptr); | 930 | wdev_unlock(dev->ieee80211_ptr); |
931 | mutex_unlock(&rdev->sched_scan_mtx); | ||
930 | mutex_unlock(&rdev->devlist_mtx); | 932 | mutex_unlock(&rdev->devlist_mtx); |
931 | 933 | ||
932 | return err; | 934 | return err; |
diff --git a/net/wireless/trace.h b/net/wireless/trace.h index b7a531380e19..7586de77a2f8 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h | |||
@@ -27,7 +27,8 @@ | |||
27 | #define WIPHY_PR_ARG __entry->wiphy_name | 27 | #define WIPHY_PR_ARG __entry->wiphy_name |
28 | 28 | ||
29 | #define WDEV_ENTRY __field(u32, id) | 29 | #define WDEV_ENTRY __field(u32, id) |
30 | #define WDEV_ASSIGN (__entry->id) = (wdev ? wdev->identifier : 0) | 30 | #define WDEV_ASSIGN (__entry->id) = (!IS_ERR_OR_NULL(wdev) \ |
31 | ? wdev->identifier : 0) | ||
31 | #define WDEV_PR_FMT "wdev(%u)" | 32 | #define WDEV_PR_FMT "wdev(%u)" |
32 | #define WDEV_PR_ARG (__entry->id) | 33 | #define WDEV_PR_ARG (__entry->id) |
33 | 34 | ||
@@ -1778,7 +1779,7 @@ TRACE_EVENT(rdev_set_mac_acl, | |||
1778 | ), | 1779 | ), |
1779 | TP_fast_assign( | 1780 | TP_fast_assign( |
1780 | WIPHY_ASSIGN; | 1781 | WIPHY_ASSIGN; |
1781 | WIPHY_ASSIGN; | 1782 | NETDEV_ASSIGN; |
1782 | __entry->acl_policy = params->acl_policy; | 1783 | __entry->acl_policy = params->acl_policy; |
1783 | ), | 1784 | ), |
1784 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d", | 1785 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d", |
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index fb9622f6d99c..e79cb5c0655a 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c | |||
@@ -89,6 +89,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, | |||
89 | 89 | ||
90 | cfg80211_lock_rdev(rdev); | 90 | cfg80211_lock_rdev(rdev); |
91 | mutex_lock(&rdev->devlist_mtx); | 91 | mutex_lock(&rdev->devlist_mtx); |
92 | mutex_lock(&rdev->sched_scan_mtx); | ||
92 | wdev_lock(wdev); | 93 | wdev_lock(wdev); |
93 | 94 | ||
94 | if (wdev->sme_state != CFG80211_SME_IDLE) { | 95 | if (wdev->sme_state != CFG80211_SME_IDLE) { |
@@ -135,6 +136,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, | |||
135 | err = cfg80211_mgd_wext_connect(rdev, wdev); | 136 | err = cfg80211_mgd_wext_connect(rdev, wdev); |
136 | out: | 137 | out: |
137 | wdev_unlock(wdev); | 138 | wdev_unlock(wdev); |
139 | mutex_unlock(&rdev->sched_scan_mtx); | ||
138 | mutex_unlock(&rdev->devlist_mtx); | 140 | mutex_unlock(&rdev->devlist_mtx); |
139 | cfg80211_unlock_rdev(rdev); | 141 | cfg80211_unlock_rdev(rdev); |
140 | return err; | 142 | return err; |
@@ -190,6 +192,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev, | |||
190 | 192 | ||
191 | cfg80211_lock_rdev(rdev); | 193 | cfg80211_lock_rdev(rdev); |
192 | mutex_lock(&rdev->devlist_mtx); | 194 | mutex_lock(&rdev->devlist_mtx); |
195 | mutex_lock(&rdev->sched_scan_mtx); | ||
193 | wdev_lock(wdev); | 196 | wdev_lock(wdev); |
194 | 197 | ||
195 | err = 0; | 198 | err = 0; |
@@ -223,6 +226,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev, | |||
223 | err = cfg80211_mgd_wext_connect(rdev, wdev); | 226 | err = cfg80211_mgd_wext_connect(rdev, wdev); |
224 | out: | 227 | out: |
225 | wdev_unlock(wdev); | 228 | wdev_unlock(wdev); |
229 | mutex_unlock(&rdev->sched_scan_mtx); | ||
226 | mutex_unlock(&rdev->devlist_mtx); | 230 | mutex_unlock(&rdev->devlist_mtx); |
227 | cfg80211_unlock_rdev(rdev); | 231 | cfg80211_unlock_rdev(rdev); |
228 | return err; | 232 | return err; |
@@ -285,6 +289,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, | |||
285 | 289 | ||
286 | cfg80211_lock_rdev(rdev); | 290 | cfg80211_lock_rdev(rdev); |
287 | mutex_lock(&rdev->devlist_mtx); | 291 | mutex_lock(&rdev->devlist_mtx); |
292 | mutex_lock(&rdev->sched_scan_mtx); | ||
288 | wdev_lock(wdev); | 293 | wdev_lock(wdev); |
289 | 294 | ||
290 | if (wdev->sme_state != CFG80211_SME_IDLE) { | 295 | if (wdev->sme_state != CFG80211_SME_IDLE) { |
@@ -313,6 +318,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, | |||
313 | err = cfg80211_mgd_wext_connect(rdev, wdev); | 318 | err = cfg80211_mgd_wext_connect(rdev, wdev); |
314 | out: | 319 | out: |
315 | wdev_unlock(wdev); | 320 | wdev_unlock(wdev); |
321 | mutex_unlock(&rdev->sched_scan_mtx); | ||
316 | mutex_unlock(&rdev->devlist_mtx); | 322 | mutex_unlock(&rdev->devlist_mtx); |
317 | cfg80211_unlock_rdev(rdev); | 323 | cfg80211_unlock_rdev(rdev); |
318 | return err; | 324 | return err; |
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 35754cc8a9e5..8dafe6d3c6e4 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c | |||
@@ -334,6 +334,70 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event) | |||
334 | x->xflags &= ~XFRM_TIME_DEFER; | 334 | x->xflags &= ~XFRM_TIME_DEFER; |
335 | } | 335 | } |
336 | 336 | ||
337 | static void xfrm_replay_notify_esn(struct xfrm_state *x, int event) | ||
338 | { | ||
339 | u32 seq_diff, oseq_diff; | ||
340 | struct km_event c; | ||
341 | struct xfrm_replay_state_esn *replay_esn = x->replay_esn; | ||
342 | struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn; | ||
343 | |||
344 | /* we send notify messages in case | ||
345 | * 1. we updated one of the sequence numbers, and the seqno difference | ||
346 | * is at least x->replay_maxdiff, in this case we also update the | ||
347 | * timeout of our timer function | ||
348 | * 2. if x->replay_maxage has elapsed since last update, | ||
349 | * and there were changes | ||
350 | * | ||
351 | * The state structure must be locked! | ||
352 | */ | ||
353 | |||
354 | switch (event) { | ||
355 | case XFRM_REPLAY_UPDATE: | ||
356 | if (!x->replay_maxdiff) | ||
357 | break; | ||
358 | |||
359 | if (replay_esn->seq_hi == preplay_esn->seq_hi) | ||
360 | seq_diff = replay_esn->seq - preplay_esn->seq; | ||
361 | else | ||
362 | seq_diff = ~preplay_esn->seq + replay_esn->seq + 1; | ||
363 | |||
364 | if (replay_esn->oseq_hi == preplay_esn->oseq_hi) | ||
365 | oseq_diff = replay_esn->oseq - preplay_esn->oseq; | ||
366 | else | ||
367 | oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1; | ||
368 | |||
369 | if (seq_diff < x->replay_maxdiff && | ||
370 | oseq_diff < x->replay_maxdiff) { | ||
371 | |||
372 | if (x->xflags & XFRM_TIME_DEFER) | ||
373 | event = XFRM_REPLAY_TIMEOUT; | ||
374 | else | ||
375 | return; | ||
376 | } | ||
377 | |||
378 | break; | ||
379 | |||
380 | case XFRM_REPLAY_TIMEOUT: | ||
381 | if (memcmp(x->replay_esn, x->preplay_esn, | ||
382 | xfrm_replay_state_esn_len(replay_esn)) == 0) { | ||
383 | x->xflags |= XFRM_TIME_DEFER; | ||
384 | return; | ||
385 | } | ||
386 | |||
387 | break; | ||
388 | } | ||
389 | |||
390 | memcpy(x->preplay_esn, x->replay_esn, | ||
391 | xfrm_replay_state_esn_len(replay_esn)); | ||
392 | c.event = XFRM_MSG_NEWAE; | ||
393 | c.data.aevent = event; | ||
394 | km_state_notify(x, &c); | ||
395 | |||
396 | if (x->replay_maxage && | ||
397 | !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) | ||
398 | x->xflags &= ~XFRM_TIME_DEFER; | ||
399 | } | ||
400 | |||
337 | static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb) | 401 | static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb) |
338 | { | 402 | { |
339 | int err = 0; | 403 | int err = 0; |
@@ -510,7 +574,7 @@ static struct xfrm_replay xfrm_replay_esn = { | |||
510 | .advance = xfrm_replay_advance_esn, | 574 | .advance = xfrm_replay_advance_esn, |
511 | .check = xfrm_replay_check_esn, | 575 | .check = xfrm_replay_check_esn, |
512 | .recheck = xfrm_replay_recheck_esn, | 576 | .recheck = xfrm_replay_recheck_esn, |
513 | .notify = xfrm_replay_notify_bmp, | 577 | .notify = xfrm_replay_notify_esn, |
514 | .overflow = xfrm_replay_overflow_esn, | 578 | .overflow = xfrm_replay_overflow_esn, |
515 | }; | 579 | }; |
516 | 580 | ||
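
The new xfrm_replay_notify_esn() computes sequence deltas with explicit wraparound handling: when the high word advanced, the distance from the old to the new 32-bit counter is ~old + new + 1. A standalone arithmetic check of that expression (identifiers are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t seq_delta(uint32_t old_seq, uint32_t new_seq, int wrapped)
{
	/* ~old + new + 1 == (2^32 - old) + new when the counter wrapped */
	return wrapped ? ~old_seq + new_seq + 1 : new_seq - old_seq;
}

int main(void)
{
	/* old = 0xfffffff0, 32 packets later the counter has wrapped to 0x10 */
	printf("delta = %u\n", seq_delta(0xfffffff0u, 0x10u, 1));	/* 32 */
	return 0;
}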