about summary refs log tree commit diff stats
path: root/net/core
diff options
context:
space:
mode:
Diffstat (limited to 'net/core')
-rw-r--r--net/core/dev.c52
-rw-r--r--net/core/filter.c8
-rw-r--r--net/core/flow_dissector.c4
-rw-r--r--net/core/secure_seq.c29
-rw-r--r--net/core/sock.c1
5 files changed, 83 insertions, 11 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 5c713f2239cc..3430b1ed12e5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1917,7 +1917,8 @@ static struct xps_map *expand_xps_map(struct xps_map *map,
1917 return new_map; 1917 return new_map;
1918} 1918}
1919 1919
1920int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index) 1920int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
1921 u16 index)
1921{ 1922{
1922 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL; 1923 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
1923 struct xps_map *map, *new_map; 1924 struct xps_map *map, *new_map;
@@ -5247,10 +5248,12 @@ static int dev_new_index(struct net *net)
5247 5248
5248/* Delayed registration/unregistration */ 5249/* Delayed registration/unregistration */
5249static LIST_HEAD(net_todo_list); 5250static LIST_HEAD(net_todo_list);
5251static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
5250 5252
5251static void net_set_todo(struct net_device *dev) 5253static void net_set_todo(struct net_device *dev)
5252{ 5254{
5253 list_add_tail(&dev->todo_list, &net_todo_list); 5255 list_add_tail(&dev->todo_list, &net_todo_list);
5256 dev_net(dev)->dev_unreg_count++;
5254} 5257}
5255 5258
5256static void rollback_registered_many(struct list_head *head) 5259static void rollback_registered_many(struct list_head *head)
@@ -5918,6 +5921,12 @@ void netdev_run_todo(void)
5918 if (dev->destructor) 5921 if (dev->destructor)
5919 dev->destructor(dev); 5922 dev->destructor(dev);
5920 5923
5924 /* Report a network device has been unregistered */
5925 rtnl_lock();
5926 dev_net(dev)->dev_unreg_count--;
5927 __rtnl_unlock();
5928 wake_up(&netdev_unregistering_wq);
5929
5921 /* Free network device */ 5930 /* Free network device */
5922 kobject_put(&dev->dev.kobj); 5931 kobject_put(&dev->dev.kobj);
5923 } 5932 }
@@ -6603,6 +6612,34 @@ static void __net_exit default_device_exit(struct net *net)
6603 rtnl_unlock(); 6612 rtnl_unlock();
6604} 6613}
6605 6614
6615static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
6616{
6617 /* Return with the rtnl_lock held when there are no network
6618 * devices unregistering in any network namespace in net_list.
6619 */
6620 struct net *net;
6621 bool unregistering;
6622 DEFINE_WAIT(wait);
6623
6624 for (;;) {
6625 prepare_to_wait(&netdev_unregistering_wq, &wait,
6626 TASK_UNINTERRUPTIBLE);
6627 unregistering = false;
6628 rtnl_lock();
6629 list_for_each_entry(net, net_list, exit_list) {
6630 if (net->dev_unreg_count > 0) {
6631 unregistering = true;
6632 break;
6633 }
6634 }
6635 if (!unregistering)
6636 break;
6637 __rtnl_unlock();
6638 schedule();
6639 }
6640 finish_wait(&netdev_unregistering_wq, &wait);
6641}
6642
6606static void __net_exit default_device_exit_batch(struct list_head *net_list) 6643static void __net_exit default_device_exit_batch(struct list_head *net_list)
6607{ 6644{
6608 /* At exit all network devices must be removed from a network 6645 /* At exit all network devices must be removed from a network
@@ -6614,7 +6651,18 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
6614 struct net *net; 6651 struct net *net;
6615 LIST_HEAD(dev_kill_list); 6652 LIST_HEAD(dev_kill_list);
6616 6653
6617 rtnl_lock(); 6654 /* To prevent network device cleanup code from dereferencing
6655 * loopback devices or network devices that have been freed
6656 * wait here for all pending unregistrations to complete,
6657 * before unregistering the loopback device and allowing the
6658 * network namespace to be freed.
6659 *
6660 * The netdev todo list containing all network devices
6661 * unregistrations that happen in default_device_exit_batch
6662 * will run in the rtnl_unlock() at the end of
6663 * default_device_exit_batch.
6664 */
6665 rtnl_lock_unregistering(net_list);
6618 list_for_each_entry(net, net_list, exit_list) { 6666 list_for_each_entry(net, net_list, exit_list) {
6619 for_each_netdev_reverse(net, dev) { 6667 for_each_netdev_reverse(net, dev) {
6620 if (dev->rtnl_link_ops) 6668 if (dev->rtnl_link_ops)
diff --git a/net/core/filter.c b/net/core/filter.c
index 6438f29ff266..01b780856db2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -644,7 +644,6 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
644 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); 644 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
645 645
646 bpf_jit_free(fp); 646 bpf_jit_free(fp);
647 kfree(fp);
648} 647}
649EXPORT_SYMBOL(sk_filter_release_rcu); 648EXPORT_SYMBOL(sk_filter_release_rcu);
650 649
@@ -683,7 +682,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
683 if (fprog->filter == NULL) 682 if (fprog->filter == NULL)
684 return -EINVAL; 683 return -EINVAL;
685 684
686 fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL); 685 fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
687 if (!fp) 686 if (!fp)
688 return -ENOMEM; 687 return -ENOMEM;
689 memcpy(fp->insns, fprog->filter, fsize); 688 memcpy(fp->insns, fprog->filter, fsize);
@@ -723,6 +722,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
723{ 722{
724 struct sk_filter *fp, *old_fp; 723 struct sk_filter *fp, *old_fp;
725 unsigned int fsize = sizeof(struct sock_filter) * fprog->len; 724 unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
725 unsigned int sk_fsize = sk_filter_size(fprog->len);
726 int err; 726 int err;
727 727
728 if (sock_flag(sk, SOCK_FILTER_LOCKED)) 728 if (sock_flag(sk, SOCK_FILTER_LOCKED))
@@ -732,11 +732,11 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
732 if (fprog->filter == NULL) 732 if (fprog->filter == NULL)
733 return -EINVAL; 733 return -EINVAL;
734 734
735 fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL); 735 fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
736 if (!fp) 736 if (!fp)
737 return -ENOMEM; 737 return -ENOMEM;
738 if (copy_from_user(fp->insns, fprog->filter, fsize)) { 738 if (copy_from_user(fp->insns, fprog->filter, fsize)) {
739 sock_kfree_s(sk, fp, fsize+sizeof(*fp)); 739 sock_kfree_s(sk, fp, sk_fsize);
740 return -EFAULT; 740 return -EFAULT;
741 } 741 }
742 742
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 1929af87b260..8d7d0dd72db2 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -154,8 +154,8 @@ ipv6:
154 if (poff >= 0) { 154 if (poff >= 0) {
155 __be32 *ports, _ports; 155 __be32 *ports, _ports;
156 156
157 nhoff += poff; 157 ports = skb_header_pointer(skb, nhoff + poff,
158 ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports); 158 sizeof(_ports), &_ports);
159 if (ports) 159 if (ports)
160 flow->ports = *ports; 160 flow->ports = *ports;
161 } 161 }
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 6a2f13cee86a..8d9d05edd2eb 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -10,12 +10,27 @@
10 10
11#include <net/secure_seq.h> 11#include <net/secure_seq.h>
12 12
13static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; 13#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET)
14#define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4)
14 15
15void net_secret_init(void) 16static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned;
17
18static void net_secret_init(void)
16{ 19{
17 get_random_bytes(net_secret, sizeof(net_secret)); 20 u32 tmp;
21 int i;
22
23 if (likely(net_secret[0]))
24 return;
25
26 for (i = NET_SECRET_SIZE; i > 0;) {
27 do {
28 get_random_bytes(&tmp, sizeof(tmp));
29 } while (!tmp);
30 cmpxchg(&net_secret[--i], 0, tmp);
31 }
18} 32}
33#endif
19 34
20#ifdef CONFIG_INET 35#ifdef CONFIG_INET
21static u32 seq_scale(u32 seq) 36static u32 seq_scale(u32 seq)
@@ -42,6 +57,7 @@ __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
42 u32 hash[MD5_DIGEST_WORDS]; 57 u32 hash[MD5_DIGEST_WORDS];
43 u32 i; 58 u32 i;
44 59
60 net_secret_init();
45 memcpy(hash, saddr, 16); 61 memcpy(hash, saddr, 16);
46 for (i = 0; i < 4; i++) 62 for (i = 0; i < 4; i++)
47 secret[i] = net_secret[i] + (__force u32)daddr[i]; 63 secret[i] = net_secret[i] + (__force u32)daddr[i];
@@ -63,6 +79,7 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
63 u32 hash[MD5_DIGEST_WORDS]; 79 u32 hash[MD5_DIGEST_WORDS];
64 u32 i; 80 u32 i;
65 81
82 net_secret_init();
66 memcpy(hash, saddr, 16); 83 memcpy(hash, saddr, 16);
67 for (i = 0; i < 4; i++) 84 for (i = 0; i < 4; i++)
68 secret[i] = net_secret[i] + (__force u32) daddr[i]; 85 secret[i] = net_secret[i] + (__force u32) daddr[i];
@@ -82,6 +99,7 @@ __u32 secure_ip_id(__be32 daddr)
82{ 99{
83 u32 hash[MD5_DIGEST_WORDS]; 100 u32 hash[MD5_DIGEST_WORDS];
84 101
102 net_secret_init();
85 hash[0] = (__force __u32) daddr; 103 hash[0] = (__force __u32) daddr;
86 hash[1] = net_secret[13]; 104 hash[1] = net_secret[13];
87 hash[2] = net_secret[14]; 105 hash[2] = net_secret[14];
@@ -96,6 +114,7 @@ __u32 secure_ipv6_id(const __be32 daddr[4])
96{ 114{
97 __u32 hash[4]; 115 __u32 hash[4];
98 116
117 net_secret_init();
99 memcpy(hash, daddr, 16); 118 memcpy(hash, daddr, 16);
100 md5_transform(hash, net_secret); 119 md5_transform(hash, net_secret);
101 120
@@ -107,6 +126,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
107{ 126{
108 u32 hash[MD5_DIGEST_WORDS]; 127 u32 hash[MD5_DIGEST_WORDS];
109 128
129 net_secret_init();
110 hash[0] = (__force u32)saddr; 130 hash[0] = (__force u32)saddr;
111 hash[1] = (__force u32)daddr; 131 hash[1] = (__force u32)daddr;
112 hash[2] = ((__force u16)sport << 16) + (__force u16)dport; 132 hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
@@ -121,6 +141,7 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
121{ 141{
122 u32 hash[MD5_DIGEST_WORDS]; 142 u32 hash[MD5_DIGEST_WORDS];
123 143
144 net_secret_init();
124 hash[0] = (__force u32)saddr; 145 hash[0] = (__force u32)saddr;
125 hash[1] = (__force u32)daddr; 146 hash[1] = (__force u32)daddr;
126 hash[2] = (__force u32)dport ^ net_secret[14]; 147 hash[2] = (__force u32)dport ^ net_secret[14];
@@ -140,6 +161,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
140 u32 hash[MD5_DIGEST_WORDS]; 161 u32 hash[MD5_DIGEST_WORDS];
141 u64 seq; 162 u64 seq;
142 163
164 net_secret_init();
143 hash[0] = (__force u32)saddr; 165 hash[0] = (__force u32)saddr;
144 hash[1] = (__force u32)daddr; 166 hash[1] = (__force u32)daddr;
145 hash[2] = ((__force u16)sport << 16) + (__force u16)dport; 167 hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
@@ -164,6 +186,7 @@ u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
164 u64 seq; 186 u64 seq;
165 u32 i; 187 u32 i;
166 188
189 net_secret_init();
167 memcpy(hash, saddr, 16); 190 memcpy(hash, saddr, 16);
168 for (i = 0; i < 4; i++) 191 for (i = 0; i < 4; i++)
169 secret[i] = net_secret[i] + daddr[i]; 192 secret[i] = net_secret[i] + daddr[i];
diff --git a/net/core/sock.c b/net/core/sock.c
index 5b6beba494a3..0b39e7ae4383 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2319,6 +2319,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
2319 sk->sk_ll_usec = sysctl_net_busy_read; 2319 sk->sk_ll_usec = sysctl_net_busy_read;
2320#endif 2320#endif
2321 2321
2322 sk->sk_pacing_rate = ~0U;
2322 /* 2323 /*
2323 * Before updating sk_refcnt, we must commit prior changes to memory 2324 * Before updating sk_refcnt, we must commit prior changes to memory
2324 * (Documentation/RCU/rculist_nulls.txt for details) 2325 * (Documentation/RCU/rculist_nulls.txt for details)