author     Eric W. Biederman <ebiederm@xmission.com>  2009-01-20 06:07:17 -0500
committer  David S. Miller <davem@davemloft.net>      2009-01-21 19:00:46 -0500
commit     c70f182940f988448f3c12a209d18b1edc276e33 (patch)
tree       6469d90befb657f4ef37cc40c03b97de992dba80 /drivers/net
parent     b2430de37ef0bc0799ffba7b5219d38ca417eb76 (diff)
tun: Fix races between tun_net_close and free_netdev.
The tun code does not cope gracefully if the network device goes away before
the tun file descriptor is closed. It looks like we can trigger this with
rmmod, and moving tun devices between network namespaces will allow this
to be triggered when network namespaces exit.
To fix this I introduce an intermediate data structure, tun_file, which
holds a count of users and a pointer to the struct tun_struct. tun_get
increments that reference count only if it is greater than 0. tun_put
decrements that reference count and detaches from the network device when
the count drops to 0.
While we have a file attached to the network device, I hold a reference
to the network device, keeping it from going away completely.
When a network device is unregistered, I decrement the count of the
attached tun_file and, if that was the last user, detach the tun_file;
all processes on read_wait are then woken up to ensure they do not
sleep indefinitely. Since some of those sleeps happen with the count on
the tun device elevated, waking the read waiters ensures that the
tun_file will be detached in a timely manner.
Signed-off-by: Eric W. Biederman <ebiederm@aristanetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
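The lifetime rule the message describes is the classic "refcount with
inc-not-zero" pattern: the count starts at 0, attach raises it to 1,
lookups may only take a reference while the count is still non-zero, and
whichever path drops the last reference performs the detach. Below is a
minimal standalone sketch of that pattern. It uses C11 atomics rather than
the kernel's atomic_t, and the obj/obj_get/obj_put names are illustrative
placeholders for tun_file, __tun_get and tun_put in the patch, not code
from it.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int count;	/* 0 = detached or never attached, >0 = live */
};

/* Attach: take the first reference, making the object usable. */
static void obj_attach(struct obj *o)
{
	atomic_store(&o->count, 1);
}

/* Userspace analogue of the kernel's atomic_inc_not_zero(): take a
 * reference only while the count is still non-zero; once it has hit
 * zero the object can never be revived. */
static bool obj_get(struct obj *o)
{
	int c = atomic_load(&o->count);
	while (c != 0) {
		if (atomic_compare_exchange_weak(&o->count, &c, c + 1))
			return true;	/* reference taken */
	}
	return false;	/* already dead: caller must not touch it */
}

/* Analogue of atomic_dec_and_test(): whoever drops the count to zero
 * is responsible for the detach/teardown work. */
static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->count, 1) == 1)
		printf("last user gone: detach here\n");
}

int main(void)
{
	struct obj o;

	obj_attach(&o);		/* count: 0 -> 1 */
	if (obj_get(&o))	/* like tun_get(): 1 -> 2 */
		obj_put(&o);	/* 2 -> 1, no detach yet */
	obj_put(&o);		/* 1 -> 0: detach runs exactly once */
	if (!obj_get(&o))	/* later lookups now fail cleanly */
		printf("object already detached\n");
	return 0;
}

The same dec-and-test rule is what lets tun_net_uninit in the diff below
act as just another "put": unregistering the device drops the attach-time
reference, and whichever side reaches zero last, the file descriptor or
the unregister path, performs the actual detach.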
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/tun.c  |  50
1 file changed, 48 insertions(+), 2 deletions(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 030d9858bb68..51dba6192bab 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -88,6 +88,7 @@ struct tap_filter {
 };
 
 struct tun_file {
+	atomic_t count;
 	struct tun_struct *tun;
 	struct net *net;
 	wait_queue_head_t read_wait;
@@ -138,6 +139,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
 	err = 0;
 	tfile->tun = tun;
 	tun->tfile = tfile;
+	dev_hold(tun->dev);
+	atomic_inc(&tfile->count);
 
 out:
 	netif_tx_unlock_bh(tun->dev);
@@ -156,11 +159,26 @@ static void __tun_detach(struct tun_struct *tun)
 
 	/* Drop read queue */
 	skb_queue_purge(&tun->readq);
+
+	/* Drop the extra count on the net device */
+	dev_put(tun->dev);
+}
+
+static void tun_detach(struct tun_struct *tun)
+{
+	rtnl_lock();
+	__tun_detach(tun);
+	rtnl_unlock();
 }
 
 static struct tun_struct *__tun_get(struct tun_file *tfile)
 {
-	return tfile->tun;
+	struct tun_struct *tun = NULL;
+
+	if (atomic_inc_not_zero(&tfile->count))
+		tun = tfile->tun;
+
+	return tun;
 }
 
 static struct tun_struct *tun_get(struct file *file)
@@ -170,7 +188,10 @@ static struct tun_struct *tun_get(struct file *file)
 
 static void tun_put(struct tun_struct *tun)
 {
-	/* Noop for now */
+	struct tun_file *tfile = tun->tfile;
+
+	if (atomic_dec_and_test(&tfile->count))
+		tun_detach(tfile->tun);
 }
 
 /* TAP filterting */
@@ -281,6 +302,21 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
 
 static const struct ethtool_ops tun_ethtool_ops;
 
+/* Net device detach from fd. */
+static void tun_net_uninit(struct net_device *dev)
+{
+	struct tun_struct *tun = netdev_priv(dev);
+	struct tun_file *tfile = tun->tfile;
+
+	/* Inform the methods they need to stop using the dev.
+	 */
+	if (tfile) {
+		wake_up_all(&tfile->read_wait);
+		if (atomic_dec_and_test(&tfile->count))
+			__tun_detach(tun);
+	}
+}
+
 /* Net device open. */
 static int tun_net_open(struct net_device *dev)
 {
@@ -367,6 +403,7 @@ tun_net_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 static const struct net_device_ops tun_netdev_ops = {
+	.ndo_uninit = tun_net_uninit,
 	.ndo_open = tun_net_open,
 	.ndo_stop = tun_net_close,
 	.ndo_start_xmit = tun_net_xmit,
@@ -374,6 +411,7 @@ static const struct net_device_ops tun_netdev_ops = {
 };
 
 static const struct net_device_ops tap_netdev_ops = {
+	.ndo_uninit = tun_net_uninit,
 	.ndo_open = tun_net_open,
 	.ndo_stop = tun_net_close,
 	.ndo_start_xmit = tun_net_xmit,
@@ -434,6 +472,9 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
 	if (!skb_queue_empty(&tun->readq))
 		mask |= POLLIN | POLLRDNORM;
 
+	if (tun->dev->reg_state != NETREG_REGISTERED)
+		mask = POLLERR;
+
 	tun_put(tun);
 	return mask;
 }
@@ -734,6 +775,10 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
 				ret = -ERESTARTSYS;
 				break;
 			}
+			if (tun->dev->reg_state != NETREG_REGISTERED) {
+				ret = -EIO;
+				break;
+			}
 
 			/* Nothing to read, let's sleep */
 			schedule();
@@ -1135,6 +1180,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 	tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
 	if (!tfile)
 		return -ENOMEM;
+	atomic_set(&tfile->count, 0);
 	tfile->tun = NULL;
 	tfile->net = get_net(current->nsproxy->net_ns);
 	init_waitqueue_head(&tfile->read_wait);