author     Jason Wang <jasowang@redhat.com>          2012-10-31 15:46:00 -0400
committer  David S. Miller <davem@davemloft.net>     2012-11-01 11:14:08 -0400
commit     c8d68e6be1c3b242f1c598595830890b65cea64a (patch)
tree       c3e3de9051548b119743e5c3ae1ecf62bd32f40e /drivers/net/tun.c
parent     bbb009941efaece3898910a862f6d23aa55d6ba8 (diff)
tuntap: multiqueue support
This patch converts tun/tap to a multiqueue device and exposes the queues to
userspace as multiple file descriptors. Internally, each tun_file is
abstracted as a queue, and an array of pointers to tun_file structures is
stored in tun_struct, so multiple tun_files can be attached to the device as
multiple queues. When choosing a txq, we first try to identify a flow through
its rxhash; if the packet does not have one, we fall back to the recorded rxq
and use that to choose the transmit queue. This policy may be changed in the
future.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
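To exercise the new interface from userspace, the flow is: open /dev/net/tun
once per queue and issue TUNSETIFF with the same interface name plus
IFF_MULTI_QUEUE each time; the first call creates the device, later calls
attach extra queues via tun_attach(). Below is a minimal sketch, assuming a
kernel carrying this patch, an <linux/if_tun.h> that exports IFF_MULTI_QUEUE,
and CAP_NET_ADMIN; the helper name tap_open_queue and the device name mq-tap0
are made up for illustration, and error handling is abbreviated.

/* Usage sketch (not part of the patch): create a multiqueue tap device
 * and attach two queues to it. Names below are hypothetical. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int tap_open_queue(const char *name)
{
        struct ifreq ifr;
        int fd = open("/dev/net/tun", O_RDWR);

        if (fd < 0)
                return -1;
        memset(&ifr, 0, sizeof(ifr));
        /* Same name + IFF_MULTI_QUEUE: the first call creates the device,
         * subsequent calls attach additional queues (tun_attach below). */
        ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
        strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
        if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}

int main(void)
{
        int q0 = tap_open_queue("mq-tap0");     /* creates mq-tap0 */
        int q1 = tap_open_queue("mq-tap0");     /* attaches queue #2 */

        if (q0 < 0 || q1 < 0) {
                perror("tap_open_queue");
                return 1;
        }
        printf("queue fds: %d and %d\n", q0, q1);
        /* Each fd is an independent queue; e.g. service one per thread. */
        return 0;
}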
Diffstat (limited to 'drivers/net/tun.c')
-rw-r--r--    drivers/net/tun.c    308
1 file changed, 220 insertions(+), 88 deletions(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index bdbb526eca7b..2762c55aeb66 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -109,6 +109,12 @@ struct tap_filter {
         unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
 };
 
+/* 1024 is probably a high enough limit: modern hypervisors seem to support on
+ * the order of 100-200 CPUs so this leaves us some breathing space if we want
+ * to match a queue per guest CPU.
+ */
+#define MAX_TAP_QUEUES 1024
+
 /* A tun_file connects an open character device to a tuntap netdevice. It
  * also contains all socket related structures (except sock_fprog and tap_filter)
  * to serve as one transmit queue for tuntap device. The sock_fprog and
@@ -129,6 +135,7 @@ struct tun_file {
         struct fasync_struct *fasync;
         /* only used for fasync */
         unsigned int flags;
+        u16 queue_index;
 };
 
 /* Since the socket were moved to tun_file, to preserve the behavior of persist
@@ -136,7 +143,8 @@ struct tun_file {
  * file were attached to a persist device.
  */
 struct tun_struct {
-        struct tun_file __rcu *tfile;
+        struct tun_file __rcu *tfiles[MAX_TAP_QUEUES];
+        unsigned int numqueues;
         unsigned int flags;
         kuid_t owner;
         kgid_t group;
@@ -157,56 +165,157 @@ struct tun_struct {
 #endif
 };
 
+/* We try to identify a flow through its rxhash first. The reason that
+ * we do not check rxq no. is because some cards (e.g. 82599) choose
+ * the rxq based on the txq where the last packet of the flow comes. As
+ * the userspace application moves between processors, we may get a
+ * different rxq no. here. If we could not get rxhash, then we would
+ * hope the rxq no. may help here.
+ */
+static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+        struct tun_struct *tun = netdev_priv(dev);
+        u32 txq = 0;
+        u32 numqueues = 0;
+
+        rcu_read_lock();
+        numqueues = tun->numqueues;
+
+        txq = skb_get_rxhash(skb);
+        if (txq) {
+                /* use multiply and shift instead of expensive divide */
+                txq = ((u64)txq * numqueues) >> 32;
+        } else if (likely(skb_rx_queue_recorded(skb))) {
+                txq = skb_get_rx_queue(skb);
+                while (unlikely(txq >= numqueues))
+                        txq -= numqueues;
+        }
+
+        rcu_read_unlock();
+        return txq;
+}
+
+static void tun_set_real_num_queues(struct tun_struct *tun)
+{
+        netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
+        netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
+}
+
+static void __tun_detach(struct tun_file *tfile, bool clean)
+{
+        struct tun_file *ntfile;
+        struct tun_struct *tun;
+        struct net_device *dev;
+
+        tun = rcu_dereference_protected(tfile->tun,
+                                        lockdep_rtnl_is_held());
+        if (tun) {
+                u16 index = tfile->queue_index;
+                BUG_ON(index >= tun->numqueues);
+                dev = tun->dev;
+
+                rcu_assign_pointer(tun->tfiles[index],
+                                   tun->tfiles[tun->numqueues - 1]);
+                rcu_assign_pointer(tfile->tun, NULL);
+                ntfile = rcu_dereference_protected(tun->tfiles[index],
+                                                   lockdep_rtnl_is_held());
+                ntfile->queue_index = index;
+
+                --tun->numqueues;
+                sock_put(&tfile->sk);
+
+                synchronize_net();
+                /* Drop read queue */
+                skb_queue_purge(&tfile->sk.sk_receive_queue);
+                tun_set_real_num_queues(tun);
+
+                if (tun->numqueues == 0 && !(tun->flags & TUN_PERSIST))
+                        if (dev->reg_state == NETREG_REGISTERED)
+                                unregister_netdevice(dev);
+        }
+
+        if (clean) {
+                BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
+                                 &tfile->socket.flags));
+                sk_release_kernel(&tfile->sk);
+        }
+}
+
+static void tun_detach(struct tun_file *tfile, bool clean)
+{
+        rtnl_lock();
+        __tun_detach(tfile, clean);
+        rtnl_unlock();
+}
+
+static void tun_detach_all(struct net_device *dev)
+{
+        struct tun_struct *tun = netdev_priv(dev);
+        struct tun_file *tfile;
+        int i, n = tun->numqueues;
+
+        for (i = 0; i < n; i++) {
+                tfile = rcu_dereference_protected(tun->tfiles[i],
+                                                  lockdep_rtnl_is_held());
+                BUG_ON(!tfile);
+                wake_up_all(&tfile->wq.wait);
+                rcu_assign_pointer(tfile->tun, NULL);
+                --tun->numqueues;
+        }
+        BUG_ON(tun->numqueues != 0);
+
+        synchronize_net();
+        for (i = 0; i < n; i++) {
+                tfile = rcu_dereference_protected(tun->tfiles[i],
+                                                  lockdep_rtnl_is_held());
+                /* Drop read queue */
+                skb_queue_purge(&tfile->sk.sk_receive_queue);
+                sock_put(&tfile->sk);
+        }
+}
+
 static int tun_attach(struct tun_struct *tun, struct file *file)
 {
         struct tun_file *tfile = file->private_data;
         int err;
 
-        ASSERT_RTNL();
-
-        netif_tx_lock_bh(tun->dev);
-
         err = -EINVAL;
-        if (tfile->tun)
+        if (rcu_dereference_protected(tfile->tun, lockdep_rtnl_is_held()))
                 goto out;
 
         err = -EBUSY;
-        if (tun->tfile)
+        if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
+                goto out;
+
+        err = -E2BIG;
+        if (tun->numqueues == MAX_TAP_QUEUES)
                 goto out;
 
         err = 0;
 
-        /* Re-attach filter when attaching to a persist device */
+        /* Re-attach the filter to persist device */
         if (tun->filter_attached == true) {
                 err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
                 if (!err)
                         goto out;
         }
+        tfile->queue_index = tun->numqueues;
         rcu_assign_pointer(tfile->tun, tun);
-        tfile->socket.sk->sk_sndbuf = tun->sndbuf;
-        rcu_assign_pointer(tun->tfile, tfile);
-        netif_carrier_on(tun->dev);
+        rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
         sock_hold(&tfile->sk);
+        tun->numqueues++;
 
-out:
-        netif_tx_unlock_bh(tun->dev);
-        return err;
-}
+        tun_set_real_num_queues(tun);
 
-static void __tun_detach(struct tun_struct *tun)
-{
-        struct tun_file *tfile = rcu_dereference_protected(tun->tfile,
-                                                           lockdep_rtnl_is_held());
-        /* Detach from net device */
-        netif_carrier_off(tun->dev);
-        rcu_assign_pointer(tun->tfile, NULL);
-        if (tfile) {
-                rcu_assign_pointer(tfile->tun, NULL);
+        if (tun->numqueues == 1)
+                netif_carrier_on(tun->dev);
 
-                synchronize_net();
-                /* Drop read queue */
-                skb_queue_purge(&tfile->socket.sk->sk_receive_queue);
-        }
+        /* device is allowed to go away first, so no need to hold extra
+         * refcnt.
+         */
 
+out:
+        return err;
 }
 
 static struct tun_struct *__tun_get(struct tun_file *tfile)
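A note on the txq selection above: the multiply-and-shift in tun_select_queue
maps a 32-bit rxhash onto [0, numqueues) without a per-packet division, by
treating the hash as a fraction of 2^32 and scaling it by the queue count. A
standalone sketch of the same arithmetic for comparison (hash_to_queue is a
hypothetical name, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit hash onto [0, n) the way tun_select_queue does: treat
 * hash/2^32 as a fraction in [0, 1) and scale by n. Distributes like
 * hash % n but avoids the divide. */
static uint32_t hash_to_queue(uint32_t hash, uint32_t n)
{
        return ((uint64_t)hash * n) >> 32;
}

int main(void)
{
        uint32_t hashes[] = { 0x00000000, 0x40000000, 0x80000000, 0xffffffff };
        uint32_t n = 4;

        for (int i = 0; i < 4; i++)
                printf("hash %#010x -> txq %u\n",
                       hashes[i], hash_to_queue(hashes[i], n));
        /* prints txq 0, 1, 2, 3: the hash space splits into n equal bands */
        return 0;
}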
@@ -349,30 +458,20 @@ static const struct ethtool_ops tun_ethtool_ops;
 /* Net device detach from fd. */
 static void tun_net_uninit(struct net_device *dev)
 {
-        struct tun_struct *tun = netdev_priv(dev);
-        struct tun_file *tfile = rcu_dereference_protected(tun->tfile,
-                                                           lockdep_rtnl_is_held());
-
-        /* Inform the methods they need to stop using the dev.
-         */
-        if (tfile) {
-                wake_up_all(&tfile->wq.wait);
-                __tun_detach(tun);
-                synchronize_net();
-        }
+        tun_detach_all(dev);
 }
 
 /* Net device open. */
 static int tun_net_open(struct net_device *dev)
 {
-        netif_start_queue(dev);
+        netif_tx_start_all_queues(dev);
         return 0;
 }
 
 /* Net device close. */
 static int tun_net_close(struct net_device *dev)
 {
-        netif_stop_queue(dev);
+        netif_tx_stop_all_queues(dev);
         return 0;
 }
 
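Also worth noting: __tun_detach() above keeps the tun->tfiles[] array dense by
moving the last queue into the vacated slot and fixing up its queue_index,
the classic O(1) swap-with-last removal. A generic sketch of the pattern
(swap_remove is a hypothetical name, not from the patch):

#include <stdio.h>

/* O(1) unordered removal, as __tun_detach does with tun->tfiles[]:
 * overwrite the vacated slot with the last element and shrink the count.
 * Order is not preserved, which is fine because queue_index is updated. */
static void swap_remove(int *arr, unsigned int *len, unsigned int index)
{
        arr[index] = arr[*len - 1];     /* last element fills the hole */
        (*len)--;                       /* array stays dense */
}

int main(void)
{
        int queues[] = { 10, 11, 12, 13 };
        unsigned int len = 4;

        swap_remove(queues, &len, 1);   /* drop the queue in slot 1 */
        for (unsigned int i = 0; i < len; i++)
                printf("slot %u -> %d\n", i, queues[i]);
        /* prints 10, 13, 12: slot 1 now holds what was last */
        return 0;
}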
@@ -380,16 +479,20 @@ static int tun_net_close(struct net_device *dev)
 static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 {
         struct tun_struct *tun = netdev_priv(dev);
+        int txq = skb->queue_mapping;
         struct tun_file *tfile;
 
         rcu_read_lock();
-        tfile = rcu_dereference(tun->tfile);
+        tfile = rcu_dereference(tun->tfiles[txq]);
+
         /* Drop packet if interface is not attached */
-        if (!tfile)
+        if (txq >= tun->numqueues)
                 goto drop;
 
         tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
 
+        BUG_ON(!tfile);
+
         /* Drop if the filter does not like it.
          * This is a noop if the filter is disabled.
          * Filter can be enabled only for the TAP devices. */
@@ -400,12 +503,15 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
             sk_filter(tfile->socket.sk, skb))
                 goto drop;
 
+        /* Limit the number of packets queued by dividing txq length with the
+         * number of queues.
+         */
         if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
-            >= dev->tx_queue_len) {
+            >= dev->tx_queue_len / tun->numqueues) {
                 if (!(tun->flags & TUN_ONE_QUEUE)) {
                         /* Normal queueing mode. */
                         /* Packet scheduler handles dropping of further packets. */
-                        netif_stop_queue(dev);
+                        netif_stop_subqueue(dev, txq);
 
                         /* We won't see all dropped packets individually, so overrun
                          * error is more appropriate. */
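As a worked example of the per-queue limit above: tun_net_init() leaves
dev->tx_queue_len at TUN_READQ_SIZE (500) by default, so with four attached
queues each per-queue socket receive queue stops its subqueue once it holds
125 packets, keeping the aggregate backlog at roughly the single-queue level.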
@@ -494,6 +600,7 @@ static const struct net_device_ops tun_netdev_ops = {
         .ndo_start_xmit         = tun_net_xmit,
         .ndo_change_mtu         = tun_net_change_mtu,
         .ndo_fix_features       = tun_net_fix_features,
+        .ndo_select_queue       = tun_select_queue,
 #ifdef CONFIG_NET_POLL_CONTROLLER
         .ndo_poll_controller    = tun_poll_controller,
 #endif
@@ -509,6 +616,7 @@ static const struct net_device_ops tap_netdev_ops = {
         .ndo_set_rx_mode        = tun_net_mclist,
         .ndo_set_mac_address    = eth_mac_addr,
         .ndo_validate_addr      = eth_validate_addr,
+        .ndo_select_queue       = tun_select_queue,
 #ifdef CONFIG_NET_POLL_CONTROLLER
         .ndo_poll_controller    = tun_poll_controller,
 #endif
@@ -550,7 +658,7 @@ static void tun_net_init(struct net_device *dev)
 /* Character device part */
 
 /* Poll */
-static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
+static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
 {
         struct tun_file *tfile = file->private_data;
         struct tun_struct *tun = __tun_get(tfile);
@@ -995,7 +1103,7 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
                         schedule();
                         continue;
                 }
-                netif_wake_queue(tun->dev);
+                netif_wake_subqueue(tun->dev, tfile->queue_index);
 
                 ret = tun_put_user(tun, tfile, skb, iv, len);
                 kfree_skb(skb);
@@ -1156,6 +1264,9 @@ static int tun_flags(struct tun_struct *tun)
         if (tun->flags & TUN_VNET_HDR)
                 flags |= IFF_VNET_HDR;
 
+        if (tun->flags & TUN_TAP_MQ)
+                flags |= IFF_MULTI_QUEUE;
+
         return flags;
 }
 
@@ -1247,8 +1358,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                 if (*ifr->ifr_name)
                         name = ifr->ifr_name;
 
-                dev = alloc_netdev(sizeof(struct tun_struct), name,
-                                   tun_setup);
+                dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
+                                       tun_setup,
+                                       MAX_TAP_QUEUES, MAX_TAP_QUEUES);
                 if (!dev)
                         return -ENOMEM;
 
@@ -1283,7 +1395,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
                 err = tun_attach(tun, file);
                 if (err < 0)
-                        goto failed;
+                        goto err_free_dev;
         }
 
         tun_debug(KERN_INFO, tun, "tun_set_iff\n");
@@ -1303,18 +1415,22 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
         else
                 tun->flags &= ~TUN_VNET_HDR;
 
+        if (ifr->ifr_flags & IFF_MULTI_QUEUE)
+                tun->flags |= TUN_TAP_MQ;
+        else
+                tun->flags &= ~TUN_TAP_MQ;
+
         /* Make sure persistent devices do not get stuck in
          * xoff state.
          */
         if (netif_running(tun->dev))
-                netif_wake_queue(tun->dev);
+                netif_tx_wake_all_queues(tun->dev);
 
         strcpy(ifr->ifr_name, tun->dev->name);
         return 0;
 
  err_free_dev:
         free_netdev(dev);
- failed:
         return err;
 }
 
@@ -1369,6 +1485,51 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
         return 0;
 }
 
+static void tun_detach_filter(struct tun_struct *tun, int n)
+{
+        int i;
+        struct tun_file *tfile;
+
+        for (i = 0; i < n; i++) {
+                tfile = rcu_dereference_protected(tun->tfiles[i],
+                                                  lockdep_rtnl_is_held());
+                sk_detach_filter(tfile->socket.sk);
+        }
+
+        tun->filter_attached = false;
+}
+
+static int tun_attach_filter(struct tun_struct *tun)
+{
+        int i, ret = 0;
+        struct tun_file *tfile;
+
+        for (i = 0; i < tun->numqueues; i++) {
+                tfile = rcu_dereference_protected(tun->tfiles[i],
+                                                  lockdep_rtnl_is_held());
+                ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+                if (ret) {
+                        tun_detach_filter(tun, i);
+                        return ret;
+                }
+        }
+
+        tun->filter_attached = true;
+        return ret;
+}
+
+static void tun_set_sndbuf(struct tun_struct *tun)
+{
+        struct tun_file *tfile;
+        int i;
+
+        for (i = 0; i < tun->numqueues; i++) {
+                tfile = rcu_dereference_protected(tun->tfiles[i],
+                                                  lockdep_rtnl_is_held());
+                tfile->socket.sk->sk_sndbuf = tun->sndbuf;
+        }
+}
+
 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg, int ifreq_len)
 {
@@ -1397,6 +1558,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                                 (unsigned int __user*)argp);
         }
 
+        ret = 0;
         rtnl_lock();
 
         tun = __tun_get(tfile);
@@ -1537,7 +1699,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                         break;
                 }
 
-                tun->sndbuf = tfile->socket.sk->sk_sndbuf = sndbuf;
+                tun->sndbuf = sndbuf;
+                tun_set_sndbuf(tun);
                 break;
 
         case TUNGETVNETHDRSZ:
@@ -1568,9 +1731,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
                         break;
 
-                ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
-                if (!ret)
-                        tun->filter_attached = true;
+                ret = tun_attach_filter(tun);
                 break;
 
         case TUNDETACHFILTER:
@@ -1578,9 +1739,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                 ret = -EINVAL;
                 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
                         break;
-                ret = sk_detach_filter(tfile->socket.sk);
-                if (!ret)
-                        tun->filter_attached = false;
+                ret = 0;
+                tun_detach_filter(tun, tun->numqueues);
                 break;
 
         default:
@@ -1685,37 +1845,9 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 static int tun_chr_close(struct inode *inode, struct file *file)
 {
         struct tun_file *tfile = file->private_data;
-        struct tun_struct *tun;
         struct net *net = tfile->net;
 
-        rtnl_lock();
-
-        tun = rcu_dereference_protected(tfile->tun, lockdep_rtnl_is_held());
-        if (tun) {
-                struct net_device *dev = tun->dev;
-
-                tun_debug(KERN_INFO, tun, "tun_chr_close\n");
-
-                __tun_detach(tun);
-
-                synchronize_net();
-
-                /* If desirable, unregister the netdevice. */
-                if (!(tun->flags & TUN_PERSIST)) {
-                        if (dev->reg_state == NETREG_REGISTERED)
-                                unregister_netdevice(dev);
-                }
-
-                /* drop the reference that netdevice holds */
-                sock_put(&tfile->sk);
-        }
-
-        rtnl_unlock();
-
-        /* drop the reference that file holds */
-        BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
-                         &tfile->socket.flags));
-        sk_release_kernel(&tfile->sk);
+        tun_detach(tfile, true);
         put_net(net);
 
         return 0;