author	Jason Wang <jasowang@redhat.com>	2012-12-13 18:53:30 -0500
committer	David S. Miller <davem@davemloft.net>	2012-12-14 13:14:06 -0500
commit	4008e97f866db66511f065ae9052e0733a3a8429
tree	4d2345229c4e35aeba76314fff9b7dd9c1c311a3 /drivers/net/tun.c
parent	8fa45a70badf6ce2c57421c17e86e8967ce0d478
tuntap: fix ambiguous multiqueue API
The current multiqueue API is ambiguous, which may confuse both users and LSM about how to do things correctly:

- Both TUNSETIFF and TUNSETQUEUE could be used to create the queues of a tuntap device.
- TUNSETQUEUE was used to disable and enable a specific queue of the device. But since the tuntap state was completely removed from the queue, the queue could be used to attach to another device (there is no such requirement currently, and it would need a new kind of LSM policy).
- TUNSETQUEUE could be used to attach to a persistent device without any queues. This kind of attaching bypasses the necessary checking done during TUNSETIFF and may lead to unexpected results.

So this patch tries to make a cleaner and simpler API:

- Only TUNSETIFF can create queues.
- TUNSETQUEUE can only be used to disable and enable the queues of a device, and the tuntap device state is not detached from a queue while it is disabled, so TUNSETQUEUE can only be used after TUNSETIFF and with the same device.

This is done by introducing a list which keeps track of all queues that were disabled. A queue is moved between this list and the tfiles[] array when it is disabled or enabled. A pointer to the tun_struct is also introduced so a disabled queue keeps track of the device it belongs to.

After this change, the isolation between management and application can be achieved by having TUNSETIFF called only by management software and TUNSETQUEUE called only by the application. For LSM/SELinux, what is left is to do the proper check during tun_set_queue() if needed.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
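To make the resulting API concrete, the following is a minimal userspace sketch of the intended split after this patch. The helper names create_queue()/set_queue(), the device name "tap0", and the bare-bones error handling are illustrative assumptions, not part of the patch; the ioctls and flags (TUNSETIFF, TUNSETQUEUE, IFF_MULTI_QUEUE, IFF_ATTACH_QUEUE, IFF_DETACH_QUEUE) are the existing tuntap interface.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Management side: queues are created only via TUNSETIFF on a
 * multiqueue device; one fd corresponds to one queue. */
static int create_queue(const char *name)	/* "tap0" below is just an example name */
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

/* Application side: TUNSETQUEUE only disables or re-enables a queue
 * the fd already owns; no device name is passed anymore. */
static int set_queue(int fd, int enable)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = enable ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE;
	return ioctl(fd, TUNSETQUEUE, &ifr);
}

int main(void)
{
	int q0 = create_queue("tap0");
	int q1 = create_queue("tap0");

	if (q0 < 0 || q1 < 0)
		return 1;
	set_queue(q1, 0);	/* queue moves to tun->disabled */
	set_queue(q1, 1);	/* queue re-attaches to the same device */
	close(q1);
	close(q0);
	return 0;
}

Note that after this patch TUNSETQUEUE no longer looks up a device by ifr_name; a disabled queue can only be re-attached to the device it was created on via TUNSETIFF, which is what the new detached pointer in tun_file tracks.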
Diffstat (limited to 'drivers/net/tun.c')
-rw-r--r--	drivers/net/tun.c	86
1 file changed, 63 insertions(+), 23 deletions(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 40b426edc9e6..255a9f574869 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -138,6 +138,8 @@ struct tun_file {
 	/* only used for fasnyc */
 	unsigned int flags;
 	u16 queue_index;
+	struct list_head next;
+	struct tun_struct *detached;
 };
 
 struct tun_flow_entry {
@@ -182,6 +184,8 @@ struct tun_struct {
 	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
 	struct timer_list flow_gc_timer;
 	unsigned long ageing_time;
+	unsigned int numdisabled;
+	struct list_head disabled;
 };
 
 static inline u32 tun_hashfn(u32 rxhash)
@@ -385,6 +389,23 @@ static void tun_set_real_num_queues(struct tun_struct *tun)
 	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
 }
 
+static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
+{
+	tfile->detached = tun;
+	list_add_tail(&tfile->next, &tun->disabled);
+	++tun->numdisabled;
+}
+
+struct tun_struct *tun_enable_queue(struct tun_file *tfile)
+{
+	struct tun_struct *tun = tfile->detached;
+
+	tfile->detached = NULL;
+	list_del_init(&tfile->next);
+	--tun->numdisabled;
+	return tun;
+}
+
 static void __tun_detach(struct tun_file *tfile, bool clean)
 {
 	struct tun_file *ntfile;
@@ -406,20 +427,25 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
 		ntfile->queue_index = index;
 
 		--tun->numqueues;
-		sock_put(&tfile->sk);
+		if (clean)
+			sock_put(&tfile->sk);
+		else
+			tun_disable_queue(tun, tfile);
 
 		synchronize_net();
 		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
 		/* Drop read queue */
 		skb_queue_purge(&tfile->sk.sk_receive_queue);
 		tun_set_real_num_queues(tun);
-
-		if (tun->numqueues == 0 && !(tun->flags & TUN_PERSIST))
-			if (dev->reg_state == NETREG_REGISTERED)
-				unregister_netdevice(dev);
-	}
+	} else if (tfile->detached && clean)
+		tun = tun_enable_queue(tfile);
 
 	if (clean) {
+		if (tun && tun->numqueues == 0 && tun->numdisabled == 0 &&
+		    !(tun->flags & TUN_PERSIST))
+			if (tun->dev->reg_state == NETREG_REGISTERED)
+				unregister_netdevice(tun->dev);
+
 		BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
 				 &tfile->socket.flags));
 		sk_release_kernel(&tfile->sk);
@@ -436,7 +462,7 @@ static void tun_detach(struct tun_file *tfile, bool clean)
 static void tun_detach_all(struct net_device *dev)
 {
 	struct tun_struct *tun = netdev_priv(dev);
-	struct tun_file *tfile;
+	struct tun_file *tfile, *tmp;
 	int i, n = tun->numqueues;
 
 	for (i = 0; i < n; i++) {
@@ -457,6 +483,12 @@ static void tun_detach_all(struct net_device *dev)
 		skb_queue_purge(&tfile->sk.sk_receive_queue);
 		sock_put(&tfile->sk);
 	}
+	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
+		tun_enable_queue(tfile);
+		skb_queue_purge(&tfile->sk.sk_receive_queue);
+		sock_put(&tfile->sk);
+	}
+	BUG_ON(tun->numdisabled != 0);
 }
 
 static int tun_attach(struct tun_struct *tun, struct file *file)
@@ -473,7 +505,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
 		goto out;
 
 	err = -E2BIG;
-	if (tun->numqueues == MAX_TAP_QUEUES)
+	if (!tfile->detached &&
+	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
 		goto out;
 
 	err = 0;
@@ -487,9 +520,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
 	tfile->queue_index = tun->numqueues;
 	rcu_assign_pointer(tfile->tun, tun);
 	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
-	sock_hold(&tfile->sk);
 	tun->numqueues++;
 
+	if (tfile->detached)
+		tun_enable_queue(tfile);
+	else
+		sock_hold(&tfile->sk);
+
 	tun_set_real_num_queues(tun);
 
 	/* device is allowed to go away first, so no need to hold extra
@@ -1349,6 +1386,7 @@ static void tun_free_netdev(struct net_device *dev)
 {
 	struct tun_struct *tun = netdev_priv(dev);
 
+	BUG_ON(!(list_empty(&tun->disabled)));
 	tun_flow_uninit(tun);
 	free_netdev(dev);
 }
@@ -1543,6 +1581,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 		err = tun_attach(tun, file);
 		if (err < 0)
 			return err;
+
+		if (tun->flags & TUN_TAP_MQ &&
+		    (tun->numqueues + tun->numdisabled > 1))
+			return err;
 	}
 	else {
 		char *name;
@@ -1601,6 +1643,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 			TUN_USER_FEATURES;
 		dev->features = dev->hw_features;
 
+		INIT_LIST_HEAD(&tun->disabled);
 		err = tun_attach(tun, file);
 		if (err < 0)
 			goto err_free_dev;
@@ -1755,32 +1798,28 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
 {
 	struct tun_file *tfile = file->private_data;
 	struct tun_struct *tun;
-	struct net_device *dev;
 	int ret = 0;
 
 	rtnl_lock();
 
 	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
-		dev = __dev_get_by_name(tfile->net, ifr->ifr_name);
-		if (!dev) {
-			ret = -EINVAL;
-			goto unlock;
-		}
-
-		tun = netdev_priv(dev);
-		if (dev->netdev_ops != &tap_netdev_ops &&
-		    dev->netdev_ops != &tun_netdev_ops)
+		tun = tfile->detached;
+		if (!tun)
 			ret = -EINVAL;
 		else if (tun_not_capable(tun))
 			ret = -EPERM;
 		else
 			ret = tun_attach(tun, file);
-	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE)
-		__tun_detach(tfile, false);
-	else
+	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
+		tun = rcu_dereference_protected(tfile->tun,
+						lockdep_rtnl_is_held());
+		if (!tun || !(tun->flags & TUN_TAP_MQ))
+			ret = -EINVAL;
+		else
+			__tun_detach(tfile, false);
+	} else
 		ret = -EINVAL;
 
-unlock:
 	rtnl_unlock();
 	return ret;
 }
@@ -2092,6 +2131,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 
 	file->private_data = tfile;
 	set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
+	INIT_LIST_HEAD(&tfile->next);
 
 	return 0;
 }