Diffstat (limited to 'drivers/net/tun.c')
 drivers/net/tun.c | 45 ++++++++++++++++++++++++++++++++-------------
 1 file changed, 32 insertions(+), 13 deletions(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index af372d0957fe..cc09b67c23bc 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -109,11 +109,11 @@ struct tap_filter {
 	unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
 };
 
-/* 1024 is probably a high enough limit: modern hypervisors seem to support on
- * the order of 100-200 CPUs so this leaves us some breathing space if we want
- * to match a queue per guest CPU.
- */
-#define MAX_TAP_QUEUES 1024
+/* DEFAULT_MAX_NUM_RSS_QUEUES were choosed to let the rx/tx queues allocated for
+ * the netdevice to be fit in one page. So we can make sure the success of
+ * memory allocation. TODO: increase the limit. */
+#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
+#define MAX_TAP_FLOWS  4096
 
 #define TUN_FLOW_EXPIRE (3 * HZ)
 
@@ -185,6 +185,8 @@ struct tun_struct {
 	unsigned long ageing_time;
 	unsigned int numdisabled;
 	struct list_head disabled;
+	void *security;
+	u32 flow_count;
 };
 
 static inline u32 tun_hashfn(u32 rxhash)
@@ -218,6 +220,7 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
 		e->queue_index = queue_index;
 		e->tun = tun;
 		hlist_add_head_rcu(&e->hash_link, head);
+		++tun->flow_count;
 	}
 	return e;
 }
@@ -228,6 +231,7 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
 		  e->rxhash, e->queue_index);
 	hlist_del_rcu(&e->hash_link);
 	kfree_rcu(e, rcu);
+	--tun->flow_count;
 }
 
 static void tun_flow_flush(struct tun_struct *tun)
@@ -317,7 +321,8 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 		e->updated = jiffies;
 	} else {
 		spin_lock_bh(&tun->lock);
-		if (!tun_flow_find(head, rxhash))
+		if (!tun_flow_find(head, rxhash) &&
+		    tun->flow_count < MAX_TAP_FLOWS)
 			tun_flow_create(tun, head, rxhash, queue_index);
 
 		if (!timer_pending(&tun->flow_gc_timer))
@@ -490,6 +495,10 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
 	struct tun_file *tfile = file->private_data;
 	int err;
 
+	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
+	if (err < 0)
+		goto out;
+
 	err = -EINVAL;
 	if (rtnl_dereference(tfile->tun))
 		goto out;
@@ -1373,6 +1382,7 @@ static void tun_free_netdev(struct net_device *dev)
 
 	BUG_ON(!(list_empty(&tun->disabled)));
 	tun_flow_uninit(tun);
+	security_tun_dev_free_security(tun->security);
 	free_netdev(dev);
 }
 
@@ -1562,7 +1572,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
 		if (tun_not_capable(tun))
 			return -EPERM;
-		err = security_tun_dev_attach(tfile->socket.sk);
+		err = security_tun_dev_open(tun->security);
 		if (err < 0)
 			return err;
 
@@ -1577,6 +1587,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 	else {
 		char *name;
 		unsigned long flags = 0;
+		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
+			     MAX_TAP_QUEUES : 1;
 
 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
@@ -1600,8 +1612,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 			name = ifr->ifr_name;
 
 		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
-				       tun_setup,
-				       MAX_TAP_QUEUES, MAX_TAP_QUEUES);
+				       tun_setup, queues, queues);
+
 		if (!dev)
 			return -ENOMEM;
 
@@ -1619,7 +1631,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
 		spin_lock_init(&tun->lock);
 
-		security_tun_dev_post_create(&tfile->sk);
+		err = security_tun_dev_alloc_security(&tun->security);
+		if (err < 0)
+			goto err_free_dev;
 
 		tun_net_init(dev);
 
@@ -1789,10 +1803,14 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
 
 	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
 		tun = tfile->detached;
-		if (!tun)
+		if (!tun) {
 			ret = -EINVAL;
-		else
-			ret = tun_attach(tun, file);
+			goto unlock;
+		}
+		ret = security_tun_dev_attach_queue(tun->security);
+		if (ret < 0)
+			goto unlock;
+		ret = tun_attach(tun, file);
 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
 		tun = rtnl_dereference(tfile->tun);
 		if (!tun || !(tun->flags & TUN_TAP_MQ))
@@ -1802,6 +1820,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
 	} else
 		ret = -EINVAL;
 
+unlock:
 	rtnl_unlock();
 	return ret;
 }
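
For context, a minimal userspace sketch of the paths this patch touches: TUNSETIFF with IFF_MULTI_QUEUE reaches tun_set_iff() (which now sizes the netdev with MAX_TAP_QUEUES only for multiqueue opens), and TUNSETQUEUE with IFF_ATTACH_QUEUE/IFF_DETACH_QUEUE reaches tun_set_queue(), where security_tun_dev_attach_queue() now runs before tun_attach(). This is not part of the patch; the device name "tap0", the helper names, and the terse error handling are illustrative assumptions.

/* Illustrative only, not from the patch: open a multiqueue tap queue and
 * attach/detach it, exercising tun_set_iff() and tun_set_queue(). */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int open_tap_queue(const char *name)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	/* IFF_MULTI_QUEUE makes tun_set_iff() allocate the netdev with
	 * MAX_TAP_QUEUES tx/rx queues instead of a single queue. */
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

static int set_queue(int fd, int attach)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	/* On attach, tun_set_queue() now calls security_tun_dev_attach_queue()
	 * before tun_attach(); an LSM denial shows up here as an ioctl error. */
	ifr.ifr_flags = attach ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE;
	return ioctl(fd, TUNSETQUEUE, &ifr);
}

Example use: fd = open_tap_queue("tap0"); set_queue(fd, 0); set_queue(fd, 1);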