path: root/drivers/net/tun.c
Diffstat (limited to 'drivers/net/tun.c')
-rw-r--r--  drivers/net/tun.c | 101
1 file changed, 81 insertions(+), 20 deletions(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ecec8029c5e8..26f8635b027d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -69,6 +69,7 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
 #include <net/sock.h>
+#include <linux/seq_file.h>
 
 #include <asm/uaccess.h>
 
@@ -110,7 +111,7 @@ struct tap_filter {
 	unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
 };
 
-/* DEFAULT_MAX_NUM_RSS_QUEUES were choosed to let the rx/tx queues allocated for
+/* DEFAULT_MAX_NUM_RSS_QUEUES were chosen to let the rx/tx queues allocated for
  * the netdevice to be fit in one page. So we can make sure the success of
  * memory allocation. TODO: increase the limit. */
 #define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
@@ -119,7 +120,7 @@ struct tap_filter {
 #define TUN_FLOW_EXPIRE (3 * HZ)
 
 /* A tun_file connects an open character device to a tuntap netdevice. It
- * also contains all socket related strctures (except sock_fprog and tap_filter)
+ * also contains all socket related structures (except sock_fprog and tap_filter)
  * to serve as one transmit queue for tuntap device. The sock_fprog and
  * tap_filter were kept in tun_struct since they were used for filtering for the
  * netdevice not for a specific queue (at least I didn't see the requirement for
@@ -152,6 +153,7 @@ struct tun_flow_entry {
 	struct tun_struct *tun;
 
 	u32 rxhash;
+	u32 rps_rxhash;
 	int queue_index;
 	unsigned long updated;
 };
@@ -220,6 +222,7 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
 			  rxhash, queue_index);
 	e->updated = jiffies;
 	e->rxhash = rxhash;
+	e->rps_rxhash = 0;
 	e->queue_index = queue_index;
 	e->tun = tun;
 	hlist_add_head_rcu(&e->hash_link, head);
@@ -232,6 +235,7 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
 {
 	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
 		  e->rxhash, e->queue_index);
+	sock_rps_reset_flow_hash(e->rps_rxhash);
 	hlist_del_rcu(&e->hash_link);
 	kfree_rcu(e, rcu);
 	--tun->flow_count;
@@ -325,6 +329,7 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 		/* TODO: keep queueing to old queue until it's empty? */
 		e->queue_index = queue_index;
 		e->updated = jiffies;
+		sock_rps_record_flow_hash(e->rps_rxhash);
 	} else {
 		spin_lock_bh(&tun->lock);
 		if (!tun_flow_find(head, rxhash) &&
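
The two sock_rps_* helpers above drive accelerated RFS: recording a flow hash tells the stack which CPU is consuming that flow (so receive processing can be steered there), and resetting withdraws the hint when the flow entry goes away. As a rough userspace model of the table they update (the real one is rps_sock_flow_table in net/core/dev.c, sized via /proc/sys/net/core/rps_sock_flow_entries; all names below are illustrative, not the kernel's):

#define MODEL_NO_CPU 0xffffu

struct model_flow_table {
	unsigned int mask;	/* entries - 1; table size is a power of two */
	unsigned short cpu[];	/* desired CPU per flow-hash bucket */
};

/* record: "steer packets with this hash toward this CPU" */
static void model_record_flow(struct model_flow_table *t,
			      unsigned int hash, unsigned short cur_cpu)
{
	t->cpu[hash & t->mask] = cur_cpu;
}

/* reset: "no preference any more", e.g. when a tun flow entry is deleted */
static void model_reset_flow(struct model_flow_table *t, unsigned int hash)
{
	t->cpu[hash & t->mask] = MODEL_NO_CPU;
}
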
@@ -341,15 +346,27 @@ unlock:
 	rcu_read_unlock();
 }
 
+/**
+ * Save the hash received in the stack receive path and update the
+ * flow_hash table accordingly.
+ */
+static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
+{
+	if (unlikely(e->rps_rxhash != hash)) {
+		sock_rps_reset_flow_hash(e->rps_rxhash);
+		e->rps_rxhash = hash;
+	}
+}
+
 /* We try to identify a flow through its rxhash first. The reason that
- * we do not check rxq no. is becuase some cards(e.g 82599), chooses
+ * we do not check rxq no. is because some cards(e.g 82599), chooses
  * the rxq based on the txq where the last packet of the flow comes. As
  * the userspace application move between processors, we may get a
  * different rxq no. here. If we could not get rxhash, then we would
  * hope the rxq no. may help here.
  */
 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    void *accel_priv)
+			    void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct tun_struct *tun = netdev_priv(dev);
 	struct tun_flow_entry *e;
@@ -359,12 +376,13 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
 	rcu_read_lock();
 	numqueues = ACCESS_ONCE(tun->numqueues);
 
-	txq = skb_get_rxhash(skb);
+	txq = skb_get_hash(skb);
 	if (txq) {
 		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
-		if (e)
+		if (e) {
+			tun_flow_save_rps_rxhash(e, txq);
 			txq = e->queue_index;
-		else
+		} else
 			/* use multiply and shift instead of expensive divide */
 			txq = ((u64)txq * numqueues) >> 32;
 	} else if (likely(skb_rx_queue_recorded(skb))) {
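
When no flow entry matches, the fallback maps the hash onto a queue with a multiply and shift rather than a modulo. A well-mixed 32-bit hash can be read as a fixed-point fraction of the 32-bit range, so scaling by numqueues and keeping the top 32 bits lands uniformly in [0, numqueues). A standalone sketch of the same trick (names here are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit hash to [0, n) without a divide: treat the hash as a
 * fixed-point fraction in [0, 1) and scale it by n.
 */
static uint32_t hash_to_queue(uint32_t hash, uint32_t n)
{
	return (uint32_t)(((uint64_t)hash * n) >> 32);
}

int main(void)
{
	/* 0xC0000000 is 3/4 of the 32-bit range, so with 4 queues it
	 * selects queue 3; 0x40000000 (1/4 of the range) selects queue 1.
	 */
	printf("%u\n", (unsigned)hash_to_queue(0xC0000000u, 4));	/* 3 */
	printf("%u\n", (unsigned)hash_to_queue(0x40000000u, 4));	/* 1 */
	return 0;
}
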
@@ -532,7 +550,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
 
 	err = 0;
 
-	/* Re-attach the filter to presist device */
+	/* Re-attach the filter to persist device */
 	if (!skip_filter && (tun->filter_attached == true)) {
 		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
 		if (!err)
@@ -721,14 +739,32 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct tun_struct *tun = netdev_priv(dev);
 	int txq = skb->queue_mapping;
 	struct tun_file *tfile;
+	u32 numqueues = 0;
 
 	rcu_read_lock();
 	tfile = rcu_dereference(tun->tfiles[txq]);
+	numqueues = ACCESS_ONCE(tun->numqueues);
 
 	/* Drop packet if interface is not attached */
-	if (txq >= tun->numqueues)
+	if (txq >= numqueues)
 		goto drop;
 
+	if (numqueues == 1) {
+		/* Select queue was not called for the skbuff, so we extract the
+		 * RPS hash and save it into the flow_table here.
+		 */
+		__u32 rxhash;
+
+		rxhash = skb_get_hash(skb);
+		if (rxhash) {
+			struct tun_flow_entry *e;
+			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
+					  rxhash);
+			if (e)
+				tun_flow_save_rps_rxhash(e, rxhash);
+		}
+	}
+
 	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
 
 	BUG_ON(!tfile);
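
The numqueues == 1 special case exists because the core transmit path only consults a driver's ndo_select_queue() when the device has more than one real TX queue; on a single-queue tun, tun_select_queue() is never invoked, so without this block the flow entry would never pick up the stack-side hash and the RFS steering hint would go stale.
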
@@ -746,8 +782,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Limit the number of packets queued by dividing txq length with the
 	 * number of queues.
 	 */
-	if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
-	    >= dev->tx_queue_len / tun->numqueues)
+	if (skb_queue_len(&tfile->socket.sk->sk_receive_queue) * numqueues
+	    >= dev->tx_queue_len)
 		goto drop;
 
 	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
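
The rewritten limit is the same bound expressed without a division: previously each queue was capped at tx_queue_len / numqueues packets, now a packet is dropped once queue_len * numqueues >= tx_queue_len. With tx_queue_len = 500 and 8 queues, the old check dropped at a queue length of 62 (500 / 8, rounded down) and the new one at 63 (the smallest length where len * 8 >= 500). Beyond shaving a divide off the hot path, comparing against the numqueues snapshot taken under rcu_read_lock() avoids re-reading tun->numqueues after the attach check, where a concurrent detach could have dropped it to zero and turned the old division into a divide-by-zero.
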
@@ -820,9 +856,9 @@ static void tun_poll_controller(struct net_device *dev)
  * Tun only receives frames when:
  * 1) the char device endpoint gets data from user space
  * 2) the tun socket gets a sendmsg call from user space
- * Since both of those are syncronous operations, we are guaranteed
+ * Since both of those are synchronous operations, we are guaranteed
  * never to have pending data when we poll for it
- * so theres nothing to do here but return.
+ * so there is nothing to do here but return.
  * We need this though so netpoll recognizes us as an interface that
  * supports polling, which enables bridge devices in virt setups to
  * still use netconsole
@@ -1147,7 +1183,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	skb_reset_network_header(skb);
 	skb_probe_transport_header(skb, 0);
 
-	rxhash = skb_get_rxhash(skb);
+	rxhash = skb_get_hash(skb);
 	netif_rx_ni(skb);
 
 	tun->dev->stats.rx_packets++;
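
This hunk and the tun_select_queue() one above are mechanical fallout of the upstream flow-dissector rename: skb_get_rxhash() became skb_get_hash(). The behavior is unchanged: compute the skb's flow hash on first use and cache it in the skb.
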
@@ -1292,8 +1328,7 @@ done:
 }
 
 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
-			   struct kiocb *iocb, const struct iovec *iv,
-			   ssize_t len, int noblock)
+			   const struct iovec *iv, ssize_t len, int noblock)
 {
 	DECLARE_WAITQUEUE(wait, current);
 	struct sk_buff *skb;
@@ -1356,7 +1391,7 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
 		goto out;
 	}
 
-	ret = tun_do_read(tun, tfile, iocb, iv, len,
+	ret = tun_do_read(tun, tfile, iv, len,
 			  file->f_flags & O_NONBLOCK);
 	ret = min_t(ssize_t, ret, len);
 	if (ret > 0)
@@ -1457,7 +1492,7 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
 					 SOL_PACKET, TUN_TX_TIMESTAMP);
 		goto out;
 	}
-	ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
+	ret = tun_do_read(tun, tfile, m->msg_iov, total_len,
 			  flags & MSG_DONTWAIT);
 	if (ret > total_len) {
 		m->msg_flags |= MSG_TRUNC;
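
Both call sites change in step with the tun_do_read() hunk above: the struct kiocb argument was not used inside tun_do_read(), so it is dropped from the signature and from each caller.
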
@@ -1651,7 +1686,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
 				   NETIF_F_HW_VLAN_STAG_TX;
 		dev->features = dev->hw_features;
-		dev->vlan_features = dev->features;
+		dev->vlan_features = dev->features &
+				     ~(NETIF_F_HW_VLAN_CTAG_TX |
+				       NETIF_F_HW_VLAN_STAG_TX);
 
 		INIT_LIST_HEAD(&tun->disabled);
 		err = tun_attach(tun, file, false);
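
Masking the VLAN tag-insertion bits out of vlan_features matters because vlan_features is what stacked VLAN devices inherit: leaving NETIF_F_HW_VLAN_CTAG_TX/STAG_TX set there would let a VLAN device created on top of the tap advertise hardware insertion of a second (QinQ) tag, which the tap cannot actually perform.
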
@@ -2194,6 +2231,27 @@ static int tun_chr_close(struct inode *inode, struct file *file)
 	return 0;
 }
 
+#ifdef CONFIG_PROC_FS
+static int tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
+{
+	struct tun_struct *tun;
+	struct ifreq ifr;
+
+	memset(&ifr, 0, sizeof(ifr));
+
+	rtnl_lock();
+	tun = tun_get(f);
+	if (tun)
+		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
+	rtnl_unlock();
+
+	if (tun)
+		tun_put(tun);
+
+	return seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
+}
+#endif
+
 static const struct file_operations tun_fops = {
 	.owner = THIS_MODULE,
 	.llseek = no_llseek,
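
With the fdinfo hook registered below, procfs can report which interface a tun file descriptor is attached to. Reading /proc/<pid>/fdinfo/<fd> for an attached descriptor should then show, after the generic pos/flags fields, a line like the following (tun0 is an illustrative interface name):

iff:	tun0
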
@@ -2208,7 +2266,10 @@ static const struct file_operations tun_fops = {
 #endif
 	.open = tun_chr_open,
 	.release = tun_chr_close,
-	.fasync = tun_chr_fasync
+	.fasync = tun_chr_fasync,
+#ifdef CONFIG_PROC_FS
+	.show_fdinfo = tun_chr_show_fdinfo,
+#endif
 };
 
 static struct miscdevice tun_miscdev = {