Diffstat (limited to 'drivers/net/tun.c')
-rw-r--r--  drivers/net/tun.c  60
1 file changed, 46 insertions(+), 14 deletions(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7c8343a4f918..09f66624eaca 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -110,7 +110,7 @@ struct tap_filter {
 	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
 };
 
-/* DEFAULT_MAX_NUM_RSS_QUEUES were choosed to let the rx/tx queues allocated for
+/* DEFAULT_MAX_NUM_RSS_QUEUES were chosen to let the rx/tx queues allocated for
  * the netdevice to be fit in one page. So we can make sure the success of
  * memory allocation. TODO: increase the limit. */
 #define MAX_TAP_QUEUES	DEFAULT_MAX_NUM_RSS_QUEUES
@@ -119,7 +119,7 @@ struct tap_filter {
 #define TUN_FLOW_EXPIRE (3 * HZ)
 
 /* A tun_file connects an open character device to a tuntap netdevice. It
- * also contains all socket related strctures (except sock_fprog and tap_filter)
+ * also contains all socket related structures (except sock_fprog and tap_filter)
  * to serve as one transmit queue for tuntap device. The sock_fprog and
  * tap_filter were kept in tun_struct since they were used for filtering for the
  * netdevice not for a specific queue (at least I didn't see the requirement for
@@ -152,6 +152,7 @@ struct tun_flow_entry {
 	struct tun_struct *tun;
 
 	u32 rxhash;
+	u32 rps_rxhash;
 	int queue_index;
 	unsigned long updated;
 };
@@ -220,6 +221,7 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
 			  rxhash, queue_index);
 		e->updated = jiffies;
 		e->rxhash = rxhash;
+		e->rps_rxhash = 0;
 		e->queue_index = queue_index;
 		e->tun = tun;
 		hlist_add_head_rcu(&e->hash_link, head);
@@ -232,6 +234,7 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
 {
 	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
 		  e->rxhash, e->queue_index);
+	sock_rps_reset_flow_hash(e->rps_rxhash);
 	hlist_del_rcu(&e->hash_link);
 	kfree_rcu(e, rcu);
 	--tun->flow_count;
@@ -325,6 +328,7 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 		/* TODO: keep queueing to old queue until it's empty? */
 		e->queue_index = queue_index;
 		e->updated = jiffies;
+		sock_rps_record_flow_hash(e->rps_rxhash);
 	} else {
 		spin_lock_bh(&tun->lock);
 		if (!tun_flow_find(head, rxhash) &&
@@ -341,8 +345,20 @@ unlock:
 	rcu_read_unlock();
 }
 
+/**
+ * Save the hash received in the stack receive path and update the
+ * flow_hash table accordingly.
+ */
+static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
+{
+	if (unlikely(e->rps_rxhash != hash)) {
+		sock_rps_reset_flow_hash(e->rps_rxhash);
+		e->rps_rxhash = hash;
+	}
+}
+
 /* We try to identify a flow through its rxhash first. The reason that
- * we do not check rxq no. is becuase some cards(e.g 82599), chooses
+ * we do not check rxq no. is because some cards(e.g 82599), chooses
  * the rxq based on the txq where the last packet of the flow comes. As
  * the userspace application move between processors, we may get a
  * different rxq no. here. If we could not get rxhash, then we would
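Note: the helper added above touches the RFS state only when the flow's
hash actually changes, so the common case of a stable flow costs a single
compare. The following minimal userspace sketch models that pattern; every
name in it is an illustrative stand-in, not the kernel API:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the kernel's sock_rps_reset_flow_hash(). */
    static void rps_reset_flow_hash(uint32_t hash)
    {
            printf("reset RFS table entry for hash %#x\n", (unsigned)hash);
    }

    struct flow_entry {
            uint32_t rxhash;      /* hash keying the tun flow table */
            uint32_t rps_rxhash;  /* last hash handed to the RFS table */
    };

    /* Same shape as tun_flow_save_rps_rxhash(): only touch the RFS
     * state when the hash changed, so a steady flow does no work here. */
    static void flow_save_rps_rxhash(struct flow_entry *e, uint32_t hash)
    {
            if (e->rps_rxhash != hash) {
                    rps_reset_flow_hash(e->rps_rxhash);
                    e->rps_rxhash = hash;
            }
    }

    int main(void)
    {
            struct flow_entry e = { .rxhash = 0xabcd, .rps_rxhash = 0 };

            flow_save_rps_rxhash(&e, 0x1234);  /* first packet: hash stored */
            flow_save_rps_rxhash(&e, 0x1234);  /* same flow: no RFS churn */
            flow_save_rps_rxhash(&e, 0x5678);  /* rehash: old entry reset */
            return 0;
    }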
@@ -358,12 +374,13 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
 	rcu_read_lock();
 	numqueues = ACCESS_ONCE(tun->numqueues);
 
-	txq = skb_get_rxhash(skb);
+	txq = skb_get_hash(skb);
 	if (txq) {
 		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
-		if (e)
+		if (e) {
+			tun_flow_save_rps_rxhash(e, txq);
 			txq = e->queue_index;
-		else
+		} else
 			/* use multiply and shift instead of expensive divide */
 			txq = ((u64)txq * numqueues) >> 32;
 	} else if (likely(skb_rx_queue_recorded(skb))) {
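Note: the "multiply and shift" fallback reduces a 32-bit hash to the range
[0, numqueues) without a division: when the hash is uniform over 2^32,
((u64)hash * n) >> 32 is near-uniform over [0, n). A standalone
illustration (hash_to_queue is a name made up for this sketch):

    #include <stdint.h>
    #include <stdio.h>

    /* Reduce a 32-bit hash to [0, n) with a multiply and a shift:
     * ((uint64_t)hash * n) >> 32 == floor(hash * n / 2^32). */
    static uint32_t hash_to_queue(uint32_t hash, uint32_t n)
    {
            return (uint32_t)(((uint64_t)hash * n) >> 32);
    }

    int main(void)
    {
            uint32_t hashes[] = { 0x0u, 0x40000000u, 0x80000000u, 0xffffffffu };
            unsigned i;

            for (i = 0; i < 4; i++)
                    printf("%#010x -> queue %u of 4\n", (unsigned)hashes[i],
                           (unsigned)hash_to_queue(hashes[i], 4));
            return 0;
    }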
@@ -531,7 +548,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
 
 	err = 0;
 
-	/* Re-attach the filter to presist device */
+	/* Re-attach the filter to persist device */
 	if (!skip_filter && (tun->filter_attached == true)) {
 		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
 		if (!err)
@@ -728,6 +745,22 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (txq >= tun->numqueues)
 		goto drop;
 
+	if (tun->numqueues == 1) {
+		/* Select queue was not called for the skbuff, so we extract the
+		 * RPS hash and save it into the flow_table here.
+		 */
+		__u32 rxhash;
+
+		rxhash = skb_get_hash(skb);
+		if (rxhash) {
+			struct tun_flow_entry *e;
+			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
+					  rxhash);
+			if (e)
+				tun_flow_save_rps_rxhash(e, rxhash);
+		}
+	}
+
 	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
 
 	BUG_ON(!tfile);
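Note: tun_net_xmit() needs this block because the networking core only asks
a driver to pick a queue when the device has more than one transmit queue;
on a single-queue tun device tun_select_queue() never runs, so the RPS hash
must be recorded in the xmit path instead. A rough model of that control
flow (a toy pseudo-driver for illustration, not the real netdev core):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t driver_select_queue(uint32_t hash, unsigned int numqueues)
    {
            printf("select_queue path records hash %#x\n", (unsigned)hash);
            return (uint16_t)(((uint64_t)hash * numqueues) >> 32);
    }

    static void driver_xmit(uint32_t hash, unsigned int numqueues)
    {
            if (numqueues == 1 && hash)
                    printf("xmit path records hash %#x\n", (unsigned)hash);
            /* ...queue the packet... */
    }

    /* The core consults the driver's queue-selection hook only for
     * multiqueue devices, mirroring the situation the patch handles. */
    static void core_transmit(uint32_t hash, unsigned int numqueues)
    {
            if (numqueues > 1)
                    (void)driver_select_queue(hash, numqueues);
            driver_xmit(hash, numqueues);
    }

    int main(void)
    {
            core_transmit(0xbeef, 4);  /* multiqueue: select_queue records */
            core_transmit(0xbeef, 1);  /* single queue: xmit must record */
            return 0;
    }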
@@ -819,9 +852,9 @@ static void tun_poll_controller(struct net_device *dev)
 	 * Tun only receives frames when:
 	 * 1) the char device endpoint gets data from user space
 	 * 2) the tun socket gets a sendmsg call from user space
-	 * Since both of those are syncronous operations, we are guaranteed
+	 * Since both of those are synchronous operations, we are guaranteed
 	 * never to have pending data when we poll for it
-	 * so theres nothing to do here but return.
+	 * so there is nothing to do here but return.
 	 * We need this though so netpoll recognizes us as an interface that
 	 * supports polling, which enables bridge devices in virt setups to
 	 * still use netconsole
@@ -1146,7 +1179,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	skb_reset_network_header(skb);
 	skb_probe_transport_header(skb, 0);
 
-	rxhash = skb_get_rxhash(skb);
+	rxhash = skb_get_hash(skb);
 	netif_rx_ni(skb);
 
 	tun->dev->stats.rx_packets++;
@@ -1291,8 +1324,7 @@ done:
 }
 
 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
-			   struct kiocb *iocb, const struct iovec *iv,
-			   ssize_t len, int noblock)
+			   const struct iovec *iv, ssize_t len, int noblock)
 {
 	DECLARE_WAITQUEUE(wait, current);
 	struct sk_buff *skb;
@@ -1355,7 +1387,7 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
 		goto out;
 	}
 
-	ret = tun_do_read(tun, tfile, iocb, iv, len,
+	ret = tun_do_read(tun, tfile, iv, len,
 			  file->f_flags & O_NONBLOCK);
 	ret = min_t(ssize_t, ret, len);
 	if (ret > 0)
@@ -1456,7 +1488,7 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
 			       SOL_PACKET, TUN_TX_TIMESTAMP);
 		goto out;
 	}
-	ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
+	ret = tun_do_read(tun, tfile, m->msg_iov, total_len,
 			  flags & MSG_DONTWAIT);
 	if (ret > total_len) {
 		m->msg_flags |= MSG_TRUNC;