 drivers/net/tun.c  | 37 +++++++++++++++++++++++++++++++++++--
 include/net/sock.h | 18 ++++++++++++++----
 2 files changed, 49 insertions(+), 6 deletions(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a17a7018db19..3cf0457f5c69 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -152,6 +152,7 @@ struct tun_flow_entry {
 	struct tun_struct *tun;
 
 	u32 rxhash;
+	u32 rps_rxhash;
 	int queue_index;
 	unsigned long updated;
 };
@@ -220,6 +221,7 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
 			  rxhash, queue_index);
 		e->updated = jiffies;
 		e->rxhash = rxhash;
+		e->rps_rxhash = 0;
 		e->queue_index = queue_index;
 		e->tun = tun;
 		hlist_add_head_rcu(&e->hash_link, head);
@@ -232,6 +234,7 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
 {
 	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
 		  e->rxhash, e->queue_index);
+	sock_rps_reset_flow_hash(e->rps_rxhash);
 	hlist_del_rcu(&e->hash_link);
 	kfree_rcu(e, rcu);
 	--tun->flow_count;
@@ -325,6 +328,7 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 		/* TODO: keep queueing to old queue until it's empty? */
 		e->queue_index = queue_index;
 		e->updated = jiffies;
+		sock_rps_record_flow_hash(e->rps_rxhash);
 	} else {
 		spin_lock_bh(&tun->lock);
 		if (!tun_flow_find(head, rxhash) &&
@@ -341,6 +345,18 @@ unlock:
 	rcu_read_unlock();
 }
 
+/**
+ * Save the hash received in the stack receive path and update the
+ * flow_hash table accordingly.
+ */
+static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
+{
+	if (unlikely(e->rps_rxhash != hash)) {
+		sock_rps_reset_flow_hash(e->rps_rxhash);
+		e->rps_rxhash = hash;
+	}
+}
+
 /* We try to identify a flow through its rxhash first. The reason that
  * we do not check rxq no. is because some cards(e.g 82599), chooses
  * the rxq based on the txq where the last packet of the flow comes. As
@@ -361,9 +377,10 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
 	txq = skb_get_hash(skb);
 	if (txq) {
 		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
-		if (e)
+		if (e) {
 			txq = e->queue_index;
-		else
+			tun_flow_save_rps_rxhash(e, txq);
+		} else
 			/* use multiply and shift instead of expensive divide */
 			txq = ((u64)txq * numqueues) >> 32;
 	} else if (likely(skb_rx_queue_recorded(skb))) {
@@ -728,6 +745,22 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (txq >= tun->numqueues)
 		goto drop;
 
+	if (tun->numqueues == 1) {
+		/* Select queue was not called for the skbuff, so we extract the
+		 * RPS hash and save it into the flow_table here.
+		 */
+		__u32 rxhash;
+
+		rxhash = skb_get_hash(skb);
+		if (rxhash) {
+			struct tun_flow_entry *e;
+			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
+					  rxhash);
+			if (e)
+				tun_flow_save_rps_rxhash(e, rxhash);
+		}
+	}
+
 	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
 
 	BUG_ON(!tfile);
diff --git a/include/net/sock.h b/include/net/sock.h
index 2ef3c3eca47a..8ee90add69d2 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -820,30 +820,40 @@ static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	return sk->sk_backlog_rcv(sk, skb);
 }
 
-static inline void sock_rps_record_flow(const struct sock *sk)
+static inline void sock_rps_record_flow_hash(__u32 hash)
 {
 #ifdef CONFIG_RPS
 	struct rps_sock_flow_table *sock_flow_table;
 
 	rcu_read_lock();
 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
-	rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
+	rps_record_sock_flow(sock_flow_table, hash);
 	rcu_read_unlock();
 #endif
 }
 
-static inline void sock_rps_reset_flow(const struct sock *sk)
+static inline void sock_rps_reset_flow_hash(__u32 hash)
 {
 #ifdef CONFIG_RPS
 	struct rps_sock_flow_table *sock_flow_table;
 
 	rcu_read_lock();
 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
-	rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
+	rps_reset_sock_flow(sock_flow_table, hash);
 	rcu_read_unlock();
 #endif
 }
 
+static inline void sock_rps_record_flow(const struct sock *sk)
+{
+	sock_rps_record_flow_hash(sk->sk_rxhash);
+}
+
+static inline void sock_rps_reset_flow(const struct sock *sk)
+{
+	sock_rps_reset_flow_hash(sk->sk_rxhash);
+}
+
 static inline void sock_rps_save_rxhash(struct sock *sk,
 					const struct sk_buff *skb)
 {