| author | Matthew Cover <werekraken@gmail.com> | 2018-11-18 02:46:00 -0500 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2018-11-18 22:05:43 -0500 |
| commit | 8ebebcba559a1bfbaec7bbda64feb9870b9c58da | |
| tree | 8cccb1975f0c8563935257b8abcccc2b28d66394 | |
| parent | 7ddacfa564870cdd97275fd87decb6174abc6380 | |
tuntap: fix multiqueue rx
When writing packets to a descriptor associated with a combined queue, the
packets should end up on that queue.
Before this change, all packets written to any descriptor associated with a
tap interface end up on rx-0, even when the descriptor is associated with a
different queue.
The rx traffic can be generated by either of the following.
1. a simple tap program which spins up multiple queues and writes packets
to each of the file descriptors (a sketch follows this list)
2. tx from a qemu vm with a tap multiqueue netdev
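A minimal sketch of such a multiqueue tap writer, assuming the interface name
"tap0", four queues, and a dummy frame (none of which come from the original
test program):

```c
/* Hypothetical multiqueue tap writer: "tap0", NQUEUES and the dummy frame
 * are placeholders, not taken from the original test program. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

#define NQUEUES 4

int main(void)
{
	int fds[NQUEUES];
	struct ifreq ifr;
	unsigned char frame[64] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* dst: broadcast */
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01,	/* src: locally administered */
		0x08, 0x00,				/* ethertype, payload left zeroed */
	};

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;

	/* Repeating TUNSETIFF with the same name attaches one more queue each time. */
	for (int i = 0; i < NQUEUES; i++) {
		fds[i] = open("/dev/net/tun", O_RDWR);
		if (fds[i] < 0 || ioctl(fds[i], TUNSETIFF, &ifr) < 0) {
			perror("tap queue setup");
			return 1;
		}
	}

	/* Bring the interface up out of band, e.g. "ip link set tap0 up". */

	/* One packet per queue descriptor. */
	for (int i = 0; i < NQUEUES; i++)
		if (write(fds[i], frame, sizeof(frame)) < 0)
			perror("write");

	for (int i = 0; i < NQUEUES; i++)
		close(fds[i]);
	return 0;
}
```

With the fix applied, the packet written to queue descriptor N should be
delivered on the matching rx-N queue of tap0.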
The queue for rx traffic can be observed by either of the following (done
on the hypervisor in the qemu case).
1. a simple netmap program which opens and reads from per-queue
descriptors (a sketch follows this list)
2. configuring RPS and doing per-cpu captures with rxtxcpu
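A per-queue reader along the lines of item 1 could look roughly like the
following netmap sketch; "tap0" and ring index 0 are placeholder choices, and
it assumes the netmap module is loaded and uses the helper wrappers from
net/netmap_user.h rather than the author's actual tool:

```c
/* Hypothetical single-ring netmap reader: binds only ring pair 0 of "tap0"
 * and prints a line for every packet seen on that ring. */
#define NETMAP_WITH_LIBS
#include <net/netmap_user.h>
#include <poll.h>
#include <stdio.h>

int main(void)
{
	/* "netmap:tap0-0" registers ring pair 0 only; use -1, -2, ... for others. */
	struct nm_desc *d = nm_open("netmap:tap0-0", NULL, 0, NULL);
	struct pollfd pfd;
	struct nm_pkthdr h;

	if (d == NULL) {
		perror("nm_open");
		return 1;
	}

	pfd.fd = d->fd;
	pfd.events = POLLIN;

	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;
		/* Drain whatever landed on this ring only. */
		while (nm_nextpkt(d, &h))
			printf("ring 0: packet of %u bytes\n", h.len);
	}

	nm_close(d);
	return 0;
}
```

Running one such reader per ring shows which rx queue each written packet
actually lands on.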
Alternatively, if you printk() the return value of skb_get_rx_queue() just
before each instance of netif_receive_skb() in tun.c, you will get 65535
for every skb.
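The 65535 is what skb_get_rx_queue() reports when no rx queue has been
recorded: it returns skb->queue_mapping - 1, and with queue_mapping still 0
the u16 result wraps to 65535. An illustrative debug line of the kind
described (not part of the patch) would be:

```c
/* Placed just before a netif_receive_skb() call in drivers/net/tun.c;
 * without the fix this prints 65535 for every skb. */
pr_info("tun: skb_get_rx_queue() = %u\n", skb_get_rx_queue(skb));
netif_receive_skb(skb);
```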
Calling skb_record_rx_queue() to set the rx queue to the queue_index fixes
the association between descriptor and rx queue.
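For reference, the two helpers involved look roughly like this in
include/linux/skbuff.h; because the recorded value is stored off by one,
recording queue_index makes a later skb_get_rx_queue() return exactly that
index:

```c
/* Paraphrased from include/linux/skbuff.h: queue_mapping == 0 means
 * "no rx queue recorded", so the value is stored shifted by one. */
static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}
```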
Signed-off-by: Matthew Cover <matthew.cover@stackpath.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/tun.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 060135ceaf0e..e244f5d7512a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1536,6 +1536,7 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
 
 	if (!rx_batched || (!more && skb_queue_empty(queue))) {
 		local_bh_disable();
+		skb_record_rx_queue(skb, tfile->queue_index);
 		netif_receive_skb(skb);
 		local_bh_enable();
 		return;
@@ -1555,8 +1556,11 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
 		struct sk_buff *nskb;
 
 		local_bh_disable();
-		while ((nskb = __skb_dequeue(&process_queue)))
+		while ((nskb = __skb_dequeue(&process_queue))) {
+			skb_record_rx_queue(nskb, tfile->queue_index);
 			netif_receive_skb(nskb);
+		}
+		skb_record_rx_queue(skb, tfile->queue_index);
 		netif_receive_skb(skb);
 		local_bh_enable();
 	}
@@ -2451,6 +2455,7 @@ build:
 	if (!rcu_dereference(tun->steering_prog))
 		rxhash = __skb_get_hash_symmetric(skb);
 
+	skb_record_rx_queue(skb, tfile->queue_index);
 	netif_receive_skb(skb);
 
 	stats = get_cpu_ptr(tun->pcpu_stats);
