author	Jason Wang <jasowang@redhat.com>	2018-02-07 04:14:46 -0500
committer	David S. Miller <davem@davemloft.net>	2018-02-08 14:10:30 -0500
commit	762c330d670e3d4b795cf7a8d761866fdd1eef49 (patch)
tree	249dbef044fc3ecf0b3f2d449e63080e9f802359
parent	cb9f7a9a5c96a773bbc9c70660dc600cfff82f82 (diff)
tuntap: add missing xdp flush
When using devmap to redirect packets between interfaces, xdp_do_flush()
is usually required to flush any batched packets. Unfortunately this is
missing from the current tuntap implementation.

Unlike most hardware drivers, which run XDP inside the NAPI loop and call
xdp_do_flush() at the end of each round of polling, TAP runs it in process
context, e.g. tun_get_user(). Fix this by counting the pending redirected
packets and flushing once the count exceeds NAPI_POLL_WEIGHT or MSG_MORE
is cleared by the sendmsg() caller.

With this fix, xdp_redirect_map works again between two TAPs.

Fixes: 761876c857cb ("tap: XDP support")
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
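For illustration, the following is a minimal userspace sketch of the batching
policy the patch introduces: count each redirected packet and flush either when
the batch reaches a fixed limit or when the caller signals that no further
packets are queued. All names in the sketch (BATCH_LIMIT, pending, flush_all,
redirect_one) are hypothetical stand-ins; in the driver they correspond to
NAPI_POLL_WEIGHT, tfile->xdp_pending_pkts, xdp_do_flush_map() and the
XDP_REDIRECT case in tun_build_skb().

/* Minimal userspace model of the flush policy (illustrative only). */
#include <stdbool.h>
#include <stdio.h>

#define BATCH_LIMIT 64			/* stand-in for NAPI_POLL_WEIGHT (64) */

static int pending;			/* stand-in for tfile->xdp_pending_pkts */

static void flush_all(void)		/* stand-in for xdp_do_flush_map() */
{
	printf("flushing %d batched packets\n", pending);
	pending = 0;
}

/* One call per redirected packet; 'more' mirrors MSG_MORE from the
 * sendmsg() caller (vhost-net). The write() path has no such hint,
 * so it behaves as if 'more' were always false.
 */
static void redirect_one(bool more)
{
	++pending;
	if (pending >= BATCH_LIMIT || !more)
		flush_all();
}

int main(void)
{
	for (int i = 0; i < 70; i++)
		redirect_one(true);	/* first flush at 64 pending packets */
	redirect_one(false);		/* MSG_MORE cleared: flush the rest */
	return 0;
}

In the diff below the same policy appears at two call sites: tun_sendmsg()
checks both the batch limit and MSG_MORE, while tun_chr_write_iter() has no
MSG_MORE hint and therefore flushes whenever anything is pending.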
-rw-r--r--	drivers/net/tun.c	15
1 file changed, 15 insertions, 0 deletions
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 0dc66e4fbb2c..17e496b88f81 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -181,6 +181,7 @@ struct tun_file {
 	struct tun_struct *detached;
 	struct ptr_ring tx_ring;
 	struct xdp_rxq_info xdp_rxq;
+	int xdp_pending_pkts;
 };
 
 struct tun_flow_entry {
@@ -1665,6 +1666,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	case XDP_REDIRECT:
 		get_page(alloc_frag->page);
 		alloc_frag->offset += buflen;
+		++tfile->xdp_pending_pkts;
 		err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
 		if (err)
 			goto err_redirect;
@@ -1986,6 +1988,11 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	result = tun_get_user(tun, tfile, NULL, from,
 			      file->f_flags & O_NONBLOCK, false);
 
+	if (tfile->xdp_pending_pkts) {
+		tfile->xdp_pending_pkts = 0;
+		xdp_do_flush_map();
+	}
+
 	tun_put(tun);
 	return result;
 }
@@ -2322,6 +2329,13 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 	ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
 			   m->msg_flags & MSG_DONTWAIT,
 			   m->msg_flags & MSG_MORE);
+
+	if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT ||
+	    !(m->msg_flags & MSG_MORE)) {
+		tfile->xdp_pending_pkts = 0;
+		xdp_do_flush_map();
+	}
+
 	tun_put(tun);
 	return ret;
 }
@@ -3153,6 +3167,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
 
 	memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
+	tfile->xdp_pending_pkts = 0;
 
 	return 0;
 }