author     Wei Liu <wei.liu2@citrix.com>          2014-08-12 06:48:07 -0400
committer  David S. Miller <davem@davemloft.net>  2014-08-13 23:07:44 -0400
commit     a64bd934528e26e8956112e43a279fba2ee0634e (patch)
tree       304e4fca43ec1c2907bb0db71f50445ef5fcae32
parent     ea2c5e134237eadc9924ce821ded678750024549 (diff)
xen-netback: don't stop dealloc kthread too early

Reference count the number of packets in the host stack, so that we
don't stop the deallocation thread too early. Otherwise we can end up
with xenvif_free() waiting forever for the deallocation thread to
unmap grant references (grefs).

Reported-by: Thomas Leonard <talex5@gmail.com>
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Zoltan Kiss <zoltan.kiss@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/xen-netback/common.h     5
-rw-r--r--  drivers/net/xen-netback/interface.c  18
-rw-r--r--  drivers/net/xen-netback/netback.c    26
3 files changed, 42 insertions(+), 7 deletions(-)
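To make the scheme easier to follow outside the driver, here is a minimal user-space C analogue of the counting this patch introduces. It is a sketch, not driver code: struct pkt_queue and the helper names below are illustrative stand-ins for struct xenvif_queue and the xenvif_skb_zerocopy_* helpers added by the diff that follows.

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative stand-in for struct xenvif_queue; only the state this
 * patch adds (the inflight counter) plus a stop flag is modelled.
 */
struct pkt_queue {
        atomic_int inflight_packets;   /* packets still owned by the host stack */
        atomic_bool stop_requested;    /* set by the teardown path */
};

/* Analogue of xenvif_skb_zerocopy_prepare(): every packet handed to
 * the stack with the zerocopy flag set is counted, because the stack
 * will invoke the completion callback for it exactly once later on.
 */
static void zerocopy_prepare(struct pkt_queue *q)
{
        atomic_fetch_add(&q->inflight_packets, 1);
}

/* Analogue of xenvif_skb_zerocopy_complete(): the completion callback
 * drops the count once the stack is done with the packet.
 */
static void zerocopy_complete(struct pkt_queue *q)
{
        atomic_fetch_sub(&q->inflight_packets, 1);
}

/* Analogue of xenvif_dealloc_kthread_should_stop(): the dealloc thread
 * may exit only once a stop was requested AND nothing is in flight, so
 * no grant reference is left mapped.
 */
static bool dealloc_should_stop(struct pkt_queue *q)
{
        return atomic_load(&q->stop_requested) &&
               atomic_load(&q->inflight_packets) == 0;
}

The point of the pairing is that every increment in zerocopy_prepare() is matched by exactly one decrement in zerocopy_complete(), so a zero count reliably means the host stack holds no references to granted pages.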
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index ef3026f46a37..d4eb8d2e9cb7 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -165,6 +165,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	u16 dealloc_ring[MAX_PENDING_REQS];
 	struct task_struct *dealloc_task;
 	wait_queue_head_t dealloc_wq;
+	atomic_t inflight_packets;
 
 	/* Use kthread for guest RX */
 	struct task_struct *task;
@@ -329,4 +330,8 @@ extern unsigned int xenvif_max_queues;
 extern struct dentry *xen_netback_dbg_root;
 #endif
 
+void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
+				 struct sk_buff *skb);
+void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);
+
 #endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 5f3d6c06fcf7..0aaca902699a 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,23 @@
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT  64
 
+/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
+ * increasing the inflight counter. We need to increase the inflight
+ * counter because core driver calls into xenvif_zerocopy_callback
+ * which calls xenvif_skb_zerocopy_complete.
+ */
+void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
+				 struct sk_buff *skb)
+{
+	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+	atomic_inc(&queue->inflight_packets);
+}
+
+void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
+{
+	atomic_dec(&queue->inflight_packets);
+}
+
 static inline void xenvif_stop_queue(struct xenvif_queue *queue)
 {
 	struct net_device *dev = queue->vif->dev;
@@ -557,6 +574,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 
 	init_waitqueue_head(&queue->wq);
 	init_waitqueue_head(&queue->dealloc_wq);
+	atomic_set(&queue->inflight_packets, 0);
 
 	if (tx_evtchn == rx_evtchn) {
 		/* feature-split-event-channels == 0 */
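The hunk above also shows why the flag and the counter must move together: SKBTX_DEV_ZEROCOPY is what makes the core network stack invoke the skb's destructor callback (xenvif_zerocopy_callback), and that callback is where the count is dropped again. A hypothetical user-space continuation of the earlier sketch, with fake_skb and stack_release as invented names:

/* Hypothetical model of the stack side of the contract: the callback
 * fires once per packet that was prepared, balancing the increment.
 */
struct fake_skb {
        bool zerocopy;                         /* stands in for SKBTX_DEV_ZEROCOPY */
        void (*callback)(struct pkt_queue *q); /* like ubuf_info->callback */
};

static void stack_release(struct pkt_queue *q, struct fake_skb *skb)
{
        /* The stack calls back exactly once when it releases the
         * packet; zerocopy_complete() runs inside that callback.
         */
        if (skb->zerocopy && skb->callback)
                skb->callback(q);
}

Setting the flag without bumping the counter (or vice versa) would break the invariant, which is why the patch funnels both through xenvif_skb_zerocopy_prepare().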
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4734472aa620..08f65996534c 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1525,10 +1525,12 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 	/* remove traces of mapped pages and frag_list */
 	skb_frag_list_init(skb);
 	uarg = skb_shinfo(skb)->destructor_arg;
+	/* increase inflight counter to offset decrement in callback */
+	atomic_inc(&queue->inflight_packets);
 	uarg->callback(uarg, true);
 	skb_shinfo(skb)->destructor_arg = NULL;
 
-	skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+	xenvif_skb_zerocopy_prepare(queue, nskb);
 	kfree_skb(nskb);
 
 	return 0;
@@ -1589,7 +1591,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 			if (net_ratelimit())
 				netdev_err(queue->vif->dev,
 					   "Not enough memory to consolidate frag_list!\n");
-			skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+			xenvif_skb_zerocopy_prepare(queue, skb);
 			kfree_skb(skb);
 			continue;
 		}
@@ -1609,7 +1611,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 				   "Can't setup checksum in net_tx_action\n");
 			/* We have to set this flag to trigger the callback */
 			if (skb_shinfo(skb)->destructor_arg)
-				skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+				xenvif_skb_zerocopy_prepare(queue, skb);
 			kfree_skb(skb);
 			continue;
 		}
@@ -1641,7 +1643,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
 		 */
 		if (skb_shinfo(skb)->destructor_arg) {
-			skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+			xenvif_skb_zerocopy_prepare(queue, skb);
 			queue->stats.tx_zerocopy_sent++;
 		}
 
@@ -1681,6 +1683,7 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
 		queue->stats.tx_zerocopy_success++;
 	else
 		queue->stats.tx_zerocopy_fail++;
+	xenvif_skb_zerocopy_complete(queue);
 }
 
 static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
@@ -2058,15 +2061,24 @@ int xenvif_kthread_guest_rx(void *data)
 	return 0;
 }
 
+static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
+{
+	/* Dealloc thread must remain running until all inflight
+	 * packets complete.
+	 */
+	return kthread_should_stop() &&
+		!atomic_read(&queue->inflight_packets);
+}
+
 int xenvif_dealloc_kthread(void *data)
 {
 	struct xenvif_queue *queue = data;
 
-	while (!kthread_should_stop()) {
+	for (;;) {
 		wait_event_interruptible(queue->dealloc_wq,
 					 tx_dealloc_work_todo(queue) ||
-					 kthread_should_stop());
-		if (kthread_should_stop())
+					 xenvif_dealloc_kthread_should_stop(queue));
+		if (xenvif_dealloc_kthread_should_stop(queue))
 			break;
 
 		xenvif_tx_dealloc_action(queue);
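The reworked loop above is the consumer of the counter. Below is a user-space analogue of the new shutdown behaviour, continuing the earlier sketch; the pthread condvar stands in for the kernel waitqueue, and work_todo()/do_dealloc_work() are hypothetical placeholders for tx_dealloc_work_todo() and xenvif_tx_dealloc_action().

#include <pthread.h>
#include <stdbool.h>

extern bool work_todo(struct pkt_queue *q);       /* placeholder */
extern void do_dealloc_work(struct pkt_queue *q); /* placeholder */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t dealloc_wq = PTHREAD_COND_INITIALIZER;

static void *dealloc_thread(void *data)
{
        struct pkt_queue *q = data;

        /* for (;;) instead of while (!stop_requested): testing the
         * stop flag alone could exit while packets are still in
         * flight, which is the bug this patch fixes.
         */
        for (;;) {
                pthread_mutex_lock(&lock);
                while (!work_todo(q) && !dealloc_should_stop(q))
                        pthread_cond_wait(&dealloc_wq, &lock);
                pthread_mutex_unlock(&lock);

                if (dealloc_should_stop(q))
                        break;

                do_dealloc_work(q);
        }
        return NULL;
}

As in the kernel version, the stop check happens only after the wait, so a stop request that arrives while packets are in flight parks the thread until the last completion callback drops the count to zero.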