aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorPaul Durrant <Paul.Durrant@citrix.com>2014-01-08 07:41:58 -0500
committerDavid S. Miller <davem@davemloft.net>2014-01-09 23:05:46 -0500
commit11b57f90257c1d6a91cee720151b69e0c2020cf6 (patch)
tree6da4cdb6bbaf71ab38ad9a133e2391aa4910f32a /drivers/net
parent451cd14e082c631aeaec37bfa872d3fa594d1845 (diff)
xen-netback: stop vif thread spinning if frontend is unresponsive
The recent patch to improve guest receive side flow control (ca2f09f2) had a slight flaw in the wait condition for the vif thread in that any remaining skbs in the guest receive side netback internal queue would prevent the thread from sleeping. An unresponsive frontend can lead to a permanently non-empty internal queue and thus the thread will spin. In this case the thread should really sleep until the frontend becomes responsive again.

This patch adds an extra flag to the vif which is set if the shared ring is full and cleared when skbs are drained into the shared ring. Thus, if the thread runs, finds the shared ring full and can make no progress the flag remains set. If the flag remains set then the thread will sleep, regardless of a non-empty queue, until the next event from the frontend.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/xen-netback/common.h1
-rw-r--r--drivers/net/xen-netback/netback.c14
2 files changed, 10 insertions, 5 deletions
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index c955fc39d69a..4c76bcb9a879 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -143,6 +143,7 @@ struct xenvif {
 	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
 	struct xen_netif_rx_back_ring rx;
 	struct sk_buff_head rx_queue;
+	bool rx_queue_stopped;
 	/* Set when the RX interrupt is triggered by the frontend.
 	 * The worker thread may need to wake the queue.
 	 */
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4f81ac0e2f0a..27385639b6e5 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -476,7 +476,8 @@ static void xenvif_rx_action(struct xenvif *vif)
 	int ret;
 	unsigned long offset;
 	struct skb_cb_overlay *sco;
-	int need_to_notify = 0;
+	bool need_to_notify = false;
+	bool ring_full = false;
 
 	struct netrx_pending_operations npo = {
 		.copy = vif->grant_copy_op,
@@ -508,7 +509,8 @@ static void xenvif_rx_action(struct xenvif *vif)
 		/* If the skb may not fit then bail out now */
 		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
 			skb_queue_head(&vif->rx_queue, skb);
-			need_to_notify = 1;
+			need_to_notify = true;
+			ring_full = true;
 			break;
 		}
 
@@ -521,6 +523,8 @@ static void xenvif_rx_action(struct xenvif *vif)
 
 	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
 
+	vif->rx_queue_stopped = !npo.copy_prod && ring_full;
+
 	if (!npo.copy_prod)
 		goto done;
 
@@ -592,8 +596,7 @@ static void xenvif_rx_action(struct xenvif *vif)
 
 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
 
-	if (ret)
-		need_to_notify = 1;
+	need_to_notify |= !!ret;
 
 	npo.meta_cons += sco->meta_slots_used;
 	dev_kfree_skb(skb);
@@ -1724,7 +1727,8 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
 
 static inline int rx_work_todo(struct xenvif *vif)
 {
-	return !skb_queue_empty(&vif->rx_queue) || vif->rx_event;
+	return (!skb_queue_empty(&vif->rx_queue) && !vif->rx_queue_stopped) ||
+		vif->rx_event;
 }
 
 static inline int tx_work_todo(struct xenvif *vif)