author     Zoltan Kiss <zoltan.kiss@citrix.com>    2014-03-06 16:48:30 -0500
committer  David S. Miller <davem@davemloft.net>   2014-03-07 15:57:15 -0500
commit     093507885ae5dc0288af07fbb922d2f85b3a88a6 (patch)
tree       f9fef41eb8e9a433dd8e9f44b7d9d9ff9aa392c8
parent     e3377f36ca20a034dce56335dc9b89f41094d845 (diff)
xen-netback: Timeout packets in RX path
A malicious or buggy guest can leave its queue filled indefinitely, in which case the qdisc starts to queue packets for that VIF. If those packets came from another guest, they can block its slots and prevent shutdown. To avoid that, make sure the queue is drained every 10 seconds. In the worst case, flushing the QDisc queue usually takes 3 such rounds.

Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
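For context, the fix is built on the classic (pre-4.15) kernel timer API, in which the callback receives an unsigned long cookie that is conventionally a cast pointer to the owning object. Below is a minimal sketch of that pattern; the names (drain_ctx, drain_timeout_fn, drain_ctx_arm) are illustrative, not from this patch:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct drain_ctx {
            struct timer_list watchdog;     /* fires if the queue stays stopped */
    };

    /* Old-style timer callback: receives the 'data' cookie, not the timer. */
    static void drain_timeout_fn(unsigned long data)
    {
            struct drain_ctx *ctx = (struct drain_ctx *)data;

            /* Recover from the stall here, e.g. set a purge flag and wake
             * whoever drains the queue, as xenvif_wake_queue() does below.
             */
            (void)ctx;
    }

    static void drain_ctx_init(struct drain_ctx *ctx)
    {
            init_timer(&ctx->watchdog);
            ctx->watchdog.function = drain_timeout_fn;
            ctx->watchdog.data = (unsigned long)ctx;
    }

    static void drain_ctx_arm(struct drain_ctx *ctx, unsigned int msecs)
    {
            /* (Re)arm: fires once, 'msecs' from now, unless del_timer_sync()
             * disarms it first.
             */
            mod_timer(&ctx->watchdog, jiffies + msecs_to_jiffies(msecs));
    }

The patch follows exactly this shape: xenvif_alloc() calls init_timer(), xenvif_start_xmit() sets function/data and arms the timer when it stops the queue, and the RX kthread or xenvif_disconnect() disarms it with del_timer_sync().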
-rw-r--r--  drivers/net/xen-netback/common.h    |  6
-rw-r--r--  drivers/net/xen-netback/interface.c | 37
-rw-r--r--  drivers/net/xen-netback/netback.c   | 23
3 files changed, 61 insertions(+), 5 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index f2f8a02afc36..0355f8767e3b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -148,6 +148,9 @@ struct xenvif {
 	struct xen_netif_rx_back_ring rx;
 	struct sk_buff_head rx_queue;
 	RING_IDX rx_last_skb_slots;
+	bool rx_queue_purge;
+
+	struct timer_list wake_queue;
 
 	/* This array is allocated seperately as it is large */
 	struct gnttab_copy *grant_copy_op;
@@ -259,4 +262,7 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
 
 extern bool separate_tx_rx_irq;
 
+extern unsigned int rx_drain_timeout_msecs;
+extern unsigned int rx_drain_timeout_jiffies;
+
 #endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b646039e539b..9cc9f638f442 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -115,6 +115,18 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static void xenvif_wake_queue(unsigned long data)
+{
+	struct xenvif *vif = (struct xenvif *)data;
+
+	if (netif_queue_stopped(vif->dev)) {
+		netdev_err(vif->dev, "draining TX queue\n");
+		vif->rx_queue_purge = true;
+		xenvif_kick_thread(vif);
+		netif_wake_queue(vif->dev);
+	}
+}
+
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
@@ -144,8 +156,13 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * then turn off the queue to give the ring a chance to
 	 * drain.
 	 */
-	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
+	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
+		vif->wake_queue.function = xenvif_wake_queue;
+		vif->wake_queue.data = (unsigned long)vif;
 		xenvif_stop_queue(vif);
+		mod_timer(&vif->wake_queue,
+			  jiffies + rx_drain_timeout_jiffies);
+	}
 
 	skb_queue_tail(&vif->rx_queue, skb);
 	xenvif_kick_thread(vif);
@@ -353,6 +370,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	init_timer(&vif->credit_timeout);
 	vif->credit_window_start = get_jiffies_64();
 
+	init_timer(&vif->wake_queue);
+
 	dev->netdev_ops = &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -531,6 +550,7 @@ void xenvif_disconnect(struct xenvif *vif)
 	xenvif_carrier_off(vif);
 
 	if (vif->task) {
+		del_timer_sync(&vif->wake_queue);
 		kthread_stop(vif->task);
 		vif->task = NULL;
 	}
@@ -556,12 +576,25 @@ void xenvif_disconnect(struct xenvif *vif)
 void xenvif_free(struct xenvif *vif)
 {
 	int i, unmap_timeout = 0;
+	/* Here we want to avoid timeout messages if an skb can be legitimately
+	 * stuck somewhere else. Realistically this could be another vif's
+	 * internal or QDisc queue. That other vif also has this
+	 * rx_drain_timeout_msecs timeout, but the timer only ditches the
+	 * internal queue. After that, the QDisc queue can put in the worst case
+	 * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that other vif's
+	 * internal queue, so we need several rounds of such timeouts until we
+	 * can be sure that no other vif should have skbs from us. We are
+	 * not sending more skbs, so newly stuck packets are not interesting
+	 * to us here.
+	 */
+	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
+		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
 
 	for (i = 0; i < MAX_PENDING_REQS; ++i) {
 		if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
 			unmap_timeout++;
 			schedule_timeout(msecs_to_jiffies(1000));
-			if (unmap_timeout > 9 &&
+			if (unmap_timeout > worst_case_skb_lifetime &&
 			    net_ratelimit())
 				netdev_err(vif->dev,
 					   "Page still granted! Index: %x\n",
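To make the worst_case_skb_lifetime bound concrete, here is the arithmetic with the typical 4 KiB page build constants, which are assumptions of this example rather than part of the patch (XEN_NETIF_RX_RING_SIZE = 256, MAX_SKB_FRAGS = 17, XENVIF_QUEUE_LENGTH = 32), and the default rx_drain_timeout_msecs = 10000:

    skbs moved per timeout round = 256 / 17             = 15  (integer division)
    rounds needed                = DIV_ROUND_UP(32, 15)  = 3
    worst_case_skb_lifetime      = (10000 / 1000) * 3    = 30 seconds

This lines up with the commit message's estimate that flushing the QDisc queue usually takes 3 rounds.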
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 58effc49f526..8518a0d1f6f9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -55,6 +55,13 @@
 bool separate_tx_rx_irq = 1;
 module_param(separate_tx_rx_irq, bool, 0644);
 
+/* When the guest ring is filled up, qdisc queues the packets for us, but we
+ * have to time them out, otherwise other guests' packets can get stuck there.
+ */
+unsigned int rx_drain_timeout_msecs = 10000;
+module_param(rx_drain_timeout_msecs, uint, 0444);
+unsigned int rx_drain_timeout_jiffies;
+
 /*
  * This is the maximum slots a skb can have. If a guest sends a skb
  * which exceeds this limit it is considered malicious.
@@ -1694,8 +1701,9 @@ void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
 
 static inline int rx_work_todo(struct xenvif *vif)
 {
-	return !skb_queue_empty(&vif->rx_queue) &&
-	       xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
+	return (!skb_queue_empty(&vif->rx_queue) &&
+		xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) ||
+	       vif->rx_queue_purge;
 }
 
 static inline int tx_work_todo(struct xenvif *vif)
@@ -1782,12 +1790,19 @@ int xenvif_kthread_guest_rx(void *data)
 		if (kthread_should_stop())
 			break;
 
+		if (vif->rx_queue_purge) {
+			skb_queue_purge(&vif->rx_queue);
+			vif->rx_queue_purge = false;
+		}
+
 		if (!skb_queue_empty(&vif->rx_queue))
 			xenvif_rx_action(vif);
 
 		if (skb_queue_empty(&vif->rx_queue) &&
-		    netif_queue_stopped(vif->dev))
+		    netif_queue_stopped(vif->dev)) {
+			del_timer_sync(&vif->wake_queue);
 			xenvif_start_queue(vif);
+		}
 
 		cond_resched();
 	}
@@ -1838,6 +1853,8 @@ static int __init netback_init(void)
 	if (rc)
 		goto failed_init;
 
+	rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
+
 	return 0;
 
 failed_init:
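Since rx_drain_timeout_msecs is registered with mode 0444, it is visible in sysfs but read-only at runtime; when xen-netback is built as a module, a different timeout can only be chosen at load time. A hypothetical usage example (the 5000 ms value is illustrative):

    modprobe xen-netback rx_drain_timeout_msecs=5000
    cat /sys/module/xen_netback/parameters/rx_drain_timeout_msecs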