author     Zoltan Kiss <zoltan.kiss@citrix.com>    2014-03-06 16:48:30 -0500
committer  David S. Miller <davem@davemloft.net>   2014-03-07 15:57:15 -0500
commit     093507885ae5dc0288af07fbb922d2f85b3a88a6 (patch)
tree       f9fef41eb8e9a433dd8e9f44b7d9d9ff9aa392c8 /drivers/net/xen-netback/interface.c
parent     e3377f36ca20a034dce56335dc9b89f41094d845 (diff)
xen-netback: Timeout packets in RX path
A malicious or buggy guest can leave its queue filled indefinitely, in which case the qdisc starts to queue packets for that VIF. If those packets came from another guest, it can block its slots and prevent shutdown. To avoid that, we make sure the queue is drained every 10 seconds. In the worst case, flushing the QDisc queue usually takes 3 rounds of this timeout.

Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
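For reference, here is a minimal standalone sketch of the worst-case drain arithmetic that the patch introduces in xenvif_free() (the worst_case_skb_lifetime computation further down in the diff). The 10-second drain timeout is taken from the commit message; the other constants (a 32-entry vif queue, a 256-slot RX ring, 17 frags per skb) are assumed typical values for a 4K-page build at the time and are not part of this diff.

/* Standalone sketch of the worst_case_skb_lifetime arithmetic added below
 * in xenvif_free().  The constant values are assumptions (typical 4K-page
 * defaults at the time), not taken from this diff.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

#define RX_DRAIN_TIMEOUT_MSECS	10000U	/* "drained every 10 seconds" */
#define XENVIF_QUEUE_LENGTH	32U	/* assumed vif device queue length */
#define XEN_NETIF_RX_RING_SIZE	256U	/* assumed RX ring size (4K pages) */
#define MAX_SKB_FRAGS		17U	/* assumed 65536/4096 + 1 */

int main(void)
{
	/* Each timeout round ditches the other vif's internal queue, which
	 * the qdisc can then refill with at most RING_SIZE / MAX_SKB_FRAGS
	 * skbs, so several rounds may be needed before the whole qdisc
	 * backlog is certainly gone.
	 */
	unsigned int rounds = DIV_ROUND_UP(XENVIF_QUEUE_LENGTH,
					   XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS);
	unsigned int worst_case_skb_lifetime =
		(RX_DRAIN_TIMEOUT_MSECS / 1000) * rounds;

	/* With these values: 256 / 17 = 15, DIV_ROUND_UP(32, 15) = 3 rounds,
	 * i.e. roughly 30 seconds of worst-case skb lifetime.
	 */
	printf("%u rounds, ~%u seconds\n", rounds, worst_case_skb_lifetime);
	return 0;
}

Under these assumed values the result is 3 rounds (about 30 seconds), which matches the "3 rounds" figure mentioned in the commit message.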
Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r--   drivers/net/xen-netback/interface.c   37
1 file changed, 35 insertions, 2 deletions
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b646039e539b..9cc9f638f442 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -115,6 +115,18 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static void xenvif_wake_queue(unsigned long data)
+{
+	struct xenvif *vif = (struct xenvif *)data;
+
+	if (netif_queue_stopped(vif->dev)) {
+		netdev_err(vif->dev, "draining TX queue\n");
+		vif->rx_queue_purge = true;
+		xenvif_kick_thread(vif);
+		netif_wake_queue(vif->dev);
+	}
+}
+
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
@@ -144,8 +156,13 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * then turn off the queue to give the ring a chance to
 	 * drain.
 	 */
-	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
+	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
+		vif->wake_queue.function = xenvif_wake_queue;
+		vif->wake_queue.data = (unsigned long)vif;
 		xenvif_stop_queue(vif);
+		mod_timer(&vif->wake_queue,
+			  jiffies + rx_drain_timeout_jiffies);
+	}
 
 	skb_queue_tail(&vif->rx_queue, skb);
 	xenvif_kick_thread(vif);
@@ -353,6 +370,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	init_timer(&vif->credit_timeout);
 	vif->credit_window_start = get_jiffies_64();
 
+	init_timer(&vif->wake_queue);
+
 	dev->netdev_ops = &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -531,6 +550,7 @@ void xenvif_disconnect(struct xenvif *vif)
 	xenvif_carrier_off(vif);
 
 	if (vif->task) {
+		del_timer_sync(&vif->wake_queue);
 		kthread_stop(vif->task);
 		vif->task = NULL;
 	}
@@ -556,12 +576,25 @@ void xenvif_disconnect(struct xenvif *vif)
 void xenvif_free(struct xenvif *vif)
 {
 	int i, unmap_timeout = 0;
+	/* Here we want to avoid timeout messages if an skb can be legitimately
+	 * stuck somewhere else. Realistically this could be another vif's
+	 * internal or QDisc queue. That other vif also has this
+	 * rx_drain_timeout_msecs timeout, but the timer only ditches the
+	 * internal queue. After that, the QDisc queue can put in worst case
+	 * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that other vif's
+	 * internal queue, so we need several rounds of such timeouts until we
+	 * can be sure that no other vif should have skbs from us. We are
+	 * not sending more skbs, so newly stuck packets are not interesting
+	 * for us here.
+	 */
+	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
+		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
 
 	for (i = 0; i < MAX_PENDING_REQS; ++i) {
 		if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
 			unmap_timeout++;
 			schedule_timeout(msecs_to_jiffies(1000));
-			if (unmap_timeout > 9 &&
+			if (unmap_timeout > worst_case_skb_lifetime &&
 			    net_ratelimit())
 				netdev_err(vif->dev,
 					   "Page still granted! Index: %x\n",