Diffstat (limited to 'drivers/net/xen-netback')
-rw-r--r--  drivers/net/xen-netback/common.h    |  6 +-----
-rw-r--r--  drivers/net/xen-netback/interface.c |  1 -
-rw-r--r--  drivers/net/xen-netback/netback.c   | 16 ++++++----------
3 files changed, 7 insertions(+), 16 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 4c76bcb9a879..ae413a2cbee7 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -143,11 +143,7 @@ struct xenvif {
 	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
 	struct xen_netif_rx_back_ring rx;
 	struct sk_buff_head rx_queue;
-	bool rx_queue_stopped;
-	/* Set when the RX interrupt is triggered by the frontend.
-	 * The worker thread may need to wake the queue.
-	 */
-	bool rx_event;
+	RING_IDX rx_last_skb_slots;
 
 	/* This array is allocated seperately as it is large */
 	struct gnttab_copy *grant_copy_op;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b9de31ea7fc4..7669d49a67e2 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -100,7 +100,6 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
 
-	vif->rx_event = true;
 	xenvif_kick_thread(vif);
 
 	return IRQ_HANDLED;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 6b62c3eb8e18..e5284bca2d90 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -476,7 +476,6 @@ static void xenvif_rx_action(struct xenvif *vif)
 	unsigned long offset;
 	struct skb_cb_overlay *sco;
 	bool need_to_notify = false;
-	bool ring_full = false;
 
 	struct netrx_pending_operations npo = {
 		.copy = vif->grant_copy_op,
@@ -486,7 +485,7 @@ static void xenvif_rx_action(struct xenvif *vif)
 	skb_queue_head_init(&rxq);
 
 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
-		int max_slots_needed;
+		RING_IDX max_slots_needed;
 		int i;
 
 		/* We need a cheap worse case estimate for the number of
@@ -509,9 +508,10 @@ static void xenvif_rx_action(struct xenvif *vif)
 		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
 			skb_queue_head(&vif->rx_queue, skb);
 			need_to_notify = true;
-			ring_full = true;
+			vif->rx_last_skb_slots = max_slots_needed;
 			break;
-		}
+		} else
+			vif->rx_last_skb_slots = 0;
 
 		sco = (struct skb_cb_overlay *)skb->cb;
 		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
@@ -522,8 +522,6 @@ static void xenvif_rx_action(struct xenvif *vif)
 
 	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
 
-	vif->rx_queue_stopped = !npo.copy_prod && ring_full;
-
 	if (!npo.copy_prod)
 		goto done;
 
@@ -1473,8 +1471,8 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
 
 static inline int rx_work_todo(struct xenvif *vif)
 {
-	return (!skb_queue_empty(&vif->rx_queue) && !vif->rx_queue_stopped) ||
-		vif->rx_event;
+	return !skb_queue_empty(&vif->rx_queue) &&
+		xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
 }
 
 static inline int tx_work_todo(struct xenvif *vif)
@@ -1560,8 +1558,6 @@ int xenvif_kthread(void *data)
 		if (!skb_queue_empty(&vif->rx_queue))
 			xenvif_rx_action(vif);
 
-		vif->rx_event = false;
-
 		if (skb_queue_empty(&vif->rx_queue) &&
 		    netif_queue_stopped(vif->dev))
 			xenvif_start_queue(vif);
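
The net effect of the change: instead of tracking rx_event/rx_queue_stopped flags, xenvif_rx_action() records in rx_last_skb_slots how many ring slots the stalled skb needed, and rx_work_todo() only reports work once that many slots are actually available; when nothing has stalled, rx_last_skb_slots is 0 and the availability check should pass trivially. The following stand-alone, user-space C sketch models that wake-up condition in isolation; the fake_ring/fake_vif types and the scenario in main() are illustrative assumptions, not kernel code.

/* Minimal user-space sketch of the pattern this patch introduces:
 * remember the slot count of the skb that did not fit, and only
 * consider RX work pending once the ring has that many free slots. */
#include <stdbool.h>
#include <stdio.h>

struct fake_ring {
	unsigned int size;   /* total slots in the ring */
	unsigned int in_use; /* slots currently occupied */
};

struct fake_vif {
	struct fake_ring rx;
	unsigned int rx_last_skb_slots; /* slots needed by the stalled skb, 0 if none */
	unsigned int queued_skbs;       /* stand-in for skb_queue_len(&vif->rx_queue) */
};

static bool rx_ring_slots_available(struct fake_vif *vif, unsigned int needed)
{
	return vif->rx.size - vif->rx.in_use >= needed;
}

/* Mirrors the patched rx_work_todo(): there is work only if skbs are
 * queued AND the ring can now hold the skb that stalled us last time. */
static bool rx_work_todo(struct fake_vif *vif)
{
	return vif->queued_skbs > 0 &&
	       rx_ring_slots_available(vif, vif->rx_last_skb_slots);
}

int main(void)
{
	struct fake_vif vif = {
		.rx = { .size = 8, .in_use = 7 },
		.rx_last_skb_slots = 3, /* last skb needed 3 slots but only 1 was free */
		.queued_skbs = 1,
	};

	printf("ring nearly full: work todo = %d\n", rx_work_todo(&vif)); /* 0 */

	vif.rx.in_use = 4; /* frontend consumed responses, slots freed */
	printf("slots freed:      work todo = %d\n", rx_work_todo(&vif)); /* 1 */

	return 0;
}

In the patched kernel code the same idea keeps the RX kthread asleep in wait_event_interruptible() until the frontend has consumed enough responses, rather than waking it on every frontend interrupt via the removed rx_event flag.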