 drivers/net/xen-netback/common.h    |  27 ++--
 drivers/net/xen-netback/interface.c |  47 ++++---
 drivers/net/xen-netback/netback.c   | 217 +++++++---------------
 3 files changed, 106 insertions(+), 185 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 08ae01b41c83..ba30a6d9fefa 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -136,12 +136,10 @@ struct xenvif {
 	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
 	struct xen_netif_rx_back_ring rx;
 	struct sk_buff_head rx_queue;
-
-	/* Allow xenvif_start_xmit() to peek ahead in the rx request
-	 * ring. This is a prediction of what rx_req_cons will be
-	 * once all queued skbs are put on the ring.
+	/* Set when the RX interrupt is triggered by the frontend.
+	 * The worker thread may need to wake the queue.
 	 */
-	RING_IDX rx_req_cons_peek;
+	bool rx_event;
 
 	/* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
 	 * head/fragment page uses 2 copy operations because it
@@ -198,8 +196,6 @@ void xenvif_xenbus_fini(void);
 
 int xenvif_schedulable(struct xenvif *vif);
 
-int xenvif_rx_ring_full(struct xenvif *vif);
-
 int xenvif_must_stop_queue(struct xenvif *vif);
 
 /* (Un)Map communication rings. */
@@ -211,21 +207,20 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
 /* Check for SKBs from frontend and schedule backend processing */
 void xenvif_check_rx_xenvif(struct xenvif *vif);
 
-/* Queue an SKB for transmission to the frontend */
-void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
-/* Notify xenvif that ring now has space to send an skb to the frontend */
-void xenvif_notify_tx_completion(struct xenvif *vif);
-
 /* Prevent the device from generating any further traffic. */
 void xenvif_carrier_off(struct xenvif *vif);
 
-/* Returns number of ring slots required to send an skb to the frontend */
-unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
-
 int xenvif_tx_action(struct xenvif *vif, int budget);
-void xenvif_rx_action(struct xenvif *vif);
 
 int xenvif_kthread(void *data);
+void xenvif_kick_thread(struct xenvif *vif);
+
+/* Determine whether the needed number of slots (req) are available,
+ * and set req_event if not.
+ */
+bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
+
+void xenvif_stop_queue(struct xenvif *vif);
 
 extern bool separate_tx_rx_irq;
 
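With these declarations the guest-RX path changes shape: xenvif_start_xmit() no longer predicts ring usage via rx_req_cons_peek; it simply queues the skb and kicks the worker thread, which does all slot accounting against the shared ring itself. In outline (a sketch of the new flow, not text from the patch):

	/* producer (ndo_start_xmit): hand the skb to the worker */
	skb_queue_tail(&vif->rx_queue, skb);
	xenvif_kick_thread(vif);	/* wake xenvif_kthread() */

	/* consumer (xenvif_kthread): drain rx_queue only while
	 * xenvif_rx_ring_slots_available() reports room
	 */
	if (!skb_queue_empty(&vif->rx_queue))
		xenvif_rx_action(vif);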
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 870f1fa58370..1dcb9606e6e0 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -46,11 +46,6 @@ int xenvif_schedulable(struct xenvif *vif)
 	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
 }
 
-static int xenvif_rx_schedulable(struct xenvif *vif)
-{
-	return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
-}
-
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
@@ -104,8 +99,8 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
 
-	if (xenvif_rx_schedulable(vif))
-		netif_wake_queue(vif->dev);
+	vif->rx_event = true;
+	xenvif_kick_thread(vif);
 
 	return IRQ_HANDLED;
 }
@@ -121,24 +116,35 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
+	int min_slots_needed;
 
 	BUG_ON(skb->dev != dev);
 
 	/* Drop the packet if vif is not ready */
-	if (vif->task == NULL)
+	if (vif->task == NULL || !xenvif_schedulable(vif))
 		goto drop;
 
-	/* Drop the packet if the target domain has no receive buffers. */
-	if (!xenvif_rx_schedulable(vif))
-		goto drop;
+	/* At best we'll need one slot for the header and one for each
+	 * frag.
+	 */
+	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
 
-	/* Reserve ring slots for the worst-case number of fragments. */
-	vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
+	/* If the skb is GSO then we'll also need an extra slot for the
+	 * metadata.
+	 */
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+	    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+		min_slots_needed++;
 
-	if (vif->can_queue && xenvif_must_stop_queue(vif))
-		netif_stop_queue(dev);
+	/* If the skb can't possibly fit in the remaining slots
+	 * then turn off the queue to give the ring a chance to
+	 * drain.
+	 */
+	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
+		xenvif_stop_queue(vif);
 
-	xenvif_queue_tx_skb(vif, skb);
+	skb_queue_tail(&vif->rx_queue, skb);
+	xenvif_kick_thread(vif);
 
 	return NETDEV_TX_OK;
 
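min_slots_needed is deliberately optimistic: one slot for the linear header, one per frag, and one more for GSO metadata, so an skb with three frags carrying TCPv4 GSO data needs at least 1 + 3 + 1 = 5 slots. Restated as a standalone helper (an illustrative sketch assuming <linux/skbuff.h>; min_slots_for_skb is not a function from the patch):

	/* Lower bound on guest-RX ring slots for an skb; used only to
	 * decide whether to stop the transmit queue early.
	 */
	static int min_slots_for_skb(struct sk_buff *skb)
	{
		int slots = 1 + skb_shinfo(skb)->nr_frags;

		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
			slots++;	/* GSO metadata slot */

		return slots;
	}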
@@ -148,12 +154,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
-void xenvif_notify_tx_completion(struct xenvif *vif)
-{
-	if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
-		netif_wake_queue(vif->dev);
-}
-
 static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
@@ -378,6 +378,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 	if (err < 0)
 		goto err;
 
+	init_waitqueue_head(&vif->wq);
+
 	if (tx_evtchn == rx_evtchn) {
 		/* feature-split-event-channels == 0 */
 		err = bind_interdomain_evtchn_to_irqhandler(
@@ -410,7 +412,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 		disable_irq(vif->rx_irq);
 	}
 
-	init_waitqueue_head(&vif->wq);
 	task = kthread_create(xenvif_kthread,
 			      (void *)vif, "%s", vif->dev->name);
 	if (IS_ERR(task)) {
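Moving init_waitqueue_head() ahead of the event-channel binding (the two hunks above) is a correctness fix rather than mere code motion: xenvif_rx_interrupt() now calls xenvif_kick_thread(), i.e. wake_up(&vif->wq), so the waitqueue must be initialised before either IRQ can possibly fire.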
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index acf13920e6d1..43341b82649c 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -138,36 +138,26 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
 		vif->pending_prod + vif->pending_cons;
 }
 
-static int max_required_rx_slots(struct xenvif *vif)
+bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
 {
-	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+	RING_IDX prod, cons;
 
-	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
-	if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
-		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
-
-	return max;
-}
-
-int xenvif_rx_ring_full(struct xenvif *vif)
-{
-	RING_IDX peek   = vif->rx_req_cons_peek;
-	RING_IDX needed = max_required_rx_slots(vif);
+	do {
+		prod = vif->rx.sring->req_prod;
+		cons = vif->rx.req_cons;
 
-	return ((vif->rx.sring->req_prod - peek) < needed) ||
-	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
-}
+		if (prod - cons >= needed)
+			return true;
 
-int xenvif_must_stop_queue(struct xenvif *vif)
-{
-	if (!xenvif_rx_ring_full(vif))
-		return 0;
+		vif->rx.sring->req_event = prod + 1;
 
-	vif->rx.sring->req_event = vif->rx_req_cons_peek +
-		max_required_rx_slots(vif);
-	mb(); /* request notification /then/ check the queue */
+		/* Make sure event is visible before we check prod
+		 * again.
+		 */
+		mb();
+	} while (vif->rx.sring->req_prod != prod);
 
-	return xenvif_rx_ring_full(vif);
+	return false;
 }
 
 /*
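xenvif_rx_ring_slots_available() replaces the peek-based xenvif_rx_ring_full()/xenvif_must_stop_queue() pair with a lock-free check against the frontend producer: snapshot req_prod and req_cons; if space is short, publish req_event = prod + 1 so the frontend raises an event once it produces past the snapshot, then re-read req_prod after a full barrier. The re-read closes the window in which the frontend posts requests before it can see the new req_event; without it, that notification (and hence a wakeup) could be lost. The same idiom over a hypothetical ring type (a sketch; hyp_ring and its fields are invented for illustration):

	bool ring_has_room(struct hyp_ring *r, unsigned int needed)
	{
		unsigned int prod, cons;

		do {
			prod = r->req_prod;	/* written by frontend */
			cons = r->req_cons;	/* private to backend */

			if (prod - cons >= needed)
				return true;

			r->req_event = prod + 1;	/* request an event */
			mb();	/* publish req_event before re-reading prod */
		} while (r->req_prod != prod);

		return false;
	}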
@@ -210,93 +200,6 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	return false;
 }
 
-struct xenvif_count_slot_state {
-	unsigned long copy_off;
-	bool head;
-};
-
-unsigned int xenvif_count_frag_slots(struct xenvif *vif,
-				     unsigned long offset, unsigned long size,
-				     struct xenvif_count_slot_state *state)
-{
-	unsigned count = 0;
-
-	offset &= ~PAGE_MASK;
-
-	while (size > 0) {
-		unsigned long bytes;
-
-		bytes = PAGE_SIZE - offset;
-
-		if (bytes > size)
-			bytes = size;
-
-		if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
-			count++;
-			state->copy_off = 0;
-		}
-
-		if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
-			bytes = MAX_BUFFER_OFFSET - state->copy_off;
-
-		state->copy_off += bytes;
-
-		offset += bytes;
-		size -= bytes;
-
-		if (offset == PAGE_SIZE)
-			offset = 0;
-
-		state->head = false;
-	}
-
-	return count;
-}
-
-/*
- * Figure out how many ring slots we're going to need to send @skb to
- * the guest. This function is essentially a dry run of
- * xenvif_gop_frag_copy.
- */
-unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
-{
-	struct xenvif_count_slot_state state;
-	unsigned int count;
-	unsigned char *data;
-	unsigned i;
-
-	state.head = true;
-	state.copy_off = 0;
-
-	/* Slot for the first (partial) page of data. */
-	count = 1;
-
-	/* Need a slot for the GSO prefix for GSO extra data? */
-	if (skb_shinfo(skb)->gso_size)
-		count++;
-
-	data = skb->data;
-	while (data < skb_tail_pointer(skb)) {
-		unsigned long offset = offset_in_page(data);
-		unsigned long size = PAGE_SIZE - offset;
-
-		if (data + size > skb_tail_pointer(skb))
-			size = skb_tail_pointer(skb) - data;
-
-		count += xenvif_count_frag_slots(vif, offset, size, &state);
-
-		data += size;
-	}
-
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
-
-		count += xenvif_count_frag_slots(vif, offset, size, &state);
-	}
-	return count;
-}
-
 struct netrx_pending_operations {
 	unsigned copy_prod, copy_cons;
 	unsigned meta_prod, meta_cons;
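All of the deleted counting machinery above was an exact dry run of xenvif_gop_frag_copy(), kept in lock-step with it by hand. It is no longer needed: the two call sites now use cheap bounds instead, an optimistic one in xenvif_start_xmit() and a pessimistic one in xenvif_rx_action(), with the latter validated at runtime by the BUG_ON(sco->meta_slots_used > max_slots_needed) added further down.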
@@ -557,12 +460,12 @@ struct skb_cb_overlay {
 	int meta_slots_used;
 };
 
-static void xenvif_kick_thread(struct xenvif *vif)
+void xenvif_kick_thread(struct xenvif *vif)
 {
 	wake_up(&vif->wq);
 }
 
-void xenvif_rx_action(struct xenvif *vif)
+static void xenvif_rx_action(struct xenvif *vif)
 {
 	s8 status;
 	u16 flags;
@@ -571,8 +474,6 @@ void xenvif_rx_action(struct xenvif *vif)
 	struct sk_buff *skb;
 	LIST_HEAD(notify);
 	int ret;
-	int nr_frags;
-	int count;
 	unsigned long offset;
 	struct skb_cb_overlay *sco;
 	int need_to_notify = 0;
@@ -584,29 +485,44 @@ void xenvif_rx_action(struct xenvif *vif)
 
 	skb_queue_head_init(&rxq);
 
-	count = 0;
-
 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
-		vif = netdev_priv(skb->dev);
-		nr_frags = skb_shinfo(skb)->nr_frags;
+		int max_slots_needed;
+		int i;
+
+		/* We need a cheap worst-case estimate for the number of
+		 * slots we'll use.
+		 */
+
+		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
+						skb_headlen(skb),
+						PAGE_SIZE);
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			unsigned int size;
+			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+		}
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+			max_slots_needed++;
+
+		/* If the skb may not fit then bail out now */
+		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
+			skb_queue_head(&vif->rx_queue, skb);
+			need_to_notify = 1;
+			break;
+		}
 
 		sco = (struct skb_cb_overlay *)skb->cb;
 		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
-
-		count += nr_frags + 1;
+		BUG_ON(sco->meta_slots_used > max_slots_needed);
 
 		__skb_queue_tail(&rxq, skb);
-
-		/* Filled the batch queue? */
-		/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
-		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
-			break;
 	}
 
 	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
 
 	if (!npo.copy_prod)
-		return;
+		goto done;
 
 	BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
 	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
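max_slots_needed is a worst-case bound, rounding the head and every frag up to whole pages, so xenvif_gop_skb() can use fewer slots but never more (which the new BUG_ON enforces). A worked instance with illustrative numbers (PAGE_SIZE = 4096):

	/* head starts 3000 bytes into its page, skb_headlen() == 2000:
	 *	DIV_ROUND_UP(3000 + 2000, 4096)	->  2 slots
	 * one 7000-byte frag:
	 *	DIV_ROUND_UP(7000, 4096)	->  2 slots
	 * TCPv4 GSO:				-> +1 slot
	 * max_slots_needed = 5, though the copy may pack into fewer.
	 */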
@@ -614,8 +530,6 @@ void xenvif_rx_action(struct xenvif *vif)
 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
 		sco = (struct skb_cb_overlay *)skb->cb;
 
-		vif = netdev_priv(skb->dev);
-
 		if ((1 << vif->meta[npo.meta_cons].gso_type) &
 		    vif->gso_prefix_mask) {
 			resp = RING_GET_RESPONSE(&vif->rx,
@@ -681,25 +595,13 @@ void xenvif_rx_action(struct xenvif *vif)
 		if (ret)
 			need_to_notify = 1;
 
-		xenvif_notify_tx_completion(vif);
-
 		npo.meta_cons += sco->meta_slots_used;
 		dev_kfree_skb(skb);
 	}
 
+done:
 	if (need_to_notify)
 		notify_remote_via_irq(vif->rx_irq);
-
-	/* More work to do? */
-	if (!skb_queue_empty(&vif->rx_queue))
-		xenvif_kick_thread(vif);
-}
-
-void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
-{
-	skb_queue_tail(&vif->rx_queue, skb);
-
-	xenvif_kick_thread(vif);
 }
 
 void xenvif_check_rx_xenvif(struct xenvif *vif)
@@ -1804,7 +1706,7 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
 
 static inline int rx_work_todo(struct xenvif *vif)
 {
-	return !skb_queue_empty(&vif->rx_queue);
+	return !skb_queue_empty(&vif->rx_queue) || vif->rx_event;
 }
 
 static inline int tx_work_todo(struct xenvif *vif)
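The new || vif->rx_event term matters because the RX interrupt no longer wakes the netdev queue directly; it only kicks the worker. If rx_work_todo() tested rx_queue alone, an interrupt arriving while the queue is empty would leave the thread asleep in

	wait_event_interruptible(vif->wq,
				 rx_work_todo(vif) || kthread_should_stop());

and a stopped netdev queue would never be restarted.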
@@ -1854,8 +1756,6 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
 	rxs = (struct xen_netif_rx_sring *)addr;
 	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
 
-	vif->rx_req_cons_peek = 0;
-
 	return 0;
 
 err:
@@ -1863,9 +1763,24 @@ err:
 	return err;
 }
 
+void xenvif_stop_queue(struct xenvif *vif)
+{
+	if (!vif->can_queue)
+		return;
+
+	netif_stop_queue(vif->dev);
+}
+
+static void xenvif_start_queue(struct xenvif *vif)
+{
+	if (xenvif_schedulable(vif))
+		netif_wake_queue(vif->dev);
+}
+
 int xenvif_kthread(void *data)
 {
 	struct xenvif *vif = data;
+	struct sk_buff *skb;
 
 	while (!kthread_should_stop()) {
 		wait_event_interruptible(vif->wq,
@@ -1874,12 +1789,22 @@ int xenvif_kthread(void *data)
 		if (kthread_should_stop())
 			break;
 
-		if (rx_work_todo(vif))
+		if (!skb_queue_empty(&vif->rx_queue))
 			xenvif_rx_action(vif);
 
+		vif->rx_event = false;
+
+		if (skb_queue_empty(&vif->rx_queue) &&
+		    netif_queue_stopped(vif->dev))
+			xenvif_start_queue(vif);
+
 		cond_resched();
 	}
 
+	/* Bin any remaining skbs */
+	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
+		dev_kfree_skb(skb);
+
 	return 0;
 }
 
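Two behavioural details in this last hunk: the netdev queue is restarted only once rx_queue has drained completely rather than as soon as a single slot frees up, giving the shared ring a chance to empty and avoiding stop/wake ping-pong per packet; and the "bin any remaining skbs" loop frees whatever is still queued when the thread exits, which is needed now that xenvif_start_xmit() queues skbs unconditionally.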