Diffstat (limited to 'drivers/net/xen-netback')
 -rw-r--r--  drivers/net/xen-netback/common.h    |  27
 -rw-r--r--  drivers/net/xen-netback/interface.c |  47
 -rw-r--r--  drivers/net/xen-netback/netback.c   | 236
 -rw-r--r--  drivers/net/xen-netback/xenbus.c    |   3
 4 files changed, 122 insertions, 191 deletions
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index c47794b9d42f..c955fc39d69a 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -143,12 +143,10 @@ struct xenvif {
 	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
 	struct xen_netif_rx_back_ring rx;
 	struct sk_buff_head rx_queue;
-
-	/* Allow xenvif_start_xmit() to peek ahead in the rx request
-	 * ring. This is a prediction of what rx_req_cons will be
-	 * once all queued skbs are put on the ring.
+	/* Set when the RX interrupt is triggered by the frontend.
+	 * The worker thread may need to wake the queue.
 	 */
-	RING_IDX rx_req_cons_peek;
+	bool rx_event;
 
 	/* This array is allocated seperately as it is large */
 	struct gnttab_copy *grant_copy_op;
@@ -205,8 +203,6 @@ void xenvif_xenbus_fini(void);
 
 int xenvif_schedulable(struct xenvif *vif);
 
-int xenvif_rx_ring_full(struct xenvif *vif);
-
 int xenvif_must_stop_queue(struct xenvif *vif);
 
 /* (Un)Map communication rings. */
@@ -218,21 +214,20 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
 /* Check for SKBs from frontend and schedule backend processing */
 void xenvif_check_rx_xenvif(struct xenvif *vif);
 
-/* Queue an SKB for transmission to the frontend */
-void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
-/* Notify xenvif that ring now has space to send an skb to the frontend */
-void xenvif_notify_tx_completion(struct xenvif *vif);
-
 /* Prevent the device from generating any further traffic. */
 void xenvif_carrier_off(struct xenvif *vif);
 
-/* Returns number of ring slots required to send an skb to the frontend */
-unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
-
 int xenvif_tx_action(struct xenvif *vif, int budget);
-void xenvif_rx_action(struct xenvif *vif);
 
 int xenvif_kthread(void *data);
+void xenvif_kick_thread(struct xenvif *vif);
+
+/* Determine whether the needed number of slots (req) are available,
+ * and set req_event if not.
+ */
+bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
+
+void xenvif_stop_queue(struct xenvif *vif);
 
 extern bool separate_tx_rx_irq;
 
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index fff8cddfed81..b9de31ea7fc4 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -47,11 +47,6 @@ int xenvif_schedulable(struct xenvif *vif)
 	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
 }
 
-static int xenvif_rx_schedulable(struct xenvif *vif)
-{
-	return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
-}
-
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
@@ -105,8 +100,8 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
 
-	if (xenvif_rx_schedulable(vif))
-		netif_wake_queue(vif->dev);
+	vif->rx_event = true;
+	xenvif_kick_thread(vif);
 
 	return IRQ_HANDLED;
 }
@@ -122,24 +117,35 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
+	int min_slots_needed;
 
 	BUG_ON(skb->dev != dev);
 
 	/* Drop the packet if vif is not ready */
-	if (vif->task == NULL)
+	if (vif->task == NULL || !xenvif_schedulable(vif))
 		goto drop;
 
-	/* Drop the packet if the target domain has no receive buffers. */
-	if (!xenvif_rx_schedulable(vif))
-		goto drop;
+	/* At best we'll need one slot for the header and one for each
+	 * frag.
+	 */
+	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
 
-	/* Reserve ring slots for the worst-case number of fragments. */
-	vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
+	/* If the skb is GSO then we'll also need an extra slot for the
+	 * metadata.
+	 */
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+	    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+		min_slots_needed++;
 
-	if (vif->can_queue && xenvif_must_stop_queue(vif))
-		netif_stop_queue(dev);
+	/* If the skb can't possibly fit in the remaining slots
+	 * then turn off the queue to give the ring a chance to
+	 * drain.
+	 */
+	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
+		xenvif_stop_queue(vif);
 
-	xenvif_queue_tx_skb(vif, skb);
+	skb_queue_tail(&vif->rx_queue, skb);
+	xenvif_kick_thread(vif);
 
 	return NETDEV_TX_OK;
 
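The stop-queue logic above only needs a cheap lower bound on how many ring slots the packet could require: one for the linear header area, one per fragment, and one more if GSO metadata has to be sent. A small stand-alone C sketch of that arithmetic, using made-up fragment counts rather than anything taken from a real skb:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the min_slots_needed estimate in the new xenvif_start_xmit():
 * header + one slot per frag, plus one extra slot when GSO metadata is
 * needed.  The inputs are illustrative only.
 */
static int min_slots_needed(int nr_frags, bool is_gso)
{
	int slots = 1 + nr_frags;

	if (is_gso)
		slots++;	/* extra_info slot for the GSO metadata */

	return slots;
}

int main(void)
{
	printf("linear-only skb:  %d slots\n", min_slots_needed(0, false)); /* 1 */
	printf("TSO skb, 3 frags: %d slots\n", min_slots_needed(3, true));  /* 5 */
	return 0;
}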
@@ -149,12 +155,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
-void xenvif_notify_tx_completion(struct xenvif *vif)
-{
-	if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
-		netif_wake_queue(vif->dev);
-}
-
 static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
@@ -388,6 +388,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 	if (err < 0)
 		goto err;
 
+	init_waitqueue_head(&vif->wq);
+
 	if (tx_evtchn == rx_evtchn) {
 		/* feature-split-event-channels == 0 */
 		err = bind_interdomain_evtchn_to_irqhandler(
@@ -420,7 +422,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 		disable_irq(vif->rx_irq);
 	}
 
-	init_waitqueue_head(&vif->wq);
 	task = kthread_create(xenvif_kthread,
 			      (void *)vif, "%s", vif->dev->name);
 	if (IS_ERR(task)) {
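Taken together, the interface.c changes turn xenvif_start_xmit() into a pure producer: it estimates slots, queues the skb and kicks the worker, while waking a stopped queue becomes the kthread's job (which is why the wait queue is now initialised before the interrupt handlers can fire). A stripped-down user-space analogue of that hand-off, with pthread primitives standing in for wait_event()/wake_up(); none of the names below are driver API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;	/* ~ vif->wq */
static int queued;					/* ~ vif->rx_queue length */
static bool stop;

static void kick_thread(void)		/* ~ xenvif_kick_thread() */
{
	pthread_cond_signal(&wq);
}

static void start_xmit(void)		/* ~ xenvif_start_xmit(): queue and kick only */
{
	pthread_mutex_lock(&lock);
	queued++;
	pthread_mutex_unlock(&lock);
	kick_thread();
}

static void *kthread(void *arg)		/* ~ xenvif_kthread(): the only ring writer */
{
	pthread_mutex_lock(&lock);
	while (!stop) {
		while (!queued && !stop)
			pthread_cond_wait(&wq, &lock);	/* ~ wait_event_interruptible() */
		while (queued) {
			queued--;
			printf("worker placed one skb on the RX ring\n");
		}
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t task;

	pthread_create(&task, NULL, kthread, NULL);
	start_xmit();
	start_xmit();
	sleep(1);

	pthread_mutex_lock(&lock);
	stop = true;
	pthread_mutex_unlock(&lock);
	kick_thread();
	pthread_join(task, NULL);
	return 0;
}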
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 78425554a537..4f81ac0e2f0a 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -138,36 +138,26 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
 		vif->pending_prod + vif->pending_cons;
 }
 
-static int max_required_rx_slots(struct xenvif *vif)
+bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
 {
-	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+	RING_IDX prod, cons;
 
-	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
-	if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
-		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
-
-	return max;
-}
-
-int xenvif_rx_ring_full(struct xenvif *vif)
-{
-	RING_IDX peek = vif->rx_req_cons_peek;
-	RING_IDX needed = max_required_rx_slots(vif);
+	do {
+		prod = vif->rx.sring->req_prod;
+		cons = vif->rx.req_cons;
 
-	return ((vif->rx.sring->req_prod - peek) < needed) ||
-	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
-}
+		if (prod - cons >= needed)
+			return true;
 
-int xenvif_must_stop_queue(struct xenvif *vif)
-{
-	if (!xenvif_rx_ring_full(vif))
-		return 0;
+		vif->rx.sring->req_event = prod + 1;
 
-	vif->rx.sring->req_event = vif->rx_req_cons_peek +
-				   max_required_rx_slots(vif);
-	mb(); /* request notification /then/ check the queue */
+		/* Make sure event is visible before we check prod
+		 * again.
+		 */
+		mb();
+	} while (vif->rx.sring->req_prod != prod);
 
-	return xenvif_rx_ring_full(vif);
+	return false;
 }
 
 /*
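The do/while loop introduced above is the usual "set the event threshold, barrier, then re-read the producer" idiom: if the frontend posts a request between the failed check and the write to req_event, the re-read of req_prod catches it, so a notification can never be lost. A self-contained sketch of the same pattern against a simplified ring; the struct below only shadows the shared-ring fields used here, and the C11 fence stands in for the kernel's mb():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the shared RX ring indices. */
struct fake_ring {
	unsigned int req_prod;	/* advanced by the frontend */
	unsigned int req_cons;	/* advanced by the backend */
	unsigned int req_event;	/* frontend notifies when req_prod passes this */
};

static bool rx_ring_slots_available(struct fake_ring *ring, unsigned int needed)
{
	unsigned int prod, cons;

	do {
		prod = ring->req_prod;
		cons = ring->req_cons;

		if (prod - cons >= needed)
			return true;

		/* Ask to be notified as soon as another request is posted... */
		ring->req_event = prod + 1;

		/* ...and make that visible before re-reading req_prod so a
		 * request posted in the meantime is not missed.
		 */
		atomic_thread_fence(memory_order_seq_cst);
	} while (ring->req_prod != prod);

	return false;
}

int main(void)
{
	struct fake_ring ring = { .req_prod = 10, .req_cons = 8, .req_event = 0 };

	printf("2 free, need 2: %d\n", rx_ring_slots_available(&ring, 2)); /* 1 */
	printf("2 free, need 5: %d\n", rx_ring_slots_available(&ring, 5)); /* 0 */
	return 0;
}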
@@ -210,93 +200,6 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	return false;
 }
 
-struct xenvif_count_slot_state {
-	unsigned long copy_off;
-	bool head;
-};
-
-unsigned int xenvif_count_frag_slots(struct xenvif *vif,
-				     unsigned long offset, unsigned long size,
-				     struct xenvif_count_slot_state *state)
-{
-	unsigned count = 0;
-
-	offset &= ~PAGE_MASK;
-
-	while (size > 0) {
-		unsigned long bytes;
-
-		bytes = PAGE_SIZE - offset;
-
-		if (bytes > size)
-			bytes = size;
-
-		if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
-			count++;
-			state->copy_off = 0;
-		}
-
-		if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
-			bytes = MAX_BUFFER_OFFSET - state->copy_off;
-
-		state->copy_off += bytes;
-
-		offset += bytes;
-		size -= bytes;
-
-		if (offset == PAGE_SIZE)
-			offset = 0;
-
-		state->head = false;
-	}
-
-	return count;
-}
-
-/*
- * Figure out how many ring slots we're going to need to send @skb to
- * the guest. This function is essentially a dry run of
- * xenvif_gop_frag_copy.
- */
-unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
-{
-	struct xenvif_count_slot_state state;
-	unsigned int count;
-	unsigned char *data;
-	unsigned i;
-
-	state.head = true;
-	state.copy_off = 0;
-
-	/* Slot for the first (partial) page of data. */
-	count = 1;
-
-	/* Need a slot for the GSO prefix for GSO extra data? */
-	if (skb_shinfo(skb)->gso_size)
-		count++;
-
-	data = skb->data;
-	while (data < skb_tail_pointer(skb)) {
-		unsigned long offset = offset_in_page(data);
-		unsigned long size = PAGE_SIZE - offset;
-
-		if (data + size > skb_tail_pointer(skb))
-			size = skb_tail_pointer(skb) - data;
-
-		count += xenvif_count_frag_slots(vif, offset, size, &state);
-
-		data += size;
-	}
-
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
-
-		count += xenvif_count_frag_slots(vif, offset, size, &state);
-	}
-	return count;
-}
-
 struct netrx_pending_operations {
 	unsigned copy_prod, copy_cons;
 	unsigned meta_prod, meta_cons;
@@ -557,12 +460,12 @@ struct skb_cb_overlay {
 	int meta_slots_used;
 };
 
-static void xenvif_kick_thread(struct xenvif *vif)
+void xenvif_kick_thread(struct xenvif *vif)
 {
 	wake_up(&vif->wq);
 }
 
-void xenvif_rx_action(struct xenvif *vif)
+static void xenvif_rx_action(struct xenvif *vif)
 {
 	s8 status;
 	u16 flags;
@@ -571,8 +474,6 @@ void xenvif_rx_action(struct xenvif *vif)
 	struct sk_buff *skb;
 	LIST_HEAD(notify);
 	int ret;
-	int nr_frags;
-	int count;
 	unsigned long offset;
 	struct skb_cb_overlay *sco;
 	int need_to_notify = 0;
@@ -584,29 +485,44 @@ void xenvif_rx_action(struct xenvif *vif)
 
 	skb_queue_head_init(&rxq);
 
-	count = 0;
-
 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
-		vif = netdev_priv(skb->dev);
-		nr_frags = skb_shinfo(skb)->nr_frags;
+		int max_slots_needed;
+		int i;
+
+		/* We need a cheap worse case estimate for the number of
+		 * slots we'll use.
+		 */
+
+		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
+						skb_headlen(skb),
+						PAGE_SIZE);
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			unsigned int size;
+			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+		}
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+			max_slots_needed++;
+
+		/* If the skb may not fit then bail out now */
+		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
+			skb_queue_head(&vif->rx_queue, skb);
+			need_to_notify = 1;
+			break;
+		}
 
 		sco = (struct skb_cb_overlay *)skb->cb;
 		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
-
-		count += nr_frags + 1;
+		BUG_ON(sco->meta_slots_used > max_slots_needed);
 
 		__skb_queue_tail(&rxq, skb);
-
-		/* Filled the batch queue? */
-		/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
-		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
-			break;
 	}
 
 	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
 
 	if (!npo.copy_prod)
-		return;
+		goto done;
 
 	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
 	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
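Where xenvif_start_xmit() used a one-slot-per-frag lower bound, the dequeue path above needs an upper bound before committing to build grant copies, so it rounds the linear area and every frag up to whole pages and adds one slot for GSO metadata. A worked instance of that arithmetic with invented lengths and offsets (PAGE_SIZE taken as 4096):

#include <stdio.h>

#define PAGE_SIZE	4096
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Worst-case slot count for one queued skb, mirroring the estimate in
 * the new xenvif_rx_action().  All inputs are illustrative.
 */
static int max_slots_needed(unsigned int head_offset, unsigned int head_len,
			    const unsigned int *frag_sizes, int nr_frags,
			    int is_gso)
{
	int i, slots;

	slots = DIV_ROUND_UP(head_offset + head_len, PAGE_SIZE);
	for (i = 0; i < nr_frags; i++)
		slots += DIV_ROUND_UP(frag_sizes[i], PAGE_SIZE);
	if (is_gso)
		slots++;

	return slots;
}

int main(void)
{
	unsigned int frags[] = { 4096, 5000 };	/* one page + two pages */

	/* A 200-byte linear area starting at page offset 4000 crosses a page
	 * boundary, so it counts as two slots: 2 + 1 + 2 + 1 (GSO) = 6.
	 */
	printf("%d slots\n", max_slots_needed(4000, 200, frags, 2, 1));
	return 0;
}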
@@ -614,8 +530,6 @@ void xenvif_rx_action(struct xenvif *vif)
 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
 		sco = (struct skb_cb_overlay *)skb->cb;
 
-		vif = netdev_priv(skb->dev);
-
 		if ((1 << vif->meta[npo.meta_cons].gso_type) &
 		    vif->gso_prefix_mask) {
 			resp = RING_GET_RESPONSE(&vif->rx,
@@ -681,25 +595,13 @@ void xenvif_rx_action(struct xenvif *vif)
 		if (ret)
 			need_to_notify = 1;
 
-		xenvif_notify_tx_completion(vif);
-
 		npo.meta_cons += sco->meta_slots_used;
 		dev_kfree_skb(skb);
 	}
 
+done:
 	if (need_to_notify)
 		notify_remote_via_irq(vif->rx_irq);
-
-	/* More work to do? */
-	if (!skb_queue_empty(&vif->rx_queue))
-		xenvif_kick_thread(vif);
-}
-
-void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
-{
-	skb_queue_tail(&vif->rx_queue, skb);
-
-	xenvif_kick_thread(vif);
 }
 
 void xenvif_check_rx_xenvif(struct xenvif *vif)
@@ -1141,10 +1043,7 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
 	}
 
 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
-
-	/* Header must be checked, and gso_segs computed. */
-	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
-	skb_shinfo(skb)->gso_segs = 0;
+	/* gso_segs will be calculated later */
 
 	return 0;
 }
@@ -1687,6 +1586,20 @@ static int xenvif_tx_submit(struct xenvif *vif)
 
 		skb_probe_transport_header(skb, 0);
 
+		/* If the packet is GSO then we will have just set up the
+		 * transport header offset in checksum_setup so it's now
+		 * straightforward to calculate gso_segs.
+		 */
+		if (skb_is_gso(skb)) {
+			int mss = skb_shinfo(skb)->gso_size;
+			int hdrlen = skb_transport_header(skb) -
+				skb_mac_header(skb) +
+				tcp_hdrlen(skb);
+
+			skb_shinfo(skb)->gso_segs =
+				DIV_ROUND_UP(skb->len - hdrlen, mss);
+		}
+
 		vif->dev->stats.rx_bytes += skb->len;
 		vif->dev->stats.rx_packets++;
 
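With the transport header offset now known, the gso_segs value can be recovered from the frame length, header length, and MSS alone. A tiny worked instance of the DIV_ROUND_UP above, using illustrative sizes rather than values from any real packet:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical TSO frame: 14-byte Ethernet + 20-byte IPv4 +
	 * 32-byte TCP header in front of the payload.
	 */
	unsigned int skb_len = 65226;
	unsigned int hdrlen  = 14 + 20 + 32;	/* mac header to end of TCP header */
	unsigned int mss     = 1448;

	/* (65226 - 66) / 1448, rounded up, is exactly 45 segments. */
	printf("gso_segs = %u\n", DIV_ROUND_UP(skb_len - hdrlen, mss));
	return 0;
}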
@@ -1811,7 +1724,7 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
 
 static inline int rx_work_todo(struct xenvif *vif)
 {
-	return !skb_queue_empty(&vif->rx_queue);
+	return !skb_queue_empty(&vif->rx_queue) || vif->rx_event;
 }
 
 static inline int tx_work_todo(struct xenvif *vif)
@@ -1861,8 +1774,6 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
 	rxs = (struct xen_netif_rx_sring *)addr;
 	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
 
-	vif->rx_req_cons_peek = 0;
-
 	return 0;
 
 err:
@@ -1870,9 +1781,24 @@ err:
 	return err;
 }
 
+void xenvif_stop_queue(struct xenvif *vif)
+{
+	if (!vif->can_queue)
+		return;
+
+	netif_stop_queue(vif->dev);
+}
+
+static void xenvif_start_queue(struct xenvif *vif)
+{
+	if (xenvif_schedulable(vif))
+		netif_wake_queue(vif->dev);
+}
+
 int xenvif_kthread(void *data)
 {
 	struct xenvif *vif = data;
+	struct sk_buff *skb;
 
 	while (!kthread_should_stop()) {
 		wait_event_interruptible(vif->wq,
@@ -1881,12 +1807,22 @@ int xenvif_kthread(void *data)
 		if (kthread_should_stop())
 			break;
 
-		if (rx_work_todo(vif))
+		if (!skb_queue_empty(&vif->rx_queue))
 			xenvif_rx_action(vif);
 
+		vif->rx_event = false;
+
+		if (skb_queue_empty(&vif->rx_queue) &&
+		    netif_queue_stopped(vif->dev))
+			xenvif_start_queue(vif);
+
 		cond_resched();
 	}
 
+	/* Bin any remaining skbs */
+	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
+		dev_kfree_skb(skb);
+
 	return 0;
 }
 
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index f0358992b04f..7a206cffb062 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -15,8 +15,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include "common.h"