Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--  drivers/net/xen-netback/netback.c  55
1 file changed, 24 insertions(+), 31 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 6b62c3eb8e18..438d0c09b7e6 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -240,7 +240,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 	struct gnttab_copy *copy_gop;
 	struct xenvif_rx_meta *meta;
 	unsigned long bytes;
-	int gso_type;
+	int gso_type = XEN_NETIF_GSO_TYPE_NONE;
 
 	/* Data must not cross a page boundary. */
 	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -299,12 +299,12 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 	}
 
 	/* Leave a gap for the GSO descriptor. */
-	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-		gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
-	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-		gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
-	else
-		gso_type = XEN_NETIF_GSO_TYPE_NONE;
+	if (skb_is_gso(skb)) {
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+	}
 
 	if (*head && ((1 << gso_type) & vif->gso_mask))
 		vif->rx.req_cons++;
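The two hunks above replace the open-coded if/else chain with a skb_is_gso() guard: non-GSO skbs now simply keep the XEN_NETIF_GSO_TYPE_NONE value that the declaration hunk initializes. A minimal userspace sketch of the resulting selection logic, using stand-in constants rather than the kernel's definitions:

#include <stdio.h>

/* Stand-in constants; the real values live in the kernel headers. */
#define SKB_GSO_TCPV4            (1 << 0)
#define SKB_GSO_TCPV6            (1 << 4)
#define XEN_NETIF_GSO_TYPE_NONE  0
#define XEN_NETIF_GSO_TYPE_TCPV4 1
#define XEN_NETIF_GSO_TYPE_TCPV6 2

/* Mirrors the patched flow: default to NONE, refine only for GSO skbs. */
static int pick_gso_type(int is_gso, unsigned int gso_type_bits)
{
	int gso_type = XEN_NETIF_GSO_TYPE_NONE;

	if (is_gso) {
		if (gso_type_bits & SKB_GSO_TCPV4)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (gso_type_bits & SKB_GSO_TCPV6)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
	}
	return gso_type;
}

int main(void)
{
	printf("%d\n", pick_gso_type(1, SKB_GSO_TCPV4)); /* 1 (TCPV4) */
	printf("%d\n", pick_gso_type(0, SKB_GSO_TCPV4)); /* 0 (not GSO) */
	return 0;
}

In the kernel, skb_is_gso() simply tests skb_shinfo(skb)->gso_size for non-zero, which is why the NONE default is safe for any skb that fails the guard.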
@@ -338,19 +338,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	int head = 1;
 	int old_meta_prod;
 	int gso_type;
-	int gso_size;
 
 	old_meta_prod = npo->meta_prod;
 
-	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
-		gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
-		gso_size = skb_shinfo(skb)->gso_size;
-	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
-		gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
-		gso_size = skb_shinfo(skb)->gso_size;
-	} else {
-		gso_type = XEN_NETIF_GSO_TYPE_NONE;
-		gso_size = 0;
-	}
+	gso_type = XEN_NETIF_GSO_TYPE_NONE;
+	if (skb_is_gso(skb)) {
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+	}
 
 	/* Set up a GSO prefix descriptor, if necessary */
@@ -358,7 +354,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 		meta = npo->meta + npo->meta_prod++;
 		meta->gso_type = gso_type;
-		meta->gso_size = gso_size;
+		meta->gso_size = skb_shinfo(skb)->gso_size;
 		meta->size = 0;
 		meta->id = req->id;
 	}
@@ -368,7 +364,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 
 	if ((1 << gso_type) & vif->gso_mask) {
 		meta->gso_type = gso_type;
-		meta->gso_size = gso_size;
+		meta->gso_size = skb_shinfo(skb)->gso_size;
 	} else {
 		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
 		meta->gso_size = 0;
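The three hunks above drop the cached gso_size local from xenvif_gop_skb(): both consumers are already qualified by the vif's negotiated GSO masks, and the else branch explicitly writes XEN_NETIF_GSO_TYPE_NONE and size 0, so reading skb_shinfo(skb)->gso_size directly at the assignment sites loses nothing. A compilable sketch of that mask gating (constants and the MSS value are stand-ins):

#include <stdio.h>

/* Illustrative gating of a per-type feature on a negotiated bitmask,
 * as "(1 << gso_type) & vif->gso_mask" does in the driver. */
#define XEN_NETIF_GSO_TYPE_NONE		0
#define XEN_NETIF_GSO_TYPE_TCPV4	1
#define XEN_NETIF_GSO_TYPE_TCPV6	2

int main(void)
{
	unsigned int gso_mask = (1u << XEN_NETIF_GSO_TYPE_TCPV4) |
				(1u << XEN_NETIF_GSO_TYPE_TCPV6);
	int gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
	unsigned int skb_gso_size = 1448;	/* hypothetical MSS */
	unsigned int meta_gso_size;

	if ((1u << gso_type) & gso_mask)
		meta_gso_size = skb_gso_size;	/* advertise to the frontend */
	else
		meta_gso_size = 0;		/* treated as GSO_TYPE_NONE */

	printf("meta gso_size = %u\n", meta_gso_size);
	return 0;
}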
@@ -476,7 +472,6 @@ static void xenvif_rx_action(struct xenvif *vif)
 	unsigned long offset;
 	struct skb_cb_overlay *sco;
 	bool need_to_notify = false;
-	bool ring_full = false;
 
 	struct netrx_pending_operations npo = {
 		.copy = vif->grant_copy_op,
@@ -486,7 +481,7 @@ static void xenvif_rx_action(struct xenvif *vif)
 	skb_queue_head_init(&rxq);
 
 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
-		int max_slots_needed;
+		RING_IDX max_slots_needed;
 		int i;
 
 		/* We need a cheap worse case estimate for the number of
@@ -501,17 +496,19 @@ static void xenvif_rx_action(struct xenvif *vif)
 			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
 		}
-		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
-		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+		if (skb_is_gso(skb) &&
+		    (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+		     skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
 			max_slots_needed++;
 
 		/* If the skb may not fit then bail out now */
 		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
 			skb_queue_head(&vif->rx_queue, skb);
 			need_to_notify = true;
-			ring_full = true;
+			vif->rx_last_skb_slots = max_slots_needed;
 			break;
-		}
+		} else
+			vif->rx_last_skb_slots = 0;
 
 		sco = (struct skb_cb_overlay *)skb->cb;
 		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
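This hunk widens max_slots_needed to RING_IDX, counts the extra GSO slot only for TCPv4/TCPv6 GSO skbs, and, when the skb will not fit, records the requirement in vif->rx_last_skb_slots before bailing out so rx_work_todo() (below) can wait for exactly that much ring space. A self-contained sketch of the worst-case arithmetic, assuming 4 KiB pages and made-up frag sizes (the linear-area slot count here is hypothetical):

#include <stdio.h>

#define PAGE_SIZE	4096	/* assumed; matches typical x86 builds */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical skb: linear area worth 2 slots, three frags. */
	unsigned int frag_size[] = { 5000, 4096, 100 };
	unsigned int max_slots_needed = 2;
	int gso_tcp = 1;	/* TCPv4 or TCPv6 GSO skb */
	unsigned int i;

	/* Each frag is charged a full slot per started page. */
	for (i = 0; i < sizeof(frag_size) / sizeof(frag_size[0]); i++)
		max_slots_needed += DIV_ROUND_UP(frag_size[i], PAGE_SIZE);

	if (gso_tcp)	/* one extra slot for the GSO descriptor */
		max_slots_needed++;

	/* 2 + 2 + 1 + 1 + 1 = 7 slots in the worst case */
	printf("max_slots_needed = %u\n", max_slots_needed);
	return 0;
}

Charging a full slot per started page over-estimates, but the comment in the surrounding context says as much: the driver wants a cheap worst-case bound, not an exact count.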
@@ -522,8 +519,6 @@ static void xenvif_rx_action(struct xenvif *vif)
 
 	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
 
-	vif->rx_queue_stopped = !npo.copy_prod && ring_full;
-
 	if (!npo.copy_prod)
 		goto done;
 
@@ -1473,8 +1468,8 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
 
 static inline int rx_work_todo(struct xenvif *vif)
 {
-	return (!skb_queue_empty(&vif->rx_queue) && !vif->rx_queue_stopped) ||
-		vif->rx_event;
+	return !skb_queue_empty(&vif->rx_queue) &&
+		xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
 }
 
 static inline int tx_work_todo(struct xenvif *vif)
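With rx_queue_stopped and rx_event gone, rx_work_todo() now keys purely off queue state plus ring space: the thread stays asleep until the ring can hold the skb that stalled it, and vif->rx_last_skb_slots is zero when nothing stalled, making the space test trivially true. A loose userspace model of the predicate; ring_slots_free() stands in for xenvif_rx_ring_slots_available(), whose real implementation reads the shared ring's producer/consumer indices:

#include <stdio.h>

static int ring_slots_free(int prod, int cons, int ring_size)
{
	return ring_size - (prod - cons);
}

static int rx_work_todo(int queue_len, int last_skb_slots,
			int prod, int cons, int ring_size)
{
	return queue_len > 0 &&
	       ring_slots_free(prod, cons, ring_size) >= last_skb_slots;
}

int main(void)
{
	/* Stalled skb needs 7 slots but only 4 are free: keep sleeping. */
	printf("%d\n", rx_work_todo(3, 7, 10, 6, 8));	/* 0 */
	/* The guest consumed its responses, 8 slots free: wake up. */
	printf("%d\n", rx_work_todo(3, 7, 10, 10, 8));	/* 1 */
	return 0;
}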
@@ -1560,8 +1555,6 @@ int xenvif_kthread(void *data)
 		if (!skb_queue_empty(&vif->rx_queue))
 			xenvif_rx_action(vif);
 
-		vif->rx_event = false;
-
 		if (skb_queue_empty(&vif->rx_queue) &&
 		    netif_queue_stopped(vif->dev))
 			xenvif_start_queue(vif);
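Finally, the kthread no longer clears an rx_event latch on each pass; the wake-up predicate alone decides when it runs. A schematic of that loop shape in pthreads (the kernel blocks in wait_event_interruptible() on rx_work_todo(), not on condition variables; names and state here are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static int rx_queue_len;	/* stands in for vif->rx_queue */
static int stop;

static void *rx_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		/* No event flag to clear: just wait for the predicate. */
		while (rx_queue_len == 0 && !stop)
			pthread_cond_wait(&wake, &lock);
		while (rx_queue_len > 0)
			rx_queue_len--;	/* stands in for xenvif_rx_action() */
		if (stop)
			break;
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, rx_thread, NULL);

	pthread_mutex_lock(&lock);
	rx_queue_len = 3;	/* frontend queued three skbs */
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);

	pthread_mutex_lock(&lock);
	stop = 1;
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("rx_queue_len = %d\n", rx_queue_len);	/* 0: drained */
	return 0;
}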