Diffstat (limited to 'drivers/net/xen-netfront.c')
 -rw-r--r--  drivers/net/xen-netfront.c  135
 1 file changed, 86 insertions(+), 49 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index caa011008cd0..7ffa43bd7cf9 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -452,29 +452,85 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 	/* Grant backend access to each skb fragment page. */
 	for (i = 0; i < frags; i++) {
 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+		struct page *page = skb_frag_page(frag);
 
-		tx->flags |= XEN_NETTXF_more_data;
+		len = skb_frag_size(frag);
+		offset = frag->page_offset;
 
-		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
-		np->tx_skbs[id].skb = skb_get(skb);
-		tx = RING_GET_REQUEST(&np->tx, prod++);
-		tx->id = id;
-		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
-		BUG_ON((signed short)ref < 0);
+		/* Data must not cross a page boundary. */
+		BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
 
-		mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag)));
-		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
-						mfn, GNTMAP_readonly);
+		/* Skip unused frames from start of page */
+		page += offset >> PAGE_SHIFT;
+		offset &= ~PAGE_MASK;
 
-		tx->gref = np->grant_tx_ref[id] = ref;
-		tx->offset = frag->page_offset;
-		tx->size = skb_frag_size(frag);
-		tx->flags = 0;
+		while (len > 0) {
+			unsigned long bytes;
+
+			BUG_ON(offset >= PAGE_SIZE);
+
+			bytes = PAGE_SIZE - offset;
+			if (bytes > len)
+				bytes = len;
+
+			tx->flags |= XEN_NETTXF_more_data;
+
+			id = get_id_from_freelist(&np->tx_skb_freelist,
+						  np->tx_skbs);
+			np->tx_skbs[id].skb = skb_get(skb);
+			tx = RING_GET_REQUEST(&np->tx, prod++);
+			tx->id = id;
+			ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+			BUG_ON((signed short)ref < 0);
+
+			mfn = pfn_to_mfn(page_to_pfn(page));
+			gnttab_grant_foreign_access_ref(ref,
+							np->xbdev->otherend_id,
+							mfn, GNTMAP_readonly);
+
+			tx->gref = np->grant_tx_ref[id] = ref;
+			tx->offset = offset;
+			tx->size = bytes;
+			tx->flags = 0;
+
+			offset += bytes;
+			len -= bytes;
+
+			/* Next frame */
+			if (offset == PAGE_SIZE && len) {
+				BUG_ON(!PageCompound(page));
+				page++;
+				offset = 0;
+			}
+		}
 	}
 
 	np->tx.req_prod_pvt = prod;
 }
 
+/*
+ * Count how many ring slots are required to send the frags of this
+ * skb. Each frag might be a compound page.
+ */
+static int xennet_count_skb_frag_slots(struct sk_buff *skb)
+{
+	int i, frags = skb_shinfo(skb)->nr_frags;
+	int pages = 0;
+
+	for (i = 0; i < frags; i++) {
+		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+		unsigned long size = skb_frag_size(frag);
+		unsigned long offset = frag->page_offset;
+
+		/* Skip unused frames from start of page */
+		offset &= ~PAGE_MASK;
+
+		pages += PFN_UP(offset + size);
+	}
+
+	return pages;
+}
+
 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	unsigned short id;
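
The new inner loop above replaces the old one-request-per-fragment scheme, in which tx->offset and tx->size were taken straight from frag->page_offset and skb_frag_size() and could therefore describe data spanning several pages of a compound page. The following standalone sketch (not driver code; it assumes 4 KiB pages, reduces the page to an index and leaves out the grant/ring bookkeeping) shows how one fragment now gets chopped into page-sized transmit chunks:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Print the chunk each transmit slot would carry for one fragment. */
static void walk_frag(unsigned long offset, unsigned long len)
{
	unsigned long page = offset >> PAGE_SHIFT;	/* skip unused leading pages */

	offset &= ~PAGE_MASK;				/* keep only the in-page offset */

	while (len > 0) {
		unsigned long bytes = PAGE_SIZE - offset;

		if (bytes > len)
			bytes = len;

		printf("page %lu, offset %lu, %lu bytes\n", page, offset, bytes);

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {	/* step into the next page */
			page++;
			offset = 0;
		}
	}
}

int main(void)
{
	/* A 10000-byte fragment starting 6000 bytes into a compound page
	 * is split into three slots: 2192 + 4096 + 3712 bytes. */
	walk_frag(6000, 10000);
	return 0;
}

In the driver, each such chunk corresponds to one ring request and one grant reference.
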
@@ -487,23 +543,23 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	grant_ref_t ref;
 	unsigned long mfn;
 	int notify;
-	int frags = skb_shinfo(skb)->nr_frags;
+	int slots;
 	unsigned int offset = offset_in_page(data);
 	unsigned int len = skb_headlen(skb);
 	unsigned long flags;
 
-	frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
-	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
-		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
-		       frags);
-		dump_stack();
+	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
+		xennet_count_skb_frag_slots(skb);
+	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
+		net_alert_ratelimited(
+			"xennet: skb rides the rocket: %d slots\n", slots);
 		goto drop;
 	}
 
 	spin_lock_irqsave(&np->tx_lock, flags);
 
 	if (unlikely(!netif_carrier_ok(dev) ||
-		     (frags > 1 && !xennet_can_sg(dev)) ||
+		     (slots > 1 && !xennet_can_sg(dev)) ||
 		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 		spin_unlock_irqrestore(&np->tx_lock, flags);
 		goto drop;
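
The bound check now has to count ring slots rather than skb fragments, since a single compound-page fragment can consume several slots. A rough standalone version of that arithmetic (again a sketch, with 4 KiB pages; fragments are modelled as plain offset/size pairs and PFN_UP/DIV_ROUND_UP are re-derived rather than taken from kernel headers):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

struct frag { unsigned long offset, size; };

/* Slots for the linear area plus one PFN_UP() term per fragment. */
static unsigned long count_slots(unsigned long head_off, unsigned long head_len,
				 const struct frag *frags, int nr_frags)
{
	unsigned long slots = DIV_ROUND_UP(head_off + head_len, PAGE_SIZE);
	int i;

	for (i = 0; i < nr_frags; i++)
		slots += PFN_UP((frags[i].offset & ~PAGE_MASK) + frags[i].size);

	return slots;
}

int main(void)
{
	/* 200 bytes of linear data plus one 64 KiB compound-page fragment:
	 * 1 slot for the head and 16 for the fragment. */
	struct frag f = { .offset = 0, .size = 64 * 1024 };

	printf("%lu slots\n", count_slots(64, 200, &f, 1));
	return 0;
}

That total is what gets compared against MAX_SKB_FRAGS + 1 before the packet is accepted.
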
@@ -959,29 +1015,10 @@ err:
 	i = xennet_fill_frags(np, skb, &tmpq);
 
 	/*
-	 * Truesize approximates the size of true data plus
-	 * any supervisor overheads. Adding hypervisor
-	 * overheads has been shown to significantly reduce
-	 * achievable bandwidth with the default receive
-	 * buffer size. It is therefore not wise to account
-	 * for it here.
-	 *
-	 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
-	 * to RX_COPY_THRESHOLD + the supervisor
-	 * overheads. Here, we add the size of the data pulled
-	 * in xennet_fill_frags().
-	 *
-	 * We also adjust for any unused space in the main
-	 * data area by subtracting (RX_COPY_THRESHOLD -
-	 * len). This is especially important with drivers
-	 * which split incoming packets into header and data,
-	 * using only 66 bytes of the main data area (see the
-	 * e1000 driver for example.) On such systems,
-	 * without this last adjustement, our achievable
-	 * receive throughout using the standard receive
-	 * buffer size was cut by 25%(!!!).
-	 */
-	skb->truesize += skb->data_len - RX_COPY_THRESHOLD;
+	 * Truesize is the actual allocation size, even if the
+	 * allocation is only partially used.
+	 */
+	skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags;
 	skb->len += skb->data_len;
 
 	if (rx->flags & XEN_NETRXF_csum_blank)
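
On the receive side the old truesize heuristic is replaced by a simple rule: charge the full page for every fragment, whether or not it is completely used. A quick back-of-the-envelope comparison of the two adjustments (a sketch only, assuming 4 KiB pages and an illustrative 256-byte copy threshold; the driver defines its own RX_COPY_THRESHOLD):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long rx_copy_threshold = 256;	/* illustrative value */
	unsigned long nr_frags = 2;
	unsigned long data_len = 2 * 1500;	/* bytes carried in the frags */

	/* Old adjustment: data held in the frags minus the head-copy threshold. */
	printf("old: +%lu bytes\n", data_len - rx_copy_threshold);	/* 2744 */

	/* New adjustment: the full pages backing those frags. */
	printf("new: +%lu bytes\n", page_size * nr_frags);		/* 8192 */
	return 0;
}

The new figure can be larger, but it reflects the memory the skb actually pins, which is what truesize is meant to account for.
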
@@ -1255,7 +1292,7 @@ static const struct net_device_ops xennet_netdev_ops = {
 #endif
 };
 
-static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
+static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 {
 	int i, err;
 	struct net_device *netdev;
@@ -1351,8 +1388,8 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
  * structures and the ring buffers for communication with the backend, and
  * inform the backend of the appropriate details for those.
  */
-static int __devinit netfront_probe(struct xenbus_device *dev,
-				    const struct xenbus_device_id *id)
+static int netfront_probe(struct xenbus_device *dev,
+			  const struct xenbus_device_id *id)
 {
 	int err;
 	struct net_device *netdev;
@@ -1911,7 +1948,7 @@ static const struct xenbus_device_id netfront_ids[] = {
 };
 
 
-static int __devexit xennet_remove(struct xenbus_device *dev)
+static int xennet_remove(struct xenbus_device *dev)
 {
 	struct netfront_info *info = dev_get_drvdata(&dev->dev);
 
@@ -1934,7 +1971,7 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
 
 static DEFINE_XENBUS_DRIVER(netfront, ,
 	.probe = netfront_probe,
-	.remove = __devexit_p(xennet_remove),
+	.remove = xennet_remove,
 	.resume = netfront_resume,
 	.otherend_changed = netback_changed,
 );