Diffstat (limited to 'drivers/net/xen-netfront.c'):

 drivers/net/xen-netfront.c | 258 +++++++++++-----------------------
 1 file changed, 89 insertions(+), 169 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d8c10764f130..e9b960f0ff32 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -142,10 +142,6 @@ struct netfront_queue {
 	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 	grant_ref_t gref_rx_head;
 	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
-
-	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
-	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
-	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
 };
 
 struct netfront_info {
@@ -223,11 +219,7 @@ static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
 }
 
 #ifdef CONFIG_SYSFS
-static int xennet_sysfs_addif(struct net_device *netdev);
-static void xennet_sysfs_delif(struct net_device *netdev);
-#else /* !CONFIG_SYSFS */
-#define xennet_sysfs_addif(dev) (0)
-#define xennet_sysfs_delif(dev) do { } while (0)
+static const struct attribute_group xennet_dev_group;
 #endif
 
 static bool xennet_can_sg(struct net_device *dev)
@@ -424,109 +416,68 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 	xennet_maybe_wake_tx(queue);
 }
 
-static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
-			      struct xen_netif_tx_request *tx)
-{
-	char *data = skb->data;
-	unsigned long mfn;
-	RING_IDX prod = queue->tx.req_prod_pvt;
-	int frags = skb_shinfo(skb)->nr_frags;
-	unsigned int offset = offset_in_page(data);
-	unsigned int len = skb_headlen(skb);
-	unsigned int id;
-	grant_ref_t ref;
-	int i;
-
-	/* While the header overlaps a page boundary (including being
-	   larger than a page), split it it into page-sized chunks. */
-	while (len > PAGE_SIZE - offset) {
-		tx->size = PAGE_SIZE - offset;
-		tx->flags |= XEN_NETTXF_more_data;
-		len -= tx->size;
-		data += tx->size;
-		offset = 0;
-
-		id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
-		queue->tx_skbs[id].skb = skb_get(skb);
-		tx = RING_GET_REQUEST(&queue->tx, prod++);
-		tx->id = id;
-		ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-		BUG_ON((signed short)ref < 0);
-
-		mfn = virt_to_mfn(data);
-		gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
-						mfn, GNTMAP_readonly);
-
-		queue->grant_tx_page[id] = virt_to_page(data);
-		tx->gref = queue->grant_tx_ref[id] = ref;
-		tx->offset = offset;
-		tx->size = len;
-		tx->flags = 0;
-	}
-
-	/* Grant backend access to each skb fragment page. */
-	for (i = 0; i < frags; i++) {
-		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
-		struct page *page = skb_frag_page(frag);
-
-		len = skb_frag_size(frag);
-		offset = frag->page_offset;
-
-		/* Skip unused frames from start of page */
-		page += offset >> PAGE_SHIFT;
-		offset &= ~PAGE_MASK;
-
-		while (len > 0) {
-			unsigned long bytes;
-
-			bytes = PAGE_SIZE - offset;
-			if (bytes > len)
-				bytes = len;
-
-			tx->flags |= XEN_NETTXF_more_data;
-
-			id = get_id_from_freelist(&queue->tx_skb_freelist,
-						  queue->tx_skbs);
-			queue->tx_skbs[id].skb = skb_get(skb);
-			tx = RING_GET_REQUEST(&queue->tx, prod++);
-			tx->id = id;
-			ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-			BUG_ON((signed short)ref < 0);
-
-			mfn = pfn_to_mfn(page_to_pfn(page));
-			gnttab_grant_foreign_access_ref(ref,
-							queue->info->xbdev->otherend_id,
-							mfn, GNTMAP_readonly);
-
-			queue->grant_tx_page[id] = page;
-			tx->gref = queue->grant_tx_ref[id] = ref;
-			tx->offset = offset;
-			tx->size = bytes;
-			tx->flags = 0;
-
-			offset += bytes;
-			len -= bytes;
-
-			/* Next frame */
-			if (offset == PAGE_SIZE && len) {
-				BUG_ON(!PageCompound(page));
-				page++;
-				offset = 0;
-			}
-		}
-	}
-
-	queue->tx.req_prod_pvt = prod;
+static struct xen_netif_tx_request *xennet_make_one_txreq(
+	struct netfront_queue *queue, struct sk_buff *skb,
+	struct page *page, unsigned int offset, unsigned int len)
+{
+	unsigned int id;
+	struct xen_netif_tx_request *tx;
+	grant_ref_t ref;
+
+	len = min_t(unsigned int, PAGE_SIZE - offset, len);
+
+	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
+	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
+	BUG_ON((signed short)ref < 0);
+
+	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
+					page_to_mfn(page), GNTMAP_readonly);
+
+	queue->tx_skbs[id].skb = skb;
+	queue->grant_tx_page[id] = page;
+	queue->grant_tx_ref[id] = ref;
+
+	tx->id = id;
+	tx->gref = ref;
+	tx->offset = offset;
+	tx->size = len;
+	tx->flags = 0;
+
+	return tx;
+}
+
+static struct xen_netif_tx_request *xennet_make_txreqs(
+	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
+	struct sk_buff *skb, struct page *page,
+	unsigned int offset, unsigned int len)
+{
+	/* Skip unused frames from start of page */
+	page += offset >> PAGE_SHIFT;
+	offset &= ~PAGE_MASK;
+
+	while (len) {
+		tx->flags |= XEN_NETTXF_more_data;
+		tx = xennet_make_one_txreq(queue, skb_get(skb),
+					   page, offset, len);
+		page++;
+		offset = 0;
+		len -= tx->size;
+	}
+
+	return tx;
 }
 
 /*
- * Count how many ring slots are required to send the frags of this
- * skb. Each frag might be a compound page.
+ * Count how many ring slots are required to send this skb. Each frag
+ * might be a compound page.
  */
-static int xennet_count_skb_frag_slots(struct sk_buff *skb)
+static int xennet_count_skb_slots(struct sk_buff *skb)
 {
 	int i, frags = skb_shinfo(skb)->nr_frags;
-	int pages = 0;
+	int pages;
+
+	pages = PFN_UP(offset_in_page(skb->data) + skb_headlen(skb));
 
 	for (i = 0; i < frags; i++) {
 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
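
Aside from folding the grant/request boilerplate into xennet_make_one_txreq(), the renamed xennet_count_skb_slots() now counts the linear area itself, via PFN_UP(offset_in_page(skb->data) + skb_headlen(skb)), instead of leaving that to the caller. A minimal userspace sketch of that arithmetic (PAGE_SIZE and the two helpers are stand-ins defined here for illustration; in the kernel they come from the mm headers):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096UL

	/* Userspace stand-ins for the kernel macros. */
	#define PFN_UP(x)         (((x) + PAGE_SIZE - 1) / PAGE_SIZE)
	#define offset_in_page(p) ((uintptr_t)(p) & (PAGE_SIZE - 1))

	/* Slots needed for a linear area of 'len' bytes at 'data':
	 * one per page the buffer touches. */
	static unsigned long linear_slots(const void *data, unsigned long len)
	{
		return PFN_UP(offset_in_page(data) + len);
	}

	int main(void)
	{
		/* 100 bytes wholly inside one page -> 1 slot */
		printf("%lu\n", linear_slots((void *)0x1000, 100));
		/* 100 bytes straddling a page boundary -> 2 slots */
		printf("%lu\n", linear_slots((void *)(0x2000 - 50), 100));
		return 0;
	}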
@@ -562,18 +513,15 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 
 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	unsigned short id;
 	struct netfront_info *np = netdev_priv(dev);
 	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
-	struct xen_netif_tx_request *tx;
-	char *data = skb->data;
-	RING_IDX i;
-	grant_ref_t ref;
-	unsigned long mfn;
+	struct xen_netif_tx_request *tx, *first_tx;
+	unsigned int i;
 	int notify;
 	int slots;
-	unsigned int offset = offset_in_page(data);
-	unsigned int len = skb_headlen(skb);
+	struct page *page;
+	unsigned int offset;
+	unsigned int len;
 	unsigned long flags;
 	struct netfront_queue *queue = NULL;
 	unsigned int num_queues = dev->real_num_tx_queues;
@@ -596,18 +544,18 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;
 	}
 
-	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
-		xennet_count_skb_frag_slots(skb);
+	slots = xennet_count_skb_slots(skb);
 	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
 		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
 				    slots, skb->len);
 		if (skb_linearize(skb))
 			goto drop;
-		data = skb->data;
-		offset = offset_in_page(data);
-		len = skb_headlen(skb);
 	}
 
+	page = virt_to_page(skb->data);
+	offset = offset_in_page(skb->data);
+	len = skb_headlen(skb);
+
 	spin_lock_irqsave(&queue->tx_lock, flags);
 
 	if (unlikely(!netif_carrier_ok(dev) ||
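
With counting centralized, the slot check above no longer needs to refresh local copies of data/offset/len when skb_linearize() swaps in a new head buffer; page, offset and len are now derived from skb->data once, after the fallback. The derivation is the usual address split, sketched here in userspace (the address is made up; virt_to_page() does the frame half in the kernel):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	int main(void)
	{
		uintptr_t data = 0x7f3a12345678; /* pretend skb->data */

		/* Page frame number and in-page offset. */
		printf("pfn=0x%lx offset=%lu\n",
		       (unsigned long)(data >> PAGE_SHIFT),
		       (unsigned long)(data & (PAGE_SIZE - 1)));
		return 0;
	}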
@@ -617,25 +565,13 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;
 	}
 
-	i = queue->tx.req_prod_pvt;
-
-	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
-	queue->tx_skbs[id].skb = skb;
-
-	tx = RING_GET_REQUEST(&queue->tx, i);
-
-	tx->id = id;
-	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-	BUG_ON((signed short)ref < 0);
-	mfn = virt_to_mfn(data);
-	gnttab_grant_foreign_access_ref(
-		ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
-	queue->grant_tx_page[id] = virt_to_page(data);
-	tx->gref = queue->grant_tx_ref[id] = ref;
-	tx->offset = offset;
-	tx->size = len;
-
-	tx->flags = 0;
+	/* First request for the linear area. */
+	first_tx = tx = xennet_make_one_txreq(queue, skb,
+					      page, offset, len);
+	page++;
+	offset = 0;
+	len -= tx->size;
+
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		/* local packet? */
 		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
@@ -643,11 +579,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* remote but checksummed. */
 		tx->flags |= XEN_NETTXF_data_validated;
 
+	/* Optional extra info after the first request. */
 	if (skb_shinfo(skb)->gso_size) {
 		struct xen_netif_extra_info *gso;
 
 		gso = (struct xen_netif_extra_info *)
-			RING_GET_REQUEST(&queue->tx, ++i);
+			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 
 		tx->flags |= XEN_NETTXF_extra_info;
 
@@ -662,10 +599,19 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		gso->flags = 0;
 	}
 
-	queue->tx.req_prod_pvt = i + 1;
-
-	xennet_make_frags(skb, queue, tx);
-	tx->size = skb->len;
+	/* Requests for the rest of the linear area. */
+	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
+
+	/* Requests for all the frags. */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		tx = xennet_make_txreqs(queue, tx, skb,
+					skb_frag_page(frag), frag->page_offset,
+					skb_frag_size(frag));
+	}
+
+	/* First request has the packet length. */
+	first_tx->size = skb->len;
 
 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 	if (notify)
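
The rewritten transmit path keeps the ring protocol unchanged: every request except the last in a packet sets XEN_NETTXF_more_data, and once all slots are queued the first request's size field is overwritten with the total packet length, which is why the code saves first_tx. A standalone model of that invariant (fake_txreq and make_chain are invented for illustration; XEN_NETTXF_more_data is bit 2 in xen/interface/io/netif.h):

	#include <stdio.h>

	#define PAGE_SIZE            4096u
	#define XEN_NETTXF_more_data (1u << 2)

	struct fake_txreq {
		unsigned offset, size, flags;
	};

	/* Split (offset, len) into page-sized requests, chained with
	 * more_data; then give the first request the total length. */
	static unsigned make_chain(struct fake_txreq *ring,
				   unsigned offset, unsigned len)
	{
		unsigned n = 0, total = len;

		while (len) {
			unsigned chunk = PAGE_SIZE - offset;

			if (chunk > len)
				chunk = len;
			ring[n].offset = offset;
			ring[n].size = chunk;
			ring[n].flags = len > chunk ? XEN_NETTXF_more_data : 0;
			n++;
			len -= chunk;
			offset = 0;
		}
		ring[0].size = total; /* first request has the packet length */
		return n;
	}

	int main(void)
	{
		struct fake_txreq ring[8];
		unsigned i, n = make_chain(ring, 3000, 6000);

		for (i = 0; i < n; i++)
			printf("slot %u: offset=%u size=%u more_data=%u\n", i,
			       ring[i].offset, ring[i].size,
			       !!(ring[i].flags & XEN_NETTXF_more_data));
		return 0;
	}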
@@ -1367,20 +1313,15 @@ static int netfront_probe(struct xenbus_device *dev,
 
 	info = netdev_priv(netdev);
 	dev_set_drvdata(&dev->dev, info);
-
+#ifdef CONFIG_SYSFS
+	info->netdev->sysfs_groups[0] = &xennet_dev_group;
+#endif
 	err = register_netdev(info->netdev);
 	if (err) {
 		pr_warn("%s: register_netdev err=%d\n", __func__, err);
 		goto fail;
 	}
 
-	err = xennet_sysfs_addif(info->netdev);
-	if (err) {
-		unregister_netdev(info->netdev);
-		pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
-		goto fail;
-	}
-
 	return 0;
 
  fail:
@@ -2144,39 +2085,20 @@ static ssize_t store_rxbuf(struct device *dev,
 	return len;
 }
 
-static struct device_attribute xennet_attrs[] = {
-	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
-	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
-	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL),
-};
-
-static int xennet_sysfs_addif(struct net_device *netdev)
-{
-	int i;
-	int err;
-
-	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
-		err = device_create_file(&netdev->dev,
-					 &xennet_attrs[i]);
-		if (err)
-			goto fail;
-	}
-	return 0;
-
- fail:
-	while (--i >= 0)
-		device_remove_file(&netdev->dev, &xennet_attrs[i]);
-	return err;
-}
-
-static void xennet_sysfs_delif(struct net_device *netdev)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
-		device_remove_file(&netdev->dev, &xennet_attrs[i]);
-}
-
+static DEVICE_ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
+static DEVICE_ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
+static DEVICE_ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL);
+
+static struct attribute *xennet_dev_attrs[] = {
+	&dev_attr_rxbuf_min.attr,
+	&dev_attr_rxbuf_max.attr,
+	&dev_attr_rxbuf_cur.attr,
+	NULL
+};
+
+static const struct attribute_group xennet_dev_group = {
+	.attrs = xennet_dev_attrs
+};
 #endif /* CONFIG_SYSFS */
 
 static int xennet_remove(struct xenbus_device *dev)
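
The rxbuf_min/rxbuf_max/rxbuf_cur files themselves are unchanged; what changes is their registration. Exposing them as a static attribute_group hung off netdev->sysfs_groups[0] (see the netfront_probe() hunk above) lets the driver core create and remove the files together with register_netdev()/unregister_netdev(), which is what makes the hand-rolled addif/delif helpers and their rollback loop redundant, and why the explicit delif call disappears from xennet_remove() in the hunk below. The design point, a NULL-terminated table replacing an imperative loop with manual unwinding, sketched in userspace with invented names:

	#include <stdio.h>
	#include <stddef.h>

	/* Stand-in for 'struct attribute': just a name here. */
	struct attr {
		const char *name;
	};

	static struct attr rxbuf_min = { "rxbuf_min" };
	static struct attr rxbuf_max = { "rxbuf_max" };
	static struct attr rxbuf_cur = { "rxbuf_cur" };

	/* NULL-terminated, like xennet_dev_attrs[]: whoever consumes the
	 * table walks it for both creation and removal, so the two stay
	 * symmetric by construction. */
	static struct attr *group[] = {
		&rxbuf_min, &rxbuf_max, &rxbuf_cur, NULL
	};

	int main(void)
	{
		for (struct attr **a = group; *a; a++)
			printf("would create sysfs file: %s\n", (*a)->name);
		return 0;
	}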
@@ -2190,8 +2112,6 @@ static int xennet_remove(struct xenbus_device *dev)
 
 	xennet_disconnect_backend(info);
 
-	xennet_sysfs_delif(info->netdev);
-
 	unregister_netdev(info->netdev);
 
 	for (i = 0; i < num_queues; ++i) {