author		David Vrabel <david.vrabel@citrix.com>		2015-01-13 12:16:44 -0500
committer	David S. Miller <davem@davemloft.net>		2015-01-14 00:22:01 -0500
commit		a55e8bb8fb89c90b33791861e59859a39e57ba30 (patch)
tree		47cb71e2663f17bdb8abea097c3c030d03c63d81 /drivers/net/xen-netfront.c
parent		e84448d52190413400663736067f826f28a04ad6 (diff)
xen-netfront: refactor making Tx requests
Eliminate all the duplicate code for making Tx requests by
consolidating it into a single xennet_make_one_txreq() function.

xennet_make_one_txreq() and xennet_make_txreqs() work with pages and
offsets, which will make it easier for netfront to handle highmem
frags in the future.
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--	drivers/net/xen-netfront.c	181

1 file changed, 67 insertions, 114 deletions
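The heart of the refactoring is that both helpers address data as
(page, offset, len) rather than by virtual address: xennet_make_one_txreq()
clamps each request to the remainder of the current page, and
xennet_make_txreqs() loops, resuming at offset 0 on the next page until the
region is consumed. A self-contained, user-space sketch of that split
(illustrative only: split_len(), the local PAGE_SIZE and the sizes are
hypothetical, not part of the patch):

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Hypothetical stand-in for the min_t() clamp in xennet_make_one_txreq():
 * one Tx request covers at most the rest of the current page. */
static unsigned int split_len(unsigned int offset, unsigned int len)
{
	return len < PAGE_SIZE - offset ? len : PAGE_SIZE - offset;
}

int main(void)
{
	/* A region that starts mid-page and spans several pages. */
	unsigned int offset = 3000, len = 10000, page = 0;

	/* Mirrors the loop in xennet_make_txreqs(): one request per
	 * page-sized chunk, resuming at offset 0 on the next page. */
	while (len) {
		unsigned int chunk = split_len(offset, len);
		printf("page %u: offset %u, size %u\n", page, offset, chunk);
		page++;
		offset = 0;
		len -= chunk;
	}
	return 0;
}

This prints four requests of 1096, 4096, 4096 and 712 bytes, which is how a
10000-byte region starting at in-page offset 3000 would be split.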
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 803ff5356faa..01a4350eb313 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -421,99 +421,56 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 	xennet_maybe_wake_tx(queue);
 }
 
-static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
-			      struct xen_netif_tx_request *tx)
+static struct xen_netif_tx_request *xennet_make_one_txreq(
+	struct netfront_queue *queue, struct sk_buff *skb,
+	struct page *page, unsigned int offset, unsigned int len)
 {
-	char *data = skb->data;
-	unsigned long mfn;
-	RING_IDX prod = queue->tx.req_prod_pvt;
-	int frags = skb_shinfo(skb)->nr_frags;
-	unsigned int offset = offset_in_page(data);
-	unsigned int len = skb_headlen(skb);
 	unsigned int id;
+	struct xen_netif_tx_request *tx;
 	grant_ref_t ref;
-	int i;
 
-	/* While the header overlaps a page boundary (including being
-	   larger than a page), split it it into page-sized chunks. */
-	while (len > PAGE_SIZE - offset) {
-		tx->size = PAGE_SIZE - offset;
-		tx->flags |= XEN_NETTXF_more_data;
-		len -= tx->size;
-		data += tx->size;
-		offset = 0;
+	len = min_t(unsigned int, PAGE_SIZE - offset, len);
 
-		id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
-		queue->tx_skbs[id].skb = skb_get(skb);
-		tx = RING_GET_REQUEST(&queue->tx, prod++);
-		tx->id = id;
-		ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-		BUG_ON((signed short)ref < 0);
+	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
+	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
+	BUG_ON((signed short)ref < 0);
 
-		mfn = virt_to_mfn(data);
-		gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
-						mfn, GNTMAP_readonly);
+	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
+					page_to_mfn(page), GNTMAP_readonly);
 
-		queue->grant_tx_page[id] = virt_to_page(data);
-		tx->gref = queue->grant_tx_ref[id] = ref;
-		tx->offset = offset;
-		tx->size = len;
-		tx->flags = 0;
-	}
+	queue->tx_skbs[id].skb = skb;
+	queue->grant_tx_page[id] = page;
+	queue->grant_tx_ref[id] = ref;
 
-	/* Grant backend access to each skb fragment page. */
-	for (i = 0; i < frags; i++) {
-		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
-		struct page *page = skb_frag_page(frag);
+	tx->id = id;
+	tx->gref = ref;
+	tx->offset = offset;
+	tx->size = len;
+	tx->flags = 0;
 
-		len = skb_frag_size(frag);
-		offset = frag->page_offset;
+	return tx;
+}
 
-		/* Skip unused frames from start of page */
-		page += offset >> PAGE_SHIFT;
-		offset &= ~PAGE_MASK;
+static struct xen_netif_tx_request *xennet_make_txreqs(
+	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
+	struct sk_buff *skb, struct page *page,
+	unsigned int offset, unsigned int len)
+{
+	/* Skip unused frames from start of page */
+	page += offset >> PAGE_SHIFT;
+	offset &= ~PAGE_MASK;
 
-		while (len > 0) {
-			unsigned long bytes;
-
-			bytes = PAGE_SIZE - offset;
-			if (bytes > len)
-				bytes = len;
-
-			tx->flags |= XEN_NETTXF_more_data;
-
-			id = get_id_from_freelist(&queue->tx_skb_freelist,
-						  queue->tx_skbs);
-			queue->tx_skbs[id].skb = skb_get(skb);
-			tx = RING_GET_REQUEST(&queue->tx, prod++);
-			tx->id = id;
-			ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-			BUG_ON((signed short)ref < 0);
-
-			mfn = pfn_to_mfn(page_to_pfn(page));
-			gnttab_grant_foreign_access_ref(ref,
-							queue->info->xbdev->otherend_id,
-							mfn, GNTMAP_readonly);
-
-			queue->grant_tx_page[id] = page;
-			tx->gref = queue->grant_tx_ref[id] = ref;
-			tx->offset = offset;
-			tx->size = bytes;
-			tx->flags = 0;
-
-			offset += bytes;
-			len -= bytes;
-
-			/* Next frame */
-			if (offset == PAGE_SIZE && len) {
-				BUG_ON(!PageCompound(page));
-				page++;
-				offset = 0;
-			}
-		}
+	while (len) {
+		tx->flags |= XEN_NETTXF_more_data;
+		tx = xennet_make_one_txreq(queue, skb_get(skb),
+					   page, offset, len);
+		page++;
+		offset = 0;
+		len -= tx->size;
 	}
 
-	queue->tx.req_prod_pvt = prod;
+	return tx;
 }
 
 /*
@@ -561,18 +518,15 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 
 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	unsigned short id;
 	struct netfront_info *np = netdev_priv(dev);
 	struct netfront_stats *stats = this_cpu_ptr(np->stats);
-	struct xen_netif_tx_request *tx;
-	char *data = skb->data;
-	RING_IDX i;
-	grant_ref_t ref;
-	unsigned long mfn;
+	struct xen_netif_tx_request *tx, *first_tx;
+	unsigned int i;
 	int notify;
 	int slots;
-	unsigned int offset = offset_in_page(data);
-	unsigned int len = skb_headlen(skb);
+	struct page *page;
+	unsigned int offset;
+	unsigned int len;
 	unsigned long flags;
 	struct netfront_queue *queue = NULL;
 	unsigned int num_queues = dev->real_num_tx_queues;
@@ -601,11 +555,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		       slots, skb->len);
 		if (skb_linearize(skb))
 			goto drop;
-		data = skb->data;
-		offset = offset_in_page(data);
-		len = skb_headlen(skb);
 	}
 
+	page = virt_to_page(skb->data);
+	offset = offset_in_page(skb->data);
+	len = skb_headlen(skb);
+
 	spin_lock_irqsave(&queue->tx_lock, flags);
 
 	if (unlikely(!netif_carrier_ok(dev) ||
@@ -615,25 +570,13 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;
 	}
 
-	i = queue->tx.req_prod_pvt;
-
-	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
-	queue->tx_skbs[id].skb = skb;
-
-	tx = RING_GET_REQUEST(&queue->tx, i);
-
-	tx->id = id;
-	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-	BUG_ON((signed short)ref < 0);
-	mfn = virt_to_mfn(data);
-	gnttab_grant_foreign_access_ref(
-		ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
-	queue->grant_tx_page[id] = virt_to_page(data);
-	tx->gref = queue->grant_tx_ref[id] = ref;
-	tx->offset = offset;
-	tx->size = len;
-
-	tx->flags = 0;
+	/* First request for the linear area. */
+	first_tx = tx = xennet_make_one_txreq(queue, skb,
+					      page, offset, len);
+	page++;
+	offset = 0;
+	len -= tx->size;
+
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		/* local packet? */
 		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
@@ -641,11 +584,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* remote but checksummed. */
 		tx->flags |= XEN_NETTXF_data_validated;
 
+	/* Optional extra info after the first request. */
 	if (skb_shinfo(skb)->gso_size) {
 		struct xen_netif_extra_info *gso;
 
 		gso = (struct xen_netif_extra_info *)
-			RING_GET_REQUEST(&queue->tx, ++i);
+			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 
 		tx->flags |= XEN_NETTXF_extra_info;
 
@@ -660,10 +604,19 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		gso->flags = 0;
 	}
 
-	queue->tx.req_prod_pvt = i + 1;
+	/* Requests for the rest of the linear area. */
+	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
+
+	/* Requests for all the frags. */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		tx = xennet_make_txreqs(queue, tx, skb,
+					skb_frag_page(frag), frag->page_offset,
+					skb_frag_size(frag));
+	}
 
-	xennet_make_frags(skb, queue, tx);
-	tx->size = skb->len;
+	/* First request has the packet length. */
+	first_tx->size = skb->len;
 
 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 	if (notify)
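
Condensed, the transmit path after this patch builds the whole request chain
through the two helpers. The fragment below is abridged from the hunks above
(locking, checksum flags and the GSO extra-info slot elided) to show the shape
of the result; it is a reading aid, not a further change:

	/* First request covers up to the end of the first page. */
	first_tx = tx = xennet_make_one_txreq(queue, skb, page, offset, len);
	page++;
	offset = 0;
	len -= tx->size;

	/* Remaining pages of the linear area, then every frag; each
	 * extra request sets XEN_NETTXF_more_data on its predecessor. */
	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
					frag->page_offset, skb_frag_size(frag));
	}

	/* The first request reports the total packet length. */
	first_tx->size = skb->len;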