author		Jennifer Herbert <jennifer.herbert@citrix.com>	2015-01-05 09:45:10 -0500
committer	David Vrabel <david.vrabel@citrix.com>	2015-01-28 09:03:13 -0500
commit		c2677a6fc4dee765fff8f7ac3d61f657dc295650 (patch)
tree		b32501958567bd3de4ef368f9e86ec73d26d6685 /drivers/net/xen-netback
parent		8da7633f168b5428e2cfb7342408b2c44088f5df (diff)
xen-netback: use foreign page information from the pages themselves
Use the foreign page flag in netback to get the domid and grant ref needed
for the grant copy. This significantly simplifies the netback code and makes
netback work with foreign pages from other backends (e.g., blkback).

This allows blkback to use iSCSI disks provided by domUs running on the same
host.

Signed-off-by: Jennifer Herbert <jennifer.herbert@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
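As background for the diff below: a grant-mapped page now carries its origin
with it, so the backend can ask the page itself for the source domain and
grant reference when building the grant-copy operation. A minimal sketch of
that pattern follows; the helper name gnttab_set_copy_source() and the exact
include paths are illustrative, while xen_page_foreign(), the domid/gref
fields of struct xen_page_foreign and the gnttab_copy fields are the ones the
patch itself uses.

#include <linux/mm.h>
#include <xen/grant_table.h>

/* Sketch: choose the grant-copy source for one page, as netback now does
 * in xenvif_gop_frag_copy().
 */
static void gnttab_set_copy_source(struct gnttab_copy *op, struct page *page)
{
	struct xen_page_foreign *foreign = xen_page_foreign(page);

	if (foreign) {
		/* Page was grant-mapped from another domain (e.g. by
		 * blkback): copy straight from that domain's grant ref.
		 */
		op->source.domid = foreign->domid;
		op->source.u.ref = foreign->gref;
		op->flags |= GNTCOPY_source_gref;
	} else {
		/* Ordinary local page: the source is our own domain. */
		op->source.domid = DOMID_SELF;
	}
}

Because the information travels with the page, the per-frag walk of the
ubuf_info chain through foreign queues (removed in the diff below) is no
longer needed.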
Diffstat (limited to 'drivers/net/xen-netback')
-rw-r--r--	drivers/net/xen-netback/netback.c	100
1 file changed, 9 insertions(+), 91 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 64413189ad06..ae3ab3752ea8 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -314,9 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
 static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
 				 struct netrx_pending_operations *npo,
 				 struct page *page, unsigned long size,
-				 unsigned long offset, int *head,
-				 struct xenvif_queue *foreign_queue,
-				 grant_ref_t foreign_gref)
+				 unsigned long offset, int *head)
 {
 	struct gnttab_copy *copy_gop;
 	struct xenvif_rx_meta *meta;
@@ -333,6 +331,8 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
 	offset &= ~PAGE_MASK;
 
 	while (size > 0) {
+		struct xen_page_foreign *foreign;
+
 		BUG_ON(offset >= PAGE_SIZE);
 		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
 
@@ -361,9 +361,10 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
 		copy_gop->flags = GNTCOPY_dest_gref;
 		copy_gop->len = bytes;
 
-		if (foreign_queue) {
-			copy_gop->source.domid = foreign_queue->vif->domid;
-			copy_gop->source.u.ref = foreign_gref;
+		foreign = xen_page_foreign(page);
+		if (foreign) {
+			copy_gop->source.domid = foreign->domid;
+			copy_gop->source.u.ref = foreign->gref;
 			copy_gop->flags |= GNTCOPY_source_gref;
 		} else {
 			copy_gop->source.domid = DOMID_SELF;
@@ -406,35 +407,6 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
 }
 
 /*
- * Find the grant ref for a given frag in a chain of struct ubuf_info's
- * skb: the skb itself
- * i: the frag's number
- * ubuf: a pointer to an element in the chain. It should not be NULL
- *
- * Returns a pointer to the element in the chain where the page were found. If
- * not found, returns NULL.
- * See the definition of callback_struct in common.h for more details about
- * the chain.
- */
-static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
-						const int i,
-						const struct ubuf_info *ubuf)
-{
-	struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);
-
-	do {
-		u16 pending_idx = ubuf->desc;
-
-		if (skb_shinfo(skb)->frags[i].page.p ==
-		    foreign_queue->mmap_pages[pending_idx])
-			break;
-		ubuf = (struct ubuf_info *) ubuf->ctx;
-	} while (ubuf);
-
-	return ubuf;
-}
-
-/*
  * Prepare an SKB to be transmitted to the frontend.
  *
  * This function is responsible for allocating grant operations, meta
@@ -459,8 +431,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	int head = 1;
 	int old_meta_prod;
 	int gso_type;
-	const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
-	const struct ubuf_info *const head_ubuf = ubuf;
 
 	old_meta_prod = npo->meta_prod;
 
@@ -507,68 +477,16 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 		len = skb_tail_pointer(skb) - data;
 
 		xenvif_gop_frag_copy(queue, skb, npo,
-				     virt_to_page(data), len, offset, &head,
-				     NULL,
-				     0);
+				     virt_to_page(data), len, offset, &head);
 		data += len;
 	}
 
 	for (i = 0; i < nr_frags; i++) {
-		/* This variable also signals whether foreign_gref has a real
-		 * value or not.
-		 */
-		struct xenvif_queue *foreign_queue = NULL;
-		grant_ref_t foreign_gref;
-
-		if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
-			(ubuf->callback == &xenvif_zerocopy_callback)) {
-			const struct ubuf_info *const startpoint = ubuf;
-
-			/* Ideally ubuf points to the chain element which
-			 * belongs to this frag. Or if frags were removed from
-			 * the beginning, then shortly before it.
-			 */
-			ubuf = xenvif_find_gref(skb, i, ubuf);
-
-			/* Try again from the beginning of the list, if we
-			 * haven't tried from there. This only makes sense in
-			 * the unlikely event of reordering the original frags.
-			 * For injected local pages it's an unnecessary second
-			 * run.
-			 */
-			if (unlikely(!ubuf) && startpoint != head_ubuf)
-				ubuf = xenvif_find_gref(skb, i, head_ubuf);
-
-			if (likely(ubuf)) {
-				u16 pending_idx = ubuf->desc;
-
-				foreign_queue = ubuf_to_queue(ubuf);
-				foreign_gref =
-					foreign_queue->pending_tx_info[pending_idx].req.gref;
-				/* Just a safety measure. If this was the last
-				 * element on the list, the for loop will
-				 * iterate again if a local page were added to
-				 * the end. Using head_ubuf here prevents the
-				 * second search on the chain. Or the original
-				 * frags changed order, but that's less likely.
-				 * In any way, ubuf shouldn't be NULL.
-				 */
-				ubuf = ubuf->ctx ?
-					(struct ubuf_info *) ubuf->ctx :
-					head_ubuf;
-			} else
-				/* This frag was a local page, added to the
-				 * array after the skb left netback.
-				 */
-				ubuf = head_ubuf;
-		}
 		xenvif_gop_frag_copy(queue, skb, npo,
 				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
 				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
 				     skb_shinfo(skb)->frags[i].page_offset,
-				     &head,
-				     foreign_queue,
-				     foreign_queue ? foreign_gref : UINT_MAX);
+				     &head);
 	}
 
 	return npo->meta_prod - old_meta_prod;