about summary refs log tree commit diff stats
path: root/drivers/net/xen-netback/netback.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-02-10 16:56:56 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-02-10 16:56:56 -0500
commitbdccc4edeb03ad68c55053b0260bdaaac547bbd9 (patch)
treec9fdce7c6bb13632f5a64925a1fb3fa306b7cfe1 /drivers/net/xen-netback/netback.c
parent98368ab436538103a557fc1f15f54afd8aab6712 (diff)
parent72978b2fe2f2cdf9f319c6c6dcdbe92b38de2be2 (diff)
Merge tag 'stable/for-linus-3.20-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen features and fixes from David Vrabel: - Reworked handling for foreign (grant mapped) pages to simplify the code, enable a number of additional use cases and fix a number of long-standing bugs. - Prefer the TSC over the Xen PV clock when dom0 (and the TSC is stable). - Assorted other cleanup and minor bug fixes. * tag 'stable/for-linus-3.20-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (25 commits) xen/manage: Fix USB interaction issues when resuming xenbus: Add proper handling of XS_ERROR from Xenbus for transactions. xen/gntdev: provide find_special_page VMA operation xen/gntdev: mark userspace PTEs as special on x86 PV guests xen-blkback: safely unmap grants in case they are still in use xen/gntdev: safely unmap grants in case they are still in use xen/gntdev: convert priv->lock to a mutex xen/grant-table: add a mechanism to safely unmap pages that are in use xen-netback: use foreign page information from the pages themselves xen: mark grant mapped pages as foreign xen/grant-table: add helpers for allocating pages x86/xen: require ballooned pages for grant maps xen: remove scratch frames for ballooned pages and m2p override xen/grant-table: pre-populate kernel unmap ops for xen_gnttab_unmap_refs() mm: add 'foreign' alias for the 'pinned' page flag mm: provide a find_special_page vma operation x86/xen: cleanup arch/x86/xen/mmu.c x86/xen: add some __init annotations in arch/x86/xen/mmu.c x86/xen: add some __init and static annotations in arch/x86/xen/setup.c x86/xen: use correct types for addresses in arch/x86/xen/setup.c ...
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r-- drivers/net/xen-netback/netback.c | 106
1 file changed, 9 insertions(+), 97 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index c8ce701a7efb..7dc2d64db3cb 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -314,9 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
314static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb, 314static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
315 struct netrx_pending_operations *npo, 315 struct netrx_pending_operations *npo,
316 struct page *page, unsigned long size, 316 struct page *page, unsigned long size,
317 unsigned long offset, int *head, 317 unsigned long offset, int *head)
318 struct xenvif_queue *foreign_queue,
319 grant_ref_t foreign_gref)
320{ 318{
321 struct gnttab_copy *copy_gop; 319 struct gnttab_copy *copy_gop;
322 struct xenvif_rx_meta *meta; 320 struct xenvif_rx_meta *meta;
@@ -333,6 +331,8 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
333 offset &= ~PAGE_MASK; 331 offset &= ~PAGE_MASK;
334 332
335 while (size > 0) { 333 while (size > 0) {
334 struct xen_page_foreign *foreign;
335
336 BUG_ON(offset >= PAGE_SIZE); 336 BUG_ON(offset >= PAGE_SIZE);
337 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); 337 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
338 338
@@ -361,9 +361,10 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
361 copy_gop->flags = GNTCOPY_dest_gref; 361 copy_gop->flags = GNTCOPY_dest_gref;
362 copy_gop->len = bytes; 362 copy_gop->len = bytes;
363 363
364 if (foreign_queue) { 364 foreign = xen_page_foreign(page);
365 copy_gop->source.domid = foreign_queue->vif->domid; 365 if (foreign) {
366 copy_gop->source.u.ref = foreign_gref; 366 copy_gop->source.domid = foreign->domid;
367 copy_gop->source.u.ref = foreign->gref;
367 copy_gop->flags |= GNTCOPY_source_gref; 368 copy_gop->flags |= GNTCOPY_source_gref;
368 } else { 369 } else {
369 copy_gop->source.domid = DOMID_SELF; 370 copy_gop->source.domid = DOMID_SELF;
@@ -406,35 +407,6 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
406} 407}
407 408
408/* 409/*
409 * Find the grant ref for a given frag in a chain of struct ubuf_info's
410 * skb: the skb itself
411 * i: the frag's number
412 * ubuf: a pointer to an element in the chain. It should not be NULL
413 *
414 * Returns a pointer to the element in the chain where the page were found. If
415 * not found, returns NULL.
416 * See the definition of callback_struct in common.h for more details about
417 * the chain.
418 */
419static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
420 const int i,
421 const struct ubuf_info *ubuf)
422{
423 struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);
424
425 do {
426 u16 pending_idx = ubuf->desc;
427
428 if (skb_shinfo(skb)->frags[i].page.p ==
429 foreign_queue->mmap_pages[pending_idx])
430 break;
431 ubuf = (struct ubuf_info *) ubuf->ctx;
432 } while (ubuf);
433
434 return ubuf;
435}
436
437/*
438 * Prepare an SKB to be transmitted to the frontend. 410 * Prepare an SKB to be transmitted to the frontend.
439 * 411 *
440 * This function is responsible for allocating grant operations, meta 412 * This function is responsible for allocating grant operations, meta
@@ -459,8 +431,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
459 int head = 1; 431 int head = 1;
460 int old_meta_prod; 432 int old_meta_prod;
461 int gso_type; 433 int gso_type;
462 const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
463 const struct ubuf_info *const head_ubuf = ubuf;
464 434
465 old_meta_prod = npo->meta_prod; 435 old_meta_prod = npo->meta_prod;
466 436
@@ -507,68 +477,16 @@ static int xenvif_gop_skb(struct sk_buff *skb,
507 len = skb_tail_pointer(skb) - data; 477 len = skb_tail_pointer(skb) - data;
508 478
509 xenvif_gop_frag_copy(queue, skb, npo, 479 xenvif_gop_frag_copy(queue, skb, npo,
510 virt_to_page(data), len, offset, &head, 480 virt_to_page(data), len, offset, &head);
511 NULL,
512 0);
513 data += len; 481 data += len;
514 } 482 }
515 483
516 for (i = 0; i < nr_frags; i++) { 484 for (i = 0; i < nr_frags; i++) {
517 /* This variable also signals whether foreign_gref has a real
518 * value or not.
519 */
520 struct xenvif_queue *foreign_queue = NULL;
521 grant_ref_t foreign_gref;
522
523 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
524 (ubuf->callback == &xenvif_zerocopy_callback)) {
525 const struct ubuf_info *const startpoint = ubuf;
526
527 /* Ideally ubuf points to the chain element which
528 * belongs to this frag. Or if frags were removed from
529 * the beginning, then shortly before it.
530 */
531 ubuf = xenvif_find_gref(skb, i, ubuf);
532
533 /* Try again from the beginning of the list, if we
534 * haven't tried from there. This only makes sense in
535 * the unlikely event of reordering the original frags.
536 * For injected local pages it's an unnecessary second
537 * run.
538 */
539 if (unlikely(!ubuf) && startpoint != head_ubuf)
540 ubuf = xenvif_find_gref(skb, i, head_ubuf);
541
542 if (likely(ubuf)) {
543 u16 pending_idx = ubuf->desc;
544
545 foreign_queue = ubuf_to_queue(ubuf);
546 foreign_gref =
547 foreign_queue->pending_tx_info[pending_idx].req.gref;
548 /* Just a safety measure. If this was the last
549 * element on the list, the for loop will
550 * iterate again if a local page were added to
551 * the end. Using head_ubuf here prevents the
552 * second search on the chain. Or the original
553 * frags changed order, but that's less likely.
554 * In any way, ubuf shouldn't be NULL.
555 */
556 ubuf = ubuf->ctx ?
557 (struct ubuf_info *) ubuf->ctx :
558 head_ubuf;
559 } else
560 /* This frag was a local page, added to the
561 * array after the skb left netback.
562 */
563 ubuf = head_ubuf;
564 }
565 xenvif_gop_frag_copy(queue, skb, npo, 485 xenvif_gop_frag_copy(queue, skb, npo,
566 skb_frag_page(&skb_shinfo(skb)->frags[i]), 486 skb_frag_page(&skb_shinfo(skb)->frags[i]),
567 skb_frag_size(&skb_shinfo(skb)->frags[i]), 487 skb_frag_size(&skb_shinfo(skb)->frags[i]),
568 skb_shinfo(skb)->frags[i].page_offset, 488 skb_shinfo(skb)->frags[i].page_offset,
569 &head, 489 &head);
570 foreign_queue,
571 foreign_queue ? foreign_gref : UINT_MAX);
572 } 490 }
573 491
574 return npo->meta_prod - old_meta_prod; 492 return npo->meta_prod - old_meta_prod;
@@ -1241,12 +1159,6 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
1241 /* Take an extra reference to offset network stack's put_page */ 1159 /* Take an extra reference to offset network stack's put_page */
1242 get_page(queue->mmap_pages[pending_idx]); 1160 get_page(queue->mmap_pages[pending_idx]);
1243 } 1161 }
1244 /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
1245 * overlaps with "index", and "mapping" is not set. I think mapping
1246 * should be set. If delivered to local stack, it would drop this
1247 * skb in sk_filter unless the socket has the right to use it.
1248 */
1249 skb->pfmemalloc = false;
1250} 1162}
1251 1163
1252static int xenvif_get_extras(struct xenvif_queue *queue, 1164static int xenvif_get_extras(struct xenvif_queue *queue,