Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--  drivers/net/xen-netback/netback.c  40
1 file changed, 35 insertions(+), 5 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4ebfcf3d8a3b..f2d6b78d901d 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -335,21 +335,35 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
 		unsigned long bytes;
+
+		offset &= ~PAGE_MASK;
+
 		while (size > 0) {
+			BUG_ON(offset >= PAGE_SIZE);
 			BUG_ON(copy_off > MAX_BUFFER_OFFSET);
 
-			if (start_new_rx_buffer(copy_off, size, 0)) {
+			bytes = PAGE_SIZE - offset;
+
+			if (bytes > size)
+				bytes = size;
+
+			if (start_new_rx_buffer(copy_off, bytes, 0)) {
 				count++;
 				copy_off = 0;
 			}
 
-			bytes = size;
 			if (copy_off + bytes > MAX_BUFFER_OFFSET)
 				bytes = MAX_BUFFER_OFFSET - copy_off;
 
 			copy_off += bytes;
+
+			offset += bytes;
 			size -= bytes;
+
+			if (offset == PAGE_SIZE)
+				offset = 0;
 		}
 	}
 	return count;
@@ -403,14 +417,24 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 	unsigned long bytes;
 
 	/* Data must not cross a page boundary. */
-	BUG_ON(size + offset > PAGE_SIZE);
+	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
 
 	meta = npo->meta + npo->meta_prod - 1;
 
+	/* Skip unused frames from start of page */
+	page += offset >> PAGE_SHIFT;
+	offset &= ~PAGE_MASK;
+
 	while (size > 0) {
+		BUG_ON(offset >= PAGE_SIZE);
 		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
 
-		if (start_new_rx_buffer(npo->copy_off, size, *head)) {
+		bytes = PAGE_SIZE - offset;
+
+		if (bytes > size)
+			bytes = size;
+
+		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
 			/*
 			 * Netfront requires there to be some data in the head
 			 * buffer.
@@ -420,7 +444,6 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 			meta = get_next_rx_buffer(vif, npo);
 		}
 
-		bytes = size;
 		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
 			bytes = MAX_BUFFER_OFFSET - npo->copy_off;
 
@@ -453,6 +476,13 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 		offset += bytes;
 		size -= bytes;
 
+		/* Next frame */
+		if (offset == PAGE_SIZE && size) {
+			BUG_ON(!PageCompound(page));
+			page++;
+			offset = 0;
+		}
+
 		/* Leave a gap for the GSO descriptor. */
 		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
 			vif->rx.req_cons++;
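
For reference, both hunks apply the same per-page chunking pattern: clamp each copy to the end of the current source page (bytes = PAGE_SIZE - offset), clamp it again to the space left in the destination buffer, then advance offset and step to the next page when it wraps. The standalone C sketch below only illustrates that pattern with assumed values for PAGE_SIZE and MAX_BUFFER_OFFSET; count_slots() is a hypothetical helper and deliberately simpler than xen_netbk_count_skb_slots() (it opens a new destination buffer only when the current one is full, rather than calling start_new_rx_buffer()).

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define MAX_BUFFER_OFFSET	PAGE_SIZE

/*
 * Count how many destination buffers a fragment consumes when the fragment
 * may start at a non-zero offset and span several (compound) pages.
 * Simplified: a new buffer is opened only when the current one is full.
 */
static unsigned int count_slots(unsigned long offset, unsigned long size)
{
	unsigned int count = 0;
	unsigned long copy_off = MAX_BUFFER_OFFSET;	/* forces a first buffer */
	unsigned long bytes;

	offset &= PAGE_SIZE - 1;	/* offset within the first page */

	while (size > 0) {
		/* Never copy across a source page boundary ... */
		bytes = PAGE_SIZE - offset;
		if (bytes > size)
			bytes = size;

		/* ... and open a new destination buffer once the current
		 * one is full. */
		if (copy_off == MAX_BUFFER_OFFSET) {
			count++;
			copy_off = 0;
		}
		if (copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - copy_off;

		copy_off += bytes;
		offset += bytes;
		size -= bytes;

		/* Move on to the next page of the source. */
		if (offset == PAGE_SIZE)
			offset = 0;
	}
	return count;
}

int main(void)
{
	/* 64 KiB of data starting 100 bytes into a page: 16 buffers with
	 * the assumed 4 KiB constants above. */
	printf("%u\n", count_slots(100, 65536));
	return 0;
}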