path: root/drivers/net/xen-netback/netback.c
author     Wei Liu <wei.liu2@citrix.com>          2013-10-08 05:54:21 -0400
committer  David S. Miller <davem@davemloft.net>  2013-10-08 15:10:48 -0400
commit     33bc801dddc14f0f96b79e453ec51cecfe5ed612 (patch)
tree       d91064e894363814be9aec6355e16e85e4835eaf /drivers/net/xen-netback/netback.c
parent     0ca45208b0400812965d810b1917839dc8844bbe (diff)
Revert "xen-netback: improve ring effeciency for guest RX"
This reverts commit 4f0581d25827d5e864bcf07b05d73d0d12a20a5c.

The named changeset is causing problems. Let's aim to make this part less
fragile before trying to improve things.

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Annie Li <annie.li@oracle.com>
Cc: Matt Wilson <msw@amazon.com>
Cc: Xi Xiong <xixiong@amazon.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
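For context, the reverted change had replaced the driver's conservative per-skb worst-case slot accounting with an exact count stashed in the skb control block (peek_slots_count); the diff below restores the original xenvif_count_frag_slots() walk and the MAX_SKB_FRAGS-based batching check. The following standalone C sketch, which is not part of the patch, mimics that restored per-page accounting so it can be exercised outside the kernel; PAGE_SIZE, MAX_BUFFER_OFFSET, the simplified start_new_rx_buffer() helper and the example fragment are illustrative stand-ins, not the driver's definitions.

/* Standalone sketch of the slot accounting this revert restores.
 * All constants and the helper below are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE         4096UL
#define MAX_BUFFER_OFFSET PAGE_SIZE   /* one grant-copy buffer per ring slot */

struct count_slot_state {
        unsigned long copy_off;  /* bytes already packed into the current slot */
        bool head;               /* still filling the head slot? */
};

/* Simplified stand-in for the driver's start_new_rx_buffer(): open a new
 * slot whenever the next chunk would overflow the one being filled. The
 * real helper applies extra conditions. */
static bool start_new_rx_buffer(unsigned long copy_off, unsigned long bytes, bool head)
{
        (void)head;  /* the in-tree helper treats the head buffer specially; ignored here */
        return copy_off + bytes > MAX_BUFFER_OFFSET;
}

/* Mirrors the restored xenvif_count_frag_slots() loop (vif argument dropped):
 * walk one buffer fragment page by page and count the extra slots it needs. */
static unsigned count_frag_slots(unsigned long offset, unsigned long size,
                                 struct count_slot_state *state)
{
        unsigned count = 0;

        offset &= PAGE_SIZE - 1;

        while (size > 0) {
                unsigned long bytes = PAGE_SIZE - offset;

                if (bytes > size)
                        bytes = size;

                if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
                        count++;
                        state->copy_off = 0;
                }

                /* Defensive clamp, kept to mirror the kernel loop's shape. */
                if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
                        bytes = MAX_BUFFER_OFFSET - state->copy_off;

                state->copy_off += bytes;
                offset += bytes;
                size -= bytes;

                if (offset == PAGE_SIZE)
                        offset = 0;

                state->head = false;
        }

        return count;
}

int main(void)
{
        /* Example: one 9000-byte fragment starting 100 bytes into a page. */
        struct count_slot_state state = { .copy_off = 0, .head = true };
        unsigned count = 1;  /* slot for the first (partial) page of data */

        count += count_frag_slots(100, 9000, &state);
        printf("estimated RX ring slots: %u\n", count);  /* prints 3 */
        return 0;
}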
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--   drivers/net/xen-netback/netback.c   144
1 file changed, 83 insertions(+), 61 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index d0b0feb035fb..f3e591c611de 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -47,14 +47,6 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
 
-/* SKB control block overlay is used to store useful information when
- * doing guest RX.
- */
-struct skb_cb_overlay {
-        int meta_slots_used;
-        int peek_slots_count;
-};
-
 /* Provide an option to disable split event channels at load time as
  * event channels are limited resource. Split event channels are
  * enabled by default.
@@ -220,6 +212,49 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
         return false;
 }
 
+struct xenvif_count_slot_state {
+        unsigned long copy_off;
+        bool head;
+};
+
+unsigned int xenvif_count_frag_slots(struct xenvif *vif,
+                                     unsigned long offset, unsigned long size,
+                                     struct xenvif_count_slot_state *state)
+{
+        unsigned count = 0;
+
+        offset &= ~PAGE_MASK;
+
+        while (size > 0) {
+                unsigned long bytes;
+
+                bytes = PAGE_SIZE - offset;
+
+                if (bytes > size)
+                        bytes = size;
+
+                if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
+                        count++;
+                        state->copy_off = 0;
+                }
+
+                if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
+                        bytes = MAX_BUFFER_OFFSET - state->copy_off;
+
+                state->copy_off += bytes;
+
+                offset += bytes;
+                size -= bytes;
+
+                if (offset == PAGE_SIZE)
+                        offset = 0;
+
+                state->head = false;
+        }
+
+        return count;
+}
+
 /*
  * Figure out how many ring slots we're going to need to send @skb to
  * the guest. This function is essentially a dry run of
@@ -227,53 +262,40 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
  */
 unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
 {
+        struct xenvif_count_slot_state state;
         unsigned int count;
-        int i, copy_off;
-        struct skb_cb_overlay *sco;
+        unsigned char *data;
+        unsigned i;
 
-        count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
+        state.head = true;
+        state.copy_off = 0;
 
-        copy_off = skb_headlen(skb) % PAGE_SIZE;
+        /* Slot for the first (partial) page of data. */
+        count = 1;
 
+        /* Need a slot for the GSO prefix for GSO extra data? */
         if (skb_shinfo(skb)->gso_size)
                 count++;
 
-        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-                unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
-                unsigned long bytes;
-
-                offset &= ~PAGE_MASK;
-
-                while (size > 0) {
-                        BUG_ON(offset >= PAGE_SIZE);
-                        BUG_ON(copy_off > MAX_BUFFER_OFFSET);
-
-                        bytes = PAGE_SIZE - offset;
-
-                        if (bytes > size)
-                                bytes = size;
+        data = skb->data;
+        while (data < skb_tail_pointer(skb)) {
+                unsigned long offset = offset_in_page(data);
+                unsigned long size = PAGE_SIZE - offset;
 
-                        if (start_new_rx_buffer(copy_off, bytes, 0)) {
-                                count++;
-                                copy_off = 0;
-                        }
+                if (data + size > skb_tail_pointer(skb))
+                        size = skb_tail_pointer(skb) - data;
 
-                        if (copy_off + bytes > MAX_BUFFER_OFFSET)
-                                bytes = MAX_BUFFER_OFFSET - copy_off;
+                count += xenvif_count_frag_slots(vif, offset, size, &state);
 
-                        copy_off += bytes;
+                data += size;
+        }
 
-                        offset += bytes;
-                        size -= bytes;
+        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+                unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+                unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
 
-                        if (offset == PAGE_SIZE)
-                                offset = 0;
-                }
+                count += xenvif_count_frag_slots(vif, offset, size, &state);
         }
-
-        sco = (struct skb_cb_overlay *)skb->cb;
-        sco->peek_slots_count = count;
         return count;
 }
 
@@ -305,11 +327,14 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
         return meta;
 }
 
-/* Set up the grant operations for this fragment. */
+/*
+ * Set up the grant operations for this fragment. If it's a flipping
+ * interface, we also set up the unmap request from here.
+ */
 static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                                  struct netrx_pending_operations *npo,
                                  struct page *page, unsigned long size,
-                                 unsigned long offset, int head, int *first)
+                                 unsigned long offset, int *head)
 {
         struct gnttab_copy *copy_gop;
         struct xenvif_rx_meta *meta;
@@ -333,12 +358,12 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                 if (bytes > size)
                         bytes = size;
 
-                if (start_new_rx_buffer(npo->copy_off, bytes, head)) {
+                if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
                         /*
                          * Netfront requires there to be some data in the head
                          * buffer.
                          */
-                        BUG_ON(*first);
+                        BUG_ON(*head);
 
                         meta = get_next_rx_buffer(vif, npo);
                 }
@@ -372,10 +397,10 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                 }
 
                 /* Leave a gap for the GSO descriptor. */
-                if (*first && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
+                if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
                         vif->rx.req_cons++;
 
-                *first = 0; /* There must be something in this buffer now. */
+                *head = 0; /* There must be something in this buffer now. */
 
         }
 }
@@ -401,7 +426,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
         struct xen_netif_rx_request *req;
         struct xenvif_rx_meta *meta;
         unsigned char *data;
-        int first = 1;
+        int head = 1;
         int old_meta_prod;
 
         old_meta_prod = npo->meta_prod;
@@ -437,7 +462,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
                 len = skb_tail_pointer(skb) - data;
 
                 xenvif_gop_frag_copy(vif, skb, npo,
-                                     virt_to_page(data), len, offset, 1, &first);
+                                     virt_to_page(data), len, offset, &head);
                 data += len;
         }
 
@@ -446,7 +471,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
                                      skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                      skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                      skb_shinfo(skb)->frags[i].page_offset,
-                                     0, &first);
+                                     &head);
         }
 
         return npo->meta_prod - old_meta_prod;
@@ -504,6 +529,10 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
         }
 }
 
+struct skb_cb_overlay {
+        int meta_slots_used;
+};
+
 static void xenvif_kick_thread(struct xenvif *vif)
 {
         wake_up(&vif->wq);
@@ -534,26 +563,19 @@ void xenvif_rx_action(struct xenvif *vif)
         count = 0;
 
         while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
-                RING_IDX old_rx_req_cons;
-
                 vif = netdev_priv(skb->dev);
                 nr_frags = skb_shinfo(skb)->nr_frags;
 
-                old_rx_req_cons = vif->rx.req_cons;
                 sco = (struct skb_cb_overlay *)skb->cb;
                 sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
 
-                count += vif->rx.req_cons - old_rx_req_cons;
+                count += nr_frags + 1;
 
                 __skb_queue_tail(&rxq, skb);
 
-                skb = skb_peek(&vif->rx_queue);
-                if (skb == NULL)
-                        break;
-                sco = (struct skb_cb_overlay *)skb->cb;
-
                 /* Filled the batch queue? */
-                if (count + sco->peek_slots_count >= XEN_NETIF_RX_RING_SIZE)
+                /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
+                if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
                         break;
         }
 
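The final hunk above restores the conservative stop condition in xenvif_rx_action(): instead of consulting the next skb's precomputed peek_slots_count, batching stops once the slots consumed so far plus a worst-case per-skb allowance (MAX_SKB_FRAGS) could run past the RX ring. A minimal sketch of that guard follows; the ring size, the MAX_SKB_FRAGS value and the queued fragment counts are assumed for illustration rather than taken from the kernel headers.

/* Illustrative version of the batching guard restored above. */
#include <stdio.h>

#define XEN_NETIF_RX_RING_SIZE 256  /* assumed ring size for illustration */
#define MAX_SKB_FRAGS          17   /* assumed worst-case frags per skb */

int main(void)
{
        /* Pretend each queued skb costs one head slot plus its frag count,
         * matching the restored "count += nr_frags + 1" accounting. */
        unsigned nr_frags[] = { 3, 17, 0, 8, 17, 5 };
        unsigned count = 0;
        unsigned batched = 0;

        for (unsigned i = 0; i < sizeof(nr_frags) / sizeof(nr_frags[0]); i++) {
                count += nr_frags[i] + 1;
                batched++;

                /* Filled the batch queue?  Stop while a worst-case skb is
                 * still guaranteed to fit in the remaining ring slots. */
                if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
                        break;
        }

        printf("batched %u skbs using %u estimated slots\n", batched, count);
        return 0;
}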