Diffstat (limited to 'drivers/net/xen-netback')
-rw-r--r--  drivers/net/xen-netback/common.h    |   1
-rw-r--r--  drivers/net/xen-netback/interface.c |   2
-rw-r--r--  drivers/net/xen-netback/netback.c   | 107
3 files changed, 4 insertions, 106 deletions
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 5f1fda44882b..589fa256256b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -251,7 +251,6 @@ struct xenvif {
 struct xenvif_rx_cb {
 	unsigned long expires;
 	int meta_slots_used;
-	bool full_coalesce;
 };
 
 #define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
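The XENVIF_RX_CB() macro kept as context above follows the usual pattern of overlaying a driver-private struct on an sk_buff's control block. Below is a minimal, self-contained userspace sketch of that pattern, assuming the 48-byte scratch area that skb->cb provides; fake_skb and FAKE_RX_CB are illustrative stand-ins, not the kernel's definitions.

/*
 * Sketch of the skb->cb control-block pattern used by XENVIF_RX_CB above.
 * fake_skb/FAKE_RX_CB are hypothetical names; the real struct sk_buff and
 * its 48-byte cb[] live in <linux/skbuff.h>.
 */
#include <assert.h>
#include <string.h>

struct fake_skb {
	unsigned char cb[48];	/* driver-private scratch space, like skb->cb */
};

struct fake_rx_cb {
	unsigned long expires;
	int meta_slots_used;
};

#define FAKE_RX_CB(skb) ((struct fake_rx_cb *)(skb)->cb)

int main(void)
{
	struct fake_skb skb;

	/* the private struct must fit inside the control block */
	_Static_assert(sizeof(struct fake_rx_cb) <= sizeof(skb.cb),
		       "rx cb too large");

	memset(&skb, 0, sizeof(skb));
	FAKE_RX_CB(&skb)->meta_slots_used = 3;
	assert(FAKE_RX_CB(&skb)->meta_slots_used == 3);
	return 0;
}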
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 12f9e2708afb..f38227afe099 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -80,7 +80,7 @@ static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-int xenvif_poll(struct napi_struct *napi, int budget)
+static int xenvif_poll(struct napi_struct *napi, int budget)
 {
 	struct xenvif_queue *queue =
 		container_of(napi, struct xenvif_queue, napi);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7dc2d64db3cb..f7a31d2cb3f1 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -233,51 +233,6 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
 	}
 }
 
-/*
- * Returns true if we should start a new receive buffer instead of
- * adding 'size' bytes to a buffer which currently contains 'offset'
- * bytes.
- */
-static bool start_new_rx_buffer(int offset, unsigned long size, int head,
-				bool full_coalesce)
-{
-	/* simple case: we have completely filled the current buffer. */
-	if (offset == MAX_BUFFER_OFFSET)
-		return true;
-
-	/*
-	 * complex case: start a fresh buffer if the current frag
-	 * would overflow the current buffer but only if:
-	 *     (i) this frag would fit completely in the next buffer
-	 * and (ii) there is already some data in the current buffer
-	 * and (iii) this is not the head buffer.
-	 * and (iv) there is no need to fully utilize the buffers
-	 *
-	 * Where:
-	 * - (i) stops us splitting a frag into two copies
-	 *   unless the frag is too large for a single buffer.
-	 * - (ii) stops us from leaving a buffer pointlessly empty.
-	 * - (iii) stops us leaving the first buffer
-	 *   empty. Strictly speaking this is already covered
-	 *   by (ii) but is explicitly checked because
-	 *   netfront relies on the first buffer being
-	 *   non-empty and can crash otherwise.
-	 * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
-	 *   slot
-	 *
-	 * This means we will effectively linearise small
-	 * frags but do not needlessly split large buffers
-	 * into multiple copies tend to give large frags their
-	 * own buffers as before.
-	 */
-	BUG_ON(size > MAX_BUFFER_OFFSET);
-	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
-	    !full_coalesce)
-		return true;
-
-	return false;
-}
-
 struct netrx_pending_operations {
 	unsigned copy_prod, copy_cons;
 	unsigned meta_prod, meta_cons;
@@ -336,24 +291,13 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
 		BUG_ON(offset >= PAGE_SIZE);
 		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
 
-		bytes = PAGE_SIZE - offset;
+		if (npo->copy_off == MAX_BUFFER_OFFSET)
+			meta = get_next_rx_buffer(queue, npo);
 
+		bytes = PAGE_SIZE - offset;
 		if (bytes > size)
 			bytes = size;
 
-		if (start_new_rx_buffer(npo->copy_off,
-					bytes,
-					*head,
-					XENVIF_RX_CB(skb)->full_coalesce)) {
-			/*
-			 * Netfront requires there to be some data in the head
-			 * buffer.
-			 */
-			BUG_ON(*head);
-
-			meta = get_next_rx_buffer(queue, npo);
-		}
-
 		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
 			bytes = MAX_BUFFER_OFFSET - npo->copy_off;
 
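To see what remains of the copy-splitting logic after this hunk, here is a hedged userspace sketch: a fresh ring buffer is taken only when the current one is completely full, and each copied chunk is clamped to both the bytes left in the source page and the space left in the destination buffer. MAX_BUFFER_OFFSET is assumed equal to PAGE_SIZE as in this driver, the grant-copy bookkeeping is reduced to a printf, and sketch_frag_copy is an illustrative name, not a kernel function.

#include <stdio.h>

#define PAGE_SIZE	  4096UL
#define MAX_BUFFER_OFFSET PAGE_SIZE

static void sketch_frag_copy(unsigned long offset, unsigned long size,
			     unsigned long *copy_off, unsigned *buffers_used)
{
	while (size > 0) {
		unsigned long bytes;

		/* take a fresh buffer only when the current one is full */
		if (*copy_off == MAX_BUFFER_OFFSET) {
			*copy_off = 0;
			(*buffers_used)++;
		}

		bytes = PAGE_SIZE - offset;	/* what is left in this page */
		if (bytes > size)
			bytes = size;
		if (*copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - *copy_off;

		printf("copy %lu bytes at buffer offset %lu\n",
		       bytes, *copy_off);

		*copy_off += bytes;
		offset += bytes;
		size -= bytes;
		if (offset == PAGE_SIZE)	/* crossed into the next page */
			offset = 0;
	}
}

int main(void)
{
	unsigned long copy_off = 0;
	unsigned buffers_used = 1;	/* the head buffer is already in use */

	/* e.g. an 8 KiB frag starting 100 bytes into its first page */
	sketch_frag_copy(100, 8192, &copy_off, &buffers_used);
	printf("buffers used: %u\n", buffers_used);
	return 0;
}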
@@ -570,60 +514,15 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 
 	while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
 	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
-		RING_IDX max_slots_needed;
 		RING_IDX old_req_cons;
 		RING_IDX ring_slots_used;
-		int i;
 
 		queue->last_rx_time = jiffies;
 
-		/* We need a cheap worse case estimate for the number of
-		 * slots we'll use.
-		 */
-
-		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
-						skb_headlen(skb),
-						PAGE_SIZE);
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			unsigned int size;
-			unsigned int offset;
-
-			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-			offset = skb_shinfo(skb)->frags[i].page_offset;
-
-			/* For a worse-case estimate we need to factor in
-			 * the fragment page offset as this will affect the
-			 * number of times xenvif_gop_frag_copy() will
-			 * call start_new_rx_buffer().
-			 */
-			max_slots_needed += DIV_ROUND_UP(offset + size,
-							 PAGE_SIZE);
-		}
-
-		/* To avoid the estimate becoming too pessimal for some
-		 * frontends that limit posted rx requests, cap the estimate
-		 * at MAX_SKB_FRAGS. In this case netback will fully coalesce
-		 * the skb into the provided slots.
-		 */
-		if (max_slots_needed > MAX_SKB_FRAGS) {
-			max_slots_needed = MAX_SKB_FRAGS;
-			XENVIF_RX_CB(skb)->full_coalesce = true;
-		} else {
-			XENVIF_RX_CB(skb)->full_coalesce = false;
-		}
-
-		/* We may need one more slot for GSO metadata */
-		if (skb_is_gso(skb) &&
-		    (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
-		     skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
-			max_slots_needed++;
-
 		old_req_cons = queue->rx.req_cons;
 		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
 		ring_slots_used = queue->rx.req_cons - old_req_cons;
 
-		BUG_ON(ring_slots_used > max_slots_needed);
-
 		__skb_queue_tail(&rxq, skb);
 	}
 
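For comparison, here is a standalone sketch of the worst-case slot arithmetic this hunk removes, next to the slot count implied by fully coalescing the same payload (the removed comment notes that the capped case already fully coalesces the skb into the provided slots). The frag layout below is hypothetical, DIV_ROUND_UP mirrors the kernel macro, and MAX_BUFFER_OFFSET is again assumed equal to PAGE_SIZE.

#include <stdio.h>

#define PAGE_SIZE	  4096u
#define MAX_BUFFER_OFFSET PAGE_SIZE
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct frag { unsigned offset, size; };

int main(void)
{
	/* hypothetical skb: 66-byte linear head plus three page-crossing frags */
	unsigned head_off = 64, head_len = 66;
	struct frag frags[] = { {4090, 200}, {4000, 300}, {4095, 100} };
	unsigned nfrags = sizeof(frags) / sizeof(frags[0]);
	unsigned i, total = head_len, old_estimate;

	/* removed estimate: one term per frag, rounded up per page crossing */
	old_estimate = DIV_ROUND_UP(head_off + head_len, PAGE_SIZE);
	for (i = 0; i < nfrags; i++) {
		old_estimate += DIV_ROUND_UP(frags[i].offset + frags[i].size,
					     PAGE_SIZE);
		total += frags[i].size;
	}

	/* with full coalescing the payload packs into the minimum of slots */
	printf("old worst-case estimate: %u slots\n", old_estimate);
	printf("fully coalesced:         %u slots\n",
	       DIV_ROUND_UP(total, MAX_BUFFER_OFFSET));
	return 0;
}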