author	David Vrabel <david.vrabel@citrix.com>	2016-10-04 05:29:17 -0400
committer	David S. Miller <davem@davemloft.net>	2016-10-06 20:37:36 -0400
commit	a37f12298c251a48bc74d4012e07bf0d78175f46 (patch)
tree	8610270f8e73fa214527d58e36f9915f1d4e7607
parent	98f6d57ced73b723551568262019f1d6c8771f20 (diff)
xen-netback: batch copies for multiple to-guest rx packets
Instead of flushing the copy ops when a packet is complete, complete
packets when their copy ops are done. This improves performance by
reducing the number of grant copy hypercalls.

Latency is still limited by the relatively small size of the copy batch.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
[re-based]
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/xen-netback/common.h	1
-rw-r--r--	drivers/net/xen-netback/rx.c	27
2 files changed, 18 insertions(+), 10 deletions(-)
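As a rough illustration of the idea in the commit message (not part of the patch): the sketch below is a self-contained, user-space C model in which packets are parked on a "completed" list while their copy ops accumulate, and one batched flush then pushes the responses for every parked packet. The names and sizes in it are invented for the example; only the shape of the control flow mirrors the driver change in the hunks that follow.

/*
 * Toy user-space model of the batching change (not driver code).
 * Instead of flushing the copy batch once per packet, packets are
 * parked on a "completed" list and the batch is flushed once per
 * polling pass, so one flush covers many packets.
 */
#include <stdio.h>

#define COPY_BATCH_SIZE 64	/* made-up batch size for the model */
#define RX_BATCH_SIZE   8	/* packets handled per polling pass */

struct copy_state {
	int ops[COPY_BATCH_SIZE];	/* stand-in for the gnttab_copy ops */
	unsigned int num;		/* ops queued but not yet flushed */
	unsigned int completed;		/* stand-in for the completed skb list */
};

static unsigned int flushes;		/* counts "hypercalls" in this model */

static void copy_flush(struct copy_state *cs)
{
	if (cs->num) {
		flushes++;		/* one batched flush for all queued ops */
		cs->num = 0;
	}
	/* responses for every parked packet would be pushed here, then freed */
	cs->completed = 0;
}

static void rx_one_packet(struct copy_state *cs, int frags)
{
	for (int i = 0; i < frags; i++) {
		if (cs->num == COPY_BATCH_SIZE)
			copy_flush(cs);	/* batch full: flush early */
		cs->ops[cs->num++] = i;
	}
	cs->completed++;		/* park the packet instead of flushing */
}

int main(void)
{
	struct copy_state cs = { .num = 0, .completed = 0 };

	for (int p = 0; p < RX_BATCH_SIZE; p++)
		rx_one_packet(&cs, 4);
	copy_flush(&cs);		/* single flush completes all packets */

	printf("packets: %d, flushes: %u\n", RX_BATCH_SIZE, flushes);
	return 0;
}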
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 7d12a388afc6..cf68149cbb55 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -132,6 +132,7 @@ struct xenvif_copy_state {
 	struct gnttab_copy op[COPY_BATCH_SIZE];
 	RING_IDX idx[COPY_BATCH_SIZE];
 	unsigned int num;
+	struct sk_buff_head *completed;
 };
 
 struct xenvif_queue { /* Per-queue data for xenvif */
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index ae822b8fa76d..8c8c5b5883eb 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -133,6 +133,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
 static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
 {
 	unsigned int i;
+	int notify;
 
 	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);
 
@@ -154,6 +155,13 @@ static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
 	}
 
 	queue->rx_copy.num = 0;
+
+	/* Push responses for all completed packets. */
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
+	if (notify)
+		notify_remote_via_irq(queue->rx_irq);
+
+	__skb_queue_purge(queue->rx_copy.completed);
 }
 
 static void xenvif_rx_copy_add(struct xenvif_queue *queue,
@@ -279,18 +287,10 @@ static void xenvif_rx_next_skb(struct xenvif_queue *queue,
 static void xenvif_rx_complete(struct xenvif_queue *queue,
 			       struct xenvif_pkt_state *pkt)
 {
-	int notify;
-
-	/* Complete any outstanding copy ops for this skb. */
-	xenvif_rx_copy_flush(queue);
-
-	/* Push responses and notify. */
+	/* All responses are ready to be pushed. */
 	queue->rx.rsp_prod_pvt = queue->rx.req_cons;
-	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
-	if (notify)
-		notify_remote_via_irq(queue->rx_irq);
 
-	dev_kfree_skb(pkt->skb);
+	__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
 }
 
 static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
@@ -429,13 +429,20 @@ void xenvif_rx_skb(struct xenvif_queue *queue)
 
 void xenvif_rx_action(struct xenvif_queue *queue)
 {
+	struct sk_buff_head completed_skbs;
 	unsigned int work_done = 0;
 
+	__skb_queue_head_init(&completed_skbs);
+	queue->rx_copy.completed = &completed_skbs;
+
 	while (xenvif_rx_ring_slots_available(queue) &&
 	       work_done < RX_BATCH_SIZE) {
 		xenvif_rx_skb(queue);
 		work_done++;
 	}
+
+	/* Flush any pending copies and complete all skbs. */
+	xenvif_rx_copy_flush(queue);
 }
 
 static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)