path: root/drivers/net/xen-netback
author    Linus Torvalds <torvalds@linux-foundation.org>  2015-02-10 23:01:30 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-02-10 23:01:30 -0500
commit    c5ce28df0e7c01a1de23c36ebdefcd803f2b6cbb (patch)
tree      9830baf38832769e1cf621708889111bbe3c93df /drivers/net/xen-netback
parent    29afc4e9a408f2304e09c6dd0dbcfbd2356d0faa (diff)
parent    9399f0c51489ae8c16d6559b82a452fdc1895e91 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) More iov_iter conversion work from Al Viro.

    [ The "crypto: switch af_alg_make_sg() to iov_iter" commit was
      wrong, and this pull actually adds an extra commit on top of the
      branch I'm pulling to fix that up, so that the pre-merge state is
      ok. - Linus ]

 2) Various optimizations to the ipv4 forwarding information base trie
    lookup implementation. From Alexander Duyck.

 3) Remove sock_iocb altogether, from Christoph Hellwig.

 4) Allow congestion control algorithm selection via routing metrics.
    From Daniel Borkmann.

 5) Make ipv4 uncached route list per-cpu, from Eric Dumazet.

 6) Handle rfs hash collisions more gracefully, also from Eric Dumazet.

 7) Add xmit_more support to r8169, e1000, and e1000e drivers. From
    Florian Westphal.

 8) Transparent Ethernet Bridging support for GRO, from Jesse Gross.

 9) Add BPF packet actions to packet scheduler, from Jiri Pirko.

10) Add support for unique flow IDs to openvswitch, from Joe Stringer.

11) New NetCP ethernet driver, from Muralidharan Karicheri and Wingman
    Kwok.

12) More sanely handle out-of-window dupacks, which can result in
    serious ACK storms. From Neal Cardwell.

13) Various rhashtable bug fixes and enhancements, from Herbert Xu,
    Patrick McHardy, and Thomas Graf.

14) Support xmit_more in be2net, from Sathya Perla.

15) Group Policy extensions for vxlan, from Thomas Graf.

16) Remote checksum offload support for vxlan, from Tom Herbert.

17) Like ipv4, support lockless transmit over ipv6 UDP sockets. From
    Vlad Yasevich.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1494+1 commits)
  crypto: fix af_alg_make_sg() conversion to iov_iter
  ipv4: Namespecify TCP PMTU mechanism
  i40e: Fix for stats init function call in Rx setup
  tcp: don't include Fast Open option in SYN-ACK on pure SYN-data
  openvswitch: Only set TUNNEL_VXLAN_OPT if VXLAN-GBP metadata is set
  ipv6: Make __ipv6_select_ident static
  ipv6: Fix fragment id assignment on LE arches.
  bridge: Fix inability to add non-vlan fdb entry
  net: Mellanox: Delete unnecessary checks before the function call "vunmap"
  cxgb4: Add support in cxgb4 to get expansion rom version via ethtool
  ethtool: rename reserved1 memeber in ethtool_drvinfo for expansion ROM version
  net: dsa: Remove redundant phy_attach()
  IB/mlx4: Reset flow support for IB kernel ULPs
  IB/mlx4: Always use the correct port for mirrored multicast attachments
  net/bonding: Fix potential bad memory access during bonding events
  tipc: remove tipc_snprintf
  tipc: nl compat add noop and remove legacy nl framework
  tipc: convert legacy nl stats show to nl compat
  tipc: convert legacy nl net id get to nl compat
  tipc: convert legacy nl net id set to nl compat
  ...
Diffstat (limited to 'drivers/net/xen-netback')
-rw-r--r--  drivers/net/xen-netback/common.h    |   1
-rw-r--r--  drivers/net/xen-netback/interface.c |   2
-rw-r--r--  drivers/net/xen-netback/netback.c   | 107
3 files changed, 4 insertions, 106 deletions
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 5f1fda44882b..589fa256256b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -251,7 +251,6 @@ struct xenvif {
 struct xenvif_rx_cb {
 	unsigned long expires;
 	int meta_slots_used;
-	bool full_coalesce;
 };
 
 #define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
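The struct being shrunk here is per-skb state overlaid on skb->cb. As a hedged illustration of that control-block idiom (example_cb, EXAMPLE_CB, and example_cb_init are made-up names, not netback code):

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Hypothetical per-skb metadata, overlaid on skb->cb the same way
 * xenvif_rx_cb is. */
struct example_cb {
	unsigned long expires;
	int meta_slots_used;
};

#define EXAMPLE_CB(skb) ((struct example_cb *)(skb)->cb)

static void example_cb_init(struct sk_buff *skb)
{
	/* skb->cb is only 48 bytes; fail the build if the overlay
	 * ever outgrows it. */
	BUILD_BUG_ON(sizeof(struct example_cb) >
		     FIELD_SIZEOF(struct sk_buff, cb));

	EXAMPLE_CB(skb)->expires = jiffies + HZ;
	EXAMPLE_CB(skb)->meta_slots_used = 0;
}

The BUILD_BUG_ON guard is the conventional way to keep such overlays from silently overflowing the cb area.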
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 12f9e2708afb..f38227afe099 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -80,7 +80,7 @@ static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-int xenvif_poll(struct napi_struct *napi, int budget)
+static int xenvif_poll(struct napi_struct *napi, int budget)
 {
 	struct xenvif_queue *queue =
 		container_of(napi, struct xenvif_queue, napi);
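xenvif_poll can become static here, presumably because its only remaining user is the netif_napi_add() registration within this file. For orientation, a minimal sketch of the NAPI poll contract such a function satisfies (do_rx_work is a hypothetical helper, not a real kernel symbol):

#include <linux/netdevice.h>

int do_rx_work(struct napi_struct *napi, int budget);	/* hypothetical */

static int example_poll(struct napi_struct *napi, int budget)
{
	/* Process at most `budget` packets from the ring. */
	int work_done = do_rx_work(napi, budget);

	/* Doing less work than allowed means the ring drained: leave
	 * polling mode so the device interrupt can be re-enabled. */
	if (work_done < budget)
		napi_complete(napi);

	return work_done;
}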
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7dc2d64db3cb..f7a31d2cb3f1 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -233,51 +233,6 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
 	}
 }
 
-/*
- * Returns true if we should start a new receive buffer instead of
- * adding 'size' bytes to a buffer which currently contains 'offset'
- * bytes.
- */
-static bool start_new_rx_buffer(int offset, unsigned long size, int head,
-				bool full_coalesce)
-{
-	/* simple case: we have completely filled the current buffer. */
-	if (offset == MAX_BUFFER_OFFSET)
-		return true;
-
-	/*
-	 * complex case: start a fresh buffer if the current frag
-	 * would overflow the current buffer but only if:
-	 *     (i)   this frag would fit completely in the next buffer
-	 * and (ii)  there is already some data in the current buffer
-	 * and (iii) this is not the head buffer.
-	 * and (iv)  there is no need to fully utilize the buffers
-	 *
-	 * Where:
-	 * - (i) stops us splitting a frag into two copies
-	 *   unless the frag is too large for a single buffer.
-	 * - (ii) stops us from leaving a buffer pointlessly empty.
-	 * - (iii) stops us leaving the first buffer
-	 *   empty. Strictly speaking this is already covered
-	 *   by (ii) but is explicitly checked because
-	 *   netfront relies on the first buffer being
-	 *   non-empty and can crash otherwise.
-	 * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
-	 *   slot
-	 *
-	 * This means we will effectively linearise small
-	 * frags but do not needlessly split large buffers
-	 * into multiple copies tend to give large frags their
-	 * own buffers as before.
-	 */
-	BUG_ON(size > MAX_BUFFER_OFFSET);
-	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
-	    !full_coalesce)
-		return true;
-
-	return false;
-}
-
 struct netrx_pending_operations {
 	unsigned copy_prod, copy_cons;
 	unsigned meta_prod, meta_cons;
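For readers skimming the removal, here is a self-contained userspace restatement of the deleted heuristic with worked calls, assuming MAX_BUFFER_OFFSET is 4096 (the driver derives it from PAGE_SIZE):

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUFFER_OFFSET 4096UL	/* assumed value */

/* Standalone restatement of the deleted predicate. */
static bool start_new_rx_buffer(unsigned long offset, unsigned long size,
				bool head, bool full_coalesce)
{
	if (offset == MAX_BUFFER_OFFSET)	/* buffer completely full */
		return true;

	/* Start a fresh buffer only when the frag would overflow a
	 * non-empty, non-head buffer and full coalescing is off. */
	return offset + size > MAX_BUFFER_OFFSET && offset && !head &&
	       !full_coalesce;
}

int main(void)
{
	/* 2000 bytes into a buffer already holding 3000: would
	 * overflow, so a fresh buffer is started -- prints 1. */
	printf("%d\n", start_new_rx_buffer(3000, 2000, false, false));

	/* The same frag with full_coalesce set: split across the
	 * current and next buffers instead -- prints 0. */
	printf("%d\n", start_new_rx_buffer(3000, 2000, false, true));
	return 0;
}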
@@ -336,24 +291,13 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
 		BUG_ON(offset >= PAGE_SIZE);
 		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
 
-		bytes = PAGE_SIZE - offset;
+		if (npo->copy_off == MAX_BUFFER_OFFSET)
+			meta = get_next_rx_buffer(queue, npo);
 
+		bytes = PAGE_SIZE - offset;
 		if (bytes > size)
 			bytes = size;
 
-		if (start_new_rx_buffer(npo->copy_off,
-					bytes,
-					*head,
-					XENVIF_RX_CB(skb)->full_coalesce)) {
-			/*
-			 * Netfront requires there to be some data in the head
-			 * buffer.
-			 */
-			BUG_ON(*head);
-
-			meta = get_next_rx_buffer(queue, npo);
-		}
-
 		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
 			bytes = MAX_BUFFER_OFFSET - npo->copy_off;
 
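The replacement logic above is much simpler: a new ring buffer is taken only once the current one is exactly full. A standalone sketch of that "always fully coalesce" packing behaviour, again assuming 4096-byte buffers, with memset standing in for the grant-copy operations the driver actually queues:

#include <stdio.h>
#include <string.h>

#define MAX_BUFFER_OFFSET 4096UL	/* assumed value */
#define NBUFS 4

static unsigned char bufs[NBUFS][MAX_BUFFER_OFFSET];

/* Pack `size` bytes into consecutive buffers, filling each buffer
 * completely before taking the next one (sketch, not driver code). */
static unsigned slots_used(unsigned long size)
{
	unsigned long copy_off = 0;
	unsigned slot = 0;

	while (size > 0) {
		unsigned long chunk = size;

		if (copy_off == MAX_BUFFER_OFFSET) {	/* buffer full */
			slot++;				/* take next buffer */
			copy_off = 0;
		}
		if (chunk > MAX_BUFFER_OFFSET - copy_off)
			chunk = MAX_BUFFER_OFFSET - copy_off;

		memset(bufs[slot] + copy_off, 0xab, chunk);	/* "copy" */
		copy_off += chunk;
		size -= chunk;
	}
	return slot + 1;
}

int main(void)
{
	/* A 10000-byte payload fits in ceil(10000/4096) = 3 slots. */
	printf("slots: %u\n", slots_used(10000));
	return 0;
}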
@@ -570,60 +514,15 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 
 	while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
 	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
-		RING_IDX max_slots_needed;
 		RING_IDX old_req_cons;
 		RING_IDX ring_slots_used;
-		int i;
 
 		queue->last_rx_time = jiffies;
 
-		/* We need a cheap worse case estimate for the number of
-		 * slots we'll use.
-		 */
-
-		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
-						skb_headlen(skb),
-						PAGE_SIZE);
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			unsigned int size;
-			unsigned int offset;
-
-			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-			offset = skb_shinfo(skb)->frags[i].page_offset;
-
-			/* For a worse-case estimate we need to factor in
-			 * the fragment page offset as this will affect the
-			 * number of times xenvif_gop_frag_copy() will
-			 * call start_new_rx_buffer().
-			 */
-			max_slots_needed += DIV_ROUND_UP(offset + size,
-							 PAGE_SIZE);
-		}
-
-		/* To avoid the estimate becoming too pessimal for some
-		 * frontends that limit posted rx requests, cap the estimate
-		 * at MAX_SKB_FRAGS. In this case netback will fully coalesce
-		 * the skb into the provided slots.
-		 */
-		if (max_slots_needed > MAX_SKB_FRAGS) {
-			max_slots_needed = MAX_SKB_FRAGS;
-			XENVIF_RX_CB(skb)->full_coalesce = true;
-		} else {
-			XENVIF_RX_CB(skb)->full_coalesce = false;
-		}
-
-		/* We may need one more slot for GSO metadata */
-		if (skb_is_gso(skb) &&
-		    (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
-		     skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
-			max_slots_needed++;
-
 		old_req_cons = queue->rx.req_cons;
 		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
 		ring_slots_used = queue->rx.req_cons - old_req_cons;
 
-		BUG_ON(ring_slots_used > max_slots_needed);
-
 		__skb_queue_tail(&rxq, skb);
 	}
 
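The deleted block computed a cheap worst-case slot estimate per skb; once every packet is fully coalesced, the estimate and its BUG_ON lose their purpose. A self-contained worked example with made-up offsets shows how pessimistic the old estimate could be:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Made-up skb layout: a 200-byte linear area starting 4000
	 * bytes into its page, plus one 4096-byte frag at offset 100. */
	unsigned long head_off = 4000, head_len = 200;
	unsigned long frag_off = 100, frag_size = 4096;
	unsigned long slots;

	/* Head straddles a page boundary: ceil(4200/4096) = 2. */
	slots = DIV_ROUND_UP(head_off + head_len, PAGE_SIZE);

	/* The frag's page offset counts too: ceil(4196/4096) = 2. */
	slots += DIV_ROUND_UP(frag_off + frag_size, PAGE_SIZE);

	/* Estimate: 4 slots, although the 4296 bytes of payload would
	 * fit in 2 fully coalesced slots. */
	printf("worst-case slots: %lu\n", slots);
	return 0;
}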