author		David S. Miller <davem@davemloft.net>	2014-06-05 19:22:02 -0400
committer	David S. Miller <davem@davemloft.net>	2014-06-05 19:22:02 -0400
commit		f666f87b9423fb534d2116206ace04495080f2b5
tree		23f929c890219d6ef412b4ff630adf501b70a3ec /drivers/net/xen-netback/netback.c
parent		46cfd6ea23b0a207c87269d86457727dc4485708
parent		0dcceabb0c1bf2d4c12a748df9933fad303072a7
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/xen-netback/netback.c
	net/core/filter.c
A filter bug fix overlapped some cleanups and a conversion
over to some new insn generation macros.
A xen-netback bug fix overlapped the addition of multi-queue
support.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--	drivers/net/xen-netback/netback.c	36
1 file changed, 25 insertions(+), 11 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 49efff9b99f4..1844a47636b6 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -168,7 +168,8 @@ bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
  * adding 'size' bytes to a buffer which currently contains 'offset'
  * bytes.
  */
-static bool start_new_rx_buffer(int offset, unsigned long size, int head)
+static bool start_new_rx_buffer(int offset, unsigned long size, int head,
+				bool full_coalesce)
 {
 	/* simple case: we have completely filled the current buffer. */
 	if (offset == MAX_BUFFER_OFFSET)
@@ -180,6 +181,7 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	 * (i) this frag would fit completely in the next buffer
 	 * and (ii) there is already some data in the current buffer
 	 * and (iii) this is not the head buffer.
+	 * and (iv) there is no need to fully utilize the buffers
 	 *
 	 * Where:
 	 * - (i) stops us splitting a frag into two copies
@@ -190,6 +192,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	 *   by (ii) but is explicitly checked because
 	 *   netfront relies on the first buffer being
 	 *   non-empty and can crash otherwise.
+	 * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
+	 *   slot
 	 *
 	 * This means we will effectively linearise small
 	 * frags but do not needlessly split large buffers
@@ -197,7 +201,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	 * own buffers as before.
 	 */
 	BUG_ON(size > MAX_BUFFER_OFFSET);
-	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
+	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
+	    !full_coalesce)
 		return true;
 
 	return false;
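
Taken together, the four hunks above make the "start a new buffer" heuristic suppressible per skb via the new condition (iv). Below is a minimal userspace sketch of the resulting predicate; the function name and logic come from the diff, while the MAX_BUFFER_OFFSET value (PAGE_SIZE in the driver) and the assert() standing in for BUG_ON() are assumptions made to keep the example self-contained:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_BUFFER_OFFSET 4096UL	/* assumed; PAGE_SIZE in the driver */

    /* Mirrors the patched start_new_rx_buffer(): the new full_coalesce
     * flag (condition (iv)) can veto opening a fresh buffer, so an skb
     * that needs more than MAX_SKB_FRAGS slots still fits the capped
     * slot estimate. */
    static bool start_new_rx_buffer(int offset, unsigned long size, int head,
    				    bool full_coalesce)
    {
    	/* simple case: we have completely filled the current buffer. */
    	if (offset == MAX_BUFFER_OFFSET)
    		return true;

    	assert(size <= MAX_BUFFER_OFFSET);	/* BUG_ON() in the driver */
    	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
    	    !full_coalesce)
    		return true;

    	return false;
    }

    int main(void)
    {
    	/* A 3000-byte frag at offset 2000 overflows a 4096-byte buffer,
    	 * so normally a new buffer is started; full_coalesce instead
    	 * forces the frag to be split across the remaining space. */
    	printf("%d\n", start_new_rx_buffer(2000, 3000, 0, false));	/* 1 */
    	printf("%d\n", start_new_rx_buffer(2000, 3000, 0, true));	/* 0 */
    	return 0;
    }

With full_coalesce set, a frag that straddles the buffer boundary is split across buffers rather than opening a fresh one, which is what lets an oversized skb fit into the capped number of slots.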
@@ -232,6 +237,13 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
 	return meta;
 }
 
+struct xenvif_rx_cb {
+	int meta_slots_used;
+	bool full_coalesce;
+};
+
+#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
+
 /*
  * Set up the grant operations for this fragment. If it's a flipping
  * interface, we also set up the unmap request from here.
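
The hunk above relies on the kernel's usual control-block idiom: a driver-private struct is overlaid on the 48-byte skb->cb scratch area. An illustrative sketch of the idiom follows; the pared-down sk_buff here is a stand-in for the real struct, not the kernel definition:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for struct sk_buff: only the cb[] scratch area matters
     * for this idiom. The kernel's sk_buff carries the same 48 bytes. */
    struct sk_buff {
    	char cb[48];
    };

    struct xenvif_rx_cb {
    	int meta_slots_used;
    	bool full_coalesce;
    };

    /* Same cast-based accessor as in the diff. */
    #define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)

    int main(void)
    {
    	struct sk_buff skb = { .cb = { 0 } };

    	/* The producer stashes per-skb state in the control block... */
    	XENVIF_RX_CB(&skb)->full_coalesce = true;
    	XENVIF_RX_CB(&skb)->meta_slots_used = 3;

    	/* ...and the consumer reads it back later without any lookup. */
    	printf("coalesce=%d slots=%d\n",
    	       XENVIF_RX_CB(&skb)->full_coalesce,
    	       XENVIF_RX_CB(&skb)->meta_slots_used);
    	return 0;
    }

Moving the definition earlier in the file (the old copy is deleted in a later hunk) is what makes the flag visible at the xenvif_gop_frag_copy() call site below.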
@@ -266,7 +278,10 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
 		if (bytes > size)
 			bytes = size;
 
-		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
+		if (start_new_rx_buffer(npo->copy_off,
+					bytes,
+					*head,
+					XENVIF_RX_CB(skb)->full_coalesce)) {
 			/*
 			 * Netfront requires there to be some data in the head
 			 * buffer.
@@ -548,12 +563,6 @@ static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
 	}
 }
 
-struct xenvif_rx_cb {
-	int meta_slots_used;
-};
-
-#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
-
 void xenvif_kick_thread(struct xenvif_queue *queue)
 {
 	wake_up(&queue->wq);
@@ -609,10 +618,15 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 
 		/* To avoid the estimate becoming too pessimal for some
 		 * frontends that limit posted rx requests, cap the estimate
-		 * at MAX_SKB_FRAGS.
+		 * at MAX_SKB_FRAGS. In this case netback will fully coalesce
+		 * the skb into the provided slots.
 		 */
-		if (max_slots_needed > MAX_SKB_FRAGS)
+		if (max_slots_needed > MAX_SKB_FRAGS) {
 			max_slots_needed = MAX_SKB_FRAGS;
+			XENVIF_RX_CB(skb)->full_coalesce = true;
+		} else {
+			XENVIF_RX_CB(skb)->full_coalesce = false;
+		}
 
 		/* We may need one more slot for GSO metadata */
 		if (skb_is_gso(skb) &&
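
The final hunk is the producer side: capping the worst-case slot estimate is the point where full_coalesce is decided. A small sketch of that step, assuming MAX_SKB_FRAGS is 17 (its usual value with 4 KiB pages; the real driver computes max_slots_needed from the skb head and frags before this point):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_SKB_FRAGS 17	/* assumed; the value is config-dependent */

    /* Sketch of the capping step: clamp the worst-case slot estimate
     * and report whether the skb must then be fully coalesced into the
     * clamped number of slots. In the driver the result is stored in
     * XENVIF_RX_CB(skb)->full_coalesce before the copy ops are built. */
    static bool cap_rx_slot_estimate(int *max_slots_needed)
    {
    	if (*max_slots_needed > MAX_SKB_FRAGS) {
    		*max_slots_needed = MAX_SKB_FRAGS;
    		return true;
    	}
    	return false;
    }

    int main(void)
    {
    	int slots = 20;	/* hypothetical worst-case estimate */
    	bool coalesce = cap_rx_slot_estimate(&slots);

    	printf("full_coalesce=%d slots=%d\n", coalesce, slots);	/* 1 17 */
    	return 0;
    }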