author     David S. Miller <davem@davemloft.net>  2014-03-14 22:31:55 -0400
committer  David S. Miller <davem@davemloft.net>  2014-03-14 22:31:55 -0400
commit     85dcce7a73f1cc59f7a96fe52713b1630f4ca272 (patch)
tree       6c645923eb2f0152073b90685ce80e46cfb7afed /drivers/net/xen-netback/netback.c
parent     4c4e4113db249c828fffb286bc95ffb255e081f5 (diff)
parent     a4ecdf82f8ea49f7d3a072121dcbd0bf3a7cb93a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/usb/r8152.c
drivers/net/xen-netback/netback.c
Both the r8152 and netback conflicts were simple overlapping
changes.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
 -rw-r--r--  drivers/net/xen-netback/netback.c | 39
 1 file changed, 18 insertions(+), 21 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index bc943205a691..5a8c4a43c522 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -243,7 +243,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 	struct gnttab_copy *copy_gop;
 	struct xenvif_rx_meta *meta;
 	unsigned long bytes;
-	int gso_type;
+	int gso_type = XEN_NETIF_GSO_TYPE_NONE;
 
 	/* Data must not cross a page boundary. */
 	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -309,12 +309,12 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 	}
 
 	/* Leave a gap for the GSO descriptor. */
-	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-		gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
-	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-		gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
-	else
-		gso_type = XEN_NETIF_GSO_TYPE_NONE;
+	if (skb_is_gso(skb)) {
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+	}
 
 	if (*head && ((1 << gso_type) & vif->gso_mask))
 		vif->rx.req_cons++;
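These two hunks carry the substance of the netback conflict resolution: gso_type now starts at XEN_NETIF_GSO_TYPE_NONE, and the per-type checks run only when skb_is_gso() is true (in the kernel that helper tests gso_size != 0), so a segmented skb with an unrecognized GSO flag falls through to NONE rather than needing a trailing else. Below is a minimal standalone sketch of that selection logic; the mock_skb type, the flag bits, and the 0/1/2 type values are assumptions chosen so the model compiles on its own, not the kernel definitions.

#include <stdio.h>

#define SKB_GSO_TCPV4 (1 << 0)	/* assumed flag values for this model */
#define SKB_GSO_TCPV6 (1 << 4)

enum {
	XEN_NETIF_GSO_TYPE_NONE  = 0,	/* assumed to mirror the Xen header */
	XEN_NETIF_GSO_TYPE_TCPV4 = 1,
	XEN_NETIF_GSO_TYPE_TCPV6 = 2,
};

struct mock_skb {
	unsigned int gso_type;	/* stands in for skb_shinfo(skb)->gso_type */
	unsigned int gso_size;	/* stands in for skb_shinfo(skb)->gso_size */
};

/* skb_is_gso() in the kernel tests gso_size != 0; modeled the same way. */
static int mock_skb_is_gso(const struct mock_skb *skb)
{
	return skb->gso_size != 0;
}

static int pick_gso_type(const struct mock_skb *skb)
{
	int gso_type = XEN_NETIF_GSO_TYPE_NONE;	/* default, as in the patch */

	if (mock_skb_is_gso(skb)) {
		if (skb->gso_type & SKB_GSO_TCPV4)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (skb->gso_type & SKB_GSO_TCPV6)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
		/* any other segmented type falls through to NONE */
	}
	return gso_type;
}

int main(void)
{
	struct mock_skb v4 = { SKB_GSO_TCPV4, 1448 };
	struct mock_skb plain = { 0, 0 };

	printf("%d %d\n", pick_gso_type(&v4), pick_gso_type(&plain));	/* 1 0 */
	return 0;
}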
@@ -348,22 +348,18 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	int head = 1;
 	int old_meta_prod;
 	int gso_type;
-	int gso_size;
 	struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
 	grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
 	struct xenvif *foreign_vif = NULL;
 
 	old_meta_prod = npo->meta_prod;
 
-	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
-		gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
-		gso_size = skb_shinfo(skb)->gso_size;
-	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
-		gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
-		gso_size = skb_shinfo(skb)->gso_size;
-	} else {
-		gso_type = XEN_NETIF_GSO_TYPE_NONE;
-		gso_size = 0;
-	}
+	gso_type = XEN_NETIF_GSO_TYPE_NONE;
+	if (skb_is_gso(skb)) {
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+	}
 
 	/* Set up a GSO prefix descriptor, if necessary */
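xenvif_gop_skb gets the same restructuring and additionally drops the gso_size shadow local. Reading skb_shinfo(skb)->gso_size at the point of use is equivalent because the field stays 0 for non-GSO skbs, which is exactly what the removed gso_size = 0 branch produced. Continuing the hypothetical mock_skb model from the sketch above:

/* Why dropping the local is safe: a non-GSO mock_skb carries
 * gso_size == 0, so the direct read matches the old zeroed local. */
static unsigned int meta_gso_size(const struct mock_skb *skb)
{
	return skb->gso_size;	/* 0 whenever !mock_skb_is_gso(skb) */
}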
@@ -371,7 +367,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 		meta = npo->meta + npo->meta_prod++;
 		meta->gso_type = gso_type;
-		meta->gso_size = gso_size;
+		meta->gso_size = skb_shinfo(skb)->gso_size;
 		meta->size = 0;
 		meta->id = req->id;
 	}
@@ -381,7 +377,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 
 	if ((1 << gso_type) & vif->gso_mask) {
 		meta->gso_type = gso_type;
-		meta->gso_size = gso_size;
+		meta->gso_size = skb_shinfo(skb)->gso_size;
 	} else {
 		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
 		meta->gso_size = 0;
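Both meta-descriptor fills guard the GSO parameters with (1 << gso_type) & vif->gso_mask: the mask records, one bit per XEN_NETIF_GSO_TYPE_* value, which GSO types the frontend negotiated, and anything outside the mask is downgraded to NONE with a zero size. A small model of that test, reusing the assumed 0/1/2 type values from the first sketch:

/* Feature-mask test: bit (1 << type) is set in gso_mask for each GSO
 * type the peer negotiated.  Under the assumed numbering, a peer that
 * only accepts TCPv4 GSO would advertise a mask of
 * (1 << XEN_NETIF_GSO_TYPE_TCPV4), i.e. 0x2. */
static int gso_allowed(unsigned int gso_mask, int gso_type)
{
	return (1 << gso_type) & gso_mask;
}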
@@ -531,8 +527,9 @@ static void xenvif_rx_action(struct xenvif *vif)
 			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
 		}
-		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
-		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+		if (skb_is_gso(skb) &&
+		    (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+		     skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
 			max_slots_needed++;
 
 		/* If the skb may not fit then bail out now */
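The last hunk tightens the worst-case ring-slot estimate: each frag can consume up to DIV_ROUND_UP(size, PAGE_SIZE) grant-copy slots, and the extra slot for the GSO descriptor is reserved only when the skb is genuinely a TCPv4/TCPv6 GSO packet. A small standalone model of that estimate, with a mock 4 KiB page size and hypothetical fragment sizes (the real function also accounts for the linear head area in lines outside this hunk):

#include <stdio.h>

#define MOCK_PAGE_SIZE 4096u	/* assumed page size for the model */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Worst-case slot count for nr_frags fragments of the given sizes,
 * plus one slot for a GSO extra descriptor when needed. */
static unsigned int estimate_slots(const unsigned int *frag_size,
				   int nr_frags, int is_tcp_gso)
{
	unsigned int slots = 0;
	int i;

	for (i = 0; i < nr_frags; i++)
		slots += DIV_ROUND_UP(frag_size[i], MOCK_PAGE_SIZE);
	if (is_tcp_gso)
		slots++;	/* gap left for the GSO descriptor */
	return slots;
}

int main(void)
{
	unsigned int frags[] = { 1448, 8192, 100 };	/* 1 + 2 + 1 pages */

	printf("%u\n", estimate_slots(frags, 3, 1));	/* prints 5 */
	return 0;
}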