path: root/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
author	Alexander Duyck <alexander.h.duyck@intel.com>	2012-05-11 04:33:21 -0400
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2012-07-18 16:16:37 -0400
commit	70a10e258ce3d45b294de9190dee9dcc73a495cb (patch)
tree	e397dbc180b68568d2c6766f9c091ace48cc0db7	/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
parent	fb40195cc975b14c5d4e44863ea996f999ba5aee (diff)
ixgbevf: Consolidate Tx context descriptor creation code
There is a good bit of redundancy between the Tx checksum and segmentation
offloads. In order to reduce some of this I am moving the code for creating
a context descriptor into a separate function.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Greg Rose <gregory.v.rose@intel.com>
Tested-by: Sibai Li <sibai.li@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
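In miniature, the consolidation looks like this: one helper owns the four context-descriptor words and the next_to_use advance, while the TSO and checksum paths only compute the values they pass in. The following is a small, compilable user-space sketch of that pattern; the struct layouts, bit values and the two callers are simplified stand-ins for illustration, not the driver's actual definitions.

#include <stdint.h>
#include <stdio.h>

struct ctx_desc {			/* stand-in for ixgbe_adv_tx_context_desc */
	uint32_t vlan_macip_lens;
	uint32_t seqnum_seed;
	uint32_t type_tucmd_mlhl;
	uint32_t mss_l4len_idx;
};

struct tx_ring {			/* stand-in for ixgbevf_ring */
	struct ctx_desc desc[8];
	uint16_t next_to_use;
	uint16_t count;
};

#define CMD_DEXT	(1u << 29)	/* placeholder bit positions */
#define DTYP_CTXT	(1u << 20)

/* one helper owns the descriptor write-out and the ring index advance */
static void tx_ctxtdesc(struct tx_ring *ring, uint32_t vlan_macip_lens,
			uint32_t type_tucmd, uint32_t mss_l4len_idx)
{
	struct ctx_desc *cd = &ring->desc[ring->next_to_use];
	uint16_t i = ring->next_to_use + 1;

	ring->next_to_use = (i < ring->count) ? i : 0;

	/* mark the entry as an advanced context descriptor */
	type_tucmd |= CMD_DEXT | DTYP_CTXT;

	cd->vlan_macip_lens = vlan_macip_lens;
	cd->seqnum_seed = 0;
	cd->type_tucmd_mlhl = type_tucmd;
	cd->mss_l4len_idx = mss_l4len_idx;
}

/* the offload paths now only decide what goes into the descriptor */
static void tso_path(struct tx_ring *ring)
{
	/* dummy values; the driver derives these from the skb headers */
	tx_ctxtdesc(ring, 0, 0, 0);
}

static void csum_path(struct tx_ring *ring)
{
	tx_ctxtdesc(ring, 0, 0, 0);
}

int main(void)
{
	struct tx_ring ring = { .count = 8 };

	tso_path(&ring);
	csum_path(&ring);
	printf("next_to_use = %u\n", ring.next_to_use);	/* prints 2 */
	return 0;
}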
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c')
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c	| 342
1 file changed, 162 insertions(+), 180 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 1c53e13b466d..ce81ce0698b3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -42,6 +42,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
+#include <linux/sctp.h>
 #include <linux/ipv6.h>
 #include <linux/slab.h>
 #include <net/checksum.h>
@@ -144,18 +145,18 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
 	}
 }
 
-static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
+static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
 					       struct ixgbevf_tx_buffer
 					       *tx_buffer_info)
 {
 	if (tx_buffer_info->dma) {
 		if (tx_buffer_info->mapped_as_page)
-			dma_unmap_page(&adapter->pdev->dev,
+			dma_unmap_page(tx_ring->dev,
 				       tx_buffer_info->dma,
 				       tx_buffer_info->length,
 				       DMA_TO_DEVICE);
 		else
-			dma_unmap_single(&adapter->pdev->dev,
+			dma_unmap_single(tx_ring->dev,
 					 tx_buffer_info->dma,
 					 tx_buffer_info->length,
 					 DMA_TO_DEVICE);
@@ -222,7 +223,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 				total_bytes += bytecount;
 			}
 
-			ixgbevf_unmap_and_free_tx_resource(adapter,
+			ixgbevf_unmap_and_free_tx_resource(tx_ring,
 							   tx_buffer_info);
 
 			tx_desc->wb.status = 0;
@@ -1443,7 +1444,7 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
 
 	for (i = 0; i < tx_ring->count; i++) {
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 	}
 
 	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
@@ -2389,172 +2390,153 @@ static int ixgbevf_close(struct net_device *netdev)
 	return 0;
 }
 
-static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
-		       struct ixgbevf_ring *tx_ring,
-		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
+				u32 vlan_macip_lens, u32 type_tucmd,
+				u32 mss_l4len_idx)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
-	unsigned int i;
-	int err;
-	struct ixgbevf_tx_buffer *tx_buffer_info;
-	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
-	u32 mss_l4len_idx, l4len;
+	u16 i = tx_ring->next_to_use;
 
-	if (skb_is_gso(skb)) {
-		if (skb_header_cloned(skb)) {
-			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-			if (err)
-				return err;
-		}
-		l4len = tcp_hdrlen(skb);
-		*hdr_len += l4len;
-
-		if (skb->protocol == htons(ETH_P_IP)) {
-			struct iphdr *iph = ip_hdr(skb);
-			iph->tot_len = 0;
-			iph->check = 0;
-			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-								 iph->daddr, 0,
-								 IPPROTO_TCP,
-								 0);
-			adapter->hw_tso_ctxt++;
-		} else if (skb_is_gso_v6(skb)) {
-			ipv6_hdr(skb)->payload_len = 0;
-			tcp_hdr(skb)->check =
-			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-					     &ipv6_hdr(skb)->daddr,
-					     0, IPPROTO_TCP, 0);
-			adapter->hw_tso6_ctxt++;
-		}
+	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
 
-		i = tx_ring->next_to_use;
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
 
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
-
-		/* VLAN MACLEN IPLEN */
-		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-			vlan_macip_lens |=
-				(tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-		vlan_macip_lens |= ((skb_network_offset(skb)) <<
-				    IXGBE_ADVTXD_MACLEN_SHIFT);
-		*hdr_len += skb_network_offset(skb);
-		vlan_macip_lens |=
-			(skb_transport_header(skb) - skb_network_header(skb));
-		*hdr_len +=
-			(skb_transport_header(skb) - skb_network_header(skb));
-		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-		context_desc->seqnum_seed = 0;
-
-		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
-				   IXGBE_ADVTXD_DTYP_CTXT);
-
-		if (skb->protocol == htons(ETH_P_IP))
-			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
-		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-
-		/* MSS L4LEN IDX */
-		mss_l4len_idx =
-			(skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
-		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
-		/* use index 1 for TSO */
-		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
-		tx_buffer_info->time_stamp = jiffies;
-		tx_buffer_info->next_to_watch = i;
+	/* set bits to identify this as an advanced context descriptor */
+	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
 
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-		tx_ring->next_to_use = i;
+	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+	context_desc->seqnum_seed = 0;
+	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
+
+static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
+		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+	u32 vlan_macip_lens, type_tucmd;
+	u32 mss_l4len_idx, l4len;
+
+	if (!skb_is_gso(skb))
+		return 0;
 
-		return true;
+	if (skb_header_cloned(skb)) {
+		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err)
+			return err;
 	}
 
-	return false;
+	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+		iph->tot_len = 0;
+		iph->check = 0;
+		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+							 iph->daddr, 0,
+							 IPPROTO_TCP,
+							 0);
+		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+	} else if (skb_is_gso_v6(skb)) {
+		ipv6_hdr(skb)->payload_len = 0;
+		tcp_hdr(skb)->check =
+		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+				     &ipv6_hdr(skb)->daddr,
+				     0, IPPROTO_TCP, 0);
+	}
+
+	/* compute header lengths */
+	l4len = tcp_hdrlen(skb);
+	*hdr_len += l4len;
+	*hdr_len = skb_transport_offset(skb) + l4len;
+
+	/* mss_l4len_id: use 1 as index for TSO */
+	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+	vlan_macip_lens = skb_network_header_len(skb);
+	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
+			    type_tucmd, mss_l4len_idx);
+
+	return 1;
 }
 
-static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
-			    struct ixgbevf_ring *tx_ring,
+static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 			    struct sk_buff *skb, u32 tx_flags)
 {
-	struct ixgbe_adv_tx_context_desc *context_desc;
-	unsigned int i;
-	struct ixgbevf_tx_buffer *tx_buffer_info;
-	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
 
-	if (skb->ip_summed == CHECKSUM_PARTIAL ||
-	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
-		i = tx_ring->next_to_use;
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
-
-		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-			vlan_macip_lens |= (tx_flags &
-					    IXGBE_TX_FLAGS_VLAN_MASK);
-		vlan_macip_lens |= (skb_network_offset(skb) <<
-				    IXGBE_ADVTXD_MACLEN_SHIFT);
-		if (skb->ip_summed == CHECKSUM_PARTIAL)
-			vlan_macip_lens |= (skb_transport_header(skb) -
-					    skb_network_header(skb));
-
-		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-		context_desc->seqnum_seed = 0;
-
-		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
-				    IXGBE_ADVTXD_DTYP_CTXT);
-
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			switch (skb->protocol) {
-			case __constant_htons(ETH_P_IP):
-				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
-				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
-					type_tucmd_mlhl |=
-						IXGBE_ADVTXD_TUCMD_L4T_TCP;
-				break;
-			case __constant_htons(ETH_P_IPV6):
-				/* XXX what about other V6 headers?? */
-				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
-					type_tucmd_mlhl |=
-						IXGBE_ADVTXD_TUCMD_L4T_TCP;
-				break;
-			default:
-				if (unlikely(net_ratelimit())) {
-					pr_warn("partial checksum but "
-						"proto=%x!\n", skb->protocol);
-				}
-				break;
-			}
-		}
 
-		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-		/* use index zero for tx checksum offload */
-		context_desc->mss_l4len_idx = 0;
 
-		tx_buffer_info->time_stamp = jiffies;
-		tx_buffer_info->next_to_watch = i;
+	u32 vlan_macip_lens = 0;
+	u32 mss_l4len_idx = 0;
+	u32 type_tucmd = 0;
 
-		adapter->hw_csum_tx_good++;
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-		tx_ring->next_to_use = i;
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 l4_hdr = 0;
+		switch (skb->protocol) {
+		case __constant_htons(ETH_P_IP):
+			vlan_macip_lens |= skb_network_header_len(skb);
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+			l4_hdr = ip_hdr(skb)->protocol;
+			break;
+		case __constant_htons(ETH_P_IPV6):
+			vlan_macip_lens |= skb_network_header_len(skb);
+			l4_hdr = ipv6_hdr(skb)->nexthdr;
+			break;
+		default:
+			if (unlikely(net_ratelimit())) {
+				dev_warn(tx_ring->dev,
+					 "partial checksum but proto=%x!\n",
+					 skb->protocol);
+			}
+			break;
+		}
 
-		return true;
+		switch (l4_hdr) {
+		case IPPROTO_TCP:
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+			mss_l4len_idx = tcp_hdrlen(skb) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
+			break;
+		case IPPROTO_SCTP:
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+			mss_l4len_idx = sizeof(struct sctphdr) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
+			break;
+		case IPPROTO_UDP:
+			mss_l4len_idx = sizeof(struct udphdr) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
+			break;
+		default:
+			if (unlikely(net_ratelimit())) {
+				dev_warn(tx_ring->dev,
+					 "partial checksum but l4 proto=%x!\n",
+					 l4_hdr);
+			}
+			break;
+		}
 	}
 
-	return false;
+	/* vlan_macip_lens: MACLEN, VLAN tag */
+	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
+			    type_tucmd, mss_l4len_idx);
+
+	return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
 
-static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
-			  struct ixgbevf_ring *tx_ring,
+static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 			  struct sk_buff *skb, u32 tx_flags,
 			  unsigned int first)
 {
-	struct pci_dev *pdev = adapter->pdev;
 	struct ixgbevf_tx_buffer *tx_buffer_info;
 	unsigned int len;
 	unsigned int total = skb->len;
@@ -2573,12 +2555,11 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
 
 		tx_buffer_info->length = size;
 		tx_buffer_info->mapped_as_page = false;
-		tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
+		tx_buffer_info->dma = dma_map_single(tx_ring->dev,
 						     skb->data + offset,
 						     size, DMA_TO_DEVICE);
-		if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+		if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
 			goto dma_error;
-		tx_buffer_info->time_stamp = jiffies;
 		tx_buffer_info->next_to_watch = i;
 
 		len -= size;
@@ -2603,12 +2584,12 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
 
 			tx_buffer_info->length = size;
 			tx_buffer_info->dma =
-				skb_frag_dma_map(&adapter->pdev->dev, frag,
+				skb_frag_dma_map(tx_ring->dev, frag,
 						 offset, size, DMA_TO_DEVICE);
 			tx_buffer_info->mapped_as_page = true;
-			if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+			if (dma_mapping_error(tx_ring->dev,
+					      tx_buffer_info->dma))
 				goto dma_error;
-			tx_buffer_info->time_stamp = jiffies;
 			tx_buffer_info->next_to_watch = i;
 
 			len -= size;
@@ -2629,15 +2610,15 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
 	i = i - 1;
 	tx_ring->tx_buffer_info[i].skb = skb;
 	tx_ring->tx_buffer_info[first].next_to_watch = i;
+	tx_ring->tx_buffer_info[first].time_stamp = jiffies;
 
 	return count;
 
 dma_error:
-	dev_err(&pdev->dev, "TX DMA map failed\n");
+	dev_err(tx_ring->dev, "TX DMA map failed\n");
 
 	/* clear timestamp and dma mappings for failed tx_buffer_info map */
 	tx_buffer_info->dma = 0;
-	tx_buffer_info->time_stamp = 0;
 	tx_buffer_info->next_to_watch = 0;
 	count--;
 
@@ -2648,14 +2629,13 @@ dma_error:
 		if (i < 0)
 			i += tx_ring->count;
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 	}
 
 	return count;
 }
 
-static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
-			     struct ixgbevf_ring *tx_ring, int tx_flags,
+static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
 			     int count, u32 paylen, u8 hdr_len)
 {
 	union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -2672,21 +2652,24 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
 	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
 		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
 
+	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+		olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
+
 	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
 		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
 
-		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
-			IXGBE_ADVTXD_POPTS_SHIFT;
-
 		/* use index 1 context for tso */
 		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
 		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
-			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
-				IXGBE_ADVTXD_POPTS_SHIFT;
+			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
+
+	}
 
-	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
-		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
-			IXGBE_ADVTXD_POPTS_SHIFT;
+	/*
+	 * Check Context must be set if Tx switch is enabled, which it
+	 * always is for case where virtual functions are running
+	 */
+	olinfo_status |= IXGBE_ADVTXD_CC;
 
 	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
@@ -2705,16 +2688,7 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
 
 	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
 
-	/*
-	 * Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch. (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
-	 */
-	wmb();
-
 	tx_ring->next_to_use = i;
-	writel(i, adapter->hw.hw_addr + tx_ring->tail);
 }
 
 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
@@ -2788,21 +2762,29 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	if (skb->protocol == htons(ETH_P_IP))
 		tx_flags |= IXGBE_TX_FLAGS_IPV4;
-	tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+	tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
 	if (tso < 0) {
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 
 	if (tso)
-		tx_flags |= IXGBE_TX_FLAGS_TSO;
-	else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
-		 (skb->ip_summed == CHECKSUM_PARTIAL))
+		tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
+	else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
 		tx_flags |= IXGBE_TX_FLAGS_CSUM;
 
-	ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
-			 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
+	ixgbevf_tx_queue(tx_ring, tx_flags,
+			 ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
 			 skb->len, hdr_len);
+	/*
+	 * Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch. (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+
+	writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
 
 	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
 