author		Alexander Duyck <alexander.h.duyck@intel.com>	2011-05-27 01:31:47 -0400
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-06-25 03:06:05 -0400
commit		897ab15606ce896b6a574a263beb51cbfb43f041
tree		a269fefc4423c239eeb411c76c6bd9dd25ac09db
parent		63544e9c0055316d0397cb671f2ff99d85c77293
ixgbe: Add one function that handles most of context descriptor setup
There is a significant amount of shared functionality between the checksum and TSO offload configuration in regards to how they set up the context descriptors. Since so much of the functionality is shared, it makes sense to move it into a single function and just call that function from the two context-descriptor-specific routines.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-rw-r--r--	drivers/net/ixgbe/ixgbe.h	  4
-rw-r--r--	drivers/net/ixgbe/ixgbe_fcoe.c	 85
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c	295
3 files changed, 162 insertions(+), 222 deletions(-)
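The shape of the refactor in miniature: before this patch, the TSO, checksum, and FSO paths each open-coded the four descriptor field writes plus the next_to_use ring bookkeeping; afterwards only ixgbe_tx_ctxtdesc() touches the descriptor, and the callers reduce to computing field values. Below is a minimal standalone C sketch of that pattern (a simplified userspace model: the struct layout, bit values, and the tx_ctxtdesc/tx_ring names here are illustrative stand-ins, not the driver's definitions, and the real code stores the words with cpu_to_le32()):

#include <stdint.h>
#include <stdio.h>

/* stand-in for the advanced context descriptor: four 32-bit words */
struct ctx_desc {
	uint32_t vlan_macip_lens;
	uint32_t seqnum_seed;	/* carries fcoe_sof_eof for FSO, 0 otherwise */
	uint32_t type_tucmd_mlhl;
	uint32_t mss_l4len_idx;
};

struct tx_ring {
	struct ctx_desc desc[8];
	uint16_t next_to_use;
	uint16_t count;
};

/* placeholder bit values, not the hardware definitions */
#define TXD_CMD_DEXT		(1u << 29)
#define ADVTXD_DTYP_CTXT	(1u << 20)
#define ADVTXD_TUCMD_L4T_TCP	(1u << 11)

/* shared writer in the spirit of ixgbe_tx_ctxtdesc(): this one place owns
 * the descriptor layout, the DEXT/CTXT identification bits, and the
 * next_to_use wrap logic */
static void tx_ctxtdesc(struct tx_ring *ring, uint32_t vlan_macip_lens,
			uint32_t fcoe_sof_eof, uint32_t type_tucmd,
			uint32_t mss_l4len_idx)
{
	struct ctx_desc *desc = &ring->desc[ring->next_to_use];
	uint16_t i = ring->next_to_use + 1;

	ring->next_to_use = (i < ring->count) ? i : 0;

	/* every context descriptor carries the identification bits */
	type_tucmd |= TXD_CMD_DEXT | ADVTXD_DTYP_CTXT;

	desc->vlan_macip_lens = vlan_macip_lens;
	desc->seqnum_seed = fcoe_sof_eof;
	desc->type_tucmd_mlhl = type_tucmd;
	desc->mss_l4len_idx = mss_l4len_idx;
}

int main(void)
{
	struct tx_ring ring = { .next_to_use = 0, .count = 8 };

	/* a TSO-style caller now reduces to field computation plus one call;
	 * zeros stand in for the computed vlan_macip_lens/mss_l4len_idx */
	tx_ctxtdesc(&ring, 0, 0, ADVTXD_TUCMD_L4T_TCP, 0);

	printf("next_to_use=%u type_tucmd=%#x\n",
	       ring.next_to_use, ring.desc[0].type_tucmd_mlhl);
	return 0;
}

The payoff is visible in the diff below: ixgbe_tso() and ixgbe_tx_csum() lose their descriptor and ring-index plumbing, ixgbe_psum() is folded away entirely, and ixgbe_fso() in ixgbe_fcoe.c shrinks by the same dozen-plus lines.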
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index b1d4b02606ae..393ceae7f3df 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -580,10 +580,10 @@ extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
 			       struct ixgbe_ring *ring);
 extern void ixgbe_set_rx_mode(struct net_device *netdev);
 extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_adapter *adapter,
-		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
 		     u32 tx_flags, u8 *hdr_len);
 extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index f5f39edb86ab..9c20077dd44b 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -474,24 +474,18 @@ ddp_out:
  *
  * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
  */
-int ixgbe_fso(struct ixgbe_adapter *adapter,
-	      struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
 	      u32 tx_flags, u8 *hdr_len)
 {
-	u8 sof, eof;
+	struct fc_frame_header *fh;
 	u32 vlan_macip_lens;
-	u32 fcoe_sof_eof;
-	u32 type_tucmd;
+	u32 fcoe_sof_eof = 0;
 	u32 mss_l4len_idx;
-	int mss = 0;
-	unsigned int i;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	struct ixgbe_adv_tx_context_desc *context_desc;
-	struct fc_frame_header *fh;
+	u8 sof, eof;
 
 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
-		e_err(drv, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
+		dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
 			skb_shinfo(skb)->gso_type);
 		return -EINVAL;
 	}
 
@@ -501,23 +495,22 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 		      sizeof(struct fcoe_hdr));
 
 	/* sets up SOF and ORIS */
-	fcoe_sof_eof = 0;
 	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
 	switch (sof) {
 	case FC_SOF_I2:
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
 		break;
 	case FC_SOF_I3:
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
+			       IXGBE_ADVTXD_FCOEF_ORIS;
 		break;
 	case FC_SOF_N2:
 		break;
 	case FC_SOF_N3:
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
+		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
 		break;
 	default:
-		e_warn(drv, "unknown sof = 0x%x\n", sof);
+		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
 		return -EINVAL;
 	}
 
@@ -530,12 +523,11 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 		break;
 	case FC_EOF_T:
 		/* lso needs ORIE */
-		if (skb_is_gso(skb)) {
-			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
-			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIE;
-		} else {
+		if (skb_is_gso(skb))
+			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
+					IXGBE_ADVTXD_FCOEF_ORIE;
+		else
 			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
-		}
 		break;
 	case FC_EOF_NI:
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
@@ -544,7 +536,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
 		break;
 	default:
-		e_warn(drv, "unknown eof = 0x%x\n", eof);
+		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
 		return -EINVAL;
 	}
 
@@ -553,43 +545,28 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;
 
-	/* hdr_len includes fc_hdr if FCoE lso is enabled */
+	/* include trailer in headlen as it is replicated per frame */
 	*hdr_len = sizeof(struct fcoe_crc_eof);
+
+	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
 	if (skb_is_gso(skb))
 		*hdr_len += (skb_transport_offset(skb) +
 			     sizeof(struct fc_frame_header));
-	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
-	vlan_macip_lens = (skb_transport_offset(skb) +
-			   sizeof(struct fc_frame_header));
-	vlan_macip_lens |= ((skb_transport_offset(skb) - 4)
-			   << IXGBE_ADVTXD_MACLEN_SHIFT);
-	vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-
-	/* type_tycmd and mss: set TUCMD.FCoE to enable offload */
-	type_tucmd = IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT |
-		     IXGBE_ADVTXT_TUCMD_FCOE;
-	if (skb_is_gso(skb))
-		mss = skb_shinfo(skb)->gso_size;
+
 	/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
-	mss_l4len_idx = (mss << IXGBE_ADVTXD_MSS_SHIFT) |
-			(1 << IXGBE_ADVTXD_IDX_SHIFT);
+	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+	vlan_macip_lens = skb_transport_offset(skb) +
+			  sizeof(struct fc_frame_header);
+	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
+			   << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
 	/* write context desc */
-	i = tx_ring->next_to_use;
-	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
-	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
-	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
-	tx_buffer_info = &tx_ring->tx_buffer_info[i];
-	tx_buffer_info->time_stamp = jiffies;
-	tx_buffer_info->next_to_watch = i;
-
-	i++;
-	if (i == tx_ring->count)
-		i = 0;
-	tx_ring->next_to_use = i;
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
+			  IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);
 
 	return skb_is_gso(skb);
 }
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 4fbe07702180..00e60c5ab27f 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -35,6 +35,7 @@
 #include <linux/interrupt.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
+#include <linux/sctp.h>
 #include <linux/pkt_sched.h>
 #include <linux/ipv6.h>
 #include <linux/slab.h>
@@ -6353,179 +6354,145 @@ static void ixgbe_service_task(struct work_struct *work)
 	ixgbe_service_event_complete(adapter);
 }
 
-static int ixgbe_tso(struct ixgbe_adapter *adapter,
-		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
-		     u32 tx_flags, u8 *hdr_len, __be16 protocol)
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
+		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
-	unsigned int i;
-	int err;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
-	u32 mss_l4len_idx, l4len;
+	u16 i = tx_ring->next_to_use;
 
-	if (skb_is_gso(skb)) {
-		if (skb_header_cloned(skb)) {
-			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-			if (err)
-				return err;
-		}
-		l4len = tcp_hdrlen(skb);
-		*hdr_len += l4len;
-
-		if (protocol == htons(ETH_P_IP)) {
-			struct iphdr *iph = ip_hdr(skb);
-			iph->tot_len = 0;
-			iph->check = 0;
-			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-								 iph->daddr, 0,
-								 IPPROTO_TCP,
-								 0);
-		} else if (skb_is_gso_v6(skb)) {
-			ipv6_hdr(skb)->payload_len = 0;
-			tcp_hdr(skb)->check =
-			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-					     &ipv6_hdr(skb)->daddr,
-					     0, IPPROTO_TCP, 0);
-		}
+	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
 
-		i = tx_ring->next_to_use;
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
 
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-
-		/* VLAN MACLEN IPLEN */
-		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-			vlan_macip_lens |=
-			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-		vlan_macip_lens |= ((skb_network_offset(skb)) <<
-				    IXGBE_ADVTXD_MACLEN_SHIFT);
-		*hdr_len += skb_network_offset(skb);
-		vlan_macip_lens |=
-		    (skb_transport_header(skb) - skb_network_header(skb));
-		*hdr_len +=
-		    (skb_transport_header(skb) - skb_network_header(skb));
-		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-		context_desc->seqnum_seed = 0;
-
-		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
-				   IXGBE_ADVTXD_DTYP_CTXT);
+	/* set bits to identify this as an advanced context descriptor */
+	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
 
-		if (protocol == htons(ETH_P_IP))
-			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
-		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-
-		/* MSS L4LEN IDX */
-		mss_l4len_idx =
-		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
-		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
-		/* use index 1 for TSO */
-		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
+	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
 
-		tx_buffer_info->time_stamp = jiffies;
-		tx_buffer_info->next_to_watch = i;
+static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+		     u32 tx_flags, __be16 protocol, u8 *hdr_len)
+{
+	int err;
+	u32 vlan_macip_lens, type_tucmd;
+	u32 mss_l4len_idx, l4len;
 
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-		tx_ring->next_to_use = i;
+	if (!skb_is_gso(skb))
+		return 0;
 
-		return true;
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err)
+			return err;
 	}
-	return false;
-}
 
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-		      __be16 protocol)
+	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+
+	if (protocol == __constant_htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+		iph->tot_len = 0;
+		iph->check = 0;
+		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+							 iph->daddr, 0,
+							 IPPROTO_TCP,
+							 0);
+		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+	} else if (skb_is_gso_v6(skb)) {
+		ipv6_hdr(skb)->payload_len = 0;
+		tcp_hdr(skb)->check =
+		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+				     &ipv6_hdr(skb)->daddr,
+				     0, IPPROTO_TCP, 0);
+	}
+
+	l4len = tcp_hdrlen(skb);
+	*hdr_len = skb_transport_offset(skb) + l4len;
+
+	/* mss_l4len_id: use 1 as index for TSO */
+	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+	vlan_macip_lens = skb_network_header_len(skb);
+	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
+			  mss_l4len_idx);
+
+	return 1;
+}
+
+static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
+			  struct sk_buff *skb, u32 tx_flags,
+			  __be16 protocol)
 {
-	u32 rtn = 0;
+	u32 vlan_macip_lens = 0;
+	u32 mss_l4len_idx = 0;
+	u32 type_tucmd = 0;
 
-	switch (protocol) {
-	case cpu_to_be16(ETH_P_IP):
-		rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
-		switch (ip_hdr(skb)->protocol) {
-		case IPPROTO_TCP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+	if (skb->ip_summed != CHECKSUM_PARTIAL) {
+		if (!(tx_flags & IXGBE_TX_FLAGS_VLAN))
+			return false;
+	} else {
+		u8 l4_hdr = 0;
+		switch (protocol) {
+		case __constant_htons(ETH_P_IP):
+			vlan_macip_lens |= skb_network_header_len(skb);
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+			l4_hdr = ip_hdr(skb)->protocol;
 			break;
-		case IPPROTO_SCTP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+		case __constant_htons(ETH_P_IPV6):
+			vlan_macip_lens |= skb_network_header_len(skb);
+			l4_hdr = ipv6_hdr(skb)->nexthdr;
+			break;
+		default:
+			if (unlikely(net_ratelimit())) {
+				dev_warn(tx_ring->dev,
+					 "partial checksum but proto=%x!\n",
+					 skb->protocol);
+			}
 			break;
 		}
-		break;
-	case cpu_to_be16(ETH_P_IPV6):
-		/* XXX what about other V6 headers?? */
-		switch (ipv6_hdr(skb)->nexthdr) {
+
+		switch (l4_hdr) {
 		case IPPROTO_TCP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+			mss_l4len_idx = tcp_hdrlen(skb) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
 			break;
 		case IPPROTO_SCTP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+			mss_l4len_idx = sizeof(struct sctphdr) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
+			break;
+		case IPPROTO_UDP:
+			mss_l4len_idx = sizeof(struct udphdr) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
+			break;
+		default:
+			if (unlikely(net_ratelimit())) {
+				dev_warn(tx_ring->dev,
+					 "partial checksum but l4 proto=%x!\n",
+					 skb->protocol);
+			}
 			break;
 		}
-		break;
-	default:
-		if (unlikely(net_ratelimit()))
-			e_warn(probe, "partial checksum but proto=%x!\n",
-			       protocol);
-		break;
 	}
 
-	return rtn;
-}
-
-static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
-			  struct ixgbe_ring *tx_ring,
-			  struct sk_buff *skb, u32 tx_flags,
-			  __be16 protocol)
-{
-	struct ixgbe_adv_tx_context_desc *context_desc;
-	unsigned int i;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
-
-	if (skb->ip_summed == CHECKSUM_PARTIAL ||
-	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
-		i = tx_ring->next_to_use;
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-
-		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-			vlan_macip_lens |=
-			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-		vlan_macip_lens |= (skb_network_offset(skb) <<
-				    IXGBE_ADVTXD_MACLEN_SHIFT);
-		if (skb->ip_summed == CHECKSUM_PARTIAL)
-			vlan_macip_lens |= (skb_transport_header(skb) -
-					    skb_network_header(skb));
-
-		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-		context_desc->seqnum_seed = 0;
+	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
-		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
-				    IXGBE_ADVTXD_DTYP_CTXT);
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
+			  type_tucmd, mss_l4len_idx);
 
-		if (skb->ip_summed == CHECKSUM_PARTIAL)
-			type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
-
-		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-		/* use index zero for tx checksum offload */
-		context_desc->mss_l4len_idx = 0;
-
-		tx_buffer_info->time_stamp = jiffies;
-		tx_buffer_info->next_to_watch = i;
-
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-		tx_ring->next_to_use = i;
-
-		return true;
-	}
-
-	return false;
+	return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
 
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
@@ -6918,29 +6885,21 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
 #ifdef IXGBE_FCOE
 		/* setup tx offload for FCoE */
-		tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
-		if (tso < 0) {
-			dev_kfree_skb_any(skb);
-			return NETDEV_TX_OK;
-		}
-		if (tso)
+		tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len);
+		if (tso < 0)
+			goto out_drop;
+		else if (tso)
 			tx_flags |= IXGBE_TX_FLAGS_FSO;
 #endif /* IXGBE_FCOE */
 	} else {
 		if (protocol == htons(ETH_P_IP))
 			tx_flags |= IXGBE_TX_FLAGS_IPV4;
-		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
-				protocol);
-		if (tso < 0) {
-			dev_kfree_skb_any(skb);
-			return NETDEV_TX_OK;
-		}
-
-		if (tso)
+		tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
+		if (tso < 0)
+			goto out_drop;
+		else if (tso)
 			tx_flags |= IXGBE_TX_FLAGS_TSO;
-		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
-				       protocol) &&
-			 (skb->ip_summed == CHECKSUM_PARTIAL))
+		else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
 			tx_flags |= IXGBE_TX_FLAGS_CSUM;
 	}
 
@@ -6953,12 +6912,16 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 		ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	} else {
-		dev_kfree_skb_any(skb);
 		tx_ring->tx_buffer_info[first].time_stamp = 0;
 		tx_ring->next_to_use = first;
+		goto out_drop;
 	}
 
 	return NETDEV_TX_OK;
+
+out_drop:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
 }
 
 static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)