 drivers/net/ixgbe/ixgbe.h        |  21
 drivers/net/ixgbe/ixgbe_dcb_nl.c |  74
 drivers/net/ixgbe/ixgbe_fcoe.c   | 139
 drivers/net/ixgbe/ixgbe_fcoe.h   |   1
 drivers/net/ixgbe/ixgbe_main.c   | 390
 drivers/net/ixgbe/ixgbe_sriov.c  |  16
 drivers/net/ixgbe/ixgbe_type.h   |   2
 7 files changed, 298 insertions(+), 345 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index d6bfb2f6ba86..744b64108130 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -131,6 +131,13 @@ struct vf_macvlans {
 	u8 vf_macvlan[ETH_ALEN];
 };
 
+#define IXGBE_MAX_TXD_PWR	14
+#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbe_tx_buffer {
@@ -306,9 +313,13 @@ struct ixgbe_q_vector {
 	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
 #define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
 
-#define IXGBE_DESC_UNUSED(R) \
-	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
-	(R)->next_to_clean - (R)->next_to_use - 1)
+static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
+{
+	u16 ntc = ring->next_to_clean;
+	u16 ntu = ring->next_to_use;
+
+	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
 
 #define IXGBE_RX_DESC_ADV(R, i) \
 	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
@@ -576,10 +587,10 @@ extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
 				struct ixgbe_ring *ring);
 extern void ixgbe_set_rx_mode(struct net_device *netdev);
 extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_adapter *adapter,
-		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
 		     u32 tx_flags, u8 *hdr_len);
 extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
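
The DESC_NEEDED and ixgbe_desc_unused() arithmetic above is easy to sanity-check outside the kernel. The sketch below mirrors this hunk's DIV_ROUND_UP-based TXD_USE_COUNT and the unused-descriptor calculation; struct ring and the demo numbers are stand-ins for illustration, not the driver's types.

#include <stdint.h>
#include <stdio.h>

#define MAX_TXD_PWR      14
#define MAX_DATA_PER_TXD (1 << MAX_TXD_PWR)   /* 16KB max data per descriptor */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), MAX_DATA_PER_TXD)

struct ring { uint16_t next_to_use, next_to_clean, count; };

/* slots between the cleaned tail and the producer head, minus one so
 * next_to_use can never catch next_to_clean on a completely full ring */
static uint16_t desc_unused(const struct ring *r)
{
        uint16_t ntc = r->next_to_clean, ntu = r->next_to_use;

        return ((ntc > ntu) ? 0 : r->count) + ntc - ntu - 1;
}

int main(void)
{
        struct ring r = { .next_to_use = 510, .next_to_clean = 2, .count = 512 };

        printf("unused after wrap: %u\n", desc_unused(&r));               /* 3 */
        printf("descriptors for a 60KB buffer: %d\n", TXD_USE_COUNT(60 * 1024)); /* 4 */
        return 0;
}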
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index bd2d75265389..0ace6ce1d0b4 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -330,24 +330,20 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
 static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	int ret;
+#ifdef IXGBE_FCOE
 	struct dcb_app app = {
 			      .selector = DCB_APP_IDTYPE_ETHTYPE,
 			      .protocol = ETH_P_FCOE,
 			     };
 	u8 up = dcb_getapp(netdev, &app);
-	int ret;
+#endif
 
 	ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
 				 MAX_TRAFFIC_CLASS);
 	if (ret)
 		return DCB_NO_HW_CHG;
 
-	/* In IEEE mode app data must be parsed into DCBX format for
-	 * hardware routines.
-	 */
-	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
-		up = (1 << up);
-
 #ifdef IXGBE_FCOE
 	if (up && (up != (1 << adapter->fcoe.up)))
 		adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
@@ -361,7 +357,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
 		usleep_range(1000, 2000);
 
-	ixgbe_fcoe_setapp(adapter, up);
+	adapter->fcoe.up = ffs(up) - 1;
 
 	if (netif_running(netdev))
 		netdev->netdev_ops->ndo_stop(netdev);
@@ -674,24 +670,75 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
 	return err;
 }
 
+#ifdef IXGBE_FCOE
+static void ixgbe_dcbnl_devreset(struct net_device *dev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+	if (netif_running(dev))
+		dev->netdev_ops->ndo_stop(dev);
+
+	ixgbe_clear_interrupt_scheme(adapter);
+	ixgbe_init_interrupt_scheme(adapter);
+
+	if (netif_running(dev))
+		dev->netdev_ops->ndo_open(dev);
+}
+#endif
+
 static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
 				   struct dcb_app *app)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	int err = -EINVAL;
 
 	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
-		return -EINVAL;
+		return err;
 
-	dcb_setapp(dev, app);
+	err = dcb_ieee_setapp(dev, app);
 
 #ifdef IXGBE_FCOE
-	if (app->selector == 1 && app->protocol == ETH_P_FCOE &&
-	    adapter->fcoe.tc == app->priority)
-		ixgbe_dcbnl_set_all(dev);
+	if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+	    app->protocol == ETH_P_FCOE) {
+		u8 app_mask = dcb_ieee_getapp_mask(dev, app);
+
+		if (app_mask & (1 << adapter->fcoe.up))
+			return err;
+
+		adapter->fcoe.up = app->priority;
+		ixgbe_dcbnl_devreset(dev);
+	}
 #endif
 	return 0;
 }
 
+static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
+				   struct dcb_app *app)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	int err;
+
+	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+		return -EINVAL;
+
+	err = dcb_ieee_delapp(dev, app);
+
+#ifdef IXGBE_FCOE
+	if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+	    app->protocol == ETH_P_FCOE) {
+		u8 app_mask = dcb_ieee_getapp_mask(dev, app);
+
+		if (app_mask & (1 << adapter->fcoe.up))
+			return err;
+
+		adapter->fcoe.up = app_mask ?
+				   ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC;
+		ixgbe_dcbnl_devreset(dev);
+	}
+#endif
+	return err;
+}
+
 static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -743,6 +790,7 @@ const struct dcbnl_rtnl_ops dcbnl_ops = {
 	.ieee_getpfc	= ixgbe_dcbnl_ieee_getpfc,
 	.ieee_setpfc	= ixgbe_dcbnl_ieee_setpfc,
 	.ieee_setapp	= ixgbe_dcbnl_ieee_setapp,
+	.ieee_delapp	= ixgbe_dcbnl_ieee_delapp,
 	.getstate	= ixgbe_dcbnl_get_state,
 	.setstate	= ixgbe_dcbnl_set_state,
 	.getpermhwaddr	= ixgbe_dcbnl_get_perm_hw_addr,
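
Both ieee_setapp and ieee_delapp above reduce an 802.1p priority bitmap to a single FCoE priority; on delete, ffs() picks the lowest remaining set bit, falling back to the default when the mask empties. A minimal userspace sketch of that selection, with made-up masks and the driver's default of IXGBE_FCOE_DEFTC assumed to be 3:

#include <stdio.h>
#include <strings.h>            /* ffs() */

#define IXGBE_FCOE_DEFTC 3      /* assumed default traffic class */

static int pick_fcoe_up(unsigned int app_mask)
{
        /* lowest priority still carrying the FCoE app entry, else default */
        return app_mask ? ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC;
}

int main(void)
{
        printf("mask 0x28 -> up %d\n", pick_fcoe_up(0x28)); /* bits 3 and 5 -> 3 */
        printf("mask 0x00 -> up %d\n", pick_fcoe_up(0x00)); /* empty -> 3 (default) */
        return 0;
}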
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index f5f39edb86ab..f0c1018bbf31 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -26,9 +26,6 @@
 *******************************************************************************/
 
 #include "ixgbe.h"
-#ifdef CONFIG_IXGBE_DCB
-#include "ixgbe_dcb_82599.h"
-#endif /* CONFIG_IXGBE_DCB */
 #include <linux/if_ether.h>
 #include <linux/gfp.h>
 #include <linux/if_vlan.h>
@@ -474,24 +471,18 @@ ddp_out:
  *
  * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
  */
-int ixgbe_fso(struct ixgbe_adapter *adapter,
-	      struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
 	      u32 tx_flags, u8 *hdr_len)
 {
-	u8 sof, eof;
+	struct fc_frame_header *fh;
 	u32 vlan_macip_lens;
-	u32 fcoe_sof_eof;
-	u32 type_tucmd;
+	u32 fcoe_sof_eof = 0;
 	u32 mss_l4len_idx;
-	int mss = 0;
-	unsigned int i;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	struct ixgbe_adv_tx_context_desc *context_desc;
-	struct fc_frame_header *fh;
+	u8 sof, eof;
 
 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
-		e_err(drv, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
+		dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
 			skb_shinfo(skb)->gso_type);
 		return -EINVAL;
 	}
 
@@ -501,23 +492,22 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 			   sizeof(struct fcoe_hdr));
 
 	/* sets up SOF and ORIS */
-	fcoe_sof_eof = 0;
 	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
 	switch (sof) {
 	case FC_SOF_I2:
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
 		break;
 	case FC_SOF_I3:
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
+			       IXGBE_ADVTXD_FCOEF_ORIS;
 		break;
 	case FC_SOF_N2:
 		break;
 	case FC_SOF_N3:
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
+		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
 		break;
 	default:
-		e_warn(drv, "unknown sof = 0x%x\n", sof);
+		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
 		return -EINVAL;
 	}
 
@@ -530,12 +520,11 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 		break;
 	case FC_EOF_T:
 		/* lso needs ORIE */
-		if (skb_is_gso(skb)) {
-			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
-			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIE;
-		} else {
+		if (skb_is_gso(skb))
+			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
+					IXGBE_ADVTXD_FCOEF_ORIE;
+		else
 			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
-		}
 		break;
 	case FC_EOF_NI:
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
@@ -544,7 +533,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
 		break;
 	default:
-		e_warn(drv, "unknown eof = 0x%x\n", eof);
+		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
 		return -EINVAL;
 	}
 
@@ -553,43 +542,28 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;
 
-	/* hdr_len includes fc_hdr if FCoE lso is enabled */
+	/* include trailer in headlen as it is replicated per frame */
 	*hdr_len = sizeof(struct fcoe_crc_eof);
+
+	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
 	if (skb_is_gso(skb))
 		*hdr_len += (skb_transport_offset(skb) +
 			     sizeof(struct fc_frame_header));
-	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
-	vlan_macip_lens = (skb_transport_offset(skb) +
-			   sizeof(struct fc_frame_header));
-	vlan_macip_lens |= ((skb_transport_offset(skb) - 4)
-			   << IXGBE_ADVTXD_MACLEN_SHIFT);
-	vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-
-	/* type_tycmd and mss: set TUCMD.FCoE to enable offload */
-	type_tucmd = IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT |
-		     IXGBE_ADVTXT_TUCMD_FCOE;
-	if (skb_is_gso(skb))
-		mss = skb_shinfo(skb)->gso_size;
+
 	/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
-	mss_l4len_idx = (mss << IXGBE_ADVTXD_MSS_SHIFT) |
-			(1 << IXGBE_ADVTXD_IDX_SHIFT);
+	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+	vlan_macip_lens = skb_transport_offset(skb) +
+			  sizeof(struct fc_frame_header);
+	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
+			   << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
 	/* write context desc */
-	i = tx_ring->next_to_use;
-	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
-	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
-	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
-	tx_buffer_info = &tx_ring->tx_buffer_info[i];
-	tx_buffer_info->time_stamp = jiffies;
-	tx_buffer_info->next_to_watch = i;
-
-	i++;
-	if (i == tx_ring->count)
-		i = 0;
-	tx_ring->next_to_use = i;
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
+			  IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);
 
 	return skb_is_gso(skb);
 }
@@ -648,10 +622,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
-#ifdef CONFIG_IXGBE_DCB
-	u8 tc;
-	u32 up2tc;
-#endif
 
 	if (!fcoe->pool) {
 		spin_lock_init(&fcoe->lock);
@@ -717,18 +687,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 		   IXGBE_FCRXCTRL_FCOELLI |
 		   IXGBE_FCRXCTRL_FCCRCBO |
 		   (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
-#ifdef CONFIG_IXGBE_DCB
-	up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
-	for (i = 0; i < MAX_USER_PRIORITY; i++) {
-		tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
-		tc &= (MAX_TRAFFIC_CLASS - 1);
-		if (fcoe->tc == tc) {
-			fcoe->up = i;
-			break;
-		}
-	}
-#endif
-
 	return;
 
 out_extra_ddp_buffer:
@@ -856,41 +814,6 @@ out_disable:
 	return rc;
 }
 
-#ifdef CONFIG_IXGBE_DCB
-/**
- * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
- * @adapter : ixgbe adapter
- * @up : 802.1p user priority bitmap
- *
- * Finds out the traffic class from the input user priority
- * bitmap for FCoE.
- *
- * Returns : 0 on success otherwise returns 1 on error
- */
-u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
-{
-	int i;
-	u32 up2tc;
-
-	/* valid user priority bitmap must not be 0 */
-	if (up) {
-		/* from user priority to the corresponding traffic class */
-		up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
-		for (i = 0; i < MAX_USER_PRIORITY; i++) {
-			if (up & (1 << i)) {
-				up2tc >>= (i * IXGBE_RTTUP2TC_UP_SHIFT);
-				up2tc &= (MAX_TRAFFIC_CLASS - 1);
-				adapter->fcoe.tc = (u8)up2tc;
-				adapter->fcoe.up = i;
-				return 0;
-			}
-		}
-	}
-
-	return 1;
-}
-#endif /* CONFIG_IXGBE_DCB */
-
 /**
  * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
  * @netdev : ixgbe adapter
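
The vlan_macip_lens word built in ixgbe_fso() packs three fields into one descriptor dword: the FC header length in the low bits, the MAC header length above IXGBE_ADVTXD_MACLEN_SHIFT, and the VLAN tag in the top half. A standalone sketch of that packing; the shift of 9 matches the driver's IXGBE_ADVTXD_MACLEN_SHIFT, while the offsets and tag are demo values:

#include <stdint.h>
#include <stdio.h>

#define ADVTXD_MACLEN_SHIFT 9   /* MACLEN sits above the 9-bit HEADLEN field */

int main(void)
{
        uint32_t transport_off = 38;  /* demo: eth + FCoE headers */
        uint32_t fc_hdr_len = 24;     /* sizeof(struct fc_frame_header) */
        uint32_t vlan_tag = 5u << 16; /* VLAN occupies the upper 16 bits */
        uint32_t vlan_macip_lens;

        vlan_macip_lens = transport_off + fc_hdr_len;                  /* HEADLEN */
        vlan_macip_lens |= (transport_off - 4) << ADVTXD_MACLEN_SHIFT; /* MACLEN */
        vlan_macip_lens |= vlan_tag;

        printf("vlan_macip_lens = 0x%08x\n", vlan_macip_lens);
        return 0;
}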
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index d876e7ac2257..99de145e290d 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -74,7 +74,6 @@ struct ixgbe_fcoe {
 	dma_addr_t extra_ddp_buffer_dma;
 	unsigned long mode;
 #ifdef CONFIG_IXGBE_DCB
-	u8 tc;
 	u8 up;
 #endif
 };
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 2496a27b5991..fa671ae0ab69 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -35,6 +35,7 @@
 #include <linux/interrupt.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
+#include <linux/sctp.h>
 #include <linux/pkt_sched.h>
 #include <linux/ipv6.h>
 #include <linux/slab.h>
@@ -771,15 +772,6 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
 	return ret;
 }
 
-#define IXGBE_MAX_TXD_PWR       14
-#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
-			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
-	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
-
 /**
  * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
  * @adapter: driver private struct
@@ -882,7 +874,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
-		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
@@ -1474,7 +1466,7 @@ next_desc:
 	}
 
 	rx_ring->next_to_clean = i;
-	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+	cleaned_count = ixgbe_desc_unused(rx_ring);
 
 	if (cleaned_count)
 		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
@@ -1880,8 +1872,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
 
 static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
 {
-	struct net_device *netdev = data;
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_adapter *adapter = data;
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 eicr;
 
@@ -2376,7 +2367,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 
 	sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
 	err = request_irq(adapter->msix_entries[vector].vector,
-			  ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
+			  ixgbe_msix_lsc, 0, adapter->lsc_int_name, adapter);
 	if (err) {
 		e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
 		goto free_queue_irqs;
@@ -2488,8 +2479,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
  **/
 static irqreturn_t ixgbe_intr(int irq, void *data)
 {
-	struct net_device *netdev = data;
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_adapter *adapter = data;
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
 	u32 eicr;
@@ -2586,10 +2576,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 		err = ixgbe_request_msix_irqs(adapter);
 	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
 		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
-				  netdev->name, netdev);
+				  netdev->name, adapter);
 	} else {
 		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
-				  netdev->name, netdev);
+				  netdev->name, adapter);
 	}
 
 	if (err)
@@ -2600,15 +2590,13 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 
 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
-
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 		int i, q_vectors;
 
 		q_vectors = adapter->num_msix_vectors;
 
 		i = q_vectors - 1;
-		free_irq(adapter->msix_entries[i].vector, netdev);
+		free_irq(adapter->msix_entries[i].vector, adapter);
 
 		i--;
 		for (; i >= 0; i--) {
@@ -2623,7 +2611,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 
 		ixgbe_reset_q_vectors(adapter);
 	} else {
-		free_irq(adapter->pdev->irq, netdev);
+		free_irq(adapter->pdev->irq, adapter);
 	}
 }
 
@@ -3130,7 +3118,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
 
 	ixgbe_rx_desc_queue_enable(adapter, ring);
-	ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
+	ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
 }
 
 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -5181,7 +5169,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 	adapter->ring_feature[RING_F_FCOE].indices = 0;
 #ifdef CONFIG_IXGBE_DCB
 	/* Default traffic class to use for FCoE */
-	adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
 	adapter->fcoe.up = IXGBE_FCOE_DEFTC;
 #endif
 #endif /* IXGBE_FCOE */
@@ -6357,179 +6344,145 @@ static void ixgbe_service_task(struct work_struct *work)
 	ixgbe_service_event_complete(adapter);
 }
 
-static int ixgbe_tso(struct ixgbe_adapter *adapter,
-		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
-		     u32 tx_flags, u8 *hdr_len, __be16 protocol)
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
+		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
-	unsigned int i;
-	int err;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
-	u32 mss_l4len_idx, l4len;
+	u16 i = tx_ring->next_to_use;
 
-	if (skb_is_gso(skb)) {
-		if (skb_header_cloned(skb)) {
-			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-			if (err)
-				return err;
-		}
-		l4len = tcp_hdrlen(skb);
-		*hdr_len += l4len;
-
-		if (protocol == htons(ETH_P_IP)) {
-			struct iphdr *iph = ip_hdr(skb);
-			iph->tot_len = 0;
-			iph->check = 0;
-			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-								 iph->daddr, 0,
-								 IPPROTO_TCP,
-								 0);
-		} else if (skb_is_gso_v6(skb)) {
-			ipv6_hdr(skb)->payload_len = 0;
-			tcp_hdr(skb)->check =
-			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-					     &ipv6_hdr(skb)->daddr,
-					     0, IPPROTO_TCP, 0);
-		}
+	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
 
-		i = tx_ring->next_to_use;
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
 
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-
-		/* VLAN MACLEN IPLEN */
-		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-			vlan_macip_lens |=
-			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-		vlan_macip_lens |= ((skb_network_offset(skb)) <<
-				    IXGBE_ADVTXD_MACLEN_SHIFT);
-		*hdr_len += skb_network_offset(skb);
-		vlan_macip_lens |=
-		    (skb_transport_header(skb) - skb_network_header(skb));
-		*hdr_len +=
-		    (skb_transport_header(skb) - skb_network_header(skb));
-		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-		context_desc->seqnum_seed = 0;
-
-		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
-				   IXGBE_ADVTXD_DTYP_CTXT);
+	/* set bits to identify this as an advanced context descriptor */
+	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
 
-		if (protocol == htons(ETH_P_IP))
-			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
-		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-
-		/* MSS L4LEN IDX */
-		mss_l4len_idx =
-		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
-		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
-		/* use index 1 for TSO */
-		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
+	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
 
-		tx_buffer_info->time_stamp = jiffies;
-		tx_buffer_info->next_to_watch = i;
+static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+		     u32 tx_flags, __be16 protocol, u8 *hdr_len)
+{
+	int err;
+	u32 vlan_macip_lens, type_tucmd;
+	u32 mss_l4len_idx, l4len;
 
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-		tx_ring->next_to_use = i;
+	if (!skb_is_gso(skb))
+		return 0;
 
-		return true;
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err)
+			return err;
 	}
-	return false;
-}
 
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-		      __be16 protocol)
+	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+
+	if (protocol == __constant_htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+		iph->tot_len = 0;
+		iph->check = 0;
+		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+							 iph->daddr, 0,
+							 IPPROTO_TCP,
+							 0);
+		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+	} else if (skb_is_gso_v6(skb)) {
+		ipv6_hdr(skb)->payload_len = 0;
+		tcp_hdr(skb)->check =
+		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+				     &ipv6_hdr(skb)->daddr,
+				     0, IPPROTO_TCP, 0);
+	}
+
+	l4len = tcp_hdrlen(skb);
+	*hdr_len = skb_transport_offset(skb) + l4len;
+
+	/* mss_l4len_id: use 1 as index for TSO */
+	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+	vlan_macip_lens = skb_network_header_len(skb);
+	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
+			  mss_l4len_idx);
+
+	return 1;
+}
+
+static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
+			  struct sk_buff *skb, u32 tx_flags,
+			  __be16 protocol)
 {
-	u32 rtn = 0;
+	u32 vlan_macip_lens = 0;
+	u32 mss_l4len_idx = 0;
+	u32 type_tucmd = 0;
 
-	switch (protocol) {
-	case cpu_to_be16(ETH_P_IP):
-		rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
-		switch (ip_hdr(skb)->protocol) {
-		case IPPROTO_TCP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+	if (skb->ip_summed != CHECKSUM_PARTIAL) {
+		if (!(tx_flags & IXGBE_TX_FLAGS_VLAN))
+			return false;
+	} else {
+		u8 l4_hdr = 0;
+		switch (protocol) {
+		case __constant_htons(ETH_P_IP):
+			vlan_macip_lens |= skb_network_header_len(skb);
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+			l4_hdr = ip_hdr(skb)->protocol;
 			break;
-		case IPPROTO_SCTP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+		case __constant_htons(ETH_P_IPV6):
+			vlan_macip_lens |= skb_network_header_len(skb);
+			l4_hdr = ipv6_hdr(skb)->nexthdr;
+			break;
+		default:
+			if (unlikely(net_ratelimit())) {
+				dev_warn(tx_ring->dev,
+				 "partial checksum but proto=%x!\n",
+				 skb->protocol);
+			}
 			break;
 		}
-		break;
-	case cpu_to_be16(ETH_P_IPV6):
-		/* XXX what about other V6 headers?? */
-		switch (ipv6_hdr(skb)->nexthdr) {
+
+		switch (l4_hdr) {
 		case IPPROTO_TCP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+			mss_l4len_idx = tcp_hdrlen(skb) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
 			break;
 		case IPPROTO_SCTP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+			mss_l4len_idx = sizeof(struct sctphdr) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
+			break;
+		case IPPROTO_UDP:
+			mss_l4len_idx = sizeof(struct udphdr) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
+			break;
+		default:
+			if (unlikely(net_ratelimit())) {
+				dev_warn(tx_ring->dev,
+				 "partial checksum but l4 proto=%x!\n",
+				 skb->protocol);
+			}
 			break;
 		}
-		break;
-	default:
-		if (unlikely(net_ratelimit()))
-			e_warn(probe, "partial checksum but proto=%x!\n",
-			       protocol);
-		break;
 	}
 
-	return rtn;
-}
-
-static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
-			  struct ixgbe_ring *tx_ring,
-			  struct sk_buff *skb, u32 tx_flags,
-			  __be16 protocol)
-{
-	struct ixgbe_adv_tx_context_desc *context_desc;
-	unsigned int i;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
-
-	if (skb->ip_summed == CHECKSUM_PARTIAL ||
-	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
-		i = tx_ring->next_to_use;
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-
-		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-			vlan_macip_lens |=
-			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-		vlan_macip_lens |= (skb_network_offset(skb) <<
-				    IXGBE_ADVTXD_MACLEN_SHIFT);
-		if (skb->ip_summed == CHECKSUM_PARTIAL)
-			vlan_macip_lens |= (skb_transport_header(skb) -
-					    skb_network_header(skb));
-
-		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-		context_desc->seqnum_seed = 0;
+	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
-		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
-				    IXGBE_ADVTXD_DTYP_CTXT);
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
+			  type_tucmd, mss_l4len_idx);
 
-		if (skb->ip_summed == CHECKSUM_PARTIAL)
-			type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
-
-		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-		/* use index zero for tx checksum offload */
-		context_desc->mss_l4len_idx = 0;
-
-		tx_buffer_info->time_stamp = jiffies;
-		tx_buffer_info->next_to_watch = i;
-
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-		tx_ring->next_to_use = i;
-
-		return true;
-	}
-
-	return false;
+	return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
 
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
@@ -6541,11 +6494,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 	struct ixgbe_tx_buffer *tx_buffer_info;
 	unsigned int len;
 	unsigned int total = skb->len;
-	unsigned int offset = 0, size, count = 0, i;
+	unsigned int offset = 0, size, count = 0;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
 	unsigned int bytecount = skb->len;
 	u16 gso_segs = 1;
+	u16 i;
 
 	i = tx_ring->next_to_use;
 
@@ -6811,7 +6765,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
 					      input, common, ring->queue_index);
 }
 
-static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
 {
 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 	/* Herbert's original patch had:
@@ -6821,7 +6775,7 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
 
 	/* We need to check again in a case another CPU has just
 	 * made room available. */
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+	if (likely(ixgbe_desc_unused(tx_ring) < size))
 		return -EBUSY;
 
 	/* A reprieve! - use start_queue because it doesn't call schedule */
@@ -6830,9 +6784,9 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
 	return 0;
 }
 
-static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
+static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
 {
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+	if (likely(ixgbe_desc_unused(tx_ring) >= size))
 		return 0;
 	return __ixgbe_maybe_stop_tx(tx_ring, size);
 }
@@ -6868,13 +6822,33 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 			  struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring)
 {
-	unsigned int first;
-	unsigned int tx_flags = 0;
-	u8 hdr_len = 0;
 	int tso;
-	int count = 0;
-	unsigned int f;
+	u32 tx_flags = 0;
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+	unsigned short f;
+#endif
+	u16 first;
+	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 	__be16 protocol;
+	u8 hdr_len = 0;
+
+	/*
+	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
+	 */
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+	count += skb_shinfo(skb)->nr_frags;
+#endif
+	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
+		tx_ring->tx_stats.tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
 
 	protocol = vlan_get_protocol(skb);
 
@@ -6899,51 +6873,29 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
 	    (protocol == htons(ETH_P_FCOE)))
 		tx_flags |= IXGBE_TX_FLAGS_FCOE;
-#endif
-
-	/* four things can cause us to need a context descriptor */
-	if (skb_is_gso(skb) ||
-	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
-	    (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
-	    (tx_flags & IXGBE_TX_FLAGS_FCOE))
-		count++;
-
-	count += TXD_USE_COUNT(skb_headlen(skb));
-	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-
-	if (ixgbe_maybe_stop_tx(tx_ring, count)) {
-		tx_ring->tx_stats.tx_busy++;
-		return NETDEV_TX_BUSY;
-	}
 
+#endif
+	/* record the location of the first descriptor for this packet */
 	first = tx_ring->next_to_use;
+
 	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
 #ifdef IXGBE_FCOE
 		/* setup tx offload for FCoE */
-		tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
-		if (tso < 0) {
-			dev_kfree_skb_any(skb);
-			return NETDEV_TX_OK;
-		}
-		if (tso)
+		tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len);
+		if (tso < 0)
+			goto out_drop;
+		else if (tso)
 			tx_flags |= IXGBE_TX_FLAGS_FSO;
 #endif /* IXGBE_FCOE */
 	} else {
 		if (protocol == htons(ETH_P_IP))
 			tx_flags |= IXGBE_TX_FLAGS_IPV4;
-		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
-				protocol);
-		if (tso < 0) {
-			dev_kfree_skb_any(skb);
-			return NETDEV_TX_OK;
-		}
-
-		if (tso)
+		tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
+		if (tso < 0)
+			goto out_drop;
+		else if (tso)
 			tx_flags |= IXGBE_TX_FLAGS_TSO;
-		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
-				       protocol) &&
-			 (skb->ip_summed == CHECKSUM_PARTIAL))
+		else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
 			tx_flags |= IXGBE_TX_FLAGS_CSUM;
 	}
 
@@ -6956,12 +6908,16 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 		ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	} else {
-		dev_kfree_skb_any(skb);
 		tx_ring->tx_buffer_info[first].time_stamp = 0;
 		tx_ring->next_to_use = first;
+		goto out_drop;
 	}
 
 	return NETDEV_TX_OK;
+
+out_drop:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
 }
 
 static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
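
The reworked ixgbe_xmit_frame_ring() above reserves its descriptors up front: one per 16KB chunk of the linear data, one per fragment chunk, plus the "+ 3" covering the context descriptor and the two-slot gap called out in the comment. A userspace sketch of that budget with arbitrary demo sizes:

#include <stdio.h>

#define MAX_DATA_PER_TXD (1 << 14)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), MAX_DATA_PER_TXD)

int main(void)
{
        unsigned int headlen = 1514;            /* linear part of the skb */
        unsigned int frags[] = { 32768, 4096 }; /* paged fragments */
        unsigned int count = TXD_USE_COUNT(headlen);
        unsigned int f;

        for (f = 0; f < sizeof(frags) / sizeof(frags[0]); f++)
                count += TXD_USE_COUNT(frags[f]);

        /* + 1 context descriptor + 2 slot gap: the "count + 3" check */
        printf("descriptors to reserve: %u\n", count + 3);      /* 7 */
        return 0;
}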
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index ac99b0458fe2..d99d01e21326 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -605,6 +605,22 @@ static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
 	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
+	/*
+	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+	 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
+	 * and 0x004 otherwise.
+	 */
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
+		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
+		break;
+	case ixgbe_mac_X540:
+		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
+		break;
+	default:
+		break;
+	}
+
 	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
 }
 
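
RTTBCNRM above supplies the MMW compensation that pairs with the RTTBCNRC rate factor; the factor itself, per the RF_INT/RF_DEC fields in the ixgbe_type.h hunk below, is link_speed/tx_rate stored as fixed point with the integer part above RF_INT_SHIFT. A sketch of that packing, assuming the 14-bit fractional layout implied by the shift; the speeds are illustrative:

#include <stdint.h>
#include <stdio.h>

#define RF_INT_SHIFT 14 /* integer part of the rate factor starts at bit 14 */

int main(void)
{
        unsigned int link_speed = 10000; /* Mb/s */
        unsigned int tx_rate = 300;      /* per-VF cap, Mb/s */
        uint32_t rf;

        /* rate factor = link_speed / tx_rate in Q14 fixed point */
        rf = (uint32_t)(((uint64_t)link_speed << RF_INT_SHIFT) / tx_rate);

        printf("rate factor: int=%u frac=0x%04x raw=0x%08x\n",
               rf >> RF_INT_SHIFT, rf & ((1u << RF_INT_SHIFT) - 1), rf);
        return 0;
}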
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 1eefc0c68409..e0d970ebab7a 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -534,7 +534,7 @@
 #define IXGBE_RTTBCNRC_RF_INT_SHIFT	14
 #define IXGBE_RTTBCNRC_RF_INT_MASK	\
 	(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
-
+#define IXGBE_RTTBCNRM	0x04980
 
 /* FCoE DMA Context Registers */
 #define IXGBE_FCPTRL    0x02410 /* FC User Desc. PTR Low */