aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/intel/ixgbe
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2012-05-05 01:32:47 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2012-07-19 21:18:49 -0400
commit7c8ae65a6248518b2775a03129424a7e08fd058a (patch)
tree3ed6d53b83c651f7880176e3c0f30324e5672404 /drivers/net/ethernet/intel/ixgbe
parent5a1ee2704bff078bd58abde38266caa10fbcd714 (diff)
ixgbe: Make FCoE allocation and configuration closer to how rings work
This patch changes the behavior of the FCoE configuration so that it is much closer to how the main body of the ixgbe driver works for ring allocation. The first piece is the ixgbe_fcoe_ddp_enable/disable calls. These allocate the percpu values and if successful set the fcoe_ddp_xid value indicating that we can support DDP. The next piece is the ixgbe_setup/free_ddp_resources calls. These are called on open/close and will allocate and free the DMA pools. Finally ixgbe_configure_fcoe is now just register configuration. It can go through and enable the registers for the FCoE redirection offload, and FIP configuration without any interference from the DDP pool allocation. The net result of all this is twofold. First it adds a certain amount of exception handling. So for example if ixgbe_setup_fcoe_resources fails we will actually generate an error in open and refuse to bring up the interface. Secondly it provides a much more graceful failure case than the previous model which would skip setting up the registers for FCoE on failure to allocate DDP resources leaving no Rx functionality enabled instead of just disabling DDP. Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com> Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com> Tested-by: Ross Brattain <ross.b.brattain@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe')
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c228
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c43
4 files changed, 154 insertions, 122 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index c2365005b545..5a286adc65c0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -691,7 +691,6 @@ extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
691extern int ixgbe_fso(struct ixgbe_ring *tx_ring, 691extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
692 struct ixgbe_tx_buffer *first, 692 struct ixgbe_tx_buffer *first,
693 u8 *hdr_len); 693 u8 *hdr_len);
694extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
695extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, 694extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
696 union ixgbe_adv_rx_desc *rx_desc, 695 union ixgbe_adv_rx_desc *rx_desc,
697 struct sk_buff *skb); 696 struct sk_buff *skb);
@@ -700,6 +699,8 @@ extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
700extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, 699extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
701 struct scatterlist *sgl, unsigned int sgc); 700 struct scatterlist *sgl, unsigned int sgc);
702extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); 701extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
702extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
703extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
703extern int ixgbe_fcoe_enable(struct net_device *netdev); 704extern int ixgbe_fcoe_enable(struct net_device *netdev);
704extern int ixgbe_fcoe_disable(struct net_device *netdev); 705extern int ixgbe_fcoe_disable(struct net_device *netdev);
705#ifdef CONFIG_IXGBE_DCB 706#ifdef CONFIG_IXGBE_DCB
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index e7c463c7b6a1..e79ba3927344 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -578,17 +578,6 @@ static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
578 ddp_pool->pool = NULL; 578 ddp_pool->pool = NULL;
579} 579}
580 580
581static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
582{
583 unsigned int cpu;
584
585 for_each_possible_cpu(cpu)
586 ixgbe_fcoe_dma_pool_free(fcoe, cpu);
587
588 free_percpu(fcoe->ddp_pool);
589 fcoe->ddp_pool = NULL;
590}
591
592static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, 581static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
593 struct device *dev, 582 struct device *dev,
594 unsigned int cpu) 583 unsigned int cpu)
@@ -612,21 +601,6 @@ static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
612 return 0; 601 return 0;
613} 602}
614 603
615static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
616{
617 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
618 struct device *dev = &adapter->pdev->dev;
619 unsigned int cpu;
620
621 fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
622 if (!fcoe->ddp_pool)
623 return;
624
625 /* allocate pci pool for each cpu */
626 for_each_possible_cpu(cpu)
627 ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
628}
629
630/** 604/**
631 * ixgbe_configure_fcoe - configures registers for fcoe at start 605 * ixgbe_configure_fcoe - configures registers for fcoe at start
632 * @adapter: ptr to ixgbe adapter 606 * @adapter: ptr to ixgbe adapter
@@ -637,39 +611,14 @@ static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
637 */ 611 */
638void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) 612void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
639{ 613{
640 int i, fcoe_q, fcoe_i; 614 struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
641 struct ixgbe_hw *hw = &adapter->hw; 615 struct ixgbe_hw *hw = &adapter->hw;
642 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 616 int i, fcoe_q, fcoe_i;
643 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
644 u32 etqf; 617 u32 etqf;
645 618
646 if (!fcoe->ddp_pool) { 619 /* leave registers unconfigured if FCoE is disabled */
647 spin_lock_init(&fcoe->lock); 620 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
648 621 return;
649 ixgbe_fcoe_ddp_pools_alloc(adapter);
650 if (!fcoe->ddp_pool) {
651 e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
652 return;
653 }
654
655 /* Extra buffer to be shared by all DDPs for HW work around */
656 fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
657 if (fcoe->extra_ddp_buffer == NULL) {
658 e_err(drv, "failed to allocated extra DDP buffer\n");
659 goto out_ddp_pools;
660 }
661
662 fcoe->extra_ddp_buffer_dma =
663 dma_map_single(&adapter->pdev->dev,
664 fcoe->extra_ddp_buffer,
665 IXGBE_FCBUFF_MIN,
666 DMA_FROM_DEVICE);
667 if (dma_mapping_error(&adapter->pdev->dev,
668 fcoe->extra_ddp_buffer_dma)) {
669 e_err(drv, "failed to map extra DDP buffer\n");
670 goto out_extra_ddp_buffer;
671 }
672 }
673 622
674 /* Enable L2 EtherType filter for FCoE, necessary for FCoE Rx CRC */ 623 /* Enable L2 EtherType filter for FCoE, necessary for FCoE Rx CRC */
675 etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN; 624 etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN;
@@ -682,7 +631,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
682 631
683 /* Use one or more Rx queues for FCoE by redirection table */ 632 /* Use one or more Rx queues for FCoE by redirection table */
684 for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { 633 for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
685 fcoe_i = f->offset + (i % f->indices); 634 fcoe_i = fcoe->offset + (i % fcoe->indices);
686 fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; 635 fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
687 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; 636 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
688 IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); 637 IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
@@ -698,7 +647,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
698 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf); 647 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);
699 648
700 /* Send FIP frames to the first FCoE queue */ 649 /* Send FIP frames to the first FCoE queue */
701 fcoe_q = adapter->rx_ring[f->offset]->reg_idx; 650 fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
702 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), 651 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
703 IXGBE_ETQS_QUEUE_EN | 652 IXGBE_ETQS_QUEUE_EN |
704 (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); 653 (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
@@ -707,40 +656,122 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
707 IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, 656 IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
708 IXGBE_FCRXCTRL_FCCRCBO | 657 IXGBE_FCRXCTRL_FCCRCBO |
709 (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); 658 (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
710
711 return;
712out_extra_ddp_buffer:
713 kfree(fcoe->extra_ddp_buffer);
714out_ddp_pools:
715 ixgbe_fcoe_ddp_pools_free(fcoe);
716} 659}
717 660
718/** 661/**
719 * ixgbe_cleanup_fcoe - release all fcoe ddp context resources 662 * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
720 * @adapter : ixgbe adapter 663 * @adapter : ixgbe adapter
721 * 664 *
722 * Cleans up outstanding ddp context resources 665 * Cleans up outstanding ddp context resources
723 * 666 *
724 * Returns : none 667 * Returns : none
725 */ 668 */
726void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) 669void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
727{ 670{
728 int i;
729 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 671 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
672 int cpu, i;
730 673
674 /* do nothing if no DDP pools were allocated */
731 if (!fcoe->ddp_pool) 675 if (!fcoe->ddp_pool)
732 return; 676 return;
733 677
734 for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) 678 for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
735 ixgbe_fcoe_ddp_put(adapter->netdev, i); 679 ixgbe_fcoe_ddp_put(adapter->netdev, i);
736 680
681 for_each_possible_cpu(cpu)
682 ixgbe_fcoe_dma_pool_free(fcoe, cpu);
683
737 dma_unmap_single(&adapter->pdev->dev, 684 dma_unmap_single(&adapter->pdev->dev,
738 fcoe->extra_ddp_buffer_dma, 685 fcoe->extra_ddp_buffer_dma,
739 IXGBE_FCBUFF_MIN, 686 IXGBE_FCBUFF_MIN,
740 DMA_FROM_DEVICE); 687 DMA_FROM_DEVICE);
741 kfree(fcoe->extra_ddp_buffer); 688 kfree(fcoe->extra_ddp_buffer);
742 689
743 ixgbe_fcoe_ddp_pools_free(fcoe); 690 fcoe->extra_ddp_buffer = NULL;
691 fcoe->extra_ddp_buffer_dma = 0;
692}
693
694/**
695 * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
696 * @adapter: ixgbe adapter
697 *
698 * Sets up ddp context resources
699 *
700 * Returns : 0 indicates success or -ENOMEM on failure
701 */
702int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
703{
704 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
705 struct device *dev = &adapter->pdev->dev;
706 void *buffer;
707 dma_addr_t dma;
708 unsigned int cpu;
709
710 /* do nothing if no DDP pools were allocated */
711 if (!fcoe->ddp_pool)
712 return 0;
713
714 /* Extra buffer to be shared by all DDPs for HW work around */
715 buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
716 if (!buffer) {
717 e_err(drv, "failed to allocate extra DDP buffer\n");
718 return -ENOMEM;
719 }
720
721 dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
722 if (dma_mapping_error(dev, dma)) {
723 e_err(drv, "failed to map extra DDP buffer\n");
724 kfree(buffer);
725 return -ENOMEM;
726 }
727
728 fcoe->extra_ddp_buffer = buffer;
729 fcoe->extra_ddp_buffer_dma = dma;
730
731 /* allocate pci pool for each cpu */
732 for_each_possible_cpu(cpu) {
733 int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
734 if (!err)
735 continue;
736
737 e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
738 ixgbe_free_fcoe_ddp_resources(adapter);
739 return -ENOMEM;
740 }
741
742 return 0;
743}
744
745static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
746{
747 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
748
749 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
750 return -EINVAL;
751
752 fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
753
754 if (!fcoe->ddp_pool) {
755 e_err(drv, "failed to allocate percpu DDP resources\n");
756 return -ENOMEM;
757 }
758
759 adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
760
761 return 0;
762}
763
764static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
765{
766 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
767
768 adapter->netdev->fcoe_ddp_xid = 0;
769
770 if (!fcoe->ddp_pool)
771 return;
772
773 free_percpu(fcoe->ddp_pool);
774 fcoe->ddp_pool = NULL;
744} 775}
745 776
746/** 777/**
@@ -753,40 +784,37 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
753 */ 784 */
754int ixgbe_fcoe_enable(struct net_device *netdev) 785int ixgbe_fcoe_enable(struct net_device *netdev)
755{ 786{
756 int rc = -EINVAL;
757 struct ixgbe_adapter *adapter = netdev_priv(netdev); 787 struct ixgbe_adapter *adapter = netdev_priv(netdev);
758 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 788 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
759 789
790 atomic_inc(&fcoe->refcnt);
760 791
761 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) 792 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
762 goto out_enable; 793 return -EINVAL;
763 794
764 atomic_inc(&fcoe->refcnt);
765 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 795 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
766 goto out_enable; 796 return -EINVAL;
767 797
768 e_info(drv, "Enabling FCoE offload features.\n"); 798 e_info(drv, "Enabling FCoE offload features.\n");
769 if (netif_running(netdev)) 799 if (netif_running(netdev))
770 netdev->netdev_ops->ndo_stop(netdev); 800 netdev->netdev_ops->ndo_stop(netdev);
771 801
772 ixgbe_clear_interrupt_scheme(adapter); 802 /* Allocate per CPU memory to track DDP pools */
803 ixgbe_fcoe_ddp_enable(adapter);
773 804
805 /* enable FCoE and notify stack */
774 adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; 806 adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
775 adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE; 807 netdev->features |= NETIF_F_FSO | NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU;
776 netdev->features |= NETIF_F_FCOE_CRC; 808 netdev_features_change(netdev);
777 netdev->features |= NETIF_F_FSO;
778 netdev->features |= NETIF_F_FCOE_MTU;
779 netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
780 809
810 /* release existing queues and reallocate them */
811 ixgbe_clear_interrupt_scheme(adapter);
781 ixgbe_init_interrupt_scheme(adapter); 812 ixgbe_init_interrupt_scheme(adapter);
782 netdev_features_change(netdev);
783 813
784 if (netif_running(netdev)) 814 if (netif_running(netdev))
785 netdev->netdev_ops->ndo_open(netdev); 815 netdev->netdev_ops->ndo_open(netdev);
786 rc = 0;
787 816
788out_enable: 817 return 0;
789 return rc;
790} 818}
791 819
792/** 820/**
@@ -799,41 +827,37 @@ out_enable:
799 */ 827 */
800int ixgbe_fcoe_disable(struct net_device *netdev) 828int ixgbe_fcoe_disable(struct net_device *netdev)
801{ 829{
802 int rc = -EINVAL;
803 struct ixgbe_adapter *adapter = netdev_priv(netdev); 830 struct ixgbe_adapter *adapter = netdev_priv(netdev);
804 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
805 831
806 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) 832 if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
807 goto out_disable; 833 return -EINVAL;
808 834
809 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 835 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
810 goto out_disable; 836 return -EINVAL;
811
812 if (!atomic_dec_and_test(&fcoe->refcnt))
813 goto out_disable;
814 837
815 e_info(drv, "Disabling FCoE offload features.\n"); 838 e_info(drv, "Disabling FCoE offload features.\n");
816 netdev->features &= ~NETIF_F_FCOE_CRC;
817 netdev->features &= ~NETIF_F_FSO;
818 netdev->features &= ~NETIF_F_FCOE_MTU;
819 netdev->fcoe_ddp_xid = 0;
820 netdev_features_change(netdev);
821
822 if (netif_running(netdev)) 839 if (netif_running(netdev))
823 netdev->netdev_ops->ndo_stop(netdev); 840 netdev->netdev_ops->ndo_stop(netdev);
824 841
825 ixgbe_clear_interrupt_scheme(adapter); 842 /* Free per CPU memory to track DDP pools */
843 ixgbe_fcoe_ddp_disable(adapter);
844
845 /* disable FCoE and notify stack */
826 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; 846 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
827 adapter->ring_feature[RING_F_FCOE].indices = 0; 847 netdev->features &= ~(NETIF_F_FCOE_CRC |
828 ixgbe_cleanup_fcoe(adapter); 848 NETIF_F_FSO |
849 NETIF_F_FCOE_MTU);
850
851 netdev_features_change(netdev);
852
853 /* release existing queues and reallocate them */
854 ixgbe_clear_interrupt_scheme(adapter);
829 ixgbe_init_interrupt_scheme(adapter); 855 ixgbe_init_interrupt_scheme(adapter);
830 856
831 if (netif_running(netdev)) 857 if (netif_running(netdev))
832 netdev->netdev_ops->ndo_open(netdev); 858 netdev->netdev_ops->ndo_open(netdev);
833 rc = 0;
834 859
835out_disable: 860 return 0;
836 return rc;
837} 861}
838 862
839/** 863/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index 5d028739fe3f..bf724da99375 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -77,7 +77,7 @@ struct ixgbe_fcoe {
77 atomic_t refcnt; 77 atomic_t refcnt;
78 spinlock_t lock; 78 spinlock_t lock;
79 struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; 79 struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
80 unsigned char *extra_ddp_buffer; 80 void *extra_ddp_buffer;
81 dma_addr_t extra_ddp_buffer_dma; 81 dma_addr_t extra_ddp_buffer_dma;
82 unsigned long mode; 82 unsigned long mode;
83#ifdef CONFIG_IXGBE_DCB 83#ifdef CONFIG_IXGBE_DCB
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c66625945534..e006c05580ec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3807,12 +3807,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
3807 ixgbe_set_rx_mode(adapter->netdev); 3807 ixgbe_set_rx_mode(adapter->netdev);
3808 ixgbe_restore_vlan(adapter); 3808 ixgbe_restore_vlan(adapter);
3809 3809
3810#ifdef IXGBE_FCOE
3811 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
3812 ixgbe_configure_fcoe(adapter);
3813
3814#endif /* IXGBE_FCOE */
3815
3816 switch (hw->mac.type) { 3810 switch (hw->mac.type) {
3817 case ixgbe_mac_82599EB: 3811 case ixgbe_mac_82599EB:
3818 case ixgbe_mac_X540: 3812 case ixgbe_mac_X540:
@@ -3842,6 +3836,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
3842 3836
3843 ixgbe_configure_virtualization(adapter); 3837 ixgbe_configure_virtualization(adapter);
3844 3838
3839#ifdef IXGBE_FCOE
3840 /* configure FCoE L2 filters, redirection table, and Rx control */
3841 ixgbe_configure_fcoe(adapter);
3842
3843#endif /* IXGBE_FCOE */
3845 ixgbe_configure_tx(adapter); 3844 ixgbe_configure_tx(adapter);
3846 ixgbe_configure_rx(adapter); 3845 ixgbe_configure_rx(adapter);
3847} 3846}
@@ -4434,6 +4433,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4434 break; 4433 break;
4435 } 4434 }
4436 4435
4436#ifdef IXGBE_FCOE
4437 /* FCoE support exists, always init the FCoE lock */
4438 spin_lock_init(&adapter->fcoe.lock);
4439
4440#endif
4437 /* n-tuple support exists, always init our spinlock */ 4441 /* n-tuple support exists, always init our spinlock */
4438 spin_lock_init(&adapter->fdir_perfect_lock); 4442 spin_lock_init(&adapter->fdir_perfect_lock);
4439 4443
@@ -4662,7 +4666,11 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
4662 goto err_setup_rx; 4666 goto err_setup_rx;
4663 } 4667 }
4664 4668
4665 return 0; 4669#ifdef IXGBE_FCOE
4670 err = ixgbe_setup_fcoe_ddp_resources(adapter);
4671 if (!err)
4672#endif
4673 return 0;
4666err_setup_rx: 4674err_setup_rx:
4667 /* rewind the index freeing the rings as we go */ 4675 /* rewind the index freeing the rings as we go */
4668 while (i--) 4676 while (i--)
@@ -4741,6 +4749,10 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
4741{ 4749{
4742 int i; 4750 int i;
4743 4751
4752#ifdef IXGBE_FCOE
4753 ixgbe_free_fcoe_ddp_resources(adapter);
4754
4755#endif
4744 for (i = 0; i < adapter->num_rx_queues; i++) 4756 for (i = 0; i < adapter->num_rx_queues; i++)
4745 if (adapter->rx_ring[i]->desc) 4757 if (adapter->rx_ring[i]->desc)
4746 ixgbe_free_rx_resources(adapter->rx_ring[i]); 4758 ixgbe_free_rx_resources(adapter->rx_ring[i]);
@@ -7235,11 +7247,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7235 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) 7247 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
7236 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; 7248 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
7237 } 7249 }
7238 } 7250
7239 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { 7251 adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
7240 netdev->vlan_features |= NETIF_F_FCOE_CRC; 7252
7241 netdev->vlan_features |= NETIF_F_FSO; 7253 netdev->vlan_features |= NETIF_F_FSO |
7242 netdev->vlan_features |= NETIF_F_FCOE_MTU; 7254 NETIF_F_FCOE_CRC |
7255 NETIF_F_FCOE_MTU;
7243 } 7256 }
7244#endif /* IXGBE_FCOE */ 7257#endif /* IXGBE_FCOE */
7245 if (pci_using_dac) { 7258 if (pci_using_dac) {
@@ -7436,12 +7449,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
7436 ixgbe_sysfs_exit(adapter); 7449 ixgbe_sysfs_exit(adapter);
7437#endif /* CONFIG_IXGBE_HWMON */ 7450#endif /* CONFIG_IXGBE_HWMON */
7438 7451
7439#ifdef IXGBE_FCOE
7440 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
7441 ixgbe_cleanup_fcoe(adapter);
7442
7443#endif /* IXGBE_FCOE */
7444
7445 /* remove the added san mac */ 7452 /* remove the added san mac */
7446 ixgbe_del_sanmac_netdev(netdev); 7453 ixgbe_del_sanmac_netdev(netdev);
7447 7454