author		Yi Zou <yi.zou@intel.com>	2009-05-13 09:11:53 -0400
committer	David S. Miller <davem@davemloft.net>	2009-05-17 15:00:08 -0400
commit		332d4a7d981e25d239c5d723a4f35020397dc606 (patch)
tree		2bcbd270cc623746c46d4f707a2355504ff40542 /drivers/net/ixgbe/ixgbe_main.c
parent		d0ed89373f2da1a0d83697d87441e519caf18cf7 (diff)
ixgbe: Implement FCoE Rx side large receive offload feature to 82599
This patch implements the FCoE Rx side offload feature for 82599 in ixgbe_main.c, using the Rx offload infrastructure code added in the previous patch. Large receive offload by Direct Data Placement (DDP) for FCoE is achieved by implementing ndo_fcoe_ddp_setup and ndo_fcoe_ddp_done in net_device_ops via the netdev. It is up to the ULD, i.e., fcoe and libfc, to query and set up large receive offload accordingly through the corresponding netdev upon creating fcoe instances.

Signed-off-by: Yi Zou <yi.zou@intel.com>
Acked-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
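For reference, below is a minimal sketch (not part of this patch) of how a ULD such as fcoe might reach these ops through the netdev. The wrapper names fcoe_sketch_ddp_setup()/fcoe_sketch_ddp_done() are hypothetical; the ndo_fcoe_ddp_setup/ndo_fcoe_ddp_done signatures follow the Rx offload infrastructure patch referenced above.

/* Illustrative only: wrapper names are hypothetical, not part of this patch. */
#include <linux/netdevice.h>
#include <linux/scatterlist.h>

/* Ask the NIC to set up DDP (large receive offload) for exchange id 'xid'. */
static int fcoe_sketch_ddp_setup(struct net_device *netdev, u16 xid,
				 struct scatterlist *sgl, unsigned int sgc)
{
	if (netdev->netdev_ops && netdev->netdev_ops->ndo_fcoe_ddp_setup)
		return netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev, xid,
							      sgl, sgc);
	return 0;	/* no DDP offload available; fall back to normal Rx */
}

/* Release the DDP context once the exchange completes. */
static int fcoe_sketch_ddp_done(struct net_device *netdev, u16 xid)
{
	if (netdev->netdev_ops && netdev->netdev_ops->ndo_fcoe_ddp_done)
		return netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid);
	return 0;
}

In ixgbe, the ops table addition below maps these calls to ixgbe_fcoe_ddp_get()/ixgbe_fcoe_ddp_put(), and the xid range available to the ULD is bounded by fcoe_ddp_xid set in ixgbe_probe().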
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c	16
1 file changed, 16 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index ee80f6f45015..e7c44a3d9c8c 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -784,6 +784,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		total_rx_packets++;

 		skb->protocol = eth_type_trans(skb, adapter->netdev);
+#ifdef IXGBE_FCOE
+		/* if ddp, not passing to ULD unless for FCP_RSP or error */
+		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+			if (!ixgbe_fcoe_ddp(adapter, rx_desc, skb))
+				goto next_desc;
+#endif /* IXGBE_FCOE */
 		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

 next_desc:
@@ -4822,6 +4828,10 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = ixgbe_netpoll,
 #endif
+#ifdef IXGBE_FCOE
+	.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
+	.ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
+#endif /* IXGBE_FCOE */
 };

 /**
@@ -5036,6 +5046,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 		if (!(device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)) {
 			netdev->features |= NETIF_F_FCOE_CRC;
 			netdev->features |= NETIF_F_FSO;
+			netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
 		} else {
 			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
 		}
@@ -5205,6 +5216,11 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 	}

 #endif
+#ifdef IXGBE_FCOE
+	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+		ixgbe_cleanup_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
 	if (netdev->reg_state == NETREG_REGISTERED)
 		unregister_netdev(netdev);
