aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2014-08-02 22:41:24 -0400
committerDavid S. Miller <davem@davemloft.net>2014-08-02 22:41:24 -0400
commitd39a9ffce7f14b494391da982b8cefa311dae0f6 (patch)
tree6f6da2803db73ee34b08ccab20ad3d08dcb4693c
parenta4f090fda308c040039f060edf9a4620ce27ffed (diff)
parent8bb1a540450c3dbd075491ea43772ac8a7ddec46 (diff)
Merge branch 'intel-next'
Aaron Brown says: ==================== Intel Wired LAN Driver Updates This series contains updates to the i40e and i40evf drivers. Vasu adds FCOE support, build options and a documentation pointer to i40e. Shannon exposes a Firmware API request used to do register writes on the driver's behalf and disables local loopback on VMDQ VSI in order to stop the VEB from echoing the VMDQ packets back at the VSI. Ashish corrects the vf_id offset for virtchnl messages in the case of multiple PFs, removes support for vf unicast promiscuos mode to disallow VFs from receiving traffic intended for another VF, updates the vfr_stat state check to handle the existing and future mechanism and adds an adapter state check to prevent re-arming the watchdog timer after i40evf_remove has been called and the timer has been deleted. Serey fixes an issue where a guest OS would panic when removing the vf driver while the device is being reset due to an attempt to clean a non initialized mac_filter_list. Akeem makes a minor comment change. Jessie changes an instance of sprintf to snprintf that was missed when the driver was converted to use snprintf everywhere. Mitch plugs a few memory leaks. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--Documentation/networking/i40e.txt7
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h62
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c56
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c19
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.c1561
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.h128
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c269
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c37
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h138
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c34
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c59
17 files changed, 2393 insertions, 33 deletions
diff --git a/Documentation/networking/i40e.txt b/Documentation/networking/i40e.txt
index f737273c6dc1..a251bf4fe9c9 100644
--- a/Documentation/networking/i40e.txt
+++ b/Documentation/networking/i40e.txt
@@ -69,8 +69,11 @@ Additional Configurations
69 69
70 FCoE 70 FCoE
71 ---- 71 ----
72 Fiber Channel over Ethernet (FCoE) hardware offload is not currently 72 The driver supports Fiber Channel over Ethernet (FCoE) and Data Center
73 supported. 73 Bridging (DCB) functionality. Configuring DCB and FCoE is outside the scope
74 of this driver doc. Refer to http://www.open-fcoe.org/ for FCoE project
75 information and http://www.open-lldp.org/ or email list
76 e1000-eedc@lists.sourceforge.net for DCB information.
74 77
75 MAC and VLAN anti-spoofing feature 78 MAC and VLAN anti-spoofing feature
76 ---------------------------------- 79 ----------------------------------
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index d9eb80acac4f..4b94ddb29c24 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -44,3 +44,4 @@ i40e-objs := i40e_main.o \
44 i40e_virtchnl_pf.o 44 i40e_virtchnl_pf.o
45 45
46i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o 46i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
47i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 29cd81ae29f4..801da392a20e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -54,6 +54,9 @@
54#include <linux/ptp_clock_kernel.h> 54#include <linux/ptp_clock_kernel.h>
55#include "i40e_type.h" 55#include "i40e_type.h"
56#include "i40e_prototype.h" 56#include "i40e_prototype.h"
57#ifdef I40E_FCOE
58#include "i40e_fcoe.h"
59#endif
57#include "i40e_virtchnl.h" 60#include "i40e_virtchnl.h"
58#include "i40e_virtchnl_pf.h" 61#include "i40e_virtchnl_pf.h"
59#include "i40e_txrx.h" 62#include "i40e_txrx.h"
@@ -79,6 +82,10 @@
79#define I40E_MAX_QUEUES_PER_TC 64 /* should be a power of 2 */ 82#define I40E_MAX_QUEUES_PER_TC 64 /* should be a power of 2 */
80#define I40E_FDIR_RING 0 83#define I40E_FDIR_RING 0
81#define I40E_FDIR_RING_COUNT 32 84#define I40E_FDIR_RING_COUNT 32
85#ifdef I40E_FCOE
86#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */
87#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */
88#endif /* I40E_FCOE */
82#define I40E_MAX_AQ_BUF_SIZE 4096 89#define I40E_MAX_AQ_BUF_SIZE 4096
83#define I40E_AQ_LEN 32 90#define I40E_AQ_LEN 32
84#define I40E_AQ_WORK_LIMIT 16 91#define I40E_AQ_WORK_LIMIT 16
@@ -225,6 +232,10 @@ struct i40e_pf {
225 u16 num_vmdq_msix; /* num queue vectors per vmdq pool */ 232 u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
226 u16 num_req_vfs; /* num vfs requested for this vf */ 233 u16 num_req_vfs; /* num vfs requested for this vf */
227 u16 num_vf_qps; /* num queue pairs per vf */ 234 u16 num_vf_qps; /* num queue pairs per vf */
235#ifdef I40E_FCOE
236 u16 num_fcoe_qps; /* num fcoe queues this pf has set up */
237 u16 num_fcoe_msix; /* num queue vectors per fcoe pool */
238#endif /* I40E_FCOE */
228 u16 num_lan_qps; /* num lan queues this pf has set up */ 239 u16 num_lan_qps; /* num lan queues this pf has set up */
229 u16 num_lan_msix; /* num queue vectors for the base pf vsi */ 240 u16 num_lan_msix; /* num queue vectors for the base pf vsi */
230 int queues_left; /* queues left unclaimed */ 241 int queues_left; /* queues left unclaimed */
@@ -265,6 +276,9 @@ struct i40e_pf {
265#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7) 276#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7)
266#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8) 277#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8)
267#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9) 278#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9)
279#ifdef I40E_FCOE
280#define I40E_FLAG_FCOE_ENABLED (u64)(1 << 11)
281#endif /* I40E_FCOE */
268#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12) 282#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12)
269#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13) 283#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13)
270#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14) 284#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14)
@@ -286,6 +300,10 @@ struct i40e_pf {
286 /* tracks features that get auto disabled by errors */ 300 /* tracks features that get auto disabled by errors */
287 u64 auto_disable_flags; 301 u64 auto_disable_flags;
288 302
303#ifdef I40E_FCOE
304 struct i40e_fcoe fcoe;
305
306#endif /* I40E_FCOE */
289 bool stat_offsets_loaded; 307 bool stat_offsets_loaded;
290 struct i40e_hw_port_stats stats; 308 struct i40e_hw_port_stats stats;
291 struct i40e_hw_port_stats stats_offsets; 309 struct i40e_hw_port_stats stats_offsets;
@@ -408,6 +426,11 @@ struct i40e_vsi {
408 struct rtnl_link_stats64 net_stats_offsets; 426 struct rtnl_link_stats64 net_stats_offsets;
409 struct i40e_eth_stats eth_stats; 427 struct i40e_eth_stats eth_stats;
410 struct i40e_eth_stats eth_stats_offsets; 428 struct i40e_eth_stats eth_stats_offsets;
429#ifdef I40E_FCOE
430 struct i40e_fcoe_stats fcoe_stats;
431 struct i40e_fcoe_stats fcoe_stats_offsets;
432 bool fcoe_stat_offsets_loaded;
433#endif
411 u32 tx_restart; 434 u32 tx_restart;
412 u32 tx_busy; 435 u32 tx_busy;
413 u32 rx_buf_failed; 436 u32 rx_buf_failed;
@@ -598,6 +621,11 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
598int i40e_vsi_release(struct i40e_vsi *vsi); 621int i40e_vsi_release(struct i40e_vsi *vsi);
599struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type, 622struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
600 struct i40e_vsi *start_vsi); 623 struct i40e_vsi *start_vsi);
624#ifdef I40E_FCOE
625void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
626 struct i40e_vsi_context *ctxt,
627 u8 enabled_tc, bool is_add);
628#endif
601int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable); 629int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
602int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count); 630int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
603struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, 631struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
@@ -624,7 +652,21 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
624void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector); 652void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector);
625void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); 653void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
626void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf); 654void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
655#ifdef I40E_FCOE
656struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
657 struct net_device *netdev,
658 struct rtnl_link_stats64 *storage);
659int i40e_set_mac(struct net_device *netdev, void *p);
660void i40e_set_rx_mode(struct net_device *netdev);
661#endif
627int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 662int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
663#ifdef I40E_FCOE
664void i40e_tx_timeout(struct net_device *netdev);
665int i40e_vlan_rx_add_vid(struct net_device *netdev,
666 __always_unused __be16 proto, u16 vid);
667int i40e_vlan_rx_kill_vid(struct net_device *netdev,
668 __always_unused __be16 proto, u16 vid);
669#endif
628int i40e_vsi_open(struct i40e_vsi *vsi); 670int i40e_vsi_open(struct i40e_vsi *vsi);
629void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); 671void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
630int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); 672int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
@@ -634,6 +676,26 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
634bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); 676bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
635struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, 677struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
636 bool is_vf, bool is_netdev); 678 bool is_vf, bool is_netdev);
679#ifdef I40E_FCOE
680int i40e_open(struct net_device *netdev);
681int i40e_close(struct net_device *netdev);
682int i40e_setup_tc(struct net_device *netdev, u8 tc);
683void i40e_netpoll(struct net_device *netdev);
684int i40e_fcoe_enable(struct net_device *netdev);
685int i40e_fcoe_disable(struct net_device *netdev);
686int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt);
687u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf);
688void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi);
689void i40e_fcoe_vsi_setup(struct i40e_pf *pf);
690int i40e_init_pf_fcoe(struct i40e_pf *pf);
691int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi);
692void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi);
693int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
694 union i40e_rx_desc *rx_desc,
695 struct sk_buff *skb);
696void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
697 union i40e_rx_desc *rx_desc, u8 prog_id);
698#endif /* I40E_FCOE */
637void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); 699void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
638#ifdef CONFIG_I40E_DCB 700#ifdef CONFIG_I40E_DCB
639void i40e_dcbnl_flush_apps(struct i40e_pf *pf, 701void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index f4e502a305ff..df43e7c6777c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -709,6 +709,33 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
709 709
710 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val); 710 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
711} 711}
712#ifdef I40E_FCOE
713
714/**
715 * i40e_get_san_mac_addr - get SAN MAC address
716 * @hw: pointer to the HW structure
717 * @mac_addr: pointer to SAN MAC address
718 *
719 * Reads the adapter's SAN MAC address from NVM
720 **/
721i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
722{
723 struct i40e_aqc_mac_address_read_data addrs;
724 i40e_status status;
725 u16 flags = 0;
726
727 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
728 if (status)
729 return status;
730
731 if (flags & I40E_AQC_SAN_ADDR_VALID)
732 memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac));
733 else
734 status = I40E_ERR_INVALID_MAC_ADDR;
735
736 return status;
737}
738#endif
712 739
713/** 740/**
714 * i40e_get_media_type - Gets media type 741 * i40e_get_media_type - Gets media type
@@ -1975,6 +2002,35 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
1975} 2002}
1976 2003
1977/** 2004/**
2005 * i40e_aq_debug_write_register
2006 * @hw: pointer to the hw struct
2007 * @reg_addr: register address
2008 * @reg_val: register value
2009 * @cmd_details: pointer to command details structure or NULL
2010 *
2011 * Write to a register using the admin queue commands
2012 **/
2013i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
2014 u32 reg_addr, u64 reg_val,
2015 struct i40e_asq_cmd_details *cmd_details)
2016{
2017 struct i40e_aq_desc desc;
2018 struct i40e_aqc_debug_reg_read_write *cmd =
2019 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2020 i40e_status status;
2021
2022 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
2023
2024 cmd->address = cpu_to_le32(reg_addr);
2025 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
2026 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
2027
2028 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2029
2030 return status;
2031}
2032
2033/**
1978 * i40e_aq_set_hmc_resource_profile 2034 * i40e_aq_set_hmc_resource_profile
1979 * @hw: pointer to the hw struct 2035 * @hw: pointer to the hw struct
1980 * @profile: type of profile the HMC is to be set as 2036 * @profile: type of profile the HMC is to be set as
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 9eaed04618a3..5a0cabeb35ed 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -697,6 +697,25 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
697 vsi->bw_ets_limit_credits[i], 697 vsi->bw_ets_limit_credits[i],
698 vsi->bw_ets_max_quanta[i]); 698 vsi->bw_ets_max_quanta[i]);
699 } 699 }
700#ifdef I40E_FCOE
701 if (vsi->type == I40E_VSI_FCOE) {
702 dev_info(&pf->pdev->dev,
703 " fcoe_stats: rx_packets = %llu, rx_dwords = %llu, rx_dropped = %llu\n",
704 vsi->fcoe_stats.rx_fcoe_packets,
705 vsi->fcoe_stats.rx_fcoe_dwords,
706 vsi->fcoe_stats.rx_fcoe_dropped);
707 dev_info(&pf->pdev->dev,
708 " fcoe_stats: tx_packets = %llu, tx_dwords = %llu\n",
709 vsi->fcoe_stats.tx_fcoe_packets,
710 vsi->fcoe_stats.tx_fcoe_dwords);
711 dev_info(&pf->pdev->dev,
712 " fcoe_stats: bad_crc = %llu, last_error = %llu\n",
713 vsi->fcoe_stats.fcoe_bad_fccrc,
714 vsi->fcoe_stats.fcoe_last_error);
715 dev_info(&pf->pdev->dev, " fcoe_stats: ddp_count = %llu\n",
716 vsi->fcoe_stats.fcoe_ddp_count);
717 }
718#endif
700} 719}
701 720
702/** 721/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9c93ff28d4aa..681a9e81ff51 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -155,6 +155,19 @@ static struct i40e_stats i40e_gstrings_stats[] = {
155 I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count), 155 I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
156}; 156};
157 157
158#ifdef I40E_FCOE
159static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
160 I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc),
161 I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped),
162 I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets),
163 I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords),
164 I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count),
165 I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error),
166 I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets),
167 I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords),
168};
169
170#endif /* I40E_FCOE */
158#define I40E_QUEUE_STATS_LEN(n) \ 171#define I40E_QUEUE_STATS_LEN(n) \
159 (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \ 172 (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
160 * 2 /* Tx and Rx together */ \ 173 * 2 /* Tx and Rx together */ \
@@ -162,9 +175,17 @@ static struct i40e_stats i40e_gstrings_stats[] = {
162#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) 175#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
163#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats) 176#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
164#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats) 177#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats)
178#ifdef I40E_FCOE
179#define I40E_FCOE_STATS_LEN ARRAY_SIZE(i40e_gstrings_fcoe_stats)
180#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
181 I40E_FCOE_STATS_LEN + \
182 I40E_MISC_STATS_LEN + \
183 I40E_QUEUE_STATS_LEN((n)))
184#else
165#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ 185#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
166 I40E_MISC_STATS_LEN + \ 186 I40E_MISC_STATS_LEN + \
167 I40E_QUEUE_STATS_LEN((n))) 187 I40E_QUEUE_STATS_LEN((n)))
188#endif /* I40E_FCOE */
168#define I40E_PFC_STATS_LEN ( \ 189#define I40E_PFC_STATS_LEN ( \
169 (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \ 190 (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
170 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \ 191 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
@@ -1112,6 +1133,13 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
1112 data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat == 1133 data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
1113 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1134 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1114 } 1135 }
1136#ifdef I40E_FCOE
1137 for (j = 0; j < I40E_FCOE_STATS_LEN; j++) {
1138 p = (char *)vsi + i40e_gstrings_fcoe_stats[j].stat_offset;
1139 data[i++] = (i40e_gstrings_fcoe_stats[j].sizeof_stat ==
1140 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1141 }
1142#endif
1115 rcu_read_lock(); 1143 rcu_read_lock();
1116 for (j = 0; j < vsi->num_queue_pairs; j++) { 1144 for (j = 0; j < vsi->num_queue_pairs; j++) {
1117 tx_ring = ACCESS_ONCE(vsi->tx_rings[j]); 1145 tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
@@ -1193,6 +1221,13 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
1193 i40e_gstrings_misc_stats[i].stat_string); 1221 i40e_gstrings_misc_stats[i].stat_string);
1194 p += ETH_GSTRING_LEN; 1222 p += ETH_GSTRING_LEN;
1195 } 1223 }
1224#ifdef I40E_FCOE
1225 for (i = 0; i < I40E_FCOE_STATS_LEN; i++) {
1226 snprintf(p, ETH_GSTRING_LEN, "%s",
1227 i40e_gstrings_fcoe_stats[i].stat_string);
1228 p += ETH_GSTRING_LEN;
1229 }
1230#endif
1196 for (i = 0; i < vsi->num_queue_pairs; i++) { 1231 for (i = 0; i < vsi->num_queue_pairs; i++) {
1197 snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i); 1232 snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
1198 p += ETH_GSTRING_LEN; 1233 p += ETH_GSTRING_LEN;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
new file mode 100644
index 000000000000..6938fc1ad877
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -0,0 +1,1561 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 ******************************************************************************/
26
27
28#include <linux/if_ether.h>
29#include <scsi/scsi_cmnd.h>
30#include <scsi/scsi_device.h>
31#include <scsi/fc/fc_fs.h>
32#include <scsi/fc/fc_fip.h>
33#include <scsi/fc/fc_fcoe.h>
34#include <scsi/libfc.h>
35#include <scsi/libfcoe.h>
36
37#include "i40e.h"
38#include "i40e_fcoe.h"
39
40/**
41 * i40e_rx_is_fip - returns true if the rx packet type is FIP
42 * @ptype: the packet type field from rx descriptor write-back
43 **/
44static inline bool i40e_rx_is_fip(u16 ptype)
45{
46 return ptype == I40E_RX_PTYPE_L2_FIP_PAY2;
47}
48
49/**
50 * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE
51 * @ptype: the packet type field from rx descriptor write-back
52 **/
53static inline bool i40e_rx_is_fcoe(u16 ptype)
54{
55 return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
56 (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
57}
58
59/**
60 * i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF
61 * @sof: the FCoE start of frame delimiter
62 **/
63static inline bool i40e_fcoe_sof_is_class2(u8 sof)
64{
65 return (sof == FC_SOF_I2) || (sof == FC_SOF_N2);
66}
67
68/**
69 * i40e_fcoe_sof_is_class3 - returns true if this is a FC Class 3 SOF
70 * @sof: the FCoE start of frame delimiter
71 **/
72static inline bool i40e_fcoe_sof_is_class3(u8 sof)
73{
74 return (sof == FC_SOF_I3) || (sof == FC_SOF_N3);
75}
76
77/**
78 * i40e_fcoe_sof_is_supported - returns true if the FC SOF is supported by HW
79 * @sof: the input SOF value from the frame
80 **/
81static inline bool i40e_fcoe_sof_is_supported(u8 sof)
82{
83 return i40e_fcoe_sof_is_class2(sof) ||
84 i40e_fcoe_sof_is_class3(sof);
85}
86
87/**
88 * i40e_fcoe_fc_sof - pull the SOF from FCoE header in the frame
89 * @skb: the frame whose EOF is to be pulled from
90 **/
91static inline int i40e_fcoe_fc_sof(struct sk_buff *skb, u8 *sof)
92{
93 *sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
94
95 if (!i40e_fcoe_sof_is_supported(*sof))
96 return -EINVAL;
97 return 0;
98}
99
100/**
101 * i40e_fcoe_eof_is_supported - returns true if the EOF is supported by HW
102 * @eof: the input EOF value from the frame
103 **/
104static inline bool i40e_fcoe_eof_is_supported(u8 eof)
105{
106 return (eof == FC_EOF_N) || (eof == FC_EOF_T) ||
107 (eof == FC_EOF_NI) || (eof == FC_EOF_A);
108}
109
110/**
111 * i40e_fcoe_fc_eof - pull EOF from FCoE trailer in the frame
112 * @skb: the frame whose EOF is to be pulled from
113 **/
114static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof)
115{
116 /* the first byte of the last dword is EOF */
117 skb_copy_bits(skb, skb->len - 4, eof, 1);
118
119 if (!i40e_fcoe_eof_is_supported(*eof))
120 return -EINVAL;
121 return 0;
122}
123
124/**
125 * i40e_fcoe_ctxt_eof - convert input FC EOF for descriptor programming
126 * @eof: the input eof value from the frame
127 *
128 * The FC EOF is converted to the value understood by HW for descriptor
129 * programming. Never call this w/o calling i40e_fcoe_eof_is_supported()
130 * first.
131 **/
132static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
133{
134 switch (eof) {
135 case FC_EOF_N:
136 return I40E_TX_DESC_CMD_L4T_EOFT_EOF_N;
137 case FC_EOF_T:
138 return I40E_TX_DESC_CMD_L4T_EOFT_EOF_T;
139 case FC_EOF_NI:
140 return I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI;
141 case FC_EOF_A:
142 return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A;
143 default:
144 /* FIXME: still returns 0 */
145 pr_err("Unrecognized EOF %x\n", eof);
146 return 0;
147 }
148}
149
150/**
151 * i40e_fcoe_xid_is_valid - returns true if the exchange id is valid
152 * @xid: the exchange id
153 **/
154static inline bool i40e_fcoe_xid_is_valid(u16 xid)
155{
156 return (xid != FC_XID_UNKNOWN) && (xid < I40E_FCOE_DDP_MAX);
157}
158
159/**
160 * i40e_fcoe_ddp_unmap - unmap the mapped sglist associated
161 * @pf: pointer to pf
162 * @ddp: sw DDP context
163 *
164 * Unmap the scatter-gather list associated with the given SW DDP context
165 *
166 * Returns: data length already ddp-ed in bytes
167 *
168 **/
169static inline void i40e_fcoe_ddp_unmap(struct i40e_pf *pf,
170 struct i40e_fcoe_ddp *ddp)
171{
172 if (test_and_set_bit(__I40E_FCOE_DDP_UNMAPPED, &ddp->flags))
173 return;
174
175 if (ddp->sgl) {
176 dma_unmap_sg(&pf->pdev->dev, ddp->sgl, ddp->sgc,
177 DMA_FROM_DEVICE);
178 ddp->sgl = NULL;
179 ddp->sgc = 0;
180 }
181
182 if (ddp->pool) {
183 dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
184 ddp->pool = NULL;
185 }
186}
187
188/**
189 * i40e_fcoe_ddp_clear - clear the given SW DDP context
190 * @ddp - SW DDP context
191 **/
192static inline void i40e_fcoe_ddp_clear(struct i40e_fcoe_ddp *ddp)
193{
194 memset(ddp, 0, sizeof(struct i40e_fcoe_ddp));
195 ddp->xid = FC_XID_UNKNOWN;
196 ddp->flags = __I40E_FCOE_DDP_NONE;
197}
198
199/**
200 * i40e_fcoe_progid_is_fcoe - check if the prog_id is for FCoE
201 * @id: the prog id for the programming status Rx descriptor write-back
202 **/
203static inline bool i40e_fcoe_progid_is_fcoe(u8 id)
204{
205 return (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
206 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS);
207}
208
209/**
210 * i40e_fcoe_fc_get_xid - get xid from the frame header
211 * @fh: the fc frame header
212 *
213 * In case the incoming frame's exchange is originated from
214 * the initiator, then received frame's exchange id is ANDed
215 * with fc_cpu_mask bits to get the same cpu on which exchange
216 * was originated, otherwise just use the current cpu.
217 *
218 * Returns ox_id if exchange originator, rx_id if responder
219 **/
220static inline u16 i40e_fcoe_fc_get_xid(struct fc_frame_header *fh)
221{
222 u32 f_ctl = ntoh24(fh->fh_f_ctl);
223
224 return (f_ctl & FC_FC_EX_CTX) ?
225 be16_to_cpu(fh->fh_ox_id) :
226 be16_to_cpu(fh->fh_rx_id);
227}
228
229/**
230 * i40e_fcoe_fc_frame_header - get fc frame header from skb
231 * @skb: packet
232 *
233 * This checks if there is a VLAN header and returns the data
234 * pointer to the start of the fc_frame_header.
235 *
236 * Returns pointer to the fc_frame_header
237 **/
238static inline struct fc_frame_header *i40e_fcoe_fc_frame_header(
239 struct sk_buff *skb)
240{
241 void *fh = skb->data + sizeof(struct fcoe_hdr);
242
243 if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
244 fh += sizeof(struct vlan_hdr);
245
246 return (struct fc_frame_header *)fh;
247}
248
249/**
250 * i40e_fcoe_ddp_put - release the DDP context for a given exchange id
251 * @netdev: the corresponding net_device
252 * @xid: the exchange id that corresponding DDP context will be released
253 *
254 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
255 * and it is expected to be called by ULD, i.e., FCP layer of libfc
256 * to release the corresponding ddp context when the I/O is done.
257 *
258 * Returns : data length already ddp-ed in bytes
259 **/
260static int i40e_fcoe_ddp_put(struct net_device *netdev, u16 xid)
261{
262 struct i40e_netdev_priv *np = netdev_priv(netdev);
263 struct i40e_pf *pf = np->vsi->back;
264 struct i40e_fcoe *fcoe = &pf->fcoe;
265 int len = 0;
266 struct i40e_fcoe_ddp *ddp = &fcoe->ddp[xid];
267
268 if (!fcoe || !ddp)
269 goto out;
270
271 if (test_bit(__I40E_FCOE_DDP_DONE, &ddp->flags))
272 len = ddp->len;
273 i40e_fcoe_ddp_unmap(pf, ddp);
274out:
275 return len;
276}
277
278/**
279 * i40e_fcoe_sw_init - sets up the HW for FCoE
280 * @pf: pointer to pf
281 *
282 * Returns 0 if FCoE is supported otherwise the error code
283 **/
284int i40e_init_pf_fcoe(struct i40e_pf *pf)
285{
286 struct i40e_hw *hw = &pf->hw;
287 u32 val;
288
289 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
290 pf->num_fcoe_qps = 0;
291 pf->fcoe_hmc_cntx_num = 0;
292 pf->fcoe_hmc_filt_num = 0;
293
294 if (!pf->hw.func_caps.fcoe) {
295 dev_info(&pf->pdev->dev, "FCoE capability is disabled\n");
296 return 0;
297 }
298
299 if (!pf->hw.func_caps.dcb) {
300 dev_warn(&pf->pdev->dev,
301 "Hardware is not DCB capable not enabling FCoE.\n");
302 return 0;
303 }
304
305 /* enable FCoE hash filter */
306 val = rd32(hw, I40E_PFQF_HENA(1));
307 val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32);
308 val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32);
309 val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
310 wr32(hw, I40E_PFQF_HENA(1), val);
311
312 /* enable flag */
313 pf->flags |= I40E_FLAG_FCOE_ENABLED;
314 pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
315
316 /* Reserve 4K DDP contexts and 20K filter size for FCoE */
317 pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) *
318 I40E_DMA_CNTX_BASE_SIZE;
319 pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num +
320 (1 << I40E_HASH_FILTER_SIZE_16K) *
321 I40E_HASH_FILTER_BASE_SIZE;
322
323 /* FCoE object: max 16K filter buckets and 4K DMA contexts */
324 pf->filter_settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_16K;
325 pf->filter_settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_4K;
326
327 /* Setup max frame with FCoE_MTU plus L2 overheads */
328 val = rd32(hw, I40E_GLFCOE_RCTL);
329 val &= ~I40E_GLFCOE_RCTL_MAX_SIZE_MASK;
330 val |= ((FCOE_MTU + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
331 << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT);
332 wr32(hw, I40E_GLFCOE_RCTL, val);
333
334 dev_info(&pf->pdev->dev, "FCoE is supported.\n");
335 return 0;
336}
337
338/**
339 * i40e_get_fcoe_tc_map - Return TC map for FCoE APP
340 * @pf: pointer to pf
341 *
342 **/
343u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
344{
345 struct i40e_ieee_app_priority_table app;
346 struct i40e_hw *hw = &pf->hw;
347 u8 enabled_tc = 0;
348 u8 tc, i;
349 /* Get the FCoE APP TLV */
350 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
351
352 for (i = 0; i < dcbcfg->numapps; i++) {
353 app = dcbcfg->app[i];
354 if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
355 app.protocolid == ETH_P_FCOE) {
356 tc = dcbcfg->etscfg.prioritytable[app.priority];
357 enabled_tc |= (1 << tc);
358 break;
359 }
360 }
361
362 /* TC0 if there is no TC defined for FCoE APP TLV */
363 enabled_tc = enabled_tc ? enabled_tc : 0x1;
364
365 return enabled_tc;
366}
367
368/**
369 * i40e_fcoe_vsi_init - prepares the VSI context for creating a FCoE VSI
370 * @vsi: pointer to the associated VSI struct
371 * @ctxt: pointer to the associated VSI context to be passed to HW
372 *
373 * Returns 0 on success or < 0 on error
374 **/
375int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt)
376{
377 struct i40e_aqc_vsi_properties_data *info = &ctxt->info;
378 struct i40e_pf *pf = vsi->back;
379 struct i40e_hw *hw = &pf->hw;
380 u8 enabled_tc = 0;
381
382 if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
383 dev_err(&pf->pdev->dev,
384 "FCoE is not enabled for this device\n");
385 return -EPERM;
386 }
387
388 /* initialize the hardware for FCoE */
389 ctxt->pf_num = hw->pf_id;
390 ctxt->vf_num = 0;
391 ctxt->uplink_seid = vsi->uplink_seid;
392 ctxt->connection_type = 0x1;
393 ctxt->flags = I40E_AQ_VSI_TYPE_PF;
394
395 /* FCoE VSI would need the following sections */
396 info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID |
397 I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
398
399 /* FCoE VSI does not need these sections */
400 info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID |
401 I40E_AQ_VSI_PROP_VLAN_VALID |
402 I40E_AQ_VSI_PROP_CAS_PV_VALID |
403 I40E_AQ_VSI_PROP_INGRESS_UP_VALID |
404 I40E_AQ_VSI_PROP_EGRESS_UP_VALID));
405
406 enabled_tc = i40e_get_fcoe_tc_map(pf);
407 i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true);
408
409 /* set up queue option section: only enable FCoE */
410 info->queueing_opt_flags = I40E_AQ_VSI_QUE_OPT_FCOE_ENA;
411
412 return 0;
413}
414
415/**
416 * i40e_fcoe_enable - this is the implementation of ndo_fcoe_enable,
417 * indicating the upper FCoE protocol stack is ready to use FCoE
418 * offload features.
419 *
420 * @netdev: pointer to the netdev that FCoE is created on
421 *
422 * Returns 0 on success
423 *
424 * in RTNL
425 *
426 **/
427int i40e_fcoe_enable(struct net_device *netdev)
428{
429 struct i40e_netdev_priv *np = netdev_priv(netdev);
430 struct i40e_vsi *vsi = np->vsi;
431 struct i40e_pf *pf = vsi->back;
432 struct i40e_fcoe *fcoe = &pf->fcoe;
433
434 if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
435 netdev_err(netdev, "HW does not support FCoE.\n");
436 return -ENODEV;
437 }
438
439 if (vsi->type != I40E_VSI_FCOE) {
440 netdev_err(netdev, "interface does not support FCoE.\n");
441 return -EBUSY;
442 }
443
444 atomic_inc(&fcoe->refcnt);
445
446 return 0;
447}
448
449/**
450 * i40e_fcoe_disable- disables FCoE for upper FCoE protocol stack.
451 * @dev: pointer to the netdev that FCoE is created on
452 *
453 * Returns 0 on success
454 *
455 **/
456int i40e_fcoe_disable(struct net_device *netdev)
457{
458 struct i40e_netdev_priv *np = netdev_priv(netdev);
459 struct i40e_vsi *vsi = np->vsi;
460 struct i40e_pf *pf = vsi->back;
461 struct i40e_fcoe *fcoe = &pf->fcoe;
462
463 if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
464 netdev_err(netdev, "device does not support FCoE\n");
465 return -ENODEV;
466 }
467 if (vsi->type != I40E_VSI_FCOE)
468 return -EBUSY;
469
470 if (!atomic_dec_and_test(&fcoe->refcnt))
471 return -EINVAL;
472
473 netdev_info(netdev, "FCoE disabled\n");
474
475 return 0;
476}
477
478/**
479 * i40e_fcoe_dma_pool_free - free the per cpu pool for FCoE DDP
480 * @fcoe: the FCoE sw object
481 * @dev: the device that the pool is associated with
482 * @cpu: the cpu for this pool
483 *
484 **/
485static void i40e_fcoe_dma_pool_free(struct i40e_fcoe *fcoe,
486 struct device *dev,
487 unsigned int cpu)
488{
489 struct i40e_fcoe_ddp_pool *ddp_pool;
490
491 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
492 if (!ddp_pool->pool) {
493 dev_warn(dev, "DDP pool already freed for cpu %d\n", cpu);
494 return;
495 }
496 dma_pool_destroy(ddp_pool->pool);
497 ddp_pool->pool = NULL;
498}
499
500/**
501 * i40e_fcoe_dma_pool_create - per cpu pool for FCoE DDP
502 * @fcoe: the FCoE sw object
503 * @dev: the device that the pool is associated with
504 * @cpu: the cpu for this pool
505 *
506 * Returns 0 on successful or non zero on failure
507 *
508 **/
509static int i40e_fcoe_dma_pool_create(struct i40e_fcoe *fcoe,
510 struct device *dev,
511 unsigned int cpu)
512{
513 struct i40e_fcoe_ddp_pool *ddp_pool;
514 struct dma_pool *pool;
515 char pool_name[32];
516
517 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
518 if (ddp_pool && ddp_pool->pool) {
519 dev_warn(dev, "DDP pool already allocated for cpu %d\n", cpu);
520 return 0;
521 }
522 snprintf(pool_name, sizeof(pool_name), "i40e_fcoe_ddp_%d", cpu);
523 pool = dma_pool_create(pool_name, dev, I40E_FCOE_DDP_PTR_MAX,
524 I40E_FCOE_DDP_PTR_ALIGN, PAGE_SIZE);
525 if (!pool) {
526 dev_err(dev, "dma_pool_create %s failed\n", pool_name);
527 return -ENOMEM;
528 }
529 ddp_pool->pool = pool;
530 return 0;
531}
532
533/**
534 * i40e_fcoe_free_ddp_resources - release FCoE DDP resources
535 * @vsi: the vsi FCoE is associated with
536 *
537 **/
538void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi)
539{
540 struct i40e_pf *pf = vsi->back;
541 struct i40e_fcoe *fcoe = &pf->fcoe;
542 int cpu, i;
543
544 /* do nothing if not FCoE VSI */
545 if (vsi->type != I40E_VSI_FCOE)
546 return;
547
548 /* do nothing if no DDP pools were allocated */
549 if (!fcoe->ddp_pool)
550 return;
551
552 for (i = 0; i < I40E_FCOE_DDP_MAX; i++)
553 i40e_fcoe_ddp_put(vsi->netdev, i);
554
555 for_each_possible_cpu(cpu)
556 i40e_fcoe_dma_pool_free(fcoe, &pf->pdev->dev, cpu);
557
558 free_percpu(fcoe->ddp_pool);
559 fcoe->ddp_pool = NULL;
560
561 netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources released\n",
562 vsi->id, vsi->seid);
563}
564
565/**
566 * i40e_fcoe_setup_ddp_resources - allocate per cpu DDP resources
567 * @vsi: the VSI FCoE is associated with
568 *
569 * Returns 0 on successful or non zero on failure
570 *
571 **/
572int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi)
573{
574 struct i40e_pf *pf = vsi->back;
575 struct device *dev = &pf->pdev->dev;
576 struct i40e_fcoe *fcoe = &pf->fcoe;
577 unsigned int cpu;
578 int i;
579
580 if (vsi->type != I40E_VSI_FCOE)
581 return -ENODEV;
582
583 /* do nothing if no DDP pools were allocated */
584 if (fcoe->ddp_pool)
585 return -EEXIST;
586
587 /* allocate per CPU memory to track DDP pools */
588 fcoe->ddp_pool = alloc_percpu(struct i40e_fcoe_ddp_pool);
589 if (!fcoe->ddp_pool) {
590 dev_err(&pf->pdev->dev, "failed to allocate percpu DDP\n");
591 return -ENOMEM;
592 }
593
594 /* allocate pci pool for each cpu */
595 for_each_possible_cpu(cpu) {
596 if (!i40e_fcoe_dma_pool_create(fcoe, dev, cpu))
597 continue;
598
599 dev_err(dev, "failed to alloc DDP pool on cpu:%d\n", cpu);
600 i40e_fcoe_free_ddp_resources(vsi);
601 return -ENOMEM;
602 }
603
604 /* initialize the sw context */
605 for (i = 0; i < I40E_FCOE_DDP_MAX; i++)
606 i40e_fcoe_ddp_clear(&fcoe->ddp[i]);
607
608 netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources allocated\n",
609 vsi->id, vsi->seid);
610
611 return 0;
612}
613
/**
 * i40e_fcoe_handle_status - check the Programming Status for FCoE
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for Programming Status, not a packet descriptor.
 * @prog_id: the programming request id reported in the write-back
 *
 * Check if this is the Rx Programming Status descriptor write-back for FCoE.
 * This is used to verify if the context/filter programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct i40e_fcoe *fcoe = &pf->fcoe;
	struct i40e_fcoe_ddp *ddp;
	u32 error;
	u16 xid;
	u64 qw;

	/* we only care for FCoE here */
	if (!i40e_fcoe_progid_is_fcoe(prog_id))
		return;

	/* xid sits in the low bits of the fcoe_param field of qword0 */
	xid = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param) &
	      (I40E_FCOE_DDP_MAX - 1);

	if (!i40e_fcoe_xid_is_valid(xid))
		return;

	ddp = &fcoe->ddp[xid];
	WARN_ON(xid != ddp->xid);

	/* extract the error bits from the status_error_len qword */
	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	/* DDP context programming status: failure or success */
	if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) {
		if (I40E_RX_PROG_FCOE_ERROR_TBL_FULL(error)) {
			dev_err(&pf->pdev->dev, "xid %x ddp->xid %x TABLE FULL\n",
				xid, ddp->xid);
			ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT;
		}
		if (I40E_RX_PROG_FCOE_ERROR_CONFLICT(error)) {
			dev_err(&pf->pdev->dev, "xid %x ddp->xid %x CONFLICT\n",
				xid, ddp->xid);
			ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT;
		}
	}

	/* DDP context invalidation status: failure or success */
	if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS) {
		if (I40E_RX_PROG_FCOE_ERROR_INVLFAIL(error)) {
			dev_err(&pf->pdev->dev, "xid %x ddp->xid %x INVALIDATION FAILURE\n",
				xid, ddp->xid);
			ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_INVLFAIL_BIT;
		}
		/* clear the flag so we can retry invalidation */
		clear_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags);
	}

	/* unmap DMA */
	i40e_fcoe_ddp_unmap(pf, ddp);
	i40e_fcoe_ddp_clear(ddp);
}
679
/**
 * i40e_fcoe_handle_offload - check ddp status and mark it done
 * @rx_ring: the Rx ring this descriptor was received on
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not a FCOE ddp, 0 indicates
 * not passing the skb to ULD, > 0 indicates is the length of data
 * being ddped.
 *
 **/
int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc,
			     struct sk_buff *skb)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct i40e_fcoe *fcoe = &pf->fcoe;
	struct fc_frame_header *fh = NULL;
	struct i40e_fcoe_ddp *ddp = NULL;
	u32 status, fltstat;
	u32 error, fcerr;
	int rc = -EINVAL;
	u16 ptype;
	u16 xid;
	u64 qw;

	/* check this rxd is for programming status */
	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	/* packet descriptor, check packet type */
	ptype = (qw & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
	if (!i40e_rx_is_fcoe(ptype))
		goto out_no_ddp;

	/* FCoE error bits are a sub-field of the L3L4E error field */
	error = (qw & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT;
	fcerr = (error >> I40E_RX_DESC_ERROR_L3L4E_SHIFT) &
		I40E_RX_DESC_FCOE_ERROR_MASK;

	/* check stateless offload error */
	if (unlikely(fcerr == I40E_RX_DESC_ERROR_L3L4E_PROT)) {
		dev_err(&pf->pdev->dev, "Protocol Error\n");
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/* check hw status on ddp */
	status = (qw & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT;
	fltstat = (status >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
		  I40E_RX_DESC_FLTSTAT_FCMASK;

	/* now we are ready to check DDP */
	fh = i40e_fcoe_fc_frame_header(skb);
	xid = i40e_fcoe_fc_get_xid(fh);
	if (!i40e_fcoe_xid_is_valid(xid))
		goto out_no_ddp;

	/* non DDP normal receive, return to the protocol stack */
	if (fltstat == I40E_RX_DESC_FLTSTAT_NOMTCH)
		goto out_no_ddp;

	/* do we have a sw ddp context setup ? */
	ddp = &fcoe->ddp[xid];
	if (!ddp->sgl)
		goto out_no_ddp;

	/* fetch xid from hw rxd wb, which should match up the sw ctxt */
	xid = le16_to_cpu(rx_desc->wb.qword0.lo_dword.mirr_fcoe.fcoe_ctx_id);
	if (ddp->xid != xid) {
		dev_err(&pf->pdev->dev, "xid 0x%x does not match ctx_xid 0x%x\n",
			ddp->xid, xid);
		goto out_put_ddp;
	}

	/* the same exchange has already errored out */
	if (ddp->fcerr) {
		dev_err(&pf->pdev->dev, "xid 0x%x fcerr 0x%x reported fcer 0x%x\n",
			xid, ddp->fcerr, fcerr);
		goto out_put_ddp;
	}

	/* fcoe param is valid by now with correct DDPed length */
	ddp->len = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param);
	ddp->fcerr = fcerr;
	/* header posting only, useful only for target mode and debugging */
	if (fltstat == I40E_RX_DESC_FLTSTAT_DDP) {
		/* For target mode, we get header of the last packet but it
		 * does not have the FCoE trailer field, i.e., CRC and EOF
		 * Ordered Set since they are offloaded by the HW, so fill
		 * it up correspondingly to allow the packet to pass through
		 * to the upper protocol stack.
		 */
		u32 f_ctl = ntoh24(fh->fh_f_ctl);

		if ((f_ctl & FC_FC_END_SEQ) &&
		    (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA)) {
			struct fcoe_crc_eof *crc = NULL;

			crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
			crc->fcoe_eof = FC_EOF_T;
		} else {
			/* otherwise, drop the header only frame */
			rc = 0;
			goto out_no_ddp;
		}
	}

out_put_ddp:
	/* either we got RSP or we have an error, unmap DMA in both cases */
	i40e_fcoe_ddp_unmap(pf, ddp);
	if (ddp->len && !ddp->fcerr) {
		int pkts;

		rc = ddp->len;
		i40e_fcoe_ddp_clear(ddp);
		/* restore length after clear so the caller gets it as rc */
		ddp->len = rc;
		/* NOTE(review): 2048 appears to be an assumed per-frame DDP
		 * payload size for stats accounting - confirm against HW spec
		 */
		pkts = DIV_ROUND_UP(rc, 2048);
		rx_ring->stats.bytes += rc;
		rx_ring->stats.packets += pkts;
		rx_ring->q_vector->rx.total_bytes += rc;
		rx_ring->q_vector->rx.total_packets += pkts;
		set_bit(__I40E_FCOE_DDP_DONE, &ddp->flags);
	}

out_no_ddp:
	return rc;
}
808
809/**
810 * i40e_fcoe_ddp_setup - called to set up ddp context
811 * @netdev: the corresponding net_device
812 * @xid: the exchange id requesting ddp
813 * @sgl: the scatter-gather list for this request
814 * @sgc: the number of scatter-gather items
815 * @target_mode: indicates this is a DDP request for target
816 *
817 * Returns : 1 for success and 0 for no DDP on this I/O
818 **/
819static int i40e_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
820 struct scatterlist *sgl, unsigned int sgc,
821 int target_mode)
822{
823 static const unsigned int bufflen = I40E_FCOE_DDP_BUF_MIN;
824 struct i40e_netdev_priv *np = netdev_priv(netdev);
825 struct i40e_fcoe_ddp_pool *ddp_pool;
826 struct i40e_pf *pf = np->vsi->back;
827 struct i40e_fcoe *fcoe = &pf->fcoe;
828 unsigned int i, j, dmacount;
829 struct i40e_fcoe_ddp *ddp;
830 unsigned int firstoff = 0;
831 unsigned int thisoff = 0;
832 unsigned int thislen = 0;
833 struct scatterlist *sg;
834 dma_addr_t addr = 0;
835 unsigned int len;
836
837 if (xid >= I40E_FCOE_DDP_MAX) {
838 dev_warn(&pf->pdev->dev, "xid=0x%x out-of-range\n", xid);
839 return 0;
840 }
841
842 /* no DDP if we are already down or resetting */
843 if (test_bit(__I40E_DOWN, &pf->state) ||
844 test_bit(__I40E_NEEDS_RESTART, &pf->state)) {
845 dev_info(&pf->pdev->dev, "xid=0x%x device in reset/down\n",
846 xid);
847 return 0;
848 }
849
850 ddp = &fcoe->ddp[xid];
851 if (ddp->sgl) {
852 dev_info(&pf->pdev->dev, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
853 xid, ddp->sgl, ddp->sgc);
854 return 0;
855 }
856 i40e_fcoe_ddp_clear(ddp);
857
858 if (!fcoe->ddp_pool) {
859 dev_info(&pf->pdev->dev, "No DDP pool, xid 0x%x\n", xid);
860 return 0;
861 }
862
863 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
864 if (!ddp_pool->pool) {
865 dev_info(&pf->pdev->dev, "No percpu ddp pool, xid 0x%x\n", xid);
866 goto out_noddp;
867 }
868
869 /* setup dma from scsi command sgl */
870 dmacount = dma_map_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
871 if (dmacount == 0) {
872 dev_info(&pf->pdev->dev, "dma_map_sg for sgl %p, sgc %d failed\n",
873 sgl, sgc);
874 goto out_noddp_unmap;
875 }
876
877 /* alloc the udl from our ddp pool */
878 ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
879 if (!ddp->udl) {
880 dev_info(&pf->pdev->dev,
881 "Failed allocated ddp context, xid 0x%x\n", xid);
882 goto out_noddp_unmap;
883 }
884
885 j = 0;
886 ddp->len = 0;
887 for_each_sg(sgl, sg, dmacount, i) {
888 addr = sg_dma_address(sg);
889 len = sg_dma_len(sg);
890 ddp->len += len;
891 while (len) {
892 /* max number of buffers allowed in one DDP context */
893 if (j >= I40E_FCOE_DDP_BUFFCNT_MAX) {
894 dev_info(&pf->pdev->dev,
895 "xid=%x:%d,%d,%d:addr=%llx not enough descriptors\n",
896 xid, i, j, dmacount, (u64)addr);
897 goto out_noddp_free;
898 }
899
900 /* get the offset of length of current buffer */
901 thisoff = addr & ((dma_addr_t)bufflen - 1);
902 thislen = min_t(unsigned int, (bufflen - thisoff), len);
903 /* all but the 1st buffer (j == 0)
904 * must be aligned on bufflen
905 */
906 if ((j != 0) && (thisoff))
907 goto out_noddp_free;
908
909 /* all but the last buffer
910 * ((i == (dmacount - 1)) && (thislen == len))
911 * must end at bufflen
912 */
913 if (((i != (dmacount - 1)) || (thislen != len)) &&
914 ((thislen + thisoff) != bufflen))
915 goto out_noddp_free;
916
917 ddp->udl[j] = (u64)(addr - thisoff);
918 /* only the first buffer may have none-zero offset */
919 if (j == 0)
920 firstoff = thisoff;
921 len -= thislen;
922 addr += thislen;
923 j++;
924 }
925 }
926 /* only the last buffer may have non-full bufflen */
927 ddp->lastsize = thisoff + thislen;
928 ddp->firstoff = firstoff;
929 ddp->list_len = j;
930 ddp->pool = ddp_pool->pool;
931 ddp->sgl = sgl;
932 ddp->sgc = sgc;
933 ddp->xid = xid;
934 if (target_mode)
935 set_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags);
936 set_bit(__I40E_FCOE_DDP_INITALIZED, &ddp->flags);
937
938 put_cpu();
939 return 1; /* Success */
940
941out_noddp_free:
942 dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
943 i40e_fcoe_ddp_clear(ddp);
944
945out_noddp_unmap:
946 dma_unmap_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
947out_noddp:
948 put_cpu();
949 return 0;
950}
951
952/**
953 * i40e_fcoe_ddp_get - called to set up ddp context in initiator mode
954 * @netdev: the corresponding net_device
955 * @xid: the exchange id requesting ddp
956 * @sgl: the scatter-gather list for this request
957 * @sgc: the number of scatter-gather items
958 *
959 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
960 * and is expected to be called from ULD, e.g., FCP layer of libfc
961 * to set up ddp for the corresponding xid of the given sglist for
962 * the corresponding I/O.
963 *
964 * Returns : 1 for success and 0 for no ddp
965 **/
966static int i40e_fcoe_ddp_get(struct net_device *netdev, u16 xid,
967 struct scatterlist *sgl, unsigned int sgc)
968{
969 return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
970}
971
972/**
973 * i40e_fcoe_ddp_target - called to set up ddp context in target mode
974 * @netdev: the corresponding net_device
975 * @xid: the exchange id requesting ddp
976 * @sgl: the scatter-gather list for this request
977 * @sgc: the number of scatter-gather items
978 *
979 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
980 * and is expected to be called from ULD, e.g., FCP layer of libfc
981 * to set up ddp for the corresponding xid of the given sglist for
982 * the corresponding I/O. The DDP in target mode is a write I/O request
983 * from the initiator.
984 *
985 * Returns : 1 for success and 0 for no ddp
986 **/
987static int i40e_fcoe_ddp_target(struct net_device *netdev, u16 xid,
988 struct scatterlist *sgl, unsigned int sgc)
989{
990 return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
991}
992
/**
 * i40e_fcoe_program_ddp - programs the HW DDP related descriptors
 * @tx_ring: transmit ring for this packet
 * @skb: the packet to be sent out
 * @ddp: the SW DDP context for this exchange
 * @sof: the SOF to indicate class of service
 *
 * Determine if it is READ/WRITE command, and finds out if there is
 * a matching SW DDP context for this command. DDP is applicable
 * only in case of READ if initiator or WRITE in case of
 * responder (via checking XFER_RDY).
 *
 * Note: caller checks sof and ddp sw context
 *
 * Returns : none
 *
 **/
static void i40e_fcoe_program_ddp(struct i40e_ring *tx_ring,
				  struct sk_buff *skb,
				  struct i40e_fcoe_ddp *ddp, u8 sof)
{
	struct i40e_fcoe_filter_context_desc *filter_desc = NULL;
	struct i40e_fcoe_queue_context_desc *queue_desc = NULL;
	struct i40e_fcoe_ddp_context_desc *ddp_desc = NULL;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u16 i = tx_ring->next_to_use;
	struct fc_frame_header *fh;
	u64 flags_rsvd_lanq = 0;
	bool target_mode;

	/* check if abort is still pending */
	if (test_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags)) {
		dev_warn(&pf->pdev->dev,
			 "DDP abort is still pending xid:%hx and ddp->flags:%lx:\n",
			 ddp->xid, ddp->flags);
		return;
	}

	/* set the flag to indicate this is programmed */
	if (test_and_set_bit(__I40E_FCOE_DDP_PROGRAMMED, &ddp->flags)) {
		dev_warn(&pf->pdev->dev,
			 "DDP is already programmed for xid:%hx and ddp->flags:%lx:\n",
			 ddp->xid, ddp->flags);
		return;
	}

	/* Prepare the DDP context descriptor */
	ddp_desc = I40E_DDP_CONTEXT_DESC(tx_ring, i);
	i++;
	if (i == tx_ring->count)
		i = 0;

	/* pack buffer size, first offset and last size into qword 1 */
	ddp_desc->type_cmd_foff_lsize =
				cpu_to_le64(I40E_TX_DESC_DTYPE_DDP_CTX	|
				((u64)I40E_FCOE_DDP_CTX_DESC_BSIZE_4K  <<
				I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT)	|
				((u64)ddp->firstoff		       <<
				I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT)	|
				((u64)ddp->lastsize		       <<
				I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT));
	ddp_desc->rsvd = cpu_to_le64(0);

	/* target mode needs last packet in the sequence */
	target_mode = test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags);
	if (target_mode)
		ddp_desc->type_cmd_foff_lsize |=
			cpu_to_le64(I40E_FCOE_DDP_CTX_DESC_LASTSEQH);

	/* Prepare queue_context descriptor */
	queue_desc = I40E_QUEUE_CONTEXT_DESC(tx_ring, i++);
	if (i == tx_ring->count)
		i = 0;
	/* dma index is the xid; fbase is the UDL dma handle */
	queue_desc->dmaindx_fbase = cpu_to_le64(ddp->xid | ((u64)ddp->udp));
	queue_desc->flen_tph = cpu_to_le64(ddp->list_len |
				((u64)(I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC |
				I40E_FCOE_QUEUE_CTX_DESC_TPHDATA) <<
				I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT));

	/* Prepare filter_context_desc */
	filter_desc = I40E_FILTER_CONTEXT_DESC(tx_ring, i);
	i++;
	if (i == tx_ring->count)
		i = 0;

	/* filter parameters come from the FC frame header of this skb */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	filter_desc->param = cpu_to_le32(ntohl(fh->fh_parm_offset));
	filter_desc->seqn = cpu_to_le16(ntohs(fh->fh_seq_cnt));
	filter_desc->rsvd_dmaindx = cpu_to_le16(ddp->xid <<
				I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT);

	flags_rsvd_lanq = I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP;
	flags_rsvd_lanq |= (u64)(target_mode ?
			I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP :
			I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT);

	/* class 2 for SOFi2/SOFn2, otherwise class 3 */
	flags_rsvd_lanq |= (u64)((sof == FC_SOF_I2 || sof == FC_SOF_N2) ?
			I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 :
			I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3);

	flags_rsvd_lanq |= ((u64)skb->queue_mapping <<
			I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT);
	filter_desc->flags_rsvd_lanq = cpu_to_le64(flags_rsvd_lanq);

	/* By this time, all offload related descriptors has been programmed */
	tx_ring->next_to_use = i;
}
1098
1099/**
1100 * i40e_fcoe_invalidate_ddp - invalidates DDP in case of abort
1101 * @tx_ring: transmit ring for this packet
1102 * @skb: the packet associated w/ this DDP invalidation, i.e., ABTS
1103 * @ddp: the SW DDP context for this DDP
1104 *
1105 * Programs the Tx context descriptor to do DDP invalidation.
1106 **/
1107static void i40e_fcoe_invalidate_ddp(struct i40e_ring *tx_ring,
1108 struct sk_buff *skb,
1109 struct i40e_fcoe_ddp *ddp)
1110{
1111 struct i40e_tx_context_desc *context_desc;
1112 int i;
1113
1114 if (test_and_set_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags))
1115 return;
1116
1117 i = tx_ring->next_to_use;
1118 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
1119 i++;
1120 if (i == tx_ring->count)
1121 i = 0;
1122
1123 context_desc->tunneling_params = cpu_to_le32(0);
1124 context_desc->l2tag2 = cpu_to_le16(0);
1125 context_desc->rsvd = cpu_to_le16(0);
1126 context_desc->type_cmd_tso_mss = cpu_to_le64(
1127 I40E_TX_DESC_DTYPE_FCOE_CTX |
1128 (I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL <<
1129 I40E_TXD_CTX_QW1_CMD_SHIFT) |
1130 (I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND <<
1131 I40E_TXD_CTX_QW1_CMD_SHIFT));
1132 tx_ring->next_to_use = i;
1133}
1134
1135/**
1136 * i40e_fcoe_handle_ddp - check we should setup or invalidate DDP
1137 * @tx_ring: transmit ring for this packet
1138 * @skb: the packet to be sent out
1139 * @sof: the SOF to indicate class of service
1140 *
1141 * Determine if it is ABTS/READ/XFER_RDY, and finds out if there is
1142 * a matching SW DDP context for this command. DDP is applicable
1143 * only in case of READ if initiator or WRITE in case of
1144 * responder (via checking XFER_RDY). In case this is an ABTS, send
1145 * just invalidate the context.
1146 **/
1147static void i40e_fcoe_handle_ddp(struct i40e_ring *tx_ring,
1148 struct sk_buff *skb, u8 sof)
1149{
1150 struct i40e_pf *pf = tx_ring->vsi->back;
1151 struct i40e_fcoe *fcoe = &pf->fcoe;
1152 struct fc_frame_header *fh;
1153 struct i40e_fcoe_ddp *ddp;
1154 u32 f_ctl;
1155 u8 r_ctl;
1156 u16 xid;
1157
1158 fh = (struct fc_frame_header *)skb_transport_header(skb);
1159 f_ctl = ntoh24(fh->fh_f_ctl);
1160 r_ctl = fh->fh_r_ctl;
1161 ddp = NULL;
1162
1163 if ((r_ctl == FC_RCTL_DD_DATA_DESC) && (f_ctl & FC_FC_EX_CTX)) {
1164 /* exchange responder? if so, XFER_RDY for write */
1165 xid = ntohs(fh->fh_rx_id);
1166 if (i40e_fcoe_xid_is_valid(xid)) {
1167 ddp = &fcoe->ddp[xid];
1168 if ((ddp->xid == xid) &&
1169 (test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags)))
1170 i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof);
1171 }
1172 } else if (r_ctl == FC_RCTL_DD_UNSOL_CMD) {
1173 /* exchange originator, check READ cmd */
1174 xid = ntohs(fh->fh_ox_id);
1175 if (i40e_fcoe_xid_is_valid(xid)) {
1176 ddp = &fcoe->ddp[xid];
1177 if ((ddp->xid == xid) &&
1178 (!test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags)))
1179 i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof);
1180 }
1181 } else if (r_ctl == FC_RCTL_BA_ABTS) {
1182 /* exchange originator, check ABTS */
1183 xid = ntohs(fh->fh_ox_id);
1184 if (i40e_fcoe_xid_is_valid(xid)) {
1185 ddp = &fcoe->ddp[xid];
1186 if ((ddp->xid == xid) &&
1187 (!test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags)))
1188 i40e_fcoe_invalidate_ddp(tx_ring, skb, ddp);
1189 }
1190 }
1191}
1192
/**
 * i40e_fcoe_tso - set up FCoE TSO
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @tx_flags: collected send information
 * @hdr_len: the tso header length
 * @sof: the SOF to indicate class of service
 *
 * Note must already have sof checked to be either class 2 or class 3 before
 * calling this function.
 *
 * Returns 1 to indicate sequence segmentation offload is properly setup
 * or returns 0 to indicate no tso is needed, otherwise returns error
 * code to drop the frame.
 **/
static int i40e_fcoe_tso(struct i40e_ring *tx_ring,
			 struct sk_buff *skb,
			 u32 tx_flags, u8 *hdr_len, u8 sof)
{
	struct i40e_tx_context_desc *context_desc;
	u32 cd_type, cd_cmd, cd_tso_len, cd_mss;
	struct fc_frame_header *fh;
	u64 cd_type_cmd_tso_mss;

	/* must match gso type as FCoE */
	if (!skb_is_gso(skb))
		return 0;

	/* is it the expected gso type for FCoE ?*/
	if (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) {
		netdev_err(skb->dev,
			   "wrong gso type %d:expecting SKB_GSO_FCOE\n",
			   skb_shinfo(skb)->gso_type);
		return -EINVAL;
	}

	/* header and trailer are inserted by hw */
	*hdr_len = skb_transport_offset(skb) + sizeof(struct fc_frame_header) +
		   sizeof(struct fcoe_crc_eof);

	/* check sof to decide a class 2 or 3 TSO */
	if (likely(i40e_fcoe_sof_is_class3(sof)))
		cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3;
	else
		cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2;

	/* param field valid? relative offset flag is in the 3rd F_CTL byte */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
		cd_cmd |= I40E_FCOE_TX_CTX_DESC_RELOFF;

	/* fill the field values */
	cd_type = I40E_TX_DESC_DTYPE_FCOE_CTX;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = skb_shinfo(skb)->gso_size;
	cd_type_cmd_tso_mss =
		((u64)cd_type  << I40E_TXD_CTX_QW1_DTYPE_SHIFT)     |
		((u64)cd_cmd     << I40E_TXD_CTX_QW1_CMD_SHIFT)	    |
		((u64)cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
		((u64)cd_mss     << I40E_TXD_CTX_QW1_MSS_SHIFT);

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
	tx_ring->next_to_use++;
	if (tx_ring->next_to_use == tx_ring->count)
		tx_ring->next_to_use = 0;

	context_desc->tunneling_params = 0;
	context_desc->l2tag2 = cpu_to_le16((tx_flags & I40E_TX_FLAGS_VLAN_MASK)
					   >> I40E_TX_FLAGS_VLAN_SHIFT);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);

	return 1;
}
1267
/**
 * i40e_fcoe_tx_map - build the tx descriptor
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @first: first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len: ptr to the size of the packet header
 * @eof: the frame eof value
 *
 * Note, for FCoE, sof and eof are already checked
 **/
static void i40e_fcoe_tx_map(struct i40e_ring *tx_ring,
			     struct sk_buff *skb,
			     struct i40e_tx_buffer *first,
			     u32 tx_flags, u8 hdr_len, u8 eof)
{
	u32 td_offset = 0;
	u32 td_cmd = 0;
	u32 maclen;

	/* insert CRC */
	td_cmd = I40E_TX_DESC_CMD_ICRC;

	/* setup MACLEN */
	maclen = skb_network_offset(skb);
	if (tx_flags & I40E_TX_FLAGS_SW_VLAN)
		maclen += sizeof(struct vlan_hdr);

	if (skb->protocol == htons(ETH_P_FCOE)) {
		/* for FCoE, maclen should exclude ether type */
		maclen -= 2;
		/* setup type as FCoE and EOF insertion */
		td_cmd |= (I40E_TX_DESC_CMD_FCOET | i40e_fcoe_ctxt_eof(eof));
		/* setup FCoELEN and FCLEN */
		td_offset |= ((((sizeof(struct fcoe_hdr) + 2) >> 2) <<
				I40E_TX_DESC_LENGTH_IPLEN_SHIFT) |
				((sizeof(struct fc_frame_header) >> 2) <<
				I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT));
		/* trim to exclude trailer: HW appends CRC/EOF itself */
		pskb_trim(skb, skb->len - sizeof(struct fcoe_crc_eof));
	}

	/* MACLEN is ether header length in words not bytes */
	td_offset |= (maclen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	/* hand off to the common Tx mapper to post the data descriptors */
	return i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
			   td_cmd, td_offset);
}
1316
/**
 * i40e_fcoe_set_skb_header - adjust skb header point for FIP/FCoE/FC
 * @skb: the skb to be adjusted
 *
 * Returns 0 if this skb is a FCoE/FIP or VLAN carried FCoE/FIP frame and
 * the skb header pointers were adjusted correspondingly; otherwise
 * returns -EINVAL.
 **/
static inline int i40e_fcoe_set_skb_header(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	skb_reset_mac_header(skb);
	skb->mac_len = sizeof(struct ethhdr);
	/* account for a VLAN tag and look at the encapsulated protocol */
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veth = (struct vlan_ethhdr *)eth_hdr(skb);

		protocol = veth->h_vlan_encapsulated_proto;
		skb->mac_len += sizeof(struct vlan_hdr);
	}

	/* FCoE or FIP only */
	if ((protocol != htons(ETH_P_FIP)) &&
	    (protocol != htons(ETH_P_FCOE)))
		return -EINVAL;

	/* set header to L2 of FCoE/FIP */
	skb_set_network_header(skb, skb->mac_len);
	if (protocol == htons(ETH_P_FIP))
		return 0;

	/* set header to L3 of FC */
	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
	return 0;
}
1351
/**
 * i40e_fcoe_xmit_frame - transmit buffer
 * @skb: send buffer
 * @netdev: the fcoe netdev
 *
 * Returns 0 if sent, else an error code
 **/
static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb,
					struct net_device *netdev)
{
	/* skb->dev is the same netdev given by the ndo_start_xmit caller */
	struct i40e_netdev_priv *np = netdev_priv(skb->dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
	struct i40e_tx_buffer *first;
	u32 tx_flags = 0;
	u8 hdr_len = 0;
	u8 sof = 0;
	u8 eof = 0;
	int fso;

	/* reject anything that is not FCoE/FIP and set header pointers */
	if (i40e_fcoe_set_skb_header(skb))
		goto out_drop;

	if (!i40e_xmit_descriptor_count(skb, tx_ring))
		return NETDEV_TX_BUSY;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* FIP is a regular L2 traffic w/o offload */
	if (skb->protocol == htons(ETH_P_FIP))
		goto out_send;

	/* check sof and eof, only supports FC Class 2 or 3 */
	if (i40e_fcoe_fc_sof(skb, &sof) || i40e_fcoe_fc_eof(skb, &eof)) {
		netdev_err(netdev, "SOF/EOF error:%02x - %02x\n", sof, eof);
		goto out_drop;
	}

	/* always do FCCRC for FCoE */
	tx_flags |= I40E_TX_FLAGS_FCCRC;

	/* check we should do sequence offload */
	fso = i40e_fcoe_tso(tx_ring, skb, tx_flags, &hdr_len, sof);
	if (fso < 0)
		goto out_drop;
	else if (fso)
		tx_flags |= I40E_TX_FLAGS_FSO;
	else
		/* no TSO: see if a DDP context should be set up/invalidated */
		i40e_fcoe_handle_ddp(tx_ring, skb, sof);

out_send:
	/* send out the packet */
	i40e_fcoe_tx_map(tx_ring, skb, first, tx_flags, hdr_len, eof);

	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
1418
1419/**
1420 * i40e_fcoe_change_mtu - NDO callback to change the Maximum Transfer Unit
1421 * @netdev: network interface device structure
1422 * @new_mtu: new value for maximum frame size
1423 *
1424 * Returns error as operation not permitted
1425 *
1426 **/
1427static int i40e_fcoe_change_mtu(struct net_device *netdev, int new_mtu)
1428{
1429 netdev_warn(netdev, "MTU change is not supported on FCoE interfaces\n");
1430 return -EPERM;
1431}
1432
1433/**
1434 * i40e_fcoe_set_features - set the netdev feature flags
1435 * @netdev: ptr to the netdev being adjusted
1436 * @features: the feature set that the stack is suggesting
1437 *
1438 **/
1439static int i40e_fcoe_set_features(struct net_device *netdev,
1440 netdev_features_t features)
1441{
1442 struct i40e_netdev_priv *np = netdev_priv(netdev);
1443 struct i40e_vsi *vsi = np->vsi;
1444
1445 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1446 i40e_vlan_stripping_enable(vsi);
1447 else
1448 i40e_vlan_stripping_disable(vsi);
1449
1450 return 0;
1451}
1452
1453
1454static const struct net_device_ops i40e_fcoe_netdev_ops = {
1455 .ndo_open = i40e_open,
1456 .ndo_stop = i40e_close,
1457 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
1458 .ndo_set_rx_mode = i40e_set_rx_mode,
1459 .ndo_validate_addr = eth_validate_addr,
1460 .ndo_set_mac_address = i40e_set_mac,
1461 .ndo_change_mtu = i40e_fcoe_change_mtu,
1462 .ndo_do_ioctl = i40e_ioctl,
1463 .ndo_tx_timeout = i40e_tx_timeout,
1464 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
1465 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
1466 .ndo_setup_tc = i40e_setup_tc,
1467
1468#ifdef CONFIG_NET_POLL_CONTROLLER
1469 .ndo_poll_controller = i40e_netpoll,
1470#endif
1471 .ndo_start_xmit = i40e_fcoe_xmit_frame,
1472 .ndo_fcoe_enable = i40e_fcoe_enable,
1473 .ndo_fcoe_disable = i40e_fcoe_disable,
1474 .ndo_fcoe_ddp_setup = i40e_fcoe_ddp_get,
1475 .ndo_fcoe_ddp_done = i40e_fcoe_ddp_put,
1476 .ndo_fcoe_ddp_target = i40e_fcoe_ddp_target,
1477 .ndo_set_features = i40e_fcoe_set_features,
1478};
1479
1480/**
1481 * i40e_fcoe_config_netdev - prepares the VSI context for creating a FCoE VSI
1482 * @vsi: pointer to the associated VSI struct
1483 * @ctxt: pointer to the associated VSI context to be passed to HW
1484 *
1485 * Returns 0 on success or < 0 on error
1486 **/
1487void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
1488{
1489 struct i40e_hw *hw = &vsi->back->hw;
1490 struct i40e_pf *pf = vsi->back;
1491
1492 if (vsi->type != I40E_VSI_FCOE)
1493 return;
1494
1495 netdev->features = (NETIF_F_HW_VLAN_CTAG_TX |
1496 NETIF_F_HW_VLAN_CTAG_RX |
1497 NETIF_F_HW_VLAN_CTAG_FILTER);
1498
1499 netdev->vlan_features = netdev->features;
1500 netdev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
1501 NETIF_F_HW_VLAN_CTAG_RX |
1502 NETIF_F_HW_VLAN_CTAG_FILTER);
1503 netdev->fcoe_ddp_xid = I40E_FCOE_DDP_MAX - 1;
1504 netdev->features |= NETIF_F_ALL_FCOE;
1505 netdev->vlan_features |= NETIF_F_ALL_FCOE;
1506 netdev->hw_features |= netdev->features;
1507 netdev->priv_flags |= IFF_UNICAST_FLT;
1508 netdev->priv_flags |= IFF_SUPP_NOFCS;
1509
1510 strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1);
1511 netdev->mtu = FCOE_MTU;
1512 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
1513 i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
1514 i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
1515 i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
1516 i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
1517 i40e_add_filter(vsi, FIP_ALL_VN2VN_MACS, 0, false, false);
1518 i40e_add_filter(vsi, FIP_ALL_P2P_MACS, 0, false, false);
1519
1520 /* use san mac */
1521 ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
1522 ether_addr_copy(netdev->perm_addr, hw->mac.san_addr);
1523 /* fcoe netdev ops */
1524 netdev->netdev_ops = &i40e_fcoe_netdev_ops;
1525}
1526
1527/**
1528 * i40e_fcoe_vsi_setup - allocate and set up FCoE VSI
1529 * @pf: the pf that VSI is associated with
1530 *
1531 **/
1532void i40e_fcoe_vsi_setup(struct i40e_pf *pf)
1533{
1534 struct i40e_vsi *vsi;
1535 u16 seid;
1536 int i;
1537
1538 if (!(pf->flags & I40E_FLAG_FCOE_ENABLED))
1539 return;
1540
1541 BUG_ON(!pf->vsi[pf->lan_vsi]);
1542
1543 for (i = 0; i < pf->num_alloc_vsi; i++) {
1544 vsi = pf->vsi[i];
1545 if (vsi && vsi->type == I40E_VSI_FCOE) {
1546 dev_warn(&pf->pdev->dev,
1547 "FCoE VSI already created\n");
1548 return;
1549 }
1550 }
1551
1552 seid = pf->vsi[pf->lan_vsi]->seid;
1553 vsi = i40e_vsi_setup(pf, I40E_VSI_FCOE, seid, 0);
1554 if (vsi) {
1555 dev_dbg(&pf->pdev->dev,
1556 "Successfully created FCoE VSI seid %d id %d uplink_seid %d pf seid %d\n",
1557 vsi->seid, vsi->id, vsi->uplink_seid, seid);
1558 } else {
1559 dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n");
1560 }
1561}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
new file mode 100644
index 000000000000..21e0f582031c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
@@ -0,0 +1,128 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 ******************************************************************************/
26
27#ifndef _I40E_FCOE_H_
28#define _I40E_FCOE_H_
29
30/* FCoE HW context helper macros */
31#define I40E_DDP_CONTEXT_DESC(R, i) \
32 (&(((struct i40e_fcoe_ddp_context_desc *)((R)->desc))[i]))
33
34#define I40E_QUEUE_CONTEXT_DESC(R, i) \
35 (&(((struct i40e_fcoe_queue_context_desc *)((R)->desc))[i]))
36
37#define I40E_FILTER_CONTEXT_DESC(R, i) \
38 (&(((struct i40e_fcoe_filter_context_desc *)((R)->desc))[i]))
39
40
41/* receive queue descriptor filter status for FCoE */
42#define I40E_RX_DESC_FLTSTAT_FCMASK 0x3
43#define I40E_RX_DESC_FLTSTAT_NOMTCH 0x0 /* no ddp context match */
44#define I40E_RX_DESC_FLTSTAT_NODDP 0x1 /* no ddp due to error */
45#define I40E_RX_DESC_FLTSTAT_DDP 0x2 /* DDPed payload, post header */
46#define I40E_RX_DESC_FLTSTAT_FCPRSP 0x3 /* FCP_RSP */
47
48/* receive queue descriptor error codes for FCoE */
49#define I40E_RX_DESC_FCOE_ERROR_MASK \
50 (I40E_RX_DESC_ERROR_L3L4E_PROT | \
51 I40E_RX_DESC_ERROR_L3L4E_FC | \
52 I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR | \
53 I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN)
54
55/* receive queue descriptor programming error */
56#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL(e) \
57 (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) & 0x1)
58
59#define I40E_RX_PROG_FCOE_ERROR_CONFLICT(e) \
60 (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1)
61
62#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT \
63 (1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
64#define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT \
65 (1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
66
67#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e) \
68 I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)
69#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL_BIT \
70 I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT
71
72/* FCoE DDP related definitions */
73#define I40E_FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
74#define I40E_FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
75#define I40E_FCOE_DDP_BUFFCNT_MAX 512 /* 9 bits bufcnt */
76#define I40E_FCOE_DDP_PTR_ALIGN 16
77#define I40E_FCOE_DDP_PTR_MAX (I40E_FCOE_DDP_BUFFCNT_MAX * sizeof(dma_addr_t))
78#define I40E_FCOE_DDP_BUF_MIN 4096
79#define I40E_FCOE_DDP_MAX 2048
80#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8
81
82/* supported netdev features for FCoE */
83#define I40E_FCOE_NETIF_FEATURES (NETIF_F_ALL_FCOE | \
84 NETIF_F_HW_VLAN_CTAG_TX | \
85 NETIF_F_HW_VLAN_CTAG_RX | \
86 NETIF_F_HW_VLAN_CTAG_FILTER)
87
88/* DDP context flags */
89enum i40e_fcoe_ddp_flags {
90 __I40E_FCOE_DDP_NONE = 1,
91 __I40E_FCOE_DDP_TARGET,
92 __I40E_FCOE_DDP_INITALIZED,
93 __I40E_FCOE_DDP_PROGRAMMED,
94 __I40E_FCOE_DDP_DONE,
95 __I40E_FCOE_DDP_ABORTED,
96 __I40E_FCOE_DDP_UNMAPPED,
97};
98
99/* DDP SW context struct */
100struct i40e_fcoe_ddp {
101 int len;
102 u16 xid;
103 u16 firstoff;
104 u16 lastsize;
105 u16 list_len;
106 u8 fcerr;
107 u8 prerr;
108 unsigned long flags;
109 unsigned int sgc;
110 struct scatterlist *sgl;
111 dma_addr_t udp;
112 u64 *udl;
113 struct dma_pool *pool;
114
115};
116
117struct i40e_fcoe_ddp_pool {
118 struct dma_pool *pool;
119};
120
121struct i40e_fcoe {
122 unsigned long mode;
123 atomic_t refcnt;
124 struct i40e_fcoe_ddp_pool __percpu *ddp_pool;
125 struct i40e_fcoe_ddp ddp[I40E_FCOE_DDP_MAX];
126};
127
128#endif /* _I40E_FCOE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 821fcc1adb85..51bc03072ed3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -269,7 +269,11 @@ static void i40e_service_event_schedule(struct i40e_pf *pf)
269 * device is munged, not just the one netdev port, so go for the full 269 * device is munged, not just the one netdev port, so go for the full
270 * reset. 270 * reset.
271 **/ 271 **/
272#ifdef I40E_FCOE
273void i40e_tx_timeout(struct net_device *netdev)
274#else
272static void i40e_tx_timeout(struct net_device *netdev) 275static void i40e_tx_timeout(struct net_device *netdev)
276#endif
273{ 277{
274 struct i40e_netdev_priv *np = netdev_priv(netdev); 278 struct i40e_netdev_priv *np = netdev_priv(netdev);
275 struct i40e_vsi *vsi = np->vsi; 279 struct i40e_vsi *vsi = np->vsi;
@@ -349,9 +353,15 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
349 * Returns the address of the device statistics structure. 353 * Returns the address of the device statistics structure.
350 * The statistics are actually updated from the service task. 354 * The statistics are actually updated from the service task.
351 **/ 355 **/
356#ifdef I40E_FCOE
357struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
358 struct net_device *netdev,
359 struct rtnl_link_stats64 *stats)
360#else
352static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( 361static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
353 struct net_device *netdev, 362 struct net_device *netdev,
354 struct rtnl_link_stats64 *stats) 363 struct rtnl_link_stats64 *stats)
364#endif
355{ 365{
356 struct i40e_netdev_priv *np = netdev_priv(netdev); 366 struct i40e_netdev_priv *np = netdev_priv(netdev);
357 struct i40e_ring *tx_ring, *rx_ring; 367 struct i40e_ring *tx_ring, *rx_ring;
@@ -636,6 +646,55 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
636 veb->stat_offsets_loaded = true; 646 veb->stat_offsets_loaded = true;
637} 647}
638 648
649#ifdef I40E_FCOE
650/**
651 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
652 * @vsi: the VSI that is capable of doing FCoE
653 **/
654static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
655{
656 struct i40e_pf *pf = vsi->back;
657 struct i40e_hw *hw = &pf->hw;
658 struct i40e_fcoe_stats *ofs;
659 struct i40e_fcoe_stats *fs; /* device's eth stats */
660 int idx;
661
662 if (vsi->type != I40E_VSI_FCOE)
663 return;
664
665 idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
666 fs = &vsi->fcoe_stats;
667 ofs = &vsi->fcoe_stats_offsets;
668
669 i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
670 vsi->fcoe_stat_offsets_loaded,
671 &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
672 i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
673 vsi->fcoe_stat_offsets_loaded,
674 &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
675 i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
676 vsi->fcoe_stat_offsets_loaded,
677 &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
678 i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
679 vsi->fcoe_stat_offsets_loaded,
680 &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
681 i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
682 vsi->fcoe_stat_offsets_loaded,
683 &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
684 i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
685 vsi->fcoe_stat_offsets_loaded,
686 &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
687 i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
688 vsi->fcoe_stat_offsets_loaded,
689 &ofs->fcoe_last_error, &fs->fcoe_last_error);
690 i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
691 vsi->fcoe_stat_offsets_loaded,
692 &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
693
694 vsi->fcoe_stat_offsets_loaded = true;
695}
696
697#endif
639/** 698/**
640 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode 699 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
641 * @pf: the corresponding PF 700 * @pf: the corresponding PF
@@ -1064,6 +1123,9 @@ void i40e_update_stats(struct i40e_vsi *vsi)
1064 i40e_update_pf_stats(pf); 1123 i40e_update_pf_stats(pf);
1065 1124
1066 i40e_update_vsi_stats(vsi); 1125 i40e_update_vsi_stats(vsi);
1126#ifdef I40E_FCOE
1127 i40e_update_fcoe_stats(vsi);
1128#endif
1067} 1129}
1068 1130
1069/** 1131/**
@@ -1315,7 +1377,11 @@ void i40e_del_filter(struct i40e_vsi *vsi,
1315 * 1377 *
1316 * Returns 0 on success, negative on failure 1378 * Returns 0 on success, negative on failure
1317 **/ 1379 **/
1380#ifdef I40E_FCOE
1381int i40e_set_mac(struct net_device *netdev, void *p)
1382#else
1318static int i40e_set_mac(struct net_device *netdev, void *p) 1383static int i40e_set_mac(struct net_device *netdev, void *p)
1384#endif
1319{ 1385{
1320 struct i40e_netdev_priv *np = netdev_priv(netdev); 1386 struct i40e_netdev_priv *np = netdev_priv(netdev);
1321 struct i40e_vsi *vsi = np->vsi; 1387 struct i40e_vsi *vsi = np->vsi;
@@ -1376,10 +1442,17 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
1376 * 1442 *
1377 * Setup VSI queue mapping for enabled traffic classes. 1443 * Setup VSI queue mapping for enabled traffic classes.
1378 **/ 1444 **/
1445#ifdef I40E_FCOE
1446void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1447 struct i40e_vsi_context *ctxt,
1448 u8 enabled_tc,
1449 bool is_add)
1450#else
1379static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, 1451static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1380 struct i40e_vsi_context *ctxt, 1452 struct i40e_vsi_context *ctxt,
1381 u8 enabled_tc, 1453 u8 enabled_tc,
1382 bool is_add) 1454 bool is_add)
1455#endif
1383{ 1456{
1384 struct i40e_pf *pf = vsi->back; 1457 struct i40e_pf *pf = vsi->back;
1385 u16 sections = 0; 1458 u16 sections = 0;
@@ -1425,6 +1498,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1425 case I40E_VSI_MAIN: 1498 case I40E_VSI_MAIN:
1426 qcount = min_t(int, pf->rss_size, num_tc_qps); 1499 qcount = min_t(int, pf->rss_size, num_tc_qps);
1427 break; 1500 break;
1501#ifdef I40E_FCOE
1502 case I40E_VSI_FCOE:
1503 qcount = num_tc_qps;
1504 break;
1505#endif
1428 case I40E_VSI_FDIR: 1506 case I40E_VSI_FDIR:
1429 case I40E_VSI_SRIOV: 1507 case I40E_VSI_SRIOV:
1430 case I40E_VSI_VMDQ2: 1508 case I40E_VSI_VMDQ2:
@@ -1491,7 +1569,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1491 * i40e_set_rx_mode - NDO callback to set the netdev filters 1569 * i40e_set_rx_mode - NDO callback to set the netdev filters
1492 * @netdev: network interface device structure 1570 * @netdev: network interface device structure
1493 **/ 1571 **/
1572#ifdef I40E_FCOE
1573void i40e_set_rx_mode(struct net_device *netdev)
1574#else
1494static void i40e_set_rx_mode(struct net_device *netdev) 1575static void i40e_set_rx_mode(struct net_device *netdev)
1576#endif
1495{ 1577{
1496 struct i40e_netdev_priv *np = netdev_priv(netdev); 1578 struct i40e_netdev_priv *np = netdev_priv(netdev);
1497 struct i40e_mac_filter *f, *ftmp; 1579 struct i40e_mac_filter *f, *ftmp;
@@ -2069,8 +2151,13 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
2069 * 2151 *
2070 * net_device_ops implementation for adding vlan ids 2152 * net_device_ops implementation for adding vlan ids
2071 **/ 2153 **/
2154#ifdef I40E_FCOE
2155int i40e_vlan_rx_add_vid(struct net_device *netdev,
2156 __always_unused __be16 proto, u16 vid)
2157#else
2072static int i40e_vlan_rx_add_vid(struct net_device *netdev, 2158static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2073 __always_unused __be16 proto, u16 vid) 2159 __always_unused __be16 proto, u16 vid)
2160#endif
2074{ 2161{
2075 struct i40e_netdev_priv *np = netdev_priv(netdev); 2162 struct i40e_netdev_priv *np = netdev_priv(netdev);
2076 struct i40e_vsi *vsi = np->vsi; 2163 struct i40e_vsi *vsi = np->vsi;
@@ -2103,8 +2190,13 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2103 * 2190 *
2104 * net_device_ops implementation for removing vlan ids 2191 * net_device_ops implementation for removing vlan ids
2105 **/ 2192 **/
2193#ifdef I40E_FCOE
2194int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2195 __always_unused __be16 proto, u16 vid)
2196#else
2106static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 2197static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2107 __always_unused __be16 proto, u16 vid) 2198 __always_unused __be16 proto, u16 vid)
2199#endif
2108{ 2200{
2109 struct i40e_netdev_priv *np = netdev_priv(netdev); 2201 struct i40e_netdev_priv *np = netdev_priv(netdev);
2110 struct i40e_vsi *vsi = np->vsi; 2202 struct i40e_vsi *vsi = np->vsi;
@@ -2236,6 +2328,9 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2236 2328
2237 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2329 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2238 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); 2330 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2331#ifdef I40E_FCOE
2332 i40e_fcoe_setup_ddp_resources(vsi);
2333#endif
2239 return err; 2334 return err;
2240} 2335}
2241 2336
@@ -2255,6 +2350,9 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2255 for (i = 0; i < vsi->num_queue_pairs; i++) 2350 for (i = 0; i < vsi->num_queue_pairs; i++)
2256 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) 2351 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2257 i40e_free_rx_resources(vsi->rx_rings[i]); 2352 i40e_free_rx_resources(vsi->rx_rings[i]);
2353#ifdef I40E_FCOE
2354 i40e_fcoe_free_ddp_resources(vsi);
2355#endif
2258} 2356}
2259 2357
2260/** 2358/**
@@ -2296,6 +2394,9 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
2296 tx_ctx.qlen = ring->count; 2394 tx_ctx.qlen = ring->count;
2297 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | 2395 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2298 I40E_FLAG_FD_ATR_ENABLED)); 2396 I40E_FLAG_FD_ATR_ENABLED));
2397#ifdef I40E_FCOE
2398 tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2399#endif
2299 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); 2400 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2300 /* FDIR VSI tx ring can still use RS bit and writebacks */ 2401 /* FDIR VSI tx ring can still use RS bit and writebacks */
2301 if (vsi->type != I40E_VSI_FDIR) 2402 if (vsi->type != I40E_VSI_FDIR)
@@ -2408,6 +2509,9 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
2408 rx_ctx.crcstrip = 1; 2509 rx_ctx.crcstrip = 1;
2409 rx_ctx.l2tsel = 1; 2510 rx_ctx.l2tsel = 1;
2410 rx_ctx.showiv = 1; 2511 rx_ctx.showiv = 1;
2512#ifdef I40E_FCOE
2513 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2514#endif
2411 /* set the prefena field to 1 because the manual says to */ 2515 /* set the prefena field to 1 because the manual says to */
2412 rx_ctx.prefena = 1; 2516 rx_ctx.prefena = 1;
2413 2517
@@ -2492,6 +2596,17 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2492 break; 2596 break;
2493 } 2597 }
2494 2598
2599#ifdef I40E_FCOE
2600 /* setup rx buffer for FCoE */
2601 if ((vsi->type == I40E_VSI_FCOE) &&
2602 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
2603 vsi->rx_hdr_len = 0;
2604 vsi->rx_buf_len = I40E_RXBUFFER_3072;
2605 vsi->max_frame = I40E_RXBUFFER_3072;
2606 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2607 }
2608
2609#endif /* I40E_FCOE */
2495 /* round up for the chip's needs */ 2610 /* round up for the chip's needs */
2496 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, 2611 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2497 (1 << I40E_RXQ_CTX_HBUFF_SHIFT)); 2612 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
@@ -3252,7 +3367,11 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3252 * This is used by netconsole to send skbs without having to re-enable 3367 * This is used by netconsole to send skbs without having to re-enable
3253 * interrupts. It's not called while the normal interrupt routine is executing. 3368 * interrupts. It's not called while the normal interrupt routine is executing.
3254 **/ 3369 **/
3370#ifdef I40E_FCOE
3371void i40e_netpoll(struct net_device *netdev)
3372#else
3255static void i40e_netpoll(struct net_device *netdev) 3373static void i40e_netpoll(struct net_device *netdev)
3374#endif
3256{ 3375{
3257 struct i40e_netdev_priv *np = netdev_priv(netdev); 3376 struct i40e_netdev_priv *np = netdev_priv(netdev);
3258 struct i40e_vsi *vsi = np->vsi; 3377 struct i40e_vsi *vsi = np->vsi;
@@ -4202,12 +4321,20 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4202 continue; 4321 continue;
4203 4322
4204 /* - Enable all TCs for the LAN VSI 4323 /* - Enable all TCs for the LAN VSI
4324#ifdef I40E_FCOE
4325 * - For FCoE VSI only enable the TC configured
4326 * as per the APP TLV
4327#endif
4205 * - For all others keep them at TC0 for now 4328 * - For all others keep them at TC0 for now
4206 */ 4329 */
4207 if (v == pf->lan_vsi) 4330 if (v == pf->lan_vsi)
4208 tc_map = i40e_pf_get_tc_map(pf); 4331 tc_map = i40e_pf_get_tc_map(pf);
4209 else 4332 else
4210 tc_map = i40e_pf_get_default_tc(pf); 4333 tc_map = i40e_pf_get_default_tc(pf);
4334#ifdef I40E_FCOE
4335 if (pf->vsi[v]->type == I40E_VSI_FCOE)
4336 tc_map = i40e_get_fcoe_tc_map(pf);
4337#endif /* #ifdef I40E_FCOE */
4211 4338
4212 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); 4339 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4213 if (ret) { 4340 if (ret) {
@@ -4434,7 +4561,11 @@ void i40e_down(struct i40e_vsi *vsi)
4434 * @netdev: net device to configure 4561 * @netdev: net device to configure
4435 * @tc: number of traffic classes to enable 4562 * @tc: number of traffic classes to enable
4436 **/ 4563 **/
4564#ifdef I40E_FCOE
4565int i40e_setup_tc(struct net_device *netdev, u8 tc)
4566#else
4437static int i40e_setup_tc(struct net_device *netdev, u8 tc) 4567static int i40e_setup_tc(struct net_device *netdev, u8 tc)
4568#endif
4438{ 4569{
4439 struct i40e_netdev_priv *np = netdev_priv(netdev); 4570 struct i40e_netdev_priv *np = netdev_priv(netdev);
4440 struct i40e_vsi *vsi = np->vsi; 4571 struct i40e_vsi *vsi = np->vsi;
@@ -4499,7 +4630,11 @@ exit:
4499 * 4630 *
4500 * Returns 0 on success, negative value on failure 4631 * Returns 0 on success, negative value on failure
4501 **/ 4632 **/
4633#ifdef I40E_FCOE
4634int i40e_open(struct net_device *netdev)
4635#else
4502static int i40e_open(struct net_device *netdev) 4636static int i40e_open(struct net_device *netdev)
4637#endif
4503{ 4638{
4504 struct i40e_netdev_priv *np = netdev_priv(netdev); 4639 struct i40e_netdev_priv *np = netdev_priv(netdev);
4505 struct i40e_vsi *vsi = np->vsi; 4640 struct i40e_vsi *vsi = np->vsi;
@@ -4635,7 +4770,11 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
4635 * 4770 *
4636 * Returns 0, this is not allowed to fail 4771 * Returns 0, this is not allowed to fail
4637 **/ 4772 **/
4773#ifdef I40E_FCOE
4774int i40e_close(struct net_device *netdev)
4775#else
4638static int i40e_close(struct net_device *netdev) 4776static int i40e_close(struct net_device *netdev)
4777#endif
4639{ 4778{
4640 struct i40e_netdev_priv *np = netdev_priv(netdev); 4779 struct i40e_netdev_priv *np = netdev_priv(netdev);
4641 struct i40e_vsi *vsi = np->vsi; 4780 struct i40e_vsi *vsi = np->vsi;
@@ -5050,6 +5189,9 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
5050 5189
5051 switch (vsi->type) { 5190 switch (vsi->type) {
5052 case I40E_VSI_MAIN: 5191 case I40E_VSI_MAIN:
5192#ifdef I40E_FCOE
5193 case I40E_VSI_FCOE:
5194#endif
5053 if (!vsi->netdev || !vsi->netdev_registered) 5195 if (!vsi->netdev || !vsi->netdev_registered)
5054 break; 5196 break;
5055 5197
@@ -5768,7 +5910,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5768 goto end_core_reset; 5910 goto end_core_reset;
5769 } 5911 }
5770#endif /* CONFIG_I40E_DCB */ 5912#endif /* CONFIG_I40E_DCB */
5913#ifdef I40E_FCOE
5914 ret = i40e_init_pf_fcoe(pf);
5915 if (ret)
5916 dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
5771 5917
5918#endif
5772 /* do basic switch setup */ 5919 /* do basic switch setup */
5773 ret = i40e_setup_pf_switch(pf, reinit); 5920 ret = i40e_setup_pf_switch(pf, reinit);
5774 if (ret) 5921 if (ret)
@@ -6107,6 +6254,15 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
6107 I40E_REQ_DESCRIPTOR_MULTIPLE); 6254 I40E_REQ_DESCRIPTOR_MULTIPLE);
6108 break; 6255 break;
6109 6256
6257#ifdef I40E_FCOE
6258 case I40E_VSI_FCOE:
6259 vsi->alloc_queue_pairs = pf->num_fcoe_qps;
6260 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6261 I40E_REQ_DESCRIPTOR_MULTIPLE);
6262 vsi->num_q_vectors = pf->num_fcoe_msix;
6263 break;
6264
6265#endif /* I40E_FCOE */
6110 default: 6266 default:
6111 WARN_ON(1); 6267 WARN_ON(1);
6112 return -ENODATA; 6268 return -ENODATA;
@@ -6418,6 +6574,9 @@ static int i40e_init_msix(struct i40e_pf *pf)
6418 * is governed by number of cpus in the system. 6574 * is governed by number of cpus in the system.
6419 * - assumes symmetric Tx/Rx pairing 6575 * - assumes symmetric Tx/Rx pairing
6420 * - The number of VMDq pairs 6576 * - The number of VMDq pairs
6577#ifdef I40E_FCOE
6578 * - The number of FCOE qps.
6579#endif
6421 * Once we count this up, try the request. 6580 * Once we count this up, try the request.
6422 * 6581 *
6423 * If we can't get what we want, we'll simplify to nearly nothing 6582 * If we can't get what we want, we'll simplify to nearly nothing
@@ -6430,6 +6589,13 @@ static int i40e_init_msix(struct i40e_pf *pf)
6430 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) 6589 if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
6431 v_budget++; 6590 v_budget++;
6432 6591
6592#ifdef I40E_FCOE
6593 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
6594 pf->num_fcoe_msix = pf->num_fcoe_qps;
6595 v_budget += pf->num_fcoe_msix;
6596 }
6597
6598#endif
6433 /* Scale down if necessary, and the rings will share vectors */ 6599 /* Scale down if necessary, and the rings will share vectors */
6434 v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors); 6600 v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
6435 6601
@@ -6448,6 +6614,10 @@ static int i40e_init_msix(struct i40e_pf *pf)
6448 * of these features based on the policy and at the end disable 6614 * of these features based on the policy and at the end disable
6449 * the features that did not get any vectors. 6615 * the features that did not get any vectors.
6450 */ 6616 */
6617#ifdef I40E_FCOE
6618 pf->num_fcoe_qps = 0;
6619 pf->num_fcoe_msix = 0;
6620#endif
6451 pf->num_vmdq_msix = 0; 6621 pf->num_vmdq_msix = 0;
6452 } 6622 }
6453 6623
@@ -6478,9 +6648,24 @@ static int i40e_init_msix(struct i40e_pf *pf)
6478 pf->num_lan_msix = 1; 6648 pf->num_lan_msix = 1;
6479 break; 6649 break;
6480 case 3: 6650 case 3:
6651#ifdef I40E_FCOE
6652 /* give one vector to FCoE */
6653 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
6654 pf->num_lan_msix = 1;
6655 pf->num_fcoe_msix = 1;
6656 }
6657#else
6481 pf->num_lan_msix = 2; 6658 pf->num_lan_msix = 2;
6659#endif
6482 break; 6660 break;
6483 default: 6661 default:
6662#ifdef I40E_FCOE
6663 /* give one vector to FCoE */
6664 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
6665 pf->num_fcoe_msix = 1;
6666 vec--;
6667 }
6668#endif
6484 pf->num_lan_msix = min_t(int, (vec / 2), 6669 pf->num_lan_msix = min_t(int, (vec / 2),
6485 pf->num_lan_qps); 6670 pf->num_lan_qps);
6486 pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix), 6671 pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
@@ -6494,6 +6679,13 @@ static int i40e_init_msix(struct i40e_pf *pf)
6494 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); 6679 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
6495 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; 6680 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
6496 } 6681 }
6682#ifdef I40E_FCOE
6683
6684 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
6685 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
6686 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
6687 }
6688#endif
6497 return err; 6689 return err;
6498} 6690}
6499 6691
@@ -6577,6 +6769,9 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
6577 err = i40e_init_msix(pf); 6769 err = i40e_init_msix(pf);
6578 if (err) { 6770 if (err) {
6579 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | 6771 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
6772#ifdef I40E_FCOE
6773 I40E_FLAG_FCOE_ENABLED |
6774#endif
6580 I40E_FLAG_RSS_ENABLED | 6775 I40E_FLAG_RSS_ENABLED |
6581 I40E_FLAG_DCB_CAPABLE | 6776 I40E_FLAG_DCB_CAPABLE |
6582 I40E_FLAG_SRIOV_ENABLED | 6777 I40E_FLAG_SRIOV_ENABLED |
@@ -6814,6 +7009,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
6814 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ; 7009 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
6815 } 7010 }
6816 7011
7012#ifdef I40E_FCOE
7013 err = i40e_init_pf_fcoe(pf);
7014 if (err)
7015 dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
7016
7017#endif /* I40E_FCOE */
6817#ifdef CONFIG_PCI_IOV 7018#ifdef CONFIG_PCI_IOV
6818 if (pf->hw.func_caps.num_vfs) { 7019 if (pf->hw.func_caps.num_vfs) {
6819 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 7020 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
@@ -7141,6 +7342,10 @@ static const struct net_device_ops i40e_netdev_ops = {
7141 .ndo_poll_controller = i40e_netpoll, 7342 .ndo_poll_controller = i40e_netpoll,
7142#endif 7343#endif
7143 .ndo_setup_tc = i40e_setup_tc, 7344 .ndo_setup_tc = i40e_setup_tc,
7345#ifdef I40E_FCOE
7346 .ndo_fcoe_enable = i40e_fcoe_enable,
7347 .ndo_fcoe_disable = i40e_fcoe_disable,
7348#endif
7144 .ndo_set_features = i40e_set_features, 7349 .ndo_set_features = i40e_set_features,
7145 .ndo_set_vf_mac = i40e_ndo_set_vf_mac, 7350 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
7146 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, 7351 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
@@ -7249,6 +7454,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
7249 netdev->netdev_ops = &i40e_netdev_ops; 7454 netdev->netdev_ops = &i40e_netdev_ops;
7250 netdev->watchdog_timeo = 5 * HZ; 7455 netdev->watchdog_timeo = 5 * HZ;
7251 i40e_set_ethtool_ops(netdev); 7456 i40e_set_ethtool_ops(netdev);
7457#ifdef I40E_FCOE
7458 i40e_fcoe_config_netdev(netdev, vsi);
7459#endif
7252 7460
7253 return 0; 7461 return 0;
7254} 7462}
@@ -7368,7 +7576,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
7368 * should be set to zero by default. 7576 * should be set to zero by default.
7369 */ 7577 */
7370 ctxt.info.switch_id = 0; 7578 ctxt.info.switch_id = 0;
7371 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
7372 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 7579 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
7373 7580
7374 /* Setup the VSI tx/rx queue map for TC0 only for now */ 7581 /* Setup the VSI tx/rx queue map for TC0 only for now */
@@ -7402,6 +7609,16 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
7402 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 7609 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
7403 break; 7610 break;
7404 7611
7612#ifdef I40E_FCOE
7613 case I40E_VSI_FCOE:
7614 ret = i40e_fcoe_vsi_init(vsi, &ctxt);
7615 if (ret) {
7616 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
7617 return ret;
7618 }
7619 break;
7620
7621#endif /* I40E_FCOE */
7405 default: 7622 default:
7406 return -ENODEV; 7623 return -ENODEV;
7407 } 7624 }
@@ -7760,6 +7977,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
7760 /* setup the netdev if needed */ 7977 /* setup the netdev if needed */
7761 case I40E_VSI_MAIN: 7978 case I40E_VSI_MAIN:
7762 case I40E_VSI_VMDQ2: 7979 case I40E_VSI_VMDQ2:
7980 case I40E_VSI_FCOE:
7763 ret = i40e_config_netdev(vsi); 7981 ret = i40e_config_netdev(vsi);
7764 if (ret) 7982 if (ret)
7765 goto err_netdev; 7983 goto err_netdev;
@@ -8378,6 +8596,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
8378 int queues_left; 8596 int queues_left;
8379 8597
8380 pf->num_lan_qps = 0; 8598 pf->num_lan_qps = 0;
8599#ifdef I40E_FCOE
8600 pf->num_fcoe_qps = 0;
8601#endif
8381 8602
8382 /* Find the max queues to be put into basic use. We'll always be 8603 /* Find the max queues to be put into basic use. We'll always be
8383 * using TC0, whether or not DCB is running, and TC0 will get the 8604 * using TC0, whether or not DCB is running, and TC0 will get the
@@ -8393,6 +8614,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
8393 8614
8394 /* make sure all the fancies are disabled */ 8615 /* make sure all the fancies are disabled */
8395 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 8616 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
8617#ifdef I40E_FCOE
8618 I40E_FLAG_FCOE_ENABLED |
8619#endif
8396 I40E_FLAG_FD_SB_ENABLED | 8620 I40E_FLAG_FD_SB_ENABLED |
8397 I40E_FLAG_FD_ATR_ENABLED | 8621 I40E_FLAG_FD_ATR_ENABLED |
8398 I40E_FLAG_DCB_CAPABLE | 8622 I40E_FLAG_DCB_CAPABLE |
@@ -8407,6 +8631,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
8407 queues_left -= pf->num_lan_qps; 8631 queues_left -= pf->num_lan_qps;
8408 8632
8409 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 8633 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
8634#ifdef I40E_FCOE
8635 I40E_FLAG_FCOE_ENABLED |
8636#endif
8410 I40E_FLAG_FD_SB_ENABLED | 8637 I40E_FLAG_FD_SB_ENABLED |
8411 I40E_FLAG_FD_ATR_ENABLED | 8638 I40E_FLAG_FD_ATR_ENABLED |
8412 I40E_FLAG_DCB_ENABLED | 8639 I40E_FLAG_DCB_ENABLED |
@@ -8422,6 +8649,22 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
8422 queues_left -= pf->num_lan_qps; 8649 queues_left -= pf->num_lan_qps;
8423 } 8650 }
8424 8651
8652#ifdef I40E_FCOE
8653 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
8654 if (I40E_DEFAULT_FCOE <= queues_left) {
8655 pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
8656 } else if (I40E_MINIMUM_FCOE <= queues_left) {
8657 pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
8658 } else {
8659 pf->num_fcoe_qps = 0;
8660 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
8661 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
8662 }
8663
8664 queues_left -= pf->num_fcoe_qps;
8665 }
8666
8667#endif
8425 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 8668 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8426 if (queues_left > 1) { 8669 if (queues_left > 1) {
8427 queues_left -= 1; /* save 1 queue for FD */ 8670 queues_left -= 1; /* save 1 queue for FD */
@@ -8446,6 +8689,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
8446 } 8689 }
8447 8690
8448 pf->queues_left = queues_left; 8691 pf->queues_left = queues_left;
8692#ifdef I40E_FCOE
8693 dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
8694#endif
8449} 8695}
8450 8696
8451/** 8697/**
@@ -8512,6 +8758,10 @@ static void i40e_print_features(struct i40e_pf *pf)
8512 buf += sprintf(buf, "DCB "); 8758 buf += sprintf(buf, "DCB ");
8513 if (pf->flags & I40E_FLAG_PTP) 8759 if (pf->flags & I40E_FLAG_PTP)
8514 buf += sprintf(buf, "PTP "); 8760 buf += sprintf(buf, "PTP ");
8761#ifdef I40E_FCOE
8762 if (pf->flags & I40E_FLAG_FCOE_ENABLED)
8763 buf += sprintf(buf, "FCOE ");
8764#endif
8515 8765
8516 BUG_ON(buf > (string + INFO_STRING_LEN)); 8766 BUG_ON(buf > (string + INFO_STRING_LEN));
8517 dev_info(&pf->pdev->dev, "%s\n", string); 8767 dev_info(&pf->pdev->dev, "%s\n", string);
@@ -8699,6 +8949,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8699 i40e_get_port_mac_addr(hw, hw->mac.port_addr); 8949 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
8700 if (is_valid_ether_addr(hw->mac.port_addr)) 8950 if (is_valid_ether_addr(hw->mac.port_addr))
8701 pf->flags |= I40E_FLAG_PORT_ID_VALID; 8951 pf->flags |= I40E_FLAG_PORT_ID_VALID;
8952#ifdef I40E_FCOE
8953 err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
8954 if (err)
8955 dev_info(&pdev->dev,
8956 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
8957 if (!is_valid_ether_addr(hw->mac.san_addr)) {
8958 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
8959 hw->mac.san_addr);
8960 ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
8961 }
8962 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
8963#endif /* I40E_FCOE */
8702 8964
8703 pci_set_drvdata(pdev, pf); 8965 pci_set_drvdata(pdev, pf);
8704 pci_save_state(pdev); 8966 pci_save_state(pdev);
@@ -8815,6 +9077,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8815 mod_timer(&pf->service_timer, 9077 mod_timer(&pf->service_timer,
8816 round_jiffies(jiffies + pf->service_timer_period)); 9078 round_jiffies(jiffies + pf->service_timer_period));
8817 9079
9080#ifdef I40E_FCOE
9081 /* create FCoE interface */
9082 i40e_fcoe_vsi_setup(pf);
9083
9084#endif
8818 /* Get the negotiated link width and speed from PCI config space */ 9085 /* Get the negotiated link width and speed from PCI config space */
8819 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status); 9086 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
8820 9087
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index ecd0f0b663c9..045b5c4b98b3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -78,4 +78,7 @@ do { \
78} while (0) 78} while (0)
79 79
80typedef enum i40e_status_code i40e_status; 80typedef enum i40e_status_code i40e_status;
81#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
82#define I40E_FCOE
83#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
81#endif /* _I40E_OSDEP_H_ */ 84#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index a91d7e1a5b5b..949a9a01778b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -70,6 +70,9 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
70 u16 *fw_major_version, u16 *fw_minor_version, 70 u16 *fw_major_version, u16 *fw_minor_version,
71 u16 *api_major_version, u16 *api_minor_version, 71 u16 *api_major_version, u16 *api_minor_version,
72 struct i40e_asq_cmd_details *cmd_details); 72 struct i40e_asq_cmd_details *cmd_details);
73i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
74 u32 reg_addr, u64 reg_val,
75 struct i40e_asq_cmd_details *cmd_details);
73i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, 76i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
74 struct i40e_asq_cmd_details *cmd_details); 77 struct i40e_asq_cmd_details *cmd_details);
75i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, 78i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
@@ -237,6 +240,9 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
237i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); 240i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
238i40e_status i40e_validate_mac_addr(u8 *mac_addr); 241i40e_status i40e_validate_mac_addr(u8 *mac_addr);
239void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable); 242void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
243#ifdef I40E_FCOE
244i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
245#endif
240/* prototype for functions used for NVM access */ 246/* prototype for functions used for NVM access */
241i40e_status i40e_init_nvm(struct i40e_hw *hw); 247i40e_status i40e_init_nvm(struct i40e_hw *hw);
242i40e_status i40e_acquire_nvm(struct i40e_hw *hw, 248i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index d26d6836689d..a51aa37b7b5a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -896,6 +896,11 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
896 896
897 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) 897 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
898 i40e_fd_handle_status(rx_ring, rx_desc, id); 898 i40e_fd_handle_status(rx_ring, rx_desc, id);
899#ifdef I40E_FCOE
900 else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
901 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
902 i40e_fcoe_handle_status(rx_ring, rx_desc, id);
903#endif
899} 904}
900 905
901/** 906/**
@@ -1489,6 +1494,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1489 vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) 1494 vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1490 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) 1495 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1491 : 0; 1496 : 0;
1497#ifdef I40E_FCOE
1498 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1499 dev_kfree_skb_any(skb);
1500 goto next_desc;
1501 }
1502#endif
1492 i40e_receive_skb(rx_ring, skb, vlan_tag); 1503 i40e_receive_skb(rx_ring, skb, vlan_tag);
1493 1504
1494 rx_ring->netdev->last_rx = jiffies; 1505 rx_ring->netdev->last_rx = jiffies;
@@ -1719,9 +1730,15 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1719 * Returns error code indicate the frame should be dropped upon error and the 1730 * Returns error code indicate the frame should be dropped upon error and the
1720 * otherwise returns 0 to indicate the flags has been set properly. 1731 * otherwise returns 0 to indicate the flags has been set properly.
1721 **/ 1732 **/
1733#ifdef I40E_FCOE
1734int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1735 struct i40e_ring *tx_ring,
1736 u32 *flags)
1737#else
1722static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, 1738static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1723 struct i40e_ring *tx_ring, 1739 struct i40e_ring *tx_ring,
1724 u32 *flags) 1740 u32 *flags)
1741#endif
1725{ 1742{
1726 __be16 protocol = skb->protocol; 1743 __be16 protocol = skb->protocol;
1727 u32 tx_flags = 0; 1744 u32 tx_flags = 0;
@@ -1743,9 +1760,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1743 } 1760 }
1744 1761
1745 /* Insert 802.1p priority into VLAN header */ 1762 /* Insert 802.1p priority into VLAN header */
1746 if ((tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED) && 1763 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
1747 ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) || 1764 (skb->priority != TC_PRIO_CONTROL)) {
1748 (skb->priority != TC_PRIO_CONTROL))) {
1749 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK; 1765 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
1750 tx_flags |= (skb->priority & 0x7) << 1766 tx_flags |= (skb->priority & 0x7) <<
1751 I40E_TX_FLAGS_VLAN_PRIO_SHIFT; 1767 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
@@ -2018,9 +2034,15 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2018 * @td_cmd: the command field in the descriptor 2034 * @td_cmd: the command field in the descriptor
2019 * @td_offset: offset for checksum or crc 2035 * @td_offset: offset for checksum or crc
2020 **/ 2036 **/
2037#ifdef I40E_FCOE
2038void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2039 struct i40e_tx_buffer *first, u32 tx_flags,
2040 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2041#else
2021static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, 2042static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2022 struct i40e_tx_buffer *first, u32 tx_flags, 2043 struct i40e_tx_buffer *first, u32 tx_flags,
2023 const u8 hdr_len, u32 td_cmd, u32 td_offset) 2044 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2045#endif
2024{ 2046{
2025 unsigned int data_len = skb->data_len; 2047 unsigned int data_len = skb->data_len;
2026 unsigned int size = skb_headlen(skb); 2048 unsigned int size = skb_headlen(skb);
@@ -2197,7 +2219,11 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2197 * 2219 *
2198 * Returns 0 if stop is not needed 2220 * Returns 0 if stop is not needed
2199 **/ 2221 **/
2222#ifdef I40E_FCOE
2223int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2224#else
2200static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) 2225static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2226#endif
2201{ 2227{
2202 if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) 2228 if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
2203 return 0; 2229 return 0;
@@ -2213,8 +2239,13 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2213 * there is not enough descriptors available in this ring since we need at least 2239 * there is not enough descriptors available in this ring since we need at least
2214 * one descriptor. 2240 * one descriptor.
2215 **/ 2241 **/
2242#ifdef I40E_FCOE
2243int i40e_xmit_descriptor_count(struct sk_buff *skb,
2244 struct i40e_ring *tx_ring)
2245#else
2216static int i40e_xmit_descriptor_count(struct sk_buff *skb, 2246static int i40e_xmit_descriptor_count(struct sk_buff *skb,
2217 struct i40e_ring *tx_ring) 2247 struct i40e_ring *tx_ring)
2248#endif
2218{ 2249{
2219 unsigned int f; 2250 unsigned int f;
2220 int count = 0; 2251 int count = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index c1c356984b17..73f4fa425697 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -290,4 +290,13 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
290void i40e_free_tx_resources(struct i40e_ring *tx_ring); 290void i40e_free_tx_resources(struct i40e_ring *tx_ring);
291void i40e_free_rx_resources(struct i40e_ring *rx_ring); 291void i40e_free_rx_resources(struct i40e_ring *rx_ring);
292int i40e_napi_poll(struct napi_struct *napi, int budget); 292int i40e_napi_poll(struct napi_struct *napi, int budget);
293#ifdef I40E_FCOE
294void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
295 struct i40e_tx_buffer *first, u32 tx_flags,
296 const u8 hdr_len, u32 td_cmd, u32 td_offset);
297int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
298int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring);
299int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
300 struct i40e_ring *tx_ring, u32 *flags);
301#endif
293#endif /* _I40E_TXRX_H_ */ 302#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 8bb9049191cb..ce04d9093db6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1051,6 +1051,25 @@ struct i40e_eth_stats {
1051 u64 tx_errors; /* tepc */ 1051 u64 tx_errors; /* tepc */
1052}; 1052};
1053 1053
1054#ifdef I40E_FCOE
1055/* Statistics collected per function for FCoE */
1056struct i40e_fcoe_stats {
1057 u64 rx_fcoe_packets; /* fcoeprc */
1058 u64 rx_fcoe_dwords; /* focedwrc */
1059 u64 rx_fcoe_dropped; /* fcoerpdc */
1060 u64 tx_fcoe_packets; /* fcoeptc */
1061 u64 tx_fcoe_dwords; /* focedwtc */
1062 u64 fcoe_bad_fccrc; /* fcoecrc */
1063 u64 fcoe_last_error; /* fcoelast */
1064 u64 fcoe_ddp_count; /* fcoeddpc */
1065};
1066
1067/* offset to per function FCoE statistics block */
1068#define I40E_FCOE_VF_STAT_OFFSET 0
1069#define I40E_FCOE_PF_STAT_OFFSET 128
1070#define I40E_FCOE_STAT_MAX (I40E_FCOE_PF_STAT_OFFSET + I40E_MAX_PF)
1071
1072#endif
1054/* Statistics collected by the MAC */ 1073/* Statistics collected by the MAC */
1055struct i40e_hw_port_stats { 1074struct i40e_hw_port_stats {
1056 /* eth stats collected by the port */ 1075 /* eth stats collected by the port */
@@ -1131,6 +1150,125 @@ struct i40e_hw_port_stats {
1131 1150
1132#define I40E_SRRD_SRCTL_ATTEMPTS 100000 1151#define I40E_SRRD_SRCTL_ATTEMPTS 100000
1133 1152
1153#ifdef I40E_FCOE
1154/* FCoE Tx context descriptor - Use the i40e_tx_context_desc struct */
1155
1156enum i40E_fcoe_tx_ctx_desc_cmd_bits {
1157 I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND = 0x00, /* 4 BITS */
1158 I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2 = 0x01, /* 4 BITS */
1159 I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3 = 0x05, /* 4 BITS */
1160 I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2 = 0x02, /* 4 BITS */
1161 I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3 = 0x06, /* 4 BITS */
1162 I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2 = 0x03, /* 4 BITS */
1163 I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3 = 0x07, /* 4 BITS */
1164 I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL = 0x08, /* 4 BITS */
1165 I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL = 0x09, /* 4 BITS */
1166 I40E_FCOE_TX_CTX_DESC_RELOFF = 0x10,
1167 I40E_FCOE_TX_CTX_DESC_CLRSEQ = 0x20,
1168 I40E_FCOE_TX_CTX_DESC_DIFENA = 0x40,
1169 I40E_FCOE_TX_CTX_DESC_IL2TAG2 = 0x80
1170};
1171
1172/* FCoE DDP Context descriptor */
1173struct i40e_fcoe_ddp_context_desc {
1174 __le64 rsvd;
1175 __le64 type_cmd_foff_lsize;
1176};
1177
1178#define I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT 0
1179#define I40E_FCOE_DDP_CTX_QW1_DTYPE_MASK (0xFULL << \
1180 I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT)
1181
1182#define I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT 4
1183#define I40E_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \
1184 I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT)
1185
1186enum i40e_fcoe_ddp_ctx_desc_cmd_bits {
1187 I40E_FCOE_DDP_CTX_DESC_BSIZE_512B = 0x00, /* 2 BITS */
1188 I40E_FCOE_DDP_CTX_DESC_BSIZE_4K = 0x01, /* 2 BITS */
1189 I40E_FCOE_DDP_CTX_DESC_BSIZE_8K = 0x02, /* 2 BITS */
1190 I40E_FCOE_DDP_CTX_DESC_BSIZE_16K = 0x03, /* 2 BITS */
1191 I40E_FCOE_DDP_CTX_DESC_DIFENA = 0x04, /* 1 BIT */
1192 I40E_FCOE_DDP_CTX_DESC_LASTSEQH = 0x08, /* 1 BIT */
1193};
1194
1195#define I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT 16
1196#define I40E_FCOE_DDP_CTX_QW1_FOFF_MASK (0x3FFFULL << \
1197 I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT)
1198
1199#define I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT 32
1200#define I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK (0x3FFFULL << \
1201 I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT)
1202
1203/* FCoE DDP/DWO Queue Context descriptor */
1204struct i40e_fcoe_queue_context_desc {
1205 __le64 dmaindx_fbase; /* 0:11 DMAINDX, 12:63 FBASE */
1206 __le64 flen_tph; /* 0:12 FLEN, 13:15 TPH */
1207};
1208
1209#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT 0
1210#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK (0xFFFULL << \
1211 I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT)
1212
1213#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT 12
1214#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_MASK (0xFFFFFFFFFFFFFULL << \
1215 I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT)
1216
1217#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT 0
1218#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_MASK (0x1FFFULL << \
1219 I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
1220
1221#define I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT 13
1222#define I40E_FCOE_QUEUE_CTX_QW1_TPH_MASK (0x7ULL << \
1223 I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
1224
1225enum i40e_fcoe_queue_ctx_desc_tph_bits {
1226 I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC = 0x1,
1227 I40E_FCOE_QUEUE_CTX_DESC_TPHDATA = 0x2
1228};
1229
1230#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT 30
1231#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_MASK (0x3ULL << \
1232 I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT)
1233
1234/* FCoE DDP/DWO Filter Context descriptor */
1235struct i40e_fcoe_filter_context_desc {
1236 __le32 param;
1237 __le16 seqn;
1238
1239 /* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */
1240 __le16 rsvd_dmaindx;
1241
1242 /* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */
1243 __le64 flags_rsvd_lanq;
1244};
1245
1246#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4
1247#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_MASK (0xFFF << \
1248 I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT)
1249
1250enum i40e_fcoe_filter_ctx_desc_flags_bits {
1251 I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP = 0x00,
1252 I40E_FCOE_FILTER_CTX_DESC_CTYP_DWO = 0x01,
1253 I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT = 0x00,
1254 I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP = 0x02,
1255 I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 = 0x00,
1256 I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3 = 0x04
1257};
1258
1259#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT 0
1260#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK (0xFFULL << \
1261 I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT)
1262
1263#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8
1264#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_MASK (0x3FULL << \
1265 I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT)
1266
1267#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT 53
1268#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK (0x7FFULL << \
1269 I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT)
1270
1271#endif /* I40E_FCOE */
1134enum i40e_switch_element_types { 1272enum i40e_switch_element_types {
1135 I40E_SWITCH_ELEMENT_TYPE_MAC = 1, 1273 I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
1136 I40E_SWITCH_ELEMENT_TYPE_PF = 2, 1274 I40E_SWITCH_ELEMENT_TYPE_PF = 2,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index cafda0cfc1a9..89672551dce9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -669,7 +669,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
669 */ 669 */
670 for (i = 0; i < 100; i++) { 670 for (i = 0; i < 100; i++) {
671 /* vf reset requires driver to first reset the 671 /* vf reset requires driver to first reset the
672 * vf & than poll the status register to make sure 672 * vf and then poll the status register to make sure
673 * that the requested op was completed 673 * that the requested op was completed
674 * successfully 674 * successfully
675 */ 675 */
@@ -1005,7 +1005,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1005{ 1005{
1006 struct i40e_pf *pf = vf->pf; 1006 struct i40e_pf *pf = vf->pf;
1007 struct i40e_hw *hw = &pf->hw; 1007 struct i40e_hw *hw = &pf->hw;
1008 int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 1008 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1009 i40e_status aq_ret; 1009 i40e_status aq_ret;
1010 1010
1011 /* single place to detect unsuccessful return values */ 1011 /* single place to detect unsuccessful return values */
@@ -1025,7 +1025,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1025 vf->num_valid_msgs++; 1025 vf->num_valid_msgs++;
1026 } 1026 }
1027 1027
1028 aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id, v_opcode, v_retval, 1028 aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1029 msg, msglen, NULL); 1029 msg, msglen, NULL);
1030 if (aq_ret) { 1030 if (aq_ret) {
1031 dev_err(&pf->pdev->dev, 1031 dev_err(&pf->pdev->dev,
@@ -1163,8 +1163,8 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
1163 (struct i40e_virtchnl_promisc_info *)msg; 1163 (struct i40e_virtchnl_promisc_info *)msg;
1164 struct i40e_pf *pf = vf->pf; 1164 struct i40e_pf *pf = vf->pf;
1165 struct i40e_hw *hw = &pf->hw; 1165 struct i40e_hw *hw = &pf->hw;
1166 struct i40e_vsi *vsi;
1166 bool allmulti = false; 1167 bool allmulti = false;
1167 bool promisc = false;
1168 i40e_status aq_ret; 1168 i40e_status aq_ret;
1169 1169
1170 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 1170 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
@@ -1174,17 +1174,10 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
1174 aq_ret = I40E_ERR_PARAM; 1174 aq_ret = I40E_ERR_PARAM;
1175 goto error_param; 1175 goto error_param;
1176 } 1176 }
1177 1177 vsi = pf->vsi[info->vsi_id];
1178 if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
1179 promisc = true;
1180 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
1181 promisc, NULL);
1182 if (aq_ret)
1183 goto error_param;
1184
1185 if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC) 1178 if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
1186 allmulti = true; 1179 allmulti = true;
1187 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id, 1180 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1188 allmulti, NULL); 1181 allmulti, NULL);
1189 1182
1190error_param: 1183error_param:
@@ -1935,15 +1928,17 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
1935{ 1928{
1936 struct i40e_hw *hw = &pf->hw; 1929 struct i40e_hw *hw = &pf->hw;
1937 struct i40e_vf *vf = pf->vf; 1930 struct i40e_vf *vf = pf->vf;
1931 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1938 int i; 1932 int i;
1939 1933
1940 for (i = 0; i < pf->num_alloc_vfs; i++) { 1934 for (i = 0; i < pf->num_alloc_vfs; i++) {
1941 /* Ignore return value on purpose - a given VF may fail, but 1935 /* Ignore return value on purpose - a given VF may fail, but
1942 * we need to keep going and send to all of them 1936 * we need to keep going and send to all of them
1943 */ 1937 */
1944 i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, 1938 i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1945 msg, msglen, NULL); 1939 msg, msglen, NULL);
1946 vf++; 1940 vf++;
1941 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1947 } 1942 }
1948} 1943}
1949 1944
@@ -1959,6 +1954,7 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf)
1959 struct i40e_hw *hw = &pf->hw; 1954 struct i40e_hw *hw = &pf->hw;
1960 struct i40e_vf *vf = pf->vf; 1955 struct i40e_vf *vf = pf->vf;
1961 struct i40e_link_status *ls = &pf->hw.phy.link_info; 1956 struct i40e_link_status *ls = &pf->hw.phy.link_info;
1957 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1962 int i; 1958 int i;
1963 1959
1964 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; 1960 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
@@ -1973,10 +1969,11 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf)
1973 ls->link_info & I40E_AQ_LINK_UP; 1969 ls->link_info & I40E_AQ_LINK_UP;
1974 pfe.event_data.link_event.link_speed = ls->link_speed; 1970 pfe.event_data.link_event.link_speed = ls->link_speed;
1975 } 1971 }
1976 i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT, 1972 i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
1977 0, (u8 *)&pfe, sizeof(pfe), 1973 0, (u8 *)&pfe, sizeof(pfe),
1978 NULL); 1974 NULL);
1979 vf++; 1975 vf++;
1976 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1980 } 1977 }
1981} 1978}
1982 1979
@@ -2005,10 +2002,11 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
2005void i40e_vc_notify_vf_reset(struct i40e_vf *vf) 2002void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
2006{ 2003{
2007 struct i40e_virtchnl_pf_event pfe; 2004 struct i40e_virtchnl_pf_event pfe;
2005 int abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
2008 2006
2009 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; 2007 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
2010 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; 2008 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
2011 i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT, 2009 i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
2012 I40E_SUCCESS, (u8 *)&pfe, 2010 I40E_SUCCESS, (u8 *)&pfe,
2013 sizeof(struct i40e_virtchnl_pf_event), NULL); 2011 sizeof(struct i40e_virtchnl_pf_event), NULL);
2014} 2012}
@@ -2345,6 +2343,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
2345 struct i40e_virtchnl_pf_event pfe; 2343 struct i40e_virtchnl_pf_event pfe;
2346 struct i40e_hw *hw = &pf->hw; 2344 struct i40e_hw *hw = &pf->hw;
2347 struct i40e_vf *vf; 2345 struct i40e_vf *vf;
2346 int abs_vf_id;
2348 int ret = 0; 2347 int ret = 0;
2349 2348
2350 /* validate the request */ 2349 /* validate the request */
@@ -2355,6 +2354,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
2355 } 2354 }
2356 2355
2357 vf = &pf->vf[vf_id]; 2356 vf = &pf->vf[vf_id];
2357 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
2358 2358
2359 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; 2359 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
2360 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; 2360 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
@@ -2384,7 +2384,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
2384 goto error_out; 2384 goto error_out;
2385 } 2385 }
2386 /* Notify the VF of its new link state */ 2386 /* Notify the VF of its new link state */
2387 i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT, 2387 i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
2388 0, (u8 *)&pfe, sizeof(pfe), NULL); 2388 0, (u8 *)&pfe, sizeof(pfe), NULL);
2389 2389
2390error_out: 2390error_out:
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index e70e4cdb0eb2..efee6b290c0f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -193,7 +193,7 @@ static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
193} 193}
194 194
195/** 195/**
196 * i40evf_get_drvinto - Get driver info 196 * i40evf_get_drvinfo - Get driver info
197 * @netdev: network interface device structure 197 * @netdev: network interface device structure
198 * @drvinfo: ethool driver info structure 198 * @drvinfo: ethool driver info structure
199 * 199 *
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index a53e81bb0960..ab15f4d07e41 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -527,7 +527,8 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
527 struct net_device *netdev = adapter->netdev; 527 struct net_device *netdev = adapter->netdev;
528 int err; 528 int err;
529 529
530 sprintf(adapter->misc_vector_name, "i40evf:mbx"); 530 snprintf(adapter->misc_vector_name,
531 sizeof(adapter->misc_vector_name) - 1, "i40evf:mbx");
531 err = request_irq(adapter->msix_entries[0].vector, 532 err = request_irq(adapter->msix_entries[0].vector,
532 &i40evf_msix_aq, 0, 533 &i40evf_msix_aq, 0,
533 adapter->misc_vector_name, netdev); 534 adapter->misc_vector_name, netdev);
@@ -1297,12 +1298,16 @@ static void i40evf_watchdog_task(struct work_struct *work)
1297 struct i40evf_adapter, 1298 struct i40evf_adapter,
1298 watchdog_task); 1299 watchdog_task);
1299 struct i40e_hw *hw = &adapter->hw; 1300 struct i40e_hw *hw = &adapter->hw;
1301 uint32_t rstat_val;
1300 1302
1301 if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) 1303 if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
1302 goto restart_watchdog; 1304 goto restart_watchdog;
1303 1305
1304 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { 1306 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
1305 if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) { 1307 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
1308 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1309 if ((rstat_val == I40E_VFR_VFACTIVE) ||
1310 (rstat_val == I40E_VFR_COMPLETED)) {
1306 /* A chance for redemption! */ 1311 /* A chance for redemption! */
1307 dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); 1312 dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
1308 adapter->state = __I40EVF_STARTUP; 1313 adapter->state = __I40EVF_STARTUP;
@@ -1328,8 +1333,11 @@ static void i40evf_watchdog_task(struct work_struct *work)
1328 goto watchdog_done; 1333 goto watchdog_done;
1329 1334
1330 /* check for reset */ 1335 /* check for reset */
1336 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
1337 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1331 if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && 1338 if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
1332 (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) { 1339 (rstat_val != I40E_VFR_VFACTIVE) &&
1340 (rstat_val != I40E_VFR_COMPLETED)) {
1333 adapter->state = __I40EVF_RESETTING; 1341 adapter->state = __I40EVF_RESETTING;
1334 adapter->flags |= I40EVF_FLAG_RESET_PENDING; 1342 adapter->flags |= I40EVF_FLAG_RESET_PENDING;
1335 dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); 1343 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
@@ -1395,6 +1403,8 @@ static void i40evf_watchdog_task(struct work_struct *work)
1395watchdog_done: 1403watchdog_done:
1396 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); 1404 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1397restart_watchdog: 1405restart_watchdog:
1406 if (adapter->state == __I40EVF_REMOVE)
1407 return;
1398 if (adapter->aq_required) 1408 if (adapter->aq_required)
1399 mod_timer(&adapter->watchdog_timer, 1409 mod_timer(&adapter->watchdog_timer,
1400 jiffies + msecs_to_jiffies(20)); 1410 jiffies + msecs_to_jiffies(20));
@@ -1495,7 +1505,8 @@ static void i40evf_reset_task(struct work_struct *work)
1495 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { 1505 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
1496 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & 1506 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
1497 I40E_VFGEN_RSTAT_VFR_STATE_MASK; 1507 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1498 if (rstat_val != I40E_VFR_VFACTIVE) 1508 if ((rstat_val != I40E_VFR_VFACTIVE) &&
1509 (rstat_val != I40E_VFR_COMPLETED))
1499 break; 1510 break;
1500 else 1511 else
1501 msleep(I40EVF_RESET_WAIT_MS); 1512 msleep(I40EVF_RESET_WAIT_MS);
@@ -1509,12 +1520,16 @@ static void i40evf_reset_task(struct work_struct *work)
1509 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { 1520 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
1510 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & 1521 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
1511 I40E_VFGEN_RSTAT_VFR_STATE_MASK; 1522 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1512 if (rstat_val == I40E_VFR_VFACTIVE) 1523 if ((rstat_val == I40E_VFR_VFACTIVE) ||
1524 (rstat_val == I40E_VFR_COMPLETED))
1513 break; 1525 break;
1514 else 1526 else
1515 msleep(I40EVF_RESET_WAIT_MS); 1527 msleep(I40EVF_RESET_WAIT_MS);
1516 } 1528 }
1517 if (i == I40EVF_RESET_WAIT_COUNT) { 1529 if (i == I40EVF_RESET_WAIT_COUNT) {
1530 struct i40evf_mac_filter *f, *ftmp;
1531 struct i40evf_vlan_filter *fv, *fvtmp;
1532
1518 /* reset never finished */ 1533 /* reset never finished */
1519 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", 1534 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
1520 rstat_val); 1535 rstat_val);
@@ -1527,9 +1542,23 @@ static void i40evf_reset_task(struct work_struct *work)
1527 i40evf_free_all_tx_resources(adapter); 1542 i40evf_free_all_tx_resources(adapter);
1528 i40evf_free_all_rx_resources(adapter); 1543 i40evf_free_all_rx_resources(adapter);
1529 } 1544 }
1545
1546 /* Delete all of the filters, both MAC and VLAN. */
1547 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
1548 list) {
1549 list_del(&f->list);
1550 kfree(f);
1551 }
1552 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list,
1553 list) {
1554 list_del(&fv->list);
1555 kfree(fv);
1556 }
1557
1530 i40evf_free_misc_irq(adapter); 1558 i40evf_free_misc_irq(adapter);
1531 i40evf_reset_interrupt_capability(adapter); 1559 i40evf_reset_interrupt_capability(adapter);
1532 i40evf_free_queues(adapter); 1560 i40evf_free_queues(adapter);
1561 i40evf_free_q_vectors(adapter);
1533 kfree(adapter->vf_res); 1562 kfree(adapter->vf_res);
1534 i40evf_shutdown_adminq(hw); 1563 i40evf_shutdown_adminq(hw);
1535 adapter->netdev->flags &= ~IFF_UP; 1564 adapter->netdev->flags &= ~IFF_UP;
@@ -1946,8 +1975,10 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
1946 int i; 1975 int i;
1947 1976
1948 for (i = 0; i < 100; i++) { 1977 for (i = 0; i < 100; i++) {
1949 rstat = rd32(hw, I40E_VFGEN_RSTAT); 1978 rstat = rd32(hw, I40E_VFGEN_RSTAT) &
1950 if (rstat == I40E_VFR_VFACTIVE) 1979 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1980 if ((rstat == I40E_VFR_VFACTIVE) ||
1981 (rstat == I40E_VFR_COMPLETED))
1951 return 0; 1982 return 0;
1952 udelay(10); 1983 udelay(10);
1953 } 1984 }
@@ -2106,8 +2137,6 @@ static void i40evf_init_task(struct work_struct *work)
2106 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); 2137 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
2107 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); 2138 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2108 2139
2109 INIT_LIST_HEAD(&adapter->mac_filter_list);
2110 INIT_LIST_HEAD(&adapter->vlan_filter_list);
2111 f = kzalloc(sizeof(*f), GFP_ATOMIC); 2140 f = kzalloc(sizeof(*f), GFP_ATOMIC);
2112 if (NULL == f) 2141 if (NULL == f)
2113 goto err_sw_init; 2142 goto err_sw_init;
@@ -2289,6 +2318,9 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2289 hw->bus.device = PCI_SLOT(pdev->devfn); 2318 hw->bus.device = PCI_SLOT(pdev->devfn);
2290 hw->bus.func = PCI_FUNC(pdev->devfn); 2319 hw->bus.func = PCI_FUNC(pdev->devfn);
2291 2320
2321 INIT_LIST_HEAD(&adapter->mac_filter_list);
2322 INIT_LIST_HEAD(&adapter->vlan_filter_list);
2323
2292 INIT_WORK(&adapter->reset_task, i40evf_reset_task); 2324 INIT_WORK(&adapter->reset_task, i40evf_reset_task);
2293 INIT_WORK(&adapter->adminq_task, i40evf_adminq_task); 2325 INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
2294 INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task); 2326 INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
@@ -2400,6 +2432,7 @@ static void i40evf_remove(struct pci_dev *pdev)
2400{ 2432{
2401 struct net_device *netdev = pci_get_drvdata(pdev); 2433 struct net_device *netdev = pci_get_drvdata(pdev);
2402 struct i40evf_adapter *adapter = netdev_priv(netdev); 2434 struct i40evf_adapter *adapter = netdev_priv(netdev);
2435 struct i40evf_mac_filter *f, *ftmp;
2403 struct i40e_hw *hw = &adapter->hw; 2436 struct i40e_hw *hw = &adapter->hw;
2404 2437
2405 cancel_delayed_work_sync(&adapter->init_task); 2438 cancel_delayed_work_sync(&adapter->init_task);
@@ -2415,6 +2448,7 @@ static void i40evf_remove(struct pci_dev *pdev)
2415 i40evf_misc_irq_disable(adapter); 2448 i40evf_misc_irq_disable(adapter);
2416 i40evf_free_misc_irq(adapter); 2449 i40evf_free_misc_irq(adapter);
2417 i40evf_reset_interrupt_capability(adapter); 2450 i40evf_reset_interrupt_capability(adapter);
2451 i40evf_free_q_vectors(adapter);
2418 } 2452 }
2419 2453
2420 if (adapter->watchdog_timer.function) 2454 if (adapter->watchdog_timer.function)
@@ -2430,6 +2464,13 @@ static void i40evf_remove(struct pci_dev *pdev)
2430 2464
2431 i40evf_free_queues(adapter); 2465 i40evf_free_queues(adapter);
2432 kfree(adapter->vf_res); 2466 kfree(adapter->vf_res);
2467 /* If we got removed before an up/down sequence, we've got a filter
2468 * hanging out there that we need to get rid of.
2469 */
2470 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2471 list_del(&f->list);
2472 kfree(f);
2473 }
2433 2474
2434 free_netdev(netdev); 2475 free_netdev(netdev);
2435 2476