author     David S. Miller <davem@davemloft.net>  2014-11-21 15:23:02 -0500
committer  David S. Miller <davem@davemloft.net>  2014-11-21 15:23:02 -0500
commit     cd91a88b9983802d65d48aaa8a38161c85c74203 (patch)
tree       3699c403cb0aa8b850c885dade6d32a2e2d2fb9f
parent     7d7a10792d729e7a0e3627b1b85b9312c4785542 (diff)
parent     5d1ff1061c26f6e115784bad62767ca47d67f47f (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2014-11-20

This series contains updates to ixgbevf, i40e and i40evf.

Emil updates ixgbevf with much of the work that Alex Duyck did while at Intel. First he updates the driver to clear the status bits on allocation instead of in the cleanup routine; this way we can leave the receive descriptor rings as a read-only memory block until we actually have buffers to give back to the hardware. He cleans up ixgbevf_clean_rx_irq() by creating ixgbevf_process_skb_fields() to merge several similar operations into one new function, cleans up temporary variables within the receive hot path, and reduces the scope of variables that do not need to exist outside the main loop. He saves stack space by storing the updated values back in next_to_clean instead of using a stack variable, which also collapses the size of the function. He improves performance on IOMMU-enabled systems and reduces cache misses by changing the basic receive path for ixgbevf so that, instead of receiving the data into an skb, it is received into a double-buffered page. Finally, he adds netpoll support by creating ixgbevf_netpoll(), a callback for .ndo_poll_controller that allows the VF interface to be used with netconsole.

Mitch provides several cleanups and trivial fixes for i40e and i40evf. First is a fix for the overloading of the msg_size field in the arq_event_info struct, splitting it into two fields and renaming them to indicate the actual function of each. Code comments are updated to match. Several checkpatch.pl warnings are cleaned up by adding or removing blank lines, aligning function parameters, and correcting over-long lines, which makes the code more readable.

Shannon provides a patch for i40e to write the extra bits that turn off the ITR wait for the interrupt, since we want the SW INT to go off as soon as possible.

v2: updated patch 07 based on feedback from Alex Duyck by
- adding a pfmemalloc check to a new function for reusable pages
- moving the atomic_inc outside of the #if/else in ixgbevf_add_rx_frag()
- reverting the removal of the API check in ixgbevf_change_mtu()
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c          6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h          3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c         5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c           17
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c        6
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.h        3
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h             2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c    14
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c       71
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c   23
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h           39
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c     720
12 files changed, 595 insertions, 314 deletions
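
The arq_event_info rework Mitch describes shows up throughout the i40e and i40evf hunks below: callers now set buf_len once to the allocated buffer size, and i40e_clean_arq_element() reports the length of each received message in msg_len, so the size field no longer has to be reinitialized on every loop pass. Below is a minimal sketch of that consumer pattern, modeled on the i40e_clean_adminq_subtask() hunk; handle_aq_event() is a hypothetical stand-in for the driver's opcode switch, not a function from the series.

	/* Sketch only: kernel-driver fragment illustrating the new
	 * buf_len/msg_len split in struct i40e_arq_event_info.
	 */
	static void example_drain_adminq(struct i40e_hw *hw)
	{
		struct i40e_arq_event_info event;
		i40e_status ret;
		u16 pending = 0;

		event.buf_len = I40E_MAX_AQ_BUF_SIZE;	/* allocated size, set once */
		event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
		if (!event.msg_buf)
			return;

		do {
			ret = i40e_clean_arq_element(hw, &event, &pending);
			if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
				break;

			/* only event.msg_len bytes of msg_buf are valid here */
			handle_aq_event(&event.desc, event.msg_buf, event.msg_len);
		} while (pending);

		kfree(event.msg_buf);
	}

Keeping buf_len constant across iterations is what allows the removal of the "reinit each time" assignments visible in the i40e_main.c, i40evf_main.c and i40evf_virtchnl.c hunks.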
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index f7f6206368df..5bb4914bda56 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -980,10 +980,10 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
980 980
981 e->desc = *desc; 981 e->desc = *desc;
982 datalen = le16_to_cpu(desc->datalen); 982 datalen = le16_to_cpu(desc->datalen);
983 e->msg_size = min(datalen, e->msg_size); 983 e->msg_len = min(datalen, e->buf_len);
984 if (e->msg_buf != NULL && (e->msg_size != 0)) 984 if (e->msg_buf != NULL && (e->msg_len != 0))
985 memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, 985 memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
986 e->msg_size); 986 e->msg_len);
987 987
988 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); 988 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
989 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, 989 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index df0bd09ed5d8..003a227b8515 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -76,7 +76,8 @@ struct i40e_asq_cmd_details {
76/* ARQ event information */ 76/* ARQ event information */
77struct i40e_arq_event_info { 77struct i40e_arq_event_info {
78 struct i40e_aq_desc desc; 78 struct i40e_aq_desc desc;
79 u16 msg_size; 79 u16 msg_len;
80 u16 buf_len;
80 u8 *msg_buf; 81 u8 *msg_buf;
81}; 82};
82 83
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index bb1698a7b3d1..b2402851a9bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1400,7 +1400,10 @@ static int i40e_intr_test(struct net_device *netdev, u64 *data)
1400 netif_info(pf, hw, netdev, "interrupt test\n"); 1400 netif_info(pf, hw, netdev, "interrupt test\n");
1401 wr32(&pf->hw, I40E_PFINT_DYN_CTL0, 1401 wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
1402 (I40E_PFINT_DYN_CTL0_INTENA_MASK | 1402 (I40E_PFINT_DYN_CTL0_INTENA_MASK |
1403 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK)); 1403 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
1404 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
1405 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
1406 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
1404 usleep_range(1000, 2000); 1407 usleep_range(1000, 2000);
1405 *data = (swc_old == pf->sw_int_count); 1408 *data = (swc_old == pf->sw_int_count);
1406 1409
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index c998d82da0fc..3913329998bf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -5565,11 +5565,17 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
5565 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) { 5565 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
5566 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, 5566 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
5567 (I40E_PFINT_DYN_CTL0_INTENA_MASK | 5567 (I40E_PFINT_DYN_CTL0_INTENA_MASK |
5568 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK)); 5568 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
5569 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
5570 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
5571 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
5569 } else { 5572 } else {
5570 u16 vec = vsi->base_vector - 1; 5573 u16 vec = vsi->base_vector - 1;
5571 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK | 5574 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
5572 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); 5575 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
5576 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
5577 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
5578 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
5573 for (i = 0; i < vsi->num_q_vectors; i++, vec++) 5579 for (i = 0; i < vsi->num_q_vectors; i++, vec++)
5574 wr32(&vsi->back->hw, 5580 wr32(&vsi->back->hw,
5575 I40E_PFINT_DYN_CTLN(vec), val); 5581 I40E_PFINT_DYN_CTLN(vec), val);
@@ -5750,13 +5756,12 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
5750 if (oldval != val) 5756 if (oldval != val)
5751 wr32(&pf->hw, pf->hw.aq.asq.len, val); 5757 wr32(&pf->hw, pf->hw.aq.asq.len, val);
5752 5758
5753 event.msg_size = I40E_MAX_AQ_BUF_SIZE; 5759 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
5754 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); 5760 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
5755 if (!event.msg_buf) 5761 if (!event.msg_buf)
5756 return; 5762 return;
5757 5763
5758 do { 5764 do {
5759 event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
5760 ret = i40e_clean_arq_element(hw, &event, &pending); 5765 ret = i40e_clean_arq_element(hw, &event, &pending);
5761 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) 5766 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
5762 break; 5767 break;
@@ -5777,7 +5782,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
5777 le32_to_cpu(event.desc.cookie_high), 5782 le32_to_cpu(event.desc.cookie_high),
5778 le32_to_cpu(event.desc.cookie_low), 5783 le32_to_cpu(event.desc.cookie_low),
5779 event.msg_buf, 5784 event.msg_buf,
5780 event.msg_size); 5785 event.msg_len);
5781 break; 5786 break;
5782 case i40e_aqc_opc_lldp_update_mib: 5787 case i40e_aqc_opc_lldp_update_mib:
5783 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); 5788 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 500ca2162708..d7e446f3e7a4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -929,10 +929,10 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
929 929
930 e->desc = *desc; 930 e->desc = *desc;
931 datalen = le16_to_cpu(desc->datalen); 931 datalen = le16_to_cpu(desc->datalen);
932 e->msg_size = min(datalen, e->msg_size); 932 e->msg_len = min(datalen, e->buf_len);
933 if (e->msg_buf != NULL && (e->msg_size != 0)) 933 if (e->msg_buf != NULL && (e->msg_len != 0))
934 memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, 934 memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
935 e->msg_size); 935 e->msg_len);
936 936
937 if (i40e_is_nvm_update_op(&e->desc)) 937 if (i40e_is_nvm_update_op(&e->desc))
938 hw->aq.nvm_busy = false; 938 hw->aq.nvm_busy = false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index f40cfac4b022..0d58378be740 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -76,7 +76,8 @@ struct i40e_asq_cmd_details {
76/* ARQ event information */ 76/* ARQ event information */
77struct i40e_arq_event_info { 77struct i40e_arq_event_info {
78 struct i40e_aq_desc desc; 78 struct i40e_aq_desc desc;
79 u16 msg_size; 79 u16 msg_len;
80 u16 buf_len;
80 u8 *msg_buf; 81 u8 *msg_buf;
81}; 82};
82 83
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 1113f8a2d3b6..981224743c73 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -244,7 +244,7 @@ struct i40evf_adapter {
244 struct i40e_hw hw; /* defined in i40e_type.h */ 244 struct i40e_hw hw; /* defined in i40e_type.h */
245 245
246 enum i40evf_state_t state; 246 enum i40evf_state_t state;
247 volatile unsigned long crit_section; 247 unsigned long crit_section;
248 248
249 struct work_struct watchdog_task; 249 struct work_struct watchdog_task;
250 bool netdev_registered; 250 bool netdev_registered;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 876411c39ee0..69a269b23be6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -58,7 +58,7 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
58 58
59#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats) 59#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
60#define I40EVF_QUEUE_STATS_LEN(_dev) \ 60#define I40EVF_QUEUE_STATS_LEN(_dev) \
61 (((struct i40evf_adapter *) \ 61 (((struct i40evf_adapter *)\
62 netdev_priv(_dev))->num_active_queues \ 62 netdev_priv(_dev))->num_active_queues \
63 * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64))) 63 * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
64#define I40EVF_STATS_LEN(_dev) \ 64#define I40EVF_STATS_LEN(_dev) \
@@ -175,6 +175,7 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
175static u32 i40evf_get_msglevel(struct net_device *netdev) 175static u32 i40evf_get_msglevel(struct net_device *netdev)
176{ 176{
177 struct i40evf_adapter *adapter = netdev_priv(netdev); 177 struct i40evf_adapter *adapter = netdev_priv(netdev);
178
178 return adapter->msg_enable; 179 return adapter->msg_enable;
179} 180}
180 181
@@ -189,6 +190,7 @@ static u32 i40evf_get_msglevel(struct net_device *netdev)
189static void i40evf_set_msglevel(struct net_device *netdev, u32 data) 190static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
190{ 191{
191 struct i40evf_adapter *adapter = netdev_priv(netdev); 192 struct i40evf_adapter *adapter = netdev_priv(netdev);
193
192 adapter->msg_enable = data; 194 adapter->msg_enable = data;
193} 195}
194 196
@@ -219,7 +221,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
219 * but the number of rings is not reported. 221 * but the number of rings is not reported.
220 **/ 222 **/
221static void i40evf_get_ringparam(struct net_device *netdev, 223static void i40evf_get_ringparam(struct net_device *netdev,
222 struct ethtool_ringparam *ring) 224 struct ethtool_ringparam *ring)
223{ 225{
224 struct i40evf_adapter *adapter = netdev_priv(netdev); 226 struct i40evf_adapter *adapter = netdev_priv(netdev);
225 227
@@ -280,7 +282,7 @@ static int i40evf_set_ringparam(struct net_device *netdev,
280 * this functionality. 282 * this functionality.
281 **/ 283 **/
282static int i40evf_get_coalesce(struct net_device *netdev, 284static int i40evf_get_coalesce(struct net_device *netdev,
283 struct ethtool_coalesce *ec) 285 struct ethtool_coalesce *ec)
284{ 286{
285 struct i40evf_adapter *adapter = netdev_priv(netdev); 287 struct i40evf_adapter *adapter = netdev_priv(netdev);
286 struct i40e_vsi *vsi = &adapter->vsi; 288 struct i40e_vsi *vsi = &adapter->vsi;
@@ -308,7 +310,7 @@ static int i40evf_get_coalesce(struct net_device *netdev,
308 * Change current coalescing settings. 310 * Change current coalescing settings.
309 **/ 311 **/
310static int i40evf_set_coalesce(struct net_device *netdev, 312static int i40evf_set_coalesce(struct net_device *netdev,
311 struct ethtool_coalesce *ec) 313 struct ethtool_coalesce *ec)
312{ 314{
313 struct i40evf_adapter *adapter = netdev_priv(netdev); 315 struct i40evf_adapter *adapter = netdev_priv(netdev);
314 struct i40e_hw *hw = &adapter->hw; 316 struct i40e_hw *hw = &adapter->hw;
@@ -621,7 +623,7 @@ static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
621 * i40evf_get_rxfh - get the rx flow hash indirection table 623 * i40evf_get_rxfh - get the rx flow hash indirection table
622 * @netdev: network interface device structure 624 * @netdev: network interface device structure
623 * @indir: indirection table 625 * @indir: indirection table
624 * @key: hash key (will be %NULL until get_rxfh_key_size is implemented) 626 * @key: hash key
625 * 627 *
626 * Reads the indirection table directly from the hardware. Always returns 0. 628 * Reads the indirection table directly from the hardware. Always returns 0.
627 **/ 629 **/
@@ -646,7 +648,7 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
646 * i40evf_set_rxfh - set the rx flow hash indirection table 648 * i40evf_set_rxfh - set the rx flow hash indirection table
647 * @netdev: network interface device structure 649 * @netdev: network interface device structure
648 * @indir: indirection table 650 * @indir: indirection table
649 * @key: hash key (will be %NULL until get_rxfh_key_size is implemented) 651 * @key: hash key
650 * 652 *
651 * Returns -EINVAL if the table specifies an inavlid queue id, otherwise 653 * Returns -EINVAL if the table specifies an inavlid queue id, otherwise
652 * returns 0 after programming the table. 654 * returns 0 after programming the table.
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 489227891ffb..8e01009695da 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -185,6 +185,7 @@ static void i40evf_tx_timeout(struct net_device *netdev)
185static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter) 185static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
186{ 186{
187 struct i40e_hw *hw = &adapter->hw; 187 struct i40e_hw *hw = &adapter->hw;
188
188 wr32(hw, I40E_VFINT_DYN_CTL01, 0); 189 wr32(hw, I40E_VFINT_DYN_CTL01, 0);
189 190
190 /* read flush */ 191 /* read flush */
@@ -200,6 +201,7 @@ static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
200static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter) 201static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
201{ 202{
202 struct i40e_hw *hw = &adapter->hw; 203 struct i40e_hw *hw = &adapter->hw;
204
203 wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK | 205 wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
204 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); 206 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
205 wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK); 207 wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
@@ -226,7 +228,6 @@ static void i40evf_irq_disable(struct i40evf_adapter *adapter)
226 } 228 }
227 /* read flush */ 229 /* read flush */
228 rd32(hw, I40E_VFGEN_RSTAT); 230 rd32(hw, I40E_VFGEN_RSTAT);
229
230} 231}
231 232
232/** 233/**
@@ -253,8 +254,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
253 * @adapter: board private structure 254 * @adapter: board private structure
254 * @mask: bitmap of vectors to trigger 255 * @mask: bitmap of vectors to trigger
255 **/ 256 **/
256static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, 257static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
257 u32 mask)
258{ 258{
259 struct i40e_hw *hw = &adapter->hw; 259 struct i40e_hw *hw = &adapter->hw;
260 int i; 260 int i;
@@ -551,6 +551,7 @@ static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
551{ 551{
552 int i; 552 int i;
553 int q_vectors; 553 int q_vectors;
554
554 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 555 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
555 556
556 for (i = 0; i < q_vectors; i++) { 557 for (i = 0; i < q_vectors; i++) {
@@ -584,6 +585,7 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
584{ 585{
585 struct i40e_hw *hw = &adapter->hw; 586 struct i40e_hw *hw = &adapter->hw;
586 int i; 587 int i;
588
587 for (i = 0; i < adapter->num_active_queues; i++) 589 for (i = 0; i < adapter->num_active_queues; i++)
588 adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i); 590 adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i);
589} 591}
@@ -667,9 +669,9 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
667 struct i40evf_vlan_filter *f; 669 struct i40evf_vlan_filter *f;
668 670
669 f = i40evf_find_vlan(adapter, vlan); 671 f = i40evf_find_vlan(adapter, vlan);
670 if (NULL == f) { 672 if (!f) {
671 f = kzalloc(sizeof(*f), GFP_ATOMIC); 673 f = kzalloc(sizeof(*f), GFP_ATOMIC);
672 if (NULL == f) 674 if (!f)
673 return NULL; 675 return NULL;
674 676
675 f->vlan = vlan; 677 f->vlan = vlan;
@@ -705,7 +707,7 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
705 * @vid: VLAN tag 707 * @vid: VLAN tag
706 **/ 708 **/
707static int i40evf_vlan_rx_add_vid(struct net_device *netdev, 709static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
708 __always_unused __be16 proto, u16 vid) 710 __always_unused __be16 proto, u16 vid)
709{ 711{
710 struct i40evf_adapter *adapter = netdev_priv(netdev); 712 struct i40evf_adapter *adapter = netdev_priv(netdev);
711 713
@@ -720,7 +722,7 @@ static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
720 * @vid: VLAN tag 722 * @vid: VLAN tag
721 **/ 723 **/
722static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, 724static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
723 __always_unused __be16 proto, u16 vid) 725 __always_unused __be16 proto, u16 vid)
724{ 726{
725 struct i40evf_adapter *adapter = netdev_priv(netdev); 727 struct i40evf_adapter *adapter = netdev_priv(netdev);
726 728
@@ -772,9 +774,9 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
772 udelay(1); 774 udelay(1);
773 775
774 f = i40evf_find_filter(adapter, macaddr); 776 f = i40evf_find_filter(adapter, macaddr);
775 if (NULL == f) { 777 if (!f) {
776 f = kzalloc(sizeof(*f), GFP_ATOMIC); 778 f = kzalloc(sizeof(*f), GFP_ATOMIC);
777 if (NULL == f) { 779 if (!f) {
778 clear_bit(__I40EVF_IN_CRITICAL_TASK, 780 clear_bit(__I40EVF_IN_CRITICAL_TASK,
779 &adapter->crit_section); 781 &adapter->crit_section);
780 return NULL; 782 return NULL;
@@ -881,6 +883,7 @@ static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
881 883
882 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 884 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
883 struct napi_struct *napi; 885 struct napi_struct *napi;
886
884 q_vector = adapter->q_vector[q_idx]; 887 q_vector = adapter->q_vector[q_idx];
885 napi = &q_vector->napi; 888 napi = &q_vector->napi;
886 napi_enable(napi); 889 napi_enable(napi);
@@ -920,6 +923,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
920 923
921 for (i = 0; i < adapter->num_active_queues; i++) { 924 for (i = 0; i < adapter->num_active_queues; i++) {
922 struct i40e_ring *ring = adapter->rx_rings[i]; 925 struct i40e_ring *ring = adapter->rx_rings[i];
926
923 i40evf_alloc_rx_buffers(ring, ring->count); 927 i40evf_alloc_rx_buffers(ring, ring->count);
924 ring->next_to_use = ring->count - 1; 928 ring->next_to_use = ring->count - 1;
925 writel(ring->next_to_use, ring->tail); 929 writel(ring->next_to_use, ring->tail);
@@ -1088,7 +1092,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
1088 struct i40e_ring *tx_ring; 1092 struct i40e_ring *tx_ring;
1089 struct i40e_ring *rx_ring; 1093 struct i40e_ring *rx_ring;
1090 1094
1091 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); 1095 tx_ring = kzalloc(sizeof(*tx_ring) * 2, GFP_KERNEL);
1092 if (!tx_ring) 1096 if (!tx_ring)
1093 goto err_out; 1097 goto err_out;
1094 1098
@@ -1172,14 +1176,14 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
1172 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1176 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1173 1177
1174 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1178 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1175 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); 1179 q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
1176 if (!q_vector) 1180 if (!q_vector)
1177 goto err_out; 1181 goto err_out;
1178 q_vector->adapter = adapter; 1182 q_vector->adapter = adapter;
1179 q_vector->vsi = &adapter->vsi; 1183 q_vector->vsi = &adapter->vsi;
1180 q_vector->v_idx = q_idx; 1184 q_vector->v_idx = q_idx;
1181 netif_napi_add(adapter->netdev, &q_vector->napi, 1185 netif_napi_add(adapter->netdev, &q_vector->napi,
1182 i40evf_napi_poll, NAPI_POLL_WEIGHT); 1186 i40evf_napi_poll, NAPI_POLL_WEIGHT);
1183 adapter->q_vector[q_idx] = q_vector; 1187 adapter->q_vector[q_idx] = q_vector;
1184 } 1188 }
1185 1189
@@ -1265,8 +1269,8 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
1265 } 1269 }
1266 1270
1267 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", 1271 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
1268 (adapter->num_active_queues > 1) ? "Enabled" : 1272 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1269 "Disabled", adapter->num_active_queues); 1273 adapter->num_active_queues);
1270 1274
1271 return 0; 1275 return 0;
1272err_alloc_queues: 1276err_alloc_queues:
@@ -1284,6 +1288,7 @@ err_set_interrupt:
1284static void i40evf_watchdog_timer(unsigned long data) 1288static void i40evf_watchdog_timer(unsigned long data)
1285{ 1289{
1286 struct i40evf_adapter *adapter = (struct i40evf_adapter *)data; 1290 struct i40evf_adapter *adapter = (struct i40evf_adapter *)data;
1291
1287 schedule_work(&adapter->watchdog_task); 1292 schedule_work(&adapter->watchdog_task);
1288 /* timer will be rescheduled in watchdog task */ 1293 /* timer will be rescheduled in watchdog task */
1289} 1294}
@@ -1295,8 +1300,8 @@ static void i40evf_watchdog_timer(unsigned long data)
1295static void i40evf_watchdog_task(struct work_struct *work) 1300static void i40evf_watchdog_task(struct work_struct *work)
1296{ 1301{
1297 struct i40evf_adapter *adapter = container_of(work, 1302 struct i40evf_adapter *adapter = container_of(work,
1298 struct i40evf_adapter, 1303 struct i40evf_adapter,
1299 watchdog_task); 1304 watchdog_task);
1300 struct i40e_hw *hw = &adapter->hw; 1305 struct i40e_hw *hw = &adapter->hw;
1301 uint32_t rstat_val; 1306 uint32_t rstat_val;
1302 1307
@@ -1334,7 +1339,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
1334 1339
1335 /* check for reset */ 1340 /* check for reset */
1336 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & 1341 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
1337 I40E_VFGEN_RSTAT_VFR_STATE_MASK; 1342 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1338 if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && 1343 if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
1339 (rstat_val != I40E_VFR_VFACTIVE) && 1344 (rstat_val != I40E_VFR_VFACTIVE) &&
1340 (rstat_val != I40E_VFR_COMPLETED)) { 1345 (rstat_val != I40E_VFR_COMPLETED)) {
@@ -1508,8 +1513,7 @@ static void i40evf_reset_task(struct work_struct *work)
1508 if ((rstat_val != I40E_VFR_VFACTIVE) && 1513 if ((rstat_val != I40E_VFR_VFACTIVE) &&
1509 (rstat_val != I40E_VFR_COMPLETED)) 1514 (rstat_val != I40E_VFR_COMPLETED))
1510 break; 1515 break;
1511 else 1516 msleep(I40EVF_RESET_WAIT_MS);
1512 msleep(I40EVF_RESET_WAIT_MS);
1513 } 1517 }
1514 if (i == I40EVF_RESET_WAIT_COUNT) { 1518 if (i == I40EVF_RESET_WAIT_COUNT) {
1515 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; 1519 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
@@ -1523,8 +1527,7 @@ static void i40evf_reset_task(struct work_struct *work)
1523 if ((rstat_val == I40E_VFR_VFACTIVE) || 1527 if ((rstat_val == I40E_VFR_VFACTIVE) ||
1524 (rstat_val == I40E_VFR_COMPLETED)) 1528 (rstat_val == I40E_VFR_COMPLETED))
1525 break; 1529 break;
1526 else 1530 msleep(I40EVF_RESET_WAIT_MS);
1527 msleep(I40EVF_RESET_WAIT_MS);
1528 } 1531 }
1529 if (i == I40EVF_RESET_WAIT_COUNT) { 1532 if (i == I40EVF_RESET_WAIT_COUNT) {
1530 struct i40evf_mac_filter *f, *ftmp; 1533 struct i40evf_mac_filter *f, *ftmp;
@@ -1575,12 +1578,12 @@ continue_reset:
1575 /* kill and reinit the admin queue */ 1578 /* kill and reinit the admin queue */
1576 if (i40evf_shutdown_adminq(hw)) 1579 if (i40evf_shutdown_adminq(hw))
1577 dev_warn(&adapter->pdev->dev, 1580 dev_warn(&adapter->pdev->dev,
1578 "%s: Failed to destroy the Admin Queue resources\n", 1581 "%s: Failed to destroy the Admin Queue resources\n",
1579 __func__); 1582 __func__);
1580 err = i40evf_init_adminq(hw); 1583 err = i40evf_init_adminq(hw);
1581 if (err) 1584 if (err)
1582 dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n", 1585 dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n",
1583 __func__, err); 1586 __func__, err);
1584 1587
1585 adapter->aq_pending = 0; 1588 adapter->aq_pending = 0;
1586 adapter->aq_required = 0; 1589 adapter->aq_required = 0;
@@ -1632,8 +1635,8 @@ static void i40evf_adminq_task(struct work_struct *work)
1632 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) 1635 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
1633 return; 1636 return;
1634 1637
1635 event.msg_size = I40EVF_MAX_AQ_BUF_SIZE; 1638 event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
1636 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); 1639 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1637 if (!event.msg_buf) 1640 if (!event.msg_buf)
1638 return; 1641 return;
1639 1642
@@ -1645,11 +1648,9 @@ static void i40evf_adminq_task(struct work_struct *work)
1645 1648
1646 i40evf_virtchnl_completion(adapter, v_msg->v_opcode, 1649 i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
1647 v_msg->v_retval, event.msg_buf, 1650 v_msg->v_retval, event.msg_buf,
1648 event.msg_size); 1651 event.msg_len);
1649 if (pending != 0) { 1652 if (pending != 0)
1650 memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); 1653 memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
1651 event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
1652 }
1653 } while (pending); 1654 } while (pending);
1654 1655
1655 /* check for error indications */ 1656 /* check for error indications */
@@ -1706,7 +1707,6 @@ static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
1706 for (i = 0; i < adapter->num_active_queues; i++) 1707 for (i = 0; i < adapter->num_active_queues; i++)
1707 if (adapter->tx_rings[i]->desc) 1708 if (adapter->tx_rings[i]->desc)
1708 i40evf_free_tx_resources(adapter->tx_rings[i]); 1709 i40evf_free_tx_resources(adapter->tx_rings[i]);
1709
1710} 1710}
1711 1711
1712/** 1712/**
@@ -2020,7 +2020,7 @@ static void i40evf_init_task(struct work_struct *work)
2020 err = i40evf_check_reset_complete(hw); 2020 err = i40evf_check_reset_complete(hw);
2021 if (err) { 2021 if (err) {
2022 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", 2022 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
2023 err); 2023 err);
2024 goto err; 2024 goto err;
2025 } 2025 }
2026 hw->aq.num_arq_entries = I40EVF_AQ_LEN; 2026 hw->aq.num_arq_entries = I40EVF_AQ_LEN;
@@ -2052,7 +2052,7 @@ static void i40evf_init_task(struct work_struct *work)
2052 err = i40evf_verify_api_ver(adapter); 2052 err = i40evf_verify_api_ver(adapter);
2053 if (err) { 2053 if (err) {
2054 dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n", 2054 dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
2055 err); 2055 err);
2056 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { 2056 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
2057 dev_info(&pdev->dev, "Resending request\n"); 2057 dev_info(&pdev->dev, "Resending request\n");
2058 err = i40evf_send_api_ver(adapter); 2058 err = i40evf_send_api_ver(adapter);
@@ -2136,7 +2136,7 @@ static void i40evf_init_task(struct work_struct *work)
2136 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); 2136 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2137 2137
2138 f = kzalloc(sizeof(*f), GFP_ATOMIC); 2138 f = kzalloc(sizeof(*f), GFP_ATOMIC);
2139 if (NULL == f) 2139 if (!f)
2140 goto err_sw_init; 2140 goto err_sw_init;
2141 2141
2142 ether_addr_copy(f->macaddr, adapter->hw.mac.addr); 2142 ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
@@ -2501,8 +2501,9 @@ static struct pci_driver i40evf_driver = {
2501static int __init i40evf_init_module(void) 2501static int __init i40evf_init_module(void)
2502{ 2502{
2503 int ret; 2503 int ret;
2504
2504 pr_info("i40evf: %s - version %s\n", i40evf_driver_string, 2505 pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
2505 i40evf_driver_version); 2506 i40evf_driver_version);
2506 2507
2507 pr_info("%s\n", i40evf_copyright); 2508 pr_info("%s\n", i40evf_copyright);
2508 2509
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 49bfdb5421c8..07c13b039181 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -92,8 +92,8 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
92 enum i40e_virtchnl_ops op; 92 enum i40e_virtchnl_ops op;
93 i40e_status err; 93 i40e_status err;
94 94
95 event.msg_size = I40EVF_MAX_AQ_BUF_SIZE; 95 event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
96 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); 96 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
97 if (!event.msg_buf) { 97 if (!event.msg_buf) {
98 err = -ENOMEM; 98 err = -ENOMEM;
99 goto out; 99 goto out;
@@ -169,15 +169,14 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter)
169 169
170 len = sizeof(struct i40e_virtchnl_vf_resource) + 170 len = sizeof(struct i40e_virtchnl_vf_resource) +
171 I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource); 171 I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
172 event.msg_size = len; 172 event.buf_len = len;
173 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); 173 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
174 if (!event.msg_buf) { 174 if (!event.msg_buf) {
175 err = -ENOMEM; 175 err = -ENOMEM;
176 goto out; 176 goto out;
177 } 177 }
178 178
179 while (1) { 179 while (1) {
180 event.msg_size = len;
181 /* When the AQ is empty, i40evf_clean_arq_element will return 180 /* When the AQ is empty, i40evf_clean_arq_element will return
182 * nonzero and this loop will terminate. 181 * nonzero and this loop will terminate.
183 */ 182 */
@@ -191,7 +190,7 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter)
191 } 190 }
192 191
193 err = (i40e_status)le32_to_cpu(event.desc.cookie_low); 192 err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
194 memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len)); 193 memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
195 194
196 i40e_vf_parse_hw_config(hw, adapter->vf_res); 195 i40e_vf_parse_hw_config(hw, adapter->vf_res);
197out_alloc: 196out_alloc:
@@ -396,7 +395,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
396 (count * sizeof(struct i40e_virtchnl_ether_addr)); 395 (count * sizeof(struct i40e_virtchnl_ether_addr));
397 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 396 if (len > I40EVF_MAX_AQ_BUF_SIZE) {
398 dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n", 397 dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
399 __func__); 398 __func__);
400 count = (I40EVF_MAX_AQ_BUF_SIZE - 399 count = (I40EVF_MAX_AQ_BUF_SIZE -
401 sizeof(struct i40e_virtchnl_ether_addr_list)) / 400 sizeof(struct i40e_virtchnl_ether_addr_list)) /
402 sizeof(struct i40e_virtchnl_ether_addr); 401 sizeof(struct i40e_virtchnl_ether_addr);
@@ -457,7 +456,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
457 (count * sizeof(struct i40e_virtchnl_ether_addr)); 456 (count * sizeof(struct i40e_virtchnl_ether_addr));
458 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 457 if (len > I40EVF_MAX_AQ_BUF_SIZE) {
459 dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n", 458 dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
460 __func__); 459 __func__);
461 count = (I40EVF_MAX_AQ_BUF_SIZE - 460 count = (I40EVF_MAX_AQ_BUF_SIZE -
462 sizeof(struct i40e_virtchnl_ether_addr_list)) / 461 sizeof(struct i40e_virtchnl_ether_addr_list)) /
463 sizeof(struct i40e_virtchnl_ether_addr); 462 sizeof(struct i40e_virtchnl_ether_addr);
@@ -519,7 +518,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
519 (count * sizeof(u16)); 518 (count * sizeof(u16));
520 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 519 if (len > I40EVF_MAX_AQ_BUF_SIZE) {
521 dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n", 520 dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
522 __func__); 521 __func__);
523 count = (I40EVF_MAX_AQ_BUF_SIZE - 522 count = (I40EVF_MAX_AQ_BUF_SIZE -
524 sizeof(struct i40e_virtchnl_vlan_filter_list)) / 523 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
525 sizeof(u16); 524 sizeof(u16);
@@ -579,7 +578,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
579 (count * sizeof(u16)); 578 (count * sizeof(u16));
580 if (len > I40EVF_MAX_AQ_BUF_SIZE) { 579 if (len > I40EVF_MAX_AQ_BUF_SIZE) {
581 dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n", 580 dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
582 __func__); 581 __func__);
583 count = (I40EVF_MAX_AQ_BUF_SIZE - 582 count = (I40EVF_MAX_AQ_BUF_SIZE -
584 sizeof(struct i40e_virtchnl_vlan_filter_list)) / 583 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
585 sizeof(u16); 584 sizeof(u16);
@@ -638,6 +637,7 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
638void i40evf_request_stats(struct i40evf_adapter *adapter) 637void i40evf_request_stats(struct i40evf_adapter *adapter)
639{ 638{
640 struct i40e_virtchnl_queue_select vqs; 639 struct i40e_virtchnl_queue_select vqs;
640
641 if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { 641 if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
642 /* no error message, this isn't crucial */ 642 /* no error message, this isn't crucial */
643 return; 643 return;
@@ -712,7 +712,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
712 "%s: Unknown event %d from pf\n", 712 "%s: Unknown event %d from pf\n",
713 __func__, vpe->event); 713 __func__, vpe->event);
714 break; 714 break;
715
716 } 715 }
717 return; 716 return;
718 } 717 }
@@ -777,7 +776,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
777 break; 776 break;
778 default: 777 default:
779 dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n", 778 dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n",
780 __func__, v_opcode); 779 __func__, v_opcode);
781 break; 780 break;
782 } /* switch v_opcode */ 781 } /* switch v_opcode */
783 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; 782 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index ba96cb5b886d..bb6726cbeb86 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -58,8 +58,9 @@ struct ixgbevf_tx_buffer {
58}; 58};
59 59
60struct ixgbevf_rx_buffer { 60struct ixgbevf_rx_buffer {
61 struct sk_buff *skb;
62 dma_addr_t dma; 61 dma_addr_t dma;
62 struct page *page;
63 unsigned int page_offset;
63}; 64};
64 65
65struct ixgbevf_stats { 66struct ixgbevf_stats {
@@ -79,7 +80,6 @@ struct ixgbevf_tx_queue_stats {
79}; 80};
80 81
81struct ixgbevf_rx_queue_stats { 82struct ixgbevf_rx_queue_stats {
82 u64 non_eop_descs;
83 u64 alloc_rx_page_failed; 83 u64 alloc_rx_page_failed;
84 u64 alloc_rx_buff_failed; 84 u64 alloc_rx_buff_failed;
85 u64 csum_err; 85 u64 csum_err;
@@ -92,9 +92,10 @@ struct ixgbevf_ring {
92 void *desc; /* descriptor ring memory */ 92 void *desc; /* descriptor ring memory */
93 dma_addr_t dma; /* phys. address of descriptor ring */ 93 dma_addr_t dma; /* phys. address of descriptor ring */
94 unsigned int size; /* length in bytes */ 94 unsigned int size; /* length in bytes */
95 unsigned int count; /* amount of descriptors */ 95 u16 count; /* amount of descriptors */
96 unsigned int next_to_use; 96 u16 next_to_use;
97 unsigned int next_to_clean; 97 u16 next_to_clean;
98 u16 next_to_alloc;
98 99
99 union { 100 union {
100 struct ixgbevf_tx_buffer *tx_buffer_info; 101 struct ixgbevf_tx_buffer *tx_buffer_info;
@@ -110,12 +111,11 @@ struct ixgbevf_ring {
110 111
111 u64 hw_csum_rx_error; 112 u64 hw_csum_rx_error;
112 u8 __iomem *tail; 113 u8 __iomem *tail;
114 struct sk_buff *skb;
113 115
114 u16 reg_idx; /* holds the special value that gets the hardware register 116 u16 reg_idx; /* holds the special value that gets the hardware register
115 * offset associated with this ring, which is different 117 * offset associated with this ring, which is different
116 * for DCB and RSS modes */ 118 * for DCB and RSS modes */
117
118 u16 rx_buf_len;
119 int queue_index; /* needed for multiqueue queue management */ 119 int queue_index; /* needed for multiqueue queue management */
120}; 120};
121 121
@@ -134,12 +134,10 @@ struct ixgbevf_ring {
134 134
135/* Supported Rx Buffer Sizes */ 135/* Supported Rx Buffer Sizes */
136#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ 136#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
137#define IXGBEVF_RXBUFFER_2K 2048 137#define IXGBEVF_RXBUFFER_2048 2048
138#define IXGBEVF_RXBUFFER_4K 4096
139#define IXGBEVF_RXBUFFER_8K 8192
140#define IXGBEVF_RXBUFFER_10K 10240
141 138
142#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 139#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
140#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048
143 141
144#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) 142#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
145 143
@@ -307,6 +305,13 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
307 ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) 305 ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
308#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG 306#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
309 307
308/* ixgbevf_test_staterr - tests bits in Rx descriptor status and error fields */
309static inline __le32 ixgbevf_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
310 const u32 stat_err_bits)
311{
312 return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
313}
314
310static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring) 315static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
311{ 316{
312 u16 ntc = ring->next_to_clean; 317 u16 ntc = ring->next_to_clean;
@@ -339,8 +344,10 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
339 344
340/* board specific private data structure */ 345/* board specific private data structure */
341struct ixgbevf_adapter { 346struct ixgbevf_adapter {
342 struct timer_list watchdog_timer; 347 /* this field must be first, see ixgbevf_process_skb_fields */
343 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 348 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
349
350 struct timer_list watchdog_timer;
344 struct work_struct reset_task; 351 struct work_struct reset_task;
345 struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 352 struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
346 353
@@ -363,7 +370,6 @@ struct ixgbevf_adapter {
363 struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */ 370 struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
364 u64 hw_csum_rx_error; 371 u64 hw_csum_rx_error;
365 u64 hw_rx_no_dma_resources; 372 u64 hw_rx_no_dma_resources;
366 u64 non_eop_descs;
367 int num_msix_vectors; 373 int num_msix_vectors;
368 u32 alloc_rx_page_failed; 374 u32 alloc_rx_page_failed;
369 u32 alloc_rx_buff_failed; 375 u32 alloc_rx_buff_failed;
@@ -373,7 +379,7 @@ struct ixgbevf_adapter {
373 */ 379 */
374 u32 flags; 380 u32 flags;
375#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1) 381#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
376#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1) 382
377#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2) 383#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
378 384
379 struct msix_entry *msix_entries; 385 struct msix_entry *msix_entries;
@@ -423,11 +429,6 @@ enum ixbgevf_state_t {
423 __IXGBEVF_WORK_INIT, 429 __IXGBEVF_WORK_INIT,
424}; 430};
425 431
426struct ixgbevf_cb {
427 struct sk_buff *prev;
428};
429#define IXGBE_CB(skb) ((struct ixgbevf_cb *)(skb)->cb)
430
431enum ixgbevf_boards { 432enum ixgbevf_boards {
432 board_82599_vf, 433 board_82599_vf,
433 board_X540_vf, 434 board_X540_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 030a219c85e3..755f71f07ae1 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -143,21 +143,6 @@ u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
143 return value; 143 return value;
144} 144}
145 145
146static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
147 u32 val)
148{
149 rx_ring->next_to_use = val;
150
151 /*
152 * Force memory writes to complete before letting h/w
153 * know there are new descriptors to fetch. (Only
154 * applicable for weak-ordered memory model archs,
155 * such as IA-64).
156 */
157 wmb();
158 ixgbevf_write_tail(rx_ring, val);
159}
160
161/** 146/**
162 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors 147 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
163 * @adapter: pointer to adapter struct 148 * @adapter: pointer to adapter struct
@@ -343,39 +328,12 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
343} 328}
344 329
345/** 330/**
346 * ixgbevf_receive_skb - Send a completed packet up the stack
347 * @q_vector: structure containing interrupt and ring information
348 * @skb: packet to send up
349 * @status: hardware indication of status of receive
350 * @rx_desc: rx descriptor
351 **/
352static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
353 struct sk_buff *skb, u8 status,
354 union ixgbe_adv_rx_desc *rx_desc)
355{
356 struct ixgbevf_adapter *adapter = q_vector->adapter;
357 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
358 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
359
360 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
361 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
362
363 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
364 napi_gro_receive(&q_vector->napi, skb);
365 else
366 netif_rx(skb);
367}
368
369/**
370 * ixgbevf_rx_skb - Helper function to determine proper Rx method 331 * ixgbevf_rx_skb - Helper function to determine proper Rx method
371 * @q_vector: structure containing interrupt and ring information 332 * @q_vector: structure containing interrupt and ring information
372 * @skb: packet to send up 333 * @skb: packet to send up
373 * @status: hardware indication of status of receive
374 * @rx_desc: rx descriptor
375 **/ 334 **/
376static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, 335static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
377 struct sk_buff *skb, u8 status, 336 struct sk_buff *skb)
378 union ixgbe_adv_rx_desc *rx_desc)
379{ 337{
380#ifdef CONFIG_NET_RX_BUSY_POLL 338#ifdef CONFIG_NET_RX_BUSY_POLL
381 skb_mark_napi_id(skb, &q_vector->napi); 339 skb_mark_napi_id(skb, &q_vector->napi);
@@ -387,17 +345,17 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
387 } 345 }
388#endif /* CONFIG_NET_RX_BUSY_POLL */ 346#endif /* CONFIG_NET_RX_BUSY_POLL */
389 347
390 ixgbevf_receive_skb(q_vector, skb, status, rx_desc); 348 napi_gro_receive(&q_vector->napi, skb);
391} 349}
392 350
393/** 351/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
394 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum 352 * @ring: structure containig ring specific data
395 * @ring: pointer to Rx descriptor ring structure 353 * @rx_desc: current Rx descriptor being processed
396 * @status_err: hardware indication of status of receive
397 * @skb: skb currently being received and modified 354 * @skb: skb currently being received and modified
398 **/ 355 */
399static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, 356static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
400 u32 status_err, struct sk_buff *skb) 357 union ixgbe_adv_rx_desc *rx_desc,
358 struct sk_buff *skb)
401{ 359{
402 skb_checksum_none_assert(skb); 360 skb_checksum_none_assert(skb);
403 361
@@ -406,16 +364,16 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
406 return; 364 return;
407 365
408 /* if IP and error */ 366 /* if IP and error */
409 if ((status_err & IXGBE_RXD_STAT_IPCS) && 367 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
410 (status_err & IXGBE_RXDADV_ERR_IPE)) { 368 ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
411 ring->rx_stats.csum_err++; 369 ring->rx_stats.csum_err++;
412 return; 370 return;
413 } 371 }
414 372
415 if (!(status_err & IXGBE_RXD_STAT_L4CS)) 373 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
416 return; 374 return;
417 375
418 if (status_err & IXGBE_RXDADV_ERR_TCPE) { 376 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
419 ring->rx_stats.csum_err++; 377 ring->rx_stats.csum_err++;
420 return; 378 return;
421 } 379 }
@@ -424,52 +382,413 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
424 skb->ip_summed = CHECKSUM_UNNECESSARY; 382 skb->ip_summed = CHECKSUM_UNNECESSARY;
425} 383}
426 384
385/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
386 * @rx_ring: rx descriptor ring packet is being transacted on
387 * @rx_desc: pointer to the EOP Rx descriptor
388 * @skb: pointer to current skb being populated
389 *
390 * This function checks the ring, descriptor, and packet information in
391 * order to populate the checksum, VLAN, protocol, and other fields within
392 * the skb.
393 */
394static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
395 union ixgbe_adv_rx_desc *rx_desc,
396 struct sk_buff *skb)
397{
398 ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
399
400 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
401 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
402 unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
403
404 if (test_bit(vid & VLAN_VID_MASK, active_vlans))
405 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
406 }
407
408 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
409}
410
411/**
412 * ixgbevf_is_non_eop - process handling of non-EOP buffers
413 * @rx_ring: Rx ring being processed
414 * @rx_desc: Rx descriptor for current buffer
415 * @skb: current socket buffer containing buffer in progress
416 *
417 * This function updates next to clean. If the buffer is an EOP buffer
418 * this function exits returning false, otherwise it will place the
419 * sk_buff in the next buffer to be chained and return true indicating
420 * that this is in fact a non-EOP buffer.
421 **/
422static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
423 union ixgbe_adv_rx_desc *rx_desc)
424{
425 u32 ntc = rx_ring->next_to_clean + 1;
426
427 /* fetch, update, and store next to clean */
428 ntc = (ntc < rx_ring->count) ? ntc : 0;
429 rx_ring->next_to_clean = ntc;
430
431 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
432
433 if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
434 return false;
435
436 return true;
437}
438
439static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
440 struct ixgbevf_rx_buffer *bi)
441{
442 struct page *page = bi->page;
443 dma_addr_t dma = bi->dma;
444
445 /* since we are recycling buffers we should seldom need to alloc */
446 if (likely(page))
447 return true;
448
449 /* alloc new page for storage */
450 page = dev_alloc_page();
451 if (unlikely(!page)) {
452 rx_ring->rx_stats.alloc_rx_page_failed++;
453 return false;
454 }
455
456 /* map page for use */
457 dma = dma_map_page(rx_ring->dev, page, 0,
458 PAGE_SIZE, DMA_FROM_DEVICE);
459
460 /* if mapping failed free memory back to system since
461 * there isn't much point in holding memory we can't use
462 */
463 if (dma_mapping_error(rx_ring->dev, dma)) {
464 __free_page(page);
465
466 rx_ring->rx_stats.alloc_rx_buff_failed++;
467 return false;
468 }
469
470 bi->dma = dma;
471 bi->page = page;
472 bi->page_offset = 0;
473
474 return true;
475}
476
427/** 477/**
428 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split 478 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
429 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on 479 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
480 * @cleaned_count: number of buffers to replace
430 **/ 481 **/
431static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, 482static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
432 int cleaned_count) 483 u16 cleaned_count)
433{ 484{
434 union ixgbe_adv_rx_desc *rx_desc; 485 union ixgbe_adv_rx_desc *rx_desc;
435 struct ixgbevf_rx_buffer *bi; 486 struct ixgbevf_rx_buffer *bi;
436 unsigned int i = rx_ring->next_to_use; 487 unsigned int i = rx_ring->next_to_use;
437 488
438 while (cleaned_count--) { 489 /* nothing to do or no valid netdev defined */
439 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 490 if (!cleaned_count || !rx_ring->netdev)
440 bi = &rx_ring->rx_buffer_info[i]; 491 return;
441 492
442 if (!bi->skb) { 493 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
443 struct sk_buff *skb; 494 bi = &rx_ring->rx_buffer_info[i];
495 i -= rx_ring->count;
444 496
445 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 497 do {
446 rx_ring->rx_buf_len); 498 if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
447 if (!skb) 499 break;
448 goto no_buffers;
449 500
450 bi->skb = skb; 501 /* Refresh the desc even if pkt_addr didn't change
502 * because each write-back erases this info.
503 */
504 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
451 505
452 bi->dma = dma_map_single(rx_ring->dev, skb->data, 506 rx_desc++;
453 rx_ring->rx_buf_len, 507 bi++;
454 DMA_FROM_DEVICE); 508 i++;
455 if (dma_mapping_error(rx_ring->dev, bi->dma)) { 509 if (unlikely(!i)) {
456 dev_kfree_skb(skb); 510 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
457 bi->skb = NULL; 511 bi = rx_ring->rx_buffer_info;
458 dev_err(rx_ring->dev, "Rx DMA map failed\n"); 512 i -= rx_ring->count;
459 break;
460 }
461 } 513 }
462 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
463 514
464 i++; 515 /* clear the hdr_addr for the next_to_use descriptor */
465 if (i == rx_ring->count) 516 rx_desc->read.hdr_addr = 0;
466 i = 0; 517
518 cleaned_count--;
519 } while (cleaned_count);
520
521 i += rx_ring->count;
522
523 if (rx_ring->next_to_use != i) {
524 /* record the next descriptor to use */
525 rx_ring->next_to_use = i;
526
527 /* update next to alloc since we have filled the ring */
528 rx_ring->next_to_alloc = i;
529
530 /* Force memory writes to complete before letting h/w
531 * know there are new descriptors to fetch. (Only
532 * applicable for weak-ordered memory model archs,
533 * such as IA-64).
534 */
535 wmb();
536 ixgbevf_write_tail(rx_ring, i);
467 } 537 }
538}
468 539
469no_buffers: 540/* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
470 rx_ring->rx_stats.alloc_rx_buff_failed++; 541 * @rx_ring: rx descriptor ring packet is being transacted on
471 if (rx_ring->next_to_use != i) 542 * @skb: pointer to current skb being adjusted
472 ixgbevf_release_rx_desc(rx_ring, i); 543 *
544 * This function is an ixgbevf specific version of __pskb_pull_tail. The
545 * main difference between this version and the original function is that
546 * this function can make several assumptions about the state of things
547 * that allow for significant optimizations versus the standard function.
548 * As a result we can do things like drop a frag and maintain an accurate
549 * truesize for the skb.
550 */
551static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
552 struct sk_buff *skb)
553{
554 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
555 unsigned char *va;
556 unsigned int pull_len;
557
558 /* it is valid to use page_address instead of kmap since we are
559 * working with pages allocated out of the lomem pool per
560 * alloc_page(GFP_ATOMIC)
561 */
562 va = skb_frag_address(frag);
563
564 /* we need the header to contain the greater of either ETH_HLEN or
565 * 60 bytes if the skb->len is less than 60 for skb_pad.
566 */
567 pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
568
569 /* align pull length to size of long to optimize memcpy performance */
570 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
571
572 /* update all of the pointers */
573 skb_frag_size_sub(frag, pull_len);
574 frag->page_offset += pull_len;
575 skb->data_len -= pull_len;
576 skb->tail += pull_len;
577}
578
+/* ixgbevf_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Check for corrupted packet headers caused by senders on the local L2
+ * embedded NIC switch not setting up their Tx Descriptors right. These
+ * should be very rare.
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ */
+static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
+                                    union ixgbe_adv_rx_desc *rx_desc,
+                                    struct sk_buff *skb)
+{
+    /* verify that the packet does not have any known errors */
+    if (unlikely(ixgbevf_test_staterr(rx_desc,
+                                      IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
+        struct net_device *netdev = rx_ring->netdev;
+
+        if (!(netdev->features & NETIF_F_RXALL)) {
+            dev_kfree_skb_any(skb);
+            return true;
+        }
+    }
+
+    /* place header in linear portion of buffer */
+    if (skb_is_nonlinear(skb))
+        ixgbevf_pull_tail(rx_ring, skb);
+
+    /* if skb_pad returns an error the skb was freed */
+    if (unlikely(skb->len < 60)) {
+        int pad_len = 60 - skb->len;
+
+        if (skb_pad(skb, pad_len))
+            return true;
+        __skb_put(skb, pad_len);
+    }
+
+    return false;
+}
+
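The padding branch above guarantees the 60-byte minimum Ethernet payload length (the 4-byte FCS is appended by hardware). The same arithmetic, detached from struct sk_buff and using an invented fixed-size buffer, looks roughly like this:

#include <stdio.h>
#include <string.h>

#define TOY_ETH_ZLEN 60    /* minimum frame length excluding the 4-byte FCS */

/* Zero-pad a short frame up to the 60-byte minimum, mirroring the
 * skb_pad()/__skb_put() sequence above. Returns the padded length, or 0
 * if the buffer cannot hold the padded frame (the driver frees the skb
 * and drops the packet in that case).
 */
static size_t toy_pad_runt(unsigned char *buf, size_t len, size_t buf_size)
{
    if (len >= TOY_ETH_ZLEN)
        return len;
    if (buf_size < TOY_ETH_ZLEN)
        return 0;
    memset(buf + len, 0, TOY_ETH_ZLEN - len);
    return TOY_ETH_ZLEN;
}

int main(void)
{
    unsigned char frame[128] = { 0x01, 0x02, 0x03 };

    printf("padded to %zu bytes\n", toy_pad_runt(frame, 42, sizeof(frame)));
    return 0;
}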
+/* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ */
+static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
+                                  struct ixgbevf_rx_buffer *old_buff)
+{
+    struct ixgbevf_rx_buffer *new_buff;
+    u16 nta = rx_ring->next_to_alloc;
+
+    new_buff = &rx_ring->rx_buffer_info[nta];
+
+    /* update, and store next to alloc */
+    nta++;
+    rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+    /* transfer page from old buffer to new buffer */
+    new_buff->page = old_buff->page;
+    new_buff->dma = old_buff->dma;
+    new_buff->page_offset = old_buff->page_offset;
+
+    /* sync the buffer for use by the device */
+    dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
+                                     new_buff->page_offset,
+                                     IXGBEVF_RX_BUFSZ,
+                                     DMA_FROM_DEVICE);
+}
+
+static inline bool ixgbevf_page_is_reserved(struct page *page)
+{
+    return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+}
+
+/* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ */
+static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
+                                struct ixgbevf_rx_buffer *rx_buffer,
+                                union ixgbe_adv_rx_desc *rx_desc,
+                                struct sk_buff *skb)
+{
+    struct page *page = rx_buffer->page;
+    unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+    unsigned int truesize = IXGBEVF_RX_BUFSZ;
+#else
+    unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+#endif
+
+    if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+        unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+        memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+        /* page is not reserved, we can reuse buffer as is */
+        if (likely(!ixgbevf_page_is_reserved(page)))
+            return true;
+
+        /* this page cannot be reused so discard it */
+        put_page(page);
+        return false;
+    }
+
+    skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                    rx_buffer->page_offset, size, truesize);
+
+    /* avoid re-using remote pages */
+    if (unlikely(ixgbevf_page_is_reserved(page)))
+        return false;
+
+#if (PAGE_SIZE < 8192)
+    /* if we are only owner of page we can reuse it */
+    if (unlikely(page_count(page) != 1))
+        return false;
+
+    /* flip page offset to other buffer */
+    rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
+
+#else
+    /* move offset up to the next cache line */
+    rx_buffer->page_offset += truesize;
+
+    if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
+        return false;
+
+#endif
+    /* Even if we own the page, we are not allowed to use atomic_set()
+     * This would break get_page_unless_zero() users.
+     */
+    atomic_inc(&page->_count);
+
+    return true;
+}
+
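Together, ixgbevf_reuse_rx_page() and ixgbevf_add_rx_frag() implement the double-buffered page scheme from the cover letter: on 4K-page systems each page is split into two IXGBEVF_RX_BUFSZ halves, the offset is XOR-flipped between them, and the page is recycled only if it is local, not pfmemalloc, and this ring holds the only reference. The toy model below shows just that reuse decision; the 2048/4096 sizes and all toy_* names are assumptions for illustration, and page refcounting is reduced to a plain integer.

#include <stdbool.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096u
#define TOY_RX_BUFSZ  2048u    /* half a page, like IXGBEVF_RX_BUFSZ on 4K pages */

struct toy_rx_buffer {
    unsigned int page_offset;   /* which half of the page is armed            */
    int page_refcount;          /* stand-in for page_count()                  */
    bool page_is_local;         /* stand-in for the NUMA/pfmemalloc test      */
};

/* Decide whether the page can be handed back to the ring after the CPU
 * consumed one half, flipping the offset to the other half on success.
 * Mirrors the tail end of ixgbevf_add_rx_frag() for PAGE_SIZE < 8192.
 */
static bool toy_try_reuse(struct toy_rx_buffer *b)
{
    if (!b->page_is_local)
        return false;               /* avoid re-using remote/reserved pages */
    if (b->page_refcount != 1)
        return false;               /* someone else still holds the page */

    b->page_offset ^= TOY_RX_BUFSZ; /* flip to the other half */
    b->page_refcount++;             /* ring keeps its own reference */
    return true;
}

int main(void)
{
    struct toy_rx_buffer b = { .page_refcount = 1, .page_is_local = true };

    while (toy_try_reuse(&b))
        printf("reused, next offset %u\n", b.page_offset);
    /* second pass fails here because the toy refcount stays at 2 */
    return 0;
}

In the driver the network stack drops its reference when the skb is consumed, so a healthy page keeps bouncing between its two halves instead of being freed and reallocated.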
+static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
+                                               union ixgbe_adv_rx_desc *rx_desc,
+                                               struct sk_buff *skb)
+{
+    struct ixgbevf_rx_buffer *rx_buffer;
+    struct page *page;
+
+    rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+    page = rx_buffer->page;
+    prefetchw(page);
+
+    if (likely(!skb)) {
+        void *page_addr = page_address(page) +
+                          rx_buffer->page_offset;
+
+        /* prefetch first cache line of first page */
+        prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+        prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+        /* allocate a skb to store the frags */
+        skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+                                        IXGBEVF_RX_HDR_SIZE);
+        if (unlikely(!skb)) {
+            rx_ring->rx_stats.alloc_rx_buff_failed++;
+            return NULL;
+        }
+
+        /* we will be copying header into skb->data in
+         * pskb_may_pull so it is in our interest to prefetch
+         * it now to avoid a possible cache miss
+         */
+        prefetchw(skb->data);
+    }
+
+    /* we are reusing so sync this buffer for CPU use */
+    dma_sync_single_range_for_cpu(rx_ring->dev,
+                                  rx_buffer->dma,
+                                  rx_buffer->page_offset,
+                                  IXGBEVF_RX_BUFSZ,
+                                  DMA_FROM_DEVICE);
+
+    /* pull page into skb */
+    if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+        /* hand second half of page back to the ring */
+        ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
+    } else {
+        /* we are not reusing the buffer so unmap it */
+        dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+                       PAGE_SIZE, DMA_FROM_DEVICE);
+    }
+
+    /* clear contents of buffer_info */
+    rx_buffer->dma = 0;
+    rx_buffer->page = NULL;
+
+    return skb;
 }
 
 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
@@ -484,78 +803,51 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                                 struct ixgbevf_ring *rx_ring,
                                 int budget)
 {
-    union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
-    struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
-    struct sk_buff *skb;
-    unsigned int i;
-    u32 len, staterr;
-    int cleaned_count = 0;
     unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+    u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
+    struct sk_buff *skb = rx_ring->skb;
 
-    i = rx_ring->next_to_clean;
-    rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
-    staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
-    rx_buffer_info = &rx_ring->rx_buffer_info[i];
+    while (likely(total_rx_packets < budget)) {
+        union ixgbe_adv_rx_desc *rx_desc;
 
-    while (staterr & IXGBE_RXD_STAT_DD) {
-        if (!budget)
-            break;
-        budget--;
+        /* return some buffers to hardware, one at a time is too slow */
+        if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
+            ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
+            cleaned_count = 0;
+        }
 
-        rmb(); /* read descriptor and rx_buffer_info after status DD */
-        len = le16_to_cpu(rx_desc->wb.upper.length);
-        skb = rx_buffer_info->skb;
-        prefetch(skb->data - NET_IP_ALIGN);
-        rx_buffer_info->skb = NULL;
+        rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
-        if (rx_buffer_info->dma) {
-            dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
-                             rx_ring->rx_buf_len,
-                             DMA_FROM_DEVICE);
-            rx_buffer_info->dma = 0;
-            skb_put(skb, len);
-        }
+        if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
+            break;
 
-        i++;
-        if (i == rx_ring->count)
-            i = 0;
+        /* This memory barrier is needed to keep us from reading
+         * any other fields out of the rx_desc until we know the
+         * RXD_STAT_DD bit is set
+         */
+        rmb();
 
-        next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
-        prefetch(next_rxd);
-        cleaned_count++;
+        /* retrieve a buffer from the ring */
+        skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
 
-        next_buffer = &rx_ring->rx_buffer_info[i];
+        /* exit if we failed to retrieve a buffer */
+        if (!skb)
+            break;
 
-        if (!(staterr & IXGBE_RXD_STAT_EOP)) {
-            skb->next = next_buffer->skb;
-            IXGBE_CB(skb->next)->prev = skb;
-            rx_ring->rx_stats.non_eop_descs++;
-            goto next_desc;
-        }
+        cleaned_count++;
 
-        /* we should not be chaining buffers, if we did drop the skb */
-        if (IXGBE_CB(skb)->prev) {
-            do {
-                struct sk_buff *this = skb;
-                skb = IXGBE_CB(skb)->prev;
-                dev_kfree_skb(this);
-            } while (skb);
-            goto next_desc;
-        }
+        /* fetch next buffer in frame if non-eop */
+        if (ixgbevf_is_non_eop(rx_ring, rx_desc))
+            continue;
 
-        /* ERR_MASK will only have valid bits if EOP set */
-        if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
-            dev_kfree_skb_irq(skb);
-            goto next_desc;
+        /* verify the packet layout is correct */
+        if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
+            skb = NULL;
+            continue;
         }
 
-        ixgbevf_rx_checksum(rx_ring, staterr, skb);
-
         /* probably a little skewed due to removing CRC */
         total_rx_bytes += skb->len;
-        total_rx_packets++;
-
-        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
         /* Workaround hardware that can't do proper VEPA multicast
          * source pruning.
@@ -565,32 +857,23 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
             ether_addr_equal(rx_ring->netdev->dev_addr,
                              eth_hdr(skb)->h_source)) {
             dev_kfree_skb_irq(skb);
-            goto next_desc;
+            continue;
         }
 
-        ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
+        /* populate checksum, VLAN, and protocol */
+        ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
 
-next_desc:
-        rx_desc->wb.upper.status_error = 0;
+        ixgbevf_rx_skb(q_vector, skb);
 
-        /* return some buffers to hardware, one at a time is too slow */
-        if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
-            ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
-            cleaned_count = 0;
-        }
+        /* reset skb pointer */
+        skb = NULL;
 
-        /* use prefetched values */
-        rx_desc = next_rxd;
-        rx_buffer_info = &rx_ring->rx_buffer_info[i];
-
-        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+        /* update budget accounting */
+        total_rx_packets++;
     }
 
-    rx_ring->next_to_clean = i;
-    cleaned_count = ixgbevf_desc_unused(rx_ring);
-
-    if (cleaned_count)
-        ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
+    /* place incomplete frames back on ring for completion */
+    rx_ring->skb = skb;
 
     u64_stats_update_begin(&rx_ring->syncp);
     rx_ring->stats.packets += total_rx_packets;
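The rewritten ixgbevf_clean_rx_irq() is a single loop bounded by the NAPI budget: buffers are returned to hardware in batches of IXGBEVF_RX_BUFFER_WRITE, the loop stops at the first descriptor without DD set, and a partially received frame is parked in rx_ring->skb between polls. The skeleton below mirrors only that control flow; every toy_* helper is an invented stand-in for descriptor reads, ixgbevf_fetch_rx_buffer() or packet delivery, and any refill shortfall is simply picked up on the next call (the driver recomputes it from ixgbevf_desc_unused()).

#include <stdbool.h>
#include <stdio.h>

#define TOY_BUFFER_WRITE 16    /* batch size, like IXGBEVF_RX_BUFFER_WRITE */

/* Stand-ins for hardware/stack interaction. */
static bool toy_desc_done(int idx)  { return idx < 40; }   /* 40 ready frames */
static bool toy_is_non_eop(int idx) { (void)idx; return false; }
static void toy_refill(int n)       { printf("refill %d buffers\n", n); }
static void toy_deliver(int idx)    { (void)idx; }

/* Returns the number of frames delivered, at most 'budget'. */
static int toy_clean_rx(int budget)
{
    static int next_to_clean;   /* ring state persists across polls */
    int cleaned_count = 0;      /* the driver seeds this from ixgbevf_desc_unused() */
    int packets = 0;

    while (packets < budget) {
        /* return some buffers to hardware, one at a time is too slow */
        if (cleaned_count >= TOY_BUFFER_WRITE) {
            toy_refill(cleaned_count);
            cleaned_count = 0;
        }

        if (!toy_desc_done(next_to_clean))
            break;              /* nothing more from hardware */

        cleaned_count++;
        next_to_clean++;

        if (toy_is_non_eop(next_to_clean))
            continue;           /* frame continues in the next descriptor */

        toy_deliver(next_to_clean);
        packets++;              /* budget accounting */
    }

    /* any shortfall is refilled on the next poll */
    return packets;
}

int main(void)
{
    printf("first poll cleaned %d\n", toy_clean_rx(64));
    printf("second poll cleaned %d\n", toy_clean_rx(64));
    return 0;
}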
@@ -634,12 +917,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
     else
         per_ring_budget = budget;
 
-    adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
     ixgbevf_for_each_ring(ring, q_vector->rx)
         clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
                                                 per_ring_budget)
                            < per_ring_budget);
-    adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
     ixgbevf_qv_unlock_napi(q_vector);
@@ -1229,19 +1510,15 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
 
 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
 {
-    struct ixgbevf_ring *rx_ring;
     struct ixgbe_hw *hw = &adapter->hw;
     u32 srrctl;
 
-    rx_ring = adapter->rx_ring[index];
-
     srrctl = IXGBE_SRRCTL_DROP_EN;
 
+    srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
+    srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
     srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
-    srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
-              IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-
     IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
 }
 
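With fixed 2K receive buffers, SRRCTL no longer depends on the ring's rx_buf_len; the header and packet buffer sizes are simply shifted into their register fields. The snippet below only illustrates that kind of field packing; the TOY_* shift amounts and bit values are placeholders, not the real IXGBE_SRRCTL_* definitions from the driver headers.

#include <stdint.h>
#include <stdio.h>

/* Stand-in field layout for illustration only. */
#define TOY_BSIZEPKT_SHIFT       10  /* packet buffer size expressed in 1KB units */
#define TOY_BSIZEHDRSIZE_SHIFT    2  /* header size field position (illustrative) */
#define TOY_DROP_EN              0x10000000u
#define TOY_DESCTYPE_ADV_ONEBUF  0x02000000u

#define TOY_RX_HDR_SIZE 256u
#define TOY_RX_BUFSZ    2048u

int main(void)
{
    uint32_t srrctl = TOY_DROP_EN;

    srrctl |= TOY_RX_HDR_SIZE << TOY_BSIZEHDRSIZE_SHIFT;
    srrctl |= TOY_RX_BUFSZ >> TOY_BSIZEPKT_SHIFT;   /* 2048 >> 10 == 2 (KB) */
    srrctl |= TOY_DESCTYPE_ADV_ONEBUF;

    printf("SRRCTL = 0x%08x\n", srrctl);
    return 0;
}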
@@ -1260,40 +1537,6 @@ static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
     IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
 }
 
-static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
-{
-    struct ixgbe_hw *hw = &adapter->hw;
-    struct net_device *netdev = adapter->netdev;
-    int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
-    int i;
-    u16 rx_buf_len;
-
-    /* notify the PF of our intent to use this size of frame */
-    ixgbevf_rlpml_set_vf(hw, max_frame);
-
-    /* PF will allow an extra 4 bytes past for vlan tagged frames */
-    max_frame += VLAN_HLEN;
-
-    /*
-     * Allocate buffer sizes that fit well into 32K and
-     * take into account max frame size of 9.5K
-     */
-    if ((hw->mac.type == ixgbe_mac_X540_vf) &&
-        (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
-        rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-    else if (max_frame <= IXGBEVF_RXBUFFER_2K)
-        rx_buf_len = IXGBEVF_RXBUFFER_2K;
-    else if (max_frame <= IXGBEVF_RXBUFFER_4K)
-        rx_buf_len = IXGBEVF_RXBUFFER_4K;
-    else if (max_frame <= IXGBEVF_RXBUFFER_8K)
-        rx_buf_len = IXGBEVF_RXBUFFER_8K;
-    else
-        rx_buf_len = IXGBEVF_RXBUFFER_10K;
-
-    for (i = 0; i < adapter->num_rx_queues; i++)
-        adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
-}
-
 #define IXGBEVF_MAX_RX_DESC_POLL 10
 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
                                      struct ixgbevf_ring *ring)
@@ -1371,12 +1614,13 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
     /* reset ntu and ntc to place SW in sync with hardwdare */
     ring->next_to_clean = 0;
     ring->next_to_use = 0;
+    ring->next_to_alloc = 0;
 
     ixgbevf_configure_srrctl(adapter, reg_idx);
 
-    /* prevent DMA from exceeding buffer space available */
-    rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
-    rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
+    /* allow any size packet since we can handle overflow */
+    rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
+
     rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
     IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
 
@@ -1393,11 +1637,13 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 {
     int i;
+    struct ixgbe_hw *hw = &adapter->hw;
+    struct net_device *netdev = adapter->netdev;
 
     ixgbevf_setup_psrtype(adapter);
 
-    /* set_rx_buffer_len must be called before ring initialization */
-    ixgbevf_set_rx_buffer_len(adapter);
+    /* notify the PF of our intent to use this size of frame */
+    ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
 
     /* Setup the HW Rx Head and Tail Descriptor Pointers and
      * the Base and Length of the Rx Descriptor Ring */
@@ -1702,32 +1948,32 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
  **/
 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 {
+    struct device *dev = rx_ring->dev;
     unsigned long size;
     unsigned int i;
 
+    /* Free Rx ring sk_buff */
+    if (rx_ring->skb) {
+        dev_kfree_skb(rx_ring->skb);
+        rx_ring->skb = NULL;
+    }
+
+    /* ring already cleared, nothing to do */
     if (!rx_ring->rx_buffer_info)
         return;
 
-    /* Free all the Rx ring sk_buffs */
+    /* Free all the Rx ring pages */
     for (i = 0; i < rx_ring->count; i++) {
-        struct ixgbevf_rx_buffer *rx_buffer_info;
+        struct ixgbevf_rx_buffer *rx_buffer;
 
-        rx_buffer_info = &rx_ring->rx_buffer_info[i];
-        if (rx_buffer_info->dma) {
-            dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
-                             rx_ring->rx_buf_len,
-                             DMA_FROM_DEVICE);
-            rx_buffer_info->dma = 0;
-        }
-        if (rx_buffer_info->skb) {
-            struct sk_buff *skb = rx_buffer_info->skb;
-            rx_buffer_info->skb = NULL;
-            do {
-                struct sk_buff *this = skb;
-                skb = IXGBE_CB(skb)->prev;
-                dev_kfree_skb(this);
-            } while (skb);
-        }
+        rx_buffer = &rx_ring->rx_buffer_info[i];
+        if (rx_buffer->dma)
+            dma_unmap_page(dev, rx_buffer->dma,
+                           PAGE_SIZE, DMA_FROM_DEVICE);
+        rx_buffer->dma = 0;
+        if (rx_buffer->page)
+            __free_page(rx_buffer->page);
+        rx_buffer->page = NULL;
     }
 
     size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
@@ -3274,6 +3520,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
 {
     struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+    struct ixgbe_hw *hw = &adapter->hw;
     int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
     int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
 
@@ -3291,17 +3538,35 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
     if ((new_mtu < 68) || (max_frame > max_possible_frame))
         return -EINVAL;
 
-    hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
+    hw_dbg(hw, "changing MTU from %d to %d\n",
            netdev->mtu, new_mtu);
     /* must set new MTU before calling down or up */
     netdev->mtu = new_mtu;
 
-    if (netif_running(netdev))
-        ixgbevf_reinit_locked(adapter);
+    /* notify the PF of our intent to use this size of frame */
+    ixgbevf_rlpml_set_vf(hw, max_frame);
 
     return 0;
 }
 
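After this change ixgbevf_change_mtu() no longer tears the interface down and back up; it recomputes the on-wire frame size and reports it to the PF through ixgbevf_rlpml_set_vf(). The standalone check below models only the baseline frame-size arithmetic visible in this hunk (68 is the minimum MTU accepted above, and the header/FCS lengths are the usual Ethernet values); the driver may permit larger frames depending on MAC and mailbox API version, which this sketch does not cover.

#include <stdio.h>

#define TOY_ETH_HLEN        14    /* Ethernet header         */
#define TOY_ETH_FCS_LEN      4    /* trailing CRC            */
#define TOY_MAX_VLAN_FRAME 1522   /* like MAXIMUM_ETHERNET_VLAN_SIZE */

/* Returns the on-wire frame size for an MTU, or -1 if the MTU falls
 * outside the baseline range checked above.
 */
static int toy_mtu_to_max_frame(int new_mtu)
{
    int max_frame = new_mtu + TOY_ETH_HLEN + TOY_ETH_FCS_LEN;

    if (new_mtu < 68 || max_frame > TOY_MAX_VLAN_FRAME)
        return -1;
    return max_frame;
}

int main(void)
{
    printf("MTU 1500 -> max_frame %d\n", toy_mtu_to_max_frame(1500));
    printf("MTU 9000 -> max_frame %d\n", toy_mtu_to_max_frame(9000));
    return 0;
}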
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void ixgbevf_netpoll(struct net_device *netdev)
+{
+    struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+    int i;
+
+    /* if interface is down do nothing */
+    if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+        return;
+    for (i = 0; i < adapter->num_rx_queues; i++)
+        ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
 {
     struct net_device *netdev = pci_get_drvdata(pdev);
@@ -3438,6 +3703,9 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
 #ifdef CONFIG_NET_RX_BUSY_POLL
     .ndo_busy_poll = ixgbevf_busy_poll_recv,
 #endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+    .ndo_poll_controller = ixgbevf_netpoll,
+#endif
 };
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)