Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h  3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c  6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h  2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c  11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c  14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h  5
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c  6
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.h  2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h  5
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h  1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c  16
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c  50
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c  65
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c  50
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c  8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c  1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c  37
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  122
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c  4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c  64
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c  131
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h  45
23 files changed, 441 insertions, 209 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index f1e33f896439..b7a807b380d7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -269,7 +269,8 @@ struct i40e_pf {
         u16 msg_enable;
         char misc_int_name[IFNAMSIZ + 9];
         u16 adminq_work_limit; /* num of admin receive queue desc to process */
-        int service_timer_period;
+        unsigned long service_timer_period;
+        unsigned long service_timer_previous;
         struct timer_list service_timer;
         struct work_struct service_task;
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 72f5d25a222f..f7f6206368df 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -853,7 +853,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
          */
         if (!details->async && !details->postpone) {
                 u32 total_delay = 0;
-                u32 delay_len = 10;
 
                 do {
                         /* AQ designers suggest use of head for better
@@ -861,9 +860,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
                          */
                         if (i40e_asq_done(hw))
                                 break;
-                        /* ugh! delay while spin_lock */
-                        udelay(delay_len);
-                        total_delay += delay_len;
+                        usleep_range(1000, 2000);
+                        total_delay++;
                 } while (total_delay < hw->aq.asq_cmd_timeout);
         }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index ba38a89c79d6..df0bd09ed5d8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -141,7 +141,7 @@ static inline int i40e_aq_rc_to_posix(u16 aq_rc)
 
 /* general information */
 #define I40E_AQ_LARGE_BUF       512
-#define I40E_ASQ_CMD_TIMEOUT    100000  /* usecs */
+#define I40E_ASQ_CMD_TIMEOUT    100  /* msecs */
 
 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                        u16 opcode);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index b6e745f277cc..afad5aa5a12b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1575,11 +1575,9 @@ static int i40e_set_coalesce(struct net_device *netdev,
         } else if (ec->rx_coalesce_usecs == 0) {
                 vsi->rx_itr_setting = ec->rx_coalesce_usecs;
                 if (ec->use_adaptive_rx_coalesce)
-                        netif_info(pf, drv, netdev,
-                                   "Rx-secs=0, need to disable adaptive-Rx for a complete disable\n");
+                        netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
         } else {
-                netif_info(pf, drv, netdev,
-                           "Invalid value, Rx-usecs range is 0, 8-8160\n");
+                netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
                 return -EINVAL;
         }
 
@@ -1589,11 +1587,10 @@ static int i40e_set_coalesce(struct net_device *netdev,
         } else if (ec->tx_coalesce_usecs == 0) {
                 vsi->tx_itr_setting = ec->tx_coalesce_usecs;
                 if (ec->use_adaptive_tx_coalesce)
-                        netif_info(pf, drv, netdev,
-                                   "Tx-secs=0, need to disable adaptive-Tx for a complete disable\n");
+                        netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
         } else {
                 netif_info(pf, drv, netdev,
-                           "Invalid value, Tx-usecs range is 0, 8-8160\n");
+                           "Invalid value, tx-usecs range is 0-8160\n");
                 return -EINVAL;
         }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 1a98e2384b3b..de664631c807 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -5449,7 +5449,7 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
 }
 
 /**
- * i40e_watchdog_subtask - Check and bring link up
+ * i40e_watchdog_subtask - periodic checks not using event driven response
  * @pf: board private structure
  **/
 static void i40e_watchdog_subtask(struct i40e_pf *pf)
@@ -5461,6 +5461,15 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
             test_bit(__I40E_CONFIG_BUSY, &pf->state))
                 return;
 
+        /* make sure we don't do these things too often */
+        if (time_before(jiffies, (pf->service_timer_previous +
+                                  pf->service_timer_period)))
+                return;
+        pf->service_timer_previous = jiffies;
+
+        i40e_check_hang_subtask(pf);
+        i40e_link_event(pf);
+
         /* Update the stats for active netdevs so the network stack
          * can look at updated numbers whenever it cares to
          */
@@ -6325,15 +6334,12 @@ static void i40e_service_task(struct work_struct *work)
         i40e_vc_process_vflr_event(pf);
         i40e_watchdog_subtask(pf);
         i40e_fdir_reinit_subtask(pf);
-        i40e_check_hang_subtask(pf);
         i40e_sync_filters_subtask(pf);
 #ifdef CONFIG_I40E_VXLAN
         i40e_sync_vxlan_filters_subtask(pf);
 #endif
         i40e_clean_adminq_subtask(pf);
 
-        i40e_link_event(pf);
-
         i40e_service_event_complete(pf);
 
         /* If the tasks have taken longer than one timer cycle or there
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index d7a625a6a14f..e60d3accb2e2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -30,10 +30,7 @@
 /* Interrupt Throttling and Rate Limiting Goodies */
 
 #define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
-#define I40E_MIN_ITR               0x0004  /* reg uses 2 usec resolution */
-#define I40E_MAX_IRATE             0x03F
-#define I40E_MIN_IRATE             0x001
-#define I40E_IRATE_USEC_RESOLUTION 4
+#define I40E_MIN_ITR               0x0001  /* reg uses 2 usec resolution */
 #define I40E_ITR_100K              0x0005
 #define I40E_ITR_20K               0x0019
 #define I40E_ITR_8K                0x003E
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index f206be917842..500ca2162708 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -801,7 +801,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
          */
         if (!details->async && !details->postpone) {
                 u32 total_delay = 0;
-                u32 delay_len = 10;
 
                 do {
                         /* AQ designers suggest use of head for better
@@ -809,9 +808,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
                          */
                         if (i40evf_asq_done(hw))
                                 break;
-                        /* ugh! delay while spin_lock */
-                        udelay(delay_len);
-                        total_delay += delay_len;
+                        usleep_range(1000, 2000);
+                        total_delay++;
                 } while (total_delay < hw->aq.asq_cmd_timeout);
         }
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 91a5c5bd80f3..f40cfac4b022 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -141,7 +141,7 @@ static inline int i40e_aq_rc_to_posix(u16 aq_rc)
 
 /* general information */
 #define I40E_AQ_LARGE_BUF       512
-#define I40E_ASQ_CMD_TIMEOUT    100000  /* usecs */
+#define I40E_ASQ_CMD_TIMEOUT    100  /* msecs */
 
 void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                          u16 opcode);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index f6dcf9dd9290..c7f29626eada 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -30,10 +30,7 @@
 /* Interrupt Throttling and Rate Limiting Goodies */
 
 #define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
-#define I40E_MIN_ITR               0x0004  /* reg uses 2 usec resolution */
-#define I40E_MAX_IRATE             0x03F
-#define I40E_MIN_IRATE             0x001
-#define I40E_IRATE_USEC_RESOLUTION 4
+#define I40E_MIN_ITR               0x0001  /* reg uses 2 usec resolution */
 #define I40E_ITR_100K              0x0005
 #define I40E_ITR_20K               0x0019
 #define I40E_ITR_8K                0x003E
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 30ef519d4b91..1113f8a2d3b6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -191,6 +191,7 @@ struct i40evf_adapter {
         struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
         struct list_head vlan_filter_list;
         char misc_vector_name[IFNAMSIZ + 9];
+        int num_active_queues;
 
         /* TX */
         struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index efee6b290c0f..876411c39ee0 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -59,7 +59,7 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
 #define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
 #define I40EVF_QUEUE_STATS_LEN(_dev) \
         (((struct i40evf_adapter *) \
-                netdev_priv(_dev))->vsi_res->num_queue_pairs \
+                netdev_priv(_dev))->num_active_queues \
                   * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
 #define I40EVF_STATS_LEN(_dev) \
         (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
@@ -121,11 +121,11 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev,
                 p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset;
                 data[i] = *(u64 *)p;
         }
-        for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+        for (j = 0; j < adapter->num_active_queues; j++) {
                 data[i++] = adapter->tx_rings[j]->stats.packets;
                 data[i++] = adapter->tx_rings[j]->stats.bytes;
         }
-        for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+        for (j = 0; j < adapter->num_active_queues; j++) {
                 data[i++] = adapter->rx_rings[j]->stats.packets;
                 data[i++] = adapter->rx_rings[j]->stats.bytes;
         }
@@ -151,13 +151,13 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
                                ETH_GSTRING_LEN);
                         p += ETH_GSTRING_LEN;
                 }
-                for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+                for (i = 0; i < adapter->num_active_queues; i++) {
                         snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i);
                         p += ETH_GSTRING_LEN;
                         snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i);
                         p += ETH_GSTRING_LEN;
                 }
-                for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+                for (i = 0; i < adapter->num_active_queues; i++) {
                         snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i);
                         p += ETH_GSTRING_LEN;
                         snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
@@ -430,7 +430,7 @@ static int i40evf_get_rxnfc(struct net_device *netdev,
 
         switch (cmd->cmd) {
         case ETHTOOL_GRXRINGS:
-                cmd->data = adapter->vsi_res->num_queue_pairs;
+                cmd->data = adapter->num_active_queues;
                 ret = 0;
                 break;
         case ETHTOOL_GRXFH:
@@ -598,12 +598,12 @@ static void i40evf_get_channels(struct net_device *netdev,
         struct i40evf_adapter *adapter = netdev_priv(netdev);
 
         /* Report maximum channels */
-        ch->max_combined = adapter->vsi_res->num_queue_pairs;
+        ch->max_combined = adapter->num_active_queues;
 
         ch->max_other = NONQ_VECS;
         ch->other_count = NONQ_VECS;
 
-        ch->combined_count = adapter->vsi_res->num_queue_pairs;
+        ch->combined_count = adapter->num_active_queues;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index b2f01eb2f9e5..f0d07ad54198 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -397,8 +397,8 @@ static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
         int q_vectors;
         int v_start = 0;
         int rxr_idx = 0, txr_idx = 0;
-        int rxr_remaining = adapter->vsi_res->num_queue_pairs;
-        int txr_remaining = adapter->vsi_res->num_queue_pairs;
+        int rxr_remaining = adapter->num_active_queues;
+        int txr_remaining = adapter->num_active_queues;
         int i, j;
         int rqpv, tqpv;
         int err = 0;
@@ -584,7 +584,7 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
 {
         struct i40e_hw *hw = &adapter->hw;
         int i;
-        for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+        for (i = 0; i < adapter->num_active_queues; i++)
                 adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i);
 }
 
@@ -629,7 +629,7 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter)
                 rx_buf_len = ALIGN(max_frame, 1024);
         }
 
-        for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+        for (i = 0; i < adapter->num_active_queues; i++) {
                 adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i);
                 adapter->rx_rings[i]->rx_buf_len = rx_buf_len;
         }
@@ -918,7 +918,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
         i40evf_configure_rx(adapter);
         adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
 
-        for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+        for (i = 0; i < adapter->num_active_queues; i++) {
                 struct i40e_ring *ring = adapter->rx_rings[i];
                 i40evf_alloc_rx_buffers(ring, ring->count);
                 ring->next_to_use = ring->count - 1;
@@ -950,7 +950,7 @@ static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter)
 {
         int i;
 
-        for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+        for (i = 0; i < adapter->num_active_queues; i++)
                 i40evf_clean_rx_ring(adapter->rx_rings[i]);
 }
 
@@ -962,7 +962,7 @@ static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter)
 {
         int i;
 
-        for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+        for (i = 0; i < adapter->num_active_queues; i++)
                 i40evf_clean_tx_ring(adapter->tx_rings[i]);
 }
 
@@ -1064,7 +1064,7 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter)
 
         if (!adapter->vsi_res)
                 return;
-        for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+        for (i = 0; i < adapter->num_active_queues; i++) {
                 if (adapter->tx_rings[i])
                         kfree_rcu(adapter->tx_rings[i], rcu);
                 adapter->tx_rings[i] = NULL;
@@ -1084,7 +1084,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
 {
         int i;
 
-        for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+        for (i = 0; i < adapter->num_active_queues; i++) {
                 struct i40e_ring *tx_ring;
                 struct i40e_ring *rx_ring;
 
@@ -1130,7 +1130,7 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
                 err = -EIO;
                 goto out;
         }
-        pairs = adapter->vsi_res->num_queue_pairs;
+        pairs = adapter->num_active_queues;
 
         /* It's easy to be greedy for MSI-X vectors, but it really
          * doesn't do us much good if we have a lot more vectors
@@ -1210,7 +1210,7 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
         int napi_vectors;
 
         num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
-        napi_vectors = adapter->vsi_res->num_queue_pairs;
+        napi_vectors = adapter->num_active_queues;
 
         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                 struct i40e_q_vector *q_vector = adapter->q_vector[q_idx];
@@ -1265,8 +1265,8 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
         }
 
         dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
-                 (adapter->vsi_res->num_queue_pairs > 1) ? "Enabled" :
-                 "Disabled", adapter->vsi_res->num_queue_pairs);
+                 (adapter->num_active_queues > 1) ? "Enabled" :
+                 "Disabled", adapter->num_active_queues);
 
         return 0;
 err_alloc_queues:
@@ -1425,7 +1425,7 @@ static int next_queue(struct i40evf_adapter *adapter, int j)
 {
         j += 1;
 
-        return j >= adapter->vsi_res->num_queue_pairs ? 0 : j;
+        return j >= adapter->num_active_queues ? 0 : j;
 }
 
 /**
@@ -1446,9 +1446,14 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
                         0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e,
                         0x4954b126 };
 
-        /* Hash type is configured by the PF - we just supply the key */
+        /* No RSS for single queue. */
+        if (adapter->num_active_queues == 1) {
+                wr32(hw, I40E_VFQF_HENA(0), 0);
+                wr32(hw, I40E_VFQF_HENA(1), 0);
+                return;
+        }
 
-        /* Fill out hash function seed */
+        /* Hash type is configured by the PF - we just supply the key */
         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
                 wr32(hw, I40E_VFQF_HKEY(i), seed[i]);
 
@@ -1458,7 +1463,7 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
         wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
 
         /* Populate the LUT with max no. of queues in round robin fashion */
-        j = adapter->vsi_res->num_queue_pairs;
+        j = adapter->num_active_queues;
         for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
                 j = next_queue(adapter, j);
                 lut = j;
@@ -1703,7 +1708,7 @@ static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
 {
         int i;
 
-        for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+        for (i = 0; i < adapter->num_active_queues; i++)
                 if (adapter->tx_rings[i]->desc)
                         i40evf_free_tx_resources(adapter->tx_rings[i]);
 
@@ -1723,7 +1728,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
 {
         int i, err = 0;
 
-        for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+        for (i = 0; i < adapter->num_active_queues; i++) {
                 adapter->tx_rings[i]->count = adapter->tx_desc_count;
                 err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
                 if (!err)
@@ -1751,7 +1756,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
 {
         int i, err = 0;
 
-        for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+        for (i = 0; i < adapter->num_active_queues; i++) {
                 adapter->rx_rings[i]->count = adapter->rx_desc_count;
                 err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
                 if (!err)
@@ -1774,7 +1779,7 @@ static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
 {
         int i;
 
-        for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+        for (i = 0; i < adapter->num_active_queues; i++)
                 if (adapter->rx_rings[i]->desc)
                         i40evf_free_rx_resources(adapter->rx_rings[i]);
 }
@@ -2150,6 +2155,9 @@ static void i40evf_init_task(struct work_struct *work)
         adapter->watchdog_timer.data = (unsigned long)adapter;
         mod_timer(&adapter->watchdog_timer, jiffies + 1);
 
+        adapter->num_active_queues = min_t(int,
+                                           adapter->vsi_res->num_queue_pairs,
+                                           (int)(num_online_cpus()));
         adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
         adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
         err = i40evf_init_interrupt_scheme(adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 66d12f5b4ca8..49bfdb5421c8 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -89,6 +89,7 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
         struct i40e_virtchnl_version_info *pf_vvi;
         struct i40e_hw *hw = &adapter->hw;
         struct i40e_arq_event_info event;
+        enum i40e_virtchnl_ops op;
         i40e_status err;
 
         event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
@@ -98,18 +99,27 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
                 goto out;
         }
 
-        err = i40evf_clean_arq_element(hw, &event, NULL);
-        if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
-                goto out_alloc;
+        while (1) {
+                err = i40evf_clean_arq_element(hw, &event, NULL);
+                /* When the AQ is empty, i40evf_clean_arq_element will return
+                 * nonzero and this loop will terminate.
+                 */
+                if (err)
+                        goto out_alloc;
+                op =
+                    (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+                if (op == I40E_VIRTCHNL_OP_VERSION)
+                        break;
+        }
+
 
         err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
         if (err)
                 goto out_alloc;
 
-        if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
-            I40E_VIRTCHNL_OP_VERSION) {
+        if (op != I40E_VIRTCHNL_OP_VERSION) {
                 dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
-                         le32_to_cpu(event.desc.cookie_high));
+                         op);
                 err = -EIO;
                 goto out_alloc;
         }
@@ -153,8 +163,9 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter)
 {
         struct i40e_hw *hw = &adapter->hw;
         struct i40e_arq_event_info event;
-        u16 len;
+        enum i40e_virtchnl_ops op;
         i40e_status err;
+        u16 len;
 
         len = sizeof(struct i40e_virtchnl_vf_resource) +
                 I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
@@ -165,29 +176,21 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter)
                 goto out;
         }
 
-        err = i40evf_clean_arq_element(hw, &event, NULL);
-        if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
-                goto out_alloc;
-
-        err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
-        if (err) {
-                dev_err(&adapter->pdev->dev,
-                        "%s: Error returned from PF, %d, %d\n", __func__,
-                        le32_to_cpu(event.desc.cookie_high),
-                        le32_to_cpu(event.desc.cookie_low));
-                err = -EIO;
-                goto out_alloc;
-        }
+        while (1) {
+                event.msg_size = len;
+                /* When the AQ is empty, i40evf_clean_arq_element will return
+                 * nonzero and this loop will terminate.
+                 */
+                err = i40evf_clean_arq_element(hw, &event, NULL);
+                if (err)
+                        goto out_alloc;
+                op =
+                    (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+                if (op == I40E_VIRTCHNL_OP_GET_VF_RESOURCES)
+                        break;
+        }
 
-        if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
-            I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
-                dev_err(&adapter->pdev->dev,
-                        "%s: Invalid response from PF, %d, %d\n", __func__,
-                        le32_to_cpu(event.desc.cookie_high),
-                        le32_to_cpu(event.desc.cookie_low));
-                err = -EIO;
-                goto out_alloc;
-        }
+        err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
         memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len));
 
         i40e_vf_parse_hw_config(hw, adapter->vf_res);
@@ -207,7 +210,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
 {
         struct i40e_virtchnl_vsi_queue_config_info *vqci;
         struct i40e_virtchnl_queue_pair_info *vqpi;
-        int pairs = adapter->vsi_res->num_queue_pairs;
+        int pairs = adapter->num_active_queues;
         int i, len;
 
         if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
@@ -273,7 +276,7 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
         }
         adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
         vqs.vsi_id = adapter->vsi_res->vsi_id;
-        vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+        vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
         vqs.rx_queues = vqs.tx_queues;
         adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
         adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
@@ -299,7 +302,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
         }
         adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
         vqs.vsi_id = adapter->vsi_res->vsi_id;
-        vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+        vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
         vqs.rx_queues = vqs.tx_queues;
         adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
         adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index b5f484bf3fda..0e754b4c4220 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -2799,6 +2799,8 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
                 break;
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
                 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
                 break;
@@ -3192,17 +3194,27 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
                 *link_up = false;
         }
 
-        if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
-            IXGBE_LINKS_SPEED_10G_82599)
-                *speed = IXGBE_LINK_SPEED_10GB_FULL;
-        else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
-                 IXGBE_LINKS_SPEED_1G_82599)
+        switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+        case IXGBE_LINKS_SPEED_10G_82599:
+                if ((hw->mac.type >= ixgbe_mac_X550) &&
+                    (links_reg & IXGBE_LINKS_SPEED_NON_STD))
+                        *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+                else
+                        *speed = IXGBE_LINK_SPEED_10GB_FULL;
+                break;
+        case IXGBE_LINKS_SPEED_1G_82599:
                 *speed = IXGBE_LINK_SPEED_1GB_FULL;
-        else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
-                 IXGBE_LINKS_SPEED_100_82599)
-                *speed = IXGBE_LINK_SPEED_100_FULL;
-        else
+                break;
+        case IXGBE_LINKS_SPEED_100_82599:
+                if ((hw->mac.type >= ixgbe_mac_X550) &&
+                    (links_reg & IXGBE_LINKS_SPEED_NON_STD))
+                        *speed = IXGBE_LINK_SPEED_5GB_FULL;
+                else
+                        *speed = IXGBE_LINK_SPEED_100_FULL;
+                break;
+        default:
                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
+        }
 
         return 0;
 }
@@ -3583,7 +3595,8 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
  **/
 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
 {
-        u32 gcr_ext, hlreg0;
+        u32 gcr_ext, hlreg0, i, poll;
+        u16 value;
 
         /*
          * If double reset is not requested then all transactions should
@@ -3600,6 +3613,23 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
 
+        /* wait for a last completion before clearing buffers */
+        IXGBE_WRITE_FLUSH(hw);
+        usleep_range(3000, 6000);
+
+        /* Before proceeding, make sure that the PCIe block does not have
+         * transactions pending.
+         */
+        poll = ixgbe_pcie_timeout_poll(hw);
+        for (i = 0; i < poll; i++) {
+                usleep_range(100, 200);
+                value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
+                if (ixgbe_removed(hw->hw_addr))
+                        break;
+                if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+                        break;
+        }
+
         /* initiate cleaning flow for buffers in the PCIe transaction layer */
         gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
         IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 48f35fc963f8..a507a6fe3624 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -286,6 +286,8 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
                                                       bwgid, ptype);
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
                                                  bwgid, ptype, prio_tc);
         default:
@@ -302,6 +304,8 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
                 return ixgbe_dcb_config_pfc_82598(hw, pfc_en);
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
         default:
                 break;
@@ -357,6 +361,8 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
                 break;
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
                                                   bwg_id, prio_type, prio_tc);
                 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
@@ -385,6 +391,8 @@ void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
         switch (hw->mac.type) {
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 ixgbe_dcb_read_rtrup2tc_82599(hw, map);
                 break;
         default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 58a7f5312a96..2707bda37418 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -180,6 +180,7 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
         switch (adapter->hw.mac.type) {
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
                 for (j = 0; j < netdev->addr_len; j++, i++)
                         perm_addr[i] = adapter->hw.mac.san_addr[j];
                 break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0ae038b9af90..26fd85e2bca5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -511,6 +511,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
                         break;
                 case ixgbe_mac_82599EB:
                 case ixgbe_mac_X540:
+                case ixgbe_mac_X550:
+                case ixgbe_mac_X550EM_x:
                         regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
                         regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
                         break;
@@ -622,6 +624,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
                 break;
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
                 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
                 for (i = 0; i < 8; i++)
@@ -1406,6 +1410,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
                 break;
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 toggle = 0x7FFFF30F;
                 test = reg_test_82599;
                 break;
@@ -1644,6 +1650,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
         switch (hw->mac.type) {
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
                 reg_ctl &= ~IXGBE_DMATXCTL_TE;
                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
@@ -1680,6 +1688,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
         switch (adapter->hw.mac.type) {
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
                 reg_data |= IXGBE_DMATXCTL_TE;
                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
@@ -1733,12 +1743,16 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
         reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
 
-        /* X540 needs to set the MACC.FLU bit to force link up */
-        if (adapter->hw.mac.type == ixgbe_mac_X540) {
+        /* X540 and X550 needs to set the MACC.FLU bit to force link up */
+        switch (adapter->hw.mac.type) {
+        case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
                 reg_data |= IXGBE_MACC_FLU;
                 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
-        } else {
+                break;
+        default:
                 if (hw->mac.orig_autoc) {
                         reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
                         IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
@@ -2776,7 +2790,14 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
         /* if we changed something we need to update flags */
         if (flags2 != adapter->flags2) {
                 struct ixgbe_hw *hw = &adapter->hw;
-                u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+                u32 mrqc;
+                unsigned int pf_pool = adapter->num_vfs;
+
+                if ((hw->mac.type >= ixgbe_mac_X550) &&
+                    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+                        mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
+                else
+                        mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
 
                 if ((flags2 & UDP_RSS_FLAGS) &&
                     !(adapter->flags2 & UDP_RSS_FLAGS))
@@ -2799,7 +2820,11 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
                 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
                         mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
 
-                IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+                if ((hw->mac.type >= ixgbe_mac_X550) &&
+                    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+                        IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
+                else
+                        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
         }
 
         return 0;
@@ -2833,6 +2858,8 @@ static int ixgbe_get_ts_info(struct net_device *dev,
         struct ixgbe_adapter *adapter = netdev_priv(dev);
 
         switch (adapter->hw.mac.type) {
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
         case ixgbe_mac_X540:
         case ixgbe_mac_82599EB:
                 info->so_timestamping =
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index ce40c77381e9..68e1e757ecef 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -126,6 +126,8 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
                 break;
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 if (num_tcs > 4) {
                         /*
                          * TCs    : TC0/1 TC2/3 TC4-7
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d2df4e3d1032..a5ca877eac1a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -835,6 +835,8 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                 break;
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 if (direction == -1) {
                         /* other causes */
                         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -871,6 +873,8 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
                 break;
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 mask = (qmask & 0xFFFFFFFF);
                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
                 mask = (qmask >> 32);
@@ -2155,6 +2159,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                 break;
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 ixgbe_set_ivar(adapter, -1, 1, v_idx);
                 break;
         default:
@@ -2264,6 +2270,8 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
                 break;
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 /*
                  * set the WDIS bit to not clear the timer bits and cause an
                  * immediate assertion of the interrupt
@@ -2467,6 +2475,8 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
                 break;
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 mask = (qmask & 0xFFFFFFFF);
                 if (mask)
                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
@@ -2493,6 +2503,8 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
                 break;
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 mask = (qmask & 0xFFFFFFFF);
                 if (mask)
                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
@@ -2525,6 +2537,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
                         mask |= IXGBE_EIMS_GPI_SDP0;
                 break;
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 mask |= IXGBE_EIMS_TS;
                 break;
         default:
@@ -2536,7 +2550,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
         case ixgbe_mac_82599EB:
                 mask |= IXGBE_EIMS_GPI_SDP1;
                 mask |= IXGBE_EIMS_GPI_SDP2;
+                /* fall through */
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 mask |= IXGBE_EIMS_ECC;
                 mask |= IXGBE_EIMS_MAILBOX;
                 break;
@@ -2544,9 +2561,6 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
                 break;
         }
 
-        if (adapter->hw.mac.type == ixgbe_mac_X540)
-                mask |= IXGBE_EIMS_TIMESYNC;
-
         if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
             !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
                 mask |= IXGBE_EIMS_FLOW_DIR;
@@ -2592,6 +2606,8 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
         switch (hw->mac.type) {
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 if (eicr & IXGBE_EICR_ECC) {
                         e_info(link, "Received ECC Err, initiating reset\n");
                         adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -2811,6 +2827,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
                 ixgbe_check_sfp_event(adapter, eicr);
                 /* Fall through */
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 if (eicr & IXGBE_EICR_ECC) {
                         e_info(link, "Received ECC Err, initiating reset\n");
                         adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -2905,6 +2923,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
                 break;
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
@@ -3190,14 +3210,10 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
 }
 
-static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
+static void ixgbe_setup_reta(struct ixgbe_adapter *adapter, const u32 *seed)
 {
         struct ixgbe_hw *hw = &adapter->hw;
-        static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
-                          0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
-                          0x6A3E67EA, 0x14364D17, 0x3BED200D};
-        u32 mrqc = 0, reta = 0;
-        u32 rxcsum;
+        u32 reta = 0;
         int i, j;
         u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
 
@@ -3223,6 +3239,16 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
                 if ((i & 3) == 3)
                         IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
         }
+}
+
+static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
+{
+        struct ixgbe_hw *hw = &adapter->hw;
+        static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
+                          0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
+                          0x6A3E67EA, 0x14364D17, 0x3BED200D};
+        u32 mrqc = 0, rss_field = 0;
+        u32 rxcsum;
 
         /* Disable indicating checksum in descriptor, enables RSS hash */
         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
@@ -3255,16 +3281,18 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
         }
 
         /* Perform hash on these packet types */
-        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
+        rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
                 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
                 IXGBE_MRQC_RSS_FIELD_IPV6 |
                 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
 
         if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
-                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
+                rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
         if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
-                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
+                rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
 
+        ixgbe_setup_reta(adapter, seed);
+        mrqc |= rss_field;
         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 }
 
@@ -3534,6 +3562,8 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
         u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
 
         switch (hw->mac.type) {
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
         case ixgbe_mac_82598EB:
                 /*
                  * For VMDq support of different descriptor types or
@@ -3657,6 +3687,8 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
                 break;
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
+        case ixgbe_mac_X550:
+        case ixgbe_mac_X550EM_x:
                 for (i = 0; i < adapter->num_rx_queues; i++) {
                         struct ixgbe_ring *ring = adapter->rx_ring[i];
 
@@ -3691,6 +3723,8 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
3691 break; 3723 break;
3692 case ixgbe_mac_82599EB: 3724 case ixgbe_mac_82599EB:
3693 case ixgbe_mac_X540: 3725 case ixgbe_mac_X540:
3726 case ixgbe_mac_X550:
3727 case ixgbe_mac_X550EM_x:
3694 for (i = 0; i < adapter->num_rx_queues; i++) { 3728 for (i = 0; i < adapter->num_rx_queues; i++) {
3695 struct ixgbe_ring *ring = adapter->rx_ring[i]; 3729 struct ixgbe_ring *ring = adapter->rx_ring[i];
3696 3730
@@ -4112,6 +4146,8 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
4112 /* Calculate delay value for device */ 4146 /* Calculate delay value for device */
4113 switch (hw->mac.type) { 4147 switch (hw->mac.type) {
4114 case ixgbe_mac_X540: 4148 case ixgbe_mac_X540:
4149 case ixgbe_mac_X550:
4150 case ixgbe_mac_X550EM_x:
4115 dv_id = IXGBE_DV_X540(link, tc); 4151 dv_id = IXGBE_DV_X540(link, tc);
4116 break; 4152 break;
4117 default: 4153 default:
@@ -4170,6 +4206,8 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
4170 /* Calculate delay value for device */ 4206 /* Calculate delay value for device */
4171 switch (hw->mac.type) { 4207 switch (hw->mac.type) {
4172 case ixgbe_mac_X540: 4208 case ixgbe_mac_X540:
4209 case ixgbe_mac_X550:
4210 case ixgbe_mac_X550EM_x:
4173 dv_id = IXGBE_LOW_DV_X540(tc); 4211 dv_id = IXGBE_LOW_DV_X540(tc);
4174 break; 4212 break;
4175 default: 4213 default:
@@ -4606,6 +4644,8 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
4606 break; 4644 break;
4607 case ixgbe_mac_82599EB: 4645 case ixgbe_mac_82599EB:
4608 case ixgbe_mac_X540: 4646 case ixgbe_mac_X540:
4647 case ixgbe_mac_X550:
4648 case ixgbe_mac_X550EM_x:
4609 default: 4649 default:
4610 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 4650 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4611 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 4651 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -4948,10 +4988,12 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
4948 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); 4988 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
4949 } 4989 }
4950 4990
 4951 /* Disable the Tx DMA engine on 82599 and X540 */ 4991 /* Disable the Tx DMA engine on 82599 and later MACs */
4952 switch (hw->mac.type) { 4992 switch (hw->mac.type) {
4953 case ixgbe_mac_82599EB: 4993 case ixgbe_mac_82599EB:
4954 case ixgbe_mac_X540: 4994 case ixgbe_mac_X540:
4995 case ixgbe_mac_X550:
4996 case ixgbe_mac_X550EM_x:
4955 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, 4997 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
4956 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & 4998 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
4957 ~IXGBE_DMATXCTL_TE)); 4999 ~IXGBE_DMATXCTL_TE));
@@ -5071,6 +5113,12 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
5071 if (fwsm & IXGBE_FWSM_TS_ENABLED) 5113 if (fwsm & IXGBE_FWSM_TS_ENABLED)
5072 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; 5114 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
5073 break; 5115 break;
5116 case ixgbe_mac_X550EM_x:
5117 case ixgbe_mac_X550:
5118#ifdef CONFIG_IXGBE_DCA
5119 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
5120#endif
5121 break;
5074 default: 5122 default:
5075 break; 5123 break;
5076 } 5124 }
@@ -5086,6 +5134,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
5086#ifdef CONFIG_IXGBE_DCB 5134#ifdef CONFIG_IXGBE_DCB
5087 switch (hw->mac.type) { 5135 switch (hw->mac.type) {
5088 case ixgbe_mac_X540: 5136 case ixgbe_mac_X540:
5137 case ixgbe_mac_X550:
5138 case ixgbe_mac_X550EM_x:
5089 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; 5139 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
5090 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; 5140 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
5091 break; 5141 break;
@@ -5675,6 +5725,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5675 break; 5725 break;
5676 case ixgbe_mac_82599EB: 5726 case ixgbe_mac_82599EB:
5677 case ixgbe_mac_X540: 5727 case ixgbe_mac_X540:
5728 case ixgbe_mac_X550:
5729 case ixgbe_mac_X550EM_x:
5678 pci_wake_from_d3(pdev, !!wufc); 5730 pci_wake_from_d3(pdev, !!wufc);
5679 break; 5731 break;
5680 default: 5732 default:
@@ -5806,6 +5858,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5806 break; 5858 break;
5807 case ixgbe_mac_82599EB: 5859 case ixgbe_mac_82599EB:
5808 case ixgbe_mac_X540: 5860 case ixgbe_mac_X540:
5861 case ixgbe_mac_X550:
5862 case ixgbe_mac_X550EM_x:
5809 hwstats->pxonrxc[i] += 5863 hwstats->pxonrxc[i] +=
5810 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 5864 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5811 break; 5865 break;
@@ -5819,7 +5873,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5819 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 5873 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
5820 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 5874 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
5821 if ((hw->mac.type == ixgbe_mac_82599EB) || 5875 if ((hw->mac.type == ixgbe_mac_82599EB) ||
5822 (hw->mac.type == ixgbe_mac_X540)) { 5876 (hw->mac.type == ixgbe_mac_X540) ||
5877 (hw->mac.type == ixgbe_mac_X550) ||
5878 (hw->mac.type == ixgbe_mac_X550EM_x)) {
5823 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 5879 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
5824 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ 5880 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
5825 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 5881 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -5842,7 +5898,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5842 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 5898 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5843 break; 5899 break;
5844 case ixgbe_mac_X540: 5900 case ixgbe_mac_X540:
5845 /* OS2BMC stats are X540 only*/ 5901 case ixgbe_mac_X550:
5902 case ixgbe_mac_X550EM_x:
5903 /* OS2BMC stats are X540 and later */
5846 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); 5904 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
5847 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); 5905 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
5848 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); 5906 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
@@ -6110,6 +6168,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
6110 } 6168 }
6111 break; 6169 break;
6112 case ixgbe_mac_X540: 6170 case ixgbe_mac_X540:
6171 case ixgbe_mac_X550:
6172 case ixgbe_mac_X550EM_x:
6113 case ixgbe_mac_82599EB: { 6173 case ixgbe_mac_82599EB: {
6114 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 6174 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
6115 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 6175 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -6221,6 +6281,10 @@ static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
6221 if (!adapter->num_vfs) 6281 if (!adapter->num_vfs)
6222 return false; 6282 return false;
6223 6283
 6284 /* resetting the PF is only needed for MACs before X550 */
6285 if (hw->mac.type >= ixgbe_mac_X550)
6286 return false;
6287
6224 for (i = 0; i < adapter->num_vfs; i++) { 6288 for (i = 0; i < adapter->num_vfs; i++) {
6225 for (j = 0; j < q_per_pool; j++) { 6289 for (j = 0; j < q_per_pool; j++) {
6226 u32 h, t; 6290 u32 h, t;
@@ -6430,11 +6494,11 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
6430 ciaa = (vf << 16) | 0x80000000; 6494 ciaa = (vf << 16) | 0x80000000;
6431 /* 32 bit read so align, we really want status at offset 6 */ 6495 /* 32 bit read so align, we really want status at offset 6 */
6432 ciaa |= PCI_COMMAND; 6496 ciaa |= PCI_COMMAND;
6433 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); 6497 IXGBE_WRITE_REG(hw, IXGBE_CIAA_BY_MAC(hw), ciaa);
6434 ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_82599); 6498 ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_BY_MAC(hw));
6435 ciaa &= 0x7FFFFFFF; 6499 ciaa &= 0x7FFFFFFF;
6436 /* disable debug mode asap after reading data */ 6500 /* disable debug mode asap after reading data */
6437 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); 6501 IXGBE_WRITE_REG(hw, IXGBE_CIAA_BY_MAC(hw), ciaa);
6438 /* Get the upper 16 bits which will be the PCI status reg */ 6502 /* Get the upper 16 bits which will be the PCI status reg */
6439 ciad >>= 16; 6503 ciad >>= 16;
6440 if (ciad & PCI_STATUS_REC_MASTER_ABORT) { 6504 if (ciad & PCI_STATUS_REC_MASTER_ABORT) {
@@ -6442,11 +6506,11 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
6442 /* Issue VFLR */ 6506 /* Issue VFLR */
6443 ciaa = (vf << 16) | 0x80000000; 6507 ciaa = (vf << 16) | 0x80000000;
6444 ciaa |= 0xA8; 6508 ciaa |= 0xA8;
6445 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); 6509 IXGBE_WRITE_REG(hw, IXGBE_CIAA_BY_MAC(hw), ciaa);
6446 ciad = 0x00008000; /* VFLR */ 6510 ciad = 0x00008000; /* VFLR */
6447 IXGBE_WRITE_REG(hw, IXGBE_CIAD_82599, ciad); 6511 IXGBE_WRITE_REG(hw, IXGBE_CIAD_BY_MAC(hw), ciad);
6448 ciaa &= 0x7FFFFFFF; 6512 ciaa &= 0x7FFFFFFF;
6449 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); 6513 IXGBE_WRITE_REG(hw, IXGBE_CIAA_BY_MAC(hw), ciaa);
6450 } 6514 }
6451 } 6515 }
6452} 6516}
@@ -8098,6 +8162,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8098 switch (adapter->hw.mac.type) { 8162 switch (adapter->hw.mac.type) {
8099 case ixgbe_mac_82599EB: 8163 case ixgbe_mac_82599EB:
8100 case ixgbe_mac_X540: 8164 case ixgbe_mac_X540:
8165 case ixgbe_mac_X550:
8166 case ixgbe_mac_X550EM_x:
8101 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 8167 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
8102 break; 8168 break;
8103 default: 8169 default:
@@ -8161,6 +8227,8 @@ skip_sriov:
8161 switch (adapter->hw.mac.type) { 8227 switch (adapter->hw.mac.type) {
8162 case ixgbe_mac_82599EB: 8228 case ixgbe_mac_82599EB:
8163 case ixgbe_mac_X540: 8229 case ixgbe_mac_X540:
8230 case ixgbe_mac_X550:
8231 case ixgbe_mac_X550EM_x:
8164 netdev->features |= NETIF_F_SCTP_CSUM; 8232 netdev->features |= NETIF_F_SCTP_CSUM;
8165 netdev->hw_features |= NETIF_F_SCTP_CSUM | 8233 netdev->hw_features |= NETIF_F_SCTP_CSUM |
8166 NETIF_F_NTUPLE; 8234 NETIF_F_NTUPLE;
@@ -8514,6 +8582,12 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
8514 case ixgbe_mac_X540: 8582 case ixgbe_mac_X540:
8515 device_id = IXGBE_X540_VF_DEVICE_ID; 8583 device_id = IXGBE_X540_VF_DEVICE_ID;
8516 break; 8584 break;
8585 case ixgbe_mac_X550:
8586 device_id = IXGBE_DEV_ID_X550_VF;
8587 break;
8588 case ixgbe_mac_X550EM_x:
8589 device_id = IXGBE_DEV_ID_X550EM_X_VF;
8590 break;
8517 default: 8591 default:
8518 device_id = 0; 8592 device_id = 0;
8519 break; 8593 break;
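
    Several of the new code paths in this file, such as the early return added to ixgbe_vf_tx_pending() and the *_BY_MAC() register selectors, test hw->mac.type >= ixgbe_mac_X550. That only works because the X550 enumerators are inserted after X540 in enum ixgbe_mac_type (see the last hunk of this diff). A small stand-alone sketch of that ordering dependence follows; the enum is abridged to the members shown in this patch and is_x550_or_later() is an illustrative helper, not a driver function.

    #include <assert.h>

    enum ixgbe_mac_type {       /* abridged; ordering matches the hunk below */
        ixgbe_mac_82598EB,
        ixgbe_mac_82599EB,
        ixgbe_mac_X540,
        ixgbe_mac_X550,
        ixgbe_mac_X550EM_x,
        ixgbe_num_macs
    };

    static int is_x550_or_later(enum ixgbe_mac_type t)
    {
        return t >= ixgbe_mac_X550;     /* relies purely on enum ordering */
    }

    int main(void)
    {
        assert(!is_x550_or_later(ixgbe_mac_X540));
        assert(is_x550_or_later(ixgbe_mac_X550EM_x));
        return 0;
    }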
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index cc8f0128286c..9993a471d668 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -305,6 +305,8 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
305 vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); 305 vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
306 break; 306 break;
307 case ixgbe_mac_X540: 307 case ixgbe_mac_X540:
308 case ixgbe_mac_X550:
309 case ixgbe_mac_X550EM_x:
308 vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); 310 vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
309 break; 311 break;
310 default: 312 default:
@@ -426,6 +428,8 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
426 struct ixgbe_mbx_info *mbx = &hw->mbx; 428 struct ixgbe_mbx_info *mbx = &hw->mbx;
427 429
428 if (hw->mac.type != ixgbe_mac_82599EB && 430 if (hw->mac.type != ixgbe_mac_82599EB &&
431 hw->mac.type != ixgbe_mac_X550 &&
432 hw->mac.type != ixgbe_mac_X550EM_x &&
429 hw->mac.type != ixgbe_mac_X540) 433 hw->mac.type != ixgbe_mac_X540)
430 return; 434 return;
431 435
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index d47b19f27c35..dc97c03134ec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -43,7 +43,7 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
43static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); 43static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
44static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); 44static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
45static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); 45static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
46static bool ixgbe_get_i2c_data(u32 *i2cctl); 46static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
47static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); 47static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
48static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); 48static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
49static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); 49static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
@@ -576,6 +576,10 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
576 *speed |= IXGBE_LINK_SPEED_100_FULL; 576 *speed |= IXGBE_LINK_SPEED_100_FULL;
577 } 577 }
578 578
579 /* Internal PHY does not support 100 Mbps */
580 if (hw->mac.type == ixgbe_mac_X550EM_x)
581 *speed &= ~IXGBE_LINK_SPEED_100_FULL;
582
579 return status; 583 return status;
580} 584}
581 585
@@ -632,10 +636,12 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
632 * @hw: pointer to hardware structure 636 * @hw: pointer to hardware structure
633 * 637 *
 634 * Restarts autonegotiation and PHY and waits for completion. 638 * Restarts autonegotiation and PHY and waits for completion.
 639 * This function always returns success; this is necessary since
 640 * it is called via a function pointer that could call other
 641 * functions that could return an error.
635 **/ 642 **/
636s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) 643s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
637{ 644{
638 s32 status;
639 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; 645 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
640 bool autoneg = false; 646 bool autoneg = false;
641 ixgbe_link_speed speed; 647 ixgbe_link_speed speed;
@@ -701,7 +707,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
701 hw->phy.ops.write_reg(hw, MDIO_CTRL1, 707 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
702 MDIO_MMD_AN, autoneg_reg); 708 MDIO_MMD_AN, autoneg_reg);
703 709
704 return status; 710 return 0;
705} 711}
706 712
707/** 713/**
@@ -1612,7 +1618,7 @@ fail:
1612 **/ 1618 **/
1613static void ixgbe_i2c_start(struct ixgbe_hw *hw) 1619static void ixgbe_i2c_start(struct ixgbe_hw *hw)
1614{ 1620{
1615 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); 1621 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
1616 1622
1617 /* Start condition must begin with data and clock high */ 1623 /* Start condition must begin with data and clock high */
1618 ixgbe_set_i2c_data(hw, &i2cctl, 1); 1624 ixgbe_set_i2c_data(hw, &i2cctl, 1);
@@ -1641,7 +1647,7 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw)
1641 **/ 1647 **/
1642static void ixgbe_i2c_stop(struct ixgbe_hw *hw) 1648static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
1643{ 1649{
1644 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); 1650 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
1645 1651
1646 /* Stop condition must begin with data low and clock high */ 1652 /* Stop condition must begin with data low and clock high */
1647 ixgbe_set_i2c_data(hw, &i2cctl, 0); 1653 ixgbe_set_i2c_data(hw, &i2cctl, 0);
@@ -1699,9 +1705,9 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
1699 } 1705 }
1700 1706
1701 /* Release SDA line (set high) */ 1707 /* Release SDA line (set high) */
1702 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); 1708 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
1703 i2cctl |= IXGBE_I2C_DATA_OUT; 1709 i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
1704 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl); 1710 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
1705 IXGBE_WRITE_FLUSH(hw); 1711 IXGBE_WRITE_FLUSH(hw);
1706 1712
1707 return status; 1713 return status;
@@ -1717,7 +1723,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
1717{ 1723{
1718 s32 status = 0; 1724 s32 status = 0;
1719 u32 i = 0; 1725 u32 i = 0;
1720 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); 1726 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
1721 u32 timeout = 10; 1727 u32 timeout = 10;
1722 bool ack = true; 1728 bool ack = true;
1723 1729
@@ -1730,8 +1736,8 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
1730 /* Poll for ACK. Note that ACK in I2C spec is 1736 /* Poll for ACK. Note that ACK in I2C spec is
1731 * transition from 1 to 0 */ 1737 * transition from 1 to 0 */
1732 for (i = 0; i < timeout; i++) { 1738 for (i = 0; i < timeout; i++) {
1733 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); 1739 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
1734 ack = ixgbe_get_i2c_data(&i2cctl); 1740 ack = ixgbe_get_i2c_data(hw, &i2cctl);
1735 1741
1736 udelay(1); 1742 udelay(1);
1737 if (ack == 0) 1743 if (ack == 0)
@@ -1760,15 +1766,15 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
1760 **/ 1766 **/
1761static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) 1767static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
1762{ 1768{
1763 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); 1769 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
1764 1770
1765 ixgbe_raise_i2c_clk(hw, &i2cctl); 1771 ixgbe_raise_i2c_clk(hw, &i2cctl);
1766 1772
1767 /* Minimum high period of clock is 4us */ 1773 /* Minimum high period of clock is 4us */
1768 udelay(IXGBE_I2C_T_HIGH); 1774 udelay(IXGBE_I2C_T_HIGH);
1769 1775
1770 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); 1776 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
1771 *data = ixgbe_get_i2c_data(&i2cctl); 1777 *data = ixgbe_get_i2c_data(hw, &i2cctl);
1772 1778
1773 ixgbe_lower_i2c_clk(hw, &i2cctl); 1779 ixgbe_lower_i2c_clk(hw, &i2cctl);
1774 1780
@@ -1788,7 +1794,7 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
1788static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) 1794static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
1789{ 1795{
1790 s32 status; 1796 s32 status;
1791 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); 1797 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
1792 1798
1793 status = ixgbe_set_i2c_data(hw, &i2cctl, data); 1799 status = ixgbe_set_i2c_data(hw, &i2cctl, data);
1794 if (status == 0) { 1800 if (status == 0) {
@@ -1824,14 +1830,14 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
1824 u32 i2cctl_r = 0; 1830 u32 i2cctl_r = 0;
1825 1831
1826 for (i = 0; i < timeout; i++) { 1832 for (i = 0; i < timeout; i++) {
1827 *i2cctl |= IXGBE_I2C_CLK_OUT; 1833 *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw);
1828 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); 1834 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
1829 IXGBE_WRITE_FLUSH(hw); 1835 IXGBE_WRITE_FLUSH(hw);
1830 /* SCL rise time (1000ns) */ 1836 /* SCL rise time (1000ns) */
1831 udelay(IXGBE_I2C_T_RISE); 1837 udelay(IXGBE_I2C_T_RISE);
1832 1838
1833 i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL); 1839 i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
1834 if (i2cctl_r & IXGBE_I2C_CLK_IN) 1840 if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw))
1835 break; 1841 break;
1836 } 1842 }
1837} 1843}
@@ -1846,9 +1852,9 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
1846static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) 1852static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
1847{ 1853{
1848 1854
1849 *i2cctl &= ~IXGBE_I2C_CLK_OUT; 1855 *i2cctl &= ~IXGBE_I2C_CLK_OUT_BY_MAC(hw);
1850 1856
1851 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); 1857 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
1852 IXGBE_WRITE_FLUSH(hw); 1858 IXGBE_WRITE_FLUSH(hw);
1853 1859
1854 /* SCL fall time (300ns) */ 1860 /* SCL fall time (300ns) */
@@ -1866,19 +1872,19 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
1866static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) 1872static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
1867{ 1873{
1868 if (data) 1874 if (data)
1869 *i2cctl |= IXGBE_I2C_DATA_OUT; 1875 *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
1870 else 1876 else
1871 *i2cctl &= ~IXGBE_I2C_DATA_OUT; 1877 *i2cctl &= ~IXGBE_I2C_DATA_OUT_BY_MAC(hw);
1872 1878
1873 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); 1879 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
1874 IXGBE_WRITE_FLUSH(hw); 1880 IXGBE_WRITE_FLUSH(hw);
1875 1881
1876 /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ 1882 /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
1877 udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); 1883 udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
1878 1884
1879 /* Verify data was set correctly */ 1885 /* Verify data was set correctly */
1880 *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); 1886 *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
1881 if (data != ixgbe_get_i2c_data(i2cctl)) { 1887 if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
1882 hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); 1888 hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
1883 return IXGBE_ERR_I2C; 1889 return IXGBE_ERR_I2C;
1884 } 1890 }
@@ -1893,9 +1899,9 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
1893 * 1899 *
1894 * Returns the I2C data bit value 1900 * Returns the I2C data bit value
1895 **/ 1901 **/
1896static bool ixgbe_get_i2c_data(u32 *i2cctl) 1902static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
1897{ 1903{
1898 if (*i2cctl & IXGBE_I2C_DATA_IN) 1904 if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw))
1899 return true; 1905 return true;
1900 return false; 1906 return false;
1901} 1907}
@@ -1909,7 +1915,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl)
1909 **/ 1915 **/
1910static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) 1916static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
1911{ 1917{
1912 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); 1918 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
1913 u32 i; 1919 u32 i;
1914 1920
1915 ixgbe_i2c_start(hw); 1921 ixgbe_i2c_start(hw);
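
    The ixgbe_phy.c changes thread the hw pointer into the I2C bit-bang helpers because X550-class MACs expose the I2C control bits at different positions (and I2CCTL itself at a different offset), selected through the new *_BY_MAC() macros. A sketch of the data-in case is shown below, using the mask values from the I2CCTL bit-mask hunk in ixgbe_type.h later in this diff; the types and helper names are illustrative only.

    #include <stdbool.h>
    #include <stdio.h>

    enum mac_type { MAC_X540, MAC_X550 };       /* abridged for the sketch */

    static unsigned int i2c_data_in_mask(enum mac_type t)
    {
        /* values copied from the ixgbe_type.h hunk below */
        return (t >= MAC_X550) ? 0x00001000 : 0x00000004;
    }

    static bool get_i2c_data(enum mac_type t, unsigned int i2cctl)
    {
        return (i2cctl & i2c_data_in_mask(t)) != 0;
    }

    int main(void)
    {
        unsigned int i2cctl = 0x00001000;       /* example register snapshot */

        printf("X540 sees SDA %s\n", get_i2c_data(MAC_X540, i2cctl) ? "high" : "low");
        printf("X550 sees SDA %s\n", get_i2c_data(MAC_X550, i2cctl) ? "high" : "low");
        return 0;
    }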
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 97c85b859536..04eee7c7b653 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -618,6 +618,27 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
618 return 0; 618 return 0;
619} 619}
620 620
621static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
622 u32 qde)
623{
624 struct ixgbe_hw *hw = &adapter->hw;
625 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
626 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
627 int i;
628
629 for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
630 u32 reg;
631
632 /* flush previous write */
633 IXGBE_WRITE_FLUSH(hw);
634
635 /* indicate to hardware that we want to set drop enable */
636 reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE;
637 reg |= i << IXGBE_QDE_IDX_SHIFT;
638 IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
639 }
640}
641
621static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) 642static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
622{ 643{
623 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; 644 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
@@ -647,15 +668,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
647 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); 668 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
648 669
649 /* force drop enable for all VF Rx queues */ 670 /* force drop enable for all VF Rx queues */
650 for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) { 671 ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);
651 /* flush previous write */
652 IXGBE_WRITE_FLUSH(hw);
653
654 /* indicate to hardware that we want to set drop enable */
655 reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE;
656 reg |= i << IXGBE_QDE_IDX_SHIFT;
657 IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
658 }
659 672
660 /* enable receive for vf */ 673 /* enable receive for vf */
661 reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); 674 reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
@@ -1079,52 +1092,86 @@ int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1079 return ixgbe_set_vf_mac(adapter, vf, mac); 1092 return ixgbe_set_vf_mac(adapter, vf, mac);
1080} 1093}
1081 1094
1095static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
1096 u16 vlan, u8 qos)
1097{
1098 struct ixgbe_hw *hw = &adapter->hw;
1099 int err = 0;
1100
1101 if (adapter->vfinfo[vf].pf_vlan)
1102 err = ixgbe_set_vf_vlan(adapter, false,
1103 adapter->vfinfo[vf].pf_vlan,
1104 vf);
1105 if (err)
1106 goto out;
1107 ixgbe_set_vmvir(adapter, vlan, qos, vf);
1108 ixgbe_set_vmolr(hw, vf, false);
1109 if (adapter->vfinfo[vf].spoofchk_enabled)
1110 hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
1111 adapter->vfinfo[vf].vlan_count++;
1112
1113 /* enable hide vlan on X550 */
1114 if (hw->mac.type >= ixgbe_mac_X550)
1115 ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE |
1116 IXGBE_QDE_HIDE_VLAN);
1117
1118 adapter->vfinfo[vf].pf_vlan = vlan;
1119 adapter->vfinfo[vf].pf_qos = qos;
1120 dev_info(&adapter->pdev->dev,
1121 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
1122 if (test_bit(__IXGBE_DOWN, &adapter->state)) {
1123 dev_warn(&adapter->pdev->dev,
1124 "The VF VLAN has been set, but the PF device is not up.\n");
1125 dev_warn(&adapter->pdev->dev,
1126 "Bring the PF device up before attempting to use the VF device.\n");
1127 }
1128
1129out:
1130 return err;
1131}
1132
1133static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
1134{
1135 struct ixgbe_hw *hw = &adapter->hw;
1136 int err;
1137
1138 err = ixgbe_set_vf_vlan(adapter, false,
1139 adapter->vfinfo[vf].pf_vlan, vf);
1140 ixgbe_clear_vmvir(adapter, vf);
1141 ixgbe_set_vmolr(hw, vf, true);
1142 hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
1143 if (adapter->vfinfo[vf].vlan_count)
1144 adapter->vfinfo[vf].vlan_count--;
1145 adapter->vfinfo[vf].pf_vlan = 0;
1146 adapter->vfinfo[vf].pf_qos = 0;
1147
1148 return err;
1149}
1150
1082int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) 1151int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1083{ 1152{
1084 int err = 0; 1153 int err = 0;
1085 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1154 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1086 struct ixgbe_hw *hw = &adapter->hw;
1087 1155
1088 if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) 1156 if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
1089 return -EINVAL; 1157 return -EINVAL;
1090 if (vlan || qos) { 1158 if (vlan || qos) {
 1159 /* Check if there is already a port VLAN set; if so,
 1160 * we have to delete the old one first before we
 1161 * can set the new one. The usage model had
 1162 * previously assumed the user would delete the
 1163 * old port VLAN before setting a new one, but this
 1164 * is not necessarily the case.
1165 */
1091 if (adapter->vfinfo[vf].pf_vlan) 1166 if (adapter->vfinfo[vf].pf_vlan)
1092 err = ixgbe_set_vf_vlan(adapter, false, 1167 err = ixgbe_disable_port_vlan(adapter, vf);
1093 adapter->vfinfo[vf].pf_vlan,
1094 vf);
1095 if (err)
1096 goto out;
1097 err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
1098 if (err) 1168 if (err)
1099 goto out; 1169 goto out;
1100 ixgbe_set_vmvir(adapter, vlan, qos, vf); 1170 err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos);
1101 ixgbe_set_vmolr(hw, vf, false);
1102 if (adapter->vfinfo[vf].spoofchk_enabled)
1103 hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
1104 adapter->vfinfo[vf].vlan_count++;
1105 adapter->vfinfo[vf].pf_vlan = vlan;
1106 adapter->vfinfo[vf].pf_qos = qos;
1107 dev_info(&adapter->pdev->dev,
1108 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
1109 if (test_bit(__IXGBE_DOWN, &adapter->state)) {
1110 dev_warn(&adapter->pdev->dev,
1111 "The VF VLAN has been set,"
1112 " but the PF device is not up.\n");
1113 dev_warn(&adapter->pdev->dev,
1114 "Bring the PF device up before"
1115 " attempting to use the VF device.\n");
1116 }
1117 } else { 1171 } else {
1118 err = ixgbe_set_vf_vlan(adapter, false, 1172 err = ixgbe_disable_port_vlan(adapter, vf);
1119 adapter->vfinfo[vf].pf_vlan, vf);
1120 ixgbe_clear_vmvir(adapter, vf);
1121 ixgbe_set_vmolr(hw, vf, true);
1122 hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
1123 if (adapter->vfinfo[vf].vlan_count)
1124 adapter->vfinfo[vf].vlan_count--;
1125 adapter->vfinfo[vf].pf_vlan = 0;
1126 adapter->vfinfo[vf].pf_qos = 0;
1127 } 1173 }
1174
1128out: 1175out:
1129 return err; 1176 return err;
1130} 1177}
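
    The new ixgbe_write_qde() helper above programs one Queue Drop Enable command word per VF queue, and ixgbe_enable_port_vlan() now also sets the hide-VLAN bit on X550 and later. A sketch of how that command word is assembled, with the bit values copied from the Queue Drop Enable defines in ixgbe_type.h below; build_qde_word() is a hypothetical helper, not a driver function.

    #include <stdio.h>

    #define QDE_ENABLE      0x00000001
    #define QDE_HIDE_VLAN   0x00000002
    #define QDE_IDX_SHIFT   8
    #define QDE_WRITE       0x00010000

    static unsigned int build_qde_word(unsigned int queue, unsigned int flags)
    {
        /* tell the hardware this is a write, for this queue, with these flags */
        return QDE_WRITE | flags | (queue << QDE_IDX_SHIFT);
    }

    int main(void)
    {
        unsigned int vf = 3, q_per_pool = 2, i;

        for (i = vf * q_per_pool; i < (vf + 1) * q_per_pool; i++)
            printf("queue %u -> QDE = 0x%08x\n", i,
                   build_qde_word(i, QDE_ENABLE | QDE_HIDE_VLAN));
        return 0;
    }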
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index dfd55d83bc03..64de20d1de56 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -77,6 +77,8 @@
77/* VF Device IDs */ 77/* VF Device IDs */
78#define IXGBE_DEV_ID_82599_VF 0x10ED 78#define IXGBE_DEV_ID_82599_VF 0x10ED
79#define IXGBE_DEV_ID_X540_VF 0x1515 79#define IXGBE_DEV_ID_X540_VF 0x1515
80#define IXGBE_DEV_ID_X550_VF 0x1565
81#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
80 82
81/* General Registers */ 83/* General Registers */
82#define IXGBE_CTRL 0x00000 84#define IXGBE_CTRL 0x00000
@@ -84,7 +86,8 @@
84#define IXGBE_CTRL_EXT 0x00018 86#define IXGBE_CTRL_EXT 0x00018
85#define IXGBE_ESDP 0x00020 87#define IXGBE_ESDP 0x00020
86#define IXGBE_EODSDP 0x00028 88#define IXGBE_EODSDP 0x00028
87#define IXGBE_I2CCTL 0x00028 89#define IXGBE_I2CCTL_BY_MAC(_hw)((((_hw)->mac.type >= ixgbe_mac_X550) ? \
90 0x15F5C : 0x00028))
88#define IXGBE_LEDCTL 0x00200 91#define IXGBE_LEDCTL 0x00200
89#define IXGBE_FRTIMER 0x00048 92#define IXGBE_FRTIMER 0x00048
90#define IXGBE_TCPTIMER 0x0004C 93#define IXGBE_TCPTIMER 0x0004C
@@ -112,10 +115,14 @@
112#define IXGBE_VPDDIAG1 0x10208 115#define IXGBE_VPDDIAG1 0x10208
113 116
114/* I2CCTL Bit Masks */ 117/* I2CCTL Bit Masks */
115#define IXGBE_I2C_CLK_IN 0x00000001 118#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
116#define IXGBE_I2C_CLK_OUT 0x00000002 119 0x00004000 : 0x00000001)
117#define IXGBE_I2C_DATA_IN 0x00000004 120#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
118#define IXGBE_I2C_DATA_OUT 0x00000008 121 0x00000200 : 0x00000002)
122#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
123 0x00001000 : 0x00000004)
124#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
125 0x00000400 : 0x00000008)
119#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500 126#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
120 127
121#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 128#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
@@ -292,6 +299,14 @@ struct ixgbe_thermal_sensor_data {
292#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ 299#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
293#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ 300#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
294 301
302/* Registers for setting up RSS on X550 with SRIOV
303 * _p - pool number (0..63)
304 * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA)
305 */
306#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4))
307#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40))
308#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40))
309
295/* Flow Director registers */ 310/* Flow Director registers */
296#define IXGBE_FDIRCTRL 0x0EE00 311#define IXGBE_FDIRCTRL 0x0EE00
297#define IXGBE_FDIRHKEY 0x0EE68 312#define IXGBE_FDIRHKEY 0x0EE68
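
    The per-pool RSS registers added above are addressed arithmetically from the pool number and word index. A short worked example of the resulting offsets, using the formulas exactly as defined in the PFVFRSSRK/PFVFRETA macros; the helper functions are illustrative only.

    #include <stdio.h>

    static unsigned int pfvfrssrk(unsigned int i, unsigned int p)
    {
        return 0x018000 + i * 4 + p * 0x40;     /* hash-key word i of pool p */
    }

    static unsigned int pfvfreta(unsigned int i, unsigned int p)
    {
        return 0x019000 + i * 4 + p * 0x40;     /* redirection-table word i of pool p */
    }

    int main(void)
    {
        /* key word 2 and redirection-table word 5 for pool 7 */
        printf("PFVFRSSRK(2, 7) = 0x%05x\n", pfvfrssrk(2, 7));  /* 0x181c8 */
        printf("PFVFRETA(5, 7)  = 0x%05x\n", pfvfreta(5, 7));   /* 0x191d4 */
        return 0;
    }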
@@ -798,6 +813,12 @@ struct ixgbe_thermal_sensor_data {
798#define IXGBE_PBACLR_82599 0x11068 813#define IXGBE_PBACLR_82599 0x11068
799#define IXGBE_CIAA_82599 0x11088 814#define IXGBE_CIAA_82599 0x11088
800#define IXGBE_CIAD_82599 0x1108C 815#define IXGBE_CIAD_82599 0x1108C
816#define IXGBE_CIAA_X550 0x11508
817#define IXGBE_CIAD_X550 0x11510
818#define IXGBE_CIAA_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
819 IXGBE_CIAA_X550 : IXGBE_CIAA_82599))
820#define IXGBE_CIAD_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
821 IXGBE_CIAD_X550 : IXGBE_CIAD_82599))
801#define IXGBE_PICAUSE 0x110B0 822#define IXGBE_PICAUSE 0x110B0
802#define IXGBE_PIENA 0x110B8 823#define IXGBE_PIENA 0x110B8
803#define IXGBE_CDQ_MBR_82599 0x110B4 824#define IXGBE_CDQ_MBR_82599 0x110B4
@@ -1632,6 +1653,7 @@ enum {
1632#define IXGBE_LINKS_TL_FAULT 0x00001000 1653#define IXGBE_LINKS_TL_FAULT 0x00001000
1633#define IXGBE_LINKS_SIGNAL 0x00000F00 1654#define IXGBE_LINKS_SIGNAL 0x00000F00
1634 1655
1656#define IXGBE_LINKS_SPEED_NON_STD 0x08000000
1635#define IXGBE_LINKS_SPEED_82599 0x30000000 1657#define IXGBE_LINKS_SPEED_82599 0x30000000
1636#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 1658#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
1637#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 1659#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
@@ -2000,6 +2022,7 @@ enum {
2000 2022
2001/* Queue Drop Enable */ 2023/* Queue Drop Enable */
2002#define IXGBE_QDE_ENABLE 0x00000001 2024#define IXGBE_QDE_ENABLE 0x00000001
2025#define IXGBE_QDE_HIDE_VLAN 0x00000002
2003#define IXGBE_QDE_IDX_MASK 0x00007F00 2026#define IXGBE_QDE_IDX_MASK 0x00007F00
2004#define IXGBE_QDE_IDX_SHIFT 8 2027#define IXGBE_QDE_IDX_SHIFT 8
2005#define IXGBE_QDE_WRITE 0x00010000 2028#define IXGBE_QDE_WRITE 0x00010000
@@ -2437,10 +2460,12 @@ struct ixgbe_adv_tx_context_desc {
2437typedef u32 ixgbe_autoneg_advertised; 2460typedef u32 ixgbe_autoneg_advertised;
2438/* Link speed */ 2461/* Link speed */
2439typedef u32 ixgbe_link_speed; 2462typedef u32 ixgbe_link_speed;
2440#define IXGBE_LINK_SPEED_UNKNOWN 0 2463#define IXGBE_LINK_SPEED_UNKNOWN 0
2441#define IXGBE_LINK_SPEED_100_FULL 0x0008 2464#define IXGBE_LINK_SPEED_100_FULL 0x0008
2442#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 2465#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
2443#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 2466#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400
2467#define IXGBE_LINK_SPEED_5GB_FULL 0x0800
2468#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
2444#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ 2469#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
2445 IXGBE_LINK_SPEED_10GB_FULL) 2470 IXGBE_LINK_SPEED_10GB_FULL)
2446#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ 2471#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
@@ -2588,6 +2613,8 @@ enum ixgbe_mac_type {
2588 ixgbe_mac_82598EB, 2613 ixgbe_mac_82598EB,
2589 ixgbe_mac_82599EB, 2614 ixgbe_mac_82599EB,
2590 ixgbe_mac_X540, 2615 ixgbe_mac_X540,
2616 ixgbe_mac_X550,
2617 ixgbe_mac_X550EM_x,
2591 ixgbe_num_macs 2618 ixgbe_num_macs
2592}; 2619};
2593 2620