Diffstat (limited to 'drivers/net/ethernet/intel/ice')

 drivers/net/ethernet/intel/ice/ice.h            |  15
 drivers/net/ethernet/intel/ice/ice_adminq_cmd.h |  25
 drivers/net/ethernet/intel/ice/ice_common.c     |  30
 drivers/net/ethernet/intel/ice/ice_controlq.c   |  29
 drivers/net/ethernet/intel/ice/ice_ethtool.c    |  52
 drivers/net/ethernet/intel/ice/ice_hw_autogen.h |   8
 drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h  |   1
 drivers/net/ethernet/intel/ice/ice_main.c       | 115
 drivers/net/ethernet/intel/ice/ice_nvm.c        |   5
 drivers/net/ethernet/intel/ice/ice_sched.c      |   3
 drivers/net/ethernet/intel/ice/ice_switch.c     |   4
 drivers/net/ethernet/intel/ice/ice_switch.h     |   6
 drivers/net/ethernet/intel/ice/ice_txrx.h       |   2
 drivers/net/ethernet/intel/ice/ice_type.h       |  16
 14 files changed, 185 insertions(+), 126 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index d8b5fff581e7..868f4a1d0f72 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -89,6 +89,13 @@ extern const char ice_drv_ver[];
 #define ice_for_each_rxq(vsi, i) \
 	for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
 
+/* Macros for each allocated tx/rx ring whether used or not in a VSI */
+#define ice_for_each_alloc_txq(vsi, i) \
+	for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
+
+#define ice_for_each_alloc_rxq(vsi, i) \
+	for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
+
 struct ice_tc_info {
 	u16 qoffset;
 	u16 qcount;
@@ -189,9 +196,9 @@ struct ice_vsi {
 	struct list_head tmp_sync_list; /* MAC filters to be synced */
 	struct list_head tmp_unsync_list; /* MAC filters to be unsynced */
 
-	bool irqs_ready;
-	bool current_isup; /* Sync 'link up' logging */
-	bool stat_offsets_loaded;
+	u8 irqs_ready;
+	u8 current_isup; /* Sync 'link up' logging */
+	u8 stat_offsets_loaded;
 
 	/* queue information */
 	u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -262,7 +269,7 @@ struct ice_pf {
 	struct ice_hw_port_stats stats;
 	struct ice_hw_port_stats stats_prev;
 	struct ice_hw hw;
-	bool stat_prev_loaded; /* has previous stats been loaded */
+	u8 stat_prev_loaded; /* has previous stats been loaded */
 	char int_name[ICE_INT_NAME_STR_LEN];
 };
 
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 7541ec2270b3..a0614f472658 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -329,19 +329,19 @@ struct ice_aqc_vsi_props {
 	/* VLAN section */
 	__le16 pvid; /* VLANS include priority bits */
 	u8 pvlan_reserved[2];
-	u8 port_vlan_flags;
-#define ICE_AQ_VSI_PVLAN_MODE_S 0
-#define ICE_AQ_VSI_PVLAN_MODE_M (0x3 << ICE_AQ_VSI_PVLAN_MODE_S)
-#define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED 0x1
-#define ICE_AQ_VSI_PVLAN_MODE_TAGGED 0x2
-#define ICE_AQ_VSI_PVLAN_MODE_ALL 0x3
+	u8 vlan_flags;
+#define ICE_AQ_VSI_VLAN_MODE_S 0
+#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S)
+#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1
+#define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2
+#define ICE_AQ_VSI_VLAN_MODE_ALL 0x3
 #define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2)
-#define ICE_AQ_VSI_PVLAN_EMOD_S 3
-#define ICE_AQ_VSI_PVLAN_EMOD_M (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR (0x2 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_S 3
+#define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
 	u8 pvlan_reserved2[3];
 	/* ingress egress up sections */
 	__le32 ingress_table; /* bitmap, 3 bits per up */
@@ -594,6 +594,7 @@ struct ice_sw_rule_lg_act {
 #define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S)
 #define ICE_LG_ACT_GENERIC_PRIORITY_S 22
 #define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S)
+#define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX 7
 
 /* Action = 7 - Set Stat count */
 #define ICE_LG_ACT_STAT_COUNT 0x7
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 71d032cc5fa7..661beea6af79 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -45,6 +45,9 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
 /**
  * ice_clear_pf_cfg - Clear PF configuration
  * @hw: pointer to the hardware structure
+ *
+ * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
+ * configuration, flow director filters, etc.).
  */
 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
 {
@@ -1483,7 +1486,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
 	struct ice_phy_info *phy_info;
 	enum ice_status status = 0;
 
-	if (!pi)
+	if (!pi || !link_up)
 		return ICE_ERR_PARAM;
 
 	phy_info = &pi->phy;
@@ -1619,20 +1622,23 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
 	}
 
 	/* LUT size is only valid for Global and PF table types */
-	if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) {
-		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
-			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
-			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-	} else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) {
+	switch (lut_size) {
+	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
+		break;
+	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-	} else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) &&
-		   (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) {
-		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
-			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
-			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-	} else {
+		break;
+	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
+		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
+			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
+				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+			break;
+		}
+		/* fall-through */
+	default:
 		status = ICE_ERR_PARAM;
 		goto ice_aq_get_set_rss_lut_exit;
 	}
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 7c511f144ed6..62be72fdc8f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -597,10 +597,14 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
 	return 0;
 
 init_ctrlq_free_rq:
-	ice_shutdown_rq(hw, cq);
-	ice_shutdown_sq(hw, cq);
-	mutex_destroy(&cq->sq_lock);
-	mutex_destroy(&cq->rq_lock);
+	if (cq->rq.head) {
+		ice_shutdown_rq(hw, cq);
+		mutex_destroy(&cq->rq_lock);
+	}
+	if (cq->sq.head) {
+		ice_shutdown_sq(hw, cq);
+		mutex_destroy(&cq->sq_lock);
+	}
 	return status;
 }
 
@@ -706,10 +710,14 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 		return;
 	}
 
-	ice_shutdown_sq(hw, cq);
-	ice_shutdown_rq(hw, cq);
-	mutex_destroy(&cq->sq_lock);
-	mutex_destroy(&cq->rq_lock);
+	if (cq->sq.head) {
+		ice_shutdown_sq(hw, cq);
+		mutex_destroy(&cq->sq_lock);
+	}
+	if (cq->rq.head) {
+		ice_shutdown_rq(hw, cq);
+		mutex_destroy(&cq->rq_lock);
+	}
 }
 
 /**
@@ -1057,8 +1065,11 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 
 clean_rq_elem_out:
 	/* Set pending if needed, unlock and return */
-	if (pending)
+	if (pending) {
+		/* re-read HW head to calculate actual pending messages */
+		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
 		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
+	}
 clean_rq_elem_err:
 	mutex_unlock(&cq->rq_lock);
 
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 1db304c01d10..c71a9b528d6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -26,7 +26,7 @@ static int ice_q_stats_len(struct net_device *netdev)
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 
-	return ((np->vsi->num_txq + np->vsi->num_rxq) *
+	return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
 		(sizeof(struct ice_q_stats) / sizeof(u64)));
 }
 
@@ -218,7 +218,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 			p += ETH_GSTRING_LEN;
 		}
 
-		ice_for_each_txq(vsi, i) {
+		ice_for_each_alloc_txq(vsi, i) {
 			snprintf(p, ETH_GSTRING_LEN,
 				 "tx-queue-%u.tx_packets", i);
 			p += ETH_GSTRING_LEN;
@@ -226,7 +226,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 			p += ETH_GSTRING_LEN;
 		}
 
-		ice_for_each_rxq(vsi, i) {
+		ice_for_each_alloc_rxq(vsi, i) {
 			snprintf(p, ETH_GSTRING_LEN,
 				 "rx-queue-%u.rx_packets", i);
 			p += ETH_GSTRING_LEN;
@@ -253,6 +253,24 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
 {
 	switch (sset) {
 	case ETH_SS_STATS:
+		/* The number (and order) of strings reported *must* remain
+		 * constant for a given netdevice. This function must not
+		 * report a different number based on run time parameters
+		 * (such as the number of queues in use, or the setting of
+		 * a private ethtool flag). This is due to the nature of the
+		 * ethtool stats API.
+		 *
+		 * User space programs such as ethtool must make 3 separate
+		 * ioctl requests, one for size, one for the strings, and
+		 * finally one for the stats. Since these cross into
+		 * user space, changes to the number or size could result in
+		 * undefined memory access or incorrect string<->value
+		 * correlations for statistics.
+		 *
+		 * Even if it appears to be safe, changes to the size or
+		 * order of strings will suffer from race conditions and are
+		 * not safe.
+		 */
 		return ICE_ALL_STATS_LEN(netdev);
 	default:
 		return -EOPNOTSUPP;
@@ -280,18 +298,26 @@ ice_get_ethtool_stats(struct net_device *netdev,
 	/* populate per queue stats */
 	rcu_read_lock();
 
-	ice_for_each_txq(vsi, j) {
+	ice_for_each_alloc_txq(vsi, j) {
 		ring = READ_ONCE(vsi->tx_rings[j]);
-		if (!ring)
-			continue;
-		data[i++] = ring->stats.pkts;
-		data[i++] = ring->stats.bytes;
+		if (ring) {
+			data[i++] = ring->stats.pkts;
+			data[i++] = ring->stats.bytes;
+		} else {
+			data[i++] = 0;
+			data[i++] = 0;
+		}
 	}
 
-	ice_for_each_rxq(vsi, j) {
+	ice_for_each_alloc_rxq(vsi, j) {
 		ring = READ_ONCE(vsi->rx_rings[j]);
-		data[i++] = ring->stats.pkts;
-		data[i++] = ring->stats.bytes;
+		if (ring) {
+			data[i++] = ring->stats.pkts;
+			data[i++] = ring->stats.bytes;
+		} else {
+			data[i++] = 0;
+			data[i++] = 0;
+		}
 	}
 
 	rcu_read_unlock();
@@ -519,7 +545,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 		goto done;
 	}
 
-	for (i = 0; i < vsi->num_txq; i++) {
+	for (i = 0; i < vsi->alloc_txq; i++) {
 		/* clone ring and setup updated count */
 		tx_rings[i] = *vsi->tx_rings[i];
 		tx_rings[i].count = new_tx_cnt;
@@ -551,7 +577,7 @@ process_rx:
 		goto done;
 	}
 
-	for (i = 0; i < vsi->num_rxq; i++) {
+	for (i = 0; i < vsi->alloc_rxq; i++) {
 		/* clone ring and setup updated count */
 		rx_rings[i] = *vsi->rx_rings[i];
 		rx_rings[i].count = new_rx_cnt;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 499904874b3f..6076fc87df9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -121,10 +121,6 @@
 #define PFINT_FW_CTL_CAUSE_ENA_S 30
 #define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
 #define PFINT_OICR 0x0016CA00
-#define PFINT_OICR_HLP_RDY_S 14
-#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S)
-#define PFINT_OICR_CPM_RDY_S 15
-#define PFINT_OICR_CPM_RDY_M BIT(PFINT_OICR_CPM_RDY_S)
 #define PFINT_OICR_ECC_ERR_S 16
 #define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S)
 #define PFINT_OICR_MAL_DETECT_S 19
@@ -133,10 +129,6 @@
 #define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S)
 #define PFINT_OICR_PCI_EXCEPTION_S 21
 #define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S)
-#define PFINT_OICR_GPIO_S 22
-#define PFINT_OICR_GPIO_M BIT(PFINT_OICR_GPIO_S)
-#define PFINT_OICR_STORM_DETECT_S 24
-#define PFINT_OICR_STORM_DETECT_M BIT(PFINT_OICR_STORM_DETECT_S)
 #define PFINT_OICR_HMC_ERR_S 26
 #define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S)
 #define PFINT_OICR_PE_CRITERR_S 28
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index d23a91665b46..068dbc740b76 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -265,6 +265,7 @@ enum ice_rx_flex_desc_status_error_0_bits {
 struct ice_rlan_ctx {
 	u16 head;
 	u16 cpuid; /* bigger than needed, see above for reason */
+#define ICE_RLAN_BASE_S 7
 	u64 base;
 	u16 qlen;
 #define ICE_RLAN_CTX_DBUF_S 7
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 5299caf55a7f..f1e80eed2fd6 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -901,7 +901,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
 		case ice_aqc_opc_get_link_status:
 			if (ice_handle_link_event(pf))
 				dev_err(&pf->pdev->dev,
-					"Could not handle link event");
+					"Could not handle link event\n");
 			break;
 		default:
 			dev_dbg(&pf->pdev->dev,
@@ -917,13 +917,27 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
 }
 
 /**
+ * ice_ctrlq_pending - check if there is a difference between ntc and ntu
+ * @hw: pointer to hardware info
+ * @cq: control queue information
+ *
+ * returns true if there are pending messages in a queue, false if there aren't
+ */
+static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+	u16 ntu;
+
+	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
+	return cq->rq.next_to_clean != ntu;
+}
+
+/**
  * ice_clean_adminq_subtask - clean the AdminQ rings
  * @pf: board private structure
  */
 static void ice_clean_adminq_subtask(struct ice_pf *pf)
 {
 	struct ice_hw *hw = &pf->hw;
-	u32 val;
 
 	if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
 		return;
@@ -933,9 +947,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
 
 	clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
 
-	/* re-enable Admin queue interrupt causes */
-	val = rd32(hw, PFINT_FW_CTL);
-	wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M));
+	/* There might be a situation where new messages arrive to a control
+	 * queue between processing the last message and clearing the
+	 * EVENT_PENDING bit. So before exiting, check queue head again (using
+	 * ice_ctrlq_pending) and process new messages if any.
+	 */
+	if (ice_ctrlq_pending(hw, &hw->adminq))
+		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
 
 	ice_flush(hw);
 }
@@ -1295,11 +1313,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 		qcount = numq_tc;
 	}
 
-	/* find higher power-of-2 of qcount */
-	pow = ilog2(qcount);
-
-	if (!is_power_of_2(qcount))
-		pow++;
+	/* find the (rounded up) power-of-2 of qcount */
+	pow = order_base_2(qcount);
 
 	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
 		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
@@ -1352,14 +1367,15 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
 	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
 	/* Traffic from VSI can be sent to LAN */
 	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
-	/* Allow all packets untagged/tagged */
-	ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL &
-				       ICE_AQ_VSI_PVLAN_MODE_M) >>
-				      ICE_AQ_VSI_PVLAN_MODE_S);
-	/* Show VLAN/UP from packets in Rx descriptors */
-	ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH &
-					ICE_AQ_VSI_PVLAN_EMOD_M) >>
-				       ICE_AQ_VSI_PVLAN_EMOD_S);
+
+	/* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
+	 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
+	 * packets untagged/tagged.
+	 */
+	ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
+				  ICE_AQ_VSI_VLAN_MODE_M) >>
+				 ICE_AQ_VSI_VLAN_MODE_S);
+
 	/* Have 1:1 UP mapping for both ingress/egress tables */
 	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
 	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
@@ -1688,15 +1704,12 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
 	wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
 	rd32(hw, PFINT_OICR); /* read to clear */
 
-	val = (PFINT_OICR_HLP_RDY_M |
-	       PFINT_OICR_CPM_RDY_M |
-	       PFINT_OICR_ECC_ERR_M |
+	val = (PFINT_OICR_ECC_ERR_M |
 	       PFINT_OICR_MAL_DETECT_M |
 	       PFINT_OICR_GRST_M |
 	       PFINT_OICR_PCI_EXCEPTION_M |
-	       PFINT_OICR_GPIO_M |
-	       PFINT_OICR_STORM_DETECT_M |
-	       PFINT_OICR_HMC_ERR_M);
+	       PFINT_OICR_HMC_ERR_M |
+	       PFINT_OICR_PE_CRITERR_M);
 
 	wr32(hw, PFINT_OICR_ENA, val);
 
@@ -2058,15 +2071,13 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
 skip_req_irq:
 	ice_ena_misc_vector(pf);
 
-	val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
-	      (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) |
-	      PFINT_OICR_CTL_CAUSE_ENA_M;
+	val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+	       PFINT_OICR_CTL_CAUSE_ENA_M);
 	wr32(hw, PFINT_OICR_CTL, val);
 
 	/* This enables Admin queue Interrupt causes */
-	val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
-	      (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) |
-	      PFINT_FW_CTL_CAUSE_ENA_M;
+	val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+	       PFINT_FW_CTL_CAUSE_ENA_M);
 	wr32(hw, PFINT_FW_CTL, val);
 
 	itr_gran = hw->itr_gran_200;
@@ -3246,8 +3257,10 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
 	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
 		ice_dis_msix(pf);
 
-	devm_kfree(&pf->pdev->dev, pf->irq_tracker);
-	pf->irq_tracker = NULL;
+	if (pf->irq_tracker) {
+		devm_kfree(&pf->pdev->dev, pf->irq_tracker);
+		pf->irq_tracker = NULL;
+	}
 }
 
 /**
@@ -3271,7 +3284,7 @@ static int ice_probe(struct pci_dev *pdev,
 
 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
 	if (err) {
-		dev_err(&pdev->dev, "I/O map error %d\n", err);
+		dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
 		return err;
 	}
 
@@ -3720,10 +3733,10 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
 	enum ice_status status;
 
 	/* Here we are configuring the VSI to let the driver add VLAN tags by
-	 * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN
-	 * tag insertion happens in the Tx hot path, in ice_tx_map.
+	 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
+	 * insertion happens in the Tx hot path, in ice_tx_map.
 	 */
-	ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL;
+	ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
 
 	ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
 	ctxt.vsi_num = vsi->vsi_num;
@@ -3735,7 +3748,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
 		return -EIO;
 	}
 
-	vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+	vsi->info.vlan_flags = ctxt.info.vlan_flags;
 	return 0;
 }
 
@@ -3757,12 +3770,15 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
 	 */
 	if (ena) {
 		/* Strip VLAN tag from Rx packet and put it in the desc */
-		ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+		ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
 	} else {
 		/* Disable stripping. Leave tag in packet */
-		ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING;
+		ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
 	}
 
+	/* Allow all packets untagged/tagged */
+	ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
+
 	ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
 	ctxt.vsi_num = vsi->vsi_num;
 
@@ -3773,7 +3789,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
 		return -EIO;
 	}
 
-	vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+	vsi->info.vlan_flags = ctxt.info.vlan_flags;
 	return 0;
 }
 
@@ -3986,7 +4002,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
 	/* clear the context structure first */
 	memset(&rlan_ctx, 0, sizeof(rlan_ctx));
 
-	rlan_ctx.base = ring->dma >> 7;
+	rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
 
 	rlan_ctx.qlen = ring->count;
 
@@ -4098,11 +4114,12 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
 {
 	int err;
 
-	ice_set_rx_mode(vsi->netdev);
-
-	err = ice_restore_vlan(vsi);
-	if (err)
-		return err;
+	if (vsi->netdev) {
+		ice_set_rx_mode(vsi->netdev);
+		err = ice_restore_vlan(vsi);
+		if (err)
+			return err;
+	}
 
 	err = ice_vsi_cfg_txqs(vsi);
 	if (!err)
@@ -4868,7 +4885,7 @@ int ice_down(struct ice_vsi *vsi)
  */
 static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
 {
-	int i, err;
+	int i, err = 0;
 
 	if (!vsi->num_txq) {
 		dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
@@ -4893,7 +4910,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
  */
 static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
 {
-	int i, err;
+	int i, err = 0;
 
 	if (!vsi->num_rxq) {
 		dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
@@ -5235,7 +5252,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
 	u8 count = 0;
 
 	if (new_mtu == netdev->mtu) {
-		netdev_warn(netdev, "mtu is already %d\n", netdev->mtu);
+		netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
 		return 0;
 	}
 
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 92da0a626ce0..295a8cd87fc1 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -131,9 +131,8 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
  *
  * This function will request NVM ownership.
  */
-static enum
-ice_status ice_acquire_nvm(struct ice_hw *hw,
-			   enum ice_aq_res_access_type access)
+static enum ice_status
+ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
 {
 	if (hw->nvm.blank_nvm_mode)
 		return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2e6c1d92cc88..eeae199469b6 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1576,8 +1576,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
 			return status;
 	}
 
-	if (owner == ICE_SCHED_NODE_OWNER_LAN)
-		vsi->max_lanq[tc] = new_numqs;
+	vsi->max_lanq[tc] = new_numqs;
 
 	return status;
 }
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 723d15f1e90b..6b7ec2ae5ad6 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -645,14 +645,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
 	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
 	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
 
-	act = (7 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
+	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
+	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
 
 	/* Third action Marker value */
 	act |= ICE_LG_ACT_GENERIC;
 	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
 		ICE_LG_ACT_GENERIC_VALUE_M;
 
-	act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
 	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
 
 	/* call the fill switch rule to fill the lookup tx rx structure */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 6f4a0d159dbf..9b8ec128ee31 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -17,7 +17,7 @@ struct ice_vsi_ctx {
 	u16 vsis_unallocated;
 	u16 flags;
 	struct ice_aqc_vsi_props info;
-	bool alloc_from_pool;
+	u8 alloc_from_pool;
 };
 
 enum ice_sw_fwd_act_type {
@@ -94,8 +94,8 @@ struct ice_fltr_info {
 	u8 qgrp_size;
 
 	/* Rule creations populate these indicators basing on the switch type */
-	bool lb_en; /* Indicate if packet can be looped back */
-	bool lan_en; /* Indicate if packet can be forwarded to the uplink */
+	u8 lb_en; /* Indicate if packet can be looped back */
+	u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
 };
 
 /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 567067b650c4..31bc998fe200 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -143,7 +143,7 @@ struct ice_ring {
 	u16 next_to_use;
 	u16 next_to_clean;
 
-	bool ring_active; /* is ring online or not */
+	u8 ring_active; /* is ring online or not */
 
 	/* stats structs */
 	struct ice_q_stats stats;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 99c8a9a71b5e..97c366e0ca59 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -83,7 +83,7 @@ struct ice_link_status {
 	u64 phy_type_low;
 	u16 max_frame_size;
 	u16 link_speed;
-	bool lse_ena; /* Link Status Event notification */
+	u8 lse_ena; /* Link Status Event notification */
 	u8 link_info;
 	u8 an_info;
 	u8 ext_info;
@@ -101,7 +101,7 @@ struct ice_phy_info {
 	struct ice_link_status link_info_old;
 	u64 phy_type_low;
 	enum ice_media_type media_type;
-	bool get_link_info;
+	u8 get_link_info;
 };
 
 /* Common HW capabilities for SW use */
@@ -167,7 +167,7 @@ struct ice_nvm_info {
 	u32 oem_ver; /* OEM version info */
 	u16 sr_words; /* Shadow RAM size in words */
 	u16 ver; /* NVM package version */
-	bool blank_nvm_mode; /* is NVM empty (no FW present) */
+	u8 blank_nvm_mode; /* is NVM empty (no FW present) */
 };
 
 /* Max number of port to queue branches w.r.t topology */
@@ -181,7 +181,7 @@ struct ice_sched_node {
 	struct ice_aqc_txsched_elem_data info;
 	u32 agg_id; /* aggregator group id */
 	u16 vsi_id;
-	bool in_use; /* suspended or in use */
+	u8 in_use; /* suspended or in use */
 	u8 tx_sched_layer; /* Logical Layer (1-9) */
 	u8 num_children;
 	u8 tc_num;
@@ -218,7 +218,7 @@ struct ice_sched_vsi_info {
 struct ice_sched_tx_policy {
 	u16 max_num_vsis;
 	u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS];
-	bool rdma_ena;
+	u8 rdma_ena;
 };
 
 struct ice_port_info {
@@ -243,7 +243,7 @@ struct ice_port_info {
 	struct list_head agg_list; /* lists all aggregator */
 	u8 lport;
 #define ICE_LPORT_MASK 0xff
-	bool is_vf;
+	u8 is_vf;
 };
 
 struct ice_switch_info {
@@ -287,7 +287,7 @@ struct ice_hw {
 	u8 max_cgds;
 	u8 sw_entry_point_layer;
 
-	bool evb_veb; /* true for VEB, false for VEPA */
+	u8 evb_veb; /* true for VEB, false for VEPA */
 	struct ice_bus_info bus;
 	struct ice_nvm_info nvm;
 	struct ice_hw_dev_caps dev_caps; /* device capabilities */
@@ -318,7 +318,7 @@ struct ice_hw {
 	u8 itr_gran_100;
 	u8 itr_gran_50;
 	u8 itr_gran_25;
-	bool ucast_shared; /* true if VSIs can share unicast addr */
+	u8 ucast_shared; /* true if VSIs can share unicast addr */
 
 };
 