Diffstat (limited to 'drivers/net')
59 files changed, 434 insertions, 203 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ffa37adb7681..333387f1f1fe 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3112,13 +3112,13 @@ static int bond_slave_netdev_event(unsigned long event, | |||
3112 | case NETDEV_CHANGE: | 3112 | case NETDEV_CHANGE: |
3113 | /* For 802.3ad mode only: | 3113 | /* For 802.3ad mode only: |
3114 | * Getting invalid Speed/Duplex values here will put slave | 3114 | * Getting invalid Speed/Duplex values here will put slave |
3115 | * in weird state. So mark it as link-down for the time | 3115 | * in weird state. So mark it as link-fail for the time |
3116 | * being and let link-monitoring (miimon) set it right when | 3116 | * being and let link-monitoring (miimon) set it right when |
3117 | * correct speeds/duplex are available. | 3117 | * correct speeds/duplex are available. |
3118 | */ | 3118 | */ |
3119 | if (bond_update_speed_duplex(slave) && | 3119 | if (bond_update_speed_duplex(slave) && |
3120 | BOND_MODE(bond) == BOND_MODE_8023AD) | 3120 | BOND_MODE(bond) == BOND_MODE_8023AD) |
3121 | slave->link = BOND_LINK_DOWN; | 3121 | slave->link = BOND_LINK_FAIL; |
3122 | 3122 | ||
3123 | if (BOND_MODE(bond) == BOND_MODE_8023AD) | 3123 | if (BOND_MODE(bond) == BOND_MODE_8023AD) |
3124 | bond_3ad_adapter_speed_duplex_changed(slave); | 3124 | bond_3ad_adapter_speed_duplex_changed(slave); |
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 54e0ca6ed730..86b6464b4525 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1117,11 +1117,6 @@ static int ksz_switch_init(struct ksz_device *dev) | |||
1117 | { | 1117 | { |
1118 | int i; | 1118 | int i; |
1119 | 1119 | ||
1120 | mutex_init(&dev->reg_mutex); | ||
1121 | mutex_init(&dev->stats_mutex); | ||
1122 | mutex_init(&dev->alu_mutex); | ||
1123 | mutex_init(&dev->vlan_mutex); | ||
1124 | |||
1125 | dev->ds->ops = &ksz_switch_ops; | 1120 | dev->ds->ops = &ksz_switch_ops; |
1126 | 1121 | ||
1127 | for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) { | 1122 | for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) { |
@@ -1206,6 +1201,11 @@ int ksz_switch_register(struct ksz_device *dev) | |||
1206 | if (dev->pdata) | 1201 | if (dev->pdata) |
1207 | dev->chip_id = dev->pdata->chip_id; | 1202 | dev->chip_id = dev->pdata->chip_id; |
1208 | 1203 | ||
1204 | mutex_init(&dev->reg_mutex); | ||
1205 | mutex_init(&dev->stats_mutex); | ||
1206 | mutex_init(&dev->alu_mutex); | ||
1207 | mutex_init(&dev->vlan_mutex); | ||
1208 | |||
1209 | if (ksz_switch_detect(dev)) | 1209 | if (ksz_switch_detect(dev)) |
1210 | return -EINVAL; | 1210 | return -EINVAL; |
1211 | 1211 | ||
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index d721ccf7d8be..38e399e0f30e 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -567,6 +567,8 @@ int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip) | |||
567 | if (err) | 567 | if (err) |
568 | return err; | 568 | return err; |
569 | 569 | ||
570 | /* Keep the histogram mode bits */ | ||
571 | val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX; | ||
570 | val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL; | 572 | val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL; |
571 | 573 | ||
572 | err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val); | 574 | err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val); |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 6a633c70f603..99ef1daaa4d8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -407,13 +407,13 @@ static void aq_ethtool_get_pauseparam(struct net_device *ndev, | |||
407 | struct ethtool_pauseparam *pause) | 407 | struct ethtool_pauseparam *pause) |
408 | { | 408 | { |
409 | struct aq_nic_s *aq_nic = netdev_priv(ndev); | 409 | struct aq_nic_s *aq_nic = netdev_priv(ndev); |
410 | u32 fc = aq_nic->aq_nic_cfg.flow_control; | ||
410 | 411 | ||
411 | pause->autoneg = 0; | 412 | pause->autoneg = 0; |
412 | 413 | ||
413 | if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX) | 414 | pause->rx_pause = !!(fc & AQ_NIC_FC_RX); |
414 | pause->rx_pause = 1; | 415 | pause->tx_pause = !!(fc & AQ_NIC_FC_TX); |
415 | if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX) | 416 | |
416 | pause->tx_pause = 1; | ||
417 | } | 417 | } |
418 | 418 | ||
419 | static int aq_ethtool_set_pauseparam(struct net_device *ndev, | 419 | static int aq_ethtool_set_pauseparam(struct net_device *ndev, |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index e8689241204e..a1e70da358ca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -204,6 +204,10 @@ struct aq_hw_ops { | |||
204 | 204 | ||
205 | int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); | 205 | int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); |
206 | 206 | ||
207 | int (*hw_set_offload)(struct aq_hw_s *self, | ||
208 | struct aq_nic_cfg_s *aq_nic_cfg); | ||
209 | |||
210 | int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc); | ||
207 | }; | 211 | }; |
208 | 212 | ||
209 | struct aq_fw_ops { | 213 | struct aq_fw_ops { |
@@ -226,6 +230,8 @@ struct aq_fw_ops { | |||
226 | 230 | ||
227 | int (*update_stats)(struct aq_hw_s *self); | 231 | int (*update_stats)(struct aq_hw_s *self); |
228 | 232 | ||
233 | u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode); | ||
234 | |||
229 | int (*set_flow_control)(struct aq_hw_s *self); | 235 | int (*set_flow_control)(struct aq_hw_s *self); |
230 | 236 | ||
231 | int (*set_power)(struct aq_hw_s *self, unsigned int power_state, | 237 | int (*set_power)(struct aq_hw_s *self, unsigned int power_state, |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index e3ae29e523f0..7c07eef275eb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -99,8 +99,11 @@ static int aq_ndev_set_features(struct net_device *ndev, | |||
99 | struct aq_nic_s *aq_nic = netdev_priv(ndev); | 99 | struct aq_nic_s *aq_nic = netdev_priv(ndev); |
100 | struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic); | 100 | struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic); |
101 | bool is_lro = false; | 101 | bool is_lro = false; |
102 | int err = 0; | ||
103 | |||
104 | aq_cfg->features = features; | ||
102 | 105 | ||
103 | if (aq_cfg->hw_features & NETIF_F_LRO) { | 106 | if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) { |
104 | is_lro = features & NETIF_F_LRO; | 107 | is_lro = features & NETIF_F_LRO; |
105 | 108 | ||
106 | if (aq_cfg->is_lro != is_lro) { | 109 | if (aq_cfg->is_lro != is_lro) { |
@@ -112,8 +115,11 @@ static int aq_ndev_set_features(struct net_device *ndev, | |||
112 | } | 115 | } |
113 | } | 116 | } |
114 | } | 117 | } |
118 | if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM) | ||
119 | err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw, | ||
120 | aq_cfg); | ||
115 | 121 | ||
116 | return 0; | 122 | return err; |
117 | } | 123 | } |
118 | 124 | ||
119 | static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr) | 125 | static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr) |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 5fed24446687..7abdc0952425 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -118,12 +118,13 @@ void aq_nic_cfg_start(struct aq_nic_s *self) | |||
118 | } | 118 | } |
119 | 119 | ||
120 | cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk; | 120 | cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk; |
121 | cfg->hw_features = cfg->aq_hw_caps->hw_features; | 121 | cfg->features = cfg->aq_hw_caps->hw_features; |
122 | } | 122 | } |
123 | 123 | ||
124 | static int aq_nic_update_link_status(struct aq_nic_s *self) | 124 | static int aq_nic_update_link_status(struct aq_nic_s *self) |
125 | { | 125 | { |
126 | int err = self->aq_fw_ops->update_link_status(self->aq_hw); | 126 | int err = self->aq_fw_ops->update_link_status(self->aq_hw); |
127 | u32 fc = 0; | ||
127 | 128 | ||
128 | if (err) | 129 | if (err) |
129 | return err; | 130 | return err; |
@@ -133,6 +134,15 @@ static int aq_nic_update_link_status(struct aq_nic_s *self) | |||
133 | AQ_CFG_DRV_NAME, self->link_status.mbps, | 134 | AQ_CFG_DRV_NAME, self->link_status.mbps, |
134 | self->aq_hw->aq_link_status.mbps); | 135 | self->aq_hw->aq_link_status.mbps); |
135 | aq_nic_update_interrupt_moderation_settings(self); | 136 | aq_nic_update_interrupt_moderation_settings(self); |
137 | |||
138 | /* Driver has to update flow control settings on RX block | ||
139 | * on any link event. | ||
140 | * We should query FW whether it negotiated FC. | ||
141 | */ | ||
142 | if (self->aq_fw_ops->get_flow_control) | ||
143 | self->aq_fw_ops->get_flow_control(self->aq_hw, &fc); | ||
144 | if (self->aq_hw_ops->hw_set_fc) | ||
145 | self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0); | ||
136 | } | 146 | } |
137 | 147 | ||
138 | self->link_status = self->aq_hw->aq_link_status; | 148 | self->link_status = self->aq_hw->aq_link_status; |
@@ -590,7 +600,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) | |||
590 | } | 600 | } |
591 | } | 601 | } |
592 | 602 | ||
593 | if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) { | 603 | if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) { |
594 | packet_filter |= IFF_MULTICAST; | 604 | packet_filter |= IFF_MULTICAST; |
595 | self->mc_list.count = i; | 605 | self->mc_list.count = i; |
596 | self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, | 606 | self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, |
@@ -772,7 +782,9 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self, | |||
772 | ethtool_link_ksettings_add_link_mode(cmd, advertising, | 782 | ethtool_link_ksettings_add_link_mode(cmd, advertising, |
773 | Pause); | 783 | Pause); |
774 | 784 | ||
775 | if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) | 785 | /* Asym is when either RX or TX, but not both */ |
786 | if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^ | ||
787 | !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)) | ||
776 | ethtool_link_ksettings_add_link_mode(cmd, advertising, | 788 | ethtool_link_ksettings_add_link_mode(cmd, advertising, |
777 | Asym_Pause); | 789 | Asym_Pause); |
778 | 790 | ||
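To make the new Asym_Pause rule above easier to scan, here is the same condition written out as a truth table; this is only a restatement of the hunk, not additional driver code:

/* Asym_Pause is advertised when exactly one of RX/TX flow control is set:
 *   AQ_NIC_FC_RX  AQ_NIC_FC_TX  ->  advertise Asym_Pause
 *        1             0            yes
 *        0             1            yes
 *        1             1            no
 *        0             0            no
 */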
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index c1582f4e8e1b..44ec47a3d60a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -23,7 +23,7 @@ struct aq_vec_s; | |||
23 | 23 | ||
24 | struct aq_nic_cfg_s { | 24 | struct aq_nic_cfg_s { |
25 | const struct aq_hw_caps_s *aq_hw_caps; | 25 | const struct aq_hw_caps_s *aq_hw_caps; |
26 | u64 hw_features; | 26 | u64 features; |
27 | u32 rxds; /* rx ring size, descriptors # */ | 27 | u32 rxds; /* rx ring size, descriptors # */ |
28 | u32 txds; /* tx ring size, descriptors # */ | 28 | u32 txds; /* tx ring size, descriptors # */ |
29 | u32 vecs; /* vecs==allocated irqs */ | 29 | u32 vecs; /* vecs==allocated irqs */ |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 3db91446cc67..74550ccc7a20 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -172,6 +172,27 @@ bool aq_ring_tx_clean(struct aq_ring_s *self) | |||
172 | return !!budget; | 172 | return !!budget; |
173 | } | 173 | } |
174 | 174 | ||
175 | static void aq_rx_checksum(struct aq_ring_s *self, | ||
176 | struct aq_ring_buff_s *buff, | ||
177 | struct sk_buff *skb) | ||
178 | { | ||
179 | if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM)) | ||
180 | return; | ||
181 | |||
182 | if (unlikely(buff->is_cso_err)) { | ||
183 | ++self->stats.rx.errors; | ||
184 | skb->ip_summed = CHECKSUM_NONE; | ||
185 | return; | ||
186 | } | ||
187 | if (buff->is_ip_cso) { | ||
188 | __skb_incr_checksum_unnecessary(skb); | ||
189 | if (buff->is_udp_cso || buff->is_tcp_cso) | ||
190 | __skb_incr_checksum_unnecessary(skb); | ||
191 | } else { | ||
192 | skb->ip_summed = CHECKSUM_NONE; | ||
193 | } | ||
194 | } | ||
195 | |||
175 | #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) | 196 | #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) |
176 | int aq_ring_rx_clean(struct aq_ring_s *self, | 197 | int aq_ring_rx_clean(struct aq_ring_s *self, |
177 | struct napi_struct *napi, | 198 | struct napi_struct *napi, |
@@ -267,18 +288,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self, | |||
267 | } | 288 | } |
268 | 289 | ||
269 | skb->protocol = eth_type_trans(skb, ndev); | 290 | skb->protocol = eth_type_trans(skb, ndev); |
270 | if (unlikely(buff->is_cso_err)) { | 291 | |
271 | ++self->stats.rx.errors; | 292 | aq_rx_checksum(self, buff, skb); |
272 | skb->ip_summed = CHECKSUM_NONE; | ||
273 | } else { | ||
274 | if (buff->is_ip_cso) { | ||
275 | __skb_incr_checksum_unnecessary(skb); | ||
276 | if (buff->is_udp_cso || buff->is_tcp_cso) | ||
277 | __skb_incr_checksum_unnecessary(skb); | ||
278 | } else { | ||
279 | skb->ip_summed = CHECKSUM_NONE; | ||
280 | } | ||
281 | } | ||
282 | 293 | ||
283 | skb_set_hash(skb, buff->rss_hash, | 294 | skb_set_hash(skb, buff->rss_hash, |
284 | buff->is_hash_l4 ? PKT_HASH_TYPE_L4 : | 295 | buff->is_hash_l4 ? PKT_HASH_TYPE_L4 : |
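Background on the double call in the new aq_rx_checksum() helper: __skb_incr_checksum_unnecessary() is an existing skbuff helper, and the sketch below only paraphrases its behaviour for illustration; it is not part of this patch:

/* Paraphrased behaviour of __skb_incr_checksum_unnecessary():
 *  - first call:  ip_summed CHECKSUM_NONE        -> CHECKSUM_UNNECESSARY (csum_level 0)
 *  - later calls: ip_summed CHECKSUM_UNNECESSARY -> csum_level++
 * So the first call records a validated IP header checksum and the second
 * records a validated TCP/UDP checksum on top of it.
 */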
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 76d25d594a0f..f02592f43fe3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -100,12 +100,17 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self) | |||
100 | return err; | 100 | return err; |
101 | } | 101 | } |
102 | 102 | ||
103 | static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc) | ||
104 | { | ||
105 | hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc); | ||
106 | return 0; | ||
107 | } | ||
108 | |||
103 | static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) | 109 | static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) |
104 | { | 110 | { |
105 | u32 tc = 0U; | 111 | u32 tc = 0U; |
106 | u32 buff_size = 0U; | 112 | u32 buff_size = 0U; |
107 | unsigned int i_priority = 0U; | 113 | unsigned int i_priority = 0U; |
108 | bool is_rx_flow_control = false; | ||
109 | 114 | ||
110 | /* TPS Descriptor rate init */ | 115 | /* TPS Descriptor rate init */ |
111 | hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); | 116 | hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); |
@@ -138,7 +143,6 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) | |||
138 | 143 | ||
139 | /* QoS Rx buf size per TC */ | 144 | /* QoS Rx buf size per TC */ |
140 | tc = 0; | 145 | tc = 0; |
141 | is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control); | ||
142 | buff_size = HW_ATL_B0_RXBUF_MAX; | 146 | buff_size = HW_ATL_B0_RXBUF_MAX; |
143 | 147 | ||
144 | hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); | 148 | hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); |
@@ -150,7 +154,8 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) | |||
150 | (buff_size * | 154 | (buff_size * |
151 | (1024U / 32U) * 50U) / | 155 | (1024U / 32U) * 50U) / |
152 | 100U, tc); | 156 | 100U, tc); |
153 | hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc); | 157 | |
158 | hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc); | ||
154 | 159 | ||
155 | /* QoS 802.1p priority -> TC mapping */ | 160 | /* QoS 802.1p priority -> TC mapping */ |
156 | for (i_priority = 8U; i_priority--;) | 161 | for (i_priority = 8U; i_priority--;) |
@@ -229,8 +234,10 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, | |||
229 | hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1); | 234 | hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1); |
230 | 235 | ||
231 | /* RX checksums offloads*/ | 236 | /* RX checksums offloads*/ |
232 | hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1); | 237 | hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features & |
233 | hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1); | 238 | NETIF_F_RXCSUM)); |
239 | hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features & | ||
240 | NETIF_F_RXCSUM)); | ||
234 | 241 | ||
235 | /* LSO offloads*/ | 242 | /* LSO offloads*/ |
236 | hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); | 243 | hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); |
@@ -655,9 +662,9 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, | |||
655 | struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *) | 662 | struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *) |
656 | &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE]; | 663 | &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE]; |
657 | 664 | ||
658 | unsigned int is_err = 1U; | ||
659 | unsigned int is_rx_check_sum_enabled = 0U; | 665 | unsigned int is_rx_check_sum_enabled = 0U; |
660 | unsigned int pkt_type = 0U; | 666 | unsigned int pkt_type = 0U; |
667 | u8 rx_stat = 0U; | ||
661 | 668 | ||
662 | if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */ | 669 | if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */ |
663 | break; | 670 | break; |
@@ -665,35 +672,35 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, | |||
665 | 672 | ||
666 | buff = &ring->buff_ring[ring->hw_head]; | 673 | buff = &ring->buff_ring[ring->hw_head]; |
667 | 674 | ||
668 | is_err = (0x0000003CU & rxd_wb->status); | 675 | rx_stat = (0x0000003CU & rxd_wb->status) >> 2; |
669 | 676 | ||
670 | is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19); | 677 | is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19); |
671 | is_err &= ~0x20U; /* exclude validity bit */ | ||
672 | 678 | ||
673 | pkt_type = 0xFFU & (rxd_wb->type >> 4); | 679 | pkt_type = 0xFFU & (rxd_wb->type >> 4); |
674 | 680 | ||
675 | if (is_rx_check_sum_enabled) { | 681 | if (is_rx_check_sum_enabled & BIT(0) && |
676 | if (0x0U == (pkt_type & 0x3U)) | 682 | (0x0U == (pkt_type & 0x3U))) |
677 | buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U; | 683 | buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U; |
678 | 684 | ||
685 | if (is_rx_check_sum_enabled & BIT(1)) { | ||
679 | if (0x4U == (pkt_type & 0x1CU)) | 686 | if (0x4U == (pkt_type & 0x1CU)) |
680 | buff->is_udp_cso = buff->is_cso_err ? 0U : 1U; | 687 | buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U : |
688 | !!(rx_stat & BIT(3)); | ||
681 | else if (0x0U == (pkt_type & 0x1CU)) | 689 | else if (0x0U == (pkt_type & 0x1CU)) |
682 | buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U; | 690 | buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U : |
683 | 691 | !!(rx_stat & BIT(3)); | |
684 | /* Checksum offload workaround for small packets */ | 692 | } |
685 | if (rxd_wb->pkt_len <= 60) { | 693 | buff->is_cso_err = !!(rx_stat & 0x6); |
686 | buff->is_ip_cso = 0U; | 694 | /* Checksum offload workaround for small packets */ |
687 | buff->is_cso_err = 0U; | 695 | if (unlikely(rxd_wb->pkt_len <= 60)) { |
688 | } | 696 | buff->is_ip_cso = 0U; |
697 | buff->is_cso_err = 0U; | ||
689 | } | 698 | } |
690 | |||
691 | is_err &= ~0x18U; | ||
692 | 699 | ||
693 | dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE); | 700 | dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE); |
694 | 701 | ||
695 | if (is_err || rxd_wb->type & 0x1000U) { | 702 | if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) { |
696 | /* status error or DMA error */ | 703 | /* MAC error or DMA error */ |
697 | buff->is_error = 1U; | 704 | buff->is_error = 1U; |
698 | } else { | 705 | } else { |
699 | if (self->aq_nic_cfg->is_rss) { | 706 | if (self->aq_nic_cfg->is_rss) { |
@@ -915,6 +922,12 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self) | |||
915 | static int hw_atl_b0_hw_stop(struct aq_hw_s *self) | 922 | static int hw_atl_b0_hw_stop(struct aq_hw_s *self) |
916 | { | 923 | { |
917 | hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK); | 924 | hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK); |
925 | |||
926 | /* Invalidate Descriptor Cache to prevent writing to the cached | ||
927 | * descriptors and to the data pointer of those descriptors | ||
928 | */ | ||
929 | hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1); | ||
930 | |||
918 | return aq_hw_err_from_flags(self); | 931 | return aq_hw_err_from_flags(self); |
919 | } | 932 | } |
920 | 933 | ||
@@ -963,4 +976,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = { | |||
963 | .hw_get_regs = hw_atl_utils_hw_get_regs, | 976 | .hw_get_regs = hw_atl_utils_hw_get_regs, |
964 | .hw_get_hw_stats = hw_atl_utils_get_hw_stats, | 977 | .hw_get_hw_stats = hw_atl_utils_get_hw_stats, |
965 | .hw_get_fw_version = hw_atl_utils_get_fw_version, | 978 | .hw_get_fw_version = hw_atl_utils_get_fw_version, |
979 | .hw_set_offload = hw_atl_b0_hw_offload_set, | ||
980 | .hw_set_fc = hw_atl_b0_set_fc, | ||
966 | }; | 981 | }; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index be0a3a90dfad..5502ec5f0f69 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -619,6 +619,14 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode | |||
619 | HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode); | 619 | HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode); |
620 | } | 620 | } |
621 | 621 | ||
622 | void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init) | ||
623 | { | ||
624 | aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR, | ||
625 | HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK, | ||
626 | HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT, | ||
627 | init); | ||
628 | } | ||
629 | |||
622 | void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, | 630 | void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, |
623 | u32 rx_pkt_buff_size_per_tc, u32 buffer) | 631 | u32 rx_pkt_buff_size_per_tc, u32 buffer) |
624 | { | 632 | { |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 7056c7342afc..41f239928c15 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -325,6 +325,9 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, | |||
325 | u32 rx_pkt_buff_size_per_tc, | 325 | u32 rx_pkt_buff_size_per_tc, |
326 | u32 buffer); | 326 | u32 buffer); |
327 | 327 | ||
328 | /* set rdm rx dma descriptor cache init */ | ||
329 | void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init); | ||
330 | |||
328 | /* set rx xoff enable (per tc) */ | 331 | /* set rx xoff enable (per tc) */ |
329 | void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, | 332 | void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, |
330 | u32 buffer); | 333 | u32 buffer); |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 716674a9b729..a715fa317b1c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -293,6 +293,24 @@ | |||
293 | /* default value of bitfield desc{d}_reset */ | 293 | /* default value of bitfield desc{d}_reset */ |
294 | #define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0 | 294 | #define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0 |
295 | 295 | ||
296 | /* rdm_desc_init_i bitfield definitions | ||
297 | * preprocessor definitions for the bitfield rdm_desc_init_i. | ||
298 | * port="pif_rdm_desc_init_i" | ||
299 | */ | ||
300 | |||
301 | /* register address for bitfield rdm_desc_init_i */ | ||
302 | #define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR 0x00005a00 | ||
303 | /* bitmask for bitfield rdm_desc_init_i */ | ||
304 | #define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK 0xffffffff | ||
305 | /* inverted bitmask for bitfield rdm_desc_init_i */ | ||
306 | #define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSKN 0x00000000 | ||
307 | /* lower bit position of bitfield rdm_desc_init_i */ | ||
308 | #define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT 0 | ||
309 | /* width of bitfield rdm_desc_init_i */ | ||
310 | #define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_WIDTH 32 | ||
311 | /* default value of bitfield rdm_desc_init_i */ | ||
312 | #define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0 | ||
313 | |||
296 | /* rx int_desc_wrb_en bitfield definitions | 314 | /* rx int_desc_wrb_en bitfield definitions |
297 | * preprocessor definitions for the bitfield "int_desc_wrb_en". | 315 | * preprocessor definitions for the bitfield "int_desc_wrb_en". |
298 | * port="pif_rdm_int_desc_wrb_en_i" | 316 | * port="pif_rdm_int_desc_wrb_en_i" |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 096ca5730887..7de3220d9cab 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -30,6 +30,8 @@ | |||
30 | #define HW_ATL_FW2X_MPI_STATE_ADDR 0x370 | 30 | #define HW_ATL_FW2X_MPI_STATE_ADDR 0x370 |
31 | #define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374 | 31 | #define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374 |
32 | 32 | ||
33 | #define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE) | ||
34 | #define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE) | ||
33 | #define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY) | 35 | #define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY) |
34 | #define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL) | 36 | #define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL) |
35 | 37 | ||
@@ -451,6 +453,24 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self) | |||
451 | return 0; | 453 | return 0; |
452 | } | 454 | } |
453 | 455 | ||
456 | static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode) | ||
457 | { | ||
458 | u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR); | ||
459 | |||
460 | if (mpi_state & HW_ATL_FW2X_CAP_PAUSE) | ||
461 | if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE) | ||
462 | *fcmode = AQ_NIC_FC_RX; | ||
463 | else | ||
464 | *fcmode = AQ_NIC_FC_RX | AQ_NIC_FC_TX; | ||
465 | else | ||
466 | if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE) | ||
467 | *fcmode = AQ_NIC_FC_TX; | ||
468 | else | ||
469 | *fcmode = 0; | ||
470 | |||
471 | return 0; | ||
472 | } | ||
473 | |||
454 | const struct aq_fw_ops aq_fw_2x_ops = { | 474 | const struct aq_fw_ops aq_fw_2x_ops = { |
455 | .init = aq_fw2x_init, | 475 | .init = aq_fw2x_init, |
456 | .deinit = aq_fw2x_deinit, | 476 | .deinit = aq_fw2x_deinit, |
@@ -465,4 +485,5 @@ const struct aq_fw_ops aq_fw_2x_ops = { | |||
465 | .set_eee_rate = aq_fw2x_set_eee_rate, | 485 | .set_eee_rate = aq_fw2x_set_eee_rate, |
466 | .get_eee_rate = aq_fw2x_get_eee_rate, | 486 | .get_eee_rate = aq_fw2x_get_eee_rate, |
467 | .set_flow_control = aq_fw2x_set_flow_control, | 487 | .set_flow_control = aq_fw2x_set_flow_control, |
488 | .get_flow_control = aq_fw2x_get_flow_control | ||
468 | }; | 489 | }; |
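The nested ifs in the new aq_fw2x_get_flow_control() decode two FW capability bits into the driver's flow-control mode. The table below only restates that mapping for quick reference; nothing here goes beyond the hunk:

/* HW_ATL_FW2X_CAP_PAUSE  HW_ATL_FW2X_CAP_ASYM_PAUSE  ->  *fcmode
 *          1                        1                    AQ_NIC_FC_RX
 *          1                        0                    AQ_NIC_FC_RX | AQ_NIC_FC_TX
 *          0                        1                    AQ_NIC_FC_TX
 *          0                        0                    0
 */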
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index 78c5de467426..9d0e74f6b089 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -140,6 +140,5 @@ struct alx_priv { | |||
140 | }; | 140 | }; |
141 | 141 | ||
142 | extern const struct ethtool_ops alx_ethtool_ops; | 142 | extern const struct ethtool_ops alx_ethtool_ops; |
143 | extern const char alx_drv_name[]; | ||
144 | 143 | ||
145 | #endif | 144 | #endif |
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 7968c644ad86..c131cfc1b79d 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -49,7 +49,7 @@ | |||
49 | #include "hw.h" | 49 | #include "hw.h" |
50 | #include "reg.h" | 50 | #include "reg.h" |
51 | 51 | ||
52 | const char alx_drv_name[] = "alx"; | 52 | static const char alx_drv_name[] = "alx"; |
53 | 53 | ||
54 | static void alx_free_txbuf(struct alx_tx_queue *txq, int entry) | 54 | static void alx_free_txbuf(struct alx_tx_queue *txq, int entry) |
55 | { | 55 | { |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 4122553e224b..0e2d99c737e3 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1902,9 +1902,6 @@ static void bcm_sysport_netif_start(struct net_device *dev) | |||
1902 | intrl2_1_mask_clear(priv, 0xffffffff); | 1902 | intrl2_1_mask_clear(priv, 0xffffffff); |
1903 | else | 1903 | else |
1904 | intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK); | 1904 | intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK); |
1905 | |||
1906 | /* Last call before we start the real business */ | ||
1907 | netif_tx_start_all_queues(dev); | ||
1908 | } | 1905 | } |
1909 | 1906 | ||
1910 | static void rbuf_init(struct bcm_sysport_priv *priv) | 1907 | static void rbuf_init(struct bcm_sysport_priv *priv) |
@@ -2048,6 +2045,8 @@ static int bcm_sysport_open(struct net_device *dev) | |||
2048 | 2045 | ||
2049 | bcm_sysport_netif_start(dev); | 2046 | bcm_sysport_netif_start(dev); |
2050 | 2047 | ||
2048 | netif_tx_start_all_queues(dev); | ||
2049 | |||
2051 | return 0; | 2050 | return 0; |
2052 | 2051 | ||
2053 | out_clear_rx_int: | 2052 | out_clear_rx_int: |
@@ -2071,7 +2070,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev) | |||
2071 | struct bcm_sysport_priv *priv = netdev_priv(dev); | 2070 | struct bcm_sysport_priv *priv = netdev_priv(dev); |
2072 | 2071 | ||
2073 | /* stop all software from updating hardware */ | 2072 | /* stop all software from updating hardware */ |
2074 | netif_tx_stop_all_queues(dev); | 2073 | netif_tx_disable(dev); |
2075 | napi_disable(&priv->napi); | 2074 | napi_disable(&priv->napi); |
2076 | cancel_work_sync(&priv->dim.dim.work); | 2075 | cancel_work_sync(&priv->dim.dim.work); |
2077 | phy_stop(dev->phydev); | 2076 | phy_stop(dev->phydev); |
@@ -2658,12 +2657,12 @@ static int __maybe_unused bcm_sysport_suspend(struct device *d) | |||
2658 | if (!netif_running(dev)) | 2657 | if (!netif_running(dev)) |
2659 | return 0; | 2658 | return 0; |
2660 | 2659 | ||
2660 | netif_device_detach(dev); | ||
2661 | |||
2661 | bcm_sysport_netif_stop(dev); | 2662 | bcm_sysport_netif_stop(dev); |
2662 | 2663 | ||
2663 | phy_suspend(dev->phydev); | 2664 | phy_suspend(dev->phydev); |
2664 | 2665 | ||
2665 | netif_device_detach(dev); | ||
2666 | |||
2667 | /* Disable UniMAC RX */ | 2666 | /* Disable UniMAC RX */ |
2668 | umac_enable_set(priv, CMD_RX_EN, 0); | 2667 | umac_enable_set(priv, CMD_RX_EN, 0); |
2669 | 2668 | ||
@@ -2746,8 +2745,6 @@ static int __maybe_unused bcm_sysport_resume(struct device *d) | |||
2746 | goto out_free_rx_ring; | 2745 | goto out_free_rx_ring; |
2747 | } | 2746 | } |
2748 | 2747 | ||
2749 | netif_device_attach(dev); | ||
2750 | |||
2751 | /* RX pipe enable */ | 2748 | /* RX pipe enable */ |
2752 | topctrl_writel(priv, 0, RX_FLUSH_CNTL); | 2749 | topctrl_writel(priv, 0, RX_FLUSH_CNTL); |
2753 | 2750 | ||
@@ -2788,6 +2785,8 @@ static int __maybe_unused bcm_sysport_resume(struct device *d) | |||
2788 | 2785 | ||
2789 | bcm_sysport_netif_start(dev); | 2786 | bcm_sysport_netif_start(dev); |
2790 | 2787 | ||
2788 | netif_device_attach(dev); | ||
2789 | |||
2791 | return 0; | 2790 | return 0; |
2792 | 2791 | ||
2793 | out_free_rx_ring: | 2792 | out_free_rx_ring: |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 20c1681bb1af..2d6f090bf644 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2855,7 +2855,6 @@ static void bcmgenet_netif_start(struct net_device *dev) | |||
2855 | 2855 | ||
2856 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); | 2856 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); |
2857 | 2857 | ||
2858 | netif_tx_start_all_queues(dev); | ||
2859 | bcmgenet_enable_tx_napi(priv); | 2858 | bcmgenet_enable_tx_napi(priv); |
2860 | 2859 | ||
2861 | /* Monitor link interrupts now */ | 2860 | /* Monitor link interrupts now */ |
@@ -2937,6 +2936,8 @@ static int bcmgenet_open(struct net_device *dev) | |||
2937 | 2936 | ||
2938 | bcmgenet_netif_start(dev); | 2937 | bcmgenet_netif_start(dev); |
2939 | 2938 | ||
2939 | netif_tx_start_all_queues(dev); | ||
2940 | |||
2940 | return 0; | 2941 | return 0; |
2941 | 2942 | ||
2942 | err_irq1: | 2943 | err_irq1: |
@@ -2958,7 +2959,7 @@ static void bcmgenet_netif_stop(struct net_device *dev) | |||
2958 | struct bcmgenet_priv *priv = netdev_priv(dev); | 2959 | struct bcmgenet_priv *priv = netdev_priv(dev); |
2959 | 2960 | ||
2960 | bcmgenet_disable_tx_napi(priv); | 2961 | bcmgenet_disable_tx_napi(priv); |
2961 | netif_tx_stop_all_queues(dev); | 2962 | netif_tx_disable(dev); |
2962 | 2963 | ||
2963 | /* Disable MAC receive */ | 2964 | /* Disable MAC receive */ |
2964 | umac_enable_set(priv, CMD_RX_EN, false); | 2965 | umac_enable_set(priv, CMD_RX_EN, false); |
@@ -3620,13 +3621,13 @@ static int bcmgenet_suspend(struct device *d) | |||
3620 | if (!netif_running(dev)) | 3621 | if (!netif_running(dev)) |
3621 | return 0; | 3622 | return 0; |
3622 | 3623 | ||
3624 | netif_device_detach(dev); | ||
3625 | |||
3623 | bcmgenet_netif_stop(dev); | 3626 | bcmgenet_netif_stop(dev); |
3624 | 3627 | ||
3625 | if (!device_may_wakeup(d)) | 3628 | if (!device_may_wakeup(d)) |
3626 | phy_suspend(dev->phydev); | 3629 | phy_suspend(dev->phydev); |
3627 | 3630 | ||
3628 | netif_device_detach(dev); | ||
3629 | |||
3630 | /* Prepare the device for Wake-on-LAN and switch to the slow clock */ | 3631 | /* Prepare the device for Wake-on-LAN and switch to the slow clock */ |
3631 | if (device_may_wakeup(d) && priv->wolopts) { | 3632 | if (device_may_wakeup(d) && priv->wolopts) { |
3632 | ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); | 3633 | ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); |
@@ -3700,8 +3701,6 @@ static int bcmgenet_resume(struct device *d) | |||
3700 | /* Always enable ring 16 - descriptor ring */ | 3701 | /* Always enable ring 16 - descriptor ring */ |
3701 | bcmgenet_enable_dma(priv, dma_ctrl); | 3702 | bcmgenet_enable_dma(priv, dma_ctrl); |
3702 | 3703 | ||
3703 | netif_device_attach(dev); | ||
3704 | |||
3705 | if (!device_may_wakeup(d)) | 3704 | if (!device_may_wakeup(d)) |
3706 | phy_resume(dev->phydev); | 3705 | phy_resume(dev->phydev); |
3707 | 3706 | ||
@@ -3710,6 +3709,8 @@ static int bcmgenet_resume(struct device *d) | |||
3710 | 3709 | ||
3711 | bcmgenet_netif_start(dev); | 3710 | bcmgenet_netif_start(dev); |
3712 | 3711 | ||
3712 | netif_device_attach(dev); | ||
3713 | |||
3713 | return 0; | 3714 | return 0; |
3714 | 3715 | ||
3715 | out_clk_disable: | 3716 | out_clk_disable: |
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 3f96aa30068e..20fcf0d1c2ce 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -3760,7 +3760,8 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) | |||
3760 | /* Hardware table is only clear when pf resets */ | 3760 | /* Hardware table is only clear when pf resets */ |
3761 | if (!(handle->flags & HNAE3_SUPPORT_VF)) { | 3761 | if (!(handle->flags & HNAE3_SUPPORT_VF)) { |
3762 | ret = hns3_restore_vlan(netdev); | 3762 | ret = hns3_restore_vlan(netdev); |
3763 | return ret; | 3763 | if (ret) |
3764 | return ret; | ||
3764 | } | 3765 | } |
3765 | 3766 | ||
3766 | ret = hns3_restore_fd_rules(netdev); | 3767 | ret = hns3_restore_fd_rules(netdev); |
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index aa5cb9834d73..494e562fe8c7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1168,14 +1168,14 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev) | |||
1168 | */ | 1168 | */ |
1169 | static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) | 1169 | static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) |
1170 | { | 1170 | { |
1171 | struct hclge_vport *vport = hdev->vport; | 1171 | int i; |
1172 | u32 i, k, qs_bitmap; | ||
1173 | int ret; | ||
1174 | 1172 | ||
1175 | for (i = 0; i < HCLGE_BP_GRP_NUM; i++) { | 1173 | for (i = 0; i < HCLGE_BP_GRP_NUM; i++) { |
1176 | qs_bitmap = 0; | 1174 | u32 qs_bitmap = 0; |
1175 | int k, ret; | ||
1177 | 1176 | ||
1178 | for (k = 0; k < hdev->num_alloc_vport; k++) { | 1177 | for (k = 0; k < hdev->num_alloc_vport; k++) { |
1178 | struct hclge_vport *vport = &hdev->vport[k]; | ||
1179 | u16 qs_id = vport->qs_offset + tc; | 1179 | u16 qs_id = vport->qs_offset + tc; |
1180 | u8 grp, sub_grp; | 1180 | u8 grp, sub_grp; |
1181 | 1181 | ||
@@ -1185,8 +1185,6 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) | |||
1185 | HCLGE_BP_SUB_GRP_ID_S); | 1185 | HCLGE_BP_SUB_GRP_ID_S); |
1186 | if (i == grp) | 1186 | if (i == grp) |
1187 | qs_bitmap |= (1 << sub_grp); | 1187 | qs_bitmap |= (1 << sub_grp); |
1188 | |||
1189 | vport++; | ||
1190 | } | 1188 | } |
1191 | 1189 | ||
1192 | ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap); | 1190 | ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap); |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 7893beffcc71..c9d5d0a7fbf1 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1545,7 +1545,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
1545 | tx_crq.v1.sge_len = cpu_to_be32(skb->len); | 1545 | tx_crq.v1.sge_len = cpu_to_be32(skb->len); |
1546 | tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); | 1546 | tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); |
1547 | 1547 | ||
1548 | if (adapter->vlan_header_insertion) { | 1548 | if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) { |
1549 | tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT; | 1549 | tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT; |
1550 | tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); | 1550 | tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); |
1551 | } | 1551 | } |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index bc71a21c1dc2..21c2688d6308 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -12249,6 +12249,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) | |||
12249 | NETIF_F_GSO_GRE | | 12249 | NETIF_F_GSO_GRE | |
12250 | NETIF_F_GSO_GRE_CSUM | | 12250 | NETIF_F_GSO_GRE_CSUM | |
12251 | NETIF_F_GSO_PARTIAL | | 12251 | NETIF_F_GSO_PARTIAL | |
12252 | NETIF_F_GSO_IPXIP4 | | ||
12253 | NETIF_F_GSO_IPXIP6 | | ||
12252 | NETIF_F_GSO_UDP_TUNNEL | | 12254 | NETIF_F_GSO_UDP_TUNNEL | |
12253 | NETIF_F_GSO_UDP_TUNNEL_CSUM | | 12255 | NETIF_F_GSO_UDP_TUNNEL_CSUM | |
12254 | NETIF_F_SCTP_CRC | | 12256 | NETIF_F_SCTP_CRC | |
@@ -12266,13 +12268,13 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) | |||
12266 | /* record features VLANs can make use of */ | 12268 | /* record features VLANs can make use of */ |
12267 | netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; | 12269 | netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; |
12268 | 12270 | ||
12269 | if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) | ||
12270 | netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; | ||
12271 | |||
12272 | hw_features = hw_enc_features | | 12271 | hw_features = hw_enc_features | |
12273 | NETIF_F_HW_VLAN_CTAG_TX | | 12272 | NETIF_F_HW_VLAN_CTAG_TX | |
12274 | NETIF_F_HW_VLAN_CTAG_RX; | 12273 | NETIF_F_HW_VLAN_CTAG_RX; |
12275 | 12274 | ||
12275 | if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) | ||
12276 | hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; | ||
12277 | |||
12276 | netdev->hw_features |= hw_features; | 12278 | netdev->hw_features |= hw_features; |
12277 | 12279 | ||
12278 | netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; | 12280 | netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; |
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 4c4b5717a627..b8548370f1c7 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -76,6 +76,8 @@ extern const char ice_drv_ver[]; | |||
76 | #define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) | 76 | #define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) |
77 | #define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) | 77 | #define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) |
78 | 78 | ||
79 | #define ICE_MAX_RESET_WAIT 20 | ||
80 | |||
79 | #define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4) | 81 | #define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4) |
80 | 82 | ||
81 | #define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) | 83 | #define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) |
@@ -189,7 +191,6 @@ struct ice_vsi { | |||
189 | u64 tx_linearize; | 191 | u64 tx_linearize; |
190 | DECLARE_BITMAP(state, __ICE_STATE_NBITS); | 192 | DECLARE_BITMAP(state, __ICE_STATE_NBITS); |
191 | DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS); | 193 | DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS); |
192 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; | ||
193 | unsigned int current_netdev_flags; | 194 | unsigned int current_netdev_flags; |
194 | u32 tx_restart; | 195 | u32 tx_restart; |
195 | u32 tx_busy; | 196 | u32 tx_busy; |
@@ -369,5 +370,6 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); | |||
369 | int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); | 370 | int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); |
370 | void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); | 371 | void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); |
371 | void ice_print_link_msg(struct ice_vsi *vsi, bool isup); | 372 | void ice_print_link_msg(struct ice_vsi *vsi, bool isup); |
373 | void ice_napi_del(struct ice_vsi *vsi); | ||
372 | 374 | ||
373 | #endif /* _ICE_H_ */ | 375 | #endif /* _ICE_H_ */ |
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 8cd6a2401fd9..554fd707a6d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -811,6 +811,9 @@ void ice_deinit_hw(struct ice_hw *hw) | |||
811 | /* Attempt to disable FW logging before shutting down control queues */ | 811 | /* Attempt to disable FW logging before shutting down control queues */ |
812 | ice_cfg_fw_log(hw, false); | 812 | ice_cfg_fw_log(hw, false); |
813 | ice_shutdown_all_ctrlq(hw); | 813 | ice_shutdown_all_ctrlq(hw); |
814 | |||
815 | /* Clear VSI contexts if not already cleared */ | ||
816 | ice_clear_all_vsi_ctx(hw); | ||
814 | } | 817 | } |
815 | 818 | ||
816 | /** | 819 | /** |
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 96923580f2a6..648acdb4c644 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1517,10 +1517,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) | |||
1517 | } | 1517 | } |
1518 | 1518 | ||
1519 | if (!test_bit(__ICE_DOWN, pf->state)) { | 1519 | if (!test_bit(__ICE_DOWN, pf->state)) { |
1520 | /* Give it a little more time to try to come back */ | 1520 | /* Give it a little more time to try to come back. If still |
1521 | * down, restart autoneg link or reinitialize the interface. | ||
1522 | */ | ||
1521 | msleep(75); | 1523 | msleep(75); |
1522 | if (!test_bit(__ICE_DOWN, pf->state)) | 1524 | if (!test_bit(__ICE_DOWN, pf->state)) |
1523 | return ice_nway_reset(netdev); | 1525 | return ice_nway_reset(netdev); |
1526 | |||
1527 | ice_down(vsi); | ||
1528 | ice_up(vsi); | ||
1524 | } | 1529 | } |
1525 | 1530 | ||
1526 | return err; | 1531 | return err; |
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 5fdea6ec7675..596b9fb1c510 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -242,6 +242,8 @@ | |||
242 | #define GLNVM_ULD 0x000B6008 | 242 | #define GLNVM_ULD 0x000B6008 |
243 | #define GLNVM_ULD_CORER_DONE_M BIT(3) | 243 | #define GLNVM_ULD_CORER_DONE_M BIT(3) |
244 | #define GLNVM_ULD_GLOBR_DONE_M BIT(4) | 244 | #define GLNVM_ULD_GLOBR_DONE_M BIT(4) |
245 | #define GLPCI_CNF2 0x000BE004 | ||
246 | #define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1) | ||
245 | #define PF_FUNC_RID 0x0009E880 | 247 | #define PF_FUNC_RID 0x0009E880 |
246 | #define PF_FUNC_RID_FUNC_NUM_S 0 | 248 | #define PF_FUNC_RID_FUNC_NUM_S 0 |
247 | #define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0) | 249 | #define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0) |
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 5bacad01f0c9..1041fa2a7767 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1997,7 +1997,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) | |||
1997 | status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL); | 1997 | status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL); |
1998 | if (status) { | 1998 | if (status) { |
1999 | netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n", | 1999 | netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n", |
2000 | ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status, | 2000 | ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status, |
2001 | vsi->back->hw.adminq.sq_last_status); | 2001 | vsi->back->hw.adminq.sq_last_status); |
2002 | goto err_out; | 2002 | goto err_out; |
2003 | } | 2003 | } |
@@ -2458,6 +2458,7 @@ int ice_vsi_release(struct ice_vsi *vsi) | |||
2458 | * on this wq | 2458 | * on this wq |
2459 | */ | 2459 | */ |
2460 | if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) { | 2460 | if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) { |
2461 | ice_napi_del(vsi); | ||
2461 | unregister_netdev(vsi->netdev); | 2462 | unregister_netdev(vsi->netdev); |
2462 | free_netdev(vsi->netdev); | 2463 | free_netdev(vsi->netdev); |
2463 | vsi->netdev = NULL; | 2464 | vsi->netdev = NULL; |
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 05993451147a..333312a1d595 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1465,7 +1465,7 @@ skip_req_irq: | |||
1465 | * ice_napi_del - Remove NAPI handler for the VSI | 1465 | * ice_napi_del - Remove NAPI handler for the VSI |
1466 | * @vsi: VSI for which NAPI handler is to be removed | 1466 | * @vsi: VSI for which NAPI handler is to be removed |
1467 | */ | 1467 | */ |
1468 | static void ice_napi_del(struct ice_vsi *vsi) | 1468 | void ice_napi_del(struct ice_vsi *vsi) |
1469 | { | 1469 | { |
1470 | int v_idx; | 1470 | int v_idx; |
1471 | 1471 | ||
@@ -1622,7 +1622,6 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, | |||
1622 | { | 1622 | { |
1623 | struct ice_netdev_priv *np = netdev_priv(netdev); | 1623 | struct ice_netdev_priv *np = netdev_priv(netdev); |
1624 | struct ice_vsi *vsi = np->vsi; | 1624 | struct ice_vsi *vsi = np->vsi; |
1625 | int ret; | ||
1626 | 1625 | ||
1627 | if (vid >= VLAN_N_VID) { | 1626 | if (vid >= VLAN_N_VID) { |
1628 | netdev_err(netdev, "VLAN id requested %d is out of range %d\n", | 1627 | netdev_err(netdev, "VLAN id requested %d is out of range %d\n", |
@@ -1635,7 +1634,8 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, | |||
1635 | 1634 | ||
1636 | /* Enable VLAN pruning when VLAN 0 is added */ | 1635 | /* Enable VLAN pruning when VLAN 0 is added */ |
1637 | if (unlikely(!vid)) { | 1636 | if (unlikely(!vid)) { |
1638 | ret = ice_cfg_vlan_pruning(vsi, true); | 1637 | int ret = ice_cfg_vlan_pruning(vsi, true); |
1638 | |||
1639 | if (ret) | 1639 | if (ret) |
1640 | return ret; | 1640 | return ret; |
1641 | } | 1641 | } |
@@ -1644,12 +1644,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, | |||
1644 | * needed to continue allowing all untagged packets since VLAN prune | 1644 | * needed to continue allowing all untagged packets since VLAN prune |
1645 | * list is applied to all packets by the switch | 1645 | * list is applied to all packets by the switch |
1646 | */ | 1646 | */ |
1647 | ret = ice_vsi_add_vlan(vsi, vid); | 1647 | return ice_vsi_add_vlan(vsi, vid); |
1648 | |||
1649 | if (!ret) | ||
1650 | set_bit(vid, vsi->active_vlans); | ||
1651 | |||
1652 | return ret; | ||
1653 | } | 1648 | } |
1654 | 1649 | ||
1655 | /** | 1650 | /** |
@@ -1677,8 +1672,6 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev, | |||
1677 | if (status) | 1672 | if (status) |
1678 | return status; | 1673 | return status; |
1679 | 1674 | ||
1680 | clear_bit(vid, vsi->active_vlans); | ||
1681 | |||
1682 | /* Disable VLAN pruning when VLAN 0 is removed */ | 1675 | /* Disable VLAN pruning when VLAN 0 is removed */ |
1683 | if (unlikely(!vid)) | 1676 | if (unlikely(!vid)) |
1684 | status = ice_cfg_vlan_pruning(vsi, false); | 1677 | status = ice_cfg_vlan_pruning(vsi, false); |
@@ -2002,6 +1995,22 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) | |||
2002 | } | 1995 | } |
2003 | 1996 | ||
2004 | /** | 1997 | /** |
1998 | * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines | ||
1999 | * @pf: pointer to the PF structure | ||
2000 | * | ||
2001 | * There is no error returned here because the driver should be able to handle | ||
2002 | * 128 Byte cache lines, so we only print a warning in case issues are seen, | ||
2003 | * specifically with Tx. | ||
2004 | */ | ||
2005 | static void ice_verify_cacheline_size(struct ice_pf *pf) | ||
2006 | { | ||
2007 | if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) | ||
2008 | dev_warn(&pf->pdev->dev, | ||
2009 | "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", | ||
2010 | ICE_CACHE_LINE_BYTES); | ||
2011 | } | ||
2012 | |||
2013 | /** | ||
2005 | * ice_probe - Device initialization routine | 2014 | * ice_probe - Device initialization routine |
2006 | * @pdev: PCI device information struct | 2015 | * @pdev: PCI device information struct |
2007 | * @ent: entry in ice_pci_tbl | 2016 | * @ent: entry in ice_pci_tbl |
@@ -2151,6 +2160,8 @@ static int ice_probe(struct pci_dev *pdev, | |||
2151 | /* since everything is good, start the service timer */ | 2160 | /* since everything is good, start the service timer */ |
2152 | mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); | 2161 | mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); |
2153 | 2162 | ||
2163 | ice_verify_cacheline_size(pf); | ||
2164 | |||
2154 | return 0; | 2165 | return 0; |
2155 | 2166 | ||
2156 | err_alloc_sw_unroll: | 2167 | err_alloc_sw_unroll: |
@@ -2182,6 +2193,12 @@ static void ice_remove(struct pci_dev *pdev) | |||
2182 | if (!pf) | 2193 | if (!pf) |
2183 | return; | 2194 | return; |
2184 | 2195 | ||
2196 | for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { | ||
2197 | if (!ice_is_reset_in_progress(pf->state)) | ||
2198 | break; | ||
2199 | msleep(100); | ||
2200 | } | ||
2201 | |||
2185 | set_bit(__ICE_DOWN, pf->state); | 2202 | set_bit(__ICE_DOWN, pf->state); |
2186 | ice_service_task_stop(pf); | 2203 | ice_service_task_stop(pf); |
2187 | 2204 | ||
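A small worked number for the new wait loop above, assuming ICE_MAX_RESET_WAIT keeps the value added in ice.h (20): the remove path polls ice_is_reset_in_progress() at most 20 times with msleep(100) between attempts, so it waits roughly 20 * 100 ms = 2 s at most for an in-progress reset before continuing with the teardown.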
@@ -2510,31 +2527,6 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi) | |||
2510 | } | 2527 | } |
2511 | 2528 | ||
2512 | /** | 2529 | /** |
2513 | * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up | ||
2514 | * @vsi: the VSI being brought back up | ||
2515 | */ | ||
2516 | static int ice_restore_vlan(struct ice_vsi *vsi) | ||
2517 | { | ||
2518 | int err; | ||
2519 | u16 vid; | ||
2520 | |||
2521 | if (!vsi->netdev) | ||
2522 | return -EINVAL; | ||
2523 | |||
2524 | err = ice_vsi_vlan_setup(vsi); | ||
2525 | if (err) | ||
2526 | return err; | ||
2527 | |||
2528 | for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) { | ||
2529 | err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid); | ||
2530 | if (err) | ||
2531 | break; | ||
2532 | } | ||
2533 | |||
2534 | return err; | ||
2535 | } | ||
2536 | |||
2537 | /** | ||
2538 | * ice_vsi_cfg - Setup the VSI | 2530 | * ice_vsi_cfg - Setup the VSI |
2539 | * @vsi: the VSI being configured | 2531 | * @vsi: the VSI being configured |
2540 | * | 2532 | * |
@@ -2546,7 +2538,9 @@ static int ice_vsi_cfg(struct ice_vsi *vsi) | |||
2546 | 2538 | ||
2547 | if (vsi->netdev) { | 2539 | if (vsi->netdev) { |
2548 | ice_set_rx_mode(vsi->netdev); | 2540 | ice_set_rx_mode(vsi->netdev); |
2549 | err = ice_restore_vlan(vsi); | 2541 | |
2542 | err = ice_vsi_vlan_setup(vsi); | ||
2543 | |||
2550 | if (err) | 2544 | if (err) |
2551 | return err; | 2545 | return err; |
2552 | } | 2546 | } |
@@ -3296,7 +3290,7 @@ static void ice_rebuild(struct ice_pf *pf) | |||
3296 | struct device *dev = &pf->pdev->dev; | 3290 | struct device *dev = &pf->pdev->dev; |
3297 | struct ice_hw *hw = &pf->hw; | 3291 | struct ice_hw *hw = &pf->hw; |
3298 | enum ice_status ret; | 3292 | enum ice_status ret; |
3299 | int err; | 3293 | int err, i; |
3300 | 3294 | ||
3301 | if (test_bit(__ICE_DOWN, pf->state)) | 3295 | if (test_bit(__ICE_DOWN, pf->state)) |
3302 | goto clear_recovery; | 3296 | goto clear_recovery; |
@@ -3370,6 +3364,22 @@ static void ice_rebuild(struct ice_pf *pf) | |||
3370 | } | 3364 | } |
3371 | 3365 | ||
3372 | ice_reset_all_vfs(pf, true); | 3366 | ice_reset_all_vfs(pf, true); |
3367 | |||
3368 | for (i = 0; i < pf->num_alloc_vsi; i++) { | ||
3369 | bool link_up; | ||
3370 | |||
3371 | if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF) | ||
3372 | continue; | ||
3373 | ice_get_link_status(pf->vsi[i]->port_info, &link_up); | ||
3374 | if (link_up) { | ||
3375 | netif_carrier_on(pf->vsi[i]->netdev); | ||
3376 | netif_tx_wake_all_queues(pf->vsi[i]->netdev); | ||
3377 | } else { | ||
3378 | netif_carrier_off(pf->vsi[i]->netdev); | ||
3379 | netif_tx_stop_all_queues(pf->vsi[i]->netdev); | ||
3380 | } | ||
3381 | } | ||
3382 | |||
3373 | /* if we get here, reset flow is successful */ | 3383 | /* if we get here, reset flow is successful */ |
3374 | clear_bit(__ICE_RESET_FAILED, pf->state); | 3384 | clear_bit(__ICE_RESET_FAILED, pf->state); |
3375 | return; | 3385 | return; |
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 33403f39f1b3..40c9c6558956 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -348,6 +348,18 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) | |||
348 | } | 348 | } |
349 | 349 | ||
350 | /** | 350 | /** |
351 | * ice_clear_all_vsi_ctx - clear all the VSI context entries | ||
352 | * @hw: pointer to the hw struct | ||
353 | */ | ||
354 | void ice_clear_all_vsi_ctx(struct ice_hw *hw) | ||
355 | { | ||
356 | u16 i; | ||
357 | |||
358 | for (i = 0; i < ICE_MAX_VSI; i++) | ||
359 | ice_clear_vsi_ctx(hw, i); | ||
360 | } | ||
361 | |||
362 | /** | ||
351 | * ice_add_vsi - add VSI context to the hardware and VSI handle list | 363 | * ice_add_vsi - add VSI context to the hardware and VSI handle list |
352 | * @hw: pointer to the hw struct | 364 | * @hw: pointer to the hw struct |
353 | * @vsi_handle: unique VSI handle provided by drivers | 365 | * @vsi_handle: unique VSI handle provided by drivers |
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index b88d96a1ef69..d5ef0bd58bf9 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -190,6 +190,8 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, | |||
190 | struct ice_sq_cd *cd); | 190 | struct ice_sq_cd *cd); |
191 | bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle); | 191 | bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle); |
192 | struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle); | 192 | struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle); |
193 | void ice_clear_all_vsi_ctx(struct ice_hw *hw); | ||
194 | /* Switch config */ | ||
193 | enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw); | 195 | enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw); |
194 | 196 | ||
195 | /* Switch/bridge related commands */ | 197 | /* Switch/bridge related commands */ |
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 5dae968d853e..fe5bbabbb41e 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c | |||
@@ -1520,7 +1520,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) | |||
1520 | 1520 | ||
1521 | /* update gso_segs and bytecount */ | 1521 | /* update gso_segs and bytecount */ |
1522 | first->gso_segs = skb_shinfo(skb)->gso_segs; | 1522 | first->gso_segs = skb_shinfo(skb)->gso_segs; |
1523 | first->bytecount = (first->gso_segs - 1) * off->header_len; | 1523 | first->bytecount += (first->gso_segs - 1) * off->header_len; |
1524 | 1524 | ||
1525 | cd_tso_len = skb->len - off->header_len; | 1525 | cd_tso_len = skb->len - off->header_len; |
1526 | cd_mss = skb_shinfo(skb)->gso_size; | 1526 | cd_mss = skb_shinfo(skb)->gso_size; |
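The "+=" fix above matters for byte accounting: first->bytecount is initialized from skb->len earlier in the transmit path, and a TSO send puts the protocol headers on the wire once per segment, so the (gso_segs - 1) extra header copies have to be added to that total rather than overwrite it. A minimal stand-alone sketch of the arithmetic, with made-up example numbers rather than driver values:

	/* Hypothetical helper, not part of ice: total bytes put on the wire
	 * for a TSO skb whose length already includes one copy of the headers.
	 */
	static unsigned int tso_wire_bytes(unsigned int skb_len,
					   unsigned int header_len,
					   unsigned int gso_segs)
	{
		/* e.g. a 7254-byte skb (7200 payload + 54 header), MSS 1440,
		 * 5 segments: 7254 + 4 * 54 = 7470 = 5 * (1440 + 54).
		 */
		return skb_len + (gso_segs - 1) * header_len;
	}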
@@ -1556,15 +1556,15 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) | |||
1556 | * magnitude greater than our largest possible GSO size. | 1556 | * magnitude greater than our largest possible GSO size. |
1557 | * | 1557 | * |
1558 | * This would then be implemented as: | 1558 | * This would then be implemented as: |
1559 | * return (((size >> 12) * 85) >> 8) + 1; | 1559 | * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR; |
1560 | * | 1560 | * |
1561 | * Since multiplication and division are commutative, we can reorder | 1561 | * Since multiplication and division are commutative, we can reorder |
1562 | * operations into: | 1562 | * operations into: |
1563 | * return ((size * 85) >> 20) + 1; | 1563 | * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; |
1564 | */ | 1564 | */ |
1565 | static unsigned int ice_txd_use_count(unsigned int size) | 1565 | static unsigned int ice_txd_use_count(unsigned int size) |
1566 | { | 1566 | { |
1567 | return ((size * 85) >> 20) + 1; | 1567 | return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; |
1568 | } | 1568 | } |
1569 | 1569 | ||
1570 | /** | 1570 | /** |
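The comment above is the whole derivation: dividing by 12K is split into a divide by 4K (shift right by 12) and a divide by 3 (multiply by 85, shift right by 8), and the two steps commute into a single multiply and one 20-bit shift, with ICE_DESCS_FOR_SKB_DATA_PTR added for the descriptor that carries the skb data pointer. A quick userspace check of the reordered form, assuming the constant is 1 as defined in the ice_txrx.h hunk below:

	#include <stdio.h>

	#define DESCS_FOR_SKB_DATA_PTR 1	/* stand-in for ICE_DESCS_FOR_SKB_DATA_PTR */

	static unsigned int txd_use_count(unsigned int size)
	{
		return ((size * 85) >> 20) + DESCS_FOR_SKB_DATA_PTR;
	}

	int main(void)
	{
		/* a 32 KiB fragment: 32768 * 85 = 2785280, >> 20 gives 2, plus 1 is 3 */
		printf("%u\n", txd_use_count(32768));	/* prints 3 */
		return 0;
	}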
@@ -1706,7 +1706,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) | |||
1706 | * + 1 desc for context descriptor, | 1706 | * + 1 desc for context descriptor, |
1707 | * otherwise try next time | 1707 | * otherwise try next time |
1708 | */ | 1708 | */ |
1709 | if (ice_maybe_stop_tx(tx_ring, count + 4 + 1)) { | 1709 | if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE + |
1710 | ICE_DESCS_FOR_CTX_DESC)) { | ||
1710 | tx_ring->tx_stats.tx_busy++; | 1711 | tx_ring->tx_stats.tx_busy++; |
1711 | return NETDEV_TX_BUSY; | 1712 | return NETDEV_TX_BUSY; |
1712 | } | 1713 | } |
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 1d0f58bd389b..75d0eaf6c9dd 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h | |||
@@ -22,8 +22,21 @@ | |||
22 | #define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */ | 22 | #define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */ |
23 | #define ICE_MAX_TXQ_PER_TXQG 128 | 23 | #define ICE_MAX_TXQ_PER_TXQG 128 |
24 | 24 | ||
25 | /* Tx Descriptors needed, worst case */ | 25 | /* We are assuming that the cache line is always 64 Bytes here for ice. |
26 | #define DESC_NEEDED (MAX_SKB_FRAGS + 4) | 26 | * In order to make sure that is a correct assumption there is a check in probe |
27 | * to print a warning if the read from GLPCI_CNF2 tells us that the cache line | ||
28 | * size is 128 bytes. We do it this way because we do not want to read the | ||
29 | * GLPCI_CNF2 register or a variable containing the value on every pass through | ||
30 | * the Tx path. | ||
31 | */ | ||
32 | #define ICE_CACHE_LINE_BYTES 64 | ||
33 | #define ICE_DESCS_PER_CACHE_LINE (ICE_CACHE_LINE_BYTES / \ | ||
34 | sizeof(struct ice_tx_desc)) | ||
35 | #define ICE_DESCS_FOR_CTX_DESC 1 | ||
36 | #define ICE_DESCS_FOR_SKB_DATA_PTR 1 | ||
37 | /* Tx descriptors needed, worst case */ | ||
38 | #define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \ | ||
39 | ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR) | ||
27 | #define ICE_DESC_UNUSED(R) \ | 40 | #define ICE_DESC_UNUSED(R) \ |
28 | ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ | 41 | ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ |
29 | (R)->next_to_clean - (R)->next_to_use - 1) | 42 | (R)->next_to_clean - (R)->next_to_use - 1) |
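Worked out under the comment's 64-byte cache line, and additionally assuming a 16-byte Tx descriptor and a MAX_SKB_FRAGS of 17 (both are assumptions here, not values quoted in the hunk): ICE_DESCS_PER_CACHE_LINE comes out as 64 / 16 = 4, so DESC_NEEDED becomes 17 + 1 + 4 + 1 = 23, compared with MAX_SKB_FRAGS + 4 = 21 under the old definition.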
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 12f9432abf11..f4dbc81c1988 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h | |||
@@ -92,12 +92,12 @@ struct ice_link_status { | |||
92 | u64 phy_type_low; | 92 | u64 phy_type_low; |
93 | u16 max_frame_size; | 93 | u16 max_frame_size; |
94 | u16 link_speed; | 94 | u16 link_speed; |
95 | u16 req_speeds; | ||
95 | u8 lse_ena; /* Link Status Event notification */ | 96 | u8 lse_ena; /* Link Status Event notification */ |
96 | u8 link_info; | 97 | u8 link_info; |
97 | u8 an_info; | 98 | u8 an_info; |
98 | u8 ext_info; | 99 | u8 ext_info; |
99 | u8 pacing; | 100 | u8 pacing; |
100 | u8 req_speeds; | ||
101 | /* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of | 101 | /* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of |
102 | * ice_aqc_get_phy_caps structure | 102 | * ice_aqc_get_phy_caps structure |
103 | */ | 103 | */ |
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 45f10f8f01dc..e71065f9d391 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | |||
@@ -348,7 +348,7 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid) | |||
348 | struct ice_vsi_ctx ctxt = { 0 }; | 348 | struct ice_vsi_ctx ctxt = { 0 }; |
349 | enum ice_status status; | 349 | enum ice_status status; |
350 | 350 | ||
351 | ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED | | 351 | ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED | |
352 | ICE_AQ_VSI_PVLAN_INSERT_PVID | | 352 | ICE_AQ_VSI_PVLAN_INSERT_PVID | |
353 | ICE_AQ_VSI_VLAN_EMOD_STR; | 353 | ICE_AQ_VSI_VLAN_EMOD_STR; |
354 | ctxt.info.pvid = cpu_to_le16(vid); | 354 | ctxt.info.pvid = cpu_to_le16(vid); |
@@ -2171,7 +2171,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) | |||
2171 | 2171 | ||
2172 | if (!ice_vsi_add_vlan(vsi, vid)) { | 2172 | if (!ice_vsi_add_vlan(vsi, vid)) { |
2173 | vf->num_vlan++; | 2173 | vf->num_vlan++; |
2174 | set_bit(vid, vsi->active_vlans); | ||
2175 | 2174 | ||
2176 | /* Enable VLAN pruning when VLAN 0 is added */ | 2175 | /* Enable VLAN pruning when VLAN 0 is added */ |
2177 | if (unlikely(!vid)) | 2176 | if (unlikely(!vid)) |
@@ -2190,7 +2189,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) | |||
2190 | */ | 2189 | */ |
2191 | if (!ice_vsi_kill_vlan(vsi, vid)) { | 2190 | if (!ice_vsi_kill_vlan(vsi, vid)) { |
2192 | vf->num_vlan--; | 2191 | vf->num_vlan--; |
2193 | clear_bit(vid, vsi->active_vlans); | ||
2194 | 2192 | ||
2195 | /* Disable VLAN pruning when removing VLAN 0 */ | 2193 | /* Disable VLAN pruning when removing VLAN 0 */ |
2196 | if (unlikely(!vid)) | 2194 | if (unlikely(!vid)) |
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 29ced6b74d36..2b95dc9c7a6a 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c | |||
@@ -53,13 +53,15 @@ | |||
53 | * 2^40 * 10^-9 / 60 = 18.3 minutes. | 53 | * 2^40 * 10^-9 / 60 = 18.3 minutes. |
54 | * | 54 | * |
55 | * SYSTIM is converted to real time using a timecounter. As | 55 | * SYSTIM is converted to real time using a timecounter. As |
56 | * timecounter_cyc2time() allows old timestamps, the timecounter | 56 | * timecounter_cyc2time() allows old timestamps, the timecounter needs |
57 | * needs to be updated at least once per half of the SYSTIM interval. | 57 | * to be updated at least once per half of the SYSTIM interval. |
58 | * Scheduling of delayed work is not very accurate, so we aim for 8 | 58 | * Scheduling of delayed work is not very accurate, and also the NIC |
59 | * minutes to be sure the actual interval is shorter than 9.16 minutes. | 59 | * clock can be adjusted to run up to 6% faster and the system clock |
60 | * up to 10% slower, so we aim for 6 minutes to be sure the actual | ||
61 | * interval in the NIC time is shorter than 9.16 minutes. | ||
60 | */ | 62 | */ |
61 | 63 | ||
62 | #define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 8) | 64 | #define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 6) |
63 | #define IGB_PTP_TX_TIMEOUT (HZ * 15) | 65 | #define IGB_PTP_TX_TIMEOUT (HZ * 15) |
64 | #define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT) | 66 | #define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT) |
65 | #define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0) | 67 | #define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0) |
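Checking the new period against the comment's own figures: the half-wrap limit is 18.3 / 2, about 9.16 minutes of SYSTIM time. With the system clock running up to 10% slow, work scheduled for 6 minutes may fire after as much as 6 / 0.9, about 6.7 wall-clock minutes, and with the NIC clock up to 6% fast that is roughly 6.7 * 1.06, about 7.1 minutes of SYSTIM time, still below 9.16. The old 8-minute period could reach about 8 / 0.9 * 1.06, roughly 9.4 minutes, in the same worst case, which is past the limit.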
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 5bfd349bf41a..3ba672e9e353 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -494,7 +494,7 @@ struct mvneta_port { | |||
494 | #if defined(__LITTLE_ENDIAN) | 494 | #if defined(__LITTLE_ENDIAN) |
495 | struct mvneta_tx_desc { | 495 | struct mvneta_tx_desc { |
496 | u32 command; /* Options used by HW for packet transmitting.*/ | 496 | u32 command; /* Options used by HW for packet transmitting.*/ |
497 | u16 reserverd1; /* csum_l4 (for future use) */ | 497 | u16 reserved1; /* csum_l4 (for future use) */ |
498 | u16 data_size; /* Data size of transmitted packet in bytes */ | 498 | u16 data_size; /* Data size of transmitted packet in bytes */ |
499 | u32 buf_phys_addr; /* Physical addr of transmitted buffer */ | 499 | u32 buf_phys_addr; /* Physical addr of transmitted buffer */ |
500 | u32 reserved2; /* hw_cmd - (for future use, PMT) */ | 500 | u32 reserved2; /* hw_cmd - (for future use, PMT) */ |
@@ -519,7 +519,7 @@ struct mvneta_rx_desc { | |||
519 | #else | 519 | #else |
520 | struct mvneta_tx_desc { | 520 | struct mvneta_tx_desc { |
521 | u16 data_size; /* Data size of transmitted packet in bytes */ | 521 | u16 data_size; /* Data size of transmitted packet in bytes */ |
522 | u16 reserverd1; /* csum_l4 (for future use) */ | 522 | u16 reserved1; /* csum_l4 (for future use) */ |
523 | u32 command; /* Options used by HW for packet transmitting.*/ | 523 | u32 command; /* Options used by HW for packet transmitting.*/ |
524 | u32 reserved2; /* hw_cmd - (for future use, PMT) */ | 524 | u32 reserved2; /* hw_cmd - (for future use, PMT) */ |
525 | u32 buf_phys_addr; /* Physical addr of transmitted buffer */ | 525 | u32 buf_phys_addr; /* Physical addr of transmitted buffer */ |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 1857ee0f0871..6f5153afcab4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
@@ -1006,7 +1006,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1006 | ring->packets++; | 1006 | ring->packets++; |
1007 | } | 1007 | } |
1008 | ring->bytes += tx_info->nr_bytes; | 1008 | ring->bytes += tx_info->nr_bytes; |
1009 | netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes); | ||
1010 | AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); | 1009 | AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); |
1011 | 1010 | ||
1012 | if (tx_info->inl) | 1011 | if (tx_info->inl) |
@@ -1044,7 +1043,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1044 | netif_tx_stop_queue(ring->tx_queue); | 1043 | netif_tx_stop_queue(ring->tx_queue); |
1045 | ring->queue_stopped++; | 1044 | ring->queue_stopped++; |
1046 | } | 1045 | } |
1047 | send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue); | 1046 | |
1047 | send_doorbell = __netdev_tx_sent_queue(ring->tx_queue, | ||
1048 | tx_info->nr_bytes, | ||
1049 | skb->xmit_more); | ||
1048 | 1050 | ||
1049 | real_size = (real_size / 16) & 0x3f; | 1051 | real_size = (real_size / 16) & 0x3f; |
1050 | 1052 | ||
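The doorbell decision is now folded into the BQL accounting call. Roughly, and paraphrased rather than copied from include/linux/netdevice.h (CONFIG_BQL conditionals omitted), the helper relied on above behaves like this:

	/* Sketch of the behaviour relied on above, not the kernel's exact code. */
	static inline bool example_tx_sent_queue(struct netdev_queue *txq,
						 unsigned int bytes,
						 bool xmit_more)
	{
		if (xmit_more) {
			dql_queued(&txq->dql, bytes);		/* BQL byte accounting only */
			return netif_tx_queue_stopped(txq);	/* doorbell if BQL stopped us */
		}
		netdev_tx_sent_queue(txq, bytes);		/* accounting, may stop the queue */
		return true;					/* end of burst: ring the doorbell */
	}

For mlx4 this means a queue that BQL has just stopped still gets its doorbell even when the stack set xmit_more, which the old code also achieved but only via a separate netdev_tx_sent_queue() call earlier in the function.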
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index a2df12b79f8e..9bec940330a4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
@@ -3568,7 +3568,6 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) | |||
3568 | burst_size = 7; | 3568 | burst_size = 7; |
3569 | break; | 3569 | break; |
3570 | case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: | 3570 | case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: |
3571 | is_bytes = true; | ||
3572 | rate = 4 * 1024; | 3571 | rate = 4 * 1024; |
3573 | burst_size = 4; | 3572 | burst_size = 4; |
3574 | break; | 3573 | break; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index cc1b373c0ace..46dc93d3b9b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c | |||
@@ -147,7 +147,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, | |||
147 | "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n", | 147 | "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n", |
148 | fcoe_pf_params->num_cqs, | 148 | fcoe_pf_params->num_cqs, |
149 | p_hwfn->hw_info.feat_num[QED_FCOE_CQ]); | 149 | p_hwfn->hw_info.feat_num[QED_FCOE_CQ]); |
150 | return -EINVAL; | 150 | rc = -EINVAL; |
151 | goto err; | ||
151 | } | 152 | } |
152 | 153 | ||
153 | p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu); | 154 | p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu); |
@@ -156,14 +157,14 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, | |||
156 | 157 | ||
157 | rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid); | 158 | rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid); |
158 | if (rc) | 159 | if (rc) |
159 | return rc; | 160 | goto err; |
160 | 161 | ||
161 | cxt_info.iid = dummy_cid; | 162 | cxt_info.iid = dummy_cid; |
162 | rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); | 163 | rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); |
163 | if (rc) { | 164 | if (rc) { |
164 | DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n", | 165 | DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n", |
165 | dummy_cid); | 166 | dummy_cid); |
166 | return rc; | 167 | goto err; |
167 | } | 168 | } |
168 | p_cxt = cxt_info.p_cxt; | 169 | p_cxt = cxt_info.p_cxt; |
169 | SET_FIELD(p_cxt->tstorm_ag_context.flags3, | 170 | SET_FIELD(p_cxt->tstorm_ag_context.flags3, |
@@ -240,6 +241,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, | |||
240 | rc = qed_spq_post(p_hwfn, p_ent, NULL); | 241 | rc = qed_spq_post(p_hwfn, p_ent, NULL); |
241 | 242 | ||
242 | return rc; | 243 | return rc; |
244 | |||
245 | err: | ||
246 | qed_sp_destroy_request(p_hwfn, p_ent); | ||
247 | return rc; | ||
243 | } | 248 | } |
244 | 249 | ||
245 | static int | 250 | static int |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 1135387bd99d..4f8a685d1a55 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c | |||
@@ -200,6 +200,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, | |||
200 | "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n", | 200 | "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n", |
201 | p_params->num_queues, | 201 | p_params->num_queues, |
202 | p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]); | 202 | p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]); |
203 | qed_sp_destroy_request(p_hwfn, p_ent); | ||
203 | return -EINVAL; | 204 | return -EINVAL; |
204 | } | 205 | } |
205 | 206 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 82a1bd1f8a8c..67c02ea93906 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c | |||
@@ -740,8 +740,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, | |||
740 | 740 | ||
741 | rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); | 741 | rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); |
742 | if (rc) { | 742 | if (rc) { |
743 | /* Return spq entry which is taken in qed_sp_init_request()*/ | 743 | qed_sp_destroy_request(p_hwfn, p_ent); |
744 | qed_spq_return_entry(p_hwfn, p_ent); | ||
745 | return rc; | 744 | return rc; |
746 | } | 745 | } |
747 | 746 | ||
@@ -1355,6 +1354,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, | |||
1355 | DP_NOTICE(p_hwfn, | 1354 | DP_NOTICE(p_hwfn, |
1356 | "%d is not supported yet\n", | 1355 | "%d is not supported yet\n", |
1357 | p_filter_cmd->opcode); | 1356 | p_filter_cmd->opcode); |
1357 | qed_sp_destroy_request(p_hwfn, *pp_ent); | ||
1358 | return -EINVAL; | 1358 | return -EINVAL; |
1359 | } | 1359 | } |
1360 | 1360 | ||
@@ -2056,13 +2056,13 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, | |||
2056 | } else { | 2056 | } else { |
2057 | rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); | 2057 | rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); |
2058 | if (rc) | 2058 | if (rc) |
2059 | return rc; | 2059 | goto err; |
2060 | 2060 | ||
2061 | if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { | 2061 | if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { |
2062 | rc = qed_fw_l2_queue(p_hwfn, p_params->qid, | 2062 | rc = qed_fw_l2_queue(p_hwfn, p_params->qid, |
2063 | &abs_rx_q_id); | 2063 | &abs_rx_q_id); |
2064 | if (rc) | 2064 | if (rc) |
2065 | return rc; | 2065 | goto err; |
2066 | 2066 | ||
2067 | p_ramrod->rx_qid_valid = 1; | 2067 | p_ramrod->rx_qid_valid = 1; |
2068 | p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); | 2068 | p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); |
@@ -2083,6 +2083,10 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, | |||
2083 | (u64)p_params->addr, p_params->length); | 2083 | (u64)p_params->addr, p_params->length); |
2084 | 2084 | ||
2085 | return qed_spq_post(p_hwfn, p_ent, NULL); | 2085 | return qed_spq_post(p_hwfn, p_ent, NULL); |
2086 | |||
2087 | err: | ||
2088 | qed_sp_destroy_request(p_hwfn, p_ent); | ||
2089 | return rc; | ||
2086 | } | 2090 | } |
2087 | 2091 | ||
2088 | int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, | 2092 | int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index f40f654398a0..a96364df4320 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c | |||
@@ -1944,9 +1944,12 @@ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn, | |||
1944 | struct qed_ptt *p_ptt, u32 *p_speed_mask) | 1944 | struct qed_ptt *p_ptt, u32 *p_speed_mask) |
1945 | { | 1945 | { |
1946 | u32 transceiver_type, transceiver_state; | 1946 | u32 transceiver_type, transceiver_state; |
1947 | int ret; | ||
1947 | 1948 | ||
1948 | qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state, | 1949 | ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state, |
1949 | &transceiver_type); | 1950 | &transceiver_type); |
1951 | if (ret) | ||
1952 | return ret; | ||
1950 | 1953 | ||
1951 | if (qed_is_transceiver_ready(transceiver_state, transceiver_type) == | 1954 | if (qed_is_transceiver_ready(transceiver_state, transceiver_type) == |
1952 | false) | 1955 | false) |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index c71391b9c757..62113438c880 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c | |||
@@ -1514,6 +1514,7 @@ qed_rdma_register_tid(void *rdma_cxt, | |||
1514 | default: | 1514 | default: |
1515 | rc = -EINVAL; | 1515 | rc = -EINVAL; |
1516 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); | 1516 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); |
1517 | qed_sp_destroy_request(p_hwfn, p_ent); | ||
1517 | return rc; | 1518 | return rc; |
1518 | } | 1519 | } |
1519 | SET_FIELD(p_ramrod->flags1, | 1520 | SET_FIELD(p_ramrod->flags1, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index f9167d1354bb..e49fada85410 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c | |||
@@ -745,6 +745,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, | |||
745 | DP_NOTICE(p_hwfn, | 745 | DP_NOTICE(p_hwfn, |
746 | "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n", | 746 | "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n", |
747 | rc); | 747 | rc); |
748 | qed_sp_destroy_request(p_hwfn, p_ent); | ||
748 | return rc; | 749 | return rc; |
749 | } | 750 | } |
750 | 751 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index e95431f6acd4..3157c0d99441 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h | |||
@@ -167,6 +167,9 @@ struct qed_spq_entry { | |||
167 | enum spq_mode comp_mode; | 167 | enum spq_mode comp_mode; |
168 | struct qed_spq_comp_cb comp_cb; | 168 | struct qed_spq_comp_cb comp_cb; |
169 | struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */ | 169 | struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */ |
170 | |||
171 | /* Posted entry for unlimited list entry in EBLOCK mode */ | ||
172 | struct qed_spq_entry *post_ent; | ||
170 | }; | 173 | }; |
171 | 174 | ||
172 | struct qed_eq { | 175 | struct qed_eq { |
@@ -396,6 +399,17 @@ struct qed_sp_init_data { | |||
396 | struct qed_spq_comp_cb *p_comp_data; | 399 | struct qed_spq_comp_cb *p_comp_data; |
397 | }; | 400 | }; |
398 | 401 | ||
402 | /** | ||
403 | * @brief Returns a SPQ entry to the pool / frees the entry if allocated. | ||
404 | * Should be called on in error flows after initializing the SPQ entry | ||
404 | * Should be called in error flows after initializing the SPQ entry | ||
405 | * and before posting it. | ||
406 | * | ||
407 | * @param p_hwfn | ||
408 | * @param p_ent | ||
409 | */ | ||
410 | void qed_sp_destroy_request(struct qed_hwfn *p_hwfn, | ||
411 | struct qed_spq_entry *p_ent); | ||
412 | |||
399 | int qed_sp_init_request(struct qed_hwfn *p_hwfn, | 413 | int qed_sp_init_request(struct qed_hwfn *p_hwfn, |
400 | struct qed_spq_entry **pp_ent, | 414 | struct qed_spq_entry **pp_ent, |
401 | u8 cmd, | 415 | u8 cmd, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 77b6248ad3b9..888274fa208b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | |||
@@ -47,6 +47,19 @@ | |||
47 | #include "qed_sp.h" | 47 | #include "qed_sp.h" |
48 | #include "qed_sriov.h" | 48 | #include "qed_sriov.h" |
49 | 49 | ||
50 | void qed_sp_destroy_request(struct qed_hwfn *p_hwfn, | ||
51 | struct qed_spq_entry *p_ent) | ||
52 | { | ||
53 | /* qed_spq_get_entry() can either get an entry from the free_pool, | ||
54 | * or, if no entries are left, allocate a new entry and add it to | ||
55 | * the unlimited_pending list. | ||
56 | */ | ||
57 | if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending) | ||
58 | kfree(p_ent); | ||
59 | else | ||
60 | qed_spq_return_entry(p_hwfn, p_ent); | ||
61 | } | ||
62 | |||
50 | int qed_sp_init_request(struct qed_hwfn *p_hwfn, | 63 | int qed_sp_init_request(struct qed_hwfn *p_hwfn, |
51 | struct qed_spq_entry **pp_ent, | 64 | struct qed_spq_entry **pp_ent, |
52 | u8 cmd, u8 protocol, struct qed_sp_init_data *p_data) | 65 | u8 cmd, u8 protocol, struct qed_sp_init_data *p_data) |
@@ -80,7 +93,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, | |||
80 | 93 | ||
81 | case QED_SPQ_MODE_BLOCK: | 94 | case QED_SPQ_MODE_BLOCK: |
82 | if (!p_data->p_comp_data) | 95 | if (!p_data->p_comp_data) |
83 | return -EINVAL; | 96 | goto err; |
84 | 97 | ||
85 | p_ent->comp_cb.cookie = p_data->p_comp_data->cookie; | 98 | p_ent->comp_cb.cookie = p_data->p_comp_data->cookie; |
86 | break; | 99 | break; |
@@ -95,7 +108,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, | |||
95 | default: | 108 | default: |
96 | DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n", | 109 | DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n", |
97 | p_ent->comp_mode); | 110 | p_ent->comp_mode); |
98 | return -EINVAL; | 111 | goto err; |
99 | } | 112 | } |
100 | 113 | ||
101 | DP_VERBOSE(p_hwfn, QED_MSG_SPQ, | 114 | DP_VERBOSE(p_hwfn, QED_MSG_SPQ, |
@@ -109,6 +122,11 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, | |||
109 | memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod)); | 122 | memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod)); |
110 | 123 | ||
111 | return 0; | 124 | return 0; |
125 | |||
126 | err: | ||
127 | qed_sp_destroy_request(p_hwfn, p_ent); | ||
128 | |||
129 | return -EINVAL; | ||
112 | } | 130 | } |
113 | 131 | ||
114 | static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type) | 132 | static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type) |
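Every qed call site touched in this series follows the same pattern: once qed_sp_init_request() has handed out an SPQ entry, any failure before qed_spq_post() must release the entry through the new helper. A hypothetical caller skeleton (SOME_CMD, SOME_PROTOCOL and fill_in_ramrod() are placeholders, not real driver symbols):

	static int example_ramrod(struct qed_hwfn *p_hwfn,
				  struct qed_sp_init_data *p_init)
	{
		struct qed_spq_entry *p_ent = NULL;
		int rc;

		rc = qed_sp_init_request(p_hwfn, &p_ent, SOME_CMD, SOME_PROTOCOL, p_init);
		if (rc)
			return rc;	/* failure paths above already released the entry */

		rc = fill_in_ramrod(p_ent);	/* placeholder for ramrod setup */
		if (rc) {
			/* acquired but never posted: hand it back */
			qed_sp_destroy_request(p_hwfn, p_ent);
			return rc;
		}

		return qed_spq_post(p_hwfn, p_ent, NULL);
	}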
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index c4a6274dd625..0a9c5bb0fa48 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c | |||
@@ -142,6 +142,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn, | |||
142 | 142 | ||
143 | DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n"); | 143 | DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n"); |
144 | rc = qed_mcp_drain(p_hwfn, p_ptt); | 144 | rc = qed_mcp_drain(p_hwfn, p_ptt); |
145 | qed_ptt_release(p_hwfn, p_ptt); | ||
145 | if (rc) { | 146 | if (rc) { |
146 | DP_NOTICE(p_hwfn, "MCP drain failed\n"); | 147 | DP_NOTICE(p_hwfn, "MCP drain failed\n"); |
147 | goto err; | 148 | goto err; |
@@ -150,18 +151,15 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn, | |||
150 | /* Retry after drain */ | 151 | /* Retry after drain */ |
151 | rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); | 152 | rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); |
152 | if (!rc) | 153 | if (!rc) |
153 | goto out; | 154 | return 0; |
154 | 155 | ||
155 | comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; | 156 | comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; |
156 | if (comp_done->done == 1) | 157 | if (comp_done->done == 1) { |
157 | if (p_fw_ret) | 158 | if (p_fw_ret) |
158 | *p_fw_ret = comp_done->fw_return_code; | 159 | *p_fw_ret = comp_done->fw_return_code; |
159 | out: | 160 | return 0; |
160 | qed_ptt_release(p_hwfn, p_ptt); | 161 | } |
161 | return 0; | ||
162 | |||
163 | err: | 162 | err: |
164 | qed_ptt_release(p_hwfn, p_ptt); | ||
165 | DP_NOTICE(p_hwfn, | 163 | DP_NOTICE(p_hwfn, |
166 | "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n", | 164 | "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n", |
167 | le32_to_cpu(p_ent->elem.hdr.cid), | 165 | le32_to_cpu(p_ent->elem.hdr.cid), |
@@ -685,6 +683,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn, | |||
685 | /* EBLOCK responsible to free the allocated p_ent */ | 683 | /* EBLOCK responsible to free the allocated p_ent */ |
686 | if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK) | 684 | if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK) |
687 | kfree(p_ent); | 685 | kfree(p_ent); |
686 | else | ||
687 | p_ent->post_ent = p_en2; | ||
688 | 688 | ||
689 | p_ent = p_en2; | 689 | p_ent = p_en2; |
690 | } | 690 | } |
@@ -767,6 +767,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) | |||
767 | SPQ_HIGH_PRI_RESERVE_DEFAULT); | 767 | SPQ_HIGH_PRI_RESERVE_DEFAULT); |
768 | } | 768 | } |
769 | 769 | ||
770 | /* Avoid overriding of SPQ entries when getting out-of-order completions, by | ||
771 | * marking the completions in a bitmap and increasing the chain consumer only | ||
772 | * for the first successive completed entries. | ||
773 | */ | ||
774 | static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo) | ||
775 | { | ||
776 | u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; | ||
777 | struct qed_spq *p_spq = p_hwfn->p_spq; | ||
778 | |||
779 | __set_bit(pos, p_spq->p_comp_bitmap); | ||
780 | while (test_bit(p_spq->comp_bitmap_idx, | ||
781 | p_spq->p_comp_bitmap)) { | ||
782 | __clear_bit(p_spq->comp_bitmap_idx, | ||
783 | p_spq->p_comp_bitmap); | ||
784 | p_spq->comp_bitmap_idx++; | ||
785 | qed_chain_return_produced(&p_spq->chain); | ||
786 | } | ||
787 | } | ||
788 | |||
770 | int qed_spq_post(struct qed_hwfn *p_hwfn, | 789 | int qed_spq_post(struct qed_hwfn *p_hwfn, |
771 | struct qed_spq_entry *p_ent, u8 *fw_return_code) | 790 | struct qed_spq_entry *p_ent, u8 *fw_return_code) |
772 | { | 791 | { |
@@ -824,11 +843,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, | |||
824 | p_ent->queue == &p_spq->unlimited_pending); | 843 | p_ent->queue == &p_spq->unlimited_pending); |
825 | 844 | ||
826 | if (p_ent->queue == &p_spq->unlimited_pending) { | 845 | if (p_ent->queue == &p_spq->unlimited_pending) { |
827 | /* This is an allocated p_ent which does not need to | 846 | struct qed_spq_entry *p_post_ent = p_ent->post_ent; |
828 | * return to pool. | 847 | |
829 | */ | ||
830 | kfree(p_ent); | 848 | kfree(p_ent); |
831 | return rc; | 849 | |
850 | /* Return the entry which was actually posted */ | ||
851 | p_ent = p_post_ent; | ||
832 | } | 852 | } |
833 | 853 | ||
834 | if (rc) | 854 | if (rc) |
@@ -842,7 +862,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, | |||
842 | spq_post_fail2: | 862 | spq_post_fail2: |
843 | spin_lock_bh(&p_spq->lock); | 863 | spin_lock_bh(&p_spq->lock); |
844 | list_del(&p_ent->list); | 864 | list_del(&p_ent->list); |
845 | qed_chain_return_produced(&p_spq->chain); | 865 | qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo); |
846 | 866 | ||
847 | spq_post_fail: | 867 | spq_post_fail: |
848 | /* return to the free pool */ | 868 | /* return to the free pool */ |
@@ -874,25 +894,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, | |||
874 | spin_lock_bh(&p_spq->lock); | 894 | spin_lock_bh(&p_spq->lock); |
875 | list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) { | 895 | list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) { |
876 | if (p_ent->elem.hdr.echo == echo) { | 896 | if (p_ent->elem.hdr.echo == echo) { |
877 | u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; | ||
878 | |||
879 | list_del(&p_ent->list); | 897 | list_del(&p_ent->list); |
880 | 898 | qed_spq_comp_bmap_update(p_hwfn, echo); | |
881 | /* Avoid overriding of SPQ entries when getting | ||
882 | * out-of-order completions, by marking the completions | ||
883 | * in a bitmap and increasing the chain consumer only | ||
884 | * for the first successive completed entries. | ||
885 | */ | ||
886 | __set_bit(pos, p_spq->p_comp_bitmap); | ||
887 | |||
888 | while (test_bit(p_spq->comp_bitmap_idx, | ||
889 | p_spq->p_comp_bitmap)) { | ||
890 | __clear_bit(p_spq->comp_bitmap_idx, | ||
891 | p_spq->p_comp_bitmap); | ||
892 | p_spq->comp_bitmap_idx++; | ||
893 | qed_chain_return_produced(&p_spq->chain); | ||
894 | } | ||
895 | |||
896 | p_spq->comp_count++; | 899 | p_spq->comp_count++; |
897 | found = p_ent; | 900 | found = p_ent; |
898 | break; | 901 | break; |
@@ -931,11 +934,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, | |||
931 | QED_MSG_SPQ, | 934 | QED_MSG_SPQ, |
932 | "Got a completion without a callback function\n"); | 935 | "Got a completion without a callback function\n"); |
933 | 936 | ||
934 | if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) || | 937 | if (found->comp_mode != QED_SPQ_MODE_EBLOCK) |
935 | (found->queue == &p_spq->unlimited_pending)) | ||
936 | /* EBLOCK is responsible for returning its own entry into the | 938 | /* EBLOCK is responsible for returning its own entry into the |
937 | * free list, unless it originally added the entry into the | 939 | * free list. |
938 | * unlimited pending list. | ||
939 | */ | 940 | */ |
940 | qed_spq_return_entry(p_hwfn, found); | 941 | qed_spq_return_entry(p_hwfn, found); |
941 | 942 | ||
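A short walk-through of the qed_spq_comp_bmap_update() helper added above, for out-of-order completions: start with comp_bitmap_idx at 0 and let completions arrive for echo 2, then 0, then 1. Echo 2 only sets bit 2; bit 0 is still clear, so the chain consumer does not move. Echo 0 sets bit 0, the loop clears it, advances the index to 1 and returns one produced element, then stops at the still-clear bit 1. Echo 1 sets bit 1, and the loop now clears bits 1 and 2, advancing the index to 3 and returning the remaining two elements, so the consumer only ever advances over a contiguous run of completed entries.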
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 9b08a9d9e151..ca6290fa0f30 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c | |||
@@ -101,6 +101,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) | |||
101 | default: | 101 | default: |
102 | DP_NOTICE(p_hwfn, "Unknown VF personality %d\n", | 102 | DP_NOTICE(p_hwfn, "Unknown VF personality %d\n", |
103 | p_hwfn->hw_info.personality); | 103 | p_hwfn->hw_info.personality); |
104 | qed_sp_destroy_request(p_hwfn, p_ent); | ||
104 | return -EINVAL; | 105 | return -EINVAL; |
105 | } | 106 | } |
106 | 107 | ||
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 9647578cbe6a..14f26bf3b388 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | |||
@@ -459,7 +459,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter, | |||
459 | struct cmd_desc_type0 *first_desc, struct sk_buff *skb, | 459 | struct cmd_desc_type0 *first_desc, struct sk_buff *skb, |
460 | struct qlcnic_host_tx_ring *tx_ring) | 460 | struct qlcnic_host_tx_ring *tx_ring) |
461 | { | 461 | { |
462 | u8 l4proto, opcode = 0, hdr_len = 0; | 462 | u8 l4proto, opcode = 0, hdr_len = 0, tag_vlan = 0; |
463 | u16 flags = 0, vlan_tci = 0; | 463 | u16 flags = 0, vlan_tci = 0; |
464 | int copied, offset, copy_len, size; | 464 | int copied, offset, copy_len, size; |
465 | struct cmd_desc_type0 *hwdesc; | 465 | struct cmd_desc_type0 *hwdesc; |
@@ -472,14 +472,16 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter, | |||
472 | flags = QLCNIC_FLAGS_VLAN_TAGGED; | 472 | flags = QLCNIC_FLAGS_VLAN_TAGGED; |
473 | vlan_tci = ntohs(vh->h_vlan_TCI); | 473 | vlan_tci = ntohs(vh->h_vlan_TCI); |
474 | protocol = ntohs(vh->h_vlan_encapsulated_proto); | 474 | protocol = ntohs(vh->h_vlan_encapsulated_proto); |
475 | tag_vlan = 1; | ||
475 | } else if (skb_vlan_tag_present(skb)) { | 476 | } else if (skb_vlan_tag_present(skb)) { |
476 | flags = QLCNIC_FLAGS_VLAN_OOB; | 477 | flags = QLCNIC_FLAGS_VLAN_OOB; |
477 | vlan_tci = skb_vlan_tag_get(skb); | 478 | vlan_tci = skb_vlan_tag_get(skb); |
479 | tag_vlan = 1; | ||
478 | } | 480 | } |
479 | if (unlikely(adapter->tx_pvid)) { | 481 | if (unlikely(adapter->tx_pvid)) { |
480 | if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED)) | 482 | if (tag_vlan && !(adapter->flags & QLCNIC_TAGGING_ENABLED)) |
481 | return -EIO; | 483 | return -EIO; |
482 | if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED)) | 484 | if (tag_vlan && (adapter->flags & QLCNIC_TAGGING_ENABLED)) |
483 | goto set_flags; | 485 | goto set_flags; |
484 | 486 | ||
485 | flags = QLCNIC_FLAGS_VLAN_OOB; | 487 | flags = QLCNIC_FLAGS_VLAN_OOB; |
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 0afc3d335d56..d11c16aeb19a 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | |||
@@ -234,7 +234,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, | |||
234 | struct net_device *real_dev, | 234 | struct net_device *real_dev, |
235 | struct rmnet_endpoint *ep) | 235 | struct rmnet_endpoint *ep) |
236 | { | 236 | { |
237 | struct rmnet_priv *priv; | 237 | struct rmnet_priv *priv = netdev_priv(rmnet_dev); |
238 | int rc; | 238 | int rc; |
239 | 239 | ||
240 | if (ep->egress_dev) | 240 | if (ep->egress_dev) |
@@ -247,6 +247,8 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, | |||
247 | rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | 247 | rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; |
248 | rmnet_dev->hw_features |= NETIF_F_SG; | 248 | rmnet_dev->hw_features |= NETIF_F_SG; |
249 | 249 | ||
250 | priv->real_dev = real_dev; | ||
251 | |||
250 | rc = register_netdevice(rmnet_dev); | 252 | rc = register_netdevice(rmnet_dev); |
251 | if (!rc) { | 253 | if (!rc) { |
252 | ep->egress_dev = rmnet_dev; | 254 | ep->egress_dev = rmnet_dev; |
@@ -255,9 +257,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, | |||
255 | 257 | ||
256 | rmnet_dev->rtnl_link_ops = &rmnet_link_ops; | 258 | rmnet_dev->rtnl_link_ops = &rmnet_link_ops; |
257 | 259 | ||
258 | priv = netdev_priv(rmnet_dev); | ||
259 | priv->mux_id = id; | 260 | priv->mux_id = id; |
260 | priv->real_dev = real_dev; | ||
261 | 261 | ||
262 | netdev_dbg(rmnet_dev, "rmnet dev created\n"); | 262 | netdev_dbg(rmnet_dev, "rmnet dev created\n"); |
263 | } | 263 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index b1b305f8f414..272b9ca66314 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h | |||
@@ -365,7 +365,8 @@ struct dma_features { | |||
365 | 365 | ||
366 | /* GMAC TX FIFO is 8K, Rx FIFO is 16K */ | 366 | /* GMAC TX FIFO is 8K, Rx FIFO is 16K */ |
367 | #define BUF_SIZE_16KiB 16384 | 367 | #define BUF_SIZE_16KiB 16384 |
368 | #define BUF_SIZE_8KiB 8192 | 368 | /* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */ |
369 | #define BUF_SIZE_8KiB 8188 | ||
369 | #define BUF_SIZE_4KiB 4096 | 370 | #define BUF_SIZE_4KiB 4096 |
370 | #define BUF_SIZE_2KiB 2048 | 371 | #define BUF_SIZE_2KiB 2048 |
371 | 372 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index ca9d7e48034c..40d6356a7e73 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h | |||
@@ -31,7 +31,7 @@ | |||
31 | /* Enhanced descriptors */ | 31 | /* Enhanced descriptors */ |
32 | static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) | 32 | static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) |
33 | { | 33 | { |
34 | p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1) | 34 | p->des1 |= cpu_to_le32((BUF_SIZE_8KiB |
35 | << ERDES1_BUFFER2_SIZE_SHIFT) | 35 | << ERDES1_BUFFER2_SIZE_SHIFT) |
36 | & ERDES1_BUFFER2_SIZE_MASK); | 36 | & ERDES1_BUFFER2_SIZE_MASK); |
37 | 37 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 77914c89d749..5ef91a790f9d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c | |||
@@ -262,7 +262,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, | |||
262 | int mode, int end) | 262 | int mode, int end) |
263 | { | 263 | { |
264 | p->des0 |= cpu_to_le32(RDES0_OWN); | 264 | p->des0 |= cpu_to_le32(RDES0_OWN); |
265 | p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK); | 265 | p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK); |
266 | 266 | ||
267 | if (mode == STMMAC_CHAIN_MODE) | 267 | if (mode == STMMAC_CHAIN_MODE) |
268 | ehn_desc_rx_set_on_chain(p); | 268 | ehn_desc_rx_set_on_chain(p); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index abc3f85270cd..d8c5bc412219 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c | |||
@@ -140,7 +140,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p) | |||
140 | static int set_16kib_bfsize(int mtu) | 140 | static int set_16kib_bfsize(int mtu) |
141 | { | 141 | { |
142 | int ret = 0; | 142 | int ret = 0; |
143 | if (unlikely(mtu >= BUF_SIZE_8KiB)) | 143 | if (unlikely(mtu > BUF_SIZE_8KiB)) |
144 | ret = BUF_SIZE_16KiB; | 144 | ret = BUF_SIZE_16KiB; |
145 | return ret; | 145 | return ret; |
146 | } | 146 | } |
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c index 3b7f10a5f06a..c5cae8e74dc4 100644 --- a/drivers/net/fddi/defza.c +++ b/drivers/net/fddi/defza.c | |||
@@ -1,4 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices. | 2 | /* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices. |
3 | * | 3 | * |
4 | * Copyright (c) 2018 Maciej W. Rozycki | 4 | * Copyright (c) 2018 Maciej W. Rozycki |
@@ -56,7 +56,7 @@ | |||
56 | #define DRV_VERSION "v.1.1.4" | 56 | #define DRV_VERSION "v.1.1.4" |
57 | #define DRV_RELDATE "Oct 6 2018" | 57 | #define DRV_RELDATE "Oct 6 2018" |
58 | 58 | ||
59 | static char version[] = | 59 | static const char version[] = |
60 | DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n"; | 60 | DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n"; |
61 | 61 | ||
62 | MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>"); | 62 | MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>"); |
@@ -784,7 +784,7 @@ err_rx: | |||
784 | static void fza_tx_smt(struct net_device *dev) | 784 | static void fza_tx_smt(struct net_device *dev) |
785 | { | 785 | { |
786 | struct fza_private *fp = netdev_priv(dev); | 786 | struct fza_private *fp = netdev_priv(dev); |
787 | struct fza_buffer_tx __iomem *smt_tx_ptr, *skb_data_ptr; | 787 | struct fza_buffer_tx __iomem *smt_tx_ptr; |
788 | int i, len; | 788 | int i, len; |
789 | u32 own; | 789 | u32 own; |
790 | 790 | ||
@@ -799,6 +799,7 @@ static void fza_tx_smt(struct net_device *dev) | |||
799 | 799 | ||
800 | if (!netif_queue_stopped(dev)) { | 800 | if (!netif_queue_stopped(dev)) { |
801 | if (dev_nit_active(dev)) { | 801 | if (dev_nit_active(dev)) { |
802 | struct fza_buffer_tx *skb_data_ptr; | ||
802 | struct sk_buff *skb; | 803 | struct sk_buff *skb; |
803 | 804 | ||
804 | /* Length must be a multiple of 4 as only word | 805 | /* Length must be a multiple of 4 as only word |
diff --git a/drivers/net/fddi/defza.h b/drivers/net/fddi/defza.h index b06acf32738e..93bda61be8e3 100644 --- a/drivers/net/fddi/defza.h +++ b/drivers/net/fddi/defza.h | |||
@@ -1,4 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0+ */ |
2 | /* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices. | 2 | /* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices. |
3 | * | 3 | * |
4 | * Copyright (c) 2018 Maciej W. Rozycki | 4 | * Copyright (c) 2018 Maciej W. Rozycki |
@@ -235,6 +235,7 @@ struct fza_ring_cmd { | |||
235 | #define FZA_RING_CMD 0x200400 /* command ring address */ | 235 | #define FZA_RING_CMD 0x200400 /* command ring address */ |
236 | #define FZA_RING_CMD_SIZE 0x40 /* command descriptor ring | 236 | #define FZA_RING_CMD_SIZE 0x40 /* command descriptor ring |
237 | * size | 237 | * size |
238 | */ | ||
238 | /* Command constants. */ | 239 | /* Command constants. */ |
239 | #define FZA_RING_CMD_MASK 0x7fffffff | 240 | #define FZA_RING_CMD_MASK 0x7fffffff |
240 | #define FZA_RING_CMD_NOP 0x00000000 /* nop */ | 241 | #define FZA_RING_CMD_NOP 0x00000000 /* nop */ |
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index e86ea105c802..704537010453 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c | |||
@@ -92,7 +92,7 @@ static int bcm54612e_config_init(struct phy_device *phydev) | |||
92 | return 0; | 92 | return 0; |
93 | } | 93 | } |
94 | 94 | ||
95 | static int bcm5481x_config(struct phy_device *phydev) | 95 | static int bcm54xx_config_clock_delay(struct phy_device *phydev) |
96 | { | 96 | { |
97 | int rc, val; | 97 | int rc, val; |
98 | 98 | ||
@@ -429,7 +429,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev) | |||
429 | ret = genphy_config_aneg(phydev); | 429 | ret = genphy_config_aneg(phydev); |
430 | 430 | ||
431 | /* Then we can set up the delay. */ | 431 | /* Then we can set up the delay. */ |
432 | bcm5481x_config(phydev); | 432 | bcm54xx_config_clock_delay(phydev); |
433 | 433 | ||
434 | if (of_property_read_bool(np, "enet-phy-lane-swap")) { | 434 | if (of_property_read_bool(np, "enet-phy-lane-swap")) { |
435 | /* Lane Swap - Undocumented register...magic! */ | 435 | /* Lane Swap - Undocumented register...magic! */ |
@@ -442,6 +442,19 @@ static int bcm5481_config_aneg(struct phy_device *phydev) | |||
442 | return ret; | 442 | return ret; |
443 | } | 443 | } |
444 | 444 | ||
445 | static int bcm54616s_config_aneg(struct phy_device *phydev) | ||
446 | { | ||
447 | int ret; | ||
448 | |||
449 | /* Aneg first. */ | ||
450 | ret = genphy_config_aneg(phydev); | ||
451 | |||
452 | /* Then we can set up the delay. */ | ||
453 | bcm54xx_config_clock_delay(phydev); | ||
454 | |||
455 | return ret; | ||
456 | } | ||
457 | |||
445 | static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set) | 458 | static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set) |
446 | { | 459 | { |
447 | int val; | 460 | int val; |
@@ -636,6 +649,7 @@ static struct phy_driver broadcom_drivers[] = { | |||
636 | .features = PHY_GBIT_FEATURES, | 649 | .features = PHY_GBIT_FEATURES, |
637 | .flags = PHY_HAS_INTERRUPT, | 650 | .flags = PHY_HAS_INTERRUPT, |
638 | .config_init = bcm54xx_config_init, | 651 | .config_init = bcm54xx_config_init, |
652 | .config_aneg = bcm54616s_config_aneg, | ||
639 | .ack_interrupt = bcm_phy_ack_intr, | 653 | .ack_interrupt = bcm_phy_ack_intr, |
640 | .config_intr = bcm_phy_config_intr, | 654 | .config_intr = bcm_phy_config_intr, |
641 | }, { | 655 | }, { |
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 7fc8508b5231..271e8adc39f1 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c | |||
@@ -220,7 +220,7 @@ static struct phy_driver realtek_drvs[] = { | |||
220 | .flags = PHY_HAS_INTERRUPT, | 220 | .flags = PHY_HAS_INTERRUPT, |
221 | }, { | 221 | }, { |
222 | .phy_id = 0x001cc816, | 222 | .phy_id = 0x001cc816, |
223 | .name = "RTL8201F 10/100Mbps Ethernet", | 223 | .name = "RTL8201F Fast Ethernet", |
224 | .phy_id_mask = 0x001fffff, | 224 | .phy_id_mask = 0x001fffff, |
225 | .features = PHY_BASIC_FEATURES, | 225 | .features = PHY_BASIC_FEATURES, |
226 | .flags = PHY_HAS_INTERRUPT, | 226 | .flags = PHY_HAS_INTERRUPT, |
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 262e7a3c23cb..f2d01cb6f958 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c | |||
@@ -1321,6 +1321,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) | |||
1321 | dev->net->ethtool_ops = &smsc95xx_ethtool_ops; | 1321 | dev->net->ethtool_ops = &smsc95xx_ethtool_ops; |
1322 | dev->net->flags |= IFF_MULTICAST; | 1322 | dev->net->flags |= IFF_MULTICAST; |
1323 | dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; | 1323 | dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; |
1324 | dev->net->min_mtu = ETH_MIN_MTU; | ||
1325 | dev->net->max_mtu = ETH_DATA_LEN; | ||
1324 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; | 1326 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; |
1325 | 1327 | ||
1326 | pdata->dev = dev; | 1328 | pdata->dev = dev; |
@@ -1598,6 +1600,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) | |||
1598 | return ret; | 1600 | return ret; |
1599 | } | 1601 | } |
1600 | 1602 | ||
1603 | cancel_delayed_work_sync(&pdata->carrier_check); | ||
1604 | |||
1601 | if (pdata->suspend_flags) { | 1605 | if (pdata->suspend_flags) { |
1602 | netdev_warn(dev->net, "error during last resume\n"); | 1606 | netdev_warn(dev->net, "error during last resume\n"); |
1603 | pdata->suspend_flags = 0; | 1607 | pdata->suspend_flags = 0; |
@@ -1840,6 +1844,11 @@ done: | |||
1840 | */ | 1844 | */ |
1841 | if (ret && PMSG_IS_AUTO(message)) | 1845 | if (ret && PMSG_IS_AUTO(message)) |
1842 | usbnet_resume(intf); | 1846 | usbnet_resume(intf); |
1847 | |||
1848 | if (ret) | ||
1849 | schedule_delayed_work(&pdata->carrier_check, | ||
1850 | CARRIER_CHECK_DELAY); | ||
1851 | |||
1843 | return ret; | 1852 | return ret; |
1844 | } | 1853 | } |
1845 | 1854 | ||