path: root/drivers/net/ethernet/intel/i40evf
author     Linus Torvalds <torvalds@linux-foundation.org>  2014-04-02 23:53:45 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-04-02 23:53:45 -0400
commit     cd6362befe4cc7bf589a5236d2a780af2d47bcc9 (patch)
tree       3bd4e13ec3f92a00dc4f6c3d65e820b54dbfe46e /drivers/net/ethernet/intel/i40evf
parent     0f1b1e6d73cb989ce2c071edc57deade3b084dfe (diff)
parent     b1586f099ba897542ece36e8a23c1a62907261ef (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Here is my initial pull request for the networking subsystem during this merge window:

  1) Support for ESN in AH (RFC 4302) from Fan Du.
  2) Add full kernel doc for ethtool command structures, from Ben Hutchings.
  3) Add BCM7xxx PHY driver, from Florian Fainelli.
  4) Export computed TCP rate information in netlink socket dumps, from Eric Dumazet.
  5) Allow IPSEC SA to be dumped partially using a filter, from Nicolas Dichtel.
  6) Convert many drivers to pci_enable_msix_range(), from Alexander Gordeev.
  7) Record SKB timestamps more efficiently, from Eric Dumazet.
  8) Switch to microsecond resolution for TCP round trip times, also from Eric Dumazet.
  9) Clean up and fix 6lowpan fragmentation handling by making use of the existing inet_frag API for its implementation.
  10) Add TX grant mapping to the xen-netback driver, from Zoltan Kiss.
  11) Auto-size SKB lengths when composing netlink messages based upon past message sizes used, from Eric Dumazet.
  12) qdisc dumps can take a long time, add a cond_resched(), from Eric Dumazet.
  13) Sanitize netpoll core and drivers wrt. SKB handling semantics. Get rid of never-used-in-tree netpoll RX handling. From Eric W Biederman.
  14) Support inter-address-family and namespace changing in the VTI tunnel driver(s). From Steffen Klassert.
  15) Add Altera TSE driver, from Vince Bridgers.
  16) Optimize csum_replace2() so that it doesn't adjust the checksum by checksumming the entire header, from Eric Dumazet.
  17) Expand the BPF internal implementation for faster interpreting, more direct translations into JIT'd code, and much cleaner uses of BPF filtering in non-socket contexts. From Daniel Borkmann and Alexei Starovoitov"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1976 commits)
  netpoll: Use skb_irq_freeable to make zap_completion_queue safe.
  net: Add a test to see if a skb is freeable in irq context
  qlcnic: Fix build failure due to undefined reference to `vxlan_get_rx_port'
  net: ptp: move PTP classifier in its own file
  net: sxgbe: make "core_ops" static
  net: sxgbe: fix logical vs bitwise operation
  net: sxgbe: sxgbe_mdio_register() frees the bus
  Call efx_set_channels() before efx->type->dimension_resources()
  xen-netback: disable rogue vif in kthread context
  net/mlx4: Set proper build dependancy with vxlan
  be2net: fix build dependency on VxLAN
  mac802154: make csma/cca parameters per-wpan
  mac802154: allow only one WPAN to be up at any given time
  net: filter: minor: fix kdoc in __sk_run_filter
  netlink: don't compare the nul-termination in nla_strcmp
  can: c_can: Avoid led toggling for every packet.
  can: c_can: Simplify TX interrupt cleanup
  can: c_can: Store dlc private
  can: c_can: Reduce register access
  can: c_can: Make the code readable
  ...
Diffstat (limited to 'drivers/net/ethernet/intel/i40evf')
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h      2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c        369
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_prototype.h       7
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c           90
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h           16
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h              48
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c      13
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c        299
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c     27
9 files changed, 697 insertions, 174 deletions
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index f7cea1bca38d..97662b6bd98a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1229,7 +1229,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
1229#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 1229#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
1230#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 1230#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
1231 1231
1232 __le32 tenant_id ; 1232 __le32 tenant_id;
1233 u8 reserved[4]; 1233 u8 reserved[4];
1234 __le16 queue_number; 1234 __le16 queue_number;
1235#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 1235#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 7b13953b28c4..ae084378faab 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -160,6 +160,372 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
160} 160}
161 161
162 162
163/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
164 * hardware to a bit-field that can be used by SW to more easily determine the
165 * packet type.
166 *
167 * Macros are used to shorten the table lines and make this table human
168 * readable.
169 *
170 * We store the PTYPE in the top byte of the bit field - this is just so that
171 * we can check that the table doesn't have a row missing, as the index into
172 * the table should be the PTYPE.
173 *
174 * Typical work flow:
175 *
176 * IF NOT i40evf_ptype_lookup[ptype].known
177 * THEN
178 * Packet is unknown
179 * ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
180 * Use the rest of the fields to look at the tunnels, inner protocols, etc
181 * ELSE
182 * Use the enum i40e_rx_l2_ptype to decode the packet type
183 * ENDIF
184 */
185
186/* macro to make the table lines short */
187#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
188 { PTYPE, \
189 1, \
190 I40E_RX_PTYPE_OUTER_##OUTER_IP, \
191 I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
192 I40E_RX_PTYPE_##OUTER_FRAG, \
193 I40E_RX_PTYPE_TUNNEL_##T, \
194 I40E_RX_PTYPE_TUNNEL_END_##TE, \
195 I40E_RX_PTYPE_##TEF, \
196 I40E_RX_PTYPE_INNER_PROT_##I, \
197 I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
198
199#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
200 { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
201
202/* shorter macros makes the table fit but are terse */
203#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
204#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
205#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
206
207/* Lookup table mapping the HW PTYPE to the bit field for decoding */
208struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
209 /* L2 Packet types */
210 I40E_PTT_UNUSED_ENTRY(0),
211 I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
212 I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
213 I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
214 I40E_PTT_UNUSED_ENTRY(4),
215 I40E_PTT_UNUSED_ENTRY(5),
216 I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
217 I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
218 I40E_PTT_UNUSED_ENTRY(8),
219 I40E_PTT_UNUSED_ENTRY(9),
220 I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
221 I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
222 I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
223 I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
224 I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
225 I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
226 I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
227 I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
228 I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
229 I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
230 I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
231 I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
232
233 /* Non Tunneled IPv4 */
234 I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
235 I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
236 I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
237 I40E_PTT_UNUSED_ENTRY(25),
238 I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
239 I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
240 I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
241
242 /* IPv4 --> IPv4 */
243 I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
244 I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
245 I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
246 I40E_PTT_UNUSED_ENTRY(32),
247 I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
248 I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
249 I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
250
251 /* IPv4 --> IPv6 */
252 I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
253 I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
254 I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
255 I40E_PTT_UNUSED_ENTRY(39),
256 I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
257 I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
258 I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
259
260 /* IPv4 --> GRE/NAT */
261 I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
262
263 /* IPv4 --> GRE/NAT --> IPv4 */
264 I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
265 I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
266 I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
267 I40E_PTT_UNUSED_ENTRY(47),
268 I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
269 I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
270 I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
271
272 /* IPv4 --> GRE/NAT --> IPv6 */
273 I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
274 I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
275 I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
276 I40E_PTT_UNUSED_ENTRY(54),
277 I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
278 I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
279 I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
280
281 /* IPv4 --> GRE/NAT --> MAC */
282 I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
283
284 /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
285 I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
286 I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
287 I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
288 I40E_PTT_UNUSED_ENTRY(62),
289 I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
290 I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
291 I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
292
293 /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
294 I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
295 I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
296 I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
297 I40E_PTT_UNUSED_ENTRY(69),
298 I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
299 I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
300 I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
301
302 /* IPv4 --> GRE/NAT --> MAC/VLAN */
303 I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
304
305 /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
306 I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
307 I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
308 I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
309 I40E_PTT_UNUSED_ENTRY(77),
310 I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
311 I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
312 I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
313
314 /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
315 I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
316 I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
317 I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
318 I40E_PTT_UNUSED_ENTRY(84),
319 I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
320 I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
321 I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
322
323 /* Non Tunneled IPv6 */
324 I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
325 I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
326 I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
327 I40E_PTT_UNUSED_ENTRY(91),
328 I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
329 I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
330 I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
331
332 /* IPv6 --> IPv4 */
333 I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
334 I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
335 I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
336 I40E_PTT_UNUSED_ENTRY(98),
337 I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
338 I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
339 I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
340
341 /* IPv6 --> IPv6 */
342 I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
343 I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
344 I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
345 I40E_PTT_UNUSED_ENTRY(105),
346 I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
347 I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
348 I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
349
350 /* IPv6 --> GRE/NAT */
351 I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
352
353 /* IPv6 --> GRE/NAT -> IPv4 */
354 I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
355 I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
356 I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
357 I40E_PTT_UNUSED_ENTRY(113),
358 I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
359 I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
360 I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
361
362 /* IPv6 --> GRE/NAT -> IPv6 */
363 I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
364 I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
365 I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
366 I40E_PTT_UNUSED_ENTRY(120),
367 I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
368 I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
369 I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
370
371 /* IPv6 --> GRE/NAT -> MAC */
372 I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
373
374 /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
375 I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
376 I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
377 I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
378 I40E_PTT_UNUSED_ENTRY(128),
379 I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
380 I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
381 I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
382
383 /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
384 I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
385 I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
386 I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
387 I40E_PTT_UNUSED_ENTRY(135),
388 I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
389 I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
390 I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
391
392 /* IPv6 --> GRE/NAT -> MAC/VLAN */
393 I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
394
395 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
396 I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
397 I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
398 I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
399 I40E_PTT_UNUSED_ENTRY(143),
400 I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
401 I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
402 I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
403
404 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
405 I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
406 I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
407 I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
408 I40E_PTT_UNUSED_ENTRY(150),
409 I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
410 I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
411 I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
412
413 /* unused entries */
414 I40E_PTT_UNUSED_ENTRY(154),
415 I40E_PTT_UNUSED_ENTRY(155),
416 I40E_PTT_UNUSED_ENTRY(156),
417 I40E_PTT_UNUSED_ENTRY(157),
418 I40E_PTT_UNUSED_ENTRY(158),
419 I40E_PTT_UNUSED_ENTRY(159),
420
421 I40E_PTT_UNUSED_ENTRY(160),
422 I40E_PTT_UNUSED_ENTRY(161),
423 I40E_PTT_UNUSED_ENTRY(162),
424 I40E_PTT_UNUSED_ENTRY(163),
425 I40E_PTT_UNUSED_ENTRY(164),
426 I40E_PTT_UNUSED_ENTRY(165),
427 I40E_PTT_UNUSED_ENTRY(166),
428 I40E_PTT_UNUSED_ENTRY(167),
429 I40E_PTT_UNUSED_ENTRY(168),
430 I40E_PTT_UNUSED_ENTRY(169),
431
432 I40E_PTT_UNUSED_ENTRY(170),
433 I40E_PTT_UNUSED_ENTRY(171),
434 I40E_PTT_UNUSED_ENTRY(172),
435 I40E_PTT_UNUSED_ENTRY(173),
436 I40E_PTT_UNUSED_ENTRY(174),
437 I40E_PTT_UNUSED_ENTRY(175),
438 I40E_PTT_UNUSED_ENTRY(176),
439 I40E_PTT_UNUSED_ENTRY(177),
440 I40E_PTT_UNUSED_ENTRY(178),
441 I40E_PTT_UNUSED_ENTRY(179),
442
443 I40E_PTT_UNUSED_ENTRY(180),
444 I40E_PTT_UNUSED_ENTRY(181),
445 I40E_PTT_UNUSED_ENTRY(182),
446 I40E_PTT_UNUSED_ENTRY(183),
447 I40E_PTT_UNUSED_ENTRY(184),
448 I40E_PTT_UNUSED_ENTRY(185),
449 I40E_PTT_UNUSED_ENTRY(186),
450 I40E_PTT_UNUSED_ENTRY(187),
451 I40E_PTT_UNUSED_ENTRY(188),
452 I40E_PTT_UNUSED_ENTRY(189),
453
454 I40E_PTT_UNUSED_ENTRY(190),
455 I40E_PTT_UNUSED_ENTRY(191),
456 I40E_PTT_UNUSED_ENTRY(192),
457 I40E_PTT_UNUSED_ENTRY(193),
458 I40E_PTT_UNUSED_ENTRY(194),
459 I40E_PTT_UNUSED_ENTRY(195),
460 I40E_PTT_UNUSED_ENTRY(196),
461 I40E_PTT_UNUSED_ENTRY(197),
462 I40E_PTT_UNUSED_ENTRY(198),
463 I40E_PTT_UNUSED_ENTRY(199),
464
465 I40E_PTT_UNUSED_ENTRY(200),
466 I40E_PTT_UNUSED_ENTRY(201),
467 I40E_PTT_UNUSED_ENTRY(202),
468 I40E_PTT_UNUSED_ENTRY(203),
469 I40E_PTT_UNUSED_ENTRY(204),
470 I40E_PTT_UNUSED_ENTRY(205),
471 I40E_PTT_UNUSED_ENTRY(206),
472 I40E_PTT_UNUSED_ENTRY(207),
473 I40E_PTT_UNUSED_ENTRY(208),
474 I40E_PTT_UNUSED_ENTRY(209),
475
476 I40E_PTT_UNUSED_ENTRY(210),
477 I40E_PTT_UNUSED_ENTRY(211),
478 I40E_PTT_UNUSED_ENTRY(212),
479 I40E_PTT_UNUSED_ENTRY(213),
480 I40E_PTT_UNUSED_ENTRY(214),
481 I40E_PTT_UNUSED_ENTRY(215),
482 I40E_PTT_UNUSED_ENTRY(216),
483 I40E_PTT_UNUSED_ENTRY(217),
484 I40E_PTT_UNUSED_ENTRY(218),
485 I40E_PTT_UNUSED_ENTRY(219),
486
487 I40E_PTT_UNUSED_ENTRY(220),
488 I40E_PTT_UNUSED_ENTRY(221),
489 I40E_PTT_UNUSED_ENTRY(222),
490 I40E_PTT_UNUSED_ENTRY(223),
491 I40E_PTT_UNUSED_ENTRY(224),
492 I40E_PTT_UNUSED_ENTRY(225),
493 I40E_PTT_UNUSED_ENTRY(226),
494 I40E_PTT_UNUSED_ENTRY(227),
495 I40E_PTT_UNUSED_ENTRY(228),
496 I40E_PTT_UNUSED_ENTRY(229),
497
498 I40E_PTT_UNUSED_ENTRY(230),
499 I40E_PTT_UNUSED_ENTRY(231),
500 I40E_PTT_UNUSED_ENTRY(232),
501 I40E_PTT_UNUSED_ENTRY(233),
502 I40E_PTT_UNUSED_ENTRY(234),
503 I40E_PTT_UNUSED_ENTRY(235),
504 I40E_PTT_UNUSED_ENTRY(236),
505 I40E_PTT_UNUSED_ENTRY(237),
506 I40E_PTT_UNUSED_ENTRY(238),
507 I40E_PTT_UNUSED_ENTRY(239),
508
509 I40E_PTT_UNUSED_ENTRY(240),
510 I40E_PTT_UNUSED_ENTRY(241),
511 I40E_PTT_UNUSED_ENTRY(242),
512 I40E_PTT_UNUSED_ENTRY(243),
513 I40E_PTT_UNUSED_ENTRY(244),
514 I40E_PTT_UNUSED_ENTRY(245),
515 I40E_PTT_UNUSED_ENTRY(246),
516 I40E_PTT_UNUSED_ENTRY(247),
517 I40E_PTT_UNUSED_ENTRY(248),
518 I40E_PTT_UNUSED_ENTRY(249),
519
520 I40E_PTT_UNUSED_ENTRY(250),
521 I40E_PTT_UNUSED_ENTRY(251),
522 I40E_PTT_UNUSED_ENTRY(252),
523 I40E_PTT_UNUSED_ENTRY(253),
524 I40E_PTT_UNUSED_ENTRY(254),
525 I40E_PTT_UNUSED_ENTRY(255)
526};
527
528
163/** 529/**
164 * i40e_aq_send_msg_to_pf 530 * i40e_aq_send_msg_to_pf
165 * @hw: pointer to the hardware structure 531 * @hw: pointer to the hardware structure
@@ -199,8 +565,7 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
199 details.async = true; 565 details.async = true;
200 cmd_details = &details; 566 cmd_details = &details;
201 } 567 }
202 status = i40evf_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg, 568 status = i40evf_asq_send_command(hw, &desc, msg, msglen, cmd_details);
203 msglen, cmd_details);
204 return status; 569 return status;
205} 570}
206 571
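
The i40evf_ptype_lookup comment above gives the decode flow only as pseudocode. The minimal C sketch below restates that flow; it is illustrative only and not part of this commit. It relies solely on decode_rx_desc_ptype() and the known/outer_ip/payload_layer fields of struct i40e_rx_ptype_decoded that are visible in this diff; the classify_rx_ptype() helper and its return strings are hypothetical.

#include "i40e_prototype.h"	/* decode_rx_desc_ptype(), added later in this diff */

/* Sketch of the "typical work flow" from the i40evf_ptype_lookup comment. */
static const char *classify_rx_ptype(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return "unknown";		/* row was I40E_PTT_UNUSED_ENTRY() */

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) {
		/* IP packet: the remaining fields describe tunnels and payload layer */
		if (decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
			return "IP, L4 payload";
		return "IP, L3 payload or below";
	}

	/* non-IP: fall back to the L2 ptype encoding */
	return "L2";
}

This is the same decision order i40e_ptype_to_hash() in i40e_txrx.c uses further down to pick a hash type for skb_set_hash().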
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 7841573a58c9..97ab8c2b76f8 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -63,6 +63,13 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
63 63
64i40e_status i40e_set_mac_type(struct i40e_hw *hw); 64i40e_status i40e_set_mac_type(struct i40e_hw *hw);
65 65
66extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[];
67
68static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
69{
70 return i40evf_ptype_lookup[ptype];
71}
72
66/* prototype for functions used for SW locks */ 73/* prototype for functions used for SW locks */
67 74
68/* i40e_common for VF drivers*/ 75/* i40e_common for VF drivers*/
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ffdb01d853db..53be5f44d015 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -24,6 +24,7 @@
24#include <linux/prefetch.h> 24#include <linux/prefetch.h>
25 25
26#include "i40evf.h" 26#include "i40evf.h"
27#include "i40e_prototype.h"
27 28
28static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, 29static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
29 u32 td_tag) 30 u32 td_tag)
@@ -169,6 +170,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
169} 170}
170 171
171/** 172/**
173 * i40e_get_head - Retrieve head from head writeback
174 * @tx_ring: tx ring to fetch head of
175 *
176 * Returns value of Tx ring head based on value stored
177 * in head write-back location
178 **/
179static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
180{
181 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
182
183 return le32_to_cpu(*(volatile __le32 *)head);
184}
185
186/**
172 * i40e_clean_tx_irq - Reclaim resources after transmit completes 187 * i40e_clean_tx_irq - Reclaim resources after transmit completes
173 * @tx_ring: tx ring to clean 188 * @tx_ring: tx ring to clean
174 * @budget: how many cleans we're allowed 189 * @budget: how many cleans we're allowed
@@ -179,6 +194,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
179{ 194{
180 u16 i = tx_ring->next_to_clean; 195 u16 i = tx_ring->next_to_clean;
181 struct i40e_tx_buffer *tx_buf; 196 struct i40e_tx_buffer *tx_buf;
197 struct i40e_tx_desc *tx_head;
182 struct i40e_tx_desc *tx_desc; 198 struct i40e_tx_desc *tx_desc;
183 unsigned int total_packets = 0; 199 unsigned int total_packets = 0;
184 unsigned int total_bytes = 0; 200 unsigned int total_bytes = 0;
@@ -187,6 +203,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
187 tx_desc = I40E_TX_DESC(tx_ring, i); 203 tx_desc = I40E_TX_DESC(tx_ring, i);
188 i -= tx_ring->count; 204 i -= tx_ring->count;
189 205
206 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
207
190 do { 208 do {
191 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 209 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
192 210
@@ -197,9 +215,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
197 /* prevent any other reads prior to eop_desc */ 215 /* prevent any other reads prior to eop_desc */
198 read_barrier_depends(); 216 read_barrier_depends();
199 217
200 /* if the descriptor isn't done, no work yet to do */ 218 /* we have caught up to head, no work left to do */
201 if (!(eop_desc->cmd_type_offset_bsz & 219 if (tx_head == tx_desc)
202 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
203 break; 220 break;
204 221
205 /* clear next_to_watch to prevent false hangs */ 222 /* clear next_to_watch to prevent false hangs */
@@ -431,6 +448,10 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
431 448
432 /* round up to nearest 4K */ 449 /* round up to nearest 4K */
433 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); 450 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
451 /* add u32 for head writeback, align after this takes care of
452 * guaranteeing this is at least one cache line in size
453 */
454 tx_ring->size += sizeof(u32);
434 tx_ring->size = ALIGN(tx_ring->size, 4096); 455 tx_ring->size = ALIGN(tx_ring->size, 4096);
435 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 456 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
436 &tx_ring->dma, GFP_KERNEL); 457 &tx_ring->dma, GFP_KERNEL);
@@ -722,7 +743,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
722 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) 743 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
723 return; 744 return;
724 745
725 /* likely incorrect csum if alternate IP extention headers found */ 746 /* likely incorrect csum if alternate IP extension headers found */
726 if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) 747 if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
727 return; 748 return;
728 749
@@ -786,6 +807,29 @@ static inline u32 i40e_rx_hash(struct i40e_ring *ring,
786} 807}
787 808
788/** 809/**
810 * i40e_ptype_to_hash - get a hash type
811 * @ptype: the ptype value from the descriptor
812 *
813 * Returns a hash type to be used by skb_set_hash
814 **/
815static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
816{
817 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
818
819 if (!decoded.known)
820 return PKT_HASH_TYPE_NONE;
821
822 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
823 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
824 return PKT_HASH_TYPE_L4;
825 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
826 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
827 return PKT_HASH_TYPE_L3;
828 else
829 return PKT_HASH_TYPE_L2;
830}
831
832/**
789 * i40e_clean_rx_irq - Reclaim resources after receive completes 833 * i40e_clean_rx_irq - Reclaim resources after receive completes
790 * @rx_ring: rx ring to clean 834 * @rx_ring: rx ring to clean
791 * @budget: how many cleans we're allowed 835 * @budget: how many cleans we're allowed
@@ -802,13 +846,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
802 u16 i = rx_ring->next_to_clean; 846 u16 i = rx_ring->next_to_clean;
803 union i40e_rx_desc *rx_desc; 847 union i40e_rx_desc *rx_desc;
804 u32 rx_error, rx_status; 848 u32 rx_error, rx_status;
849 u8 rx_ptype;
805 u64 qword; 850 u64 qword;
806 u16 rx_ptype;
807 851
808 rx_desc = I40E_RX_DESC(rx_ring, i); 852 rx_desc = I40E_RX_DESC(rx_ring, i);
809 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 853 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
810 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) 854 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
811 >> I40E_RXD_QW1_STATUS_SHIFT; 855 I40E_RXD_QW1_STATUS_SHIFT;
812 856
813 while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { 857 while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
814 union i40e_rx_desc *next_rxd; 858 union i40e_rx_desc *next_rxd;
@@ -912,7 +956,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
912 goto next_desc; 956 goto next_desc;
913 } 957 }
914 958
915 skb->rxhash = i40e_rx_hash(rx_ring, rx_desc); 959 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
960 i40e_ptype_to_hash(rx_ptype));
916 /* probably a little skewed due to removing CRC */ 961 /* probably a little skewed due to removing CRC */
917 total_rx_bytes += skb->len; 962 total_rx_bytes += skb->len;
918 total_rx_packets++; 963 total_rx_packets++;
@@ -1241,7 +1286,8 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1241 struct i40e_tx_context_desc *context_desc; 1286 struct i40e_tx_context_desc *context_desc;
1242 int i = tx_ring->next_to_use; 1287 int i = tx_ring->next_to_use;
1243 1288
1244 if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) 1289 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
1290 !cd_tunneling && !cd_l2tag2)
1245 return; 1291 return;
1246 1292
1247 /* grab the next descriptor */ 1293 /* grab the next descriptor */
@@ -1352,9 +1398,23 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
1352 tx_bi = &tx_ring->tx_bi[i]; 1398 tx_bi = &tx_ring->tx_bi[i];
1353 } 1399 }
1354 1400
1355 tx_desc->cmd_type_offset_bsz = 1401 /* Place RS bit on last descriptor of any packet that spans across the
1356 build_ctob(td_cmd, td_offset, size, td_tag) | 1402 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
1357 cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT); 1403 */
1404#define WB_STRIDE 0x3
1405 if (((i & WB_STRIDE) != WB_STRIDE) &&
1406 (first <= &tx_ring->tx_bi[i]) &&
1407 (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
1408 tx_desc->cmd_type_offset_bsz =
1409 build_ctob(td_cmd, td_offset, size, td_tag) |
1410 cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
1411 I40E_TXD_QW1_CMD_SHIFT);
1412 } else {
1413 tx_desc->cmd_type_offset_bsz =
1414 build_ctob(td_cmd, td_offset, size, td_tag) |
1415 cpu_to_le64((u64)I40E_TXD_CMD <<
1416 I40E_TXD_QW1_CMD_SHIFT);
1417 }
1358 1418
1359 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, 1419 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
1360 tx_ring->queue_index), 1420 tx_ring->queue_index),
@@ -1457,7 +1517,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
1457 1517
1458 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, 1518 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
1459 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, 1519 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
1460 * + 2 desc gap to keep tail from touching head, 1520 * + 4 desc gap to avoid the cache line where head is,
1461 * + 1 desc for context descriptor, 1521 * + 1 desc for context descriptor,
1462 * otherwise try next time 1522 * otherwise try next time
1463 */ 1523 */
@@ -1468,7 +1528,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
1468 count += skb_shinfo(skb)->nr_frags; 1528 count += skb_shinfo(skb)->nr_frags;
1469#endif 1529#endif
1470 count += TXD_USE_COUNT(skb_headlen(skb)); 1530 count += TXD_USE_COUNT(skb_headlen(skb));
1471 if (i40e_maybe_stop_tx(tx_ring, count + 3)) { 1531 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
1472 tx_ring->tx_stats.tx_busy++; 1532 tx_ring->tx_stats.tx_busy++;
1473 return 0; 1533 return 0;
1474 } 1534 }
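
The Tx changes above switch descriptor cleanup from per-descriptor DONE bits to head write-back: the ring allocation reserves one extra u32 directly after the last descriptor, the hardware writes its current head index there, and i40e_clean_tx_irq() stops once it catches up to that index. A small userspace toy, not driver code, shows the same size and offset arithmetic as i40evf_setup_tx_descriptors() and i40e_get_head(); the 16-byte stand-in descriptor, the ALIGN_UP macro, and the ring size of 512 are assumptions for illustration.

#include <stdio.h>
#include <stdint.h>

/* 16-byte stand-in for struct i40e_tx_desc; only its size matters here. */
struct toy_tx_desc {
	uint64_t buffer_addr;
	uint64_t cmd_type_offset_bsz;
};

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	unsigned int count = 512;	/* hypothetical descriptor count */
	size_t size;

	/* same math as i40evf_setup_tx_descriptors() after this patch:
	 * all descriptors, plus one u32 head write-back slot, rounded up to 4K
	 */
	size = count * sizeof(struct toy_tx_desc);
	size += sizeof(uint32_t);
	size = ALIGN_UP(size, 4096);

	/* i40e_get_head() reads the u32 at desc[count], i.e. right past the ring */
	printf("ring allocation: %zu bytes\n", size);
	printf("head write-back slot offset: %zu bytes\n",
	       count * sizeof(struct toy_tx_desc));
	return 0;
}

The same mechanism explains the new transmit budget check (count + 4 + 1): four descriptors of gap keep the tail away from the cache line holding the write-back slot, plus one descriptor for the context descriptor, as the updated comment in i40e_xmit_descriptor_count() states.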
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 3bffac06592f..4673b3381edd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -64,8 +64,6 @@
64struct i40e_hw; 64struct i40e_hw;
65typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); 65typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
66 66
67#define ETH_ALEN 6
68
69/* Data type manipulation macros. */ 67/* Data type manipulation macros. */
70 68
71#define I40E_DESC_UNUSED(R) \ 69#define I40E_DESC_UNUSED(R) \
@@ -90,6 +88,7 @@ enum i40e_debug_mask {
90 I40E_DEBUG_FLOW = 0x00000200, 88 I40E_DEBUG_FLOW = 0x00000200,
91 I40E_DEBUG_DCB = 0x00000400, 89 I40E_DEBUG_DCB = 0x00000400,
92 I40E_DEBUG_DIAG = 0x00000800, 90 I40E_DEBUG_DIAG = 0x00000800,
91 I40E_DEBUG_FD = 0x00001000,
93 92
94 I40E_DEBUG_AQ_MESSAGE = 0x01000000, 93 I40E_DEBUG_AQ_MESSAGE = 0x01000000,
95 I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, 94 I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
@@ -466,6 +465,10 @@ union i40e_32byte_rx_desc {
466 union { 465 union {
467 __le32 rss; /* RSS Hash */ 466 __le32 rss; /* RSS Hash */
468 __le32 fcoe_param; /* FCoE DDP Context id */ 467 __le32 fcoe_param; /* FCoE DDP Context id */
468 /* Flow director filter id in case of
469 * Programming status desc WB
470 */
471 __le32 fd_id;
469 } hi_dword; 472 } hi_dword;
470 } qword0; 473 } qword0;
471 struct { 474 struct {
@@ -706,7 +709,7 @@ enum i40e_rx_prog_status_desc_prog_id_masks {
706enum i40e_rx_prog_status_desc_error_bits { 709enum i40e_rx_prog_status_desc_error_bits {
707 /* Note: These are predefined bit offsets */ 710 /* Note: These are predefined bit offsets */
708 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, 711 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
709 I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1, 712 I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
710 I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, 713 I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
711 I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 714 I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
712}; 715};
@@ -1018,6 +1021,11 @@ struct i40e_hw_port_stats {
1018 u64 tx_size_big; /* ptc9522 */ 1021 u64 tx_size_big; /* ptc9522 */
1019 u64 mac_short_packet_dropped; /* mspdc */ 1022 u64 mac_short_packet_dropped; /* mspdc */
1020 u64 checksum_error; /* xec */ 1023 u64 checksum_error; /* xec */
1024 /* EEE LPI */
1025 bool tx_lpi_status;
1026 bool rx_lpi_status;
1027 u64 tx_lpi_count; /* etlpic */
1028 u64 rx_lpi_count; /* erlpic */
1021}; 1029};
1022 1030
1023/* Checksum and Shadow RAM pointers */ 1031/* Checksum and Shadow RAM pointers */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index ff6529b288a1..807807d62387 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -38,8 +38,6 @@
38#include <linux/ipv6.h> 38#include <linux/ipv6.h>
39#include <net/ip6_checksum.h> 39#include <net/ip6_checksum.h>
40#include <net/udp.h> 40#include <net/udp.h>
41#include <linux/sctp.h>
42
43 41
44#include "i40e_type.h" 42#include "i40e_type.h"
45#include "i40e_virtchnl.h" 43#include "i40e_virtchnl.h"
@@ -164,15 +162,14 @@ struct i40evf_vlan_filter {
164/* Driver state. The order of these is important! */ 162/* Driver state. The order of these is important! */
165enum i40evf_state_t { 163enum i40evf_state_t {
166 __I40EVF_STARTUP, /* driver loaded, probe complete */ 164 __I40EVF_STARTUP, /* driver loaded, probe complete */
167 __I40EVF_FAILED, /* PF communication failed. Fatal. */
168 __I40EVF_REMOVE, /* driver is being unloaded */ 165 __I40EVF_REMOVE, /* driver is being unloaded */
169 __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */ 166 __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
170 __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ 167 __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
171 __I40EVF_INIT_SW, /* got resources, setting up structs */ 168 __I40EVF_INIT_SW, /* got resources, setting up structs */
169 __I40EVF_RESETTING, /* in reset */
172 /* Below here, watchdog is running */ 170 /* Below here, watchdog is running */
173 __I40EVF_DOWN, /* ready, can be opened */ 171 __I40EVF_DOWN, /* ready, can be opened */
174 __I40EVF_TESTING, /* in ethtool self-test */ 172 __I40EVF_TESTING, /* in ethtool self-test */
175 __I40EVF_RESETTING, /* in reset */
176 __I40EVF_RUNNING, /* opened, working */ 173 __I40EVF_RUNNING, /* opened, working */
177}; 174};
178 175
@@ -185,47 +182,25 @@ enum i40evf_critical_section_t {
185/* board specific private data structure */ 182/* board specific private data structure */
186struct i40evf_adapter { 183struct i40evf_adapter {
187 struct timer_list watchdog_timer; 184 struct timer_list watchdog_timer;
188 struct vlan_group *vlgrp;
189 struct work_struct reset_task; 185 struct work_struct reset_task;
190 struct work_struct adminq_task; 186 struct work_struct adminq_task;
191 struct delayed_work init_task; 187 struct delayed_work init_task;
192 struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 188 struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
193 struct list_head vlan_filter_list; 189 struct list_head vlan_filter_list;
194 char name[MAX_MSIX_COUNT][IFNAMSIZ + 9]; 190 char misc_vector_name[IFNAMSIZ + 9];
195
196 /* Interrupt Throttle Rate */
197 u32 itr_setting;
198 u16 eitr_low;
199 u16 eitr_high;
200 191
201 /* TX */ 192 /* TX */
202 struct i40e_ring *tx_rings[I40E_MAX_VSI_QP]; 193 struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
203 u64 restart_queue;
204 u64 hw_csum_tx_good;
205 u64 lsc_int;
206 u64 hw_tso_ctxt;
207 u64 hw_tso6_ctxt;
208 u32 tx_timeout_count; 194 u32 tx_timeout_count;
209 struct list_head mac_filter_list; 195 struct list_head mac_filter_list;
210#ifdef DEBUG
211 bool detect_tx_hung;
212#endif /* DEBUG */
213 196
214 /* RX */ 197 /* RX */
215 struct i40e_ring *rx_rings[I40E_MAX_VSI_QP]; 198 struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
216 int txd_count;
217 int rxd_count;
218 u64 hw_csum_rx_error; 199 u64 hw_csum_rx_error;
219 u64 hw_rx_no_dma_resources;
220 u64 hw_csum_rx_good;
221 u64 non_eop_descs;
222 int num_msix_vectors; 200 int num_msix_vectors;
223 struct msix_entry *msix_entries; 201 struct msix_entry *msix_entries;
224 202
225 u64 rx_hdr_split; 203 u32 flags;
226
227 u32 init_state;
228 volatile unsigned long flags;
229#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1) 204#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1)
230#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1) 205#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
231#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2) 206#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
@@ -234,6 +209,9 @@ struct i40evf_adapter {
234#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5) 209#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5)
235#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6) 210#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6)
236#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7) 211#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
212#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8)
213#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9)
214#define I40EVF_FLAG_RESET_NEEDED (u32)(1 << 10)
237/* duplcates for common code */ 215/* duplcates for common code */
238#define I40E_FLAG_FDIR_ATR_ENABLED 0 216#define I40E_FLAG_FDIR_ATR_ENABLED 0
239#define I40E_FLAG_DCB_ENABLED 0 217#define I40E_FLAG_DCB_ENABLED 0
@@ -251,21 +229,19 @@ struct i40evf_adapter {
251#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6) 229#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
252#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7) 230#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
253#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8) 231#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
232
254 /* OS defined structs */ 233 /* OS defined structs */
255 struct net_device *netdev; 234 struct net_device *netdev;
256 struct pci_dev *pdev; 235 struct pci_dev *pdev;
257 struct net_device_stats net_stats; 236 struct net_device_stats net_stats;
258 237
259 /* structs defined in i40e_vf.h */ 238 struct i40e_hw hw; /* defined in i40e_type.h */
260 struct i40e_hw hw;
261 239
262 enum i40evf_state_t state; 240 enum i40evf_state_t state;
263 volatile unsigned long crit_section; 241 volatile unsigned long crit_section;
264 u64 tx_busy;
265 242
266 struct work_struct watchdog_task; 243 struct work_struct watchdog_task;
267 bool netdev_registered; 244 bool netdev_registered;
268 bool dev_closed;
269 bool link_up; 245 bool link_up;
270 enum i40e_virtchnl_ops current_op; 246 enum i40e_virtchnl_ops current_op;
271 struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */ 247 struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
@@ -276,11 +252,6 @@ struct i40evf_adapter {
276 u32 aq_wait_count; 252 u32 aq_wait_count;
277}; 253};
278 254
279struct i40evf_info {
280 enum i40e_mac_type mac;
281 unsigned int flags;
282};
283
284 255
285/* needed by i40evf_ethtool.c */ 256/* needed by i40evf_ethtool.c */
286extern char i40evf_driver_name[]; 257extern char i40evf_driver_name[];
@@ -315,6 +286,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter);
315void i40evf_del_vlans(struct i40evf_adapter *adapter); 286void i40evf_del_vlans(struct i40evf_adapter *adapter);
316void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags); 287void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
317void i40evf_request_stats(struct i40evf_adapter *adapter); 288void i40evf_request_stats(struct i40evf_adapter *adapter);
289void i40evf_request_reset(struct i40evf_adapter *adapter);
318void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, 290void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
319 enum i40e_virtchnl_ops v_opcode, 291 enum i40e_virtchnl_ops v_opcode,
320 i40e_status v_retval, u8 *msg, u16 msglen); 292 i40e_status v_retval, u8 *msg, u16 msglen);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index b0b1f4bf5ac0..8b0db1ce179c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -241,6 +241,7 @@ static int i40evf_set_ringparam(struct net_device *netdev,
241{ 241{
242 struct i40evf_adapter *adapter = netdev_priv(netdev); 242 struct i40evf_adapter *adapter = netdev_priv(netdev);
243 u32 new_rx_count, new_tx_count; 243 u32 new_rx_count, new_tx_count;
244 int i;
244 245
245 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 246 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
246 return -EINVAL; 247 return -EINVAL;
@@ -256,12 +257,14 @@ static int i40evf_set_ringparam(struct net_device *netdev,
256 new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); 257 new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
257 258
258 /* if nothing to do return success */ 259 /* if nothing to do return success */
259 if ((new_tx_count == adapter->txd_count) && 260 if ((new_tx_count == adapter->tx_rings[0]->count) &&
260 (new_rx_count == adapter->rxd_count)) 261 (new_rx_count == adapter->rx_rings[0]->count))
261 return 0; 262 return 0;
262 263
263 adapter->txd_count = new_tx_count; 264 for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
264 adapter->rxd_count = new_rx_count; 265 adapter->tx_rings[0]->count = new_tx_count;
266 adapter->rx_rings[0]->count = new_rx_count;
267 }
265 268
266 if (netif_running(netdev)) 269 if (netif_running(netdev))
267 i40evf_reinit_locked(adapter); 270 i40evf_reinit_locked(adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index f5caf4419243..e35e66ffa782 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -31,10 +31,10 @@ char i40evf_driver_name[] = "i40evf";
31static const char i40evf_driver_string[] = 31static const char i40evf_driver_string[] =
32 "Intel(R) XL710 X710 Virtual Function Network Driver"; 32 "Intel(R) XL710 X710 Virtual Function Network Driver";
33 33
34#define DRV_VERSION "0.9.11" 34#define DRV_VERSION "0.9.16"
35const char i40evf_driver_version[] = DRV_VERSION; 35const char i40evf_driver_version[] = DRV_VERSION;
36static const char i40evf_copyright[] = 36static const char i40evf_copyright[] =
37 "Copyright (c) 2013 Intel Corporation."; 37 "Copyright (c) 2013 - 2014 Intel Corporation.";
38 38
39/* i40evf_pci_tbl - PCI Device ID Table 39/* i40evf_pci_tbl - PCI Device ID Table
40 * 40 *
@@ -167,9 +167,11 @@ static void i40evf_tx_timeout(struct net_device *netdev)
167 struct i40evf_adapter *adapter = netdev_priv(netdev); 167 struct i40evf_adapter *adapter = netdev_priv(netdev);
168 168
169 adapter->tx_timeout_count++; 169 adapter->tx_timeout_count++;
170 170 dev_info(&adapter->pdev->dev, "TX timeout detected.\n");
171 /* Do the reset outside of interrupt context */ 171 if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
172 schedule_work(&adapter->reset_task); 172 adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
173 schedule_work(&adapter->reset_task);
174 }
173} 175}
174 176
175/** 177/**
@@ -211,6 +213,9 @@ static void i40evf_irq_disable(struct i40evf_adapter *adapter)
211 int i; 213 int i;
212 struct i40e_hw *hw = &adapter->hw; 214 struct i40e_hw *hw = &adapter->hw;
213 215
216 if (!adapter->msix_entries)
217 return;
218
214 for (i = 1; i < adapter->num_msix_vectors; i++) { 219 for (i = 1; i < adapter->num_msix_vectors; i++) {
215 wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0); 220 wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
216 synchronize_irq(adapter->msix_entries[i].vector); 221 synchronize_irq(adapter->msix_entries[i].vector);
@@ -511,12 +516,14 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
511 struct net_device *netdev = adapter->netdev; 516 struct net_device *netdev = adapter->netdev;
512 int err; 517 int err;
513 518
514 sprintf(adapter->name[0], "i40evf:mbx"); 519 sprintf(adapter->misc_vector_name, "i40evf:mbx");
515 err = request_irq(adapter->msix_entries[0].vector, 520 err = request_irq(adapter->msix_entries[0].vector,
516 &i40evf_msix_aq, 0, adapter->name[0], netdev); 521 &i40evf_msix_aq, 0,
522 adapter->misc_vector_name, netdev);
517 if (err) { 523 if (err) {
518 dev_err(&adapter->pdev->dev, 524 dev_err(&adapter->pdev->dev,
519 "request_irq for msix_aq failed: %d\n", err); 525 "request_irq for %s failed: %d\n",
526 adapter->misc_vector_name, err);
520 free_irq(adapter->msix_entries[0].vector, netdev); 527 free_irq(adapter->msix_entries[0].vector, netdev);
521 } 528 }
522 return err; 529 return err;
@@ -963,16 +970,23 @@ void i40evf_down(struct i40evf_adapter *adapter)
963 struct net_device *netdev = adapter->netdev; 970 struct net_device *netdev = adapter->netdev;
964 struct i40evf_mac_filter *f; 971 struct i40evf_mac_filter *f;
965 972
966 /* remove all MAC filters from the VSI */ 973 /* remove all MAC filters */
967 list_for_each_entry(f, &adapter->mac_filter_list, list) { 974 list_for_each_entry(f, &adapter->mac_filter_list, list) {
968 f->remove = true; 975 f->remove = true;
969 } 976 }
970 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; 977 /* remove all VLAN filters */
971 /* disable receives */ 978 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
972 adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; 979 f->remove = true;
973 mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); 980 }
974 msleep(20); 981 if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
975 982 adapter->state != __I40EVF_RESETTING) {
983 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
984 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
985 /* disable receives */
986 adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
987 mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
988 msleep(20);
989 }
976 netif_tx_disable(netdev); 990 netif_tx_disable(netdev);
977 991
978 netif_tx_stop_all_queues(netdev); 992 netif_tx_stop_all_queues(netdev);
@@ -1124,8 +1138,8 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
1124 * than CPU's. So let's be conservative and only ask for 1138 * than CPU's. So let's be conservative and only ask for
1125 * (roughly) twice the number of vectors as there are CPU's. 1139 * (roughly) twice the number of vectors as there are CPU's.
1126 */ 1140 */
1127 v_budget = min(pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS; 1141 v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
1128 v_budget = min(v_budget, (int)adapter->vf_res->max_vectors + 1); 1142 v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
1129 1143
1130 /* A failure in MSI-X entry allocation isn't fatal, but it does 1144 /* A failure in MSI-X entry allocation isn't fatal, but it does
1131 * mean we disable MSI-X capabilities of the adapter. 1145 * mean we disable MSI-X capabilities of the adapter.
@@ -1291,19 +1305,47 @@ static void i40evf_watchdog_task(struct work_struct *work)
1291 watchdog_task); 1305 watchdog_task);
1292 struct i40e_hw *hw = &adapter->hw; 1306 struct i40e_hw *hw = &adapter->hw;
1293 1307
1294 if (adapter->state < __I40EVF_DOWN) 1308 if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
1309 goto restart_watchdog;
1310
1311 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
1312 dev_info(&adapter->pdev->dev, "Checking for redemption\n");
1313 if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
1314 /* A chance for redemption! */
1315 dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
1316 adapter->state = __I40EVF_STARTUP;
1317 adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
1318 schedule_delayed_work(&adapter->init_task, 10);
1319 clear_bit(__I40EVF_IN_CRITICAL_TASK,
1320 &adapter->crit_section);
1321 /* Don't reschedule the watchdog, since we've restarted
1322 * the init task. When init_task contacts the PF and
1323 * gets everything set up again, it'll restart the
1324 * watchdog for us. Down, boy. Sit. Stay. Woof.
1325 */
1326 return;
1327 }
1328 adapter->aq_pending = 0;
1329 adapter->aq_required = 0;
1330 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
1295 goto watchdog_done; 1331 goto watchdog_done;
1332 }
1296 1333
1297 if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) 1334 if ((adapter->state < __I40EVF_DOWN) ||
1335 (adapter->flags & I40EVF_FLAG_RESET_PENDING))
1298 goto watchdog_done; 1336 goto watchdog_done;
1299 1337
1300 /* check for unannounced reset */ 1338 /* check for reset */
1301 if ((adapter->state != __I40EVF_RESETTING) && 1339 if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
1302 (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) { 1340 (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
1303 adapter->state = __I40EVF_RESETTING; 1341 adapter->state = __I40EVF_RESETTING;
1342 adapter->flags |= I40EVF_FLAG_RESET_PENDING;
1343 dev_err(&adapter->pdev->dev, "Hardware reset detected.\n");
1344 dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
1304 schedule_work(&adapter->reset_task); 1345 schedule_work(&adapter->reset_task);
1305 dev_info(&adapter->pdev->dev, "%s: hardware reset detected\n", 1346 adapter->aq_pending = 0;
1306 __func__); 1347 adapter->aq_required = 0;
1348 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
1307 goto watchdog_done; 1349 goto watchdog_done;
1308 } 1350 }
1309 1351
@@ -1358,16 +1400,25 @@ static void i40evf_watchdog_task(struct work_struct *work)
1358 1400
1359 i40evf_irq_enable(adapter, true); 1401 i40evf_irq_enable(adapter, true);
1360 i40evf_fire_sw_int(adapter, 0xFF); 1402 i40evf_fire_sw_int(adapter, 0xFF);
1403
1361watchdog_done: 1404watchdog_done:
1405 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1406restart_watchdog:
1362 if (adapter->aq_required) 1407 if (adapter->aq_required)
1363 mod_timer(&adapter->watchdog_timer, 1408 mod_timer(&adapter->watchdog_timer,
1364 jiffies + msecs_to_jiffies(20)); 1409 jiffies + msecs_to_jiffies(20));
1365 else 1410 else
1366 mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2)); 1411 mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
1367 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1368 schedule_work(&adapter->adminq_task); 1412 schedule_work(&adapter->adminq_task);
1369} 1413}
1370 1414
1415static int next_queue(struct i40evf_adapter *adapter, int j)
1416{
1417 j += 1;
1418
1419 return j >= adapter->vsi_res->num_queue_pairs ? 0 : j;
1420}
1421
1371/** 1422/**
1372 * i40evf_configure_rss - Prepare for RSS if used 1423 * i40evf_configure_rss - Prepare for RSS if used
1373 * @adapter: board private structure 1424 * @adapter: board private structure
@@ -1398,19 +1449,19 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
         wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
 
         /* Populate the LUT with max no. of queues in round robin fashion */
-        for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
-                if (j == adapter->vsi_res->num_queue_pairs)
-                        j = 0;
-                /* lut = 4-byte sliding window of 4 lut entries */
-                lut = (lut << 8) | (j &
-                        ((0x1 << 8) - 1));
-                /* On i = 3, we have 4 entries in lut; write to the register */
-                if ((i & 3) == 3)
-                        wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
+        j = adapter->vsi_res->num_queue_pairs;
+        for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+                lut = next_queue(adapter, j);
+                lut |= next_queue(adapter, j) << 8;
+                lut |= next_queue(adapter, j) << 16;
+                lut |= next_queue(adapter, j) << 24;
+                wr32(hw, I40E_VFQF_HLUT(i), lut);
         }
         i40e_flush(hw);
 }
 
+#define I40EVF_RESET_WAIT_MS 100
+#define I40EVF_RESET_WAIT_COUNT 200
 /**
  * i40evf_reset_task - Call-back task to handle hardware reset
  * @work: pointer to work_struct
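The new LUT loop above fills each 32-bit VFQF_HLUT register with four one-byte queue indices, cycling round robin over the VF's queue pairs via next_queue(). A small stand-alone sketch of that intended round-robin packing, with HLUT_REGS and printf() standing in for the real register count and wr32() (illustrative only, not the driver code):

#include <stdint.h>
#include <stdio.h>

#define HLUT_REGS 16            /* illustrative count, not I40E_VFQF_HLUT_MAX_INDEX */

/* Advance to the next queue, wrapping at num_queues (same idea as next_queue()). */
static unsigned int next_q(unsigned int cur, unsigned int num_queues)
{
        return (cur + 1 >= num_queues) ? 0 : cur + 1;
}

int main(void)
{
        unsigned int num_queues = 3;            /* e.g. a VF with three queue pairs */
        unsigned int q = num_queues - 1;        /* so the first entry wraps to queue 0 */

        for (int i = 0; i < HLUT_REGS; i++) {
                uint32_t lut = 0;

                /* Four one-byte LUT entries per 32-bit register, round robin. */
                for (int shift = 0; shift < 32; shift += 8) {
                        q = next_q(q, num_queues);
                        lut |= (uint32_t)q << shift;
                }
                printf("HLUT[%2d] = 0x%08x\n", i, (unsigned)lut);      /* wr32() stand-in */
        }
        return 0;
}
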
@@ -1421,8 +1472,9 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
  **/
 static void i40evf_reset_task(struct work_struct *work)
 {
-        struct i40evf_adapter *adapter =
-                container_of(work, struct i40evf_adapter, reset_task);
+        struct i40evf_adapter *adapter = container_of(work,
+                                                      struct i40evf_adapter,
+                                                      reset_task);
         struct i40e_hw *hw = &adapter->hw;
         int i = 0, err;
         uint32_t rstat_val;
@@ -1431,21 +1483,61 @@ static void i40evf_reset_task(struct work_struct *work)
                                 &adapter->crit_section))
                 udelay(500);
 
-        /* wait until the reset is complete */
-        for (i = 0; i < 20; i++) {
+        if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
+                dev_info(&adapter->pdev->dev, "Requesting reset from PF\n");
+                i40evf_request_reset(adapter);
+        }
+
+        /* poll until we see the reset actually happen */
+        for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
                 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
                             I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-                if (rstat_val == I40E_VFR_COMPLETED)
+                if (rstat_val != I40E_VFR_VFACTIVE) {
+                        dev_info(&adapter->pdev->dev, "Reset now occurring\n");
                         break;
-                else
-                        mdelay(100);
+                } else {
+                        msleep(I40EVF_RESET_WAIT_MS);
+                }
+        }
+        if (i == I40EVF_RESET_WAIT_COUNT) {
+                dev_err(&adapter->pdev->dev, "Reset was not detected\n");
+                adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+                goto continue_reset; /* act like the reset happened */
+        }
+
+        /* wait until the reset is complete and the PF is responding to us */
+        for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
+                rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
+                            I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+                if (rstat_val == I40E_VFR_VFACTIVE) {
+                        dev_info(&adapter->pdev->dev, "Reset is complete. Reinitializing.\n");
+                        break;
+                } else {
+                        msleep(I40EVF_RESET_WAIT_MS);
+                }
         }
-        if (i == 20) {
+        if (i == I40EVF_RESET_WAIT_COUNT) {
                 /* reset never finished */
-                dev_info(&adapter->pdev->dev, "%s: reset never finished: %x\n",
-                         __func__, rstat_val);
-                /* carry on anyway */
+                dev_err(&adapter->pdev->dev, "Reset never finished (%x). PF driver is dead, and so am I.\n",
+                        rstat_val);
+                adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
+
+                if (netif_running(adapter->netdev))
+                        i40evf_close(adapter->netdev);
+
+                i40evf_free_misc_irq(adapter);
+                i40evf_reset_interrupt_capability(adapter);
+                i40evf_free_queues(adapter);
+                kfree(adapter->vf_res);
+                i40evf_shutdown_adminq(hw);
+                adapter->netdev->flags &= ~IFF_UP;
+                clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+                return; /* Do not attempt to reinit. It's dead, Jim. */
         }
+
+continue_reset:
+        adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+
         i40evf_down(adapter);
         adapter->state = __I40EVF_RESETTING;
 
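The reset task now polls in two phases: first it waits (bounded by I40EVF_RESET_WAIT_COUNT) for VFGEN_RSTAT to leave VFACTIVE, confirming the reset really started, then it waits for the register to return to VFACTIVE before reinitializing. A compact user-space sketch of that two-phase poll, with a fake register standing in for rd32() and the msleep() calls omitted (assumed names, not driver code):

#include <stdbool.h>
#include <stdio.h>

#define RESET_WAIT_COUNT 200
#define VFR_VFACTIVE 2u

/* Fake device: leaves VFACTIVE after a few polls, comes back a bit later. */
static unsigned int polls;
static unsigned int fake_read_rstat(void)
{
        polls++;
        if (polls < 3)
                return VFR_VFACTIVE;    /* reset not started yet */
        if (polls < 10)
                return 0;               /* reset in progress */
        return VFR_VFACTIVE;            /* reset finished */
}

static bool poll_until(bool want_active)
{
        for (int i = 0; i < RESET_WAIT_COUNT; i++) {
                bool active = fake_read_rstat() == VFR_VFACTIVE;
                if (active == want_active)
                        return true;
                /* a real driver sleeps here, e.g. msleep(I40EVF_RESET_WAIT_MS) */
        }
        return false;
}

int main(void)
{
        if (!poll_until(false))
                puts("reset was never detected; carrying on as if it happened");
        if (!poll_until(true)) {
                puts("reset never finished; giving up");
                return 1;
        }
        puts("reset complete, reinitializing");
        return 0;
}
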
@@ -1505,6 +1597,9 @@ static void i40evf_adminq_task(struct work_struct *work)
         i40e_status ret;
         u16 pending;
 
+        if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+                return;
+
         event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
         event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
         if (!event.msg_buf) {
@@ -1636,6 +1731,10 @@ static int i40evf_open(struct net_device *netdev)
         struct i40evf_adapter *adapter = netdev_priv(netdev);
         int err;
 
+        if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+                dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
+                return -EIO;
+        }
         if (adapter->state != __I40EVF_DOWN)
                 return -EBUSY;
 
@@ -1690,8 +1789,12 @@ static int i40evf_close(struct net_device *netdev)
 {
         struct i40evf_adapter *adapter = netdev_priv(netdev);
 
+        if (adapter->state <= __I40EVF_DOWN)
+                return 0;
+
         /* signal that we are down to the interrupt handler */
         adapter->state = __I40EVF_DOWN;
+
         set_bit(__I40E_DOWN, &adapter->vsi.state);
 
         i40evf_down(adapter);
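The open/close changes above guard the state transitions: open is refused while PF communication is marked failed or while the device is not fully down, and close becomes a no-op if the device is already down. A toy sketch of those guards (hypothetical vf_open()/vf_close() helpers, not the driver's functions):

#include <errno.h>
#include <stdio.h>

enum vf_state { VF_DOWN, VF_RESETTING, VF_RUNNING };

struct vf {
        enum vf_state state;
        int pf_comms_failed;
};

static int vf_open(struct vf *vf)
{
        if (vf->pf_comms_failed)
                return -EIO;            /* PF driver is unreachable */
        if (vf->state != VF_DOWN)
                return -EBUSY;          /* already open or mid-reset */
        vf->state = VF_RUNNING;
        return 0;
}

static int vf_close(struct vf *vf)
{
        if (vf->state <= VF_DOWN)
                return 0;               /* closing twice is harmless */
        vf->state = VF_DOWN;
        return 0;
}

int main(void)
{
        struct vf vf = { .state = VF_DOWN };

        printf("open:  %d\n", vf_open(&vf));    /* 0 */
        printf("open:  %d\n", vf_open(&vf));    /* -EBUSY */
        printf("close: %d\n", vf_close(&vf));   /* 0 */
        printf("close: %d\n", vf_close(&vf));   /* 0, no-op */
        return 0;
}
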
@@ -1842,16 +1945,18 @@ static void i40evf_init_task(struct work_struct *work)
         switch (adapter->state) {
         case __I40EVF_STARTUP:
                 /* driver loaded, probe complete */
+                adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+                adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
                 err = i40e_set_mac_type(hw);
                 if (err) {
-                        dev_info(&pdev->dev, "%s: set_mac_type failed: %d\n",
-                                 __func__, err);
+                        dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
+                                err);
                         goto err;
                 }
                 err = i40evf_check_reset_complete(hw);
                 if (err) {
-                        dev_info(&pdev->dev, "%s: device is still in reset (%d).\n",
-                                 __func__, err);
+                        dev_err(&pdev->dev, "Device is still in reset (%d)\n",
+                                err);
                         goto err;
                 }
                 hw->aq.num_arq_entries = I40EVF_AQ_LEN;
@@ -1861,14 +1966,13 @@ static void i40evf_init_task(struct work_struct *work)
 
                 err = i40evf_init_adminq(hw);
                 if (err) {
-                        dev_info(&pdev->dev, "%s: init_adminq failed: %d\n",
-                                 __func__, err);
+                        dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
+                                err);
                         goto err;
                 }
                 err = i40evf_send_api_ver(adapter);
                 if (err) {
-                        dev_info(&pdev->dev, "%s: unable to send to PF (%d)\n",
-                                 __func__, err);
+                        dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
                         i40evf_shutdown_adminq(hw);
                         goto err;
                 }
@@ -1876,19 +1980,21 @@ static void i40evf_init_task(struct work_struct *work)
                 goto restart;
                 break;
         case __I40EVF_INIT_VERSION_CHECK:
-                if (!i40evf_asq_done(hw))
+                if (!i40evf_asq_done(hw)) {
+                        dev_err(&pdev->dev, "Admin queue command never completed.\n");
                         goto err;
+                }
 
                 /* aq msg sent, awaiting reply */
                 err = i40evf_verify_api_ver(adapter);
                 if (err) {
-                        dev_err(&pdev->dev, "Unable to verify API version, error %d\n",
+                        dev_err(&pdev->dev, "Unable to verify API version (%d)\n",
                                 err);
                         goto err;
                 }
                 err = i40evf_send_vf_config_msg(adapter);
                 if (err) {
-                        dev_err(&pdev->dev, "Unable send config request, error %d\n",
+                        dev_err(&pdev->dev, "Unable send config request (%d)\n",
                                 err);
                         goto err;
                 }
@@ -1902,18 +2008,15 @@ static void i40evf_init_task(struct work_struct *work)
                                 (I40E_MAX_VF_VSI *
                                  sizeof(struct i40e_virtchnl_vsi_resource));
                         adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
-                        if (!adapter->vf_res) {
-                                dev_err(&pdev->dev, "%s: unable to allocate memory\n",
-                                        __func__);
+                        if (!adapter->vf_res)
                                 goto err;
-                        }
                 }
                 err = i40evf_get_vf_config(adapter);
                 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
                         goto restart;
                 if (err) {
-                        dev_info(&pdev->dev, "%s: unable to get VF config (%d)\n",
-                                 __func__, err);
+                        dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
+                                err);
                         goto err_alloc;
                 }
                 adapter->state = __I40EVF_INIT_SW;
@@ -1927,25 +2030,23 @@ static void i40evf_init_task(struct work_struct *work)
                 adapter->vsi_res = &adapter->vf_res->vsi_res[i];
         }
         if (!adapter->vsi_res) {
-                dev_info(&pdev->dev, "%s: no LAN VSI found\n", __func__);
+                dev_err(&pdev->dev, "No LAN VSI found\n");
                 goto err_alloc;
         }
 
         adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
 
-        adapter->txd_count = I40EVF_DEFAULT_TXD;
-        adapter->rxd_count = I40EVF_DEFAULT_RXD;
-
         netdev->netdev_ops = &i40evf_netdev_ops;
         i40evf_set_ethtool_ops(netdev);
         netdev->watchdog_timeo = 5 * HZ;
-
-        netdev->features |= NETIF_F_SG |
+        netdev->features |= NETIF_F_HIGHDMA |
+                            NETIF_F_SG |
                             NETIF_F_IP_CSUM |
                             NETIF_F_SCTP_CSUM |
                             NETIF_F_IPV6_CSUM |
                             NETIF_F_TSO |
                             NETIF_F_TSO6 |
+                            NETIF_F_RXCSUM |
                             NETIF_F_GRO;
 
         if (adapter->vf_res->vf_offload_flags
@@ -1956,11 +2057,13 @@ static void i40evf_init_task(struct work_struct *work)
                         NETIF_F_HW_VLAN_CTAG_FILTER;
         }
 
-        /* The HW MAC address was set and/or determined in sw_init */
+        /* copy netdev features into list of user selectable features */
+        netdev->hw_features |= netdev->features;
+        netdev->hw_features &= ~NETIF_F_RXCSUM;
+
         if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
-                dev_info(&pdev->dev,
-                         "Invalid MAC address %pMAC, using random\n",
-                         adapter->hw.mac.addr);
+                dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n",
+                         adapter->hw.mac.addr);
                 random_ether_addr(adapter->hw.mac.addr);
         }
         memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
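The hunk above advertises every enabled feature as user-toggleable via hw_features, except RX checksum offload, which stays under driver control. A tiny sketch of that mask derivation with made-up feature bits (not the kernel's NETIF_F_* values):

#include <stdio.h>

/* Illustrative feature bits, not the kernel's NETIF_F_* values. */
#define F_SG      (1u << 0)
#define F_TSO     (1u << 1)
#define F_RXCSUM  (1u << 2)
#define F_GRO     (1u << 3)

int main(void)
{
        unsigned int features = F_SG | F_TSO | F_RXCSUM | F_GRO;

        /* Everything the device enables is user-toggleable... */
        unsigned int hw_features = features;
        /* ...except RX checksumming, which stays fixed here. */
        hw_features &= ~F_RXCSUM;

        printf("features    = 0x%02x\n", features);
        printf("hw_features = 0x%02x\n", hw_features);
        return 0;
}
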
@@ -1994,8 +2097,6 @@ static void i40evf_init_task(struct work_struct *work)
 
         netif_carrier_off(netdev);
 
-        strcpy(netdev->name, "eth%d");
-
         adapter->vsi.id = adapter->vsi_res->vsi_id;
         adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
         adapter->vsi.back = adapter;
@@ -2005,9 +2106,11 @@ static void i40evf_init_task(struct work_struct *work)
         adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
         adapter->vsi.netdev = adapter->netdev;
 
-        err = register_netdev(netdev);
-        if (err)
-                goto err_register;
+        if (!adapter->netdev_registered) {
+                err = register_netdev(netdev);
+                if (err)
+                        goto err_register;
+        }
 
         adapter->netdev_registered = true;
 
@@ -2031,7 +2134,6 @@ err_register:
         i40evf_free_misc_irq(adapter);
 err_sw_init:
         i40evf_reset_interrupt_capability(adapter);
-        adapter->state = __I40EVF_FAILED;
 err_alloc:
         kfree(adapter->vf_res);
         adapter->vf_res = NULL;
@@ -2039,9 +2141,7 @@ err:
         /* Things went into the weeds, so try again later */
         if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
                 dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n");
-                if (hw->aq.asq.count)
-                        i40evf_shutdown_adminq(hw); /* ignore error */
-                adapter->state = __I40EVF_FAILED;
+                adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
                 return; /* do not reschedule */
         }
         schedule_delayed_work(&adapter->init_task, HZ * 3);
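The error path above retries the init task every three seconds and gives up after I40EVF_AQ_MAX_ERR consecutive failures, marking PF communications as failed so later paths can bail out early. A stand-alone sketch of that bounded-retry pattern (init_step() and reschedule_init() are made-up stand-ins for the init work and schedule_delayed_work()):

#include <stdbool.h>
#include <stdio.h>

#define AQ_MAX_ERR 5            /* illustrative limit, like I40EVF_AQ_MAX_ERR */

struct vf {
        int aq_wait_count;
        bool pf_comms_failed;
};

/* Stand-in for the init step that talks to the PF; always fails here. */
static bool init_step(struct vf *vf) { (void)vf; return false; }

/* Stand-in for schedule_delayed_work(&init_task, HZ * 3). */
static void reschedule_init(void) { puts("rescheduling init in 3s"); }

static void init_task(struct vf *vf)
{
        if (init_step(vf))
                return;                         /* success: nothing more to do */

        if (++vf->aq_wait_count > AQ_MAX_ERR) {
                puts("failed to communicate with PF; giving up");
                vf->pf_comms_failed = true;     /* later paths check this flag */
                return;                         /* do not reschedule */
        }
        reschedule_init();
}

int main(void)
{
        struct vf vf = { 0 };

        for (int i = 0; i < AQ_MAX_ERR + 2 && !vf.pf_comms_failed; i++)
                init_task(&vf);
        return 0;
}
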
@@ -2084,26 +2184,20 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         struct net_device *netdev;
         struct i40evf_adapter *adapter = NULL;
         struct i40e_hw *hw = NULL;
-        int err, pci_using_dac;
+        int err;
 
         err = pci_enable_device(pdev);
         if (err)
                 return err;
 
-        if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-                pci_using_dac = true;
-                /* coherent mask for the same size will always succeed if
-                 * dma_set_mask does
-                 */
-                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-        } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
-                pci_using_dac = false;
-                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-        } else {
-                dev_err(&pdev->dev, "%s: DMA configuration failed: %d\n",
-                        __func__, err);
-                err = -EIO;
-                goto err_dma;
+        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+        if (err) {
+                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+                if (err) {
+                        dev_err(&pdev->dev,
+                                "DMA configuration failed: 0x%x\n", err);
+                        goto err_dma;
+                }
         }
 
         err = pci_request_regions(pdev, i40evf_driver_name);
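The probe path now asks for a 64-bit DMA mask first and falls back to 32-bit, aborting only if both are rejected, which is what the switch to dma_set_mask_and_coherent() enables. A user-space sketch of that fallback control flow, with the hypothetical dma_try_mask() standing in for the kernel call on a platform that only supports 32-bit DMA:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for dma_set_mask_and_coherent(); pretend the platform caps DMA at 32 bits. */
static int dma_try_mask(uint64_t mask)
{
        return mask <= UINT32_MAX ? 0 : -5 /* -EIO */;
}

static int probe_dma(void)
{
        int err = dma_try_mask(~0ULL);          /* DMA_BIT_MASK(64) */
        if (err) {
                err = dma_try_mask(UINT32_MAX); /* DMA_BIT_MASK(32) fallback */
                if (err) {
                        fprintf(stderr, "DMA configuration failed: %d\n", err);
                        return err;             /* abort the probe */
                }
        }
        return 0;
}

int main(void)
{
        printf("probe_dma() = %d\n", probe_dma());
        return 0;
}
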
@@ -2128,8 +2222,6 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
         pci_set_drvdata(pdev, netdev);
         adapter = netdev_priv(netdev);
-        if (pci_using_dac)
-                netdev->features |= NETIF_F_HIGHDMA;
 
         adapter->netdev = netdev;
         adapter->pdev = pdev;
@@ -2271,6 +2363,7 @@ static void i40evf_remove(struct pci_dev *pdev)
         struct i40e_hw *hw = &adapter->hw;
 
         cancel_delayed_work_sync(&adapter->init_task);
+        cancel_work_sync(&adapter->reset_task);
 
         if (adapter->netdev_registered) {
                 unregister_netdev(netdev);
@@ -2278,17 +2371,15 @@ static void i40evf_remove(struct pci_dev *pdev)
         }
         adapter->state = __I40EVF_REMOVE;
 
-        if (adapter->num_msix_vectors) {
+        if (adapter->msix_entries) {
                 i40evf_misc_irq_disable(adapter);
-                del_timer_sync(&adapter->watchdog_timer);
-
-                flush_scheduled_work();
-
                 i40evf_free_misc_irq(adapter);
-
                 i40evf_reset_interrupt_capability(adapter);
         }
 
+        del_timer_sync(&adapter->watchdog_timer);
+        flush_scheduled_work();
+
         if (hw->aq.asq.count)
                 i40evf_shutdown_adminq(hw);
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index e6978d79e62b..e294f012647d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -43,6 +43,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
         struct i40e_hw *hw = &adapter->hw;
         i40e_status err;
 
+        if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+                return 0; /* nothing to see here, move along */
+
         err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
         if (err)
                 dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
@@ -651,6 +654,18 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
         /* if the request failed, don't lock out others */
         adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
 }
+/**
+ * i40evf_request_reset
+ * @adapter: adapter structure
+ *
+ * Request that the PF reset this VF. No response is expected.
+ **/
+void i40evf_request_reset(struct i40evf_adapter *adapter)
+{
+        /* Don't check CURRENT_OP - this is always higher priority */
+        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
+        adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
 
 /**
  * i40evf_virtchnl_completion
@@ -689,10 +704,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                 }
                 break;
         case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
-                adapter->state = __I40EVF_RESETTING;
-                schedule_work(&adapter->reset_task);
-                dev_info(&adapter->pdev->dev,
-                         "%s: hardware reset pending\n", __func__);
+                dev_info(&adapter->pdev->dev, "PF reset warning received\n");
+                if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+                        adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+                        dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+                        schedule_work(&adapter->reset_task);
+                }
                 break;
         default:
                 dev_err(&adapter->pdev->dev,