Diffstat (limited to 'drivers/net/ethernet/sfc')
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c       | 319
-rw-r--r--  drivers/net/ethernet/sfc/ef10_regs.h  |   1
-rw-r--r--  drivers/net/ethernet/sfc/efx.h        | 105
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c    |   5
-rw-r--r--  drivers/net/ethernet/sfc/io.h         |   5
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h       | 120
-rw-r--r--  drivers/net/ethernet/sfc/mdio_10g.h   |  26
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h |  10
-rw-r--r--  drivers/net/ethernet/sfc/nic.c        |  73
-rw-r--r--  drivers/net/ethernet/sfc/nic.h        | 256
-rw-r--r--  drivers/net/ethernet/sfc/phy.h        |   8
-rw-r--r--  drivers/net/ethernet/sfc/rx.c         |  90
-rw-r--r--  drivers/net/ethernet/sfc/selftest.h   |  15
-rw-r--r--  drivers/net/ethernet/sfc/tx.c         | 426
14 files changed, 1049 insertions(+), 410 deletions(-)
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 21f9ad6392e9..676c3c057bfb 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -285,6 +285,181 @@ static int efx_ef10_free_vis(struct efx_nic *efx)
         return rc;
 }
 
+#ifdef EFX_USE_PIO
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+        struct efx_ef10_nic_data *nic_data = efx->nic_data;
+        MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
+        unsigned int i;
+        int rc;
+
+        BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);
+
+        for (i = 0; i < nic_data->n_piobufs; i++) {
+                MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
+                               nic_data->piobuf_handle[i]);
+                rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
+                                  NULL, 0, NULL);
+                WARN_ON(rc);
+        }
+
+        nic_data->n_piobufs = 0;
+}
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+        struct efx_ef10_nic_data *nic_data = efx->nic_data;
+        MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
+        unsigned int i;
+        size_t outlen;
+        int rc = 0;
+
+        BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
+
+        for (i = 0; i < n; i++) {
+                rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
+                                  outbuf, sizeof(outbuf), &outlen);
+                if (rc)
+                        break;
+                if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
+                        rc = -EIO;
+                        break;
+                }
+                nic_data->piobuf_handle[i] =
+                        MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+                netif_dbg(efx, probe, efx->net_dev,
+                          "allocated PIO buffer %u handle %x\n", i,
+                          nic_data->piobuf_handle[i]);
+        }
+
+        nic_data->n_piobufs = i;
+        if (rc)
+                efx_ef10_free_piobufs(efx);
+        return rc;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+        struct efx_ef10_nic_data *nic_data = efx->nic_data;
+        MCDI_DECLARE_BUF(inbuf,
+                         max(MC_CMD_LINK_PIOBUF_IN_LEN,
+                             MC_CMD_UNLINK_PIOBUF_IN_LEN));
+        struct efx_channel *channel;
+        struct efx_tx_queue *tx_queue;
+        unsigned int offset, index;
+        int rc;
+
+        BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
+        BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
+
+        /* Link a buffer to each VI in the write-combining mapping */
+        for (index = 0; index < nic_data->n_piobufs; ++index) {
+                MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
+                               nic_data->piobuf_handle[index]);
+                MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
+                               nic_data->pio_write_vi_base + index);
+                rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+                                  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+                                  NULL, 0, NULL);
+                if (rc) {
+                        netif_err(efx, drv, efx->net_dev,
+                                  "failed to link VI %u to PIO buffer %u (%d)\n",
+                                  nic_data->pio_write_vi_base + index, index,
+                                  rc);
+                        goto fail;
+                }
+                netif_dbg(efx, probe, efx->net_dev,
+                          "linked VI %u to PIO buffer %u\n",
+                          nic_data->pio_write_vi_base + index, index);
+        }
+
+        /* Link a buffer to each TX queue */
+        efx_for_each_channel(channel, efx) {
+                efx_for_each_channel_tx_queue(tx_queue, channel) {
+                        /* We assign the PIO buffers to queues in
+                         * reverse order to allow for the following
+                         * special case.
+                         */
+                        offset = ((efx->tx_channel_offset + efx->n_tx_channels -
+                                   tx_queue->channel->channel - 1) *
+                                  efx_piobuf_size);
+                        index = offset / ER_DZ_TX_PIOBUF_SIZE;
+                        offset = offset % ER_DZ_TX_PIOBUF_SIZE;
+
+                        /* When the host page size is 4K, the first
+                         * host page in the WC mapping may be within
+                         * the same VI page as the last TX queue.  We
+                         * can only link one buffer to each VI.
+                         */
+                        if (tx_queue->queue == nic_data->pio_write_vi_base) {
+                                BUG_ON(index != 0);
+                                rc = 0;
+                        } else {
+                                MCDI_SET_DWORD(inbuf,
+                                               LINK_PIOBUF_IN_PIOBUF_HANDLE,
+                                               nic_data->piobuf_handle[index]);
+                                MCDI_SET_DWORD(inbuf,
+                                               LINK_PIOBUF_IN_TXQ_INSTANCE,
+                                               tx_queue->queue);
+                                rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+                                                  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+                                                  NULL, 0, NULL);
+                        }
+
+                        if (rc) {
+                                /* This is non-fatal; the TX path just
+                                 * won't use PIO for this queue
+                                 */
+                                netif_err(efx, drv, efx->net_dev,
+                                          "failed to link VI %u to PIO buffer %u (%d)\n",
+                                          tx_queue->queue, index, rc);
+                                tx_queue->piobuf = NULL;
+                        } else {
+                                tx_queue->piobuf =
+                                        nic_data->pio_write_base +
+                                        index * EFX_VI_PAGE_SIZE + offset;
+                                tx_queue->piobuf_offset = offset;
+                                netif_dbg(efx, probe, efx->net_dev,
+                                          "linked VI %u to PIO buffer %u offset %x addr %p\n",
+                                          tx_queue->queue, index,
+                                          tx_queue->piobuf_offset,
+                                          tx_queue->piobuf);
+                        }
+                }
+        }
+
+        return 0;
+
+fail:
+        while (index--) {
+                MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
+                               nic_data->pio_write_vi_base + index);
+                efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
+                             inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
+                             NULL, 0, NULL);
+        }
+        return rc;
+}
+
+#else /* !EFX_USE_PIO */
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+        return n == 0 ? 0 : -ENOBUFS;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+        return 0;
+}
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+}
+
+#endif /* EFX_USE_PIO */
+
 static void efx_ef10_remove(struct efx_nic *efx)
 {
         struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -295,9 +470,15 @@ static void efx_ef10_remove(struct efx_nic *efx)
         /* This needs to be after efx_ptp_remove_channel() with no filters */
         efx_ef10_rx_free_indir_table(efx);
 
+        if (nic_data->wc_membase)
+                iounmap(nic_data->wc_membase);
+
         rc = efx_ef10_free_vis(efx);
         WARN_ON(rc != 0);
 
+        if (!nic_data->must_restore_piobufs)
+                efx_ef10_free_piobufs(efx);
+
         efx_mcdi_fini(efx);
         efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
         kfree(nic_data);
@@ -330,12 +511,126 @@ static int efx_ef10_alloc_vis(struct efx_nic *efx,
         return 0;
 }
 
+/* Note that the failure path of this function does not free
+ * resources, as this will be done by efx_ef10_remove().
+ */
 static int efx_ef10_dimension_resources(struct efx_nic *efx)
 {
-        unsigned int n_vis =
-                max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+        struct efx_ef10_nic_data *nic_data = efx->nic_data;
+        unsigned int uc_mem_map_size, wc_mem_map_size;
+        unsigned int min_vis, pio_write_vi_base, max_vis;
+        void __iomem *membase;
+        int rc;
+
+        min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+#ifdef EFX_USE_PIO
+        /* Try to allocate PIO buffers if wanted and if the full
+         * number of PIO buffers would be sufficient to allocate one
+         * copy-buffer per TX channel.  Failure is non-fatal, as there
+         * are only a small number of PIO buffers shared between all
+         * functions of the controller.
+         */
+        if (efx_piobuf_size != 0 &&
+            ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
+            efx->n_tx_channels) {
+                unsigned int n_piobufs =
+                        DIV_ROUND_UP(efx->n_tx_channels,
+                                     ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
+
+                rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
+                if (rc)
+                        netif_err(efx, probe, efx->net_dev,
+                                  "failed to allocate PIO buffers (%d)\n", rc);
+                else
+                        netif_dbg(efx, probe, efx->net_dev,
+                                  "allocated %u PIO buffers\n", n_piobufs);
+        }
+#else
+        nic_data->n_piobufs = 0;
+#endif
 
-        return efx_ef10_alloc_vis(efx, n_vis, n_vis);
+        /* PIO buffers should be mapped with write-combining enabled,
+         * and we want to make single UC and WC mappings rather than
+         * several of each (in fact that's the only option if host
+         * page size is >4K).  So we may allocate some extra VIs just
+         * for writing PIO buffers through.
+         */
+        uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
+                                     ER_DZ_TX_PIOBUF);
+        if (nic_data->n_piobufs) {
+                pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
+                wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
+                                               nic_data->n_piobufs) *
+                                              EFX_VI_PAGE_SIZE) -
+                                   uc_mem_map_size);
+                max_vis = pio_write_vi_base + nic_data->n_piobufs;
+        } else {
+                pio_write_vi_base = 0;
+                wc_mem_map_size = 0;
+                max_vis = min_vis;
+        }
+
+        /* In case the last attached driver failed to free VIs, do it now */
+        rc = efx_ef10_free_vis(efx);
+        if (rc != 0)
+                return rc;
+
+        rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
+        if (rc != 0)
+                return rc;
+
+        /* If we didn't get enough VIs to map all the PIO buffers, free the
+         * PIO buffers
+         */
+        if (nic_data->n_piobufs &&
+            nic_data->n_allocated_vis <
+            pio_write_vi_base + nic_data->n_piobufs) {
+                netif_dbg(efx, probe, efx->net_dev,
+                          "%u VIs are not sufficient to map %u PIO buffers\n",
+                          nic_data->n_allocated_vis, nic_data->n_piobufs);
+                efx_ef10_free_piobufs(efx);
+        }
+
+        /* Shrink the original UC mapping of the memory BAR */
+        membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
+        if (!membase) {
+                netif_err(efx, probe, efx->net_dev,
+                          "could not shrink memory BAR to %x\n",
+                          uc_mem_map_size);
+                return -ENOMEM;
+        }
+        iounmap(efx->membase);
+        efx->membase = membase;
+
+        /* Set up the WC mapping if needed */
+        if (wc_mem_map_size) {
+                nic_data->wc_membase = ioremap_wc(efx->membase_phys +
+                                                  uc_mem_map_size,
+                                                  wc_mem_map_size);
+                if (!nic_data->wc_membase) {
+                        netif_err(efx, probe, efx->net_dev,
+                                  "could not allocate WC mapping of size %x\n",
+                                  wc_mem_map_size);
+                        return -ENOMEM;
+                }
+                nic_data->pio_write_vi_base = pio_write_vi_base;
+                nic_data->pio_write_base =
+                        nic_data->wc_membase +
+                        (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
+                         uc_mem_map_size);
+
+                rc = efx_ef10_link_piobufs(efx);
+                if (rc)
+                        efx_ef10_free_piobufs(efx);
+        }
+
+        netif_dbg(efx, probe, efx->net_dev,
+                  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
+                  &efx->membase_phys, efx->membase, uc_mem_map_size,
+                  nic_data->wc_membase, wc_mem_map_size);
+
+        return 0;
 }
 
 static int efx_ef10_init_nic(struct efx_nic *efx)
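To make the UC/WC sizing arithmetic above concrete, here is a worked example. All values are assumptions for illustration (4096-byte host pages, EFX_VI_PAGE_SIZE = 8192, ER_DZ_TX_PIOBUF = 4096 as the offset of the PIO aperture within a VI page, min_vis = 4, n_piobufs = 2); none of them are stated in this hunk.

/* uc_mem_map_size   = PAGE_ALIGN((4 - 1) * 8192 + 4096)  = 28672 (0x7000)
 * pio_write_vi_base = 28672 / 8192                        = 3
 * wc_mem_map_size   = PAGE_ALIGN((3 + 2) * 8192) - 28672  = 12288 (0x3000)
 * max_vis           = 3 + 2                               = 5
 *
 * The WC mapping then begins halfway through VI 3's page, and VI 3 is
 * also the last TX queue's VI: exactly the "first host page in the WC
 * mapping may be within the same VI page as the last TX queue" case
 * that efx_ef10_link_piobufs() special-cases.  Note also that here
 * pio_write_base works out to wc_membase + 0.
 */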
@@ -359,6 +654,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
                 nic_data->must_realloc_vis = false;
         }
 
+        if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
+                rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
+                if (rc == 0) {
+                        rc = efx_ef10_link_piobufs(efx);
+                        if (rc)
+                                efx_ef10_free_piobufs(efx);
+                }
+
+                /* Log an error on failure, but this is non-fatal */
+                if (rc)
+                        netif_err(efx, drv, efx->net_dev,
+                                  "failed to restore PIO buffers (%d)\n", rc);
+                nic_data->must_restore_piobufs = false;
+        }
+
         efx_ef10_rx_push_indir_table(efx);
         return 0;
 }
@@ -759,6 +1069,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
         /* All our allocations have been reset */
         nic_data->must_realloc_vis = true;
         nic_data->must_restore_filters = true;
+        nic_data->must_restore_piobufs = true;
         nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 
         /* The datapath firmware might have been changed */
@@ -2180,7 +2491,7 @@ out_unlock:
         return rc;
 }
 
-void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
+static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
 {
         /* no need to do anything here on EF10 */
 }
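To make the queue-to-buffer assignment in efx_ef10_link_piobufs() concrete, here is a small standalone sketch of the index/offset arithmetic. The constants are assumptions for illustration (2048-byte PIO buffers split into 256-byte copy regions, four TX channels starting at offset 0), not values taken from this patch.

#include <stdio.h>

int main(void)
{
        const unsigned int piobuf_size = 2048;  /* assumed ER_DZ_TX_PIOBUF_SIZE */
        const unsigned int copy_size = 256;     /* assumed efx_piobuf_size */
        const unsigned int n_tx_channels = 4, tx_channel_offset = 0;

        for (unsigned int ch = 0; ch < n_tx_channels; ch++) {
                /* Reverse order, as in efx_ef10_link_piobufs() */
                unsigned int offset = (tx_channel_offset + n_tx_channels -
                                       ch - 1) * copy_size;
                unsigned int index = offset / piobuf_size;

                offset %= piobuf_size;
                printf("channel %u -> PIO buffer %u, offset %#x\n",
                       ch, index, offset);
        }
        return 0;
}

With these numbers every channel lands in buffer 0, the last channel at offset 0; the reverse ordering keeps the lowest offsets on the highest-numbered VIs, which is what makes the 4K-page special case above work out.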
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index b3f4e3755fd9..207ac9a1e3de 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -315,6 +315,7 @@
 #define ESF_DZ_TX_PIO_TYPE_WIDTH 1
 #define ESF_DZ_TX_PIO_OPT_LBN 60
 #define ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_PIO 1
 #define ESF_DZ_TX_PIO_CONT_LBN 59
 #define ESF_DZ_TX_PIO_CONT_WIDTH 1
 #define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
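These fields describe the TX option descriptor that requests a PIO send. As a sketch of how they fit together (the companion TX-path patch does something close to this; ESF_DZ_TX_PIO_BUF_ADDR and the EFX_POPULATE_QWORD_5 helper are assumed from elsewhere in the driver, so treat the details as illustrative):

static void pio_option_desc_sketch(struct efx_tx_queue *tx_queue,
                                   struct efx_tx_buffer *buffer,
                                   const struct sk_buff *skb)
{
        EFX_POPULATE_QWORD_5(buffer->option,
                             ESF_DZ_TX_PIO_TYPE, 1,    /* option descriptor */
                             ESF_DZ_TX_PIO_OPT, ESE_DZ_TX_OPTION_DESC_PIO,
                             ESF_DZ_TX_PIO_CONT, 0,    /* whole packet */
                             ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
                             ESF_DZ_TX_PIO_BUF_ADDR,
                             tx_queue->piobuf_offset);
}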
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 34d00f5771fe..b8235ee5d7d7 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -18,37 +18,36 @@
 #define EFX_MEM_BAR 2
 
 /* TX */
-extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
-extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
-extern netdev_tx_t
-efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
-extern netdev_tx_t
-efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
-extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
-extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
+netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
+                                struct net_device *net_dev);
+netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+extern unsigned int efx_piobuf_size;
 
 /* RX */
-extern void efx_rx_config_page_split(struct efx_nic *efx);
-extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
-extern void efx_rx_slow_fill(unsigned long context);
-extern void __efx_rx_packet(struct efx_channel *channel);
-extern void efx_rx_packet(struct efx_rx_queue *rx_queue,
-                          unsigned int index, unsigned int n_frags,
-                          unsigned int len, u16 flags);
+void efx_rx_config_page_split(struct efx_nic *efx);
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+void efx_rx_slow_fill(unsigned long context);
+void __efx_rx_packet(struct efx_channel *channel);
+void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+                   unsigned int n_frags, unsigned int len, u16 flags);
 static inline void efx_rx_flush_packet(struct efx_channel *channel)
 {
         if (channel->rx_pkt_n_frags)
                 __efx_rx_packet(channel);
 }
-extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 
 #define EFX_MAX_DMAQ_SIZE 4096UL
 #define EFX_DEFAULT_DMAQ_SIZE 1024UL
@@ -162,9 +161,9 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
         return efx->type->filter_get_rx_ids(efx, priority, buf, size);
 }
 #ifdef CONFIG_RFS_ACCEL
-extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
-                          u16 rxq_index, u32 flow_id);
-extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+                   u16 rxq_index, u32 flow_id);
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
 static inline void efx_filter_rfs_expire(struct efx_channel *channel)
 {
         if (channel->rfs_filters_added >= 60 &&
@@ -176,50 +175,48 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel)
 static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
 #define efx_filter_rfs_enabled() 0
 #endif
-extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
 
 /* Channels */
-extern int efx_channel_dummy_op_int(struct efx_channel *channel);
-extern void efx_channel_dummy_op_void(struct efx_channel *channel);
-extern int
-efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
+int efx_channel_dummy_op_int(struct efx_channel *channel);
+void efx_channel_dummy_op_void(struct efx_channel *channel);
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
 
 /* Ports */
-extern int efx_reconfigure_port(struct efx_nic *efx);
-extern int __efx_reconfigure_port(struct efx_nic *efx);
+int efx_reconfigure_port(struct efx_nic *efx);
+int __efx_reconfigure_port(struct efx_nic *efx);
 
 /* Ethtool support */
 extern const struct ethtool_ops efx_ethtool_ops;
 
 /* Reset handling */
-extern int efx_reset(struct efx_nic *efx, enum reset_type method);
-extern void efx_reset_down(struct efx_nic *efx, enum reset_type method);
-extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
-extern int efx_try_recovery(struct efx_nic *efx);
+int efx_reset(struct efx_nic *efx, enum reset_type method);
+void efx_reset_down(struct efx_nic *efx, enum reset_type method);
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+int efx_try_recovery(struct efx_nic *efx);
 
 /* Global */
-extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
-extern int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
-                                   unsigned int rx_usecs, bool rx_adaptive,
-                                   bool rx_may_override_tx);
-extern void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
-                                   unsigned int *rx_usecs, bool *rx_adaptive);
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+                            unsigned int rx_usecs, bool rx_adaptive,
+                            bool rx_may_override_tx);
+void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+                            unsigned int *rx_usecs, bool *rx_adaptive);
 
 /* Dummy PHY ops for PHY drivers */
-extern int efx_port_dummy_op_int(struct efx_nic *efx);
-extern void efx_port_dummy_op_void(struct efx_nic *efx);
-
+int efx_port_dummy_op_int(struct efx_nic *efx);
+void efx_port_dummy_op_void(struct efx_nic *efx);
 
 /* MTD */
 #ifdef CONFIG_SFC_MTD
-extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
-                       size_t n_parts, size_t sizeof_part);
+int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+                size_t n_parts, size_t sizeof_part);
 static inline int efx_mtd_probe(struct efx_nic *efx)
 {
         return efx->type->mtd_probe(efx);
 }
-extern void efx_mtd_rename(struct efx_nic *efx);
-extern void efx_mtd_remove(struct efx_nic *efx);
+void efx_mtd_rename(struct efx_nic *efx);
+void efx_mtd_remove(struct efx_nic *efx);
 #else
 static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
 static inline void efx_mtd_rename(struct efx_nic *efx) {}
@@ -241,9 +238,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
         efx_schedule_channel(channel);
 }
 
-extern void efx_link_status_changed(struct efx_nic *efx);
-extern void efx_link_set_advertising(struct efx_nic *efx, u32);
-extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
+void efx_link_status_changed(struct efx_nic *efx);
+void efx_link_set_advertising(struct efx_nic *efx, u32);
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
 
 static inline void efx_device_detach_sync(struct efx_nic *efx)
 {
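The pattern in this file (and in mcdi.h, mdio_10g.h and nic.h below) is worth noting: extern is dropped from function prototypes, where it is redundant, but kept on object declarations such as efx_piobuf_size and efx_ethtool_ops, where removing it would turn the declaration into a tentative definition in every including translation unit. In miniature:

int efx_setup_tc(struct net_device *net_dev, u8 num_tc); /* implicitly extern */
extern unsigned int efx_piobuf_size;            /* extern required for objects */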
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 5b471cf5c323..1f529fa2edb1 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -70,6 +70,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
         EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
         EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
         EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
+        EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
         EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
         EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
         EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
@@ -1035,8 +1036,8 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
         return 0;
 }
 
-int efx_ethtool_get_ts_info(struct net_device *net_dev,
-                            struct ethtool_ts_info *ts_info)
+static int efx_ethtool_get_ts_info(struct net_device *net_dev,
+                                   struct ethtool_ts_info *ts_info)
 {
         struct efx_nic *efx = netdev_priv(net_dev);
 
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 96ce507d8602..4d3f119b67b3 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -66,6 +66,11 @@
 #define EFX_USE_QWORD_IO 1
 #endif
 
+/* PIO is a win only if write-combining is possible */
+#ifdef ARCH_HAS_IOREMAP_WC
+#define EFX_USE_PIO 1
+#endif
+
 #ifdef EFX_USE_QWORD_IO
 static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
                                unsigned int reg)
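Gating EFX_USE_PIO on ARCH_HAS_IOREMAP_WC means PIO is compiled in only where a write-combining mapping is available; with an uncached mapping, the per-byte CPU writes would make PIO slower than DMA. A minimal sketch of the access pattern (the function and the doorbell step are illustrative, not driver API):

/* Copy a packet into a write-combined PIO aperture.  The WC mapping lets
 * the CPU merge these stores into large bus bursts; the barrier orders
 * them before the subsequent TX doorbell write.
 */
static void pio_copy_sketch(void __iomem *piobuf, const void *data,
                            size_t len)
{
        memcpy_toio(piobuf, data, len);
        wmb();  /* order the WC writes before ringing the TX doorbell */
}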
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index c34d0d4e10ee..656a3277c2b2 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -108,38 +108,35 @@ static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
 }
 #endif
 
-extern int efx_mcdi_init(struct efx_nic *efx);
-extern void efx_mcdi_fini(struct efx_nic *efx);
+int efx_mcdi_init(struct efx_nic *efx);
+void efx_mcdi_fini(struct efx_nic *efx);
 
-extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
-                        const efx_dword_t *inbuf, size_t inlen,
-                        efx_dword_t *outbuf, size_t outlen,
-                        size_t *outlen_actual);
+int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
+                 size_t inlen, efx_dword_t *outbuf, size_t outlen,
+                 size_t *outlen_actual);
 
-extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
-                              const efx_dword_t *inbuf, size_t inlen);
-extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
-                               efx_dword_t *outbuf, size_t outlen,
-                               size_t *outlen_actual);
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+                       const efx_dword_t *inbuf, size_t inlen);
+int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+                        efx_dword_t *outbuf, size_t outlen,
+                        size_t *outlen_actual);
 
 typedef void efx_mcdi_async_completer(struct efx_nic *efx,
                                       unsigned long cookie, int rc,
                                       efx_dword_t *outbuf,
                                       size_t outlen_actual);
-extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
-                              const efx_dword_t *inbuf, size_t inlen,
-                              size_t outlen,
-                              efx_mcdi_async_completer *complete,
-                              unsigned long cookie);
+int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+                       const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+                       efx_mcdi_async_completer *complete,
+                       unsigned long cookie);
 
-extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
-extern void efx_mcdi_mode_poll(struct efx_nic *efx);
-extern void efx_mcdi_mode_event(struct efx_nic *efx);
-extern void efx_mcdi_flush_async(struct efx_nic *efx);
+int efx_mcdi_poll_reboot(struct efx_nic *efx);
+void efx_mcdi_mode_poll(struct efx_nic *efx);
+void efx_mcdi_mode_event(struct efx_nic *efx);
+void efx_mcdi_flush_async(struct efx_nic *efx);
 
-extern void efx_mcdi_process_event(struct efx_channel *channel,
-                                   efx_qword_t *event);
-extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
 
 /* We expect that 16- and 32-bit fields in MCDI requests and responses
  * are appropriately aligned, but 64-bit fields are only
@@ -275,55 +272,54 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
 #define MCDI_EVENT_FIELD(_ev, _field)                   \
         EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
 
-extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
-extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
-                                  u16 *fw_subtype_list, u32 *capabilities);
-extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
-                             u32 dest_evq);
-extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
-extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
-                               size_t *size_out, size_t *erase_size_out,
-                               bool *protected_out);
-extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
-extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
-extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
-extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
-                                         const u8 *mac, int *id_out);
-extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
-extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
-extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
-extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
-extern int efx_mcdi_port_probe(struct efx_nic *efx);
-extern void efx_mcdi_port_remove(struct efx_nic *efx);
-extern int efx_mcdi_port_reconfigure(struct efx_nic *efx);
-extern int efx_mcdi_port_get_number(struct efx_nic *efx);
-extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
-extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
-extern int efx_mcdi_set_mac(struct efx_nic *efx);
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
+int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+                           u16 *fw_subtype_list, u32 *capabilities);
+int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
+int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
+int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+                        size_t *size_out, size_t *erase_size_out,
+                        bool *protected_out);
+int efx_mcdi_nvram_test_all(struct efx_nic *efx);
+int efx_mcdi_handle_assertion(struct efx_nic *efx);
+void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
+                                  int *id_out);
+int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
+int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
+int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
+int efx_mcdi_flush_rxqs(struct efx_nic *efx);
+int efx_mcdi_port_probe(struct efx_nic *efx);
+void efx_mcdi_port_remove(struct efx_nic *efx);
+int efx_mcdi_port_reconfigure(struct efx_nic *efx);
+int efx_mcdi_port_get_number(struct efx_nic *efx);
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
+int efx_mcdi_set_mac(struct efx_nic *efx);
 #define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
-extern void efx_mcdi_mac_start_stats(struct efx_nic *efx);
-extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
-extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
-extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
-extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
-extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+void efx_mcdi_mac_start_stats(struct efx_nic *efx);
+void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
+enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
+int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
 
 #ifdef CONFIG_SFC_MCDI_MON
-extern int efx_mcdi_mon_probe(struct efx_nic *efx);
-extern void efx_mcdi_mon_remove(struct efx_nic *efx);
+int efx_mcdi_mon_probe(struct efx_nic *efx);
+void efx_mcdi_mon_remove(struct efx_nic *efx);
 #else
 static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
 static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
 #endif
 
 #ifdef CONFIG_SFC_MTD
-extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
-                             size_t len, size_t *retlen, u8 *buffer);
-extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
-extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
-                              size_t len, size_t *retlen, const u8 *buffer);
-extern int efx_mcdi_mtd_sync(struct mtd_info *mtd);
-extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
+int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
+                      size_t *retlen, u8 *buffer);
+int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
+int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len,
+                       size_t *retlen, const u8 *buffer);
+int efx_mcdi_mtd_sync(struct mtd_info *mtd);
+void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
 #endif
 
 #endif /* EFX_MCDI_H */
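For reference, the reflowed declarations above compose as follows. This is a hedged sketch of the asynchronous MCDI pattern, using MC_CMD_ALLOC_PIOBUF (which takes no input) purely as an example; the function names are invented for illustration.

static void alloc_piobuf_complete(struct efx_nic *efx, unsigned long cookie,
                                  int rc, efx_dword_t *outbuf,
                                  size_t outlen_actual)
{
        if (rc == 0 && outlen_actual >= MC_CMD_ALLOC_PIOBUF_OUT_LEN)
                netif_dbg(efx, hw, efx->net_dev, "PIO buffer handle %#x\n",
                          MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE));
}

static int alloc_piobuf_async(struct efx_nic *efx)
{
        return efx_mcdi_rpc_async(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
                                  MC_CMD_ALLOC_PIOBUF_OUT_LEN,
                                  alloc_piobuf_complete, 0 /* cookie */);
}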
diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h
index 16824fecc5ee..4a2dc4c281b7 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.h
+++ b/drivers/net/ethernet/sfc/mdio_10g.h
@@ -20,7 +20,7 @@
 
 static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
 static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
-extern unsigned efx_mdio_id_oui(u32 id);
+unsigned efx_mdio_id_oui(u32 id);
 
 static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr)
 {
@@ -56,7 +56,7 @@ static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
         return sync;
 }
 
-extern const char *efx_mdio_mmd_name(int mmd);
+const char *efx_mdio_mmd_name(int mmd);
 
 /*
  * Reset a specific MMD and wait for reset to clear.
@@ -64,30 +64,29 @@ extern const char *efx_mdio_mmd_name(int mmd);
  *
  * This function will sleep
  */
-extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
-                              int spins, int spintime);
+int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, int spins, int spintime);
 
 /* As efx_mdio_check_mmd but for multiple MMDs */
 int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Check the link status of specified mmds in bit mask */
-extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
+bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Generic transmit disable support though PMAPMD */
-extern void efx_mdio_transmit_disable(struct efx_nic *efx);
+void efx_mdio_transmit_disable(struct efx_nic *efx);
 
 /* Generic part of reconfigure: set/clear loopback bits */
-extern void efx_mdio_phy_reconfigure(struct efx_nic *efx);
+void efx_mdio_phy_reconfigure(struct efx_nic *efx);
 
 /* Set the power state of the specified MMDs */
-extern void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
-                                     int low_power, unsigned int mmd_mask);
+void efx_mdio_set_mmds_lpower(struct efx_nic *efx, int low_power,
+                              unsigned int mmd_mask);
 
 /* Set (some of) the PHY settings over MDIO */
-extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
+int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
 
 /* Push advertising flags and restart autonegotiation */
-extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
+void efx_mdio_an_reconfigure(struct efx_nic *efx);
 
 /* Get pause parameters from AN if available (otherwise return
  * requested pause parameters)
@@ -95,8 +94,7 @@ extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
 u8 efx_mdio_get_pause(struct efx_nic *efx);
 
 /* Wait for specified MMDs to exit reset within a timeout */
-extern int efx_mdio_wait_reset_mmds(struct efx_nic *efx,
-                                    unsigned int mmd_mask);
+int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Set or clear flag, debouncing */
 static inline void
@@ -107,6 +105,6 @@ efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
 }
 
 /* Liveness self-test for MDIO PHYs */
-extern int efx_mdio_test_alive(struct efx_nic *efx);
+int efx_mdio_test_alive(struct efx_nic *efx);
 
 #endif /* EFX_MDIO_10G_H */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b172ed133055..b14a717ac3e8 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -141,6 +141,8 @@ struct efx_special_buffer {
  * @len: Length of this fragment.
  *      This field is zero when the queue slot is empty.
  * @unmap_len: Length of this fragment to unmap
+ * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping.
+ *      Only valid if @unmap_len != 0.
  */
 struct efx_tx_buffer {
         union {
@@ -154,6 +156,7 @@ struct efx_tx_buffer {
         unsigned short flags;
         unsigned short len;
         unsigned short unmap_len;
+        unsigned short dma_offset;
 };
 #define EFX_TX_BUF_CONT         1       /* not last descriptor of packet */
 #define EFX_TX_BUF_SKB          2       /* buffer is last part of skb */
@@ -182,6 +185,9 @@ struct efx_tx_buffer {
  * @tsoh_page: Array of pages of TSO header buffers
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
+ * @piobuf: PIO buffer region for this TX queue (shared with its partner).
+ *      Size of the region is efx_piobuf_size.
+ * @piobuf_offset: Buffer offset to be specified in PIO descriptors
  * @initialised: Has hardware queue been initialised?
  * @read_count: Current read pointer.
  *      This is the number of buffers that have been removed from both rings.
@@ -209,6 +215,7 @@ struct efx_tx_buffer {
  *      blocks
  * @tso_packets: Number of packets via the TSO xmit path
  * @pushes: Number of times the TX push feature has been used
+ * @pio_packets: Number of times the TX PIO feature has been used
  * @empty_read_count: If the completion path has seen the queue as empty
  *      and the transmission path has not yet checked this, the value of
  *      @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -223,6 +230,8 @@ struct efx_tx_queue {
         struct efx_buffer *tsoh_page;
         struct efx_special_buffer txd;
         unsigned int ptr_mask;
+        void __iomem *piobuf;
+        unsigned int piobuf_offset;
         bool initialised;
 
         /* Members used mainly on the completion path */
@@ -238,6 +247,7 @@ struct efx_tx_queue {
         unsigned int tso_long_headers;
         unsigned int tso_packets;
         unsigned int pushes;
+        unsigned int pio_packets;
 
         /* Members shared between paths and sometimes updated */
         unsigned int empty_read_count ____cacheline_aligned_in_smp;
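The new @dma_offset field lets the completion path recover the address originally returned by the DMA API: the unmap address is @dma_addr minus @dma_offset. A sketch under that assumption (the real unmap code lives in the tx.c part of this diff and also distinguishes dma_unmap_page() from dma_unmap_single() by the buffer flags):

static void tx_buffer_unmap_sketch(struct device *dma_dev,
                                   struct efx_tx_buffer *buffer)
{
        if (buffer->unmap_len) {
                dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

                dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
                               DMA_TO_DEVICE);
                buffer->unmap_len = 0;
        }
}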
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 9826594c8a48..9c90bf56090f 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -19,6 +19,7 @@
 #include "bitfield.h"
 #include "efx.h"
 #include "nic.h"
+#include "ef10_regs.h"
 #include "farch_regs.h"
 #include "io.h"
 #include "workarounds.h"
@@ -166,26 +167,30 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
 
 /* Register dump */
 
-#define REGISTER_REVISION_A     1
-#define REGISTER_REVISION_B     2
-#define REGISTER_REVISION_C     3
-#define REGISTER_REVISION_Z     3       /* latest revision */
+#define REGISTER_REVISION_FA    1
+#define REGISTER_REVISION_FB    2
+#define REGISTER_REVISION_FC    3
+#define REGISTER_REVISION_FZ    3       /* last Falcon arch revision */
+#define REGISTER_REVISION_ED    4
+#define REGISTER_REVISION_EZ    4       /* latest EF10 revision */
 
 struct efx_nic_reg {
         u32 offset:24;
-        u32 min_revision:2, max_revision:2;
+        u32 min_revision:3, max_revision:3;
 };
 
-#define REGISTER(name, min_rev, max_rev) {                              \
-        FR_ ## min_rev ## max_rev ## _ ## name,                         \
-        REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev    \
+#define REGISTER(name, arch, min_rev, max_rev) {                        \
+        arch ## R_ ## min_rev ## max_rev ## _ ## name,                  \
+        REGISTER_REVISION_ ## arch ## min_rev,                          \
+        REGISTER_REVISION_ ## arch ## max_rev                           \
 }
-#define REGISTER_AA(name) REGISTER(name, A, A)
-#define REGISTER_AB(name) REGISTER(name, A, B)
-#define REGISTER_AZ(name) REGISTER(name, A, Z)
-#define REGISTER_BB(name) REGISTER(name, B, B)
-#define REGISTER_BZ(name) REGISTER(name, B, Z)
-#define REGISTER_CZ(name) REGISTER(name, C, Z)
+#define REGISTER_AA(name) REGISTER(name, F, A, A)
+#define REGISTER_AB(name) REGISTER(name, F, A, B)
+#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
+#define REGISTER_BB(name) REGISTER(name, F, B, B)
+#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
+#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
+#define REGISTER_DZ(name) REGISTER(name, E, D, Z)
 
 static const struct efx_nic_reg efx_nic_regs[] = {
         REGISTER_AZ(ADR_REGION),
292 REGISTER_AB(XX_TXDRV_CTL), 297 REGISTER_AB(XX_TXDRV_CTL),
293 /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */ 298 /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
294 /* XX_CORE_STAT is partly RC */ 299 /* XX_CORE_STAT is partly RC */
300 REGISTER_DZ(BIU_HW_REV_ID),
301 REGISTER_DZ(MC_DB_LWRD),
302 REGISTER_DZ(MC_DB_HWRD),
295}; 303};
296 304
297struct efx_nic_reg_table { 305struct efx_nic_reg_table {
298 u32 offset:24; 306 u32 offset:24;
299 u32 min_revision:2, max_revision:2; 307 u32 min_revision:3, max_revision:3;
300 u32 step:6, rows:21; 308 u32 step:6, rows:21;
301}; 309};
302 310
303#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \ 311#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
304 offset, \ 312 offset, \
305 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \ 313 REGISTER_REVISION_ ## arch ## min_rev, \
314 REGISTER_REVISION_ ## arch ## max_rev, \
306 step, rows \ 315 step, rows \
307} 316}
308#define REGISTER_TABLE(name, min_rev, max_rev) \ 317#define REGISTER_TABLE(name, arch, min_rev, max_rev) \
309 REGISTER_TABLE_DIMENSIONS( \ 318 REGISTER_TABLE_DIMENSIONS( \
310 name, FR_ ## min_rev ## max_rev ## _ ## name, \ 319 name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \
311 min_rev, max_rev, \ 320 arch, min_rev, max_rev, \
312 FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ 321 arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
313 FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS) 322 arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
314#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A) 323#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
315#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z) 324#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
316#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B) 325#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
317#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z) 326#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
318#define REGISTER_TABLE_BB_CZ(name) \ 327#define REGISTER_TABLE_BB_CZ(name) \
319 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \ 328 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \
320 FR_BZ_ ## name ## _STEP, \ 329 FR_BZ_ ## name ## _STEP, \
321 FR_BB_ ## name ## _ROWS), \ 330 FR_BB_ ## name ## _ROWS), \
322 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \ 331 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \
323 FR_BZ_ ## name ## _STEP, \ 332 FR_BZ_ ## name ## _STEP, \
324 FR_CZ_ ## name ## _ROWS) 333 FR_CZ_ ## name ## _ROWS)
325#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z) 334#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
335#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)
326 336
327static const struct efx_nic_reg_table efx_nic_reg_tables[] = { 337static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
328 /* DRIVER is not used */ 338 /* DRIVER is not used */
@@ -340,9 +350,9 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
          * 1K entries allows for some expansion of queue count and
          * size before we need to change the version. */
         REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
-                                  A, A, 8, 1024),
+                                  F, A, A, 8, 1024),
         REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
-                                  B, Z, 8, 1024),
+                                  F, B, Z, 8, 1024),
         REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
         REGISTER_TABLE_BB_CZ(TIMER_TBL),
         REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
@@ -353,6 +363,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
         /* MSIX_PBA_TABLE is not mapped */
         /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
         REGISTER_TABLE_BZ(RX_FILTER_TBL0),
+        REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
 };
 
 size_t efx_nic_get_regs_len(struct efx_nic *efx)
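With the arch parameter threaded through, one table now covers both register ranges: Falcon-architecture entries pass F with revisions A..Z, EF10 entries pass E with D..Z. For example, the new REGISTER_DZ(BIU_HW_REV_ID) entry expands as follows (derived directly from the macros above):

static const struct efx_nic_reg example_reg =
        REGISTER_DZ(BIU_HW_REV_ID);
/* ...which, via REGISTER(BIU_HW_REV_ID, E, D, Z), is equivalent to: */
static const struct efx_nic_reg example_reg_expanded = {
        ER_DZ_BIU_HW_REV_ID,    /* arch ## R_ ## min_rev ## max_rev ## _ ## name */
        REGISTER_REVISION_ED,   /* min_revision = 4 */
        REGISTER_REVISION_EZ,   /* max_revision = 4 */
};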
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 890bbbe8320e..11b6112d9249 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -30,7 +30,7 @@ static inline int efx_nic_rev(struct efx_nic *efx)
         return efx->type->revision;
 }
 
-extern u32 efx_farch_fpga_ver(struct efx_nic *efx);
+u32 efx_farch_fpga_ver(struct efx_nic *efx);
 
 /* NIC has two interlinked PCI functions for the same port. */
 static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
@@ -71,6 +71,26 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
         return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
 }
 
+/* Report whether the NIC considers this TX queue empty, given the
+ * write_count used for the last doorbell push.  May return false
+ * negative.
+ */
+static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
+                                         unsigned int write_count)
+{
+        unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+        if (empty_read_count == 0)
+                return false;
+
+        return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+}
+
+static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
+{
+        return __efx_nic_tx_is_empty(tx_queue, tx_queue->write_count);
+}
+
 /* Decide whether to push a TX descriptor to the NIC vs merely writing
  * the doorbell.  This can reduce latency when we are adding a single
  * descriptor to an empty queue, but is otherwise pointless.  Further,
@@ -80,14 +100,10 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
 static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
                                             unsigned int write_count)
 {
-        unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
-
-        if (empty_read_count == 0)
-                return false;
+        bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);
 
         tx_queue->empty_read_count = 0;
-        return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
-                && tx_queue->write_count - write_count == 1;
+        return was_empty && tx_queue->write_count - write_count == 1;
 }
 
 /* Returns a pointer to the specified descriptor in the RX descriptor queue */
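The XOR test in __efx_nic_tx_is_empty() works because the completion path (efx_xmit_done() in tx.c) records read_count with the top bit, EFX_EMPTY_COUNT_VALID, set when it drains the queue. A worked example with illustrative counter values:

/* Illustration only; EFX_EMPTY_COUNT_VALID is the top bit, as defined
 * in net_driver.h.
 */
static inline bool example_empty_check(void)
{
        unsigned int write_count = 42;
        /* Completion path drained the queue at read_count == 42: */
        unsigned int empty_read_count = 42 | EFX_EMPTY_COUNT_VALID;

        /* (empty_read_count ^ write_count) leaves only the VALID bit set,
         * so masking with ~EFX_EMPTY_COUNT_VALID gives 0: still empty.
         * Had write_count advanced to 43 first, the low bits would differ
         * and the test would correctly fail.
         */
        return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}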
@@ -401,6 +417,12 @@ enum {
         EF10_STAT_COUNT
 };
 
+/* Maximum number of TX PIO buffers we may allocate to a function.
+ * This matches the total number of buffers on each SFC9100-family
+ * controller.
+ */
+#define EF10_TX_PIOBUF_COUNT 16
+
 /**
  * struct efx_ef10_nic_data - EF10 architecture NIC state
  * @mcdi_buf: DMA buffer for MCDI
@@ -409,6 +431,13 @@ enum {
  * @n_allocated_vis: Number of VIs allocated to this function
  * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
  * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
+ * @n_piobufs: Number of PIO buffers allocated to this function
+ * @wc_membase: Base address of write-combining mapping of the memory BAR
+ * @pio_write_base: Base address for writing PIO buffers
+ * @pio_write_vi_base: Relative VI number for @pio_write_base
+ * @piobuf_handle: Handle of each PIO buffer allocated
+ * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
+ *      reboot
  * @rx_rss_context: Firmware handle for our RSS context
  * @stats: Hardware statistics
  * @workaround_35388: Flag: firmware supports workaround for bug 35388
@@ -424,6 +453,11 @@ struct efx_ef10_nic_data {
         unsigned int n_allocated_vis;
         bool must_realloc_vis;
         bool must_restore_filters;
+        unsigned int n_piobufs;
+        void __iomem *wc_membase, *pio_write_base;
+        unsigned int pio_write_vi_base;
+        unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
+        bool must_restore_piobufs;
         u32 rx_rss_context;
         u64 stats[EF10_STAT_COUNT];
         bool workaround_35388;
@@ -475,18 +509,18 @@ static inline unsigned int efx_vf_size(struct efx_nic *efx)
         return 1 << efx->vi_scale;
 }
 
-extern int efx_init_sriov(void);
-extern void efx_sriov_probe(struct efx_nic *efx);
-extern int efx_sriov_init(struct efx_nic *efx);
-extern void efx_sriov_mac_address_changed(struct efx_nic *efx);
-extern void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-extern void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-extern void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
-extern void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
-extern void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
-extern void efx_sriov_reset(struct efx_nic *efx);
-extern void efx_sriov_fini(struct efx_nic *efx);
-extern void efx_fini_sriov(void);
+int efx_init_sriov(void);
+void efx_sriov_probe(struct efx_nic *efx);
+int efx_sriov_init(struct efx_nic *efx);
+void efx_sriov_mac_address_changed(struct efx_nic *efx);
+void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
+void efx_sriov_reset(struct efx_nic *efx);
+void efx_sriov_fini(struct efx_nic *efx);
+void efx_fini_sriov(void);
 
 #else
 
@@ -512,22 +546,20 @@ static inline void efx_fini_sriov(void) {}
 
 #endif
 
-extern int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
-extern int efx_sriov_set_vf_vlan(struct net_device *dev, int vf,
-                                 u16 vlan, u8 qos);
-extern int efx_sriov_get_vf_config(struct net_device *dev, int vf,
-                                   struct ifla_vf_info *ivf);
-extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
-                                     bool spoofchk);
+int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
+int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos);
+int efx_sriov_get_vf_config(struct net_device *dev, int vf,
+                            struct ifla_vf_info *ivf);
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
+                              bool spoofchk);
 
 struct ethtool_ts_info;
-extern void efx_ptp_probe(struct efx_nic *efx);
-extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
-extern void efx_ptp_get_ts_info(struct efx_nic *efx,
-                                struct ethtool_ts_info *ts_info);
-extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_ptp_probe(struct efx_nic *efx);
+int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
+bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
 
 extern const struct efx_nic_type falcon_a1_nic_type;
 extern const struct efx_nic_type falcon_b0_nic_type;
@@ -541,7 +573,7 @@ extern const struct efx_nic_type efx_hunt_a0_nic_type;
541 ************************************************************************** 573 **************************************************************************
542 */ 574 */
543 575
544extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); 576int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
545 577
546/* TX data path */ 578/* TX data path */
547static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) 579static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
@@ -609,58 +641,58 @@ static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
609{ 641{
610 channel->efx->type->ev_read_ack(channel); 642 channel->efx->type->ev_read_ack(channel);
611} 643}
612extern void efx_nic_event_test_start(struct efx_channel *channel); 644void efx_nic_event_test_start(struct efx_channel *channel);
613 645
614/* Falcon/Siena queue operations */ 646/* Falcon/Siena queue operations */
615extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue); 647int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
616extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue); 648void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
617extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue); 649void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
618extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue); 650void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
619extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue); 651void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
620extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue); 652int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
621extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue); 653void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
622extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue); 654void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
623extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue); 655void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
624extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue); 656void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
625extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue); 657void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
626extern int efx_farch_ev_probe(struct efx_channel *channel); 658int efx_farch_ev_probe(struct efx_channel *channel);
627extern int efx_farch_ev_init(struct efx_channel *channel); 659int efx_farch_ev_init(struct efx_channel *channel);
628extern void efx_farch_ev_fini(struct efx_channel *channel); 660void efx_farch_ev_fini(struct efx_channel *channel);
629extern void efx_farch_ev_remove(struct efx_channel *channel); 661void efx_farch_ev_remove(struct efx_channel *channel);
630extern int efx_farch_ev_process(struct efx_channel *channel, int quota); 662int efx_farch_ev_process(struct efx_channel *channel, int quota);
631extern void efx_farch_ev_read_ack(struct efx_channel *channel); 663void efx_farch_ev_read_ack(struct efx_channel *channel);
632extern void efx_farch_ev_test_generate(struct efx_channel *channel); 664void efx_farch_ev_test_generate(struct efx_channel *channel);
633 665
634/* Falcon/Siena filter operations */ 666/* Falcon/Siena filter operations */
635extern int efx_farch_filter_table_probe(struct efx_nic *efx); 667int efx_farch_filter_table_probe(struct efx_nic *efx);
636extern void efx_farch_filter_table_restore(struct efx_nic *efx); 668void efx_farch_filter_table_restore(struct efx_nic *efx);
637extern void efx_farch_filter_table_remove(struct efx_nic *efx); 669void efx_farch_filter_table_remove(struct efx_nic *efx);
638extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx); 670void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
639extern s32 efx_farch_filter_insert(struct efx_nic *efx, 671s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
640 struct efx_filter_spec *spec, bool replace); 672 bool replace);
641extern int efx_farch_filter_remove_safe(struct efx_nic *efx, 673int efx_farch_filter_remove_safe(struct efx_nic *efx,
642 enum efx_filter_priority priority, 674 enum efx_filter_priority priority,
643 u32 filter_id); 675 u32 filter_id);
644extern int efx_farch_filter_get_safe(struct efx_nic *efx, 676int efx_farch_filter_get_safe(struct efx_nic *efx,
645 enum efx_filter_priority priority, 677 enum efx_filter_priority priority, u32 filter_id,
646 u32 filter_id, struct efx_filter_spec *); 678 struct efx_filter_spec *);
647extern void efx_farch_filter_clear_rx(struct efx_nic *efx, 679void efx_farch_filter_clear_rx(struct efx_nic *efx,
648 enum efx_filter_priority priority); 680 enum efx_filter_priority priority);
649extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, 681u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
650 enum efx_filter_priority priority); 682 enum efx_filter_priority priority);
651extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx); 683u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
652extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, 684s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
653 enum efx_filter_priority priority, 685 enum efx_filter_priority priority, u32 *buf,
654 u32 *buf, u32 size); 686 u32 size);
655#ifdef CONFIG_RFS_ACCEL 687#ifdef CONFIG_RFS_ACCEL
656extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx, 688s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
657 struct efx_filter_spec *spec); 689 struct efx_filter_spec *spec);
658extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, 690bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
659 unsigned int index); 691 unsigned int index);
660#endif 692#endif
661extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx); 693void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
662 694
663extern bool efx_nic_event_present(struct efx_channel *channel); 695bool efx_nic_event_present(struct efx_channel *channel);
664 696
665/* Some statistics are computed as A - B where A and B each increase 697/* Some statistics are computed as A - B where A and B each increase
666 * linearly with some hardware counter(s) and the counters are read 698 * linearly with some hardware counter(s) and the counters are read
@@ -681,17 +713,17 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
681} 713}
682 714
683/* Interrupts */ 715/* Interrupts */
684extern int efx_nic_init_interrupt(struct efx_nic *efx); 716int efx_nic_init_interrupt(struct efx_nic *efx);
685extern void efx_nic_irq_test_start(struct efx_nic *efx); 717void efx_nic_irq_test_start(struct efx_nic *efx);
686extern void efx_nic_fini_interrupt(struct efx_nic *efx); 718void efx_nic_fini_interrupt(struct efx_nic *efx);
687 719
688/* Falcon/Siena interrupts */ 720/* Falcon/Siena interrupts */
689extern void efx_farch_irq_enable_master(struct efx_nic *efx); 721void efx_farch_irq_enable_master(struct efx_nic *efx);
690extern void efx_farch_irq_test_generate(struct efx_nic *efx); 722void efx_farch_irq_test_generate(struct efx_nic *efx);
691extern void efx_farch_irq_disable_master(struct efx_nic *efx); 723void efx_farch_irq_disable_master(struct efx_nic *efx);
692extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id); 724irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
693extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id); 725irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
694extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); 726irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
695 727
696static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) 728static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
697{ 729{
@@ -703,21 +735,21 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
703} 735}
704 736
705/* Global Resources */ 737/* Global Resources */
706extern int efx_nic_flush_queues(struct efx_nic *efx); 738int efx_nic_flush_queues(struct efx_nic *efx);
707extern void siena_prepare_flush(struct efx_nic *efx); 739void siena_prepare_flush(struct efx_nic *efx);
708extern int efx_farch_fini_dmaq(struct efx_nic *efx); 740int efx_farch_fini_dmaq(struct efx_nic *efx);
709extern void siena_finish_flush(struct efx_nic *efx); 741void siena_finish_flush(struct efx_nic *efx);
710extern void falcon_start_nic_stats(struct efx_nic *efx); 742void falcon_start_nic_stats(struct efx_nic *efx);
711extern void falcon_stop_nic_stats(struct efx_nic *efx); 743void falcon_stop_nic_stats(struct efx_nic *efx);
712extern int falcon_reset_xaui(struct efx_nic *efx); 744int falcon_reset_xaui(struct efx_nic *efx);
713extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); 745void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
714extern void efx_farch_init_common(struct efx_nic *efx); 746void efx_farch_init_common(struct efx_nic *efx);
715extern void efx_ef10_handle_drain_event(struct efx_nic *efx); 747void efx_ef10_handle_drain_event(struct efx_nic *efx);
716static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx) 748static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
717{ 749{
718 efx->type->rx_push_indir_table(efx); 750 efx->type->rx_push_indir_table(efx);
719} 751}
720extern void efx_farch_rx_push_indir_table(struct efx_nic *efx); 752void efx_farch_rx_push_indir_table(struct efx_nic *efx);
721 753
722int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, 754int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
723 unsigned int len, gfp_t gfp_flags); 755 unsigned int len, gfp_t gfp_flags);
@@ -728,24 +760,22 @@ struct efx_farch_register_test {
728 unsigned address; 760 unsigned address;
729 efx_oword_t mask; 761 efx_oword_t mask;
730}; 762};
731extern int efx_farch_test_registers(struct efx_nic *efx, 763int efx_farch_test_registers(struct efx_nic *efx,
732 const struct efx_farch_register_test *regs, 764 const struct efx_farch_register_test *regs,
733 size_t n_regs); 765 size_t n_regs);
734 766
735extern size_t efx_nic_get_regs_len(struct efx_nic *efx); 767size_t efx_nic_get_regs_len(struct efx_nic *efx);
736extern void efx_nic_get_regs(struct efx_nic *efx, void *buf); 768void efx_nic_get_regs(struct efx_nic *efx, void *buf);
737 769
738extern size_t 770size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
739efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, 771 const unsigned long *mask, u8 *names);
740 const unsigned long *mask, u8 *names); 772void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
741extern void 773 const unsigned long *mask, u64 *stats,
742efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, 774 const void *dma_buf, bool accumulate);
743 const unsigned long *mask,
744 u64 *stats, const void *dma_buf, bool accumulate);
745 775
746#define EFX_MAX_FLUSH_TIME 5000 776#define EFX_MAX_FLUSH_TIME 5000
747 777
748extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, 778void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
749 efx_qword_t *event); 779 efx_qword_t *event);
750 780
751#endif /* EFX_NIC_H */ 781#endif /* EFX_NIC_H */
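The bulk of the nic.h churn above is mechanical: dropping the extern keyword from function declarations while re-wrapping the argument lists. In C, functions declared at file scope have external linkage by default, so the two forms below are exactly equivalent and the shorter one is now the preferred kernel style:

/* These declare the same thing; extern is redundant on functions: */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
int efx_nic_init_interrupt(struct efx_nic *efx);

Note that extern remains meaningful for objects (e.g. the efx_nic_type variables earlier in this header), which is why those declarations keep it.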
diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h
index 45eeb7075156..803bf445c08e 100644
--- a/drivers/net/ethernet/sfc/phy.h
+++ b/drivers/net/ethernet/sfc/phy.h
@@ -15,7 +15,7 @@
15 */ 15 */
16extern const struct efx_phy_operations falcon_sfx7101_phy_ops; 16extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
17 17
18extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); 18void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
19 19
20/**************************************************************************** 20/****************************************************************************
21 * AMCC/Quake QT202x PHYs 21 * AMCC/Quake QT202x PHYs
@@ -34,7 +34,7 @@ extern const struct efx_phy_operations falcon_qt202x_phy_ops;
34#define QUAKE_LED_TXLINK (0) 34#define QUAKE_LED_TXLINK (0)
35#define QUAKE_LED_RXLINK (8) 35#define QUAKE_LED_RXLINK (8)
36 36
37extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state); 37void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
38 38
39/**************************************************************************** 39/****************************************************************************
40* Transwitch CX4 retimer 40* Transwitch CX4 retimer
@@ -44,7 +44,7 @@ extern const struct efx_phy_operations falcon_txc_phy_ops;
44#define TXC_GPIO_DIR_INPUT 0 44#define TXC_GPIO_DIR_INPUT 0
45#define TXC_GPIO_DIR_OUTPUT 1 45#define TXC_GPIO_DIR_OUTPUT 1
46 46
47extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir); 47void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
48extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val); 48void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
49 49
50#endif 50#endif
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 4a596725023f..8f09e686fc23 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -12,6 +12,7 @@
12#include <linux/in.h> 12#include <linux/in.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/ip.h> 14#include <linux/ip.h>
15#include <linux/ipv6.h>
15#include <linux/tcp.h> 16#include <linux/tcp.h>
16#include <linux/udp.h> 17#include <linux/udp.h>
17#include <linux/prefetch.h> 18#include <linux/prefetch.h>
@@ -818,44 +819,70 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
818 struct efx_nic *efx = netdev_priv(net_dev); 819 struct efx_nic *efx = netdev_priv(net_dev);
819 struct efx_channel *channel; 820 struct efx_channel *channel;
820 struct efx_filter_spec spec; 821 struct efx_filter_spec spec;
821 const struct iphdr *ip;
822 const __be16 *ports; 822 const __be16 *ports;
823 __be16 ether_type;
823 int nhoff; 824 int nhoff;
824 int rc; 825 int rc;
825 826
826 nhoff = skb_network_offset(skb); 827 /* The core RPS/RFS code has already parsed and validated
828 * VLAN, IP and transport headers. We assume they are in the
829 * header area.
830 */
827 831
828 if (skb->protocol == htons(ETH_P_8021Q)) { 832 if (skb->protocol == htons(ETH_P_8021Q)) {
829 EFX_BUG_ON_PARANOID(skb_headlen(skb) < 833 const struct vlan_hdr *vh =
830 nhoff + sizeof(struct vlan_hdr)); 834 (const struct vlan_hdr *)skb->data;
831 if (((const struct vlan_hdr *)skb->data + nhoff)->
832 h_vlan_encapsulated_proto != htons(ETH_P_IP))
833 return -EPROTONOSUPPORT;
834 835
835 /* This is IP over 802.1q VLAN. We can't filter on the 836 /* We can't filter on the IP 5-tuple and the vlan
836 * IP 5-tuple and the vlan together, so just strip the 837 * together, so just strip the vlan header and filter
837 * vlan header and filter on the IP part. 838 * on the IP part.
838 */ 839 */
839 nhoff += sizeof(struct vlan_hdr); 840 EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
840 } else if (skb->protocol != htons(ETH_P_IP)) { 841 ether_type = vh->h_vlan_encapsulated_proto;
841 return -EPROTONOSUPPORT; 842 nhoff = sizeof(struct vlan_hdr);
843 } else {
844 ether_type = skb->protocol;
845 nhoff = 0;
842 } 846 }
843 847
844 /* RFS must validate the IP header length before calling us */ 848 if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
845 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
846 ip = (const struct iphdr *)(skb->data + nhoff);
847 if (ip_is_fragment(ip))
848 return -EPROTONOSUPPORT; 849 return -EPROTONOSUPPORT;
849 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
850 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
851 850
852 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 851 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
853 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, 852 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
854 rxq_index); 853 rxq_index);
855 rc = efx_filter_set_ipv4_full(&spec, ip->protocol, 854 spec.match_flags =
856 ip->daddr, ports[1], ip->saddr, ports[0]); 855 EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
857 if (rc) 856 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
858 return rc; 857 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
858 spec.ether_type = ether_type;
859
860 if (ether_type == htons(ETH_P_IP)) {
861 const struct iphdr *ip =
862 (const struct iphdr *)(skb->data + nhoff);
863
864 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
865 if (ip_is_fragment(ip))
866 return -EPROTONOSUPPORT;
867 spec.ip_proto = ip->protocol;
868 spec.rem_host[0] = ip->saddr;
869 spec.loc_host[0] = ip->daddr;
870 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
871 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
872 } else {
873 const struct ipv6hdr *ip6 =
874 (const struct ipv6hdr *)(skb->data + nhoff);
875
876 EFX_BUG_ON_PARANOID(skb_headlen(skb) <
877 nhoff + sizeof(*ip6) + 4);
878 spec.ip_proto = ip6->nexthdr;
879 memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
880 memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
881 ports = (const __be16 *)(ip6 + 1);
882 }
883
884 spec.rem_port = ports[0];
885 spec.loc_port = ports[1];
859 886
860 rc = efx->type->filter_rfs_insert(efx, &spec); 887 rc = efx->type->filter_rfs_insert(efx, &spec);
861 if (rc < 0) 888 if (rc < 0)
@@ -866,11 +893,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
866 channel = efx_get_channel(efx, skb_get_rx_queue(skb)); 893 channel = efx_get_channel(efx, skb_get_rx_queue(skb));
867 ++channel->rfs_filters_added; 894 ++channel->rfs_filters_added;
868 895
869 netif_info(efx, rx_status, efx->net_dev, 896 if (ether_type == htons(ETH_P_IP))
870 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", 897 netif_info(efx, rx_status, efx->net_dev,
871 (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP", 898 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
872 &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]), 899 (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
873 rxq_index, flow_id, rc); 900 spec.rem_host, ntohs(ports[0]), spec.loc_host,
901 ntohs(ports[1]), rxq_index, flow_id, rc);
902 else
903 netif_info(efx, rx_status, efx->net_dev,
904 "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
905 (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
906 spec.rem_host, ntohs(ports[0]), spec.loc_host,
907 ntohs(ports[1]), rxq_index, flow_id, rc);
874 908
875 return rc; 909 return rc;
876} 910}
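The rewritten efx_filter_rfs() builds the filter spec by hand instead of going through efx_filter_set_ipv4_full(), which is what lets it steer IPv6 flows as well. One step worth spelling out is the port lookup: IPv4 puts the transport header at a variable offset (ihl words), while the IPv6 branch reads the ports immediately after the fixed 40-byte header, i.e. it assumes no extension headers on the flows RFS hands over. A sketch of that assumption (not from the patch):

/* With no IPv6 extension headers, the TCP/UDP ports sit directly
 * after the fixed 40-byte ipv6hdr.
 */
static const __be16 *l4_ports(const struct ipv6hdr *ip6)
{
	return (const __be16 *)(ip6 + 1);	/* [0]=src port, [1]=dst port */
}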
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index 87698ae0bf75..a2f4a06ffa4e 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -43,13 +43,12 @@ struct efx_self_tests {
43 struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1]; 43 struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
44}; 44};
45 45
46extern void efx_loopback_rx_packet(struct efx_nic *efx, 46void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
47 const char *buf_ptr, int pkt_len); 47 int pkt_len);
48extern int efx_selftest(struct efx_nic *efx, 48int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
49 struct efx_self_tests *tests, 49 unsigned flags);
50 unsigned flags); 50void efx_selftest_async_start(struct efx_nic *efx);
51extern void efx_selftest_async_start(struct efx_nic *efx); 51void efx_selftest_async_cancel(struct efx_nic *efx);
52extern void efx_selftest_async_cancel(struct efx_nic *efx); 52void efx_selftest_async_work(struct work_struct *data);
53extern void efx_selftest_async_work(struct work_struct *data);
54 53
55#endif /* EFX_SELFTEST_H */ 54#endif /* EFX_SELFTEST_H */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 2ac91c5b5eea..c49d1fb16965 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -17,10 +17,46 @@
17#include <net/ipv6.h> 17#include <net/ipv6.h>
18#include <linux/if_ether.h> 18#include <linux/if_ether.h>
19#include <linux/highmem.h> 19#include <linux/highmem.h>
20#include <linux/cache.h>
20#include "net_driver.h" 21#include "net_driver.h"
21#include "efx.h" 22#include "efx.h"
23#include "io.h"
22#include "nic.h" 24#include "nic.h"
23#include "workarounds.h" 25#include "workarounds.h"
26#include "ef10_regs.h"
27
28#ifdef EFX_USE_PIO
29
30#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
31#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
32unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
33
34#endif /* EFX_USE_PIO */
35
36static inline unsigned int
37efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
38{
39 return tx_queue->insert_count & tx_queue->ptr_mask;
40}
41
42static inline struct efx_tx_buffer *
43__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
44{
45 return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
46}
47
48static inline struct efx_tx_buffer *
49efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
50{
51 struct efx_tx_buffer *buffer =
52 __efx_tx_queue_get_insert_buffer(tx_queue);
53
54 EFX_BUG_ON_PARANOID(buffer->len);
55 EFX_BUG_ON_PARANOID(buffer->flags);
56 EFX_BUG_ON_PARANOID(buffer->unmap_len);
57
58 return buffer;
59}
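These helpers split the old open-coded pattern into a checked and an unchecked lookup of the current insert slot. The checked variant asserts the slot is unused and is meant for the enqueue path; the double-underscore variant skips the asserts for rollback paths, where the slot is known to hold a buffer being torn down. Roughly, as the later hunks use them:

/* enqueue: slot must be empty */
buffer = efx_tx_queue_get_insert_buffer(tx_queue);

/* rollback: slot holds a buffer we are about to free */
--tx_queue->insert_count;
buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);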
24 60
25static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, 61static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
26 struct efx_tx_buffer *buffer, 62 struct efx_tx_buffer *buffer,
@@ -29,8 +65,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
29{ 65{
30 if (buffer->unmap_len) { 66 if (buffer->unmap_len) {
31 struct device *dma_dev = &tx_queue->efx->pci_dev->dev; 67 struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
32 dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len - 68 dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
33 buffer->unmap_len);
34 if (buffer->flags & EFX_TX_BUF_MAP_SINGLE) 69 if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
35 dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len, 70 dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
36 DMA_TO_DEVICE); 71 DMA_TO_DEVICE);
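The old unmap-address arithmetic assumed every buffer ends exactly where its DMA mapping ends, which stops being true once one mapping can back several buffers (the TSO header case later in this patch). Recording dma_offset per buffer makes the unmap address independent of that layout:

/* Example: the whole 160-byte linear area is mapped once, but this
 * buffer covers only the 100-byte header at the start of it.
 *   old: unmap_addr = dma_addr + 100 - 160;   // 60 bytes before the
 *                                             // real mapping start
 *   new: unmap_addr = dma_addr - dma_offset;  // dma_offset == 0 here,
 *                                             // so unmap_addr == dma_addr
 */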
@@ -83,8 +118,10 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
83 */ 118 */
84 unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; 119 unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
85 120
86 /* Possibly one more per segment for the alignment workaround */ 121 /* Possibly one more per segment for the alignment workaround,
87 if (EFX_WORKAROUND_5391(efx)) 122 * or for option descriptors
123 */
124 if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
88 max_descs += EFX_TSO_MAX_SEGS; 125 max_descs += EFX_TSO_MAX_SEGS;
89 126
90 /* Possibly more for PCIe page boundaries within input fragments */ 127 /* Possibly more for PCIe page boundaries within input fragments */
@@ -145,6 +182,145 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
145 } 182 }
146} 183}
147 184
185#ifdef EFX_USE_PIO
186
187struct efx_short_copy_buffer {
188 int used;
189 u8 buf[L1_CACHE_BYTES];
190};
191
192/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
193 * Advances piobuf pointer. Leaves additional data in the copy buffer.
194 */
195static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
196 u8 *data, int len,
197 struct efx_short_copy_buffer *copy_buf)
198{
199 int block_len = len & ~(sizeof(copy_buf->buf) - 1);
200
201 memcpy_toio(*piobuf, data, block_len);
202 *piobuf += block_len;
203 len -= block_len;
204
205 if (len) {
206 data += block_len;
207 BUG_ON(copy_buf->used);
208 BUG_ON(len > sizeof(copy_buf->buf));
209 memcpy(copy_buf->buf, data, len);
210 copy_buf->used = len;
211 }
212}
213
214/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
215 * Advances piobuf pointer. Leaves additional data in the copy buffer.
216 */
217static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
218 u8 *data, int len,
219 struct efx_short_copy_buffer *copy_buf)
220{
221 if (copy_buf->used) {
222 /* if the copy buffer is partially full, fill it up and write */
223 int copy_to_buf =
224 min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);
225
226 memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
227 copy_buf->used += copy_to_buf;
228
229 /* if we didn't fill it up then we're done for now */
230 if (copy_buf->used < sizeof(copy_buf->buf))
231 return;
232
233 memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
234 *piobuf += sizeof(copy_buf->buf);
235 data += copy_to_buf;
236 len -= copy_to_buf;
237 copy_buf->used = 0;
238 }
239
240 efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
241}
242
243static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
244 struct efx_short_copy_buffer *copy_buf)
245{
246 /* if there's anything in it, write the whole buffer, including junk */
247 if (copy_buf->used)
248 memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
249}
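The copy buffer exists because write-combining only works well when the mapping sees full, aligned cache-line writes; the helpers above therefore push whole multiples of L1_CACHE_BYTES with memcpy_toio() and stage any remainder until more data (or the final flush) completes the line. A worked example, assuming L1_CACHE_BYTES == 64:

/* len = 150:
 *   block_len = 150 & ~63 = 128  -> memcpy_toio() pushes 128 bytes
 *   22 bytes stay behind in copy_buf->buf
 * the next fragment first tops copy_buf up to 64 bytes and writes it
 * out, then continues with its own aligned blocks
 */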
250
251/* Traverse skb structure and copy fragments in to PIO buffer.
251/* Traverse skb structure and copy fragments into the PIO buffer.
252 * Advances piobuf pointer.
253 */
254static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
255 u8 __iomem **piobuf,
256 struct efx_short_copy_buffer *copy_buf)
257{
258 int i;
259
260 efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
261 copy_buf);
262
263 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
264 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
265 u8 *vaddr;
266
267 vaddr = kmap_atomic(skb_frag_page(f));
268
269 efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
270 skb_frag_size(f), copy_buf);
271 kunmap_atomic(vaddr);
272 }
273
274 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list);
275}
276
277static struct efx_tx_buffer *
278efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
279{
280 struct efx_tx_buffer *buffer =
281 efx_tx_queue_get_insert_buffer(tx_queue);
282 u8 __iomem *piobuf = tx_queue->piobuf;
283
284 /* Copy to PIO buffer. Ensure the writes are padded to the end
285 * of a cache line, as this is required for write-combining to be
286 * effective on at least x86.
287 */
288
289 if (skb_shinfo(skb)->nr_frags) {
290 /* The size of the copy buffer will ensure all writes
291 * are the size of a cache line.
292 */
293 struct efx_short_copy_buffer copy_buf;
294
295 copy_buf.used = 0;
296
297 efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
298 &piobuf, &copy_buf);
299 efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
300 } else {
301 /* Pad the write to the size of a cache line.
 302 * We can do this because we know the skb_shared_info struct is
303 * after the source, and the destination buffer is big enough.
304 */
305 BUILD_BUG_ON(L1_CACHE_BYTES >
306 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
307 memcpy_toio(tx_queue->piobuf, skb->data,
308 ALIGN(skb->len, L1_CACHE_BYTES));
309 }
310
311 EFX_POPULATE_QWORD_5(buffer->option,
312 ESF_DZ_TX_DESC_IS_OPT, 1,
313 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
314 ESF_DZ_TX_PIO_CONT, 0,
315 ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
316 ESF_DZ_TX_PIO_BUF_ADDR,
317 tx_queue->piobuf_offset);
318 ++tx_queue->pio_packets;
319 ++tx_queue->insert_count;
320 return buffer;
321}
322#endif /* EFX_USE_PIO */
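With the payload already pushed through the write-combined window, the queue entry itself is a single option descriptor rather than a DMA descriptor. Reading the EFX_POPULATE_QWORD_5() above against the field names from ef10_regs.h (my gloss, not from the patch):

/*   ESF_DZ_TX_DESC_IS_OPT  = 1      option descriptor, no DMA address
 *   ESF_DZ_TX_OPTION_TYPE  = ..PIO  payload came via the PIO aperture
 *   ESF_DZ_TX_PIO_CONT     = 0      packet complete, not continued
 *   ESF_DZ_TX_PIO_BYTE_CNT          real length (skb->len), not the
 *                                   cache-line-padded length written
 *   ESF_DZ_TX_PIO_BUF_ADDR          offset of this queue's region in
 *                                   the linked PIO buffer
 */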
323
148/* 324/*
149 * Add a socket buffer to a TX queue 325 * Add a socket buffer to a TX queue
150 * 326 *
@@ -167,7 +343,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
167 struct device *dma_dev = &efx->pci_dev->dev; 343 struct device *dma_dev = &efx->pci_dev->dev;
168 struct efx_tx_buffer *buffer; 344 struct efx_tx_buffer *buffer;
169 skb_frag_t *fragment; 345 skb_frag_t *fragment;
170 unsigned int len, unmap_len = 0, insert_ptr; 346 unsigned int len, unmap_len = 0;
171 dma_addr_t dma_addr, unmap_addr = 0; 347 dma_addr_t dma_addr, unmap_addr = 0;
172 unsigned int dma_len; 348 unsigned int dma_len;
173 unsigned short dma_flags; 349 unsigned short dma_flags;
@@ -189,6 +365,17 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
189 return NETDEV_TX_OK; 365 return NETDEV_TX_OK;
190 } 366 }
191 367
368 /* Consider using PIO for short packets */
369#ifdef EFX_USE_PIO
370 if (skb->len <= efx_piobuf_size && tx_queue->piobuf &&
371 efx_nic_tx_is_empty(tx_queue) &&
372 efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue))) {
373 buffer = efx_enqueue_skb_pio(tx_queue, skb);
374 dma_flags = EFX_TX_BUF_OPTION;
375 goto finish_packet;
376 }
377#endif
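Restating the gate above: PIO is only attempted when the packet fits the per-queue PIO region, a PIO buffer is actually linked, and both TX queues of the pair are empty; the empty check presumably guarantees the hardware has finished reading any earlier PIO write before it is overwritten.

bool can_pio = skb->len <= efx_piobuf_size &&	/* fits the region  */
	       tx_queue->piobuf &&		/* VI has a buffer  */
	       efx_nic_tx_is_empty(tx_queue) &&	/* nothing in flight */
	       efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue));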
378
192 /* Map for DMA. Use dma_map_single rather than dma_map_page 379 /* Map for DMA. Use dma_map_single rather than dma_map_page
193 * since this is more efficient on machines with sparse 380 * since this is more efficient on machines with sparse
194 * memory. 381 * memory.
@@ -208,11 +395,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
208 395
209 /* Add to TX queue, splitting across DMA boundaries */ 396 /* Add to TX queue, splitting across DMA boundaries */
210 do { 397 do {
211 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 398 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
212 buffer = &tx_queue->buffer[insert_ptr];
213 EFX_BUG_ON_PARANOID(buffer->flags);
214 EFX_BUG_ON_PARANOID(buffer->len);
215 EFX_BUG_ON_PARANOID(buffer->unmap_len);
216 399
217 dma_len = efx_max_tx_len(efx, dma_addr); 400 dma_len = efx_max_tx_len(efx, dma_addr);
218 if (likely(dma_len >= len)) 401 if (likely(dma_len >= len))
@@ -230,6 +413,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
230 /* Transfer ownership of the unmapping to the final buffer */ 413 /* Transfer ownership of the unmapping to the final buffer */
231 buffer->flags = EFX_TX_BUF_CONT | dma_flags; 414 buffer->flags = EFX_TX_BUF_CONT | dma_flags;
232 buffer->unmap_len = unmap_len; 415 buffer->unmap_len = unmap_len;
416 buffer->dma_offset = buffer->dma_addr - unmap_addr;
233 unmap_len = 0; 417 unmap_len = 0;
234 418
235 /* Get address and size of next fragment */ 419 /* Get address and size of next fragment */
@@ -245,6 +429,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
245 } 429 }
246 430
247 /* Transfer ownership of the skb to the final buffer */ 431 /* Transfer ownership of the skb to the final buffer */
432finish_packet:
248 buffer->skb = skb; 433 buffer->skb = skb;
249 buffer->flags = EFX_TX_BUF_SKB | dma_flags; 434 buffer->flags = EFX_TX_BUF_SKB | dma_flags;
250 435
@@ -270,8 +455,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
270 while (tx_queue->insert_count != tx_queue->write_count) { 455 while (tx_queue->insert_count != tx_queue->write_count) {
271 unsigned int pkts_compl = 0, bytes_compl = 0; 456 unsigned int pkts_compl = 0, bytes_compl = 0;
272 --tx_queue->insert_count; 457 --tx_queue->insert_count;
273 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 458 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
274 buffer = &tx_queue->buffer[insert_ptr];
275 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); 459 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
276 } 460 }
277 461
@@ -628,6 +812,9 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
628 * @tcp_off: Offset of TCP header 812 * @tcp_off: Offset of TCP header
629 * @header_len: Number of bytes of header 813 * @header_len: Number of bytes of header
630 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload 814 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
815 * @header_dma_addr: Header DMA address, when using option descriptors
816 * @header_unmap_len: Header DMA mapped length, or 0 if not using option
817 * descriptors
631 * 818 *
632 * The state used during segmentation. It is put into this data structure 819 * The state used during segmentation. It is put into this data structure
633 * just to make it easy to pass into inline functions. 820 * just to make it easy to pass into inline functions.
@@ -636,7 +823,7 @@ struct tso_state {
636 /* Output position */ 823 /* Output position */
637 unsigned out_len; 824 unsigned out_len;
638 unsigned seqnum; 825 unsigned seqnum;
639 unsigned ipv4_id; 826 u16 ipv4_id;
640 unsigned packet_space; 827 unsigned packet_space;
641 828
642 /* Input position */ 829 /* Input position */
@@ -651,6 +838,8 @@ struct tso_state {
651 unsigned int tcp_off; 838 unsigned int tcp_off;
652 unsigned header_len; 839 unsigned header_len;
653 unsigned int ip_base_len; 840 unsigned int ip_base_len;
841 dma_addr_t header_dma_addr;
842 unsigned int header_unmap_len;
654}; 843};
655 844
656 845
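Narrowing ipv4_id from unsigned to u16 matches both the protocol (IPv4 IDs are 16 bits on the wire) and the 16-bit ESF_DZ_TX_TSO_IP_ID field it is later written into; the per-segment increment then wraps exactly as the wire format requires:

u16 ipv4_id = 0xffff;
ipv4_id++;		/* wraps to 0x0000, as an IP ID should */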
@@ -737,23 +926,18 @@ static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
737{ 926{
738 struct efx_tx_buffer *buffer; 927 struct efx_tx_buffer *buffer;
739 struct efx_nic *efx = tx_queue->efx; 928 struct efx_nic *efx = tx_queue->efx;
740 unsigned dma_len, insert_ptr; 929 unsigned dma_len;
741 930
742 EFX_BUG_ON_PARANOID(len <= 0); 931 EFX_BUG_ON_PARANOID(len <= 0);
743 932
744 while (1) { 933 while (1) {
745 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 934 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
746 buffer = &tx_queue->buffer[insert_ptr];
747 ++tx_queue->insert_count; 935 ++tx_queue->insert_count;
748 936
749 EFX_BUG_ON_PARANOID(tx_queue->insert_count - 937 EFX_BUG_ON_PARANOID(tx_queue->insert_count -
750 tx_queue->read_count >= 938 tx_queue->read_count >=
751 efx->txq_entries); 939 efx->txq_entries);
752 940
753 EFX_BUG_ON_PARANOID(buffer->len);
754 EFX_BUG_ON_PARANOID(buffer->unmap_len);
755 EFX_BUG_ON_PARANOID(buffer->flags);
756
757 buffer->dma_addr = dma_addr; 941 buffer->dma_addr = dma_addr;
758 942
759 dma_len = efx_max_tx_len(efx, dma_addr); 943 dma_len = efx_max_tx_len(efx, dma_addr);
@@ -796,6 +980,7 @@ static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
796 return -ENOMEM; 980 return -ENOMEM;
797 } 981 }
798 buffer->unmap_len = buffer->len; 982 buffer->unmap_len = buffer->len;
983 buffer->dma_offset = 0;
799 buffer->flags |= EFX_TX_BUF_MAP_SINGLE; 984 buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
800 } 985 }
801 986
@@ -814,19 +999,27 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
814 /* Work backwards until we hit the original insert pointer value */ 999 /* Work backwards until we hit the original insert pointer value */
815 while (tx_queue->insert_count != tx_queue->write_count) { 1000 while (tx_queue->insert_count != tx_queue->write_count) {
816 --tx_queue->insert_count; 1001 --tx_queue->insert_count;
817 buffer = &tx_queue->buffer[tx_queue->insert_count & 1002 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
818 tx_queue->ptr_mask];
819 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); 1003 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
820 } 1004 }
821} 1005}
822 1006
823 1007
824/* Parse the SKB header and initialise state. */ 1008/* Parse the SKB header and initialise state. */
825static void tso_start(struct tso_state *st, const struct sk_buff *skb) 1009static int tso_start(struct tso_state *st, struct efx_nic *efx,
1010 const struct sk_buff *skb)
826{ 1011{
1012 bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
1013 struct device *dma_dev = &efx->pci_dev->dev;
1014 unsigned int header_len, in_len;
1015 dma_addr_t dma_addr;
1016
827 st->ip_off = skb_network_header(skb) - skb->data; 1017 st->ip_off = skb_network_header(skb) - skb->data;
828 st->tcp_off = skb_transport_header(skb) - skb->data; 1018 st->tcp_off = skb_transport_header(skb) - skb->data;
829 st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u); 1019 header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
1020 in_len = skb_headlen(skb) - header_len;
1021 st->header_len = header_len;
1022 st->in_len = in_len;
830 if (st->protocol == htons(ETH_P_IP)) { 1023 if (st->protocol == htons(ETH_P_IP)) {
831 st->ip_base_len = st->header_len - st->ip_off; 1024 st->ip_base_len = st->header_len - st->ip_off;
832 st->ipv4_id = ntohs(ip_hdr(skb)->id); 1025 st->ipv4_id = ntohs(ip_hdr(skb)->id);
@@ -840,9 +1033,34 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
840 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); 1033 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
841 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); 1034 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
842 1035
843 st->out_len = skb->len - st->header_len; 1036 st->out_len = skb->len - header_len;
844 st->unmap_len = 0; 1037
845 st->dma_flags = 0; 1038 if (!use_options) {
1039 st->header_unmap_len = 0;
1040
1041 if (likely(in_len == 0)) {
1042 st->dma_flags = 0;
1043 st->unmap_len = 0;
1044 return 0;
1045 }
1046
1047 dma_addr = dma_map_single(dma_dev, skb->data + header_len,
1048 in_len, DMA_TO_DEVICE);
1049 st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
1050 st->dma_addr = dma_addr;
1051 st->unmap_addr = dma_addr;
1052 st->unmap_len = in_len;
1053 } else {
1054 dma_addr = dma_map_single(dma_dev, skb->data,
1055 skb_headlen(skb), DMA_TO_DEVICE);
1056 st->header_dma_addr = dma_addr;
1057 st->header_unmap_len = skb_headlen(skb);
1058 st->dma_flags = 0;
1059 st->dma_addr = dma_addr + header_len;
1060 st->unmap_len = 0;
1061 }
1062
1063 return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
846} 1064}
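tso_start() now chooses between two mapping strategies. On pre-EF10 hardware (use_options false) behaviour is as before: headers are rebuilt in software for every segment, so only the payload part of the linear area is mapped, absorbing the old tso_get_head_fragment(). On EF10 (use_options true) the whole linear area is mapped once; the unmodified headers are re-sent per segment behind a TSO option descriptor and the NIC rewrites seqno/IP ID/flags itself. Either way, only the mapping the chosen branch actually created is error-checked by the shared tail:

/* dma_addr is whichever mapping the chosen branch created */
return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;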
847 1065
848static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, 1066static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -860,24 +1078,6 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
860 return -ENOMEM; 1078 return -ENOMEM;
861} 1079}
862 1080
863static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
864 const struct sk_buff *skb)
865{
866 int hl = st->header_len;
867 int len = skb_headlen(skb) - hl;
868
869 st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
870 len, DMA_TO_DEVICE);
871 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
872 st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
873 st->unmap_len = len;
874 st->in_len = len;
875 st->dma_addr = st->unmap_addr;
876 return 0;
877 }
878 return -ENOMEM;
879}
880
881 1081
882/** 1082/**
883 * tso_fill_packet_with_fragment - form descriptors for the current fragment 1083 * tso_fill_packet_with_fragment - form descriptors for the current fragment
@@ -922,6 +1122,7 @@ static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
922 if (st->in_len == 0) { 1122 if (st->in_len == 0) {
923 /* Transfer ownership of the DMA mapping */ 1123 /* Transfer ownership of the DMA mapping */
924 buffer->unmap_len = st->unmap_len; 1124 buffer->unmap_len = st->unmap_len;
1125 buffer->dma_offset = buffer->unmap_len - buffer->len;
925 buffer->flags |= st->dma_flags; 1126 buffer->flags |= st->dma_flags;
926 st->unmap_len = 0; 1127 st->unmap_len = 0;
927 } 1128 }
@@ -944,55 +1145,98 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
944 struct tso_state *st) 1145 struct tso_state *st)
945{ 1146{
946 struct efx_tx_buffer *buffer = 1147 struct efx_tx_buffer *buffer =
947 &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; 1148 efx_tx_queue_get_insert_buffer(tx_queue);
948 struct tcphdr *tsoh_th; 1149 bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
949 unsigned ip_length; 1150 u8 tcp_flags_clear;
950 u8 *header;
951 int rc;
952 1151
953 /* Allocate and insert a DMA-mapped header buffer. */ 1152 if (!is_last) {
954 header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
955 if (!header)
956 return -ENOMEM;
957
958 tsoh_th = (struct tcphdr *)(header + st->tcp_off);
959
960 /* Copy and update the headers. */
961 memcpy(header, skb->data, st->header_len);
962
963 tsoh_th->seq = htonl(st->seqnum);
964 st->seqnum += skb_shinfo(skb)->gso_size;
965 if (st->out_len > skb_shinfo(skb)->gso_size) {
966 /* This packet will not finish the TSO burst. */
967 st->packet_space = skb_shinfo(skb)->gso_size; 1153 st->packet_space = skb_shinfo(skb)->gso_size;
968 tsoh_th->fin = 0; 1154 tcp_flags_clear = 0x09; /* mask out FIN and PSH */
969 tsoh_th->psh = 0;
970 } else { 1155 } else {
971 /* This packet will be the last in the TSO burst. */
972 st->packet_space = st->out_len; 1156 st->packet_space = st->out_len;
973 tsoh_th->fin = tcp_hdr(skb)->fin; 1157 tcp_flags_clear = 0x00;
974 tsoh_th->psh = tcp_hdr(skb)->psh;
975 } 1158 }
976 ip_length = st->ip_base_len + st->packet_space;
977 1159
978 if (st->protocol == htons(ETH_P_IP)) { 1160 if (!st->header_unmap_len) {
979 struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off); 1161 /* Allocate and insert a DMA-mapped header buffer. */
1162 struct tcphdr *tsoh_th;
1163 unsigned ip_length;
1164 u8 *header;
1165 int rc;
1166
1167 header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
1168 if (!header)
1169 return -ENOMEM;
980 1170
981 tsoh_iph->tot_len = htons(ip_length); 1171 tsoh_th = (struct tcphdr *)(header + st->tcp_off);
1172
1173 /* Copy and update the headers. */
1174 memcpy(header, skb->data, st->header_len);
1175
1176 tsoh_th->seq = htonl(st->seqnum);
1177 ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;
1178
1179 ip_length = st->ip_base_len + st->packet_space;
1180
1181 if (st->protocol == htons(ETH_P_IP)) {
1182 struct iphdr *tsoh_iph =
1183 (struct iphdr *)(header + st->ip_off);
1184
1185 tsoh_iph->tot_len = htons(ip_length);
1186 tsoh_iph->id = htons(st->ipv4_id);
1187 } else {
1188 struct ipv6hdr *tsoh_iph =
1189 (struct ipv6hdr *)(header + st->ip_off);
1190
1191 tsoh_iph->payload_len = htons(ip_length);
1192 }
982 1193
983 /* Linux leaves suitable gaps in the IP ID space for us to fill. */ 1194 rc = efx_tso_put_header(tx_queue, buffer, header);
984 tsoh_iph->id = htons(st->ipv4_id); 1195 if (unlikely(rc))
985 st->ipv4_id++; 1196 return rc;
986 } else { 1197 } else {
987 struct ipv6hdr *tsoh_iph = 1198 /* Send the original headers with a TSO option descriptor
988 (struct ipv6hdr *)(header + st->ip_off); 1199 * in front
1200 */
1201 u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;
989 1202
990 tsoh_iph->payload_len = htons(ip_length); 1203 buffer->flags = EFX_TX_BUF_OPTION;
1204 buffer->len = 0;
1205 buffer->unmap_len = 0;
1206 EFX_POPULATE_QWORD_5(buffer->option,
1207 ESF_DZ_TX_DESC_IS_OPT, 1,
1208 ESF_DZ_TX_OPTION_TYPE,
1209 ESE_DZ_TX_OPTION_DESC_TSO,
1210 ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
1211 ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
1212 ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
1213 ++tx_queue->insert_count;
1214
1215 /* We mapped the headers in tso_start(). Unmap them
1216 * when the last segment is completed.
1217 */
1218 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
1219 buffer->dma_addr = st->header_dma_addr;
1220 buffer->len = st->header_len;
1221 if (is_last) {
1222 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
1223 buffer->unmap_len = st->header_unmap_len;
1224 buffer->dma_offset = 0;
1225 /* Ensure we only unmap them once in case of a
1226 * later DMA mapping error and rollback
1227 */
1228 st->header_unmap_len = 0;
1229 } else {
1230 buffer->flags = EFX_TX_BUF_CONT;
1231 buffer->unmap_len = 0;
1232 }
1233 ++tx_queue->insert_count;
991 } 1234 }
992 1235
993 rc = efx_tso_put_header(tx_queue, buffer, header); 1236 st->seqnum += skb_shinfo(skb)->gso_size;
994 if (unlikely(rc)) 1237
995 return rc; 1238 /* Linux leaves suitable gaps in the IP ID space for us to fill. */
1239 ++st->ipv4_id;
996 1240
997 ++tx_queue->tso_packets; 1241 ++tx_queue->tso_packets;
998 1242
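The tcp_flags_clear values above index byte 13 of the TCP header, which holds the flag bits (FIN=0x01, SYN=0x02, RST=0x04, PSH=0x08, ACK=0x10, URG=0x20). So 0x09 strips FIN|PSH from every segment except the last, which keeps the sender's original flags:

/* non-final segment: clear FIN and PSH in the copied header */
((u8 *)tsoh_th)[13] &= ~0x09;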
@@ -1023,12 +1267,11 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1023 1267
1024 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 1268 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
1025 1269
1026 tso_start(&state, skb); 1270 rc = tso_start(&state, efx, skb);
1271 if (rc)
1272 goto mem_err;
1027 1273
1028 /* Assume that skb header area contains exactly the headers, and 1274 if (likely(state.in_len == 0)) {
1029 * all payload is in the frag list.
1030 */
1031 if (skb_headlen(skb) == state.header_len) {
1032 /* Grab the first payload fragment. */ 1275 /* Grab the first payload fragment. */
1033 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); 1276 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
1034 frag_i = 0; 1277 frag_i = 0;
@@ -1037,9 +1280,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1037 if (rc) 1280 if (rc)
1038 goto mem_err; 1281 goto mem_err;
1039 } else { 1282 } else {
1040 rc = tso_get_head_fragment(&state, efx, skb); 1283 /* Payload starts in the header area. */
1041 if (rc)
1042 goto mem_err;
1043 frag_i = -1; 1284 frag_i = -1;
1044 } 1285 }
1045 1286
@@ -1091,6 +1332,11 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1091 state.unmap_len, DMA_TO_DEVICE); 1332 state.unmap_len, DMA_TO_DEVICE);
1092 } 1333 }
1093 1334
1335 /* Free the header DMA mapping, if using option descriptors */
1336 if (state.header_unmap_len)
1337 dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
1338 state.header_unmap_len, DMA_TO_DEVICE);
1339
1094 efx_enqueue_unwind(tx_queue); 1340 efx_enqueue_unwind(tx_queue);
1095 return NETDEV_TX_OK; 1341 return NETDEV_TX_OK;
1096} 1342}