author:    David S. Miller <davem@davemloft.net>  2014-07-30 21:47:51 -0400
committer: David S. Miller <davem@davemloft.net>  2014-07-30 21:47:51 -0400
commit:    6b1bed793d69ca162eac7a2a2f4f629fc4d0d325
tree:      e6bcfb2af38d8f8a7eabbbaaef6e5f004bd67dd6
parent:    80019d310f9fb4f8c9eeda0a5d76144ad3132fdf
parent:    fca2d99428473884e67ef8ea1586e58151ed6ac3
Merge branch 'amd-xgbe-next'
Tom Lendacky says:
====================
amd-xgbe: AMD XGBE driver update 2014-07-25
This patch series is dependent on the following patch that was
applied to the net tree and needs to be applied to the net-next
tree:
332cfc823d18 - amd-xgbe: Fix error return code in xgbe_probe()
The following series of patches includes fixes and new support in the
driver.
- Device bindings documentation update
- Hardware timestamp support
- 2.5GbE support changes
- FIFO sizes based on active queues/rings
- Phylib driver updates for:
- Rate change completion check
- KR training initiation
- Auto-negotiation results
- Traffic class support, including DCB support
This patch series is based on net-next.
Changes in V2:
- Remove DBGPR(...., __func__) calls
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
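The hardware timestamp support added by this series is exposed through the kernel's standard timestamping interface rather than a driver-specific API. As a rough usage sketch (user-space side, not part of this patch set; "eth0" is a placeholder interface name), an application enables hardware timestamping with the SIOCSHWTSTAMP ioctl and struct hwtstamp_config:

	/* Sketch: enable hardware Tx timestamps and PTPv2 Rx filtering. */
	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	static int enable_hw_tstamp(int sock)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;

		memset(&cfg, 0, sizeof(cfg));
		cfg.tx_type = HWTSTAMP_TX_ON;			/* timestamp transmitted packets */
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;	/* PTP event messages only */

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
	}

The driver-side handling of this request lives in the xgbe-drv.c and xgbe-ptp.c changes of this series.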
 Documentation/devicetree/bindings/net/amd-xgbe-phy.txt |   6
 Documentation/devicetree/bindings/net/amd-xgbe.txt     |  17
 drivers/net/ethernet/amd/Kconfig                       |  11
 drivers/net/ethernet/amd/xgbe/Makefile                 |   4
 drivers/net/ethernet/amd/xgbe/xgbe-common.h            |  93
 drivers/net/ethernet/amd/xgbe/xgbe-dcb.c               | 270
 drivers/net/ethernet/amd/xgbe/xgbe-desc.c              |   9
 drivers/net/ethernet/amd/xgbe/xgbe-dev.c               | 363
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c               | 401
 drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c           |  52
 drivers/net/ethernet/amd/xgbe/xgbe-main.c              |  36
 drivers/net/ethernet/amd/xgbe/xgbe-ptp.c               | 285
 drivers/net/ethernet/amd/xgbe/xgbe.h                   |  77
 drivers/net/phy/amd-xgbe-phy.c                         | 134
 14 files changed, 1631 insertions, 127 deletions
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
index d01ed63d3ebb..42409bfe04c4 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
@@ -8,10 +8,16 @@ Required properties:
   - SerDes integration registers (1/2)
   - SerDes integration registers (2/2)
 
+Optional properties:
+- amd,speed-set: Speed capabilities of the device
+    0 - 1GbE and 10GbE (default)
+    1 - 2.5GbE and 10GbE
+
 Example:
 	xgbe_phy@e1240800 {
 		compatible = "amd,xgbe-phy-seattle-v1a", "ethernet-phy-ieee802.3-c45";
 		reg = <0 0xe1240800 0 0x00400>,
 		      <0 0xe1250000 0 0x00060>,
 		      <0 0xe1250080 0 0x00004>;
+		amd,speed-set = <0>;
 	};
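For illustration, a consumer of the new optional property would read it with a default of 0 when it is absent. A minimal sketch (the helper name is hypothetical; the actual parsing in the amd-xgbe-phy driver may differ):

	/* Sketch: read the optional "amd,speed-set" binding, defaulting to
	 * 0 (1GbE and 10GbE) when the property is missing or malformed.
	 */
	#include <linux/of.h>

	static u32 xgbe_get_speed_set(struct device_node *np)
	{
		u32 speed_set = 0;	/* default: 1GbE and 10GbE */

		if (of_property_read_u32(np, "amd,speed-set", &speed_set))
			speed_set = 0;

		return speed_set;
	}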
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe.txt b/Documentation/devicetree/bindings/net/amd-xgbe.txt
index ea0c7908a3b8..41354f730beb 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe.txt
@@ -8,16 +8,21 @@ Required properties:
 - interrupt-parent: Should be the phandle for the interrupt controller
   that services interrupts for this device
 - interrupts: Should contain the amd-xgbe interrupt
-- clocks: Should be the DMA clock for the amd-xgbe device (used for
-  calculating the correct Rx interrupt watchdog timer value on a DMA
-  channel for coalescing)
-- clock-names: Should be the name of the DMA clock, "dma_clk"
+- clocks:
+  - DMA clock for the amd-xgbe device (used for calculating the
+    correct Rx interrupt watchdog timer value on a DMA channel
+    for coalescing)
+  - PTP clock for the amd-xgbe device
+- clock-names: Should be the names of the clocks
+  - "dma_clk" for the DMA clock
+  - "ptp_clk" for the PTP clock
 - phy-handle: See ethernet.txt file in the same directory
 - phy-mode: See ethernet.txt file in the same directory
 
 Optional properties:
 - mac-address: mac address to be assigned to the device. Can be overridden
   by UEFI.
+- dma-coherent: Present if dma operations are coherent
 
 Example:
 	xgbe@e0700000 {
@@ -26,8 +31,8 @@ Example:
 		<0 0xe0780000 0 0x80000>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 325 4>;
-		clocks = <&xgbe_clk>;
-		clock-names = "dma_clk";
+		clocks = <&xgbe_dma_clk>, <&xgbe_ptp_clk>;
+		clock-names = "dma_clk", "ptp_clk";
 		phy-handle = <&phy>;
 		phy-mode = "xgmii";
 		mac-address = [ 02 a1 a2 a3 a4 a5 ];
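The driver consumes the two named clocks through the common clock framework. Roughly (the helper name is illustrative; the actual lookup in xgbe-main.c may be structured differently):

	/* Sketch: look up the clocks named in the binding. */
	#include <linux/clk.h>
	#include <linux/err.h>

	static int xgbe_get_clocks(struct device *dev)
	{
		struct clk *dma_clk, *ptp_clk;

		dma_clk = devm_clk_get(dev, "dma_clk");	/* Rx coalescing timer base */
		if (IS_ERR(dma_clk))
			return PTR_ERR(dma_clk);

		ptp_clk = devm_clk_get(dev, "ptp_clk");	/* PTP/timestamp clock */
		if (IS_ERR(ptp_clk))
			return PTR_ERR(ptp_clk);

		return 0;
	}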
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 6e314dbba805..8319c99331b0 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -184,6 +184,7 @@ config AMD_XGBE
 	select AMD_XGBE_PHY
 	select BITREVERSE
 	select CRC32
+	select PTP_1588_CLOCK
 	---help---
 	  This driver supports the AMD 10GbE Ethernet device found on an
 	  AMD SoC.
@@ -191,4 +192,14 @@ config AMD_XGBE
 	  To compile this driver as a module, choose M here: the module
 	  will be called amd-xgbe.
 
+config AMD_XGBE_DCB
+	bool "Data Center Bridging (DCB) support"
+	default n
+	depends on AMD_XGBE && DCB
+	---help---
+	  Say Y here to enable Data Center Bridging (DCB) support in the
+	  driver.
+
+	  If unsure, say N.
+
 endif # NET_VENDOR_AMD
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
index 26cf9af1642f..171a7e68048d 100644
--- a/drivers/net/ethernet/amd/xgbe/Makefile
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -1,6 +1,8 @@
 obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
 
 amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
-		 xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o
+		 xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
+		 xgbe-ptp.o
 
+amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o
 amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 7ec80ac7043f..cc25a3a9e7cf 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -307,11 +307,24 @@ | |||
307 | #define MAC_MACA0LR 0x0304 | 307 | #define MAC_MACA0LR 0x0304 |
308 | #define MAC_MACA1HR 0x0308 | 308 | #define MAC_MACA1HR 0x0308 |
309 | #define MAC_MACA1LR 0x030c | 309 | #define MAC_MACA1LR 0x030c |
310 | #define MAC_TSCR 0x0d00 | ||
311 | #define MAC_SSIR 0x0d04 | ||
312 | #define MAC_STSR 0x0d08 | ||
313 | #define MAC_STNR 0x0d0c | ||
314 | #define MAC_STSUR 0x0d10 | ||
315 | #define MAC_STNUR 0x0d14 | ||
316 | #define MAC_TSAR 0x0d18 | ||
317 | #define MAC_TSSR 0x0d20 | ||
318 | #define MAC_TXSNR 0x0d30 | ||
319 | #define MAC_TXSSR 0x0d34 | ||
310 | 320 | ||
311 | #define MAC_QTFCR_INC 4 | 321 | #define MAC_QTFCR_INC 4 |
312 | #define MAC_MACA_INC 4 | 322 | #define MAC_MACA_INC 4 |
313 | #define MAC_HTR_INC 4 | 323 | #define MAC_HTR_INC 4 |
314 | 324 | ||
325 | #define MAC_RQC2_INC 4 | ||
326 | #define MAC_RQC2_Q_PER_REG 4 | ||
327 | |||
315 | /* MAC register entry bit positions and sizes */ | 328 | /* MAC register entry bit positions and sizes */ |
316 | #define MAC_HWF0R_ADDMACADRSEL_INDEX 18 | 329 | #define MAC_HWF0R_ADDMACADRSEL_INDEX 18 |
317 | #define MAC_HWF0R_ADDMACADRSEL_WIDTH 5 | 330 | #define MAC_HWF0R_ADDMACADRSEL_WIDTH 5 |
@@ -351,6 +364,8 @@ | |||
351 | #define MAC_HWF1R_HASHTBLSZ_WIDTH 3 | 364 | #define MAC_HWF1R_HASHTBLSZ_WIDTH 3 |
352 | #define MAC_HWF1R_L3L4FNUM_INDEX 27 | 365 | #define MAC_HWF1R_L3L4FNUM_INDEX 27 |
353 | #define MAC_HWF1R_L3L4FNUM_WIDTH 4 | 366 | #define MAC_HWF1R_L3L4FNUM_WIDTH 4 |
367 | #define MAC_HWF1R_NUMTC_INDEX 21 | ||
368 | #define MAC_HWF1R_NUMTC_WIDTH 3 | ||
354 | #define MAC_HWF1R_RSSEN_INDEX 20 | 369 | #define MAC_HWF1R_RSSEN_INDEX 20 |
355 | #define MAC_HWF1R_RSSEN_WIDTH 1 | 370 | #define MAC_HWF1R_RSSEN_WIDTH 1 |
356 | #define MAC_HWF1R_RXFIFOSIZE_INDEX 0 | 371 | #define MAC_HWF1R_RXFIFOSIZE_INDEX 0 |
@@ -373,12 +388,16 @@ | |||
373 | #define MAC_HWF2R_TXCHCNT_WIDTH 4 | 388 | #define MAC_HWF2R_TXCHCNT_WIDTH 4 |
374 | #define MAC_HWF2R_TXQCNT_INDEX 6 | 389 | #define MAC_HWF2R_TXQCNT_INDEX 6 |
375 | #define MAC_HWF2R_TXQCNT_WIDTH 4 | 390 | #define MAC_HWF2R_TXQCNT_WIDTH 4 |
391 | #define MAC_IER_TSIE_INDEX 12 | ||
392 | #define MAC_IER_TSIE_WIDTH 1 | ||
376 | #define MAC_ISR_MMCRXIS_INDEX 9 | 393 | #define MAC_ISR_MMCRXIS_INDEX 9 |
377 | #define MAC_ISR_MMCRXIS_WIDTH 1 | 394 | #define MAC_ISR_MMCRXIS_WIDTH 1 |
378 | #define MAC_ISR_MMCTXIS_INDEX 10 | 395 | #define MAC_ISR_MMCTXIS_INDEX 10 |
379 | #define MAC_ISR_MMCTXIS_WIDTH 1 | 396 | #define MAC_ISR_MMCTXIS_WIDTH 1 |
380 | #define MAC_ISR_PMTIS_INDEX 4 | 397 | #define MAC_ISR_PMTIS_INDEX 4 |
381 | #define MAC_ISR_PMTIS_WIDTH 1 | 398 | #define MAC_ISR_PMTIS_WIDTH 1 |
399 | #define MAC_ISR_TSIS_INDEX 12 | ||
400 | #define MAC_ISR_TSIS_WIDTH 1 | ||
382 | #define MAC_MACA1HR_AE_INDEX 31 | 401 | #define MAC_MACA1HR_AE_INDEX 31 |
383 | #define MAC_MACA1HR_AE_WIDTH 1 | 402 | #define MAC_MACA1HR_AE_WIDTH 1 |
384 | #define MAC_PFR_HMC_INDEX 2 | 403 | #define MAC_PFR_HMC_INDEX 2 |
@@ -419,14 +438,56 @@ | |||
419 | #define MAC_RCR_LM_WIDTH 1 | 438 | #define MAC_RCR_LM_WIDTH 1 |
420 | #define MAC_RCR_RE_INDEX 0 | 439 | #define MAC_RCR_RE_INDEX 0 |
421 | #define MAC_RCR_RE_WIDTH 1 | 440 | #define MAC_RCR_RE_WIDTH 1 |
441 | #define MAC_RFCR_PFCE_INDEX 8 | ||
442 | #define MAC_RFCR_PFCE_WIDTH 1 | ||
422 | #define MAC_RFCR_RFE_INDEX 0 | 443 | #define MAC_RFCR_RFE_INDEX 0 |
423 | #define MAC_RFCR_RFE_WIDTH 1 | 444 | #define MAC_RFCR_RFE_WIDTH 1 |
445 | #define MAC_RFCR_UP_INDEX 1 | ||
446 | #define MAC_RFCR_UP_WIDTH 1 | ||
424 | #define MAC_RQC0R_RXQ0EN_INDEX 0 | 447 | #define MAC_RQC0R_RXQ0EN_INDEX 0 |
425 | #define MAC_RQC0R_RXQ0EN_WIDTH 2 | 448 | #define MAC_RQC0R_RXQ0EN_WIDTH 2 |
449 | #define MAC_SSIR_SNSINC_INDEX 8 | ||
450 | #define MAC_SSIR_SNSINC_WIDTH 8 | ||
451 | #define MAC_SSIR_SSINC_INDEX 16 | ||
452 | #define MAC_SSIR_SSINC_WIDTH 8 | ||
426 | #define MAC_TCR_SS_INDEX 29 | 453 | #define MAC_TCR_SS_INDEX 29 |
427 | #define MAC_TCR_SS_WIDTH 2 | 454 | #define MAC_TCR_SS_WIDTH 2 |
428 | #define MAC_TCR_TE_INDEX 0 | 455 | #define MAC_TCR_TE_INDEX 0 |
429 | #define MAC_TCR_TE_WIDTH 1 | 456 | #define MAC_TCR_TE_WIDTH 1 |
457 | #define MAC_TSCR_AV8021ASMEN_INDEX 28 | ||
458 | #define MAC_TSCR_AV8021ASMEN_WIDTH 1 | ||
459 | #define MAC_TSCR_SNAPTYPSEL_INDEX 16 | ||
460 | #define MAC_TSCR_SNAPTYPSEL_WIDTH 2 | ||
461 | #define MAC_TSCR_TSADDREG_INDEX 5 | ||
462 | #define MAC_TSCR_TSADDREG_WIDTH 1 | ||
463 | #define MAC_TSCR_TSCFUPDT_INDEX 1 | ||
464 | #define MAC_TSCR_TSCFUPDT_WIDTH 1 | ||
465 | #define MAC_TSCR_TSCTRLSSR_INDEX 9 | ||
466 | #define MAC_TSCR_TSCTRLSSR_WIDTH 1 | ||
467 | #define MAC_TSCR_TSENA_INDEX 0 | ||
468 | #define MAC_TSCR_TSENA_WIDTH 1 | ||
469 | #define MAC_TSCR_TSENALL_INDEX 8 | ||
470 | #define MAC_TSCR_TSENALL_WIDTH 1 | ||
471 | #define MAC_TSCR_TSEVNTENA_INDEX 14 | ||
472 | #define MAC_TSCR_TSEVNTENA_WIDTH 1 | ||
473 | #define MAC_TSCR_TSINIT_INDEX 2 | ||
474 | #define MAC_TSCR_TSINIT_WIDTH 1 | ||
475 | #define MAC_TSCR_TSIPENA_INDEX 11 | ||
476 | #define MAC_TSCR_TSIPENA_WIDTH 1 | ||
477 | #define MAC_TSCR_TSIPV4ENA_INDEX 13 | ||
478 | #define MAC_TSCR_TSIPV4ENA_WIDTH 1 | ||
479 | #define MAC_TSCR_TSIPV6ENA_INDEX 12 | ||
480 | #define MAC_TSCR_TSIPV6ENA_WIDTH 1 | ||
481 | #define MAC_TSCR_TSMSTRENA_INDEX 15 | ||
482 | #define MAC_TSCR_TSMSTRENA_WIDTH 1 | ||
483 | #define MAC_TSCR_TSVER2ENA_INDEX 10 | ||
484 | #define MAC_TSCR_TSVER2ENA_WIDTH 1 | ||
485 | #define MAC_TSCR_TXTSSTSM_INDEX 24 | ||
486 | #define MAC_TSCR_TXTSSTSM_WIDTH 1 | ||
487 | #define MAC_TSSR_TXTSC_INDEX 15 | ||
488 | #define MAC_TSSR_TXTSC_WIDTH 1 | ||
489 | #define MAC_TXSNR_TXTSSTSMIS_INDEX 31 | ||
490 | #define MAC_TXSNR_TXTSSTSMIS_WIDTH 1 | ||
430 | #define MAC_VLANHTR_VLHT_INDEX 0 | 491 | #define MAC_VLANHTR_VLHT_INDEX 0 |
431 | #define MAC_VLANHTR_VLHT_WIDTH 16 | 492 | #define MAC_VLANHTR_VLHT_WIDTH 16 |
432 | #define MAC_VLANIR_VLTI_INDEX 20 | 493 | #define MAC_VLANIR_VLTI_INDEX 20 |
@@ -652,6 +713,8 @@ | |||
652 | 713 | ||
653 | #define MTL_RQDCM_INC 4 | 714 | #define MTL_RQDCM_INC 4 |
654 | #define MTL_RQDCM_Q_PER_REG 4 | 715 | #define MTL_RQDCM_Q_PER_REG 4 |
716 | #define MTL_TCPM_INC 4 | ||
717 | #define MTL_TCPM_TC_PER_REG 4 | ||
655 | 718 | ||
656 | /* MTL register entry bit positions and sizes */ | 719 | /* MTL register entry bit positions and sizes */ |
657 | #define MTL_OMR_ETSALG_INDEX 5 | 720 | #define MTL_OMR_ETSALG_INDEX 5 |
@@ -670,9 +733,6 @@ | |||
670 | #define MTL_Q_TQOMR 0x00 | 733 | #define MTL_Q_TQOMR 0x00 |
671 | #define MTL_Q_TQUR 0x04 | 734 | #define MTL_Q_TQUR 0x04 |
672 | #define MTL_Q_TQDR 0x08 | 735 | #define MTL_Q_TQDR 0x08 |
673 | #define MTL_Q_TCECR 0x10 | ||
674 | #define MTL_Q_TCESR 0x14 | ||
675 | #define MTL_Q_TCQWR 0x18 | ||
676 | #define MTL_Q_RQOMR 0x40 | 736 | #define MTL_Q_RQOMR 0x40 |
677 | #define MTL_Q_RQMPOCR 0x44 | 737 | #define MTL_Q_RQMPOCR 0x44 |
678 | #define MTL_Q_RQDR 0x4c | 738 | #define MTL_Q_RQDR 0x4c |
@@ -680,8 +740,6 @@ | |||
680 | #define MTL_Q_ISR 0x74 | 740 | #define MTL_Q_ISR 0x74 |
681 | 741 | ||
682 | /* MTL queue register entry bit positions and sizes */ | 742 | /* MTL queue register entry bit positions and sizes */ |
683 | #define MTL_Q_TCQWR_QW_INDEX 0 | ||
684 | #define MTL_Q_TCQWR_QW_WIDTH 21 | ||
685 | #define MTL_Q_RQOMR_EHFC_INDEX 7 | 743 | #define MTL_Q_RQOMR_EHFC_INDEX 7 |
686 | #define MTL_Q_RQOMR_EHFC_WIDTH 1 | 744 | #define MTL_Q_RQOMR_EHFC_WIDTH 1 |
687 | #define MTL_Q_RQOMR_RFA_INDEX 8 | 745 | #define MTL_Q_RQOMR_RFA_INDEX 8 |
@@ -696,6 +754,8 @@ | |||
696 | #define MTL_Q_RQOMR_RTC_WIDTH 2 | 754 | #define MTL_Q_RQOMR_RTC_WIDTH 2 |
697 | #define MTL_Q_TQOMR_FTQ_INDEX 0 | 755 | #define MTL_Q_TQOMR_FTQ_INDEX 0 |
698 | #define MTL_Q_TQOMR_FTQ_WIDTH 1 | 756 | #define MTL_Q_TQOMR_FTQ_WIDTH 1 |
757 | #define MTL_Q_TQOMR_Q2TCMAP_INDEX 8 | ||
758 | #define MTL_Q_TQOMR_Q2TCMAP_WIDTH 3 | ||
699 | #define MTL_Q_TQOMR_TQS_INDEX 16 | 759 | #define MTL_Q_TQOMR_TQS_INDEX 16 |
700 | #define MTL_Q_TQOMR_TQS_WIDTH 10 | 760 | #define MTL_Q_TQOMR_TQS_WIDTH 10 |
701 | #define MTL_Q_TQOMR_TSF_INDEX 1 | 761 | #define MTL_Q_TQOMR_TSF_INDEX 1 |
@@ -742,10 +802,14 @@ | |||
742 | #define MTL_TC_INC MTL_Q_INC | 802 | #define MTL_TC_INC MTL_Q_INC |
743 | 803 | ||
744 | #define MTL_TC_ETSCR 0x10 | 804 | #define MTL_TC_ETSCR 0x10 |
805 | #define MTL_TC_ETSSR 0x14 | ||
806 | #define MTL_TC_QWR 0x18 | ||
745 | 807 | ||
746 | /* MTL traffic class register entry bit positions and sizes */ | 808 | /* MTL traffic class register entry bit positions and sizes */ |
747 | #define MTL_TC_ETSCR_TSA_INDEX 0 | 809 | #define MTL_TC_ETSCR_TSA_INDEX 0 |
748 | #define MTL_TC_ETSCR_TSA_WIDTH 2 | 810 | #define MTL_TC_ETSCR_TSA_WIDTH 2 |
811 | #define MTL_TC_QWR_QW_INDEX 0 | ||
812 | #define MTL_TC_QWR_QW_WIDTH 21 | ||
749 | 813 | ||
750 | /* MTL traffic class register value */ | 814 | /* MTL traffic class register value */ |
751 | #define MTL_TSA_SP 0x00 | 815 | #define MTL_TSA_SP 0x00 |
@@ -778,9 +842,19 @@ | |||
778 | #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 | 842 | #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 |
779 | #define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2 | 843 | #define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2 |
780 | #define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1 | 844 | #define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1 |
845 | #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3 | ||
846 | #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1 | ||
847 | #define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4 | ||
848 | #define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1 | ||
849 | #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5 | ||
850 | #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 | ||
781 | 851 | ||
782 | #define RX_NORMAL_DESC0_OVT_INDEX 0 | 852 | #define RX_NORMAL_DESC0_OVT_INDEX 0 |
783 | #define RX_NORMAL_DESC0_OVT_WIDTH 16 | 853 | #define RX_NORMAL_DESC0_OVT_WIDTH 16 |
854 | #define RX_NORMAL_DESC3_CDA_INDEX 27 | ||
855 | #define RX_NORMAL_DESC3_CDA_WIDTH 1 | ||
856 | #define RX_NORMAL_DESC3_CTXT_INDEX 30 | ||
857 | #define RX_NORMAL_DESC3_CTXT_WIDTH 1 | ||
784 | #define RX_NORMAL_DESC3_ES_INDEX 15 | 858 | #define RX_NORMAL_DESC3_ES_INDEX 15 |
785 | #define RX_NORMAL_DESC3_ES_WIDTH 1 | 859 | #define RX_NORMAL_DESC3_ES_WIDTH 1 |
786 | #define RX_NORMAL_DESC3_ETLT_INDEX 16 | 860 | #define RX_NORMAL_DESC3_ETLT_INDEX 16 |
@@ -794,12 +868,19 @@ | |||
794 | #define RX_NORMAL_DESC3_PL_INDEX 0 | 868 | #define RX_NORMAL_DESC3_PL_INDEX 0 |
795 | #define RX_NORMAL_DESC3_PL_WIDTH 14 | 869 | #define RX_NORMAL_DESC3_PL_WIDTH 14 |
796 | 870 | ||
871 | #define RX_CONTEXT_DESC3_TSA_INDEX 4 | ||
872 | #define RX_CONTEXT_DESC3_TSA_WIDTH 1 | ||
873 | #define RX_CONTEXT_DESC3_TSD_INDEX 6 | ||
874 | #define RX_CONTEXT_DESC3_TSD_WIDTH 1 | ||
875 | |||
797 | #define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0 | 876 | #define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0 |
798 | #define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1 | 877 | #define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1 |
799 | #define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1 | 878 | #define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1 |
800 | #define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1 | 879 | #define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1 |
801 | #define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2 | 880 | #define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2 |
802 | #define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 | 881 | #define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 |
882 | #define TX_PACKET_ATTRIBUTES_PTP_INDEX 3 | ||
883 | #define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1 | ||
803 | 884 | ||
804 | #define TX_CONTEXT_DESC2_MSS_INDEX 0 | 885 | #define TX_CONTEXT_DESC2_MSS_INDEX 0 |
805 | #define TX_CONTEXT_DESC2_MSS_WIDTH 15 | 886 | #define TX_CONTEXT_DESC2_MSS_WIDTH 15 |
@@ -816,6 +897,8 @@ | |||
816 | #define TX_NORMAL_DESC2_HL_B1L_WIDTH 14 | 897 | #define TX_NORMAL_DESC2_HL_B1L_WIDTH 14 |
817 | #define TX_NORMAL_DESC2_IC_INDEX 31 | 898 | #define TX_NORMAL_DESC2_IC_INDEX 31 |
818 | #define TX_NORMAL_DESC2_IC_WIDTH 1 | 899 | #define TX_NORMAL_DESC2_IC_WIDTH 1 |
900 | #define TX_NORMAL_DESC2_TTSE_INDEX 30 | ||
901 | #define TX_NORMAL_DESC2_TTSE_WIDTH 1 | ||
819 | #define TX_NORMAL_DESC2_VTIR_INDEX 14 | 902 | #define TX_NORMAL_DESC2_VTIR_INDEX 14 |
820 | #define TX_NORMAL_DESC2_VTIR_WIDTH 2 | 903 | #define TX_NORMAL_DESC2_VTIR_WIDTH 2 |
821 | #define TX_NORMAL_DESC3_CIC_INDEX 16 | 904 | #define TX_NORMAL_DESC3_CIC_INDEX 16 |
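Each register field added above is described by an _INDEX/_WIDTH pair, which the driver's accessor macros in this header turn into shift-and-mask operations built from the field name. The pattern is roughly the following sketch (the real XGMAC_*_BITS macros paste the prefix and field name together to pick up these constants):

	/* Sketch of how an _INDEX/_WIDTH pair is consumed. */
	static inline unsigned int get_bits(unsigned int reg_val,
					    unsigned int index, unsigned int width)
	{
		return (reg_val >> index) & ((1U << width) - 1);
	}

	static inline unsigned int set_bits(unsigned int reg_val, unsigned int index,
					    unsigned int width, unsigned int val)
	{
		reg_val &= ~(((1U << width) - 1) << index);
		reg_val |= (val & ((1U << width) - 1)) << index;
		return reg_val;
	}

	/* e.g. enabling timestamping in MAC_TSCR:
	 *   tscr = set_bits(tscr, MAC_TSCR_TSENA_INDEX, MAC_TSCR_TSENA_WIDTH, 1);
	 */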
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
new file mode 100644
index 000000000000..7d6a49b24321
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -0,0 +1,270 @@ | |||
1 | /* | ||
2 | * AMD 10Gb Ethernet driver | ||
3 | * | ||
4 | * This file is available to you under your choice of the following two | ||
5 | * licenses: | ||
6 | * | ||
7 | * License 1: GPLv2 | ||
8 | * | ||
9 | * Copyright (c) 2014 Advanced Micro Devices, Inc. | ||
10 | * | ||
11 | * This file is free software; you may copy, redistribute and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation, either version 2 of the License, or (at | ||
14 | * your option) any later version. | ||
15 | * | ||
16 | * This file is distributed in the hope that it will be useful, but | ||
17 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
19 | * General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
23 | * | ||
24 | * This file incorporates work covered by the following copyright and | ||
25 | * permission notice: | ||
26 | * The Synopsys DWC ETHER XGMAC Software Driver and documentation | ||
27 | * (hereinafter "Software") is an unsupported proprietary work of Synopsys, | ||
28 | * Inc. unless otherwise expressly agreed to in writing between Synopsys | ||
29 | * and you. | ||
30 | * | ||
31 | * The Software IS NOT an item of Licensed Software or Licensed Product | ||
32 | * under any End User Software License Agreement or Agreement for Licensed | ||
33 | * Product with Synopsys or any supplement thereto. Permission is hereby | ||
34 | * granted, free of charge, to any person obtaining a copy of this software | ||
35 | * annotated with this license and the Software, to deal in the Software | ||
36 | * without restriction, including without limitation the rights to use, | ||
37 | * copy, modify, merge, publish, distribute, sublicense, and/or sell copies | ||
38 | * of the Software, and to permit persons to whom the Software is furnished | ||
39 | * to do so, subject to the following conditions: | ||
40 | * | ||
41 | * The above copyright notice and this permission notice shall be included | ||
42 | * in all copies or substantial portions of the Software. | ||
43 | * | ||
44 | * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" | ||
45 | * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | ||
46 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A | ||
47 | * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS | ||
48 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
49 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
50 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
51 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
52 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
53 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF | ||
54 | * THE POSSIBILITY OF SUCH DAMAGE. | ||
55 | * | ||
56 | * | ||
57 | * License 2: Modified BSD | ||
58 | * | ||
59 | * Copyright (c) 2014 Advanced Micro Devices, Inc. | ||
60 | * All rights reserved. | ||
61 | * | ||
62 | * Redistribution and use in source and binary forms, with or without | ||
63 | * modification, are permitted provided that the following conditions are met: | ||
64 | * * Redistributions of source code must retain the above copyright | ||
65 | * notice, this list of conditions and the following disclaimer. | ||
66 | * * Redistributions in binary form must reproduce the above copyright | ||
67 | * notice, this list of conditions and the following disclaimer in the | ||
68 | * documentation and/or other materials provided with the distribution. | ||
69 | * * Neither the name of Advanced Micro Devices, Inc. nor the | ||
70 | * names of its contributors may be used to endorse or promote products | ||
71 | * derived from this software without specific prior written permission. | ||
72 | * | ||
73 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
74 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
75 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
76 | * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY | ||
77 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
78 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
79 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
80 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
81 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
82 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
83 | * | ||
84 | * This file incorporates work covered by the following copyright and | ||
85 | * permission notice: | ||
86 | * The Synopsys DWC ETHER XGMAC Software Driver and documentation | ||
87 | * (hereinafter "Software") is an unsupported proprietary work of Synopsys, | ||
88 | * Inc. unless otherwise expressly agreed to in writing between Synopsys | ||
89 | * and you. | ||
90 | * | ||
91 | * The Software IS NOT an item of Licensed Software or Licensed Product | ||
92 | * under any End User Software License Agreement or Agreement for Licensed | ||
93 | * Product with Synopsys or any supplement thereto. Permission is hereby | ||
94 | * granted, free of charge, to any person obtaining a copy of this software | ||
95 | * annotated with this license and the Software, to deal in the Software | ||
96 | * without restriction, including without limitation the rights to use, | ||
97 | * copy, modify, merge, publish, distribute, sublicense, and/or sell copies | ||
98 | * of the Software, and to permit persons to whom the Software is furnished | ||
99 | * to do so, subject to the following conditions: | ||
100 | * | ||
101 | * The above copyright notice and this permission notice shall be included | ||
102 | * in all copies or substantial portions of the Software. | ||
103 | * | ||
104 | * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" | ||
105 | * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | ||
106 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A | ||
107 | * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS | ||
108 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
109 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
110 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
111 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
112 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
113 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF | ||
114 | * THE POSSIBILITY OF SUCH DAMAGE. | ||
115 | */ | ||
116 | |||
117 | #include <linux/netdevice.h> | ||
118 | #include <net/dcbnl.h> | ||
119 | |||
120 | #include "xgbe.h" | ||
121 | #include "xgbe-common.h" | ||
122 | |||
123 | |||
124 | static int xgbe_dcb_ieee_getets(struct net_device *netdev, | ||
125 | struct ieee_ets *ets) | ||
126 | { | ||
127 | struct xgbe_prv_data *pdata = netdev_priv(netdev); | ||
128 | |||
129 | /* Set number of supported traffic classes */ | ||
130 | ets->ets_cap = pdata->hw_feat.tc_cnt; | ||
131 | |||
132 | if (pdata->ets) { | ||
133 | ets->cbs = pdata->ets->cbs; | ||
134 | memcpy(ets->tc_tx_bw, pdata->ets->tc_tx_bw, | ||
135 | sizeof(ets->tc_tx_bw)); | ||
136 | memcpy(ets->tc_tsa, pdata->ets->tc_tsa, | ||
137 | sizeof(ets->tc_tsa)); | ||
138 | memcpy(ets->prio_tc, pdata->ets->prio_tc, | ||
139 | sizeof(ets->prio_tc)); | ||
140 | } | ||
141 | |||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | static int xgbe_dcb_ieee_setets(struct net_device *netdev, | ||
146 | struct ieee_ets *ets) | ||
147 | { | ||
148 | struct xgbe_prv_data *pdata = netdev_priv(netdev); | ||
149 | unsigned int i, tc_ets, tc_ets_weight; | ||
150 | |||
151 | tc_ets = 0; | ||
152 | tc_ets_weight = 0; | ||
153 | for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { | ||
154 | DBGPR(" TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i, | ||
155 | ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]); | ||
156 | DBGPR(" PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]); | ||
157 | |||
158 | if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && | ||
159 | (i >= pdata->hw_feat.tc_cnt)) | ||
160 | return -EINVAL; | ||
161 | |||
162 | if (ets->prio_tc[i] >= pdata->hw_feat.tc_cnt) | ||
163 | return -EINVAL; | ||
164 | |||
165 | switch (ets->tc_tsa[i]) { | ||
166 | case IEEE_8021QAZ_TSA_STRICT: | ||
167 | break; | ||
168 | case IEEE_8021QAZ_TSA_ETS: | ||
169 | tc_ets = 1; | ||
170 | tc_ets_weight += ets->tc_tx_bw[i]; | ||
171 | break; | ||
172 | |||
173 | default: | ||
174 | return -EINVAL; | ||
175 | } | ||
176 | } | ||
177 | |||
178 | /* Weights must add up to 100% */ | ||
179 | if (tc_ets && (tc_ets_weight != 100)) | ||
180 | return -EINVAL; | ||
181 | |||
182 | if (!pdata->ets) { | ||
183 | pdata->ets = devm_kzalloc(pdata->dev, sizeof(*pdata->ets), | ||
184 | GFP_KERNEL); | ||
185 | if (!pdata->ets) | ||
186 | return -ENOMEM; | ||
187 | } | ||
188 | |||
189 | memcpy(pdata->ets, ets, sizeof(*pdata->ets)); | ||
190 | |||
191 | pdata->hw_if.config_dcb_tc(pdata); | ||
192 | |||
193 | return 0; | ||
194 | } | ||
195 | |||
196 | static int xgbe_dcb_ieee_getpfc(struct net_device *netdev, | ||
197 | struct ieee_pfc *pfc) | ||
198 | { | ||
199 | struct xgbe_prv_data *pdata = netdev_priv(netdev); | ||
200 | |||
201 | /* Set number of supported PFC traffic classes */ | ||
202 | pfc->pfc_cap = pdata->hw_feat.tc_cnt; | ||
203 | |||
204 | if (pdata->pfc) { | ||
205 | pfc->pfc_en = pdata->pfc->pfc_en; | ||
206 | pfc->mbc = pdata->pfc->mbc; | ||
207 | pfc->delay = pdata->pfc->delay; | ||
208 | } | ||
209 | |||
210 | return 0; | ||
211 | } | ||
212 | |||
213 | static int xgbe_dcb_ieee_setpfc(struct net_device *netdev, | ||
214 | struct ieee_pfc *pfc) | ||
215 | { | ||
216 | struct xgbe_prv_data *pdata = netdev_priv(netdev); | ||
217 | |||
218 | DBGPR(" cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n", | ||
219 | pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay); | ||
220 | |||
221 | if (!pdata->pfc) { | ||
222 | pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc), | ||
223 | GFP_KERNEL); | ||
224 | if (!pdata->pfc) | ||
225 | return -ENOMEM; | ||
226 | } | ||
227 | |||
228 | memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc)); | ||
229 | |||
230 | pdata->hw_if.config_dcb_pfc(pdata); | ||
231 | |||
232 | return 0; | ||
233 | } | ||
234 | |||
235 | static u8 xgbe_dcb_getdcbx(struct net_device *netdev) | ||
236 | { | ||
237 | return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; | ||
238 | } | ||
239 | |||
240 | static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx) | ||
241 | { | ||
242 | u8 support = xgbe_dcb_getdcbx(netdev); | ||
243 | |||
244 | DBGPR(" DCBX=%#hhx\n", dcbx); | ||
245 | |||
246 | if (dcbx & ~support) | ||
247 | return 1; | ||
248 | |||
249 | if ((dcbx & support) != support) | ||
250 | return 1; | ||
251 | |||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | static const struct dcbnl_rtnl_ops xgbe_dcbnl_ops = { | ||
256 | /* IEEE 802.1Qaz std */ | ||
257 | .ieee_getets = xgbe_dcb_ieee_getets, | ||
258 | .ieee_setets = xgbe_dcb_ieee_setets, | ||
259 | .ieee_getpfc = xgbe_dcb_ieee_getpfc, | ||
260 | .ieee_setpfc = xgbe_dcb_ieee_setpfc, | ||
261 | |||
262 | /* DCBX configuration */ | ||
263 | .getdcbx = xgbe_dcb_getdcbx, | ||
264 | .setdcbx = xgbe_dcb_setdcbx, | ||
265 | }; | ||
266 | |||
267 | const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void) | ||
268 | { | ||
269 | return &xgbe_dcbnl_ops; | ||
270 | } | ||
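xgbe_get_dcbnl_ops() only takes effect once the ops are attached to the net_device. Elsewhere in this series (in the xgbe-main.c/xgbe-drv.c changes, not shown in this excerpt) the hookup amounts to something like the following sketch:

	/* Sketch: attach the DCB ops when CONFIG_AMD_XGBE_DCB is enabled;
	 * the exact placement of this assignment is an assumption here.
	 */
	#ifdef CONFIG_AMD_XGBE_DCB
		netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
	#endif

With the ops registered, the dcbnl core routes IEEE 802.1Qaz ETS and PFC requests from user space into the getets/setets and getpfc/setpfc callbacks above.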
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index a9ce56d5e988..1c5d62e8dab6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -359,6 +359,15 @@ static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
 	rdata->len = 0;
 	rdata->interrupt = 0;
 	rdata->mapped_as_page = 0;
+
+	if (rdata->state_saved) {
+		rdata->state_saved = 0;
+		rdata->state.incomplete = 0;
+		rdata->state.context_next = 0;
+		rdata->state.skb = NULL;
+		rdata->state.len = 0;
+		rdata->state.error = 0;
+	}
 }
 
 static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 699cff5d3184..edaca4496264 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -131,7 +131,7 @@ static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata, | |||
131 | 131 | ||
132 | DBGPR("-->xgbe_usec_to_riwt\n"); | 132 | DBGPR("-->xgbe_usec_to_riwt\n"); |
133 | 133 | ||
134 | rate = clk_get_rate(pdata->sysclock); | 134 | rate = clk_get_rate(pdata->sysclk); |
135 | 135 | ||
136 | /* | 136 | /* |
137 | * Convert the input usec value to the watchdog timer value. Each | 137 | * Convert the input usec value to the watchdog timer value. Each |
@@ -154,7 +154,7 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata, | |||
154 | 154 | ||
155 | DBGPR("-->xgbe_riwt_to_usec\n"); | 155 | DBGPR("-->xgbe_riwt_to_usec\n"); |
156 | 156 | ||
157 | rate = clk_get_rate(pdata->sysclock); | 157 | rate = clk_get_rate(pdata->sysclk); |
158 | 158 | ||
159 | /* | 159 | /* |
160 | * Convert the input watchdog timer value to the usec value. Each | 160 | * Convert the input watchdog timer value to the usec value. Each |
@@ -247,7 +247,7 @@ static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val) | |||
247 | { | 247 | { |
248 | unsigned int i; | 248 | unsigned int i; |
249 | 249 | ||
250 | for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) | 250 | for (i = 0; i < pdata->rx_q_count; i++) |
251 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val); | 251 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val); |
252 | 252 | ||
253 | return 0; | 253 | return 0; |
@@ -257,7 +257,7 @@ static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val) | |||
257 | { | 257 | { |
258 | unsigned int i; | 258 | unsigned int i; |
259 | 259 | ||
260 | for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) | 260 | for (i = 0; i < pdata->tx_q_count; i++) |
261 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val); | 261 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val); |
262 | 262 | ||
263 | return 0; | 263 | return 0; |
@@ -268,7 +268,7 @@ static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata, | |||
268 | { | 268 | { |
269 | unsigned int i; | 269 | unsigned int i; |
270 | 270 | ||
271 | for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) | 271 | for (i = 0; i < pdata->rx_q_count; i++) |
272 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val); | 272 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val); |
273 | 273 | ||
274 | return 0; | 274 | return 0; |
@@ -279,7 +279,7 @@ static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata, | |||
279 | { | 279 | { |
280 | unsigned int i; | 280 | unsigned int i; |
281 | 281 | ||
282 | for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) | 282 | for (i = 0; i < pdata->tx_q_count; i++) |
283 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val); | 283 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val); |
284 | 284 | ||
285 | return 0; | 285 | return 0; |
@@ -343,12 +343,12 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) | |||
343 | unsigned int i; | 343 | unsigned int i; |
344 | 344 | ||
345 | /* Clear MTL flow control */ | 345 | /* Clear MTL flow control */ |
346 | for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) | 346 | for (i = 0; i < pdata->rx_q_count; i++) |
347 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); | 347 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); |
348 | 348 | ||
349 | /* Clear MAC flow control */ | 349 | /* Clear MAC flow control */ |
350 | max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; | 350 | max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; |
351 | q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count); | 351 | q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count); |
352 | reg = MAC_Q0TFCR; | 352 | reg = MAC_Q0TFCR; |
353 | for (i = 0; i < q_count; i++) { | 353 | for (i = 0; i < q_count; i++) { |
354 | reg_val = XGMAC_IOREAD(pdata, reg); | 354 | reg_val = XGMAC_IOREAD(pdata, reg); |
@@ -368,12 +368,12 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) | |||
368 | unsigned int i; | 368 | unsigned int i; |
369 | 369 | ||
370 | /* Set MTL flow control */ | 370 | /* Set MTL flow control */ |
371 | for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) | 371 | for (i = 0; i < pdata->rx_q_count; i++) |
372 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1); | 372 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1); |
373 | 373 | ||
374 | /* Set MAC flow control */ | 374 | /* Set MAC flow control */ |
375 | max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; | 375 | max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; |
376 | q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count); | 376 | q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count); |
377 | reg = MAC_Q0TFCR; | 377 | reg = MAC_Q0TFCR; |
378 | for (i = 0; i < q_count; i++) { | 378 | for (i = 0; i < q_count; i++) { |
379 | reg_val = XGMAC_IOREAD(pdata, reg); | 379 | reg_val = XGMAC_IOREAD(pdata, reg); |
@@ -407,7 +407,9 @@ static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata) | |||
407 | 407 | ||
408 | static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata) | 408 | static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata) |
409 | { | 409 | { |
410 | if (pdata->tx_pause) | 410 | struct ieee_pfc *pfc = pdata->pfc; |
411 | |||
412 | if (pdata->tx_pause || (pfc && pfc->pfc_en)) | ||
411 | xgbe_enable_tx_flow_control(pdata); | 413 | xgbe_enable_tx_flow_control(pdata); |
412 | else | 414 | else |
413 | xgbe_disable_tx_flow_control(pdata); | 415 | xgbe_disable_tx_flow_control(pdata); |
@@ -417,7 +419,9 @@ static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata) | |||
417 | 419 | ||
418 | static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata) | 420 | static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata) |
419 | { | 421 | { |
420 | if (pdata->rx_pause) | 422 | struct ieee_pfc *pfc = pdata->pfc; |
423 | |||
424 | if (pdata->rx_pause || (pfc && pfc->pfc_en)) | ||
421 | xgbe_enable_rx_flow_control(pdata); | 425 | xgbe_enable_rx_flow_control(pdata); |
422 | else | 426 | else |
423 | xgbe_disable_rx_flow_control(pdata); | 427 | xgbe_disable_rx_flow_control(pdata); |
@@ -427,8 +431,13 @@ static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata) | |||
427 | 431 | ||
428 | static void xgbe_config_flow_control(struct xgbe_prv_data *pdata) | 432 | static void xgbe_config_flow_control(struct xgbe_prv_data *pdata) |
429 | { | 433 | { |
434 | struct ieee_pfc *pfc = pdata->pfc; | ||
435 | |||
430 | xgbe_config_tx_flow_control(pdata); | 436 | xgbe_config_tx_flow_control(pdata); |
431 | xgbe_config_rx_flow_control(pdata); | 437 | xgbe_config_rx_flow_control(pdata); |
438 | |||
439 | XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, | ||
440 | (pfc && pfc->pfc_en) ? 1 : 0); | ||
432 | } | 441 | } |
433 | 442 | ||
434 | static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) | 443 | static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) |
@@ -492,8 +501,12 @@ static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata) | |||
492 | 501 | ||
493 | static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata) | 502 | static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata) |
494 | { | 503 | { |
495 | /* No MAC interrupts to be enabled */ | 504 | unsigned int mac_ier = 0; |
496 | XGMAC_IOWRITE(pdata, MAC_IER, 0); | 505 | |
506 | /* Enable Timestamp interrupt */ | ||
507 | XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1); | ||
508 | |||
509 | XGMAC_IOWRITE(pdata, MAC_IER, mac_ier); | ||
497 | 510 | ||
498 | /* Enable all counter interrupts */ | 511 | /* Enable all counter interrupts */ |
499 | XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff); | 512 | XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff); |
@@ -1012,6 +1025,180 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel) | |||
1012 | DBGPR("<--rx_desc_init\n"); | 1025 | DBGPR("<--rx_desc_init\n"); |
1013 | } | 1026 | } |
1014 | 1027 | ||
1028 | static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata, | ||
1029 | unsigned int addend) | ||
1030 | { | ||
1031 | /* Set the addend register value and tell the device */ | ||
1032 | XGMAC_IOWRITE(pdata, MAC_TSAR, addend); | ||
1033 | XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1); | ||
1034 | |||
1035 | /* Wait for addend update to complete */ | ||
1036 | while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG)) | ||
1037 | udelay(5); | ||
1038 | } | ||
1039 | |||
1040 | static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec, | ||
1041 | unsigned int nsec) | ||
1042 | { | ||
1043 | /* Set the time values and tell the device */ | ||
1044 | XGMAC_IOWRITE(pdata, MAC_STSUR, sec); | ||
1045 | XGMAC_IOWRITE(pdata, MAC_STNUR, nsec); | ||
1046 | XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1); | ||
1047 | |||
1048 | /* Wait for time update to complete */ | ||
1049 | while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT)) | ||
1050 | udelay(5); | ||
1051 | } | ||
1052 | |||
1053 | static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata) | ||
1054 | { | ||
1055 | u64 nsec; | ||
1056 | |||
1057 | nsec = XGMAC_IOREAD(pdata, MAC_STSR); | ||
1058 | nsec *= NSEC_PER_SEC; | ||
1059 | nsec += XGMAC_IOREAD(pdata, MAC_STNR); | ||
1060 | |||
1061 | return nsec; | ||
1062 | } | ||
1063 | |||
1064 | static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata) | ||
1065 | { | ||
1066 | unsigned int tx_snr; | ||
1067 | u64 nsec; | ||
1068 | |||
1069 | tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR); | ||
1070 | if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS)) | ||
1071 | return 0; | ||
1072 | |||
1073 | nsec = XGMAC_IOREAD(pdata, MAC_TXSSR); | ||
1074 | nsec *= NSEC_PER_SEC; | ||
1075 | nsec += tx_snr; | ||
1076 | |||
1077 | return nsec; | ||
1078 | } | ||
1079 | |||
1080 | static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet, | ||
1081 | struct xgbe_ring_desc *rdesc) | ||
1082 | { | ||
1083 | u64 nsec; | ||
1084 | |||
1085 | if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) && | ||
1086 | !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) { | ||
1087 | nsec = le32_to_cpu(rdesc->desc1); | ||
1088 | nsec <<= 32; | ||
1089 | nsec |= le32_to_cpu(rdesc->desc0); | ||
1090 | if (nsec != 0xffffffffffffffffULL) { | ||
1091 | packet->rx_tstamp = nsec; | ||
1092 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, | ||
1093 | RX_TSTAMP, 1); | ||
1094 | } | ||
1095 | } | ||
1096 | } | ||
1097 | |||
1098 | static int xgbe_config_tstamp(struct xgbe_prv_data *pdata, | ||
1099 | unsigned int mac_tscr) | ||
1100 | { | ||
1101 | /* Set one nano-second accuracy */ | ||
1102 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1); | ||
1103 | |||
1104 | /* Set fine timestamp update */ | ||
1105 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1); | ||
1106 | |||
1107 | /* Overwrite earlier timestamps */ | ||
1108 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1); | ||
1109 | |||
1110 | XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr); | ||
1111 | |||
1112 | /* Exit if timestamping is not enabled */ | ||
1113 | if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA)) | ||
1114 | return 0; | ||
1115 | |||
1116 | /* Initialize time registers */ | ||
1117 | XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC); | ||
1118 | XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC); | ||
1119 | xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend); | ||
1120 | xgbe_set_tstamp_time(pdata, 0, 0); | ||
1121 | |||
1122 | /* Initialize the timecounter */ | ||
1123 | timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, | ||
1124 | ktime_to_ns(ktime_get_real())); | ||
1125 | |||
1126 | return 0; | ||
1127 | } | ||
1128 | |||
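xgbe_get_tstamp_time() composes a 64-bit nanosecond value from the seconds/nanoseconds registers, and xgbe_config_tstamp() initializes a timecounter over it. The PTP clock code built on top (xgbe-ptp.c in this series, not shown here) follows the usual cyclecounter/timecounter pattern; a sketch, assuming the new accessor is exported through hw_if:

	/* Sketch only: callback and field names are assumptions, not the
	 * driver's actual xgbe-ptp.c code.
	 */
	#include <linux/clocksource.h>
	#include "xgbe.h"

	static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
	{
		struct xgbe_prv_data *pdata = container_of(cc, struct xgbe_prv_data,
							   tstamp_cc);

		return pdata->hw_if.get_tstamp_time(pdata);
	}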
1129 | static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) | ||
1130 | { | ||
1131 | struct ieee_ets *ets = pdata->ets; | ||
1132 | unsigned int total_weight, min_weight, weight; | ||
1133 | unsigned int i; | ||
1134 | |||
1135 | if (!ets) | ||
1136 | return; | ||
1137 | |||
1138 | /* Set Tx to deficit weighted round robin scheduling algorithm (when | ||
1139 | * traffic class is using ETS algorithm) | ||
1140 | */ | ||
1141 | XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR); | ||
1142 | |||
1143 | /* Set Traffic Class algorithms */ | ||
1144 | total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt; | ||
1145 | min_weight = total_weight / 100; | ||
1146 | if (!min_weight) | ||
1147 | min_weight = 1; | ||
1148 | |||
1149 | for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { | ||
1150 | switch (ets->tc_tsa[i]) { | ||
1151 | case IEEE_8021QAZ_TSA_STRICT: | ||
1152 | DBGPR(" TC%u using SP\n", i); | ||
1153 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, | ||
1154 | MTL_TSA_SP); | ||
1155 | break; | ||
1156 | case IEEE_8021QAZ_TSA_ETS: | ||
1157 | weight = total_weight * ets->tc_tx_bw[i] / 100; | ||
1158 | weight = clamp(weight, min_weight, total_weight); | ||
1159 | |||
1160 | DBGPR(" TC%u using DWRR (weight %u)\n", i, weight); | ||
1161 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, | ||
1162 | MTL_TSA_ETS); | ||
1163 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, | ||
1164 | weight); | ||
1165 | break; | ||
1166 | } | ||
1167 | } | ||
1168 | } | ||
1169 | |||
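The weight programmed into MTL_TC_QWR is the configured bandwidth share scaled by MTU and traffic class count, then clamped; a worked example makes the scaling concrete:

	/* Worked example with illustrative numbers: mtu = 1500, tc_cnt = 4,
	 * tc_tx_bw[i] = 25  =>  total_weight = 1500 * 4 = 6000,
	 * min_weight = 60, weight = clamp(6000 * 25 / 100, 60, 6000) = 1500
	 * programmed into MTL_TC_QWR for that traffic class.
	 */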
1170 | static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata) | ||
1171 | { | ||
1172 | struct ieee_pfc *pfc = pdata->pfc; | ||
1173 | struct ieee_ets *ets = pdata->ets; | ||
1174 | unsigned int mask, reg, reg_val; | ||
1175 | unsigned int tc, prio; | ||
1176 | |||
1177 | if (!pfc || !ets) | ||
1178 | return; | ||
1179 | |||
1180 | for (tc = 0; tc < pdata->hw_feat.tc_cnt; tc++) { | ||
1181 | mask = 0; | ||
1182 | for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { | ||
1183 | if ((pfc->pfc_en & (1 << prio)) && | ||
1184 | (ets->prio_tc[prio] == tc)) | ||
1185 | mask |= (1 << prio); | ||
1186 | } | ||
1187 | mask &= 0xff; | ||
1188 | |||
1189 | DBGPR(" TC%u PFC mask=%#x\n", tc, mask); | ||
1190 | reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG)); | ||
1191 | reg_val = XGMAC_IOREAD(pdata, reg); | ||
1192 | |||
1193 | reg_val &= ~(0xff << ((tc % MTL_TCPM_TC_PER_REG) << 3)); | ||
1194 | reg_val |= (mask << ((tc % MTL_TCPM_TC_PER_REG) << 3)); | ||
1195 | |||
1196 | XGMAC_IOWRITE(pdata, reg, reg_val); | ||
1197 | } | ||
1198 | |||
1199 | xgbe_config_flow_control(pdata); | ||
1200 | } | ||
1201 | |||
1015 | static void xgbe_pre_xmit(struct xgbe_channel *channel) | 1202 | static void xgbe_pre_xmit(struct xgbe_channel *channel) |
1016 | { | 1203 | { |
1017 | struct xgbe_prv_data *pdata = channel->pdata; | 1204 | struct xgbe_prv_data *pdata = channel->pdata; |
@@ -1110,6 +1297,10 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel) | |||
1110 | XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR, | 1297 | XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR, |
1111 | TX_NORMAL_DESC2_VLAN_INSERT); | 1298 | TX_NORMAL_DESC2_VLAN_INSERT); |
1112 | 1299 | ||
1300 | /* Timestamp enablement check */ | ||
1301 | if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) | ||
1302 | XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1); | ||
1303 | |||
1113 | /* Set IC bit based on Tx coalescing settings */ | 1304 | /* Set IC bit based on Tx coalescing settings */ |
1114 | XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); | 1305 | XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); |
1115 | if (tx_coalesce && (!tx_frames || | 1306 | if (tx_coalesce && (!tx_frames || |
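The PTP packet attribute tested above is expected to be set earlier in the transmit path when the stack requests a hardware timestamp for an skb; that side lives in the xgbe-drv.c changes, which are not part of this excerpt. Conceptually, and assuming a saved hwtstamp_config in the private data, the check looks like:

	/* Sketch (assumed placement in the xmit path, not shown in this diff). */
	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       PTP, 1);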
@@ -1245,6 +1436,25 @@ static int xgbe_dev_read(struct xgbe_channel *channel) | |||
1245 | xgbe_dump_rx_desc(ring, rdesc, ring->cur); | 1436 | xgbe_dump_rx_desc(ring, rdesc, ring->cur); |
1246 | #endif | 1437 | #endif |
1247 | 1438 | ||
1439 | if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) { | ||
1440 | /* Timestamp Context Descriptor */ | ||
1441 | xgbe_get_rx_tstamp(packet, rdesc); | ||
1442 | |||
1443 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, | ||
1444 | CONTEXT, 1); | ||
1445 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, | ||
1446 | CONTEXT_NEXT, 0); | ||
1447 | return 0; | ||
1448 | } | ||
1449 | |||
1450 | /* Normal Descriptor, be sure Context Descriptor bit is off */ | ||
1451 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0); | ||
1452 | |||
1453 | /* Indicate if a Context Descriptor is next */ | ||
1454 | if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA)) | ||
1455 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, | ||
1456 | CONTEXT_NEXT, 1); | ||
1457 | |||
1248 | /* Get the packet length */ | 1458 | /* Get the packet length */ |
1249 | rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); | 1459 | rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); |
1250 | 1460 | ||
@@ -1423,11 +1633,11 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) | |||
1423 | { | 1633 | { |
1424 | unsigned int i, count; | 1634 | unsigned int i, count; |
1425 | 1635 | ||
1426 | for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) | 1636 | for (i = 0; i < pdata->tx_q_count; i++) |
1427 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); | 1637 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); |
1428 | 1638 | ||
1429 | /* Poll Until Poll Condition */ | 1639 | /* Poll Until Poll Condition */ |
1430 | for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) { | 1640 | for (i = 0; i < pdata->tx_q_count; i++) { |
1431 | count = 2000; | 1641 | count = 2000; |
1432 | while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i, | 1642 | while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i, |
1433 | MTL_Q_TQOMR, FTQ)) | 1643 | MTL_Q_TQOMR, FTQ)) |
@@ -1479,14 +1689,15 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata) | |||
1479 | { | 1689 | { |
1480 | unsigned int i; | 1690 | unsigned int i; |
1481 | 1691 | ||
1482 | /* Set Tx to weighted round robin scheduling algorithm (when | 1692 | /* Set Tx to weighted round robin scheduling algorithm */ |
1483 | * traffic class is using ETS algorithm) | ||
1484 | */ | ||
1485 | XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR); | 1693 | XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR); |
1486 | 1694 | ||
1487 | /* Set Tx traffic classes to strict priority algorithm */ | 1695 | /* Set Tx traffic classes to use WRR algorithm with equal weights */ |
1488 | for (i = 0; i < XGBE_TC_CNT; i++) | 1696 | for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { |
1489 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, MTL_TSA_SP); | 1697 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, |
1698 | MTL_TSA_ETS); | ||
1699 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1); | ||
1700 | } | ||
1490 | 1701 | ||
1491 | /* Set Rx to strict priority algorithm */ | 1702 | /* Set Rx to strict priority algorithm */ |
1492 | XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); | 1703 | XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); |
@@ -1572,13 +1783,13 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata) | |||
1572 | unsigned int i; | 1783 | unsigned int i; |
1573 | 1784 | ||
1574 | fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size, | 1785 | fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size, |
1575 | pdata->hw_feat.tx_q_cnt); | 1786 | pdata->tx_q_count); |
1576 | 1787 | ||
1577 | for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) | 1788 | for (i = 0; i < pdata->tx_q_count; i++) |
1578 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size); | 1789 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size); |
1579 | 1790 | ||
1580 | netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n", | 1791 | netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n", |
1581 | pdata->hw_feat.tx_q_cnt, ((fifo_size + 1) * 256)); | 1792 | pdata->tx_q_count, ((fifo_size + 1) * 256)); |
1582 | } | 1793 | } |
1583 | 1794 | ||
1584 | static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) | 1795 | static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) |
@@ -1587,27 +1798,84 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) | |||
1587 | unsigned int i; | 1798 | unsigned int i; |
1588 | 1799 | ||
1589 | fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size, | 1800 | fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size, |
1590 | pdata->hw_feat.rx_q_cnt); | 1801 | pdata->rx_q_count); |
1591 | 1802 | ||
1592 | for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) | 1803 | for (i = 0; i < pdata->rx_q_count; i++) |
1593 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size); | 1804 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size); |
1594 | 1805 | ||
1595 | netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n", | 1806 | netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n", |
1596 | pdata->hw_feat.rx_q_cnt, ((fifo_size + 1) * 256)); | 1807 | pdata->rx_q_count, ((fifo_size + 1) * 256)); |
1597 | } | 1808 | } |
1598 | 1809 | ||
1599 | static void xgbe_config_rx_queue_mapping(struct xgbe_prv_data *pdata) | 1810 | static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) |
1600 | { | 1811 | { |
1601 | unsigned int i, reg, reg_val; | 1812 | unsigned int qptc, qptc_extra, queue; |
1602 | unsigned int q_count = pdata->hw_feat.rx_q_cnt; | 1813 | unsigned int prio_queues; |
1814 | unsigned int ppq, ppq_extra, prio; | ||
1815 | unsigned int mask; | ||
1816 | unsigned int i, j, reg, reg_val; | ||
1817 | |||
1818 | /* Map the MTL Tx Queues to Traffic Classes | ||
1819 | * Note: Tx Queues >= Traffic Classes | ||
1820 | */ | ||
1821 | qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; | ||
1822 | qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; | ||
1823 | |||
1824 | for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { | ||
1825 | for (j = 0; j < qptc; j++) { | ||
1826 | DBGPR(" TXq%u mapped to TC%u\n", queue, i); | ||
1827 | XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, | ||
1828 | Q2TCMAP, i); | ||
1829 | pdata->q2tc_map[queue++] = i; | ||
1830 | } | ||
1831 | |||
1832 | if (i < qptc_extra) { | ||
1833 | DBGPR(" TXq%u mapped to TC%u\n", queue, i); | ||
1834 | XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, | ||
1835 | Q2TCMAP, i); | ||
1836 | pdata->q2tc_map[queue++] = i; | ||
1837 | } | ||
1838 | } | ||
1839 | |||
1840 | /* Map the 8 VLAN priority values to available MTL Rx queues */ | ||
1841 | prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, | ||
1842 | pdata->rx_q_count); | ||
1843 | ppq = IEEE_8021QAZ_MAX_TCS / prio_queues; | ||
1844 | ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues; | ||
1845 | |||
1846 | reg = MAC_RQC2R; | ||
1847 | reg_val = 0; | ||
1848 | for (i = 0, prio = 0; i < prio_queues;) { | ||
1849 | mask = 0; | ||
1850 | for (j = 0; j < ppq; j++) { | ||
1851 | DBGPR(" PRIO%u mapped to RXq%u\n", prio, i); | ||
1852 | mask |= (1 << prio); | ||
1853 | pdata->prio2q_map[prio++] = i; | ||
1854 | } | ||
1855 | |||
1856 | if (i < ppq_extra) { | ||
1857 | DBGPR(" PRIO%u mapped to RXq%u\n", prio, i); | ||
1858 | mask |= (1 << prio); | ||
1859 | pdata->prio2q_map[prio++] = i; | ||
1860 | } | ||
1861 | |||
1862 | reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3)); | ||
1863 | |||
1864 | if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues)) | ||
1865 | continue; | ||
1866 | |||
1867 | XGMAC_IOWRITE(pdata, reg, reg_val); | ||
1868 | reg += MAC_RQC2_INC; | ||
1869 | reg_val = 0; | ||
1870 | } | ||
1603 | 1871 | ||
1604 | /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */ | 1872 | /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */ |
1605 | reg = MTL_RQDCM0R; | 1873 | reg = MTL_RQDCM0R; |
1606 | reg_val = 0; | 1874 | reg_val = 0; |
1607 | for (i = 0; i < q_count;) { | 1875 | for (i = 0; i < pdata->rx_q_count;) { |
1608 | reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3)); | 1876 | reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3)); |
1609 | 1877 | ||
1610 | if ((i % MTL_RQDCM_Q_PER_REG) && (i != q_count)) | 1878 | if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count)) |
1611 | continue; | 1879 | continue; |
1612 | 1880 | ||
1613 | XGMAC_IOWRITE(pdata, reg, reg_val); | 1881 | XGMAC_IOWRITE(pdata, reg, reg_val); |
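The new xgbe_config_queue_mapping() above spreads the Tx queues evenly over the hardware traffic classes, with one extra queue going to each of the first tx_q_count % tc_cnt classes, and spreads the eight VLAN priorities over the Rx queues in the same way. A standalone sketch of that distribution, using example counts that are not taken from this patch:

    #include <stdio.h>

    int main(void)
    {
        unsigned int txq_count = 8, tc_count = 3;          /* example sizes, not from the patch */
        unsigned int qptc = txq_count / tc_count;          /* queues per TC */
        unsigned int qptc_extra = txq_count % tc_count;    /* first TCs that get one more queue */
        unsigned int i, j, queue = 0;

        for (i = 0; i < tc_count; i++) {
            unsigned int n = qptc + (i < qptc_extra ? 1 : 0);

            for (j = 0; j < n; j++)
                printf("TXq%u -> TC%u\n", queue++, i);
        }

        return 0;
    }

With eight priorities and two Rx queues, the same arithmetic gives ppq = 4 and ppq_extra = 0, so RXq0 takes priorities 0-3 (mask 0x0f) and RXq1 takes priorities 4-7 (mask 0xf0); since each MAC_RQC2R register holds one 8-bit priority mask per queue, the single register written in that example would be 0xf00f.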
@@ -1621,7 +1889,7 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) | |||
1621 | { | 1889 | { |
1622 | unsigned int i; | 1890 | unsigned int i; |
1623 | 1891 | ||
1624 | for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) { | 1892 | for (i = 0; i < pdata->rx_q_count; i++) { |
1625 | /* Activate flow control when less than 4k left in fifo */ | 1893 | /* Activate flow control when less than 4k left in fifo */ |
1626 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2); | 1894 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2); |
1627 | 1895 | ||
@@ -2013,7 +2281,7 @@ static void xgbe_enable_tx(struct xgbe_prv_data *pdata) | |||
2013 | } | 2281 | } |
2014 | 2282 | ||
2015 | /* Enable each Tx queue */ | 2283 | /* Enable each Tx queue */ |
2016 | for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) | 2284 | for (i = 0; i < pdata->tx_q_count; i++) |
2017 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, | 2285 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, |
2018 | MTL_Q_ENABLED); | 2286 | MTL_Q_ENABLED); |
2019 | 2287 | ||
@@ -2030,7 +2298,7 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata) | |||
2030 | XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); | 2298 | XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); |
2031 | 2299 | ||
2032 | /* Disable each Tx queue */ | 2300 | /* Disable each Tx queue */ |
2033 | for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) | 2301 | for (i = 0; i < pdata->tx_q_count; i++) |
2034 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0); | 2302 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0); |
2035 | 2303 | ||
2036 | /* Disable each Tx DMA channel */ | 2304 | /* Disable each Tx DMA channel */ |
@@ -2059,7 +2327,7 @@ static void xgbe_enable_rx(struct xgbe_prv_data *pdata) | |||
2059 | 2327 | ||
2060 | /* Enable each Rx queue */ | 2328 | /* Enable each Rx queue */ |
2061 | reg_val = 0; | 2329 | reg_val = 0; |
2062 | for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) | 2330 | for (i = 0; i < pdata->rx_q_count; i++) |
2063 | reg_val |= (0x02 << (i << 1)); | 2331 | reg_val |= (0x02 << (i << 1)); |
2064 | XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val); | 2332 | XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val); |
2065 | 2333 | ||
@@ -2193,9 +2461,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata) | |||
2193 | * Initialize MTL related features | 2461 | * Initialize MTL related features |
2194 | */ | 2462 | */ |
2195 | xgbe_config_mtl_mode(pdata); | 2463 | xgbe_config_mtl_mode(pdata); |
2196 | xgbe_config_rx_queue_mapping(pdata); | 2464 | xgbe_config_queue_mapping(pdata); |
2197 | /*TODO: Program the priorities mapped to the Selected Traffic Classes | ||
2198 | in MTL_TC_Prty_Map0-3 registers */ | ||
2199 | xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode); | 2465 | xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode); |
2200 | xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode); | 2466 | xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode); |
2201 | xgbe_config_tx_threshold(pdata, pdata->tx_threshold); | 2467 | xgbe_config_tx_threshold(pdata, pdata->tx_threshold); |
@@ -2203,15 +2469,13 @@ static int xgbe_init(struct xgbe_prv_data *pdata) | |||
2203 | xgbe_config_tx_fifo_size(pdata); | 2469 | xgbe_config_tx_fifo_size(pdata); |
2204 | xgbe_config_rx_fifo_size(pdata); | 2470 | xgbe_config_rx_fifo_size(pdata); |
2205 | xgbe_config_flow_control_threshold(pdata); | 2471 | xgbe_config_flow_control_threshold(pdata); |
2206 | /*TODO: Queue to Traffic Class Mapping (Q2TCMAP) */ | ||
2207 | /*TODO: Error Packet and undersized good Packet forwarding enable | 2472 | /*TODO: Error Packet and undersized good Packet forwarding enable |
2208 | (FEP and FUP) | 2473 | (FEP and FUP) |
2209 | */ | 2474 | */ |
2475 | xgbe_config_dcb_tc(pdata); | ||
2476 | xgbe_config_dcb_pfc(pdata); | ||
2210 | xgbe_enable_mtl_interrupts(pdata); | 2477 | xgbe_enable_mtl_interrupts(pdata); |
2211 | 2478 | ||
2212 | /* Transmit Class Weight */ | ||
2213 | XGMAC_IOWRITE_BITS(pdata, MTL_Q_TCQWR, QW, 0x10); | ||
2214 | |||
2215 | /* | 2479 | /* |
2216 | * Initialize MAC related features | 2480 | * Initialize MAC related features |
2217 | */ | 2481 | */ |
@@ -2313,5 +2577,16 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) | |||
2313 | hw_if->rx_mmc_int = xgbe_rx_mmc_int; | 2577 | hw_if->rx_mmc_int = xgbe_rx_mmc_int; |
2314 | hw_if->read_mmc_stats = xgbe_read_mmc_stats; | 2578 | hw_if->read_mmc_stats = xgbe_read_mmc_stats; |
2315 | 2579 | ||
2580 | /* For PTP config */ | ||
2581 | hw_if->config_tstamp = xgbe_config_tstamp; | ||
2582 | hw_if->update_tstamp_addend = xgbe_update_tstamp_addend; | ||
2583 | hw_if->set_tstamp_time = xgbe_set_tstamp_time; | ||
2584 | hw_if->get_tstamp_time = xgbe_get_tstamp_time; | ||
2585 | hw_if->get_tx_tstamp = xgbe_get_tx_tstamp; | ||
2586 | |||
2587 | /* For Data Center Bridging config */ | ||
2588 | hw_if->config_dcb_tc = xgbe_config_dcb_tc; | ||
2589 | hw_if->config_dcb_pfc = xgbe_config_dcb_pfc; | ||
2590 | |||
2316 | DBGPR("<--xgbe_init_function_ptrs\n"); | 2591 | DBGPR("<--xgbe_init_function_ptrs\n"); |
2317 | } | 2592 | } |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 344e6b19ec0e..3bf3c0194ad3 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c | |||
@@ -121,6 +121,7 @@ | |||
121 | #include <net/busy_poll.h> | 121 | #include <net/busy_poll.h> |
122 | #include <linux/clk.h> | 122 | #include <linux/clk.h> |
123 | #include <linux/if_ether.h> | 123 | #include <linux/if_ether.h> |
124 | #include <linux/net_tstamp.h> | ||
124 | 125 | ||
125 | #include "xgbe.h" | 126 | #include "xgbe.h" |
126 | #include "xgbe-common.h" | 127 | #include "xgbe-common.h" |
@@ -202,7 +203,7 @@ static irqreturn_t xgbe_isr(int irq, void *data) | |||
202 | struct xgbe_hw_if *hw_if = &pdata->hw_if; | 203 | struct xgbe_hw_if *hw_if = &pdata->hw_if; |
203 | struct xgbe_channel *channel; | 204 | struct xgbe_channel *channel; |
204 | unsigned int dma_isr, dma_ch_isr; | 205 | unsigned int dma_isr, dma_ch_isr; |
205 | unsigned int mac_isr; | 206 | unsigned int mac_isr, mac_tssr; |
206 | unsigned int i; | 207 | unsigned int i; |
207 | 208 | ||
208 | /* The DMA interrupt status register also reports MAC and MTL | 209 | /* The DMA interrupt status register also reports MAC and MTL |
@@ -255,6 +256,17 @@ static irqreturn_t xgbe_isr(int irq, void *data) | |||
255 | 256 | ||
256 | if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS)) | 257 | if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS)) |
257 | hw_if->rx_mmc_int(pdata); | 258 | hw_if->rx_mmc_int(pdata); |
259 | |||
260 | if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) { | ||
261 | mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR); | ||
262 | |||
263 | if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) { | ||
264 | /* Read Tx Timestamp to clear interrupt */ | ||
265 | pdata->tx_tstamp = | ||
266 | hw_if->get_tx_tstamp(pdata); | ||
267 | schedule_work(&pdata->tx_tstamp_work); | ||
268 | } | ||
269 | } | ||
258 | } | 270 | } |
259 | 271 | ||
260 | DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR)); | 272 | DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR)); |
@@ -375,6 +387,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) | |||
375 | hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); | 387 | hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); |
376 | hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); | 388 | hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); |
377 | hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); | 389 | hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); |
390 | hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); | ||
378 | hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, | 391 | hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, |
379 | HASHTBLSZ); | 392 | HASHTBLSZ); |
380 | hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, | 393 | hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, |
@@ -668,6 +681,197 @@ static void xgbe_restart(struct work_struct *work) | |||
668 | rtnl_unlock(); | 681 | rtnl_unlock(); |
669 | } | 682 | } |
670 | 683 | ||
684 | static void xgbe_tx_tstamp(struct work_struct *work) | ||
685 | { | ||
686 | struct xgbe_prv_data *pdata = container_of(work, | ||
687 | struct xgbe_prv_data, | ||
688 | tx_tstamp_work); | ||
689 | struct skb_shared_hwtstamps hwtstamps; | ||
690 | u64 nsec; | ||
691 | unsigned long flags; | ||
692 | |||
693 | if (pdata->tx_tstamp) { | ||
694 | nsec = timecounter_cyc2time(&pdata->tstamp_tc, | ||
695 | pdata->tx_tstamp); | ||
696 | |||
697 | memset(&hwtstamps, 0, sizeof(hwtstamps)); | ||
698 | hwtstamps.hwtstamp = ns_to_ktime(nsec); | ||
699 | skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps); | ||
700 | } | ||
701 | |||
702 | dev_kfree_skb_any(pdata->tx_tstamp_skb); | ||
703 | |||
704 | spin_lock_irqsave(&pdata->tstamp_lock, flags); | ||
705 | pdata->tx_tstamp_skb = NULL; | ||
706 | spin_unlock_irqrestore(&pdata->tstamp_lock, flags); | ||
707 | } | ||
708 | |||
709 | static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata, | ||
710 | struct ifreq *ifreq) | ||
711 | { | ||
712 | if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config, | ||
713 | sizeof(pdata->tstamp_config))) | ||
714 | return -EFAULT; | ||
715 | |||
716 | return 0; | ||
717 | } | ||
718 | |||
719 | static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, | ||
720 | struct ifreq *ifreq) | ||
721 | { | ||
722 | struct hwtstamp_config config; | ||
723 | unsigned int mac_tscr; | ||
724 | |||
725 | if (copy_from_user(&config, ifreq->ifr_data, sizeof(config))) | ||
726 | return -EFAULT; | ||
727 | |||
728 | if (config.flags) | ||
729 | return -EINVAL; | ||
730 | |||
731 | mac_tscr = 0; | ||
732 | |||
733 | switch (config.tx_type) { | ||
734 | case HWTSTAMP_TX_OFF: | ||
735 | break; | ||
736 | |||
737 | case HWTSTAMP_TX_ON: | ||
738 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); | ||
739 | break; | ||
740 | |||
741 | default: | ||
742 | return -ERANGE; | ||
743 | } | ||
744 | |||
745 | switch (config.rx_filter) { | ||
746 | case HWTSTAMP_FILTER_NONE: | ||
747 | break; | ||
748 | |||
749 | case HWTSTAMP_FILTER_ALL: | ||
750 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1); | ||
751 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); | ||
752 | break; | ||
753 | |||
754 | /* PTP v2, UDP, any kind of event packet */ | ||
755 | case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: | ||
756 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); | ||
757 | /* PTP v1, UDP, any kind of event packet */ | ||
758 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: | ||
759 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); | ||
760 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); | ||
761 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1); | ||
762 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); | ||
763 | break; | ||
764 | |||
765 | /* PTP v2, UDP, Sync packet */ | ||
766 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: | ||
767 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); | ||
768 | /* PTP v1, UDP, Sync packet */ | ||
769 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: | ||
770 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); | ||
771 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); | ||
772 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); | ||
773 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); | ||
774 | break; | ||
775 | |||
776 | /* PTP v2, UDP, Delay_req packet */ | ||
777 | case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: | ||
778 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); | ||
779 | /* PTP v1, UDP, Delay_req packet */ | ||
780 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: | ||
781 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); | ||
782 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); | ||
783 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); | ||
784 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1); | ||
785 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); | ||
786 | break; | ||
787 | |||
788 | /* 802.AS1, Ethernet, any kind of event packet */ | ||
789 | case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: | ||
790 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1); | ||
791 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1); | ||
792 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); | ||
793 | break; | ||
794 | |||
795 | /* 802.AS1, Ethernet, Sync packet */ | ||
796 | case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: | ||
797 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1); | ||
798 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); | ||
799 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); | ||
800 | break; | ||
801 | |||
802 | /* 802.AS1, Ethernet, Delay_req packet */ | ||
803 | case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: | ||
804 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1); | ||
805 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1); | ||
806 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); | ||
807 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); | ||
808 | break; | ||
809 | |||
810 | /* PTP v2/802.AS1, any layer, any kind of event packet */ | ||
811 | case HWTSTAMP_FILTER_PTP_V2_EVENT: | ||
812 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); | ||
813 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1); | ||
814 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); | ||
815 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); | ||
816 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1); | ||
817 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); | ||
818 | break; | ||
819 | |||
820 | /* PTP v2/802.AS1, any layer, Sync packet */ | ||
821 | case HWTSTAMP_FILTER_PTP_V2_SYNC: | ||
822 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); | ||
823 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1); | ||
824 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); | ||
825 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); | ||
826 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); | ||
827 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); | ||
828 | break; | ||
829 | |||
830 | /* PTP v2/802.AS1, any layer, Delay_req packet */ | ||
831 | case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: | ||
832 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); | ||
833 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1); | ||
834 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); | ||
835 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); | ||
836 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1); | ||
837 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); | ||
838 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); | ||
839 | break; | ||
840 | |||
841 | default: | ||
842 | return -ERANGE; | ||
843 | } | ||
844 | |||
845 | pdata->hw_if.config_tstamp(pdata, mac_tscr); | ||
846 | |||
847 | memcpy(&pdata->tstamp_config, &config, sizeof(config)); | ||
848 | |||
849 | return 0; | ||
850 | } | ||
851 | |||
852 | static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata, | ||
853 | struct sk_buff *skb, | ||
854 | struct xgbe_packet_data *packet) | ||
855 | { | ||
856 | unsigned long flags; | ||
857 | |||
858 | if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) { | ||
859 | spin_lock_irqsave(&pdata->tstamp_lock, flags); | ||
860 | if (pdata->tx_tstamp_skb) { | ||
861 | /* Another timestamp in progress, ignore this one */ | ||
862 | XGMAC_SET_BITS(packet->attributes, | ||
863 | TX_PACKET_ATTRIBUTES, PTP, 0); | ||
864 | } else { | ||
865 | pdata->tx_tstamp_skb = skb_get(skb); | ||
866 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | ||
867 | } | ||
868 | spin_unlock_irqrestore(&pdata->tstamp_lock, flags); | ||
869 | } | ||
870 | |||
871 | if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) | ||
872 | skb_tx_timestamp(skb); | ||
873 | } | ||
874 | |||
671 | static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet) | 875 | static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet) |
672 | { | 876 | { |
673 | if (vlan_tx_tag_present(skb)) | 877 | if (vlan_tx_tag_present(skb)) |
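For reference, a minimal userspace sketch of how the SIOCSHWTSTAMP path added above is exercised; the interface name "eth0" and the AF_INET/SOCK_DGRAM socket are assumptions for the example, not anything mandated by this patch:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
            perror("socket");
            return 1;
        }

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;         /* timestamp transmitted packets */
        cfg.rx_filter = HWTSTAMP_FILTER_ALL;  /* timestamp every received packet */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
            perror("SIOCSHWTSTAMP");

        close(fd);
        return 0;
    }

With the driver in place, ethtool -T on the interface should then report the tx_types and rx_filters advertised by the new get_ts_info ethtool handler added further down in this series of hunks.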
@@ -711,7 +915,8 @@ static int xgbe_is_tso(struct sk_buff *skb) | |||
711 | return 1; | 915 | return 1; |
712 | } | 916 | } |
713 | 917 | ||
714 | static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb, | 918 | static void xgbe_packet_info(struct xgbe_prv_data *pdata, |
919 | struct xgbe_ring *ring, struct sk_buff *skb, | ||
715 | struct xgbe_packet_data *packet) | 920 | struct xgbe_packet_data *packet) |
716 | { | 921 | { |
717 | struct skb_frag_struct *frag; | 922 | struct skb_frag_struct *frag; |
@@ -753,6 +958,11 @@ static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb, | |||
753 | VLAN_CTAG, 1); | 958 | VLAN_CTAG, 1); |
754 | } | 959 | } |
755 | 960 | ||
961 | if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && | ||
962 | (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON)) | ||
963 | XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, | ||
964 | PTP, 1); | ||
965 | |||
756 | for (len = skb_headlen(skb); len;) { | 966 | for (len = skb_headlen(skb); len;) { |
757 | packet->rdesc_count++; | 967 | packet->rdesc_count++; |
758 | len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE); | 968 | len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE); |
@@ -776,26 +986,33 @@ static int xgbe_open(struct net_device *netdev) | |||
776 | 986 | ||
777 | DBGPR("-->xgbe_open\n"); | 987 | DBGPR("-->xgbe_open\n"); |
778 | 988 | ||
779 | /* Enable the clock */ | 989 | /* Enable the clocks */ |
780 | ret = clk_prepare_enable(pdata->sysclock); | 990 | ret = clk_prepare_enable(pdata->sysclk); |
781 | if (ret) { | 991 | if (ret) { |
782 | netdev_alert(netdev, "clk_prepare_enable failed\n"); | 992 | netdev_alert(netdev, "dma clk_prepare_enable failed\n"); |
783 | return ret; | 993 | return ret; |
784 | } | 994 | } |
785 | 995 | ||
996 | ret = clk_prepare_enable(pdata->ptpclk); | ||
997 | if (ret) { | ||
998 | netdev_alert(netdev, "ptp clk_prepare_enable failed\n"); | ||
999 | goto err_sysclk; | ||
1000 | } | ||
1001 | |||
786 | /* Calculate the Rx buffer size before allocating rings */ | 1002 | /* Calculate the Rx buffer size before allocating rings */ |
787 | ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu); | 1003 | ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu); |
788 | if (ret < 0) | 1004 | if (ret < 0) |
789 | goto err_clk; | 1005 | goto err_ptpclk; |
790 | pdata->rx_buf_size = ret; | 1006 | pdata->rx_buf_size = ret; |
791 | 1007 | ||
792 | /* Allocate the ring descriptors and buffers */ | 1008 | /* Allocate the ring descriptors and buffers */ |
793 | ret = desc_if->alloc_ring_resources(pdata); | 1009 | ret = desc_if->alloc_ring_resources(pdata); |
794 | if (ret) | 1010 | if (ret) |
795 | goto err_clk; | 1011 | goto err_ptpclk; |
796 | 1012 | ||
797 | /* Initialize the device restart work struct */ | 1013 | /* Initialize the device restart and Tx timestamp work struct */ |
798 | INIT_WORK(&pdata->restart_work, xgbe_restart); | 1014 | INIT_WORK(&pdata->restart_work, xgbe_restart); |
1015 | INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); | ||
799 | 1016 | ||
800 | /* Request interrupts */ | 1017 | /* Request interrupts */ |
801 | ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0, | 1018 | ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0, |
@@ -824,8 +1041,11 @@ err_start: | |||
824 | err_irq: | 1041 | err_irq: |
825 | desc_if->free_ring_resources(pdata); | 1042 | desc_if->free_ring_resources(pdata); |
826 | 1043 | ||
827 | err_clk: | 1044 | err_ptpclk: |
828 | clk_disable_unprepare(pdata->sysclock); | 1045 | clk_disable_unprepare(pdata->ptpclk); |
1046 | |||
1047 | err_sysclk: | ||
1048 | clk_disable_unprepare(pdata->sysclk); | ||
829 | 1049 | ||
830 | return ret; | 1050 | return ret; |
831 | } | 1051 | } |
@@ -853,8 +1073,9 @@ static int xgbe_close(struct net_device *netdev) | |||
853 | pdata->irq_number = 0; | 1073 | pdata->irq_number = 0; |
854 | } | 1074 | } |
855 | 1075 | ||
856 | /* Disable the clock */ | 1076 | /* Disable the clocks */ |
857 | clk_disable_unprepare(pdata->sysclock); | 1077 | clk_disable_unprepare(pdata->ptpclk); |
1078 | clk_disable_unprepare(pdata->sysclk); | ||
858 | 1079 | ||
859 | DBGPR("<--xgbe_close\n"); | 1080 | DBGPR("<--xgbe_close\n"); |
860 | 1081 | ||
@@ -890,7 +1111,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
890 | 1111 | ||
891 | /* Calculate preliminary packet info */ | 1112 | /* Calculate preliminary packet info */ |
892 | memset(packet, 0, sizeof(*packet)); | 1113 | memset(packet, 0, sizeof(*packet)); |
893 | xgbe_packet_info(ring, skb, packet); | 1114 | xgbe_packet_info(pdata, ring, skb, packet); |
894 | 1115 | ||
895 | /* Check that there are enough descriptors available */ | 1116 | /* Check that there are enough descriptors available */ |
896 | if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) { | 1117 | if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) { |
@@ -914,6 +1135,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
914 | goto tx_netdev_return; | 1135 | goto tx_netdev_return; |
915 | } | 1136 | } |
916 | 1137 | ||
1138 | xgbe_prep_tx_tstamp(pdata, skb, packet); | ||
1139 | |||
917 | /* Configure required descriptor fields for transmission */ | 1140 | /* Configure required descriptor fields for transmission */ |
918 | hw_if->pre_xmit(channel); | 1141 | hw_if->pre_xmit(channel); |
919 | 1142 | ||
@@ -968,6 +1191,27 @@ static int xgbe_set_mac_address(struct net_device *netdev, void *addr) | |||
968 | return 0; | 1191 | return 0; |
969 | } | 1192 | } |
970 | 1193 | ||
1194 | static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd) | ||
1195 | { | ||
1196 | struct xgbe_prv_data *pdata = netdev_priv(netdev); | ||
1197 | int ret; | ||
1198 | |||
1199 | switch (cmd) { | ||
1200 | case SIOCGHWTSTAMP: | ||
1201 | ret = xgbe_get_hwtstamp_settings(pdata, ifreq); | ||
1202 | break; | ||
1203 | |||
1204 | case SIOCSHWTSTAMP: | ||
1205 | ret = xgbe_set_hwtstamp_settings(pdata, ifreq); | ||
1206 | break; | ||
1207 | |||
1208 | default: | ||
1209 | ret = -EOPNOTSUPP; | ||
1210 | } | ||
1211 | |||
1212 | return ret; | ||
1213 | } | ||
1214 | |||
971 | static int xgbe_change_mtu(struct net_device *netdev, int mtu) | 1215 | static int xgbe_change_mtu(struct net_device *netdev, int mtu) |
972 | { | 1216 | { |
973 | struct xgbe_prv_data *pdata = netdev_priv(netdev); | 1217 | struct xgbe_prv_data *pdata = netdev_priv(netdev); |
@@ -1069,6 +1313,33 @@ static void xgbe_poll_controller(struct net_device *netdev) | |||
1069 | } | 1313 | } |
1070 | #endif /* End CONFIG_NET_POLL_CONTROLLER */ | 1314 | #endif /* End CONFIG_NET_POLL_CONTROLLER */ |
1071 | 1315 | ||
1316 | static int xgbe_setup_tc(struct net_device *netdev, u8 tc) | ||
1317 | { | ||
1318 | struct xgbe_prv_data *pdata = netdev_priv(netdev); | ||
1319 | unsigned int offset, queue; | ||
1320 | u8 i; | ||
1321 | |||
1322 | if (tc && (tc != pdata->hw_feat.tc_cnt)) | ||
1323 | return -EINVAL; | ||
1324 | |||
1325 | if (tc) { | ||
1326 | netdev_set_num_tc(netdev, tc); | ||
1327 | for (i = 0, queue = 0, offset = 0; i < tc; i++) { | ||
1328 | while ((queue < pdata->tx_q_count) && | ||
1329 | (pdata->q2tc_map[queue] == i)) | ||
1330 | queue++; | ||
1331 | |||
1332 | DBGPR(" TC%u using TXq%u-%u\n", i, offset, queue - 1); | ||
1333 | netdev_set_tc_queue(netdev, i, queue - offset, offset); | ||
1334 | offset = queue; | ||
1335 | } | ||
1336 | } else { | ||
1337 | netdev_reset_tc(netdev); | ||
1338 | } | ||
1339 | |||
1340 | return 0; | ||
1341 | } | ||
1342 | |||
1072 | static int xgbe_set_features(struct net_device *netdev, | 1343 | static int xgbe_set_features(struct net_device *netdev, |
1073 | netdev_features_t features) | 1344 | netdev_features_t features) |
1074 | { | 1345 | { |
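The ndo_setup_tc handler above converts contiguous runs of equal entries in q2tc_map into the (count, offset) pairs expected by netdev_set_tc_queue(). A standalone sketch of that walk, using an assumed map for 8 Tx queues and 3 traffic classes (consistent with the earlier mapping sketch, not values from this patch):

    #include <stdio.h>

    int main(void)
    {
        /* assumed q2tc_map produced by the queue-to-TC mapping code */
        unsigned int q2tc_map[] = { 0, 0, 0, 1, 1, 1, 2, 2 };
        unsigned int tx_q_count = 8, tc = 3;
        unsigned int i, queue = 0, offset = 0;

        for (i = 0; i < tc; i++) {
            while (queue < tx_q_count && q2tc_map[queue] == i)
                queue++;
            printf("TC%u: queue count %u, offset %u\n", i, queue - offset, offset);
            offset = queue;
        }

        return 0;
    }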
@@ -1109,6 +1380,7 @@ static const struct net_device_ops xgbe_netdev_ops = { | |||
1109 | .ndo_set_rx_mode = xgbe_set_rx_mode, | 1380 | .ndo_set_rx_mode = xgbe_set_rx_mode, |
1110 | .ndo_set_mac_address = xgbe_set_mac_address, | 1381 | .ndo_set_mac_address = xgbe_set_mac_address, |
1111 | .ndo_validate_addr = eth_validate_addr, | 1382 | .ndo_validate_addr = eth_validate_addr, |
1383 | .ndo_do_ioctl = xgbe_ioctl, | ||
1112 | .ndo_change_mtu = xgbe_change_mtu, | 1384 | .ndo_change_mtu = xgbe_change_mtu, |
1113 | .ndo_get_stats64 = xgbe_get_stats64, | 1385 | .ndo_get_stats64 = xgbe_get_stats64, |
1114 | .ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid, | 1386 | .ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid, |
@@ -1116,6 +1388,7 @@ static const struct net_device_ops xgbe_netdev_ops = { | |||
1116 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1388 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1117 | .ndo_poll_controller = xgbe_poll_controller, | 1389 | .ndo_poll_controller = xgbe_poll_controller, |
1118 | #endif | 1390 | #endif |
1391 | .ndo_setup_tc = xgbe_setup_tc, | ||
1119 | .ndo_set_features = xgbe_set_features, | 1392 | .ndo_set_features = xgbe_set_features, |
1120 | }; | 1393 | }; |
1121 | 1394 | ||
@@ -1202,8 +1475,9 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) | |||
1202 | struct xgbe_packet_data *packet; | 1475 | struct xgbe_packet_data *packet; |
1203 | struct net_device *netdev = pdata->netdev; | 1476 | struct net_device *netdev = pdata->netdev; |
1204 | struct sk_buff *skb; | 1477 | struct sk_buff *skb; |
1205 | unsigned int incomplete, error; | 1478 | struct skb_shared_hwtstamps *hwtstamps; |
1206 | unsigned int cur_len, put_len, max_len; | 1479 | unsigned int incomplete, error, context_next, context; |
1480 | unsigned int len, put_len, max_len; | ||
1207 | int received = 0; | 1481 | int received = 0; |
1208 | 1482 | ||
1209 | DBGPR("-->xgbe_rx_poll: budget=%d\n", budget); | 1483 | DBGPR("-->xgbe_rx_poll: budget=%d\n", budget); |
@@ -1212,22 +1486,33 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) | |||
1212 | if (!ring) | 1486 | if (!ring) |
1213 | return 0; | 1487 | return 0; |
1214 | 1488 | ||
1489 | rdata = XGBE_GET_DESC_DATA(ring, ring->cur); | ||
1215 | packet = &ring->packet_data; | 1490 | packet = &ring->packet_data; |
1216 | while (received < budget) { | 1491 | while (received < budget) { |
1217 | DBGPR(" cur = %d\n", ring->cur); | 1492 | DBGPR(" cur = %d\n", ring->cur); |
1218 | 1493 | ||
1219 | /* Clear the packet data information */ | 1494 | /* First time in loop see if we need to restore state */ |
1220 | memset(packet, 0, sizeof(*packet)); | 1495 | if (!received && rdata->state_saved) { |
1221 | skb = NULL; | 1496 | incomplete = rdata->state.incomplete; |
1222 | error = 0; | 1497 | context_next = rdata->state.context_next; |
1223 | cur_len = 0; | 1498 | skb = rdata->state.skb; |
1499 | error = rdata->state.error; | ||
1500 | len = rdata->state.len; | ||
1501 | } else { | ||
1502 | memset(packet, 0, sizeof(*packet)); | ||
1503 | incomplete = 0; | ||
1504 | context_next = 0; | ||
1505 | skb = NULL; | ||
1506 | error = 0; | ||
1507 | len = 0; | ||
1508 | } | ||
1224 | 1509 | ||
1225 | read_again: | 1510 | read_again: |
1511 | rdata = XGBE_GET_DESC_DATA(ring, ring->cur); | ||
1512 | |||
1226 | if (ring->dirty > (XGBE_RX_DESC_CNT >> 3)) | 1513 | if (ring->dirty > (XGBE_RX_DESC_CNT >> 3)) |
1227 | xgbe_rx_refresh(channel); | 1514 | xgbe_rx_refresh(channel); |
1228 | 1515 | ||
1229 | rdata = XGBE_GET_DESC_DATA(ring, ring->cur); | ||
1230 | |||
1231 | if (hw_if->dev_read(channel)) | 1516 | if (hw_if->dev_read(channel)) |
1232 | break; | 1517 | break; |
1233 | 1518 | ||
@@ -1242,9 +1527,15 @@ read_again: | |||
1242 | incomplete = XGMAC_GET_BITS(packet->attributes, | 1527 | incomplete = XGMAC_GET_BITS(packet->attributes, |
1243 | RX_PACKET_ATTRIBUTES, | 1528 | RX_PACKET_ATTRIBUTES, |
1244 | INCOMPLETE); | 1529 | INCOMPLETE); |
1530 | context_next = XGMAC_GET_BITS(packet->attributes, | ||
1531 | RX_PACKET_ATTRIBUTES, | ||
1532 | CONTEXT_NEXT); | ||
1533 | context = XGMAC_GET_BITS(packet->attributes, | ||
1534 | RX_PACKET_ATTRIBUTES, | ||
1535 | CONTEXT); | ||
1245 | 1536 | ||
1246 | /* Earlier error, just drain the remaining data */ | 1537 | /* Earlier error, just drain the remaining data */ |
1247 | if (incomplete && error) | 1538 | if ((incomplete || context_next) && error) |
1248 | goto read_again; | 1539 | goto read_again; |
1249 | 1540 | ||
1250 | if (error || packet->errors) { | 1541 | if (error || packet->errors) { |
@@ -1254,30 +1545,37 @@ read_again: | |||
1254 | continue; | 1545 | continue; |
1255 | } | 1546 | } |
1256 | 1547 | ||
1257 | put_len = rdata->len - cur_len; | 1548 | if (!context) { |
1258 | if (skb) { | 1549 | put_len = rdata->len - len; |
1259 | if (pskb_expand_head(skb, 0, put_len, GFP_ATOMIC)) { | 1550 | if (skb) { |
1260 | DBGPR("pskb_expand_head error\n"); | 1551 | if (pskb_expand_head(skb, 0, put_len, |
1261 | if (incomplete) { | 1552 | GFP_ATOMIC)) { |
1262 | error = 1; | 1553 | DBGPR("pskb_expand_head error\n"); |
1263 | goto read_again; | 1554 | if (incomplete) { |
1555 | error = 1; | ||
1556 | goto read_again; | ||
1557 | } | ||
1558 | |||
1559 | dev_kfree_skb(skb); | ||
1560 | continue; | ||
1264 | } | 1561 | } |
1265 | 1562 | memcpy(skb_tail_pointer(skb), rdata->skb->data, | |
1266 | dev_kfree_skb(skb); | 1563 | put_len); |
1267 | continue; | 1564 | } else { |
1565 | skb = rdata->skb; | ||
1566 | rdata->skb = NULL; | ||
1268 | } | 1567 | } |
1269 | memcpy(skb_tail_pointer(skb), rdata->skb->data, | 1568 | skb_put(skb, put_len); |
1270 | put_len); | 1569 | len += put_len; |
1271 | } else { | ||
1272 | skb = rdata->skb; | ||
1273 | rdata->skb = NULL; | ||
1274 | } | 1570 | } |
1275 | skb_put(skb, put_len); | ||
1276 | cur_len += put_len; | ||
1277 | 1571 | ||
1278 | if (incomplete) | 1572 | if (incomplete || context_next) |
1279 | goto read_again; | 1573 | goto read_again; |
1280 | 1574 | ||
1575 | /* Stray Context Descriptor? */ | ||
1576 | if (!skb) | ||
1577 | continue; | ||
1578 | |||
1281 | /* Be sure we don't exceed the configured MTU */ | 1579 | /* Be sure we don't exceed the configured MTU */ |
1282 | max_len = netdev->mtu + ETH_HLEN; | 1580 | max_len = netdev->mtu + ETH_HLEN; |
1283 | if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && | 1581 | if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && |
@@ -1304,6 +1602,16 @@ read_again: | |||
1304 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), | 1602 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), |
1305 | packet->vlan_ctag); | 1603 | packet->vlan_ctag); |
1306 | 1604 | ||
1605 | if (XGMAC_GET_BITS(packet->attributes, | ||
1606 | RX_PACKET_ATTRIBUTES, RX_TSTAMP)) { | ||
1607 | u64 nsec; | ||
1608 | |||
1609 | nsec = timecounter_cyc2time(&pdata->tstamp_tc, | ||
1610 | packet->rx_tstamp); | ||
1611 | hwtstamps = skb_hwtstamps(skb); | ||
1612 | hwtstamps->hwtstamp = ns_to_ktime(nsec); | ||
1613 | } | ||
1614 | |||
1307 | skb->dev = netdev; | 1615 | skb->dev = netdev; |
1308 | skb->protocol = eth_type_trans(skb, netdev); | 1616 | skb->protocol = eth_type_trans(skb, netdev); |
1309 | skb_record_rx_queue(skb, channel->queue_index); | 1617 | skb_record_rx_queue(skb, channel->queue_index); |
@@ -1313,6 +1621,17 @@ read_again: | |||
1313 | napi_gro_receive(&pdata->napi, skb); | 1621 | napi_gro_receive(&pdata->napi, skb); |
1314 | } | 1622 | } |
1315 | 1623 | ||
1624 | /* Check if we need to save state before leaving */ | ||
1625 | if (received && (incomplete || context_next)) { | ||
1626 | rdata = XGBE_GET_DESC_DATA(ring, ring->cur); | ||
1627 | rdata->state_saved = 1; | ||
1628 | rdata->state.incomplete = incomplete; | ||
1629 | rdata->state.context_next = context_next; | ||
1630 | rdata->state.skb = skb; | ||
1631 | rdata->state.len = len; | ||
1632 | rdata->state.error = error; | ||
1633 | } | ||
1634 | |||
1316 | DBGPR("<--xgbe_rx_poll: received = %d\n", received); | 1635 | DBGPR("<--xgbe_rx_poll: received = %d\n", received); |
1317 | 1636 | ||
1318 | return received; | 1637 | return received; |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index f7405261f23e..6005b6021f78 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | |||
@@ -116,6 +116,7 @@ | |||
116 | 116 | ||
117 | #include <linux/spinlock.h> | 117 | #include <linux/spinlock.h> |
118 | #include <linux/phy.h> | 118 | #include <linux/phy.h> |
119 | #include <linux/net_tstamp.h> | ||
119 | 120 | ||
120 | #include "xgbe.h" | 121 | #include "xgbe.h" |
121 | #include "xgbe-common.h" | 122 | #include "xgbe-common.h" |
@@ -326,10 +327,19 @@ static int xgbe_set_settings(struct net_device *netdev, | |||
326 | (cmd->autoneg != AUTONEG_DISABLE)) | 327 | (cmd->autoneg != AUTONEG_DISABLE)) |
327 | goto unlock; | 328 | goto unlock; |
328 | 329 | ||
329 | if ((cmd->autoneg == AUTONEG_DISABLE) && | 330 | if (cmd->autoneg == AUTONEG_DISABLE) { |
330 | (((speed != SPEED_10000) && (speed != SPEED_1000)) || | 331 | switch (speed) { |
331 | (cmd->duplex != DUPLEX_FULL))) | 332 | case SPEED_10000: |
332 | goto unlock; | 333 | case SPEED_2500: |
334 | case SPEED_1000: | ||
335 | break; | ||
336 | default: | ||
337 | goto unlock; | ||
338 | } | ||
339 | |||
340 | if (cmd->duplex != DUPLEX_FULL) | ||
341 | goto unlock; | ||
342 | } | ||
333 | 343 | ||
334 | cmd->advertising &= phydev->supported; | 344 | cmd->advertising &= phydev->supported; |
335 | if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising) | 345 | if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising) |
@@ -480,6 +490,39 @@ static int xgbe_set_coalesce(struct net_device *netdev, | |||
480 | return 0; | 490 | return 0; |
481 | } | 491 | } |
482 | 492 | ||
493 | static int xgbe_get_ts_info(struct net_device *netdev, | ||
494 | struct ethtool_ts_info *ts_info) | ||
495 | { | ||
496 | struct xgbe_prv_data *pdata = netdev_priv(netdev); | ||
497 | |||
498 | ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | | ||
499 | SOF_TIMESTAMPING_RX_SOFTWARE | | ||
500 | SOF_TIMESTAMPING_SOFTWARE | | ||
501 | SOF_TIMESTAMPING_TX_HARDWARE | | ||
502 | SOF_TIMESTAMPING_RX_HARDWARE | | ||
503 | SOF_TIMESTAMPING_RAW_HARDWARE; | ||
504 | |||
505 | if (pdata->ptp_clock) | ||
506 | ts_info->phc_index = ptp_clock_index(pdata->ptp_clock); | ||
507 | else | ||
508 | ts_info->phc_index = -1; | ||
509 | |||
510 | ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); | ||
511 | ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | | ||
512 | (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | | ||
513 | (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | | ||
514 | (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | | ||
515 | (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | | ||
516 | (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | | ||
517 | (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | | ||
518 | (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | | ||
519 | (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | | ||
520 | (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | | ||
521 | (1 << HWTSTAMP_FILTER_ALL); | ||
522 | |||
523 | return 0; | ||
524 | } | ||
525 | |||
483 | static const struct ethtool_ops xgbe_ethtool_ops = { | 526 | static const struct ethtool_ops xgbe_ethtool_ops = { |
484 | .get_settings = xgbe_get_settings, | 527 | .get_settings = xgbe_get_settings, |
485 | .set_settings = xgbe_set_settings, | 528 | .set_settings = xgbe_set_settings, |
@@ -492,6 +535,7 @@ static const struct ethtool_ops xgbe_ethtool_ops = { | |||
492 | .get_strings = xgbe_get_strings, | 535 | .get_strings = xgbe_get_strings, |
493 | .get_ethtool_stats = xgbe_get_ethtool_stats, | 536 | .get_ethtool_stats = xgbe_get_ethtool_stats, |
494 | .get_sset_count = xgbe_get_sset_count, | 537 | .get_sset_count = xgbe_get_sset_count, |
538 | .get_ts_info = xgbe_get_ts_info, | ||
495 | }; | 539 | }; |
496 | 540 | ||
497 | struct ethtool_ops *xgbe_get_ethtool_ops(void) | 541 | struct ethtool_ops *xgbe_get_ethtool_ops(void) |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index e56c3d45b30e..ec977d36063f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c | |||
@@ -245,6 +245,7 @@ static int xgbe_probe(struct platform_device *pdev) | |||
245 | 245 | ||
246 | spin_lock_init(&pdata->lock); | 246 | spin_lock_init(&pdata->lock); |
247 | mutex_init(&pdata->xpcs_mutex); | 247 | mutex_init(&pdata->xpcs_mutex); |
248 | spin_lock_init(&pdata->tstamp_lock); | ||
248 | 249 | ||
249 | /* Set and validate the number of descriptors for a ring */ | 250 | /* Set and validate the number of descriptors for a ring */ |
250 | BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT); | 251 | BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT); |
@@ -265,10 +266,18 @@ static int xgbe_probe(struct platform_device *pdev) | |||
265 | } | 266 | } |
266 | 267 | ||
267 | /* Obtain the system clock setting */ | 268 | /* Obtain the system clock setting */ |
268 | pdata->sysclock = devm_clk_get(dev, NULL); | 269 | pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK); |
269 | if (IS_ERR(pdata->sysclock)) { | 270 | if (IS_ERR(pdata->sysclk)) { |
270 | dev_err(dev, "devm_clk_get failed\n"); | 271 | dev_err(dev, "dma devm_clk_get failed\n"); |
271 | ret = PTR_ERR(pdata->sysclock); | 272 | ret = PTR_ERR(pdata->sysclk); |
273 | goto err_io; | ||
274 | } | ||
275 | |||
276 | /* Obtain the PTP clock setting */ | ||
277 | pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK); | ||
278 | if (IS_ERR(pdata->ptpclk)) { | ||
279 | dev_err(dev, "ptp devm_clk_get failed\n"); | ||
280 | ret = PTR_ERR(pdata->ptpclk); | ||
272 | goto err_io; | 281 | goto err_io; |
273 | } | 282 | } |
274 | 283 | ||
@@ -346,9 +355,16 @@ static int xgbe_probe(struct platform_device *pdev) | |||
346 | /* Set default configuration data */ | 355 | /* Set default configuration data */ |
347 | xgbe_default_config(pdata); | 356 | xgbe_default_config(pdata); |
348 | 357 | ||
349 | /* Calculate the number of Tx and Rx rings to be created */ | 358 | /* Calculate the number of Tx and Rx rings to be created |
359 | * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set | ||
360 | * the number of Tx queues to the number of Tx channels | ||
361 | * enabled | ||
362 | * -Rx (DMA) Channels do not map 1-to-1 so use the actual | ||
363 | * number of Rx queues | ||
364 | */ | ||
350 | pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(), | 365 | pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(), |
351 | pdata->hw_feat.tx_ch_cnt); | 366 | pdata->hw_feat.tx_ch_cnt); |
367 | pdata->tx_q_count = pdata->tx_ring_count; | ||
352 | ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count); | 368 | ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count); |
353 | if (ret) { | 369 | if (ret) { |
354 | dev_err(dev, "error setting real tx queue count\n"); | 370 | dev_err(dev, "error setting real tx queue count\n"); |
@@ -358,6 +374,7 @@ static int xgbe_probe(struct platform_device *pdev) | |||
358 | pdata->rx_ring_count = min_t(unsigned int, | 374 | pdata->rx_ring_count = min_t(unsigned int, |
359 | netif_get_num_default_rss_queues(), | 375 | netif_get_num_default_rss_queues(), |
360 | pdata->hw_feat.rx_ch_cnt); | 376 | pdata->hw_feat.rx_ch_cnt); |
377 | pdata->rx_q_count = pdata->hw_feat.rx_q_cnt; | ||
361 | ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count); | 378 | ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count); |
362 | if (ret) { | 379 | if (ret) { |
363 | dev_err(dev, "error setting real rx queue count\n"); | 380 | dev_err(dev, "error setting real rx queue count\n"); |
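As a concrete, hypothetical illustration of the comment above: on a 4-core system whose hardware reports 8 Tx channels, 8 Tx queues and 8 Rx queues, the driver would create 4 Tx rings and therefore set tx_q_count to 4, since Tx queues track Tx DMA channels 1-to-1, while rx_q_count would still be the full 8 taken from hw_feat.rx_q_cnt even if fewer Rx rings are created, because Rx queues are mapped dynamically onto the Rx DMA channels rather than 1-to-1.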
@@ -383,9 +400,12 @@ static int xgbe_probe(struct platform_device *pdev) | |||
383 | if (ret) | 400 | if (ret) |
384 | goto err_bus_id; | 401 | goto err_bus_id; |
385 | 402 | ||
386 | /* Set network and ethtool operations */ | 403 | /* Set device operations */ |
387 | netdev->netdev_ops = xgbe_get_netdev_ops(); | 404 | netdev->netdev_ops = xgbe_get_netdev_ops(); |
388 | netdev->ethtool_ops = xgbe_get_ethtool_ops(); | 405 | netdev->ethtool_ops = xgbe_get_ethtool_ops(); |
406 | #ifdef CONFIG_AMD_XGBE_DCB | ||
407 | netdev->dcbnl_ops = xgbe_get_dcbnl_ops(); | ||
408 | #endif | ||
389 | 409 | ||
390 | /* Set device features */ | 410 | /* Set device features */ |
391 | netdev->hw_features = NETIF_F_SG | | 411 | netdev->hw_features = NETIF_F_SG | |
@@ -420,6 +440,8 @@ static int xgbe_probe(struct platform_device *pdev) | |||
420 | goto err_reg_netdev; | 440 | goto err_reg_netdev; |
421 | } | 441 | } |
422 | 442 | ||
443 | xgbe_ptp_register(pdata); | ||
444 | |||
423 | xgbe_debugfs_init(pdata); | 445 | xgbe_debugfs_init(pdata); |
424 | 446 | ||
425 | netdev_notice(netdev, "net device enabled\n"); | 447 | netdev_notice(netdev, "net device enabled\n"); |
@@ -452,6 +474,8 @@ static int xgbe_remove(struct platform_device *pdev) | |||
452 | 474 | ||
453 | xgbe_debugfs_exit(pdata); | 475 | xgbe_debugfs_exit(pdata); |
454 | 476 | ||
477 | xgbe_ptp_unregister(pdata); | ||
478 | |||
455 | unregister_netdev(netdev); | 479 | unregister_netdev(netdev); |
456 | 480 | ||
457 | xgbe_mdio_unregister(pdata); | 481 | xgbe_mdio_unregister(pdata); |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c new file mode 100644 index 000000000000..37e64cfa5718 --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c | |||
@@ -0,0 +1,285 @@ | |||
1 | /* | ||
2 | * AMD 10Gb Ethernet driver | ||
3 | * | ||
4 | * This file is available to you under your choice of the following two | ||
5 | * licenses: | ||
6 | * | ||
7 | * License 1: GPLv2 | ||
8 | * | ||
9 | * Copyright (c) 2014 Advanced Micro Devices, Inc. | ||
10 | * | ||
11 | * This file is free software; you may copy, redistribute and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation, either version 2 of the License, or (at | ||
14 | * your option) any later version. | ||
15 | * | ||
16 | * This file is distributed in the hope that it will be useful, but | ||
17 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
19 | * General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
23 | * | ||
24 | * This file incorporates work covered by the following copyright and | ||
25 | * permission notice: | ||
26 | * The Synopsys DWC ETHER XGMAC Software Driver and documentation | ||
27 | * (hereinafter "Software") is an unsupported proprietary work of Synopsys, | ||
28 | * Inc. unless otherwise expressly agreed to in writing between Synopsys | ||
29 | * and you. | ||
30 | * | ||
31 | * The Software IS NOT an item of Licensed Software or Licensed Product | ||
32 | * under any End User Software License Agreement or Agreement for Licensed | ||
33 | * Product with Synopsys or any supplement thereto. Permission is hereby | ||
34 | * granted, free of charge, to any person obtaining a copy of this software | ||
35 | * annotated with this license and the Software, to deal in the Software | ||
36 | * without restriction, including without limitation the rights to use, | ||
37 | * copy, modify, merge, publish, distribute, sublicense, and/or sell copies | ||
38 | * of the Software, and to permit persons to whom the Software is furnished | ||
39 | * to do so, subject to the following conditions: | ||
40 | * | ||
41 | * The above copyright notice and this permission notice shall be included | ||
42 | * in all copies or substantial portions of the Software. | ||
43 | * | ||
44 | * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" | ||
45 | * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | ||
46 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A | ||
47 | * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS | ||
48 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
49 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
50 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
51 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
52 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
53 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF | ||
54 | * THE POSSIBILITY OF SUCH DAMAGE. | ||
55 | * | ||
56 | * | ||
57 | * License 2: Modified BSD | ||
58 | * | ||
59 | * Copyright (c) 2014 Advanced Micro Devices, Inc. | ||
60 | * All rights reserved. | ||
61 | * | ||
62 | * Redistribution and use in source and binary forms, with or without | ||
63 | * modification, are permitted provided that the following conditions are met: | ||
64 | * * Redistributions of source code must retain the above copyright | ||
65 | * notice, this list of conditions and the following disclaimer. | ||
66 | * * Redistributions in binary form must reproduce the above copyright | ||
67 | * notice, this list of conditions and the following disclaimer in the | ||
68 | * documentation and/or other materials provided with the distribution. | ||
69 | * * Neither the name of Advanced Micro Devices, Inc. nor the | ||
70 | * names of its contributors may be used to endorse or promote products | ||
71 | * derived from this software without specific prior written permission. | ||
72 | * | ||
73 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
74 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
75 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
76 | * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY | ||
77 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
78 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
79 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
80 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
81 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
82 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
83 | * | ||
84 | * This file incorporates work covered by the following copyright and | ||
85 | * permission notice: | ||
86 | * The Synopsys DWC ETHER XGMAC Software Driver and documentation | ||
87 | * (hereinafter "Software") is an unsupported proprietary work of Synopsys, | ||
88 | * Inc. unless otherwise expressly agreed to in writing between Synopsys | ||
89 | * and you. | ||
90 | * | ||
91 | * The Software IS NOT an item of Licensed Software or Licensed Product | ||
92 | * under any End User Software License Agreement or Agreement for Licensed | ||
93 | * Product with Synopsys or any supplement thereto. Permission is hereby | ||
94 | * granted, free of charge, to any person obtaining a copy of this software | ||
95 | * annotated with this license and the Software, to deal in the Software | ||
96 | * without restriction, including without limitation the rights to use, | ||
97 | * copy, modify, merge, publish, distribute, sublicense, and/or sell copies | ||
98 | * of the Software, and to permit persons to whom the Software is furnished | ||
99 | * to do so, subject to the following conditions: | ||
100 | * | ||
101 | * The above copyright notice and this permission notice shall be included | ||
102 | * in all copies or substantial portions of the Software. | ||
103 | * | ||
104 | * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" | ||
105 | * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | ||
106 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A | ||
107 | * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS | ||
108 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
109 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
110 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
111 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
112 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
113 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF | ||
114 | * THE POSSIBILITY OF SUCH DAMAGE. | ||
115 | */ | ||
116 | |||
117 | #include <linux/clk.h> | ||
118 | #include <linux/clocksource.h> | ||
119 | #include <linux/ptp_clock_kernel.h> | ||
120 | #include <linux/net_tstamp.h> | ||
121 | |||
122 | #include "xgbe.h" | ||
123 | #include "xgbe-common.h" | ||
124 | |||
125 | |||
126 | static cycle_t xgbe_cc_read(const struct cyclecounter *cc) | ||
127 | { | ||
128 | struct xgbe_prv_data *pdata = container_of(cc, | ||
129 | struct xgbe_prv_data, | ||
130 | tstamp_cc); | ||
131 | u64 nsec; | ||
132 | |||
133 | nsec = pdata->hw_if.get_tstamp_time(pdata); | ||
134 | |||
135 | return nsec; | ||
136 | } | ||
137 | |||
138 | static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta) | ||
139 | { | ||
140 | struct xgbe_prv_data *pdata = container_of(info, | ||
141 | struct xgbe_prv_data, | ||
142 | ptp_clock_info); | ||
143 | unsigned long flags; | ||
144 | u64 adjust; | ||
145 | u32 addend, diff; | ||
146 | unsigned int neg_adjust = 0; | ||
147 | |||
148 | if (delta < 0) { | ||
149 | neg_adjust = 1; | ||
150 | delta = -delta; | ||
151 | } | ||
152 | |||
153 | adjust = pdata->tstamp_addend; | ||
154 | adjust *= delta; | ||
155 | diff = div_u64(adjust, 1000000000UL); | ||
156 | |||
157 | addend = (neg_adjust) ? pdata->tstamp_addend - diff : | ||
158 | pdata->tstamp_addend + diff; | ||
159 | |||
160 | spin_lock_irqsave(&pdata->tstamp_lock, flags); | ||
161 | |||
162 | pdata->hw_if.update_tstamp_addend(pdata, addend); | ||
163 | |||
164 | spin_unlock_irqrestore(&pdata->tstamp_lock, flags); | ||
165 | |||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta) | ||
170 | { | ||
171 | struct xgbe_prv_data *pdata = container_of(info, | ||
172 | struct xgbe_prv_data, | ||
173 | ptp_clock_info); | ||
174 | unsigned long flags; | ||
175 | u64 nsec; | ||
176 | |||
177 | spin_lock_irqsave(&pdata->tstamp_lock, flags); | ||
178 | |||
179 | nsec = timecounter_read(&pdata->tstamp_tc); | ||
180 | |||
181 | nsec += delta; | ||
182 | timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec); | ||
183 | |||
184 | spin_unlock_irqrestore(&pdata->tstamp_lock, flags); | ||
185 | |||
186 | return 0; | ||
187 | } | ||
188 | |||
189 | static int xgbe_gettime(struct ptp_clock_info *info, struct timespec *ts) | ||
190 | { | ||
191 | struct xgbe_prv_data *pdata = container_of(info, | ||
192 | struct xgbe_prv_data, | ||
193 | ptp_clock_info); | ||
194 | unsigned long flags; | ||
195 | u64 nsec; | ||
196 | |||
197 | spin_lock_irqsave(&pdata->tstamp_lock, flags); | ||
198 | |||
199 | nsec = timecounter_read(&pdata->tstamp_tc); | ||
200 | |||
201 | spin_unlock_irqrestore(&pdata->tstamp_lock, flags); | ||
202 | |||
203 | *ts = ns_to_timespec(nsec); | ||
204 | |||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | static int xgbe_settime(struct ptp_clock_info *info, const struct timespec *ts) | ||
209 | { | ||
210 | struct xgbe_prv_data *pdata = container_of(info, | ||
211 | struct xgbe_prv_data, | ||
212 | ptp_clock_info); | ||
213 | unsigned long flags; | ||
214 | u64 nsec; | ||
215 | |||
216 | nsec = timespec_to_ns(ts); | ||
217 | |||
218 | spin_lock_irqsave(&pdata->tstamp_lock, flags); | ||
219 | |||
220 | timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec); | ||
221 | |||
222 | spin_unlock_irqrestore(&pdata->tstamp_lock, flags); | ||
223 | |||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | static int xgbe_enable(struct ptp_clock_info *info, | ||
228 | struct ptp_clock_request *request, int on) | ||
229 | { | ||
230 | return -EOPNOTSUPP; | ||
231 | } | ||
232 | |||
233 | void xgbe_ptp_register(struct xgbe_prv_data *pdata) | ||
234 | { | ||
235 | struct ptp_clock_info *info = &pdata->ptp_clock_info; | ||
236 | struct ptp_clock *clock; | ||
237 | struct cyclecounter *cc = &pdata->tstamp_cc; | ||
238 | u64 dividend; | ||
239 | |||
240 | snprintf(info->name, sizeof(info->name), "%s", | ||
241 | netdev_name(pdata->netdev)); | ||
242 | info->owner = THIS_MODULE; | ||
243 | info->max_adj = clk_get_rate(pdata->ptpclk); | ||
244 | info->adjfreq = xgbe_adjfreq; | ||
245 | info->adjtime = xgbe_adjtime; | ||
246 | info->gettime = xgbe_gettime; | ||
247 | info->settime = xgbe_settime; | ||
248 | info->enable = xgbe_enable; | ||
249 | |||
250 | clock = ptp_clock_register(info, pdata->dev); | ||
251 | if (IS_ERR(clock)) { | ||
252 | dev_err(pdata->dev, "ptp_clock_register failed\n"); | ||
253 | return; | ||
254 | } | ||
255 | |||
256 | pdata->ptp_clock = clock; | ||
257 | |||
258 | /* Calculate the addend: | ||
259 | * addend = 2^32 / (PTP ref clock / 50MHz) | ||
260 | * = (2^32 * 50MHz) / PTP ref clock | ||
261 | */ | ||
262 | dividend = 50000000; | ||
263 | dividend <<= 32; | ||
264 | pdata->tstamp_addend = div_u64(dividend, clk_get_rate(pdata->ptpclk)); | ||
265 | |||
266 | /* Setup the timecounter */ | ||
267 | cc->read = xgbe_cc_read; | ||
268 | cc->mask = CLOCKSOURCE_MASK(64); | ||
269 | cc->mult = 1; | ||
270 | cc->shift = 0; | ||
271 | |||
272 | timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, | ||
273 | ktime_to_ns(ktime_get_real())); | ||
274 | |||
275 | /* Disable all timestamping to start */ | ||
276 | XGMAC_IOWRITE(pdata, MAC_TCR, 0); | ||
277 | pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF; | ||
278 | pdata->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; | ||
279 | } | ||
280 | |||
281 | void xgbe_ptp_unregister(struct xgbe_prv_data *pdata) | ||
282 | { | ||
283 | if (pdata->ptp_clock) | ||
284 | ptp_clock_unregister(pdata->ptp_clock); | ||
285 | } | ||
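The addend computed in xgbe_ptp_register() above implements the fine-correction scheme implied by the comment: on every PTP reference-clock cycle the MAC adds the addend to a 32-bit accumulator and advances the timestamp by SSINC (20 ns) on each wrap, so the addend is what maps the real reference clock onto the nominal 50 MHz rate. A standalone sketch of that arithmetic and of the adjfreq() scaling, using an example 125 MHz reference clock (an assumption, not a value from this patch):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t ptp_rate = 125000000;   /* example PTP reference clock, not from the patch */
        uint64_t addend = (50000000ULL << 32) / ptp_rate;
        int64_t delta_ppb = 100;         /* example +100 ppb frequency adjustment */
        uint64_t diff = (addend * (uint64_t)delta_ppb) / 1000000000ULL;

        printf("addend                = 0x%08llx\n", (unsigned long long)addend);
        printf("addend after +100 ppb = 0x%08llx\n", (unsigned long long)(addend + diff));
        return 0;
    }

This is also why the cyclecounter is registered with mult = 1 and shift = 0: get_tstamp_time() already returns nanoseconds, so the timecounter needs no further scaling.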
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 9e24b296e272..07bf70a82908 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h | |||
@@ -123,6 +123,10 @@ | |||
123 | #include <linux/phy.h> | 123 | #include <linux/phy.h> |
124 | #include <linux/if_vlan.h> | 124 | #include <linux/if_vlan.h> |
125 | #include <linux/bitops.h> | 125 | #include <linux/bitops.h> |
126 | #include <linux/ptp_clock_kernel.h> | ||
127 | #include <linux/clocksource.h> | ||
128 | #include <linux/net_tstamp.h> | ||
129 | #include <net/dcbnl.h> | ||
126 | 130 | ||
127 | 131 | ||
128 | #define XGBE_DRV_NAME "amd-xgbe" | 132 | #define XGBE_DRV_NAME "amd-xgbe" |
@@ -141,6 +145,7 @@ | |||
141 | #define XGBE_RX_BUF_ALIGN 64 | 145 | #define XGBE_RX_BUF_ALIGN 64 |
142 | 146 | ||
143 | #define XGBE_MAX_DMA_CHANNELS 16 | 147 | #define XGBE_MAX_DMA_CHANNELS 16 |
148 | #define XGBE_MAX_QUEUES 16 | ||
144 | 149 | ||
145 | /* DMA cache settings - Outer sharable, write-back, write-allocate */ | 150 | /* DMA cache settings - Outer sharable, write-back, write-allocate */ |
146 | #define XGBE_DMA_OS_AXDOMAIN 0x2 | 151 | #define XGBE_DMA_OS_AXDOMAIN 0x2 |
@@ -164,6 +169,16 @@ | |||
164 | #define XGBE_PHY_NAME "amd_xgbe_phy" | 169 | #define XGBE_PHY_NAME "amd_xgbe_phy" |
165 | #define XGBE_PRTAD 0 | 170 | #define XGBE_PRTAD 0 |
166 | 171 | ||
172 | /* Device-tree clock names */ | ||
173 | #define XGBE_DMA_CLOCK "dma_clk" | ||
174 | #define XGBE_PTP_CLOCK "ptp_clk" | ||
175 | |||
176 | /* Timestamp support - values based on 50MHz PTP clock | ||
177 | * 50MHz => 20 nsec | ||
178 | */ | ||
179 | #define XGBE_TSTAMP_SSINC 20 | ||
180 | #define XGBE_TSTAMP_SNSINC 0 | ||
181 | |||
167 | /* Driver PMT macros */ | 182 | /* Driver PMT macros */ |
168 | #define XGMAC_DRIVER_CONTEXT 1 | 183 | #define XGMAC_DRIVER_CONTEXT 1 |
169 | #define XGMAC_IOCTL_CONTEXT 2 | 184 | #define XGMAC_IOCTL_CONTEXT 2 |
@@ -171,7 +186,7 @@ | |||
171 | #define XGBE_FIFO_SIZE_B(x) (x) | 186 | #define XGBE_FIFO_SIZE_B(x) (x) |
172 | #define XGBE_FIFO_SIZE_KB(x) (x * 1024) | 187 | #define XGBE_FIFO_SIZE_KB(x) (x * 1024) |
173 | 188 | ||
174 | #define XGBE_TC_CNT 2 | 189 | #define XGBE_TC_MIN_QUANTUM 10 |
175 | 190 | ||
176 | /* Helper macro for descriptor handling | 191 | /* Helper macro for descriptor handling |
177 | * Always use XGBE_GET_DESC_DATA to access the descriptor data | 192 | * Always use XGBE_GET_DESC_DATA to access the descriptor data |
@@ -214,6 +229,8 @@ struct xgbe_packet_data { | |||
214 | unsigned short mss; | 229 | unsigned short mss; |
215 | 230 | ||
216 | unsigned short vlan_ctag; | 231 | unsigned short vlan_ctag; |
232 | |||
233 | u64 rx_tstamp; | ||
217 | }; | 234 | }; |
218 | 235 | ||
219 | /* Common Rx and Tx descriptor mapping */ | 236 | /* Common Rx and Tx descriptor mapping */ |
@@ -242,6 +259,20 @@ struct xgbe_ring_data { | |||
242 | unsigned int interrupt; /* Interrupt indicator */ | 259 | unsigned int interrupt; /* Interrupt indicator */ |
243 | 260 | ||
244 | unsigned int mapped_as_page; | 261 | unsigned int mapped_as_page; |
262 | |||
263 | /* Incomplete receive save location. If the budget is exhausted | ||
264 | * or the last descriptor (last normal descriptor or a following | ||
265 | * context descriptor) has not been DMA'd yet the current state | ||
266 | * of the receive processing needs to be saved. | ||
267 | */ | ||
268 | unsigned int state_saved; | ||
269 | struct { | ||
270 | unsigned int incomplete; | ||
271 | unsigned int context_next; | ||
272 | struct sk_buff *skb; | ||
273 | unsigned int len; | ||
274 | unsigned int error; | ||
275 | } state; | ||
245 | }; | 276 | }; |
246 | 277 | ||
247 | struct xgbe_ring { | 278 | struct xgbe_ring { |
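The state-save block above exists so the Rx poll routine can stop mid-frame (budget exhausted, or the frame's final/context descriptor not yet written back) and pick up where it left off on the next NAPI pass. A minimal sketch of the save/restore pattern follows, using hypothetical example_* helpers; the real driver open-codes this inside its poll loop in xgbe-drv.c.

#include "xgbe.h"

/* Stash the partially assembled frame so the next poll can resume it */
static void example_rx_save_state(struct xgbe_ring_data *rdata,
				  struct sk_buff *skb, unsigned int len,
				  unsigned int incomplete,
				  unsigned int context_next,
				  unsigned int error)
{
	rdata->state.skb = skb;
	rdata->state.len = len;
	rdata->state.incomplete = incomplete;
	rdata->state.context_next = context_next;
	rdata->state.error = error;
	rdata->state_saved = 1;
}

/* Resume a frame saved by a previous poll, if any; returns the saved
 * skb (or NULL) and restores the accumulated length.
 */
static struct sk_buff *example_rx_restore_state(struct xgbe_ring_data *rdata,
						unsigned int *len)
{
	if (!rdata->state_saved)
		return NULL;

	rdata->state_saved = 0;
	*len = rdata->state.len;

	return rdata->state.skb;
}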
@@ -467,6 +498,18 @@ struct xgbe_hw_if { | |||
467 | void (*rx_mmc_int)(struct xgbe_prv_data *); | 498 | void (*rx_mmc_int)(struct xgbe_prv_data *); |
468 | void (*tx_mmc_int)(struct xgbe_prv_data *); | 499 | void (*tx_mmc_int)(struct xgbe_prv_data *); |
469 | void (*read_mmc_stats)(struct xgbe_prv_data *); | 500 | void (*read_mmc_stats)(struct xgbe_prv_data *); |
501 | |||
502 | /* For Timestamp config */ | ||
503 | int (*config_tstamp)(struct xgbe_prv_data *, unsigned int); | ||
504 | void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int); | ||
505 | void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec, | ||
506 | unsigned int nsec); | ||
507 | u64 (*get_tstamp_time)(struct xgbe_prv_data *); | ||
508 | u64 (*get_tx_tstamp)(struct xgbe_prv_data *); | ||
509 | |||
510 | /* For Data Center Bridging config */ | ||
511 | void (*config_dcb_tc)(struct xgbe_prv_data *); | ||
512 | void (*config_dcb_pfc)(struct xgbe_prv_data *); | ||
470 | }; | 513 | }; |
471 | 514 | ||
472 | struct xgbe_desc_if { | 515 | struct xgbe_desc_if { |
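For context on how the new timestamp ops are meant to be consumed: get_tx_tstamp() returns a raw counter value, and because the cyclecounter is registered with mult = 1 and shift = 0 that value is already in nanoseconds, so the usual flow is to run it through the timecounter and report it to the stack. Below is a hedged sketch of that Tx completion path; it is illustrative only, since the corresponding code in xgbe-drv.c is not part of this excerpt.

#include "xgbe.h"

static void example_tx_tstamp_complete(struct xgbe_prv_data *pdata)
{
	struct skb_shared_hwtstamps hwtstamps;
	u64 nsec;

	if (!pdata->tx_tstamp_skb)
		return;

	/* Raw hardware value -> nanoseconds on the timecounter's timeline */
	nsec = timecounter_cyc2time(&pdata->tstamp_tc,
				    pdata->hw_if.get_tx_tstamp(pdata));

	memset(&hwtstamps, 0, sizeof(hwtstamps));
	hwtstamps.hwtstamp = ns_to_ktime(nsec);
	skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);

	dev_kfree_skb_any(pdata->tx_tstamp_skb);
	pdata->tx_tstamp_skb = NULL;
}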
@@ -508,6 +551,7 @@ struct xgbe_hw_features { | |||
508 | unsigned int tso; /* TCP Segmentation Offload */ | 551 | unsigned int tso; /* TCP Segmentation Offload */ |
509 | unsigned int dma_debug; /* DMA Debug Registers */ | 552 | unsigned int dma_debug; /* DMA Debug Registers */ |
510 | unsigned int rss; /* Receive Side Scaling */ | 553 | unsigned int rss; /* Receive Side Scaling */ |
554 | unsigned int tc_cnt; /* Number of Traffic Classes */ | ||
511 | unsigned int hash_table_size; /* Hash Table Size */ | 555 | unsigned int hash_table_size; /* Hash Table Size */ |
512 | unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */ | 556 | unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */ |
513 | 557 | ||
@@ -553,6 +597,9 @@ struct xgbe_prv_data { | |||
553 | unsigned int rx_ring_count; | 597 | unsigned int rx_ring_count; |
554 | unsigned int rx_desc_count; | 598 | unsigned int rx_desc_count; |
555 | 599 | ||
600 | unsigned int tx_q_count; | ||
601 | unsigned int rx_q_count; | ||
602 | |||
556 | /* Tx/Rx common settings */ | 603 | /* Tx/Rx common settings */ |
557 | unsigned int pblx8; | 604 | unsigned int pblx8; |
558 | 605 | ||
@@ -607,8 +654,27 @@ struct xgbe_prv_data { | |||
607 | /* Filtering support */ | 654 | /* Filtering support */ |
608 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; | 655 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; |
609 | 656 | ||
610 | /* System clock value used for Rx watchdog */ | 657 | /* Device clocks */ |
611 | struct clk *sysclock; | 658 | struct clk *sysclk; |
659 | struct clk *ptpclk; | ||
660 | |||
661 | /* Timestamp support */ | ||
662 | spinlock_t tstamp_lock; | ||
663 | struct ptp_clock_info ptp_clock_info; | ||
664 | struct ptp_clock *ptp_clock; | ||
665 | struct hwtstamp_config tstamp_config; | ||
666 | struct cyclecounter tstamp_cc; | ||
667 | struct timecounter tstamp_tc; | ||
668 | unsigned int tstamp_addend; | ||
669 | struct work_struct tx_tstamp_work; | ||
670 | struct sk_buff *tx_tstamp_skb; | ||
671 | u64 tx_tstamp; | ||
672 | |||
673 | /* DCB support */ | ||
674 | struct ieee_ets *ets; | ||
675 | struct ieee_pfc *pfc; | ||
676 | unsigned int q2tc_map[XGBE_MAX_QUEUES]; | ||
677 | unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS]; | ||
612 | 678 | ||
613 | /* Hardware features of the device */ | 679 | /* Hardware features of the device */ |
614 | struct xgbe_hw_features hw_feat; | 680 | struct xgbe_hw_features hw_feat; |
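The two DCB maps above are plain lookup tables, presumably derived from the ieee_ets configuration stored alongside them: prio2q_map records which hardware queue a given 802.1p priority was assigned to, and q2tc_map records which traffic class each queue belongs to. The accessors below are purely hypothetical and only show the direction of each mapping; the tables are actually populated and consumed elsewhere in xgbe-drv.c and xgbe-dev.c.

#include "xgbe.h"

/* Hypothetical lookup helpers; the driver indexes these arrays directly */
static unsigned int example_prio_to_queue(struct xgbe_prv_data *pdata,
					  unsigned int prio)
{
	if (prio >= IEEE_8021QAZ_MAX_TCS)
		prio = IEEE_8021QAZ_MAX_TCS - 1;

	return pdata->prio2q_map[prio];
}

static unsigned int example_queue_to_tc(struct xgbe_prv_data *pdata,
					unsigned int queue)
{
	if (queue >= XGBE_MAX_QUEUES)
		return 0;

	return pdata->q2tc_map[queue];
}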
@@ -635,10 +701,15 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *); | |||
635 | void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *); | 701 | void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *); |
636 | struct net_device_ops *xgbe_get_netdev_ops(void); | 702 | struct net_device_ops *xgbe_get_netdev_ops(void); |
637 | struct ethtool_ops *xgbe_get_ethtool_ops(void); | 703 | struct ethtool_ops *xgbe_get_ethtool_ops(void); |
704 | #ifdef CONFIG_AMD_XGBE_DCB | ||
705 | const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void); | ||
706 | #endif | ||
638 | 707 | ||
639 | int xgbe_mdio_register(struct xgbe_prv_data *); | 708 | int xgbe_mdio_register(struct xgbe_prv_data *); |
640 | void xgbe_mdio_unregister(struct xgbe_prv_data *); | 709 | void xgbe_mdio_unregister(struct xgbe_prv_data *); |
641 | void xgbe_dump_phy_registers(struct xgbe_prv_data *); | 710 | void xgbe_dump_phy_registers(struct xgbe_prv_data *); |
711 | void xgbe_ptp_register(struct xgbe_prv_data *); | ||
712 | void xgbe_ptp_unregister(struct xgbe_prv_data *); | ||
642 | void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int, | 713 | void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int, |
643 | unsigned int); | 714 | unsigned int); |
644 | void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *, | 715 | void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *, |
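The new xgbe_get_dcbnl_ops() getter is guarded by CONFIG_AMD_XGBE_DCB, mirroring how the existing netdev and ethtool ops getters are used. A small sketch of how a probe path would typically wire it up follows; the placement is assumed (the actual assignment lives in xgbe-main.c, not shown in this excerpt), and note that net_device::dcbnl_ops only exists when DCB support is built in.

#include "xgbe.h"

static void example_setup_netdev_ops(struct net_device *netdev)
{
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	/* dcbnl_ops is only a member of struct net_device under CONFIG_DCB */
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif
}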
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c index b57c22442867..388e3029165a 100644 --- a/drivers/net/phy/amd-xgbe-phy.c +++ b/drivers/net/phy/amd-xgbe-phy.c | |||
@@ -74,7 +74,6 @@ | |||
74 | #include <linux/of_platform.h> | 74 | #include <linux/of_platform.h> |
75 | #include <linux/of_device.h> | 75 | #include <linux/of_device.h> |
76 | #include <linux/uaccess.h> | 76 | #include <linux/uaccess.h> |
77 | #include <asm/irq.h> | ||
78 | 77 | ||
79 | 78 | ||
80 | MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>"); | 79 | MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>"); |
@@ -85,6 +84,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); | |||
85 | #define XGBE_PHY_ID 0x000162d0 | 84 | #define XGBE_PHY_ID 0x000162d0 |
86 | #define XGBE_PHY_MASK 0xfffffff0 | 85 | #define XGBE_PHY_MASK 0xfffffff0 |
87 | 86 | ||
87 | #define XGBE_PHY_SPEEDSET_PROPERTY "amd,speed-set" | ||
88 | |||
88 | #define XGBE_AN_INT_CMPLT 0x01 | 89 | #define XGBE_AN_INT_CMPLT 0x01 |
89 | #define XGBE_AN_INC_LINK 0x02 | 90 | #define XGBE_AN_INC_LINK 0x02 |
90 | #define XGBE_AN_PG_RCV 0x04 | 91 | #define XGBE_AN_PG_RCV 0x04 |
@@ -94,6 +95,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); | |||
94 | #define XNP_MP_FORMATTED (1 << 13) | 95 | #define XNP_MP_FORMATTED (1 << 13) |
95 | #define XNP_NP_EXCHANGE (1 << 15) | 96 | #define XNP_NP_EXCHANGE (1 << 15) |
96 | 97 | ||
98 | #define XGBE_PHY_RATECHANGE_COUNT 100 | ||
99 | |||
97 | #ifndef MDIO_PMA_10GBR_PMD_CTRL | 100 | #ifndef MDIO_PMA_10GBR_PMD_CTRL |
98 | #define MDIO_PMA_10GBR_PMD_CTRL 0x0096 | 101 | #define MDIO_PMA_10GBR_PMD_CTRL 0x0096 |
99 | #endif | 102 | #endif |
@@ -116,10 +119,13 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); | |||
116 | #endif | 119 | #endif |
117 | 120 | ||
118 | /* SerDes integration register offsets */ | 121 | /* SerDes integration register offsets */ |
122 | #define SIR0_KR_RT_1 0x002c | ||
119 | #define SIR0_STATUS 0x0040 | 123 | #define SIR0_STATUS 0x0040 |
120 | #define SIR1_SPEED 0x0000 | 124 | #define SIR1_SPEED 0x0000 |
121 | 125 | ||
122 | /* SerDes integration register entry bit positions and sizes */ | 126 | /* SerDes integration register entry bit positions and sizes */ |
127 | #define SIR0_KR_RT_1_RESET_INDEX 11 | ||
128 | #define SIR0_KR_RT_1_RESET_WIDTH 1 | ||
123 | #define SIR0_STATUS_RX_READY_INDEX 0 | 129 | #define SIR0_STATUS_RX_READY_INDEX 0 |
124 | #define SIR0_STATUS_RX_READY_WIDTH 1 | 130 | #define SIR0_STATUS_RX_READY_WIDTH 1 |
125 | #define SIR0_STATUS_TX_READY_INDEX 8 | 131 | #define SIR0_STATUS_TX_READY_INDEX 8 |
@@ -145,7 +151,7 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); | |||
145 | 151 | ||
146 | #define SPEED_2500_CDR 0x2 | 152 | #define SPEED_2500_CDR 0x2 |
147 | #define SPEED_2500_PLL 0x0 | 153 | #define SPEED_2500_PLL 0x0 |
148 | #define SPEED_2500_RATE 0x2 | 154 | #define SPEED_2500_RATE 0x1 |
149 | #define SPEED_2500_TXAMP 0xf | 155 | #define SPEED_2500_TXAMP 0xf |
150 | #define SPEED_2500_WORD 0x1 | 156 | #define SPEED_2500_WORD 0x1 |
151 | 157 | ||
@@ -192,6 +198,16 @@ do { \ | |||
192 | (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \ | 198 | (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \ |
193 | } while (0) | 199 | } while (0) |
194 | 200 | ||
201 | #define XSIR_GET_BITS(_var, _prefix, _field) \ | ||
202 | GET_BITS((_var), \ | ||
203 | _prefix##_##_field##_INDEX, \ | ||
204 | _prefix##_##_field##_WIDTH) | ||
205 | |||
206 | #define XSIR_SET_BITS(_var, _prefix, _field, _val) \ | ||
207 | SET_BITS((_var), \ | ||
208 | _prefix##_##_field##_INDEX, \ | ||
209 | _prefix##_##_field##_WIDTH, (_val)) | ||
210 | |||
195 | /* Macros for reading or writing SerDes integration registers | 211 | /* Macros for reading or writing SerDes integration registers |
196 | * The ioread macros will get bit fields or full values using the | 212 | * The ioread macros will get bit fields or full values using the |
197 | * register definitions formed using the input names | 213 | * register definitions formed using the input names |
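The new XSIR_GET_BITS()/XSIR_SET_BITS() macros follow the driver's usual token-pasting convention: the _prefix and _field arguments are concatenated into the *_INDEX/*_WIDTH definitions above, so callers can test several fields of a value they have already read instead of issuing a fresh MMIO access per field (which is what the rate-change wait loop later in this patch relies on). A small illustration, written as if it lived inside amd-xgbe-phy.c where these macros and XSIR0_IOREAD() are defined; the example_ function name is invented.

/* XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) expands to
 *   GET_BITS(status, SIR0_STATUS_RX_READY_INDEX,   bit 0
 *                    SIR0_STATUS_RX_READY_WIDTH)   1 bit wide
 */
static int example_serdes_ready(struct amd_xgbe_phy_priv *priv)
{
	u16 status = XSIR0_IOREAD(priv, SIR0_STATUS);

	return XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
	       XSIR_GET_BITS(status, SIR0_STATUS, TX_READY);
}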
@@ -292,6 +308,11 @@ enum amd_xgbe_phy_mode { | |||
292 | AMD_XGBE_MODE_KX, | 308 | AMD_XGBE_MODE_KX, |
293 | }; | 309 | }; |
294 | 310 | ||
311 | enum amd_xgbe_phy_speedset { | ||
312 | AMD_XGBE_PHY_SPEEDSET_1000_10000, | ||
313 | AMD_XGBE_PHY_SPEEDSET_2500_10000, | ||
314 | }; | ||
315 | |||
295 | struct amd_xgbe_phy_priv { | 316 | struct amd_xgbe_phy_priv { |
296 | struct platform_device *pdev; | 317 | struct platform_device *pdev; |
297 | struct device *dev; | 318 | struct device *dev; |
@@ -311,6 +332,7 @@ struct amd_xgbe_phy_priv { | |||
311 | /* Maintain link status for re-starting auto-negotiation */ | 332 | /* Maintain link status for re-starting auto-negotiation */ |
312 | unsigned int link; | 333 | unsigned int link; |
313 | enum amd_xgbe_phy_mode mode; | 334 | enum amd_xgbe_phy_mode mode; |
335 | unsigned int speed_set; | ||
314 | 336 | ||
315 | /* Auto-negotiation state machine support */ | 337 | /* Auto-negotiation state machine support */ |
316 | struct mutex an_mutex; | 338 | struct mutex an_mutex; |
@@ -380,14 +402,25 @@ static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev) | |||
380 | static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev) | 402 | static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev) |
381 | { | 403 | { |
382 | struct amd_xgbe_phy_priv *priv = phydev->priv; | 404 | struct amd_xgbe_phy_priv *priv = phydev->priv; |
405 | unsigned int wait; | ||
406 | u16 status; | ||
383 | 407 | ||
384 | /* Release Rx and Tx ratechange */ | 408 | /* Release Rx and Tx ratechange */ |
385 | XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0); | 409 | XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0); |
386 | 410 | ||
387 | /* Wait for Rx and Tx ready */ | 411 | /* Wait for Rx and Tx ready */ |
388 | while (!XSIR0_IOREAD_BITS(priv, SIR0_STATUS, RX_READY) && | 412 | wait = XGBE_PHY_RATECHANGE_COUNT; |
389 | !XSIR0_IOREAD_BITS(priv, SIR0_STATUS, TX_READY)) | 413 | while (wait--) { |
390 | usleep_range(10, 20); | 414 | usleep_range(10, 20); |
415 | |||
416 | status = XSIR0_IOREAD(priv, SIR0_STATUS); | ||
417 | if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) && | ||
418 | XSIR_GET_BITS(status, SIR0_STATUS, TX_READY)) | ||
419 | return; | ||
420 | } | ||
421 | |||
422 | netdev_err(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n", | ||
423 | status); | ||
391 | } | 424 | } |
392 | 425 | ||
393 | static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) | 426 | static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) |
@@ -546,10 +579,14 @@ static int amd_xgbe_phy_switch_mode(struct phy_device *phydev) | |||
546 | int ret; | 579 | int ret; |
547 | 580 | ||
548 | /* If we are in KR switch to KX, and vice-versa */ | 581 | /* If we are in KR switch to KX, and vice-versa */ |
549 | if (priv->mode == AMD_XGBE_MODE_KR) | 582 | if (priv->mode == AMD_XGBE_MODE_KR) { |
550 | ret = amd_xgbe_phy_gmii_mode(phydev); | 583 | if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000) |
551 | else | 584 | ret = amd_xgbe_phy_gmii_mode(phydev); |
585 | else | ||
586 | ret = amd_xgbe_phy_gmii_2500_mode(phydev); | ||
587 | } else { | ||
552 | ret = amd_xgbe_phy_xgmii_mode(phydev); | 588 | ret = amd_xgbe_phy_xgmii_mode(phydev); |
589 | } | ||
553 | 590 | ||
554 | return ret; | 591 | return ret; |
555 | } | 592 | } |
@@ -602,9 +639,13 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev, | |||
602 | if (ret < 0) | 639 | if (ret < 0) |
603 | return AMD_XGBE_AN_ERROR; | 640 | return AMD_XGBE_AN_ERROR; |
604 | 641 | ||
642 | XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1); | ||
643 | |||
605 | ret |= 0x01; | 644 | ret |= 0x01; |
606 | phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret); | 645 | phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret); |
607 | 646 | ||
647 | XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0); | ||
648 | |||
608 | return AMD_XGBE_AN_EVENT; | 649 | return AMD_XGBE_AN_EVENT; |
609 | } | 650 | } |
610 | 651 | ||
@@ -713,7 +754,8 @@ static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev) | |||
713 | else | 754 | else |
714 | ret &= ~0x80; | 755 | ret &= ~0x80; |
715 | 756 | ||
716 | if (phydev->supported & SUPPORTED_1000baseKX_Full) | 757 | if ((phydev->supported & SUPPORTED_1000baseKX_Full) || |
758 | (phydev->supported & SUPPORTED_2500baseX_Full)) | ||
717 | ret |= 0x20; | 759 | ret |= 0x20; |
718 | else | 760 | else |
719 | ret &= ~0x20; | 761 | ret &= ~0x20; |
@@ -815,6 +857,7 @@ static void amd_xgbe_an_state_machine(struct work_struct *work) | |||
815 | struct phy_device *phydev = priv->phydev; | 857 | struct phy_device *phydev = priv->phydev; |
816 | enum amd_xgbe_phy_an cur_state; | 858 | enum amd_xgbe_phy_an cur_state; |
817 | int sleep; | 859 | int sleep; |
860 | unsigned int an_supported = 0; | ||
818 | 861 | ||
819 | while (1) { | 862 | while (1) { |
820 | mutex_lock(&priv->an_mutex); | 863 | mutex_lock(&priv->an_mutex); |
@@ -824,6 +867,7 @@ static void amd_xgbe_an_state_machine(struct work_struct *work) | |||
824 | switch (priv->an_state) { | 867 | switch (priv->an_state) { |
825 | case AMD_XGBE_AN_START: | 868 | case AMD_XGBE_AN_START: |
826 | priv->an_state = amd_xgbe_an_start(phydev); | 869 | priv->an_state = amd_xgbe_an_start(phydev); |
870 | an_supported = 0; | ||
827 | break; | 871 | break; |
828 | 872 | ||
829 | case AMD_XGBE_AN_EVENT: | 873 | case AMD_XGBE_AN_EVENT: |
@@ -832,6 +876,7 @@ static void amd_xgbe_an_state_machine(struct work_struct *work) | |||
832 | 876 | ||
833 | case AMD_XGBE_AN_PAGE_RECEIVED: | 877 | case AMD_XGBE_AN_PAGE_RECEIVED: |
834 | priv->an_state = amd_xgbe_an_page_received(phydev); | 878 | priv->an_state = amd_xgbe_an_page_received(phydev); |
879 | an_supported++; | ||
835 | break; | 880 | break; |
836 | 881 | ||
837 | case AMD_XGBE_AN_INCOMPAT_LINK: | 882 | case AMD_XGBE_AN_INCOMPAT_LINK: |
@@ -839,6 +884,11 @@ static void amd_xgbe_an_state_machine(struct work_struct *work) | |||
839 | break; | 884 | break; |
840 | 885 | ||
841 | case AMD_XGBE_AN_COMPLETE: | 886 | case AMD_XGBE_AN_COMPLETE: |
887 | netdev_info(phydev->attached_dev, "%s successful\n", | ||
888 | an_supported ? "Auto negotiation" | ||
889 | : "Parallel detection"); | ||
890 | /* fall through */ | ||
891 | |||
842 | case AMD_XGBE_AN_NO_LINK: | 892 | case AMD_XGBE_AN_NO_LINK: |
843 | case AMD_XGBE_AN_EXIT: | 893 | case AMD_XGBE_AN_EXIT: |
844 | goto exit_unlock; | 894 | goto exit_unlock; |
@@ -896,14 +946,22 @@ static int amd_xgbe_phy_soft_reset(struct phy_device *phydev) | |||
896 | 946 | ||
897 | static int amd_xgbe_phy_config_init(struct phy_device *phydev) | 947 | static int amd_xgbe_phy_config_init(struct phy_device *phydev) |
898 | { | 948 | { |
949 | struct amd_xgbe_phy_priv *priv = phydev->priv; | ||
950 | |||
899 | /* Initialize supported features */ | 951 | /* Initialize supported features */ |
900 | phydev->supported = SUPPORTED_Autoneg; | 952 | phydev->supported = SUPPORTED_Autoneg; |
901 | phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; | 953 | phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; |
902 | phydev->supported |= SUPPORTED_Backplane; | 954 | phydev->supported |= SUPPORTED_Backplane; |
903 | phydev->supported |= SUPPORTED_1000baseKX_Full | | ||
904 | SUPPORTED_2500baseX_Full; | ||
905 | phydev->supported |= SUPPORTED_10000baseKR_Full | | 955 | phydev->supported |= SUPPORTED_10000baseKR_Full | |
906 | SUPPORTED_10000baseR_FEC; | 956 | SUPPORTED_10000baseR_FEC; |
957 | switch (priv->speed_set) { | ||
958 | case AMD_XGBE_PHY_SPEEDSET_1000_10000: | ||
959 | phydev->supported |= SUPPORTED_1000baseKX_Full; | ||
960 | break; | ||
961 | case AMD_XGBE_PHY_SPEEDSET_2500_10000: | ||
962 | phydev->supported |= SUPPORTED_2500baseX_Full; | ||
963 | break; | ||
964 | } | ||
907 | phydev->advertising = phydev->supported; | 965 | phydev->advertising = phydev->supported; |
908 | 966 | ||
909 | /* Turn off and clear interrupts */ | 967 | /* Turn off and clear interrupts */ |
@@ -1020,9 +1078,9 @@ static int amd_xgbe_phy_update_link(struct phy_device *phydev) | |||
1020 | * (re-)established (cable connected after the interface is | 1078 | * (re-)established (cable connected after the interface is |
1021 | * up, etc.), the link status may report no link. If there | 1079 | * up, etc.), the link status may report no link. If there |
1022 | * is no link, try switching modes and checking the status | 1080 | * is no link, try switching modes and checking the status |
1023 | * again. | 1081 | * again if auto negotiation is enabled. |
1024 | */ | 1082 | */ |
1025 | check_again = 1; | 1083 | check_again = (phydev->autoneg == AUTONEG_ENABLE) ? 1 : 0; |
1026 | again: | 1084 | again: |
1027 | /* Link status is latched low, so read once to clear | 1085 | /* Link status is latched low, so read once to clear |
1028 | * and then read again to get current state | 1086 | * and then read again to get current state |
@@ -1038,8 +1096,10 @@ again: | |||
1038 | phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0; | 1096 | phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0; |
1039 | 1097 | ||
1040 | if (!phydev->link) { | 1098 | if (!phydev->link) { |
1041 | ret = amd_xgbe_phy_switch_mode(phydev); | ||
1042 | if (check_again) { | 1099 | if (check_again) { |
1100 | ret = amd_xgbe_phy_switch_mode(phydev); | ||
1101 | if (ret < 0) | ||
1102 | return ret; | ||
1043 | check_again = 0; | 1103 | check_again = 0; |
1044 | goto again; | 1104 | goto again; |
1045 | } | 1105 | } |
@@ -1059,6 +1119,7 @@ again: | |||
1059 | 1119 | ||
1060 | static int amd_xgbe_phy_read_status(struct phy_device *phydev) | 1120 | static int amd_xgbe_phy_read_status(struct phy_device *phydev) |
1061 | { | 1121 | { |
1122 | struct amd_xgbe_phy_priv *priv = phydev->priv; | ||
1062 | u32 mmd_mask = phydev->c45_ids.devices_in_package; | 1123 | u32 mmd_mask = phydev->c45_ids.devices_in_package; |
1063 | int ret, mode, ad_ret, lp_ret; | 1124 | int ret, mode, ad_ret, lp_ret; |
1064 | 1125 | ||
@@ -1108,9 +1169,19 @@ static int amd_xgbe_phy_read_status(struct phy_device *phydev) | |||
1108 | return ret; | 1169 | return ret; |
1109 | } | 1170 | } |
1110 | } else { | 1171 | } else { |
1111 | phydev->speed = SPEED_1000; | 1172 | int (*mode_fcn)(struct phy_device *); |
1173 | |||
1174 | if (priv->speed_set == | ||
1175 | AMD_XGBE_PHY_SPEEDSET_1000_10000) { | ||
1176 | phydev->speed = SPEED_1000; | ||
1177 | mode_fcn = amd_xgbe_phy_gmii_mode; | ||
1178 | } else { | ||
1179 | phydev->speed = SPEED_2500; | ||
1180 | mode_fcn = amd_xgbe_phy_gmii_2500_mode; | ||
1181 | } | ||
1182 | |||
1112 | if (mode == MDIO_PCS_CTRL2_10GBR) { | 1183 | if (mode == MDIO_PCS_CTRL2_10GBR) { |
1113 | ret = amd_xgbe_phy_gmii_mode(phydev); | 1184 | ret = mode_fcn(phydev); |
1114 | if (ret < 0) | 1185 | if (ret < 0) |
1115 | return ret; | 1186 | return ret; |
1116 | } | 1187 | } |
@@ -1118,8 +1189,15 @@ static int amd_xgbe_phy_read_status(struct phy_device *phydev) | |||
1118 | 1189 | ||
1119 | phydev->duplex = DUPLEX_FULL; | 1190 | phydev->duplex = DUPLEX_FULL; |
1120 | } else { | 1191 | } else { |
1121 | phydev->speed = (mode == MDIO_PCS_CTRL2_10GBR) ? SPEED_10000 | 1192 | if (mode == MDIO_PCS_CTRL2_10GBR) { |
1122 | : SPEED_1000; | 1193 | phydev->speed = SPEED_10000; |
1194 | } else { | ||
1195 | if (priv->speed_set == | ||
1196 | AMD_XGBE_PHY_SPEEDSET_1000_10000) | ||
1197 | phydev->speed = SPEED_1000; | ||
1198 | else | ||
1199 | phydev->speed = SPEED_2500; | ||
1200 | } | ||
1123 | phydev->duplex = DUPLEX_FULL; | 1201 | phydev->duplex = DUPLEX_FULL; |
1124 | phydev->pause = 0; | 1202 | phydev->pause = 0; |
1125 | phydev->asym_pause = 0; | 1203 | phydev->asym_pause = 0; |
@@ -1176,6 +1254,8 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev) | |||
1176 | struct platform_device *pdev; | 1254 | struct platform_device *pdev; |
1177 | struct device *dev; | 1255 | struct device *dev; |
1178 | char *wq_name; | 1256 | char *wq_name; |
1257 | const __be32 *property; | ||
1258 | unsigned int speed_set; | ||
1179 | int ret; | 1259 | int ret; |
1180 | 1260 | ||
1181 | if (!phydev->dev.of_node) | 1261 | if (!phydev->dev.of_node) |
@@ -1227,6 +1307,26 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev) | |||
1227 | goto err_sir0; | 1307 | goto err_sir0; |
1228 | } | 1308 | } |
1229 | 1309 | ||
1310 | /* Get the device speed set property */ | ||
1311 | speed_set = 0; | ||
1312 | property = of_get_property(dev->of_node, XGBE_PHY_SPEEDSET_PROPERTY, | ||
1313 | NULL); | ||
1314 | if (property) | ||
1315 | speed_set = be32_to_cpu(*property); | ||
1316 | |||
1317 | switch (speed_set) { | ||
1318 | case 0: | ||
1319 | priv->speed_set = AMD_XGBE_PHY_SPEEDSET_1000_10000; | ||
1320 | break; | ||
1321 | case 1: | ||
1322 | priv->speed_set = AMD_XGBE_PHY_SPEEDSET_2500_10000; | ||
1323 | break; | ||
1324 | default: | ||
1325 | dev_err(dev, "invalid amd,speed-set property\n"); | ||
1326 | ret = -EINVAL; | ||
1327 | goto err_sir1; | ||
1328 | } | ||
1329 | |||
1230 | priv->link = 1; | 1330 | priv->link = 1; |
1231 | 1331 | ||
1232 | ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2); | 1332 | ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2); |