diff options
| author | David S. Miller <davem@davemloft.net> | 2015-01-19 15:07:43 -0500 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2015-01-19 15:07:43 -0500 |
| commit | b66a4eaaee6470d486f3ce3f4a2a6fef8501ea7e (patch) | |
| tree | ebc07f415869066ebe05ee3bf77ba0f9ddb8c37f | |
| parent | 92cb13fb21c60b3567c7bb7e55be7c38dc6bda38 (diff) | |
| parent | 90cff9e2da9f7be6d7847001f430c3e5f8e9532e (diff) | |
Merge branch 'netcp'
Murali Karicheri says:
====================
net: Add Keystone NetCP ethernet driver support
The Network Coprocessor (NetCP) is a hardware accelerator that processes
Ethernet packets. NetCP has a gigabit Ethernet (GbE) subsystem with an ethernet
switch sub-module to send and receive packets. NetCP also includes a packet
accelerator (PA) module to perform packet classification operations such as
header matching, and packet modification operations such as checksum
generation. NetCP can also optionally include a Security Accelerator(SA)
capable of performing IPSec operations on ingress/egress packets.
Keystone SoC's also have a 10 Gigabit Ethernet Subsystem (XGbE) which
includes a 3-port Ethernet switch sub-module capable of 10Gb/s and
1Gb/s rates per Ethernet port.
Both GBE and XGBE network processors are supported using a common driver. It
is also designed to handle future variants of NetCP.
version history
---------------
v7->v8
- Reworked comments against v7, related to checker warning.
- Patch 2/4 that has all of the driver code in v7 is now split into 3
patches based on functionality so that we have 3 smaller patches
review instead of a big patch.
- Patch for MAINTAINER is merged to 2/4 along with netcp core driver
- Separate patch (3/4) for 1G and (4/4) for 10G
- Removed big endian support for initial version (will add it later)
v6->v7
- Fixed some minor documentation error and also modified the netcp driver
to fix the set* functions to include correct le/be macros.
v5->v6
- updated version after incorporating comments [6] from David Miller,
David Laight & Geert Uytterhoeven on v5. I would like to get this in
for the v3.19 merge window if the latest version is acceptable.
v4->v5
- Sorry to spin v5 quickly but I missed few check-patch warnings which
were pointed by Joe Perches(thanks). I folded his changes [5] along with
few more check-patch warning fixes. I would like to get this in for the v3.18
merge window if David is happy with this version.
v3->v4
- Couple of fixes in the error path as pointed out [4] by David. Rest of
the patches are unchanged from v3.
v2->v3
- Update v3 after incorporating Jamal and David Miller's comment/suggestion
from earlier versions [1] [2]. As per the discussion here [3], the
controversial custom exports have been dropped now. And for future
offload support additions, we will plug into generic frameworks
as and when they are available.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
| -rw-r--r-- | Documentation/devicetree/bindings/net/keystone-netcp.txt | 197 | ||||
| -rw-r--r-- | MAINTAINERS | 7 | ||||
| -rw-r--r-- | drivers/net/ethernet/ti/Kconfig | 11 | ||||
| -rw-r--r-- | drivers/net/ethernet/ti/Makefile | 4 | ||||
| -rw-r--r-- | drivers/net/ethernet/ti/netcp.h | 229 | ||||
| -rw-r--r-- | drivers/net/ethernet/ti/netcp_core.c | 2141 | ||||
| -rw-r--r-- | drivers/net/ethernet/ti/netcp_ethss.c | 2156 | ||||
| -rw-r--r-- | drivers/net/ethernet/ti/netcp_sgmii.c | 131 | ||||
| -rw-r--r-- | drivers/net/ethernet/ti/netcp_xgbepcsr.c | 501 |
9 files changed, 5377 insertions, 0 deletions
diff --git a/Documentation/devicetree/bindings/net/keystone-netcp.txt b/Documentation/devicetree/bindings/net/keystone-netcp.txt new file mode 100644 index 000000000000..f9c07710478d --- /dev/null +++ b/Documentation/devicetree/bindings/net/keystone-netcp.txt | |||
| @@ -0,0 +1,197 @@ | |||
| 1 | This document describes the device tree bindings associated with the | ||
| 2 | keystone network coprocessor(NetCP) driver support. | ||
| 3 | |||
| 4 | The network coprocessor (NetCP) is a hardware accelerator that processes | ||
| 5 | Ethernet packets. NetCP has a gigabit Ethernet (GbE) subsystem with an ethernet | ||
| 6 | switch sub-module to send and receive packets. NetCP also includes a packet | ||
| 7 | accelerator (PA) module to perform packet classification operations such as | ||
| 8 | header matching, and packet modification operations such as checksum | ||
| 9 | generation. NetCP can also optionally include a Security Accelerator (SA) | ||
| 10 | capable of performing IPSec operations on ingress/egress packets. | ||
| 11 | |||
| 12 | Keystone II SoC's also have a 10 Gigabit Ethernet Subsystem (XGbE) which | ||
| 13 | includes a 3-port Ethernet switch sub-module capable of 10Gb/s and 1Gb/s rates | ||
| 14 | per Ethernet port. | ||
| 15 | |||
| 16 | Keystone NetCP driver has a plug-in module architecture where each of the NetCP | ||
| 17 | sub-modules exist as a loadable kernel module which plug in to the netcp core. | ||
| 18 | These sub-modules are represented as "netcp-devices" in the dts bindings. It is | ||
| 19 | mandatory to have the ethernet switch sub-module for the ethernet interface to | ||
| 20 | be operational. Any other sub-module like the PA is optional. | ||
| 21 | |||
| 22 | NetCP Ethernet SubSystem Layout: | ||
| 23 | |||
| 24 | ----------------------------- | ||
| 25 | NetCP subsystem(10G or 1G) | ||
| 26 | ----------------------------- | ||
| 27 | | | ||
| 28 | |-> NetCP Devices -> | | ||
| 29 | | |-> GBE/XGBE Switch | ||
| 30 | | | | ||
| 31 | | |-> Packet Accelerator | ||
| 32 | | | | ||
| 33 | | |-> Security Accelerator | ||
| 34 | | | ||
| 35 | | | ||
| 36 | | | ||
| 37 | |-> NetCP Interfaces -> | | ||
| 38 | |-> Ethernet Port 0 | ||
| 39 | | | ||
| 40 | |-> Ethernet Port 1 | ||
| 41 | | | ||
| 42 | |-> Ethernet Port 2 | ||
| 43 | | | ||
| 44 | |-> Ethernet Port 3 | ||
| 45 | |||
| 46 | |||
| 47 | NetCP subsystem properties: | ||
| 48 | Required properties: | ||
| 49 | - compatible: Should be "ti,netcp-1.0" | ||
| 50 | - clocks: phandle to the reference clocks for the subsystem. | ||
| 51 | - dma-id: Navigator packet dma instance id. | ||
| 52 | |||
| 53 | Optional properties: | ||
| 54 | - reg: register location and the size for the following register | ||
| 55 | regions in the specified order. | ||
| 56 | - Efuse MAC address register | ||
| 57 | - dma-coherent: Present if dma operations are coherent | ||
| 58 | - big-endian: Keystone devices can be operated in a mode where the DSP is in | ||
| 59 | the big endian mode. In such cases enable this option. This | ||
| 60 | option should also be enabled if the ARM is operated in | ||
| 61 | big endian mode with the DSP in little endian. | ||
| 62 | |||
| 63 | NetCP device properties: Device specification for NetCP sub-modules. | ||
| 64 | 1Gb/10Gb (gbe/xgbe) ethernet switch sub-module specifications. | ||
| 65 | Required properties: | ||
| 66 | - label: Must be "netcp-gbe" for 1Gb & "netcp-xgbe" for 10Gb. | ||
| 67 | - reg: register location and the size for the following register | ||
| 68 | regions in the specified order. | ||
| 69 | - subsystem registers | ||
| 70 | - serdes registers | ||
| 71 | - tx-channel: the navigator packet dma channel name for tx. | ||
| 72 | - tx-queue: the navigator queue number associated with the tx dma channel. | ||
| 73 | - interfaces: specification for each of the switch port to be registered as a | ||
| 74 | network interface in the stack. | ||
| 75 | -- slave-port: Switch port number, 0 based numbering. | ||
| 76 | -- link-interface: type of link interface, supported options are | ||
| 77 | - mac<->mac auto negotiate mode: 0 | ||
| 78 | - mac<->phy mode: 1 | ||
| 79 | - mac<->mac forced mode: 2 | ||
| 80 | - mac<->fiber mode: 3 | ||
| 81 | - mac<->phy mode with no mdio: 4 | ||
| 82 | - 10Gb mac<->phy mode : 10 | ||
| 83 | - 10Gb mac<->mac forced mode : 11 | ||
| 84 | -- phy-handle: phandle to PHY device | ||
| 85 | |||
| 86 | Optional properties: | ||
| 87 | - enable-ale: NetCP driver keeps the address learning feature in the ethernet | ||
| 88 | switch module disabled. This attribute is to enable the address | ||
| 89 | learning. | ||
| 90 | - secondary-slave-ports: specification for each of the switch port not to be | ||
| 91 | registered as a network interface. NetCP driver | ||
| 92 | will only initialize these ports and attach PHY | ||
| 93 | driver to them if needed. | ||
| 94 | |||
| 95 | NetCP interface properties: Interface specification for NetCP sub-modules. | ||
| 96 | Required properties: | ||
| 97 | - rx-channel: the navigator packet dma channel name for rx. | ||
| 98 | - rx-queue: the navigator queue number associated with rx dma channel. | ||
| 99 | - rx-pool: specifies the number of descriptors to be used & the region-id | ||
| 100 | for creating the rx descriptor pool. | ||
| 101 | - tx-pool: specifies the number of descriptors to be used & the region-id | ||
| 102 | for creating the tx descriptor pool. | ||
| 103 | - rx-queue-depth: number of descriptors in each of the free descriptor | ||
| 104 | queue (FDQ) for the pktdma Rx flow. There can be at | ||
| 105 | present a maximum of 4 queues per Rx flow. | ||
| 106 | - rx-buffer-size: the buffer size for each of the Rx flow FDQ. | ||
| 107 | - tx-completion-queue: the navigator queue number where the descriptors are | ||
| 108 | recycled after Tx DMA completion. | ||
| 109 | |||
| 110 | Optional properties: | ||
| 111 | - efuse-mac: If this is 1, then the MAC address for the interface is | ||
| 112 | obtained from the device efuse mac address register | ||
| 113 | - local-mac-address: the driver is designed to use the of_get_mac_address api | ||
| 114 | only if efuse-mac is 0. When efuse-mac is 0, the MAC | ||
| 115 | address is obtained from local-mac-address. If this | ||
| 116 | attribute is not present, then the driver will use a | ||
| 117 | random MAC address. | ||
| 118 | - "netcp-device label": phandle to the device specification for each of NetCP | ||
| 119 | sub-module attached to this interface. | ||
| 120 | |||
| 121 | Example binding: | ||
| 122 | |||
| 123 | netcp: netcp@2090000 { | ||
| 124 | reg = <0x2620110 0x8>; | ||
| 125 | reg-names = "efuse"; | ||
| 126 | compatible = "ti,netcp-1.0"; | ||
| 127 | #address-cells = <1>; | ||
| 128 | #size-cells = <1>; | ||
| 129 | ranges; | ||
| 130 | |||
| 131 | clocks = <&papllclk>, <&clkcpgmac>, <&chipclk12>; | ||
| 132 | dma-coherent; | ||
| 133 | /* big-endian; */ | ||
| 134 | dma-id = <0>; | ||
| 135 | |||
| 136 | netcp-devices { | ||
| 137 | #address-cells = <1>; | ||
| 138 | #size-cells = <1>; | ||
| 139 | ranges; | ||
| 140 | gbe@0x2090000 { | ||
| 141 | label = "netcp-gbe"; | ||
| 142 | reg = <0x2090000 0xf00>; | ||
| 143 | /* enable-ale; */ | ||
| 144 | tx-queue = <648>; | ||
| 145 | tx-channel = <8>; | ||
| 146 | |||
| 147 | interfaces { | ||
| 148 | gbe0: interface-0 { | ||
| 149 | slave-port = <0>; | ||
| 150 | link-interface = <4>; | ||
| 151 | }; | ||
| 152 | gbe1: interface-1 { | ||
| 153 | slave-port = <1>; | ||
| 154 | link-interface = <4>; | ||
| 155 | }; | ||
| 156 | }; | ||
| 157 | |||
| 158 | secondary-slave-ports { | ||
| 159 | port-2 { | ||
| 160 | slave-port = <2>; | ||
| 161 | link-interface = <2>; | ||
| 162 | }; | ||
| 163 | port-3 { | ||
| 164 | slave-port = <3>; | ||
| 165 | link-interface = <2>; | ||
| 166 | }; | ||
| 167 | }; | ||
| 168 | }; | ||
| 169 | }; | ||
| 170 | |||
| 171 | netcp-interfaces { | ||
| 172 | interface-0 { | ||
| 173 | rx-channel = <22>; | ||
| 174 | rx-pool = <1024 12>; | ||
| 175 | tx-pool = <1024 12>; | ||
| 176 | rx-queue-depth = <128 128 0 0>; | ||
| 177 | rx-buffer-size = <1518 4096 0 0>; | ||
| 178 | rx-queue = <8704>; | ||
| 179 | tx-completion-queue = <8706>; | ||
| 180 | efuse-mac = <1>; | ||
| 181 | netcp-gbe = <&gbe0>; | ||
| 182 | |||
| 183 | }; | ||
| 184 | interface-1 { | ||
| 185 | rx-channel = <23>; | ||
| 186 | rx-pool = <1024 12>; | ||
| 187 | tx-pool = <1024 12>; | ||
| 188 | rx-queue-depth = <128 128 0 0>; | ||
| 189 | rx-buffer-size = <1518 4096 0 0>; | ||
| 190 | rx-queue = <8705>; | ||
| 191 | tx-completion-queue = <8707>; | ||
| 192 | efuse-mac = <0>; | ||
| 193 | local-mac-address = [02 18 31 7e 3e 6f]; | ||
| 194 | netcp-gbe = <&gbe1>; | ||
| 195 | }; | ||
| 196 | }; | ||
| 197 | }; | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 9b91d9f0257e..e1ff4ce5bcab 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -9609,6 +9609,13 @@ F: drivers/power/lp8788-charger.c | |||
| 9609 | F: drivers/regulator/lp8788-*.c | 9609 | F: drivers/regulator/lp8788-*.c |
| 9610 | F: include/linux/mfd/lp8788*.h | 9610 | F: include/linux/mfd/lp8788*.h |
| 9611 | 9611 | ||
| 9612 | TI NETCP ETHERNET DRIVER | ||
| 9613 | M: Wingman Kwok <w-kwok2@ti.com> | ||
| 9614 | M: Murali Karicheri <m-karicheri2@ti.com> | ||
| 9615 | L: netdev@vger.kernel.org | ||
| 9616 | S: Maintained | ||
| 9617 | F: drivers/net/ethernet/ti/netcp* | ||
| 9618 | |||
| 9612 | TI TWL4030 SERIES SOC CODEC DRIVER | 9619 | TI TWL4030 SERIES SOC CODEC DRIVER |
| 9613 | M: Peter Ujfalusi <peter.ujfalusi@ti.com> | 9620 | M: Peter Ujfalusi <peter.ujfalusi@ti.com> |
| 9614 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 9621 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 605dd909bcc3..e11bcfa69f52 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig | |||
| @@ -73,12 +73,23 @@ config TI_CPSW | |||
| 73 | config TI_CPTS | 73 | config TI_CPTS |
| 74 | boolean "TI Common Platform Time Sync (CPTS) Support" | 74 | boolean "TI Common Platform Time Sync (CPTS) Support" |
| 75 | depends on TI_CPSW | 75 | depends on TI_CPSW |
| 76 | depends on TI_CPSW || TI_KEYSTONE_NET | ||
| 76 | select PTP_1588_CLOCK | 77 | select PTP_1588_CLOCK |
| 77 | ---help--- | 78 | ---help--- |
| 78 | This driver supports the Common Platform Time Sync unit of | 79 | This driver supports the Common Platform Time Sync unit of |
| 79 | the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4 | 80 | the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4 |
| 80 | and Layer 2 packets, and the driver offers a PTP Hardware Clock. | 81 | and Layer 2 packets, and the driver offers a PTP Hardware Clock. |
| 81 | 82 | ||
| 83 | config TI_KEYSTONE_NETCP | ||
| 84 | tristate "TI Keystone NETCP Ethernet subsystem Support" | ||
| 85 | depends on OF | ||
| 86 | depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS | ||
| 87 | ---help--- | ||
| 88 | This driver supports TI's Keystone NETCP Ethernet subsystem. | ||
| 89 | |||
| 90 | To compile this driver as a module, choose M here: the module | ||
| 91 | will be called keystone_netcp. | ||
| 92 | |||
| 82 | config TLAN | 93 | config TLAN |
| 83 | tristate "TI ThunderLAN support" | 94 | tristate "TI ThunderLAN support" |
| 84 | depends on (PCI || EISA) | 95 | depends on (PCI || EISA) |
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index 9cfaab8152be..465d03ddf441 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile | |||
| @@ -10,3 +10,7 @@ obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o | |||
| 10 | obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o | 10 | obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o |
| 11 | obj-$(CONFIG_TI_CPSW) += ti_cpsw.o | 11 | obj-$(CONFIG_TI_CPSW) += ti_cpsw.o |
| 12 | ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o | 12 | ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o |
| 13 | |||
| 14 | obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o | ||
| 15 | keystone_netcp-y := netcp_core.o netcp_ethss.o netcp_sgmii.o \ | ||
| 16 | netcp_xgbepcsr.o cpsw_ale.o cpts.o | ||
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h new file mode 100644 index 000000000000..906e9bc412f5 --- /dev/null +++ b/drivers/net/ethernet/ti/netcp.h | |||
| @@ -0,0 +1,229 @@ | |||
| 1 | /* | ||
| 2 | * NetCP driver local header | ||
| 3 | * | ||
| 4 | * Copyright (C) 2014 Texas Instruments Incorporated | ||
| 5 | * Authors: Sandeep Nair <sandeep_n@ti.com> | ||
| 6 | * Sandeep Paulraj <s-paulraj@ti.com> | ||
| 7 | * Cyril Chemparathy <cyril@ti.com> | ||
| 8 | * Santosh Shilimkar <santosh.shilimkar@ti.com> | ||
| 9 | * Wingman Kwok <w-kwok2@ti.com> | ||
| 10 | * Murali Karicheri <m-karicheri2@ti.com> | ||
| 11 | * | ||
| 12 | * This program is free software; you can redistribute it and/or | ||
| 13 | * modify it under the terms of the GNU General Public License as | ||
| 14 | * published by the Free Software Foundation version 2. | ||
| 15 | * | ||
| 16 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
| 17 | * kind, whether express or implied; without even the implied warranty | ||
| 18 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 19 | * GNU General Public License for more details. | ||
| 20 | */ | ||
| 21 | #ifndef __NETCP_H__ | ||
| 22 | #define __NETCP_H__ | ||
| 23 | |||
| 24 | #include <linux/netdevice.h> | ||
| 25 | #include <linux/soc/ti/knav_dma.h> | ||
| 26 | |||
| 27 | /* Maximum Ethernet frame size supported by Keystone switch */ | ||
| 28 | #define NETCP_MAX_FRAME_SIZE 9504 | ||
| 29 | |||
| 30 | #define SGMII_LINK_MAC_MAC_AUTONEG 0 | ||
| 31 | #define SGMII_LINK_MAC_PHY 1 | ||
| 32 | #define SGMII_LINK_MAC_MAC_FORCED 2 | ||
| 33 | #define SGMII_LINK_MAC_FIBER 3 | ||
| 34 | #define SGMII_LINK_MAC_PHY_NO_MDIO 4 | ||
| 35 | #define XGMII_LINK_MAC_PHY 10 | ||
| 36 | #define XGMII_LINK_MAC_MAC_FORCED 11 | ||
| 37 | |||
| 38 | struct netcp_device; | ||
| 39 | |||
| 40 | struct netcp_tx_pipe { | ||
| 41 | struct netcp_device *netcp_device; | ||
| 42 | void *dma_queue; | ||
| 43 | unsigned int dma_queue_id; | ||
| 44 | u8 dma_psflags; | ||
| 45 | void *dma_channel; | ||
| 46 | const char *dma_chan_name; | ||
| 47 | }; | ||
| 48 | |||
| 49 | #define ADDR_NEW BIT(0) | ||
| 50 | #define ADDR_VALID BIT(1) | ||
| 51 | |||
| 52 | enum netcp_addr_type { | ||
| 53 | ADDR_ANY, | ||
| 54 | ADDR_DEV, | ||
| 55 | ADDR_UCAST, | ||
| 56 | ADDR_MCAST, | ||
| 57 | ADDR_BCAST | ||
| 58 | }; | ||
| 59 | |||
| 60 | struct netcp_addr { | ||
| 61 | struct netcp_intf *netcp; | ||
| 62 | unsigned char addr[ETH_ALEN]; | ||
| 63 | enum netcp_addr_type type; | ||
| 64 | unsigned int flags; | ||
| 65 | struct list_head node; | ||
| 66 | }; | ||
| 67 | |||
| 68 | struct netcp_intf { | ||
| 69 | struct device *dev; | ||
| 70 | struct device *ndev_dev; | ||
| 71 | struct net_device *ndev; | ||
| 72 | bool big_endian; | ||
| 73 | unsigned int tx_compl_qid; | ||
| 74 | void *tx_pool; | ||
| 75 | struct list_head txhook_list_head; | ||
| 76 | unsigned int tx_pause_threshold; | ||
| 77 | void *tx_compl_q; | ||
| 78 | |||
| 79 | unsigned int tx_resume_threshold; | ||
| 80 | void *rx_queue; | ||
| 81 | void *rx_pool; | ||
| 82 | struct list_head rxhook_list_head; | ||
| 83 | unsigned int rx_queue_id; | ||
| 84 | void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN]; | ||
| 85 | u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN]; | ||
| 86 | struct napi_struct rx_napi; | ||
| 87 | struct napi_struct tx_napi; | ||
| 88 | |||
| 89 | void *rx_channel; | ||
| 90 | const char *dma_chan_name; | ||
| 91 | u32 rx_pool_size; | ||
| 92 | u32 rx_pool_region_id; | ||
| 93 | u32 tx_pool_size; | ||
| 94 | u32 tx_pool_region_id; | ||
| 95 | struct list_head module_head; | ||
| 96 | struct list_head interface_list; | ||
| 97 | struct list_head addr_list; | ||
| 98 | bool netdev_registered; | ||
| 99 | bool primary_module_attached; | ||
| 100 | |||
| 101 | /* Lock used for protecting Rx/Tx hook list management */ | ||
| 102 | spinlock_t lock; | ||
| 103 | struct netcp_device *netcp_device; | ||
| 104 | struct device_node *node_interface; | ||
| 105 | |||
| 106 | /* DMA configuration data */ | ||
| 107 | u32 msg_enable; | ||
| 108 | u32 rx_queue_depths[KNAV_DMA_FDQ_PER_CHAN]; | ||
| 109 | }; | ||
| 110 | |||
| 111 | #define NETCP_PSDATA_LEN KNAV_DMA_NUM_PS_WORDS | ||
| 112 | struct netcp_packet { | ||
| 113 | struct sk_buff *skb; | ||
| 114 | u32 *epib; | ||
| 115 | u32 *psdata; | ||
| 116 | unsigned int psdata_len; | ||
| 117 | struct netcp_intf *netcp; | ||
| 118 | struct netcp_tx_pipe *tx_pipe; | ||
| 119 | bool rxtstamp_complete; | ||
| 120 | void *ts_context; | ||
| 121 | |||
| 122 | int (*txtstamp_complete)(void *ctx, struct netcp_packet *pkt); | ||
| 123 | }; | ||
| 124 | |||
| 125 | static inline u32 *netcp_push_psdata(struct netcp_packet *p_info, | ||
| 126 | unsigned int bytes) | ||
| 127 | { | ||
| 128 | u32 *buf; | ||
| 129 | unsigned int words; | ||
| 130 | |||
| 131 | if ((bytes & 0x03) != 0) | ||
| 132 | return NULL; | ||
| 133 | words = bytes >> 2; | ||
| 134 | |||
| 135 | if ((p_info->psdata_len + words) > NETCP_PSDATA_LEN) | ||
| 136 | return NULL; | ||
| 137 | |||
| 138 | p_info->psdata_len += words; | ||
| 139 | buf = &p_info->psdata[NETCP_PSDATA_LEN - p_info->psdata_len]; | ||
| 140 | return buf; | ||
| 141 | } | ||
| 142 | |||
| 143 | static inline int netcp_align_psdata(struct netcp_packet *p_info, | ||
| 144 | unsigned int byte_align) | ||
| 145 | { | ||
| 146 | int padding; | ||
| 147 | |||
| 148 | switch (byte_align) { | ||
| 149 | case 0: | ||
| 150 | padding = -EINVAL; | ||
| 151 | break; | ||
| 152 | case 1: | ||
| 153 | case 2: | ||
| 154 | case 4: | ||
| 155 | padding = 0; | ||
| 156 | break; | ||
| 157 | case 8: | ||
| 158 | padding = (p_info->psdata_len << 2) % 8; | ||
| 159 | break; | ||
| 160 | case 16: | ||
| 161 | padding = (p_info->psdata_len << 2) % 16; | ||
| 162 | break; | ||
| 163 | default: | ||
| 164 | padding = (p_info->psdata_len << 2) % byte_align; | ||
| 165 | break; | ||
| 166 | } | ||
| 167 | return padding; | ||
| 168 | } | ||
| 169 | |||
| 170 | struct netcp_module { | ||
| 171 | const char *name; | ||
| 172 | struct module *owner; | ||
| 173 | bool primary; | ||
| 174 | |||
| 175 | /* probe/remove: called once per NETCP instance */ | ||
| 176 | int (*probe)(struct netcp_device *netcp_device, | ||
| 177 | struct device *device, struct device_node *node, | ||
| 178 | void **inst_priv); | ||
| 179 | int (*remove)(struct netcp_device *netcp_device, void *inst_priv); | ||
| 180 | |||
| 181 | /* attach/release: called once per network interface */ | ||
| 182 | int (*attach)(void *inst_priv, struct net_device *ndev, | ||
| 183 | struct device_node *node, void **intf_priv); | ||
| 184 | int (*release)(void *intf_priv); | ||
| 185 | int (*open)(void *intf_priv, struct net_device *ndev); | ||
| 186 | int (*close)(void *intf_priv, struct net_device *ndev); | ||
| 187 | int (*add_addr)(void *intf_priv, struct netcp_addr *naddr); | ||
| 188 | int (*del_addr)(void *intf_priv, struct netcp_addr *naddr); | ||
| 189 | int (*add_vid)(void *intf_priv, int vid); | ||
| 190 | int (*del_vid)(void *intf_priv, int vid); | ||
| 191 | int (*ioctl)(void *intf_priv, struct ifreq *req, int cmd); | ||
| 192 | |||
| 193 | /* used internally */ | ||
| 194 | struct list_head module_list; | ||
| 195 | struct list_head interface_list; | ||
| 196 | }; | ||
| 197 | |||
| 198 | int netcp_register_module(struct netcp_module *module); | ||
| 199 | void netcp_unregister_module(struct netcp_module *module); | ||
| 200 | void *netcp_module_get_intf_data(struct netcp_module *module, | ||
| 201 | struct netcp_intf *intf); | ||
| 202 | |||
| 203 | int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe, | ||
| 204 | struct netcp_device *netcp_device, | ||
| 205 | const char *dma_chan_name, unsigned int dma_queue_id); | ||
| 206 | int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe); | ||
| 207 | int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe); | ||
| 208 | |||
| 209 | typedef int netcp_hook_rtn(int order, void *data, struct netcp_packet *packet); | ||
| 210 | int netcp_register_txhook(struct netcp_intf *netcp_priv, int order, | ||
| 211 | netcp_hook_rtn *hook_rtn, void *hook_data); | ||
| 212 | int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order, | ||
| 213 | netcp_hook_rtn *hook_rtn, void *hook_data); | ||
| 214 | int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order, | ||
| 215 | netcp_hook_rtn *hook_rtn, void *hook_data); | ||
| 216 | int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order, | ||
| 217 | netcp_hook_rtn *hook_rtn, void *hook_data); | ||
| 218 | void *netcp_device_find_module(struct netcp_device *netcp_device, | ||
| 219 | const char *name); | ||
| 220 | |||
| 221 | /* SGMII functions */ | ||
| 222 | int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port); | ||
| 223 | int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port); | ||
| 224 | int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface); | ||
| 225 | |||
| 226 | /* XGBE SERDES init functions */ | ||
| 227 | int netcp_xgbe_serdes_init(void __iomem *serdes_regs, void __iomem *xgbe_regs); | ||
| 228 | |||
| 229 | #endif /* __NETCP_H__ */ | ||
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c new file mode 100644 index 000000000000..ba3002ec710a --- /dev/null +++ b/drivers/net/ethernet/ti/netcp_core.c | |||
| @@ -0,0 +1,2141 @@ | |||
| 1 | /* | ||
| 2 | * Keystone NetCP Core driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2014 Texas Instruments Incorporated | ||
| 5 | * Authors: Sandeep Nair <sandeep_n@ti.com> | ||
| 6 | * Sandeep Paulraj <s-paulraj@ti.com> | ||
| 7 | * Cyril Chemparathy <cyril@ti.com> | ||
| 8 | * Santosh Shilimkar <santosh.shilimkar@ti.com> | ||
| 9 | * Murali Karicheri <m-karicheri2@ti.com> | ||
| 10 | * Wingman Kwok <w-kwok2@ti.com> | ||
| 11 | * | ||
| 12 | * This program is free software; you can redistribute it and/or | ||
| 13 | * modify it under the terms of the GNU General Public License as | ||
| 14 | * published by the Free Software Foundation version 2. | ||
| 15 | * | ||
| 16 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
| 17 | * kind, whether express or implied; without even the implied warranty | ||
| 18 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 19 | * GNU General Public License for more details. | ||
| 20 | */ | ||
| 21 | |||
| 22 | #include <linux/io.h> | ||
| 23 | #include <linux/module.h> | ||
| 24 | #include <linux/of_net.h> | ||
| 25 | #include <linux/of_address.h> | ||
| 26 | #include <linux/if_vlan.h> | ||
| 27 | #include <linux/pm_runtime.h> | ||
| 28 | #include <linux/platform_device.h> | ||
| 29 | #include <linux/soc/ti/knav_qmss.h> | ||
| 30 | #include <linux/soc/ti/knav_dma.h> | ||
| 31 | |||
| 32 | #include "netcp.h" | ||
| 33 | |||
| 34 | #define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD) | ||
| 35 | #define NETCP_NAPI_WEIGHT 64 | ||
| 36 | #define NETCP_TX_TIMEOUT (5 * HZ) | ||
| 37 | #define NETCP_MIN_PACKET_SIZE ETH_ZLEN | ||
| 38 | #define NETCP_MAX_MCAST_ADDR 16 | ||
| 39 | |||
| 40 | #define NETCP_EFUSE_REG_INDEX 0 | ||
| 41 | |||
| 42 | #define NETCP_MOD_PROBE_SKIPPED 1 | ||
| 43 | #define NETCP_MOD_PROBE_FAILED 2 | ||
| 44 | |||
| 45 | #define NETCP_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \ | ||
| 46 | NETIF_MSG_DRV | NETIF_MSG_LINK | \ | ||
| 47 | NETIF_MSG_IFUP | NETIF_MSG_INTR | \ | ||
| 48 | NETIF_MSG_PROBE | NETIF_MSG_TIMER | \ | ||
| 49 | NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \ | ||
| 50 | NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \ | ||
| 51 | NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \ | ||
| 52 | NETIF_MSG_RX_STATUS) | ||
| 53 | |||
| 54 | #define knav_queue_get_id(q) knav_queue_device_control(q, \ | ||
| 55 | KNAV_QUEUE_GET_ID, (unsigned long)NULL) | ||
| 56 | |||
| 57 | #define knav_queue_enable_notify(q) knav_queue_device_control(q, \ | ||
| 58 | KNAV_QUEUE_ENABLE_NOTIFY, \ | ||
| 59 | (unsigned long)NULL) | ||
| 60 | |||
| 61 | #define knav_queue_disable_notify(q) knav_queue_device_control(q, \ | ||
| 62 | KNAV_QUEUE_DISABLE_NOTIFY, \ | ||
| 63 | (unsigned long)NULL) | ||
| 64 | |||
| 65 | #define knav_queue_get_count(q) knav_queue_device_control(q, \ | ||
| 66 | KNAV_QUEUE_GET_COUNT, (unsigned long)NULL) | ||
| 67 | |||
| 68 | #define for_each_netcp_module(module) \ | ||
| 69 | list_for_each_entry(module, &netcp_modules, module_list) | ||
| 70 | |||
| 71 | #define for_each_netcp_device_module(netcp_device, inst_modpriv) \ | ||
| 72 | list_for_each_entry(inst_modpriv, \ | ||
| 73 | &((netcp_device)->modpriv_head), inst_list) | ||
| 74 | |||
| 75 | #define for_each_module(netcp, intf_modpriv) \ | ||
| 76 | list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list) | ||
| 77 | |||
| 78 | /* Module management structures */ | ||
| 79 | struct netcp_device { | ||
| 80 | struct list_head device_list; | ||
| 81 | struct list_head interface_head; | ||
| 82 | struct list_head modpriv_head; | ||
| 83 | struct device *device; | ||
| 84 | }; | ||
| 85 | |||
| 86 | struct netcp_inst_modpriv { | ||
| 87 | struct netcp_device *netcp_device; | ||
| 88 | struct netcp_module *netcp_module; | ||
| 89 | struct list_head inst_list; | ||
| 90 | void *module_priv; | ||
| 91 | }; | ||
| 92 | |||
| 93 | struct netcp_intf_modpriv { | ||
| 94 | struct netcp_intf *netcp_priv; | ||
| 95 | struct netcp_module *netcp_module; | ||
| 96 | struct list_head intf_list; | ||
| 97 | void *module_priv; | ||
| 98 | }; | ||
| 99 | |||
| 100 | static LIST_HEAD(netcp_devices); | ||
| 101 | static LIST_HEAD(netcp_modules); | ||
| 102 | static DEFINE_MUTEX(netcp_modules_lock); | ||
| 103 | |||
| 104 | static int netcp_debug_level = -1; | ||
| 105 | module_param(netcp_debug_level, int, 0); | ||
| 106 | MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)"); | ||
| 107 | |||
| 108 | /* Helper functions - Get/Set */ | ||
| 109 | static void get_pkt_info(u32 *buff, u32 *buff_len, u32 *ndesc, | ||
| 110 | struct knav_dma_desc *desc) | ||
| 111 | { | ||
| 112 | *buff_len = desc->buff_len; | ||
| 113 | *buff = desc->buff; | ||
| 114 | *ndesc = desc->next_desc; | ||
| 115 | } | ||
| 116 | |||
| 117 | static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc) | ||
| 118 | { | ||
| 119 | *pad0 = desc->pad[0]; | ||
| 120 | *pad1 = desc->pad[1]; | ||
| 121 | } | ||
| 122 | |||
| 123 | static void get_org_pkt_info(u32 *buff, u32 *buff_len, | ||
| 124 | struct knav_dma_desc *desc) | ||
| 125 | { | ||
| 126 | *buff = desc->orig_buff; | ||
| 127 | *buff_len = desc->orig_len; | ||
| 128 | } | ||
| 129 | |||
| 130 | static void get_words(u32 *words, int num_words, u32 *desc) | ||
| 131 | { | ||
| 132 | int i; | ||
| 133 | |||
| 134 | for (i = 0; i < num_words; i++) | ||
| 135 | words[i] = desc[i]; | ||
| 136 | } | ||
| 137 | |||
| 138 | static void set_pkt_info(u32 buff, u32 buff_len, u32 ndesc, | ||
| 139 | struct knav_dma_desc *desc) | ||
| 140 | { | ||
| 141 | desc->buff_len = buff_len; | ||
| 142 | desc->buff = buff; | ||
| 143 | desc->next_desc = ndesc; | ||
| 144 | } | ||
| 145 | |||
| 146 | static void set_desc_info(u32 desc_info, u32 pkt_info, | ||
| 147 | struct knav_dma_desc *desc) | ||
| 148 | { | ||
| 149 | desc->desc_info = desc_info; | ||
| 150 | desc->packet_info = pkt_info; | ||
| 151 | } | ||
| 152 | |||
| 153 | static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc) | ||
| 154 | { | ||
| 155 | desc->pad[0] = pad0; | ||
| 156 | desc->pad[1] = pad1; | ||
| 157 | } | ||
| 158 | |||
| 159 | static void set_org_pkt_info(u32 buff, u32 buff_len, | ||
| 160 | struct knav_dma_desc *desc) | ||
| 161 | { | ||
| 162 | desc->orig_buff = buff; | ||
| 163 | desc->orig_len = buff_len; | ||
| 164 | } | ||
| 165 | |||
| 166 | static void set_words(u32 *words, int num_words, u32 *desc) | ||
| 167 | { | ||
| 168 | int i; | ||
| 169 | |||
| 170 | for (i = 0; i < num_words; i++) | ||
| 171 | desc[i] = words[i]; | ||
| 172 | } | ||
| 173 | |||
| 174 | /* Read the e-fuse value as 32 bit values to be endian independent */ | ||
| 175 | static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac) | ||
| 176 | { | ||
| 177 | unsigned int addr0, addr1; | ||
| 178 | |||
| 179 | addr1 = readl(efuse_mac + 4); | ||
| 180 | addr0 = readl(efuse_mac); | ||
| 181 | |||
| 182 | x[0] = (addr1 & 0x0000ff00) >> 8; | ||
| 183 | x[1] = addr1 & 0x000000ff; | ||
| 184 | x[2] = (addr0 & 0xff000000) >> 24; | ||
| 185 | x[3] = (addr0 & 0x00ff0000) >> 16; | ||
| 186 | x[4] = (addr0 & 0x0000ff00) >> 8; | ||
| 187 | x[5] = addr0 & 0x000000ff; | ||
| 188 | |||
| 189 | return 0; | ||
| 190 | } | ||
| 191 | |||
| 192 | static const char *netcp_node_name(struct device_node *node) | ||
| 193 | { | ||
| 194 | const char *name; | ||
| 195 | |||
| 196 | if (of_property_read_string(node, "label", &name) < 0) | ||
| 197 | name = node->name; | ||
| 198 | if (!name) | ||
| 199 | name = "unknown"; | ||
| 200 | return name; | ||
| 201 | } | ||
| 202 | |||
| 203 | /* Module management routines */ | ||
| 204 | static int netcp_register_interface(struct netcp_intf *netcp) | ||
| 205 | { | ||
| 206 | int ret; | ||
| 207 | |||
| 208 | ret = register_netdev(netcp->ndev); | ||
| 209 | if (!ret) | ||
| 210 | netcp->netdev_registered = true; | ||
| 211 | return ret; | ||
| 212 | } | ||
| 213 | |||
| 214 | static int netcp_module_probe(struct netcp_device *netcp_device, | ||
| 215 | struct netcp_module *module) | ||
| 216 | { | ||
| 217 | struct device *dev = netcp_device->device; | ||
| 218 | struct device_node *devices, *interface, *node = dev->of_node; | ||
| 219 | struct device_node *child; | ||
| 220 | struct netcp_inst_modpriv *inst_modpriv; | ||
| 221 | struct netcp_intf *netcp_intf; | ||
| 222 | struct netcp_module *tmp; | ||
| 223 | bool primary_module_registered = false; | ||
| 224 | int ret; | ||
| 225 | |||
| 226 | /* Find this module in the sub-tree for this device */ | ||
| 227 | devices = of_get_child_by_name(node, "netcp-devices"); | ||
| 228 | if (!devices) { | ||
| 229 | dev_err(dev, "could not find netcp-devices node\n"); | ||
| 230 | return NETCP_MOD_PROBE_SKIPPED; | ||
| 231 | } | ||
| 232 | |||
| 233 | for_each_available_child_of_node(devices, child) { | ||
| 234 | const char *name = netcp_node_name(child); | ||
| 235 | |||
| 236 | if (!strcasecmp(module->name, name)) | ||
| 237 | break; | ||
| 238 | } | ||
| 239 | |||
| 240 | of_node_put(devices); | ||
| 241 | /* If module not used for this device, skip it */ | ||
| 242 | if (!child) { | ||
| 243 | dev_warn(dev, "module(%s) not used for device\n", module->name); | ||
| 244 | return NETCP_MOD_PROBE_SKIPPED; | ||
| 245 | } | ||
| 246 | |||
| 247 | inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL); | ||
| 248 | if (!inst_modpriv) { | ||
| 249 | of_node_put(child); | ||
| 250 | return -ENOMEM; | ||
| 251 | } | ||
| 252 | |||
| 253 | inst_modpriv->netcp_device = netcp_device; | ||
| 254 | inst_modpriv->netcp_module = module; | ||
| 255 | list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head); | ||
| 256 | |||
| 257 | ret = module->probe(netcp_device, dev, child, | ||
| 258 | &inst_modpriv->module_priv); | ||
| 259 | of_node_put(child); | ||
| 260 | if (ret) { | ||
| 261 | dev_err(dev, "Probe of module(%s) failed with %d\n", | ||
| 262 | module->name, ret); | ||
| 263 | list_del(&inst_modpriv->inst_list); | ||
| 264 | devm_kfree(dev, inst_modpriv); | ||
| 265 | return NETCP_MOD_PROBE_FAILED; | ||
| 266 | } | ||
| 267 | |||
| 268 | /* Attach modules only if the primary module is probed */ | ||
| 269 | for_each_netcp_module(tmp) { | ||
| 270 | if (tmp->primary) | ||
| 271 | primary_module_registered = true; | ||
| 272 | } | ||
| 273 | |||
| 274 | if (!primary_module_registered) | ||
| 275 | return 0; | ||
| 276 | |||
| 277 | /* Attach module to interfaces */ | ||
| 278 | list_for_each_entry(netcp_intf, &netcp_device->interface_head, | ||
| 279 | interface_list) { | ||
| 280 | struct netcp_intf_modpriv *intf_modpriv; | ||
| 281 | |||
| 282 | /* If interface not registered then register now */ | ||
| 283 | if (!netcp_intf->netdev_registered) | ||
| 284 | ret = netcp_register_interface(netcp_intf); | ||
| 285 | |||
| 286 | if (ret) | ||
| 287 | return -ENODEV; | ||
| 288 | |||
| 289 | intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv), | ||
| 290 | GFP_KERNEL); | ||
| 291 | if (!intf_modpriv) | ||
| 292 | return -ENOMEM; | ||
| 293 | |||
| 294 | interface = of_parse_phandle(netcp_intf->node_interface, | ||
| 295 | module->name, 0); | ||
| 296 | |||
| 297 | intf_modpriv->netcp_priv = netcp_intf; | ||
| 298 | intf_modpriv->netcp_module = module; | ||
| 299 | list_add_tail(&intf_modpriv->intf_list, | ||
| 300 | &netcp_intf->module_head); | ||
| 301 | |||
| 302 | ret = module->attach(inst_modpriv->module_priv, | ||
| 303 | netcp_intf->ndev, interface, | ||
| 304 | &intf_modpriv->module_priv); | ||
| 305 | of_node_put(interface); | ||
| 306 | if (ret) { | ||
| 307 | dev_dbg(dev, "Attach of module %s declined with %d\n", | ||
| 308 | module->name, ret); | ||
| 309 | list_del(&intf_modpriv->intf_list); | ||
| 310 | devm_kfree(dev, intf_modpriv); | ||
| 311 | continue; | ||
| 312 | } | ||
| 313 | } | ||
| 314 | return 0; | ||
| 315 | } | ||
| 316 | |||
/* Register a NetCP module (e.g. GBE, XGBE, PA) with the core.
 *
 * The module must provide a unique name (matched case-insensitively
 * against DT node names) and a probe callback.  The new module is then
 * probed against every NetCP device instance already present.
 *
 * Returns 0 on success, -EINVAL for a malformed module, -EEXIST for a
 * duplicate name, or a negative probe error (after rolling back via
 * netcp_unregister_module()).
 */
int netcp_register_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *tmp;
	int ret;

	if (!module->name) {
		WARN(1, "error registering netcp module: no name\n");
		return -EINVAL;
	}

	if (!module->probe) {
		WARN(1, "error registering netcp module: no probe\n");
		return -EINVAL;
	}

	/* netcp_modules_lock serializes module registration against the
	 * per-device probe/attach walk below.
	 */
	mutex_lock(&netcp_modules_lock);

	/* Reject duplicate module names */
	for_each_netcp_module(tmp) {
		if (!strcasecmp(tmp->name, module->name)) {
			mutex_unlock(&netcp_modules_lock);
			return -EEXIST;
		}
	}
	list_add_tail(&module->module_list, &netcp_modules);

	/* Probe the new module against every existing device instance */
	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			goto fail;
	}

	mutex_unlock(&netcp_modules_lock);
	return 0;

fail:
	/* Drop the lock first: netcp_unregister_module() re-acquires it */
	mutex_unlock(&netcp_modules_lock);
	netcp_unregister_module(module);
	return ret;
}
| 357 | |||
/* Detach @module from one NetCP device instance: release its
 * per-interface private data on every interface, then remove its
 * per-instance private data.  Caller holds netcp_modules_lock.
 */
static void netcp_release_module(struct netcp_device *netcp_device,
				 struct netcp_module *module)
{
	struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device *dev = netcp_device->device;

	/* Release the module from each interface */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;

		list_for_each_entry_safe(intf_modpriv, intf_tmp,
					 &netcp_intf->module_head,
					 intf_list) {
			/* At most one modpriv per module per interface */
			if (intf_modpriv->netcp_module == module) {
				module->release(intf_modpriv->module_priv);
				list_del(&intf_modpriv->intf_list);
				devm_kfree(dev, intf_modpriv);
				break;
			}
		}
	}

	/* Remove the module from each instance */
	list_for_each_entry_safe(inst_modpriv, inst_tmp,
				 &netcp_device->modpriv_head, inst_list) {
		if (inst_modpriv->netcp_module == module) {
			module->remove(netcp_device,
				       inst_modpriv->module_priv);
			list_del(&inst_modpriv->inst_list);
			devm_kfree(dev, inst_modpriv);
			break;
		}
	}
}
| 395 | |||
/* Unregister a NetCP module: release it from every known device
 * instance, then unlink it from the global module list.  Safe to call
 * for a module that is partially registered (used as the rollback path
 * of netcp_register_module()).
 */
void netcp_unregister_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *module_tmp;

	mutex_lock(&netcp_modules_lock);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		netcp_release_module(netcp_device, module);
	}

	/* Remove the module from the module list */
	for_each_netcp_module(module_tmp) {
		if (module == module_tmp) {
			list_del(&module->module_list);
			break;
		}
	}

	mutex_unlock(&netcp_modules_lock);
}
| 417 | |||
| 418 | void *netcp_module_get_intf_data(struct netcp_module *module, | ||
| 419 | struct netcp_intf *intf) | ||
| 420 | { | ||
| 421 | struct netcp_intf_modpriv *intf_modpriv; | ||
| 422 | |||
| 423 | list_for_each_entry(intf_modpriv, &intf->module_head, intf_list) | ||
| 424 | if (intf_modpriv->netcp_module == module) | ||
| 425 | return intf_modpriv->module_priv; | ||
| 426 | return NULL; | ||
| 427 | } | ||
| 428 | |||
/* Module TX and RX Hook management */
struct netcp_hook_list {
	struct list_head list;		/* link in tx/rxhook_list_head */
	netcp_hook_rtn *hook_rtn;	/* per-packet callback */
	void *hook_data;		/* opaque context for hook_rtn */
	int order;			/* sort key; lower runs first */
};
| 436 | |||
/* Insert a Tx hook, keeping the list sorted by ascending 'order'
 * (lower order values run first).  Returns 0 or -ENOMEM.
 */
int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn = hook_rtn;
	entry->hook_data = hook_data;
	entry->order = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	/* Find the first entry with a higher order ... */
	list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
		if (next->order > order)
			break;
	}
	/* ... and insert before it.  If the loop ran off the end, 'next'
	 * is the container computed from the list head itself, so this is
	 * equivalent to list_add_tail().
	 */
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
| 462 | |||
/* Remove the Tx hook matching the (order, hook_rtn, hook_data) triple.
 * Returns 0 when found and removed, -ENOENT otherwise.  The entry is
 * freed after the lock is dropped.
 */
int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
		if ((next->order == order) &&
		    (next->hook_rtn == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);
	return -ENOENT;
}
| 483 | |||
/* Insert an Rx hook, keeping the list sorted by ascending 'order'
 * (lower order values run first).  Returns 0 or -ENOMEM.
 */
int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn = hook_rtn;
	entry->hook_data = hook_data;
	entry->order = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	/* Insert before the first entry with a higher order; when no such
	 * entry exists 'next' is the list head container, making this a
	 * list_add_tail() equivalent.
	 */
	list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
| 509 | |||
/* Remove the Rx hook matching the (order, hook_rtn, hook_data) triple.
 * Returns 0 when found and removed, -ENOENT otherwise.  The entry is
 * freed after the lock is dropped.
 */
int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
		if ((next->order == order) &&
		    (next->hook_rtn == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return -ENOENT;
}
| 531 | |||
| 532 | static void netcp_frag_free(bool is_frag, void *ptr) | ||
| 533 | { | ||
| 534 | if (is_frag) | ||
| 535 | put_page(virt_to_head_page(ptr)); | ||
| 536 | else | ||
| 537 | kfree(ptr); | ||
| 538 | } | ||
| 539 | |||
| 540 | static void netcp_free_rx_desc_chain(struct netcp_intf *netcp, | ||
| 541 | struct knav_dma_desc *desc) | ||
| 542 | { | ||
| 543 | struct knav_dma_desc *ndesc; | ||
| 544 | dma_addr_t dma_desc, dma_buf; | ||
| 545 | unsigned int buf_len, dma_sz = sizeof(*ndesc); | ||
| 546 | void *buf_ptr; | ||
| 547 | u32 tmp; | ||
| 548 | |||
| 549 | get_words(&dma_desc, 1, &desc->next_desc); | ||
| 550 | |||
| 551 | while (dma_desc) { | ||
| 552 | ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); | ||
| 553 | if (unlikely(!ndesc)) { | ||
| 554 | dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); | ||
| 555 | break; | ||
| 556 | } | ||
| 557 | get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc); | ||
| 558 | get_pad_info((u32 *)&buf_ptr, &tmp, ndesc); | ||
| 559 | dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE); | ||
| 560 | __free_page(buf_ptr); | ||
| 561 | knav_pool_desc_put(netcp->rx_pool, desc); | ||
| 562 | } | ||
| 563 | |||
| 564 | get_pad_info((u32 *)&buf_ptr, &buf_len, desc); | ||
| 565 | if (buf_ptr) | ||
| 566 | netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr); | ||
| 567 | knav_pool_desc_put(netcp->rx_pool, desc); | ||
| 568 | } | ||
| 569 | |||
| 570 | static void netcp_empty_rx_queue(struct netcp_intf *netcp) | ||
| 571 | { | ||
| 572 | struct knav_dma_desc *desc; | ||
| 573 | unsigned int dma_sz; | ||
| 574 | dma_addr_t dma; | ||
| 575 | |||
| 576 | for (; ;) { | ||
| 577 | dma = knav_queue_pop(netcp->rx_queue, &dma_sz); | ||
| 578 | if (!dma) | ||
| 579 | break; | ||
| 580 | |||
| 581 | desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz); | ||
| 582 | if (unlikely(!desc)) { | ||
| 583 | dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n", | ||
| 584 | __func__); | ||
| 585 | netcp->ndev->stats.rx_errors++; | ||
| 586 | continue; | ||
| 587 | } | ||
| 588 | netcp_free_rx_desc_chain(netcp, desc); | ||
| 589 | netcp->ndev->stats.rx_dropped++; | ||
| 590 | } | ||
| 591 | } | ||
| 592 | |||
| 593 | static int netcp_process_one_rx_packet(struct netcp_intf *netcp) | ||
| 594 | { | ||
| 595 | unsigned int dma_sz, buf_len, org_buf_len; | ||
| 596 | struct knav_dma_desc *desc, *ndesc; | ||
| 597 | unsigned int pkt_sz = 0, accum_sz; | ||
| 598 | struct netcp_hook_list *rx_hook; | ||
| 599 | dma_addr_t dma_desc, dma_buff; | ||
| 600 | struct netcp_packet p_info; | ||
| 601 | struct sk_buff *skb; | ||
| 602 | void *org_buf_ptr; | ||
| 603 | u32 tmp; | ||
| 604 | |||
| 605 | dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz); | ||
| 606 | if (!dma_desc) | ||
| 607 | return -1; | ||
| 608 | |||
| 609 | desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); | ||
| 610 | if (unlikely(!desc)) { | ||
| 611 | dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); | ||
| 612 | return 0; | ||
| 613 | } | ||
| 614 | |||
| 615 | get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc); | ||
| 616 | get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc); | ||
| 617 | |||
| 618 | if (unlikely(!org_buf_ptr)) { | ||
| 619 | dev_err(netcp->ndev_dev, "NULL bufptr in desc\n"); | ||
| 620 | goto free_desc; | ||
| 621 | } | ||
| 622 | |||
| 623 | pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK; | ||
| 624 | accum_sz = buf_len; | ||
| 625 | dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE); | ||
| 626 | |||
| 627 | /* Build a new sk_buff for the primary buffer */ | ||
| 628 | skb = build_skb(org_buf_ptr, org_buf_len); | ||
| 629 | if (unlikely(!skb)) { | ||
| 630 | dev_err(netcp->ndev_dev, "build_skb() failed\n"); | ||
| 631 | goto free_desc; | ||
| 632 | } | ||
| 633 | |||
| 634 | /* update data, tail and len */ | ||
| 635 | skb_reserve(skb, NETCP_SOP_OFFSET); | ||
| 636 | __skb_put(skb, buf_len); | ||
| 637 | |||
| 638 | /* Fill in the page fragment list */ | ||
| 639 | while (dma_desc) { | ||
| 640 | struct page *page; | ||
| 641 | |||
| 642 | ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); | ||
| 643 | if (unlikely(!ndesc)) { | ||
| 644 | dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); | ||
| 645 | goto free_desc; | ||
| 646 | } | ||
| 647 | |||
| 648 | get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc); | ||
| 649 | get_pad_info((u32 *)&page, &tmp, ndesc); | ||
| 650 | |||
| 651 | if (likely(dma_buff && buf_len && page)) { | ||
| 652 | dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, | ||
| 653 | DMA_FROM_DEVICE); | ||
| 654 | } else { | ||
| 655 | dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%p), len(%d), page(%p)\n", | ||
| 656 | (void *)dma_buff, buf_len, page); | ||
| 657 | goto free_desc; | ||
| 658 | } | ||
| 659 | |||
| 660 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, | ||
| 661 | offset_in_page(dma_buff), buf_len, PAGE_SIZE); | ||
| 662 | accum_sz += buf_len; | ||
| 663 | |||
| 664 | /* Free the descriptor */ | ||
| 665 | knav_pool_desc_put(netcp->rx_pool, ndesc); | ||
| 666 | } | ||
| 667 | |||
| 668 | /* Free the primary descriptor */ | ||
| 669 | knav_pool_desc_put(netcp->rx_pool, desc); | ||
| 670 | |||
| 671 | /* check for packet len and warn */ | ||
| 672 | if (unlikely(pkt_sz != accum_sz)) | ||
| 673 | dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n", | ||
| 674 | pkt_sz, accum_sz); | ||
| 675 | |||
| 676 | /* Remove ethernet FCS from the packet */ | ||
| 677 | __pskb_trim(skb, skb->len - ETH_FCS_LEN); | ||
| 678 | |||
| 679 | /* Call each of the RX hooks */ | ||
| 680 | p_info.skb = skb; | ||
| 681 | p_info.rxtstamp_complete = false; | ||
| 682 | list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) { | ||
| 683 | int ret; | ||
| 684 | |||
| 685 | ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data, | ||
| 686 | &p_info); | ||
| 687 | if (unlikely(ret)) { | ||
| 688 | dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n", | ||
| 689 | rx_hook->order, ret); | ||
| 690 | netcp->ndev->stats.rx_errors++; | ||
| 691 | dev_kfree_skb(skb); | ||
| 692 | return 0; | ||
| 693 | } | ||
| 694 | } | ||
| 695 | |||
| 696 | netcp->ndev->last_rx = jiffies; | ||
| 697 | netcp->ndev->stats.rx_packets++; | ||
| 698 | netcp->ndev->stats.rx_bytes += skb->len; | ||
| 699 | |||
| 700 | /* push skb up the stack */ | ||
| 701 | skb->protocol = eth_type_trans(skb, netcp->ndev); | ||
| 702 | netif_receive_skb(skb); | ||
| 703 | return 0; | ||
| 704 | |||
| 705 | free_desc: | ||
| 706 | netcp_free_rx_desc_chain(netcp, desc); | ||
| 707 | netcp->ndev->stats.rx_errors++; | ||
| 708 | return 0; | ||
| 709 | } | ||
| 710 | |||
/* Process up to @budget Rx packets, stopping early when the queue is
 * empty.  Returns the number of packets handled.
 */
static int netcp_process_rx_packets(struct netcp_intf *netcp,
				    unsigned int budget)
{
	int done = 0;

	while (done < budget) {
		if (netcp_process_one_rx_packet(netcp))
			break;
		done++;
	}
	return done;
}
| 720 | |||
/* Release descriptors and attached buffers from Rx FDQ */
static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *desc;
	unsigned int buf_len, dma_sz;
	dma_addr_t dma;
	void *buf_ptr;
	u32 tmp;

	/* Allocate descriptor */
	while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			continue;
		}

		/* orig_buff/orig_len give the DMA address and mapped size;
		 * pad[0] holds the matching virtual pointer (stored by
		 * netcp_allocate_rx_buf()).
		 */
		get_org_pkt_info(&dma, &buf_len, desc);
		get_pad_info((u32 *)&buf_ptr, &tmp, desc);

		if (unlikely(!dma)) {
			dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (unlikely(!buf_ptr)) {
			dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		/* FDQ 0 holds the primary (frag/kmalloc) buffer; the other
		 * FDQs hold whole pages.
		 */
		if (fdq == 0) {
			dma_unmap_single(netcp->dev, dma, buf_len,
					 DMA_FROM_DEVICE);
			netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
		} else {
			dma_unmap_page(netcp->dev, dma, buf_len,
				       DMA_FROM_DEVICE);
			__free_page(buf_ptr);
		}

		knav_pool_desc_put(netcp->rx_pool, desc);
	}
}
| 766 | |||
/* Tear down the Rx buffer pool: reclaim the buffers queued on every
 * FDQ, report any descriptors that never came back, then destroy the
 * pool itself.
 */
static void netcp_rxpool_free(struct netcp_intf *netcp)
{
	int i;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
		netcp_free_rx_buf(netcp, i);

	/* Any shortfall means descriptors are still owned by hardware or
	 * were leaked.
	 */
	if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
		dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
			netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));

	knav_pool_destroy(netcp->rx_pool);
	netcp->rx_pool = NULL;
}
| 782 | |||
/* Allocate one Rx buffer plus a pool descriptor for free-descriptor
 * queue @fdq and push the descriptor onto that FDQ.
 *
 * FDQ 0 is refilled with primary buffers (the future skb head, sized
 * from rx_buffer_sizes[0] with room for skb_shared_info so build_skb()
 * can be used); all other FDQs are refilled with whole pages used as
 * paged fragments.  On allocation failure the descriptor is returned
 * to the pool and the function silently gives up.
 */
static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *hwdesc;
	unsigned int buf_len, dma_sz;
	u32 desc_info, pkt_info;
	struct page *page;
	dma_addr_t dma;
	void *bufptr;
	u32 pad[2];	/* software scratch words stored in the descriptor */

	/* Allocate descriptor */
	hwdesc = knav_pool_desc_get(netcp->rx_pool);
	if (IS_ERR_OR_NULL(hwdesc)) {
		dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
		return;
	}

	if (likely(fdq == 0)) {
		unsigned int primary_buf_len;
		/* Allocate a primary receive queue entry */
		buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
		primary_buf_len = SKB_DATA_ALIGN(buf_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

		if (primary_buf_len <= PAGE_SIZE) {
			bufptr = netdev_alloc_frag(primary_buf_len);
			pad[1] = primary_buf_len;	/* nonzero => frag */
		} else {
			bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
					 GFP_DMA32 | __GFP_COLD);
			pad[1] = 0;	/* zero => kmalloc'ed, kfree later */
		}

		if (unlikely(!bufptr)) {
			dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
			goto fail;
		}
		/* NOTE(review): DMA_TO_DEVICE looks wrong for a receive
		 * buffer (DMA_FROM_DEVICE expected) and the mapping is not
		 * checked with dma_mapping_error() -- confirm against the
		 * platform's cache-coherency requirements.
		 */
		dma = dma_map_single(netcp->dev, bufptr, buf_len,
				     DMA_TO_DEVICE);
		/* Virtual pointer stored as a 32-bit value: assumes a
		 * 32-bit kernel; would break on 64-bit.
		 */
		pad[0] = (u32)bufptr;

	} else {
		/* Allocate a secondary receive queue entry */
		page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
		if (unlikely(!page)) {
			dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
			goto fail;
		}
		buf_len = PAGE_SIZE;
		dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
		pad[0] = (u32)page;	/* page pointer, same 32-bit caveat */
		pad[1] = 0;
	}

	/* Descriptor control words: buffer length, EPIB present, PS words,
	 * and the completion queue the hardware should return this
	 * descriptor to.
	 */
	desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
	desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
	pkt_info = KNAV_DMA_DESC_HAS_EPIB;
	pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
	pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
		    KNAV_DMA_DESC_RETQ_SHIFT;
	set_org_pkt_info(dma, buf_len, hwdesc);
	set_pad_info(pad[0], pad[1], hwdesc);
	set_desc_info(desc_info, pkt_info, hwdesc);

	/* Push to FDQs */
	knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
			   &dma_sz);
	knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
	return;

fail:
	knav_pool_desc_put(netcp->rx_pool, hwdesc);
}
| 856 | |||
| 857 | /* Refill Rx FDQ with descriptors & attached buffers */ | ||
| 858 | static void netcp_rxpool_refill(struct netcp_intf *netcp) | ||
| 859 | { | ||
| 860 | u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0}; | ||
| 861 | int i; | ||
| 862 | |||
| 863 | /* Calculate the FDQ deficit and refill */ | ||
| 864 | for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) { | ||
| 865 | fdq_deficit[i] = netcp->rx_queue_depths[i] - | ||
| 866 | knav_queue_get_count(netcp->rx_fdq[i]); | ||
| 867 | |||
| 868 | while (fdq_deficit[i]--) | ||
| 869 | netcp_allocate_rx_buf(netcp, i); | ||
| 870 | } /* end for fdqs */ | ||
| 871 | } | ||
| 872 | |||
| 873 | /* NAPI poll */ | ||
| 874 | static int netcp_rx_poll(struct napi_struct *napi, int budget) | ||
| 875 | { | ||
| 876 | struct netcp_intf *netcp = container_of(napi, struct netcp_intf, | ||
| 877 | rx_napi); | ||
| 878 | unsigned int packets; | ||
| 879 | |||
| 880 | packets = netcp_process_rx_packets(netcp, budget); | ||
| 881 | |||
| 882 | if (packets < budget) { | ||
| 883 | napi_complete(&netcp->rx_napi); | ||
| 884 | knav_queue_enable_notify(netcp->rx_queue); | ||
| 885 | } | ||
| 886 | |||
| 887 | netcp_rxpool_refill(netcp); | ||
| 888 | return packets; | ||
| 889 | } | ||
| 890 | |||
| 891 | static void netcp_rx_notify(void *arg) | ||
| 892 | { | ||
| 893 | struct netcp_intf *netcp = arg; | ||
| 894 | |||
| 895 | knav_queue_disable_notify(netcp->rx_queue); | ||
| 896 | napi_schedule(&netcp->rx_napi); | ||
| 897 | } | ||
| 898 | |||
/* Walk a completed Tx descriptor chain starting at @desc, unmapping
 * each buffer and returning every descriptor to the Tx pool.
 */
static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc,
				     unsigned int desc_sz)
{
	struct knav_dma_desc *ndesc = desc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len;

	while (ndesc) {
		/* Also fetches the link to the next chained descriptor */
		get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);

		if (dma_buf && buf_len)
			dma_unmap_single(netcp->dev, dma_buf, buf_len,
					 DMA_TO_DEVICE);
		else
			dev_warn(netcp->ndev_dev, "bad Tx desc buf(%p), len(%d)\n",
				 (void *)dma_buf, buf_len);

		knav_pool_desc_put(netcp->tx_pool, ndesc);
		ndesc = NULL;
		if (dma_desc) {
			ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
						     desc_sz);
			/* On unmap failure the rest of the chain is lost */
			if (!ndesc)
				dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
		}
	}
}
| 927 | |||
/* Reap up to @budget completed Tx packets from the completion queue:
 * free each descriptor chain, wake the subqueue if it was stopped and
 * the pool has recovered above the resume threshold, update stats and
 * free the skb.  Returns the number of packets reaped.
 */
static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
					  unsigned int budget)
{
	struct knav_dma_desc *desc;
	struct sk_buff *skb;
	unsigned int dma_sz;
	dma_addr_t dma;
	int pkts = 0;
	u32 tmp;

	while (budget--) {
		dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
		if (!dma)
			break;
		desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
			netcp->ndev->stats.tx_errors++;
			continue;
		}

		/* The skb pointer was stashed in the head descriptor's pad
		 * word at transmit time (32-bit pointer assumption).
		 */
		get_pad_info((u32 *)&skb, &tmp, desc);
		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
		if (!skb) {
			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
			netcp->ndev->stats.tx_errors++;
			continue;
		}

		/* Restart the subqueue once enough descriptors are free */
		if (netif_subqueue_stopped(netcp->ndev, skb) &&
		    netif_running(netcp->ndev) &&
		    (knav_pool_count(netcp->tx_pool) >
		     netcp->tx_resume_threshold)) {
			u16 subqueue = skb_get_queue_mapping(skb);

			netif_wake_subqueue(netcp->ndev, subqueue);
		}

		netcp->ndev->stats.tx_packets++;
		netcp->ndev->stats.tx_bytes += skb->len;
		dev_kfree_skb(skb);
		pkts++;
	}
	return pkts;
}
| 973 | |||
| 974 | static int netcp_tx_poll(struct napi_struct *napi, int budget) | ||
| 975 | { | ||
| 976 | int packets; | ||
| 977 | struct netcp_intf *netcp = container_of(napi, struct netcp_intf, | ||
| 978 | tx_napi); | ||
| 979 | |||
| 980 | packets = netcp_process_tx_compl_packets(netcp, budget); | ||
| 981 | if (packets < budget) { | ||
| 982 | napi_complete(&netcp->tx_napi); | ||
| 983 | knav_queue_enable_notify(netcp->tx_compl_q); | ||
| 984 | } | ||
| 985 | |||
| 986 | return packets; | ||
| 987 | } | ||
| 988 | |||
| 989 | static void netcp_tx_notify(void *arg) | ||
| 990 | { | ||
| 991 | struct netcp_intf *netcp = arg; | ||
| 992 | |||
| 993 | knav_queue_disable_notify(netcp->tx_compl_q); | ||
| 994 | napi_schedule(&netcp->tx_napi); | ||
| 995 | } | ||
| 996 | |||
/* Map an skb (linear head plus page fragments) onto a chain of Tx
 * descriptors.  The head descriptor is returned unmapped (the caller
 * maps and queues it); all chained descriptors are already mapped and
 * linked via next_desc.  Returns NULL on any mapping or pool failure,
 * after freeing whatever part of the chain was built.
 */
static struct knav_dma_desc*
netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
{
	struct knav_dma_desc *desc, *ndesc, *pdesc;
	unsigned int pkt_len = skb_headlen(skb);
	struct device *dev = netcp->dev;
	dma_addr_t dma_addr;
	unsigned int dma_sz;
	int i;

	/* Map the linear buffer */
	dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
	/* NOTE(review): mapping failure is detected by testing the handle
	 * against 0 rather than dma_mapping_error() -- confirm this is
	 * valid on all supported platforms.
	 */
	if (unlikely(!dma_addr)) {
		dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
		return NULL;
	}

	desc = knav_pool_desc_get(netcp->tx_pool);
	if (unlikely(IS_ERR_OR_NULL(desc))) {
		dev_err(netcp->ndev_dev, "out of TX desc\n");
		dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
		return NULL;
	}

	set_pkt_info(dma_addr, pkt_len, 0, desc);
	if (skb_is_nonlinear(skb)) {
		prefetchw(skb_shinfo(skb));
	} else {
		/* Single-descriptor packet: terminate the chain */
		desc->next_desc = 0;
		goto upd_pkt_len;
	}

	pdesc = desc;

	/* Handle the case where skb is fragmented in pages */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct page *page = skb_frag_page(frag);
		u32 page_offset = frag->page_offset;
		u32 buf_len = skb_frag_size(frag);
		dma_addr_t desc_dma;
		u32 pkt_info;

		dma_addr = dma_map_page(dev, page, page_offset, buf_len,
					DMA_TO_DEVICE);
		if (unlikely(!dma_addr)) {
			dev_err(netcp->ndev_dev, "Failed to map skb page\n");
			goto free_descs;
		}

		ndesc = knav_pool_desc_get(netcp->tx_pool);
		if (unlikely(IS_ERR_OR_NULL(ndesc))) {
			dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
			dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
			goto free_descs;
		}

		/* Link the previous descriptor to this one, then flush the
		 * previous descriptor to its DMA-visible form (the head
		 * descriptor is mapped by the caller, hence the pdesc !=
		 * desc guard).
		 */
		desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool,
						      (void *)ndesc);
		pkt_info =
			(netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
				KNAV_DMA_DESC_RETQ_SHIFT;
		set_pkt_info(dma_addr, buf_len, 0, ndesc);
		set_words(&desc_dma, 1, &pdesc->next_desc);
		pkt_len += buf_len;
		if (pdesc != desc)
			knav_pool_desc_map(netcp->tx_pool, pdesc,
					   sizeof(*pdesc), &desc_dma, &dma_sz);
		pdesc = ndesc;
	}
	/* Flush the last chained descriptor */
	if (pdesc != desc)
		knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
				   &dma_addr, &dma_sz);

	/* frag list based linkage is not supported for now. */
	if (skb_shinfo(skb)->frag_list) {
		dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
		goto free_descs;
	}

upd_pkt_len:
	WARN_ON(pkt_len != skb->len);

	/* Total packet length goes into the head descriptor's desc_info */
	pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
	set_words(&pkt_len, 1, &desc->desc_info);
	return desc;

free_descs:
	netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	return NULL;
}
| 1088 | |||
/* Finalize a TX descriptor for an already-mapped skb and hand it to the
 * hardware: zero the EPIB words, run the registered TX hook chain (one
 * hook must claim the packet by setting p_info.tx_pipe), fold module
 * psdata plus the completion-queue/psflags fields into the descriptor,
 * map it, and push it onto the chosen pipe's DMA queue.
 *
 * Returns 0 on success, a negative errno on failure, or NETDEV_TX_OK
 * when a hook deliberately rejected the packet.  On any non-zero return
 * the caller still owns skb and desc and must clean them up.
 */
static int netcp_tx_submit_skb(struct netcp_intf *netcp,
			       struct sk_buff *skb,
			       struct knav_dma_desc *desc)
{
	struct netcp_tx_pipe *tx_pipe = NULL;
	struct netcp_hook_list *tx_hook;
	struct netcp_packet p_info;
	u32 packet_info = 0;
	unsigned int dma_sz;
	dma_addr_t dma;
	int ret = 0;

	p_info.netcp = netcp;
	p_info.skb = skb;
	p_info.tx_pipe = NULL;
	p_info.psdata_len = 0;
	p_info.ts_context = NULL;
	p_info.txtstamp_complete = NULL;
	/* epib/psdata point straight into the descriptor so hooks write
	 * their metadata in place */
	p_info.epib = desc->epib;
	p_info.psdata = desc->psdata;
	memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(u32));

	/* Find out where to inject the packet for transmission */
	list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
		ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
					&p_info);
		if (unlikely(ret != 0)) {
			dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
				tx_hook->order, ret);
			/* positive hook return: drop quietly, tell the
			 * caller NETDEV_TX_OK; negative: real error */
			ret = (ret < 0) ? ret : NETDEV_TX_OK;
			goto out;
		}
	}

	/* Make sure some TX hook claimed the packet */
	tx_pipe = p_info.tx_pipe;
	if (!tx_pipe) {
		dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
		ret = -ENXIO;
		goto out;
	}

	/* update descriptor */
	if (p_info.psdata_len) {
		u32 *psdata = p_info.psdata;

		/* NOTE(review): copies psdata_len bytes from word offset
		 * psdata_len down to the start of the psdata area; this
		 * assumes hooks filled their words from the tail of the
		 * area — confirm against the module implementations. */
		memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
			p_info.psdata_len);
		set_words(psdata, p_info.psdata_len, psdata);
		packet_info |=
			(p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
			KNAV_DMA_DESC_PSLEN_SHIFT;
	}

	/* descriptor carries EPIB, returns to tx_compl_qid on completion
	 * and carries the pipe's PS flags */
	packet_info |= KNAV_DMA_DESC_HAS_EPIB |
		((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
		KNAV_DMA_DESC_RETQ_SHIFT) |
		((tx_pipe->dma_psflags & KNAV_DMA_DESC_PSFLAG_MASK) <<
		KNAV_DMA_DESC_PSFLAG_SHIFT);

	set_words(&packet_info, 1, &desc->packet_info);
	/* stash the skb pointer in the descriptor pad area so the
	 * completion path can recover and free it */
	set_words((u32 *)&skb, 1, &desc->pad[0]);

	/* submit packet descriptor */
	ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
				 &dma_sz);
	if (unlikely(ret)) {
		dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
		ret = -ENOMEM;
		goto out;
	}
	skb_tx_timestamp(skb);
	knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);

out:
	return ret;
}
| 1166 | |||
| 1167 | /* Submit the packet */ | ||
| 1168 | static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) | ||
| 1169 | { | ||
| 1170 | struct netcp_intf *netcp = netdev_priv(ndev); | ||
| 1171 | int subqueue = skb_get_queue_mapping(skb); | ||
| 1172 | struct knav_dma_desc *desc; | ||
| 1173 | int desc_count, ret = 0; | ||
| 1174 | |||
| 1175 | if (unlikely(skb->len <= 0)) { | ||
| 1176 | dev_kfree_skb(skb); | ||
| 1177 | return NETDEV_TX_OK; | ||
| 1178 | } | ||
| 1179 | |||
| 1180 | if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) { | ||
| 1181 | ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE); | ||
| 1182 | if (ret < 0) { | ||
| 1183 | /* If we get here, the skb has already been dropped */ | ||
| 1184 | dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n", | ||
| 1185 | ret); | ||
| 1186 | ndev->stats.tx_dropped++; | ||
| 1187 | return ret; | ||
| 1188 | } | ||
| 1189 | skb->len = NETCP_MIN_PACKET_SIZE; | ||
| 1190 | } | ||
| 1191 | |||
| 1192 | desc = netcp_tx_map_skb(skb, netcp); | ||
| 1193 | if (unlikely(!desc)) { | ||
| 1194 | netif_stop_subqueue(ndev, subqueue); | ||
| 1195 | ret = -ENOBUFS; | ||
| 1196 | goto drop; | ||
| 1197 | } | ||
| 1198 | |||
| 1199 | ret = netcp_tx_submit_skb(netcp, skb, desc); | ||
| 1200 | if (ret) | ||
| 1201 | goto drop; | ||
| 1202 | |||
| 1203 | ndev->trans_start = jiffies; | ||
| 1204 | |||
| 1205 | /* Check Tx pool count & stop subqueue if needed */ | ||
| 1206 | desc_count = knav_pool_count(netcp->tx_pool); | ||
| 1207 | if (desc_count < netcp->tx_pause_threshold) { | ||
| 1208 | dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count); | ||
| 1209 | netif_stop_subqueue(ndev, subqueue); | ||
| 1210 | } | ||
| 1211 | return NETDEV_TX_OK; | ||
| 1212 | |||
| 1213 | drop: | ||
| 1214 | ndev->stats.tx_dropped++; | ||
| 1215 | if (desc) | ||
| 1216 | netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc)); | ||
| 1217 | dev_kfree_skb(skb); | ||
| 1218 | return ret; | ||
| 1219 | } | ||
| 1220 | |||
| 1221 | int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe) | ||
| 1222 | { | ||
| 1223 | if (tx_pipe->dma_channel) { | ||
| 1224 | knav_dma_close_channel(tx_pipe->dma_channel); | ||
| 1225 | tx_pipe->dma_channel = NULL; | ||
| 1226 | } | ||
| 1227 | return 0; | ||
| 1228 | } | ||
| 1229 | |||
| 1230 | int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe) | ||
| 1231 | { | ||
| 1232 | struct device *dev = tx_pipe->netcp_device->device; | ||
| 1233 | struct knav_dma_cfg config; | ||
| 1234 | int ret = 0; | ||
| 1235 | u8 name[16]; | ||
| 1236 | |||
| 1237 | memset(&config, 0, sizeof(config)); | ||
| 1238 | config.direction = DMA_MEM_TO_DEV; | ||
| 1239 | config.u.tx.filt_einfo = false; | ||
| 1240 | config.u.tx.filt_pswords = false; | ||
| 1241 | config.u.tx.priority = DMA_PRIO_MED_L; | ||
| 1242 | |||
| 1243 | tx_pipe->dma_channel = knav_dma_open_channel(dev, | ||
| 1244 | tx_pipe->dma_chan_name, &config); | ||
| 1245 | if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) { | ||
| 1246 | dev_err(dev, "failed opening tx chan(%s)\n", | ||
| 1247 | tx_pipe->dma_chan_name); | ||
| 1248 | goto err; | ||
| 1249 | } | ||
| 1250 | |||
| 1251 | snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev)); | ||
| 1252 | tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id, | ||
| 1253 | KNAV_QUEUE_SHARED); | ||
| 1254 | if (IS_ERR(tx_pipe->dma_queue)) { | ||
| 1255 | dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n", | ||
| 1256 | name, ret); | ||
| 1257 | ret = PTR_ERR(tx_pipe->dma_queue); | ||
| 1258 | goto err; | ||
| 1259 | } | ||
| 1260 | |||
| 1261 | dev_dbg(dev, "opened tx pipe %s\n", name); | ||
| 1262 | return 0; | ||
| 1263 | |||
| 1264 | err: | ||
| 1265 | if (!IS_ERR_OR_NULL(tx_pipe->dma_channel)) | ||
| 1266 | knav_dma_close_channel(tx_pipe->dma_channel); | ||
| 1267 | tx_pipe->dma_channel = NULL; | ||
| 1268 | return ret; | ||
| 1269 | } | ||
| 1270 | |||
| 1271 | int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe, | ||
| 1272 | struct netcp_device *netcp_device, | ||
| 1273 | const char *dma_chan_name, unsigned int dma_queue_id) | ||
| 1274 | { | ||
| 1275 | memset(tx_pipe, 0, sizeof(*tx_pipe)); | ||
| 1276 | tx_pipe->netcp_device = netcp_device; | ||
| 1277 | tx_pipe->dma_chan_name = dma_chan_name; | ||
| 1278 | tx_pipe->dma_queue_id = dma_queue_id; | ||
| 1279 | return 0; | ||
| 1280 | } | ||
| 1281 | |||
| 1282 | static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp, | ||
| 1283 | const u8 *addr, | ||
| 1284 | enum netcp_addr_type type) | ||
| 1285 | { | ||
| 1286 | struct netcp_addr *naddr; | ||
| 1287 | |||
| 1288 | list_for_each_entry(naddr, &netcp->addr_list, node) { | ||
| 1289 | if (naddr->type != type) | ||
| 1290 | continue; | ||
| 1291 | if (addr && memcmp(addr, naddr->addr, ETH_ALEN)) | ||
| 1292 | continue; | ||
| 1293 | return naddr; | ||
| 1294 | } | ||
| 1295 | |||
| 1296 | return NULL; | ||
| 1297 | } | ||
| 1298 | |||
| 1299 | static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp, | ||
| 1300 | const u8 *addr, | ||
| 1301 | enum netcp_addr_type type) | ||
| 1302 | { | ||
| 1303 | struct netcp_addr *naddr; | ||
| 1304 | |||
| 1305 | naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC); | ||
| 1306 | if (!naddr) | ||
| 1307 | return NULL; | ||
| 1308 | |||
| 1309 | naddr->type = type; | ||
| 1310 | naddr->flags = 0; | ||
| 1311 | naddr->netcp = netcp; | ||
| 1312 | if (addr) | ||
| 1313 | ether_addr_copy(naddr->addr, addr); | ||
| 1314 | else | ||
| 1315 | memset(naddr->addr, 0, ETH_ALEN); | ||
| 1316 | list_add_tail(&naddr->node, &netcp->addr_list); | ||
| 1317 | |||
| 1318 | return naddr; | ||
| 1319 | } | ||
| 1320 | |||
/* Unlink an address entry from the interface list and release its
 * devm-managed allocation (allocated by netcp_addr_add()).
 */
static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
{
	list_del(&naddr->node);
	devm_kfree(netcp->dev, naddr);
}
| 1326 | |||
| 1327 | static void netcp_addr_clear_mark(struct netcp_intf *netcp) | ||
| 1328 | { | ||
| 1329 | struct netcp_addr *naddr; | ||
| 1330 | |||
| 1331 | list_for_each_entry(naddr, &netcp->addr_list, node) | ||
| 1332 | naddr->flags = 0; | ||
| 1333 | } | ||
| 1334 | |||
| 1335 | static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr, | ||
| 1336 | enum netcp_addr_type type) | ||
| 1337 | { | ||
| 1338 | struct netcp_addr *naddr; | ||
| 1339 | |||
| 1340 | naddr = netcp_addr_find(netcp, addr, type); | ||
| 1341 | if (naddr) { | ||
| 1342 | naddr->flags |= ADDR_VALID; | ||
| 1343 | return; | ||
| 1344 | } | ||
| 1345 | |||
| 1346 | naddr = netcp_addr_add(netcp, addr, type); | ||
| 1347 | if (!WARN_ON(!naddr)) | ||
| 1348 | naddr->flags |= ADDR_NEW; | ||
| 1349 | } | ||
| 1350 | |||
| 1351 | static void netcp_addr_sweep_del(struct netcp_intf *netcp) | ||
| 1352 | { | ||
| 1353 | struct netcp_addr *naddr, *tmp; | ||
| 1354 | struct netcp_intf_modpriv *priv; | ||
| 1355 | struct netcp_module *module; | ||
| 1356 | int error; | ||
| 1357 | |||
| 1358 | list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) { | ||
| 1359 | if (naddr->flags & (ADDR_VALID | ADDR_NEW)) | ||
| 1360 | continue; | ||
| 1361 | dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n", | ||
| 1362 | naddr->addr, naddr->type); | ||
| 1363 | mutex_lock(&netcp_modules_lock); | ||
| 1364 | for_each_module(netcp, priv) { | ||
| 1365 | module = priv->netcp_module; | ||
| 1366 | if (!module->del_addr) | ||
| 1367 | continue; | ||
| 1368 | error = module->del_addr(priv->module_priv, | ||
| 1369 | naddr); | ||
| 1370 | WARN_ON(error); | ||
| 1371 | } | ||
| 1372 | mutex_unlock(&netcp_modules_lock); | ||
| 1373 | netcp_addr_del(netcp, naddr); | ||
| 1374 | } | ||
| 1375 | } | ||
| 1376 | |||
| 1377 | static void netcp_addr_sweep_add(struct netcp_intf *netcp) | ||
| 1378 | { | ||
| 1379 | struct netcp_addr *naddr, *tmp; | ||
| 1380 | struct netcp_intf_modpriv *priv; | ||
| 1381 | struct netcp_module *module; | ||
| 1382 | int error; | ||
| 1383 | |||
| 1384 | list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) { | ||
| 1385 | if (!(naddr->flags & ADDR_NEW)) | ||
| 1386 | continue; | ||
| 1387 | dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n", | ||
| 1388 | naddr->addr, naddr->type); | ||
| 1389 | mutex_lock(&netcp_modules_lock); | ||
| 1390 | for_each_module(netcp, priv) { | ||
| 1391 | module = priv->netcp_module; | ||
| 1392 | if (!module->add_addr) | ||
| 1393 | continue; | ||
| 1394 | error = module->add_addr(priv->module_priv, naddr); | ||
| 1395 | WARN_ON(error); | ||
| 1396 | } | ||
| 1397 | mutex_unlock(&netcp_modules_lock); | ||
| 1398 | } | ||
| 1399 | } | ||
| 1400 | |||
| 1401 | static void netcp_set_rx_mode(struct net_device *ndev) | ||
| 1402 | { | ||
| 1403 | struct netcp_intf *netcp = netdev_priv(ndev); | ||
| 1404 | struct netdev_hw_addr *ndev_addr; | ||
| 1405 | bool promisc; | ||
| 1406 | |||
| 1407 | promisc = (ndev->flags & IFF_PROMISC || | ||
| 1408 | ndev->flags & IFF_ALLMULTI || | ||
| 1409 | netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR); | ||
| 1410 | |||
| 1411 | /* first clear all marks */ | ||
| 1412 | netcp_addr_clear_mark(netcp); | ||
| 1413 | |||
| 1414 | /* next add new entries, mark existing ones */ | ||
| 1415 | netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST); | ||
| 1416 | for_each_dev_addr(ndev, ndev_addr) | ||
| 1417 | netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV); | ||
| 1418 | netdev_for_each_uc_addr(ndev_addr, ndev) | ||
| 1419 | netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST); | ||
| 1420 | netdev_for_each_mc_addr(ndev_addr, ndev) | ||
| 1421 | netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST); | ||
| 1422 | |||
| 1423 | if (promisc) | ||
| 1424 | netcp_addr_add_mark(netcp, NULL, ADDR_ANY); | ||
| 1425 | |||
| 1426 | /* finally sweep and callout into modules */ | ||
| 1427 | netcp_addr_sweep_del(netcp); | ||
| 1428 | netcp_addr_sweep_add(netcp); | ||
| 1429 | } | ||
| 1430 | |||
/* Tear down all Navigator resources (DMA channel, queues, descriptor
 * pools) owned by this interface, roughly in the reverse order of
 * netcp_setup_navigator_resources().  Every step checks its handle
 * first, so this is safe on a partially-initialized interface and
 * doubles as the setup error-unwind path.
 */
static void netcp_free_navigator_resources(struct netcp_intf *netcp)
{
	int i;

	if (netcp->rx_channel) {
		knav_dma_close_channel(netcp->rx_channel);
		netcp->rx_channel = NULL;
	}

	/* return RX descriptors/buffers before the queues go away */
	if (!IS_ERR_OR_NULL(netcp->rx_pool))
		netcp_rxpool_free(netcp);

	if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
		knav_queue_close(netcp->rx_queue);
		netcp->rx_queue = NULL;
	}

	/* FDQs are opened in order, so stop at the first unopened slot */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
		knav_queue_close(netcp->rx_fdq[i]);
		netcp->rx_fdq[i] = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
		knav_queue_close(netcp->tx_compl_q);
		netcp->tx_compl_q = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
		knav_pool_destroy(netcp->tx_pool);
		netcp->tx_pool = NULL;
	}
}
| 1464 | |||
| 1465 | static int netcp_setup_navigator_resources(struct net_device *ndev) | ||
| 1466 | { | ||
| 1467 | struct netcp_intf *netcp = netdev_priv(ndev); | ||
| 1468 | struct knav_queue_notify_config notify_cfg; | ||
| 1469 | struct knav_dma_cfg config; | ||
| 1470 | u32 last_fdq = 0; | ||
| 1471 | u8 name[16]; | ||
| 1472 | int ret; | ||
| 1473 | int i; | ||
| 1474 | |||
| 1475 | /* Create Rx/Tx descriptor pools */ | ||
| 1476 | snprintf(name, sizeof(name), "rx-pool-%s", ndev->name); | ||
| 1477 | netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size, | ||
| 1478 | netcp->rx_pool_region_id); | ||
| 1479 | if (IS_ERR_OR_NULL(netcp->rx_pool)) { | ||
| 1480 | dev_err(netcp->ndev_dev, "Couldn't create rx pool\n"); | ||
| 1481 | ret = PTR_ERR(netcp->rx_pool); | ||
| 1482 | goto fail; | ||
| 1483 | } | ||
| 1484 | |||
| 1485 | snprintf(name, sizeof(name), "tx-pool-%s", ndev->name); | ||
| 1486 | netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size, | ||
| 1487 | netcp->tx_pool_region_id); | ||
| 1488 | if (IS_ERR_OR_NULL(netcp->tx_pool)) { | ||
| 1489 | dev_err(netcp->ndev_dev, "Couldn't create tx pool\n"); | ||
| 1490 | ret = PTR_ERR(netcp->tx_pool); | ||
| 1491 | goto fail; | ||
| 1492 | } | ||
| 1493 | |||
| 1494 | /* open Tx completion queue */ | ||
| 1495 | snprintf(name, sizeof(name), "tx-compl-%s", ndev->name); | ||
| 1496 | netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0); | ||
| 1497 | if (IS_ERR_OR_NULL(netcp->tx_compl_q)) { | ||
| 1498 | ret = PTR_ERR(netcp->tx_compl_q); | ||
| 1499 | goto fail; | ||
| 1500 | } | ||
| 1501 | netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q); | ||
| 1502 | |||
| 1503 | /* Set notification for Tx completion */ | ||
| 1504 | notify_cfg.fn = netcp_tx_notify; | ||
| 1505 | notify_cfg.fn_arg = netcp; | ||
| 1506 | ret = knav_queue_device_control(netcp->tx_compl_q, | ||
| 1507 | KNAV_QUEUE_SET_NOTIFIER, | ||
| 1508 | (unsigned long)¬ify_cfg); | ||
| 1509 | if (ret) | ||
| 1510 | goto fail; | ||
| 1511 | |||
| 1512 | knav_queue_disable_notify(netcp->tx_compl_q); | ||
| 1513 | |||
| 1514 | /* open Rx completion queue */ | ||
| 1515 | snprintf(name, sizeof(name), "rx-compl-%s", ndev->name); | ||
| 1516 | netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0); | ||
| 1517 | if (IS_ERR_OR_NULL(netcp->rx_queue)) { | ||
| 1518 | ret = PTR_ERR(netcp->rx_queue); | ||
| 1519 | goto fail; | ||
| 1520 | } | ||
| 1521 | netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue); | ||
| 1522 | |||
| 1523 | /* Set notification for Rx completion */ | ||
| 1524 | notify_cfg.fn = netcp_rx_notify; | ||
| 1525 | notify_cfg.fn_arg = netcp; | ||
| 1526 | ret = knav_queue_device_control(netcp->rx_queue, | ||
| 1527 | KNAV_QUEUE_SET_NOTIFIER, | ||
| 1528 | (unsigned long)¬ify_cfg); | ||
| 1529 | if (ret) | ||
| 1530 | goto fail; | ||
| 1531 | |||
| 1532 | knav_queue_disable_notify(netcp->rx_queue); | ||
| 1533 | |||
| 1534 | /* open Rx FDQs */ | ||
| 1535 | for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && | ||
| 1536 | netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) { | ||
| 1537 | snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i); | ||
| 1538 | netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0); | ||
| 1539 | if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) { | ||
| 1540 | ret = PTR_ERR(netcp->rx_fdq[i]); | ||
| 1541 | goto fail; | ||
| 1542 | } | ||
| 1543 | } | ||
| 1544 | |||
| 1545 | memset(&config, 0, sizeof(config)); | ||
| 1546 | config.direction = DMA_DEV_TO_MEM; | ||
| 1547 | config.u.rx.einfo_present = true; | ||
| 1548 | config.u.rx.psinfo_present = true; | ||
| 1549 | config.u.rx.err_mode = DMA_DROP; | ||
| 1550 | config.u.rx.desc_type = DMA_DESC_HOST; | ||
| 1551 | config.u.rx.psinfo_at_sop = false; | ||
| 1552 | config.u.rx.sop_offset = NETCP_SOP_OFFSET; | ||
| 1553 | config.u.rx.dst_q = netcp->rx_queue_id; | ||
| 1554 | config.u.rx.thresh = DMA_THRESH_NONE; | ||
| 1555 | |||
| 1556 | for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) { | ||
| 1557 | if (netcp->rx_fdq[i]) | ||
| 1558 | last_fdq = knav_queue_get_id(netcp->rx_fdq[i]); | ||
| 1559 | config.u.rx.fdq[i] = last_fdq; | ||
| 1560 | } | ||
| 1561 | |||
| 1562 | netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device, | ||
| 1563 | netcp->dma_chan_name, &config); | ||
| 1564 | if (IS_ERR_OR_NULL(netcp->rx_channel)) { | ||
| 1565 | dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n", | ||
| 1566 | netcp->dma_chan_name); | ||
| 1567 | goto fail; | ||
| 1568 | } | ||
| 1569 | |||
| 1570 | dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel); | ||
| 1571 | return 0; | ||
| 1572 | |||
| 1573 | fail: | ||
| 1574 | netcp_free_navigator_resources(netcp); | ||
| 1575 | return ret; | ||
| 1576 | } | ||
| 1577 | |||
/* Open the device.
 *
 * ndo_open implementation: allocates the Navigator resources, runs each
 * registered module's ->open, refills the RX pool, enables NAPI and
 * queue notifications, and wakes the TX queues.  Any failure unwinds
 * everything via fail/fail_open.
 */
static int netcp_ndo_open(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret;

	netif_carrier_off(ndev);
	ret = netcp_setup_navigator_resources(ndev);
	if (ret) {
		dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
		goto fail;
	}

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->open) {
			ret = module->open(intf_modpriv->module_priv, ndev);
			if (ret != 0) {
				dev_err(netcp->ndev_dev, "module open failed\n");
				goto fail_open;
			}
		}
	}
	mutex_unlock(&netcp_modules_lock);

	/* prime the RX free-descriptor queues, then start accepting work */
	netcp_rxpool_refill(netcp);
	napi_enable(&netcp->rx_napi);
	napi_enable(&netcp->tx_napi);
	knav_queue_enable_notify(netcp->tx_compl_q);
	knav_queue_enable_notify(netcp->rx_queue);
	netif_tx_wake_all_queues(ndev);
	dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
	return 0;

fail_open:
	/* NOTE(review): rollback calls ->close on EVERY module, including
	 * ones whose ->open was never reached or which just failed;
	 * assumes module close handlers tolerate that — confirm. */
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close)
			module->close(intf_modpriv->module_priv, ndev);
	}
	mutex_unlock(&netcp_modules_lock);

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}
| 1627 | |||
/* Close the device.
 *
 * ndo_stop implementation: quiesces the stack and hardware
 * notifications, disables NAPI, lets each module tear down, drains the
 * completion queues, verifies the TX descriptor pool is whole, and
 * frees all Navigator resources.  Always returns 0.
 */
static int netcp_ndo_stop(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	/* stop new work before disabling the pollers */
	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	/* clearing all marks then sweeping deletes every tracked address */
	netcp_addr_clear_mark(netcp);
	netcp_addr_sweep_del(netcp);
	knav_queue_disable_notify(netcp->rx_queue);
	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_disable(&netcp->rx_napi);
	napi_disable(&netcp->tx_napi);

	/* module close errors are logged but do not abort the shutdown */
	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close) {
			err = module->close(intf_modpriv->module_priv, ndev);
			if (err != 0)
				dev_err(netcp->ndev_dev, "Close failed\n");
		}
	}
	mutex_unlock(&netcp_modules_lock);

	/* Recycle Rx descriptors from completion queue */
	netcp_empty_rx_queue(netcp);

	/* Recycle Tx descriptors from completion queue */
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);

	/* every TX descriptor should be back in the pool by now */
	if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
		dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
			netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));

	netcp_free_navigator_resources(netcp);
	dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
	return 0;
}
| 1670 | |||
/* Fan an ioctl out to every module that implements ->ioctl.
 *
 * Returns 0 if at least one module handled the command (returned 0),
 * the first real error (anything other than -EOPNOTSUPP) if a module
 * rejects it, or -EOPNOTSUPP when no module handles it at all.
 * -EINVAL if the interface is down.
 */
static int netcp_ndo_ioctl(struct net_device *ndev,
			   struct ifreq *req, int cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	/* ret tracks "handled by someone" (-1 = not yet); err tracks the
	 * last module result, defaulting to -EOPNOTSUPP */
	int ret = -1, err = -EOPNOTSUPP;

	if (!netif_running(ndev))
		return -EINVAL;

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (!module->ioctl)
			continue;

		err = module->ioctl(intf_modpriv->module_priv, req, cmd);
		/* a real error aborts the fan-out immediately */
		if ((err < 0) && (err != -EOPNOTSUPP)) {
			ret = err;
			goto out;
		}
		/* remember that some module handled the command */
		if (err == 0)
			ret = err;
	}

out:
	mutex_unlock(&netcp_modules_lock);
	return (ret == 0) ? 0 : err;
}
| 1701 | |||
| 1702 | static int netcp_ndo_change_mtu(struct net_device *ndev, int new_mtu) | ||
| 1703 | { | ||
| 1704 | struct netcp_intf *netcp = netdev_priv(ndev); | ||
| 1705 | |||
| 1706 | /* MTU < 68 is an error for IPv4 traffic */ | ||
| 1707 | if ((new_mtu < 68) || | ||
| 1708 | (new_mtu > (NETCP_MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN))) { | ||
| 1709 | dev_err(netcp->ndev_dev, "Invalid mtu size = %d\n", new_mtu); | ||
| 1710 | return -EINVAL; | ||
| 1711 | } | ||
| 1712 | |||
| 1713 | ndev->mtu = new_mtu; | ||
| 1714 | return 0; | ||
| 1715 | } | ||
| 1716 | |||
| 1717 | static void netcp_ndo_tx_timeout(struct net_device *ndev) | ||
| 1718 | { | ||
| 1719 | struct netcp_intf *netcp = netdev_priv(ndev); | ||
| 1720 | unsigned int descs = knav_pool_count(netcp->tx_pool); | ||
| 1721 | |||
| 1722 | dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs); | ||
| 1723 | netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size); | ||
| 1724 | ndev->trans_start = jiffies; | ||
| 1725 | netif_tx_wake_all_queues(ndev); | ||
| 1726 | } | ||
| 1727 | |||
| 1728 | static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) | ||
| 1729 | { | ||
| 1730 | struct netcp_intf *netcp = netdev_priv(ndev); | ||
| 1731 | struct netcp_intf_modpriv *intf_modpriv; | ||
| 1732 | struct netcp_module *module; | ||
| 1733 | int err = 0; | ||
| 1734 | |||
| 1735 | dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid); | ||
| 1736 | |||
| 1737 | mutex_lock(&netcp_modules_lock); | ||
| 1738 | for_each_module(netcp, intf_modpriv) { | ||
| 1739 | module = intf_modpriv->netcp_module; | ||
| 1740 | if ((module->add_vid) && (vid != 0)) { | ||
| 1741 | err = module->add_vid(intf_modpriv->module_priv, vid); | ||
| 1742 | if (err != 0) { | ||
| 1743 | dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n", | ||
| 1744 | vid); | ||
| 1745 | break; | ||
| 1746 | } | ||
| 1747 | } | ||
| 1748 | } | ||
| 1749 | mutex_unlock(&netcp_modules_lock); | ||
| 1750 | return err; | ||
| 1751 | } | ||
| 1752 | |||
| 1753 | static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) | ||
| 1754 | { | ||
| 1755 | struct netcp_intf *netcp = netdev_priv(ndev); | ||
| 1756 | struct netcp_intf_modpriv *intf_modpriv; | ||
| 1757 | struct netcp_module *module; | ||
| 1758 | int err = 0; | ||
| 1759 | |||
| 1760 | dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid); | ||
| 1761 | |||
| 1762 | mutex_lock(&netcp_modules_lock); | ||
| 1763 | for_each_module(netcp, intf_modpriv) { | ||
| 1764 | module = intf_modpriv->netcp_module; | ||
| 1765 | if (module->del_vid) { | ||
| 1766 | err = module->del_vid(intf_modpriv->module_priv, vid); | ||
| 1767 | if (err != 0) { | ||
| 1768 | dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n", | ||
| 1769 | vid); | ||
| 1770 | break; | ||
| 1771 | } | ||
| 1772 | } | ||
| 1773 | } | ||
| 1774 | mutex_unlock(&netcp_modules_lock); | ||
| 1775 | return err; | ||
| 1776 | } | ||
| 1777 | |||
/* ndo_select_queue: always steer to queue 0 — the interface is created
 * with a single real TX queue (see the alloc_etherdev_mqs(..., 1, 1)
 * call in netcp_create_interface()).
 */
static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
			      void *accel_priv,
			      select_queue_fallback_t fallback)
{
	return 0;
}
| 1784 | |||
| 1785 | static int netcp_setup_tc(struct net_device *dev, u8 num_tc) | ||
| 1786 | { | ||
| 1787 | int i; | ||
| 1788 | |||
| 1789 | /* setup tc must be called under rtnl lock */ | ||
| 1790 | ASSERT_RTNL(); | ||
| 1791 | |||
| 1792 | /* Sanity-check the number of traffic classes requested */ | ||
| 1793 | if ((dev->real_num_tx_queues <= 1) || | ||
| 1794 | (dev->real_num_tx_queues < num_tc)) | ||
| 1795 | return -EINVAL; | ||
| 1796 | |||
| 1797 | /* Configure traffic class to queue mappings */ | ||
| 1798 | if (num_tc) { | ||
| 1799 | netdev_set_num_tc(dev, num_tc); | ||
| 1800 | for (i = 0; i < num_tc; i++) | ||
| 1801 | netdev_set_tc_queue(dev, i, 1, i); | ||
| 1802 | } else { | ||
| 1803 | netdev_reset_tc(dev); | ||
| 1804 | } | ||
| 1805 | |||
| 1806 | return 0; | ||
| 1807 | } | ||
| 1808 | |||
/* netdev ops vector shared by all NetCP interfaces (1G GBE and 10G
 * XGBE variants alike). */
static const struct net_device_ops netcp_netdev_ops = {
	.ndo_open = netcp_ndo_open,
	.ndo_stop = netcp_ndo_stop,
	.ndo_start_xmit = netcp_ndo_start_xmit,
	.ndo_set_rx_mode = netcp_set_rx_mode,
	.ndo_do_ioctl = netcp_ndo_ioctl,
	.ndo_change_mtu = netcp_ndo_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = netcp_rx_add_vid,
	.ndo_vlan_rx_kill_vid = netcp_rx_kill_vid,
	.ndo_tx_timeout = netcp_ndo_tx_timeout,
	.ndo_select_queue = netcp_select_queue,
	.ndo_setup_tc = netcp_setup_tc,
};
| 1824 | |||
| 1825 | static int netcp_create_interface(struct netcp_device *netcp_device, | ||
| 1826 | struct device_node *node_interface) | ||
| 1827 | { | ||
| 1828 | struct device *dev = netcp_device->device; | ||
| 1829 | struct device_node *node = dev->of_node; | ||
| 1830 | struct netcp_intf *netcp; | ||
| 1831 | struct net_device *ndev; | ||
| 1832 | resource_size_t size; | ||
| 1833 | struct resource res; | ||
| 1834 | void __iomem *efuse = NULL; | ||
| 1835 | u32 efuse_mac = 0; | ||
| 1836 | const void *mac_addr; | ||
| 1837 | u8 efuse_mac_addr[6]; | ||
| 1838 | u32 temp[2]; | ||
| 1839 | int ret = 0; | ||
| 1840 | |||
| 1841 | ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1); | ||
| 1842 | if (!ndev) { | ||
| 1843 | dev_err(dev, "Error allocating netdev\n"); | ||
| 1844 | return -ENOMEM; | ||
| 1845 | } | ||
| 1846 | |||
| 1847 | ndev->features |= NETIF_F_SG; | ||
| 1848 | ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; | ||
| 1849 | ndev->hw_features = ndev->features; | ||
| 1850 | ndev->vlan_features |= NETIF_F_SG; | ||
| 1851 | |||
| 1852 | netcp = netdev_priv(ndev); | ||
| 1853 | spin_lock_init(&netcp->lock); | ||
| 1854 | INIT_LIST_HEAD(&netcp->module_head); | ||
| 1855 | INIT_LIST_HEAD(&netcp->txhook_list_head); | ||
| 1856 | INIT_LIST_HEAD(&netcp->rxhook_list_head); | ||
| 1857 | INIT_LIST_HEAD(&netcp->addr_list); | ||
| 1858 | netcp->netcp_device = netcp_device; | ||
| 1859 | netcp->dev = netcp_device->device; | ||
| 1860 | netcp->ndev = ndev; | ||
| 1861 | netcp->ndev_dev = &ndev->dev; | ||
| 1862 | netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG); | ||
| 1863 | netcp->tx_pause_threshold = MAX_SKB_FRAGS; | ||
| 1864 | netcp->tx_resume_threshold = netcp->tx_pause_threshold; | ||
| 1865 | netcp->node_interface = node_interface; | ||
| 1866 | |||
| 1867 | ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac); | ||
| 1868 | if (efuse_mac) { | ||
| 1869 | if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) { | ||
| 1870 | dev_err(dev, "could not find efuse-mac reg resource\n"); | ||
| 1871 | ret = -ENODEV; | ||
| 1872 | goto quit; | ||
| 1873 | } | ||
| 1874 | size = resource_size(&res); | ||
| 1875 | |||
| 1876 | if (!devm_request_mem_region(dev, res.start, size, | ||
| 1877 | dev_name(dev))) { | ||
| 1878 | dev_err(dev, "could not reserve resource\n"); | ||
| 1879 | ret = -ENOMEM; | ||
| 1880 | goto quit; | ||
| 1881 | } | ||
| 1882 | |||
| 1883 | efuse = devm_ioremap_nocache(dev, res.start, size); | ||
| 1884 | if (!efuse) { | ||
| 1885 | dev_err(dev, "could not map resource\n"); | ||
| 1886 | devm_release_mem_region(dev, res.start, size); | ||
| 1887 | ret = -ENOMEM; | ||
| 1888 | goto quit; | ||
| 1889 | } | ||
| 1890 | |||
| 1891 | emac_arch_get_mac_addr(efuse_mac_addr, efuse); | ||
| 1892 | if (is_valid_ether_addr(efuse_mac_addr)) | ||
| 1893 | ether_addr_copy(ndev->dev_addr, efuse_mac_addr); | ||
| 1894 | else | ||
| 1895 | random_ether_addr(ndev->dev_addr); | ||
| 1896 | |||
| 1897 | devm_iounmap(dev, efuse); | ||
| 1898 | devm_release_mem_region(dev, res.start, size); | ||
| 1899 | } else { | ||
| 1900 | mac_addr = of_get_mac_address(node_interface); | ||
| 1901 | if (mac_addr) | ||
| 1902 | ether_addr_copy(ndev->dev_addr, mac_addr); | ||
| 1903 | else | ||
| 1904 | random_ether_addr(ndev->dev_addr); | ||
| 1905 | } | ||
| 1906 | |||
| 1907 | ret = of_property_read_string(node_interface, "rx-channel", | ||
| 1908 | &netcp->dma_chan_name); | ||
| 1909 | if (ret < 0) { | ||
| 1910 | dev_err(dev, "missing \"rx-channel\" parameter\n"); | ||
| 1911 | ret = -ENODEV; | ||
| 1912 | goto quit; | ||
| 1913 | } | ||
| 1914 | |||
| 1915 | ret = of_property_read_u32(node_interface, "rx-queue", | ||
| 1916 | &netcp->rx_queue_id); | ||
| 1917 | if (ret < 0) { | ||
| 1918 | dev_warn(dev, "missing \"rx-queue\" parameter\n"); | ||
| 1919 | netcp->rx_queue_id = KNAV_QUEUE_QPEND; | ||
| 1920 | } | ||
| 1921 | |||
| 1922 | ret = of_property_read_u32_array(node_interface, "rx-queue-depth", | ||
| 1923 | netcp->rx_queue_depths, | ||
| 1924 | KNAV_DMA_FDQ_PER_CHAN); | ||
| 1925 | if (ret < 0) { | ||
| 1926 | dev_err(dev, "missing \"rx-queue-depth\" parameter\n"); | ||
| 1927 | netcp->rx_queue_depths[0] = 128; | ||
| 1928 | } | ||
| 1929 | |||
| 1930 | ret = of_property_read_u32_array(node_interface, "rx-buffer-size", | ||
| 1931 | netcp->rx_buffer_sizes, | ||
| 1932 | KNAV_DMA_FDQ_PER_CHAN); | ||
| 1933 | if (ret) { | ||
| 1934 | dev_err(dev, "missing \"rx-buffer-size\" parameter\n"); | ||
| 1935 | netcp->rx_buffer_sizes[0] = 1536; | ||
| 1936 | } | ||
| 1937 | |||
| 1938 | ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2); | ||
| 1939 | if (ret < 0) { | ||
| 1940 | dev_err(dev, "missing \"rx-pool\" parameter\n"); | ||
| 1941 | ret = -ENODEV; | ||
| 1942 | goto quit; | ||
| 1943 | } | ||
| 1944 | netcp->rx_pool_size = temp[0]; | ||
| 1945 | netcp->rx_pool_region_id = temp[1]; | ||
| 1946 | |||
| 1947 | ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2); | ||
| 1948 | if (ret < 0) { | ||
| 1949 | dev_err(dev, "missing \"tx-pool\" parameter\n"); | ||
| 1950 | ret = -ENODEV; | ||
| 1951 | goto quit; | ||
| 1952 | } | ||
| 1953 | netcp->tx_pool_size = temp[0]; | ||
| 1954 | netcp->tx_pool_region_id = temp[1]; | ||
| 1955 | |||
| 1956 | if (netcp->tx_pool_size < MAX_SKB_FRAGS) { | ||
| 1957 | dev_err(dev, "tx-pool size too small, must be atleast(%ld)\n", | ||
| 1958 | MAX_SKB_FRAGS); | ||
| 1959 | ret = -ENODEV; | ||
| 1960 | goto quit; | ||
| 1961 | } | ||
| 1962 | |||
| 1963 | ret = of_property_read_u32(node_interface, "tx-completion-queue", | ||
| 1964 | &netcp->tx_compl_qid); | ||
| 1965 | if (ret < 0) { | ||
| 1966 | dev_warn(dev, "missing \"tx-completion-queue\" parameter\n"); | ||
| 1967 | netcp->tx_compl_qid = KNAV_QUEUE_QPEND; | ||
| 1968 | } | ||
| 1969 | |||
| 1970 | /* NAPI register */ | ||
| 1971 | netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT); | ||
| 1972 | netif_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT); | ||
| 1973 | |||
| 1974 | /* Register the network device */ | ||
| 1975 | ndev->dev_id = 0; | ||
| 1976 | ndev->watchdog_timeo = NETCP_TX_TIMEOUT; | ||
| 1977 | ndev->netdev_ops = &netcp_netdev_ops; | ||
| 1978 | SET_NETDEV_DEV(ndev, dev); | ||
| 1979 | |||
| 1980 | list_add_tail(&netcp->interface_list, &netcp_device->interface_head); | ||
| 1981 | return 0; | ||
| 1982 | |||
| 1983 | quit: | ||
| 1984 | free_netdev(ndev); | ||
| 1985 | return ret; | ||
| 1986 | } | ||
| 1987 | |||
| 1988 | static void netcp_delete_interface(struct netcp_device *netcp_device, | ||
| 1989 | struct net_device *ndev) | ||
| 1990 | { | ||
| 1991 | struct netcp_intf_modpriv *intf_modpriv, *tmp; | ||
| 1992 | struct netcp_intf *netcp = netdev_priv(ndev); | ||
| 1993 | struct netcp_module *module; | ||
| 1994 | |||
| 1995 | dev_dbg(netcp_device->device, "Removing interface \"%s\"\n", | ||
| 1996 | ndev->name); | ||
| 1997 | |||
| 1998 | /* Notify each of the modules that the interface is going away */ | ||
| 1999 | list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head, | ||
| 2000 | intf_list) { | ||
| 2001 | module = intf_modpriv->netcp_module; | ||
| 2002 | dev_dbg(netcp_device->device, "Releasing module \"%s\"\n", | ||
| 2003 | module->name); | ||
| 2004 | if (module->release) | ||
| 2005 | module->release(intf_modpriv->module_priv); | ||
| 2006 | list_del(&intf_modpriv->intf_list); | ||
| 2007 | kfree(intf_modpriv); | ||
| 2008 | } | ||
| 2009 | WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n", | ||
| 2010 | ndev->name); | ||
| 2011 | |||
| 2012 | list_del(&netcp->interface_list); | ||
| 2013 | |||
| 2014 | of_node_put(netcp->node_interface); | ||
| 2015 | unregister_netdev(ndev); | ||
| 2016 | netif_napi_del(&netcp->rx_napi); | ||
| 2017 | free_netdev(ndev); | ||
| 2018 | } | ||
| 2019 | |||
| 2020 | static int netcp_probe(struct platform_device *pdev) | ||
| 2021 | { | ||
| 2022 | struct device_node *node = pdev->dev.of_node; | ||
| 2023 | struct netcp_intf *netcp_intf, *netcp_tmp; | ||
| 2024 | struct device_node *child, *interfaces; | ||
| 2025 | struct netcp_device *netcp_device; | ||
| 2026 | struct device *dev = &pdev->dev; | ||
| 2027 | struct netcp_module *module; | ||
| 2028 | int ret; | ||
| 2029 | |||
| 2030 | if (!node) { | ||
| 2031 | dev_err(dev, "could not find device info\n"); | ||
| 2032 | return -ENODEV; | ||
| 2033 | } | ||
| 2034 | |||
| 2035 | /* Allocate a new NETCP device instance */ | ||
| 2036 | netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL); | ||
| 2037 | if (!netcp_device) | ||
| 2038 | return -ENOMEM; | ||
| 2039 | |||
| 2040 | pm_runtime_enable(&pdev->dev); | ||
| 2041 | ret = pm_runtime_get_sync(&pdev->dev); | ||
| 2042 | if (ret < 0) { | ||
| 2043 | dev_err(dev, "Failed to enable NETCP power-domain\n"); | ||
| 2044 | pm_runtime_disable(&pdev->dev); | ||
| 2045 | return ret; | ||
| 2046 | } | ||
| 2047 | |||
| 2048 | /* Initialize the NETCP device instance */ | ||
| 2049 | INIT_LIST_HEAD(&netcp_device->interface_head); | ||
| 2050 | INIT_LIST_HEAD(&netcp_device->modpriv_head); | ||
| 2051 | netcp_device->device = dev; | ||
| 2052 | platform_set_drvdata(pdev, netcp_device); | ||
| 2053 | |||
| 2054 | /* create interfaces */ | ||
| 2055 | interfaces = of_get_child_by_name(node, "netcp-interfaces"); | ||
| 2056 | if (!interfaces) { | ||
| 2057 | dev_err(dev, "could not find netcp-interfaces node\n"); | ||
| 2058 | ret = -ENODEV; | ||
| 2059 | goto probe_quit; | ||
| 2060 | } | ||
| 2061 | |||
| 2062 | for_each_available_child_of_node(interfaces, child) { | ||
| 2063 | ret = netcp_create_interface(netcp_device, child); | ||
| 2064 | if (ret) { | ||
| 2065 | dev_err(dev, "could not create interface(%s)\n", | ||
| 2066 | child->name); | ||
| 2067 | goto probe_quit_interface; | ||
| 2068 | } | ||
| 2069 | } | ||
| 2070 | |||
| 2071 | /* Add the device instance to the list */ | ||
| 2072 | list_add_tail(&netcp_device->device_list, &netcp_devices); | ||
| 2073 | |||
| 2074 | /* Probe & attach any modules already registered */ | ||
| 2075 | mutex_lock(&netcp_modules_lock); | ||
| 2076 | for_each_netcp_module(module) { | ||
| 2077 | ret = netcp_module_probe(netcp_device, module); | ||
| 2078 | if (ret < 0) | ||
| 2079 | dev_err(dev, "module(%s) probe failed\n", module->name); | ||
| 2080 | } | ||
| 2081 | mutex_unlock(&netcp_modules_lock); | ||
| 2082 | return 0; | ||
| 2083 | |||
| 2084 | probe_quit_interface: | ||
| 2085 | list_for_each_entry_safe(netcp_intf, netcp_tmp, | ||
| 2086 | &netcp_device->interface_head, | ||
| 2087 | interface_list) { | ||
| 2088 | netcp_delete_interface(netcp_device, netcp_intf->ndev); | ||
| 2089 | } | ||
| 2090 | |||
| 2091 | probe_quit: | ||
| 2092 | pm_runtime_put_sync(&pdev->dev); | ||
| 2093 | pm_runtime_disable(&pdev->dev); | ||
| 2094 | platform_set_drvdata(pdev, NULL); | ||
| 2095 | return ret; | ||
| 2096 | } | ||
| 2097 | |||
| 2098 | static int netcp_remove(struct platform_device *pdev) | ||
| 2099 | { | ||
| 2100 | struct netcp_device *netcp_device = platform_get_drvdata(pdev); | ||
| 2101 | struct netcp_inst_modpriv *inst_modpriv, *tmp; | ||
| 2102 | struct netcp_module *module; | ||
| 2103 | |||
| 2104 | list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head, | ||
| 2105 | inst_list) { | ||
| 2106 | module = inst_modpriv->netcp_module; | ||
| 2107 | dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name); | ||
| 2108 | module->remove(netcp_device, inst_modpriv->module_priv); | ||
| 2109 | list_del(&inst_modpriv->inst_list); | ||
| 2110 | kfree(inst_modpriv); | ||
| 2111 | } | ||
| 2112 | WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n", | ||
| 2113 | pdev->name); | ||
| 2114 | |||
| 2115 | devm_kfree(&pdev->dev, netcp_device); | ||
| 2116 | pm_runtime_put_sync(&pdev->dev); | ||
| 2117 | pm_runtime_disable(&pdev->dev); | ||
| 2118 | platform_set_drvdata(pdev, NULL); | ||
| 2119 | return 0; | ||
| 2120 | } | ||
| 2121 | |||
| 2122 | static struct of_device_id of_match[] = { | ||
| 2123 | { .compatible = "ti,netcp-1.0", }, | ||
| 2124 | {}, | ||
| 2125 | }; | ||
| 2126 | MODULE_DEVICE_TABLE(of, of_match); | ||
| 2127 | |||
| 2128 | static struct platform_driver netcp_driver = { | ||
| 2129 | .driver = { | ||
| 2130 | .name = "netcp-1.0", | ||
| 2131 | .owner = THIS_MODULE, | ||
| 2132 | .of_match_table = of_match, | ||
| 2133 | }, | ||
| 2134 | .probe = netcp_probe, | ||
| 2135 | .remove = netcp_remove, | ||
| 2136 | }; | ||
| 2137 | module_platform_driver(netcp_driver); | ||
| 2138 | |||
| 2139 | MODULE_LICENSE("GPL v2"); | ||
| 2140 | MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs"); | ||
| 2141 | MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com"); | ||
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c new file mode 100644 index 000000000000..fa1041a78b46 --- /dev/null +++ b/drivers/net/ethernet/ti/netcp_ethss.c | |||
| @@ -0,0 +1,2156 @@ | |||
| 1 | /* | ||
| 2 | * Keystone GBE and XGBE subsystem code | ||
| 3 | * | ||
| 4 | * Copyright (C) 2014 Texas Instruments Incorporated | ||
| 5 | * Authors: Sandeep Nair <sandeep_n@ti.com> | ||
| 6 | * Sandeep Paulraj <s-paulraj@ti.com> | ||
| 7 | * Cyril Chemparathy <cyril@ti.com> | ||
| 8 | * Santosh Shilimkar <santosh.shilimkar@ti.com> | ||
| 9 | * Wingman Kwok <w-kwok2@ti.com> | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or | ||
| 12 | * modify it under the terms of the GNU General Public License as | ||
| 13 | * published by the Free Software Foundation version 2. | ||
| 14 | * | ||
| 15 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
| 16 | * kind, whether express or implied; without even the implied warranty | ||
| 17 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | * GNU General Public License for more details. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/io.h> | ||
| 22 | #include <linux/of_mdio.h> | ||
| 23 | #include <linux/of_address.h> | ||
| 24 | #include <linux/if_vlan.h> | ||
| 25 | #include <linux/ethtool.h> | ||
| 26 | |||
| 27 | #include "cpsw_ale.h" | ||
| 28 | #include "netcp.h" | ||
| 29 | |||
#define NETCP_DRIVER_NAME		"TI KeyStone Ethernet Driver"
#define NETCP_DRIVER_VERSION		"v1.0"

/* Field extractors for the subsystem ID/version register.  Arguments are
 * fully parenthesized (the originals were not) so callers may pass
 * arbitrary expressions without precedence surprises.
 */
#define GBE_IDENT(reg)			(((reg) >> 16) & 0xffff)
#define GBE_MAJOR_VERSION(reg)		(((reg) >> 8) & 0x7)
#define GBE_MINOR_VERSION(reg)		((reg) & 0xff)
#define GBE_RTL_VERSION(reg)		(((reg) >> 11) & 0x1f)
| 37 | |||
/* 1G Ethernet SS defines */
#define GBE_MODULE_NAME			"netcp-gbe"
#define GBE_SS_VERSION_14		0x4ed21104

/* Offsets of the GBE v1.3 sub-modules within the subsystem register space */
#define GBE13_SGMII_MODULE_OFFSET	0x100
#define GBE13_SGMII34_MODULE_OFFSET	0x400
#define GBE13_SWITCH_MODULE_OFFSET	0x800
#define GBE13_HOST_PORT_OFFSET		0x834
#define GBE13_SLAVE_PORT_OFFSET		0x860
#define GBE13_EMAC_OFFSET		0x900
#define GBE13_SLAVE_PORT2_OFFSET	0xa00
#define GBE13_HW_STATS_OFFSET		0xb00
#define GBE13_ALE_OFFSET		0xe00
#define GBE13_HOST_PORT_NUM		0
#define GBE13_NUM_SLAVES		4
/* the ALE sees the host port plus all slave ports */
#define GBE13_NUM_ALE_PORTS		(GBE13_NUM_SLAVES + 1)
#define GBE13_NUM_ALE_ENTRIES		1024

/* 10G Ethernet SS defines */
#define XGBE_MODULE_NAME		"netcp-xgbe"
#define XGBE_SS_VERSION_10		0x4ee42100

#define XGBE_SERDES_REG_INDEX		1
#define XGBE10_SGMII_MODULE_OFFSET	0x100
#define XGBE10_SWITCH_MODULE_OFFSET	0x1000
#define XGBE10_HOST_PORT_OFFSET		0x1034
#define XGBE10_SLAVE_PORT_OFFSET	0x1064
#define XGBE10_EMAC_OFFSET		0x1400
#define XGBE10_ALE_OFFSET		0x1700
#define XGBE10_HW_STATS_OFFSET		0x1800
#define XGBE10_HOST_PORT_NUM		0
#define XGBE10_NUM_SLAVES		2
#define XGBE10_NUM_ALE_PORTS		(XGBE10_NUM_SLAVES + 1)
#define XGBE10_NUM_ALE_ENTRIES		1024

/* period of the periodic timer (presumably link/stats polling — the timer
 * handler is outside this chunk; confirm at the setup_timer() site)
 */
#define GBE_TIMER_INTERVAL		(HZ / 2)

/* Soft reset register values */
#define SOFT_RESET_MASK			BIT(0)
#define SOFT_RESET			BIT(0)
#define DEVICE_EMACSL_RESET_POLL_COUNT	100
#define GMACSL_RET_WARN_RESET_INCOMPLETE	-2

/* MAC sliver (EMAC) mac_control register bits */
#define MACSL_RX_ENABLE_CSF		BIT(23)
#define MACSL_ENABLE_EXT_CTL		BIT(18)
#define MACSL_XGMII_ENABLE		BIT(13)
#define MACSL_XGIG_MODE			BIT(8)
#define MACSL_GIG_MODE			BIT(7)
#define MACSL_GMII_ENABLE		BIT(5)
#define MACSL_FULLDUPLEX		BIT(0)

#define GBE_CTL_P0_ENABLE		BIT(2)
#define GBE_REG_VAL_STAT_ENABLE_ALL	0xff
#define XGBE_REG_VAL_STAT_ENABLE_ALL	0xf
#define GBE_STATS_CD_SEL		BIT(28)

/* bitmask covering ports 0..x-1 */
#define GBE_PORT_MASK(x)		(BIT(x) - 1)
#define GBE_MASK_NO_PORTS		0

/* default mac_control values for 1G and 10G slave ports */
#define GBE_DEF_1G_MAC_CONTROL					\
		(MACSL_GIG_MODE | MACSL_GMII_ENABLE |		\
		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)

#define GBE_DEF_10G_MAC_CONTROL					\
		(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |		\
		 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)

/* indices of the per-module hardware statistics blocks */
#define GBE_STATSA_MODULE		0
#define GBE_STATSB_MODULE		1
#define GBE_STATSC_MODULE		2
#define GBE_STATSD_MODULE		3

#define XGBE_STATS0_MODULE		0
#define XGBE_STATS1_MODULE		1
#define XGBE_STATS2_MODULE		2

#define MAX_SLAVES			GBE13_NUM_SLAVES
/* s: 0-based slave_port.
 * NOTE: expands the local variable 'gbe_dev' at the call site; slaves 0/1
 * live in the first SGMII block, slaves 2/3 in the second.
 */
#define SGMII_BASE(s) \
	(((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)

#define GBE_TX_QUEUE			648
#define GBE_TXHOOK_ORDER		0
#define GBE_DEFAULT_ALE_AGEOUT		30
#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
#define NETCP_LINK_STATE_INVALID	-1

/* GBE and XGBE share register-block names but differ in layout, so offsets
 * are recorded in *_ofs structs at init time (GBE_SET_REG_OFS /
 * XGBE_SET_REG_OFS) and resolved at access time via GBE_REG_ADDR.
 */
#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct gbe##_##rb, rn)
#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
		offsetof(struct xgbe##_##rb, rn)
#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
| 130 | |||
/* XGBE (10G) subsystem register layouts.  Field offsets are consumed via
 * offsetof() (see XGBE_SET_REG_OFS), i.e. these structs mirror the hardware
 * register maps — do not reorder or insert fields.
 */
struct xgbe_ss_regs {
	u32	id_ver;
	u32	synce_count;
	u32	synce_mux;
	u32	control;
};

/* XGBE switch-module registers */
struct xgbe_switch_regs {
	u32	id_ver;
	u32	control;
	u32	emcontrol;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
	u32	cppi_thresh;
};

/* XGBE per-slave-port registers */
struct xgbe_port_regs {
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	sa_lo;
	u32	sa_hi;
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
	u32	ts_ctl_ltype2;
	u32	ts_ctl2;
	u32	control;
};

/* XGBE host (CPU-facing) port registers */
struct xgbe_host_port_regs {
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	src_id;
	u32	rx_pri_map;
	u32	rx_maxlen;
};

/* XGBE MAC sliver (EMAC) registers */
struct xgbe_emac_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	em_control;
	u32	__reserved_1;
	u32	tx_gap;
	u32	rsvd[4];
};

/* Hardware statistics counters for the XGBE host port; the __rsvd_* pads
 * cover counters the host port does not implement so that the remaining
 * fields stay aligned with the hardware block.
 */
struct xgbe_host_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	__rsvd_0[3];
	u32	rx_oversized_frames;
	u32	__rsvd_1;
	u32	rx_undersized_frames;
	u32	__rsvd_2;
	u32	overrun_type4;
	u32	overrun_type5;
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	__rsvd_3[9];
	u32	tx_bytes;
	u32	tx_64byte_frames;
	u32	tx_65_to_127byte_frames;
	u32	tx_128_to_255byte_frames;
	u32	tx_256_to_511byte_frames;
	u32	tx_512_to_1023byte_frames;
	u32	tx_1024byte_frames;
	u32	net_bytes;
	u32	rx_sof_overruns;
	u32	rx_mof_overruns;
	u32	rx_dma_overruns;
};

/* Full hardware statistics block for an XGBE slave port */
struct xgbe_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	rx_pause_frames;
	u32	rx_crc_errors;
	u32	rx_align_code_errors;
	u32	rx_oversized_frames;
	u32	rx_jabber_frames;
	u32	rx_undersized_frames;
	u32	rx_fragments;
	u32	overrun_type4;
	u32	overrun_type5;
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	tx_pause_frames;
	u32	tx_deferred_frames;
	u32	tx_collision_frames;
	u32	tx_single_coll_frames;
	u32	tx_mult_coll_frames;
	u32	tx_excessive_collisions;
	u32	tx_late_collisions;
	u32	tx_underrun;
	u32	tx_carrier_sense_errors;
	u32	tx_bytes;
	u32	tx_64byte_frames;
	u32	tx_65_to_127byte_frames;
	u32	tx_128_to_255byte_frames;
	u32	tx_256_to_511byte_frames;
	u32	tx_512_to_1023byte_frames;
	u32	tx_1024byte_frames;
	u32	net_bytes;
	u32	rx_sof_overruns;
	u32	rx_mof_overruns;
	u32	rx_dma_overruns;
};

/* number of u32 counters per XGBE stats module */
#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
| 259 | |||
/* GBE (1G) subsystem register layouts.  As with the XGBE structs, field
 * offsets are consumed via offsetof() (GBE_SET_REG_OFS) — do not reorder.
 * The *_ofs companion structs hold the per-version offsets recorded at
 * init time and used by GBE_REG_ADDR().
 */
struct gbe_ss_regs {
	u32	id_ver;
	u32	synce_count;
	u32	synce_mux;
};

struct gbe_ss_regs_ofs {
	u16	id_ver;
	u16	control;
};

struct gbe_switch_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
};

struct gbe_switch_regs_ofs {
	u16	id_ver;
	u16	control;
	u16	soft_reset;
	u16	emcontrol;
	u16	stat_port_en;
	u16	ptype;
	u16	flow_control;
};

/* per-slave-port registers */
struct gbe_port_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	sa_lo;
	u32	sa_hi;
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
	u32	ts_ctl_ltype2;
	u32	ts_ctl2;
};

struct gbe_port_regs_ofs {
	u16	port_vlan;
	u16	tx_pri_map;
	u16	sa_lo;
	u16	sa_hi;
	u16	ts_ctl;
	u16	ts_seq_ltype;
	u16	ts_vlan;
	u16	ts_ctl_ltype2;
	u16	ts_ctl2;
};

/* host (CPU-facing) port registers */
struct gbe_host_port_regs {
	u32	src_id;
	u32	port_vlan;
	u32	rx_pri_map;
	u32	rx_maxlen;
};

struct gbe_host_port_regs_ofs {
	u16	port_vlan;
	u16	tx_pri_map;
	u16	rx_maxlen;
};

/* MAC sliver (EMAC) registers */
struct gbe_emac_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
	u32	rsvd[6];
};

struct gbe_emac_regs_ofs {
	u16	mac_control;
	u16	soft_reset;
	u16	rx_maxlen;
};

/* Hardware statistics block for a GBE port; __pad_0 covers counters not
 * implemented by this hardware revision.
 */
struct gbe_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	rx_pause_frames;
	u32	rx_crc_errors;
	u32	rx_align_code_errors;
	u32	rx_oversized_frames;
	u32	rx_jabber_frames;
	u32	rx_undersized_frames;
	u32	rx_fragments;
	u32	__pad_0[2];
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	tx_pause_frames;
	u32	tx_deferred_frames;
	u32	tx_collision_frames;
	u32	tx_single_coll_frames;
	u32	tx_mult_coll_frames;
	u32	tx_excessive_collisions;
	u32	tx_late_collisions;
	u32	tx_underrun;
	u32	tx_carrier_sense_errors;
	u32	tx_bytes;
	u32	tx_64byte_frames;
	u32	tx_65_to_127byte_frames;
	u32	tx_128_to_255byte_frames;
	u32	tx_256_to_511byte_frames;
	u32	tx_512_to_1023byte_frames;
	u32	tx_1024byte_frames;
	u32	net_bytes;
	u32	rx_sof_overruns;
	u32	rx_mof_overruns;
	u32	rx_dma_overruns;
};

/* counters per stats module, and how many stats modules each SS variant has */
#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
#define GBE13_NUM_HW_STATS_MOD			2
#define XGBE10_NUM_HW_STATS_MOD			3
#define GBE_MAX_HW_STAT_MODS			3
#define GBE_HW_STATS_REG_MAP_SZ			0x100
| 396 | |||
/* Per-slave-port state: register windows, PHY binding and link status */
struct gbe_slave {
	void __iomem			*port_regs;	/* slave port registers */
	void __iomem			*emac_regs;	/* MAC sliver registers */
	struct gbe_port_regs_ofs	port_regs_ofs;	/* per-version offsets */
	struct gbe_emac_regs_ofs	emac_regs_ofs;
	int				slave_num; /* 0 based logical number */
	int				port_num;  /* actual port number */
	atomic_t			link_state;
	bool				open;
	struct phy_device		*phy;
	u32				link_interface;
	u32				mac_control;
	u8				phy_port_t;
	struct device_node		*phy_node;
	struct list_head		slave_list;
};

/* Per-subsystem (GBE or XGBE instance) state shared by all its interfaces */
struct gbe_priv {
	struct device			*dev;
	struct netcp_device		*netcp_device;
	struct timer_list		timer;		/* periodic poll timer */
	u32				num_slaves;
	u32				ale_entries;
	u32				ale_ports;
	bool				enable_ale;
	struct netcp_tx_pipe		tx_pipe;

	int				host_port;
	u32				rx_packet_max;
	u32				ss_version;	/* GBE_SS_VERSION_14 or XGBE_SS_VERSION_10 */

	/* mapped register windows; which ones are valid depends on ss_version */
	void __iomem			*ss_regs;
	void __iomem			*switch_regs;
	void __iomem			*host_port_regs;
	void __iomem			*ale_reg;
	void __iomem			*sgmii_port_regs;
	void __iomem			*sgmii_port34_regs;
	void __iomem			*xgbe_serdes_regs;
	void __iomem			*hw_stats_regs[GBE_MAX_HW_STAT_MODS];

	/* per-version register offsets (see GBE_SET_REG_OFS) */
	struct gbe_ss_regs_ofs		ss_regs_ofs;
	struct gbe_switch_regs_ofs	switch_regs_ofs;
	struct gbe_host_port_regs_ofs	host_port_regs_ofs;

	struct cpsw_ale			*ale;
	unsigned int			tx_queue_id;
	const char			*dma_chan_name;

	struct list_head		gbe_intf_head;
	struct list_head		secondary_slaves;
	struct net_device		*dummy_ndev;

	/* accumulated counters and their ethtool descriptions */
	u64				*hw_stats;
	const struct netcp_ethtool_stat *et_stats;
	int				num_et_stats;
	/* Lock for updating the hwstats */
	spinlock_t			hw_stats_lock;
};

/* Per-network-interface state tying a net_device to one slave port */
struct gbe_intf {
	struct net_device	*ndev;
	struct device		*dev;
	struct gbe_priv		*gbe_dev;
	struct netcp_tx_pipe	tx_pipe;
	struct gbe_slave	*slave;
	struct list_head	gbe_intf_list;
	unsigned long		active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};
| 465 | |||
/* Forward declarations; the module definitions presumably appear later in
 * this file (outside this chunk).
 */
static struct netcp_module gbe_module;
static struct netcp_module xgbe_module;

/* Statistic management */
struct netcp_ethtool_stat {
	char desc[ETH_GSTRING_LEN];	/* ethtool string for this counter */
	int type;			/* stats module index (GBE_STATS*_MODULE) */
	u32 size;			/* size of the hardware counter field */
	int offset;			/* offset within struct gbe_hw_stats */
};

/* Build one ethtool stat descriptor (name, module, size, offset) for a
 * gbe_hw_stats field in stats module A, B, C or D respectively.
 */
#define GBE_STATSA_INFO(field)		"GBE_A:"#field, GBE_STATSA_MODULE,\
				FIELD_SIZEOF(struct gbe_hw_stats, field), \
				offsetof(struct gbe_hw_stats, field)

#define GBE_STATSB_INFO(field)		"GBE_B:"#field, GBE_STATSB_MODULE,\
				FIELD_SIZEOF(struct gbe_hw_stats, field), \
				offsetof(struct gbe_hw_stats, field)

#define GBE_STATSC_INFO(field)		"GBE_C:"#field, GBE_STATSC_MODULE,\
				FIELD_SIZEOF(struct gbe_hw_stats, field), \
				offsetof(struct gbe_hw_stats, field)

#define GBE_STATSD_INFO(field)		"GBE_D:"#field, GBE_STATSD_MODULE,\
				FIELD_SIZEOF(struct gbe_hw_stats, field), \
				offsetof(struct gbe_hw_stats, field)
| 492 | |||
| 493 | static const struct netcp_ethtool_stat gbe13_et_stats[] = { | ||
| 494 | /* GBE module A */ | ||
| 495 | {GBE_STATSA_INFO(rx_good_frames)}, | ||
| 496 | {GBE_STATSA_INFO(rx_broadcast_frames)}, | ||
| 497 | {GBE_STATSA_INFO(rx_multicast_frames)}, | ||
| 498 | {GBE_STATSA_INFO(rx_pause_frames)}, | ||
| 499 | {GBE_STATSA_INFO(rx_crc_errors)}, | ||
| 500 | {GBE_STATSA_INFO(rx_align_code_errors)}, | ||
| 501 | {GBE_STATSA_INFO(rx_oversized_frames)}, | ||
| 502 | {GBE_STATSA_INFO(rx_jabber_frames)}, | ||
| 503 | {GBE_STATSA_INFO(rx_undersized_frames)}, | ||
| 504 | {GBE_STATSA_INFO(rx_fragments)}, | ||
| 505 | {GBE_STATSA_INFO(rx_bytes)}, | ||
| 506 | {GBE_STATSA_INFO(tx_good_frames)}, | ||
| 507 | {GBE_STATSA_INFO(tx_broadcast_frames)}, | ||
| 508 | {GBE_STATSA_INFO(tx_multicast_frames)}, | ||
| 509 | {GBE_STATSA_INFO(tx_pause_frames)}, | ||
| 510 | {GBE_STATSA_INFO(tx_deferred_frames)}, | ||
| 511 | {GBE_STATSA_INFO(tx_collision_frames)}, | ||
| 512 | {GBE_STATSA_INFO(tx_single_coll_frames)}, | ||
| 513 | {GBE_STATSA_INFO(tx_mult_coll_frames)}, | ||
| 514 | {GBE_STATSA_INFO(tx_excessive_collisions)}, | ||
| 515 | {GBE_STATSA_INFO(tx_late_collisions)}, | ||
| 516 | {GBE_STATSA_INFO(tx_underrun)}, | ||
| 517 | {GBE_STATSA_INFO(tx_carrier_sense_errors)}, | ||
| 518 | {GBE_STATSA_INFO(tx_bytes)}, | ||
| 519 | {GBE_STATSA_INFO(tx_64byte_frames)}, | ||
| 520 | {GBE_STATSA_INFO(tx_65_to_127byte_frames)}, | ||
| 521 | {GBE_STATSA_INFO(tx_128_to_255byte_frames)}, | ||
| 522 | {GBE_STATSA_INFO(tx_256_to_511byte_frames)}, | ||
| 523 | {GBE_STATSA_INFO(tx_512_to_1023byte_frames)}, | ||
| 524 | {GBE_STATSA_INFO(tx_1024byte_frames)}, | ||
| 525 | {GBE_STATSA_INFO(net_bytes)}, | ||
| 526 | {GBE_STATSA_INFO(rx_sof_overruns)}, | ||
| 527 | {GBE_STATSA_INFO(rx_mof_overruns)}, | ||
| 528 | {GBE_STATSA_INFO(rx_dma_overruns)}, | ||
| 529 | /* GBE module B */ | ||
| 530 | {GBE_STATSB_INFO(rx_good_frames)}, | ||
| 531 | {GBE_STATSB_INFO(rx_broadcast_frames)}, | ||
| 532 | {GBE_STATSB_INFO(rx_multicast_frames)}, | ||
| 533 | {GBE_STATSB_INFO(rx_pause_frames)}, | ||
| 534 | {GBE_STATSB_INFO(rx_crc_errors)}, | ||
| 535 | {GBE_STATSB_INFO(rx_align_code_errors)}, | ||
| 536 | {GBE_STATSB_INFO(rx_oversized_frames)}, | ||
| 537 | {GBE_STATSB_INFO(rx_jabber_frames)}, | ||
| 538 | {GBE_STATSB_INFO(rx_undersized_frames)}, | ||
| 539 | {GBE_STATSB_INFO(rx_fragments)}, | ||
| 540 | {GBE_STATSB_INFO(rx_bytes)}, | ||
| 541 | {GBE_STATSB_INFO(tx_good_frames)}, | ||
| 542 | {GBE_STATSB_INFO(tx_broadcast_frames)}, | ||
| 543 | {GBE_STATSB_INFO(tx_multicast_frames)}, | ||
| 544 | {GBE_STATSB_INFO(tx_pause_frames)}, | ||
| 545 | {GBE_STATSB_INFO(tx_deferred_frames)}, | ||
| 546 | {GBE_STATSB_INFO(tx_collision_frames)}, | ||
| 547 | {GBE_STATSB_INFO(tx_single_coll_frames)}, | ||
| 548 | {GBE_STATSB_INFO(tx_mult_coll_frames)}, | ||
| 549 | {GBE_STATSB_INFO(tx_excessive_collisions)}, | ||
| 550 | {GBE_STATSB_INFO(tx_late_collisions)}, | ||
| 551 | {GBE_STATSB_INFO(tx_underrun)}, | ||
| 552 | {GBE_STATSB_INFO(tx_carrier_sense_errors)}, | ||
| 553 | {GBE_STATSB_INFO(tx_bytes)}, | ||
| 554 | {GBE_STATSB_INFO(tx_64byte_frames)}, | ||
| 555 | {GBE_STATSB_INFO(tx_65_to_127byte_frames)}, | ||
| 556 | {GBE_STATSB_INFO(tx_128_to_255byte_frames)}, | ||
| 557 | {GBE_STATSB_INFO(tx_256_to_511byte_frames)}, | ||
| 558 | {GBE_STATSB_INFO(tx_512_to_1023byte_frames)}, | ||
| 559 | {GBE_STATSB_INFO(tx_1024byte_frames)}, | ||
| 560 | {GBE_STATSB_INFO(net_bytes)}, | ||
| 561 | {GBE_STATSB_INFO(rx_sof_overruns)}, | ||
| 562 | {GBE_STATSB_INFO(rx_mof_overruns)}, | ||
| 563 | {GBE_STATSB_INFO(rx_dma_overruns)}, | ||
| 564 | /* GBE module C */ | ||
| 565 | {GBE_STATSC_INFO(rx_good_frames)}, | ||
| 566 | {GBE_STATSC_INFO(rx_broadcast_frames)}, | ||
| 567 | {GBE_STATSC_INFO(rx_multicast_frames)}, | ||
| 568 | {GBE_STATSC_INFO(rx_pause_frames)}, | ||
| 569 | {GBE_STATSC_INFO(rx_crc_errors)}, | ||
| 570 | {GBE_STATSC_INFO(rx_align_code_errors)}, | ||
| 571 | {GBE_STATSC_INFO(rx_oversized_frames)}, | ||
| 572 | {GBE_STATSC_INFO(rx_jabber_frames)}, | ||
| 573 | {GBE_STATSC_INFO(rx_undersized_frames)}, | ||
| 574 | {GBE_STATSC_INFO(rx_fragments)}, | ||
| 575 | {GBE_STATSC_INFO(rx_bytes)}, | ||
| 576 | {GBE_STATSC_INFO(tx_good_frames)}, | ||
| 577 | {GBE_STATSC_INFO(tx_broadcast_frames)}, | ||
| 578 | {GBE_STATSC_INFO(tx_multicast_frames)}, | ||
| 579 | {GBE_STATSC_INFO(tx_pause_frames)}, | ||
| 580 | {GBE_STATSC_INFO(tx_deferred_frames)}, | ||
| 581 | {GBE_STATSC_INFO(tx_collision_frames)}, | ||
| 582 | {GBE_STATSC_INFO(tx_single_coll_frames)}, | ||
| 583 | {GBE_STATSC_INFO(tx_mult_coll_frames)}, | ||
| 584 | {GBE_STATSC_INFO(tx_excessive_collisions)}, | ||
| 585 | {GBE_STATSC_INFO(tx_late_collisions)}, | ||
| 586 | {GBE_STATSC_INFO(tx_underrun)}, | ||
| 587 | {GBE_STATSC_INFO(tx_carrier_sense_errors)}, | ||
| 588 | {GBE_STATSC_INFO(tx_bytes)}, | ||
| 589 | {GBE_STATSC_INFO(tx_64byte_frames)}, | ||
| 590 | {GBE_STATSC_INFO(tx_65_to_127byte_frames)}, | ||
| 591 | {GBE_STATSC_INFO(tx_128_to_255byte_frames)}, | ||
| 592 | {GBE_STATSC_INFO(tx_256_to_511byte_frames)}, | ||
| 593 | {GBE_STATSC_INFO(tx_512_to_1023byte_frames)}, | ||
| 594 | {GBE_STATSC_INFO(tx_1024byte_frames)}, | ||
| 595 | {GBE_STATSC_INFO(net_bytes)}, | ||
| 596 | {GBE_STATSC_INFO(rx_sof_overruns)}, | ||
| 597 | {GBE_STATSC_INFO(rx_mof_overruns)}, | ||
| 598 | {GBE_STATSC_INFO(rx_dma_overruns)}, | ||
| 599 | /* GBE module D */ | ||
| 600 | {GBE_STATSD_INFO(rx_good_frames)}, | ||
| 601 | {GBE_STATSD_INFO(rx_broadcast_frames)}, | ||
| 602 | {GBE_STATSD_INFO(rx_multicast_frames)}, | ||
| 603 | {GBE_STATSD_INFO(rx_pause_frames)}, | ||
| 604 | {GBE_STATSD_INFO(rx_crc_errors)}, | ||
| 605 | {GBE_STATSD_INFO(rx_align_code_errors)}, | ||
| 606 | {GBE_STATSD_INFO(rx_oversized_frames)}, | ||
| 607 | {GBE_STATSD_INFO(rx_jabber_frames)}, | ||
| 608 | {GBE_STATSD_INFO(rx_undersized_frames)}, | ||
| 609 | {GBE_STATSD_INFO(rx_fragments)}, | ||
| 610 | {GBE_STATSD_INFO(rx_bytes)}, | ||
| 611 | {GBE_STATSD_INFO(tx_good_frames)}, | ||
| 612 | {GBE_STATSD_INFO(tx_broadcast_frames)}, | ||
| 613 | {GBE_STATSD_INFO(tx_multicast_frames)}, | ||
| 614 | {GBE_STATSD_INFO(tx_pause_frames)}, | ||
| 615 | {GBE_STATSD_INFO(tx_deferred_frames)}, | ||
| 616 | {GBE_STATSD_INFO(tx_collision_frames)}, | ||
| 617 | {GBE_STATSD_INFO(tx_single_coll_frames)}, | ||
| 618 | {GBE_STATSD_INFO(tx_mult_coll_frames)}, | ||
| 619 | {GBE_STATSD_INFO(tx_excessive_collisions)}, | ||
| 620 | {GBE_STATSD_INFO(tx_late_collisions)}, | ||
| 621 | {GBE_STATSD_INFO(tx_underrun)}, | ||
| 622 | {GBE_STATSD_INFO(tx_carrier_sense_errors)}, | ||
| 623 | {GBE_STATSD_INFO(tx_bytes)}, | ||
| 624 | {GBE_STATSD_INFO(tx_64byte_frames)}, | ||
| 625 | {GBE_STATSD_INFO(tx_65_to_127byte_frames)}, | ||
| 626 | {GBE_STATSD_INFO(tx_128_to_255byte_frames)}, | ||
| 627 | {GBE_STATSD_INFO(tx_256_to_511byte_frames)}, | ||
| 628 | {GBE_STATSD_INFO(tx_512_to_1023byte_frames)}, | ||
| 629 | {GBE_STATSD_INFO(tx_1024byte_frames)}, | ||
| 630 | {GBE_STATSD_INFO(net_bytes)}, | ||
| 631 | {GBE_STATSD_INFO(rx_sof_overruns)}, | ||
| 632 | {GBE_STATSD_INFO(rx_mof_overruns)}, | ||
| 633 | {GBE_STATSD_INFO(rx_dma_overruns)}, | ||
| 634 | }; | ||
| 635 | |||
/* Ethtool statistics descriptors for the three XGBE statistics modules.
 * Each macro expands to: name string, module id, field size, field offset
 * within struct xgbe_hw_stats.
 * NOTE(review): the name prefixes are "GBE_n:" rather than "XGBE_n:";
 * presumably kept for userspace compatibility — confirm before changing.
 */
#define XGBE_STATS0_INFO(field)	"GBE_0:"#field, XGBE_STATS0_MODULE, \
				FIELD_SIZEOF(struct xgbe_hw_stats, field), \
				offsetof(struct xgbe_hw_stats, field)

#define XGBE_STATS1_INFO(field)	"GBE_1:"#field, XGBE_STATS1_MODULE, \
				FIELD_SIZEOF(struct xgbe_hw_stats, field), \
				offsetof(struct xgbe_hw_stats, field)

#define XGBE_STATS2_INFO(field)	"GBE_2:"#field, XGBE_STATS2_MODULE, \
				FIELD_SIZEOF(struct xgbe_hw_stats, field), \
				offsetof(struct xgbe_hw_stats, field)

/* Full ethtool statistics table for the 10G (XGBE) subsystem */
static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
	/* XGBE module 0 */
	{XGBE_STATS0_INFO(rx_good_frames)},
	{XGBE_STATS0_INFO(rx_broadcast_frames)},
	{XGBE_STATS0_INFO(rx_multicast_frames)},
	{XGBE_STATS0_INFO(rx_oversized_frames)},
	{XGBE_STATS0_INFO(rx_undersized_frames)},
	{XGBE_STATS0_INFO(overrun_type4)},
	{XGBE_STATS0_INFO(overrun_type5)},
	{XGBE_STATS0_INFO(rx_bytes)},
	{XGBE_STATS0_INFO(tx_good_frames)},
	{XGBE_STATS0_INFO(tx_broadcast_frames)},
	{XGBE_STATS0_INFO(tx_multicast_frames)},
	{XGBE_STATS0_INFO(tx_bytes)},
	{XGBE_STATS0_INFO(tx_64byte_frames)},
	{XGBE_STATS0_INFO(tx_65_to_127byte_frames)},
	{XGBE_STATS0_INFO(tx_128_to_255byte_frames)},
	{XGBE_STATS0_INFO(tx_256_to_511byte_frames)},
	{XGBE_STATS0_INFO(tx_512_to_1023byte_frames)},
	{XGBE_STATS0_INFO(tx_1024byte_frames)},
	{XGBE_STATS0_INFO(net_bytes)},
	{XGBE_STATS0_INFO(rx_sof_overruns)},
	{XGBE_STATS0_INFO(rx_mof_overruns)},
	{XGBE_STATS0_INFO(rx_dma_overruns)},
	/* XGBE module 1 */
	{XGBE_STATS1_INFO(rx_good_frames)},
	{XGBE_STATS1_INFO(rx_broadcast_frames)},
	{XGBE_STATS1_INFO(rx_multicast_frames)},
	{XGBE_STATS1_INFO(rx_pause_frames)},
	{XGBE_STATS1_INFO(rx_crc_errors)},
	{XGBE_STATS1_INFO(rx_align_code_errors)},
	{XGBE_STATS1_INFO(rx_oversized_frames)},
	{XGBE_STATS1_INFO(rx_jabber_frames)},
	{XGBE_STATS1_INFO(rx_undersized_frames)},
	{XGBE_STATS1_INFO(rx_fragments)},
	{XGBE_STATS1_INFO(overrun_type4)},
	{XGBE_STATS1_INFO(overrun_type5)},
	{XGBE_STATS1_INFO(rx_bytes)},
	{XGBE_STATS1_INFO(tx_good_frames)},
	{XGBE_STATS1_INFO(tx_broadcast_frames)},
	{XGBE_STATS1_INFO(tx_multicast_frames)},
	{XGBE_STATS1_INFO(tx_pause_frames)},
	{XGBE_STATS1_INFO(tx_deferred_frames)},
	{XGBE_STATS1_INFO(tx_collision_frames)},
	{XGBE_STATS1_INFO(tx_single_coll_frames)},
	{XGBE_STATS1_INFO(tx_mult_coll_frames)},
	{XGBE_STATS1_INFO(tx_excessive_collisions)},
	{XGBE_STATS1_INFO(tx_late_collisions)},
	{XGBE_STATS1_INFO(tx_underrun)},
	{XGBE_STATS1_INFO(tx_carrier_sense_errors)},
	{XGBE_STATS1_INFO(tx_bytes)},
	{XGBE_STATS1_INFO(tx_64byte_frames)},
	{XGBE_STATS1_INFO(tx_65_to_127byte_frames)},
	{XGBE_STATS1_INFO(tx_128_to_255byte_frames)},
	{XGBE_STATS1_INFO(tx_256_to_511byte_frames)},
	{XGBE_STATS1_INFO(tx_512_to_1023byte_frames)},
	{XGBE_STATS1_INFO(tx_1024byte_frames)},
	{XGBE_STATS1_INFO(net_bytes)},
	{XGBE_STATS1_INFO(rx_sof_overruns)},
	{XGBE_STATS1_INFO(rx_mof_overruns)},
	{XGBE_STATS1_INFO(rx_dma_overruns)},
	/* XGBE module 2 */
	{XGBE_STATS2_INFO(rx_good_frames)},
	{XGBE_STATS2_INFO(rx_broadcast_frames)},
	{XGBE_STATS2_INFO(rx_multicast_frames)},
	{XGBE_STATS2_INFO(rx_pause_frames)},
	{XGBE_STATS2_INFO(rx_crc_errors)},
	{XGBE_STATS2_INFO(rx_align_code_errors)},
	{XGBE_STATS2_INFO(rx_oversized_frames)},
	{XGBE_STATS2_INFO(rx_jabber_frames)},
	{XGBE_STATS2_INFO(rx_undersized_frames)},
	{XGBE_STATS2_INFO(rx_fragments)},
	{XGBE_STATS2_INFO(overrun_type4)},
	{XGBE_STATS2_INFO(overrun_type5)},
	{XGBE_STATS2_INFO(rx_bytes)},
	{XGBE_STATS2_INFO(tx_good_frames)},
	{XGBE_STATS2_INFO(tx_broadcast_frames)},
	{XGBE_STATS2_INFO(tx_multicast_frames)},
	{XGBE_STATS2_INFO(tx_pause_frames)},
	{XGBE_STATS2_INFO(tx_deferred_frames)},
	{XGBE_STATS2_INFO(tx_collision_frames)},
	{XGBE_STATS2_INFO(tx_single_coll_frames)},
	{XGBE_STATS2_INFO(tx_mult_coll_frames)},
	{XGBE_STATS2_INFO(tx_excessive_collisions)},
	{XGBE_STATS2_INFO(tx_late_collisions)},
	{XGBE_STATS2_INFO(tx_underrun)},
	{XGBE_STATS2_INFO(tx_carrier_sense_errors)},
	{XGBE_STATS2_INFO(tx_bytes)},
	{XGBE_STATS2_INFO(tx_64byte_frames)},
	{XGBE_STATS2_INFO(tx_65_to_127byte_frames)},
	{XGBE_STATS2_INFO(tx_128_to_255byte_frames)},
	{XGBE_STATS2_INFO(tx_256_to_511byte_frames)},
	{XGBE_STATS2_INFO(tx_512_to_1023byte_frames)},
	{XGBE_STATS2_INFO(tx_1024byte_frames)},
	{XGBE_STATS2_INFO(net_bytes)},
	{XGBE_STATS2_INFO(rx_sof_overruns)},
	{XGBE_STATS2_INFO(rx_mof_overruns)},
	{XGBE_STATS2_INFO(rx_dma_overruns)},
};
| 747 | |||
/* Iterate over all network interfaces attached to this GBE device */
#define for_each_intf(i, priv) \
	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)

/* Iterate over slave ports that have no network interface of their own */
#define for_each_sec_slave(slave, priv) \
	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)

/* First secondary slave; caller must ensure the list is non-empty */
#define first_sec_slave(priv) \
	list_first_entry(&priv->secondary_slaves, \
			 struct gbe_slave, slave_list)
| 757 | |||
| 758 | static void keystone_get_drvinfo(struct net_device *ndev, | ||
| 759 | struct ethtool_drvinfo *info) | ||
| 760 | { | ||
| 761 | strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver)); | ||
| 762 | strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version)); | ||
| 763 | } | ||
| 764 | |||
| 765 | static u32 keystone_get_msglevel(struct net_device *ndev) | ||
| 766 | { | ||
| 767 | struct netcp_intf *netcp = netdev_priv(ndev); | ||
| 768 | |||
| 769 | return netcp->msg_enable; | ||
| 770 | } | ||
| 771 | |||
| 772 | static void keystone_set_msglevel(struct net_device *ndev, u32 value) | ||
| 773 | { | ||
| 774 | struct netcp_intf *netcp = netdev_priv(ndev); | ||
| 775 | |||
| 776 | netcp->msg_enable = value; | ||
| 777 | } | ||
| 778 | |||
| 779 | static void keystone_get_stat_strings(struct net_device *ndev, | ||
| 780 | uint32_t stringset, uint8_t *data) | ||
| 781 | { | ||
| 782 | struct netcp_intf *netcp = netdev_priv(ndev); | ||
| 783 | struct gbe_intf *gbe_intf; | ||
| 784 | struct gbe_priv *gbe_dev; | ||
| 785 | int i; | ||
| 786 | |||
| 787 | gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); | ||
| 788 | if (!gbe_intf) | ||
| 789 | return; | ||
| 790 | gbe_dev = gbe_intf->gbe_dev; | ||
| 791 | |||
| 792 | switch (stringset) { | ||
| 793 | case ETH_SS_STATS: | ||
| 794 | for (i = 0; i < gbe_dev->num_et_stats; i++) { | ||
| 795 | memcpy(data, gbe_dev->et_stats[i].desc, | ||
| 796 | ETH_GSTRING_LEN); | ||
| 797 | data += ETH_GSTRING_LEN; | ||
| 798 | } | ||
| 799 | break; | ||
| 800 | case ETH_SS_TEST: | ||
| 801 | break; | ||
| 802 | } | ||
| 803 | } | ||
| 804 | |||
| 805 | static int keystone_get_sset_count(struct net_device *ndev, int stringset) | ||
| 806 | { | ||
| 807 | struct netcp_intf *netcp = netdev_priv(ndev); | ||
| 808 | struct gbe_intf *gbe_intf; | ||
| 809 | struct gbe_priv *gbe_dev; | ||
| 810 | |||
| 811 | gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); | ||
| 812 | if (!gbe_intf) | ||
| 813 | return -EINVAL; | ||
| 814 | gbe_dev = gbe_intf->gbe_dev; | ||
| 815 | |||
| 816 | switch (stringset) { | ||
| 817 | case ETH_SS_TEST: | ||
| 818 | return 0; | ||
| 819 | case ETH_SS_STATS: | ||
| 820 | return gbe_dev->num_et_stats; | ||
| 821 | default: | ||
| 822 | return -EINVAL; | ||
| 823 | } | ||
| 824 | } | ||
| 825 | |||
| 826 | static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data) | ||
| 827 | { | ||
| 828 | void __iomem *base = NULL; | ||
| 829 | u32 __iomem *p; | ||
| 830 | u32 tmp = 0; | ||
| 831 | int i; | ||
| 832 | |||
| 833 | for (i = 0; i < gbe_dev->num_et_stats; i++) { | ||
| 834 | base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type]; | ||
| 835 | p = base + gbe_dev->et_stats[i].offset; | ||
| 836 | tmp = readl(p); | ||
| 837 | gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp; | ||
| 838 | if (data) | ||
| 839 | data[i] = gbe_dev->hw_stats[i]; | ||
| 840 | /* write-to-decrement: | ||
| 841 | * new register value = old register value - write value | ||
| 842 | */ | ||
| 843 | writel(tmp, p); | ||
| 844 | } | ||
| 845 | } | ||
| 846 | |||
/* NetCP 1.4 variant of gbe_update_stats(): the hardware exposes only two
 * statistics register banks at a time, with GBE_STATS_CD_SEL switching
 * between the A/B pair and the C/D pair.  The et_stats table is laid out
 * so the first half belongs to the A/B pair and the second half to C/D.
 */
static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
{
	void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
	void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
	u64 *hw_stats = &gbe_dev->hw_stats[0];
	void __iomem *base = NULL;
	u32 __iomem *p;
	u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
	int i, j, pair;

	for (pair = 0; pair < 2; pair++) {
		val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));

		/* pair 0 selects modules A/B, pair 1 selects C/D */
		if (pair == 0)
			val &= ~GBE_STATS_CD_SEL;
		else
			val |= GBE_STATS_CD_SEL;

		/* make the stat modules visible */
		writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));

		for (i = 0; i < pair_size; i++) {
			/* j indexes the et_stats half for this pair */
			j = pair * pair_size + i;
			switch (gbe_dev->et_stats[j].type) {
			case GBE_STATSA_MODULE:
			case GBE_STATSC_MODULE:
				base = gbe_statsa;
				break;
			case GBE_STATSB_MODULE:
			case GBE_STATSD_MODULE:
				base = gbe_statsb;
				break;
			}

			p = base + gbe_dev->et_stats[j].offset;
			tmp = readl(p);
			hw_stats[j] += tmp;
			if (data)
				data[j] = hw_stats[j];
			/* write-to-decrement:
			 * new register value = old register value - write value
			 */
			writel(tmp, p);
		}
	}
}
| 893 | |||
| 894 | static void keystone_get_ethtool_stats(struct net_device *ndev, | ||
| 895 | struct ethtool_stats *stats, | ||
| 896 | uint64_t *data) | ||
| 897 | { | ||
| 898 | struct netcp_intf *netcp = netdev_priv(ndev); | ||
| 899 | struct gbe_intf *gbe_intf; | ||
| 900 | struct gbe_priv *gbe_dev; | ||
| 901 | |||
| 902 | gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); | ||
| 903 | if (!gbe_intf) | ||
| 904 | return; | ||
| 905 | |||
| 906 | gbe_dev = gbe_intf->gbe_dev; | ||
| 907 | spin_lock_bh(&gbe_dev->hw_stats_lock); | ||
| 908 | if (gbe_dev->ss_version == GBE_SS_VERSION_14) | ||
| 909 | gbe_update_stats_ver14(gbe_dev, data); | ||
| 910 | else | ||
| 911 | gbe_update_stats(gbe_dev, data); | ||
| 912 | spin_unlock_bh(&gbe_dev->hw_stats_lock); | ||
| 913 | } | ||
| 914 | |||
| 915 | static int keystone_get_settings(struct net_device *ndev, | ||
| 916 | struct ethtool_cmd *cmd) | ||
| 917 | { | ||
| 918 | struct netcp_intf *netcp = netdev_priv(ndev); | ||
| 919 | struct phy_device *phy = ndev->phydev; | ||
| 920 | struct gbe_intf *gbe_intf; | ||
| 921 | int ret; | ||
| 922 | |||
| 923 | if (!phy) | ||
| 924 | return -EINVAL; | ||
| 925 | |||
| 926 | gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); | ||
| 927 | if (!gbe_intf) | ||
| 928 | return -EINVAL; | ||
| 929 | |||
| 930 | if (!gbe_intf->slave) | ||
| 931 | return -EINVAL; | ||
| 932 | |||
| 933 | ret = phy_ethtool_gset(phy, cmd); | ||
| 934 | if (!ret) | ||
| 935 | cmd->port = gbe_intf->slave->phy_port_t; | ||
| 936 | |||
| 937 | return ret; | ||
| 938 | } | ||
| 939 | |||
| 940 | static int keystone_set_settings(struct net_device *ndev, | ||
| 941 | struct ethtool_cmd *cmd) | ||
| 942 | { | ||
| 943 | struct netcp_intf *netcp = netdev_priv(ndev); | ||
| 944 | struct phy_device *phy = ndev->phydev; | ||
| 945 | struct gbe_intf *gbe_intf; | ||
| 946 | u32 features = cmd->advertising & cmd->supported; | ||
| 947 | |||
| 948 | if (!phy) | ||
| 949 | return -EINVAL; | ||
| 950 | |||
| 951 | gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); | ||
| 952 | if (!gbe_intf) | ||
| 953 | return -EINVAL; | ||
| 954 | |||
| 955 | if (!gbe_intf->slave) | ||
| 956 | return -EINVAL; | ||
| 957 | |||
| 958 | if (cmd->port != gbe_intf->slave->phy_port_t) { | ||
| 959 | if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP)) | ||
| 960 | return -EINVAL; | ||
| 961 | |||
| 962 | if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI)) | ||
| 963 | return -EINVAL; | ||
| 964 | |||
| 965 | if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC)) | ||
| 966 | return -EINVAL; | ||
| 967 | |||
| 968 | if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII)) | ||
| 969 | return -EINVAL; | ||
| 970 | |||
| 971 | if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE)) | ||
| 972 | return -EINVAL; | ||
| 973 | } | ||
| 974 | |||
| 975 | gbe_intf->slave->phy_port_t = cmd->port; | ||
| 976 | return phy_ethtool_sset(phy, cmd); | ||
| 977 | } | ||
| 978 | |||
/* ethtool operations shared by all NetCP GBE/XGBE network interfaces.
 * Callbacks that need GBE state look it up via the per-interface module
 * data and fail (or no-op) quietly when the module is not attached.
 */
static const struct ethtool_ops keystone_ethtool_ops = {
	.get_drvinfo		= keystone_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= keystone_get_msglevel,
	.set_msglevel		= keystone_set_msglevel,
	.get_strings		= keystone_get_stat_strings,
	.get_sset_count		= keystone_get_sset_count,
	.get_ethtool_stats	= keystone_get_ethtool_stats,
	.get_settings		= keystone_get_settings,
	.set_settings		= keystone_set_settings,
};
| 990 | |||
/* Pack a 6-byte MAC address into the two port SA registers:
 * mac_hi holds bytes 0-3 (byte 0 in the least significant byte),
 * mac_lo holds bytes 4-5.
 */
#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
| 994 | |||
| 995 | static void gbe_set_slave_mac(struct gbe_slave *slave, | ||
| 996 | struct gbe_intf *gbe_intf) | ||
| 997 | { | ||
| 998 | struct net_device *ndev = gbe_intf->ndev; | ||
| 999 | |||
| 1000 | writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi)); | ||
| 1001 | writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo)); | ||
| 1002 | } | ||
| 1003 | |||
| 1004 | static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num) | ||
| 1005 | { | ||
| 1006 | if (priv->host_port == 0) | ||
| 1007 | return slave_num + 1; | ||
| 1008 | |||
| 1009 | return slave_num; | ||
| 1010 | } | ||
| 1011 | |||
/* Apply a link up/down transition to a slave port: program the MAC
 * sliver's mac_control, set the ALE port forwarding state, and toggle
 * the netdev carrier (when a netdev is associated with the slave).
 */
static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
					  struct net_device *ndev,
					  struct gbe_slave *slave,
					  int up)
{
	struct phy_device *phy = slave->phy;
	u32 mac_control = 0;

	if (up) {
		mac_control = slave->mac_control;
		/* select (X)GIG mode bits from the negotiated PHY speed */
		if (phy && (phy->speed == SPEED_1000)) {
			mac_control |= MACSL_GIG_MODE;
			mac_control &= ~MACSL_XGIG_MODE;
		} else if (phy && (phy->speed == SPEED_10000)) {
			mac_control |= MACSL_XGIG_MODE;
			mac_control &= ~MACSL_GIG_MODE;
		}

		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
						 mac_control));

		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
				     ALE_PORT_STATE,
				     ALE_PORT_STATE_FORWARD);

		if (ndev && slave->open)
			netif_carrier_on(ndev);
	} else {
		/* mac_control is still 0 here: the write clears the
		 * sliver's control register on link down
		 */
		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
						 mac_control));
		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
				     ALE_PORT_STATE,
				     ALE_PORT_STATE_DISABLE);
		if (ndev)
			netif_carrier_off(ndev);
	}

	if (phy)
		phy_print_status(phy);
}
| 1052 | |||
| 1053 | static bool gbe_phy_link_status(struct gbe_slave *slave) | ||
| 1054 | { | ||
| 1055 | return !slave->phy || slave->phy->link; | ||
| 1056 | } | ||
| 1057 | |||
/* Recompute the combined PHY + SGMII link state for a slave and, when it
 * differs from the recorded state, run the link up/down action.  XGMII
 * links have no SGMII status, so only the PHY state counts for them.
 */
static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
					  struct gbe_slave *slave,
					  struct net_device *ndev)
{
	int sp = slave->slave_num;
	int phy_link_state, sgmii_link_state = 1, link_state;

	if (!slave->open)
		return;

	if (!SLAVE_LINK_IS_XGMII(slave))
		sgmii_link_state = netcp_sgmii_get_port_link(SGMII_BASE(sp),
							     sp);
	phy_link_state = gbe_phy_link_status(slave);
	/* both states are 0/1, so bitwise AND combines them correctly */
	link_state = phy_link_state & sgmii_link_state;

	/* atomic_xchg returns the previous state; act only on a change */
	if (atomic_xchg(&slave->link_state, link_state) != link_state)
		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
					      link_state);
}
| 1078 | |||
| 1079 | static void xgbe_adjust_link(struct net_device *ndev) | ||
| 1080 | { | ||
| 1081 | struct netcp_intf *netcp = netdev_priv(ndev); | ||
| 1082 | struct gbe_intf *gbe_intf; | ||
| 1083 | |||
| 1084 | gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp); | ||
| 1085 | if (!gbe_intf) | ||
| 1086 | return; | ||
| 1087 | |||
| 1088 | netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave, | ||
| 1089 | ndev); | ||
| 1090 | } | ||
| 1091 | |||
| 1092 | static void gbe_adjust_link(struct net_device *ndev) | ||
| 1093 | { | ||
| 1094 | struct netcp_intf *netcp = netdev_priv(ndev); | ||
| 1095 | struct gbe_intf *gbe_intf; | ||
| 1096 | |||
| 1097 | gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); | ||
| 1098 | if (!gbe_intf) | ||
| 1099 | return; | ||
| 1100 | |||
| 1101 | netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave, | ||
| 1102 | ndev); | ||
| 1103 | } | ||
| 1104 | |||
| 1105 | static void gbe_adjust_link_sec_slaves(struct net_device *ndev) | ||
| 1106 | { | ||
| 1107 | struct gbe_priv *gbe_dev = netdev_priv(ndev); | ||
| 1108 | struct gbe_slave *slave; | ||
| 1109 | |||
| 1110 | for_each_sec_slave(slave, gbe_dev) | ||
| 1111 | netcp_ethss_update_link_state(gbe_dev, slave, NULL); | ||
| 1112 | } | ||
| 1113 | |||
| 1114 | /* Reset EMAC | ||
| 1115 | * Soft reset is set and polled until clear, or until a timeout occurs | ||
| 1116 | */ | ||
| 1117 | static int gbe_port_reset(struct gbe_slave *slave) | ||
| 1118 | { | ||
| 1119 | u32 i, v; | ||
| 1120 | |||
| 1121 | /* Set the soft reset bit */ | ||
| 1122 | writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset)); | ||
| 1123 | |||
| 1124 | /* Wait for the bit to clear */ | ||
| 1125 | for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) { | ||
| 1126 | v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset)); | ||
| 1127 | if ((v & SOFT_RESET_MASK) != SOFT_RESET) | ||
| 1128 | return 0; | ||
| 1129 | } | ||
| 1130 | |||
| 1131 | /* Timeout on the reset */ | ||
| 1132 | return GMACSL_RET_WARN_RESET_INCOMPLETE; | ||
| 1133 | } | ||
| 1134 | |||
| 1135 | /* Configure EMAC */ | ||
| 1136 | static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave, | ||
| 1137 | int max_rx_len) | ||
| 1138 | { | ||
| 1139 | u32 xgmii_mode; | ||
| 1140 | |||
| 1141 | if (max_rx_len > NETCP_MAX_FRAME_SIZE) | ||
| 1142 | max_rx_len = NETCP_MAX_FRAME_SIZE; | ||
| 1143 | |||
| 1144 | /* Enable correct MII mode at SS level */ | ||
| 1145 | if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) && | ||
| 1146 | (slave->link_interface >= XGMII_LINK_MAC_PHY)) { | ||
| 1147 | xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control)); | ||
| 1148 | xgmii_mode |= (1 << slave->slave_num); | ||
| 1149 | writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control)); | ||
| 1150 | } | ||
| 1151 | |||
| 1152 | writel(max_rx_len, GBE_REG_ADDR(slave, emac_regs, rx_maxlen)); | ||
| 1153 | writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control)); | ||
| 1154 | } | ||
| 1155 | |||
| 1156 | static void gbe_slave_stop(struct gbe_intf *intf) | ||
| 1157 | { | ||
| 1158 | struct gbe_priv *gbe_dev = intf->gbe_dev; | ||
| 1159 | struct gbe_slave *slave = intf->slave; | ||
| 1160 | |||
| 1161 | gbe_port_reset(slave); | ||
| 1162 | /* Disable forwarding */ | ||
| 1163 | cpsw_ale_control_set(gbe_dev->ale, slave->port_num, | ||
| 1164 | ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); | ||
| 1165 | cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast, | ||
| 1166 | 1 << slave->port_num, 0, 0); | ||
| 1167 | |||
| 1168 | if (!slave->phy) | ||
| 1169 | return; | ||
| 1170 | |||
| 1171 | phy_stop(slave->phy); | ||
| 1172 | phy_disconnect(slave->phy); | ||
| 1173 | slave->phy = NULL; | ||
| 1174 | } | ||
| 1175 | |||
| 1176 | static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave) | ||
| 1177 | { | ||
| 1178 | void __iomem *sgmii_port_regs; | ||
| 1179 | |||
| 1180 | sgmii_port_regs = priv->sgmii_port_regs; | ||
| 1181 | if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) | ||
| 1182 | sgmii_port_regs = priv->sgmii_port34_regs; | ||
| 1183 | |||
| 1184 | if (!SLAVE_LINK_IS_XGMII(slave)) { | ||
| 1185 | netcp_sgmii_reset(sgmii_port_regs, slave->slave_num); | ||
| 1186 | netcp_sgmii_config(sgmii_port_regs, slave->slave_num, | ||
| 1187 | slave->link_interface); | ||
| 1188 | } | ||
| 1189 | } | ||
| 1190 | |||
| 1191 | static int gbe_slave_open(struct gbe_intf *gbe_intf) | ||
| 1192 | { | ||
| 1193 | struct gbe_priv *priv = gbe_intf->gbe_dev; | ||
| 1194 | struct gbe_slave *slave = gbe_intf->slave; | ||
| 1195 | phy_interface_t phy_mode; | ||
| 1196 | bool has_phy = false; | ||
| 1197 | |||
| 1198 | void (*hndlr)(struct net_device *) = gbe_adjust_link; | ||
| 1199 | |||
| 1200 | gbe_sgmii_config(priv, slave); | ||
| 1201 | gbe_port_reset(slave); | ||
| 1202 | gbe_port_config(priv, slave, priv->rx_packet_max); | ||
| 1203 | gbe_set_slave_mac(slave, gbe_intf); | ||
| 1204 | /* enable forwarding */ | ||
| 1205 | cpsw_ale_control_set(priv->ale, slave->port_num, | ||
| 1206 | ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); | ||
| 1207 | cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast, | ||
| 1208 | 1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2); | ||
| 1209 | |||
| 1210 | if (slave->link_interface == SGMII_LINK_MAC_PHY) { | ||
| 1211 | has_phy = true; | ||
| 1212 | phy_mode = PHY_INTERFACE_MODE_SGMII; | ||
| 1213 | slave->phy_port_t = PORT_MII; | ||
| 1214 | } else if (slave->link_interface == XGMII_LINK_MAC_PHY) { | ||
| 1215 | has_phy = true; | ||
| 1216 | phy_mode = PHY_INTERFACE_MODE_NA; | ||
| 1217 | slave->phy_port_t = PORT_FIBRE; | ||
| 1218 | } | ||
| 1219 | |||
| 1220 | if (has_phy) { | ||
| 1221 | if (priv->ss_version == XGBE_SS_VERSION_10) | ||
| 1222 | hndlr = xgbe_adjust_link; | ||
| 1223 | |||
| 1224 | slave->phy = of_phy_connect(gbe_intf->ndev, | ||
| 1225 | slave->phy_node, | ||
| 1226 | hndlr, 0, | ||
| 1227 | phy_mode); | ||
| 1228 | if (!slave->phy) { | ||
| 1229 | dev_err(priv->dev, "phy not found on slave %d\n", | ||
| 1230 | slave->slave_num); | ||
| 1231 | return -ENODEV; | ||
| 1232 | } | ||
| 1233 | dev_dbg(priv->dev, "phy found: id is: 0x%s\n", | ||
| 1234 | dev_name(&slave->phy->dev)); | ||
| 1235 | phy_start(slave->phy); | ||
| 1236 | phy_read_status(slave->phy); | ||
| 1237 | } | ||
| 1238 | return 0; | ||
| 1239 | } | ||
| 1240 | |||
/* One-time host (port 0) initialization: program the RX max frame
 * length and start the ALE with this driver's flood/VLAN defaults.
 */
static void gbe_init_host_port(struct gbe_priv *priv)
{
	int bypass_en = 1;

	/* Max length register */
	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
						  rx_maxlen));

	cpsw_ale_start(priv->ale);

	/* ALE bypass stays on unless the ALE is explicitly enabled */
	if (priv->enable_ale)
		bypass_en = 0;

	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);

	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);

	cpsw_ale_control_set(priv->ale, priv->host_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
			     GBE_PORT_MASK(priv->ale_ports));

	/* NOTE(review): unknown-mcast flood uses a narrower mask
	 * (ale_ports - 1) than the other settings — presumably to exclude
	 * one port from flooding; confirm against GBE_PORT_MASK.
	 */
	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
			     GBE_PORT_MASK(priv->ale_ports - 1));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
			     GBE_PORT_MASK(priv->ale_ports));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNTAGGED_EGRESS,
			     GBE_PORT_MASK(priv->ale_ports));
}
| 1276 | |||
/* Install a multicast/broadcast ALE entry for all switch ports, both
 * untagged and for every VLAN currently active on the interface.
 */
static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_add_mcast(gbe_dev->ale, addr,
			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
			   ALE_MCAST_FWD_2);
	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_add_mcast(gbe_dev->ale, addr,
				   GBE_PORT_MASK(gbe_dev->ale_ports),
				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
	}
}
| 1291 | |||
/* Install a unicast ALE entry pointing at the host port, untagged and
 * for every VLAN currently active on the interface.
 */
static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
				   ALE_VLAN, vlan_id);
}
| 1303 | |||
/* Remove the multicast ALE entries (untagged and per active VLAN) that
 * gbe_add_mcast_addr() installed for this address.
 */
static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
	}
}
| 1315 | |||
/* Remove the unicast ALE entries (untagged and per active VLAN) that
 * gbe_add_ucast_addr() installed for this address.
 */
static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
				   ALE_VLAN, vlan_id);
	}
}
| 1328 | |||
| 1329 | static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr) | ||
| 1330 | { | ||
| 1331 | struct gbe_intf *gbe_intf = intf_priv; | ||
| 1332 | struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; | ||
| 1333 | |||
| 1334 | dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n", | ||
| 1335 | naddr->addr, naddr->type); | ||
| 1336 | |||
| 1337 | switch (naddr->type) { | ||
| 1338 | case ADDR_MCAST: | ||
| 1339 | case ADDR_BCAST: | ||
| 1340 | gbe_add_mcast_addr(gbe_intf, naddr->addr); | ||
| 1341 | break; | ||
| 1342 | case ADDR_UCAST: | ||
| 1343 | case ADDR_DEV: | ||
| 1344 | gbe_add_ucast_addr(gbe_intf, naddr->addr); | ||
| 1345 | break; | ||
| 1346 | case ADDR_ANY: | ||
| 1347 | /* nothing to do for promiscuous */ | ||
| 1348 | default: | ||
| 1349 | break; | ||
| 1350 | } | ||
| 1351 | |||
| 1352 | return 0; | ||
| 1353 | } | ||
| 1354 | |||
| 1355 | static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr) | ||
| 1356 | { | ||
| 1357 | struct gbe_intf *gbe_intf = intf_priv; | ||
| 1358 | struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; | ||
| 1359 | |||
| 1360 | dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n", | ||
| 1361 | naddr->addr, naddr->type); | ||
| 1362 | |||
| 1363 | switch (naddr->type) { | ||
| 1364 | case ADDR_MCAST: | ||
| 1365 | case ADDR_BCAST: | ||
| 1366 | gbe_del_mcast_addr(gbe_intf, naddr->addr); | ||
| 1367 | break; | ||
| 1368 | case ADDR_UCAST: | ||
| 1369 | case ADDR_DEV: | ||
| 1370 | gbe_del_ucast_addr(gbe_intf, naddr->addr); | ||
| 1371 | break; | ||
| 1372 | case ADDR_ANY: | ||
| 1373 | /* nothing to do for promiscuous */ | ||
| 1374 | default: | ||
| 1375 | break; | ||
| 1376 | } | ||
| 1377 | |||
| 1378 | return 0; | ||
| 1379 | } | ||
| 1380 | |||
/* netcp add_vid callback: remember VLAN @vid in this interface's
 * active_vlans bitmap (used by the add/del address helpers above) and
 * program the corresponding ALE VLAN entry. Always returns 0.
 */
static int gbe_add_vid(void *intf_priv, int vid)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	set_bit(vid, gbe_intf->active_vlans);

	/* Port masks derived from ale_ports; the last mask uses
	 * (ale_ports - 1) — NOTE(review): exact member/mcast/untag
	 * semantics are defined by cpsw_ale_add_vlan(), confirm against
	 * cpsw_ale.h before changing. */
	cpsw_ale_add_vlan(gbe_dev->ale, vid,
			  GBE_PORT_MASK(gbe_dev->ale_ports),
			  GBE_MASK_NO_PORTS,
			  GBE_PORT_MASK(gbe_dev->ale_ports),
			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));

	return 0;
}
| 1396 | |||
/* netcp del_vid callback: delete the ALE VLAN entry for @vid, then
 * clear the interface's bookkeeping bit. Always returns 0.
 */
static int gbe_del_vid(void *intf_priv, int vid)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
	clear_bit(vid, gbe_intf->active_vlans);
	return 0;
}
| 1406 | |||
| 1407 | static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd) | ||
| 1408 | { | ||
| 1409 | struct gbe_intf *gbe_intf = intf_priv; | ||
| 1410 | struct phy_device *phy = gbe_intf->slave->phy; | ||
| 1411 | int ret = -EOPNOTSUPP; | ||
| 1412 | |||
| 1413 | if (phy) | ||
| 1414 | ret = phy_mii_ioctl(phy, req, cmd); | ||
| 1415 | |||
| 1416 | return ret; | ||
| 1417 | } | ||
| 1418 | |||
| 1419 | static void netcp_ethss_timer(unsigned long arg) | ||
| 1420 | { | ||
| 1421 | struct gbe_priv *gbe_dev = (struct gbe_priv *)arg; | ||
| 1422 | struct gbe_intf *gbe_intf; | ||
| 1423 | struct gbe_slave *slave; | ||
| 1424 | |||
| 1425 | /* Check & update SGMII link state of interfaces */ | ||
| 1426 | for_each_intf(gbe_intf, gbe_dev) { | ||
| 1427 | if (!gbe_intf->slave->open) | ||
| 1428 | continue; | ||
| 1429 | netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave, | ||
| 1430 | gbe_intf->ndev); | ||
| 1431 | } | ||
| 1432 | |||
| 1433 | /* Check & update SGMII link state of secondary ports */ | ||
| 1434 | for_each_sec_slave(slave, gbe_dev) { | ||
| 1435 | netcp_ethss_update_link_state(gbe_dev, slave, NULL); | ||
| 1436 | } | ||
| 1437 | |||
| 1438 | spin_lock_bh(&gbe_dev->hw_stats_lock); | ||
| 1439 | |||
| 1440 | if (gbe_dev->ss_version == GBE_SS_VERSION_14) | ||
| 1441 | gbe_update_stats_ver14(gbe_dev, NULL); | ||
| 1442 | else | ||
| 1443 | gbe_update_stats(gbe_dev, NULL); | ||
| 1444 | |||
| 1445 | spin_unlock_bh(&gbe_dev->hw_stats_lock); | ||
| 1446 | |||
| 1447 | gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL; | ||
| 1448 | add_timer(&gbe_dev->timer); | ||
| 1449 | } | ||
| 1450 | |||
/* TX hook registered in gbe_open(): steer every outgoing packet from
 * this interface into the GBE transmit pipe. Always returns 0.
 */
static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info)
{
	struct gbe_intf *gbe_intf = data;

	p_info->tx_pipe = &gbe_intf->tx_pipe;
	return 0;
}
| 1458 | |||
/* netcp "open" callback: bring up the slave port backing @ndev.
 * Configures the TX pipe's PS flags, resets the switch-wide control
 * registers, starts the slave and installs the TX hook. On failure the
 * slave is stopped again and the error is returned.
 */
static int gbe_open(void *intf_priv, struct net_device *ndev)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_slave *slave = gbe_intf->slave;
	int port_num = slave->port_num;
	u32 reg;
	int ret;

	reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
	dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
		GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
		GBE_RTL_VERSION(reg), GBE_IDENT(reg));

	/* With the ALE active the switch steers traffic itself; in
	 * bypass mode the DMA PS flags carry the destination port. */
	if (gbe_dev->enable_ale)
		gbe_intf->tx_pipe.dma_psflags = 0;
	else
		gbe_intf->tx_pipe.dma_psflags = port_num;

	dev_dbg(gbe_dev->dev, "opened TX channel %s: %p with psflags %d\n",
		gbe_intf->tx_pipe.dma_chan_name,
		gbe_intf->tx_pipe.dma_channel,
		gbe_intf->tx_pipe.dma_psflags);

	/* start from a known-quiesced state */
	gbe_slave_stop(gbe_intf);

	/* disable priority elevation and enable statistics on all ports */
	writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));

	/* Control register */
	writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));

	/* All statistics enabled and STAT AB visible by default */
	writel(GBE_REG_VAL_STAT_ENABLE_ALL, GBE_REG_ADDR(gbe_dev, switch_regs,
							 stat_port_en));

	ret = gbe_slave_open(gbe_intf);
	if (ret)
		goto fail;

	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
			      gbe_intf);

	/* mark open so the housekeeping timer starts polling this port */
	slave->open = true;
	netcp_ethss_update_link_state(gbe_dev, slave, ndev);
	return 0;

fail:
	gbe_slave_stop(gbe_intf);
	return ret;
}
| 1511 | |||
/* netcp "close" callback: quiesce the slave, remove the TX hook, and
 * mark the interface closed so the poll timer skips it. Always 0.
 */
static int gbe_close(void *intf_priv, struct net_device *ndev)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct netcp_intf *netcp = netdev_priv(ndev);

	gbe_slave_stop(gbe_intf);
	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
				gbe_intf);

	gbe_intf->slave->open = false;
	atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
	return 0;
}
| 1525 | |||
| 1526 | static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, | ||
| 1527 | struct device_node *node) | ||
| 1528 | { | ||
| 1529 | int port_reg_num; | ||
| 1530 | u32 port_reg_ofs, emac_reg_ofs; | ||
| 1531 | |||
| 1532 | if (of_property_read_u32(node, "slave-port", &slave->slave_num)) { | ||
| 1533 | dev_err(gbe_dev->dev, "missing slave-port parameter\n"); | ||
| 1534 | return -EINVAL; | ||
| 1535 | } | ||
| 1536 | |||
| 1537 | if (of_property_read_u32(node, "link-interface", | ||
| 1538 | &slave->link_interface)) { | ||
| 1539 | dev_warn(gbe_dev->dev, | ||
| 1540 | "missing link-interface value defaulting to 1G mac-phy link\n"); | ||
| 1541 | slave->link_interface = SGMII_LINK_MAC_PHY; | ||
| 1542 | } | ||
| 1543 | |||
| 1544 | slave->open = false; | ||
| 1545 | slave->phy_node = of_parse_phandle(node, "phy-handle", 0); | ||
| 1546 | slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num); | ||
| 1547 | |||
| 1548 | if (slave->link_interface >= XGMII_LINK_MAC_PHY) | ||
| 1549 | slave->mac_control = GBE_DEF_10G_MAC_CONTROL; | ||
| 1550 | else | ||
| 1551 | slave->mac_control = GBE_DEF_1G_MAC_CONTROL; | ||
| 1552 | |||
| 1553 | /* Emac regs memmap are contiguous but port regs are not */ | ||
| 1554 | port_reg_num = slave->slave_num; | ||
| 1555 | if (gbe_dev->ss_version == GBE_SS_VERSION_14) { | ||
| 1556 | if (slave->slave_num > 1) { | ||
| 1557 | port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET; | ||
| 1558 | port_reg_num -= 2; | ||
| 1559 | } else { | ||
| 1560 | port_reg_ofs = GBE13_SLAVE_PORT_OFFSET; | ||
| 1561 | } | ||
| 1562 | } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) { | ||
| 1563 | port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET; | ||
| 1564 | } else { | ||
| 1565 | dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n", | ||
| 1566 | gbe_dev->ss_version); | ||
| 1567 | return -EINVAL; | ||
| 1568 | } | ||
| 1569 | |||
| 1570 | if (gbe_dev->ss_version == GBE_SS_VERSION_14) | ||
| 1571 | emac_reg_ofs = GBE13_EMAC_OFFSET; | ||
| 1572 | else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) | ||
| 1573 | emac_reg_ofs = XGBE10_EMAC_OFFSET; | ||
| 1574 | |||
| 1575 | slave->port_regs = gbe_dev->ss_regs + port_reg_ofs + | ||
| 1576 | (0x30 * port_reg_num); | ||
| 1577 | slave->emac_regs = gbe_dev->ss_regs + emac_reg_ofs + | ||
| 1578 | (0x40 * slave->slave_num); | ||
| 1579 | |||
| 1580 | if (gbe_dev->ss_version == GBE_SS_VERSION_14) { | ||
| 1581 | /* Initialize slave port register offsets */ | ||
| 1582 | GBE_SET_REG_OFS(slave, port_regs, port_vlan); | ||
| 1583 | GBE_SET_REG_OFS(slave, port_regs, tx_pri_map); | ||
| 1584 | GBE_SET_REG_OFS(slave, port_regs, sa_lo); | ||
| 1585 | GBE_SET_REG_OFS(slave, port_regs, sa_hi); | ||
| 1586 | GBE_SET_REG_OFS(slave, port_regs, ts_ctl); | ||
| 1587 | GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype); | ||
| 1588 | GBE_SET_REG_OFS(slave, port_regs, ts_vlan); | ||
| 1589 | GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2); | ||
| 1590 | GBE_SET_REG_OFS(slave, port_regs, ts_ctl2); | ||
| 1591 | |||
| 1592 | /* Initialize EMAC register offsets */ | ||
| 1593 | GBE_SET_REG_OFS(slave, emac_regs, mac_control); | ||
| 1594 | GBE_SET_REG_OFS(slave, emac_regs, soft_reset); | ||
| 1595 | GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen); | ||
| 1596 | |||
| 1597 | } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) { | ||
| 1598 | /* Initialize slave port register offsets */ | ||
| 1599 | XGBE_SET_REG_OFS(slave, port_regs, port_vlan); | ||
| 1600 | XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map); | ||
| 1601 | XGBE_SET_REG_OFS(slave, port_regs, sa_lo); | ||
| 1602 | XGBE_SET_REG_OFS(slave, port_regs, sa_hi); | ||
| 1603 | XGBE_SET_REG_OFS(slave, port_regs, ts_ctl); | ||
| 1604 | XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype); | ||
| 1605 | XGBE_SET_REG_OFS(slave, port_regs, ts_vlan); | ||
| 1606 | XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2); | ||
| 1607 | XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2); | ||
| 1608 | |||
| 1609 | /* Initialize EMAC register offsets */ | ||
| 1610 | XGBE_SET_REG_OFS(slave, emac_regs, mac_control); | ||
| 1611 | XGBE_SET_REG_OFS(slave, emac_regs, soft_reset); | ||
| 1612 | XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen); | ||
| 1613 | } | ||
| 1614 | |||
| 1615 | atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID); | ||
| 1616 | return 0; | ||
| 1617 | } | ||
| 1618 | |||
| 1619 | static void init_secondary_ports(struct gbe_priv *gbe_dev, | ||
| 1620 | struct device_node *node) | ||
| 1621 | { | ||
| 1622 | struct device *dev = gbe_dev->dev; | ||
| 1623 | phy_interface_t phy_mode; | ||
| 1624 | struct gbe_priv **priv; | ||
| 1625 | struct device_node *port; | ||
| 1626 | struct gbe_slave *slave; | ||
| 1627 | bool mac_phy_link = false; | ||
| 1628 | |||
| 1629 | for_each_child_of_node(node, port) { | ||
| 1630 | slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL); | ||
| 1631 | if (!slave) { | ||
| 1632 | dev_err(dev, | ||
| 1633 | "memomry alloc failed for secondary port(%s), skipping...\n", | ||
| 1634 | port->name); | ||
| 1635 | continue; | ||
| 1636 | } | ||
| 1637 | |||
| 1638 | if (init_slave(gbe_dev, slave, port)) { | ||
| 1639 | dev_err(dev, | ||
| 1640 | "Failed to initialize secondary port(%s), skipping...\n", | ||
| 1641 | port->name); | ||
| 1642 | devm_kfree(dev, slave); | ||
| 1643 | continue; | ||
| 1644 | } | ||
| 1645 | |||
| 1646 | gbe_sgmii_config(gbe_dev, slave); | ||
| 1647 | gbe_port_reset(slave); | ||
| 1648 | gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max); | ||
| 1649 | list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves); | ||
| 1650 | gbe_dev->num_slaves++; | ||
| 1651 | if ((slave->link_interface == SGMII_LINK_MAC_PHY) || | ||
| 1652 | (slave->link_interface == XGMII_LINK_MAC_PHY)) | ||
| 1653 | mac_phy_link = true; | ||
| 1654 | |||
| 1655 | slave->open = true; | ||
| 1656 | } | ||
| 1657 | |||
| 1658 | /* of_phy_connect() is needed only for MAC-PHY interface */ | ||
| 1659 | if (!mac_phy_link) | ||
| 1660 | return; | ||
| 1661 | |||
| 1662 | /* Allocate dummy netdev device for attaching to phy device */ | ||
| 1663 | gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy", | ||
| 1664 | NET_NAME_UNKNOWN, ether_setup); | ||
| 1665 | if (!gbe_dev->dummy_ndev) { | ||
| 1666 | dev_err(dev, | ||
| 1667 | "Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n"); | ||
| 1668 | return; | ||
| 1669 | } | ||
| 1670 | priv = netdev_priv(gbe_dev->dummy_ndev); | ||
| 1671 | *priv = gbe_dev; | ||
| 1672 | |||
| 1673 | if (slave->link_interface == SGMII_LINK_MAC_PHY) { | ||
| 1674 | phy_mode = PHY_INTERFACE_MODE_SGMII; | ||
| 1675 | slave->phy_port_t = PORT_MII; | ||
| 1676 | } else { | ||
| 1677 | phy_mode = PHY_INTERFACE_MODE_NA; | ||
| 1678 | slave->phy_port_t = PORT_FIBRE; | ||
| 1679 | } | ||
| 1680 | |||
| 1681 | for_each_sec_slave(slave, gbe_dev) { | ||
| 1682 | if ((slave->link_interface != SGMII_LINK_MAC_PHY) && | ||
| 1683 | (slave->link_interface != XGMII_LINK_MAC_PHY)) | ||
| 1684 | continue; | ||
| 1685 | slave->phy = | ||
| 1686 | of_phy_connect(gbe_dev->dummy_ndev, | ||
| 1687 | slave->phy_node, | ||
| 1688 | gbe_adjust_link_sec_slaves, | ||
| 1689 | 0, phy_mode); | ||
| 1690 | if (!slave->phy) { | ||
| 1691 | dev_err(dev, "phy not found for slave %d\n", | ||
| 1692 | slave->slave_num); | ||
| 1693 | slave->phy = NULL; | ||
| 1694 | } else { | ||
| 1695 | dev_dbg(dev, "phy found: id is: 0x%s\n", | ||
| 1696 | dev_name(&slave->phy->dev)); | ||
| 1697 | phy_start(slave->phy); | ||
| 1698 | phy_read_status(slave->phy); | ||
| 1699 | } | ||
| 1700 | } | ||
| 1701 | } | ||
| 1702 | |||
| 1703 | static void free_secondary_ports(struct gbe_priv *gbe_dev) | ||
| 1704 | { | ||
| 1705 | struct gbe_slave *slave; | ||
| 1706 | |||
| 1707 | for (;;) { | ||
| 1708 | slave = first_sec_slave(gbe_dev); | ||
| 1709 | if (!slave) | ||
| 1710 | break; | ||
| 1711 | if (slave->phy) | ||
| 1712 | phy_disconnect(slave->phy); | ||
| 1713 | list_del(&slave->slave_list); | ||
| 1714 | } | ||
| 1715 | if (gbe_dev->dummy_ndev) | ||
| 1716 | free_netdev(gbe_dev->dummy_ndev); | ||
| 1717 | } | ||
| 1718 | |||
/* Populate per-device state for the 10G (XGBE) subsystem: map the
 * subsystem and serdes register windows, allocate the hw-stats buffer,
 * derive module base addresses, ALE parameters and the ethtool stats
 * table, and fill the register-offset tables consumed through the
 * XGBE_SET_REG_OFS() accessors. Returns 0 or a negative errno.
 */
static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret, i;

	/* reg index 0: subsystem register window */
	ret = of_address_to_resource(node, 0, &res);
	if (ret) {
		dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe subsystem regs\n",
			node->name);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;

	/* dedicated reg index for the serdes block */
	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe serdes regs\n",
			node->name);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->xgbe_serdes_regs = regs;

	/* (XGBE10_NUM_SLAVES + 1) stat blocks — presumably the extra
	 * one is for the host port; confirm against the stats code */
	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					 XGBE10_NUM_STAT_ENTRIES *
					 (XGBE10_NUM_SLAVES + 1) * sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->ss_version = XGBE_SS_VERSION_10;
	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
		XGBE10_SGMII_MODULE_OFFSET;
	gbe_dev->switch_regs = gbe_dev->ss_regs + XGBE10_SWITCH_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;

	/* one stats module per GBE_HW_STATS_REG_MAP_SZ-sized window */
	for (i = 0; i < XGBE10_NUM_HW_STATS_MOD; i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->ss_regs +
			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->ale_reg = gbe_dev->ss_regs + XGBE10_ALE_OFFSET;
	gbe_dev->ale_ports = XGBE10_NUM_ALE_PORTS;
	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
	gbe_dev->et_stats = xgbe10_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);

	/* Subsystem registers */
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);

	/* Switch module registers */
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}
| 1797 | |||
| 1798 | static int get_gbe_resource_version(struct gbe_priv *gbe_dev, | ||
| 1799 | struct device_node *node) | ||
| 1800 | { | ||
| 1801 | struct resource res; | ||
| 1802 | void __iomem *regs; | ||
| 1803 | int ret; | ||
| 1804 | |||
| 1805 | ret = of_address_to_resource(node, 0, &res); | ||
| 1806 | if (ret) { | ||
| 1807 | dev_err(gbe_dev->dev, "Can't translate of node(%s) address\n", | ||
| 1808 | node->name); | ||
| 1809 | return ret; | ||
| 1810 | } | ||
| 1811 | |||
| 1812 | regs = devm_ioremap_resource(gbe_dev->dev, &res); | ||
| 1813 | if (IS_ERR(regs)) { | ||
| 1814 | dev_err(gbe_dev->dev, "Failed to map gbe register base\n"); | ||
| 1815 | return PTR_ERR(regs); | ||
| 1816 | } | ||
| 1817 | gbe_dev->ss_regs = regs; | ||
| 1818 | gbe_dev->ss_version = readl(gbe_dev->ss_regs); | ||
| 1819 | return 0; | ||
| 1820 | } | ||
| 1821 | |||
/* Populate per-device state for the 1G (GBE v1.4) subsystem. Assumes
 * get_gbe_resource_version() already mapped ss_regs; derives module
 * base addresses, the hw-stats buffer, ALE parameters, the ethtool
 * stats table, and the register-offset tables consumed through the
 * GBE_SET_REG_OFS() accessors. Returns 0 or -ENOMEM.
 */
static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	void __iomem *regs;
	int i;

	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					 GBE13_NUM_HW_STAT_ENTRIES *
					 GBE13_NUM_SLAVES * sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	regs = gbe_dev->ss_regs;
	/* ports 3-4 live in a separate SGMII module on GBE13 */
	gbe_dev->sgmii_port_regs = regs + GBE13_SGMII_MODULE_OFFSET;
	gbe_dev->sgmii_port34_regs = regs + GBE13_SGMII34_MODULE_OFFSET;
	gbe_dev->switch_regs = regs + GBE13_SWITCH_MODULE_OFFSET;
	gbe_dev->host_port_regs = regs + GBE13_HOST_PORT_OFFSET;

	/* one stats module per GBE_HW_STATS_REG_MAP_SZ-sized window */
	for (i = 0; i < GBE13_NUM_HW_STATS_MOD; i++)
		gbe_dev->hw_stats_regs[i] = regs + GBE13_HW_STATS_OFFSET +
			(GBE_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->ale_reg = regs + GBE13_ALE_OFFSET;
	gbe_dev->ale_ports = GBE13_NUM_ALE_PORTS;
	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
	gbe_dev->et_stats = gbe13_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);

	/* Subsystem registers */
	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);

	/* Switch module registers */
	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}
| 1870 | |||
| 1871 | static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, | ||
| 1872 | struct device_node *node, void **inst_priv) | ||
| 1873 | { | ||
| 1874 | struct device_node *interfaces, *interface; | ||
| 1875 | struct device_node *secondary_ports; | ||
| 1876 | struct cpsw_ale_params ale_params; | ||
| 1877 | struct gbe_priv *gbe_dev; | ||
| 1878 | u32 slave_num; | ||
| 1879 | int ret = 0; | ||
| 1880 | |||
| 1881 | if (!node) { | ||
| 1882 | dev_err(dev, "device tree info unavailable\n"); | ||
| 1883 | return -ENODEV; | ||
| 1884 | } | ||
| 1885 | |||
| 1886 | gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL); | ||
| 1887 | if (!gbe_dev) | ||
| 1888 | return -ENOMEM; | ||
| 1889 | |||
| 1890 | gbe_dev->dev = dev; | ||
| 1891 | gbe_dev->netcp_device = netcp_device; | ||
| 1892 | gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE; | ||
| 1893 | |||
| 1894 | /* init the hw stats lock */ | ||
| 1895 | spin_lock_init(&gbe_dev->hw_stats_lock); | ||
| 1896 | |||
| 1897 | if (of_find_property(node, "enable-ale", NULL)) { | ||
| 1898 | gbe_dev->enable_ale = true; | ||
| 1899 | dev_info(dev, "ALE enabled\n"); | ||
| 1900 | } else { | ||
| 1901 | gbe_dev->enable_ale = false; | ||
| 1902 | dev_dbg(dev, "ALE bypass enabled*\n"); | ||
| 1903 | } | ||
| 1904 | |||
| 1905 | ret = of_property_read_u32(node, "tx-queue", | ||
| 1906 | &gbe_dev->tx_queue_id); | ||
| 1907 | if (ret < 0) { | ||
| 1908 | dev_err(dev, "missing tx_queue parameter\n"); | ||
| 1909 | gbe_dev->tx_queue_id = GBE_TX_QUEUE; | ||
| 1910 | } | ||
| 1911 | |||
| 1912 | ret = of_property_read_string(node, "tx-channel", | ||
| 1913 | &gbe_dev->dma_chan_name); | ||
| 1914 | if (ret < 0) { | ||
| 1915 | dev_err(dev, "missing \"tx-channel\" parameter\n"); | ||
| 1916 | ret = -ENODEV; | ||
| 1917 | goto quit; | ||
| 1918 | } | ||
| 1919 | |||
| 1920 | if (!strcmp(node->name, "gbe")) { | ||
| 1921 | ret = get_gbe_resource_version(gbe_dev, node); | ||
| 1922 | if (ret) | ||
| 1923 | goto quit; | ||
| 1924 | |||
| 1925 | ret = set_gbe_ethss14_priv(gbe_dev, node); | ||
| 1926 | if (ret) | ||
| 1927 | goto quit; | ||
| 1928 | } else if (!strcmp(node->name, "xgbe")) { | ||
| 1929 | ret = set_xgbe_ethss10_priv(gbe_dev, node); | ||
| 1930 | if (ret) | ||
| 1931 | goto quit; | ||
| 1932 | ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs, | ||
| 1933 | gbe_dev->ss_regs); | ||
| 1934 | if (ret) | ||
| 1935 | goto quit; | ||
| 1936 | } else { | ||
| 1937 | dev_err(dev, "unknown GBE node(%s)\n", node->name); | ||
| 1938 | ret = -ENODEV; | ||
| 1939 | goto quit; | ||
| 1940 | } | ||
| 1941 | |||
| 1942 | interfaces = of_get_child_by_name(node, "interfaces"); | ||
| 1943 | if (!interfaces) | ||
| 1944 | dev_err(dev, "could not find interfaces\n"); | ||
| 1945 | |||
| 1946 | ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, | ||
| 1947 | gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); | ||
| 1948 | if (ret) | ||
| 1949 | goto quit; | ||
| 1950 | |||
| 1951 | ret = netcp_txpipe_open(&gbe_dev->tx_pipe); | ||
| 1952 | if (ret) | ||
| 1953 | goto quit; | ||
| 1954 | |||
| 1955 | /* Create network interfaces */ | ||
| 1956 | INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); | ||
| 1957 | for_each_child_of_node(interfaces, interface) { | ||
| 1958 | ret = of_property_read_u32(interface, "slave-port", &slave_num); | ||
| 1959 | if (ret) { | ||
| 1960 | dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n", | ||
| 1961 | interface->name); | ||
| 1962 | continue; | ||
| 1963 | } | ||
| 1964 | gbe_dev->num_slaves++; | ||
| 1965 | } | ||
| 1966 | |||
| 1967 | if (!gbe_dev->num_slaves) | ||
| 1968 | dev_warn(dev, "No network interface configured\n"); | ||
| 1969 | |||
| 1970 | /* Initialize Secondary slave ports */ | ||
| 1971 | secondary_ports = of_get_child_by_name(node, "secondary-slave-ports"); | ||
| 1972 | INIT_LIST_HEAD(&gbe_dev->secondary_slaves); | ||
| 1973 | if (secondary_ports) | ||
| 1974 | init_secondary_ports(gbe_dev, secondary_ports); | ||
| 1975 | of_node_put(secondary_ports); | ||
| 1976 | |||
| 1977 | if (!gbe_dev->num_slaves) { | ||
| 1978 | dev_err(dev, "No network interface or secondary ports configured\n"); | ||
| 1979 | ret = -ENODEV; | ||
| 1980 | goto quit; | ||
| 1981 | } | ||
| 1982 | |||
| 1983 | memset(&ale_params, 0, sizeof(ale_params)); | ||
| 1984 | ale_params.dev = gbe_dev->dev; | ||
| 1985 | ale_params.ale_regs = gbe_dev->ale_reg; | ||
| 1986 | ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT; | ||
| 1987 | ale_params.ale_entries = gbe_dev->ale_entries; | ||
| 1988 | ale_params.ale_ports = gbe_dev->ale_ports; | ||
| 1989 | |||
| 1990 | gbe_dev->ale = cpsw_ale_create(&ale_params); | ||
| 1991 | if (!gbe_dev->ale) { | ||
| 1992 | dev_err(gbe_dev->dev, "error initializing ale engine\n"); | ||
| 1993 | ret = -ENODEV; | ||
| 1994 | goto quit; | ||
| 1995 | } else { | ||
| 1996 | dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n"); | ||
| 1997 | } | ||
| 1998 | |||
| 1999 | /* initialize host port */ | ||
| 2000 | gbe_init_host_port(gbe_dev); | ||
| 2001 | |||
| 2002 | init_timer(&gbe_dev->timer); | ||
| 2003 | gbe_dev->timer.data = (unsigned long)gbe_dev; | ||
| 2004 | gbe_dev->timer.function = netcp_ethss_timer; | ||
| 2005 | gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL; | ||
| 2006 | add_timer(&gbe_dev->timer); | ||
| 2007 | *inst_priv = gbe_dev; | ||
| 2008 | return 0; | ||
| 2009 | |||
| 2010 | quit: | ||
| 2011 | if (gbe_dev->hw_stats) | ||
| 2012 | devm_kfree(dev, gbe_dev->hw_stats); | ||
| 2013 | if (gbe_dev->ale) | ||
| 2014 | cpsw_ale_destroy(gbe_dev->ale); | ||
| 2015 | if (gbe_dev->ss_regs) | ||
| 2016 | devm_iounmap(dev, gbe_dev->ss_regs); | ||
| 2017 | if (interfaces) | ||
| 2018 | of_node_put(interfaces); | ||
| 2019 | devm_kfree(dev, gbe_dev); | ||
| 2020 | return ret; | ||
| 2021 | } | ||
| 2022 | |||
| 2023 | static int gbe_attach(void *inst_priv, struct net_device *ndev, | ||
| 2024 | struct device_node *node, void **intf_priv) | ||
| 2025 | { | ||
| 2026 | struct gbe_priv *gbe_dev = inst_priv; | ||
| 2027 | struct gbe_intf *gbe_intf; | ||
| 2028 | int ret; | ||
| 2029 | |||
| 2030 | if (!node) { | ||
| 2031 | dev_err(gbe_dev->dev, "interface node not available\n"); | ||
| 2032 | return -ENODEV; | ||
| 2033 | } | ||
| 2034 | |||
| 2035 | gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL); | ||
| 2036 | if (!gbe_intf) | ||
| 2037 | return -ENOMEM; | ||
| 2038 | |||
| 2039 | gbe_intf->ndev = ndev; | ||
| 2040 | gbe_intf->dev = gbe_dev->dev; | ||
| 2041 | gbe_intf->gbe_dev = gbe_dev; | ||
| 2042 | |||
| 2043 | gbe_intf->slave = devm_kzalloc(gbe_dev->dev, | ||
| 2044 | sizeof(*gbe_intf->slave), | ||
| 2045 | GFP_KERNEL); | ||
| 2046 | if (!gbe_intf->slave) { | ||
| 2047 | ret = -ENOMEM; | ||
| 2048 | goto fail; | ||
| 2049 | } | ||
| 2050 | |||
| 2051 | if (init_slave(gbe_dev, gbe_intf->slave, node)) { | ||
| 2052 | ret = -ENODEV; | ||
| 2053 | goto fail; | ||
| 2054 | } | ||
| 2055 | |||
| 2056 | gbe_intf->tx_pipe = gbe_dev->tx_pipe; | ||
| 2057 | ndev->ethtool_ops = &keystone_ethtool_ops; | ||
| 2058 | list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head); | ||
| 2059 | *intf_priv = gbe_intf; | ||
| 2060 | return 0; | ||
| 2061 | |||
| 2062 | fail: | ||
| 2063 | if (gbe_intf->slave) | ||
| 2064 | devm_kfree(gbe_dev->dev, gbe_intf->slave); | ||
| 2065 | if (gbe_intf) | ||
| 2066 | devm_kfree(gbe_dev->dev, gbe_intf); | ||
| 2067 | return ret; | ||
| 2068 | } | ||
| 2069 | |||
/* netcp release callback: undo gbe_attach() — detach ethtool ops, drop
 * the interface from the device list and free its allocations. 0.
 */
static int gbe_release(void *intf_priv)
{
	struct gbe_intf *gbe_intf = intf_priv;

	gbe_intf->ndev->ethtool_ops = NULL;
	list_del(&gbe_intf->gbe_intf_list);
	devm_kfree(gbe_intf->dev, gbe_intf->slave);
	devm_kfree(gbe_intf->dev, gbe_intf);
	return 0;
}
| 2080 | |||
| 2081 | static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv) | ||
| 2082 | { | ||
| 2083 | struct gbe_priv *gbe_dev = inst_priv; | ||
| 2084 | |||
| 2085 | del_timer_sync(&gbe_dev->timer); | ||
| 2086 | cpsw_ale_stop(gbe_dev->ale); | ||
| 2087 | cpsw_ale_destroy(gbe_dev->ale); | ||
| 2088 | netcp_txpipe_close(&gbe_dev->tx_pipe); | ||
| 2089 | free_secondary_ports(gbe_dev); | ||
| 2090 | |||
| 2091 | if (!list_empty(&gbe_dev->gbe_intf_head)) | ||
| 2092 | dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n"); | ||
| 2093 | |||
| 2094 | devm_kfree(gbe_dev->dev, gbe_dev->hw_stats); | ||
| 2095 | devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs); | ||
| 2096 | memset(gbe_dev, 0x00, sizeof(*gbe_dev)); | ||
| 2097 | devm_kfree(gbe_dev->dev, gbe_dev); | ||
| 2098 | return 0; | ||
| 2099 | } | ||
| 2100 | |||
/* netcp module descriptor for the 1G (GBE) subsystem; all callbacks are
 * shared with the 10G descriptor below — they dispatch on ss_version.
 */
static struct netcp_module gbe_module = {
	.name		= GBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};
| 2117 | |||
/* netcp module descriptor for the 10G (XGBE) subsystem; identical
 * callback set to gbe_module, registered under a different name.
 */
static struct netcp_module xgbe_module = {
	.name		= XGBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};
| 2134 | |||
| 2135 | static int __init keystone_gbe_init(void) | ||
| 2136 | { | ||
| 2137 | int ret; | ||
| 2138 | |||
| 2139 | ret = netcp_register_module(&gbe_module); | ||
| 2140 | if (ret) | ||
| 2141 | return ret; | ||
| 2142 | |||
| 2143 | ret = netcp_register_module(&xgbe_module); | ||
| 2144 | if (ret) | ||
| 2145 | return ret; | ||
| 2146 | |||
| 2147 | return 0; | ||
| 2148 | } | ||
| 2149 | module_init(keystone_gbe_init); | ||
| 2150 | |||
/* Module exit: unregister both netcp modules registered at init. */
static void __exit keystone_gbe_exit(void)
{
	netcp_unregister_module(&gbe_module);
	netcp_unregister_module(&xgbe_module);
}
module_exit(keystone_gbe_exit);
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c new file mode 100644 index 000000000000..dbeb14266e2f --- /dev/null +++ b/drivers/net/ethernet/ti/netcp_sgmii.c | |||
| @@ -0,0 +1,131 @@ | |||
| 1 | /* | ||
| 2 | * SGMII module initialisation | ||
| 3 | * | ||
| 4 | * Copyright (C) 2014 Texas Instruments Incorporated | ||
| 5 | * Authors: Sandeep Nair <sandeep_n@ti.com> | ||
| 6 | * Sandeep Paulraj <s-paulraj@ti.com> | ||
| 7 | * Wingman Kwok <w-kwok2@ti.com> | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or | ||
| 10 | * modify it under the terms of the GNU General Public License as | ||
| 11 | * published by the Free Software Foundation version 2. | ||
| 12 | * | ||
| 13 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
| 14 | * kind, whether express or implied; without even the implied warranty | ||
| 15 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 16 | * GNU General Public License for more details. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #include "netcp.h" | ||
| 20 | |||
#define SGMII_REG_STATUS_LOCK		BIT(4)
#define SGMII_REG_STATUS_LINK		BIT(0)
#define SGMII_REG_STATUS_AUTONEG	BIT(2)
#define SGMII_REG_CONTROL_AUTONEG	BIT(0)

/* Per-port register offset: ports 0-1 live at 0x000/0x100; ports 2 and
 * up are addressed relative to a second register block (hence x - 2).
 * Fix: macro arguments are now fully parenthesized so expression
 * arguments expand correctly.
 */
#define SGMII23_OFFSET(x)	(((x) - 2) * 0x100)
#define SGMII_OFFSET(x)		(((x) <= 1) ? ((x) * 0x100) : (SGMII23_OFFSET(x)))

/* SGMII registers */
#define SGMII_SRESET_REG(x)	(SGMII_OFFSET(x) + 0x004)
#define SGMII_CTL_REG(x)	(SGMII_OFFSET(x) + 0x010)
#define SGMII_STATUS_REG(x)	(SGMII_OFFSET(x) + 0x014)
#define SGMII_MRADV_REG(x)	(SGMII_OFFSET(x) + 0x018)
| 35 | static void sgmii_write_reg(void __iomem *base, int reg, u32 val) | ||
| 36 | { | ||
| 37 | writel(val, base + reg); | ||
| 38 | } | ||
| 39 | |||
| 40 | static u32 sgmii_read_reg(void __iomem *base, int reg) | ||
| 41 | { | ||
| 42 | return readl(base + reg); | ||
| 43 | } | ||
| 44 | |||
| 45 | static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val) | ||
| 46 | { | ||
| 47 | writel((readl(base + reg) | val), base + reg); | ||
| 48 | } | ||
| 49 | |||
/* port is 0 based */
/* Soft-reset one SGMII port and busy-wait until the reset bit
 * self-clears.
 * NOTE(review): the wait is unbounded and does not relax the CPU --
 * this relies on the hardware always completing the reset.
 * Always returns 0.
 */
int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port)
{
	/* Soft reset */
	sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1);
	while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0)
		;
	return 0;
}
| 59 | |||
| 60 | int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port) | ||
| 61 | { | ||
| 62 | u32 status = 0, link = 0; | ||
| 63 | |||
| 64 | status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port)); | ||
| 65 | if ((status & SGMII_REG_STATUS_LINK) != 0) | ||
| 66 | link = 1; | ||
| 67 | return link; | ||
| 68 | } | ||
| 69 | |||
/* Configure an SGMII port for the requested link interface type.
 *
 * @sgmii_ofs: base of the SGMII register region
 * @port: zero-based port index
 * @interface: one of the SGMII_LINK_* modes
 *
 * Returns 0 on success, -EINVAL for an unrecognized interface mode.
 * A missing SerDes PLL lock is only logged; a link/autoneg timeout in
 * the final poll is silently ignored (the link may come up later).
 */
int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface)
{
	unsigned int i, status, mask;
	u32 mr_adv_ability;
	u32 control;

	/* Pick MR_ADV_ABILITY/CONTROL register values for the mode.
	 * The constants are raw register encodings -- presumably from
	 * the TI Keystone GBE TRM; verify against the hardware manual.
	 */
	switch (interface) {
	case SGMII_LINK_MAC_MAC_AUTONEG:
		mr_adv_ability = 0x9801;
		control = 0x21;
		break;

	case SGMII_LINK_MAC_PHY:
	case SGMII_LINK_MAC_PHY_NO_MDIO:
		mr_adv_ability = 1;
		control = 1;
		break;

	case SGMII_LINK_MAC_MAC_FORCED:
		mr_adv_ability = 0x9801;
		control = 0x20;
		break;

	case SGMII_LINK_MAC_FIBER:
		mr_adv_ability = 0x20;
		control = 0x1;
		break;

	default:
		WARN_ONCE(1, "Invalid sgmii interface: %d\n", interface);
		return -EINVAL;
	}

	/* Clear the control register before reconfiguring */
	sgmii_write_reg(sgmii_ofs, SGMII_CTL_REG(port), 0);

	/* Wait for the SerDes pll to lock */
	for (i = 0; i < 1000; i++) {
		usleep_range(1000, 2000);
		status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
		if ((status & SGMII_REG_STATUS_LOCK) != 0)
			break;
	}

	/* A lock failure is logged but not treated as fatal */
	if ((status & SGMII_REG_STATUS_LOCK) == 0)
		pr_err("serdes PLL not locked\n");

	sgmii_write_reg(sgmii_ofs, SGMII_MRADV_REG(port), mr_adv_ability);
	sgmii_write_reg(sgmii_ofs, SGMII_CTL_REG(port), control);

	/* Wait for link (and autoneg completion when autoneg is enabled) */
	mask = SGMII_REG_STATUS_LINK;
	if (control & SGMII_REG_CONTROL_AUTONEG)
		mask |= SGMII_REG_STATUS_AUTONEG;

	for (i = 0; i < 1000; i++) {
		usleep_range(200, 500);
		status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
		if ((status & mask) == mask)
			break;
	}

	return 0;
}
diff --git a/drivers/net/ethernet/ti/netcp_xgbepcsr.c b/drivers/net/ethernet/ti/netcp_xgbepcsr.c new file mode 100644 index 000000000000..33571acc52b6 --- /dev/null +++ b/drivers/net/ethernet/ti/netcp_xgbepcsr.c | |||
| @@ -0,0 +1,501 @@ | |||
| 1 | /* | ||
| 2 | * XGE PCSR module initialisation | ||
| 3 | * | ||
| 4 | * Copyright (C) 2014 Texas Instruments Incorporated | ||
| 5 | * Authors: Sandeep Nair <sandeep_n@ti.com> | ||
| 6 | * WingMan Kwok <w-kwok2@ti.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License as | ||
| 10 | * published by the Free Software Foundation version 2. | ||
| 11 | * | ||
| 12 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
| 13 | * kind, whether express or implied; without even the implied warranty | ||
| 14 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | */ | ||
| 17 | #include "netcp.h" | ||
| 18 | |||
/* XGBE registers */
#define XGBE_CTRL_OFFSET		0x0c
#define XGBE_SGMII_1_OFFSET		0x0114
#define XGBE_SGMII_2_OFFSET		0x0214

/* PCS-R registers */
#define PCSR_CPU_CTRL_OFFSET		0x1fd0
#define POR_EN				BIT(29)

/* Read-modify-write: replace the bits of *addr selected by mask with
 * the corresponding bits of value.
 * Fix: value is now parenthesized so expression arguments expand
 * correctly. NOTE: addr is evaluated twice -- do not pass expressions
 * with side effects.
 */
#define reg_rmw(addr, value, mask) \
	writel(((readl(addr) & (~(mask))) | \
			((value) & (mask))), (addr))

/* bit mask of width w at offset s (arguments fully parenthesized) */
#define MASK_WID_SH(w, s)	((((1) << (w)) - 1) << (s))

/* shift value v to offset s (arguments fully parenthesized) */
#define VAL_SH(v, s)		((v) << (s))

/* Phy-A vs Phy-B selector; this driver only handles the 0 case. */
#define PHY_A(serdes)		0
| 39 | |||
/* One serdes register patch: set the bits selected by mask in the
 * register at byte offset ofs to the corresponding bits of val.
 */
struct serdes_cfg {
	u32 ofs;
	u32 val;
	u32 mask;
};

/* CMU0 setup -- per the table name: 1.25G, 156.25 MHz reference clock.
 * Values are opaque TI serdes configuration data; verify against the
 * Keystone serdes user guide before changing.
 */
static struct serdes_cfg cfg_phyb_1p25g_156p25mhz_cmu0[] = {
	{0x0000, 0x00800002, 0x00ff00ff},
	{0x0014, 0x00003838, 0x0000ffff},
	{0x0060, 0x1c44e438, 0xffffffff},
	{0x0064, 0x00c18400, 0x00ffffff},
	{0x0068, 0x17078200, 0xffffff00},
	{0x006c, 0x00000014, 0x000000ff},
	{0x0078, 0x0000c000, 0x0000ff00},
	{0x0000, 0x00000003, 0x000000ff},
};

/* CMU1 setup -- per the table name: 10.3125G, 156.25 MHz refclk. */
static struct serdes_cfg cfg_phyb_10p3125g_156p25mhz_cmu1[] = {
	{0x0c00, 0x00030002, 0x00ff00ff},
	{0x0c14, 0x00005252, 0x0000ffff},
	{0x0c28, 0x80000000, 0xff000000},
	{0x0c2c, 0x000000f6, 0x000000ff},
	{0x0c3c, 0x04000405, 0xff00ffff},
	{0x0c40, 0xc0800000, 0xffff0000},
	{0x0c44, 0x5a202062, 0xffffffff},
	{0x0c48, 0x40040424, 0xffffffff},
	{0x0c4c, 0x00004002, 0x0000ffff},
	{0x0c50, 0x19001c00, 0xff00ff00},
	{0x0c54, 0x00002100, 0x0000ff00},
	{0x0c58, 0x00000060, 0x000000ff},
	{0x0c60, 0x80131e7c, 0xffffffff},
	{0x0c64, 0x8400cb02, 0xff00ffff},
	{0x0c68, 0x17078200, 0xffffff00},
	{0x0c6c, 0x00000016, 0x000000ff},
	{0x0c74, 0x00000400, 0x0000ff00},
	{0x0c78, 0x0000c000, 0x0000ff00},
	{0x0c00, 0x00000003, 0x000000ff},
};

/* Per-lane setup (16-bit lane, 10.3125G); applied at 0x200 * lane. */
static struct serdes_cfg cfg_phyb_10p3125g_16bit_lane[] = {
	{0x0204, 0x00000080, 0x000000ff},
	{0x0208, 0x0000920d, 0x0000ffff},
	{0x0204, 0xfc000000, 0xff000000},
	{0x0208, 0x00009104, 0x0000ffff},
	{0x0210, 0x1a000000, 0xff000000},
	{0x0214, 0x00006b58, 0x00ffffff},
	{0x0218, 0x75800084, 0xffff00ff},
	{0x022c, 0x00300000, 0x00ff0000},
	{0x0230, 0x00003800, 0x0000ff00},
	{0x024c, 0x008f0000, 0x00ff0000},
	{0x0250, 0x30000000, 0xff000000},
	{0x0260, 0x00000002, 0x000000ff},
	{0x0264, 0x00000057, 0x000000ff},
	{0x0268, 0x00575700, 0x00ffff00},
	{0x0278, 0xff000000, 0xff000000},
	{0x0280, 0x00500050, 0x00ff00ff},
	{0x0284, 0x00001f15, 0x0000ffff},
	{0x028c, 0x00006f00, 0x0000ff00},
	{0x0294, 0x00000000, 0xffffff00},
	{0x0298, 0x00002640, 0xff00ffff},
	{0x029c, 0x00000003, 0x000000ff},
	{0x02a4, 0x00000f13, 0x0000ffff},
	{0x02a8, 0x0001b600, 0x00ffff00},
	{0x0380, 0x00000030, 0x000000ff},
	{0x03c0, 0x00000200, 0x0000ff00},
	{0x03cc, 0x00000018, 0x000000ff},
	{0x03cc, 0x00000000, 0x000000ff},
};

/* Common-lane (shared) setup for 10.3125G operation. */
static struct serdes_cfg cfg_phyb_10p3125g_comlane[] = {
	{0x0a00, 0x00000800, 0x0000ff00},
	{0x0a84, 0x00000000, 0x000000ff},
	{0x0a8c, 0x00130000, 0x00ff0000},
	{0x0a90, 0x77a00000, 0xffff0000},
	{0x0a94, 0x00007777, 0x0000ffff},
	{0x0b08, 0x000f0000, 0xffff0000},
	{0x0b0c, 0x000f0000, 0x00ffffff},
	{0x0b10, 0xbe000000, 0xff000000},
	{0x0b14, 0x000000ff, 0x000000ff},
	{0x0b18, 0x00000014, 0x000000ff},
	{0x0b5c, 0x981b0000, 0xffff0000},
	{0x0b64, 0x00001100, 0x0000ff00},
	{0x0b78, 0x00000c00, 0x0000ff00},
	{0x0abc, 0xff000000, 0xff000000},
	{0x0ac0, 0x0000008b, 0x000000ff},
};

/* CM/C1/C2 coefficient writes, applied per lane at 0x200 * lane. */
static struct serdes_cfg cfg_cm_c1_c2[] = {
	{0x0208, 0x00000000, 0x00000f00},
	{0x0208, 0x00000000, 0x0000001f},
	{0x0204, 0x00000000, 0x00040000},
	{0x0208, 0x000000a0, 0x000000e0},
};
| 133 | |||
| 134 | static void netcp_xgbe_serdes_cmu_init(void __iomem *serdes_regs) | ||
| 135 | { | ||
| 136 | int i; | ||
| 137 | |||
| 138 | /* cmu0 setup */ | ||
| 139 | for (i = 0; i < ARRAY_SIZE(cfg_phyb_1p25g_156p25mhz_cmu0); i++) { | ||
| 140 | reg_rmw(serdes_regs + cfg_phyb_1p25g_156p25mhz_cmu0[i].ofs, | ||
| 141 | cfg_phyb_1p25g_156p25mhz_cmu0[i].val, | ||
| 142 | cfg_phyb_1p25g_156p25mhz_cmu0[i].mask); | ||
| 143 | } | ||
| 144 | |||
| 145 | /* cmu1 setup */ | ||
| 146 | for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_156p25mhz_cmu1); i++) { | ||
| 147 | reg_rmw(serdes_regs + cfg_phyb_10p3125g_156p25mhz_cmu1[i].ofs, | ||
| 148 | cfg_phyb_10p3125g_156p25mhz_cmu1[i].val, | ||
| 149 | cfg_phyb_10p3125g_156p25mhz_cmu1[i].mask); | ||
| 150 | } | ||
| 151 | } | ||
| 152 | |||
| 153 | /* lane is 0 based */ | ||
| 154 | static void netcp_xgbe_serdes_lane_config( | ||
| 155 | void __iomem *serdes_regs, int lane) | ||
| 156 | { | ||
| 157 | int i; | ||
| 158 | |||
| 159 | /* lane setup */ | ||
| 160 | for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_16bit_lane); i++) { | ||
| 161 | reg_rmw(serdes_regs + | ||
| 162 | cfg_phyb_10p3125g_16bit_lane[i].ofs + | ||
| 163 | (0x200 * lane), | ||
| 164 | cfg_phyb_10p3125g_16bit_lane[i].val, | ||
| 165 | cfg_phyb_10p3125g_16bit_lane[i].mask); | ||
| 166 | } | ||
| 167 | |||
| 168 | /* disable auto negotiation*/ | ||
| 169 | reg_rmw(serdes_regs + (0x200 * lane) + 0x0380, | ||
| 170 | 0x00000000, 0x00000010); | ||
| 171 | |||
| 172 | /* disable link training */ | ||
| 173 | reg_rmw(serdes_regs + (0x200 * lane) + 0x03c0, | ||
| 174 | 0x00000000, 0x00000200); | ||
| 175 | } | ||
| 176 | |||
| 177 | static void netcp_xgbe_serdes_com_enable(void __iomem *serdes_regs) | ||
| 178 | { | ||
| 179 | int i; | ||
| 180 | |||
| 181 | for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_comlane); i++) { | ||
| 182 | reg_rmw(serdes_regs + cfg_phyb_10p3125g_comlane[i].ofs, | ||
| 183 | cfg_phyb_10p3125g_comlane[i].val, | ||
| 184 | cfg_phyb_10p3125g_comlane[i].mask); | ||
| 185 | } | ||
| 186 | } | ||
| 187 | |||
/* Enable one lane by writing its lane-control rate word (regs at
 * 0x1fe0 are per-lane, 4 bytes apart).
 */
static void netcp_xgbe_serdes_lane_enable(
	void __iomem *serdes_regs, int lane)
{
	/* Set Lane Control Rate */
	writel(0xe0e9e038, serdes_regs + 0x1fe0 + (4 * lane));
}
| 194 | |||
/* Clear the Phy-B reset: set bits 4:0 of register 0x0a00. */
static void netcp_xgbe_serdes_phyb_rst_clr(void __iomem *serdes_regs)
{
	reg_rmw(serdes_regs + 0x0a00, 0x0000001f, 0x000000ff);
}
| 199 | |||
/* Disable the serdes PLL (magic value per the TI init sequence). */
static void netcp_xgbe_serdes_pll_disable(void __iomem *serdes_regs)
{
	writel(0x88000000, serdes_regs + 0x1ff4);
}
| 204 | |||
/* Release the Phy-B reset, then enable the serdes PLL. */
static void netcp_xgbe_serdes_pll_enable(void __iomem *serdes_regs)
{
	netcp_xgbe_serdes_phyb_rst_clr(serdes_regs);
	writel(0xee000000, serdes_regs + 0x1ff4);
}
| 210 | |||
| 211 | static int netcp_xgbe_wait_pll_locked(void __iomem *sw_regs) | ||
| 212 | { | ||
| 213 | unsigned long timeout; | ||
| 214 | int ret = 0; | ||
| 215 | u32 val_1, val_0; | ||
| 216 | |||
| 217 | timeout = jiffies + msecs_to_jiffies(500); | ||
| 218 | do { | ||
| 219 | val_0 = (readl(sw_regs + XGBE_SGMII_1_OFFSET) & BIT(4)); | ||
| 220 | val_1 = (readl(sw_regs + XGBE_SGMII_2_OFFSET) & BIT(4)); | ||
| 221 | |||
| 222 | if (val_1 && val_0) | ||
| 223 | return 0; | ||
| 224 | |||
| 225 | if (time_after(jiffies, timeout)) { | ||
| 226 | ret = -ETIMEDOUT; | ||
| 227 | break; | ||
| 228 | } | ||
| 229 | |||
| 230 | cpu_relax(); | ||
| 231 | } while (true); | ||
| 232 | |||
| 233 | pr_err("XGBE serdes not locked: time out.\n"); | ||
| 234 | return ret; | ||
| 235 | } | ||
| 236 | |||
/* Enable the XGMII ports via the switch control register.
 * NOTE(review): 0x03 presumably enables both ports -- confirm against
 * the XGBE switch register map.
 */
static void netcp_xgbe_serdes_enable_xgmii_port(void __iomem *sw_regs)
{
	writel(0x03, sw_regs + XGBE_CTRL_OFFSET);
}
| 241 | |||
| 242 | static u32 netcp_xgbe_serdes_read_tbus_val(void __iomem *serdes_regs) | ||
| 243 | { | ||
| 244 | u32 tmp; | ||
| 245 | |||
| 246 | if (PHY_A(serdes_regs)) { | ||
| 247 | tmp = (readl(serdes_regs + 0x0ec) >> 24) & 0x0ff; | ||
| 248 | tmp |= ((readl(serdes_regs + 0x0fc) >> 16) & 0x00f00); | ||
| 249 | } else { | ||
| 250 | tmp = (readl(serdes_regs + 0x0f8) >> 16) & 0x0fff; | ||
| 251 | } | ||
| 252 | |||
| 253 | return tmp; | ||
| 254 | } | ||
| 255 | |||
| 256 | static void netcp_xgbe_serdes_write_tbus_addr(void __iomem *serdes_regs, | ||
| 257 | int select, int ofs) | ||
| 258 | { | ||
| 259 | if (PHY_A(serdes_regs)) { | ||
| 260 | reg_rmw(serdes_regs + 0x0008, ((select << 5) + ofs) << 24, | ||
| 261 | ~0x00ffffff); | ||
| 262 | return; | ||
| 263 | } | ||
| 264 | |||
| 265 | /* For 2 lane Phy-B, lane0 is actually lane1 */ | ||
| 266 | switch (select) { | ||
| 267 | case 1: | ||
| 268 | select = 2; | ||
| 269 | break; | ||
| 270 | case 2: | ||
| 271 | select = 3; | ||
| 272 | break; | ||
| 273 | default: | ||
| 274 | return; | ||
| 275 | } | ||
| 276 | |||
| 277 | reg_rmw(serdes_regs + 0x00fc, ((select << 8) + ofs) << 16, ~0xf800ffff); | ||
| 278 | } | ||
| 279 | |||
/* Select a tbus address, then read back the value it exposes. */
static u32 netcp_xgbe_serdes_read_select_tbus(void __iomem *serdes_regs,
					      int select, int ofs)
{
	/* Set tbus address */
	netcp_xgbe_serdes_write_tbus_addr(serdes_regs, select, ofs);
	/* Get TBUS Value */
	return netcp_xgbe_serdes_read_tbus_val(serdes_regs);
}
| 288 | |||
| 289 | static void netcp_xgbe_serdes_reset_cdr(void __iomem *serdes_regs, | ||
| 290 | void __iomem *sig_detect_reg, int lane) | ||
| 291 | { | ||
| 292 | u32 tmp, dlpf, tbus; | ||
| 293 | |||
| 294 | /*Get the DLPF values */ | ||
| 295 | tmp = netcp_xgbe_serdes_read_select_tbus( | ||
| 296 | serdes_regs, lane + 1, 5); | ||
| 297 | |||
| 298 | dlpf = tmp >> 2; | ||
| 299 | |||
| 300 | if (dlpf < 400 || dlpf > 700) { | ||
| 301 | reg_rmw(sig_detect_reg, VAL_SH(2, 1), MASK_WID_SH(2, 1)); | ||
| 302 | mdelay(1); | ||
| 303 | reg_rmw(sig_detect_reg, VAL_SH(0, 1), MASK_WID_SH(2, 1)); | ||
| 304 | } else { | ||
| 305 | tbus = netcp_xgbe_serdes_read_select_tbus(serdes_regs, lane + | ||
| 306 | 1, 0xe); | ||
| 307 | |||
| 308 | pr_debug("XGBE: CDR centered, DLPF: %4d,%d,%d.\n", | ||
| 309 | tmp >> 2, tmp & 3, (tbus >> 2) & 3); | ||
| 310 | } | ||
| 311 | } | ||
| 312 | |||
/* Call every 100 ms */
/* Per-lane link-supervision state machine.
 *
 * @lanes: number of lanes to check
 * @current_state: per-lane state, updated in place
 *                 (0 = syncing, 1 = linked, 2 = lock lost, maybe noise)
 * @lane_down: per-lane flag, set to 1 when a previously linked lane
 *             loses block lock; caller is expected to pre-clear it
 *
 * Returns 1 only when every checked lane is in the linked state.
 */
static int netcp_xgbe_check_link_status(void __iomem *serdes_regs,
					void __iomem *sw_regs, u32 lanes,
					u32 *current_state, u32 *lane_down)
{
	void __iomem *pcsr_base = sw_regs + 0x0600;
	void __iomem *sig_detect_reg;
	u32 pcsr_rx_stat, blk_lock, blk_errs;
	int loss, i, status = 1;

	for (i = 0; i < lanes; i++) {
		/* Get the Loss bit */
		loss = readl(serdes_regs + 0x1fc0 + 0x20 + (i * 0x04)) & 0x1;

		/* Get Block Errors and Block Lock bits */
		pcsr_rx_stat = readl(pcsr_base + 0x0c + (i * 0x80));
		blk_lock = (pcsr_rx_stat >> 30) & 0x1;
		blk_errs = (pcsr_rx_stat >> 16) & 0x0ff;

		/* Get Signal Detect Overlay Address */
		sig_detect_reg = serdes_regs + (i * 0x200) + 0x200 + 0x04;

		/* If Block errors maxed out, attempt recovery! */
		if (blk_errs == 0x0ff)
			blk_lock = 0;

		switch (current_state[i]) {
		case 0:
			/* if good link lock the signal detect ON! */
			if (!loss && blk_lock) {
				pr_debug("XGBE PCSR Linked Lane: %d\n", i);
				reg_rmw(sig_detect_reg, VAL_SH(3, 1),
					MASK_WID_SH(2, 1));
				current_state[i] = 1;
			} else if (!blk_lock) {
				/* if no lock, then reset CDR */
				pr_debug("XGBE PCSR Recover Lane: %d\n", i);
				netcp_xgbe_serdes_reset_cdr(serdes_regs,
							    sig_detect_reg, i);
			}
			break;

		case 1:
			if (!blk_lock) {
				/* Link Lost? */
				lane_down[i] = 1;
				current_state[i] = 2;
			}
			break;

		case 2:
			if (blk_lock)
				/* Nope just noise */
				current_state[i] = 1;
			else {
				/* Lost the block lock, reset CDR if it is
				 * not centered and go back to sync state
				 */
				netcp_xgbe_serdes_reset_cdr(serdes_regs,
							    sig_detect_reg, i);
				current_state[i] = 0;
			}
			break;

		default:
			pr_err("XGBE: unknown current_state[%d] %d\n",
			       i, current_state[i]);
			break;
		}

		if (blk_errs > 0) {
			/* Reset the Error counts! */
			reg_rmw(pcsr_base + 0x08 + (i * 0x80), VAL_SH(0x19, 0),
				MASK_WID_SH(8, 0));

			reg_rmw(pcsr_base + 0x08 + (i * 0x80), VAL_SH(0x00, 0),
				MASK_WID_SH(8, 0));
		}

		/* Overall status is up only if every lane is linked */
		status &= (current_state[i] == 1);
	}

	return status;
}
| 397 | |||
| 398 | static int netcp_xgbe_serdes_check_lane(void __iomem *serdes_regs, | ||
| 399 | void __iomem *sw_regs) | ||
| 400 | { | ||
| 401 | u32 current_state[2] = {0, 0}; | ||
| 402 | int retries = 0, link_up; | ||
| 403 | u32 lane_down[2]; | ||
| 404 | |||
| 405 | do { | ||
| 406 | lane_down[0] = 0; | ||
| 407 | lane_down[1] = 0; | ||
| 408 | |||
| 409 | link_up = netcp_xgbe_check_link_status(serdes_regs, sw_regs, 2, | ||
| 410 | current_state, | ||
| 411 | lane_down); | ||
| 412 | |||
| 413 | /* if we did not get link up then wait 100ms before calling | ||
| 414 | * it again | ||
| 415 | */ | ||
| 416 | if (link_up) | ||
| 417 | break; | ||
| 418 | |||
| 419 | if (lane_down[0]) | ||
| 420 | pr_debug("XGBE: detected link down on lane 0\n"); | ||
| 421 | |||
| 422 | if (lane_down[1]) | ||
| 423 | pr_debug("XGBE: detected link down on lane 1\n"); | ||
| 424 | |||
| 425 | if (++retries > 1) { | ||
| 426 | pr_debug("XGBE: timeout waiting for serdes link up\n"); | ||
| 427 | return -ETIMEDOUT; | ||
| 428 | } | ||
| 429 | mdelay(100); | ||
| 430 | } while (!link_up); | ||
| 431 | |||
| 432 | pr_debug("XGBE: PCSR link is up\n"); | ||
| 433 | return 0; | ||
| 434 | } | ||
| 435 | |||
/* Apply the CM/C1/C2 coefficient register sequence to one lane.
 * NOTE(review): the cm, c1 and c2 arguments are currently unused --
 * the written values come entirely from the fixed cfg_cm_c1_c2 table.
 */
static void netcp_xgbe_serdes_setup_cm_c1_c2(void __iomem *serdes_regs,
					     int lane, int cm, int c1, int c2)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cfg_cm_c1_c2); i++) {
		reg_rmw(serdes_regs + cfg_cm_c1_c2[i].ofs + (0x200 * lane),
			cfg_cm_c1_c2[i].val,
			cfg_cm_c1_c2[i].mask);
	}
}
| 447 | |||
/* Hard-reset the serdes block by pulsing POR_EN in CONFIG.CPU_CTRL. */
static void netcp_xgbe_reset_serdes(void __iomem *serdes_regs)
{
	/* Toggle the POR_EN bit in CONFIG.CPU_CTRL */
	/* enable POR_EN bit */
	reg_rmw(serdes_regs + PCSR_CPU_CTRL_OFFSET, POR_EN, POR_EN);
	usleep_range(10, 100);

	/* disable POR_EN bit */
	reg_rmw(serdes_regs + PCSR_CPU_CTRL_OFFSET, 0, POR_EN);
	usleep_range(10, 100);
}
| 459 | |||
| 460 | static int netcp_xgbe_serdes_config(void __iomem *serdes_regs, | ||
| 461 | void __iomem *sw_regs) | ||
| 462 | { | ||
| 463 | u32 ret, i; | ||
| 464 | |||
| 465 | netcp_xgbe_serdes_pll_disable(serdes_regs); | ||
| 466 | netcp_xgbe_serdes_cmu_init(serdes_regs); | ||
| 467 | |||
| 468 | for (i = 0; i < 2; i++) | ||
| 469 | netcp_xgbe_serdes_lane_config(serdes_regs, i); | ||
| 470 | |||
| 471 | netcp_xgbe_serdes_com_enable(serdes_regs); | ||
| 472 | /* This is EVM + RTM-BOC specific */ | ||
| 473 | for (i = 0; i < 2; i++) | ||
| 474 | netcp_xgbe_serdes_setup_cm_c1_c2(serdes_regs, i, 0, 0, 5); | ||
| 475 | |||
| 476 | netcp_xgbe_serdes_pll_enable(serdes_regs); | ||
| 477 | for (i = 0; i < 2; i++) | ||
| 478 | netcp_xgbe_serdes_lane_enable(serdes_regs, i); | ||
| 479 | |||
| 480 | /* SB PLL Status Poll */ | ||
| 481 | ret = netcp_xgbe_wait_pll_locked(sw_regs); | ||
| 482 | if (ret) | ||
| 483 | return ret; | ||
| 484 | |||
| 485 | netcp_xgbe_serdes_enable_xgmii_port(sw_regs); | ||
| 486 | netcp_xgbe_serdes_check_lane(serdes_regs, sw_regs); | ||
| 487 | return ret; | ||
| 488 | } | ||
| 489 | |||
| 490 | int netcp_xgbe_serdes_init(void __iomem *serdes_regs, void __iomem *xgbe_regs) | ||
| 491 | { | ||
| 492 | u32 val; | ||
| 493 | |||
| 494 | /* read COMLANE bits 4:0 */ | ||
| 495 | val = readl(serdes_regs + 0xa00); | ||
| 496 | if (val & 0x1f) { | ||
| 497 | pr_debug("XGBE: serdes already in operation - reset\n"); | ||
| 498 | netcp_xgbe_reset_serdes(serdes_regs); | ||
| 499 | } | ||
| 500 | return netcp_xgbe_serdes_config(serdes_regs, xgbe_regs); | ||
| 501 | } | ||
