-rw-r--r-- | Documentation/driver-model/devres.txt | 4
-rw-r--r-- | arch/powerpc/configs/dpaa.config | 3
-rw-r--r-- | drivers/base/devres.c | 66
-rw-r--r-- | drivers/net/ethernet/freescale/Kconfig | 2
-rw-r--r-- | drivers/net/ethernet/freescale/Makefile | 1
-rw-r--r-- | drivers/net/ethernet/freescale/dpaa/Kconfig | 10
-rw-r--r-- | drivers/net/ethernet/freescale/dpaa/Makefile | 12
-rw-r--r-- | drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2753
-rw-r--r-- | drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 185
-rw-r--r-- | drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c | 165
-rw-r--r-- | drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h | 141
-rw-r--r-- | drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 417
-rw-r--r-- | include/linux/device.h | 19
13 files changed, 3778 insertions(+), 0 deletions(-)
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt index 167070895498..ca9d1eb46bc0 100644 --- a/Documentation/driver-model/devres.txt +++ b/Documentation/driver-model/devres.txt | |||
@@ -332,6 +332,10 @@ MEM | |||
332 | MFD | 332 | MFD |
333 | devm_mfd_add_devices() | 333 | devm_mfd_add_devices() |
334 | 334 | ||
335 | PER-CPU MEM | ||
336 | devm_alloc_percpu() | ||
337 | devm_free_percpu() | ||
338 | |||
335 | PCI | 339 | PCI |
336 | pcim_enable_device() : after success, all PCI ops become managed | 340 | pcim_enable_device() : after success, all PCI ops become managed |
337 | pcim_pin_device() : keep PCI device enabled after release | 341 | pcim_pin_device() : keep PCI device enabled after release |
diff --git a/arch/powerpc/configs/dpaa.config b/arch/powerpc/configs/dpaa.config index efa99c048543..2fe76f5e938a 100644 --- a/arch/powerpc/configs/dpaa.config +++ b/arch/powerpc/configs/dpaa.config | |||
@@ -1 +1,4 @@ | |||
1 | CONFIG_FSL_DPAA=y | 1 | CONFIG_FSL_DPAA=y |
2 | CONFIG_FSL_PAMU=y | ||
3 | CONFIG_FSL_FMAN=y | ||
4 | CONFIG_FSL_DPAA_ETH=y | ||
diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 8fc654f0807b..71d577025285 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/device.h> | 10 | #include <linux/device.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/percpu.h> | ||
13 | 14 | ||
14 | #include "base.h" | 15 | #include "base.h" |
15 | 16 | ||
@@ -985,3 +986,68 @@ void devm_free_pages(struct device *dev, unsigned long addr) | |||
985 | &devres)); | 986 | &devres)); |
986 | } | 987 | } |
987 | EXPORT_SYMBOL_GPL(devm_free_pages); | 988 | EXPORT_SYMBOL_GPL(devm_free_pages); |
989 | |||
990 | static void devm_percpu_release(struct device *dev, void *pdata) | ||
991 | { | ||
992 | void __percpu *p; | ||
993 | |||
994 | p = *(void __percpu **)pdata; | ||
995 | free_percpu(p); | ||
996 | } | ||
997 | |||
998 | static int devm_percpu_match(struct device *dev, void *data, void *p) | ||
999 | { | ||
1000 | struct devres *devr = container_of(data, struct devres, data); | ||
1001 | |||
1002 | return *(void **)devr->data == p; | ||
1003 | } | ||
1004 | |||
1005 | /** | ||
1006 | * __devm_alloc_percpu - Resource-managed alloc_percpu | ||
1007 | * @dev: Device to allocate per-cpu memory for | ||
1008 | * @size: Size of per-cpu memory to allocate | ||
1009 | * @align: Alignment of per-cpu memory to allocate | ||
1010 | * | ||
1011 | * Managed alloc_percpu. Per-cpu memory allocated with this function is | ||
1012 | * automatically freed on driver detach. | ||
1013 | * | ||
1014 | * RETURNS: | ||
1015 | * Pointer to allocated memory on success, NULL on failure. | ||
1016 | */ | ||
1017 | void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, | ||
1018 | size_t align) | ||
1019 | { | ||
1020 | void *p; | ||
1021 | void __percpu *pcpu; | ||
1022 | |||
1023 | pcpu = __alloc_percpu(size, align); | ||
1024 | if (!pcpu) | ||
1025 | return NULL; | ||
1026 | |||
1027 | p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL); | ||
1028 | if (!p) { | ||
1029 | free_percpu(pcpu); | ||
1030 | return NULL; | ||
1031 | } | ||
1032 | |||
1033 | *(void __percpu **)p = pcpu; | ||
1034 | |||
1035 | devres_add(dev, p); | ||
1036 | |||
1037 | return pcpu; | ||
1038 | } | ||
1039 | EXPORT_SYMBOL_GPL(__devm_alloc_percpu); | ||
1040 | |||
1041 | /** | ||
1042 | * devm_free_percpu - Resource-managed free_percpu | ||
1043 | * @dev: Device this memory belongs to | ||
1044 | * @pdata: Per-cpu memory to free | ||
1045 | * | ||
1046 | * Free memory allocated with devm_alloc_percpu(). | ||
1047 | */ | ||
1048 | void devm_free_percpu(struct device *dev, void __percpu *pdata) | ||
1049 | { | ||
1050 | WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match, | ||
1051 | (void *)pdata)); | ||
1052 | } | ||
1053 | EXPORT_SYMBOL_GPL(devm_free_percpu); | ||
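For context, a minimal usage sketch of the managed per-cpu API added above. It is not part of this patch, and the driver name, structure and probe function are hypothetical; devm_alloc_percpu() is the type-safe wrapper around __devm_alloc_percpu() that this series adds to include/linux/device.h (see the diffstat):

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/percpu.h>

struct foo_stats {
	u64 rx_packets;
	u64 rx_bytes;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_stats __percpu *stats;

	/* Per-cpu memory tied to the device lifetime; freed on driver detach */
	stats = devm_alloc_percpu(&pdev->dev, *stats);
	if (!stats)
		return -ENOMEM;

	/* the hot path would then use this_cpu_ptr(stats) or per_cpu_ptr(stats, cpu) */
	return 0;
}

An explicit devm_free_percpu(&pdev->dev, stats) is only needed if the memory must be released before the device is detached.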
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index d1ca45fbb164..aa3f615886b4 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig | |||
@@ -93,4 +93,6 @@ config GIANFAR | |||
93 | and MPC86xx family of chips, the eTSEC on LS1021A and the FEC | 93 | and MPC86xx family of chips, the eTSEC on LS1021A and the FEC |
94 | on the 8540. | 94 | on the 8540. |
95 | 95 | ||
96 | source "drivers/net/ethernet/freescale/dpaa/Kconfig" | ||
97 | |||
96 | endif # NET_VENDOR_FREESCALE | 98 | endif # NET_VENDOR_FREESCALE |
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index cbe21dc7e37e..4a13115155c9 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile | |||
@@ -22,3 +22,4 @@ obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o | |||
22 | ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o | 22 | ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o |
23 | 23 | ||
24 | obj-$(CONFIG_FSL_FMAN) += fman/ | 24 | obj-$(CONFIG_FSL_FMAN) += fman/ |
25 | obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/ | ||
diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig new file mode 100644 index 000000000000..f3a3454805f9 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa/Kconfig | |||
@@ -0,0 +1,10 @@ | |||
1 | menuconfig FSL_DPAA_ETH | ||
2 | tristate "DPAA Ethernet" | ||
3 | depends on FSL_SOC && FSL_DPAA && FSL_FMAN | ||
4 | select PHYLIB | ||
5 | select FSL_FMAN_MAC | ||
6 | ---help--- | ||
7 | Data Path Acceleration Architecture Ethernet driver, | ||
8 | supporting the Freescale QorIQ chips. | ||
9 | Depends on the Freescale Buffer Manager and Queue Manager | ||
10 | driver and the Frame Manager driver. | ||
diff --git a/drivers/net/ethernet/freescale/dpaa/Makefile b/drivers/net/ethernet/freescale/dpaa/Makefile new file mode 100644 index 000000000000..7db50bccb137 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa/Makefile | |||
@@ -0,0 +1,12 @@ | |||
1 | # | ||
2 | # Makefile for the Freescale DPAA Ethernet controllers | ||
3 | # | ||
4 | |||
5 | # Include FMan headers | ||
6 | FMAN = $(srctree)/drivers/net/ethernet/freescale/fman | ||
7 | ccflags-y += -I$(FMAN) | ||
8 | |||
9 | obj-$(CONFIG_FSL_DPAA_ETH) += fsl_dpa.o | ||
10 | |||
11 | fsl_dpa-objs += dpaa_eth.o dpaa_ethtool.o dpaa_eth_sysfs.o | ||
12 | CFLAGS_dpaa_eth.o := -I$(src) | ||
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c new file mode 100644 index 000000000000..3c48a84dec86 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | |||
@@ -0,0 +1,2753 @@ | |||
1 | /* Copyright 2008 - 2016 Freescale Semiconductor Inc. | ||
2 | * | ||
3 | * Redistribution and use in source and binary forms, with or without | ||
4 | * modification, are permitted provided that the following conditions are met: | ||
5 | * * Redistributions of source code must retain the above copyright | ||
6 | * notice, this list of conditions and the following disclaimer. | ||
7 | * * Redistributions in binary form must reproduce the above copyright | ||
8 | * notice, this list of conditions and the following disclaimer in the | ||
9 | * documentation and/or other materials provided with the distribution. | ||
10 | * * Neither the name of Freescale Semiconductor nor the | ||
11 | * names of its contributors may be used to endorse or promote products | ||
12 | * derived from this software without specific prior written permission. | ||
13 | * | ||
14 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
15 | * GNU General Public License ("GPL") as published by the Free Software | ||
16 | * Foundation, either version 2 of that License or (at your option) any | ||
17 | * later version. | ||
18 | * | ||
19 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
22 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
23 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
25 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
29 | */ | ||
30 | |||
31 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
32 | |||
33 | #include <linux/init.h> | ||
34 | #include <linux/module.h> | ||
35 | #include <linux/of_platform.h> | ||
36 | #include <linux/of_mdio.h> | ||
37 | #include <linux/of_net.h> | ||
38 | #include <linux/io.h> | ||
39 | #include <linux/if_arp.h> | ||
40 | #include <linux/if_vlan.h> | ||
41 | #include <linux/icmp.h> | ||
42 | #include <linux/ip.h> | ||
43 | #include <linux/ipv6.h> | ||
44 | #include <linux/udp.h> | ||
45 | #include <linux/tcp.h> | ||
46 | #include <linux/net.h> | ||
47 | #include <linux/skbuff.h> | ||
48 | #include <linux/etherdevice.h> | ||
49 | #include <linux/if_ether.h> | ||
50 | #include <linux/highmem.h> | ||
51 | #include <linux/percpu.h> | ||
52 | #include <linux/dma-mapping.h> | ||
53 | #include <linux/sort.h> | ||
54 | #include <soc/fsl/bman.h> | ||
55 | #include <soc/fsl/qman.h> | ||
56 | |||
57 | #include "fman.h" | ||
58 | #include "fman_port.h" | ||
59 | #include "mac.h" | ||
60 | #include "dpaa_eth.h" | ||
61 | |||
62 | /* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files | ||
63 | * using trace events only need to #include <trace/events/sched.h> | ||
64 | */ | ||
65 | #define CREATE_TRACE_POINTS | ||
66 | #include "dpaa_eth_trace.h" | ||
67 | |||
68 | static int debug = -1; | ||
69 | module_param(debug, int, 0444); | ||
70 | MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)"); | ||
71 | |||
72 | static u16 tx_timeout = 1000; | ||
73 | module_param(tx_timeout, ushort, 0444); | ||
74 | MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); | ||
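When the driver is built as a module (fsl_dpa.ko, per the Makefile above), both parameters can be set at load time, e.g. "modprobe fsl_dpa tx_timeout=2000" to raise the netdev watchdog timeout to two seconds; built in, the equivalent kernel command-line form would be fsl_dpa.tx_timeout=2000. (Illustrative values only.)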
75 | |||
76 | #define FM_FD_STAT_RX_ERRORS \ | ||
77 | (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \ | ||
78 | FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \ | ||
79 | FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \ | ||
80 | FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \ | ||
81 | FM_FD_ERR_PRS_HDR_ERR) | ||
82 | |||
83 | #define FM_FD_STAT_TX_ERRORS \ | ||
84 | (FM_FD_ERR_UNSUPPORTED_FORMAT | \ | ||
85 | FM_FD_ERR_LENGTH | FM_FD_ERR_DMA) | ||
86 | |||
87 | #define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ | ||
88 | NETIF_MSG_LINK | NETIF_MSG_IFUP | \ | ||
89 | NETIF_MSG_IFDOWN) | ||
90 | |||
91 | #define DPAA_INGRESS_CS_THRESHOLD 0x10000000 | ||
92 | /* Ingress congestion threshold on FMan ports | ||
93 | * The size in bytes of the ingress tail-drop threshold on FMan ports. | ||
94 | * Traffic piling up above this value will be rejected by QMan and discarded | ||
95 | * by FMan. | ||
96 | */ | ||
97 | |||
98 | /* Size in bytes of the FQ taildrop threshold */ | ||
99 | #define DPAA_FQ_TD 0x200000 | ||
100 | |||
101 | #define DPAA_CS_THRESHOLD_1G 0x06000000 | ||
102 | /* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000 | ||
103 | * The size in bytes of the egress Congestion State notification threshold on | ||
104 | * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a | ||
105 | * tight loop (e.g. by sending UDP datagrams at "while(1) speed"), | ||
106 | * and the larger the frame size, the more acute the problem. | ||
107 | * So we have to find a balance between these factors: | ||
108 | * - avoiding the device staying congested for a prolonged time (risking | ||
109 | * the netdev watchdog to fire - see also the tx_timeout module param); | ||
110 | * - affecting performance of protocols such as TCP, which otherwise | ||
111 | * behave well under the congestion notification mechanism; | ||
112 | * - preventing the Tx cores from tightly-looping (as if the congestion | ||
113 | * threshold was too low to be effective); | ||
114 | * - running out of memory if the CS threshold is set too high. | ||
115 | */ | ||
116 | |||
117 | #define DPAA_CS_THRESHOLD_10G 0x10000000 | ||
118 | /* The size in bytes of the egress Congestion State notification threshold on | ||
119 | * 10G ports, range 0x1000 .. 0x10000000 | ||
120 | */ | ||
121 | |||
122 | /* Largest value that the FQD's OAL field can hold */ | ||
123 | #define FSL_QMAN_MAX_OAL 127 | ||
124 | |||
125 | /* Default alignment for start of data in an Rx FD */ | ||
126 | #define DPAA_FD_DATA_ALIGNMENT 16 | ||
127 | |||
128 | /* Values for the L3R field of the FM Parse Results | ||
129 | */ | ||
130 | /* L3 Type field: First IP Present IPv4 */ | ||
131 | #define FM_L3_PARSE_RESULT_IPV4 0x8000 | ||
132 | /* L3 Type field: First IP Present IPv6 */ | ||
133 | #define FM_L3_PARSE_RESULT_IPV6 0x4000 | ||
134 | /* Values for the L4R field of the FM Parse Results */ | ||
135 | /* L4 Type field: UDP */ | ||
136 | #define FM_L4_PARSE_RESULT_UDP 0x40 | ||
137 | /* L4 Type field: TCP */ | ||
138 | #define FM_L4_PARSE_RESULT_TCP 0x20 | ||
139 | |||
140 | #define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */ | ||
141 | #define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */ | ||
142 | |||
143 | #define FSL_DPAA_BPID_INV 0xff | ||
144 | #define FSL_DPAA_ETH_MAX_BUF_COUNT 128 | ||
145 | #define FSL_DPAA_ETH_REFILL_THRESHOLD 80 | ||
146 | |||
147 | #define DPAA_TX_PRIV_DATA_SIZE 16 | ||
148 | #define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result) | ||
149 | #define DPAA_TIME_STAMP_SIZE 8 | ||
150 | #define DPAA_HASH_RESULTS_SIZE 8 | ||
151 | #define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \ | ||
152 | dpaa_rx_extra_headroom) | ||
153 | |||
154 | #define DPAA_ETH_RX_QUEUES 128 | ||
155 | |||
156 | #define DPAA_ENQUEUE_RETRIES 100000 | ||
157 | |||
158 | enum port_type {RX, TX}; | ||
159 | |||
160 | struct fm_port_fqs { | ||
161 | struct dpaa_fq *tx_defq; | ||
162 | struct dpaa_fq *tx_errq; | ||
163 | struct dpaa_fq *rx_defq; | ||
164 | struct dpaa_fq *rx_errq; | ||
165 | }; | ||
166 | |||
167 | /* All the dpa bps in use at any moment */ | ||
168 | static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS]; | ||
169 | |||
170 | /* The raw buffer size must be cacheline aligned */ | ||
171 | #define DPAA_BP_RAW_SIZE 4096 | ||
172 | /* When using more than one buffer pool, the raw sizes are as follows: | ||
173 | * 1 bp: 4KB | ||
174 | * 2 bp: 2KB, 4KB | ||
175 | * 3 bp: 1KB, 2KB, 4KB | ||
176 | * 4 bp: 1KB, 2KB, 4KB, 8KB | ||
177 | */ | ||
178 | static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt) | ||
179 | { | ||
180 | size_t res = DPAA_BP_RAW_SIZE / 4; | ||
181 | u8 i; | ||
182 | |||
183 | for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++) | ||
184 | res *= 2; | ||
185 | return res; | ||
186 | } | ||
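As a worked example of the sizing rule above: the result starts at DPAA_BP_RAW_SIZE / 4 = 1024 bytes and is doubled once for each step from min(cnt, 3) up to (3 + index). With cnt = 4 and index = 3 the loop runs for i = 3..5, doubling 1024 three times to 8192 bytes (the 8KB pool in the list above); with cnt = 3 and index = 0 the loop body never executes, leaving the 1KB first pool.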
187 | |||
188 | /* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is | ||
189 | * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that, | ||
190 | * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us | ||
191 | * half-page-aligned buffers, so we reserve some more space for start-of-buffer | ||
192 | * alignment. | ||
193 | */ | ||
194 | #define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES) | ||
195 | |||
196 | static int dpaa_max_frm; | ||
197 | |||
198 | static int dpaa_rx_extra_headroom; | ||
199 | |||
200 | #define dpaa_get_max_mtu() \ | ||
201 | (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN)) | ||
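For instance, assuming the common FMan default maximum frame size of 1522 bytes (dpaa_max_frm is filled in from the FMan configuration elsewhere in the driver), dpaa_get_max_mtu() evaluates to 1522 - (VLAN_ETH_HLEN + ETH_FCS_LEN) = 1522 - (18 + 4) = 1500, i.e. the standard Ethernet MTU.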
202 | |||
203 | static int dpaa_netdev_init(struct net_device *net_dev, | ||
204 | const struct net_device_ops *dpaa_ops, | ||
205 | u16 tx_timeout) | ||
206 | { | ||
207 | struct dpaa_priv *priv = netdev_priv(net_dev); | ||
208 | struct device *dev = net_dev->dev.parent; | ||
209 | struct dpaa_percpu_priv *percpu_priv; | ||
210 | const u8 *mac_addr; | ||
211 | int i, err; | ||
212 | |||
213 | /* Although we access another CPU's private data here | ||
214 | * we do it at initialization so it is safe | ||
215 | */ | ||
216 | for_each_possible_cpu(i) { | ||
217 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); | ||
218 | percpu_priv->net_dev = net_dev; | ||
219 | } | ||
220 | |||
221 | net_dev->netdev_ops = dpaa_ops; | ||
222 | mac_addr = priv->mac_dev->addr; | ||
223 | |||
224 | net_dev->mem_start = priv->mac_dev->res->start; | ||
225 | net_dev->mem_end = priv->mac_dev->res->end; | ||
226 | |||
227 | net_dev->min_mtu = ETH_MIN_MTU; | ||
228 | net_dev->max_mtu = dpaa_get_max_mtu(); | ||
229 | |||
230 | net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | ||
231 | NETIF_F_LLTX); | ||
232 | |||
233 | net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA; | ||
234 | /* The kernels enables GSO automatically, if we declare NETIF_F_SG. | ||
235 | * For conformity, we'll still declare GSO explicitly. | ||
236 | */ | ||
237 | net_dev->features |= NETIF_F_GSO; | ||
238 | |||
239 | net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | ||
240 | /* we do not want shared skbs on TX */ | ||
241 | net_dev->priv_flags &= ~IFF_TX_SKB_SHARING; | ||
242 | |||
243 | net_dev->features |= net_dev->hw_features; | ||
244 | net_dev->vlan_features = net_dev->features; | ||
245 | |||
246 | memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); | ||
247 | memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); | ||
248 | |||
249 | net_dev->ethtool_ops = &dpaa_ethtool_ops; | ||
250 | |||
251 | net_dev->needed_headroom = priv->tx_headroom; | ||
252 | net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout); | ||
253 | |||
254 | /* start without the RUNNING flag, phylib controls it later */ | ||
255 | netif_carrier_off(net_dev); | ||
256 | |||
257 | err = register_netdev(net_dev); | ||
258 | if (err < 0) { | ||
259 | dev_err(dev, "register_netdev() = %d\n", err); | ||
260 | return err; | ||
261 | } | ||
262 | |||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | static int dpaa_stop(struct net_device *net_dev) | ||
267 | { | ||
268 | struct mac_device *mac_dev; | ||
269 | struct dpaa_priv *priv; | ||
270 | int i, err, error; | ||
271 | |||
272 | priv = netdev_priv(net_dev); | ||
273 | mac_dev = priv->mac_dev; | ||
274 | |||
275 | netif_tx_stop_all_queues(net_dev); | ||
276 | /* Allow the Fman (Tx) port to process in-flight frames before we | ||
277 | * try switching it off. | ||
278 | */ | ||
279 | usleep_range(5000, 10000); | ||
280 | |||
281 | err = mac_dev->stop(mac_dev); | ||
282 | if (err < 0) | ||
283 | netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n", | ||
284 | err); | ||
285 | |||
286 | for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { | ||
287 | error = fman_port_disable(mac_dev->port[i]); | ||
288 | if (error) | ||
289 | err = error; | ||
290 | } | ||
291 | |||
292 | if (net_dev->phydev) | ||
293 | phy_disconnect(net_dev->phydev); | ||
294 | net_dev->phydev = NULL; | ||
295 | |||
296 | return err; | ||
297 | } | ||
298 | |||
299 | static void dpaa_tx_timeout(struct net_device *net_dev) | ||
300 | { | ||
301 | struct dpaa_percpu_priv *percpu_priv; | ||
302 | const struct dpaa_priv *priv; | ||
303 | |||
304 | priv = netdev_priv(net_dev); | ||
305 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | ||
306 | |||
307 | netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n", | ||
308 | jiffies_to_msecs(jiffies - dev_trans_start(net_dev))); | ||
309 | |||
310 | percpu_priv->stats.tx_errors++; | ||
311 | } | ||
312 | |||
313 | /* Calculates the statistics for the given device by adding the statistics | ||
314 | * collected by each CPU. | ||
315 | */ | ||
316 | static struct rtnl_link_stats64 *dpaa_get_stats64(struct net_device *net_dev, | ||
317 | struct rtnl_link_stats64 *s) | ||
318 | { | ||
319 | int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64); | ||
320 | struct dpaa_priv *priv = netdev_priv(net_dev); | ||
321 | struct dpaa_percpu_priv *percpu_priv; | ||
322 | u64 *netstats = (u64 *)s; | ||
323 | u64 *cpustats; | ||
324 | int i, j; | ||
325 | |||
326 | for_each_possible_cpu(i) { | ||
327 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); | ||
328 | |||
329 | cpustats = (u64 *)&percpu_priv->stats; | ||
330 | |||
331 | /* add stats from all CPUs */ | ||
332 | for (j = 0; j < numstats; j++) | ||
333 | netstats[j] += cpustats[j]; | ||
334 | } | ||
335 | |||
336 | return s; | ||
337 | } | ||
338 | |||
339 | static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev) | ||
340 | { | ||
341 | struct platform_device *of_dev; | ||
342 | struct dpaa_eth_data *eth_data; | ||
343 | struct device *dpaa_dev, *dev; | ||
344 | struct device_node *mac_node; | ||
345 | struct mac_device *mac_dev; | ||
346 | |||
347 | dpaa_dev = &pdev->dev; | ||
348 | eth_data = dpaa_dev->platform_data; | ||
349 | if (!eth_data) | ||
350 | return ERR_PTR(-ENODEV); | ||
351 | |||
352 | mac_node = eth_data->mac_node; | ||
353 | |||
354 | of_dev = of_find_device_by_node(mac_node); | ||
355 | if (!of_dev) { | ||
356 | dev_err(dpaa_dev, "of_find_device_by_node(%s) failed\n", | ||
357 | mac_node->full_name); | ||
358 | of_node_put(mac_node); | ||
359 | return ERR_PTR(-EINVAL); | ||
360 | } | ||
361 | of_node_put(mac_node); | ||
362 | |||
363 | dev = &of_dev->dev; | ||
364 | |||
365 | mac_dev = dev_get_drvdata(dev); | ||
366 | if (!mac_dev) { | ||
367 | dev_err(dpaa_dev, "dev_get_drvdata(%s) failed\n", | ||
368 | dev_name(dev)); | ||
369 | return ERR_PTR(-EINVAL); | ||
370 | } | ||
371 | |||
372 | return mac_dev; | ||
373 | } | ||
374 | |||
375 | static int dpaa_set_mac_address(struct net_device *net_dev, void *addr) | ||
376 | { | ||
377 | const struct dpaa_priv *priv; | ||
378 | struct mac_device *mac_dev; | ||
379 | struct sockaddr old_addr; | ||
380 | int err; | ||
381 | |||
382 | priv = netdev_priv(net_dev); | ||
383 | |||
384 | memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN); | ||
385 | |||
386 | err = eth_mac_addr(net_dev, addr); | ||
387 | if (err < 0) { | ||
388 | netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err); | ||
389 | return err; | ||
390 | } | ||
391 | |||
392 | mac_dev = priv->mac_dev; | ||
393 | |||
394 | err = mac_dev->change_addr(mac_dev->fman_mac, | ||
395 | (enet_addr_t *)net_dev->dev_addr); | ||
396 | if (err < 0) { | ||
397 | netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n", | ||
398 | err); | ||
399 | /* reverting to previous address */ | ||
400 | eth_mac_addr(net_dev, &old_addr); | ||
401 | |||
402 | return err; | ||
403 | } | ||
404 | |||
405 | return 0; | ||
406 | } | ||
407 | |||
408 | static void dpaa_set_rx_mode(struct net_device *net_dev) | ||
409 | { | ||
410 | const struct dpaa_priv *priv; | ||
411 | int err; | ||
412 | |||
413 | priv = netdev_priv(net_dev); | ||
414 | |||
415 | if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) { | ||
416 | priv->mac_dev->promisc = !priv->mac_dev->promisc; | ||
417 | err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac, | ||
418 | priv->mac_dev->promisc); | ||
419 | if (err < 0) | ||
420 | netif_err(priv, drv, net_dev, | ||
421 | "mac_dev->set_promisc() = %d\n", | ||
422 | err); | ||
423 | } | ||
424 | |||
425 | err = priv->mac_dev->set_multi(net_dev, priv->mac_dev); | ||
426 | if (err < 0) | ||
427 | netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n", | ||
428 | err); | ||
429 | } | ||
430 | |||
431 | static struct dpaa_bp *dpaa_bpid2pool(int bpid) | ||
432 | { | ||
433 | if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS)) | ||
434 | return NULL; | ||
435 | |||
436 | return dpaa_bp_array[bpid]; | ||
437 | } | ||
438 | |||
439 | /* checks if this bpool is already allocated */ | ||
440 | static bool dpaa_bpid2pool_use(int bpid) | ||
441 | { | ||
442 | if (dpaa_bpid2pool(bpid)) { | ||
443 | atomic_inc(&dpaa_bp_array[bpid]->refs); | ||
444 | return true; | ||
445 | } | ||
446 | |||
447 | return false; | ||
448 | } | ||
449 | |||
450 | /* called only once per bpid by dpaa_bp_alloc_pool() */ | ||
451 | static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp) | ||
452 | { | ||
453 | dpaa_bp_array[bpid] = dpaa_bp; | ||
454 | atomic_set(&dpaa_bp->refs, 1); | ||
455 | } | ||
456 | |||
457 | static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp) | ||
458 | { | ||
459 | int err; | ||
460 | |||
461 | if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) { | ||
462 | pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n", | ||
463 | __func__); | ||
464 | return -EINVAL; | ||
465 | } | ||
466 | |||
467 | /* If the pool is already specified, we only create one per bpid */ | ||
468 | if (dpaa_bp->bpid != FSL_DPAA_BPID_INV && | ||
469 | dpaa_bpid2pool_use(dpaa_bp->bpid)) | ||
470 | return 0; | ||
471 | |||
472 | if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) { | ||
473 | dpaa_bp->pool = bman_new_pool(); | ||
474 | if (!dpaa_bp->pool) { | ||
475 | pr_err("%s: bman_new_pool() failed\n", | ||
476 | __func__); | ||
477 | return -ENODEV; | ||
478 | } | ||
479 | |||
480 | dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool); | ||
481 | } | ||
482 | |||
483 | if (dpaa_bp->seed_cb) { | ||
484 | err = dpaa_bp->seed_cb(dpaa_bp); | ||
485 | if (err) | ||
486 | goto pool_seed_failed; | ||
487 | } | ||
488 | |||
489 | dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp); | ||
490 | |||
491 | return 0; | ||
492 | |||
493 | pool_seed_failed: | ||
494 | pr_err("%s: pool seeding failed\n", __func__); | ||
495 | bman_free_pool(dpaa_bp->pool); | ||
496 | |||
497 | return err; | ||
498 | } | ||
499 | |||
500 | /* remove and free all the buffers from the given buffer pool */ | ||
501 | static void dpaa_bp_drain(struct dpaa_bp *bp) | ||
502 | { | ||
503 | u8 num = 8; | ||
504 | int ret; | ||
505 | |||
506 | do { | ||
507 | struct bm_buffer bmb[8]; | ||
508 | int i; | ||
509 | |||
510 | ret = bman_acquire(bp->pool, bmb, num); | ||
511 | if (ret < 0) { | ||
512 | if (num == 8) { | ||
513 | /* we have fewer than 8 buffers left; | ||
514 | * drain them one by one | ||
515 | */ | ||
516 | num = 1; | ||
517 | ret = 1; | ||
518 | continue; | ||
519 | } else { | ||
520 | /* Pool is fully drained */ | ||
521 | break; | ||
522 | } | ||
523 | } | ||
524 | |||
525 | if (bp->free_buf_cb) | ||
526 | for (i = 0; i < num; i++) | ||
527 | bp->free_buf_cb(bp, &bmb[i]); | ||
528 | } while (ret > 0); | ||
529 | } | ||
530 | |||
531 | static void dpaa_bp_free(struct dpaa_bp *dpaa_bp) | ||
532 | { | ||
533 | struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid); | ||
534 | |||
535 | /* the mapping between bpid and dpaa_bp is done very late in the | ||
536 | * allocation procedure; if something failed before the mapping, the bp | ||
537 | * was not configured, therefore we don't need the below instructions | ||
538 | */ | ||
539 | if (!bp) | ||
540 | return; | ||
541 | |||
542 | if (!atomic_dec_and_test(&bp->refs)) | ||
543 | return; | ||
544 | |||
545 | if (bp->free_buf_cb) | ||
546 | dpaa_bp_drain(bp); | ||
547 | |||
548 | dpaa_bp_array[bp->bpid] = NULL; | ||
549 | bman_free_pool(bp->pool); | ||
550 | } | ||
551 | |||
552 | static void dpaa_bps_free(struct dpaa_priv *priv) | ||
553 | { | ||
554 | int i; | ||
555 | |||
556 | for (i = 0; i < DPAA_BPS_NUM; i++) | ||
557 | dpaa_bp_free(priv->dpaa_bps[i]); | ||
558 | } | ||
559 | |||
560 | /* Use multiple WQs for FQ assignment: | ||
561 | * - Tx Confirmation queues go to WQ1. | ||
562 | * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance | ||
563 | * to be scheduled, in case there are many more FQs in WQ3). | ||
564 | * - Rx Default and Tx queues go to WQ3 (no differentiation between | ||
565 | * Rx and Tx traffic). | ||
566 | * This ensures that Tx-confirmed buffers are timely released. In particular, | ||
567 | * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they | ||
568 | * are greatly outnumbered by other FQs in the system, while | ||
569 | * dequeue scheduling is round-robin. | ||
570 | */ | ||
571 | static inline void dpaa_assign_wq(struct dpaa_fq *fq) | ||
572 | { | ||
573 | switch (fq->fq_type) { | ||
574 | case FQ_TYPE_TX_CONFIRM: | ||
575 | case FQ_TYPE_TX_CONF_MQ: | ||
576 | fq->wq = 1; | ||
577 | break; | ||
578 | case FQ_TYPE_RX_ERROR: | ||
579 | case FQ_TYPE_TX_ERROR: | ||
580 | fq->wq = 2; | ||
581 | break; | ||
582 | case FQ_TYPE_RX_DEFAULT: | ||
583 | case FQ_TYPE_TX: | ||
584 | fq->wq = 3; | ||
585 | break; | ||
586 | default: | ||
587 | WARN(1, "Invalid FQ type %d for FQID %d!\n", | ||
588 | fq->fq_type, fq->fqid); | ||
589 | } | ||
590 | } | ||
591 | |||
592 | static struct dpaa_fq *dpaa_fq_alloc(struct device *dev, | ||
593 | u32 start, u32 count, | ||
594 | struct list_head *list, | ||
595 | enum dpaa_fq_type fq_type) | ||
596 | { | ||
597 | struct dpaa_fq *dpaa_fq; | ||
598 | int i; | ||
599 | |||
600 | dpaa_fq = devm_kzalloc(dev, sizeof(*dpaa_fq) * count, | ||
601 | GFP_KERNEL); | ||
602 | if (!dpaa_fq) | ||
603 | return NULL; | ||
604 | |||
605 | for (i = 0; i < count; i++) { | ||
606 | dpaa_fq[i].fq_type = fq_type; | ||
607 | dpaa_fq[i].fqid = start ? start + i : 0; | ||
608 | list_add_tail(&dpaa_fq[i].list, list); | ||
609 | } | ||
610 | |||
611 | for (i = 0; i < count; i++) | ||
612 | dpaa_assign_wq(dpaa_fq + i); | ||
613 | |||
614 | return dpaa_fq; | ||
615 | } | ||
616 | |||
617 | static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list, | ||
618 | struct fm_port_fqs *port_fqs) | ||
619 | { | ||
620 | struct dpaa_fq *dpaa_fq; | ||
621 | |||
622 | dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR); | ||
623 | if (!dpaa_fq) | ||
624 | goto fq_alloc_failed; | ||
625 | |||
626 | port_fqs->rx_errq = &dpaa_fq[0]; | ||
627 | |||
628 | dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT); | ||
629 | if (!dpaa_fq) | ||
630 | goto fq_alloc_failed; | ||
631 | |||
632 | port_fqs->rx_defq = &dpaa_fq[0]; | ||
633 | |||
634 | if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ)) | ||
635 | goto fq_alloc_failed; | ||
636 | |||
637 | dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR); | ||
638 | if (!dpaa_fq) | ||
639 | goto fq_alloc_failed; | ||
640 | |||
641 | port_fqs->tx_errq = &dpaa_fq[0]; | ||
642 | |||
643 | dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM); | ||
644 | if (!dpaa_fq) | ||
645 | goto fq_alloc_failed; | ||
646 | |||
647 | port_fqs->tx_defq = &dpaa_fq[0]; | ||
648 | |||
649 | if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX)) | ||
650 | goto fq_alloc_failed; | ||
651 | |||
652 | return 0; | ||
653 | |||
654 | fq_alloc_failed: | ||
655 | dev_err(dev, "dpaa_fq_alloc() failed\n"); | ||
656 | return -ENOMEM; | ||
657 | } | ||
658 | |||
659 | static u32 rx_pool_channel; | ||
660 | static DEFINE_SPINLOCK(rx_pool_channel_init); | ||
661 | |||
662 | static int dpaa_get_channel(void) | ||
663 | { | ||
664 | spin_lock(&rx_pool_channel_init); | ||
665 | if (!rx_pool_channel) { | ||
666 | u32 pool; | ||
667 | int ret; | ||
668 | |||
669 | ret = qman_alloc_pool(&pool); | ||
670 | |||
671 | if (!ret) | ||
672 | rx_pool_channel = pool; | ||
673 | } | ||
674 | spin_unlock(&rx_pool_channel_init); | ||
675 | if (!rx_pool_channel) | ||
676 | return -ENOMEM; | ||
677 | return rx_pool_channel; | ||
678 | } | ||
679 | |||
680 | static void dpaa_release_channel(void) | ||
681 | { | ||
682 | qman_release_pool(rx_pool_channel); | ||
683 | } | ||
684 | |||
685 | static void dpaa_eth_add_channel(u16 channel) | ||
686 | { | ||
687 | u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel); | ||
688 | const cpumask_t *cpus = qman_affine_cpus(); | ||
689 | struct qman_portal *portal; | ||
690 | int cpu; | ||
691 | |||
692 | for_each_cpu(cpu, cpus) { | ||
693 | portal = qman_get_affine_portal(cpu); | ||
694 | qman_p_static_dequeue_add(portal, pool); | ||
695 | } | ||
696 | } | ||
697 | |||
698 | /* Congestion group state change notification callback. | ||
699 | * Stops the device's egress queues while they are congested and | ||
700 | * wakes them upon exiting congested state. | ||
701 | * Also updates some CGR-related stats. | ||
702 | */ | ||
703 | static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr, | ||
704 | int congested) | ||
705 | { | ||
706 | struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr, | ||
707 | struct dpaa_priv, cgr_data.cgr); | ||
708 | |||
709 | if (congested) { | ||
710 | priv->cgr_data.congestion_start_jiffies = jiffies; | ||
711 | netif_tx_stop_all_queues(priv->net_dev); | ||
712 | priv->cgr_data.cgr_congested_count++; | ||
713 | } else { | ||
714 | priv->cgr_data.congested_jiffies += | ||
715 | (jiffies - priv->cgr_data.congestion_start_jiffies); | ||
716 | netif_tx_wake_all_queues(priv->net_dev); | ||
717 | } | ||
718 | } | ||
719 | |||
720 | static int dpaa_eth_cgr_init(struct dpaa_priv *priv) | ||
721 | { | ||
722 | struct qm_mcc_initcgr initcgr; | ||
723 | u32 cs_th; | ||
724 | int err; | ||
725 | |||
726 | err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid); | ||
727 | if (err < 0) { | ||
728 | if (netif_msg_drv(priv)) | ||
729 | pr_err("%s: Error %d allocating CGR ID\n", | ||
730 | __func__, err); | ||
731 | goto out_error; | ||
732 | } | ||
733 | priv->cgr_data.cgr.cb = dpaa_eth_cgscn; | ||
734 | |||
735 | /* Enable Congestion State Change Notifications and CS taildrop */ | ||
736 | initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES; | ||
737 | initcgr.cgr.cscn_en = QM_CGR_EN; | ||
738 | |||
739 | /* Set different thresholds based on the MAC speed. | ||
740 | * This may become suboptimal if the MAC is reconfigured at a speed | ||
741 | * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link. | ||
742 | * In such cases, we ought to reconfigure the threshold, too. | ||
743 | */ | ||
744 | if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full) | ||
745 | cs_th = DPAA_CS_THRESHOLD_10G; | ||
746 | else | ||
747 | cs_th = DPAA_CS_THRESHOLD_1G; | ||
748 | qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); | ||
749 | |||
750 | initcgr.we_mask |= QM_CGR_WE_CSTD_EN; | ||
751 | initcgr.cgr.cstd_en = QM_CGR_EN; | ||
752 | |||
753 | err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT, | ||
754 | &initcgr); | ||
755 | if (err < 0) { | ||
756 | if (netif_msg_drv(priv)) | ||
757 | pr_err("%s: Error %d creating CGR with ID %d\n", | ||
758 | __func__, err, priv->cgr_data.cgr.cgrid); | ||
759 | qman_release_cgrid(priv->cgr_data.cgr.cgrid); | ||
760 | goto out_error; | ||
761 | } | ||
762 | if (netif_msg_drv(priv)) | ||
763 | pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n", | ||
764 | priv->cgr_data.cgr.cgrid, priv->mac_dev->addr, | ||
765 | priv->cgr_data.cgr.chan); | ||
766 | |||
767 | out_error: | ||
768 | return err; | ||
769 | } | ||
770 | |||
771 | static inline void dpaa_setup_ingress(const struct dpaa_priv *priv, | ||
772 | struct dpaa_fq *fq, | ||
773 | const struct qman_fq *template) | ||
774 | { | ||
775 | fq->fq_base = *template; | ||
776 | fq->net_dev = priv->net_dev; | ||
777 | |||
778 | fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE; | ||
779 | fq->channel = priv->channel; | ||
780 | } | ||
781 | |||
782 | static inline void dpaa_setup_egress(const struct dpaa_priv *priv, | ||
783 | struct dpaa_fq *fq, | ||
784 | struct fman_port *port, | ||
785 | const struct qman_fq *template) | ||
786 | { | ||
787 | fq->fq_base = *template; | ||
788 | fq->net_dev = priv->net_dev; | ||
789 | |||
790 | if (port) { | ||
791 | fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL; | ||
792 | fq->channel = (u16)fman_port_get_qman_channel_id(port); | ||
793 | } else { | ||
794 | fq->flags = QMAN_FQ_FLAG_NO_MODIFY; | ||
795 | } | ||
796 | } | ||
797 | |||
798 | static void dpaa_fq_setup(struct dpaa_priv *priv, | ||
799 | const struct dpaa_fq_cbs *fq_cbs, | ||
800 | struct fman_port *tx_port) | ||
801 | { | ||
802 | int egress_cnt = 0, conf_cnt = 0, num_portals = 0, cpu; | ||
803 | const cpumask_t *affine_cpus = qman_affine_cpus(); | ||
804 | u16 portals[NR_CPUS]; | ||
805 | struct dpaa_fq *fq; | ||
806 | |||
807 | for_each_cpu(cpu, affine_cpus) | ||
808 | portals[num_portals++] = qman_affine_channel(cpu); | ||
809 | if (num_portals == 0) | ||
810 | dev_err(priv->net_dev->dev.parent, | ||
811 | "No Qman software (affine) channels found"); | ||
812 | |||
813 | /* Initialize each FQ in the list */ | ||
814 | list_for_each_entry(fq, &priv->dpaa_fq_list, list) { | ||
815 | switch (fq->fq_type) { | ||
816 | case FQ_TYPE_RX_DEFAULT: | ||
817 | dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq); | ||
818 | break; | ||
819 | case FQ_TYPE_RX_ERROR: | ||
820 | dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq); | ||
821 | break; | ||
822 | case FQ_TYPE_TX: | ||
823 | dpaa_setup_egress(priv, fq, tx_port, | ||
824 | &fq_cbs->egress_ern); | ||
825 | /* If we have more Tx queues than the number of cores, | ||
826 | * just ignore the extra ones. | ||
827 | */ | ||
828 | if (egress_cnt < DPAA_ETH_TXQ_NUM) | ||
829 | priv->egress_fqs[egress_cnt++] = &fq->fq_base; | ||
830 | break; | ||
831 | case FQ_TYPE_TX_CONF_MQ: | ||
832 | priv->conf_fqs[conf_cnt++] = &fq->fq_base; | ||
833 | /* fall through */ | ||
834 | case FQ_TYPE_TX_CONFIRM: | ||
835 | dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq); | ||
836 | break; | ||
837 | case FQ_TYPE_TX_ERROR: | ||
838 | dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq); | ||
839 | break; | ||
840 | default: | ||
841 | dev_warn(priv->net_dev->dev.parent, | ||
842 | "Unknown FQ type detected!\n"); | ||
843 | break; | ||
844 | } | ||
845 | } | ||
846 | |||
847 | /* Make sure all CPUs receive a corresponding Tx queue. */ | ||
848 | while (egress_cnt < DPAA_ETH_TXQ_NUM) { | ||
849 | list_for_each_entry(fq, &priv->dpaa_fq_list, list) { | ||
850 | if (fq->fq_type != FQ_TYPE_TX) | ||
851 | continue; | ||
852 | priv->egress_fqs[egress_cnt++] = &fq->fq_base; | ||
853 | if (egress_cnt == DPAA_ETH_TXQ_NUM) | ||
854 | break; | ||
855 | } | ||
856 | } | ||
857 | } | ||
858 | |||
859 | static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv, | ||
860 | struct qman_fq *tx_fq) | ||
861 | { | ||
862 | int i; | ||
863 | |||
864 | for (i = 0; i < DPAA_ETH_TXQ_NUM; i++) | ||
865 | if (priv->egress_fqs[i] == tx_fq) | ||
866 | return i; | ||
867 | |||
868 | return -EINVAL; | ||
869 | } | ||
870 | |||
871 | static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable) | ||
872 | { | ||
873 | const struct dpaa_priv *priv; | ||
874 | struct qman_fq *confq = NULL; | ||
875 | struct qm_mcc_initfq initfq; | ||
876 | struct device *dev; | ||
877 | struct qman_fq *fq; | ||
878 | int queue_id; | ||
879 | int err; | ||
880 | |||
881 | priv = netdev_priv(dpaa_fq->net_dev); | ||
882 | dev = dpaa_fq->net_dev->dev.parent; | ||
883 | |||
884 | if (dpaa_fq->fqid == 0) | ||
885 | dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID; | ||
886 | |||
887 | dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY); | ||
888 | |||
889 | err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base); | ||
890 | if (err) { | ||
891 | dev_err(dev, "qman_create_fq() failed\n"); | ||
892 | return err; | ||
893 | } | ||
894 | fq = &dpaa_fq->fq_base; | ||
895 | |||
896 | if (dpaa_fq->init) { | ||
897 | memset(&initfq, 0, sizeof(initfq)); | ||
898 | |||
899 | initfq.we_mask = QM_INITFQ_WE_FQCTRL; | ||
900 | /* Note: we may get to keep an empty FQ in cache */ | ||
901 | initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; | ||
902 | |||
903 | /* Try to reduce the number of portal interrupts for | ||
904 | * Tx Confirmation FQs. | ||
905 | */ | ||
906 | if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM) | ||
907 | initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; | ||
908 | |||
909 | /* FQ placement */ | ||
910 | initfq.we_mask |= QM_INITFQ_WE_DESTWQ; | ||
911 | |||
912 | qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq); | ||
913 | |||
914 | /* Put all egress queues in a congestion group of their own. | ||
915 | * Sensu stricto, the Tx confirmation queues are Rx FQs, | ||
916 | * rather than Tx - but they nonetheless account for the | ||
917 | * memory footprint on behalf of egress traffic. We therefore | ||
918 | * place them in the netdev's CGR, along with the Tx FQs. | ||
919 | */ | ||
920 | if (dpaa_fq->fq_type == FQ_TYPE_TX || | ||
921 | dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM || | ||
922 | dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) { | ||
923 | initfq.we_mask |= QM_INITFQ_WE_CGID; | ||
924 | initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE; | ||
925 | initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid; | ||
926 | /* Set a fixed overhead accounting, in an attempt to | ||
927 | * reduce the impact of fixed-size skb shells and the | ||
928 | * driver's needed headroom on system memory. This is | ||
929 | * especially the case when the egress traffic is | ||
930 | * composed of small datagrams. | ||
931 | * Unfortunately, QMan's OAL value is capped to an | ||
932 | * insufficient value, but even that is better than | ||
933 | * no overhead accounting at all. | ||
934 | */ | ||
935 | initfq.we_mask |= QM_INITFQ_WE_OAC; | ||
936 | qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG); | ||
937 | qm_fqd_set_oal(&initfq.fqd, | ||
938 | min(sizeof(struct sk_buff) + | ||
939 | priv->tx_headroom, | ||
940 | (size_t)FSL_QMAN_MAX_OAL)); | ||
941 | } | ||
942 | |||
943 | if (td_enable) { | ||
944 | initfq.we_mask |= QM_INITFQ_WE_TDTHRESH; | ||
945 | qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1); | ||
946 | initfq.fqd.fq_ctrl = QM_FQCTRL_TDE; | ||
947 | } | ||
948 | |||
949 | if (dpaa_fq->fq_type == FQ_TYPE_TX) { | ||
950 | queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base); | ||
951 | if (queue_id >= 0) | ||
952 | confq = priv->conf_fqs[queue_id]; | ||
953 | if (confq) { | ||
954 | initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; | ||
955 | /* ContextA: OVOM=1(use contextA2 bits instead of ICAD) | ||
956 | * A2V=1 (contextA A2 field is valid) | ||
957 | * A0V=1 (contextA A0 field is valid) | ||
958 | * B0V=1 (contextB field is valid) | ||
959 | * ContextA A2: EBD=1 (deallocate buffers inside FMan) | ||
960 | * ContextB B0(ASPID): 0 (absolute Virtual Storage ID) | ||
961 | */ | ||
962 | initfq.fqd.context_a.hi = 0x1e000000; | ||
963 | initfq.fqd.context_a.lo = 0x80000000; | ||
964 | } | ||
965 | } | ||
966 | |||
967 | /* Put all the ingress queues in our "ingress CGR". */ | ||
968 | if (priv->use_ingress_cgr && | ||
969 | (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT || | ||
970 | dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) { | ||
971 | initfq.we_mask |= QM_INITFQ_WE_CGID; | ||
972 | initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE; | ||
973 | initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid; | ||
974 | /* Set a fixed overhead accounting, just like for the | ||
975 | * egress CGR. | ||
976 | */ | ||
977 | initfq.we_mask |= QM_INITFQ_WE_OAC; | ||
978 | qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG); | ||
979 | qm_fqd_set_oal(&initfq.fqd, | ||
980 | min(sizeof(struct sk_buff) + | ||
981 | priv->tx_headroom, | ||
982 | (size_t)FSL_QMAN_MAX_OAL)); | ||
983 | } | ||
984 | |||
985 | /* Initialization common to all ingress queues */ | ||
986 | if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) { | ||
987 | initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; | ||
988 | initfq.fqd.fq_ctrl |= | ||
989 | QM_FQCTRL_HOLDACTIVE; | ||
990 | initfq.fqd.context_a.stashing.exclusive = | ||
991 | QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX | | ||
992 | QM_STASHING_EXCL_ANNOTATION; | ||
993 | qm_fqd_set_stashing(&initfq.fqd, 1, 2, | ||
994 | DIV_ROUND_UP(sizeof(struct qman_fq), | ||
995 | 64)); | ||
996 | } | ||
997 | |||
998 | err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq); | ||
999 | if (err < 0) { | ||
1000 | dev_err(dev, "qman_init_fq(%u) = %d\n", | ||
1001 | qman_fq_fqid(fq), err); | ||
1002 | qman_destroy_fq(fq); | ||
1003 | return err; | ||
1004 | } | ||
1005 | } | ||
1006 | |||
1007 | dpaa_fq->fqid = qman_fq_fqid(fq); | ||
1008 | |||
1009 | return 0; | ||
1010 | } | ||
1011 | |||
1012 | static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq) | ||
1013 | { | ||
1014 | const struct dpaa_priv *priv; | ||
1015 | struct dpaa_fq *dpaa_fq; | ||
1016 | int err, error; | ||
1017 | |||
1018 | err = 0; | ||
1019 | |||
1020 | dpaa_fq = container_of(fq, struct dpaa_fq, fq_base); | ||
1021 | priv = netdev_priv(dpaa_fq->net_dev); | ||
1022 | |||
1023 | if (dpaa_fq->init) { | ||
1024 | err = qman_retire_fq(fq, NULL); | ||
1025 | if (err < 0 && netif_msg_drv(priv)) | ||
1026 | dev_err(dev, "qman_retire_fq(%u) = %d\n", | ||
1027 | qman_fq_fqid(fq), err); | ||
1028 | |||
1029 | error = qman_oos_fq(fq); | ||
1030 | if (error < 0 && netif_msg_drv(priv)) { | ||
1031 | dev_err(dev, "qman_oos_fq(%u) = %d\n", | ||
1032 | qman_fq_fqid(fq), error); | ||
1033 | if (err >= 0) | ||
1034 | err = error; | ||
1035 | } | ||
1036 | } | ||
1037 | |||
1038 | qman_destroy_fq(fq); | ||
1039 | list_del(&dpaa_fq->list); | ||
1040 | |||
1041 | return err; | ||
1042 | } | ||
1043 | |||
1044 | static int dpaa_fq_free(struct device *dev, struct list_head *list) | ||
1045 | { | ||
1046 | struct dpaa_fq *dpaa_fq, *tmp; | ||
1047 | int err, error; | ||
1048 | |||
1049 | err = 0; | ||
1050 | list_for_each_entry_safe(dpaa_fq, tmp, list, list) { | ||
1051 | error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq); | ||
1052 | if (error < 0 && err >= 0) | ||
1053 | err = error; | ||
1054 | } | ||
1055 | |||
1056 | return err; | ||
1057 | } | ||
1058 | |||
1059 | static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq, | ||
1060 | struct dpaa_fq *defq, | ||
1061 | struct dpaa_buffer_layout *buf_layout) | ||
1062 | { | ||
1063 | struct fman_buffer_prefix_content buf_prefix_content; | ||
1064 | struct fman_port_params params; | ||
1065 | int err; | ||
1066 | |||
1067 | memset(¶ms, 0, sizeof(params)); | ||
1068 | memset(&buf_prefix_content, 0, sizeof(buf_prefix_content)); | ||
1069 | |||
1070 | buf_prefix_content.priv_data_size = buf_layout->priv_data_size; | ||
1071 | buf_prefix_content.pass_prs_result = true; | ||
1072 | buf_prefix_content.pass_hash_result = true; | ||
1073 | buf_prefix_content.pass_time_stamp = false; | ||
1074 | buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; | ||
1075 | |||
1076 | params.specific_params.non_rx_params.err_fqid = errq->fqid; | ||
1077 | params.specific_params.non_rx_params.dflt_fqid = defq->fqid; | ||
1078 | |||
1079 | err = fman_port_config(port, ¶ms); | ||
1080 | if (err) | ||
1081 | pr_err("%s: fman_port_config failed\n", __func__); | ||
1082 | |||
1083 | err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content); | ||
1084 | if (err) | ||
1085 | pr_err("%s: fman_port_cfg_buf_prefix_content failed\n", | ||
1086 | __func__); | ||
1087 | |||
1088 | err = fman_port_init(port); | ||
1089 | if (err) | ||
1090 | pr_err("%s: fm_port_init failed\n", __func__); | ||
1091 | } | ||
1092 | |||
1093 | static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps, | ||
1094 | size_t count, struct dpaa_fq *errq, | ||
1095 | struct dpaa_fq *defq, | ||
1096 | struct dpaa_buffer_layout *buf_layout) | ||
1097 | { | ||
1098 | struct fman_buffer_prefix_content buf_prefix_content; | ||
1099 | struct fman_port_rx_params *rx_p; | ||
1100 | struct fman_port_params params; | ||
1101 | int i, err; | ||
1102 | |||
1103 | memset(¶ms, 0, sizeof(params)); | ||
1104 | memset(&buf_prefix_content, 0, sizeof(buf_prefix_content)); | ||
1105 | |||
1106 | buf_prefix_content.priv_data_size = buf_layout->priv_data_size; | ||
1107 | buf_prefix_content.pass_prs_result = true; | ||
1108 | buf_prefix_content.pass_hash_result = true; | ||
1109 | buf_prefix_content.pass_time_stamp = false; | ||
1110 | buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; | ||
1111 | |||
1112 | rx_p = ¶ms.specific_params.rx_params; | ||
1113 | rx_p->err_fqid = errq->fqid; | ||
1114 | rx_p->dflt_fqid = defq->fqid; | ||
1115 | |||
1116 | count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count); | ||
1117 | rx_p->ext_buf_pools.num_of_pools_used = (u8)count; | ||
1118 | for (i = 0; i < count; i++) { | ||
1119 | rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid; | ||
1120 | rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size; | ||
1121 | } | ||
1122 | |||
1123 | err = fman_port_config(port, ¶ms); | ||
1124 | if (err) | ||
1125 | pr_err("%s: fman_port_config failed\n", __func__); | ||
1126 | |||
1127 | err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content); | ||
1128 | if (err) | ||
1129 | pr_err("%s: fman_port_cfg_buf_prefix_content failed\n", | ||
1130 | __func__); | ||
1131 | |||
1132 | err = fman_port_init(port); | ||
1133 | if (err) | ||
1134 | pr_err("%s: fm_port_init failed\n", __func__); | ||
1135 | } | ||
1136 | |||
1137 | static void dpaa_eth_init_ports(struct mac_device *mac_dev, | ||
1138 | struct dpaa_bp **bps, size_t count, | ||
1139 | struct fm_port_fqs *port_fqs, | ||
1140 | struct dpaa_buffer_layout *buf_layout, | ||
1141 | struct device *dev) | ||
1142 | { | ||
1143 | struct fman_port *rxport = mac_dev->port[RX]; | ||
1144 | struct fman_port *txport = mac_dev->port[TX]; | ||
1145 | |||
1146 | dpaa_eth_init_tx_port(txport, port_fqs->tx_errq, | ||
1147 | port_fqs->tx_defq, &buf_layout[TX]); | ||
1148 | dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq, | ||
1149 | port_fqs->rx_defq, &buf_layout[RX]); | ||
1150 | } | ||
1151 | |||
1152 | static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp, | ||
1153 | struct bm_buffer *bmb, int cnt) | ||
1154 | { | ||
1155 | int err; | ||
1156 | |||
1157 | err = bman_release(dpaa_bp->pool, bmb, cnt); | ||
1158 | /* Should never occur, address anyway to avoid leaking the buffers */ | ||
1159 | if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb) | ||
1160 | while (cnt-- > 0) | ||
1161 | dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]); | ||
1162 | |||
1163 | return cnt; | ||
1164 | } | ||
1165 | |||
1166 | static void dpaa_release_sgt_members(struct qm_sg_entry *sgt) | ||
1167 | { | ||
1168 | struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX]; | ||
1169 | struct dpaa_bp *dpaa_bp; | ||
1170 | int i = 0, j; | ||
1171 | |||
1172 | memset(bmb, 0, sizeof(bmb)); | ||
1173 | |||
1174 | do { | ||
1175 | dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); | ||
1176 | if (!dpaa_bp) | ||
1177 | return; | ||
1178 | |||
1179 | j = 0; | ||
1180 | do { | ||
1181 | WARN_ON(qm_sg_entry_is_ext(&sgt[i])); | ||
1182 | |||
1183 | bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i])); | ||
1184 | |||
1185 | j++; i++; | ||
1186 | } while (j < ARRAY_SIZE(bmb) && | ||
1187 | !qm_sg_entry_is_final(&sgt[i - 1]) && | ||
1188 | sgt[i - 1].bpid == sgt[i].bpid); | ||
1189 | |||
1190 | dpaa_bman_release(dpaa_bp, bmb, j); | ||
1191 | } while (!qm_sg_entry_is_final(&sgt[i - 1])); | ||
1192 | } | ||
1193 | |||
1194 | static void dpaa_fd_release(const struct net_device *net_dev, | ||
1195 | const struct qm_fd *fd) | ||
1196 | { | ||
1197 | struct qm_sg_entry *sgt; | ||
1198 | struct dpaa_bp *dpaa_bp; | ||
1199 | struct bm_buffer bmb; | ||
1200 | dma_addr_t addr; | ||
1201 | void *vaddr; | ||
1202 | |||
1203 | bmb.data = 0; | ||
1204 | bm_buffer_set64(&bmb, qm_fd_addr(fd)); | ||
1205 | |||
1206 | dpaa_bp = dpaa_bpid2pool(fd->bpid); | ||
1207 | if (!dpaa_bp) | ||
1208 | return; | ||
1209 | |||
1210 | if (qm_fd_get_format(fd) == qm_fd_sg) { | ||
1211 | vaddr = phys_to_virt(qm_fd_addr(fd)); | ||
1212 | sgt = vaddr + qm_fd_get_offset(fd); | ||
1213 | |||
1214 | dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size, | ||
1215 | DMA_FROM_DEVICE); | ||
1216 | |||
1217 | dpaa_release_sgt_members(sgt); | ||
1218 | |||
1219 | addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size, | ||
1220 | DMA_FROM_DEVICE); | ||
1221 | if (dma_mapping_error(dpaa_bp->dev, addr)) { | ||
1222 | dev_err(dpaa_bp->dev, "DMA mapping failed"); | ||
1223 | return; | ||
1224 | } | ||
1225 | bm_buffer_set64(&bmb, addr); | ||
1226 | } | ||
1227 | |||
1228 | dpaa_bman_release(dpaa_bp, &bmb, 1); | ||
1229 | } | ||
1230 | |||
1231 | static void count_ern(struct dpaa_percpu_priv *percpu_priv, | ||
1232 | const union qm_mr_entry *msg) | ||
1233 | { | ||
1234 | switch (msg->ern.rc & QM_MR_RC_MASK) { | ||
1235 | case QM_MR_RC_CGR_TAILDROP: | ||
1236 | percpu_priv->ern_cnt.cg_tdrop++; | ||
1237 | break; | ||
1238 | case QM_MR_RC_WRED: | ||
1239 | percpu_priv->ern_cnt.wred++; | ||
1240 | break; | ||
1241 | case QM_MR_RC_ERROR: | ||
1242 | percpu_priv->ern_cnt.err_cond++; | ||
1243 | break; | ||
1244 | case QM_MR_RC_ORPWINDOW_EARLY: | ||
1245 | percpu_priv->ern_cnt.early_window++; | ||
1246 | break; | ||
1247 | case QM_MR_RC_ORPWINDOW_LATE: | ||
1248 | percpu_priv->ern_cnt.late_window++; | ||
1249 | break; | ||
1250 | case QM_MR_RC_FQ_TAILDROP: | ||
1251 | percpu_priv->ern_cnt.fq_tdrop++; | ||
1252 | break; | ||
1253 | case QM_MR_RC_ORPWINDOW_RETIRED: | ||
1254 | percpu_priv->ern_cnt.fq_retired++; | ||
1255 | break; | ||
1256 | case QM_MR_RC_ORP_ZERO: | ||
1257 | percpu_priv->ern_cnt.orp_zero++; | ||
1258 | break; | ||
1259 | } | ||
1260 | } | ||
1261 | |||
1262 | /* Turn on HW checksum computation for this outgoing frame. | ||
1263 | * If the current protocol is not something we support in this regard | ||
1264 | * (or if the stack has already computed the SW checksum), we do nothing. | ||
1265 | * | ||
1266 | * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value | ||
1267 | * otherwise. | ||
1268 | * | ||
1269 | * Note that this function may modify the fd->cmd field and the skb data buffer | ||
1270 | * (the Parse Results area). | ||
1271 | */ | ||
1272 | static int dpaa_enable_tx_csum(struct dpaa_priv *priv, | ||
1273 | struct sk_buff *skb, | ||
1274 | struct qm_fd *fd, | ||
1275 | char *parse_results) | ||
1276 | { | ||
1277 | struct fman_prs_result *parse_result; | ||
1278 | u16 ethertype = ntohs(skb->protocol); | ||
1279 | struct ipv6hdr *ipv6h = NULL; | ||
1280 | struct iphdr *iph; | ||
1281 | int retval = 0; | ||
1282 | u8 l4_proto; | ||
1283 | |||
1284 | if (skb->ip_summed != CHECKSUM_PARTIAL) | ||
1285 | return 0; | ||
1286 | |||
1287 | /* Note: L3 csum seems to be already computed in sw, but we can't choose | ||
1288 | * L4 alone from the FM configuration anyway. | ||
1289 | */ | ||
1290 | |||
1291 | /* Fill in some fields of the Parse Results array, so the FMan | ||
1292 | * can find them as if they came from the FMan Parser. | ||
1293 | */ | ||
1294 | parse_result = (struct fman_prs_result *)parse_results; | ||
1295 | |||
1296 | /* If we're dealing with VLAN, get the real Ethernet type */ | ||
1297 | if (ethertype == ETH_P_8021Q) { | ||
1298 | /* We can't always assume the MAC header is set correctly | ||
1299 | * by the stack, so reset to beginning of skb->data | ||
1300 | */ | ||
1301 | skb_reset_mac_header(skb); | ||
1302 | ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); | ||
1303 | } | ||
1304 | |||
1305 | /* Fill in the relevant L3 parse result fields | ||
1306 | * and read the L4 protocol type | ||
1307 | */ | ||
1308 | switch (ethertype) { | ||
1309 | case ETH_P_IP: | ||
1310 | parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4); | ||
1311 | iph = ip_hdr(skb); | ||
1312 | WARN_ON(!iph); | ||
1313 | l4_proto = iph->protocol; | ||
1314 | break; | ||
1315 | case ETH_P_IPV6: | ||
1316 | parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6); | ||
1317 | ipv6h = ipv6_hdr(skb); | ||
1318 | WARN_ON(!ipv6h); | ||
1319 | l4_proto = ipv6h->nexthdr; | ||
1320 | break; | ||
1321 | default: | ||
1322 | /* We shouldn't even be here */ | ||
1323 | if (net_ratelimit()) | ||
1324 | netif_alert(priv, tx_err, priv->net_dev, | ||
1325 | "Can't compute HW csum for L3 proto 0x%x\n", | ||
1326 | ntohs(skb->protocol)); | ||
1327 | retval = -EIO; | ||
1328 | goto return_error; | ||
1329 | } | ||
1330 | |||
1331 | /* Fill in the relevant L4 parse result fields */ | ||
1332 | switch (l4_proto) { | ||
1333 | case IPPROTO_UDP: | ||
1334 | parse_result->l4r = FM_L4_PARSE_RESULT_UDP; | ||
1335 | break; | ||
1336 | case IPPROTO_TCP: | ||
1337 | parse_result->l4r = FM_L4_PARSE_RESULT_TCP; | ||
1338 | break; | ||
1339 | default: | ||
1340 | if (net_ratelimit()) | ||
1341 | netif_alert(priv, tx_err, priv->net_dev, | ||
1342 | "Can't compute HW csum for L4 proto 0x%x\n", | ||
1343 | l4_proto); | ||
1344 | retval = -EIO; | ||
1345 | goto return_error; | ||
1346 | } | ||
1347 | |||
1348 | /* At index 0 is IPOffset_1 as defined in the Parse Results */ | ||
1349 | parse_result->ip_off[0] = (u8)skb_network_offset(skb); | ||
1350 | parse_result->l4_off = (u8)skb_transport_offset(skb); | ||
1351 | |||
1352 | /* Enable L3 (and L4, if TCP or UDP) HW checksum. */ | ||
1353 | fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC; | ||
1354 | |||
1355 | 	/* On P1023 and similar platforms, fd->cmd interpretation could | ||
1356 | 	 * be disabled by setting the CONTEXT_A bit ICMD; currently this bit | ||
1357 | 	 * is not set, so we do not need to check; in the future, if/when | ||
1358 | 	 * using context_a, we will need to check this bit | ||
1359 | */ | ||
1360 | |||
1361 | return_error: | ||
1362 | return retval; | ||
1363 | } | ||
1364 | |||
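For a concrete picture of what dpaa_enable_tx_csum() fills in, consider an untagged IPv4/TCP frame. A minimal sketch of the resulting Parse Results and FD state, assuming the usual 14-byte Ethernet and 20-byte IPv4 headers (illustrative values only, not part of the patch):

	/* Hypothetical example for an untagged IPv4/TCP skb */
	parse_result->l3r       = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
	parse_result->l4r       = FM_L4_PARSE_RESULT_TCP;
	parse_result->ip_off[0] = 14;	/* skb_network_offset(skb) */
	parse_result->l4_off    = 34;	/* skb_transport_offset(skb) */
	fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;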
1365 | static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp) | ||
1366 | { | ||
1367 | struct device *dev = dpaa_bp->dev; | ||
1368 | struct bm_buffer bmb[8]; | ||
1369 | dma_addr_t addr; | ||
1370 | void *new_buf; | ||
1371 | u8 i; | ||
1372 | |||
1373 | for (i = 0; i < 8; i++) { | ||
1374 | new_buf = netdev_alloc_frag(dpaa_bp->raw_size); | ||
1375 | if (unlikely(!new_buf)) { | ||
1376 | dev_err(dev, "netdev_alloc_frag() failed, size %zu\n", | ||
1377 | dpaa_bp->raw_size); | ||
1378 | goto release_previous_buffs; | ||
1379 | } | ||
1380 | new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES); | ||
1381 | |||
1382 | addr = dma_map_single(dev, new_buf, | ||
1383 | dpaa_bp->size, DMA_FROM_DEVICE); | ||
1384 | if (unlikely(dma_mapping_error(dev, addr))) { | ||
1385 | dev_err(dpaa_bp->dev, "DMA map failed"); | ||
1386 | goto release_previous_buffs; | ||
1387 | } | ||
1388 | |||
1389 | bmb[i].data = 0; | ||
1390 | bm_buffer_set64(&bmb[i], addr); | ||
1391 | } | ||
1392 | |||
1393 | release_bufs: | ||
1394 | return dpaa_bman_release(dpaa_bp, bmb, i); | ||
1395 | |||
1396 | release_previous_buffs: | ||
1397 | WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n"); | ||
1398 | |||
1399 | bm_buffer_set64(&bmb[i], 0); | ||
1400 | /* Avoid releasing a completely null buffer; bman_release() requires | ||
1401 | * at least one buffer. | ||
1402 | */ | ||
1403 | if (likely(i)) | ||
1404 | goto release_bufs; | ||
1405 | |||
1406 | return 0; | ||
1407 | } | ||
1408 | |||
1409 | static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp) | ||
1410 | { | ||
1411 | int i; | ||
1412 | |||
1413 | /* Give each CPU an allotment of "config_count" buffers */ | ||
1414 | for_each_possible_cpu(i) { | ||
1415 | int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i); | ||
1416 | int j; | ||
1417 | |||
1418 | 		/* Although we access another CPU's counters here, | ||
1419 | 		 * we do it at boot time, so it is safe | ||
1420 | */ | ||
1421 | for (j = 0; j < dpaa_bp->config_count; j += 8) | ||
1422 | *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp); | ||
1423 | } | ||
1424 | return 0; | ||
1425 | } | ||
1426 | |||
1427 | /* Add buffers (pages) for Rx processing whenever the bpool count falls below | ||
1428 | * REFILL_THRESHOLD. | ||
1429 | */ | ||
1430 | static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr) | ||
1431 | { | ||
1432 | int count = *countptr; | ||
1433 | int new_bufs; | ||
1434 | |||
1435 | if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) { | ||
1436 | do { | ||
1437 | new_bufs = dpaa_bp_add_8_bufs(dpaa_bp); | ||
1438 | if (unlikely(!new_bufs)) { | ||
1439 | /* Avoid looping forever if we've temporarily | ||
1440 | * run out of memory. We'll try again at the | ||
1441 | * next NAPI cycle. | ||
1442 | */ | ||
1443 | break; | ||
1444 | } | ||
1445 | count += new_bufs; | ||
1446 | } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT); | ||
1447 | |||
1448 | *countptr = count; | ||
1449 | if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT)) | ||
1450 | return -ENOMEM; | ||
1451 | } | ||
1452 | |||
1453 | return 0; | ||
1454 | } | ||
1455 | |||
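Both the seeding and the refill paths above work in batches of eight, matching dpaa_bp_add_8_bufs(). As a worked example with assumed constants (the real FSL_DPAA_ETH_* values are defined elsewhere in the driver): if the refill threshold were 80 and the maximum count 128, a per-CPU count of 72 would make dpaa_eth_refill_bpool() call dpaa_bp_add_8_bufs() up to seven times (72 + 7 * 8 = 128), stopping early only if an allocation fails and returning -ENOMEM if the maximum is not reached.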
1456 | static int dpaa_eth_refill_bpools(struct dpaa_priv *priv) | ||
1457 | { | ||
1458 | struct dpaa_bp *dpaa_bp; | ||
1459 | int *countptr; | ||
1460 | int res, i; | ||
1461 | |||
1462 | for (i = 0; i < DPAA_BPS_NUM; i++) { | ||
1463 | dpaa_bp = priv->dpaa_bps[i]; | ||
1464 | if (!dpaa_bp) | ||
1465 | return -EINVAL; | ||
1466 | countptr = this_cpu_ptr(dpaa_bp->percpu_count); | ||
1467 | res = dpaa_eth_refill_bpool(dpaa_bp, countptr); | ||
1468 | if (res) | ||
1469 | return res; | ||
1470 | } | ||
1471 | return 0; | ||
1472 | } | ||
1473 | |||
1474 | /* Cleanup function for outgoing frame descriptors that were built on Tx path, | ||
1475 | * either contiguous frames or scatter/gather ones. | ||
1476 | * Skb freeing is not handled here. | ||
1477 | * | ||
1478 | * This function may be called on error paths in the Tx function, so guard | ||
1479 |  * against cases when not all relevant fd fields were filled in. | ||
1480 | * | ||
1481 | * Return the skb backpointer, since for S/G frames the buffer containing it | ||
1482 | * gets freed here. | ||
1483 | */ | ||
1484 | static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, | ||
1485 | const struct qm_fd *fd) | ||
1486 | { | ||
1487 | const enum dma_data_direction dma_dir = DMA_TO_DEVICE; | ||
1488 | struct device *dev = priv->net_dev->dev.parent; | ||
1489 | dma_addr_t addr = qm_fd_addr(fd); | ||
1490 | const struct qm_sg_entry *sgt; | ||
1491 | struct sk_buff **skbh, *skb; | ||
1492 | int nr_frags, i; | ||
1493 | |||
1494 | skbh = (struct sk_buff **)phys_to_virt(addr); | ||
1495 | skb = *skbh; | ||
1496 | |||
1497 | if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { | ||
1498 | nr_frags = skb_shinfo(skb)->nr_frags; | ||
1499 | dma_unmap_single(dev, addr, qm_fd_get_offset(fd) + | ||
1500 | sizeof(struct qm_sg_entry) * (1 + nr_frags), | ||
1501 | dma_dir); | ||
1502 | |||
1503 | /* The sgt buffer has been allocated with netdev_alloc_frag(), | ||
1504 | * it's from lowmem. | ||
1505 | */ | ||
1506 | sgt = phys_to_virt(addr + qm_fd_get_offset(fd)); | ||
1507 | |||
1508 | /* sgt[0] is from lowmem, was dma_map_single()-ed */ | ||
1509 | dma_unmap_single(dev, qm_sg_addr(&sgt[0]), | ||
1510 | qm_sg_entry_get_len(&sgt[0]), dma_dir); | ||
1511 | |||
1512 | /* remaining pages were mapped with skb_frag_dma_map() */ | ||
1513 | for (i = 1; i < nr_frags; i++) { | ||
1514 | WARN_ON(qm_sg_entry_is_ext(&sgt[i])); | ||
1515 | |||
1516 | dma_unmap_page(dev, qm_sg_addr(&sgt[i]), | ||
1517 | qm_sg_entry_get_len(&sgt[i]), dma_dir); | ||
1518 | } | ||
1519 | |||
1520 | /* Free the page frag that we allocated on Tx */ | ||
1521 | skb_free_frag(phys_to_virt(addr)); | ||
1522 | } else { | ||
1523 | dma_unmap_single(dev, addr, | ||
1524 | skb_tail_pointer(skb) - (u8 *)skbh, dma_dir); | ||
1525 | } | ||
1526 | |||
1527 | return skb; | ||
1528 | } | ||
1529 | |||
1530 | /* Build a linear skb around the received buffer. | ||
1531 | * We are guaranteed there is enough room at the end of the data buffer to | ||
1532 | * accommodate the shared info area of the skb. | ||
1533 | */ | ||
1534 | static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv, | ||
1535 | const struct qm_fd *fd) | ||
1536 | { | ||
1537 | ssize_t fd_off = qm_fd_get_offset(fd); | ||
1538 | dma_addr_t addr = qm_fd_addr(fd); | ||
1539 | struct dpaa_bp *dpaa_bp; | ||
1540 | struct sk_buff *skb; | ||
1541 | void *vaddr; | ||
1542 | |||
1543 | vaddr = phys_to_virt(addr); | ||
1544 | WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); | ||
1545 | |||
1546 | dpaa_bp = dpaa_bpid2pool(fd->bpid); | ||
1547 | if (!dpaa_bp) | ||
1548 | goto free_buffer; | ||
1549 | |||
1550 | skb = build_skb(vaddr, dpaa_bp->size + | ||
1551 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); | ||
1552 | if (unlikely(!skb)) { | ||
1553 | WARN_ONCE(1, "Build skb failure on Rx\n"); | ||
1554 | goto free_buffer; | ||
1555 | } | ||
1556 | WARN_ON(fd_off != priv->rx_headroom); | ||
1557 | skb_reserve(skb, fd_off); | ||
1558 | skb_put(skb, qm_fd_get_length(fd)); | ||
1559 | |||
1560 | skb->ip_summed = CHECKSUM_NONE; | ||
1561 | |||
1562 | return skb; | ||
1563 | |||
1564 | free_buffer: | ||
1565 | skb_free_frag(vaddr); | ||
1566 | return NULL; | ||
1567 | } | ||
1568 | |||
1569 | /* Build an skb with the data of the first S/G entry in the linear portion and | ||
1570 | * the rest of the frame as skb fragments. | ||
1571 | * | ||
1572 | * The page fragment holding the S/G Table is recycled here. | ||
1573 | */ | ||
1574 | static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, | ||
1575 | const struct qm_fd *fd) | ||
1576 | { | ||
1577 | ssize_t fd_off = qm_fd_get_offset(fd); | ||
1578 | dma_addr_t addr = qm_fd_addr(fd); | ||
1579 | const struct qm_sg_entry *sgt; | ||
1580 | struct page *page, *head_page; | ||
1581 | struct dpaa_bp *dpaa_bp; | ||
1582 | void *vaddr, *sg_vaddr; | ||
1583 | int frag_off, frag_len; | ||
1584 | struct sk_buff *skb; | ||
1585 | dma_addr_t sg_addr; | ||
1586 | int page_offset; | ||
1587 | unsigned int sz; | ||
1588 | int *count_ptr; | ||
1589 | int i; | ||
1590 | |||
1591 | vaddr = phys_to_virt(addr); | ||
1592 | WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); | ||
1593 | |||
1594 | /* Iterate through the SGT entries and add data buffers to the skb */ | ||
1595 | sgt = vaddr + fd_off; | ||
1596 | for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) { | ||
1597 | /* Extension bit is not supported */ | ||
1598 | WARN_ON(qm_sg_entry_is_ext(&sgt[i])); | ||
1599 | |||
1600 | sg_addr = qm_sg_addr(&sgt[i]); | ||
1601 | sg_vaddr = phys_to_virt(sg_addr); | ||
1602 | WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr, | ||
1603 | SMP_CACHE_BYTES)); | ||
1604 | |||
1605 | /* We may use multiple Rx pools */ | ||
1606 | dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); | ||
1607 | if (!dpaa_bp) | ||
1608 | goto free_buffers; | ||
1609 | |||
1610 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); | ||
1611 | dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size, | ||
1612 | DMA_FROM_DEVICE); | ||
1613 | if (i == 0) { | ||
1614 | sz = dpaa_bp->size + | ||
1615 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
1616 | skb = build_skb(sg_vaddr, sz); | ||
1617 | if (WARN_ON(unlikely(!skb))) | ||
1618 | goto free_buffers; | ||
1619 | |||
1620 | skb->ip_summed = CHECKSUM_NONE; | ||
1621 | |||
1622 | /* Make sure forwarded skbs will have enough space | ||
1623 | * on Tx, if extra headers are added. | ||
1624 | */ | ||
1625 | WARN_ON(fd_off != priv->rx_headroom); | ||
1626 | skb_reserve(skb, fd_off); | ||
1627 | skb_put(skb, qm_sg_entry_get_len(&sgt[i])); | ||
1628 | } else { | ||
1629 | 			/* Not the first S/G entry; all data from the buffer will | ||
1630 | 			 * be added as an skb fragment; the fragment index is offset | ||
1631 | 			 * by one since the first S/G entry was incorporated in the | ||
1632 | 			 * linear part of the skb. | ||
1633 | * | ||
1634 | * Caution: 'page' may be a tail page. | ||
1635 | */ | ||
1636 | page = virt_to_page(sg_vaddr); | ||
1637 | head_page = virt_to_head_page(sg_vaddr); | ||
1638 | |||
1639 | /* Compute offset in (possibly tail) page */ | ||
1640 | page_offset = ((unsigned long)sg_vaddr & | ||
1641 | (PAGE_SIZE - 1)) + | ||
1642 | (page_address(page) - page_address(head_page)); | ||
1643 | /* page_offset only refers to the beginning of sgt[i]; | ||
1644 | * but the buffer itself may have an internal offset. | ||
1645 | */ | ||
1646 | frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset; | ||
1647 | frag_len = qm_sg_entry_get_len(&sgt[i]); | ||
1648 | /* skb_add_rx_frag() does no checking on the page; if | ||
1649 | * we pass it a tail page, we'll end up with | ||
1650 | 			 * bad page accounting and eventually with segfaults. | ||
1651 | */ | ||
1652 | skb_add_rx_frag(skb, i - 1, head_page, frag_off, | ||
1653 | frag_len, dpaa_bp->size); | ||
1654 | } | ||
1655 | /* Update the pool count for the current {cpu x bpool} */ | ||
1656 | (*count_ptr)--; | ||
1657 | |||
1658 | if (qm_sg_entry_is_final(&sgt[i])) | ||
1659 | break; | ||
1660 | } | ||
1661 | WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n"); | ||
1662 | |||
1663 | /* free the SG table buffer */ | ||
1664 | skb_free_frag(vaddr); | ||
1665 | |||
1666 | return skb; | ||
1667 | |||
1668 | free_buffers: | ||
1669 | /* compensate sw bpool counter changes */ | ||
1670 | for (i--; i > 0; i--) { | ||
1671 | dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); | ||
1672 | if (dpaa_bp) { | ||
1673 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); | ||
1674 | (*count_ptr)++; | ||
1675 | } | ||
1676 | } | ||
1677 | /* free all the SG entries */ | ||
1678 | for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) { | ||
1679 | sg_addr = qm_sg_addr(&sgt[i]); | ||
1680 | sg_vaddr = phys_to_virt(sg_addr); | ||
1681 | skb_free_frag(sg_vaddr); | ||
1682 | dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); | ||
1683 | if (dpaa_bp) { | ||
1684 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); | ||
1685 | (*count_ptr)--; | ||
1686 | } | ||
1687 | |||
1688 | if (qm_sg_entry_is_final(&sgt[i])) | ||
1689 | break; | ||
1690 | } | ||
1691 | /* free the SGT fragment */ | ||
1692 | skb_free_frag(vaddr); | ||
1693 | |||
1694 | return NULL; | ||
1695 | } | ||
1696 | |||
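The tail-page arithmetic in sg_fd_to_skb() is easiest to follow with numbers. A sketch assuming 4 KiB pages and a hypothetical buffer that starts 0x40 bytes into the second page of a compound allocation: (sg_vaddr & (PAGE_SIZE - 1)) is 0x40, page_address(page) - page_address(head_page) is 4096, so page_offset is 4160 (0x1040); adding the S/G entry's internal offset from qm_sg_entry_get_off() yields the frag_off that is finally passed to skb_add_rx_frag() relative to head_page.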
1697 | static int skb_to_contig_fd(struct dpaa_priv *priv, | ||
1698 | struct sk_buff *skb, struct qm_fd *fd, | ||
1699 | int *offset) | ||
1700 | { | ||
1701 | struct net_device *net_dev = priv->net_dev; | ||
1702 | struct device *dev = net_dev->dev.parent; | ||
1703 | enum dma_data_direction dma_dir; | ||
1704 | unsigned char *buffer_start; | ||
1705 | struct sk_buff **skbh; | ||
1706 | dma_addr_t addr; | ||
1707 | int err; | ||
1708 | |||
1709 | /* We are guaranteed to have at least tx_headroom bytes | ||
1710 | * available, so just use that for offset. | ||
1711 | */ | ||
1712 | fd->bpid = FSL_DPAA_BPID_INV; | ||
1713 | buffer_start = skb->data - priv->tx_headroom; | ||
1714 | dma_dir = DMA_TO_DEVICE; | ||
1715 | |||
1716 | skbh = (struct sk_buff **)buffer_start; | ||
1717 | *skbh = skb; | ||
1718 | |||
1719 | /* Enable L3/L4 hardware checksum computation. | ||
1720 | * | ||
1721 | * We must do this before dma_map_single(DMA_TO_DEVICE), because we may | ||
1722 | * need to write into the skb. | ||
1723 | */ | ||
1724 | err = dpaa_enable_tx_csum(priv, skb, fd, | ||
1725 | ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE); | ||
1726 | if (unlikely(err < 0)) { | ||
1727 | if (net_ratelimit()) | ||
1728 | netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", | ||
1729 | err); | ||
1730 | return err; | ||
1731 | } | ||
1732 | |||
1733 | /* Fill in the rest of the FD fields */ | ||
1734 | qm_fd_set_contig(fd, priv->tx_headroom, skb->len); | ||
1735 | fd->cmd |= FM_FD_CMD_FCO; | ||
1736 | |||
1737 | /* Map the entire buffer size that may be seen by FMan, but no more */ | ||
1738 | addr = dma_map_single(dev, skbh, | ||
1739 | skb_tail_pointer(skb) - buffer_start, dma_dir); | ||
1740 | if (unlikely(dma_mapping_error(dev, addr))) { | ||
1741 | if (net_ratelimit()) | ||
1742 | netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n"); | ||
1743 | return -EINVAL; | ||
1744 | } | ||
1745 | qm_fd_addr_set64(fd, addr); | ||
1746 | |||
1747 | return 0; | ||
1748 | } | ||
1749 | |||
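The Tx buffer layout that skb_to_contig_fd() builds, and that dpaa_cleanup_tx_fd() later unwinds, can be sketched as follows (simplified diagram, not part of the patch):

	buffer_start = skb->data - priv->tx_headroom   (this address is DMA-mapped)
	+------------------+--------------------------------+------------------
	| skb backpointer  | parse results area             | skb->data ...
	| (*skbh == skb)   | (buffer_start +                | (frame payload)
	|                  |  DPAA_TX_PRIV_DATA_SIZE)       |
	+------------------+--------------------------------+------------------
	<------------ priv->tx_headroom == FD offset -------->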
1750 | static int skb_to_sg_fd(struct dpaa_priv *priv, | ||
1751 | struct sk_buff *skb, struct qm_fd *fd) | ||
1752 | { | ||
1753 | const enum dma_data_direction dma_dir = DMA_TO_DEVICE; | ||
1754 | const int nr_frags = skb_shinfo(skb)->nr_frags; | ||
1755 | struct net_device *net_dev = priv->net_dev; | ||
1756 | struct device *dev = net_dev->dev.parent; | ||
1757 | struct qm_sg_entry *sgt; | ||
1758 | struct sk_buff **skbh; | ||
1759 | int i, j, err, sz; | ||
1760 | void *buffer_start; | ||
1761 | skb_frag_t *frag; | ||
1762 | dma_addr_t addr; | ||
1763 | size_t frag_len; | ||
1764 | void *sgt_buf; | ||
1765 | |||
1766 | /* get a page frag to store the SGTable */ | ||
1767 | sz = SKB_DATA_ALIGN(priv->tx_headroom + | ||
1768 | sizeof(struct qm_sg_entry) * (1 + nr_frags)); | ||
1769 | sgt_buf = netdev_alloc_frag(sz); | ||
1770 | if (unlikely(!sgt_buf)) { | ||
1771 | netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n", | ||
1772 | sz); | ||
1773 | return -ENOMEM; | ||
1774 | } | ||
1775 | |||
1776 | /* Enable L3/L4 hardware checksum computation. | ||
1777 | * | ||
1778 | * We must do this before dma_map_single(DMA_TO_DEVICE), because we may | ||
1779 | * need to write into the skb. | ||
1780 | */ | ||
1781 | err = dpaa_enable_tx_csum(priv, skb, fd, | ||
1782 | sgt_buf + DPAA_TX_PRIV_DATA_SIZE); | ||
1783 | if (unlikely(err < 0)) { | ||
1784 | if (net_ratelimit()) | ||
1785 | netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", | ||
1786 | err); | ||
1787 | goto csum_failed; | ||
1788 | } | ||
1789 | |||
1790 | sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom); | ||
1791 | qm_sg_entry_set_len(&sgt[0], skb_headlen(skb)); | ||
1792 | sgt[0].bpid = FSL_DPAA_BPID_INV; | ||
1793 | sgt[0].offset = 0; | ||
1794 | addr = dma_map_single(dev, skb->data, | ||
1795 | skb_headlen(skb), dma_dir); | ||
1796 | if (unlikely(dma_mapping_error(dev, addr))) { | ||
1797 | dev_err(dev, "DMA mapping failed"); | ||
1798 | err = -EINVAL; | ||
1799 | goto sg0_map_failed; | ||
1800 | } | ||
1801 | qm_sg_entry_set64(&sgt[0], addr); | ||
1802 | |||
1803 | /* populate the rest of SGT entries */ | ||
1804 | frag = &skb_shinfo(skb)->frags[0]; | ||
1805 | frag_len = frag->size; | ||
1806 | for (i = 1; i <= nr_frags; i++, frag++) { | ||
1807 | WARN_ON(!skb_frag_page(frag)); | ||
1808 | addr = skb_frag_dma_map(dev, frag, 0, | ||
1809 | frag_len, dma_dir); | ||
1810 | if (unlikely(dma_mapping_error(dev, addr))) { | ||
1811 | dev_err(dev, "DMA mapping failed"); | ||
1812 | err = -EINVAL; | ||
1813 | goto sg_map_failed; | ||
1814 | } | ||
1815 | |||
1816 | qm_sg_entry_set_len(&sgt[i], frag_len); | ||
1817 | sgt[i].bpid = FSL_DPAA_BPID_INV; | ||
1818 | sgt[i].offset = 0; | ||
1819 | |||
1820 | /* keep the offset in the address */ | ||
1821 | qm_sg_entry_set64(&sgt[i], addr); | ||
1822 | frag_len = frag->size; | ||
1823 | } | ||
1824 | qm_sg_entry_set_f(&sgt[i - 1], frag_len); | ||
1825 | |||
1826 | qm_fd_set_sg(fd, priv->tx_headroom, skb->len); | ||
1827 | |||
1828 | /* DMA map the SGT page */ | ||
1829 | buffer_start = (void *)sgt - priv->tx_headroom; | ||
1830 | skbh = (struct sk_buff **)buffer_start; | ||
1831 | *skbh = skb; | ||
1832 | |||
1833 | addr = dma_map_single(dev, buffer_start, priv->tx_headroom + | ||
1834 | sizeof(struct qm_sg_entry) * (1 + nr_frags), | ||
1835 | dma_dir); | ||
1836 | if (unlikely(dma_mapping_error(dev, addr))) { | ||
1837 | dev_err(dev, "DMA mapping failed"); | ||
1838 | err = -EINVAL; | ||
1839 | goto sgt_map_failed; | ||
1840 | } | ||
1841 | |||
1842 | fd->bpid = FSL_DPAA_BPID_INV; | ||
1843 | fd->cmd |= FM_FD_CMD_FCO; | ||
1844 | qm_fd_addr_set64(fd, addr); | ||
1845 | |||
1846 | return 0; | ||
1847 | |||
1848 | sgt_map_failed: | ||
1849 | sg_map_failed: | ||
1850 | for (j = 0; j < i; j++) | ||
1851 | dma_unmap_page(dev, qm_sg_addr(&sgt[j]), | ||
1852 | qm_sg_entry_get_len(&sgt[j]), dma_dir); | ||
1853 | sg0_map_failed: | ||
1854 | csum_failed: | ||
1855 | skb_free_frag(sgt_buf); | ||
1856 | |||
1857 | return err; | ||
1858 | } | ||
1859 | |||
1860 | static inline int dpaa_xmit(struct dpaa_priv *priv, | ||
1861 | struct rtnl_link_stats64 *percpu_stats, | ||
1862 | int queue, | ||
1863 | struct qm_fd *fd) | ||
1864 | { | ||
1865 | struct qman_fq *egress_fq; | ||
1866 | int err, i; | ||
1867 | |||
1868 | egress_fq = priv->egress_fqs[queue]; | ||
1869 | if (fd->bpid == FSL_DPAA_BPID_INV) | ||
1870 | fd->cmd |= qman_fq_fqid(priv->conf_fqs[queue]); | ||
1871 | |||
1872 | /* Trace this Tx fd */ | ||
1873 | trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd); | ||
1874 | |||
1875 | for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) { | ||
1876 | err = qman_enqueue(egress_fq, fd); | ||
1877 | if (err != -EBUSY) | ||
1878 | break; | ||
1879 | } | ||
1880 | |||
1881 | if (unlikely(err < 0)) { | ||
1882 | percpu_stats->tx_errors++; | ||
1883 | percpu_stats->tx_fifo_errors++; | ||
1884 | return err; | ||
1885 | } | ||
1886 | |||
1887 | percpu_stats->tx_packets++; | ||
1888 | percpu_stats->tx_bytes += qm_fd_get_length(fd); | ||
1889 | |||
1890 | return 0; | ||
1891 | } | ||
1892 | |||
1893 | static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | ||
1894 | { | ||
1895 | const int queue_mapping = skb_get_queue_mapping(skb); | ||
1896 | bool nonlinear = skb_is_nonlinear(skb); | ||
1897 | struct rtnl_link_stats64 *percpu_stats; | ||
1898 | struct dpaa_percpu_priv *percpu_priv; | ||
1899 | struct dpaa_priv *priv; | ||
1900 | struct qm_fd fd; | ||
1901 | int offset = 0; | ||
1902 | int err = 0; | ||
1903 | |||
1904 | priv = netdev_priv(net_dev); | ||
1905 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | ||
1906 | percpu_stats = &percpu_priv->stats; | ||
1907 | |||
1908 | qm_fd_clear_fd(&fd); | ||
1909 | |||
1910 | if (!nonlinear) { | ||
1911 | /* We're going to store the skb backpointer at the beginning | ||
1912 | * of the data buffer, so we need a privately owned skb | ||
1913 | * | ||
1914 | 		 * We've made sure the skb is not shared in dev->priv_flags, | ||
1915 | 		 * but we still need to verify that the skb head is not cloned | ||
1916 | */ | ||
1917 | if (skb_cow_head(skb, priv->tx_headroom)) | ||
1918 | goto enomem; | ||
1919 | |||
1920 | WARN_ON(skb_is_nonlinear(skb)); | ||
1921 | } | ||
1922 | |||
1923 | 	/* MAX_SKB_FRAGS is equal to or larger than our DPAA_SGT_MAX_ENTRIES; | ||
1924 | * make sure we don't feed FMan with more fragments than it supports. | ||
1925 | */ | ||
1926 | if (nonlinear && | ||
1927 | likely(skb_shinfo(skb)->nr_frags < DPAA_SGT_MAX_ENTRIES)) { | ||
1928 | /* Just create a S/G fd based on the skb */ | ||
1929 | err = skb_to_sg_fd(priv, skb, &fd); | ||
1930 | percpu_priv->tx_frag_skbuffs++; | ||
1931 | } else { | ||
1932 | /* If the egress skb contains more fragments than we support | ||
1933 | * we have no choice but to linearize it ourselves. | ||
1934 | */ | ||
1935 | if (unlikely(nonlinear) && __skb_linearize(skb)) | ||
1936 | goto enomem; | ||
1937 | |||
1938 | /* Finally, create a contig FD from this skb */ | ||
1939 | err = skb_to_contig_fd(priv, skb, &fd, &offset); | ||
1940 | } | ||
1941 | if (unlikely(err < 0)) | ||
1942 | goto skb_to_fd_failed; | ||
1943 | |||
1944 | if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0)) | ||
1945 | return NETDEV_TX_OK; | ||
1946 | |||
1947 | dpaa_cleanup_tx_fd(priv, &fd); | ||
1948 | skb_to_fd_failed: | ||
1949 | enomem: | ||
1950 | percpu_stats->tx_errors++; | ||
1951 | dev_kfree_skb(skb); | ||
1952 | return NETDEV_TX_OK; | ||
1953 | } | ||
1954 | |||
1955 | static void dpaa_rx_error(struct net_device *net_dev, | ||
1956 | const struct dpaa_priv *priv, | ||
1957 | struct dpaa_percpu_priv *percpu_priv, | ||
1958 | const struct qm_fd *fd, | ||
1959 | u32 fqid) | ||
1960 | { | ||
1961 | if (net_ratelimit()) | ||
1962 | netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n", | ||
1963 | fd->status & FM_FD_STAT_RX_ERRORS); | ||
1964 | |||
1965 | percpu_priv->stats.rx_errors++; | ||
1966 | |||
1967 | if (fd->status & FM_FD_ERR_DMA) | ||
1968 | percpu_priv->rx_errors.dme++; | ||
1969 | if (fd->status & FM_FD_ERR_PHYSICAL) | ||
1970 | percpu_priv->rx_errors.fpe++; | ||
1971 | if (fd->status & FM_FD_ERR_SIZE) | ||
1972 | percpu_priv->rx_errors.fse++; | ||
1973 | if (fd->status & FM_FD_ERR_PRS_HDR_ERR) | ||
1974 | percpu_priv->rx_errors.phe++; | ||
1975 | |||
1976 | dpaa_fd_release(net_dev, fd); | ||
1977 | } | ||
1978 | |||
1979 | static void dpaa_tx_error(struct net_device *net_dev, | ||
1980 | const struct dpaa_priv *priv, | ||
1981 | struct dpaa_percpu_priv *percpu_priv, | ||
1982 | const struct qm_fd *fd, | ||
1983 | u32 fqid) | ||
1984 | { | ||
1985 | struct sk_buff *skb; | ||
1986 | |||
1987 | if (net_ratelimit()) | ||
1988 | netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", | ||
1989 | fd->status & FM_FD_STAT_TX_ERRORS); | ||
1990 | |||
1991 | percpu_priv->stats.tx_errors++; | ||
1992 | |||
1993 | skb = dpaa_cleanup_tx_fd(priv, fd); | ||
1994 | dev_kfree_skb(skb); | ||
1995 | } | ||
1996 | |||
1997 | static int dpaa_eth_poll(struct napi_struct *napi, int budget) | ||
1998 | { | ||
1999 | struct dpaa_napi_portal *np = | ||
2000 | container_of(napi, struct dpaa_napi_portal, napi); | ||
2001 | |||
2002 | int cleaned = qman_p_poll_dqrr(np->p, budget); | ||
2003 | |||
2004 | if (cleaned < budget) { | ||
2005 | napi_complete(napi); | ||
2006 | qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); | ||
2007 | |||
2008 | } else if (np->down) { | ||
2009 | qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); | ||
2010 | } | ||
2011 | |||
2012 | return cleaned; | ||
2013 | } | ||
2014 | |||
2015 | static void dpaa_tx_conf(struct net_device *net_dev, | ||
2016 | const struct dpaa_priv *priv, | ||
2017 | struct dpaa_percpu_priv *percpu_priv, | ||
2018 | const struct qm_fd *fd, | ||
2019 | u32 fqid) | ||
2020 | { | ||
2021 | struct sk_buff *skb; | ||
2022 | |||
2023 | if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) { | ||
2024 | if (net_ratelimit()) | ||
2025 | netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", | ||
2026 | fd->status & FM_FD_STAT_TX_ERRORS); | ||
2027 | |||
2028 | percpu_priv->stats.tx_errors++; | ||
2029 | } | ||
2030 | |||
2031 | percpu_priv->tx_confirm++; | ||
2032 | |||
2033 | skb = dpaa_cleanup_tx_fd(priv, fd); | ||
2034 | |||
2035 | consume_skb(skb); | ||
2036 | } | ||
2037 | |||
2038 | static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv, | ||
2039 | struct qman_portal *portal) | ||
2040 | { | ||
2041 | if (unlikely(in_irq() || !in_serving_softirq())) { | ||
2042 | /* Disable QMan IRQ and invoke NAPI */ | ||
2043 | qman_p_irqsource_remove(portal, QM_PIRQ_DQRI); | ||
2044 | |||
2045 | percpu_priv->np.p = portal; | ||
2046 | napi_schedule(&percpu_priv->np.napi); | ||
2047 | percpu_priv->in_interrupt++; | ||
2048 | return 1; | ||
2049 | } | ||
2050 | return 0; | ||
2051 | } | ||
2052 | |||
2053 | static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal, | ||
2054 | struct qman_fq *fq, | ||
2055 | const struct qm_dqrr_entry *dq) | ||
2056 | { | ||
2057 | struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base); | ||
2058 | struct dpaa_percpu_priv *percpu_priv; | ||
2059 | struct net_device *net_dev; | ||
2060 | struct dpaa_bp *dpaa_bp; | ||
2061 | struct dpaa_priv *priv; | ||
2062 | |||
2063 | net_dev = dpaa_fq->net_dev; | ||
2064 | priv = netdev_priv(net_dev); | ||
2065 | dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); | ||
2066 | if (!dpaa_bp) | ||
2067 | return qman_cb_dqrr_consume; | ||
2068 | |||
2069 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | ||
2070 | |||
2071 | if (dpaa_eth_napi_schedule(percpu_priv, portal)) | ||
2072 | return qman_cb_dqrr_stop; | ||
2073 | |||
2074 | if (dpaa_eth_refill_bpools(priv)) | ||
2075 | /* Unable to refill the buffer pool due to insufficient | ||
2076 | * system memory. Just release the frame back into the pool, | ||
2077 | * otherwise we'll soon end up with an empty buffer pool. | ||
2078 | */ | ||
2079 | dpaa_fd_release(net_dev, &dq->fd); | ||
2080 | else | ||
2081 | dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); | ||
2082 | |||
2083 | return qman_cb_dqrr_consume; | ||
2084 | } | ||
2085 | |||
2086 | static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, | ||
2087 | struct qman_fq *fq, | ||
2088 | const struct qm_dqrr_entry *dq) | ||
2089 | { | ||
2090 | struct rtnl_link_stats64 *percpu_stats; | ||
2091 | struct dpaa_percpu_priv *percpu_priv; | ||
2092 | const struct qm_fd *fd = &dq->fd; | ||
2093 | dma_addr_t addr = qm_fd_addr(fd); | ||
2094 | enum qm_fd_format fd_format; | ||
2095 | struct net_device *net_dev; | ||
2096 | u32 fd_status = fd->status; | ||
2097 | struct dpaa_bp *dpaa_bp; | ||
2098 | struct dpaa_priv *priv; | ||
2099 | unsigned int skb_len; | ||
2100 | struct sk_buff *skb; | ||
2101 | int *count_ptr; | ||
2102 | |||
2103 | net_dev = ((struct dpaa_fq *)fq)->net_dev; | ||
2104 | priv = netdev_priv(net_dev); | ||
2105 | dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); | ||
2106 | if (!dpaa_bp) | ||
2107 | return qman_cb_dqrr_consume; | ||
2108 | |||
2109 | /* Trace the Rx fd */ | ||
2110 | trace_dpaa_rx_fd(net_dev, fq, &dq->fd); | ||
2111 | |||
2112 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | ||
2113 | percpu_stats = &percpu_priv->stats; | ||
2114 | |||
2115 | if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal))) | ||
2116 | return qman_cb_dqrr_stop; | ||
2117 | |||
2118 | /* Make sure we didn't run out of buffers */ | ||
2119 | if (unlikely(dpaa_eth_refill_bpools(priv))) { | ||
2120 | /* Unable to refill the buffer pool due to insufficient | ||
2121 | * system memory. Just release the frame back into the pool, | ||
2122 | * otherwise we'll soon end up with an empty buffer pool. | ||
2123 | */ | ||
2124 | dpaa_fd_release(net_dev, &dq->fd); | ||
2125 | return qman_cb_dqrr_consume; | ||
2126 | } | ||
2127 | |||
2128 | if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) { | ||
2129 | if (net_ratelimit()) | ||
2130 | netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", | ||
2131 | fd_status & FM_FD_STAT_RX_ERRORS); | ||
2132 | |||
2133 | percpu_stats->rx_errors++; | ||
2134 | dpaa_fd_release(net_dev, fd); | ||
2135 | return qman_cb_dqrr_consume; | ||
2136 | } | ||
2137 | |||
2138 | dpaa_bp = dpaa_bpid2pool(fd->bpid); | ||
2139 | if (!dpaa_bp) | ||
2140 | return qman_cb_dqrr_consume; | ||
2141 | |||
2142 | dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE); | ||
2143 | |||
2144 | /* prefetch the first 64 bytes of the frame or the SGT start */ | ||
2145 | prefetch(phys_to_virt(addr) + qm_fd_get_offset(fd)); | ||
2146 | |||
2147 | fd_format = qm_fd_get_format(fd); | ||
2148 | /* The only FD types that we may receive are contig and S/G */ | ||
2149 | WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg)); | ||
2150 | |||
2151 | /* Account for either the contig buffer or the SGT buffer (depending on | ||
2152 | * which case we were in) having been removed from the pool. | ||
2153 | */ | ||
2154 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); | ||
2155 | (*count_ptr)--; | ||
2156 | |||
2157 | if (likely(fd_format == qm_fd_contig)) | ||
2158 | skb = contig_fd_to_skb(priv, fd); | ||
2159 | else | ||
2160 | skb = sg_fd_to_skb(priv, fd); | ||
2161 | if (!skb) | ||
2162 | return qman_cb_dqrr_consume; | ||
2163 | |||
2164 | skb->protocol = eth_type_trans(skb, net_dev); | ||
2165 | |||
2166 | skb_len = skb->len; | ||
2167 | |||
2168 | if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) | ||
2169 | return qman_cb_dqrr_consume; | ||
2170 | |||
2171 | percpu_stats->rx_packets++; | ||
2172 | percpu_stats->rx_bytes += skb_len; | ||
2173 | |||
2174 | return qman_cb_dqrr_consume; | ||
2175 | } | ||
2176 | |||
2177 | static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal, | ||
2178 | struct qman_fq *fq, | ||
2179 | const struct qm_dqrr_entry *dq) | ||
2180 | { | ||
2181 | struct dpaa_percpu_priv *percpu_priv; | ||
2182 | struct net_device *net_dev; | ||
2183 | struct dpaa_priv *priv; | ||
2184 | |||
2185 | net_dev = ((struct dpaa_fq *)fq)->net_dev; | ||
2186 | priv = netdev_priv(net_dev); | ||
2187 | |||
2188 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | ||
2189 | |||
2190 | if (dpaa_eth_napi_schedule(percpu_priv, portal)) | ||
2191 | return qman_cb_dqrr_stop; | ||
2192 | |||
2193 | dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); | ||
2194 | |||
2195 | return qman_cb_dqrr_consume; | ||
2196 | } | ||
2197 | |||
2198 | static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal, | ||
2199 | struct qman_fq *fq, | ||
2200 | const struct qm_dqrr_entry *dq) | ||
2201 | { | ||
2202 | struct dpaa_percpu_priv *percpu_priv; | ||
2203 | struct net_device *net_dev; | ||
2204 | struct dpaa_priv *priv; | ||
2205 | |||
2206 | net_dev = ((struct dpaa_fq *)fq)->net_dev; | ||
2207 | priv = netdev_priv(net_dev); | ||
2208 | |||
2209 | /* Trace the fd */ | ||
2210 | trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd); | ||
2211 | |||
2212 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | ||
2213 | |||
2214 | if (dpaa_eth_napi_schedule(percpu_priv, portal)) | ||
2215 | return qman_cb_dqrr_stop; | ||
2216 | |||
2217 | dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); | ||
2218 | |||
2219 | return qman_cb_dqrr_consume; | ||
2220 | } | ||
2221 | |||
2222 | static void egress_ern(struct qman_portal *portal, | ||
2223 | struct qman_fq *fq, | ||
2224 | const union qm_mr_entry *msg) | ||
2225 | { | ||
2226 | const struct qm_fd *fd = &msg->ern.fd; | ||
2227 | struct dpaa_percpu_priv *percpu_priv; | ||
2228 | const struct dpaa_priv *priv; | ||
2229 | struct net_device *net_dev; | ||
2230 | struct sk_buff *skb; | ||
2231 | |||
2232 | net_dev = ((struct dpaa_fq *)fq)->net_dev; | ||
2233 | priv = netdev_priv(net_dev); | ||
2234 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | ||
2235 | |||
2236 | percpu_priv->stats.tx_dropped++; | ||
2237 | percpu_priv->stats.tx_fifo_errors++; | ||
2238 | count_ern(percpu_priv, msg); | ||
2239 | |||
2240 | skb = dpaa_cleanup_tx_fd(priv, fd); | ||
2241 | dev_kfree_skb_any(skb); | ||
2242 | } | ||
2243 | |||
2244 | static const struct dpaa_fq_cbs dpaa_fq_cbs = { | ||
2245 | .rx_defq = { .cb = { .dqrr = rx_default_dqrr } }, | ||
2246 | .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } }, | ||
2247 | .rx_errq = { .cb = { .dqrr = rx_error_dqrr } }, | ||
2248 | .tx_errq = { .cb = { .dqrr = conf_error_dqrr } }, | ||
2249 | .egress_ern = { .cb = { .ern = egress_ern } } | ||
2250 | }; | ||
2251 | |||
2252 | static void dpaa_eth_napi_enable(struct dpaa_priv *priv) | ||
2253 | { | ||
2254 | struct dpaa_percpu_priv *percpu_priv; | ||
2255 | int i; | ||
2256 | |||
2257 | for_each_possible_cpu(i) { | ||
2258 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); | ||
2259 | |||
2260 | percpu_priv->np.down = 0; | ||
2261 | napi_enable(&percpu_priv->np.napi); | ||
2262 | } | ||
2263 | } | ||
2264 | |||
2265 | static void dpaa_eth_napi_disable(struct dpaa_priv *priv) | ||
2266 | { | ||
2267 | struct dpaa_percpu_priv *percpu_priv; | ||
2268 | int i; | ||
2269 | |||
2270 | for_each_possible_cpu(i) { | ||
2271 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); | ||
2272 | |||
2273 | percpu_priv->np.down = 1; | ||
2274 | napi_disable(&percpu_priv->np.napi); | ||
2275 | } | ||
2276 | } | ||
2277 | |||
2278 | static int dpaa_open(struct net_device *net_dev) | ||
2279 | { | ||
2280 | struct mac_device *mac_dev; | ||
2281 | struct dpaa_priv *priv; | ||
2282 | int err, i; | ||
2283 | |||
2284 | priv = netdev_priv(net_dev); | ||
2285 | mac_dev = priv->mac_dev; | ||
2286 | dpaa_eth_napi_enable(priv); | ||
2287 | |||
2288 | net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev); | ||
2289 | if (!net_dev->phydev) { | ||
2290 | netif_err(priv, ifup, net_dev, "init_phy() failed\n"); | ||
2291 | return -ENODEV; | ||
2292 | } | ||
2293 | |||
2294 | for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { | ||
2295 | err = fman_port_enable(mac_dev->port[i]); | ||
2296 | if (err) | ||
2297 | goto mac_start_failed; | ||
2298 | } | ||
2299 | |||
2300 | err = priv->mac_dev->start(mac_dev); | ||
2301 | if (err < 0) { | ||
2302 | netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err); | ||
2303 | goto mac_start_failed; | ||
2304 | } | ||
2305 | |||
2306 | netif_tx_start_all_queues(net_dev); | ||
2307 | |||
2308 | return 0; | ||
2309 | |||
2310 | mac_start_failed: | ||
2311 | for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) | ||
2312 | fman_port_disable(mac_dev->port[i]); | ||
2313 | |||
2314 | dpaa_eth_napi_disable(priv); | ||
2315 | |||
2316 | return err; | ||
2317 | } | ||
2318 | |||
2319 | static int dpaa_eth_stop(struct net_device *net_dev) | ||
2320 | { | ||
2321 | struct dpaa_priv *priv; | ||
2322 | int err; | ||
2323 | |||
2324 | err = dpaa_stop(net_dev); | ||
2325 | |||
2326 | priv = netdev_priv(net_dev); | ||
2327 | dpaa_eth_napi_disable(priv); | ||
2328 | |||
2329 | return err; | ||
2330 | } | ||
2331 | |||
2332 | static const struct net_device_ops dpaa_ops = { | ||
2333 | .ndo_open = dpaa_open, | ||
2334 | .ndo_start_xmit = dpaa_start_xmit, | ||
2335 | .ndo_stop = dpaa_eth_stop, | ||
2336 | .ndo_tx_timeout = dpaa_tx_timeout, | ||
2337 | .ndo_get_stats64 = dpaa_get_stats64, | ||
2338 | .ndo_set_mac_address = dpaa_set_mac_address, | ||
2339 | .ndo_validate_addr = eth_validate_addr, | ||
2340 | .ndo_set_rx_mode = dpaa_set_rx_mode, | ||
2341 | }; | ||
2342 | |||
2343 | static int dpaa_napi_add(struct net_device *net_dev) | ||
2344 | { | ||
2345 | struct dpaa_priv *priv = netdev_priv(net_dev); | ||
2346 | struct dpaa_percpu_priv *percpu_priv; | ||
2347 | int cpu; | ||
2348 | |||
2349 | for_each_possible_cpu(cpu) { | ||
2350 | percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); | ||
2351 | |||
2352 | netif_napi_add(net_dev, &percpu_priv->np.napi, | ||
2353 | dpaa_eth_poll, NAPI_POLL_WEIGHT); | ||
2354 | } | ||
2355 | |||
2356 | return 0; | ||
2357 | } | ||
2358 | |||
2359 | static void dpaa_napi_del(struct net_device *net_dev) | ||
2360 | { | ||
2361 | struct dpaa_priv *priv = netdev_priv(net_dev); | ||
2362 | struct dpaa_percpu_priv *percpu_priv; | ||
2363 | int cpu; | ||
2364 | |||
2365 | for_each_possible_cpu(cpu) { | ||
2366 | percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); | ||
2367 | |||
2368 | netif_napi_del(&percpu_priv->np.napi); | ||
2369 | } | ||
2370 | } | ||
2371 | |||
2372 | static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp, | ||
2373 | struct bm_buffer *bmb) | ||
2374 | { | ||
2375 | dma_addr_t addr = bm_buf_addr(bmb); | ||
2376 | |||
2377 | dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE); | ||
2378 | |||
2379 | skb_free_frag(phys_to_virt(addr)); | ||
2380 | } | ||
2381 | |||
2382 | /* Alloc the dpaa_bp struct and configure default values */ | ||
2383 | static struct dpaa_bp *dpaa_bp_alloc(struct device *dev) | ||
2384 | { | ||
2385 | struct dpaa_bp *dpaa_bp; | ||
2386 | |||
2387 | dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL); | ||
2388 | if (!dpaa_bp) | ||
2389 | return ERR_PTR(-ENOMEM); | ||
2390 | |||
2391 | dpaa_bp->bpid = FSL_DPAA_BPID_INV; | ||
2392 | dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count); | ||
2393 | dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT; | ||
2394 | |||
2395 | dpaa_bp->seed_cb = dpaa_bp_seed; | ||
2396 | dpaa_bp->free_buf_cb = dpaa_bp_free_pf; | ||
2397 | |||
2398 | return dpaa_bp; | ||
2399 | } | ||
2400 | |||
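devm_alloc_percpu() returns NULL on failure, which dpaa_eth_probe() below checks for priv->percpu_priv. A more defensive variant of the allocation in dpaa_bp_alloc() could check its own result as well; a minimal sketch, not part of this patch:

	dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count);
	if (!dpaa_bp->percpu_count)
		return ERR_PTR(-ENOMEM);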
2401 | /* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR. | ||
2402 | * We won't be sending congestion notifications to FMan; for now, we just use | ||
2403 | * this CGR to generate enqueue rejections to FMan in order to drop the frames | ||
2404 | * before they reach our ingress queues and eat up memory. | ||
2405 | */ | ||
2406 | static int dpaa_ingress_cgr_init(struct dpaa_priv *priv) | ||
2407 | { | ||
2408 | struct qm_mcc_initcgr initcgr; | ||
2409 | u32 cs_th; | ||
2410 | int err; | ||
2411 | |||
2412 | err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid); | ||
2413 | if (err < 0) { | ||
2414 | if (netif_msg_drv(priv)) | ||
2415 | pr_err("Error %d allocating CGR ID\n", err); | ||
2416 | goto out_error; | ||
2417 | } | ||
2418 | |||
2419 | /* Enable CS TD, but disable Congestion State Change Notifications. */ | ||
2420 | initcgr.we_mask = QM_CGR_WE_CS_THRES; | ||
2421 | initcgr.cgr.cscn_en = QM_CGR_EN; | ||
2422 | cs_th = DPAA_INGRESS_CS_THRESHOLD; | ||
2423 | qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); | ||
2424 | |||
2425 | initcgr.we_mask |= QM_CGR_WE_CSTD_EN; | ||
2426 | initcgr.cgr.cstd_en = QM_CGR_EN; | ||
2427 | |||
2428 | /* This CGR will be associated with the SWP affined to the current CPU. | ||
2429 | * However, we'll place all our ingress FQs in it. | ||
2430 | */ | ||
2431 | err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT, | ||
2432 | &initcgr); | ||
2433 | if (err < 0) { | ||
2434 | if (netif_msg_drv(priv)) | ||
2435 | pr_err("Error %d creating ingress CGR with ID %d\n", | ||
2436 | err, priv->ingress_cgr.cgrid); | ||
2437 | qman_release_cgrid(priv->ingress_cgr.cgrid); | ||
2438 | goto out_error; | ||
2439 | } | ||
2440 | if (netif_msg_drv(priv)) | ||
2441 | pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n", | ||
2442 | priv->ingress_cgr.cgrid, priv->mac_dev->addr); | ||
2443 | |||
2444 | priv->use_ingress_cgr = true; | ||
2445 | |||
2446 | out_error: | ||
2447 | return err; | ||
2448 | } | ||
2449 | |||
2450 | static const struct of_device_id dpaa_match[]; | ||
2451 | |||
2452 | static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl) | ||
2453 | { | ||
2454 | u16 headroom; | ||
2455 | |||
2456 | /* The frame headroom must accommodate: | ||
2457 | * - the driver private data area | ||
2458 | * - parse results, hash results, timestamp if selected | ||
2459 | 	 * If either hash results or the timestamp is selected, both will | ||
2460 | * be copied to/from the frame headroom, as TS is located between PR and | ||
2461 | 	 * HR in the IC and the IC copy size has a granularity of 16 bytes | ||
2462 | * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM) | ||
2463 | * | ||
2464 | * Also make sure the headroom is a multiple of data_align bytes | ||
2465 | */ | ||
2466 | headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE + | ||
2467 | DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE); | ||
2468 | |||
2469 | return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom, | ||
2470 | DPAA_FD_DATA_ALIGNMENT) : | ||
2471 | headroom; | ||
2472 | } | ||
2473 | |||
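As a worked example of the computation above, with made-up sizes (the actual DPAA_* constants are defined elsewhere in the driver): with a 16-byte private data area and 32 + 8 + 8 bytes for parse results, timestamp and hash results, the raw headroom is 64 bytes; with a 16-byte DPAA_FD_DATA_ALIGNMENT, ALIGN(64, 16) keeps it at 64, whereas a raw value of 66 would be rounded up to 80.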
2474 | static int dpaa_eth_probe(struct platform_device *pdev) | ||
2475 | { | ||
2476 | struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL}; | ||
2477 | struct dpaa_percpu_priv *percpu_priv; | ||
2478 | struct net_device *net_dev = NULL; | ||
2479 | struct dpaa_fq *dpaa_fq, *tmp; | ||
2480 | struct dpaa_priv *priv = NULL; | ||
2481 | struct fm_port_fqs port_fqs; | ||
2482 | struct mac_device *mac_dev; | ||
2483 | int err = 0, i, channel; | ||
2484 | struct device *dev; | ||
2485 | |||
2486 | dev = &pdev->dev; | ||
2487 | |||
2488 | /* Allocate this early, so we can store relevant information in | ||
2489 | * the private area | ||
2490 | */ | ||
2491 | net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM); | ||
2492 | if (!net_dev) { | ||
2493 | dev_err(dev, "alloc_etherdev_mq() failed\n"); | ||
2494 | goto alloc_etherdev_mq_failed; | ||
2495 | } | ||
2496 | |||
2497 | /* Do this here, so we can be verbose early */ | ||
2498 | SET_NETDEV_DEV(net_dev, dev); | ||
2499 | dev_set_drvdata(dev, net_dev); | ||
2500 | |||
2501 | priv = netdev_priv(net_dev); | ||
2502 | priv->net_dev = net_dev; | ||
2503 | |||
2504 | priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT); | ||
2505 | |||
2506 | mac_dev = dpaa_mac_dev_get(pdev); | ||
2507 | if (IS_ERR(mac_dev)) { | ||
2508 | dev_err(dev, "dpaa_mac_dev_get() failed\n"); | ||
2509 | err = PTR_ERR(mac_dev); | ||
2510 | goto mac_probe_failed; | ||
2511 | } | ||
2512 | |||
2513 | 	/* If fsl_fm_max_frm is set to a value higher than the common 1500, | ||
2514 | * we choose conservatively and let the user explicitly set a higher | ||
2515 | * MTU via ifconfig. Otherwise, the user may end up with different MTUs | ||
2516 | * in the same LAN. | ||
2517 | 	 * If, on the other hand, fsl_fm_max_frm has been chosen below 1500, | ||
2518 | * start with the maximum allowed. | ||
2519 | */ | ||
2520 | net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN); | ||
2521 | |||
2522 | netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n", | ||
2523 | net_dev->mtu); | ||
2524 | |||
2525 | priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */ | ||
2526 | priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ | ||
2527 | |||
2528 | /* device used for DMA mapping */ | ||
2529 | arch_setup_dma_ops(dev, 0, 0, NULL, false); | ||
2530 | err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); | ||
2531 | if (err) { | ||
2532 | dev_err(dev, "dma_coerce_mask_and_coherent() failed\n"); | ||
2533 | goto dev_mask_failed; | ||
2534 | } | ||
2535 | |||
2536 | /* bp init */ | ||
2537 | for (i = 0; i < DPAA_BPS_NUM; i++) { | ||
2538 | int err; | ||
2539 | |||
2540 | dpaa_bps[i] = dpaa_bp_alloc(dev); | ||
2541 | if (IS_ERR(dpaa_bps[i])) | ||
2542 | return PTR_ERR(dpaa_bps[i]); | ||
2543 | /* the raw size of the buffers used for reception */ | ||
2544 | dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM); | ||
2545 | /* avoid runtime computations by keeping the usable size here */ | ||
2546 | dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size); | ||
2547 | dpaa_bps[i]->dev = dev; | ||
2548 | |||
2549 | err = dpaa_bp_alloc_pool(dpaa_bps[i]); | ||
2550 | if (err < 0) { | ||
2551 | dpaa_bps_free(priv); | ||
2552 | priv->dpaa_bps[i] = NULL; | ||
2553 | goto bp_create_failed; | ||
2554 | } | ||
2555 | priv->dpaa_bps[i] = dpaa_bps[i]; | ||
2556 | } | ||
2557 | |||
2558 | INIT_LIST_HEAD(&priv->dpaa_fq_list); | ||
2559 | |||
2560 | memset(&port_fqs, 0, sizeof(port_fqs)); | ||
2561 | |||
2562 | err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs); | ||
2563 | if (err < 0) { | ||
2564 | dev_err(dev, "dpaa_alloc_all_fqs() failed\n"); | ||
2565 | goto fq_probe_failed; | ||
2566 | } | ||
2567 | |||
2568 | priv->mac_dev = mac_dev; | ||
2569 | |||
2570 | channel = dpaa_get_channel(); | ||
2571 | if (channel < 0) { | ||
2572 | dev_err(dev, "dpaa_get_channel() failed\n"); | ||
2573 | err = channel; | ||
2574 | goto get_channel_failed; | ||
2575 | } | ||
2576 | |||
2577 | priv->channel = (u16)channel; | ||
2578 | |||
2579 | /* Start a thread that will walk the CPUs with affine portals | ||
2580 | 	 * and add this pool channel to each one's dequeue mask. | ||
2581 | */ | ||
2582 | dpaa_eth_add_channel(priv->channel); | ||
2583 | |||
2584 | dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); | ||
2585 | |||
2586 | /* Create a congestion group for this netdev, with | ||
2587 | * dynamically-allocated CGR ID. | ||
2588 | * Must be executed after probing the MAC, but before | ||
2589 | * assigning the egress FQs to the CGRs. | ||
2590 | */ | ||
2591 | err = dpaa_eth_cgr_init(priv); | ||
2592 | if (err < 0) { | ||
2593 | dev_err(dev, "Error initializing CGR\n"); | ||
2594 | goto tx_cgr_init_failed; | ||
2595 | } | ||
2596 | |||
2597 | err = dpaa_ingress_cgr_init(priv); | ||
2598 | if (err < 0) { | ||
2599 | dev_err(dev, "Error initializing ingress CGR\n"); | ||
2600 | goto rx_cgr_init_failed; | ||
2601 | } | ||
2602 | |||
2603 | /* Add the FQs to the interface, and make them active */ | ||
2604 | list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) { | ||
2605 | err = dpaa_fq_init(dpaa_fq, false); | ||
2606 | if (err < 0) | ||
2607 | goto fq_alloc_failed; | ||
2608 | } | ||
2609 | |||
2610 | priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]); | ||
2611 | priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]); | ||
2612 | |||
2613 | /* All real interfaces need their ports initialized */ | ||
2614 | dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs, | ||
2615 | &priv->buf_layout[0], dev); | ||
2616 | |||
2617 | priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv); | ||
2618 | if (!priv->percpu_priv) { | ||
2619 | dev_err(dev, "devm_alloc_percpu() failed\n"); | ||
2620 | err = -ENOMEM; | ||
2621 | goto alloc_percpu_failed; | ||
2622 | } | ||
2623 | for_each_possible_cpu(i) { | ||
2624 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); | ||
2625 | memset(percpu_priv, 0, sizeof(*percpu_priv)); | ||
2626 | } | ||
2627 | |||
2628 | /* Initialize NAPI */ | ||
2629 | err = dpaa_napi_add(net_dev); | ||
2630 | if (err < 0) | ||
2631 | goto napi_add_failed; | ||
2632 | |||
2633 | err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout); | ||
2634 | if (err < 0) | ||
2635 | goto netdev_init_failed; | ||
2636 | |||
2637 | dpaa_eth_sysfs_init(&net_dev->dev); | ||
2638 | |||
2639 | netif_info(priv, probe, net_dev, "Probed interface %s\n", | ||
2640 | net_dev->name); | ||
2641 | |||
2642 | return 0; | ||
2643 | |||
2644 | netdev_init_failed: | ||
2645 | napi_add_failed: | ||
2646 | dpaa_napi_del(net_dev); | ||
2647 | alloc_percpu_failed: | ||
2648 | dpaa_fq_free(dev, &priv->dpaa_fq_list); | ||
2649 | fq_alloc_failed: | ||
2650 | qman_delete_cgr_safe(&priv->ingress_cgr); | ||
2651 | qman_release_cgrid(priv->ingress_cgr.cgrid); | ||
2652 | rx_cgr_init_failed: | ||
2653 | qman_delete_cgr_safe(&priv->cgr_data.cgr); | ||
2654 | qman_release_cgrid(priv->cgr_data.cgr.cgrid); | ||
2655 | tx_cgr_init_failed: | ||
2656 | get_channel_failed: | ||
2657 | dpaa_bps_free(priv); | ||
2658 | bp_create_failed: | ||
2659 | fq_probe_failed: | ||
2660 | dev_mask_failed: | ||
2661 | mac_probe_failed: | ||
2662 | dev_set_drvdata(dev, NULL); | ||
2663 | free_netdev(net_dev); | ||
2664 | alloc_etherdev_mq_failed: | ||
2665 | for (i = 0; i < DPAA_BPS_NUM && dpaa_bps[i]; i++) { | ||
2666 | if (atomic_read(&dpaa_bps[i]->refs) == 0) | ||
2667 | devm_kfree(dev, dpaa_bps[i]); | ||
2668 | } | ||
2669 | return err; | ||
2670 | } | ||
2671 | |||
2672 | static int dpaa_remove(struct platform_device *pdev) | ||
2673 | { | ||
2674 | struct net_device *net_dev; | ||
2675 | struct dpaa_priv *priv; | ||
2676 | struct device *dev; | ||
2677 | int err; | ||
2678 | |||
2679 | dev = &pdev->dev; | ||
2680 | net_dev = dev_get_drvdata(dev); | ||
2681 | |||
2682 | priv = netdev_priv(net_dev); | ||
2683 | |||
2684 | dpaa_eth_sysfs_remove(dev); | ||
2685 | |||
2686 | dev_set_drvdata(dev, NULL); | ||
2687 | unregister_netdev(net_dev); | ||
2688 | |||
2689 | err = dpaa_fq_free(dev, &priv->dpaa_fq_list); | ||
2690 | |||
2691 | qman_delete_cgr_safe(&priv->ingress_cgr); | ||
2692 | qman_release_cgrid(priv->ingress_cgr.cgrid); | ||
2693 | qman_delete_cgr_safe(&priv->cgr_data.cgr); | ||
2694 | qman_release_cgrid(priv->cgr_data.cgr.cgrid); | ||
2695 | |||
2696 | dpaa_napi_del(net_dev); | ||
2697 | |||
2698 | dpaa_bps_free(priv); | ||
2699 | |||
2700 | free_netdev(net_dev); | ||
2701 | |||
2702 | return err; | ||
2703 | } | ||
2704 | |||
2705 | static struct platform_device_id dpaa_devtype[] = { | ||
2706 | { | ||
2707 | .name = "dpaa-ethernet", | ||
2708 | .driver_data = 0, | ||
2709 | }, { | ||
2710 | } | ||
2711 | }; | ||
2712 | MODULE_DEVICE_TABLE(platform, dpaa_devtype); | ||
2713 | |||
2714 | static struct platform_driver dpaa_driver = { | ||
2715 | .driver = { | ||
2716 | .name = KBUILD_MODNAME, | ||
2717 | }, | ||
2718 | .id_table = dpaa_devtype, | ||
2719 | .probe = dpaa_eth_probe, | ||
2720 | .remove = dpaa_remove | ||
2721 | }; | ||
2722 | |||
2723 | static int __init dpaa_load(void) | ||
2724 | { | ||
2725 | int err; | ||
2726 | |||
2727 | pr_debug("FSL DPAA Ethernet driver\n"); | ||
2728 | |||
2729 | /* initialize dpaa_eth mirror values */ | ||
2730 | dpaa_rx_extra_headroom = fman_get_rx_extra_headroom(); | ||
2731 | dpaa_max_frm = fman_get_max_frm(); | ||
2732 | |||
2733 | err = platform_driver_register(&dpaa_driver); | ||
2734 | if (err < 0) | ||
2735 | pr_err("Error, platform_driver_register() = %d\n", err); | ||
2736 | |||
2737 | return err; | ||
2738 | } | ||
2739 | module_init(dpaa_load); | ||
2740 | |||
2741 | static void __exit dpaa_unload(void) | ||
2742 | { | ||
2743 | platform_driver_unregister(&dpaa_driver); | ||
2744 | |||
2745 | /* Only one channel is used and needs to be released after all | ||
2746 | * interfaces are removed | ||
2747 | */ | ||
2748 | dpaa_release_channel(); | ||
2749 | } | ||
2750 | module_exit(dpaa_unload); | ||
2751 | |||
2752 | MODULE_LICENSE("Dual BSD/GPL"); | ||
2753 | MODULE_DESCRIPTION("FSL DPAA Ethernet driver"); | ||
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h new file mode 100644 index 000000000000..1f9aebf3f3c5 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | |||
@@ -0,0 +1,185 @@ | |||
1 | /* Copyright 2008 - 2016 Freescale Semiconductor Inc. | ||
2 | * | ||
3 | * Redistribution and use in source and binary forms, with or without | ||
4 | * modification, are permitted provided that the following conditions are met: | ||
5 | * * Redistributions of source code must retain the above copyright | ||
6 | * notice, this list of conditions and the following disclaimer. | ||
7 | * * Redistributions in binary form must reproduce the above copyright | ||
8 | * notice, this list of conditions and the following disclaimer in the | ||
9 | * documentation and/or other materials provided with the distribution. | ||
10 | * * Neither the name of Freescale Semiconductor nor the | ||
11 | * names of its contributors may be used to endorse or promote products | ||
12 | * derived from this software without specific prior written permission. | ||
13 | * | ||
14 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
15 | * GNU General Public License ("GPL") as published by the Free Software | ||
16 | * Foundation, either version 2 of that License or (at your option) any | ||
17 | * later version. | ||
18 | * | ||
19 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
22 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
23 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
25 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
29 | */ | ||
30 | |||
31 | #ifndef __DPAA_H | ||
32 | #define __DPAA_H | ||
33 | |||
34 | #include <linux/netdevice.h> | ||
35 | #include <soc/fsl/qman.h> | ||
36 | #include <soc/fsl/bman.h> | ||
37 | |||
38 | #include "fman.h" | ||
39 | #include "mac.h" | ||
40 | #include "dpaa_eth_trace.h" | ||
41 | |||
42 | #define DPAA_ETH_TXQ_NUM NR_CPUS | ||
43 | |||
44 | #define DPAA_BPS_NUM 3 /* number of bpools per interface */ | ||
45 | |||
46 | /* More detailed FQ types - used for fine-grained WQ assignments */ | ||
47 | enum dpaa_fq_type { | ||
48 | FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */ | ||
49 | FQ_TYPE_RX_ERROR, /* Rx Error FQs */ | ||
50 | FQ_TYPE_TX, /* "Real" Tx FQs */ | ||
51 | FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */ | ||
52 | FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */ | ||
53 | FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */ | ||
54 | }; | ||
55 | |||
56 | struct dpaa_fq { | ||
57 | struct qman_fq fq_base; | ||
58 | struct list_head list; | ||
59 | struct net_device *net_dev; | ||
60 | bool init; | ||
61 | u32 fqid; | ||
62 | u32 flags; | ||
63 | u16 channel; | ||
64 | u8 wq; | ||
65 | enum dpaa_fq_type fq_type; | ||
66 | }; | ||
67 | |||
68 | struct dpaa_fq_cbs { | ||
69 | struct qman_fq rx_defq; | ||
70 | struct qman_fq tx_defq; | ||
71 | struct qman_fq rx_errq; | ||
72 | struct qman_fq tx_errq; | ||
73 | struct qman_fq egress_ern; | ||
74 | }; | ||
75 | |||
76 | struct dpaa_bp { | ||
77 | /* device used in the DMA mapping operations */ | ||
78 | struct device *dev; | ||
79 | 	/* current number of buffers in the buffer pool allotted to each CPU */ | ||
80 | int __percpu *percpu_count; | ||
81 | /* all buffers allocated for this pool have this raw size */ | ||
82 | size_t raw_size; | ||
83 | /* all buffers in this pool have this same usable size */ | ||
84 | size_t size; | ||
85 | /* the buffer pools are initialized with config_count buffers for each | ||
86 | * CPU; at runtime the number of buffers per CPU is constantly brought | ||
87 | * back to this level | ||
88 | */ | ||
89 | u16 config_count; | ||
90 | u8 bpid; | ||
91 | struct bman_pool *pool; | ||
92 | /* bpool can be seeded before use by this cb */ | ||
93 | int (*seed_cb)(struct dpaa_bp *); | ||
94 | /* bpool can be emptied before freeing by this cb */ | ||
95 | void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *); | ||
96 | atomic_t refs; | ||
97 | }; | ||
98 | |||
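Because percpu_count is a per-CPU variable, a total buffer count for the pool has to be folded by hand. A minimal sketch of how a reporting path (sysfs, ethtool) might sum it, using only helpers already used in this patch (illustrative, not the driver's code):

	static int dpaa_bp_total_count(const struct dpaa_bp *bp)
	{
		int total = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			total += *per_cpu_ptr(bp->percpu_count, cpu);

		return total;
	}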
99 | struct dpaa_rx_errors { | ||
100 | u64 dme; /* DMA Error */ | ||
101 | u64 fpe; /* Frame Physical Error */ | ||
102 | u64 fse; /* Frame Size Error */ | ||
103 | u64 phe; /* Header Error */ | ||
104 | }; | ||
105 | |||
106 | /* Counters for QMan ERN frames - one counter per rejection code */ | ||
107 | struct dpaa_ern_cnt { | ||
108 | u64 cg_tdrop; /* Congestion group taildrop */ | ||
109 | u64 wred; /* WRED congestion */ | ||
110 | u64 err_cond; /* Error condition */ | ||
111 | u64 early_window; /* Order restoration, frame too early */ | ||
112 | u64 late_window; /* Order restoration, frame too late */ | ||
113 | u64 fq_tdrop; /* FQ taildrop */ | ||
114 | u64 fq_retired; /* FQ is retired */ | ||
115 | u64 orp_zero; /* ORP disabled */ | ||
116 | }; | ||
117 | |||
118 | struct dpaa_napi_portal { | ||
119 | struct napi_struct napi; | ||
120 | struct qman_portal *p; | ||
121 | bool down; | ||
122 | }; | ||
123 | |||
124 | struct dpaa_percpu_priv { | ||
125 | struct net_device *net_dev; | ||
126 | struct dpaa_napi_portal np; | ||
127 | u64 in_interrupt; | ||
128 | u64 tx_confirm; | ||
129 | /* fragmented (non-linear) skbuffs received from the stack */ | ||
130 | u64 tx_frag_skbuffs; | ||
131 | struct rtnl_link_stats64 stats; | ||
132 | struct dpaa_rx_errors rx_errors; | ||
133 | struct dpaa_ern_cnt ern_cnt; | ||
134 | }; | ||
135 | |||
136 | struct dpaa_buffer_layout { | ||
137 | u16 priv_data_size; | ||
138 | }; | ||
139 | |||
140 | struct dpaa_priv { | ||
141 | struct dpaa_percpu_priv __percpu *percpu_priv; | ||
142 | struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM]; | ||
143 | /* Store here the needed Tx headroom for convenience and speed | ||
144 | * (even though it can be computed based on the fields of buf_layout) | ||
145 | */ | ||
146 | u16 tx_headroom; | ||
147 | struct net_device *net_dev; | ||
148 | struct mac_device *mac_dev; | ||
149 | struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM]; | ||
150 | struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM]; | ||
151 | |||
152 | u16 channel; | ||
153 | struct list_head dpaa_fq_list; | ||
154 | |||
155 | u32 msg_enable; /* net_device message level */ | ||
156 | |||
157 | struct { | ||
158 | /* All egress queues to a given net device belong to one | ||
159 | * (and the same) congestion group. | ||
160 | */ | ||
161 | struct qman_cgr cgr; | ||
162 | /* If congested, when it began. Used for performance stats. */ | ||
163 | u32 congestion_start_jiffies; | ||
164 | /* Number of jiffies the Tx port was congested. */ | ||
165 | u32 congested_jiffies; | ||
166 | /* Counter for the number of times the CGR | ||
167 | * entered congestion state | ||
168 | */ | ||
169 | u32 cgr_congested_count; | ||
170 | } cgr_data; | ||
171 | /* Use a per-port CGR for ingress traffic. */ | ||
172 | bool use_ingress_cgr; | ||
173 | struct qman_cgr ingress_cgr; | ||
174 | |||
175 | struct dpaa_buffer_layout buf_layout[2]; | ||
176 | u16 rx_headroom; | ||
177 | }; | ||
178 | |||
179 | /* from dpaa_ethtool.c */ | ||
180 | extern const struct ethtool_ops dpaa_ethtool_ops; | ||
181 | |||
182 | /* from dpaa_eth_sysfs.c */ | ||
183 | void dpaa_eth_sysfs_remove(struct device *dev); | ||
184 | void dpaa_eth_sysfs_init(struct device *dev); | ||
185 | #endif /* __DPAA_H */ | ||
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c new file mode 100644 index 000000000000..ec75d1c6fa89 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c | |||
@@ -0,0 +1,165 @@ | |||
1 | /* Copyright 2008-2016 Freescale Semiconductor Inc. | ||
2 | * | ||
3 | * Redistribution and use in source and binary forms, with or without | ||
4 | * modification, are permitted provided that the following conditions are met: | ||
5 | * * Redistributions of source code must retain the above copyright | ||
6 | * notice, this list of conditions and the following disclaimer. | ||
7 | * * Redistributions in binary form must reproduce the above copyright | ||
8 | * notice, this list of conditions and the following disclaimer in the | ||
9 | * documentation and/or other materials provided with the distribution. | ||
10 | * * Neither the name of Freescale Semiconductor nor the | ||
11 | * names of its contributors may be used to endorse or promote products | ||
12 | * derived from this software without specific prior written permission. | ||
13 | * | ||
14 | * | ||
15 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
16 | * GNU General Public License ("GPL") as published by the Free Software | ||
17 | * Foundation, either version 2 of that License or (at your option) any | ||
18 | * later version. | ||
19 | * | ||
20 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
21 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
22 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
23 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
24 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
25 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
26 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
27 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
28 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
29 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
30 | */ | ||
31 | |||
32 | #include <linux/init.h> | ||
33 | #include <linux/module.h> | ||
34 | #include <linux/io.h> | ||
35 | #include <linux/of_net.h> | ||
36 | #include "dpaa_eth.h" | ||
37 | #include "mac.h" | ||
38 | |||
39 | static ssize_t dpaa_eth_show_addr(struct device *dev, | ||
40 | struct device_attribute *attr, char *buf) | ||
41 | { | ||
42 | struct dpaa_priv *priv = netdev_priv(to_net_dev(dev)); | ||
43 | struct mac_device *mac_dev = priv->mac_dev; | ||
44 | |||
45 | if (mac_dev) | ||
46 | return sprintf(buf, "%llx", | ||
47 | (unsigned long long)mac_dev->res->start); | ||
48 | else | ||
49 | return sprintf(buf, "none"); | ||
50 | } | ||
51 | |||
52 | static ssize_t dpaa_eth_show_fqids(struct device *dev, | ||
53 | struct device_attribute *attr, char *buf) | ||
54 | { | ||
55 | struct dpaa_priv *priv = netdev_priv(to_net_dev(dev)); | ||
56 | struct dpaa_fq *prev = NULL; | ||
57 | char *prevstr = NULL; | ||
58 | struct dpaa_fq *tmp; | ||
59 | struct dpaa_fq *fq; | ||
60 | u32 first_fqid = 0; | ||
61 | u32 last_fqid = 0; | ||
62 | ssize_t bytes = 0; | ||
63 | char *str; | ||
64 | int i = 0; | ||
65 | |||
66 | list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) { | ||
67 | switch (fq->fq_type) { | ||
68 | case FQ_TYPE_RX_DEFAULT: | ||
69 | str = "Rx default"; | ||
70 | break; | ||
71 | case FQ_TYPE_RX_ERROR: | ||
72 | str = "Rx error"; | ||
73 | break; | ||
74 | case FQ_TYPE_TX_CONFIRM: | ||
75 | str = "Tx default confirmation"; | ||
76 | break; | ||
77 | case FQ_TYPE_TX_CONF_MQ: | ||
78 | str = "Tx confirmation (mq)"; | ||
79 | break; | ||
80 | case FQ_TYPE_TX_ERROR: | ||
81 | str = "Tx error"; | ||
82 | break; | ||
83 | case FQ_TYPE_TX: | ||
84 | str = "Tx"; | ||
85 | break; | ||
86 | default: | ||
87 | str = "Unknown"; | ||
88 | } | ||
89 | |||
90 | if (prev && (abs(fq->fqid - prev->fqid) != 1 || | ||
91 | str != prevstr)) { | ||
92 | if (last_fqid == first_fqid) | ||
93 | bytes += sprintf(buf + bytes, | ||
94 | "%s: %d\n", prevstr, prev->fqid); | ||
95 | else | ||
96 | bytes += sprintf(buf + bytes, | ||
97 | "%s: %d - %d\n", prevstr, | ||
98 | first_fqid, last_fqid); | ||
99 | } | ||
100 | |||
101 | if (prev && abs(fq->fqid - prev->fqid) == 1 && | ||
102 | str == prevstr) { | ||
103 | last_fqid = fq->fqid; | ||
104 | } else { | ||
105 | first_fqid = fq->fqid; | ||
106 | last_fqid = fq->fqid; | ||
107 | } | ||
108 | |||
109 | prev = fq; | ||
110 | prevstr = str; | ||
111 | i++; | ||
112 | } | ||
113 | |||
114 | if (prev) { | ||
115 | if (last_fqid == first_fqid) | ||
116 | bytes += sprintf(buf + bytes, "%s: %d\n", prevstr, | ||
117 | prev->fqid); | ||
118 | else | ||
119 | bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr, | ||
120 | first_fqid, last_fqid); | ||
121 | } | ||
122 | |||
123 | return bytes; | ||
124 | } | ||
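
The loop above coalesces consecutive FQIDs of the same type into ranges, so the fqids attribute prints one line per run rather than one line per frame queue. Reading the attribute produces output of the following general shape (FQID values are purely illustrative):

    Rx error: 259
    Rx default: 260
    Tx error: 261
    Tx default confirmation: 262
    Tx confirmation (mq): 263 - 286
    Tx: 287 - 310
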
125 | |||
126 | static ssize_t dpaa_eth_show_bpids(struct device *dev, | ||
127 | struct device_attribute *attr, char *buf) | ||
128 | { | ||
129 | struct dpaa_priv *priv = netdev_priv(to_net_dev(dev)); | ||
130 | ssize_t bytes = 0; | ||
131 | int i = 0; | ||
132 | |||
133 | for (i = 0; i < DPAA_BPS_NUM; i++) | ||
134 | bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n", | ||
135 | priv->dpaa_bps[i]->bpid); | ||
136 | |||
137 | return bytes; | ||
138 | } | ||
139 | |||
140 | static struct device_attribute dpaa_eth_attrs[] = { | ||
141 | __ATTR(device_addr, 0444, dpaa_eth_show_addr, NULL), | ||
142 | __ATTR(fqids, 0444, dpaa_eth_show_fqids, NULL), | ||
143 | __ATTR(bpids, 0444, dpaa_eth_show_bpids, NULL), | ||
144 | }; | ||
145 | |||
146 | void dpaa_eth_sysfs_init(struct device *dev) | ||
147 | { | ||
148 | int i; | ||
149 | |||
150 | for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++) | ||
151 | if (device_create_file(dev, &dpaa_eth_attrs[i])) { | ||
152 | dev_err(dev, "Error creating sysfs file\n"); | ||
153 | while (i > 0) | ||
154 | device_remove_file(dev, &dpaa_eth_attrs[--i]); | ||
155 | return; | ||
156 | } | ||
157 | } | ||
158 | |||
159 | void dpaa_eth_sysfs_remove(struct device *dev) | ||
160 | { | ||
161 | int i; | ||
162 | |||
163 | for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++) | ||
164 | device_remove_file(dev, &dpaa_eth_attrs[i]); | ||
165 | } | ||
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h new file mode 100644 index 000000000000..409c1dc39430 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h | |||
@@ -0,0 +1,141 @@ | |||
1 | /* Copyright 2013-2015 Freescale Semiconductor Inc. | ||
2 | * | ||
3 | * Redistribution and use in source and binary forms, with or without | ||
4 | * modification, are permitted provided that the following conditions are met: | ||
5 | * * Redistributions of source code must retain the above copyright | ||
6 | * notice, this list of conditions and the following disclaimer. | ||
7 | * * Redistributions in binary form must reproduce the above copyright | ||
8 | * notice, this list of conditions and the following disclaimer in the | ||
9 | * documentation and/or other materials provided with the distribution. | ||
10 | * * Neither the name of Freescale Semiconductor nor the | ||
11 | * names of its contributors may be used to endorse or promote products | ||
12 | * derived from this software without specific prior written permission. | ||
13 | * | ||
14 | * | ||
15 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
16 | * GNU General Public License ("GPL") as published by the Free Software | ||
17 | * Foundation, either version 2 of that License or (at your option) any | ||
18 | * later version. | ||
19 | * | ||
20 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
21 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
22 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
23 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
24 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
25 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
26 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
27 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
28 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
29 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
30 | */ | ||
31 | |||
32 | #undef TRACE_SYSTEM | ||
33 | #define TRACE_SYSTEM dpaa_eth | ||
34 | |||
35 | #if !defined(_DPAA_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) | ||
36 | #define _DPAA_ETH_TRACE_H | ||
37 | |||
38 | #include <linux/skbuff.h> | ||
39 | #include <linux/netdevice.h> | ||
40 | #include "dpaa_eth.h" | ||
41 | #include <linux/tracepoint.h> | ||
42 | |||
43 | #define fd_format_name(format) { qm_fd_##format, #format } | ||
44 | #define fd_format_list \ | ||
45 | fd_format_name(contig), \ | ||
46 | fd_format_name(sg) | ||
47 | |||
48 | /* This is used to declare a class of events. | ||
49 | * Individual events of this type will be defined below. | ||
50 | */ | ||
51 | |||
52 | /* Store details about a frame descriptor and the FQ on which it was | ||
53 | * transmitted/received. | ||
54 | */ | ||
55 | DECLARE_EVENT_CLASS(dpaa_eth_fd, | ||
56 | /* Trace function prototype */ | ||
57 | TP_PROTO(struct net_device *netdev, | ||
58 | struct qman_fq *fq, | ||
59 | const struct qm_fd *fd), | ||
60 | |||
61 | /* Repeat argument list here */ | ||
62 | TP_ARGS(netdev, fq, fd), | ||
63 | |||
64 | /* A structure containing the relevant information we want to record. | ||
65 | * Declare name and type for each normal element, name, type and size | ||
66 | * for arrays. Use __string for variable length strings. | ||
67 | */ | ||
68 | TP_STRUCT__entry( | ||
69 | __field(u32, fqid) | ||
70 | __field(u64, fd_addr) | ||
71 | __field(u8, fd_format) | ||
72 | __field(u16, fd_offset) | ||
73 | __field(u32, fd_length) | ||
74 | __field(u32, fd_status) | ||
75 | __string(name, netdev->name) | ||
76 | ), | ||
77 | |||
78 | /* The function that assigns values to the above declared fields */ | ||
79 | TP_fast_assign( | ||
80 | __entry->fqid = fq->fqid; | ||
81 | __entry->fd_addr = qm_fd_addr_get64(fd); | ||
82 | __entry->fd_format = qm_fd_get_format(fd); | ||
83 | __entry->fd_offset = qm_fd_get_offset(fd); | ||
84 | __entry->fd_length = qm_fd_get_length(fd); | ||
85 | __entry->fd_status = fd->status; | ||
86 | __assign_str(name, netdev->name); | ||
87 | ), | ||
88 | |||
89 | /* This is what gets printed when the trace event is triggered */ | ||
90 | TP_printk("[%s] fqid=%d, fd: addr=0x%llx, format=%s, off=%u, len=%u, status=0x%08x", | ||
91 | __get_str(name), __entry->fqid, __entry->fd_addr, | ||
92 | __print_symbolic(__entry->fd_format, fd_format_list), | ||
93 | __entry->fd_offset, __entry->fd_length, __entry->fd_status) | ||
94 | ); | ||
95 | |||
96 | /* Now declare events of the above type. Format is: | ||
97 | * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class | ||
98 | */ | ||
99 | |||
100 | /* Tx (egress) fd */ | ||
101 | DEFINE_EVENT(dpaa_eth_fd, dpaa_tx_fd, | ||
102 | |||
103 | TP_PROTO(struct net_device *netdev, | ||
104 | struct qman_fq *fq, | ||
105 | const struct qm_fd *fd), | ||
106 | |||
107 | TP_ARGS(netdev, fq, fd) | ||
108 | ); | ||
109 | |||
110 | /* Rx fd */ | ||
111 | DEFINE_EVENT(dpaa_eth_fd, dpaa_rx_fd, | ||
112 | |||
113 | TP_PROTO(struct net_device *netdev, | ||
114 | struct qman_fq *fq, | ||
115 | const struct qm_fd *fd), | ||
116 | |||
117 | TP_ARGS(netdev, fq, fd) | ||
118 | ); | ||
119 | |||
120 | /* Tx confirmation fd */ | ||
121 | DEFINE_EVENT(dpaa_eth_fd, dpaa_tx_conf_fd, | ||
122 | |||
123 | TP_PROTO(struct net_device *netdev, | ||
124 | struct qman_fq *fq, | ||
125 | const struct qm_fd *fd), | ||
126 | |||
127 | TP_ARGS(netdev, fq, fd) | ||
128 | ); | ||
129 | |||
130 | /* If only one event of a certain type needs to be declared, use TRACE_EVENT(). | ||
131 | * The syntax is the same as for DECLARE_EVENT_CLASS(). | ||
132 | */ | ||
133 | |||
134 | #endif /* _DPAA_ETH_TRACE_H */ | ||
135 | |||
136 | /* This must be outside ifdef _DPAA_ETH_TRACE_H */ | ||
137 | #undef TRACE_INCLUDE_PATH | ||
138 | #define TRACE_INCLUDE_PATH . | ||
139 | #undef TRACE_INCLUDE_FILE | ||
140 | #define TRACE_INCLUDE_FILE dpaa_eth_trace | ||
141 | #include <trace/define_trace.h> | ||
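
For reference, the events defined above follow the standard tracepoint calling convention; a minimal sketch of how the driver's data path would emit them (variable names are assumptions, the actual call sites are in dpaa_eth.c):

    /* Sketch only: net_dev, fq and fd (a const struct qm_fd *) are assumed
     * to be in scope at the respective call sites.
     */
    trace_dpaa_tx_fd(net_dev, fq, fd);      /* just before the Tx enqueue */
    trace_dpaa_rx_fd(net_dev, fq, fd);      /* from the Rx FQ callback */
    trace_dpaa_tx_conf_fd(net_dev, fq, fd); /* from the Tx confirmation path */

Once built in, the events appear under the dpaa_eth trace system in tracefs and can be enabled like any other tracepoint group.
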
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c new file mode 100644 index 000000000000..27e7044667d1 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | |||
@@ -0,0 +1,417 @@ | |||
1 | /* Copyright 2008-2016 Freescale Semiconductor, Inc. | ||
2 | * | ||
3 | * Redistribution and use in source and binary forms, with or without | ||
4 | * modification, are permitted provided that the following conditions are met: | ||
5 | * * Redistributions of source code must retain the above copyright | ||
6 | * notice, this list of conditions and the following disclaimer. | ||
7 | * * Redistributions in binary form must reproduce the above copyright | ||
8 | * notice, this list of conditions and the following disclaimer in the | ||
9 | * documentation and/or other materials provided with the distribution. | ||
10 | * * Neither the name of Freescale Semiconductor nor the | ||
11 | * names of its contributors may be used to endorse or promote products | ||
12 | * derived from this software without specific prior written permission. | ||
13 | * | ||
14 | * | ||
15 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
16 | * GNU General Public License ("GPL") as published by the Free Software | ||
17 | * Foundation, either version 2 of that License or (at your option) any | ||
18 | * later version. | ||
19 | * | ||
20 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
21 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
22 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
23 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
24 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
25 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
26 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
27 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
28 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
29 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
30 | */ | ||
31 | |||
32 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
33 | |||
34 | #include <linux/string.h> | ||
35 | |||
36 | #include "dpaa_eth.h" | ||
37 | #include "mac.h" | ||
38 | |||
39 | static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = { | ||
40 | "interrupts", | ||
41 | "rx packets", | ||
42 | "tx packets", | ||
43 | "tx confirm", | ||
44 | "tx S/G", | ||
45 | "tx error", | ||
46 | "rx error", | ||
47 | }; | ||
48 | |||
49 | static char dpaa_stats_global[][ETH_GSTRING_LEN] = { | ||
50 | /* dpa rx errors */ | ||
51 | "rx dma error", | ||
52 | "rx frame physical error", | ||
53 | "rx frame size error", | ||
54 | "rx header error", | ||
55 | |||
56 | /* demultiplexing errors */ | ||
57 | "qman cg_tdrop", | ||
58 | "qman wred", | ||
59 | "qman error cond", | ||
60 | "qman early window", | ||
61 | "qman late window", | ||
62 | "qman fq tdrop", | ||
63 | "qman fq retired", | ||
64 | "qman orp disabled", | ||
65 | |||
66 | /* congestion related stats */ | ||
67 | "congestion time (ms)", | ||
68 | "entered congestion", | ||
69 | "congested (0/1)" | ||
70 | }; | ||
71 | |||
72 | #define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu) | ||
73 | #define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global) | ||
74 | |||
75 | static int dpaa_get_settings(struct net_device *net_dev, | ||
76 | struct ethtool_cmd *et_cmd) | ||
77 | { | ||
78 | int err; | ||
79 | |||
80 | if (!net_dev->phydev) { | ||
81 | netdev_dbg(net_dev, "phy device not initialized\n"); | ||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | err = phy_ethtool_gset(net_dev->phydev, et_cmd); | ||
86 | |||
87 | return err; | ||
88 | } | ||
89 | |||
90 | static int dpaa_set_settings(struct net_device *net_dev, | ||
91 | struct ethtool_cmd *et_cmd) | ||
92 | { | ||
93 | int err; | ||
94 | |||
95 | if (!net_dev->phydev) { | ||
96 | netdev_err(net_dev, "phy device not initialized\n"); | ||
97 | return -ENODEV; | ||
98 | } | ||
99 | |||
100 | err = phy_ethtool_sset(net_dev->phydev, et_cmd); | ||
101 | if (err < 0) | ||
102 | netdev_err(net_dev, "phy_ethtool_sset() = %d\n", err); | ||
103 | |||
104 | return err; | ||
105 | } | ||
106 | |||
107 | static void dpaa_get_drvinfo(struct net_device *net_dev, | ||
108 | struct ethtool_drvinfo *drvinfo) | ||
109 | { | ||
110 | int len; | ||
111 | |||
112 | strlcpy(drvinfo->driver, KBUILD_MODNAME, | ||
113 | sizeof(drvinfo->driver)); | ||
114 | len = snprintf(drvinfo->version, sizeof(drvinfo->version), | ||
115 | "%X", 0); | ||
116 | len = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), | ||
117 | "%X", 0); | ||
118 | |||
119 | if (len >= sizeof(drvinfo->fw_version)) { | ||
120 | /* Truncated output */ | ||
121 | netdev_notice(net_dev, "snprintf() = %d\n", len); | ||
122 | } | ||
123 | strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), | ||
124 | sizeof(drvinfo->bus_info)); | ||
125 | } | ||
126 | |||
127 | static u32 dpaa_get_msglevel(struct net_device *net_dev) | ||
128 | { | ||
129 | return ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable; | ||
130 | } | ||
131 | |||
132 | static void dpaa_set_msglevel(struct net_device *net_dev, | ||
133 | u32 msg_enable) | ||
134 | { | ||
135 | ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable = msg_enable; | ||
136 | } | ||
137 | |||
138 | static int dpaa_nway_reset(struct net_device *net_dev) | ||
139 | { | ||
140 | int err; | ||
141 | |||
142 | if (!net_dev->phydev) { | ||
143 | netdev_err(net_dev, "phy device not initialized\n"); | ||
144 | return -ENODEV; | ||
145 | } | ||
146 | |||
147 | err = 0; | ||
148 | if (net_dev->phydev->autoneg) { | ||
149 | err = phy_start_aneg(net_dev->phydev); | ||
150 | if (err < 0) | ||
151 | netdev_err(net_dev, "phy_start_aneg() = %d\n", | ||
152 | err); | ||
153 | } | ||
154 | |||
155 | return err; | ||
156 | } | ||
157 | |||
158 | static void dpaa_get_pauseparam(struct net_device *net_dev, | ||
159 | struct ethtool_pauseparam *epause) | ||
160 | { | ||
161 | struct mac_device *mac_dev; | ||
162 | struct dpaa_priv *priv; | ||
163 | |||
164 | priv = netdev_priv(net_dev); | ||
165 | mac_dev = priv->mac_dev; | ||
166 | |||
167 | if (!net_dev->phydev) { | ||
168 | netdev_err(net_dev, "phy device not initialized\n"); | ||
169 | return; | ||
170 | } | ||
171 | |||
172 | epause->autoneg = mac_dev->autoneg_pause; | ||
173 | epause->rx_pause = mac_dev->rx_pause_active; | ||
174 | epause->tx_pause = mac_dev->tx_pause_active; | ||
175 | } | ||
176 | |||
177 | static int dpaa_set_pauseparam(struct net_device *net_dev, | ||
178 | struct ethtool_pauseparam *epause) | ||
179 | { | ||
180 | struct mac_device *mac_dev; | ||
181 | struct phy_device *phydev; | ||
182 | bool rx_pause, tx_pause; | ||
183 | struct dpaa_priv *priv; | ||
184 | u32 newadv, oldadv; | ||
185 | int err; | ||
186 | |||
187 | priv = netdev_priv(net_dev); | ||
188 | mac_dev = priv->mac_dev; | ||
189 | |||
190 | phydev = net_dev->phydev; | ||
191 | if (!phydev) { | ||
192 | netdev_err(net_dev, "phy device not initialized\n"); | ||
193 | return -ENODEV; | ||
194 | } | ||
195 | |||
196 | if (!(phydev->supported & SUPPORTED_Pause) || | ||
197 | (!(phydev->supported & SUPPORTED_Asym_Pause) && | ||
198 | (epause->rx_pause != epause->tx_pause))) | ||
199 | return -EINVAL; | ||
200 | |||
201 | /* The MAC should know how to handle PAUSE frame autonegotiation before | ||
202 | * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE | ||
203 | * settings. | ||
204 | */ | ||
205 | mac_dev->autoneg_pause = !!epause->autoneg; | ||
206 | mac_dev->rx_pause_req = !!epause->rx_pause; | ||
207 | mac_dev->tx_pause_req = !!epause->tx_pause; | ||
208 | |||
209 | /* Determine the sym/asym advertised PAUSE capabilities from the desired | ||
210 | * rx/tx pause settings. | ||
211 | */ | ||
212 | newadv = 0; | ||
213 | if (epause->rx_pause) | ||
214 | newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause; | ||
215 | if (epause->tx_pause) | ||
216 | newadv |= ADVERTISED_Asym_Pause; | ||
217 | |||
218 | oldadv = phydev->advertising & | ||
219 | (ADVERTISED_Pause | ADVERTISED_Asym_Pause); | ||
220 | |||
221 | /* If there are differences between the old and the new advertised | ||
222 | * values, restart PHY autonegotiation and advertise the new values. | ||
223 | */ | ||
224 | if (oldadv != newadv) { | ||
225 | phydev->advertising &= ~(ADVERTISED_Pause | ||
226 | | ADVERTISED_Asym_Pause); | ||
227 | phydev->advertising |= newadv; | ||
228 | if (phydev->autoneg) { | ||
229 | err = phy_start_aneg(phydev); | ||
230 | if (err < 0) | ||
231 | netdev_err(net_dev, "phy_start_aneg() = %d\n", | ||
232 | err); | ||
233 | } | ||
234 | } | ||
235 | |||
236 | fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause); | ||
237 | err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause); | ||
238 | if (err < 0) | ||
239 | netdev_err(net_dev, "set_mac_active_pause() = %d\n", err); | ||
240 | |||
241 | return err; | ||
242 | } | ||
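
The advertisement bits computed above follow the usual ethtool/PHY pause convention; restated as a table (illustration only):

    rx_pause  tx_pause   newadv
       0         0       0
       1         0       ADVERTISED_Pause | ADVERTISED_Asym_Pause
       0         1       ADVERTISED_Asym_Pause
       1         1       ADVERTISED_Pause | ADVERTISED_Asym_Pause
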
243 | |||
244 | static int dpaa_get_sset_count(struct net_device *net_dev, int type) | ||
245 | { | ||
246 | unsigned int total_stats, num_stats; | ||
247 | |||
248 | num_stats = num_online_cpus() + 1; | ||
249 | total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM) + | ||
250 | DPAA_STATS_GLOBAL_LEN; | ||
251 | |||
252 | switch (type) { | ||
253 | case ETH_SS_STATS: | ||
254 | return total_stats; | ||
255 | default: | ||
256 | return -EOPNOTSUPP; | ||
257 | } | ||
258 | } | ||
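
A quick worked example of the count: with 8 online CPUs, num_stats is 9 (one column per CPU plus a TOTAL column), so ETH_SS_STATS reports 9 * (7 per-CPU counters + 3 buffer pools) + 15 global counters = 105 entries.
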
259 | |||
260 | static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus, | ||
261 | int crr_cpu, u64 *bp_count, u64 *data) | ||
262 | { | ||
263 | int num_values = num_cpus + 1; | ||
264 | int crr = 0, j; | ||
265 | |||
266 | /* update current CPU's stats and also add them to the total values */ | ||
267 | data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt; | ||
268 | data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt; | ||
269 | |||
270 | data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets; | ||
271 | data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets; | ||
272 | |||
273 | data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets; | ||
274 | data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets; | ||
275 | |||
276 | data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm; | ||
277 | data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm; | ||
278 | |||
279 | data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs; | ||
280 | data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs; | ||
281 | |||
282 | data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors; | ||
283 | data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors; | ||
284 | |||
285 | data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors; | ||
286 | data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors; | ||
287 | |||
288 | for (j = 0; j < DPAA_BPS_NUM; j++) { | ||
289 | data[crr * num_values + crr_cpu] = bp_count[j]; | ||
290 | data[crr++ * num_values + num_cpus] += bp_count[j]; | ||
291 | } | ||
292 | } | ||
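
The data array filled by copy_stats() is laid out row-major: each statistic occupies num_cpus + 1 consecutive u64 slots, one per online CPU followed by a running TOTAL, which is why the function writes data[crr * num_values + crr_cpu] and accumulates into data[crr * num_values + num_cpus]. A small sketch of how a consumer would index that layout (illustration only, not part of the driver):

    static u64 stat_for_cpu(const u64 *data, int num_cpus, int stat, int cpu)
    {
            return data[stat * (num_cpus + 1) + cpu];
    }

    static u64 stat_total(const u64 *data, int num_cpus, int stat)
    {
            return data[stat * (num_cpus + 1) + num_cpus];
    }
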
293 | |||
294 | static void dpaa_get_ethtool_stats(struct net_device *net_dev, | ||
295 | struct ethtool_stats *stats, u64 *data) | ||
296 | { | ||
297 | u64 bp_count[DPAA_BPS_NUM], cg_time, cg_num; | ||
298 | struct dpaa_percpu_priv *percpu_priv; | ||
299 | struct dpaa_rx_errors rx_errors; | ||
300 | unsigned int num_cpus, offset; | ||
301 | struct dpaa_ern_cnt ern_cnt; | ||
302 | struct dpaa_bp *dpaa_bp; | ||
303 | struct dpaa_priv *priv; | ||
304 | int total_stats, i, j; | ||
305 | bool cg_status; | ||
306 | |||
307 | total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS); | ||
308 | priv = netdev_priv(net_dev); | ||
309 | num_cpus = num_online_cpus(); | ||
310 | |||
311 | memset(&bp_count, 0, sizeof(bp_count)); | ||
312 | memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors)); | ||
313 | memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt)); | ||
314 | memset(data, 0, total_stats * sizeof(u64)); | ||
315 | |||
316 | for_each_online_cpu(i) { | ||
317 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); | ||
318 | for (j = 0; j < DPAA_BPS_NUM; j++) { | ||
319 | dpaa_bp = priv->dpaa_bps[j]; | ||
320 | if (!dpaa_bp->percpu_count) | ||
321 | continue; | ||
322 | bp_count[j] = *(per_cpu_ptr(dpaa_bp->percpu_count, i)); | ||
323 | } | ||
324 | rx_errors.dme += percpu_priv->rx_errors.dme; | ||
325 | rx_errors.fpe += percpu_priv->rx_errors.fpe; | ||
326 | rx_errors.fse += percpu_priv->rx_errors.fse; | ||
327 | rx_errors.phe += percpu_priv->rx_errors.phe; | ||
328 | |||
329 | ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop; | ||
330 | ern_cnt.wred += percpu_priv->ern_cnt.wred; | ||
331 | ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond; | ||
332 | ern_cnt.early_window += percpu_priv->ern_cnt.early_window; | ||
333 | ern_cnt.late_window += percpu_priv->ern_cnt.late_window; | ||
334 | ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop; | ||
335 | ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired; | ||
336 | ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero; | ||
337 | |||
338 | copy_stats(percpu_priv, num_cpus, i, bp_count, data); | ||
339 | } | ||
340 | |||
341 | offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM); | ||
342 | memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors)); | ||
343 | |||
344 | offset += sizeof(struct dpaa_rx_errors) / sizeof(u64); | ||
345 | memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt)); | ||
346 | |||
347 | /* gather congestion related counters */ | ||
348 | cg_num = 0; | ||
349 | cg_status = 0; | ||
350 | cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies); | ||
351 | if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) { | ||
352 | cg_num = priv->cgr_data.cgr_congested_count; | ||
353 | |||
354 | /* reset congestion stats (like the QMan API does) */ | ||
355 | priv->cgr_data.congested_jiffies = 0; | ||
356 | priv->cgr_data.cgr_congested_count = 0; | ||
357 | } | ||
358 | |||
359 | offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64); | ||
360 | data[offset++] = cg_time; | ||
361 | data[offset++] = cg_num; | ||
362 | data[offset++] = cg_status; | ||
363 | } | ||
364 | |||
365 | static void dpaa_get_strings(struct net_device *net_dev, u32 stringset, | ||
366 | u8 *data) | ||
367 | { | ||
368 | unsigned int i, j, num_cpus, size; | ||
369 | char string_cpu[ETH_GSTRING_LEN]; | ||
370 | u8 *strings; | ||
371 | |||
372 | memset(string_cpu, 0, sizeof(string_cpu)); | ||
373 | strings = data; | ||
374 | num_cpus = num_online_cpus(); | ||
375 | size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN; | ||
376 | |||
377 | for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) { | ||
378 | for (j = 0; j < num_cpus; j++) { | ||
379 | snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]", | ||
380 | dpaa_stats_percpu[i], j); | ||
381 | memcpy(strings, string_cpu, ETH_GSTRING_LEN); | ||
382 | strings += ETH_GSTRING_LEN; | ||
383 | } | ||
384 | snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]", | ||
385 | dpaa_stats_percpu[i]); | ||
386 | memcpy(strings, string_cpu, ETH_GSTRING_LEN); | ||
387 | strings += ETH_GSTRING_LEN; | ||
388 | } | ||
389 | for (i = 0; i < DPAA_BPS_NUM; i++) { | ||
390 | for (j = 0; j < num_cpus; j++) { | ||
391 | snprintf(string_cpu, ETH_GSTRING_LEN, | ||
392 | "bpool %c [CPU %d]", 'a' + i, j); | ||
393 | memcpy(strings, string_cpu, ETH_GSTRING_LEN); | ||
394 | strings += ETH_GSTRING_LEN; | ||
395 | } | ||
396 | snprintf(string_cpu, ETH_GSTRING_LEN, "bpool %c [TOTAL]", | ||
397 | 'a' + i); | ||
398 | memcpy(strings, string_cpu, ETH_GSTRING_LEN); | ||
399 | strings += ETH_GSTRING_LEN; | ||
400 | } | ||
401 | memcpy(strings, dpaa_stats_global, size); | ||
402 | } | ||
403 | |||
404 | const struct ethtool_ops dpaa_ethtool_ops = { | ||
405 | .get_settings = dpaa_get_settings, | ||
406 | .set_settings = dpaa_set_settings, | ||
407 | .get_drvinfo = dpaa_get_drvinfo, | ||
408 | .get_msglevel = dpaa_get_msglevel, | ||
409 | .set_msglevel = dpaa_set_msglevel, | ||
410 | .nway_reset = dpaa_nway_reset, | ||
411 | .get_pauseparam = dpaa_get_pauseparam, | ||
412 | .set_pauseparam = dpaa_set_pauseparam, | ||
413 | .get_link = ethtool_op_get_link, | ||
414 | .get_sset_count = dpaa_get_sset_count, | ||
415 | .get_ethtool_stats = dpaa_get_ethtool_stats, | ||
416 | .get_strings = dpaa_get_strings, | ||
417 | }; | ||
diff --git a/include/linux/device.h b/include/linux/device.h index bc41e87a969b..a00105cf795e 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -698,6 +698,25 @@ static inline int devm_add_action_or_reset(struct device *dev, | |||
698 | return ret; | 698 | return ret; |
699 | } | 699 | } |
700 | 700 | ||
701 | /** | ||
702 | * devm_alloc_percpu - Resource-managed alloc_percpu | ||
703 | * @dev: Device to allocate per-cpu memory for | ||
704 | * @type: Type to allocate per-cpu memory for | ||
705 | * | ||
706 | * Managed alloc_percpu. Per-cpu memory allocated with this function is | ||
707 | * automatically freed on driver detach. | ||
708 | * | ||
709 | * RETURNS: | ||
710 | * Pointer to allocated memory on success, NULL on failure. | ||
711 | */ | ||
712 | #define devm_alloc_percpu(dev, type) \ | ||
713 | ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \ | ||
714 | __alignof__(type))) | ||
715 | |||
716 | void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, | ||
717 | size_t align); | ||
718 | void devm_free_percpu(struct device *dev, void __percpu *pdata); | ||
719 | |||
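
A minimal usage sketch of the new helper (the device and structure names are hypothetical); the per-cpu allocation is released automatically on driver detach, so no explicit free_percpu() is needed on the error or remove paths:

    struct foo_percpu_stats __percpu *stats;        /* hypothetical type */

    stats = devm_alloc_percpu(&pdev->dev, *stats);
    if (!stats)
            return -ENOMEM;
    /* use per_cpu_ptr(stats, cpu) / this_cpu_ptr(stats) as usual */
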
701 | struct device_dma_parameters { | 720 | struct device_dma_parameters { |
702 | /* | 721 | /* |
703 | * a low level driver may set these to teach IOMMU code about | 722 | * a low level driver may set these to teach IOMMU code about |