Diffstat (limited to 'drivers/net/ethernet/renesas/sh_eth.c')
-rw-r--r--	drivers/net/ethernet/renesas/sh_eth.c	| 1959
1 file changed, 1959 insertions, 0 deletions

diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
new file mode 100644
index 000000000000..ad35c210b839
--- /dev/null
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -0,0 +1,1959 @@
/*
 * SuperH Ethernet device driver
 *
 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
 * Copyright (C) 2008-2009 Renesas Solutions Corp.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
	(NETIF_MSG_LINK | \
	NETIF_MSG_TIMER | \
	NETIF_MSG_RX_ERR | \
	NETIF_MSG_TX_ERR)

/* There is CPU dependent code */
#if defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_ETH_RESET_DEFAULT	1
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
		break;
	default:
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate,

	.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,

	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.hw_swap = 1,
	.rpadir = 1,
	.rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
#define SH_ETH_HAS_BOTH_MODULES	1
#define SH_ETH_HAS_TSU	1
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	default:
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate,

	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.rmcr_value = 0x00000001,

	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.hw_swap = 1,
	.no_ade = 1,
	.rpadir = 1,
	.rpadir_value = 2 << 16,
};

#define SH_GIGA_ETH_BASE	0xfee00000
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
	unsigned long mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = readl(GIGA_MALR(i));
		mahr[i] = readl(GIGA_MAHR(i));
	}

	/* reset device */
	writel(ARSTR_ARSTR, SH_GIGA_ETH_BASE + 0x1800);
	mdelay(1);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		writel(malr[i], GIGA_MALR(i));
		writel(mahr[i], GIGA_MAHR(i));
	}
}

static int sh_eth_is_gether(struct sh_eth_private *mdp);
static void sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int cnt = 100;

	if (sh_eth_is_gether(mdp)) {
		sh_eth_write(ndev, 0x03, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
				EDMR);
		while (cnt > 0) {
			if (!(sh_eth_read(ndev, EDMR) & 0x3))
				break;
			mdelay(1);
			cnt--;
		}
		/* cnt never drops below zero, so test for exhaustion */
		if (cnt == 0)
			printk(KERN_ERR "Device reset fail\n");

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
				EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
				EDMR);
	}
}

static void sh_eth_set_duplex_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	default:
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
	.chip_reset = sh_eth_chip_reset_giga,
	.set_duplex = sh_eth_set_duplex_giga,
	.set_rate = sh_eth_set_rate_giga,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE |
			  EESR_TFE,
	.fdr_value = 0x0000072f,
	.rmcr_value = 0x00000001,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.bculr = 1,
	.hw_swap = 1,
	.rpadir = 1,
	.rpadir_value = 2 << 16,
	.no_trimd = 1,
	.no_ade = 1,
};

static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return &sh_eth_my_cpu_data_giga;
	else
		return &sh_eth_my_cpu_data;
}

#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU	1
static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);
}

static void sh_eth_reset(struct net_device *ndev)
{
	int cnt = 100;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt == 0)
		printk(KERN_ERR "Device reset fail\n");

	/* Table Init */
	sh_eth_write(ndev, 0x0, TDLAR);
	sh_eth_write(ndev, 0x0, TDFAR);
	sh_eth_write(ndev, 0x0, TDFXR);
	sh_eth_write(ndev, 0x0, TDFFR);
	sh_eth_write(ndev, 0x0, RDLAR);
	sh_eth_write(ndev, 0x0, RDFAR);
	sh_eth_write(ndev, 0x0, RDFXR);
	sh_eth_write(ndev, 0x0, RDFFR);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

/* sh7763 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset = sh_eth_chip_reset,
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE |
			  EESR_TFE,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.bculr = 1,
	.hw_swap = 1,
	.no_trimd = 1,
	.no_ade = 1,
	.tsu = 1,
};

#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
#define SH_ETH_RESET_DEFAULT	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.hw_swap = 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
#define SH_ETH_RESET_DEFAULT	1
#define SH_ETH_HAS_TSU	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.tsu = 1,
};
#endif

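/*
 * Exactly one of the sh_eth_my_cpu_data definitions above is compiled in,
 * selected by the CPU subtype; any fields that definition leaves at zero
 * are filled in with the defaults below.
 */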
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->rmcr_value)
		cd->rmcr_value = DEFAULT_RMCR_VALUE;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->tx_error_check)
		cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
}

#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
static void sh_eth_reset(struct net_device *ndev)
{
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
	mdelay(3);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
}
#endif

#if defined(CONFIG_CPU_SH4)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	int reserve;

	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif


/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}
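/*
 * The EDMAC's descriptor endianness is a per-board property
 * (mdp->edmac_endian), which is why descriptor words are converted at
 * run time here rather than with fixed cpu_to_le32()/le32_to_cpu()
 * helpers.
 */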

/*
 * Program the hardware MAC address from dev->dev_addr.
 */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		(ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}

/*
 * Get the MAC address from the SuperH MAC address registers.
 *
 * The SuperH Ethernet controller has no ROM for its MAC address, so this
 * driver reads back whatever address the bootloader (U-Boot or sh-ipl+g)
 * programmed into MAHR/MALR. A MAC address must therefore be set in the
 * bootloader before this device can be used.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, 6);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}

static int sh_eth_is_gether(struct sh_eth_private *mdp)
{
	if (mdp->reg_offset == sh_eth_offset_gigabit)
		return 1;
	else
		return 0;
}

static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}

struct bb_info {
	void (*set_gate)(unsigned long addr);
	struct mdiobb_ctrl ctrl;
	u32 addr;
	u32 mmd_msk;/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};
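/*
 * The four masks above select individual bits within the PIR (PHY
 * interface) register: sh_mdio_init() below assigns MDC = 0x01,
 * MMD (pin direction) = 0x02, MDO = 0x04 and MDI = 0x08.
 */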

/* PHY bit set */
static void bb_set(u32 addr, u32 msk)
{
	writel(readl(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(u32 addr, u32 msk)
{
	writel((readl(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(u32 addr, u32 msk)
{
	return (readl(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};
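/*
 * These callbacks plug into the generic mdio-bitbang layer, which uses
 * them to clock MII management frames out one bit at a time;
 * alloc_mdio_bitbang() in sh_mdio_init() attaches them to the MDIO bus.
 */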

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < RX_RING_SIZE; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < TX_RING_SIZE; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
	int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;

	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = dev_alloc_skb(mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
				DMA_FROM_DEVICE);
		skb->dev = ndev; /* Mark as being used by this device. */
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The buffer size is rounded up to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - RX_RING_SIZE);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < TX_RING_SIZE; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}
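/*
 * Ring ownership convention: setting RD_RACT/TD_TACT hands a descriptor
 * to the EDMAC, which clears the bit once it is done with the buffer;
 * RD_RDEL/TD_TDLE on the final entry make the controller wrap back to
 * the head, so both rings behave as circular buffers.
 */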

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/*
	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
				GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
				GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
			GFP_KERNEL);

	if (!mdp->rx_ring) {
		dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
			rx_ringsize);
		ret = -ENOMEM;
		/* nothing DMA-allocated yet, so skip desc_ring_free */
		goto skb_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
			GFP_KERNEL);
	if (!mdp->tx_ring) {
		dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
			tx_ringsize);
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);

	return ret;
}
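/*
 * For the default 1500-byte MTU the sizing formula above gives
 * ((1500 + 26 + 7) & ~7) + 2 + 16 = 1546 bytes per Rx buffer (plus
 * NET_IP_ALIGN when rpadir padding is in use).
 */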

static int sh_eth_dev_init(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 rx_int_var, tx_int_var;
	u32 val;

	/* Soft Reset */
	sh_eth_reset(ndev);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN__)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control */
	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

	rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
	tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
	sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, RFLR_VALUE, RFLR);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	/* Setting the Rx mode will start the Rx process. */
	sh_eth_write(ndev, EDRRR_R, EDRRR);

	netif_start_queue(ndev);

	return ret;
}

/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
	int entry = 0;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % TX_RING_SIZE;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&ndev->dev, txdesc->addr,
					 txdesc->buffer_length, DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			free_num++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= TX_RING_SIZE - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		mdp->stats.tx_packets++;
		mdp->stats.tx_bytes += txdesc->buffer_length;
	}
	return free_num;
}
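/*
 * cur_tx and dirty_tx only ever increase; their difference is the number
 * of descriptors still owned by the hardware, and each index is reduced
 * modulo TX_RING_SIZE at the point of use.
 */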

/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % RX_RING_SIZE;
	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

		if (!(desc_status & RDFEND))
			mdp->stats.rx_length_errors++;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			mdp->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				mdp->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				mdp->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				mdp->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				mdp->stats.rx_over_errors++;
		} else {
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			mdp->stats.rx_packets++;
			mdp->stats.rx_bytes += pkt_len;
		}
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
		/* The buffer size is rounded up to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
					DMA_FROM_DEVICE);
			skb->dev = ndev;
			sh_eth_set_receive_align(skb);

			skb_checksum_none_assert(skb);
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		if (entry >= RX_RING_SIZE - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
		sh_eth_write(ndev, EDRRR_R, EDRRR);

	return 0;
}
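/*
 * Note that reception runs entirely in hard-IRQ context: sh_eth_rx() is
 * called from sh_eth_interrupt() and delivers frames with netif_rx();
 * this version of the driver does not use NAPI polling.
 */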

static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
		~(ECMR_RE | ECMR_TE), ECMR);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
		(ECMR_RE | ECMR_TE), ECMR);
}

/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			mdp->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				if (mdp->link == PHY_DOWN)
					link_stat = 0;
				else
					link_stat = PHY_ST_LINK;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK)) {
				sh_eth_rcv_snd_disable(ndev);
			} else {
				/* Link Up */
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
					~DMAC_M_ECI, EESIPR);
				/* clear int */
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
					ECSR);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
					DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

	if (intr_status & EESR_TWB) {
		/* Write-back end: unused write-back interrupt */
		if (intr_status & EESR_TABT)	/* Transmit Abort int */
			mdp->stats.tx_aborted_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Abort\n");
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			mdp->stats.rx_frame_errors++;
			if (netif_msg_rx_err(mdp))
				dev_err(&ndev->dev, "Receive Abort\n");
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		mdp->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		mdp->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		mdp->stats.rx_over_errors++;

		if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
			sh_eth_write(ndev, EDRRR_R, EDRRR);
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		mdp->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		mdp->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);
		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
			intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
			mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}

static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 intr_status = 0;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Clear interrupt */
	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
			cd->tx_check | cd->eesr_err_check)) {
		sh_eth_write(ndev, intr_status, EESR);
		ret = IRQ_HANDLED;
	} else {
		goto other_irq;
	}

	if (intr_status & (EESR_FRC |	/* Frame recv */
			EESR_RMAF |	/* Multicast address recv */
			EESR_RRF |	/* Bit frame recv */
			EESR_RTLF |	/* Long frame recv */
			EESR_RTSF |	/* Short frame recv */
			EESR_PRE |	/* PHY-LSI recv error */
			EESR_CERF)) {	/* Recv frame CRC error */
		sh_eth_rx(ndev);
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check)
		sh_eth_error(ndev, intr_status);

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}
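/*
 * Writing the status value just read back to EESR acknowledges exactly
 * the interrupt sources that were observed; any source that asserts
 * between the read and the write stays pending and raises a new IRQ.
 */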

static void sh_eth_timer(unsigned long data)
{
	struct net_device *ndev = (struct net_device *)data;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	mod_timer(&mdp->timer, jiffies + (10 * HZ));
}

/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	int new_state = 0;

	if (phydev->link != PHY_DOWN) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (mdp->link == PHY_DOWN) {
			sh_eth_write(ndev,
				(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
			new_state = 1;
			mdp->link = phydev->link;
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = PHY_DOWN;
		mdp->speed = 0;
		mdp->duplex = -1;
	}

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}

/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		mdp->mii_bus->id, mdp->phy_id);

	mdp->link = PHY_DOWN;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try connect to PHY */
	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
				0, mdp->phy_interface);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);
	}

	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
		phydev->addr, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}

/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	/* reset phy - this also wakes it from PDOWN */
	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
	phy_start(mdp->phydev);

	return 0;
}

static int sh_eth_get_settings(struct net_device *ndev,
			struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_ethtool_gset(mdp->phydev, ecmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_set_settings(struct net_device *ndev,
		struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_sset(mdp->phydev, ecmd);
	if (ret)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		mdp->duplex = 1;
	else
		mdp->duplex = 0;

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

error_exit:
	mdelay(1);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_nway_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_start_aneg(mdp->phydev);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	mdp->msg_enable = value;
}

static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SH_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void sh_eth_get_ethtool_stats(struct net_device *ndev,
			struct ethtool_stats *stats, u64 *data)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i = 0;

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
}

static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *sh_eth_gstrings_stats,
			sizeof(sh_eth_gstrings_stats));
		break;
	}
}

static struct ethtool_ops sh_eth_ethtool_ops = {
	.get_settings = sh_eth_get_settings,
	.set_settings = sh_eth_set_settings,
	.nway_reset = sh_eth_nway_reset,
	.get_msglevel = sh_eth_get_msglevel,
	.set_msglevel = sh_eth_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = sh_eth_get_strings,
	.get_ethtool_stats = sh_eth_get_ethtool_stats,
	.get_sset_count = sh_eth_get_sset_count,
};

/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_CPU_SUBTYPE_SH7764) || \
	defined(CONFIG_CPU_SUBTYPE_SH7757)
				IRQF_SHARED,
#else
				0,
#endif
				ndev->name, ndev);
	if (ret) {
		dev_err(&ndev->dev, "Can not assign IRQ number\n");
		return ret;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev);
	if (ret)
		goto out_free_irq;

	/* PHY control start */
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	/* Set the timer to check for link beat. */
	init_timer(&mdp->timer);
	mdp->timer.expires = jiffies + (24 * HZ) / 10;	/* 2.4 sec. */
	setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}

/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	if (netif_msg_timer(mdp))
		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
			" resetting...\n", ndev->name,
			(int)sh_eth_read(ndev, EESR));

	/* tx_errors count up */
	mdp->stats.tx_errors++;

	/* timer off */
	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = 0;
		rxdesc->addr = 0xBADF00D0;
		if (mdp->rx_skbuff[i])
			dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (mdp->tx_skbuff[i])
			dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev);

	/* timer on */
	mdp->timer.expires = jiffies + (24 * HZ) / 10;	/* 2.4 sec. */
	add_timer(&mdp->timer);
}

/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % TX_RING_SIZE;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= TX_RING_SIZE - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}
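/*
 * The queue is stopped only when fewer than four free descriptors remain
 * and sh_eth_txfree() cannot reclaim any; the Tx-completion path in
 * sh_eth_interrupt() wakes it again once descriptors are released.
 */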

/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ringsize;

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	sh_eth_write(ndev, 0x0000, EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	sh_eth_write(ndev, 0, EDTRR);
	sh_eth_write(ndev, 0, EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);

	/* free DMA buffer */
	ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}

static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR);
	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
	mdp->stats.collisions += sh_eth_read(ndev, CDCR);
	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
	mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
	if (sh_eth_is_gether(mdp)) {
		mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
		mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
	} else {
		mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
	}
	pm_runtime_put_sync(&mdp->pdev->dev);

	return &mdp->stats;
}

/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
				int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

#if defined(SH_ETH_HAS_TSU)
/* Set the multicast reception mode */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	if (ndev->flags & IFF_PROMISC) {
		/* Set promiscuous. */
		sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_MCT) |
				ECMR_PRM, ECMR);
	} else {
		/* Normal, unicast/broadcast-only mode. */
		sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) |
				ECMR_MCT, ECMR);
	}
}
#endif /* SH_ETH_HAS_TSU */

/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}
1644 | |||
1645 | /* MDIO bus release function */ | ||
1646 | static int sh_mdio_release(struct net_device *ndev) | ||
1647 | { | ||
1648 | struct mii_bus *bus = dev_get_drvdata(&ndev->dev); | ||
1649 | |||
1650 | /* unregister mdio bus */ | ||
1651 | mdiobus_unregister(bus); | ||
1652 | |||
1653 | /* remove mdio bus info from net_device */ | ||
1654 | dev_set_drvdata(&ndev->dev, NULL); | ||
1655 | |||
1656 | /* free the PHY IRQ table */ | ||
1657 | kfree(bus->irq); | ||
1658 | |||
1659 | /* free bitbang info */ | ||
1660 | free_mdio_bitbang(bus); | ||
1661 | |||
1662 | return 0; | ||
1663 | } | ||
1664 | |||
1665 | /* MDIO bus init function */ | ||
1666 | static int sh_mdio_init(struct net_device *ndev, int id, | ||
1667 | struct sh_eth_plat_data *pd) | ||
1668 | { | ||
1669 | int ret, i; | ||
1670 | struct bb_info *bitbang; | ||
1671 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
1672 | |||
1673 | /* create bit control struct for PHY */ | ||
1674 | bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); | ||
1675 | if (!bitbang) { | ||
1676 | ret = -ENOMEM; | ||
1677 | goto out; | ||
1678 | } | ||
1679 | |||
1680 | /* bitbang init */ | ||
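| /* PIR bit layout implied by the masks below: MDC (clock) is bit 0, | ||
|  * MMD (data direction) bit 1, MDO (data out) bit 2 and MDI (data in) | ||
|  * bit 3. | ||
|  */ | ||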
1681 | bitbang->addr = ndev->base_addr + mdp->reg_offset[PIR]; | ||
1682 | bitbang->set_gate = pd->set_mdio_gate; | ||
1683 | bitbang->mdi_msk = 0x08; | ||
1684 | bitbang->mdo_msk = 0x04; | ||
1685 | bitbang->mmd_msk = 0x02; /* MMD */ | ||
1686 | bitbang->mdc_msk = 0x01; | ||
1687 | bitbang->ctrl.ops = &bb_ops; | ||
1688 | |||
1689 | /* MII controller setting */ | ||
1690 | mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); | ||
1691 | if (!mdp->mii_bus) { | ||
1692 | ret = -ENOMEM; | ||
1693 | goto out_free_bitbang; | ||
1694 | } | ||
1695 | |||
1696 | /* Hook up MII support for ethtool */ | ||
1697 | mdp->mii_bus->name = "sh_mii"; | ||
1698 | mdp->mii_bus->parent = &ndev->dev; | ||
1699 | snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id); | ||
1700 | |||
1701 | /* PHY IRQ */ | ||
1702 | mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); | ||
1703 | if (!mdp->mii_bus->irq) { | ||
1704 | ret = -ENOMEM; | ||
1705 | goto out_free_bus; | ||
1706 | } | ||
1707 | |||
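| /* No PHY interrupt lines are described by the platform, so mark | ||
|  * every address PHY_POLL and let phylib poll for link changes. | ||
|  */ | ||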
1708 | for (i = 0; i < PHY_MAX_ADDR; i++) | ||
1709 | mdp->mii_bus->irq[i] = PHY_POLL; | ||
1710 | |||
1711 | /* register mdio bus */ | ||
1712 | ret = mdiobus_register(mdp->mii_bus); | ||
1713 | if (ret) | ||
1714 | goto out_free_irq; | ||
1715 | |||
1716 | dev_set_drvdata(&ndev->dev, mdp->mii_bus); | ||
1717 | |||
1718 | return 0; | ||
1719 | |||
1720 | out_free_irq: | ||
1721 | kfree(mdp->mii_bus->irq); | ||
1722 | |||
1723 | out_free_bus: | ||
1724 | free_mdio_bitbang(mdp->mii_bus); | ||
1725 | |||
1726 | out_free_bitbang: | ||
1727 | kfree(bitbang); | ||
1728 | |||
1729 | out: | ||
1730 | return ret; | ||
1731 | } | ||
1732 | |||
1733 | static const u16 *sh_eth_get_register_offset(int register_type) | ||
1734 | { | ||
1735 | const u16 *reg_offset = NULL; | ||
1736 | |||
1737 | switch (register_type) { | ||
1738 | case SH_ETH_REG_GIGABIT: | ||
1739 | reg_offset = sh_eth_offset_gigabit; | ||
1740 | break; | ||
1741 | case SH_ETH_REG_FAST_SH4: | ||
1742 | reg_offset = sh_eth_offset_fast_sh4; | ||
1743 | break; | ||
1744 | case SH_ETH_REG_FAST_SH3_SH2: | ||
1745 | reg_offset = sh_eth_offset_fast_sh3_sh2; | ||
1746 | break; | ||
1747 | default: | ||
1748 | printk(KERN_ERR "Unknown register type (%d)\n", register_type); | ||
1749 | break; | ||
1750 | } | ||
1751 | |||
1752 | return reg_offset; | ||
1753 | } | ||
1754 | |||
1755 | static const struct net_device_ops sh_eth_netdev_ops = { | ||
1756 | .ndo_open = sh_eth_open, | ||
1757 | .ndo_stop = sh_eth_close, | ||
1758 | .ndo_start_xmit = sh_eth_start_xmit, | ||
1759 | .ndo_get_stats = sh_eth_get_stats, | ||
1760 | #if defined(SH_ETH_HAS_TSU) | ||
1761 | .ndo_set_multicast_list = sh_eth_set_multicast_list, | ||
1762 | #endif | ||
1763 | .ndo_tx_timeout = sh_eth_tx_timeout, | ||
1764 | .ndo_do_ioctl = sh_eth_do_ioctl, | ||
1765 | .ndo_validate_addr = eth_validate_addr, | ||
1766 | .ndo_set_mac_address = eth_mac_addr, | ||
1767 | .ndo_change_mtu = eth_change_mtu, | ||
1768 | }; | ||
1769 | |||
1770 | static int sh_eth_drv_probe(struct platform_device *pdev) | ||
1771 | { | ||
1772 | int ret, devno = 0; | ||
1773 | struct resource *res; | ||
1774 | struct net_device *ndev = NULL; | ||
1775 | struct sh_eth_private *mdp = NULL; | ||
1776 | struct sh_eth_plat_data *pd; | ||
1777 | |||
1778 | /* get base addr */ | ||
1779 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1780 | if (unlikely(res == NULL)) { | ||
1781 | dev_err(&pdev->dev, "invalid resource\n"); | ||
1782 | ret = -EINVAL; | ||
1783 | goto out; | ||
1784 | } | ||
1785 | |||
1786 | ndev = alloc_etherdev(sizeof(struct sh_eth_private)); | ||
1787 | if (!ndev) { | ||
1788 | dev_err(&pdev->dev, "Could not allocate device.\n"); | ||
1789 | ret = -ENOMEM; | ||
1790 | goto out; | ||
1791 | } | ||
1792 | |||
1793 | /* The sh Ether-specific entries in the device structure. */ | ||
1794 | ndev->base_addr = res->start; | ||
1795 | devno = pdev->id; | ||
1796 | if (devno < 0) | ||
1797 | devno = 0; | ||
1798 | |||
1799 | ndev->dma = -1; | ||
1800 | ret = platform_get_irq(pdev, 0); | ||
1801 | if (ret < 0) { | ||
1802 | ret = -ENODEV; | ||
1803 | goto out_release; | ||
1804 | } | ||
1805 | ndev->irq = ret; | ||
1806 | |||
1807 | SET_NETDEV_DEV(ndev, &pdev->dev); | ||
1808 | |||
1809 | /* Fill in the fields of the device structure with ethernet values. */ | ||
1810 | ether_setup(ndev); | ||
1811 | |||
1812 | mdp = netdev_priv(ndev); | ||
1813 | spin_lock_init(&mdp->lock); | ||
1814 | mdp->pdev = pdev; | ||
1815 | pm_runtime_enable(&pdev->dev); | ||
1816 | pm_runtime_resume(&pdev->dev); | ||
1817 | |||
1818 | pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data); | ||
1819 | /* get PHY ID */ | ||
1820 | mdp->phy_id = pd->phy; | ||
1821 | mdp->phy_interface = pd->phy_interface; | ||
1822 | /* EDMAC endian */ | ||
1823 | mdp->edmac_endian = pd->edmac_endian; | ||
1824 | mdp->no_ether_link = pd->no_ether_link; | ||
1825 | mdp->ether_link_active_low = pd->ether_link_active_low; | ||
1826 | mdp->reg_offset = sh_eth_get_register_offset(pd->register_type); | ||
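| /* All of the above comes from board-supplied platform data. A | ||
|  * minimal, purely illustrative example of what a board file might | ||
|  * pass (field names follow the pd-> accesses above; the values are | ||
|  * hypothetical): | ||
|  * | ||
|  *   static struct sh_eth_plat_data my_eth_pd = { | ||
|  *           .phy            = 0x1f, | ||
|  *           .edmac_endian   = EDMAC_LITTLE_ENDIAN, | ||
|  *           .register_type  = SH_ETH_REG_FAST_SH4, | ||
|  *           .phy_interface  = PHY_INTERFACE_MODE_MII, | ||
|  *   }; | ||
|  */ | ||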
1827 | |||
1828 | /* set cpu data */ | ||
1829 | #if defined(SH_ETH_HAS_BOTH_MODULES) | ||
1830 | mdp->cd = sh_eth_get_cpu_data(mdp); | ||
1831 | #else | ||
1832 | mdp->cd = &sh_eth_my_cpu_data; | ||
1833 | #endif | ||
1834 | sh_eth_set_default_cpu_data(mdp->cd); | ||
1835 | |||
1836 | /* set function */ | ||
1837 | ndev->netdev_ops = &sh_eth_netdev_ops; | ||
1838 | SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops); | ||
1839 | ndev->watchdog_timeo = TX_TIMEOUT; | ||
1840 | |||
1841 | /* debug message level */ | ||
1842 | mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; | ||
1843 | mdp->post_rx = POST_RX >> (devno << 1); | ||
1844 | mdp->post_fw = POST_FW >> (devno << 1); | ||
1845 | |||
1846 | /* read and set MAC address */ | ||
1847 | read_mac_address(ndev, pd->mac_addr); | ||
1848 | |||
1849 | /* First device only init */ | ||
1850 | if (!devno) { | ||
1851 | if (mdp->cd->tsu) { | ||
1852 | struct resource *rtsu; | ||
1853 | rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
1854 | if (!rtsu) { | ||
1855 | dev_err(&pdev->dev, "TSU resource not found\n"); | ||
| ret = -ENODEV; /* don't return the stale (positive) IRQ value */ | ||
1856 | goto out_release; | ||
1857 | } | ||
1858 | mdp->tsu_addr = ioremap(rtsu->start, | ||
1859 | resource_size(rtsu)); | ||
1860 | } | ||
1861 | if (mdp->cd->chip_reset) | ||
1862 | mdp->cd->chip_reset(ndev); | ||
1863 | |||
1864 | if (mdp->cd->tsu) { | ||
1865 | /* TSU init (first device only) */ | ||
1866 | sh_eth_tsu_init(mdp); | ||
1867 | } | ||
1868 | } | ||
1869 | |||
1870 | /* network device register */ | ||
1871 | ret = register_netdev(ndev); | ||
1872 | if (ret) | ||
1873 | goto out_release; | ||
1874 | |||
1875 | /* mdio bus init */ | ||
1876 | ret = sh_mdio_init(ndev, pdev->id, pd); | ||
1877 | if (ret) | ||
1878 | goto out_unregister; | ||
1879 | |||
1880 | /* print device information */ | ||
1881 | pr_info("Base address at 0x%x, %pM, IRQ %d.\n", | ||
1882 | (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); | ||
1883 | |||
1884 | platform_set_drvdata(pdev, ndev); | ||
1885 | |||
1886 | return ret; | ||
1887 | |||
1888 | out_unregister: | ||
1889 | unregister_netdev(ndev); | ||
1890 | |||
1891 | out_release: | ||
1892 | /* unmap TSU and free net_device */ | ||
1893 | if (mdp && mdp->tsu_addr) | ||
1894 | iounmap(mdp->tsu_addr); | ||
1895 | if (ndev) | ||
1896 | free_netdev(ndev); | ||
1897 | |||
1898 | out: | ||
1899 | return ret; | ||
1900 | } | ||
1901 | |||
1902 | static int sh_eth_drv_remove(struct platform_device *pdev) | ||
1903 | { | ||
1904 | struct net_device *ndev = platform_get_drvdata(pdev); | ||
1905 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
1906 | |||
1907 | iounmap(mdp->tsu_addr); | ||
1908 | sh_mdio_release(ndev); | ||
1909 | unregister_netdev(ndev); | ||
1910 | pm_runtime_disable(&pdev->dev); | ||
1911 | free_netdev(ndev); | ||
1912 | platform_set_drvdata(pdev, NULL); | ||
1913 | |||
1914 | return 0; | ||
1915 | } | ||
1916 | |||
1917 | static int sh_eth_runtime_nop(struct device *dev) | ||
1918 | { | ||
1919 | /* | ||
1920 | * Runtime PM callback shared between ->runtime_suspend() | ||
1921 | * and ->runtime_resume(). Simply returns success. | ||
1922 | * | ||
1923 | * This driver re-initializes all registers after | ||
1924 | * pm_runtime_get_sync() anyway so there is no need | ||
1925 | * to save and restore registers here. | ||
1926 | */ | ||
1927 | return 0; | ||
1928 | } | ||
1929 | |||
1930 | static struct dev_pm_ops sh_eth_dev_pm_ops = { | ||
1931 | .runtime_suspend = sh_eth_runtime_nop, | ||
1932 | .runtime_resume = sh_eth_runtime_nop, | ||
1933 | }; | ||
1934 | |||
1935 | static struct platform_driver sh_eth_driver = { | ||
1936 | .probe = sh_eth_drv_probe, | ||
1937 | .remove = sh_eth_drv_remove, | ||
1938 | .driver = { | ||
1939 | .name = CARDNAME, | ||
1940 | .pm = &sh_eth_dev_pm_ops, | ||
1941 | }, | ||
1942 | }; | ||
1943 | |||
1944 | static int __init sh_eth_init(void) | ||
1945 | { | ||
1946 | return platform_driver_register(&sh_eth_driver); | ||
1947 | } | ||
1948 | |||
1949 | static void __exit sh_eth_cleanup(void) | ||
1950 | { | ||
1951 | platform_driver_unregister(&sh_eth_driver); | ||
1952 | } | ||
1953 | |||
1954 | module_init(sh_eth_init); | ||
1955 | module_exit(sh_eth_cleanup); | ||
1956 | |||
1957 | MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda"); | ||
1958 | MODULE_DESCRIPTION("Renesas SuperH Ethernet driver"); | ||
1959 | MODULE_LICENSE("GPL v2"); | ||