Diffstat (limited to 'drivers/net/sh_eth.c')
-rw-r--r--  drivers/net/sh_eth.c | 499
1 file changed, 333 insertions(+), 166 deletions(-)
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 3ab28bb00c12..341882f959f3 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -2,7 +2,7 @@
  * SuperH Ethernet device driver
  *
  * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
- * Copyright (C) 2008 Renesas Solutions Corp.
+ * Copyright (C) 2008-2009 Renesas Solutions Corp.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -33,6 +33,226 @@
 
 #include "sh_eth.h"
 
+/* There is CPU dependent code */
+#if defined(CONFIG_CPU_SUBTYPE_SH7724)
+#define SH_ETH_RESET_DEFAULT	1
+static void sh_eth_set_duplex(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	u32 ioaddr = ndev->base_addr;
+
+	if (mdp->duplex)	/* Full */
+		ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+	else			/* Half */
+		ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+}
+
+static void sh_eth_set_rate(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	u32 ioaddr = ndev->base_addr;
+
+	switch (mdp->speed) {
+	case 10: /* 10BASE */
+		ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
+		break;
+	case 100:/* 100BASE */
+		ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
+		break;
+	default:
+		break;
+	}
+}
+
+/* SH7724 */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+	.set_duplex	= sh_eth_set_duplex,
+	.set_rate	= sh_eth_set_rate,
+
+	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
+	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
+	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
+
+	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
+			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
+
+	.apr		= 1,
+	.mpr		= 1,
+	.tpauser	= 1,
+	.hw_swap	= 1,
+};
+
+#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
+#define SH_ETH_HAS_TSU	1
+static void sh_eth_chip_reset(struct net_device *ndev)
+{
+	/* reset device */
+	ctrl_outl(ARSTR_ARSTR, ARSTR);
+	mdelay(1);
+}
+
+static void sh_eth_reset(struct net_device *ndev)
+{
+	u32 ioaddr = ndev->base_addr;
+	int cnt = 100;
+
+	ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
+	ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
+	while (cnt > 0) {
+		if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
+			break;
+		mdelay(1);
+		cnt--;
+	}
+	if (cnt < 0)
+		printk(KERN_ERR "Device reset fail\n");
+
+	/* Table Init */
+	ctrl_outl(0x0, ioaddr + TDLAR);
+	ctrl_outl(0x0, ioaddr + TDFAR);
+	ctrl_outl(0x0, ioaddr + TDFXR);
+	ctrl_outl(0x0, ioaddr + TDFFR);
+	ctrl_outl(0x0, ioaddr + RDLAR);
+	ctrl_outl(0x0, ioaddr + RDFAR);
+	ctrl_outl(0x0, ioaddr + RDFXR);
+	ctrl_outl(0x0, ioaddr + RDFFR);
+}
+
+static void sh_eth_set_duplex(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	u32 ioaddr = ndev->base_addr;
+
+	if (mdp->duplex)	/* Full */
+		ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+	else			/* Half */
+		ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+}
+
+static void sh_eth_set_rate(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	u32 ioaddr = ndev->base_addr;
+
+	switch (mdp->speed) {
+	case 10: /* 10BASE */
+		ctrl_outl(GECMR_10, ioaddr + GECMR);
+		break;
+	case 100:/* 100BASE */
+		ctrl_outl(GECMR_100, ioaddr + GECMR);
+		break;
+	case 1000: /* 1000BASE */
+		ctrl_outl(GECMR_1000, ioaddr + GECMR);
+		break;
+	default:
+		break;
+	}
+}
+
+/* sh7763 */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+	.chip_reset	= sh_eth_chip_reset,
+	.set_duplex	= sh_eth_set_duplex,
+	.set_rate	= sh_eth_set_rate,
+
+	.ecsr_value	= ECSR_ICD | ECSR_MPD,
+	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
+	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+	.tx_check	= EESR_TC1 | EESR_FTC,
+	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+			  EESR_ECI,
+	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
+			  EESR_TFE,
+
+	.apr		= 1,
+	.mpr		= 1,
+	.tpauser	= 1,
+	.bculr		= 1,
+	.hw_swap	= 1,
+	.rpadir		= 1,
+	.no_trimd	= 1,
+	.no_ade		= 1,
+};
+
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+#define SH_ETH_RESET_DEFAULT	1
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+	.apr		= 1,
+	.mpr		= 1,
+	.tpauser	= 1,
+	.hw_swap	= 1,
+};
+#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
+#define SH_ETH_RESET_DEFAULT	1
+#define SH_ETH_HAS_TSU	1
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+};
+#endif
+
+static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
+{
+	if (!cd->ecsr_value)
+		cd->ecsr_value = DEFAULT_ECSR_INIT;
+
+	if (!cd->ecsipr_value)
+		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
+
+	if (!cd->fcftr_value)
+		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
+				  DEFAULT_FIFO_F_D_RFD;
+
+	if (!cd->fdr_value)
+		cd->fdr_value = DEFAULT_FDR_INIT;
+
+	if (!cd->rmcr_value)
+		cd->rmcr_value = DEFAULT_RMCR_VALUE;
+
+	if (!cd->tx_check)
+		cd->tx_check = DEFAULT_TX_CHECK;
+
+	if (!cd->eesr_err_check)
+		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
+
+	if (!cd->tx_error_check)
+		cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
+}
+
+#if defined(SH_ETH_RESET_DEFAULT)
+/* Chip Reset */
+static void sh_eth_reset(struct net_device *ndev)
+{
+	u32 ioaddr = ndev->base_addr;
+
+	ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
+	mdelay(3);
+	ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
+}
+#endif
+
+#if defined(CONFIG_CPU_SH4)
+static void sh_eth_set_receive_align(struct sk_buff *skb)
+{
+	int reserve;
+
+	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
+	if (reserve)
+		skb_reserve(skb, reserve);
+}
+#else
+static void sh_eth_set_receive_align(struct sk_buff *skb)
+{
+	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
+}
+#endif
+
+
 /* CPU <-> EDMAC endian convert */
 static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
 {
@@ -165,41 +385,6 @@ static struct mdiobb_ops bb_ops = {
 	.get_mdio_data = sh_get_mdio,
 };
 
-/* Chip Reset */
-static void sh_eth_reset(struct net_device *ndev)
-{
-	u32 ioaddr = ndev->base_addr;
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
-	int cnt = 100;
-
-	ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
-	ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
-	while (cnt > 0) {
-		if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
-			break;
-		mdelay(1);
-		cnt--;
-	}
-	if (cnt < 0)
-		printk(KERN_ERR "Device reset fail\n");
-
-	/* Table Init */
-	ctrl_outl(0x0, ioaddr + TDLAR);
-	ctrl_outl(0x0, ioaddr + TDFAR);
-	ctrl_outl(0x0, ioaddr + TDFXR);
-	ctrl_outl(0x0, ioaddr + TDFFR);
-	ctrl_outl(0x0, ioaddr + RDLAR);
-	ctrl_outl(0x0, ioaddr + RDFAR);
-	ctrl_outl(0x0, ioaddr + RDFXR);
-	ctrl_outl(0x0, ioaddr + RDFFR);
-#else
-	ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
-	mdelay(3);
-	ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
-#endif
-}
-
 /* free skb and descriptor buffer */
 static void sh_eth_ring_free(struct net_device *ndev)
 {
@@ -228,7 +413,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
 /* format skb and descriptor buffer */
 static void sh_eth_ring_format(struct net_device *ndev)
 {
-	u32 ioaddr = ndev->base_addr, reserve = 0;
+	u32 ioaddr = ndev->base_addr;
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	int i;
 	struct sk_buff *skb;
@@ -250,37 +435,27 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		mdp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
+		dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
+				DMA_FROM_DEVICE);
 		skb->dev = ndev; /* Mark as being used by this device. */
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
-		reserve = SH7763_SKB_ALIGN
-			- ((uint32_t)skb->data & (SH7763_SKB_ALIGN-1));
-		if (reserve)
-			skb_reserve(skb, reserve);
-#else
-		skb_reserve(skb, RX_OFFSET);
-#endif
+		sh_eth_set_receive_align(skb);
+
 		/* RX descriptor */
 		rxdesc = &mdp->rx_ring[i];
-		rxdesc->addr = (u32)skb->data & ~0x3UL;
+		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
 		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 
 		/* The size of the buffer is 16 byte boundary. */
-		rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
+		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
 		/* Rx descriptor address set */
 		if (i == 0) {
-			ctrl_outl((u32)rxdesc, ioaddr + RDLAR);
+			ctrl_outl(mdp->rx_desc_dma, ioaddr + RDLAR);
 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
-			ctrl_outl((u32)rxdesc, ioaddr + RDFAR);
+			ctrl_outl(mdp->rx_desc_dma, ioaddr + RDFAR);
 #endif
 		}
 	}
 
-	/* Rx descriptor address set */
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
-	ctrl_outl((u32)rxdesc, ioaddr + RDFXR);
-	ctrl_outl(0x1, ioaddr + RDFFR);
-#endif
-
 	mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
 
 	/* Mark the last entry as wrapping the ring. */
@@ -296,19 +471,13 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		txdesc->buffer_length = 0;
 		if (i == 0) {
 			/* Tx descriptor address set */
-			ctrl_outl((u32)txdesc, ioaddr + TDLAR);
+			ctrl_outl(mdp->tx_desc_dma, ioaddr + TDLAR);
 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
-			ctrl_outl((u32)txdesc, ioaddr + TDFAR);
+			ctrl_outl(mdp->tx_desc_dma, ioaddr + TDFAR);
 #endif
 		}
 	}
 
-	/* Tx descriptor address set */
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
-	ctrl_outl((u32)txdesc, ioaddr + TDFXR);
-	ctrl_outl(0x1, ioaddr + TDFFR);
-#endif
-
 	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
 }
 
@@ -331,7 +500,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
 	mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
 				GFP_KERNEL);
 	if (!mdp->rx_skbuff) {
-		printk(KERN_ERR "%s: Cannot allocate Rx skb\n", ndev->name);
+		dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
 		ret = -ENOMEM;
 		return ret;
 	}
@@ -339,7 +508,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
 	mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
 				GFP_KERNEL);
 	if (!mdp->tx_skbuff) {
-		printk(KERN_ERR "%s: Cannot allocate Tx skb\n", ndev->name);
+		dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
 		ret = -ENOMEM;
 		goto skb_ring_free;
 	}
@@ -350,8 +519,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
 			GFP_KERNEL);
 
 	if (!mdp->rx_ring) {
-		printk(KERN_ERR "%s: Cannot allocate Rx Ring (size %d bytes)\n",
-			ndev->name, rx_ringsize);
+		dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
+			rx_ringsize);
 		ret = -ENOMEM;
 		goto desc_ring_free;
 	}
@@ -363,8 +532,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
 	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
 			GFP_KERNEL);
 	if (!mdp->tx_ring) {
-		printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
-			ndev->name, tx_ringsize);
+		dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
+			tx_ringsize);
 		ret = -ENOMEM;
 		goto desc_ring_free;
 	}
@@ -394,44 +563,43 @@ static int sh_eth_dev_init(struct net_device *ndev)
 
 	/* Descriptor format */
 	sh_eth_ring_format(ndev);
-	ctrl_outl(RPADIR_INIT, ioaddr + RPADIR);
+	if (mdp->cd->rpadir)
+		ctrl_outl(mdp->cd->rpadir_value, ioaddr + RPADIR);
 
 	/* all sh_eth int mask */
 	ctrl_outl(0, ioaddr + EESIPR);
 
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
-	ctrl_outl(EDMR_EL, ioaddr + EDMR);
-#else
-	ctrl_outl(0, ioaddr + EDMR);	/* Endian change */
+#if defined(__LITTLE_ENDIAN__)
+	if (mdp->cd->hw_swap)
+		ctrl_outl(EDMR_EL, ioaddr + EDMR);
+	else
 #endif
+		ctrl_outl(0, ioaddr + EDMR);
 
 	/* FIFO size set */
-	ctrl_outl((FIFO_SIZE_T | FIFO_SIZE_R), ioaddr + FDR);
+	ctrl_outl(mdp->cd->fdr_value, ioaddr + FDR);
 	ctrl_outl(0, ioaddr + TFTR);
 
 	/* Frame recv control */
-	ctrl_outl(0, ioaddr + RMCR);
+	ctrl_outl(mdp->cd->rmcr_value, ioaddr + RMCR);
 
 	rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
 	tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
 	ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);
 
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
-	/* Burst sycle set */
-	ctrl_outl(0x800, ioaddr + BCULR);
-#endif
+	if (mdp->cd->bculr)
+		ctrl_outl(0x800, ioaddr + BCULR);	/* Burst sycle set */
 
-	ctrl_outl((FIFO_F_D_RFF | FIFO_F_D_RFD), ioaddr + FCFTR);
+	ctrl_outl(mdp->cd->fcftr_value, ioaddr + FCFTR);
 
-#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
-	ctrl_outl(0, ioaddr + TRIMD);
-#endif
+	if (!mdp->cd->no_trimd)
+		ctrl_outl(0, ioaddr + TRIMD);
 
 	/* Recv frame limit set register */
 	ctrl_outl(RFLR_VALUE, ioaddr + RFLR);
 
 	ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
-	ctrl_outl((DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff), ioaddr + EESIPR);
+	ctrl_outl(mdp->cd->eesipr_value, ioaddr + EESIPR);
 
 	/* PAUSE Prohibition */
 	val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) |
@@ -439,24 +607,25 @@ static int sh_eth_dev_init(struct net_device *ndev)
 
 	ctrl_outl(val, ioaddr + ECMR);
 
+	if (mdp->cd->set_rate)
+		mdp->cd->set_rate(ndev);
+
 	/* E-MAC Status Register clear */
-	ctrl_outl(ECSR_INIT, ioaddr + ECSR);
+	ctrl_outl(mdp->cd->ecsr_value, ioaddr + ECSR);
 
 	/* E-MAC Interrupt Enable register */
-	ctrl_outl(ECSIPR_INIT, ioaddr + ECSIPR);
+	ctrl_outl(mdp->cd->ecsipr_value, ioaddr + ECSIPR);
 
 	/* Set MAC address */
 	update_mac_address(ndev);
 
 	/* mask reset */
-#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7763)
-	ctrl_outl(APR_AP, ioaddr + APR);
-	ctrl_outl(MPR_MP, ioaddr + MPR);
-	ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7710)
-	ctrl_outl(BCFR_UNLIMITED, ioaddr + BCFR);
-#endif
+	if (mdp->cd->apr)
+		ctrl_outl(APR_AP, ioaddr + APR);
+	if (mdp->cd->mpr)
+		ctrl_outl(MPR_MP, ioaddr + MPR);
+	if (mdp->cd->tpauser)
+		ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
 
 	/* Setting the Rx mode will start the Rx process. */
 	ctrl_outl(EDRRR_R, ioaddr + EDRRR);
@@ -505,7 +674,7 @@ static int sh_eth_rx(struct net_device *ndev)
 	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
 	struct sk_buff *skb;
 	u16 pkt_len = 0;
-	u32 desc_status, reserve = 0;
+	u32 desc_status;
 
 	rxdesc = &mdp->rx_ring[entry];
 	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
@@ -534,7 +703,10 @@ static int sh_eth_rx(struct net_device *ndev)
 			if (desc_status & RD_RFS10)
 				mdp->stats.rx_over_errors++;
 		} else {
-			swaps((char *)(rxdesc->addr & ~0x3), pkt_len + 2);
+			if (!mdp->cd->hw_swap)
+				sh_eth_soft_swap(
+					phys_to_virt(ALIGN(rxdesc->addr, 4)),
+					pkt_len + 2);
 			skb = mdp->rx_skbuff[entry];
 			mdp->rx_skbuff[entry] = NULL;
 			skb_put(skb, pkt_len);
@@ -545,6 +717,7 @@ static int sh_eth_rx(struct net_device *ndev)
 		}
 		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
 		entry = (++mdp->cur_rx) % RX_RING_SIZE;
+		rxdesc = &mdp->rx_ring[entry];
 	}
 
 	/* Refill the Rx ring buffers. */
@@ -552,24 +725,20 @@ static int sh_eth_rx(struct net_device *ndev)
 		entry = mdp->dirty_rx % RX_RING_SIZE;
 		rxdesc = &mdp->rx_ring[entry];
 		/* The size of the buffer is 16 byte boundary. */
-		rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
+		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
 
 		if (mdp->rx_skbuff[entry] == NULL) {
 			skb = dev_alloc_skb(mdp->rx_buf_sz);
 			mdp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */
+			dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
+					DMA_FROM_DEVICE);
 			skb->dev = ndev;
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
-			reserve = SH7763_SKB_ALIGN
-				- ((uint32_t)skb->data & (SH7763_SKB_ALIGN-1));
-			if (reserve)
-				skb_reserve(skb, reserve);
-#else
-			skb_reserve(skb, RX_OFFSET);
-#endif
+			sh_eth_set_receive_align(skb);
+
 			skb->ip_summed = CHECKSUM_NONE;
-			rxdesc->addr = (u32)skb->data & ~0x3UL;
+			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
 		}
 		if (entry >= RX_RING_SIZE - 1)
 			rxdesc->status |=
@@ -593,6 +762,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	u32 ioaddr = ndev->base_addr;
 	u32 felic_stat;
+	u32 link_stat;
+	u32 mask;
 
 	if (intr_status & EESR_ECI) {
 		felic_stat = ctrl_inl(ioaddr + ECSR);
@@ -601,7 +772,14 @@
 			mdp->stats.tx_carrier_errors++;
 		if (felic_stat & ECSR_LCHNG) {
 			/* Link Changed */
-			u32 link_stat = (ctrl_inl(ioaddr + PSR));
+			if (mdp->cd->no_psr) {
+				if (mdp->link == PHY_DOWN)
+					link_stat = 0;
+				else
+					link_stat = PHY_ST_LINK;
+			} else {
+				link_stat = (ctrl_inl(ioaddr + PSR));
+			}
 			if (!(link_stat & PHY_ST_LINK)) {
 				/* Link Down : disable tx and rx */
 				ctrl_outl(ctrl_inl(ioaddr + ECMR) &
@@ -633,17 +811,15 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 		if (intr_status & EESR_RFRMER) {
 			/* Receive Frame Overflow int */
 			mdp->stats.rx_frame_errors++;
-			printk(KERN_ERR "Receive Frame Overflow\n");
+			dev_err(&ndev->dev, "Receive Frame Overflow\n");
 		}
 	}
-#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
-	if (intr_status & EESR_ADE) {
-		if (intr_status & EESR_TDE) {
-			if (intr_status & EESR_TFE)
-				mdp->stats.tx_fifo_errors++;
-		}
-	}
-#endif
+
+	if (!mdp->cd->no_ade) {
+		if (intr_status & EESR_ADE && intr_status & EESR_TDE &&
+		    intr_status & EESR_TFE)
+			mdp->stats.tx_fifo_errors++;
+	}
 
 	if (intr_status & EESR_RDE) {
 		/* Receive Descriptor Empty int */
@@ -651,24 +827,24 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 
 		if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R)
 			ctrl_outl(EDRRR_R, ioaddr + EDRRR);
-		printk(KERN_ERR "Receive Descriptor Empty\n");
+		dev_err(&ndev->dev, "Receive Descriptor Empty\n");
 	}
 	if (intr_status & EESR_RFE) {
 		/* Receive FIFO Overflow int */
 		mdp->stats.rx_fifo_errors++;
-		printk(KERN_ERR "Receive FIFO Overflow\n");
+		dev_err(&ndev->dev, "Receive FIFO Overflow\n");
 	}
-	if (intr_status & (EESR_TWB | EESR_TABT |
-#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
-			EESR_ADE |
-#endif
-			EESR_TDE | EESR_TFE)) {
+
+	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
+	if (mdp->cd->no_ade)
+		mask &= ~EESR_ADE;
+	if (intr_status & mask) {
 		/* Tx error */
 		u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
 		/* dmesg */
-		printk(KERN_ERR "%s:TX error. status=%8.8x cur_tx=%8.8x ",
-			ndev->name, intr_status, mdp->cur_tx);
-		printk(KERN_ERR "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
+		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
+			intr_status, mdp->cur_tx);
+		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
 			mdp->dirty_tx, (u32) ndev->state, edtrr);
 		/* dirty buffer free */
 		sh_eth_txfree(ndev);
@@ -687,6 +863,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 {
 	struct net_device *ndev = netdev;
 	struct sh_eth_private *mdp = netdev_priv(ndev);
+	struct sh_eth_cpu_data *cd = mdp->cd;
 	irqreturn_t ret = IRQ_NONE;
 	u32 ioaddr, boguscnt = RX_RING_SIZE;
 	u32 intr_status = 0;
@@ -699,7 +876,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 	/* Clear interrupt */
 	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
 			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
-			TX_CHECK | EESR_ERR_CHECK)) {
+			cd->tx_check | cd->eesr_err_check)) {
 		ctrl_outl(intr_status, ioaddr + EESR);
 		ret = IRQ_HANDLED;
 	} else
@@ -716,12 +893,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 	}
 
 	/* Tx Check */
-	if (intr_status & TX_CHECK) {
+	if (intr_status & cd->tx_check) {
 		sh_eth_txfree(ndev);
 		netif_wake_queue(ndev);
 	}
 
-	if (intr_status & EESR_ERR_CHECK)
+	if (intr_status & cd->eesr_err_check)
 		sh_eth_error(ndev, intr_status);
 
 	if (--boguscnt < 0) {
@@ -756,32 +933,15 @@ static void sh_eth_adjust_link(struct net_device *ndev)
 		if (phydev->duplex != mdp->duplex) {
 			new_state = 1;
 			mdp->duplex = phydev->duplex;
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
-			if (mdp->duplex) { /* FULL */
-				ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM,
-						ioaddr + ECMR);
-			} else {	/* Half */
-				ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM,
-						ioaddr + ECMR);
-			}
-#endif
+			if (mdp->cd->set_duplex)
+				mdp->cd->set_duplex(ndev);
 		}
 
 		if (phydev->speed != mdp->speed) {
 			new_state = 1;
 			mdp->speed = phydev->speed;
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
-			switch (mdp->speed) {
-			case 10: /* 10BASE */
-				ctrl_outl(GECMR_10, ioaddr + GECMR); break;
-			case 100:/* 100BASE */
-				ctrl_outl(GECMR_100, ioaddr + GECMR); break;
-			case 1000: /* 1000BASE */
-				ctrl_outl(GECMR_1000, ioaddr + GECMR); break;
-			default:
-				break;
-			}
-#endif
+			if (mdp->cd->set_rate)
+				mdp->cd->set_rate(ndev);
 		}
 		if (mdp->link == PHY_DOWN) {
 			ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
@@ -804,7 +964,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
 static int sh_eth_phy_init(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
-	char phy_id[BUS_ID_SIZE];
+	char phy_id[MII_BUS_ID_SIZE + 3];
 	struct phy_device *phydev = NULL;
 
 	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
@@ -821,8 +981,9 @@ static int sh_eth_phy_init(struct net_device *ndev)
 		dev_err(&ndev->dev, "phy_connect failed\n");
 		return PTR_ERR(phydev);
 	}
+
 	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
-		phydev->addr, phydev->drv->name);
+		 phydev->addr, phydev->drv->name);
 
 	mdp->phydev = phydev;
 
@@ -860,7 +1021,7 @@ static int sh_eth_open(struct net_device *ndev)
 #endif
 				ndev->name, ndev);
 	if (ret) {
-		printk(KERN_ERR "Can not assign IRQ number to %s\n", CARDNAME);
+		dev_err(&ndev->dev, "Can not assign IRQ number\n");
 		return ret;
 	}
 
@@ -947,7 +1108,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		if (!sh_eth_txfree(ndev)) {
 			netif_stop_queue(ndev);
 			spin_unlock_irqrestore(&mdp->lock, flags);
-			return 1;
+			return NETDEV_TX_BUSY;
 		}
 	}
 	spin_unlock_irqrestore(&mdp->lock, flags);
@@ -955,9 +1116,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	entry = mdp->cur_tx % TX_RING_SIZE;
 	mdp->tx_skbuff[entry] = skb;
 	txdesc = &mdp->tx_ring[entry];
-	txdesc->addr = (u32)(skb->data);
+	txdesc->addr = virt_to_phys(skb->data);
 	/* soft swap. */
-	swaps((char *)(txdesc->addr & ~0x3), skb->len + 2);
+	if (!mdp->cd->hw_swap)
+		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
+				 skb->len + 2);
 	/* write back */
 	__flush_purge_region(skb->data, skb->len);
 	if (skb->len < ETHERSMALL)
@@ -1059,7 +1222,7 @@ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
 	return phy_mii_ioctl(phydev, if_mii(rq), cmd);
 }
 
-
+#if defined(SH_ETH_HAS_TSU)
 /* Multicast reception directions set */
 static void sh_eth_set_multicast_list(struct net_device *ndev)
 {
@@ -1104,6 +1267,7 @@ static void sh_eth_tsu_init(u32 ioaddr)
 	ctrl_outl(0, ioaddr + TSU_POST3);	/* Disable CAM entry [16-23] */
 	ctrl_outl(0, ioaddr + TSU_POST4);	/* Disable CAM entry [24-31] */
 }
+#endif /* SH_ETH_HAS_TSU */
 
 /* MDIO bus release function */
 static int sh_mdio_release(struct net_device *ndev)
@@ -1193,7 +1357,9 @@ static const struct net_device_ops sh_eth_netdev_ops = {
 	.ndo_stop		= sh_eth_close,
 	.ndo_start_xmit		= sh_eth_start_xmit,
 	.ndo_get_stats		= sh_eth_get_stats,
+#if defined(SH_ETH_HAS_TSU)
 	.ndo_set_multicast_list	= sh_eth_set_multicast_list,
+#endif
 	.ndo_tx_timeout		= sh_eth_tx_timeout,
 	.ndo_do_ioctl		= sh_eth_do_ioctl,
 	.ndo_validate_addr	= eth_validate_addr,
@@ -1219,7 +1385,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 
 	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
 	if (!ndev) {
-		printk(KERN_ERR "%s: could not allocate device.\n", CARDNAME);
+		dev_err(&pdev->dev, "Could not allocate device.\n");
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -1252,6 +1418,10 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 	/* EDMAC endian */
 	mdp->edmac_endian = pd->edmac_endian;
 
+	/* set cpu data */
+	mdp->cd = &sh_eth_my_cpu_data;
+	sh_eth_set_default_cpu_data(mdp->cd);
+
 	/* set function */
 	ndev->netdev_ops = &sh_eth_netdev_ops;
 	ndev->watchdog_timeo = TX_TIMEOUT;
@@ -1264,13 +1434,10 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 
 	/* First device only init */
 	if (!devno) {
-#if defined(ARSTR)
-		/* reset device */
-		ctrl_outl(ARSTR_ARSTR, ARSTR);
-		mdelay(1);
-#endif
+		if (mdp->cd->chip_reset)
+			mdp->cd->chip_reset(ndev);
 
-#if defined(SH_TSU_ADDR)
+#if defined(SH_ETH_HAS_TSU)
 		/* TSU init (Init only)*/
 		sh_eth_tsu_init(SH_TSU_ADDR);
 #endif
@@ -1287,8 +1454,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 		goto out_unregister;
 
 	/* pritnt device infomation */
-	printk(KERN_INFO "%s: %s at 0x%x, ",
-	       ndev->name, CARDNAME, (u32) ndev->base_addr);
+	pr_info("Base address at 0x%x, ",
+	       (u32)ndev->base_addr);
 
 	for (i = 0; i < 5; i++)
 		printk("%02X:", ndev->dev_addr[i]);
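For readers skimming the hunks above, the pattern this patch introduces is worth spelling out: every per-SoC `#if defined(CONFIG_CPU_SUBTYPE_*)` branch in the init, interrupt, and link paths is replaced by one compile-time-selected `struct sh_eth_cpu_data`, whose optional hooks (`chip_reset`, `set_duplex`, `set_rate`) and feature flags (`apr`, `hw_swap`, `no_ade`, ...) are consulted at run time, with zeroed fields back-filled by `sh_eth_set_default_cpu_data()`. Below is a minimal, self-contained C sketch of that dispatch shape, compilable outside the kernel; it is an illustration only, not driver code — `board_cpu_data`, `board_set_rate`, `BOARD_FAST`, and the default value are all invented for the example.

/* Minimal model of the sh_eth_cpu_data pattern: a per-SoC descriptor
 * with optional hooks and default-filled values, chosen at compile time.
 * Build with: cc demo.c (or cc -DBOARD_FAST demo.c) && ./a.out
 * All names here are hypothetical; only the dispatch shape follows the diff. */
#include <stdio.h>

struct board_cpu_data {
	void (*chip_reset)(void);     /* optional hook: only some SoCs need it */
	void (*set_rate)(int speed);  /* optional hook: line-rate programming */
	unsigned int fdr_value;       /* 0 means "use the default" */
	unsigned int no_ade:1;        /* feature flag, like .no_ade in the diff */
};

#if defined(BOARD_FAST)
static void board_set_rate(int speed)
{
	printf("program MAC for %d Mbit/s\n", speed);
}

static struct board_cpu_data board_my_cpu_data = {
	.set_rate = board_set_rate,
	.no_ade   = 1,
	/* .fdr_value left 0: filled in by the default helper below */
};
#else
static struct board_cpu_data board_my_cpu_data = { 0 };
#endif

/* Mirrors sh_eth_set_default_cpu_data(): zeroed fields get safe defaults. */
static void board_set_default_cpu_data(struct board_cpu_data *cd)
{
	if (!cd->fdr_value)
		cd->fdr_value = 0x00000707; /* invented default for the demo */
}

int main(void)
{
	struct board_cpu_data *cd = &board_my_cpu_data;

	board_set_default_cpu_data(cd);

	if (cd->chip_reset)  /* hooks are optional, as in the probe path above */
		cd->chip_reset();
	if (cd->set_rate)
		cd->set_rate(100);

	printf("FDR=0x%08x, ADE errors %s\n", cd->fdr_value,
	       cd->no_ade ? "masked" : "checked");
	return 0;
}

The payoff mirrors the patch: run-time paths test one descriptor (`if (cd->set_rate) cd->set_rate(...)`) instead of carrying a per-SoC `#ifdef` block at every call site, and supporting a new SoC means adding one descriptor rather than touching each function.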