diff options
author | Lennert Buytenhek <buytenh@wantstofly.org> | 2006-09-21 20:28:13 -0400 |
---|---|---|
committer | Jeff Garzik <jeff@garzik.org> | 2006-09-22 20:13:23 -0400 |
commit | 1d22e05df8183b36b3cc0760344774040abc74d5 (patch) | |
tree | ec560b450a60045d0607b450ced38b60a9db9ecb /drivers/net/arm/ep93xx_eth.c | |
parent | 28eb177dfa5982d132edceed891cb3885df258bb (diff) |
[PATCH] Cirrus Logic ep93xx ethernet driver
The Cirrus Logic ep93xx is an ARM SoC that includes an ethernet MAC
-- this patch adds a driver for that ethernet MAC.
Signed-off-by: Lennert Buytenhek <buytenh@wantstofly.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/arm/ep93xx_eth.c')
-rw-r--r-- | drivers/net/arm/ep93xx_eth.c | 944 |
1 files changed, 944 insertions, 0 deletions
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c new file mode 100644 index 000000000000..cef00744a9dc --- /dev/null +++ b/drivers/net/arm/ep93xx_eth.c | |||
@@ -0,0 +1,944 @@ | |||
1 | /* | ||
2 | * EP93xx ethernet network device driver | ||
3 | * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> | ||
4 | * Dedicated to Marija Kulikova. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/dma-mapping.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/netdevice.h> | ||
17 | #include <linux/mii.h> | ||
18 | #include <linux/etherdevice.h> | ||
19 | #include <linux/ethtool.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/moduleparam.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include <asm/arch/ep93xx-regs.h> | ||
25 | #include <asm/arch/platform.h> | ||
26 | #include <asm/io.h> | ||
27 | |||
/* Driver identification. */
#define DRV_MODULE_NAME		"ep93xx-eth"
#define DRV_MODULE_VERSION	"0.1"

/* Ring sizes; must be powers of two (ring pointers wrap with & (N - 1)). */
#define RX_QUEUE_ENTRIES	64
#define TX_QUEUE_ENTRIES	8

/* Hardware maximum frame size, and the per-buffer slot size (two
 * buffers are carved out of each page). */
#define MAX_PKT_SIZE		2044
#define PKT_BUF_SIZE		2048

/* MAC register offsets (relative to the ioremapped base) and the bit
 * definitions this driver uses. */
#define REG_RXCTL		0x0000
#define REG_RXCTL_DEFAULT	0x00073800
#define REG_TXCTL		0x0004
#define REG_TXCTL_ENABLE	0x00000001
/* MII management interface. */
#define REG_MIICMD		0x0010
#define REG_MIICMD_READ		0x00008000
#define REG_MIICMD_WRITE	0x00004000
#define REG_MIIDATA		0x0014
#define REG_MIISTS		0x0018
#define REG_MIISTS_BUSY		0x00000001
/* Self-control: soft reset and (per ep93xx_start_hw) MDC divisor. */
#define REG_SELFCTL		0x0020
#define REG_SELFCTL_RESET	0x00000001
/* Interrupt enable / status registers. */
#define REG_INTEN		0x0024
#define REG_INTEN_TX		0x00000008
#define REG_INTEN_RX		0x00000007
#define REG_INTSTSP		0x0028
#define REG_INTSTS_TX		0x00000008
#define REG_INTSTS_RX		0x00000004
#define REG_INTSTSC		0x002c
/* Address filter and the six individual MAC address byte registers. */
#define REG_AFP			0x004c
#define REG_INDAD0		0x0050
#define REG_INDAD1		0x0051
#define REG_INDAD2		0x0052
#define REG_INDAD3		0x0053
#define REG_INDAD4		0x0054
#define REG_INDAD5		0x0055
/* Global interrupt mask. */
#define REG_GIINTMSK		0x0064
#define REG_GIINTMSK_ENABLE	0x00008000
/* Bus master control / status. */
#define REG_BMCTL		0x0080
#define REG_BMCTL_ENABLE_TX	0x00000100
#define REG_BMCTL_ENABLE_RX	0x00000001
#define REG_BMSTS		0x0084
#define REG_BMSTS_RX_ACTIVE	0x00000008
/* Receive descriptor and receive status queue registers. */
#define REG_RXDQBADD		0x0090
#define REG_RXDQBLEN		0x0094
#define REG_RXDCURADD		0x0098
#define REG_RXDENQ		0x009c
#define REG_RXSTSQBADD		0x00a0
#define REG_RXSTSQBLEN		0x00a4
#define REG_RXSTSQCURADD	0x00a8
#define REG_RXSTSENQ		0x00ac
/* Transmit descriptor and transmit status queue registers. */
#define REG_TXDQBADD		0x00b0
#define REG_TXDQBLEN		0x00b4
#define REG_TXDQCURADD		0x00b8
#define REG_TXDENQ		0x00bc
#define REG_TXSTSQBADD		0x00c0
#define REG_TXSTSQBLEN		0x00c4
#define REG_TXSTSQCURADD	0x00c8
#define REG_MAXFRMLEN		0x00e8
86 | |||
/*
 * Receive descriptor: DMA bus address of the buffer plus a packed word
 * carrying the buffer index (bits 30:16) and buffer length (bits 15:0),
 * per the masks below.
 */
struct ep93xx_rdesc
{
	u32 buf_addr;
	u32 rdesc1;
};

#define RDESC1_NSOF		0x80000000
#define RDESC1_BUFFER_INDEX	0x7fff0000
#define RDESC1_BUFFER_LENGTH	0x0000ffff

/*
 * Receive status entry, written by the hardware when a frame completes:
 * rstat0 holds done/error flags, rstat1 holds buffer index and length.
 */
struct ep93xx_rstat
{
	u32 rstat0;
	u32 rstat1;
};

#define RSTAT0_RFP		0x80000000	/* frame processed */
#define RSTAT0_RWE		0x40000000	/* received without error */
#define RSTAT0_EOF		0x20000000	/* end of frame */
#define RSTAT0_EOB		0x10000000	/* end of buffer */
#define RSTAT0_AM		0x00c00000
#define RSTAT0_RX_ERR		0x00200000
#define RSTAT0_OE		0x00100000	/* overrun -> fifo error */
#define RSTAT0_FE		0x00080000	/* framing error */
#define RSTAT0_RUNT		0x00040000
#define RSTAT0_EDATA		0x00020000
#define RSTAT0_CRCE		0x00010000	/* CRC error */
#define RSTAT0_CRCI		0x00008000	/* CRC included in length */
#define RSTAT0_HTI		0x00003f00
#define RSTAT1_RFP		0x80000000
#define RSTAT1_BUFFER_INDEX	0x7fff0000
#define RSTAT1_FRAME_LENGTH	0x0000ffff

/*
 * Transmit descriptor: buffer bus address plus a packed word with the
 * end-of-frame flag, buffer index and 12-bit frame length.
 */
struct ep93xx_tdesc
{
	u32 buf_addr;
	u32 tdesc1;
};

#define TDESC1_EOF		0x80000000
#define TDESC1_BUFFER_INDEX	0x7fff0000
#define TDESC1_BUFFER_ABORT	0x00008000
#define TDESC1_BUFFER_LENGTH	0x00000fff

/* Transmit status entry: one status word per completed frame. */
struct ep93xx_tstat
{
	u32 tstat0;
};

#define TSTAT0_TXFP		0x80000000	/* frame processed */
#define TSTAT0_TXWE		0x40000000	/* transmitted without error */
#define TSTAT0_FA		0x20000000	/* frame aborted */
#define TSTAT0_LCRS		0x10000000
#define TSTAT0_OW		0x04000000	/* out of window collision */
#define TSTAT0_TXU		0x02000000	/* underrun -> fifo error */
#define TSTAT0_ECOLL		0x01000000
#define TSTAT0_NCOLL		0x001f0000	/* collision count */
#define TSTAT0_BUFFER_INDEX	0x00007fff

/*
 * All four rings live in a single coherent DMA allocation; the queue
 * base/current registers are programmed with offsets into this block.
 */
struct ep93xx_descs
{
	struct ep93xx_rdesc rdesc[RX_QUEUE_ENTRIES];
	struct ep93xx_tdesc tdesc[TX_QUEUE_ENTRIES];
	struct ep93xx_rstat rstat[RX_QUEUE_ENTRIES];
	struct ep93xx_tstat tstat[TX_QUEUE_ENTRIES];
};

/* Per-device driver state, stored in netdev_priv(). */
struct ep93xx_priv
{
	struct resource *res;		/* claimed MMIO region */
	void *base_addr;		/* ioremapped register base */
	int irq;

	struct ep93xx_descs *descs;	/* coherent descriptor block */
	dma_addr_t descs_dma_addr;	/* its bus address */

	void *rx_buf[RX_QUEUE_ENTRIES];	/* CPU pointers to rx buffers */
	void *tx_buf[TX_QUEUE_ENTRIES];	/* CPU pointers to tx buffers */

	spinlock_t rx_lock;		/* serializes rx intr re-enabling */
	unsigned int rx_pointer;	/* next rx slot to process */
	unsigned int tx_clean_pointer;	/* next tx slot to reclaim */
	unsigned int tx_pointer;	/* next tx slot to fill */
	spinlock_t tx_pending_lock;	/* guards tx_pending */
	unsigned int tx_pending;	/* in-flight tx descriptors */

	struct net_device_stats stats;

	struct mii_if_info mii;
	u8 mdc_divisor;			/* HCLK -> MDC clock divisor */
};
178 | |||
/* MMIO accessors: byte/word/long raw reads and writes at base_addr + off. */
#define rdb(ep, off)		__raw_readb((ep)->base_addr + (off))
#define rdw(ep, off)		__raw_readw((ep)->base_addr + (off))
#define rdl(ep, off)		__raw_readl((ep)->base_addr + (off))
#define wrb(ep, off, val)	__raw_writeb((val), (ep)->base_addr + (off))
#define wrw(ep, off, val)	__raw_writew((val), (ep)->base_addr + (off))
#define wrl(ep, off, val)	__raw_writel((val), (ep)->base_addr + (off))

/* Forward declaration: ep93xx_start_hw() probes the PHY before the MII
 * helpers are defined further down. */
static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg);
187 | |||
188 | static struct net_device_stats *ep93xx_get_stats(struct net_device *dev) | ||
189 | { | ||
190 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
191 | return &(ep->stats); | ||
192 | } | ||
193 | |||
/*
 * Receive poll worker (old-style NAPI): drain completed frames from the
 * rx status queue until the hardware's write pointer is reached or
 * *budget is exhausted.  Returns nonzero while more work may remain.
 */
static int ep93xx_rx(struct net_device *dev, int *budget)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int tail_offset;
	int rx_done;
	int processed;

	/*
	 * Hardware's current rx status queue position, expressed as a
	 * byte offset into the coherent descriptor block.
	 */
	tail_offset = rdl(ep, REG_RXSTSQCURADD) - ep->descs_dma_addr;

	rx_done = 0;
	processed = 0;
	while (*budget > 0) {
		int entry;
		struct ep93xx_rstat *rstat;
		u32 rstat0;
		u32 rstat1;
		int length;
		struct sk_buff *skb;

		entry = ep->rx_pointer;
		rstat = ep->descs->rstat + entry;
		/* Caught up with the hardware: all pending frames done. */
		if ((void *)rstat - (void *)ep->descs == tail_offset) {
			rx_done = 1;
			break;
		}

		/*
		 * Snapshot the status words, then clear them so this
		 * slot reads as not-done when the ring wraps around.
		 */
		rstat0 = rstat->rstat0;
		rstat1 = rstat->rstat1;
		rstat->rstat0 = 0;
		rstat->rstat1 = 0;

		/* Consistency checks on the hardware-reported status. */
		if (!(rstat0 & RSTAT0_RFP))
			printk(KERN_CRIT "ep93xx_rx: buffer not done "
					 " %.8x %.8x\n", rstat0, rstat1);
		if (!(rstat0 & RSTAT0_EOF))
			printk(KERN_CRIT "ep93xx_rx: not end-of-frame "
					 " %.8x %.8x\n", rstat0, rstat1);
		if (!(rstat0 & RSTAT0_EOB))
			printk(KERN_CRIT "ep93xx_rx: not end-of-buffer "
					 " %.8x %.8x\n", rstat0, rstat1);
		if (!(rstat1 & RSTAT1_RFP))
			printk(KERN_CRIT "ep93xx_rx: buffer1 not done "
					 " %.8x %.8x\n", rstat0, rstat1);
		if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry)
			printk(KERN_CRIT "ep93xx_rx: entry mismatch "
					 " %.8x %.8x\n", rstat0, rstat1);

		/* Receive-without-error flag clear: classify the error. */
		if (!(rstat0 & RSTAT0_RWE)) {
			printk(KERN_NOTICE "ep93xx_rx: receive error "
					 " %.8x %.8x\n", rstat0, rstat1);

			ep->stats.rx_errors++;
			if (rstat0 & RSTAT0_OE)
				ep->stats.rx_fifo_errors++;
			if (rstat0 & RSTAT0_FE)
				ep->stats.rx_frame_errors++;
			if (rstat0 & (RSTAT0_RUNT | RSTAT0_EDATA))
				ep->stats.rx_length_errors++;
			if (rstat0 & RSTAT0_CRCE)
				ep->stats.rx_crc_errors++;
			goto err;
		}

		length = rstat1 & RSTAT1_FRAME_LENGTH;
		if (length > MAX_PKT_SIZE) {
			printk(KERN_NOTICE "ep93xx_rx: invalid length "
					 " %.8x %.8x\n", rstat0, rstat1);
			goto err;
		}

		/* Strip FCS. */
		if (rstat0 & RSTAT0_CRCI)
			length -= 4;

		/* +2 so the skb_reserve below aligns the IP header. */
		skb = dev_alloc_skb(length + 2);
		if (likely(skb != NULL)) {
			skb->dev = dev;
			skb_reserve(skb, 2);
			/* Make the DMA'd buffer visible to the CPU, then
			 * copy it out of the fixed rx buffer. */
			dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr,
						length, DMA_FROM_DEVICE);
			eth_copy_and_sum(skb, ep->rx_buf[entry], length, 0);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, dev);

			dev->last_rx = jiffies;

			netif_receive_skb(skb);

			ep->stats.rx_packets++;
			ep->stats.rx_bytes += length;
		} else {
			ep->stats.rx_dropped++;
		}

err:
		/* Advance the ring and account against quota and budget. */
		ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1);
		processed++;
		dev->quota--;
		(*budget)--;
	}

	/* Hand the consumed descriptor/status slots back to the MAC. */
	if (processed) {
		wrw(ep, REG_RXDENQ, processed);
		wrw(ep, REG_RXSTSENQ, processed);
	}

	return !rx_done;
}
302 | |||
303 | static int ep93xx_have_more_rx(struct ep93xx_priv *ep) | ||
304 | { | ||
305 | struct ep93xx_rstat *rstat; | ||
306 | int tail_offset; | ||
307 | |||
308 | rstat = ep->descs->rstat + ep->rx_pointer; | ||
309 | tail_offset = rdl(ep, REG_RXSTSQCURADD) - ep->descs_dma_addr; | ||
310 | |||
311 | return !((void *)rstat - (void *)ep->descs == tail_offset); | ||
312 | } | ||
313 | |||
/*
 * Old-style NAPI poll: process received frames, then re-enable rx
 * interrupts.  The rx_lock plus the post-enable re-check of the status
 * queue closes the race between "queue looked empty" and "rx interrupt
 * re-enabled".  Returns 1 to stay on the poll list, 0 when done.
 */
static int ep93xx_poll(struct net_device *dev, int *budget)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	/*
	 * @@@ Have to stop polling if device is downed while we
	 * are polling.
	 */

poll_some_more:
	/* Budget exhausted with work remaining: keep polling. */
	if (ep93xx_rx(dev, budget))
		return 1;

	netif_rx_complete(dev);

	spin_lock_irq(&ep->rx_lock);
	wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
	if (ep93xx_have_more_rx(ep)) {
		/*
		 * Frames arrived after the queue looked empty: mask rx
		 * again and write the rx bit to REG_INTSTSP (presumably
		 * re-asserts the pending rx status -- confirm against
		 * the EP93xx manual), then go around again if we can
		 * get back on the poll list.
		 */
		wrl(ep, REG_INTEN, REG_INTEN_TX);
		wrl(ep, REG_INTSTSP, REG_INTSTS_RX);
		spin_unlock_irq(&ep->rx_lock);

		if (netif_rx_reschedule(dev, 0))
			goto poll_some_more;

		return 0;
	}
	spin_unlock_irq(&ep->rx_lock);

	return 0;
}
345 | |||
346 | static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) | ||
347 | { | ||
348 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
349 | int entry; | ||
350 | |||
351 | if (unlikely(skb->len) > MAX_PKT_SIZE) { | ||
352 | ep->stats.tx_dropped++; | ||
353 | dev_kfree_skb(skb); | ||
354 | return NETDEV_TX_OK; | ||
355 | } | ||
356 | |||
357 | entry = ep->tx_pointer; | ||
358 | ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1); | ||
359 | |||
360 | ep->descs->tdesc[entry].tdesc1 = | ||
361 | TDESC1_EOF | (entry << 16) | (skb->len & 0xfff); | ||
362 | skb_copy_and_csum_dev(skb, ep->tx_buf[entry]); | ||
363 | dma_sync_single(NULL, ep->descs->tdesc[entry].buf_addr, | ||
364 | skb->len, DMA_TO_DEVICE); | ||
365 | dev_kfree_skb(skb); | ||
366 | |||
367 | dev->trans_start = jiffies; | ||
368 | |||
369 | spin_lock_irq(&ep->tx_pending_lock); | ||
370 | ep->tx_pending++; | ||
371 | if (ep->tx_pending == TX_QUEUE_ENTRIES) | ||
372 | netif_stop_queue(dev); | ||
373 | spin_unlock_irq(&ep->tx_pending_lock); | ||
374 | |||
375 | wrl(ep, REG_TXDENQ, 1); | ||
376 | |||
377 | return NETDEV_TX_OK; | ||
378 | } | ||
379 | |||
/*
 * Reclaim finished transmit descriptors from the tx status queue,
 * updating statistics and waking the queue if it had been stopped at
 * the ring-full mark.  Called from the interrupt handler.
 */
static void ep93xx_tx_complete(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int tail_offset;
	int wake;

	/* Hardware's current tx status position in the descriptor block. */
	tail_offset = rdl(ep, REG_TXSTSQCURADD) - ep->descs_dma_addr;
	wake = 0;

	spin_lock(&ep->tx_pending_lock);
	while (1) {
		int entry;
		struct ep93xx_tstat *tstat;
		u32 tstat0;

		entry = ep->tx_clean_pointer;
		tstat = ep->descs->tstat + entry;
		/* Caught up with the hardware: nothing more to reclaim. */
		if ((void *)tstat - (void *)ep->descs == tail_offset)
			break;

		/* Snapshot and clear the status word. */
		tstat0 = tstat->tstat0;
		tstat->tstat0 = 0;

		/* Consistency checks on the reported status. */
		if (!(tstat0 & TSTAT0_TXFP))
			printk(KERN_CRIT "ep93xx_tx_complete: buffer not done "
					 " %.8x\n", tstat0);
		if (tstat0 & TSTAT0_FA)
			printk(KERN_CRIT "ep93xx_tx_complete: frame aborted "
					 " %.8x\n", tstat0);
		if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry)
			printk(KERN_CRIT "ep93xx_tx_complete: entry mismatch "
					 " %.8x\n", tstat0);

		/* Transmitted without error: credit the frame's length,
		 * recovered from the descriptor we wrote in xmit. */
		if (tstat0 & TSTAT0_TXWE) {
			int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;

			ep->stats.tx_packets++;
			ep->stats.tx_bytes += length;
		} else {
			ep->stats.tx_errors++;
		}

		if (tstat0 & TSTAT0_OW)
			ep->stats.tx_window_errors++;
		if (tstat0 & TSTAT0_TXU)
			ep->stats.tx_fifo_errors++;
		/* Collision count: bits 20:16 (TSTAT0_NCOLL). */
		ep->stats.collisions += (tstat0 >> 16) & 0x1f;

		ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1);
		/* The queue is stopped iff tx_pending hit the full mark. */
		if (ep->tx_pending == TX_QUEUE_ENTRIES)
			wake = 1;
		ep->tx_pending--;
	}
	spin_unlock(&ep->tx_pending_lock);

	if (wake)
		netif_wake_queue(dev);
}
438 | |||
/*
 * Interrupt handler (registered IRQF_SHARED): schedules rx polling and
 * reclaims completed transmits.  Status is read from REG_INTSTSC
 * (presumably clear-on-read, per the 'C' suffix -- confirm against the
 * EP93xx manual).
 */
static irqreturn_t ep93xx_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct ep93xx_priv *ep = netdev_priv(dev);
	u32 status;

	status = rdl(ep, REG_INTSTSC);
	if (status == 0)
		return IRQ_NONE;	/* not ours (shared line) */

	if (status & REG_INTSTS_RX) {
		spin_lock(&ep->rx_lock);
		/* Mask rx interrupts while the poll routine runs; they
		 * are re-enabled in ep93xx_poll(). */
		if (likely(__netif_rx_schedule_prep(dev))) {
			wrl(ep, REG_INTEN, REG_INTEN_TX);
			__netif_rx_schedule(dev);
		}
		spin_unlock(&ep->rx_lock);
	}

	if (status & REG_INTSTS_TX)
		ep93xx_tx_complete(dev);

	return IRQ_HANDLED;
}
463 | |||
464 | static void ep93xx_free_buffers(struct ep93xx_priv *ep) | ||
465 | { | ||
466 | int i; | ||
467 | |||
468 | for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) { | ||
469 | dma_addr_t d; | ||
470 | |||
471 | d = ep->descs->rdesc[i].buf_addr; | ||
472 | if (d) | ||
473 | dma_unmap_single(NULL, d, PAGE_SIZE, DMA_FROM_DEVICE); | ||
474 | |||
475 | if (ep->rx_buf[i] != NULL) | ||
476 | free_page((unsigned long)ep->rx_buf[i]); | ||
477 | } | ||
478 | |||
479 | for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) { | ||
480 | dma_addr_t d; | ||
481 | |||
482 | d = ep->descs->tdesc[i].buf_addr; | ||
483 | if (d) | ||
484 | dma_unmap_single(NULL, d, PAGE_SIZE, DMA_TO_DEVICE); | ||
485 | |||
486 | if (ep->tx_buf[i] != NULL) | ||
487 | free_page((unsigned long)ep->tx_buf[i]); | ||
488 | } | ||
489 | |||
490 | dma_free_coherent(NULL, sizeof(struct ep93xx_descs), ep->descs, | ||
491 | ep->descs_dma_addr); | ||
492 | } | ||
493 | |||
/*
 * The hardware enforces a sub-2K maximum packet size, so we put
 * two buffers on every hardware page.
 *
 * Returns 0 on success, 1 on failure (everything allocated so far is
 * released via ep93xx_free_buffers()).
 */
static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
{
	int i;

	/* One coherent allocation holds all four descriptor/status rings. */
	ep->descs = dma_alloc_coherent(NULL, sizeof(struct ep93xx_descs),
				&ep->descs_dma_addr, GFP_KERNEL | GFP_DMA);
	if (ep->descs == NULL)
		return 1;

	/* Rx buffers: one page feeds each pair of ring entries. */
	for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
		void *page;
		dma_addr_t d;

		page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (page == NULL)
			goto err;

		d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(d)) {
			free_page((unsigned long)page);
			goto err;
		}

		/* First half of the page. */
		ep->rx_buf[i] = page;
		ep->descs->rdesc[i].buf_addr = d;
		ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;

		/* Second half of the page. */
		ep->rx_buf[i + 1] = page + PKT_BUF_SIZE;
		ep->descs->rdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
		ep->descs->rdesc[i + 1].rdesc1 = ((i + 1) << 16) | PKT_BUF_SIZE;
	}

	/* Tx buffers: same two-per-page layout; descriptors get their
	 * length/index words filled in at transmit time. */
	for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
		void *page;
		dma_addr_t d;

		page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (page == NULL)
			goto err;

		d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(d)) {
			free_page((unsigned long)page);
			goto err;
		}

		ep->tx_buf[i] = page;
		ep->descs->tdesc[i].buf_addr = d;

		ep->tx_buf[i + 1] = page + PKT_BUF_SIZE;
		ep->descs->tdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
	}

	return 0;

err:
	/* Free whatever was set up before the failure. */
	ep93xx_free_buffers(ep);
	return 1;
}
557 | |||
/*
 * Reset the MAC and program it: MDC divisor, the four queue base and
 * length registers, interrupt enables, station address and maximum
 * frame length.  Returns 0 on success, nonzero on timeout.
 */
static int ep93xx_start_hw(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	unsigned long addr;
	int i;

	/* Soft reset; the bit self-clears when the reset completes.
	 * Poll for up to ~10ms. */
	wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET);
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n");
		return 1;
	}

	/* MDC divisor field starts at bit 9 of SELFCTL (field width per
	 * the EP93xx manual -- TODO confirm). */
	wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9));

	/* Does the PHY support preamble suppress? */
	if ((ep93xx_mdio_read(dev, ep->mii.phy_id, MII_BMSR) & 0x0040) != 0)
		wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9) | (1 << 8));

	/* Receive descriptor ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rdesc);
	wrl(ep, REG_RXDQBADD, addr);
	wrl(ep, REG_RXDCURADD, addr);
	wrw(ep, REG_RXDQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rdesc));

	/* Receive status ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rstat);
	wrl(ep, REG_RXSTSQBADD, addr);
	wrl(ep, REG_RXSTSQCURADD, addr);
	wrw(ep, REG_RXSTSQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rstat));

	/* Transmit descriptor ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tdesc);
	wrl(ep, REG_TXDQBADD, addr);
	wrl(ep, REG_TXDQCURADD, addr);
	wrw(ep, REG_TXDQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tdesc));

	/* Transmit status ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tstat);
	wrl(ep, REG_TXSTSQBADD, addr);
	wrl(ep, REG_TXSTSQCURADD, addr);
	wrw(ep, REG_TXSTSQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tstat));

	/* Enable bus mastering and the tx/rx interrupt sources. */
	wrl(ep, REG_BMCTL, REG_BMCTL_ENABLE_TX | REG_BMCTL_ENABLE_RX);
	wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
	wrl(ep, REG_GIINTMSK, 0);

	/* Wait (up to ~10ms) for the receive DMA engine to go active. */
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_BMSTS) & REG_BMSTS_RX_ACTIVE) != 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to start\n");
		return 1;
	}

	/* Hand the full rx descriptor and status rings to the MAC. */
	wrl(ep, REG_RXDENQ, RX_QUEUE_ENTRIES);
	wrl(ep, REG_RXSTSENQ, RX_QUEUE_ENTRIES);

	/* Program the station MAC address, one byte per register. */
	wrb(ep, REG_INDAD0, dev->dev_addr[0]);
	wrb(ep, REG_INDAD1, dev->dev_addr[1]);
	wrb(ep, REG_INDAD2, dev->dev_addr[2]);
	wrb(ep, REG_INDAD3, dev->dev_addr[3]);
	wrb(ep, REG_INDAD4, dev->dev_addr[4]);
	wrb(ep, REG_INDAD5, dev->dev_addr[5]);
	wrl(ep, REG_AFP, 0);

	wrl(ep, REG_MAXFRMLEN, (MAX_PKT_SIZE << 16) | MAX_PKT_SIZE);

	/* Finally enable the receiver and transmitter. */
	wrl(ep, REG_RXCTL, REG_RXCTL_DEFAULT);
	wrl(ep, REG_TXCTL, REG_TXCTL_ENABLE);

	return 0;
}
639 | |||
640 | static void ep93xx_stop_hw(struct net_device *dev) | ||
641 | { | ||
642 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
643 | int i; | ||
644 | |||
645 | wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET); | ||
646 | for (i = 0; i < 10; i++) { | ||
647 | if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0) | ||
648 | break; | ||
649 | msleep(1); | ||
650 | } | ||
651 | |||
652 | if (i == 10) | ||
653 | printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n"); | ||
654 | } | ||
655 | |||
/*
 * net_device open hook: allocate the DMA buffers, bring up the MAC,
 * install the interrupt handler and start the transmit queue.
 */
static int ep93xx_open(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int err;

	if (ep93xx_alloc_buffers(ep))
		return -ENOMEM;

	/* No MAC address supplied via platform data: generate one. */
	if (is_zero_ether_addr(dev->dev_addr)) {
		random_ether_addr(dev->dev_addr);
		printk(KERN_INFO "%s: generated random MAC address "
			"%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", dev->name,
			dev->dev_addr[0], dev->dev_addr[1],
			dev->dev_addr[2], dev->dev_addr[3],
			dev->dev_addr[4], dev->dev_addr[5]);
	}

	if (ep93xx_start_hw(dev)) {
		ep93xx_free_buffers(ep);
		return -EIO;
	}

	/* Reset all software ring state. */
	spin_lock_init(&ep->rx_lock);
	ep->rx_pointer = 0;
	ep->tx_clean_pointer = 0;
	ep->tx_pointer = 0;
	spin_lock_init(&ep->tx_pending_lock);
	ep->tx_pending = 0;

	err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev);
	if (err) {
		/* Undo everything done above. */
		ep93xx_stop_hw(dev);
		ep93xx_free_buffers(ep);
		return err;
	}

	/* Unmask the MAC's global interrupt last. */
	wrl(ep, REG_GIINTMSK, REG_GIINTMSK_ENABLE);

	netif_start_queue(dev);

	return 0;
}
698 | |||
/*
 * net_device stop hook: the inverse of ep93xx_open() -- stop the
 * queue, mask and release the interrupt, reset the MAC, free buffers.
 */
static int ep93xx_close(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	netif_stop_queue(dev);

	/* Mask the global interrupt before tearing down the handler. */
	wrl(ep, REG_GIINTMSK, 0);
	free_irq(ep->irq, dev);
	ep93xx_stop_hw(dev);
	ep93xx_free_buffers(ep);

	return 0;
}
712 | |||
713 | static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
714 | { | ||
715 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
716 | struct mii_ioctl_data *data = if_mii(ifr); | ||
717 | |||
718 | return generic_mii_ioctl(&ep->mii, data, cmd, NULL); | ||
719 | } | ||
720 | |||
721 | static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg) | ||
722 | { | ||
723 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
724 | int data; | ||
725 | int i; | ||
726 | |||
727 | wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg); | ||
728 | |||
729 | for (i = 0; i < 10; i++) { | ||
730 | if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0) | ||
731 | break; | ||
732 | msleep(1); | ||
733 | } | ||
734 | |||
735 | if (i == 10) { | ||
736 | printk(KERN_INFO DRV_MODULE_NAME ": mdio read timed out\n"); | ||
737 | data = 0xffff; | ||
738 | } else { | ||
739 | data = rdl(ep, REG_MIIDATA); | ||
740 | } | ||
741 | |||
742 | return data; | ||
743 | } | ||
744 | |||
745 | static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data) | ||
746 | { | ||
747 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
748 | int i; | ||
749 | |||
750 | wrl(ep, REG_MIIDATA, data); | ||
751 | wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg); | ||
752 | |||
753 | for (i = 0; i < 10; i++) { | ||
754 | if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0) | ||
755 | break; | ||
756 | msleep(1); | ||
757 | } | ||
758 | |||
759 | if (i == 10) | ||
760 | printk(KERN_INFO DRV_MODULE_NAME ": mdio write timed out\n"); | ||
761 | } | ||
762 | |||
763 | static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
764 | { | ||
765 | strcpy(info->driver, DRV_MODULE_NAME); | ||
766 | strcpy(info->version, DRV_MODULE_VERSION); | ||
767 | } | ||
768 | |||
769 | static int ep93xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
770 | { | ||
771 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
772 | return mii_ethtool_gset(&ep->mii, cmd); | ||
773 | } | ||
774 | |||
775 | static int ep93xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
776 | { | ||
777 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
778 | return mii_ethtool_sset(&ep->mii, cmd); | ||
779 | } | ||
780 | |||
781 | static int ep93xx_nway_reset(struct net_device *dev) | ||
782 | { | ||
783 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
784 | return mii_nway_restart(&ep->mii); | ||
785 | } | ||
786 | |||
787 | static u32 ep93xx_get_link(struct net_device *dev) | ||
788 | { | ||
789 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
790 | return mii_link_ok(&ep->mii); | ||
791 | } | ||
792 | |||
/* ethtool operations: all link management is delegated to the generic
 * MII layer via the wrappers above. */
static struct ethtool_ops ep93xx_ethtool_ops = {
	.get_drvinfo = ep93xx_get_drvinfo,
	.get_settings = ep93xx_get_settings,
	.set_settings = ep93xx_set_settings,
	.nway_reset = ep93xx_nway_reset,
	.get_link = ep93xx_get_link,
};
800 | |||
/*
 * Allocate a net_device for the given platform data: copy the MAC
 * address and wire up the driver's entry points.  Returns NULL on
 * allocation failure; the caller owns (and must free) the device.
 */
struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
{
	struct net_device *dev;
	struct ep93xx_priv *ep;

	dev = alloc_etherdev(sizeof(struct ep93xx_priv));
	if (dev == NULL)
		return NULL;
	ep = netdev_priv(dev);

	memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN);

	dev->get_stats = ep93xx_get_stats;
	dev->ethtool_ops = &ep93xx_ethtool_ops;
	dev->poll = ep93xx_poll;
	dev->hard_start_xmit = ep93xx_xmit;
	dev->open = ep93xx_open;
	dev->stop = ep93xx_close;
	dev->do_ioctl = ep93xx_ioctl;

	/* SG/HW_CSUM work because ep93xx_xmit() linearizes and checksums
	 * every frame via skb_copy_and_csum_dev(). */
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->weight = 64;	/* NAPI poll weight */

	return dev;
}
826 | |||
827 | |||
/*
 * Platform device removal: tear down the net_device and release every
 * resource claimed in probe.  Also reused as the probe error path, so
 * each release is guarded against fields that were never set up.
 */
static int ep93xx_eth_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct ep93xx_priv *ep;

	dev = platform_get_drvdata(pdev);
	if (dev == NULL)
		return 0;
	platform_set_drvdata(pdev, NULL);

	ep = netdev_priv(dev);

	/* @@@ Force down. */
	unregister_netdev(dev);
	/*
	 * NOTE(review): ep93xx_free_buffers() is called unconditionally
	 * here, but buffers are allocated in ep93xx_open() and already
	 * freed in ep93xx_close(); if the device was never opened,
	 * ep->descs is still NULL -- confirm ep93xx_free_buffers()
	 * tolerates both cases.
	 */
	ep93xx_free_buffers(ep);

	if (ep->base_addr != NULL)
		iounmap(ep->base_addr);

	if (ep->res != NULL) {
		release_resource(ep->res);
		kfree(ep->res);
	}

	free_netdev(dev);

	return 0;
}
856 | |||
857 | static int ep93xx_eth_probe(struct platform_device *pdev) | ||
858 | { | ||
859 | struct ep93xx_eth_data *data; | ||
860 | struct net_device *dev; | ||
861 | struct ep93xx_priv *ep; | ||
862 | int err; | ||
863 | |||
864 | data = pdev->dev.platform_data; | ||
865 | if (pdev == NULL) | ||
866 | return -ENODEV; | ||
867 | |||
868 | dev = ep93xx_dev_alloc(data); | ||
869 | if (dev == NULL) { | ||
870 | err = -ENOMEM; | ||
871 | goto err_out; | ||
872 | } | ||
873 | ep = netdev_priv(dev); | ||
874 | |||
875 | platform_set_drvdata(pdev, dev); | ||
876 | |||
877 | ep->res = request_mem_region(pdev->resource[0].start, | ||
878 | pdev->resource[0].end - pdev->resource[0].start + 1, | ||
879 | pdev->dev.bus_id); | ||
880 | if (ep->res == NULL) { | ||
881 | dev_err(&pdev->dev, "Could not reserve memory region\n"); | ||
882 | err = -ENOMEM; | ||
883 | goto err_out; | ||
884 | } | ||
885 | |||
886 | ep->base_addr = ioremap(pdev->resource[0].start, | ||
887 | pdev->resource[0].end - pdev->resource[0].start); | ||
888 | if (ep->base_addr == NULL) { | ||
889 | dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); | ||
890 | err = -EIO; | ||
891 | goto err_out; | ||
892 | } | ||
893 | ep->irq = pdev->resource[1].start; | ||
894 | |||
895 | ep->mii.phy_id = data->phy_id; | ||
896 | ep->mii.phy_id_mask = 0x1f; | ||
897 | ep->mii.reg_num_mask = 0x1f; | ||
898 | ep->mii.dev = dev; | ||
899 | ep->mii.mdio_read = ep93xx_mdio_read; | ||
900 | ep->mii.mdio_write = ep93xx_mdio_write; | ||
901 | ep->mdc_divisor = 40; /* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. */ | ||
902 | |||
903 | err = register_netdev(dev); | ||
904 | if (err) { | ||
905 | dev_err(&pdev->dev, "Failed to register netdev\n"); | ||
906 | goto err_out; | ||
907 | } | ||
908 | |||
909 | printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, " | ||
910 | "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", dev->name, | ||
911 | ep->irq, data->dev_addr[0], data->dev_addr[1], | ||
912 | data->dev_addr[2], data->dev_addr[3], | ||
913 | data->dev_addr[4], data->dev_addr[5]); | ||
914 | |||
915 | return 0; | ||
916 | |||
917 | err_out: | ||
918 | ep93xx_eth_remove(pdev); | ||
919 | return err; | ||
920 | } | ||
921 | |||
922 | |||
/* Platform driver glue: binds to platform devices named "ep93xx-eth". */
static struct platform_driver ep93xx_eth_driver = {
	.probe = ep93xx_eth_probe,
	.remove = ep93xx_eth_remove,
	.driver = {
		.name = "ep93xx-eth",
	},
};
930 | |||
/* Module entry point: announce the driver and register it. */
static int __init ep93xx_eth_init_module(void)
{
	printk(KERN_INFO DRV_MODULE_NAME " version " DRV_MODULE_VERSION " loading\n");
	return platform_driver_register(&ep93xx_eth_driver);
}
936 | |||
/* Module exit point: unregister the platform driver. */
static void __exit ep93xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&ep93xx_eth_driver);
}
941 | |||
/* Module registration and licensing. */
module_init(ep93xx_eth_init_module);
module_exit(ep93xx_eth_cleanup_module);
MODULE_LICENSE("GPL");