diff options
Diffstat (limited to 'drivers/net/ethernet/cirrus/ep93xx_eth.c')
-rw-r--r-- | drivers/net/ethernet/cirrus/ep93xx_eth.c | 904 |
1 files changed, 904 insertions, 0 deletions
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c new file mode 100644 index 000000000000..4317af8d2f0a --- /dev/null +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c | |||
@@ -0,0 +1,904 @@ | |||
1 | /* | ||
2 | * EP93xx ethernet network device driver | ||
3 | * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> | ||
4 | * Dedicated to Marija Kulikova. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ | ||
13 | |||
14 | #include <linux/dma-mapping.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/netdevice.h> | ||
18 | #include <linux/mii.h> | ||
19 | #include <linux/etherdevice.h> | ||
20 | #include <linux/ethtool.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/moduleparam.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/io.h> | ||
27 | #include <linux/slab.h> | ||
28 | |||
29 | #include <mach/hardware.h> | ||
30 | |||
#define DRV_MODULE_NAME		"ep93xx-eth"
#define DRV_MODULE_VERSION	"0.1"

/* Ring sizes; both must be powers of two (pointers are wrapped with & (N-1)). */
#define RX_QUEUE_ENTRIES	64
#define TX_QUEUE_ENTRIES	8

/* Largest frame the MAC accepts, and the per-entry bounce buffer size. */
#define MAX_PKT_SIZE		2044
#define PKT_BUF_SIZE		2048

/* MAC register offsets (from base_addr) and the bits this driver uses. */
#define REG_RXCTL		0x0000
#define REG_RXCTL_DEFAULT	0x00073800
#define REG_TXCTL		0x0004
#define REG_TXCTL_ENABLE	0x00000001
#define REG_MIICMD		0x0010
#define REG_MIICMD_READ		0x00008000
#define REG_MIICMD_WRITE	0x00004000
#define REG_MIIDATA		0x0014
#define REG_MIISTS		0x0018
#define REG_MIISTS_BUSY		0x00000001
#define REG_SELFCTL		0x0020
#define REG_SELFCTL_RESET	0x00000001
#define REG_INTEN		0x0024
#define REG_INTEN_TX		0x00000008
#define REG_INTEN_RX		0x00000007
#define REG_INTSTSP		0x0028
#define REG_INTSTS_TX		0x00000008
#define REG_INTSTS_RX		0x00000004
#define REG_INTSTSC		0x002c
#define REG_AFP			0x004c
/* Individual (station) MAC address, one byte per register. */
#define REG_INDAD0		0x0050
#define REG_INDAD1		0x0051
#define REG_INDAD2		0x0052
#define REG_INDAD3		0x0053
#define REG_INDAD4		0x0054
#define REG_INDAD5		0x0055
#define REG_GIINTMSK		0x0064
#define REG_GIINTMSK_ENABLE	0x00008000
#define REG_BMCTL		0x0080
#define REG_BMCTL_ENABLE_TX	0x00000100
#define REG_BMCTL_ENABLE_RX	0x00000001
#define REG_BMSTS		0x0084
#define REG_BMSTS_RX_ACTIVE	0x00000008
/* RX/TX descriptor and status queue base/length/current/enqueue registers. */
#define REG_RXDQBADD		0x0090
#define REG_RXDQBLEN		0x0094
#define REG_RXDCURADD		0x0098
#define REG_RXDENQ		0x009c
#define REG_RXSTSQBADD		0x00a0
#define REG_RXSTSQBLEN		0x00a4
#define REG_RXSTSQCURADD	0x00a8
#define REG_RXSTSENQ		0x00ac
#define REG_TXDQBADD		0x00b0
#define REG_TXDQBLEN		0x00b4
#define REG_TXDQCURADD		0x00b8
#define REG_TXDENQ		0x00bc
#define REG_TXSTSQBADD		0x00c0
#define REG_TXSTSQBLEN		0x00c4
#define REG_TXSTSQCURADD	0x00c8
#define REG_MAXFRMLEN		0x00e8

/* Receive descriptor: DMA address of the buffer plus index/length word. */
struct ep93xx_rdesc
{
	u32	buf_addr;
	u32	rdesc1;
};

#define RDESC1_NSOF		0x80000000
#define RDESC1_BUFFER_INDEX	0x7fff0000
#define RDESC1_BUFFER_LENGTH	0x0000ffff

/* Receive status entry written back by the MAC for each received frame. */
struct ep93xx_rstat
{
	u32	rstat0;
	u32	rstat1;
};

#define RSTAT0_RFP		0x80000000
#define RSTAT0_RWE		0x40000000
#define RSTAT0_EOF		0x20000000
#define RSTAT0_EOB		0x10000000
#define RSTAT0_AM		0x00c00000
#define RSTAT0_RX_ERR		0x00200000
#define RSTAT0_OE		0x00100000
#define RSTAT0_FE		0x00080000
#define RSTAT0_RUNT		0x00040000
#define RSTAT0_EDATA		0x00020000
#define RSTAT0_CRCE		0x00010000
#define RSTAT0_CRCI		0x00008000
#define RSTAT0_HTI		0x00003f00
#define RSTAT1_RFP		0x80000000
#define RSTAT1_BUFFER_INDEX	0x7fff0000
#define RSTAT1_FRAME_LENGTH	0x0000ffff

/* Transmit descriptor: DMA address of the frame plus control word. */
struct ep93xx_tdesc
{
	u32	buf_addr;
	u32	tdesc1;
};

#define TDESC1_EOF		0x80000000
#define TDESC1_BUFFER_INDEX	0x7fff0000
#define TDESC1_BUFFER_ABORT	0x00008000
#define TDESC1_BUFFER_LENGTH	0x00000fff

/* Transmit status entry written back by the MAC after each transmission. */
struct ep93xx_tstat
{
	u32	tstat0;
};

#define TSTAT0_TXFP		0x80000000
#define TSTAT0_TXWE		0x40000000
#define TSTAT0_FA		0x20000000
#define TSTAT0_LCRS		0x10000000
#define TSTAT0_OW		0x04000000
#define TSTAT0_TXU		0x02000000
#define TSTAT0_ECOLL		0x01000000
#define TSTAT0_NCOLL		0x001f0000
#define TSTAT0_BUFFER_INDEX	0x00007fff

/*
 * All four rings live in one coherent DMA allocation so a single
 * dma_alloc_coherent()/dma_free_coherent() pair manages them.
 */
struct ep93xx_descs
{
	struct ep93xx_rdesc	rdesc[RX_QUEUE_ENTRIES];
	struct ep93xx_tdesc	tdesc[TX_QUEUE_ENTRIES];
	struct ep93xx_rstat	rstat[RX_QUEUE_ENTRIES];
	struct ep93xx_tstat	tstat[TX_QUEUE_ENTRIES];
};

/* Per-interface private state, stored in the net_device priv area. */
struct ep93xx_priv
{
	struct resource		*res;		/* claimed MMIO region */
	void __iomem		*base_addr;	/* ioremapped registers */
	int			irq;

	struct ep93xx_descs	*descs;		/* coherent ring area, NULL until open */
	dma_addr_t		descs_dma_addr;

	/* Bounce buffers; frames are copied to/from these around DMA. */
	void			*rx_buf[RX_QUEUE_ENTRIES];
	void			*tx_buf[TX_QUEUE_ENTRIES];

	spinlock_t		rx_lock;	/* serialises NAPI vs IRQ RX masking */
	unsigned int		rx_pointer;	/* next RX entry to consume */
	unsigned int		tx_clean_pointer;	/* next TX entry to reap */
	unsigned int		tx_pointer;	/* next TX entry to fill */
	spinlock_t		tx_pending_lock;	/* protects tx_pending */
	unsigned int		tx_pending;	/* in-flight TX descriptors */

	struct net_device	*dev;
	struct napi_struct	napi;

	struct mii_if_info	mii;
	u8			mdc_divisor;	/* HCLK divider for the MDIO clock */
};

/* Raw (non-byteswapping) accessors for the memory-mapped MAC registers. */
#define rdb(ep, off)		__raw_readb((ep)->base_addr + (off))
#define rdw(ep, off)		__raw_readw((ep)->base_addr + (off))
#define rdl(ep, off)		__raw_readl((ep)->base_addr + (off))
#define wrb(ep, off, val)	__raw_writeb((val), (ep)->base_addr + (off))
#define wrw(ep, off, val)	__raw_writew((val), (ep)->base_addr + (off))
#define wrl(ep, off, val)	__raw_writel((val), (ep)->base_addr + (off))
189 | |||
190 | static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg) | ||
191 | { | ||
192 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
193 | int data; | ||
194 | int i; | ||
195 | |||
196 | wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg); | ||
197 | |||
198 | for (i = 0; i < 10; i++) { | ||
199 | if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0) | ||
200 | break; | ||
201 | msleep(1); | ||
202 | } | ||
203 | |||
204 | if (i == 10) { | ||
205 | pr_info("mdio read timed out\n"); | ||
206 | data = 0xffff; | ||
207 | } else { | ||
208 | data = rdl(ep, REG_MIIDATA); | ||
209 | } | ||
210 | |||
211 | return data; | ||
212 | } | ||
213 | |||
214 | static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data) | ||
215 | { | ||
216 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
217 | int i; | ||
218 | |||
219 | wrl(ep, REG_MIIDATA, data); | ||
220 | wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg); | ||
221 | |||
222 | for (i = 0; i < 10; i++) { | ||
223 | if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0) | ||
224 | break; | ||
225 | msleep(1); | ||
226 | } | ||
227 | |||
228 | if (i == 10) | ||
229 | pr_info("mdio write timed out\n"); | ||
230 | } | ||
231 | |||
/*
 * Consume completed entries from the RX status ring, up to @budget total.
 * @processed is the count already handled in this NAPI pass; the updated
 * count is returned.  Each received frame is copied out of its DMA bounce
 * buffer into a freshly allocated skb and passed up the stack.
 */
static int ep93xx_rx(struct net_device *dev, int processed, int budget)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	while (processed < budget) {
		int entry;
		struct ep93xx_rstat *rstat;
		u32 rstat0;
		u32 rstat1;
		int length;
		struct sk_buff *skb;

		entry = ep->rx_pointer;
		rstat = ep->descs->rstat + entry;

		/* Both RFP bits must be set before the status entry is valid. */
		rstat0 = rstat->rstat0;
		rstat1 = rstat->rstat1;
		if (!(rstat0 & RSTAT0_RFP) || !(rstat1 & RSTAT1_RFP))
			break;

		/* Clear the entry so it can be recognised when reused. */
		rstat->rstat0 = 0;
		rstat->rstat1 = 0;

		/* Sanity checks: each frame should fit exactly one buffer
		 * and report the expected ring index.
		 */
		if (!(rstat0 & RSTAT0_EOF))
			pr_crit("not end-of-frame %.8x %.8x\n", rstat0, rstat1);
		if (!(rstat0 & RSTAT0_EOB))
			pr_crit("not end-of-buffer %.8x %.8x\n", rstat0, rstat1);
		if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry)
			pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);

		/* RWE clear means a receive error; classify it for stats. */
		if (!(rstat0 & RSTAT0_RWE)) {
			dev->stats.rx_errors++;
			if (rstat0 & RSTAT0_OE)
				dev->stats.rx_fifo_errors++;
			if (rstat0 & RSTAT0_FE)
				dev->stats.rx_frame_errors++;
			if (rstat0 & (RSTAT0_RUNT | RSTAT0_EDATA))
				dev->stats.rx_length_errors++;
			if (rstat0 & RSTAT0_CRCE)
				dev->stats.rx_crc_errors++;
			goto err;
		}

		length = rstat1 & RSTAT1_FRAME_LENGTH;
		if (length > MAX_PKT_SIZE) {
			pr_notice("invalid length %.8x %.8x\n", rstat0, rstat1);
			goto err;
		}

		/* Strip FCS. */
		if (rstat0 & RSTAT0_CRCI)
			length -= 4;

		/* +2 so skb_reserve(2) can align the IP header. */
		skb = dev_alloc_skb(length + 2);
		if (likely(skb != NULL)) {
			struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
			skb_reserve(skb, 2);
			/* Hand the buffer to the CPU, copy the frame out,
			 * then give ownership back to the device.
			 */
			dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
						length, DMA_FROM_DEVICE);
			skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
			dma_sync_single_for_device(dev->dev.parent,
						   rxd->buf_addr, length,
						   DMA_FROM_DEVICE);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, dev);

			netif_receive_skb(skb);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += length;
		} else {
			dev->stats.rx_dropped++;
		}

err:
		/* Ring size is a power of two, so masking wraps the pointer. */
		ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1);
		processed++;
	}

	return processed;
}
313 | |||
314 | static int ep93xx_have_more_rx(struct ep93xx_priv *ep) | ||
315 | { | ||
316 | struct ep93xx_rstat *rstat = ep->descs->rstat + ep->rx_pointer; | ||
317 | return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP)); | ||
318 | } | ||
319 | |||
/*
 * NAPI poll handler.  Processes up to @budget RX frames; if the budget
 * was not exhausted, completes NAPI and re-enables the RX interrupt,
 * re-checking under rx_lock for frames that raced in (in which case RX
 * interrupts are masked again and polling resumes).
 */
static int ep93xx_poll(struct napi_struct *napi, int budget)
{
	struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi);
	struct net_device *dev = ep->dev;
	int rx = 0;

poll_some_more:
	rx = ep93xx_rx(dev, rx, budget);
	if (rx < budget) {
		int more = 0;

		/* rx_lock serialises this against the IRQ handler's
		 * napi_schedule_prep()/interrupt-mask sequence.
		 */
		spin_lock_irq(&ep->rx_lock);
		__napi_complete(napi);
		wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
		if (ep93xx_have_more_rx(ep)) {
			/* A frame slipped in: mask RX again, ack the
			 * pending RX interrupt, and keep polling.
			 */
			wrl(ep, REG_INTEN, REG_INTEN_TX);
			wrl(ep, REG_INTSTSP, REG_INTSTS_RX);
			more = 1;
		}
		spin_unlock_irq(&ep->rx_lock);

		if (more && napi_reschedule(napi))
			goto poll_some_more;
	}

	/* Return the consumed descriptor/status entries to the hardware. */
	if (rx) {
		wrw(ep, REG_RXDENQ, rx);
		wrw(ep, REG_RXSTSENQ, rx);
	}

	return rx;
}
352 | |||
/*
 * Transmit one skb.  The frame is copied (with checksum, if deferred)
 * into the pre-mapped bounce buffer for the next TX ring entry, the
 * descriptor is enqueued, and the skb is freed immediately since the
 * hardware DMAs from the bounce buffer, not the skb.
 */
static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	struct ep93xx_tdesc *txd;
	int entry;

	/* Frames larger than the MAC/bounce-buffer limit are dropped. */
	if (unlikely(skb->len > MAX_PKT_SIZE)) {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	entry = ep->tx_pointer;
	ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);

	txd = &ep->descs->tdesc[entry];

	/* One buffer per frame: EOF set, ring index and length encoded. */
	txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
	/* Claim the bounce buffer, copy the frame in, hand it back. */
	dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
				DMA_TO_DEVICE);
	skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
	dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
				   DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	/* Stop the queue once every TX entry is in flight; the completion
	 * handler wakes it again.
	 */
	spin_lock_irq(&ep->tx_pending_lock);
	ep->tx_pending++;
	if (ep->tx_pending == TX_QUEUE_ENTRIES)
		netif_stop_queue(dev);
	spin_unlock_irq(&ep->tx_pending_lock);

	/* Tell the hardware one more descriptor is ready. */
	wrl(ep, REG_TXDENQ, 1);

	return NETDEV_TX_OK;
}
388 | |||
/*
 * Reap completed TX status entries (called from the IRQ handler),
 * update statistics, and wake the queue if it was stopped because the
 * ring was full.
 */
static void ep93xx_tx_complete(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int wake;

	wake = 0;

	/* Runs in hard-IRQ context; plain spin_lock suffices here. */
	spin_lock(&ep->tx_pending_lock);
	while (1) {
		int entry;
		struct ep93xx_tstat *tstat;
		u32 tstat0;

		entry = ep->tx_clean_pointer;
		tstat = ep->descs->tstat + entry;

		/* TXFP clear means no (more) completed entries. */
		tstat0 = tstat->tstat0;
		if (!(tstat0 & TSTAT0_TXFP))
			break;

		/* Clear the entry so it can be recognised when reused. */
		tstat->tstat0 = 0;

		if (tstat0 & TSTAT0_FA)
			pr_crit("frame aborted %.8x\n", tstat0);
		if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry)
			pr_crit("entry mismatch %.8x\n", tstat0);

		if (tstat0 & TSTAT0_TXWE) {
			/* Successful transmit: recover the frame length
			 * from the descriptor's length field.
			 */
			int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;

			dev->stats.tx_packets++;
			dev->stats.tx_bytes += length;
		} else {
			dev->stats.tx_errors++;
		}

		if (tstat0 & TSTAT0_OW)
			dev->stats.tx_window_errors++;
		if (tstat0 & TSTAT0_TXU)
			dev->stats.tx_fifo_errors++;
		/* Bits 16-20 hold the collision count for this frame. */
		dev->stats.collisions += (tstat0 >> 16) & 0x1f;

		ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1);
		/* Ring was full (queue stopped) — wake it after unlock. */
		if (ep->tx_pending == TX_QUEUE_ENTRIES)
			wake = 1;
		ep->tx_pending--;
	}
	spin_unlock(&ep->tx_pending_lock);

	if (wake)
		netif_wake_queue(dev);
}
441 | |||
/*
 * Interrupt handler.  Reading REG_INTSTSC returns and clears the pending
 * interrupt status.  RX work is deferred to NAPI (with RX interrupts
 * masked until the poll completes); TX completions are reaped inline.
 */
static irqreturn_t ep93xx_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ep93xx_priv *ep = netdev_priv(dev);
	u32 status;

	status = rdl(ep, REG_INTSTSC);
	if (status == 0)
		return IRQ_NONE;	/* shared IRQ: not ours */

	if (status & REG_INTSTS_RX) {
		spin_lock(&ep->rx_lock);
		if (likely(napi_schedule_prep(&ep->napi))) {
			/* Mask RX interrupts while NAPI polls. */
			wrl(ep, REG_INTEN, REG_INTEN_TX);
			__napi_schedule(&ep->napi);
		}
		spin_unlock(&ep->rx_lock);
	}

	if (status & REG_INTSTS_TX)
		ep93xx_tx_complete(dev);

	return IRQ_HANDLED;
}
466 | |||
467 | static void ep93xx_free_buffers(struct ep93xx_priv *ep) | ||
468 | { | ||
469 | struct device *dev = ep->dev->dev.parent; | ||
470 | int i; | ||
471 | |||
472 | for (i = 0; i < RX_QUEUE_ENTRIES; i++) { | ||
473 | dma_addr_t d; | ||
474 | |||
475 | d = ep->descs->rdesc[i].buf_addr; | ||
476 | if (d) | ||
477 | dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE); | ||
478 | |||
479 | if (ep->rx_buf[i] != NULL) | ||
480 | kfree(ep->rx_buf[i]); | ||
481 | } | ||
482 | |||
483 | for (i = 0; i < TX_QUEUE_ENTRIES; i++) { | ||
484 | dma_addr_t d; | ||
485 | |||
486 | d = ep->descs->tdesc[i].buf_addr; | ||
487 | if (d) | ||
488 | dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE); | ||
489 | |||
490 | if (ep->tx_buf[i] != NULL) | ||
491 | kfree(ep->tx_buf[i]); | ||
492 | } | ||
493 | |||
494 | dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs, | ||
495 | ep->descs_dma_addr); | ||
496 | } | ||
497 | |||
498 | static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) | ||
499 | { | ||
500 | struct device *dev = ep->dev->dev.parent; | ||
501 | int i; | ||
502 | |||
503 | ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs), | ||
504 | &ep->descs_dma_addr, GFP_KERNEL); | ||
505 | if (ep->descs == NULL) | ||
506 | return 1; | ||
507 | |||
508 | for (i = 0; i < RX_QUEUE_ENTRIES; i++) { | ||
509 | void *buf; | ||
510 | dma_addr_t d; | ||
511 | |||
512 | buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL); | ||
513 | if (buf == NULL) | ||
514 | goto err; | ||
515 | |||
516 | d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE); | ||
517 | if (dma_mapping_error(dev, d)) { | ||
518 | kfree(buf); | ||
519 | goto err; | ||
520 | } | ||
521 | |||
522 | ep->rx_buf[i] = buf; | ||
523 | ep->descs->rdesc[i].buf_addr = d; | ||
524 | ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE; | ||
525 | } | ||
526 | |||
527 | for (i = 0; i < TX_QUEUE_ENTRIES; i++) { | ||
528 | void *buf; | ||
529 | dma_addr_t d; | ||
530 | |||
531 | buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL); | ||
532 | if (buf == NULL) | ||
533 | goto err; | ||
534 | |||
535 | d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE); | ||
536 | if (dma_mapping_error(dev, d)) { | ||
537 | kfree(buf); | ||
538 | goto err; | ||
539 | } | ||
540 | |||
541 | ep->tx_buf[i] = buf; | ||
542 | ep->descs->tdesc[i].buf_addr = d; | ||
543 | } | ||
544 | |||
545 | return 0; | ||
546 | |||
547 | err: | ||
548 | ep93xx_free_buffers(ep); | ||
549 | return 1; | ||
550 | } | ||
551 | |||
/*
 * Reset and fully program the MAC: MDIO clock divider, the four
 * descriptor/status queue base registers, interrupt enables, station
 * address, frame-length limit, and finally the RX/TX enables.
 * Returns 0 on success, 1 if the hardware failed to reset or start.
 */
static int ep93xx_start_hw(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	unsigned long addr;
	int i;

	/* Soft reset; the bit self-clears when the reset completes. */
	wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET);
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		pr_crit("hw failed to reset\n");
		return 1;
	}

	/* Program the MDC clock divider (bits 9+). */
	wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9));

	/* Does the PHY support preamble suppress? */
	if ((ep93xx_mdio_read(dev, ep->mii.phy_id, MII_BMSR) & 0x0040) != 0)
		wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9) | (1 << 8));

	/* Receive descriptor ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rdesc);
	wrl(ep, REG_RXDQBADD, addr);
	wrl(ep, REG_RXDCURADD, addr);
	wrw(ep, REG_RXDQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rdesc));

	/* Receive status ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rstat);
	wrl(ep, REG_RXSTSQBADD, addr);
	wrl(ep, REG_RXSTSQCURADD, addr);
	wrw(ep, REG_RXSTSQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rstat));

	/* Transmit descriptor ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tdesc);
	wrl(ep, REG_TXDQBADD, addr);
	wrl(ep, REG_TXDQCURADD, addr);
	wrw(ep, REG_TXDQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tdesc));

	/* Transmit status ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tstat);
	wrl(ep, REG_TXSTSQBADD, addr);
	wrl(ep, REG_TXSTSQCURADD, addr);
	wrw(ep, REG_TXSTSQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tstat));

	/* Enable the bus-master engines and TX/RX interrupts. */
	wrl(ep, REG_BMCTL, REG_BMCTL_ENABLE_TX | REG_BMCTL_ENABLE_RX);
	wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
	wrl(ep, REG_GIINTMSK, 0);

	/* Wait for the RX DMA engine to report active. */
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_BMSTS) & REG_BMSTS_RX_ACTIVE) != 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		pr_crit("hw failed to start\n");
		return 1;
	}

	/* Hand the whole RX descriptor and status rings to the hardware. */
	wrl(ep, REG_RXDENQ, RX_QUEUE_ENTRIES);
	wrl(ep, REG_RXSTSENQ, RX_QUEUE_ENTRIES);

	/* Station MAC address, one byte per register. */
	wrb(ep, REG_INDAD0, dev->dev_addr[0]);
	wrb(ep, REG_INDAD1, dev->dev_addr[1]);
	wrb(ep, REG_INDAD2, dev->dev_addr[2]);
	wrb(ep, REG_INDAD3, dev->dev_addr[3]);
	wrb(ep, REG_INDAD4, dev->dev_addr[4]);
	wrb(ep, REG_INDAD5, dev->dev_addr[5]);
	wrl(ep, REG_AFP, 0);

	/* Same maximum length for both TX (high half) and RX (low half). */
	wrl(ep, REG_MAXFRMLEN, (MAX_PKT_SIZE << 16) | MAX_PKT_SIZE);

	wrl(ep, REG_RXCTL, REG_RXCTL_DEFAULT);
	wrl(ep, REG_TXCTL, REG_TXCTL_ENABLE);

	return 0;
}
633 | |||
634 | static void ep93xx_stop_hw(struct net_device *dev) | ||
635 | { | ||
636 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
637 | int i; | ||
638 | |||
639 | wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET); | ||
640 | for (i = 0; i < 10; i++) { | ||
641 | if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0) | ||
642 | break; | ||
643 | msleep(1); | ||
644 | } | ||
645 | |||
646 | if (i == 10) | ||
647 | pr_crit("hw failed to reset\n"); | ||
648 | } | ||
649 | |||
/*
 * ndo_open: allocate rings and buffers, bring up the hardware, reset the
 * software ring state, claim the IRQ, unmask the global interrupt, and
 * start the transmit queue.  Each failure path unwinds what came before.
 */
static int ep93xx_open(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int err;

	if (ep93xx_alloc_buffers(ep))
		return -ENOMEM;

	napi_enable(&ep->napi);

	if (ep93xx_start_hw(dev)) {
		napi_disable(&ep->napi);
		ep93xx_free_buffers(ep);
		return -EIO;
	}

	/* Fresh ring state for this open. */
	spin_lock_init(&ep->rx_lock);
	ep->rx_pointer = 0;
	ep->tx_clean_pointer = 0;
	ep->tx_pointer = 0;
	spin_lock_init(&ep->tx_pending_lock);
	ep->tx_pending = 0;

	err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev);
	if (err) {
		napi_disable(&ep->napi);
		ep93xx_stop_hw(dev);
		ep93xx_free_buffers(ep);
		return err;
	}

	/* Unmask the global interrupt only once the handler is installed. */
	wrl(ep, REG_GIINTMSK, REG_GIINTMSK_ENABLE);

	netif_start_queue(dev);

	return 0;
}
687 | |||
/*
 * ndo_stop: quiesce NAPI and the queue, mask the global interrupt before
 * releasing the IRQ, stop the hardware, then free the rings and buffers.
 */
static int ep93xx_close(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	napi_disable(&ep->napi);
	netif_stop_queue(dev);

	/* Mask interrupts before free_irq() so none arrive afterwards. */
	wrl(ep, REG_GIINTMSK, 0);
	free_irq(ep->irq, dev);
	ep93xx_stop_hw(dev);
	ep93xx_free_buffers(ep);

	return 0;
}
702 | |||
703 | static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
704 | { | ||
705 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
706 | struct mii_ioctl_data *data = if_mii(ifr); | ||
707 | |||
708 | return generic_mii_ioctl(&ep->mii, data, cmd, NULL); | ||
709 | } | ||
710 | |||
711 | static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
712 | { | ||
713 | strcpy(info->driver, DRV_MODULE_NAME); | ||
714 | strcpy(info->version, DRV_MODULE_VERSION); | ||
715 | } | ||
716 | |||
717 | static int ep93xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
718 | { | ||
719 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
720 | return mii_ethtool_gset(&ep->mii, cmd); | ||
721 | } | ||
722 | |||
723 | static int ep93xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
724 | { | ||
725 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
726 | return mii_ethtool_sset(&ep->mii, cmd); | ||
727 | } | ||
728 | |||
729 | static int ep93xx_nway_reset(struct net_device *dev) | ||
730 | { | ||
731 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
732 | return mii_nway_restart(&ep->mii); | ||
733 | } | ||
734 | |||
735 | static u32 ep93xx_get_link(struct net_device *dev) | ||
736 | { | ||
737 | struct ep93xx_priv *ep = netdev_priv(dev); | ||
738 | return mii_link_ok(&ep->mii); | ||
739 | } | ||
740 | |||
/* ethtool operations implemented by this driver. */
static const struct ethtool_ops ep93xx_ethtool_ops = {
	.get_drvinfo		= ep93xx_get_drvinfo,
	.get_settings		= ep93xx_get_settings,
	.set_settings		= ep93xx_set_settings,
	.nway_reset		= ep93xx_nway_reset,
	.get_link		= ep93xx_get_link,
};
748 | |||
/* Network device operations; stock eth_* helpers cover the rest. */
static const struct net_device_ops ep93xx_netdev_ops = {
	.ndo_open		= ep93xx_open,
	.ndo_stop		= ep93xx_close,
	.ndo_start_xmit		= ep93xx_xmit,
	.ndo_do_ioctl		= ep93xx_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
};
758 | |||
759 | static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) | ||
760 | { | ||
761 | struct net_device *dev; | ||
762 | |||
763 | dev = alloc_etherdev(sizeof(struct ep93xx_priv)); | ||
764 | if (dev == NULL) | ||
765 | return NULL; | ||
766 | |||
767 | memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN); | ||
768 | |||
769 | dev->ethtool_ops = &ep93xx_ethtool_ops; | ||
770 | dev->netdev_ops = &ep93xx_netdev_ops; | ||
771 | |||
772 | dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; | ||
773 | |||
774 | return dev; | ||
775 | } | ||
776 | |||
777 | |||
/*
 * Platform remove (also used as the probe error-unwind path, where some
 * resources may not have been set up yet — hence the NULL checks).
 */
static int ep93xx_eth_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct ep93xx_priv *ep;

	dev = platform_get_drvdata(pdev);
	if (dev == NULL)
		return 0;
	platform_set_drvdata(pdev, NULL);

	ep = netdev_priv(dev);

	/* @@@ Force down. */
	unregister_netdev(dev);
	ep93xx_free_buffers(ep);

	if (ep->base_addr != NULL)
		iounmap(ep->base_addr);

	if (ep->res != NULL) {
		release_resource(ep->res);
		kfree(ep->res);
	}

	free_netdev(dev);

	return 0;
}
806 | |||
/*
 * Platform probe: look up the MMIO region and IRQ, allocate the
 * net_device, map the registers, set up the MII glue, and register the
 * interface.  All error paths funnel through ep93xx_eth_remove(), which
 * tolerates partially-initialised state.
 */
static int ep93xx_eth_probe(struct platform_device *pdev)
{
	struct ep93xx_eth_data *data;
	struct net_device *dev;
	struct ep93xx_priv *ep;
	struct resource *mem;
	int irq;
	int err;

	if (pdev == NULL)
		return -ENODEV;
	data = pdev->dev.platform_data;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!mem || irq < 0)
		return -ENXIO;

	dev = ep93xx_dev_alloc(data);
	if (dev == NULL) {
		err = -ENOMEM;
		goto err_out;
	}
	ep = netdev_priv(dev);
	ep->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);
	/* NAPI weight 64 matches the default RX ring size. */
	netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);

	/* Set drvdata early so the error path can find the device. */
	platform_set_drvdata(pdev, dev);

	ep->res = request_mem_region(mem->start, resource_size(mem),
				     dev_name(&pdev->dev));
	if (ep->res == NULL) {
		dev_err(&pdev->dev, "Could not reserve memory region\n");
		err = -ENOMEM;
		goto err_out;
	}

	ep->base_addr = ioremap(mem->start, resource_size(mem));
	if (ep->base_addr == NULL) {
		dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
		err = -EIO;
		goto err_out;
	}
	ep->irq = irq;

	/* Hook this driver's MDIO accessors into the generic MII layer. */
	ep->mii.phy_id = data->phy_id;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;
	ep->mii.dev = dev;
	ep->mii.mdio_read = ep93xx_mdio_read;
	ep->mii.mdio_write = ep93xx_mdio_write;
	ep->mdc_divisor = 40;	/* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. */

	/* Fall back to a random MAC if the platform supplied none. */
	if (is_zero_ether_addr(dev->dev_addr))
		random_ether_addr(dev->dev_addr);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register netdev\n");
		goto err_out;
	}

	printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, %pM\n",
			dev->name, ep->irq, dev->dev_addr);

	return 0;

err_out:
	ep93xx_eth_remove(pdev);
	return err;
}
879 | |||
880 | |||
/* Platform driver glue; matched by name against the ep93xx-eth device. */
static struct platform_driver ep93xx_eth_driver = {
	.probe		= ep93xx_eth_probe,
	.remove		= ep93xx_eth_remove,
	.driver		= {
		.name	= "ep93xx-eth",
		.owner	= THIS_MODULE,
	},
};
889 | |||
/* Module entry: announce ourselves and register the platform driver. */
static int __init ep93xx_eth_init_module(void)
{
	printk(KERN_INFO DRV_MODULE_NAME " version " DRV_MODULE_VERSION " loading\n");
	return platform_driver_register(&ep93xx_eth_driver);
}

/* Module exit: unregister the platform driver. */
static void __exit ep93xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&ep93xx_eth_driver);
}

module_init(ep93xx_eth_init_module);
module_exit(ep93xx_eth_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-eth");