author	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit	1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree	0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/net/mace.c
tag	v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/net/mace.c')
-rw-r--r--	drivers/net/mace.c	1053
1 file changed, 1053 insertions(+), 0 deletions(-)
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
new file mode 100644
index 000000000000..6ed2d7dbd44c
--- /dev/null
+++ b/drivers/net/mace.c
@@ -0,0 +1,1053 @@
/*
 * Network device driver for the MACE ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/macio.h>

#include "mace.h"

static int port_aaui = -1;

#define N_RX_RING	8
#define N_TX_RING	6
#define MAX_TX_ACTIVE	1
#define NCMDS_TX	1	/* dma commands per element in tx ring */
#define RX_BUFLEN	(ETH_FRAME_LEN + 8)
#define TX_TIMEOUT	HZ	/* 1 second */

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

struct mace_data {
	volatile struct mace __iomem *mace;
	volatile struct dbdma_regs __iomem *tx_dma;
	int tx_dma_intr;
	volatile struct dbdma_regs __iomem *rx_dma;
	int rx_dma_intr;
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct sk_buff *rx_bufs[N_RX_RING];
	int rx_fill;
	int rx_empty;
	struct sk_buff *tx_bufs[N_TX_RING];
	int tx_fill;
	int tx_empty;
	unsigned char maccc;
	unsigned char tx_fullup;
	unsigned char tx_active;
	unsigned char tx_bad_runt;
	struct net_device_stats stats;
	struct timer_list tx_timeout;
	int timeout_active;
	int port_aaui;
	int chipid;
	struct macio_dev *mdev;
	spinlock_t lock;
};

/*
 * Number of bytes of private data per MACE: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct mace_data) \
	+ (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
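
/*
 * Worked example of the sizing above, for illustration: with N_RX_RING = 8,
 * N_TX_RING = 6, NCMDS_TX = 1 and a 16-byte struct dbdma_cmd, the command
 * area is (8 + 1*6 + 3) * 16 = 272 bytes past sizeof(struct mace_data):
 * 14 ring commands, one branch command for each ring, and one spare
 * command's worth of space (16 bytes) so DBDMA_ALIGN() in mace_probe()
 * can round the list up to a 16-byte boundary.
 */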
77 | |||
78 | static int bitrev(int); | ||
79 | static int mace_open(struct net_device *dev); | ||
80 | static int mace_close(struct net_device *dev); | ||
81 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); | ||
82 | static struct net_device_stats *mace_stats(struct net_device *dev); | ||
83 | static void mace_set_multicast(struct net_device *dev); | ||
84 | static void mace_reset(struct net_device *dev); | ||
85 | static int mace_set_address(struct net_device *dev, void *addr); | ||
86 | static irqreturn_t mace_interrupt(int irq, void *dev_id, struct pt_regs *regs); | ||
87 | static irqreturn_t mace_txdma_intr(int irq, void *dev_id, struct pt_regs *regs); | ||
88 | static irqreturn_t mace_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs); | ||
89 | static void mace_set_timeout(struct net_device *dev); | ||
90 | static void mace_tx_timeout(unsigned long data); | ||
91 | static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma); | ||
92 | static inline void mace_clean_rings(struct mace_data *mp); | ||
93 | static void __mace_set_address(struct net_device *dev, void *addr); | ||
94 | |||
95 | /* | ||
96 | * If we can't get a skbuff when we need it, we use this area for DMA. | ||
97 | */ | ||
98 | static unsigned char *dummy_buf; | ||
99 | |||
100 | /* Bit-reverse one byte of an ethernet hardware address. */ | ||
101 | static inline int | ||
102 | bitrev(int b) | ||
103 | { | ||
104 | int d = 0, i; | ||
105 | |||
106 | for (i = 0; i < 8; ++i, b >>= 1) | ||
107 | d = (d << 1) | (b & 1); | ||
108 | return d; | ||
109 | } | ||
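
/*
 * Illustrative example: bitrev(0x02) walks the eight bits of 0x02
 * (binary 00000010) LSB-first, shifting each into d from the left,
 * and returns 0x40 (binary 01000000).  Applied to all six bytes of
 * an OF "mac-address" property that was stored bit-reversed, this
 * recovers the canonical ethernet address.
 */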
110 | |||
111 | |||
112 | static int __devinit mace_probe(struct macio_dev *mdev, const struct of_match *match) | ||
113 | { | ||
114 | struct device_node *mace = macio_get_of_node(mdev); | ||
115 | struct net_device *dev; | ||
116 | struct mace_data *mp; | ||
117 | unsigned char *addr; | ||
118 | int j, rev, rc = -EBUSY; | ||
119 | |||
120 | if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { | ||
121 | printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n", | ||
122 | mace->full_name); | ||
123 | return -ENODEV; | ||
124 | } | ||
125 | |||
126 | addr = get_property(mace, "mac-address", NULL); | ||
127 | if (addr == NULL) { | ||
128 | addr = get_property(mace, "local-mac-address", NULL); | ||
129 | if (addr == NULL) { | ||
130 | printk(KERN_ERR "Can't get mac-address for MACE %s\n", | ||
131 | mace->full_name); | ||
132 | return -ENODEV; | ||
133 | } | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * lazy allocate the driver-wide dummy buffer. (Note that we | ||
138 | * never have more than one MACE in the system anyway) | ||
139 | */ | ||
140 | if (dummy_buf == NULL) { | ||
141 | dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL); | ||
142 | if (dummy_buf == NULL) { | ||
143 | printk(KERN_ERR "MACE: couldn't allocate dummy buffer\n"); | ||
144 | return -ENOMEM; | ||
145 | } | ||
146 | } | ||
147 | |||
148 | if (macio_request_resources(mdev, "mace")) { | ||
149 | printk(KERN_ERR "MACE: can't request IO resources !\n"); | ||
150 | return -EBUSY; | ||
151 | } | ||
152 | |||
153 | dev = alloc_etherdev(PRIV_BYTES); | ||
154 | if (!dev) { | ||
155 | printk(KERN_ERR "MACE: can't allocate ethernet device !\n"); | ||
156 | rc = -ENOMEM; | ||
157 | goto err_release; | ||
158 | } | ||
159 | SET_MODULE_OWNER(dev); | ||
160 | SET_NETDEV_DEV(dev, &mdev->ofdev.dev); | ||
161 | |||
162 | mp = dev->priv; | ||
163 | mp->mdev = mdev; | ||
164 | macio_set_drvdata(mdev, dev); | ||
165 | |||
166 | dev->base_addr = macio_resource_start(mdev, 0); | ||
167 | mp->mace = ioremap(dev->base_addr, 0x1000); | ||
168 | if (mp->mace == NULL) { | ||
169 | printk(KERN_ERR "MACE: can't map IO resources !\n"); | ||
170 | rc = -ENOMEM; | ||
171 | goto err_free; | ||
172 | } | ||
173 | dev->irq = macio_irq(mdev, 0); | ||
174 | |||
175 | rev = addr[0] == 0 && addr[1] == 0xA0; | ||
176 | for (j = 0; j < 6; ++j) { | ||
177 | dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j]; | ||
178 | } | ||
179 | mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) | | ||
180 | in_8(&mp->mace->chipid_lo); | ||
181 | |||
182 | |||
183 | mp = (struct mace_data *) dev->priv; | ||
184 | mp->maccc = ENXMT | ENRCV; | ||
185 | |||
186 | mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000); | ||
187 | if (mp->tx_dma == NULL) { | ||
188 | printk(KERN_ERR "MACE: can't map TX DMA resources !\n"); | ||
189 | rc = -ENOMEM; | ||
190 | goto err_unmap_io; | ||
191 | } | ||
192 | mp->tx_dma_intr = macio_irq(mdev, 1); | ||
193 | |||
194 | mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000); | ||
195 | if (mp->rx_dma == NULL) { | ||
196 | printk(KERN_ERR "MACE: can't map RX DMA resources !\n"); | ||
197 | rc = -ENOMEM; | ||
198 | goto err_unmap_tx_dma; | ||
199 | } | ||
200 | mp->rx_dma_intr = macio_irq(mdev, 2); | ||
201 | |||
202 | mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1); | ||
203 | mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1; | ||
204 | |||
205 | memset(&mp->stats, 0, sizeof(mp->stats)); | ||
206 | memset((char *) mp->tx_cmds, 0, | ||
207 | (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd)); | ||
208 | init_timer(&mp->tx_timeout); | ||
209 | spin_lock_init(&mp->lock); | ||
210 | mp->timeout_active = 0; | ||
211 | |||
212 | if (port_aaui >= 0) | ||
213 | mp->port_aaui = port_aaui; | ||
214 | else { | ||
215 | /* Apple Network Server uses the AAUI port */ | ||
216 | if (machine_is_compatible("AAPL,ShinerESB")) | ||
217 | mp->port_aaui = 1; | ||
218 | else { | ||
219 | #ifdef CONFIG_MACE_AAUI_PORT | ||
220 | mp->port_aaui = 1; | ||
221 | #else | ||
222 | mp->port_aaui = 0; | ||
223 | #endif | ||
224 | } | ||
225 | } | ||
226 | |||
227 | dev->open = mace_open; | ||
228 | dev->stop = mace_close; | ||
229 | dev->hard_start_xmit = mace_xmit_start; | ||
230 | dev->get_stats = mace_stats; | ||
231 | dev->set_multicast_list = mace_set_multicast; | ||
232 | dev->set_mac_address = mace_set_address; | ||
233 | |||
234 | /* | ||
235 | * Most of what is below could be moved to mace_open() | ||
236 | */ | ||
237 | mace_reset(dev); | ||
238 | |||
239 | rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev); | ||
240 | if (rc) { | ||
241 | printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq); | ||
242 | goto err_unmap_rx_dma; | ||
243 | } | ||
	rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
	if (rc) {
		printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
		goto err_free_irq;
	}
	rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
	if (rc) {
		printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
		goto err_free_tx_irq;
	}
254 | |||
255 | rc = register_netdev(dev); | ||
256 | if (rc) { | ||
257 | printk(KERN_ERR "MACE: Cannot register net device, aborting.\n"); | ||
258 | goto err_free_rx_irq; | ||
259 | } | ||
260 | |||
261 | printk(KERN_INFO "%s: MACE at", dev->name); | ||
262 | for (j = 0; j < 6; ++j) { | ||
263 | printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]); | ||
264 | } | ||
265 | printk(", chip revision %d.%d\n", mp->chipid >> 8, mp->chipid & 0xff); | ||
266 | |||
267 | return 0; | ||
268 | |||
269 | err_free_rx_irq: | ||
270 | free_irq(macio_irq(mdev, 2), dev); | ||
271 | err_free_tx_irq: | ||
272 | free_irq(macio_irq(mdev, 1), dev); | ||
273 | err_free_irq: | ||
274 | free_irq(macio_irq(mdev, 0), dev); | ||
275 | err_unmap_rx_dma: | ||
276 | iounmap(mp->rx_dma); | ||
277 | err_unmap_tx_dma: | ||
278 | iounmap(mp->tx_dma); | ||
279 | err_unmap_io: | ||
280 | iounmap(mp->mace); | ||
281 | err_free: | ||
282 | free_netdev(dev); | ||
283 | err_release: | ||
284 | macio_release_resources(mdev); | ||
285 | |||
286 | return rc; | ||
287 | } | ||
288 | |||
289 | static int __devexit mace_remove(struct macio_dev *mdev) | ||
290 | { | ||
291 | struct net_device *dev = macio_get_drvdata(mdev); | ||
292 | struct mace_data *mp; | ||
293 | |||
294 | BUG_ON(dev == NULL); | ||
295 | |||
296 | macio_set_drvdata(mdev, NULL); | ||
297 | |||
298 | mp = dev->priv; | ||
299 | |||
300 | unregister_netdev(dev); | ||
301 | |||
302 | free_irq(dev->irq, dev); | ||
303 | free_irq(mp->tx_dma_intr, dev); | ||
304 | free_irq(mp->rx_dma_intr, dev); | ||
305 | |||
306 | iounmap(mp->rx_dma); | ||
307 | iounmap(mp->tx_dma); | ||
308 | iounmap(mp->mace); | ||
309 | |||
310 | free_netdev(dev); | ||
311 | |||
312 | macio_release_resources(mdev); | ||
313 | |||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | static void dbdma_reset(volatile struct dbdma_regs __iomem *dma) | ||
318 | { | ||
319 | int i; | ||
320 | |||
321 | out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16); | ||
322 | |||
323 | /* | ||
324 | * Yes this looks peculiar, but apparently it needs to be this | ||
325 | * way on some machines. | ||
326 | */ | ||
327 | for (i = 200; i > 0; --i) | ||
328 | if (ld_le32(&dma->control) & RUN) | ||
329 | udelay(1); | ||
330 | } | ||
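
/*
 * Note on the dbdma control-register convention used above and below
 * (as this driver uses it): the high 16 bits of the value written form
 * a mask selecting which control bits to change, and the low 16 bits
 * give their new values.  So (WAKE|FLUSH|PAUSE|RUN) << 16 clears all
 * four bits, stopping the channel, while (RUN << 16) | RUN sets RUN
 * and leaves the other bits alone.
 */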
331 | |||
332 | static void mace_reset(struct net_device *dev) | ||
333 | { | ||
334 | struct mace_data *mp = (struct mace_data *) dev->priv; | ||
335 | volatile struct mace __iomem *mb = mp->mace; | ||
336 | int i; | ||
337 | |||
338 | /* soft-reset the chip */ | ||
339 | i = 200; | ||
340 | while (--i) { | ||
341 | out_8(&mb->biucc, SWRST); | ||
342 | if (in_8(&mb->biucc) & SWRST) { | ||
343 | udelay(10); | ||
344 | continue; | ||
345 | } | ||
346 | break; | ||
347 | } | ||
348 | if (!i) { | ||
349 | printk(KERN_ERR "mace: cannot reset chip!\n"); | ||
350 | return; | ||
351 | } | ||
352 | |||
353 | out_8(&mb->imr, 0xff); /* disable all intrs for now */ | ||
354 | i = in_8(&mb->ir); | ||
355 | out_8(&mb->maccc, 0); /* turn off tx, rx */ | ||
356 | |||
357 | out_8(&mb->biucc, XMTSP_64); | ||
358 | out_8(&mb->utr, RTRD); | ||
359 | out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST); | ||
360 | out_8(&mb->xmtfc, AUTO_PAD_XMIT); /* auto-pad short frames */ | ||
361 | out_8(&mb->rcvfc, 0); | ||
362 | |||
363 | /* load up the hardware address */ | ||
364 | __mace_set_address(dev, dev->dev_addr); | ||
365 | |||
366 | /* clear the multicast filter */ | ||
367 | if (mp->chipid == BROKEN_ADDRCHG_REV) | ||
368 | out_8(&mb->iac, LOGADDR); | ||
369 | else { | ||
370 | out_8(&mb->iac, ADDRCHG | LOGADDR); | ||
371 | while ((in_8(&mb->iac) & ADDRCHG) != 0) | ||
372 | ; | ||
373 | } | ||
374 | for (i = 0; i < 8; ++i) | ||
375 | out_8(&mb->ladrf, 0); | ||
376 | |||
377 | /* done changing address */ | ||
378 | if (mp->chipid != BROKEN_ADDRCHG_REV) | ||
379 | out_8(&mb->iac, 0); | ||
380 | |||
381 | if (mp->port_aaui) | ||
382 | out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO); | ||
383 | else | ||
384 | out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO); | ||
385 | } | ||
386 | |||
387 | static void __mace_set_address(struct net_device *dev, void *addr) | ||
388 | { | ||
389 | struct mace_data *mp = (struct mace_data *) dev->priv; | ||
390 | volatile struct mace __iomem *mb = mp->mace; | ||
391 | unsigned char *p = addr; | ||
392 | int i; | ||
393 | |||
394 | /* load up the hardware address */ | ||
395 | if (mp->chipid == BROKEN_ADDRCHG_REV) | ||
396 | out_8(&mb->iac, PHYADDR); | ||
397 | else { | ||
398 | out_8(&mb->iac, ADDRCHG | PHYADDR); | ||
399 | while ((in_8(&mb->iac) & ADDRCHG) != 0) | ||
400 | ; | ||
401 | } | ||
402 | for (i = 0; i < 6; ++i) | ||
403 | out_8(&mb->padr, dev->dev_addr[i] = p[i]); | ||
404 | if (mp->chipid != BROKEN_ADDRCHG_REV) | ||
405 | out_8(&mb->iac, 0); | ||
406 | } | ||
407 | |||
408 | static int mace_set_address(struct net_device *dev, void *addr) | ||
409 | { | ||
410 | struct mace_data *mp = (struct mace_data *) dev->priv; | ||
411 | volatile struct mace __iomem *mb = mp->mace; | ||
412 | unsigned long flags; | ||
413 | |||
414 | spin_lock_irqsave(&mp->lock, flags); | ||
415 | |||
416 | __mace_set_address(dev, addr); | ||
417 | |||
418 | /* note: setting ADDRCHG clears ENRCV */ | ||
419 | out_8(&mb->maccc, mp->maccc); | ||
420 | |||
421 | spin_unlock_irqrestore(&mp->lock, flags); | ||
422 | return 0; | ||
423 | } | ||
424 | |||
425 | static inline void mace_clean_rings(struct mace_data *mp) | ||
426 | { | ||
427 | int i; | ||
428 | |||
429 | /* free some skb's */ | ||
430 | for (i = 0; i < N_RX_RING; ++i) { | ||
431 | if (mp->rx_bufs[i] != 0) { | ||
432 | dev_kfree_skb(mp->rx_bufs[i]); | ||
433 | mp->rx_bufs[i] = NULL; | ||
434 | } | ||
435 | } | ||
436 | for (i = mp->tx_empty; i != mp->tx_fill; ) { | ||
437 | dev_kfree_skb(mp->tx_bufs[i]); | ||
438 | if (++i >= N_TX_RING) | ||
439 | i = 0; | ||
440 | } | ||
441 | } | ||
442 | |||
443 | static int mace_open(struct net_device *dev) | ||
444 | { | ||
445 | struct mace_data *mp = (struct mace_data *) dev->priv; | ||
446 | volatile struct mace __iomem *mb = mp->mace; | ||
447 | volatile struct dbdma_regs __iomem *rd = mp->rx_dma; | ||
448 | volatile struct dbdma_regs __iomem *td = mp->tx_dma; | ||
449 | volatile struct dbdma_cmd *cp; | ||
450 | int i; | ||
451 | struct sk_buff *skb; | ||
452 | unsigned char *data; | ||
453 | |||
454 | /* reset the chip */ | ||
455 | mace_reset(dev); | ||
456 | |||
457 | /* initialize list of sk_buffs for receiving and set up recv dma */ | ||
458 | mace_clean_rings(mp); | ||
459 | memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd)); | ||
460 | cp = mp->rx_cmds; | ||
461 | for (i = 0; i < N_RX_RING - 1; ++i) { | ||
462 | skb = dev_alloc_skb(RX_BUFLEN + 2); | ||
463 | if (skb == 0) { | ||
464 | data = dummy_buf; | ||
465 | } else { | ||
466 | skb_reserve(skb, 2); /* so IP header lands on 4-byte bdry */ | ||
467 | data = skb->data; | ||
468 | } | ||
469 | mp->rx_bufs[i] = skb; | ||
470 | st_le16(&cp->req_count, RX_BUFLEN); | ||
471 | st_le16(&cp->command, INPUT_LAST + INTR_ALWAYS); | ||
472 | st_le32(&cp->phy_addr, virt_to_bus(data)); | ||
473 | cp->xfer_status = 0; | ||
474 | ++cp; | ||
475 | } | ||
476 | mp->rx_bufs[i] = NULL; | ||
477 | st_le16(&cp->command, DBDMA_STOP); | ||
478 | mp->rx_fill = i; | ||
479 | mp->rx_empty = 0; | ||
480 | |||
481 | /* Put a branch back to the beginning of the receive command list */ | ||
482 | ++cp; | ||
483 | st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS); | ||
484 | st_le32(&cp->cmd_dep, virt_to_bus(mp->rx_cmds)); | ||
485 | |||
486 | /* start rx dma */ | ||
487 | out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */ | ||
488 | out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds)); | ||
489 | out_le32(&rd->control, (RUN << 16) | RUN); | ||
490 | |||
491 | /* put a branch at the end of the tx command list */ | ||
492 | cp = mp->tx_cmds + NCMDS_TX * N_TX_RING; | ||
493 | st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS); | ||
494 | st_le32(&cp->cmd_dep, virt_to_bus(mp->tx_cmds)); | ||
495 | |||
496 | /* reset tx dma */ | ||
497 | out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); | ||
498 | out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds)); | ||
499 | mp->tx_fill = 0; | ||
500 | mp->tx_empty = 0; | ||
501 | mp->tx_fullup = 0; | ||
502 | mp->tx_active = 0; | ||
503 | mp->tx_bad_runt = 0; | ||
504 | |||
505 | /* turn it on! */ | ||
506 | out_8(&mb->maccc, mp->maccc); | ||
507 | /* enable all interrupts except receive interrupts */ | ||
508 | out_8(&mb->imr, RCVINT); | ||
509 | |||
510 | return 0; | ||
511 | } | ||
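
/*
 * Layout of the receive command list built above, for illustration:
 * entries 0 .. N_RX_RING-2 are INPUT_LAST commands, one per buffer;
 * entry N_RX_RING-1 is a DBDMA_STOP marking the fill point; and one
 * extra entry past the ring is a NOP with BR_ALWAYS branching back to
 * rx_cmds[0], so the channel runs around the ring without CPU help
 * until it reaches the STOP slot.
 */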
512 | |||
513 | static int mace_close(struct net_device *dev) | ||
514 | { | ||
515 | struct mace_data *mp = (struct mace_data *) dev->priv; | ||
516 | volatile struct mace __iomem *mb = mp->mace; | ||
517 | volatile struct dbdma_regs __iomem *rd = mp->rx_dma; | ||
518 | volatile struct dbdma_regs __iomem *td = mp->tx_dma; | ||
519 | |||
520 | /* disable rx and tx */ | ||
521 | out_8(&mb->maccc, 0); | ||
522 | out_8(&mb->imr, 0xff); /* disable all intrs */ | ||
523 | |||
524 | /* disable rx and tx dma */ | ||
525 | st_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */ | ||
526 | st_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */ | ||
527 | |||
528 | mace_clean_rings(mp); | ||
529 | |||
530 | return 0; | ||
531 | } | ||
532 | |||
533 | static inline void mace_set_timeout(struct net_device *dev) | ||
534 | { | ||
535 | struct mace_data *mp = (struct mace_data *) dev->priv; | ||
536 | |||
537 | if (mp->timeout_active) | ||
538 | del_timer(&mp->tx_timeout); | ||
539 | mp->tx_timeout.expires = jiffies + TX_TIMEOUT; | ||
540 | mp->tx_timeout.function = mace_tx_timeout; | ||
541 | mp->tx_timeout.data = (unsigned long) dev; | ||
542 | add_timer(&mp->tx_timeout); | ||
543 | mp->timeout_active = 1; | ||
544 | } | ||
545 | |||
546 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) | ||
547 | { | ||
548 | struct mace_data *mp = (struct mace_data *) dev->priv; | ||
549 | volatile struct dbdma_regs __iomem *td = mp->tx_dma; | ||
550 | volatile struct dbdma_cmd *cp, *np; | ||
551 | unsigned long flags; | ||
552 | int fill, next, len; | ||
553 | |||
554 | /* see if there's a free slot in the tx ring */ | ||
555 | spin_lock_irqsave(&mp->lock, flags); | ||
556 | fill = mp->tx_fill; | ||
557 | next = fill + 1; | ||
558 | if (next >= N_TX_RING) | ||
559 | next = 0; | ||
560 | if (next == mp->tx_empty) { | ||
561 | netif_stop_queue(dev); | ||
562 | mp->tx_fullup = 1; | ||
563 | spin_unlock_irqrestore(&mp->lock, flags); | ||
564 | return 1; /* can't take it at the moment */ | ||
565 | } | ||
566 | spin_unlock_irqrestore(&mp->lock, flags); | ||
567 | |||
568 | /* partially fill in the dma command block */ | ||
569 | len = skb->len; | ||
570 | if (len > ETH_FRAME_LEN) { | ||
571 | printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len); | ||
572 | len = ETH_FRAME_LEN; | ||
573 | } | ||
574 | mp->tx_bufs[fill] = skb; | ||
575 | cp = mp->tx_cmds + NCMDS_TX * fill; | ||
576 | st_le16(&cp->req_count, len); | ||
577 | st_le32(&cp->phy_addr, virt_to_bus(skb->data)); | ||
578 | |||
579 | np = mp->tx_cmds + NCMDS_TX * next; | ||
580 | out_le16(&np->command, DBDMA_STOP); | ||
581 | |||
582 | /* poke the tx dma channel */ | ||
583 | spin_lock_irqsave(&mp->lock, flags); | ||
584 | mp->tx_fill = next; | ||
585 | if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) { | ||
586 | out_le16(&cp->xfer_status, 0); | ||
587 | out_le16(&cp->command, OUTPUT_LAST); | ||
588 | out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE)); | ||
589 | ++mp->tx_active; | ||
590 | mace_set_timeout(dev); | ||
591 | } | ||
592 | if (++next >= N_TX_RING) | ||
593 | next = 0; | ||
594 | if (next == mp->tx_empty) | ||
595 | netif_stop_queue(dev); | ||
596 | spin_unlock_irqrestore(&mp->lock, flags); | ||
597 | |||
598 | return 0; | ||
599 | } | ||
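
/*
 * Ring-index arithmetic above, worked through for illustration: with
 * N_TX_RING = 6, tx_fill = 5 and tx_empty = 0, next wraps to 0, which
 * equals tx_empty, so the ring is treated as full and the queue is
 * stopped.  One slot is always left unused so that tx_fill == tx_empty
 * unambiguously means "empty" rather than "full".
 */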
600 | |||
601 | static struct net_device_stats *mace_stats(struct net_device *dev) | ||
602 | { | ||
603 | struct mace_data *p = (struct mace_data *) dev->priv; | ||
604 | |||
605 | return &p->stats; | ||
606 | } | ||
607 | |||
608 | static void mace_set_multicast(struct net_device *dev) | ||
609 | { | ||
610 | struct mace_data *mp = (struct mace_data *) dev->priv; | ||
611 | volatile struct mace __iomem *mb = mp->mace; | ||
612 | int i, j; | ||
613 | u32 crc; | ||
614 | unsigned long flags; | ||
615 | |||
616 | spin_lock_irqsave(&mp->lock, flags); | ||
617 | mp->maccc &= ~PROM; | ||
618 | if (dev->flags & IFF_PROMISC) { | ||
619 | mp->maccc |= PROM; | ||
620 | } else { | ||
621 | unsigned char multicast_filter[8]; | ||
622 | struct dev_mc_list *dmi = dev->mc_list; | ||
623 | |||
624 | if (dev->flags & IFF_ALLMULTI) { | ||
625 | for (i = 0; i < 8; i++) | ||
626 | multicast_filter[i] = 0xff; | ||
627 | } else { | ||
628 | for (i = 0; i < 8; i++) | ||
629 | multicast_filter[i] = 0; | ||
630 | for (i = 0; i < dev->mc_count; i++) { | ||
631 | crc = ether_crc_le(6, dmi->dmi_addr); | ||
632 | j = crc >> 26; /* bit number in multicast_filter */ | ||
633 | multicast_filter[j >> 3] |= 1 << (j & 7); | ||
634 | dmi = dmi->next; | ||
635 | } | ||
636 | } | ||
637 | #if 0 | ||
638 | printk("Multicast filter :"); | ||
639 | for (i = 0; i < 8; i++) | ||
640 | printk("%02x ", multicast_filter[i]); | ||
641 | printk("\n"); | ||
642 | #endif | ||
643 | |||
644 | if (mp->chipid == BROKEN_ADDRCHG_REV) | ||
645 | out_8(&mb->iac, LOGADDR); | ||
646 | else { | ||
647 | out_8(&mb->iac, ADDRCHG | LOGADDR); | ||
648 | while ((in_8(&mb->iac) & ADDRCHG) != 0) | ||
649 | ; | ||
650 | } | ||
651 | for (i = 0; i < 8; ++i) | ||
652 | out_8(&mb->ladrf, multicast_filter[i]); | ||
653 | if (mp->chipid != BROKEN_ADDRCHG_REV) | ||
654 | out_8(&mb->iac, 0); | ||
655 | } | ||
656 | /* reset maccc */ | ||
657 | out_8(&mb->maccc, mp->maccc); | ||
658 | spin_unlock_irqrestore(&mp->lock, flags); | ||
659 | } | ||
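
/*
 * Worked example of the logical-address hash above, for illustration:
 * ether_crc_le() returns a 32-bit CRC of the 6-byte multicast address;
 * crc >> 26 keeps its top 6 bits, a bit number j in 0..63 within the
 * 64-bit filter.  For j = 45, multicast_filter[45 >> 3] (byte 5) gets
 * bit (45 & 7) == 5 set.  A multicast frame whose address hashes to a
 * set bit passes the hardware filter.
 */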
660 | |||
661 | static void mace_handle_misc_intrs(struct mace_data *mp, int intr) | ||
662 | { | ||
663 | volatile struct mace __iomem *mb = mp->mace; | ||
664 | static int mace_babbles, mace_jabbers; | ||
665 | |||
666 | if (intr & MPCO) | ||
667 | mp->stats.rx_missed_errors += 256; | ||
668 | mp->stats.rx_missed_errors += in_8(&mb->mpc); /* reading clears it */ | ||
669 | if (intr & RNTPCO) | ||
670 | mp->stats.rx_length_errors += 256; | ||
671 | mp->stats.rx_length_errors += in_8(&mb->rntpc); /* reading clears it */ | ||
672 | if (intr & CERR) | ||
673 | ++mp->stats.tx_heartbeat_errors; | ||
674 | if (intr & BABBLE) | ||
675 | if (mace_babbles++ < 4) | ||
676 | printk(KERN_DEBUG "mace: babbling transmitter\n"); | ||
677 | if (intr & JABBER) | ||
678 | if (mace_jabbers++ < 4) | ||
679 | printk(KERN_DEBUG "mace: jabbering transceiver\n"); | ||
680 | } | ||
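
/*
 * Accounting note (an inference from the code above): mpc and rntpc
 * are 8-bit hardware counters that clear when read, and the MPCO and
 * RNTPCO interrupt bits flag that a counter wrapped since the last
 * read.  Each overflow therefore adds 256 to the running total before
 * the 8-bit residue is added in.
 */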
681 | |||
682 | static irqreturn_t mace_interrupt(int irq, void *dev_id, struct pt_regs *regs) | ||
683 | { | ||
684 | struct net_device *dev = (struct net_device *) dev_id; | ||
685 | struct mace_data *mp = (struct mace_data *) dev->priv; | ||
686 | volatile struct mace __iomem *mb = mp->mace; | ||
687 | volatile struct dbdma_regs __iomem *td = mp->tx_dma; | ||
688 | volatile struct dbdma_cmd *cp; | ||
689 | int intr, fs, i, stat, x; | ||
690 | int xcount, dstat; | ||
691 | unsigned long flags; | ||
692 | /* static int mace_last_fs, mace_last_xcount; */ | ||
693 | |||
694 | spin_lock_irqsave(&mp->lock, flags); | ||
695 | intr = in_8(&mb->ir); /* read interrupt register */ | ||
696 | in_8(&mb->xmtrc); /* get retries */ | ||
697 | mace_handle_misc_intrs(mp, intr); | ||
698 | |||
699 | i = mp->tx_empty; | ||
700 | while (in_8(&mb->pr) & XMTSV) { | ||
701 | del_timer(&mp->tx_timeout); | ||
702 | mp->timeout_active = 0; | ||
703 | /* | ||
704 | * Clear any interrupt indication associated with this status | ||
705 | * word. This appears to unlatch any error indication from | ||
706 | * the DMA controller. | ||
707 | */ | ||
708 | intr = in_8(&mb->ir); | ||
709 | if (intr != 0) | ||
710 | mace_handle_misc_intrs(mp, intr); | ||
711 | if (mp->tx_bad_runt) { | ||
712 | fs = in_8(&mb->xmtfs); | ||
713 | mp->tx_bad_runt = 0; | ||
714 | out_8(&mb->xmtfc, AUTO_PAD_XMIT); | ||
715 | continue; | ||
716 | } | ||
717 | dstat = ld_le32(&td->status); | ||
718 | /* stop DMA controller */ | ||
719 | out_le32(&td->control, RUN << 16); | ||
720 | /* | ||
721 | * xcount is the number of complete frames which have been | ||
722 | * written to the fifo but for which status has not been read. | ||
723 | */ | ||
724 | xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK; | ||
725 | if (xcount == 0 || (dstat & DEAD)) { | ||
726 | /* | ||
727 | * If a packet was aborted before the DMA controller has | ||
728 | * finished transferring it, it seems that there are 2 bytes | ||
729 | * which are stuck in some buffer somewhere. These will get | ||
730 | * transmitted as soon as we read the frame status (which | ||
731 | * reenables the transmit data transfer request). Turning | ||
732 | * off the DMA controller and/or resetting the MACE doesn't | ||
733 | * help. So we disable auto-padding and FCS transmission | ||
734 | * so the two bytes will only be a runt packet which should | ||
735 | * be ignored by other stations. | ||
736 | */ | ||
737 | out_8(&mb->xmtfc, DXMTFCS); | ||
738 | } | ||
739 | fs = in_8(&mb->xmtfs); | ||
740 | if ((fs & XMTSV) == 0) { | ||
741 | printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n", | ||
742 | fs, xcount, dstat); | ||
743 | mace_reset(dev); | ||
			/*
			 * XXX mace likes to hang the machine after an xmtfs error.
			 * This is hard to reproduce; resetting *may* help.
			 */
		}
		cp = mp->tx_cmds + NCMDS_TX * i;
		stat = ld_le16(&cp->xfer_status);
		if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
			/*
			 * Check whether there were in fact 2 bytes written to
			 * the transmit FIFO.
			 */
			udelay(1);
			x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
			if (x != 0) {
				/* there were two bytes with an end-of-packet indication */
				mp->tx_bad_runt = 1;
				mace_set_timeout(dev);
			} else {
				/*
				 * Either there weren't the two bytes buffered up, or they
				 * didn't have an end-of-packet indication.
				 * We flush the transmit FIFO just in case (by setting the
				 * XMTFWU bit with the transmitter disabled).
				 */
				out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
				out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
				udelay(1);
				out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
				out_8(&mb->xmtfc, AUTO_PAD_XMIT);
			}
		}
		/* dma should have finished */
		if (i == mp->tx_fill) {
			printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
			       fs, xcount, dstat);
			continue;
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++mp->stats.tx_errors;
			if (fs & LCAR)
				++mp->stats.tx_carrier_errors;
			if (fs & (UFLO|LCOL|RTRY))
				++mp->stats.tx_aborted_errors;
		} else {
			mp->stats.tx_bytes += mp->tx_bufs[i]->len;
			++mp->stats.tx_packets;
		}
		dev_kfree_skb_irq(mp->tx_bufs[i]);
		--mp->tx_active;
		if (++i >= N_TX_RING)
			i = 0;
#if 0
		mace_last_fs = fs;
		mace_last_xcount = xcount;
#endif
	}

	if (i != mp->tx_empty) {
		mp->tx_fullup = 0;
		netif_wake_queue(dev);
	}
	mp->tx_empty = i;
	i += mp->tx_active;
	if (i >= N_TX_RING)
		i -= N_TX_RING;
	if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
		do {
			/* set up the next one */
			cp = mp->tx_cmds + NCMDS_TX * i;
			out_le16(&cp->xfer_status, 0);
			out_le16(&cp->command, OUTPUT_LAST);
			++mp->tx_active;
			if (++i >= N_TX_RING)
				i = 0;
		} while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
		out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
		mace_set_timeout(dev);
	}
	spin_unlock_irqrestore(&mp->lock, flags);
	return IRQ_HANDLED;
}
827 | |||
828 | static void mace_tx_timeout(unsigned long data) | ||
829 | { | ||
830 | struct net_device *dev = (struct net_device *) data; | ||
831 | struct mace_data *mp = (struct mace_data *) dev->priv; | ||
832 | volatile struct mace __iomem *mb = mp->mace; | ||
833 | volatile struct dbdma_regs __iomem *td = mp->tx_dma; | ||
834 | volatile struct dbdma_regs __iomem *rd = mp->rx_dma; | ||
835 | volatile struct dbdma_cmd *cp; | ||
836 | unsigned long flags; | ||
837 | int i; | ||
838 | |||
839 | spin_lock_irqsave(&mp->lock, flags); | ||
840 | mp->timeout_active = 0; | ||
841 | if (mp->tx_active == 0 && !mp->tx_bad_runt) | ||
842 | goto out; | ||
843 | |||
844 | /* update various counters */ | ||
845 | mace_handle_misc_intrs(mp, in_8(&mb->ir)); | ||
846 | |||
847 | cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty; | ||
848 | |||
849 | /* turn off both tx and rx and reset the chip */ | ||
850 | out_8(&mb->maccc, 0); | ||
851 | printk(KERN_ERR "mace: transmit timeout - resetting\n"); | ||
852 | dbdma_reset(td); | ||
853 | mace_reset(dev); | ||
854 | |||
855 | /* restart rx dma */ | ||
856 | cp = bus_to_virt(ld_le32(&rd->cmdptr)); | ||
857 | dbdma_reset(rd); | ||
858 | out_le16(&cp->xfer_status, 0); | ||
859 | out_le32(&rd->cmdptr, virt_to_bus(cp)); | ||
860 | out_le32(&rd->control, (RUN << 16) | RUN); | ||
861 | |||
862 | /* fix up the transmit side */ | ||
863 | i = mp->tx_empty; | ||
864 | mp->tx_active = 0; | ||
865 | ++mp->stats.tx_errors; | ||
866 | if (mp->tx_bad_runt) { | ||
867 | mp->tx_bad_runt = 0; | ||
868 | } else if (i != mp->tx_fill) { | ||
869 | dev_kfree_skb(mp->tx_bufs[i]); | ||
870 | if (++i >= N_TX_RING) | ||
871 | i = 0; | ||
872 | mp->tx_empty = i; | ||
873 | } | ||
874 | mp->tx_fullup = 0; | ||
875 | netif_wake_queue(dev); | ||
876 | if (i != mp->tx_fill) { | ||
877 | cp = mp->tx_cmds + NCMDS_TX * i; | ||
878 | out_le16(&cp->xfer_status, 0); | ||
879 | out_le16(&cp->command, OUTPUT_LAST); | ||
880 | out_le32(&td->cmdptr, virt_to_bus(cp)); | ||
881 | out_le32(&td->control, (RUN << 16) | RUN); | ||
882 | ++mp->tx_active; | ||
883 | mace_set_timeout(dev); | ||
884 | } | ||
885 | |||
886 | /* turn it back on */ | ||
887 | out_8(&mb->imr, RCVINT); | ||
888 | out_8(&mb->maccc, mp->maccc); | ||
889 | |||
890 | out: | ||
891 | spin_unlock_irqrestore(&mp->lock, flags); | ||
892 | } | ||
893 | |||
894 | static irqreturn_t mace_txdma_intr(int irq, void *dev_id, struct pt_regs *regs) | ||
895 | { | ||
896 | return IRQ_HANDLED; | ||
897 | } | ||
898 | |||
899 | static irqreturn_t mace_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs) | ||
900 | { | ||
901 | struct net_device *dev = (struct net_device *) dev_id; | ||
902 | struct mace_data *mp = (struct mace_data *) dev->priv; | ||
903 | volatile struct dbdma_regs __iomem *rd = mp->rx_dma; | ||
904 | volatile struct dbdma_cmd *cp, *np; | ||
905 | int i, nb, stat, next; | ||
906 | struct sk_buff *skb; | ||
907 | unsigned frame_status; | ||
908 | static int mace_lost_status; | ||
909 | unsigned char *data; | ||
910 | unsigned long flags; | ||
911 | |||
912 | spin_lock_irqsave(&mp->lock, flags); | ||
913 | for (i = mp->rx_empty; i != mp->rx_fill; ) { | ||
914 | cp = mp->rx_cmds + i; | ||
915 | stat = ld_le16(&cp->xfer_status); | ||
916 | if ((stat & ACTIVE) == 0) { | ||
917 | next = i + 1; | ||
918 | if (next >= N_RX_RING) | ||
919 | next = 0; | ||
920 | np = mp->rx_cmds + next; | ||
921 | if (next != mp->rx_fill | ||
922 | && (ld_le16(&np->xfer_status) & ACTIVE) != 0) { | ||
923 | printk(KERN_DEBUG "mace: lost a status word\n"); | ||
924 | ++mace_lost_status; | ||
925 | } else | ||
926 | break; | ||
927 | } | ||
928 | nb = ld_le16(&cp->req_count) - ld_le16(&cp->res_count); | ||
929 | out_le16(&cp->command, DBDMA_STOP); | ||
930 | /* got a packet, have a look at it */ | ||
931 | skb = mp->rx_bufs[i]; | ||
932 | if (skb == 0) { | ||
933 | ++mp->stats.rx_dropped; | ||
934 | } else if (nb > 8) { | ||
935 | data = skb->data; | ||
936 | frame_status = (data[nb-3] << 8) + data[nb-4]; | ||
937 | if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) { | ||
938 | ++mp->stats.rx_errors; | ||
939 | if (frame_status & RS_OFLO) | ||
940 | ++mp->stats.rx_over_errors; | ||
941 | if (frame_status & RS_FRAMERR) | ||
942 | ++mp->stats.rx_frame_errors; | ||
943 | if (frame_status & RS_FCSERR) | ||
944 | ++mp->stats.rx_crc_errors; | ||
945 | } else { | ||
946 | /* Mace feature AUTO_STRIP_RCV is on by default, dropping the | ||
947 | * FCS on frames with 802.3 headers. This means that Ethernet | ||
948 | * frames have 8 extra octets at the end, while 802.3 frames | ||
949 | * have only 4. We need to correctly account for this. */ | ||
950 | if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */ | ||
951 | nb -= 4; | ||
952 | else /* Ethernet header; mace includes FCS */ | ||
953 | nb -= 8; | ||
954 | skb_put(skb, nb); | ||
955 | skb->dev = dev; | ||
956 | skb->protocol = eth_type_trans(skb, dev); | ||
957 | mp->stats.rx_bytes += skb->len; | ||
958 | netif_rx(skb); | ||
959 | dev->last_rx = jiffies; | ||
960 | mp->rx_bufs[i] = NULL; | ||
961 | ++mp->stats.rx_packets; | ||
962 | } | ||
963 | } else { | ||
964 | ++mp->stats.rx_errors; | ||
965 | ++mp->stats.rx_length_errors; | ||
966 | } | ||
967 | |||
968 | /* advance to next */ | ||
969 | if (++i >= N_RX_RING) | ||
970 | i = 0; | ||
971 | } | ||
972 | mp->rx_empty = i; | ||
973 | |||
974 | i = mp->rx_fill; | ||
975 | for (;;) { | ||
976 | next = i + 1; | ||
977 | if (next >= N_RX_RING) | ||
978 | next = 0; | ||
979 | if (next == mp->rx_empty) | ||
980 | break; | ||
981 | cp = mp->rx_cmds + i; | ||
982 | skb = mp->rx_bufs[i]; | ||
983 | if (skb == 0) { | ||
984 | skb = dev_alloc_skb(RX_BUFLEN + 2); | ||
985 | if (skb != 0) { | ||
986 | skb_reserve(skb, 2); | ||
987 | mp->rx_bufs[i] = skb; | ||
988 | } | ||
989 | } | ||
990 | st_le16(&cp->req_count, RX_BUFLEN); | ||
991 | data = skb? skb->data: dummy_buf; | ||
992 | st_le32(&cp->phy_addr, virt_to_bus(data)); | ||
993 | out_le16(&cp->xfer_status, 0); | ||
994 | out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS); | ||
995 | #if 0 | ||
996 | if ((ld_le32(&rd->status) & ACTIVE) != 0) { | ||
997 | out_le32(&rd->control, (PAUSE << 16) | PAUSE); | ||
998 | while ((in_le32(&rd->status) & ACTIVE) != 0) | ||
999 | ; | ||
1000 | } | ||
1001 | #endif | ||
1002 | i = next; | ||
1003 | } | ||
1004 | if (i != mp->rx_fill) { | ||
1005 | out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE)); | ||
1006 | mp->rx_fill = i; | ||
1007 | } | ||
1008 | spin_unlock_irqrestore(&mp->lock, flags); | ||
1009 | return IRQ_HANDLED; | ||
1010 | } | ||
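
/*
 * Receive trailer layout assumed above (per the MACE's AUTO_STRIP_RCV
 * behaviour described in the comment): the chip appends status bytes
 * after the frame data, and the 16-bit receive frame status is
 * reassembled from data[nb-4] (low byte) and data[nb-3] (high byte)
 * before nb is trimmed back to the payload length.
 */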
1011 | |||
1012 | static struct of_match mace_match[] = | ||
1013 | { | ||
1014 | { | ||
1015 | .name = "mace", | ||
1016 | .type = OF_ANY_MATCH, | ||
1017 | .compatible = OF_ANY_MATCH | ||
1018 | }, | ||
1019 | {}, | ||
1020 | }; | ||
1021 | |||
1022 | static struct macio_driver mace_driver = | ||
1023 | { | ||
1024 | .name = "mace", | ||
1025 | .match_table = mace_match, | ||
1026 | .probe = mace_probe, | ||
1027 | .remove = mace_remove, | ||
1028 | }; | ||
1029 | |||
1030 | |||
1031 | static int __init mace_init(void) | ||
1032 | { | ||
1033 | return macio_register_driver(&mace_driver); | ||
1034 | } | ||
1035 | |||
1036 | static void __exit mace_cleanup(void) | ||
1037 | { | ||
1038 | macio_unregister_driver(&mace_driver); | ||
1039 | |||
1040 | if (dummy_buf) { | ||
1041 | kfree(dummy_buf); | ||
1042 | dummy_buf = NULL; | ||
1043 | } | ||
1044 | } | ||
1045 | |||
1046 | MODULE_AUTHOR("Paul Mackerras"); | ||
1047 | MODULE_DESCRIPTION("PowerMac MACE driver."); | ||
1048 | MODULE_PARM(port_aaui, "i"); | ||
1049 | MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)"); | ||
1050 | MODULE_LICENSE("GPL"); | ||
1051 | |||
1052 | module_init(mace_init); | ||
1053 | module_exit(mace_cleanup); | ||