author		David Gibson <david@gibson.dropbear.id.au>	2007-08-22 23:56:01 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-10-10 19:51:52 -0400
commit		1d3bb996481e116f5f2b127cbd29b83365d2cf62 (patch)
tree		b612a1dbf51c920fb5a9758a6d35f9ed37eb927f /drivers/net/ibm_newemac/mal.c
parent		03233b90b0977d577322a6e1ddd56d9cc570d406 (diff)
Device tree aware EMAC driver
Based on BenH's earlier work, this is a new version of the EMAC driver
for the built-in ethernet found on PowerPC 4xx embedded CPUs. The
same ASIC is also found in the Axon bridge chip. This new version is
designed to work in the arch/powerpc tree, using the device tree to
probe the device, rather than the old and ugly arch/ppc OCP layer.
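For illustration, the probe code in this patch expects a MAL node roughly
like the following (the compatible strings and property names match what
the driver reads; the DCR range, interrupt specifiers and channel counts
are made-up example values):

        MAL0: mcmal {
                compatible = "ibm,mcmal2", "ibm,mcmal";
                dcr-reg = <0x180 0x62>;         /* example DCR range */
                num-tx-chans = <2>;
                num-rx-chans = <16>;
                interrupt-parent = <&UIC0>;
                /* TX EOB, RX EOB, SERR, TX DE, RX DE -- the order the
                   driver maps them in */
                interrupts = <0xa 0x4 0xb 0x4 0x20 0x4 0x21 0x4 0x22 0x4>;
        };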
This driver is designed to sit alongside the old driver (the old one
lives in drivers/net/ibm_emac, this one in drivers/net/ibm_newemac). The
old driver is left in place to support arch/ppc until arch/ppc itself
reaches its final demise (not too long now, with luck).
This driver still has a number of things that could do with cleaning
up, but I think they can be fixed up after merging. Specifically:
- Should be adjusted to properly use the dma mapping API.
Axon needs this. (A minimal sketch of that API follows this list.)
- Probe logic needs reworking, in conjunction with the general
probing code for of_platform devices. The dependencies here between
EMAC, MAL, ZMII etc. make this complicated. At present, it usually
works, because we initialize and register the sub-drivers before the
EMAC driver itself, and (being in driver code) they run after the
devices themselves have been instantiated from the device tree.
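For reference, a minimal sketch of the dma mapping calls the first item
refers to (generic <linux/dma-mapping.h> API, not code from this driver;
dev, skb and len are placeholders):

        /* Map a buffer so the device sees a valid bus address, hand that
         * address to the hardware, and unmap once the descriptor is done. */
        dma_addr_t addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
        /* ... write 'addr' into the buffer descriptor and kick off TX ... */
        dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);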
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/ibm_newemac/mal.c')
-rw-r--r--	drivers/net/ibm_newemac/mal.c	728
1 file changed, 728 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
new file mode 100644
index 000000000000..c4335b7d308c
--- /dev/null
+++ b/drivers/net/ibm_newemac/mal.c
@@ -0,0 +1,728 @@
/*
 * drivers/net/ibm_newemac/mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/delay.h>

#include "core.h"

static int mal_count;

int __devinit mal_register_commac(struct mal_instance *mal,
                                  struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "reg(%08x, %08x)" NL,
                commac->tx_chan_mask, commac->rx_chan_mask);

        /* Don't let multiple commacs claim the same channel(s) */
        if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
            (mal->rx_chan_mask & commac->rx_chan_mask)) {
                spin_unlock_irqrestore(&mal->lock, flags);
                printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
                       mal->index);
                return -EBUSY;
        }

        mal->tx_chan_mask |= commac->tx_chan_mask;
        mal->rx_chan_mask |= commac->rx_chan_mask;
        list_add(&commac->list, &mal->list);

        spin_unlock_irqrestore(&mal->lock, flags);

        return 0;
}

void __devexit mal_unregister_commac(struct mal_instance *mal,
                                     struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "unreg(%08x, %08x)" NL,
                commac->tx_chan_mask, commac->rx_chan_mask);

        mal->tx_chan_mask &= ~commac->tx_chan_mask;
        mal->rx_chan_mask &= ~commac->rx_chan_mask;
        list_del_init(&commac->list);

        spin_unlock_irqrestore(&mal->lock, flags);
}

int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
        BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
               size > MAL_MAX_RX_SIZE);

        MAL_DBG(mal, "set_rbcs(%d, %lu)" NL, channel, size);

        if (size & 0xf) {
                printk(KERN_WARNING
                       "mal%d: incorrect RX size %lu for the channel %d\n",
                       mal->index, size, channel);
                return -EINVAL;
        }

        set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
        return 0;
}

int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
        BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

        return channel * NUM_TX_BUFF;
}

int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
        BUG_ON(channel < 0 || channel >= mal->num_rx_chans);
        return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}

void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "enable_tx(%d)" NL, channel);

        set_mal_dcrn(mal, MAL_TXCASR,
                     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
        set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

        MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}

void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "enable_rx(%d)" NL, channel);

        set_mal_dcrn(mal, MAL_RXCASR,
                     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
        set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

        MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}

void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "poll_add(%p)" NL, commac);

        /* starts disabled */
        set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

        list_add_tail(&commac->poll_list, &mal->poll_list);

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "poll_del(%p)" NL, commac);

        list_del(&commac->poll_list);

        spin_unlock_irqrestore(&mal->lock, flags);
}

/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
        MAL_DBG2(mal, "enable_irq" NL);

        // XXX might want to cache MAL_CFG as the DCR read can be slooooow
        set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by __LINK_STATE_RX_SCHED bit in ndev->state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
        // XXX might want to cache MAL_CFG as the DCR read can be slooooow
        set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

        MAL_DBG2(mal, "disable_irq" NL);
}

static irqreturn_t mal_serr(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 esr = get_mal_dcrn(mal, MAL_ESR);

        /* Clear the error status register */
        set_mal_dcrn(mal, MAL_ESR, esr);

        MAL_DBG(mal, "SERR %08x" NL, esr);

        if (esr & MAL_ESR_EVB) {
                if (esr & MAL_ESR_DE) {
                        /* We ignore Descriptor error,
                         * TXDE or RXDE interrupt will be generated anyway.
                         */
                        return IRQ_HANDLED;
                }

                if (esr & MAL_ESR_PEIN) {
                        /* PLB error, it's probably buggy hardware or
                         * incorrect physical address in BD (i.e. bug)
                         */
                        if (net_ratelimit())
                                printk(KERN_ERR
                                       "mal%d: system error, "
                                       "PLB (ESR = 0x%08x)\n",
                                       mal->index, esr);
                        return IRQ_HANDLED;
                }

                /* OPB error, it's probably buggy hardware or incorrect
                 * EBC setup
                 */
                if (net_ratelimit())
                        printk(KERN_ERR
                               "mal%d: system error, OPB (ESR = 0x%08x)\n",
                               mal->index, esr);
        }
        return IRQ_HANDLED;
}

static inline void mal_schedule_poll(struct mal_instance *mal)
{
        if (likely(netif_rx_schedule_prep(&mal->poll_dev))) {
                MAL_DBG2(mal, "schedule_poll" NL);
                mal_disable_eob_irq(mal);
                __netif_rx_schedule(&mal->poll_dev);
        } else
                MAL_DBG2(mal, "already in poll" NL);
}

static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

        MAL_DBG2(mal, "txeob %08x" NL, r);

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_TXEOBISR, r);

        return IRQ_HANDLED;
}

static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

        MAL_DBG2(mal, "rxeob %08x" NL, r);

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_RXEOBISR, r);

        return IRQ_HANDLED;
}

static irqreturn_t mal_txde(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
        set_mal_dcrn(mal, MAL_TXDEIR, deir);

        MAL_DBG(mal, "txde %08x" NL, deir);

        if (net_ratelimit())
                printk(KERN_ERR
                       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
                       mal->index, deir);

        return IRQ_HANDLED;
}

static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;
        struct list_head *l;

        u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

        MAL_DBG(mal, "rxde %08x" NL, deir);

        list_for_each(l, &mal->list) {
                struct mal_commac *mc = list_entry(l, struct mal_commac, list);
                if (deir & mc->rx_chan_mask) {
                        set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
                        mc->ops->rxde(mc->dev);
                }
        }

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_RXDEIR, deir);

        return IRQ_HANDLED;
}

void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
        /* Spinlock-type semantics: only one caller may disable poll at a time */
        while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
                msleep(1);

        /* Synchronize with the MAL NAPI poller. */
        while (test_bit(__LINK_STATE_RX_SCHED, &mal->poll_dev.state))
                msleep(1);
}

void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
        smp_wmb();
        clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

        // XXX might want to kick a poll now...
}

static int mal_poll(struct net_device *ndev, int *budget)
{
        struct mal_instance *mal = netdev_priv(ndev);
        struct list_head *l;
        int rx_work_limit = min(ndev->quota, *budget), received = 0, done;
        unsigned long flags;

        MAL_DBG2(mal, "poll(%d) %d ->" NL, *budget,
                 rx_work_limit);
 again:
        /* Process TX skbs */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                mc->ops->poll_tx(mc->dev);
        }

        /* Process RX skbs.
         *
         * We _might_ need something smarter here to enforce polling
         * fairness.
         */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                int n;
                if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
                        continue;
                n = mc->ops->poll_rx(mc->dev, rx_work_limit);
                if (n) {
                        received += n;
                        rx_work_limit -= n;
                        if (rx_work_limit <= 0) {
                                done = 0;
                                // XXX What if this is the last one ?
                                goto more_work;
                        }
                }
        }

        /* We need to disable IRQs to protect from RXDE IRQ here */
        spin_lock_irqsave(&mal->lock, flags);
        __netif_rx_complete(ndev);
        mal_enable_eob_irq(mal);
        spin_unlock_irqrestore(&mal->lock, flags);

        done = 1;

        /* Check for "rotting" packet(s): frames that arrived while EOB
         * interrupts were still disabled and would otherwise sit
         * unnoticed until the next interrupt */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
                        continue;
                if (unlikely(mc->ops->peek_rx(mc->dev) ||
                             test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
                        MAL_DBG2(mal, "rotting packet" NL);
                        if (netif_rx_reschedule(ndev, received))
                                mal_disable_eob_irq(mal);
                        else
                                MAL_DBG2(mal, "already in poll list" NL);

                        if (rx_work_limit > 0)
                                goto again;
                        else
                                goto more_work;
                }
                mc->ops->poll_tx(mc->dev);
        }

 more_work:
        ndev->quota -= received;
        *budget -= received;

        MAL_DBG2(mal, "poll() %d <- %d" NL, *budget,
                 done ? 0 : 1);

        return done ? 0 : 1;
}

static void mal_reset(struct mal_instance *mal)
{
        int n = 10;

        MAL_DBG(mal, "reset" NL);

        set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

        /* Wait for reset to complete (1 system clock) */
        while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
                --n;

        if (unlikely(!n))
                printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
}

int mal_get_regs_len(struct mal_instance *mal)
{
        return sizeof(struct emac_ethtool_regs_subhdr) +
                sizeof(struct mal_regs);
}

void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
        struct emac_ethtool_regs_subhdr *hdr = buf;
        struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
        int i;

        hdr->version = mal->version;
        hdr->index = mal->index;

        regs->tx_count = mal->num_tx_chans;
        regs->rx_count = mal->num_rx_chans;

        regs->cfg = get_mal_dcrn(mal, MAL_CFG);
        regs->esr = get_mal_dcrn(mal, MAL_ESR);
        regs->ier = get_mal_dcrn(mal, MAL_IER);
        regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
        regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
        regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
        regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
        regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
        regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
        regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
        regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

        for (i = 0; i < regs->tx_count; ++i)
                regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

        for (i = 0; i < regs->rx_count; ++i) {
                regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
                regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
        }
        return regs + 1;
}

static int __devinit mal_probe(struct of_device *ofdev,
                               const struct of_device_id *match)
{
        struct mal_instance *mal;
        int err = 0, i, bd_size;
        int index = mal_count++;
        const u32 *prop;
        u32 cfg;

        mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
        if (!mal) {
                printk(KERN_ERR
                       "mal%d: out of memory allocating MAL structure!\n",
                       index);
                return -ENOMEM;
        }
        mal->index = index;
        mal->ofdev = ofdev;
        mal->version = of_device_is_compatible(ofdev->node, "ibm,mcmal2") ? 2 : 1;

        MAL_DBG(mal, "probe" NL);

        prop = of_get_property(ofdev->node, "num-tx-chans", NULL);
        if (prop == NULL) {
                printk(KERN_ERR
                       "mal%d: can't find MAL num-tx-chans property!\n",
                       index);
                err = -ENODEV;
                goto fail;
        }
        mal->num_tx_chans = prop[0];

        prop = of_get_property(ofdev->node, "num-rx-chans", NULL);
        if (prop == NULL) {
                printk(KERN_ERR
                       "mal%d: can't find MAL num-rx-chans property!\n",
                       index);
                err = -ENODEV;
                goto fail;
        }
        mal->num_rx_chans = prop[0];

        mal->dcr_base = dcr_resource_start(ofdev->node, 0);
        if (mal->dcr_base == 0) {
                printk(KERN_ERR
                       "mal%d: can't find DCR resource!\n", index);
                err = -ENODEV;
                goto fail;
        }
        mal->dcr_host = dcr_map(ofdev->node, mal->dcr_base, 0x100);
        if (!DCR_MAP_OK(mal->dcr_host)) {
                printk(KERN_ERR
                       "mal%d: failed to map DCRs !\n", index);
                err = -ENODEV;
                goto fail;
        }

        mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0);
        mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1);
        mal->serr_irq = irq_of_parse_and_map(ofdev->node, 2);
        mal->txde_irq = irq_of_parse_and_map(ofdev->node, 3);
        mal->rxde_irq = irq_of_parse_and_map(ofdev->node, 4);
        if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
            mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ ||
            mal->rxde_irq == NO_IRQ) {
                printk(KERN_ERR
                       "mal%d: failed to map interrupts !\n", index);
                err = -ENODEV;
                goto fail_unmap;
        }

        INIT_LIST_HEAD(&mal->poll_list);
        set_bit(__LINK_STATE_START, &mal->poll_dev.state);
        mal->poll_dev.weight = CONFIG_IBM_NEW_EMAC_POLL_WEIGHT;
        mal->poll_dev.poll = mal_poll;
        mal->poll_dev.priv = mal;
        atomic_set(&mal->poll_dev.refcnt, 1);
        INIT_LIST_HEAD(&mal->list);
        spin_lock_init(&mal->lock);

        /* Load power-on reset defaults */
        mal_reset(mal);

        /* Set the MAL configuration register */
        cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
        cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

        /* Current Axon is not happy with priority being non-0, it can
         * deadlock, fix it up here
         */
        if (of_device_is_compatible(ofdev->node, "ibm,mcmal-axon"))
                cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

        /* Apply configuration */
        set_mal_dcrn(mal, MAL_CFG, cfg);

        /* Allocate space for BD rings */
        BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
        BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

        bd_size = sizeof(struct mal_descriptor) *
                (NUM_TX_BUFF * mal->num_tx_chans +
                 NUM_RX_BUFF * mal->num_rx_chans);
        mal->bd_virt =
                dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
                                   GFP_KERNEL);
        if (mal->bd_virt == NULL) {
                printk(KERN_ERR
                       "mal%d: out of memory allocating RX/TX descriptors!\n",
                       index);
                err = -ENOMEM;
                goto fail_unmap;
        }
        memset(mal->bd_virt, 0, bd_size);

        for (i = 0; i < mal->num_tx_chans; ++i)
                set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
                             sizeof(struct mal_descriptor) *
                             mal_tx_bd_offset(mal, i));

        for (i = 0; i < mal->num_rx_chans; ++i)
                set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
                             sizeof(struct mal_descriptor) *
                             mal_rx_bd_offset(mal, i));

        err = request_irq(mal->serr_irq, mal_serr, 0, "MAL SERR", mal);
        if (err)
                goto fail2;
        err = request_irq(mal->txde_irq, mal_txde, 0, "MAL TX DE", mal);
        if (err)
                goto fail3;
        err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
        if (err)
                goto fail4;
        err = request_irq(mal->rxde_irq, mal_rxde, 0, "MAL RX DE", mal);
        if (err)
                goto fail5;
        err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
        if (err)
                goto fail6;

        /* Enable all MAL SERR interrupt sources */
        if (mal->version == 2)
                set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
        else
                set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);

        /* Enable EOB interrupt */
        mal_enable_eob_irq(mal);

        printk(KERN_INFO
               "MAL v%d %s, %d TX channels, %d RX channels\n",
               mal->version, ofdev->node->full_name,
               mal->num_tx_chans, mal->num_rx_chans);

        /* Advertise this instance to the rest of the world */
        wmb();
        dev_set_drvdata(&ofdev->dev, mal);

        mal_dbg_register(mal);

        return 0;

 fail6:
        free_irq(mal->rxde_irq, mal);
 fail5:
        free_irq(mal->txeob_irq, mal);
 fail4:
        free_irq(mal->txde_irq, mal);
 fail3:
        free_irq(mal->serr_irq, mal);
 fail2:
        dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_unmap:
        dcr_unmap(mal->dcr_host, mal->dcr_base, 0x100);
 fail:
        kfree(mal);

        return err;
}

static int __devexit mal_remove(struct of_device *ofdev)
{
        struct mal_instance *mal = dev_get_drvdata(&ofdev->dev);

        MAL_DBG(mal, "remove" NL);

        /* Synchronize with scheduled polling,
         * stolen from net/core/dev.c:dev_close()
         */
        clear_bit(__LINK_STATE_START, &mal->poll_dev.state);
        netif_poll_disable(&mal->poll_dev);

        if (!list_empty(&mal->list)) {
                /* This is *very* bad */
                printk(KERN_EMERG
                       "mal%d: commac list is not empty on remove!\n",
                       mal->index);
                WARN_ON(1);
        }

        dev_set_drvdata(&ofdev->dev, NULL);

        free_irq(mal->serr_irq, mal);
        free_irq(mal->txde_irq, mal);
        free_irq(mal->txeob_irq, mal);
        free_irq(mal->rxde_irq, mal);
        free_irq(mal->rxeob_irq, mal);

        mal_reset(mal);

        mal_dbg_unregister(mal);

        dma_free_coherent(&ofdev->dev,
                          sizeof(struct mal_descriptor) *
                          (NUM_TX_BUFF * mal->num_tx_chans +
                           NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
                          mal->bd_dma);
        kfree(mal);

        return 0;
}

static struct of_device_id mal_platform_match[] =
{
        {
                .compatible     = "ibm,mcmal",
        },
        {
                .compatible     = "ibm,mcmal2",
        },
        /* Backward compat */
        {
                .type           = "mcmal-dma",
                .compatible     = "ibm,mcmal",
        },
        {
                .type           = "mcmal-dma",
                .compatible     = "ibm,mcmal2",
        },
        {},
};

static struct of_platform_driver mal_of_driver = {
        .name = "mcmal",
        .match_table = mal_platform_match,

        .probe = mal_probe,
        .remove = mal_remove,
};

int __init mal_init(void)
{
        return of_register_platform_driver(&mal_of_driver);
}

void mal_exit(void)
{
        of_unregister_platform_driver(&mal_of_driver);
}