Diffstat (limited to 'drivers/net/sgiseeq.c')
 drivers/net/sgiseeq.c | 773 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 773 insertions(+), 0 deletions(-)
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
new file mode 100644
index 000000000000..9bc3b1c0dd6a
--- /dev/null
+++ b/drivers/net/sgiseeq.c
@@ -0,0 +1,773 @@
/*
 * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/route.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgialib.h>

#include "sgiseeq.h"

static char *version = "sgiseeq.c: David S. Miller (dm@engr.sgi.com)\n";

static char *sgiseeqstr = "SGI Seeq8003";

/*
 * If you want speed, you do something silly, it always has worked for me. So,
 * with that in mind, I've decided to make this driver look completely like a
 * stupid Lance from a driver architecture perspective. The only difference is
 * that here our "ring buffer" looks and acts like a real Lance one does, but
 * is laid out the way the HPC DMA and the Seeq want it. You'd be surprised
 * how a stupid idea like this can pay off in performance, not to mention
 * making this driver 2,000 times easier to write. ;-)
 */

/* Tune these if we tend to run out often etc. */
#define SEEQ_RX_BUFFERS 16
#define SEEQ_TX_BUFFERS 16

#define PKT_BUF_SZ 1584

#define NEXT_RX(i) (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
#define NEXT_TX(i) (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_RX(i) (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
#define PREV_TX(i) (((i) - 1) & (SEEQ_TX_BUFFERS - 1))

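/*
 * The ring index macros above rely on SEEQ_RX_BUFFERS and SEEQ_TX_BUFFERS
 * being powers of two, so advancing and retreating reduce to a mask.
 * TX_BUFFS_AVAIL deliberately reports one slot less than the ring holds;
 * keeping one descriptor unused is what lets tx_old == tx_new mean
 * "empty" rather than "full".
 */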
#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
			    sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
			    sp->tx_old - sp->tx_new - 1)

#define DEBUG

struct sgiseeq_rx_desc {
	volatile struct hpc_dma_desc rdma;
	volatile signed int buf_vaddr;
};

struct sgiseeq_tx_desc {
	volatile struct hpc_dma_desc tdma;
	volatile signed int buf_vaddr;
};

/*
 * Warning: This structure is laid out in a certain way because HPC dma
 * descriptors must be 8-byte aligned.  So don't touch this without
 * some care.
 */
struct sgiseeq_init_block { /* Note the name ;-) */
	struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
	struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
};

struct sgiseeq_private {
	struct sgiseeq_init_block *srings;

	/* Ptrs to the descriptors in uncached space. */
	struct sgiseeq_rx_desc *rx_desc;
	struct sgiseeq_tx_desc *tx_desc;

	char *name;
	struct hpc3_ethregs *hregs;
	struct sgiseeq_regs *sregs;

	/* Ring entry counters. */
	unsigned int rx_new, tx_new;
	unsigned int rx_old, tx_old;

	int is_edlc;
	unsigned char control;
	unsigned char mode;

	struct net_device_stats stats;

	struct net_device *next_module;
	spinlock_t tx_lock;
};

/* A list of all installed seeq devices, for removing the driver module. */
static struct net_device *root_sgiseeq_dev;

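/*
 * Pulse the HPC3 receive reset register: CRESET resets the channel and
 * CLRIRQ clears any pending ethernet interrupt.
 */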
static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
	hregs->rx_reset = HPC3_ERXRST_CRESET | HPC3_ERXRST_CLRIRQ;
	udelay(20);
	hregs->rx_reset = 0;
}

static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
				       struct sgiseeq_regs *sregs)
{
	hregs->rx_ctrl = hregs->tx_ctrl = 0;
	hpc3_eth_reset(hregs);
}

#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
		       SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)

static inline void seeq_go(struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs,
			   struct sgiseeq_regs *sregs)
{
	sregs->rstat = sp->mode | RSTAT_GO_BITS;
	hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
}

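/*
 * Program the station address.  SEEQ_TCMD_RB0 selects register bank 0 of
 * the chip, where the six address bytes are then written one at a time.
 */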
static inline void __sgiseeq_set_mac_address(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int i;

	sregs->tstat = SEEQ_TCMD_RB0;
	for (i = 0; i < 6; i++)
		sregs->rw.eth_addr[i] = dev->dev_addr[i];
}

static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	spin_lock_irq(&sp->tx_lock);
	__sgiseeq_set_mac_address(dev);
	spin_unlock_irq(&sp->tx_lock);

	return 0;
}

#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
#define RCNTCFG_INIT  (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))

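/*
 * Ring buffers are allocated once and then reused.  buf_vaddr holds a
 * KSEG1 (uncached) alias of each buffer so the CPU never has to flush
 * caches to stay coherent with the HPC, while pbuf holds the physical
 * address the DMA engine needs.
 */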
static int seeq_init_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	netif_stop_queue(dev);
	sp->rx_new = sp->tx_new = 0;
	sp->rx_old = sp->tx_old = 0;

	__sgiseeq_set_mac_address(dev);

	/* Setup tx ring. */
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		if (!sp->tx_desc[i].tdma.pbuf) {
			unsigned long buffer;

			buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
			if (!buffer)
				return -ENOMEM;
			sp->tx_desc[i].buf_vaddr = CKSEG1ADDR(buffer);
			sp->tx_desc[i].tdma.pbuf = CPHYSADDR(buffer);
		}
		sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (!sp->rx_desc[i].rdma.pbuf) {
			unsigned long buffer;

			buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
			if (!buffer)
				return -ENOMEM;
			sp->rx_desc[i].buf_vaddr = CKSEG1ADDR(buffer);
			sp->rx_desc[i].rdma.pbuf = CPHYSADDR(buffer);
		}
		sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
	}
	sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
	return 0;
}

#ifdef DEBUG
static struct sgiseeq_private *gpriv;
static struct net_device *gdev;

void sgiseeq_dump_rings(void)
{
	static int once;
	struct sgiseeq_rx_desc *r = gpriv->rx_desc;
	struct sgiseeq_tx_desc *t = gpriv->tx_desc;
	struct hpc3_ethregs *hregs = gpriv->hregs;
	int i;

	if (once)
		return;
	once++;
	printk("RING DUMP:\n");
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
	}
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
	}
	printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
	       gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
	printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
	       hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
	printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
	       hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
}
#endif

#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)
#define RDMACFG_INIT (HPC3_ERXDCFG_FRXDC | HPC3_ERXDCFG_FEOP | HPC3_ERXDCFG_FIRQ)

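/*
 * Full chip bring-up: reset the HPC and the Seeq, rebuild both rings,
 * program the interrupt sources (the EDLC variant has extra control
 * registers), point the HPC at the descriptor chains and start the
 * receiver.
 */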
static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
		     struct sgiseeq_regs *sregs)
{
	struct hpc3_ethregs *hregs = sp->hregs;
	int err;

	reset_hpc3_and_seeq(hregs, sregs);
	err = seeq_init_ring(dev);
	if (err)
		return err;

	/* Setup to field the proper interrupt types. */
	if (sp->is_edlc) {
		sregs->tstat = TSTAT_INIT_EDLC;
		sregs->rw.wregs.control = sp->control;
		sregs->rw.wregs.frame_gap = 0;
	} else {
		sregs->tstat = TSTAT_INIT_SEEQ;
	}

	hregs->rx_dconfig |= RDMACFG_INIT;

	hregs->rx_ndptr = CPHYSADDR(sp->rx_desc);
	hregs->tx_ndptr = CPHYSADDR(sp->tx_desc);

	seeq_go(sp, hregs, sregs);
	return 0;
}

static inline void record_rx_errors(struct sgiseeq_private *sp,
				    unsigned char status)
{
	if (status & SEEQ_RSTAT_OVERF ||
	    status & SEEQ_RSTAT_SFRAME)
		sp->stats.rx_over_errors++;
	if (status & SEEQ_RSTAT_CERROR)
		sp->stats.rx_crc_errors++;
	if (status & SEEQ_RSTAT_DERROR)
		sp->stats.rx_frame_errors++;
	if (status & SEEQ_RSTAT_REOF)
		sp->stats.rx_errors++;
}

static inline void rx_maybe_restart(struct sgiseeq_private *sp,
				    struct hpc3_ethregs *hregs,
				    struct sgiseeq_regs *sregs)
{
	if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
		hregs->rx_ndptr = CPHYSADDR(sp->rx_desc + sp->rx_new);
		seeq_go(sp, hregs, sregs);
	}
}

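/*
 * Walk receive descriptors starting at rx_new until we hit one the HPC
 * still owns (HPCDMA_OWN set).  Everything before that has been filled
 * in by the DMA engine and is ready to be serviced.
 */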
#define for_each_rx(rd, sp) for ((rd) = &(sp)->rx_desc[(sp)->rx_new]; \
				 !((rd)->rdma.cntinfo & HPCDMA_OWN); \
				 (rd) = &(sp)->rx_desc[(sp)->rx_new])

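/*
 * Received frames sit two bytes into the DMA buffer, and the byte just
 * past the frame data carries the Seeq's receive status for that packet.
 * Good frames are copied out of the uncached KSEG1 buffer into a fresh
 * skb before the descriptor is handed straight back to the ring.
 */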
static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = NULL;
	unsigned char pkt_status;
	unsigned char *pkt_pointer = NULL;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	for_each_rx(rd, sp) {
		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
		pkt_pointer = (unsigned char *)(long)rd->buf_vaddr;
		pkt_status = pkt_pointer[len + 2];

		if (pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK. */
			skb = dev_alloc_skb(len + 2);

			if (skb) {
				skb->dev = dev;
				skb_reserve(skb, 2);
				skb_put(skb, len);

				/* Copy out of kseg1 to avoid silly cache flush. */
				eth_copy_and_sum(skb, pkt_pointer + 2, len, 0);
				skb->protocol = eth_type_trans(skb, dev);

				/* We don't want to receive our own packets. */
				if (memcmp(eth_hdr(skb)->h_source, dev->dev_addr, ETH_ALEN)) {
					netif_rx(skb);
					dev->last_rx = jiffies;
					sp->stats.rx_packets++;
					sp->stats.rx_bytes += len;
				} else {
					/* Silently drop my own packets. */
					dev_kfree_skb_irq(skb);
				}
			} else {
				printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
				       dev->name);
				sp->stats.rx_dropped++;
			}
		} else {
			record_rx_errors(sp, pkt_status);
		}

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
	}
	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	rx_maybe_restart(sp, hregs, sregs);
}

static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
					     struct sgiseeq_regs *sregs)
{
	if (sp->is_edlc) {
		sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
		sregs->rw.wregs.control = sp->control;
	}
}

static inline void kick_tx(struct sgiseeq_tx_desc *td,
			   struct hpc3_ethregs *hregs)
{
	/* If the HPC isn't doing anything, and there are more packets
	 * with ETXD cleared and XIU set, we must make very certain
	 * that we restart the HPC or we risk locking up the
	 * adapter.  The following code is only safe if the HPCDMA
	 * is not active!
	 */
	while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
	       (HPCDMA_XIU | HPCDMA_ETXD))
		td = (struct sgiseeq_tx_desc *)(long) CKSEG1ADDR(td->tdma.pnext);
	if (td->tdma.cntinfo & HPCDMA_XIU) {
		hregs->tx_ndptr = CPHYSADDR(td);
		hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
	}
}

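/*
 * Reap completed transmit descriptors between tx_old and tx_new: anything
 * with ETXD set has gone out on the wire and can be recycled.  If we find
 * a submitted descriptor that has not been transmitted while the HPC has
 * gone idle, restart the engine at that descriptor.
 */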
static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_tx_desc *td;
	unsigned long status = hregs->tx_ctrl;
	int j;

	tx_maybe_reset_collisions(sp, sregs);

	if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
		/* Oops, HPC detected some sort of error. */
		if (status & SEEQ_TSTAT_R16)
			sp->stats.tx_aborted_errors++;
		if (status & SEEQ_TSTAT_UFLOW)
			sp->stats.tx_fifo_errors++;
		if (status & SEEQ_TSTAT_LCLS)
			sp->stats.collisions++;
	}

	/* Ack 'em... */
	for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
		td = &sp->tx_desc[j];

		if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
			break;
		if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
			if (!(status & HPC3_ETXCTRL_ACTIVE)) {
				hregs->tx_ndptr = CPHYSADDR(td);
				hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
			}
			break;
		}
		sp->stats.tx_packets++;
		sp->tx_old = NEXT_TX(sp->tx_old);
		td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
		td->tdma.cntinfo |= HPCDMA_EOX;
	}
}

static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	struct sgiseeq_regs *sregs = sp->sregs;

	spin_lock(&sp->tx_lock);

	/* Ack the IRQ and set software state. */
	hregs->rx_reset = HPC3_ERXRST_CLRIRQ;

	/* Always check for received packets. */
	sgiseeq_rx(dev, sp, hregs, sregs);

	/* Only check for tx acks if we have something queued. */
	if (sp->tx_old != sp->tx_new)
		sgiseeq_tx(dev, sp, hregs, sregs);

	if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev))
		netif_wake_queue(dev);
	spin_unlock(&sp->tx_lock);

	return IRQ_HANDLED;
}

static int sgiseeq_open(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;
	int err;

	if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
		printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
		return -EAGAIN;
	}

	err = init_seeq(dev, sp, sregs);
	if (err)
		goto out_free_irq;

	netif_start_queue(dev);

	return 0;

out_free_irq:
	free_irq(irq, dev);

	return err;
}

static int sgiseeq_close(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;

	netif_stop_queue(dev);

	/* Shutdown the Seeq. */
	reset_hpc3_and_seeq(sp->hregs, sregs);

	return 0;
}

static inline int sgiseeq_reset(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int err;

	err = init_seeq(dev, sp, sregs);
	if (err)
		return err;

	dev->trans_start = jiffies;
	netif_wake_queue(dev);

	return 0;
}

#ifdef DEBUG
void sgiseeq_my_reset(void)
{
	printk("RESET!\n");
	sgiseeq_reset(gdev);
}
#endif

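/*
 * Transmit path.  The packet is copied into the descriptor's pre-allocated
 * uncached buffer (no per-packet DMA mapping), padded out to ETH_ZLEN if it
 * is short, then the descriptor is linked into the chain and the HPC is
 * kicked if it has gone idle.
 */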
static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	unsigned long flags;
	struct sgiseeq_tx_desc *td;
	int skblen, len, entry;

	spin_lock_irqsave(&sp->tx_lock, flags);

	/* Setup... */
	skblen = skb->len;
	len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
	sp->stats.tx_bytes += len;
	entry = sp->tx_new;
	td = &sp->tx_desc[entry];

	/* Create entry.  There are so many races with adding a new
	 * descriptor to the chain:
	 * 1) Assume that the HPC is off processing a DMA chain while
	 *    we are changing all of the following.
	 * 2) Do not allow the HPC to look at a new descriptor until
	 *    we have completely set up its state.  This means, do
	 *    not clear HPCDMA_EOX in the current last descriptor
	 *    until the one we are adding looks consistent and could
	 *    be processed right now.
	 * 3) The tx interrupt code must notice when we've added a new
	 *    entry and the HPC got to the end of the chain before we
	 *    added this new entry and restarted it.
	 */
	memcpy((char *)(long)td->buf_vaddr, skb->data, skblen);
	if (len != skblen)
		memset((char *)(long)td->buf_vaddr + skblen, 0, len - skblen);
	td->tdma.cntinfo = (len & HPCDMA_BCNT) |
	                   HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
	if (sp->tx_old != sp->tx_new) {
		struct sgiseeq_tx_desc *backend;

		backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
		backend->tdma.cntinfo &= ~HPCDMA_EOX;
	}
	sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */

	/* Maybe kick the HPC back into motion. */
	if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
		kick_tx(&sp->tx_desc[sp->tx_old], hregs);

	dev->trans_start = jiffies;
	dev_kfree_skb(skb);

	if (!TX_BUFFS_AVAIL(sp))
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return 0;
}

static void timeout(struct net_device *dev)
{
	printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
	sgiseeq_reset(dev);

	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

static struct net_device_stats *sgiseeq_get_stats(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);

	return &sp->stats;
}

static void sgiseeq_set_multicast(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	unsigned char oldmode = sp->mode;

	if (dev->flags & IFF_PROMISC)
		sp->mode = SEEQ_RCMD_RANY;
	else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count)
		sp->mode = SEEQ_RCMD_RBMCAST;
	else
		sp->mode = SEEQ_RCMD_RBCAST;

	/* XXX I know this sucks, but is there a better way to reprogram
	 * XXX the receiver?  At least, this shouldn't happen too often.
	 */

	if (oldmode != sp->mode)
		sgiseeq_reset(dev);
}

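/*
 * Link the descriptors of each ring into a circle through their physical
 * pnext addresses; the last entry points back at the first.  pbuf is
 * cleared so seeq_init_ring() knows it still has to allocate the buffer.
 */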
static inline void setup_tx_ring(struct sgiseeq_tx_desc *buf, int nbufs)
{
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].tdma.pnext = CPHYSADDR(buf + i + 1);
		buf[i].tdma.pbuf = 0;
		i++;
	}
	buf[i].tdma.pnext = CPHYSADDR(buf);
}

static inline void setup_rx_ring(struct sgiseeq_rx_desc *buf, int nbufs)
{
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].rdma.pnext = CPHYSADDR(buf + i + 1);
		buf[i].rdma.pbuf = 0;
		i++;
	}
	buf[i].rdma.pbuf = 0;
	buf[i].rdma.pnext = CPHYSADDR(buf);
}

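/* Round up to the next 16-byte boundary, which more than satisfies the
 * 8-byte alignment the HPC descriptors require (see the init block
 * comment above).
 */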
#define ALIGNED(x) ((((unsigned long)(x)) + 0xf) & ~(0xf))

static int sgiseeq_init(struct hpc3_regs *regs, int irq)
{
	struct sgiseeq_init_block *sr;
	struct sgiseeq_private *sp;
	struct net_device *dev;
	int err, i;

	dev = alloc_etherdev(sizeof(struct sgiseeq_private));
	if (!dev) {
		printk(KERN_ERR "Sgiseeq: Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out;
	}
	sp = netdev_priv(dev);

	/* Make private data page aligned. */
	sr = (struct sgiseeq_init_block *) get_zeroed_page(GFP_KERNEL);
	if (!sr) {
		printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	sp->srings = sr;

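	/*
	 * The board's ethernet address lives in the machine NVRAM at byte
	 * offset EADDR_NVOFS; ip22_nvram_read() returns 16-bit words, so
	 * three reads recover all six bytes.
	 */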
#define EADDR_NVOFS 250
	for (i = 0; i < 3; i++) {
		unsigned short tmp = ip22_nvram_read(EADDR_NVOFS / 2 + i);

		dev->dev_addr[2 * i] = tmp >> 8;
		dev->dev_addr[2 * i + 1] = tmp & 0xff;
	}

#ifdef DEBUG
	gpriv = sp;
	gdev = dev;
#endif
	sp->sregs = (struct sgiseeq_regs *) &hpc3c0->eth_ext[0];
	sp->hregs = &hpc3c0->ethregs;
	sp->name = sgiseeqstr;
	sp->mode = SEEQ_RCMD_RBCAST;

	sp->rx_desc = (struct sgiseeq_rx_desc *)
	              CKSEG1ADDR(ALIGNED(&sp->srings->rxvector[0]));
	dma_cache_wback_inv((unsigned long)&sp->srings->rxvector,
	                    sizeof(sp->srings->rxvector));
	sp->tx_desc = (struct sgiseeq_tx_desc *)
	              CKSEG1ADDR(ALIGNED(&sp->srings->txvector[0]));
	dma_cache_wback_inv((unsigned long)&sp->srings->txvector,
	                    sizeof(sp->srings->txvector));

	/* A couple calculations now, saves many cycles later. */
	setup_rx_ring(sp->rx_desc, SEEQ_RX_BUFFERS);
	setup_tx_ring(sp->tx_desc, SEEQ_TX_BUFFERS);

	/* Reset the chip. */
	hpc3_eth_reset(sp->hregs);

	sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
	if (sp->is_edlc)
		sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
			      SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
			      SEEQ_CTRL_ENCARR;

	dev->open = sgiseeq_open;
	dev->stop = sgiseeq_close;
	dev->hard_start_xmit = sgiseeq_start_xmit;
	dev->tx_timeout = timeout;
	dev->watchdog_timeo = (200 * HZ) / 1000;
	dev->get_stats = sgiseeq_get_stats;
	dev->set_multicast_list = sgiseeq_set_multicast;
	dev->set_mac_address = sgiseeq_set_mac_address;
	dev->irq = irq;

	if (register_netdev(dev)) {
		printk(KERN_ERR "Sgiseeq: Cannot register net device, "
		       "aborting.\n");
		err = -ENODEV;
		goto err_out_free_page;
	}

	printk(KERN_INFO "%s: SGI Seeq8003 ", dev->name);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');

	sp->next_module = root_sgiseeq_dev;
	root_sgiseeq_dev = dev;

	return 0;

err_out_free_page:
	free_page((unsigned long) sp->srings);
err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}

static int __init sgiseeq_probe(void)
{
	printk(version);

	/* On board adapter on 1st HPC is always present. */
	return sgiseeq_init(hpc3c0, SGI_ENET_IRQ);
}

static void __exit sgiseeq_exit(void)
{
	struct net_device *next, *dev;
	struct sgiseeq_private *sp;
	int irq;

	for (dev = root_sgiseeq_dev; dev; dev = next) {
		sp = netdev_priv(dev);
		next = sp->next_module;
		irq = dev->irq;
		unregister_netdev(dev);
		free_irq(irq, dev);
		free_page((unsigned long) sp->srings);
		free_netdev(dev);
	}
}

module_init(sgiseeq_probe);
module_exit(sgiseeq_exit);

MODULE_LICENSE("GPL");