aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/tokenring
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/tokenring')
-rw-r--r--drivers/net/tokenring/3c359.c1848
-rw-r--r--drivers/net/tokenring/3c359.h291
-rw-r--r--drivers/net/tokenring/Kconfig185
-rw-r--r--drivers/net/tokenring/Makefile15
-rw-r--r--drivers/net/tokenring/abyss.c469
-rw-r--r--drivers/net/tokenring/abyss.h58
-rw-r--r--drivers/net/tokenring/ibmtr.c1964
-rw-r--r--drivers/net/tokenring/lanstreamer.c1918
-rw-r--r--drivers/net/tokenring/lanstreamer.h343
-rw-r--r--drivers/net/tokenring/madgemc.c763
-rw-r--r--drivers/net/tokenring/madgemc.h70
-rw-r--r--drivers/net/tokenring/olympic.c1750
-rw-r--r--drivers/net/tokenring/olympic.h321
-rw-r--r--drivers/net/tokenring/proteon.c423
-rw-r--r--drivers/net/tokenring/skisa.c433
-rw-r--r--drivers/net/tokenring/smctr.c5718
-rw-r--r--drivers/net/tokenring/smctr.h1585
-rw-r--r--drivers/net/tokenring/tms380tr.c2352
-rw-r--r--drivers/net/tokenring/tms380tr.h1141
-rw-r--r--drivers/net/tokenring/tmspci.c249
20 files changed, 21896 insertions, 0 deletions
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
new file mode 100644
index 00000000000..b6162fe2348
--- /dev/null
+++ b/drivers/net/tokenring/3c359.c
@@ -0,0 +1,1848 @@
1/*
2 * 3c359.c (c) 2000 Mike Phillips (mikep@linuxtr.net) All Rights Reserved
3 *
4 * Linux driver for 3Com 3c359 Tokenlink Velocity XL PCI NIC
5 *
6 * Base Driver Olympic:
7 * Written 1999 Peter De Schrijver & Mike Phillips
8 *
9 * This software may be used and distributed according to the terms
10 * of the GNU General Public License, incorporated herein by reference.
11 *
12 * 7/17/00 - Clean up, version number 0.9.0. Ready to release to the world.
13 *
14 * 2/16/01 - Port up to kernel 2.4.2 ready for submission into the kernel.
15 * 3/05/01 - Last clean up stuff before submission.
16 * 2/15/01 - Finally, update to new pci api.
17 *
18 * To Do:
19 */
20
21/*
22 * Technical Card Details
23 *
24 * All access to data is done with 16/8 bit transfers. The transfer
25 * method really sucks. You can only read or write one location at a time.
26 *
27 * Also, the microcode for the card must be uploaded if the card does not have
28 * the flashrom on board. This is a 28K bloat in the driver when compiled
29 * as a module.
30 *
31 * Rx is very simple, status into a ring of descriptors, dma data transfer,
32 * interrupts to tell us when a packet is received.
33 *
34 * Tx is a little more interesting. Similar scenario, descriptor and dma data
35 * transfers, but we don't have to interrupt the card to tell it another packet
36 * is ready for transmission, we are just doing simple memory writes, not io or mmio
37 * writes. The card can be set up to simply poll on the next
38 * descriptor pointer and when this value is non-zero will automatically download
39 * the next packet. The card then interrupts us when the packet is done.
40 *
41 */
42
43#define XL_DEBUG 0
44
45#include <linux/jiffies.h>
46#include <linux/module.h>
47#include <linux/kernel.h>
48#include <linux/errno.h>
49#include <linux/timer.h>
50#include <linux/in.h>
51#include <linux/ioport.h>
52#include <linux/string.h>
53#include <linux/proc_fs.h>
54#include <linux/ptrace.h>
55#include <linux/skbuff.h>
56#include <linux/interrupt.h>
57#include <linux/delay.h>
58#include <linux/netdevice.h>
59#include <linux/trdevice.h>
60#include <linux/stddef.h>
61#include <linux/init.h>
62#include <linux/pci.h>
63#include <linux/spinlock.h>
64#include <linux/bitops.h>
65#include <linux/firmware.h>
66#include <linux/slab.h>
67
68#include <net/checksum.h>
69
70#include <asm/io.h>
71#include <asm/system.h>
72
73#include "3c359.h"
74
75static char version[] __devinitdata =
76"3c359.c v1.2.0 2/17/01 - Mike Phillips (mikep@linuxtr.net)" ;
77
78#define FW_NAME "3com/3C359.bin"
79MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
80MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver\n") ;
81MODULE_FIRMWARE(FW_NAME);
82
83/* Module parameters */
84
85/* Ring Speed 0,4,16
86 * 0 = Autosense
87 * 4,16 = Selected speed only, no autosense
88 * This allows the card to be the first on the ring
89 * and become the active monitor.
90 *
91 * WARNING: Some hubs will allow you to insert
92 * at the wrong speed.
93 *
94 * The adapter will _not_ fail to open if there are no
95 * active monitors on the ring, it will simply open up in
96 * its last known ringspeed if no ringspeed is specified.
97 */
98
99static int ringspeed[XL_MAX_ADAPTERS] = {0,} ;
100
101module_param_array(ringspeed, int, NULL, 0);
102MODULE_PARM_DESC(ringspeed,"3c359: Ringspeed selection - 4,16 or 0") ;
103
104/* Packet buffer size */
105
106static int pkt_buf_sz[XL_MAX_ADAPTERS] = {0,} ;
107
108module_param_array(pkt_buf_sz, int, NULL, 0) ;
109MODULE_PARM_DESC(pkt_buf_sz,"3c359: Initial buffer size") ;
110/* Message Level */
111
112static int message_level[XL_MAX_ADAPTERS] = {0,} ;
113
114module_param_array(message_level, int, NULL, 0) ;
115MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ;
116/*
117 * This is a real nasty way of doing this, but otherwise you
118 * will be stuck with 1555 lines of hex #'s in the code.
119 */
120
121static DEFINE_PCI_DEVICE_TABLE(xl_pci_tbl) =
122{
123 {PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, },
124 { } /* terminate list */
125};
126MODULE_DEVICE_TABLE(pci,xl_pci_tbl) ;
127
128static int xl_init(struct net_device *dev);
129static int xl_open(struct net_device *dev);
130static int xl_open_hw(struct net_device *dev) ;
131static int xl_hw_reset(struct net_device *dev);
132static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev);
133static void xl_dn_comp(struct net_device *dev);
134static int xl_close(struct net_device *dev);
135static void xl_set_rx_mode(struct net_device *dev);
136static irqreturn_t xl_interrupt(int irq, void *dev_id);
137static int xl_set_mac_address(struct net_device *dev, void *addr) ;
138static void xl_arb_cmd(struct net_device *dev);
139static void xl_asb_cmd(struct net_device *dev) ;
140static void xl_srb_cmd(struct net_device *dev, int srb_cmd) ;
141static void xl_wait_misr_flags(struct net_device *dev) ;
142static int xl_change_mtu(struct net_device *dev, int mtu);
143static void xl_srb_bh(struct net_device *dev) ;
144static void xl_asb_bh(struct net_device *dev) ;
145static void xl_reset(struct net_device *dev) ;
146static void xl_freemem(struct net_device *dev) ;
147
148
149/* EEProm Access Functions */
150static u16 xl_ee_read(struct net_device *dev, int ee_addr) ;
151static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value) ;
152
153/* Debugging functions */
154#if XL_DEBUG
155static void print_tx_state(struct net_device *dev) ;
156static void print_rx_state(struct net_device *dev) ;
157
158static void print_tx_state(struct net_device *dev)
159{
160
161 struct xl_private *xl_priv = netdev_priv(dev);
162 struct xl_tx_desc *txd ;
163 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
164 int i ;
165
166 printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d\n",xl_priv->tx_ring_head,
167 xl_priv->tx_ring_tail, xl_priv->free_ring_entries) ;
168 printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len\n");
169 for (i = 0; i < 16; i++) {
170 txd = &(xl_priv->xl_tx_ring[i]) ;
171 printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(txd),
172 txd->framestartheader, txd->dnnextptr, txd->buffer, txd->buffer_length ) ;
173 }
174
175 printk("DNLISTPTR = %04x\n", readl(xl_mmio + MMIO_DNLISTPTR) );
176
177 printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL) );
178 printk("Queue status = %0x\n",netif_running(dev) ) ;
179}
180
181static void print_rx_state(struct net_device *dev)
182{
183
184 struct xl_private *xl_priv = netdev_priv(dev);
185 struct xl_rx_desc *rxd ;
186 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
187 int i ;
188
189 printk("rx_ring_tail: %d\n", xl_priv->rx_ring_tail);
190 printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len\n");
191 for (i = 0; i < 16; i++) {
192 /* rxd = (struct xl_rx_desc *)xl_priv->rx_ring_dma_addr + (i * sizeof(struct xl_rx_desc)) ; */
193 rxd = &(xl_priv->xl_rx_ring[i]) ;
194 printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(rxd),
195 rxd->framestatus, rxd->upnextptr, rxd->upfragaddr, rxd->upfraglen ) ;
196 }
197
198 printk("UPLISTPTR = %04x\n", readl(xl_mmio + MMIO_UPLISTPTR));
199
200 printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL));
201 printk("Queue status = %0x\n",netif_running(dev));
202}
203#endif
204
205/*
206 * Read values from the on-board EEProm. This looks very strange
207 * but you have to wait for the EEProm to get/set the value before
208 * passing/getting the next value from the nic. As with all requests
209 * on this nic it has to be done in two stages, a) tell the nic which
210 * memory address you want to access and b) pass/get the value from the nic.
211 * With the EEProm, you have to wait before and between access a) and b).
212 * As this is only read at initialization time and the wait period is very
213 * small we shouldn't have to worry about scheduling issues.
214 */
215
/*
 * xl_ee_read - read one 16-bit word from the on-board EEPROM.
 * @dev:     the net device whose adapter is accessed
 * @ee_addr: EEPROM word address to read
 *
 * Each access is a two-step indirect sequence through the MAC access
 * window (MMIO_MAC_ACCESS_CMD selects a register, MMIO_MACDATA carries
 * the data), with a busy-wait on EEBUSY before every step.  Note the
 * read request is deliberately issued twice below — presumably a quirk
 * of the EEPROM interface; see the comment block above this function.
 * NOTE(review): the EEBUSY polls have no timeout; a dead EEPROM would
 * hang here.  Only called at init/open time, per the comment above.
 */
static u16 xl_ee_read(struct net_device *dev, int ee_addr)
{
	struct xl_private *xl_priv = netdev_priv(dev);
	u8 __iomem *xl_mmio = xl_priv->xl_mmio ;

	/* Wait for EEProm to not be busy */
	writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;

	/* Tell EEProm what we want to do and where */
	writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writew(EEREAD + ee_addr, xl_mmio + MMIO_MACDATA) ;

	/* Wait for EEProm to not be busy */
	writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;

	/* Tell EEProm what we want to do and where (issued a second time) */
	writel(IO_WORD_WRITE | EECONTROL , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writew(EEREAD + ee_addr, xl_mmio + MMIO_MACDATA) ;

	/* Finally read the value from the EEProm */
	writel(IO_WORD_READ | EEDATA , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	return readw(xl_mmio + MMIO_MACDATA) ;
}
241
242/*
243 * Write values to the onboard eeprom. As with eeprom read you need to
244 * set which location to write, wait, value to write, wait, with the
245 * added twist of having to enable eeprom writes as well.
246 */
247
/*
 * xl_ee_write - write one 16-bit word to the on-board EEPROM.
 * @dev:      the net device whose adapter is accessed
 * @ee_addr:  EEPROM word address to write
 * @ee_value: value to store there
 *
 * Sequence (each step separated by an EEBUSY busy-wait, as for reads):
 * enable write/erase, load the value into EEDATA, then issue the
 * EEWRITE command, and finally wait for the write to complete.
 * NOTE(review): like xl_ee_read(), the EEBUSY polls have no timeout.
 */
static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value)
{
	struct xl_private *xl_priv = netdev_priv(dev);
	u8 __iomem *xl_mmio = xl_priv->xl_mmio ;

	/* Wait for EEProm to not be busy */
	writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;

	/* Enable write/erase */
	writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writew(EE_ENABLE_WRITE, xl_mmio + MMIO_MACDATA) ;

	/* Wait for EEProm to not be busy */
	writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;

	/* Put the value we want to write into EEDATA */
	writel(IO_WORD_WRITE | EEDATA, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writew(ee_value, xl_mmio + MMIO_MACDATA) ;

	/* Tell EEProm to write eevalue into ee_addr */
	writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writew(EEWRITE + ee_addr, xl_mmio + MMIO_MACDATA) ;

	/* Wait for EEProm to not be busy, to ensure write gets done */
	writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;

	return ;
}
279
/* net_device callbacks for the 3c359; installed on the device in xl_probe(). */
static const struct net_device_ops xl_netdev_ops = {
	.ndo_open		= xl_open,
	.ndo_stop		= xl_close,
	.ndo_start_xmit		= xl_xmit,
	.ndo_change_mtu		= xl_change_mtu,
	.ndo_set_multicast_list = xl_set_rx_mode,
	.ndo_set_mac_address	= xl_set_mac_address,
};
288
289static int __devinit xl_probe(struct pci_dev *pdev,
290 const struct pci_device_id *ent)
291{
292 struct net_device *dev ;
293 struct xl_private *xl_priv ;
294 static int card_no = -1 ;
295 int i ;
296
297 card_no++ ;
298
299 if (pci_enable_device(pdev)) {
300 return -ENODEV ;
301 }
302
303 pci_set_master(pdev);
304
305 if ((i = pci_request_regions(pdev,"3c359"))) {
306 return i ;
307 }
308
309 /*
310 * Allowing init_trdev to allocate the private data will align
311 * xl_private on a 32 bytes boundary which we need for the rx/tx
312 * descriptors
313 */
314
315 dev = alloc_trdev(sizeof(struct xl_private)) ;
316 if (!dev) {
317 pci_release_regions(pdev) ;
318 return -ENOMEM ;
319 }
320 xl_priv = netdev_priv(dev);
321
322#if XL_DEBUG
323 printk("pci_device: %p, dev:%p, dev->priv: %p, ba[0]: %10x, ba[1]:%10x\n",
324 pdev, dev, netdev_priv(dev), (unsigned int)pdev->resource[0].start, (unsigned int)pdev->resource[1].start);
325#endif
326
327 dev->irq=pdev->irq;
328 dev->base_addr=pci_resource_start(pdev,0) ;
329 xl_priv->xl_card_name = pci_name(pdev);
330 xl_priv->xl_mmio=ioremap(pci_resource_start(pdev,1), XL_IO_SPACE);
331 xl_priv->pdev = pdev ;
332
333 if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
334 xl_priv->pkt_buf_sz = PKT_BUF_SZ ;
335 else
336 xl_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
337
338 dev->mtu = xl_priv->pkt_buf_sz - TR_HLEN ;
339 xl_priv->xl_ring_speed = ringspeed[card_no] ;
340 xl_priv->xl_message_level = message_level[card_no] ;
341 xl_priv->xl_functional_addr[0] = xl_priv->xl_functional_addr[1] = xl_priv->xl_functional_addr[2] = xl_priv->xl_functional_addr[3] = 0 ;
342 xl_priv->xl_copy_all_options = 0 ;
343
344 if((i = xl_init(dev))) {
345 iounmap(xl_priv->xl_mmio) ;
346 free_netdev(dev) ;
347 pci_release_regions(pdev) ;
348 return i ;
349 }
350
351 dev->netdev_ops = &xl_netdev_ops;
352 SET_NETDEV_DEV(dev, &pdev->dev);
353
354 pci_set_drvdata(pdev,dev) ;
355 if ((i = register_netdev(dev))) {
356 printk(KERN_ERR "3C359, register netdev failed\n") ;
357 pci_set_drvdata(pdev,NULL) ;
358 iounmap(xl_priv->xl_mmio) ;
359 free_netdev(dev) ;
360 pci_release_regions(pdev) ;
361 return i ;
362 }
363
364 printk(KERN_INFO "3C359: %s registered as: %s\n",xl_priv->xl_card_name,dev->name) ;
365
366 return 0;
367}
368
369static int xl_init_firmware(struct xl_private *xl_priv)
370{
371 int err;
372
373 err = request_firmware(&xl_priv->fw, FW_NAME, &xl_priv->pdev->dev);
374 if (err) {
375 printk(KERN_ERR "Failed to load firmware \"%s\"\n", FW_NAME);
376 return err;
377 }
378
379 if (xl_priv->fw->size < 16) {
380 printk(KERN_ERR "Bogus length %zu in \"%s\"\n",
381 xl_priv->fw->size, FW_NAME);
382 release_firmware(xl_priv->fw);
383 err = -EINVAL;
384 }
385
386 return err;
387}
388
389static int __devinit xl_init(struct net_device *dev)
390{
391 struct xl_private *xl_priv = netdev_priv(dev);
392 int err;
393
394 printk(KERN_INFO "%s\n", version);
395 printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n",
396 xl_priv->xl_card_name, (unsigned int)dev->base_addr ,xl_priv->xl_mmio, dev->irq);
397
398 spin_lock_init(&xl_priv->xl_lock) ;
399
400 err = xl_init_firmware(xl_priv);
401 if (err == 0)
402 err = xl_hw_reset(dev);
403
404 return err;
405}
406
407
408/*
409 * Hardware reset. This needs to be a separate entity as we need to reset the card
410 * when we change the EEProm settings.
411 */
412
/*
 * xl_hw_reset - globally reset the adapter and upload microcode if needed.
 * @dev: the device to reset
 *
 * Issues GLOBAL_RESET, then — if the card's pmbar cpHold bit says there
 * is no usable flash image — copies xl_priv->fw into the card's shared
 * RAM ending at 0xFFFF, writes the start vector, and releases cpHold.
 * Afterwards it waits for the self-test SRB response and programs the
 * rx/tx threshold registers, finally recording the SRB offset in
 * xl_priv->srb.  Returns 0 on success, -EINVAL if no firmware is
 * loaded, -ENODEV if the card stops responding.  Called from xl_init()
 * and again from xl_open() after EEPROM changes.
 */
static int xl_hw_reset(struct net_device *dev)
{
	struct xl_private *xl_priv = netdev_priv(dev);
	u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
	unsigned long t ;
	u16 i ;
	u16 result_16 ;
	u8 result_8 ;
	u16 start ;
	int j ;

	/* Firmware must have been loaded by xl_init_firmware() first */
	if (xl_priv->fw == NULL)
		return -EINVAL;

	/*
	 * Reset the card. If the card has got the microcode on board, we have
	 * missed the initialization interrupt, so we must always do this.
	 */

	writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;

	/*
	 * Must wait for cmdInProgress bit (12) to clear before continuing with
	 * card configuration.
	 */

	t=jiffies;
	while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
		schedule();
		/* generous 40 s timeout before declaring the card dead */
		if (time_after(jiffies, t + 40 * HZ)) {
			printk(KERN_ERR "%s: 3COM 3C359 Velocity XL card not responding to global reset.\n", dev->name);
			return -ENODEV;
		}
	}

	/*
	 * Enable pmbar by setting bit in CPAttention
	 */

	writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	result_8 = readb(xl_mmio + MMIO_MACDATA) ;
	result_8 = result_8 | CPA_PMBARVIS ;
	writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(result_8, xl_mmio + MMIO_MACDATA) ;

	/*
	 * Read cpHold bit in pmbar, if cleared we have got Flashrom on board.
	 * If not, we need to upload the microcode to the card
	 */

	writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);

#if XL_DEBUG
	printk(KERN_INFO "Read from PMBAR = %04x\n", readw(xl_mmio + MMIO_MACDATA));
#endif

	if ( readw( (xl_mmio + MMIO_MACDATA)) & PMB_CPHOLD ) {

		/* Set PmBar, privateMemoryBase bits (8:2) to 0 */

		writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);
		result_16 = readw(xl_mmio + MMIO_MACDATA) ;
		result_16 = result_16 & ~((0x7F) << 2) ;
		writel( (IO_WORD_WRITE | PMBAR), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writew(result_16,xl_mmio + MMIO_MACDATA) ;

		/* Set CPAttention, memWrEn bit */

		writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		result_8 = readb(xl_mmio + MMIO_MACDATA) ;
		result_8 = result_8 | CPA_MEMWREN ;
		writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writeb(result_8, xl_mmio + MMIO_MACDATA) ;

		/*
		 * Now to write the microcode into the shared ram
		 * The microcode must finish at position 0xFFFF,
		 * so we must subtract to get the start position for the code
		 *
		 * Looks strange but ensures compiler only uses
		 * 16 bit unsigned int
		 */
		start = (0xFFFF - (xl_priv->fw->size) + 1) ;

		printk(KERN_INFO "3C359: Uploading Microcode: ");

		/* one byte at a time through the MAC access window */
		for (i = start, j = 0; j < xl_priv->fw->size; i++, j++) {
			writel(MEM_BYTE_WRITE | 0XD0000 | i,
			       xl_mmio + MMIO_MAC_ACCESS_CMD);
			writeb(xl_priv->fw->data[j], xl_mmio + MMIO_MACDATA);
			if (j % 1024 == 0)
				printk(".");
		}
		printk("\n") ;

		/* re-write the last 16 bytes of the image at 0xDFFF0
		 * (xl_init_firmware() guarantees size >= 16) */
		for (i = 0; i < 16; i++) {
			writel((MEM_BYTE_WRITE | 0xDFFF0) + i,
			       xl_mmio + MMIO_MAC_ACCESS_CMD);
			writeb(xl_priv->fw->data[xl_priv->fw->size - 16 + i],
			       xl_mmio + MMIO_MACDATA);
		}

		/*
		 * Have to write the start address of the upload to FFF4, but
		 * the address must be >> 4. You do not want to know how long
		 * it took me to discover this.
		 */

		writel(MEM_WORD_WRITE | 0xDFFF4, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writew(start >> 4, xl_mmio + MMIO_MACDATA);

		/* Clear the CPAttention, memWrEn Bit */

		writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		result_8 = readb(xl_mmio + MMIO_MACDATA) ;
		result_8 = result_8 & ~CPA_MEMWREN ;
		writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writeb(result_8, xl_mmio + MMIO_MACDATA) ;

		/* Clear the cpHold bit in pmbar to let the card run the code */

		writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);
		result_16 = readw(xl_mmio + MMIO_MACDATA) ;
		result_16 = result_16 & ~PMB_CPHOLD ;
		writel( (IO_WORD_WRITE | PMBAR), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writew(result_16,xl_mmio + MMIO_MACDATA) ;


	} /* If microcode upload required */

	/*
	 * The card should now go though a self test procedure and get itself ready
	 * to be opened, we must wait for an srb response with the initialization
	 * information.
	 */

#if XL_DEBUG
	printk(KERN_INFO "%s: Microcode uploaded, must wait for the self test to complete\n", dev->name);
#endif

	writew(SETINDENABLE | 0xFFF, xl_mmio + MMIO_COMMAND) ;

	t=jiffies;
	while ( !(readw(xl_mmio + MMIO_INTSTATUS_AUTO) & INTSTAT_SRB) ) {
		schedule();
		if (time_after(jiffies, t + 15 * HZ)) {
			printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n");
			return -ENODEV;
		}
	}

	/*
	 * Write the RxBufArea with D000, RxEarlyThresh, TxStartThresh,
	 * DnPriReqThresh, read the tech docs if you want to know what
	 * values they need to be.
	 */

	writel(MMIO_WORD_WRITE | RXBUFAREA, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writew(0xD000, xl_mmio + MMIO_MACDATA) ;

	writel(MMIO_WORD_WRITE | RXEARLYTHRESH, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writew(0X0020, xl_mmio + MMIO_MACDATA) ;

	writew( SETTXSTARTTHRESH | 0x40 , xl_mmio + MMIO_COMMAND) ;

	writeb(0x04, xl_mmio + MMIO_DNBURSTTHRESH) ;
	writeb(0x04, xl_mmio + DNPRIREQTHRESH) ;

	/*
	 * Read WRBR to provide the location of the srb block, have to use byte reads not word reads.
	 * Tech docs have this wrong !!!!
	 */

	writel(MMIO_BYTE_READ | WRBR, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	xl_priv->srb = readb(xl_mmio + MMIO_MACDATA) << 8 ;
	writel( (MMIO_BYTE_READ | WRBR) + 1, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	xl_priv->srb = xl_priv->srb | readb(xl_mmio + MMIO_MACDATA) ;

#if XL_DEBUG
	writel(IO_WORD_READ | SWITCHSETTINGS, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	if ( readw(xl_mmio + MMIO_MACDATA) & 2) {
		printk(KERN_INFO "Default ring speed 4 mbps\n");
	} else {
		printk(KERN_INFO "Default ring speed 16 mbps\n");
	}
	printk(KERN_INFO "%s: xl_priv->srb = %04x\n",xl_priv->xl_card_name, xl_priv->srb);
#endif

	return 0;
}
603
/*
 * xl_open - ndo_open callback: bring the interface up.
 * @dev: device being opened
 *
 * Claims the (shared) IRQ, reads the MAC address and switch settings
 * from the EEPROM (re-resetting the hardware if the requested ring
 * speed differs from what is stored), issues the Open.NIC command via
 * xl_open_hw() — retrying once at the other ring speed on a speed
 * error — then allocates and links the rx/tx descriptor rings, enables
 * interrupts and starts the tx queue.  Returns 0 or a negative errno,
 * releasing the IRQ and any ring memory on failure.
 */
static int xl_open(struct net_device *dev)
{
	struct xl_private *xl_priv=netdev_priv(dev);
	u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
	u8 i ;
	__le16 hwaddr[3] ; /* Should be u8[6] but we get word return values */
	int open_err ;

	u16 switchsettings, switchsettings_eeprom ;

	if (request_irq(dev->irq, xl_interrupt, IRQF_SHARED , "3c359", dev))
		return -EAGAIN;

	/*
	 * Read the information from the EEPROM that we need.
	 */

	hwaddr[0] = cpu_to_le16(xl_ee_read(dev,0x10));
	hwaddr[1] = cpu_to_le16(xl_ee_read(dev,0x11));
	hwaddr[2] = cpu_to_le16(xl_ee_read(dev,0x12));

	/* Ring speed: bit 0x02 of the switch settings selects 4 Mbps */

	switchsettings_eeprom = xl_ee_read(dev,0x08) ;
	switchsettings = switchsettings_eeprom ;

	if (xl_priv->xl_ring_speed != 0) {
		if (xl_priv->xl_ring_speed == 4)
			switchsettings = switchsettings | 0x02 ;
		else
			switchsettings = switchsettings & ~0x02 ;
	}

	/* Only write EEProm if there has been a change */
	if (switchsettings != switchsettings_eeprom) {
		xl_ee_write(dev,0x08,switchsettings) ;
		/* Hardware reset after changing EEProm */
		xl_hw_reset(dev) ;
	}

	memcpy(dev->dev_addr,hwaddr,dev->addr_len) ;

	open_err = xl_open_hw(dev) ;

	/*
	 * This really needs to be cleaned up with better error reporting.
	 */

	if (open_err != 0) { /* Something went wrong with the open command */
		if (open_err & 0x07) { /* Wrong speed, retry at different speed */
			printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed\n", dev->name);
			/* toggle the 4/16 Mbps bit and try once more */
			switchsettings = switchsettings ^ 2 ;
			xl_ee_write(dev,0x08,switchsettings) ;
			xl_hw_reset(dev) ;
			open_err = xl_open_hw(dev) ;
			if (open_err != 0) {
				printk(KERN_WARNING "%s: Open error returned a second time, we're bombing out now\n", dev->name);
				free_irq(dev->irq,dev) ;
				return -ENODEV ;
			}
		} else {
			printk(KERN_WARNING "%s: Open Error = %04x\n", dev->name, open_err) ;
			free_irq(dev->irq,dev) ;
			return -ENODEV ;
		}
	}

	/*
	 * Now to set up the Rx and Tx buffer structures
	 */
	/* These MUST be on 8 byte boundaries */
	xl_priv->xl_tx_ring = kzalloc((sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) + 7, GFP_DMA | GFP_KERNEL);
	if (xl_priv->xl_tx_ring == NULL) {
		printk(KERN_WARNING "%s: Not enough memory to allocate tx buffers.\n",
				     dev->name);
		free_irq(dev->irq,dev);
		return -ENOMEM;
	}
	xl_priv->xl_rx_ring = kzalloc((sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) +7, GFP_DMA | GFP_KERNEL);
	if (xl_priv->xl_rx_ring == NULL) {
		printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers.\n",
				     dev->name);
		free_irq(dev->irq,dev);
		kfree(xl_priv->xl_tx_ring);
		return -ENOMEM;
	}

	/* Setup Rx Ring: one skb per descriptor; a partial ring is tolerated
	 * (the loop below breaks on allocation failure and rx_ring_no records
	 * how many entries were actually populated) */
	for (i=0 ; i < XL_RX_RING_SIZE ; i++) {
		struct sk_buff *skb ;

		skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
		if (skb==NULL)
			break ;

		skb->dev = dev ;
		xl_priv->xl_rx_ring[i].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
		xl_priv->xl_rx_ring[i].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG;
		xl_priv->rx_ring_skb[i] = skb ;
	}

	if (i==0) {
		printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
		free_irq(dev->irq,dev) ;
		kfree(xl_priv->xl_tx_ring);
		kfree(xl_priv->xl_rx_ring);
		return -EIO ;
	}

	xl_priv->rx_ring_no = i ;
	xl_priv->rx_ring_tail = 0 ;
	/* NOTE(review): the card also writes framestatus back into these
	 * descriptors, so PCI_DMA_TODEVICE looks suspect — confirm whether
	 * this should be bidirectional on platforms with an IOMMU. */
	xl_priv->rx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_rx_ring, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_TODEVICE) ;
	for (i=0;i<(xl_priv->rx_ring_no-1);i++) {
		xl_priv->xl_rx_ring[i].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * (i+1)));
	}
	xl_priv->xl_rx_ring[i].upnextptr = 0 ;	/* terminate the ring */

	writel(xl_priv->rx_ring_dma_addr, xl_mmio + MMIO_UPLISTPTR) ;

	/* Setup Tx Ring */

	xl_priv->tx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_tx_ring, sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE,PCI_DMA_TODEVICE) ;

	xl_priv->tx_ring_head = 1 ;
	xl_priv->tx_ring_tail = 255 ; /* Special marker for first packet */
	xl_priv->free_ring_entries = XL_TX_RING_SIZE ;

	/*
	 * Setup the first dummy DPD entry for polling to start working.
	 */

	xl_priv->xl_tx_ring[0].framestartheader = TXDPDEMPTY;
	xl_priv->xl_tx_ring[0].buffer = 0 ;
	xl_priv->xl_tx_ring[0].buffer_length = 0 ;
	xl_priv->xl_tx_ring[0].dnnextptr = 0 ;

	writel(xl_priv->tx_ring_dma_addr, xl_mmio + MMIO_DNLISTPTR) ;
	writel(DNUNSTALL, xl_mmio + MMIO_COMMAND) ;
	writel(UPUNSTALL, xl_mmio + MMIO_COMMAND) ;
	writel(DNENABLE, xl_mmio + MMIO_COMMAND) ;
	writeb(0x40, xl_mmio + MMIO_DNPOLL) ;

	/*
	 * Enable interrupts on the card
	 */

	writel(SETINTENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ;
	writel(SETINDENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ;

	netif_start_queue(dev) ;
	return 0;

}
757
/*
 * xl_open_hw - build and issue the Open.NIC SRB command.
 * @dev: device being opened
 *
 * Writes the open command block byte-by-byte into the card's SRB area
 * (located at xl_priv->srb within the 0xD0000 shared-memory window),
 * signals the card via the MISR csrb bit, waits for completion, and
 * parses the response: on success it records the ASB and ARB offsets
 * and prints the microcode version string; on failure it returns the
 * 16-bit error code from the response (0 on success).
 */
static int xl_open_hw(struct net_device *dev)
{
	struct xl_private *xl_priv=netdev_priv(dev);
	u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
	u16 vsoff ;
	char ver_str[33];
	int open_err ;
	int i ;
	unsigned long t ;

	/*
	 * Okay, let's build up the Open.NIC srb command
	 *
	 */

	writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(OPEN_NIC, xl_mmio + MMIO_MACDATA) ;

	/*
	 * Use this as a test byte, if it comes back with the same value, the command didn't work
	 */

	writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb)+ 2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(0xff,xl_mmio + MMIO_MACDATA) ;

	/* Open options */
	writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(0x00, xl_mmio + MMIO_MACDATA) ;
	writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + 9, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(0x00, xl_mmio + MMIO_MACDATA) ;

	/*
	 * Node address, be careful here, the docs say you can just put zeros here and it will use
	 * the hardware address, it doesn't, you must include the node address in the open command.
	 */

	if (xl_priv->xl_laa[0]) { /* If using a LAA address */
		for (i=10;i<16;i++) {
			writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
			writeb(xl_priv->xl_laa[i-10],xl_mmio + MMIO_MACDATA) ;
		}
		memcpy(dev->dev_addr,xl_priv->xl_laa,dev->addr_len) ;
	} else { /* Regular hardware address */
		for (i=10;i<16;i++) {
			writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
			writeb(dev->dev_addr[i-10], xl_mmio + MMIO_MACDATA) ;
		}
	}

	/* Default everything else to 0 */
	for (i = 16; i < 34; i++) {
		writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		writeb(0x00,xl_mmio + MMIO_MACDATA) ;
	}

	/*
	 * Set the csrb bit in the MISR register to tell the card to process the SRB
	 */

	xl_wait_misr_flags(dev) ;
	writel(MEM_BYTE_WRITE | MF_CSRB, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(0xFF, xl_mmio + MMIO_MACDATA) ;
	writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	writeb(MISR_CSRB , xl_mmio + MMIO_MACDATA) ;

	/*
	 * Now wait for the command to run
	 */

	t=jiffies;
	while (! (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) {
		schedule();
		/* 40 s timeout; note we fall through and parse anyway */
		if (time_after(jiffies, t + 40 * HZ)) {
			printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n");
			break ;
		}
	}

	/*
	 * Let's interpret the open response: byte 2 is the test byte written
	 * above — non-zero here means the open failed.
	 */

	writel( (MEM_BYTE_READ | 0xD0000 | xl_priv->srb)+2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
	if (readb(xl_mmio + MMIO_MACDATA)!=0) {
		open_err = readb(xl_mmio + MMIO_MACDATA) << 8 ;
		writel( (MEM_BYTE_READ | 0xD0000 | xl_priv->srb) + 7, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		open_err |= readb(xl_mmio + MMIO_MACDATA) ;
		return open_err ;
	} else {
		/* success: pick up ASB/SRB/ARB offsets and the version string */
		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		xl_priv->asb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
		printk(KERN_INFO "%s: Adapter Opened Details: ",dev->name) ;
		printk("ASB: %04x",xl_priv->asb ) ;
		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 10, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		printk(", SRB: %04x",swab16(readw(xl_mmio + MMIO_MACDATA)) ) ;

		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
		printk(", ARB: %04x\n",xl_priv->arb );
		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
		vsoff = swab16(readw(xl_mmio + MMIO_MACDATA)) ;

		/*
		 * Interesting, sending the individual characters directly to printk was causing klogd to use
		 * use 100% of processor time, so we build up the string and print that instead.
		 */

		for (i=0;i<0x20;i++) {
			writel( (MEM_BYTE_READ | 0xD0000 | vsoff) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
			ver_str[i] = readb(xl_mmio + MMIO_MACDATA) ;
		}
		ver_str[i] = '\0' ;
		printk(KERN_INFO "%s: Microcode version String: %s\n",dev->name,ver_str);
	}

	/*
	 * Issue the AckInterrupt
	 */
	writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;

	return 0 ;
}
880
/*
 * There are two ways of implementing rx on the 359 NIC, either
 * interrupt driven or polling. We are going to use interrupts,
 * it is the easier way of doing things.
 *
 * The Rx works with a ring of Rx descriptors. At initialise time the ring
 * entries point to the next entry except for the last entry in the ring
 * which points to 0. The card is programmed with the location of the first
 * available descriptor and keeps reading the next_ptr until next_ptr is set
 * to 0. Hopefully with a ring size of 16 the card will never get to read a next_ptr
 * of 0. As the Rx interrupt is received we copy the frame up to the protocol layers
 * and then point the end of the ring to our current position and point our current
 * position to 0, therefore making the current position the last position on the ring.
 * The last position on the ring therefore continually loops around the rx ring.
 *
 * rx_ring_tail is the position on the ring to process next. (Think of a snake, the head
 * expands as the card adds new packets and we go around eating the tail processing the
 * packets.)
 *
 * Undoubtedly it could be streamlined and improved upon, but at the moment it works
 * and the fast path through the routine is fine.
 *
 * adv_rx_ring could be inlined to increase performance, but it's called a *lot* of times
 * in xl_rx so would increase the size of the function significantly.
 */
906
907static void adv_rx_ring(struct net_device *dev) /* Advance rx_ring, cut down on bloat in xl_rx */
908{
909 struct xl_private *xl_priv=netdev_priv(dev);
910 int n = xl_priv->rx_ring_tail;
911 int prev_ring_loc;
912
913 prev_ring_loc = (n + XL_RX_RING_SIZE - 1) & (XL_RX_RING_SIZE - 1);
914 xl_priv->xl_rx_ring[prev_ring_loc].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * n));
915 xl_priv->xl_rx_ring[n].framestatus = 0;
916 xl_priv->xl_rx_ring[n].upnextptr = 0;
917 xl_priv->rx_ring_tail++;
918 xl_priv->rx_ring_tail &= (XL_RX_RING_SIZE-1);
919}
920
921static void xl_rx(struct net_device *dev)
922{
923 struct xl_private *xl_priv=netdev_priv(dev);
924 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
925 struct sk_buff *skb, *skb2 ;
926 int frame_length = 0, copy_len = 0 ;
927 int temp_ring_loc ;
928
929 /*
930 * Receive the next frame, loop around the ring until all frames
931 * have been received.
932 */
933
934 while (xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & (RXUPDCOMPLETE | RXUPDFULL) ) { /* Descriptor to process */
935
936 if (xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & RXUPDFULL ) { /* UpdFull, Multiple Descriptors used for the frame */
937
938 /*
939 * This is a pain, you need to go through all the descriptors until the last one
940 * for this frame to find the framelength
941 */
942
943 temp_ring_loc = xl_priv->rx_ring_tail ;
944
945 while (xl_priv->xl_rx_ring[temp_ring_loc].framestatus & RXUPDFULL ) {
946 temp_ring_loc++ ;
947 temp_ring_loc &= (XL_RX_RING_SIZE-1) ;
948 }
949
950 frame_length = le32_to_cpu(xl_priv->xl_rx_ring[temp_ring_loc].framestatus) & 0x7FFF;
951
952 skb = dev_alloc_skb(frame_length) ;
953
954 if (skb==NULL) { /* No memory for frame, still need to roll forward the rx ring */
955 printk(KERN_WARNING "%s: dev_alloc_skb failed - multi buffer !\n", dev->name) ;
956 while (xl_priv->rx_ring_tail != temp_ring_loc)
957 adv_rx_ring(dev) ;
958
959 adv_rx_ring(dev) ; /* One more time just for luck :) */
960 dev->stats.rx_dropped++ ;
961
962 writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
963 return ;
964 }
965
966 while (xl_priv->rx_ring_tail != temp_ring_loc) {
967 copy_len = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen) & 0x7FFF;
968 frame_length -= copy_len ;
969 pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
970 skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
971 skb_put(skb, copy_len),
972 copy_len);
973 pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
974 adv_rx_ring(dev) ;
975 }
976
977 /* Now we have found the last fragment */
978 pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
979 skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
980 skb_put(skb,copy_len), frame_length);
981/* memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */
982 pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
983 adv_rx_ring(dev) ;
984 skb->protocol = tr_type_trans(skb,dev) ;
985 netif_rx(skb) ;
986
987 } else { /* Single Descriptor Used, simply swap buffers over, fast path */
988
989 frame_length = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus) & 0x7FFF;
990
991 skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
992
993 if (skb==NULL) { /* Still need to fix the rx ring */
994 printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer\n",dev->name);
995 adv_rx_ring(dev) ;
996 dev->stats.rx_dropped++ ;
997 writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
998 return ;
999 }
1000
1001 skb2 = xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] ;
1002 pci_unmap_single(xl_priv->pdev, le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
1003 skb_put(skb2, frame_length) ;
1004 skb2->protocol = tr_type_trans(skb2,dev) ;
1005
1006 xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] = skb ;
1007 xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev,skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
1008 xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG;
1009 adv_rx_ring(dev) ;
1010 dev->stats.rx_packets++ ;
1011 dev->stats.rx_bytes += frame_length ;
1012
1013 netif_rx(skb2) ;
1014 } /* if multiple buffers */
1015 } /* while packet to do */
1016
1017 /* Clear the updComplete interrupt */
1018 writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
1019 return ;
1020}
1021
1022/*
1023 * This is ruthless, it doesn't care what state the card is in it will
1024 * completely reset the adapter.
1025 */
1026
1027static void xl_reset(struct net_device *dev)
1028{
1029 struct xl_private *xl_priv=netdev_priv(dev);
1030 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1031 unsigned long t;
1032
1033 writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;
1034
1035 /*
1036 * Must wait for cmdInProgress bit (12) to clear before continuing with
1037 * card configuration.
1038 */
1039
1040 t=jiffies;
1041 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1042 if (time_after(jiffies, t + 40 * HZ)) {
1043 printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n");
1044 break ;
1045 }
1046 }
1047
1048}
1049
1050static void xl_freemem(struct net_device *dev)
1051{
1052 struct xl_private *xl_priv=netdev_priv(dev);
1053 int i ;
1054
1055 for (i=0;i<XL_RX_RING_SIZE;i++) {
1056 dev_kfree_skb_irq(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]) ;
1057 pci_unmap_single(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
1058 xl_priv->rx_ring_tail++ ;
1059 xl_priv->rx_ring_tail &= XL_RX_RING_SIZE-1;
1060 }
1061
1062 /* unmap ring */
1063 pci_unmap_single(xl_priv->pdev,xl_priv->rx_ring_dma_addr, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_FROMDEVICE) ;
1064
1065 pci_unmap_single(xl_priv->pdev,xl_priv->tx_ring_dma_addr, sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE, PCI_DMA_TODEVICE) ;
1066
1067 kfree(xl_priv->xl_rx_ring) ;
1068 kfree(xl_priv->xl_tx_ring) ;
1069
1070 return ;
1071}
1072
1073static irqreturn_t xl_interrupt(int irq, void *dev_id)
1074{
1075 struct net_device *dev = (struct net_device *)dev_id;
1076 struct xl_private *xl_priv =netdev_priv(dev);
1077 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1078 u16 intstatus, macstatus ;
1079
1080 intstatus = readw(xl_mmio + MMIO_INTSTATUS) ;
1081
1082 if (!(intstatus & 1)) /* We didn't generate the interrupt */
1083 return IRQ_NONE;
1084
1085 spin_lock(&xl_priv->xl_lock) ;
1086
1087 /*
1088 * Process the interrupt
1089 */
1090 /*
1091 * Something fishy going on here, we shouldn't get 0001 ints, not fatal though.
1092 */
1093 if (intstatus == 0x0001) {
1094 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1095 printk(KERN_INFO "%s: 00001 int received\n",dev->name);
1096 } else {
1097 if (intstatus & (HOSTERRINT | SRBRINT | ARBCINT | UPCOMPINT | DNCOMPINT | HARDERRINT | (1<<8) | TXUNDERRUN | ASBFINT)) {
1098
1099 /*
1100 * Host Error.
1101 * It may be possible to recover from this, but usually it means something
1102 * is seriously fubar, so we just close the adapter.
1103 */
1104
1105 if (intstatus & HOSTERRINT) {
1106 printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x\n",dev->name,intstatus);
1107 writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;
1108 printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
1109 netif_stop_queue(dev) ;
1110 xl_freemem(dev) ;
1111 free_irq(dev->irq,dev);
1112 xl_reset(dev) ;
1113 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1114 spin_unlock(&xl_priv->xl_lock) ;
1115 return IRQ_HANDLED;
1116 } /* Host Error */
1117
1118 if (intstatus & SRBRINT ) { /* Srbc interrupt */
1119 writel(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1120 if (xl_priv->srb_queued)
1121 xl_srb_bh(dev) ;
1122 } /* SRBR Interrupt */
1123
1124 if (intstatus & TXUNDERRUN) { /* Issue DnReset command */
1125 writel(DNRESET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1126 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { /* Wait for command to run */
1127 /* !!! FIX-ME !!!!
1128 Must put a timeout check here ! */
1129 /* Empty Loop */
1130 }
1131 printk(KERN_WARNING "%s: TX Underrun received\n",dev->name);
1132 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1133 } /* TxUnderRun */
1134
1135 if (intstatus & ARBCINT ) { /* Arbc interrupt */
1136 xl_arb_cmd(dev) ;
1137 } /* Arbc */
1138
1139 if (intstatus & ASBFINT) {
1140 if (xl_priv->asb_queued == 1) {
1141 xl_asb_cmd(dev) ;
1142 } else if (xl_priv->asb_queued == 2) {
1143 xl_asb_bh(dev) ;
1144 } else {
1145 writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ;
1146 }
1147 } /* Asbf */
1148
1149 if (intstatus & UPCOMPINT ) /* UpComplete */
1150 xl_rx(dev) ;
1151
1152 if (intstatus & DNCOMPINT ) /* DnComplete */
1153 xl_dn_comp(dev) ;
1154
1155 if (intstatus & HARDERRINT ) { /* Hardware error */
1156 writel(MMIO_WORD_READ | MACSTATUS, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1157 macstatus = readw(xl_mmio + MMIO_MACDATA) ;
1158 printk(KERN_WARNING "%s: MacStatusError, details: ", dev->name);
1159 if (macstatus & (1<<14))
1160 printk(KERN_WARNING "tchk error: Unrecoverable error\n");
1161 if (macstatus & (1<<3))
1162 printk(KERN_WARNING "eint error: Internal watchdog timer expired\n");
1163 if (macstatus & (1<<2))
1164 printk(KERN_WARNING "aint error: Host tried to perform invalid operation\n");
1165 printk(KERN_WARNING "Instatus = %02x, macstatus = %02x\n",intstatus,macstatus) ;
1166 printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
1167 netif_stop_queue(dev) ;
1168 xl_freemem(dev) ;
1169 free_irq(dev->irq,dev);
1170 unregister_netdev(dev) ;
1171 free_netdev(dev) ;
1172 xl_reset(dev) ;
1173 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1174 spin_unlock(&xl_priv->xl_lock) ;
1175 return IRQ_HANDLED;
1176 }
1177 } else {
1178 printk(KERN_WARNING "%s: Received Unknown interrupt : %04x\n", dev->name, intstatus);
1179 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1180 }
1181 }
1182
1183 /* Turn interrupts back on */
1184
1185 writel( SETINDENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ;
1186 writel( SETINTENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ;
1187
1188 spin_unlock(&xl_priv->xl_lock) ;
1189 return IRQ_HANDLED;
1190}
1191
1192/*
1193 * Tx - Polling configuration
1194 */
1195
1196static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev)
1197{
1198 struct xl_private *xl_priv=netdev_priv(dev);
1199 struct xl_tx_desc *txd ;
1200 int tx_head, tx_tail, tx_prev ;
1201 unsigned long flags ;
1202
1203 spin_lock_irqsave(&xl_priv->xl_lock,flags) ;
1204
1205 netif_stop_queue(dev) ;
1206
1207 if (xl_priv->free_ring_entries > 1 ) {
1208 /*
1209 * Set up the descriptor for the packet
1210 */
1211 tx_head = xl_priv->tx_ring_head ;
1212 tx_tail = xl_priv->tx_ring_tail ;
1213
1214 txd = &(xl_priv->xl_tx_ring[tx_head]) ;
1215 txd->dnnextptr = 0 ;
1216 txd->framestartheader = cpu_to_le32(skb->len) | TXDNINDICATE;
1217 txd->buffer = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE));
1218 txd->buffer_length = cpu_to_le32(skb->len) | TXDNFRAGLAST;
1219 xl_priv->tx_ring_skb[tx_head] = skb ;
1220 dev->stats.tx_packets++ ;
1221 dev->stats.tx_bytes += skb->len ;
1222
1223 /*
1224 * Set the nextptr of the previous descriptor equal to this descriptor, add XL_TX_RING_SIZE -1
1225 * to ensure no negative numbers in unsigned locations.
1226 */
1227
1228 tx_prev = (xl_priv->tx_ring_head + XL_TX_RING_SIZE - 1) & (XL_TX_RING_SIZE - 1) ;
1229
1230 xl_priv->tx_ring_head++ ;
1231 xl_priv->tx_ring_head &= (XL_TX_RING_SIZE - 1) ;
1232 xl_priv->free_ring_entries-- ;
1233
1234 xl_priv->xl_tx_ring[tx_prev].dnnextptr = cpu_to_le32(xl_priv->tx_ring_dma_addr + (sizeof (struct xl_tx_desc) * tx_head));
1235
1236 /* Sneaky, by doing a read on DnListPtr we can force the card to poll on the DnNextPtr */
1237 /* readl(xl_mmio + MMIO_DNLISTPTR) ; */
1238
1239 netif_wake_queue(dev) ;
1240
1241 spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ;
1242
1243 return NETDEV_TX_OK;
1244 } else {
1245 spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ;
1246 return NETDEV_TX_BUSY;
1247 }
1248
1249}
1250
1251/*
1252 * The NIC has told us that a packet has been downloaded onto the card, we must
1253 * find out which packet it has done, clear the skb and information for the packet
1254 * then advance around the ring for all transmitted packets
1255 */
1256
1257static void xl_dn_comp(struct net_device *dev)
1258{
1259 struct xl_private *xl_priv=netdev_priv(dev);
1260 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1261 struct xl_tx_desc *txd ;
1262
1263
1264 if (xl_priv->tx_ring_tail == 255) {/* First time */
1265 xl_priv->xl_tx_ring[0].framestartheader = 0 ;
1266 xl_priv->xl_tx_ring[0].dnnextptr = 0 ;
1267 xl_priv->tx_ring_tail = 1 ;
1268 }
1269
1270 while (xl_priv->xl_tx_ring[xl_priv->tx_ring_tail].framestartheader & TXDNCOMPLETE ) {
1271 txd = &(xl_priv->xl_tx_ring[xl_priv->tx_ring_tail]) ;
1272 pci_unmap_single(xl_priv->pdev, le32_to_cpu(txd->buffer), xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]->len, PCI_DMA_TODEVICE);
1273 txd->framestartheader = 0 ;
1274 txd->buffer = cpu_to_le32(0xdeadbeef);
1275 txd->buffer_length = 0 ;
1276 dev_kfree_skb_irq(xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]) ;
1277 xl_priv->tx_ring_tail++ ;
1278 xl_priv->tx_ring_tail &= (XL_TX_RING_SIZE - 1) ;
1279 xl_priv->free_ring_entries++ ;
1280 }
1281
1282 netif_wake_queue(dev) ;
1283
1284 writel(ACK_INTERRUPT | DNCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
1285}
1286
1287/*
1288 * Close the adapter properly.
1289 * This srb reply cannot be handled from interrupt context as we have
1290 * to free the interrupt from the driver.
1291 */
1292
1293static int xl_close(struct net_device *dev)
1294{
1295 struct xl_private *xl_priv = netdev_priv(dev);
1296 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1297 unsigned long t ;
1298
1299 netif_stop_queue(dev) ;
1300
1301 /*
1302 * Close the adapter, need to stall the rx and tx queues.
1303 */
1304
1305 writew(DNSTALL, xl_mmio + MMIO_COMMAND) ;
1306 t=jiffies;
1307 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1308 schedule();
1309 if (time_after(jiffies, t + 10 * HZ)) {
1310 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNSTALL not responding.\n", dev->name);
1311 break ;
1312 }
1313 }
1314 writew(DNDISABLE, xl_mmio + MMIO_COMMAND) ;
1315 t=jiffies;
1316 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1317 schedule();
1318 if (time_after(jiffies, t + 10 * HZ)) {
1319 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNDISABLE not responding.\n", dev->name);
1320 break ;
1321 }
1322 }
1323 writew(UPSTALL, xl_mmio + MMIO_COMMAND) ;
1324 t=jiffies;
1325 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1326 schedule();
1327 if (time_after(jiffies, t + 10 * HZ)) {
1328 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPSTALL not responding.\n", dev->name);
1329 break ;
1330 }
1331 }
1332
1333 /* Turn off interrupts, we will still get the indication though
1334 * so we can trap it
1335 */
1336
1337 writel(SETINTENABLE, xl_mmio + MMIO_COMMAND) ;
1338
1339 xl_srb_cmd(dev,CLOSE_NIC) ;
1340
1341 t=jiffies;
1342 while (!(readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) {
1343 schedule();
1344 if (time_after(jiffies, t + 10 * HZ)) {
1345 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-CLOSENIC not responding.\n", dev->name);
1346 break ;
1347 }
1348 }
1349 /* Read the srb response from the adapter */
1350
1351 writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD);
1352 if (readb(xl_mmio + MMIO_MACDATA) != CLOSE_NIC) {
1353 printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response\n",dev->name);
1354 } else {
1355 writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1356 if (readb(xl_mmio + MMIO_MACDATA)==0) {
1357 printk(KERN_INFO "%s: Adapter has been closed\n",dev->name);
1358 writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1359
1360 xl_freemem(dev) ;
1361 free_irq(dev->irq,dev) ;
1362 } else {
1363 printk(KERN_INFO "%s: Close nic command returned error code %02x\n",dev->name, readb(xl_mmio + MMIO_MACDATA)) ;
1364 }
1365 }
1366
1367 /* Reset the upload and download logic */
1368
1369 writew(UPRESET, xl_mmio + MMIO_COMMAND) ;
1370 t=jiffies;
1371 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1372 schedule();
1373 if (time_after(jiffies, t + 10 * HZ)) {
1374 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPRESET not responding.\n", dev->name);
1375 break ;
1376 }
1377 }
1378 writew(DNRESET, xl_mmio + MMIO_COMMAND) ;
1379 t=jiffies;
1380 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1381 schedule();
1382 if (time_after(jiffies, t + 10 * HZ)) {
1383 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNRESET not responding.\n", dev->name);
1384 break ;
1385 }
1386 }
1387 xl_hw_reset(dev) ;
1388 return 0 ;
1389}
1390
1391static void xl_set_rx_mode(struct net_device *dev)
1392{
1393 struct xl_private *xl_priv = netdev_priv(dev);
1394 struct netdev_hw_addr *ha;
1395 unsigned char dev_mc_address[4] ;
1396 u16 options ;
1397
1398 if (dev->flags & IFF_PROMISC)
1399 options = 0x0004 ;
1400 else
1401 options = 0x0000 ;
1402
1403 if (options ^ xl_priv->xl_copy_all_options) { /* Changed, must send command */
1404 xl_priv->xl_copy_all_options = options ;
1405 xl_srb_cmd(dev, SET_RECEIVE_MODE) ;
1406 return ;
1407 }
1408
1409 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
1410
1411 netdev_for_each_mc_addr(ha, dev) {
1412 dev_mc_address[0] |= ha->addr[2];
1413 dev_mc_address[1] |= ha->addr[3];
1414 dev_mc_address[2] |= ha->addr[4];
1415 dev_mc_address[3] |= ha->addr[5];
1416 }
1417
1418 if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */
1419 memcpy(xl_priv->xl_functional_addr, dev_mc_address,4) ;
1420 xl_srb_cmd(dev, SET_FUNC_ADDRESS) ;
1421 }
1422 return ;
1423}
1424
1425
1426/*
1427 * We issued an srb command and now we must read
1428 * the response from the completed command.
1429 */
1430
1431static void xl_srb_bh(struct net_device *dev)
1432{
1433 struct xl_private *xl_priv = netdev_priv(dev);
1434 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1435 u8 srb_cmd, ret_code ;
1436 int i ;
1437
1438 writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1439 srb_cmd = readb(xl_mmio + MMIO_MACDATA) ;
1440 writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1441 ret_code = readb(xl_mmio + MMIO_MACDATA) ;
1442
1443 /* Ret_code is standard across all commands */
1444
1445 switch (ret_code) {
1446 case 1:
1447 printk(KERN_INFO "%s: Command: %d - Invalid Command code\n",dev->name,srb_cmd) ;
1448 break ;
1449 case 4:
1450 printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command\n",dev->name,srb_cmd);
1451 break ;
1452
1453 case 6:
1454 printk(KERN_INFO "%s: Command: %d - Options Invalid for command\n",dev->name,srb_cmd);
1455 break ;
1456
1457 case 0: /* Successful command execution */
1458 switch (srb_cmd) {
1459 case READ_LOG: /* Returns 14 bytes of data from the NIC */
1460 if(xl_priv->xl_message_level)
1461 printk(KERN_INFO "%s: READ.LOG 14 bytes of data ",dev->name) ;
1462 /*
1463 * We still have to read the log even if message_level = 0 and we don't want
1464 * to see it
1465 */
1466 for (i=0;i<14;i++) {
1467 writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1468 if(xl_priv->xl_message_level)
1469 printk("%02x:",readb(xl_mmio + MMIO_MACDATA)) ;
1470 }
1471 printk("\n") ;
1472 break ;
1473 case SET_FUNC_ADDRESS:
1474 if(xl_priv->xl_message_level)
1475 printk(KERN_INFO "%s: Functional Address Set\n",dev->name);
1476 break ;
1477 case CLOSE_NIC:
1478 if(xl_priv->xl_message_level)
1479 printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler\n",dev->name);
1480 break ;
1481 case SET_MULTICAST_MODE:
1482 if(xl_priv->xl_message_level)
1483 printk(KERN_INFO "%s: Multicast options successfully changed\n",dev->name) ;
1484 break ;
1485 case SET_RECEIVE_MODE:
1486 if(xl_priv->xl_message_level) {
1487 if (xl_priv->xl_copy_all_options == 0x0004)
1488 printk(KERN_INFO "%s: Entering promiscuous mode\n", dev->name);
1489 else
1490 printk(KERN_INFO "%s: Entering normal receive mode\n",dev->name);
1491 }
1492 break ;
1493
1494 } /* switch */
1495 break ;
1496 } /* switch */
1497 return ;
1498}
1499
1500static int xl_set_mac_address (struct net_device *dev, void *addr)
1501{
1502 struct sockaddr *saddr = addr ;
1503 struct xl_private *xl_priv = netdev_priv(dev);
1504
1505 if (netif_running(dev)) {
1506 printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
1507 return -EIO ;
1508 }
1509
1510 memcpy(xl_priv->xl_laa, saddr->sa_data,dev->addr_len) ;
1511
1512 if (xl_priv->xl_message_level) {
1513 printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, xl_priv->xl_laa[0],
1514 xl_priv->xl_laa[1], xl_priv->xl_laa[2],
1515 xl_priv->xl_laa[3], xl_priv->xl_laa[4],
1516 xl_priv->xl_laa[5]);
1517 }
1518
1519 return 0 ;
1520}
1521
1522static void xl_arb_cmd(struct net_device *dev)
1523{
1524 struct xl_private *xl_priv = netdev_priv(dev);
1525 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1526 u8 arb_cmd ;
1527 u16 lan_status, lan_status_diff ;
1528
1529 writel( ( MEM_BYTE_READ | 0xD0000 | xl_priv->arb), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1530 arb_cmd = readb(xl_mmio + MMIO_MACDATA) ;
1531
1532 if (arb_cmd == RING_STATUS_CHANGE) { /* Ring.Status.Change */
1533 writel( ( (MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1534
1535 printk(KERN_INFO "%s: Ring Status Change: New Status = %04x\n", dev->name, swab16(readw(xl_mmio + MMIO_MACDATA) )) ;
1536
1537 lan_status = swab16(readw(xl_mmio + MMIO_MACDATA));
1538
1539 /* Acknowledge interrupt, this tells nic we are done with the arb */
1540 writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1541
1542 lan_status_diff = xl_priv->xl_lan_status ^ lan_status ;
1543
1544 if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
1545 if (lan_status_diff & LSC_LWF)
1546 printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
1547 if (lan_status_diff & LSC_ARW)
1548 printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
1549 if (lan_status_diff & LSC_FPE)
1550 printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
1551 if (lan_status_diff & LSC_RR)
1552 printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);
1553
1554 /* Adapter has been closed by the hardware */
1555
1556 netif_stop_queue(dev);
1557 xl_freemem(dev) ;
1558 free_irq(dev->irq,dev);
1559
1560 printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
1561 } /* If serious error */
1562
1563 if (xl_priv->xl_message_level) {
1564 if (lan_status_diff & LSC_SIG_LOSS)
1565 printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
1566 if (lan_status_diff & LSC_HARD_ERR)
1567 printk(KERN_INFO "%s: Beaconing\n",dev->name);
1568 if (lan_status_diff & LSC_SOFT_ERR)
1569 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
1570 if (lan_status_diff & LSC_TRAN_BCN)
1571 printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
1572 if (lan_status_diff & LSC_SS)
1573 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
1574 if (lan_status_diff & LSC_RING_REC)
1575 printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
1576 if (lan_status_diff & LSC_FDX_MODE)
1577 printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
1578 }
1579
1580 if (lan_status_diff & LSC_CO) {
1581 if (xl_priv->xl_message_level)
1582 printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
1583 /* Issue READ.LOG command */
1584 xl_srb_cmd(dev, READ_LOG) ;
1585 }
1586
1587 /* There is no command in the tech docs to issue the read_sr_counters */
1588 if (lan_status_diff & LSC_SR_CO) {
1589 if (xl_priv->xl_message_level)
1590 printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);
1591 }
1592
1593 xl_priv->xl_lan_status = lan_status ;
1594
1595 } /* Lan.change.status */
1596 else if ( arb_cmd == RECEIVE_DATA) { /* Received.Data */
1597#if XL_DEBUG
1598 printk(KERN_INFO "Received.Data\n");
1599#endif
1600 writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1601 xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
1602
1603 /* Now we are going to be really basic here and not do anything
1604 * with the data at all. The tech docs do not give me enough
1605 * information to calculate the buffers properly so we're
1606 * just going to tell the nic that we've dealt with the frame
1607 * anyway.
1608 */
1609
1610 /* Acknowledge interrupt, this tells nic we are done with the arb */
1611 writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1612
1613 /* Is the ASB free ? */
1614
1615 xl_priv->asb_queued = 0 ;
1616 writel( ((MEM_BYTE_READ | 0xD0000 | xl_priv->asb) + 2), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1617 if (readb(xl_mmio + MMIO_MACDATA) != 0xff) {
1618 xl_priv->asb_queued = 1 ;
1619
1620 xl_wait_misr_flags(dev) ;
1621
1622 writel(MEM_BYTE_WRITE | MF_ASBFR, xl_mmio + MMIO_MAC_ACCESS_CMD);
1623 writeb(0xff, xl_mmio + MMIO_MACDATA) ;
1624 writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1625 writeb(MISR_ASBFR, xl_mmio + MMIO_MACDATA) ;
1626 return ;
1627 /* Drop out and wait for the bottom half to be run */
1628 }
1629
1630 xl_asb_cmd(dev) ;
1631
1632 } else {
1633 printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x\n",dev->name,arb_cmd);
1634 }
1635
1636 /* Acknowledge the arb interrupt */
1637
1638 writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
1639
1640 return ;
1641}
1642
1643
1644/*
1645 * There is only one asb command, but we can get called from different
1646 * places.
1647 */
1648
1649static void xl_asb_cmd(struct net_device *dev)
1650{
1651 struct xl_private *xl_priv = netdev_priv(dev);
1652 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1653
1654 if (xl_priv->asb_queued == 1)
1655 writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ;
1656
1657 writel(MEM_BYTE_WRITE | 0xd0000 | xl_priv->asb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1658 writeb(0x81, xl_mmio + MMIO_MACDATA) ;
1659
1660 writel(MEM_WORD_WRITE | 0xd0000 | xl_priv->asb | 6, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1661 writew(swab16(xl_priv->mac_buffer), xl_mmio + MMIO_MACDATA) ;
1662
1663 xl_wait_misr_flags(dev) ;
1664
1665 writel(MEM_BYTE_WRITE | MF_RASB, xl_mmio + MMIO_MAC_ACCESS_CMD);
1666 writeb(0xff, xl_mmio + MMIO_MACDATA) ;
1667
1668 writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1669 writeb(MISR_RASB, xl_mmio + MMIO_MACDATA) ;
1670
1671 xl_priv->asb_queued = 2 ;
1672
1673 return ;
1674}
1675
1676/*
1677 * This will only get called if there was an error
1678 * from the asb cmd.
1679 */
1680static void xl_asb_bh(struct net_device *dev)
1681{
1682 struct xl_private *xl_priv = netdev_priv(dev);
1683 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1684 u8 ret_code ;
1685
1686 writel(MMIO_BYTE_READ | 0xd0000 | xl_priv->asb | 2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1687 ret_code = readb(xl_mmio + MMIO_MACDATA) ;
1688 switch (ret_code) {
1689 case 0x01:
1690 printk(KERN_INFO "%s: ASB Command, unrecognized command code\n",dev->name);
1691 break ;
1692 case 0x26:
1693 printk(KERN_INFO "%s: ASB Command, unexpected receive buffer\n", dev->name);
1694 break ;
1695 case 0x40:
1696 printk(KERN_INFO "%s: ASB Command, Invalid Station ID\n", dev->name);
1697 break ;
1698 }
1699 xl_priv->asb_queued = 0 ;
1700 writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ;
1701 return ;
1702}
1703
1704/*
1705 * Issue srb commands to the nic
1706 */
1707
1708static void xl_srb_cmd(struct net_device *dev, int srb_cmd)
1709{
1710 struct xl_private *xl_priv = netdev_priv(dev);
1711 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1712
1713 switch (srb_cmd) {
1714 case READ_LOG:
1715 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1716 writeb(READ_LOG, xl_mmio + MMIO_MACDATA) ;
1717 break;
1718
1719 case CLOSE_NIC:
1720 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1721 writeb(CLOSE_NIC, xl_mmio + MMIO_MACDATA) ;
1722 break ;
1723
1724 case SET_RECEIVE_MODE:
1725 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1726 writeb(SET_RECEIVE_MODE, xl_mmio + MMIO_MACDATA) ;
1727 writel(MEM_WORD_WRITE | 0xD0000 | xl_priv->srb | 4, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1728 writew(xl_priv->xl_copy_all_options, xl_mmio + MMIO_MACDATA) ;
1729 break ;
1730
1731 case SET_FUNC_ADDRESS:
1732 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1733 writeb(SET_FUNC_ADDRESS, xl_mmio + MMIO_MACDATA) ;
1734 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 6 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1735 writeb(xl_priv->xl_functional_addr[0], xl_mmio + MMIO_MACDATA) ;
1736 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 7 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1737 writeb(xl_priv->xl_functional_addr[1], xl_mmio + MMIO_MACDATA) ;
1738 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 8 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1739 writeb(xl_priv->xl_functional_addr[2], xl_mmio + MMIO_MACDATA) ;
1740 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 9 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1741 writeb(xl_priv->xl_functional_addr[3], xl_mmio + MMIO_MACDATA) ;
1742 break ;
1743 } /* switch */
1744
1745
1746 xl_wait_misr_flags(dev) ;
1747
1748 /* Write 0xff to the CSRB flag */
1749 writel(MEM_BYTE_WRITE | MF_CSRB , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1750 writeb(0xFF, xl_mmio + MMIO_MACDATA) ;
1751 /* Set csrb bit in MISR register to process command */
1752 writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1753 writeb(MISR_CSRB, xl_mmio + MMIO_MACDATA) ;
1754 xl_priv->srb_queued = 1 ;
1755
1756 return ;
1757}
1758
1759/*
1760 * This is nasty, to use the MISR command you have to wait for 6 memory locations
1761 * to be zero. This is the way the driver does on other OS'es so we should be ok with
1762 * the empty loop.
1763 */
1764
1765static void xl_wait_misr_flags(struct net_device *dev)
1766{
1767 struct xl_private *xl_priv = netdev_priv(dev);
1768 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1769
1770 int i ;
1771
1772 writel(MMIO_BYTE_READ | MISR_RW, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1773 if (readb(xl_mmio + MMIO_MACDATA) != 0) { /* Misr not clear */
1774 for (i=0; i<6; i++) {
1775 writel(MEM_BYTE_READ | 0xDFFE0 | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1776 while (readb(xl_mmio + MMIO_MACDATA) != 0) {
1777 ; /* Empty Loop */
1778 }
1779 }
1780 }
1781
1782 writel(MMIO_BYTE_WRITE | MISR_AND, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1783 writeb(0x80, xl_mmio + MMIO_MACDATA) ;
1784
1785 return ;
1786}
1787
1788/*
1789 * Change mtu size, this should work the same as olympic
1790 */
1791
1792static int xl_change_mtu(struct net_device *dev, int mtu)
1793{
1794 struct xl_private *xl_priv = netdev_priv(dev);
1795 u16 max_mtu ;
1796
1797 if (xl_priv->xl_ring_speed == 4)
1798 max_mtu = 4500 ;
1799 else
1800 max_mtu = 18000 ;
1801
1802 if (mtu > max_mtu)
1803 return -EINVAL ;
1804 if (mtu < 100)
1805 return -EINVAL ;
1806
1807 dev->mtu = mtu ;
1808 xl_priv->pkt_buf_sz = mtu + TR_HLEN ;
1809
1810 return 0 ;
1811}
1812
1813static void __devexit xl_remove_one (struct pci_dev *pdev)
1814{
1815 struct net_device *dev = pci_get_drvdata(pdev);
1816 struct xl_private *xl_priv=netdev_priv(dev);
1817
1818 release_firmware(xl_priv->fw);
1819 unregister_netdev(dev);
1820 iounmap(xl_priv->xl_mmio) ;
1821 pci_release_regions(pdev) ;
1822 pci_set_drvdata(pdev,NULL) ;
1823 free_netdev(dev);
1824 return ;
1825}
1826
1827static struct pci_driver xl_3c359_driver = {
1828 .name = "3c359",
1829 .id_table = xl_pci_tbl,
1830 .probe = xl_probe,
1831 .remove = __devexit_p(xl_remove_one),
1832};
1833
1834static int __init xl_pci_init (void)
1835{
1836 return pci_register_driver(&xl_3c359_driver);
1837}
1838
1839
1840static void __exit xl_pci_cleanup (void)
1841{
1842 pci_unregister_driver (&xl_3c359_driver);
1843}
1844
1845module_init(xl_pci_init);
1846module_exit(xl_pci_cleanup);
1847
1848MODULE_LICENSE("GPL") ;
diff --git a/drivers/net/tokenring/3c359.h b/drivers/net/tokenring/3c359.h
new file mode 100644
index 00000000000..bcb1a6b4a4c
--- /dev/null
+++ b/drivers/net/tokenring/3c359.h
@@ -0,0 +1,291 @@
1/*
2 * 3c359.h (c) 2000 Mike Phillips (mikep@linuxtr.net) All Rights Reserved
3 *
4 * Linux driver for 3Com 3C359 Token Link PCI XL cards.
5 *
6 * This software may be used and distributed according to the terms
7 * of the GNU General Public License Version 2 or (at your option)
 8 * any later version, incorporated herein by reference.
9 */
10
11/* Memory Access Commands */
12#define IO_BYTE_READ 0x28 << 24
13#define IO_BYTE_WRITE 0x18 << 24
14#define IO_WORD_READ 0x20 << 24
15#define IO_WORD_WRITE 0x10 << 24
16#define MMIO_BYTE_READ 0x88 << 24
17#define MMIO_BYTE_WRITE 0x48 << 24
18#define MMIO_WORD_READ 0x80 << 24
19#define MMIO_WORD_WRITE 0x40 << 24
20#define MEM_BYTE_READ 0x8C << 24
21#define MEM_BYTE_WRITE 0x4C << 24
22#define MEM_WORD_READ 0x84 << 24
23#define MEM_WORD_WRITE 0x44 << 24
24
25#define PMBAR 0x1C80
26#define PMB_CPHOLD (1<<10)
27
28#define CPATTENTION 0x180D
29#define CPA_PMBARVIS (1<<7)
30#define CPA_MEMWREN (1<<6)
31
32#define SWITCHSETTINGS 0x1C88
33#define EECONTROL 0x1C8A
34#define EEDATA 0x1C8C
35#define EEREAD 0x0080
36#define EEWRITE 0x0040
37#define EEERASE 0x0060
38#define EE_ENABLE_WRITE 0x0030
39#define EEBUSY (1<<15)
40
41#define WRBR 0xCDE02
42#define WWOR 0xCDE04
43#define WWCR 0xCDE06
44#define MACSTATUS 0xCDE08
45#define MISR_RW 0xCDE0B
46#define MISR_AND 0xCDE2B
47#define MISR_SET 0xCDE4B
48#define RXBUFAREA 0xCDE10
49#define RXEARLYTHRESH 0xCDE12
50#define TXSTARTTHRESH 0x58
51#define DNPRIREQTHRESH 0x2C
52
53#define MISR_CSRB (1<<5)
54#define MISR_RASB (1<<4)
55#define MISR_SRBFR (1<<3)
56#define MISR_ASBFR (1<<2)
57#define MISR_ARBF (1<<1)
58
59/* MISR Flags memory locations */
60#define MF_SSBF 0xDFFE0
61#define MF_ARBF 0xDFFE1
62#define MF_ASBFR 0xDFFE2
63#define MF_SRBFR 0xDFFE3
64#define MF_RASB 0xDFFE4
65#define MF_CSRB 0xDFFE5
66
67#define MMIO_MACDATA 0x10
68#define MMIO_MAC_ACCESS_CMD 0x14
69#define MMIO_TIMER 0x1A
70#define MMIO_DMA_CTRL 0x20
71#define MMIO_DNLISTPTR 0x24
72#define MMIO_HASHFILTER 0x28
73#define MMIO_CONFIG 0x29
74#define MMIO_DNPRIREQTHRESH 0x2C
75#define MMIO_DNPOLL 0x2D
76#define MMIO_UPPKTSTATUS 0x30
77#define MMIO_FREETIMER 0x34
78#define MMIO_COUNTDOWN 0x36
79#define MMIO_UPLISTPTR 0x38
80#define MMIO_UPPOLL 0x3C
81#define MMIO_UPBURSTTHRESH 0x40
82#define MMIO_DNBURSTTHRESH 0x41
83#define MMIO_INTSTATUS_AUTO 0x56
84#define MMIO_TXSTARTTHRESH 0x58
85#define MMIO_INTERRUPTENABLE 0x5A
86#define MMIO_INDICATIONENABLE 0x5C
87#define MMIO_COMMAND 0x5E /* These two are meant to be the same */
88#define MMIO_INTSTATUS 0x5E /* Makes the code more readable this way */
89#define INTSTAT_CMD_IN_PROGRESS (1<<12)
90#define INTSTAT_SRB (1<<14)
91#define INTSTAT_INTLATCH (1<<0)
92
93/* Indication / Interrupt Mask
94 * Annoyingly the bits to be set in the indication and interrupt enable
95 * do not match with the actual bits received in the interrupt, although
96 * they are in the same order.
97 * The mapping for the indication / interrupt are:
98 * Bit Indication / Interrupt
99 * 0 HostError
100 * 1 txcomplete
101 * 2 updneeded
102 * 3 rxcomplete
103 * 4 intrequested
104 * 5 macerror
105 * 6 dncomplete
106 * 7 upcomplete
107 * 8 txunderrun
108 * 9 asbf
109 * 10 srbr
110 * 11 arbc
111 *
112 * The only ones we don't want to receive are txcomplete and rxcomplete
113 * we use dncomplete and upcomplete instead.
114 */
115
116#define INT_MASK 0xFF5
117
118/* Note the subtle difference here, IND and INT */
119
120#define SETINDENABLE (8<<12)
121#define SETINTENABLE (7<<12)
122#define SRBBIT (1<<10)
123#define ASBBIT (1<<9)
124#define ARBBIT (1<<11)
125
126#define SRB 0xDFE90
127#define ASB 0xDFED0
128#define ARB 0xD0000
129#define SCRATCH 0xDFEF0
130
131#define INT_REQUEST 0x6000 /* (6 << 12) */
132#define ACK_INTERRUPT 0x6800 /* (13 <<11) */
133#define GLOBAL_RESET 0x00
134#define DNDISABLE 0x5000
135#define DNENABLE 0x4800
136#define DNSTALL 0x3002
137#define DNRESET 0x5800
138#define DNUNSTALL 0x3003
139#define UPRESET 0x2800
140#define UPSTALL 0x3000
141#define UPUNSTALL 0x3001
142#define SETCONFIG 0x4000
143#define SETTXSTARTTHRESH 0x9800
144
145/* Received Interrupts */
146#define ASBFINT (1<<13)
147#define SRBRINT (1<<14)
148#define ARBCINT (1<<15)
149#define TXUNDERRUN (1<<11)
150
151#define UPCOMPINT (1<<10)
152#define DNCOMPINT (1<<9)
153#define HARDERRINT (1<<7)
154#define RXCOMPLETE (1<<4)
155#define TXCOMPINT (1<<2)
156#define HOSTERRINT (1<<1)
157
158/* Receive descriptor bits */
159#define RXOVERRUN cpu_to_le32(1<<19)
160#define RXFC cpu_to_le32(1<<21)
161#define RXAR cpu_to_le32(1<<22)
162#define RXUPDCOMPLETE cpu_to_le32(1<<23)
163#define RXUPDFULL cpu_to_le32(1<<24)
164#define RXUPLASTFRAG cpu_to_le32(1<<31)
165
166/* Transmit descriptor bits */
167#define TXDNCOMPLETE cpu_to_le32(1<<16)
168#define TXTXINDICATE cpu_to_le32(1<<27)
169#define TXDPDEMPTY cpu_to_le32(1<<29)
170#define TXDNINDICATE cpu_to_le32(1<<31)
171#define TXDNFRAGLAST cpu_to_le32(1<<31)
172
173/* Interrupts to Acknowledge */
174#define LATCH_ACK 1
175#define TXCOMPACK (1<<1)
176#define INTREQACK (1<<2)
177#define DNCOMPACK (1<<3)
178#define UPCOMPACK (1<<4)
179#define ASBFACK (1<<5)
180#define SRBRACK (1<<6)
181#define ARBCACK (1<<7)
182
183#define XL_IO_SPACE 128
184#define SRB_COMMAND_SIZE 50
185
186/* Adapter Commands */
187#define REQUEST_INT 0x00
188#define MODIFY_OPEN_PARMS 0x01
189#define RESTORE_OPEN_PARMS 0x02
190#define OPEN_NIC 0x03
191#define CLOSE_NIC 0x04
192#define SET_SLEEP_MODE 0x05
193#define SET_GROUP_ADDRESS 0x06
194#define SET_FUNC_ADDRESS 0x07
195#define READ_LOG 0x08
196#define SET_MULTICAST_MODE 0x0C
197#define CHANGE_WAKEUP_PATTERN 0x0D
198#define GET_STATISTICS 0x13
199#define SET_RECEIVE_MODE 0x1F
200
201/* ARB Commands */
202#define RECEIVE_DATA 0x81
203#define RING_STATUS_CHANGE 0x84
204
205/* ASB Commands */
206#define ASB_RECEIVE_DATE 0x81
207
208/* Defines for LAN STATUS CHANGE reports */
209#define LSC_SIG_LOSS 0x8000
210#define LSC_HARD_ERR 0x4000
211#define LSC_SOFT_ERR 0x2000
212#define LSC_TRAN_BCN 0x1000
213#define LSC_LWF 0x0800
214#define LSC_ARW 0x0400
215#define LSC_FPE 0x0200
216#define LSC_RR 0x0100
217#define LSC_CO 0x0080
218#define LSC_SS 0x0040
219#define LSC_RING_REC 0x0020
220#define LSC_SR_CO 0x0010
221#define LSC_FDX_MODE 0x0004
222
223#define XL_MAX_ADAPTERS 8 /* 0x08 __MODULE_STRING can't handle 0xnn */
224
225/* 3c359 defaults for buffers */
226
227#define XL_RX_RING_SIZE 16 /* must be a power of 2 */
228#define XL_TX_RING_SIZE 16 /* must be a power of 2 */
229
230#define PKT_BUF_SZ 4096 /* Default packet size */
231
232/* 3c359 data structures */
233
/* Transmit (downstream) DMA descriptor.  All fields are little-endian;
 * the TXDN* flag bits defined above are OR'd into these words. */
struct xl_tx_desc {
	__le32 dnnextptr;	/* next descriptor — presumably a bus address; TODO confirm */
	__le32 framestartheader;	/* frame start header / TXDN* control bits */
	__le32 buffer;		/* data buffer address */
	__le32 buffer_length;	/* buffer length (TXDNFRAGLAST marks the last fragment) */
};
240
/* Receive (upstream) DMA descriptor.  All fields are little-endian;
 * the RX* flag bits defined above appear in framestatus/upfraglen. */
struct xl_rx_desc {
	__le32 upnextptr;	/* next descriptor — presumably a bus address; TODO confirm */
	__le32 framestatus;	/* completion/status bits (RXUPDCOMPLETE, RXOVERRUN, ...) */
	__le32 upfragaddr;	/* receive buffer address */
	__le32 upfraglen;	/* fragment length (RXUPLASTFRAG on the last fragment) */
};
247
/* Per-adapter private state, reached via netdev_priv(dev). */
struct xl_private {


	/* These two structures must be aligned on 8 byte boundaries */

	/* struct xl_rx_desc xl_rx_ring[XL_RX_RING_SIZE]; */
	/* struct xl_tx_desc xl_tx_ring[XL_TX_RING_SIZE]; */
	struct xl_rx_desc *xl_rx_ring ;	/* DMA descriptor rings */
	struct xl_tx_desc *xl_tx_ring ;
	struct sk_buff *tx_ring_skb[XL_TX_RING_SIZE], *rx_ring_skb[XL_RX_RING_SIZE];
	int tx_ring_head, tx_ring_tail ;	/* ring bookkeeping indices */
	int rx_ring_tail, rx_ring_no ;
	int free_ring_entries ;

	/* SRB/ARB/ASB command block locations — presumably offsets into
	 * adapter memory; TODO confirm against the setup code. */
	u16 srb;
	u16 arb;
	u16 asb;

	u8 __iomem *xl_mmio;	/* ioremapped register window (see xl_remove_one) */
	const char *xl_card_name;
	struct pci_dev *pdev ;

	spinlock_t xl_lock ;	/* serialises driver access to the adapter */

	volatile int srb_queued;	/* nonzero while an SRB command is outstanding */
	struct wait_queue *srb_wait;
	volatile int asb_queued;

	u16 mac_buffer ;
	u16 xl_lan_status ;
	u8 xl_ring_speed ;	/* ring speed: 4 or 16 (see xl_change_mtu) */
	u16 pkt_buf_sz ;	/* rx buffer size: mtu + TR_HLEN (xl_change_mtu) */
	u8 xl_message_level;
	u16 xl_copy_all_options ;
	unsigned char xl_functional_addr[4] ;
	u16 xl_addr_table_addr, xl_parms_addr ;
	u8 xl_laa[6] ;	/* locally administered address, if configured */
	u32 rx_ring_dma_addr ;	/* bus addresses of the descriptor rings */
	u32 tx_ring_dma_addr ;

	/* firmware section */
	const struct firmware *fw;	/* released in xl_remove_one */
};
291
diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig
new file mode 100644
index 00000000000..c4137b0f808
--- /dev/null
+++ b/drivers/net/tokenring/Kconfig
@@ -0,0 +1,185 @@
1#
2# Token Ring driver configuration
3#
4
5# So far, we only have PCI, ISA, and MCA token ring devices
6menuconfig TR
7 tristate "Token Ring driver support"
8 depends on NETDEVICES && !UML
9 depends on (PCI || ISA || MCA || CCW)
10 select LLC
11 help
12 Token Ring is IBM's way of communication on a local network; the
13 rest of the world uses Ethernet. To participate on a Token Ring
14 network, you need a special Token ring network card. If you are
15 connected to such a Token Ring network and want to use your Token
16 Ring card under Linux, say Y here and to the driver for your
17 particular card below and read the Token-Ring mini-HOWTO, available
18 from <http://www.tldp.org/docs.html#howto>. Most people can
19 say N here.
20
21if TR
22
23config IBMTR
24 tristate "IBM Tropic chipset based adapter support"
25 depends on ISA || MCA
26 ---help---
27 This is support for all IBM Token Ring cards that don't use DMA. If
28 you have such a beast, say Y and read the Token-Ring mini-HOWTO,
29 available from <http://www.tldp.org/docs.html#howto>.
30
31 Warning: this driver will almost definitely fail if more than one
32 active Token Ring card is present.
33
34 To compile this driver as a module, choose M here: the module will be
35 called ibmtr.
36
37config IBMOL
38 tristate "IBM Olympic chipset PCI adapter support"
39 depends on PCI
40 ---help---
41 This is support for all non-Lanstreamer IBM PCI Token Ring Cards.
42 Specifically this is all IBM PCI, PCI Wake On Lan, PCI II, PCI II
43 Wake On Lan, and PCI 100/16/4 adapters.
44
45 If you have such an adapter, say Y and read the Token-Ring
46 mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>.
47
48 To compile this driver as a module, choose M here: the module will be
49 called olympic.
50
51 Also read <file:Documentation/networking/olympic.txt> or check the
52 Linux Token Ring Project site for the latest information at
53 <http://www.linuxtr.net/>.
54
55config IBMLS
56 tristate "IBM Lanstreamer chipset PCI adapter support"
57 depends on PCI && !64BIT
58 help
59 This is support for IBM Lanstreamer PCI Token Ring Cards.
60
61 If you have such an adapter, say Y and read the Token-Ring
62 mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>.
63
64 To compile this driver as a module, choose M here: the module will be
65 called lanstreamer.
66
67config 3C359
68 tristate "3Com 3C359 Token Link Velocity XL adapter support"
69 depends on PCI
70 ---help---
71 This is support for the 3Com PCI Velocity XL cards, specifically
72 the 3Com 3C359, please note this is not for the 3C339 cards, you
73 should use the tms380 driver instead.
74
75 If you have such an adapter, say Y and read the Token-Ring
76 mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>.
77
78 To compile this driver as a module, choose M here: the module will be
79 called 3c359.
80
81 Also read the file <file:Documentation/networking/3c359.txt> or check the
82 Linux Token Ring Project site for the latest information at
83 <http://www.linuxtr.net>
84
85config TMS380TR
86 tristate "Generic TMS380 Token Ring ISA/PCI adapter support"
87 depends on PCI || ISA && ISA_DMA_API || MCA
88 select FW_LOADER
89 ---help---
90 This driver provides generic support for token ring adapters
91 based on the Texas Instruments TMS380 series chipsets. This
92 includes the SysKonnect TR4/16(+) ISA (SK-4190), SysKonnect
93 TR4/16(+) PCI (SK-4590), SysKonnect TR4/16 PCI (SK-4591),
94 Compaq 4/16 PCI, Thomas-Conrad TC4048 4/16 PCI, and several
95 Madge adapters. If you say Y here, you will be asked to select
96 which cards to support below. If you're using modules, each
97 class of card will be supported by a separate module.
98
99 If you have such an adapter and would like to use it, say Y and
100 read the Token-Ring mini-HOWTO, available from
101 <http://www.tldp.org/docs.html#howto>.
102
103 Also read the file <file:Documentation/networking/tms380tr.txt> or
104 check <http://www.auk.cx/tms380tr/>.
105
106 To compile this driver as a module, choose M here: the module will be
107 called tms380tr.
108
109config TMSPCI
110 tristate "Generic TMS380 PCI support"
111 depends on TMS380TR && PCI
112 ---help---
113 This tms380 module supports generic TMS380-based PCI cards.
114
115 These cards are known to work:
116 - Compaq 4/16 TR PCI
117 - SysKonnect TR4/16 PCI (SK-4590/SK-4591)
118 - Thomas-Conrad TC4048 PCI 4/16
119 - 3Com Token Link Velocity
120
121 To compile this driver as a module, choose M here: the module will be
122 called tmspci.
123
124config SKISA
125 tristate "SysKonnect TR4/16 ISA support"
126 depends on TMS380TR && ISA
127 help
128 This tms380 module supports SysKonnect TR4/16 ISA cards.
129
130 These cards are known to work:
131 - SysKonnect TR4/16 ISA (SK-4190)
132
133 To compile this driver as a module, choose M here: the module will be
134 called skisa.
135
136config PROTEON
137 tristate "Proteon ISA support"
138 depends on TMS380TR && ISA
139 help
140 This tms380 module supports Proteon ISA cards.
141
142 These cards are known to work:
143 - Proteon 1392
144 - Proteon 1392 plus
145
146 To compile this driver as a module, choose M here: the module will be
147 called proteon.
148
149config ABYSS
150 tristate "Madge Smart 16/4 PCI Mk2 support"
151 depends on TMS380TR && PCI
152 help
153 This tms380 module supports the Madge Smart 16/4 PCI Mk2
154 cards (51-02).
155
156 To compile this driver as a module, choose M here: the module will be
157 called abyss.
158
159config MADGEMC
160 tristate "Madge Smart 16/4 Ringnode MicroChannel"
161 depends on TMS380TR && MCA
162 help
163 This tms380 module supports the Madge Smart 16/4 MC16 and MC32
164 MicroChannel adapters.
165
166 To compile this driver as a module, choose M here: the module will be
167 called madgemc.
168
169config SMCTR
170 tristate "SMC ISA/MCA adapter support"
171 depends on (ISA || MCA_LEGACY) && (BROKEN || !64BIT)
172 ---help---
173 This is support for the ISA and MCA SMC Token Ring cards,
174 specifically SMC TokenCard Elite (8115T) and SMC TokenCard Elite/A
175 (8115T/A) adapters.
176
177 If you have such an adapter and would like to use it, say Y or M and
178 read the Token-Ring mini-HOWTO, available from
179 <http://www.tldp.org/docs.html#howto> and the file
180 <file:Documentation/networking/smctr.txt>.
181
182 To compile this driver as a module, choose M here: the module will be
183 called smctr.
184
185endif # TR
diff --git a/drivers/net/tokenring/Makefile b/drivers/net/tokenring/Makefile
new file mode 100644
index 00000000000..c88b0a5e538
--- /dev/null
+++ b/drivers/net/tokenring/Makefile
@@ -0,0 +1,15 @@
1#
2# Makefile for drivers/net/tokenring
3#
4
5obj-$(CONFIG_IBMTR) += ibmtr.o
6obj-$(CONFIG_IBMOL) += olympic.o
7obj-$(CONFIG_IBMLS) += lanstreamer.o
8obj-$(CONFIG_TMS380TR) += tms380tr.o
9obj-$(CONFIG_ABYSS) += abyss.o
10obj-$(CONFIG_MADGEMC) += madgemc.o
11obj-$(CONFIG_PROTEON) += proteon.o
12obj-$(CONFIG_TMSPCI) += tmspci.o
13obj-$(CONFIG_SKISA) += skisa.o
14obj-$(CONFIG_SMCTR) += smctr.o
15obj-$(CONFIG_3C359) += 3c359.o
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
new file mode 100644
index 00000000000..515f122777a
--- /dev/null
+++ b/drivers/net/tokenring/abyss.c
@@ -0,0 +1,469 @@
1/*
2 * abyss.c: Network driver for the Madge Smart 16/4 PCI Mk2 token ring card.
3 *
4 * Written 1999-2000 by Adam Fritzler
5 *
6 * This software may be used and distributed according to the terms
7 * of the GNU General Public License, incorporated herein by reference.
8 *
9 * This driver module supports the following cards:
10 * - Madge Smart 16/4 PCI Mk2
11 *
12 * Maintainer(s):
13 * AF Adam Fritzler
14 *
15 * Modification History:
16 * 30-Dec-99 AF Split off from the tms380tr driver.
17 * 22-Jan-00 AF Updated to use indirect read/writes
18 * 23-Nov-00 JG New PCI API, cleanups
19 *
20 *
21 * TODO:
22 * 1. See if we can use MMIO instead of inb/outb/inw/outw
23 * 2. Add support for Mk1 (has AT24 attached to the PCI
24 * config registers)
25 *
26 */
27
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/errno.h>
31#include <linux/pci.h>
32#include <linux/init.h>
33#include <linux/netdevice.h>
34#include <linux/trdevice.h>
35
36#include <asm/system.h>
37#include <asm/io.h>
38#include <asm/irq.h>
39
40#include "tms380tr.h"
41#include "abyss.h" /* Madge-specific constants */
42
43static char version[] __devinitdata =
44"abyss.c: v1.02 23/11/2000 by Adam Fritzler\n";
45
46#define ABYSS_IO_EXTENT 64
47
48static DEFINE_PCI_DEVICE_TABLE(abyss_pci_tbl) = {
49 { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2,
50 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, },
51 { } /* Terminating entry */
52};
53MODULE_DEVICE_TABLE(pci, abyss_pci_tbl);
54
55MODULE_LICENSE("GPL");
56
57static int abyss_open(struct net_device *dev);
58static int abyss_close(struct net_device *dev);
59static void abyss_enable(struct net_device *dev);
60static int abyss_chipset_init(struct net_device *dev);
61static void abyss_read_eeprom(struct net_device *dev);
62static unsigned short abyss_setnselout_pins(struct net_device *dev);
63
64static void at24_writedatabyte(unsigned long regaddr, unsigned char byte);
65static int at24_sendfullcmd(unsigned long regaddr, unsigned char cmd, unsigned char addr);
66static int at24_sendcmd(unsigned long regaddr, unsigned char cmd);
67static unsigned char at24_readdatabit(unsigned long regaddr);
68static unsigned char at24_readdatabyte(unsigned long regaddr);
69static int at24_waitforack(unsigned long regaddr);
70static int at24_waitfornack(unsigned long regaddr);
71static void at24_setlines(unsigned long regaddr, unsigned char clock, unsigned char data);
72static void at24_start(unsigned long regaddr);
73static unsigned char at24_readb(unsigned long regaddr, unsigned char addr);
74
/* SIF register accessors: the generic TMS380 core reaches the chip
 * through x86-style port I/O at dev->base_addr + reg. */
static unsigned short abyss_sifreadb(struct net_device *dev, unsigned short reg)
{
	return inb(dev->base_addr + reg);
}

static unsigned short abyss_sifreadw(struct net_device *dev, unsigned short reg)
{
	return inw(dev->base_addr + reg);
}

static void abyss_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
{
	outb(val, dev->base_addr + reg);
}

static void abyss_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
{
	outw(val, dev->base_addr + reg);
}
94
95static struct net_device_ops abyss_netdev_ops;
96
/*
 * Probe one Madge Smart 16/4 PCI Mk2: claim the I/O region and IRQ,
 * initialise the generic TMS380 state, read the SEEPROM and register
 * the net device.  Failures unwind through the goto chain below.
 */
static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int versionprinted;
	struct net_device *dev;
	struct net_local *tp;
	int ret, pci_irq_line;
	unsigned long pci_ioaddr;

	/* Print the version banner once, on the first probed card. */
	if (versionprinted++ == 0)
		printk("%s", version);

	if (pci_enable_device(pdev))
		return -EIO;

	/* Remove I/O space marker in bit 0. */
	pci_irq_line = pdev->irq;
	pci_ioaddr = pci_resource_start (pdev, 0);

	/* At this point we have found a valid card. */

	dev = alloc_trdev(sizeof(struct net_local));
	if (!dev)
		return -ENOMEM;

	if (!request_region(pci_ioaddr, ABYSS_IO_EXTENT, dev->name)) {
		ret = -EBUSY;
		goto err_out_trdev;
	}

	ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		goto err_out_region;

	dev->base_addr = pci_ioaddr;
	dev->irq = pci_irq_line;

	printk("%s: Madge Smart 16/4 PCI Mk2 (Abyss)\n", dev->name);
	printk("%s: IO: %#4lx IRQ: %d\n",
	       dev->name, pci_ioaddr, dev->irq);
	/*
	 * The TMS SIF registers lay 0x10 above the card base address.
	 */
	dev->base_addr += 0x10;

	ret = tmsdev_init(dev, &pdev->dev);
	if (ret) {
		printk("%s: unable to get memory for dev->priv.\n",
		       dev->name);
		goto err_out_irq;
	}

	abyss_read_eeprom(dev);

	printk("%s: Ring Station Address: %pM\n", dev->name, dev->dev_addr);

	/* Hook the Abyss-specific accessors into the generic TMS380 core. */
	tp = netdev_priv(dev);
	tp->setnselout = abyss_setnselout_pins;
	tp->sifreadb = abyss_sifreadb;
	tp->sifreadw = abyss_sifreadw;
	tp->sifwriteb = abyss_sifwriteb;
	tp->sifwritew = abyss_sifwritew;

	memcpy(tp->ProductID, "Madge PCI 16/4 Mk2", PROD_ID_SIZE + 1);

	dev->netdev_ops = &abyss_netdev_ops;

	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto err_out_tmsdev;
	return 0;

err_out_tmsdev:
	pci_set_drvdata(pdev, NULL);
	tmsdev_term(dev);
err_out_irq:
	free_irq(pdev->irq, dev);
err_out_region:
	release_region(pci_ioaddr, ABYSS_IO_EXTENT);
err_out_trdev:
	free_netdev(dev);
	return ret;
}
183
184static unsigned short abyss_setnselout_pins(struct net_device *dev)
185{
186 unsigned short val = 0;
187 struct net_local *tp = netdev_priv(dev);
188
189 if(tp->DataRate == SPEED_4)
190 val |= 0x01; /* Set 4Mbps */
191 else
192 val |= 0x00; /* Set 16Mbps */
193
194 return val;
195}
196
197/*
198 * The following Madge boards should use this code:
199 * - Smart 16/4 PCI Mk2 (Abyss)
200 * - Smart 16/4 PCI Mk1 (PCI T)
201 * - Smart 16/4 Client Plus PnP (Big Apple)
202 * - Smart 16/4 Cardbus Mk2
203 *
204 * These access an Atmel AT24 SEEPROM using their glue chip registers.
205 *
206 */
static void at24_writedatabyte(unsigned long regaddr, unsigned char byte)
{
	unsigned char mask;

	/* Shift the byte out MSB first: for each bit, present the data
	 * level and pulse the clock low-high-low. */
	for (mask = 0x80; mask != 0; mask >>= 1) {
		unsigned char bit = (byte & mask) ? 1 : 0;

		at24_setlines(regaddr, 0, bit);
		at24_setlines(regaddr, 1, bit);
		at24_setlines(regaddr, 0, bit);
	}
}
217
/* Send a command byte followed by an address byte.
 * Returns nonzero on ACK from the device, 0 on failure. */
static int at24_sendfullcmd(unsigned long regaddr, unsigned char cmd, unsigned char addr)
{
	if (!at24_sendcmd(regaddr, cmd))
		return 0;

	at24_writedatabyte(regaddr, addr);
	return at24_waitforack(regaddr);
}
226
/* Issue a start condition and clock out a command byte, retrying until
 * the device ACKs.  Returns 1 on ACK, 0 after 10 failed attempts. */
static int at24_sendcmd(unsigned long regaddr, unsigned char cmd)
{
	int attempt = 0;

	while (attempt++ < 10) {
		at24_start(regaddr);
		at24_writedatabyte(regaddr, cmd);
		if (at24_waitforack(regaddr))
			return 1;
	}
	return 0;
}
239
/* Clock a single bit out of the SEEPROM.  The exact line sequence
 * matters: release data, raise the clock, sample, then drop the clock.
 * Returns 0 or 1. */
static unsigned char at24_readdatabit(unsigned long regaddr)
{
	unsigned char val;

	at24_setlines(regaddr, 0, 1);	/* clock low, data released */
	at24_setlines(regaddr, 1, 1);	/* clock high: device drives data */
	val = (inb(regaddr) & AT24_DATA)?1:0;	/* sample the data pin */
	at24_setlines(regaddr, 1, 1);
	at24_setlines(regaddr, 0, 1);	/* clock back low */
	return val;
}
251
/* Assemble a byte from eight clocked-in bits, MSB first. */
static unsigned char at24_readdatabyte(unsigned long regaddr)
{
	unsigned char value = 0;
	int bit;

	for (bit = 7; bit >= 0; bit--)
		value = (unsigned char)((value << 1) | at24_readdatabit(regaddr));

	return value;
}
264
/* The device ACKs by pulling the data line low.  Poll up to 10 bit
 * times; return 1 as soon as a low bit is seen, else 0. */
static int at24_waitforack(unsigned long regaddr)
{
	int tries;

	for (tries = 10; tries > 0; tries--) {
		if (!(at24_readdatabit(regaddr) & 0x01))
			return 1;
	}
	return 0;
}
275
/* Mirror of at24_waitforack: wait for the data line to go high (NACK).
 * Returns 1 when seen within 10 bit times, else 0. */
static int at24_waitfornack(unsigned long regaddr)
{
	int tries;

	for (tries = 10; tries > 0; tries--) {
		if (at24_readdatabit(regaddr) & 0x01)
			return 1;
	}
	return 0;
}
285
/* Drive the SEEPROM clock and data lines through the glue-chip register
 * (AT24_ENABLE is always kept set), then wait for the levels to settle. */
static void at24_setlines(unsigned long regaddr, unsigned char clock, unsigned char data)
{
	unsigned char val = AT24_ENABLE;
	if (clock)
		val |= AT24_CLOCK;
	if (data)
		val |= AT24_DATA;

	outb(val, regaddr);
	tms380tr_wait(20); /* Very necessary. */
}
297
/* Generate a start condition: the data line falls while the clock is
 * high.  The sequence of these four calls must not be reordered. */
static void at24_start(unsigned long regaddr)
{
	at24_setlines(regaddr, 0, 1);
	at24_setlines(regaddr, 1, 1);
	at24_setlines(regaddr, 1, 0);	/* data high->low with clock high */
	at24_setlines(regaddr, 0, 1);
}
305
306static unsigned char at24_readb(unsigned long regaddr, unsigned char addr)
307{
308 unsigned char data = 0xff;
309
310 if (at24_sendfullcmd(regaddr, AT24_WRITE, addr)) {
311 if (at24_sendcmd(regaddr, AT24_READ)) {
312 data = at24_readdatabyte(regaddr);
313 if (!at24_waitfornack(regaddr))
314 data = 0xff;
315 }
316 }
317 return data;
318}
319
320
321/*
322 * Enable basic functions of the Madge chipset needed
323 * for initialization.
324 */
static void abyss_enable(struct net_device *dev)
{
	unsigned char reset_reg;
	unsigned long ioaddr;

	ioaddr = dev->base_addr;
	/* Read-modify-write so the other reset lines are preserved. */
	reset_reg = inb(ioaddr + PCIBM2_RESET_REG);
	reset_reg |= PCIBM2_RESET_REG_CHIP_NRES;
	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);
	tms380tr_wait(100);	/* let the chip settle */
}
336
337/*
338 * Enable the functions of the Madge chipset needed for
339 * full working order.
340 */
static int abyss_chipset_init(struct net_device *dev)
{
	unsigned char reset_reg;
	unsigned long ioaddr;

	ioaddr = dev->base_addr;

	reset_reg = inb(ioaddr + PCIBM2_RESET_REG);

	/* Make sure the chip itself is released first... */
	reset_reg |= PCIBM2_RESET_REG_CHIP_NRES;
	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);

	/* ...then drop all three NRES lines together, putting the chip,
	 * FIFO and SIF into reset at once. */
	reset_reg &= ~(PCIBM2_RESET_REG_CHIP_NRES |
		       PCIBM2_RESET_REG_FIFO_NRES |
		       PCIBM2_RESET_REG_SIF_NRES);
	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);

	tms380tr_wait(100);

	/* Release the blocks from reset one at a time: chip, SIF, FIFO. */
	reset_reg |= PCIBM2_RESET_REG_CHIP_NRES;
	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);

	reset_reg |= PCIBM2_RESET_REG_SIF_NRES;
	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);

	reset_reg |= PCIBM2_RESET_REG_FIFO_NRES;
	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);

	/* Enable SIF and PCI-error interrupts from the glue chip. */
	outb(PCIBM2_INT_CONTROL_REG_SINTEN |
	     PCIBM2_INT_CONTROL_REG_PCI_ERR_ENABLE,
	     ioaddr + PCIBM2_INT_CONTROL_REG);

	/* FIFO threshold of 30 — magic value from the original driver. */
	outb(30, ioaddr + PCIBM2_FIFO_THRESHOLD);

	return 0;
}
377
378static inline void abyss_chipset_close(struct net_device *dev)
379{
380 unsigned long ioaddr;
381
382 ioaddr = dev->base_addr;
383 outb(0, ioaddr + PCIBM2_RESET_REG);
384}
385
386/*
387 * Read configuration data from the AT24 SEEPROM on Madge cards.
388 *
389 */
static void abyss_read_eeprom(struct net_device *dev)
{
	struct net_local *tp;
	unsigned long ioaddr;
	unsigned short val;
	int i;

	tp = netdev_priv(dev);
	ioaddr = dev->base_addr;

	/* Must enable glue chip first */
	abyss_enable(dev);

	/* A nonzero ring-speed byte selects 4Mbps, zero selects 16Mbps. */
	val = at24_readb(ioaddr + PCIBM2_SEEPROM_REG,
			 PCIBM2_SEEPROM_RING_SPEED);
	tp->DataRate = val?SPEED_4:SPEED_16; /* set open speed */
	printk("%s: SEEPROM: ring speed: %dMb/sec\n", dev->name, tp->DataRate);

	/* RAM size is stored in units of 128kb. */
	val = at24_readb(ioaddr + PCIBM2_SEEPROM_REG,
			 PCIBM2_SEEPROM_RAM_SIZE) * 128;
	printk("%s: SEEPROM: adapter RAM: %dkb\n", dev->name, val);

	/* Burned-in MAC address starts at PCIBM2_SEEPROM_BIA. */
	dev->addr_len = 6;
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = at24_readb(ioaddr + PCIBM2_SEEPROM_REG,
					      PCIBM2_SEEPROM_BIA+i);
}
417
static int abyss_open(struct net_device *dev)
{
	/* Bring the Madge glue chip fully out of reset, then let the
	 * generic TMS380 code open the ring. */
	abyss_chipset_init(dev);
	tms380tr_open(dev);
	return 0;
}
424
static int abyss_close(struct net_device *dev)
{
	/* Close the ring first, then park the glue chip in reset. */
	tms380tr_close(dev);
	abyss_chipset_close(dev);
	return 0;
}
431
/* PCI removal: unwind everything abyss_attach set up. */
static void __devexit abyss_detach (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	BUG_ON(!dev);
	unregister_netdev(dev);
	/* base_addr was advanced by 0x10 in abyss_attach; step back to
	 * release the region that was actually requested. */
	release_region(dev->base_addr-0x10, ABYSS_IO_EXTENT);
	free_irq(dev->irq, dev);
	tmsdev_term(dev);
	free_netdev(dev);
	pci_set_drvdata(pdev, NULL);
}
444
/* PCI glue: bind abyss_attach/abyss_detach to matched Madge Mk2 cards. */
static struct pci_driver abyss_driver = {
	.name		= "abyss",
	.id_table	= abyss_pci_tbl,
	.probe		= abyss_attach,
	.remove		= __devexit_p(abyss_detach),
};

static int __init abyss_init (void)
{
	/* Start from the generic TMS380 netdev ops and override only
	 * open/stop with the Abyss-specific wrappers. */
	abyss_netdev_ops = tms380tr_netdev_ops;

	abyss_netdev_ops.ndo_open = abyss_open;
	abyss_netdev_ops.ndo_stop = abyss_close;

	return pci_register_driver(&abyss_driver);
}

static void __exit abyss_rmmod (void)
{
	pci_unregister_driver (&abyss_driver);
}

module_init(abyss_init);
module_exit(abyss_rmmod);
469
diff --git a/drivers/net/tokenring/abyss.h b/drivers/net/tokenring/abyss.h
new file mode 100644
index 00000000000..b0a473b8913
--- /dev/null
+++ b/drivers/net/tokenring/abyss.h
@@ -0,0 +1,58 @@
1/*
2 * abyss.h: Header for the abyss tms380tr module
3 *
4 * Authors:
5 * - Adam Fritzler
6 */
7
8#ifndef __LINUX_MADGETR_H
9#define __LINUX_MADGETR_H
10
11#ifdef __KERNEL__
12
13/*
14 * For Madge Smart 16/4 PCI Mk2. Since we increment the base address
15 * to get everything correct for the TMS SIF, we do these as negatives
16 * as they fall below the SIF in addressing.
17 */
18#define PCIBM2_INT_STATUS_REG ((short)-15)/* 0x01 */
19#define PCIBM2_INT_CONTROL_REG ((short)-14)/* 0x02 */
20#define PCIBM2_RESET_REG ((short)-12)/* 0x04 */
21#define PCIBM2_SEEPROM_REG ((short)-9) /* 0x07 */
22
23#define PCIBM2_INT_CONTROL_REG_SINTEN 0x02
24#define PCIBM2_INT_CONTROL_REG_PCI_ERR_ENABLE 0x80
25#define PCIBM2_INT_STATUS_REG_PCI_ERR 0x80
26
27#define PCIBM2_RESET_REG_CHIP_NRES 0x01
28#define PCIBM2_RESET_REG_FIFO_NRES 0x02
29#define PCIBM2_RESET_REG_SIF_NRES 0x04
30
31#define PCIBM2_FIFO_THRESHOLD 0x21
32#define PCIBM2_BURST_LENGTH 0x22
33
34/*
35 * Bits in PCIBM2_SEEPROM_REG.
36 */
37#define AT24_ENABLE 0x04
38#define AT24_DATA 0x02
39#define AT24_CLOCK 0x01
40
41/*
42 * AT24 Commands.
43 */
44#define AT24_WRITE 0xA0
45#define AT24_READ 0xA1
46
47/*
48 * Addresses in AT24 SEEPROM.
49 */
50#define PCIBM2_SEEPROM_BIA 0x12
51#define PCIBM2_SEEPROM_RING_SPEED 0x18
52#define PCIBM2_SEEPROM_RAM_SIZE 0x1A
53#define PCIBM2_SEEPROM_HWF1 0x1C
54#define PCIBM2_SEEPROM_HWF2 0x1E
55
56
57#endif /* __KERNEL__ */
58#endif /* __LINUX_MADGETR_H */
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
new file mode 100644
index 00000000000..e257a00fe14
--- /dev/null
+++ b/drivers/net/tokenring/ibmtr.c
@@ -0,0 +1,1964 @@
1/* ibmtr.c: A shared-memory IBM Token Ring 16/4 driver for linux
2 *
3 * Written 1993 by Mark Swanson and Peter De Schrijver.
4 * This software may be used and distributed according to the terms
5 * of the GNU General Public License, incorporated herein by reference.
6 *
7 * This device driver should work with Any IBM Token Ring Card that does
8 * not use DMA.
9 *
10 * I used Donald Becker's (becker@scyld.com) device driver work
11 * as a base for most of my initial work.
12 *
13 * Changes by Peter De Schrijver
14 * (Peter.Deschrijver@linux.cc.kuleuven.ac.be) :
15 *
16 * + changed name to ibmtr.c in anticipation of other tr boards.
17 * + changed reset code and adapter open code.
18 * + added SAP open code.
19 * + a first attempt to write interrupt, transmit and receive routines.
20 *
21 * Changes by David W. Morris (dwm@shell.portal.com) :
22 * 941003 dwm: - Restructure tok_probe for multiple adapters, devices.
23 * + Add comments, misc reorg for clarity.
24 * + Flatten interrupt handler levels.
25 *
26 * Changes by Farzad Farid (farzy@zen.via.ecp.fr)
27 * and Pascal Andre (andre@chimay.via.ecp.fr) (March 9 1995) :
28 * + multi ring support clean up.
29 * + RFC1042 compliance enhanced.
30 *
31 * Changes by Pascal Andre (andre@chimay.via.ecp.fr) (September 7 1995) :
32 * + bug correction in tr_tx
33 * + removed redundant information display
34 * + some code reworking
35 *
36 * Changes by Michel Lespinasse (walken@via.ecp.fr),
37 * Yann Doussot (doussot@via.ecp.fr) and Pascal Andre (andre@via.ecp.fr)
38 * (February 18, 1996) :
39 * + modified shared memory and mmio access port the driver to
40 * alpha platform (structure access -> readb/writeb)
41 *
42 * Changes by Steve Kipisz (bungy@ibm.net or kipisz@vnet.ibm.com)
43 * (January 18 1996):
44 * + swapped WWOR and WWCR in ibmtr.h
45 * + moved some init code from tok_probe into trdev_init. The
46 * PCMCIA code can call trdev_init to complete initializing
47 * the driver.
48 * + added -DPCMCIA to support PCMCIA
49 * + detecting PCMCIA Card Removal in interrupt handler. If
50 * ISRP is FF, then a PCMCIA card has been removed
51 * 10/2000 Burt needed a new method to avoid crashing the OS
52 *
53 * Changes by Paul Norton (pnorton@cts.com) :
54 * + restructured the READ.LOG logic to prevent the transmit SRB
55 * from being rudely overwritten before the transmit cycle is
56 * complete. (August 15 1996)
57 * + completed multiple adapter support. (November 20 1996)
58 * + implemented csum_partial_copy in tr_rx and increased receive
59 * buffer size and count. Minor fixes. (March 15, 1997)
60 *
61 * Changes by Christopher Turcksin <wabbit@rtfc.demon.co.uk>
62 * + Now compiles ok as a module again.
63 *
64 * Changes by Paul Norton (pnorton@ieee.org) :
65 * + moved the header manipulation code in tr_tx and tr_rx to
66 * net/802/tr.c. (July 12 1997)
67 * + add retry and timeout on open if cable disconnected. (May 5 1998)
68 * + lifted 2000 byte mtu limit. now depends on shared-RAM size.
 69 * (May 25 1998)
70 * + can't allocate 2k recv buff at 8k shared-RAM. (20 October 1998)
71 *
72 * Changes by Joel Sloan (jjs@c-me.com) :
73 * + disable verbose debug messages by default - to enable verbose
74 * debugging, edit the IBMTR_DEBUG_MESSAGES define below
75 *
76 * Changes by Mike Phillips <phillim@amtrak.com> :
77 * + Added extra #ifdef's to work with new PCMCIA Token Ring Code.
78 * The PCMCIA code now just sets up the card so it can be recognized
79 * by ibmtr_probe. Also checks allocated memory vs. on-board memory
80 * for correct figure to use.
81 *
82 * Changes by Tim Hockin (thockin@isunix.it.ilstu.edu) :
83 * + added spinlocks for SMP sanity (10 March 1999)
84 *
85 * Changes by Jochen Friedrich to enable RFC1469 Option 2 multicasting
86 * i.e. using functional address C0 00 00 04 00 00 to transmit and
87 * receive multicast packets.
88 *
89 * Changes by Mike Sullivan (based on original sram patch by Dave Grothe
 90 * to support windowing into on-adapter shared ram).
91 * i.e. Use LANAID to setup a PnP configuration with 16K RAM. Paging
92 * will shift this 16K window over the entire available shared RAM.
93 *
94 * Changes by Peter De Schrijver (p2@mind.be) :
95 * + fixed a problem with PCMCIA card removal
96 *
97 * Change by Mike Sullivan et al.:
98 * + added turbo card support. No need to use lanaid to configure
99 * the adapter into isa compatibility mode.
100 *
101 * Changes by Burt Silverman to allow the computer to behave nicely when
102 * a cable is pulled or not in place, or a PCMCIA card is removed hot.
103 */
104
105/* change the define of IBMTR_DEBUG_MESSAGES to a nonzero value
106in the event that chatty debug messages are desired - jjs 12/30/98 */
107
108#define IBMTR_DEBUG_MESSAGES 0
109
110#include <linux/module.h>
111#include <linux/sched.h>
112
113#ifdef PCMCIA /* required for ibmtr_cs.c to build */
114#undef MODULE /* yes, really */
115#undef ENABLE_PAGING
116#else
117#define ENABLE_PAGING 1
118#endif
119
120/* changes the output format of driver initialization */
121#define TR_VERBOSE 0
122
123/* Some Win95 stacks send many non-UI frames; this allows suppressing the warning */
124#define TR_FILTERNONUI 1
125
126#include <linux/interrupt.h>
127#include <linux/ioport.h>
128#include <linux/netdevice.h>
129#include <linux/ip.h>
130#include <linux/trdevice.h>
131#include <linux/ibmtr.h>
132
133#include <net/checksum.h>
134
135#include <asm/io.h>
136
137#define DPRINTK(format, args...) printk("%s: " format, dev->name , ## args)
138#define DPRINTD(format, args...) DummyCall("%s: " format, dev->name , ## args)
139
140/* version and credits */
141#ifndef PCMCIA
142static char version[] __devinitdata =
143 "\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n"
144 " v2.1.125 10/20/98 Paul Norton <pnorton@ieee.org>\n"
145 " v2.2.0 12/30/98 Joel Sloan <jjs@c-me.com>\n"
146 " v2.2.1 02/08/00 Mike Sullivan <sullivam@us.ibm.com>\n"
147 " v2.2.2 07/27/00 Burt Silverman <burts@us.ibm.com>\n"
148 " v2.4.0 03/01/01 Mike Sullivan <sullivan@us.ibm.com>\n";
149#endif
150
151/* this allows displaying full adapter information */
152
/* Bus-type names indexed by (cardpresent - 1), i.e. TR_ISA/TR_MCA/TR_ISAPNP. */
153static char *channel_def[] __devinitdata = { "ISA", "MCA", "ISA P&P" };
154
/* Expected channel-ID nibble signature for ISA adapters; compared one
 * nibble at a time in ibmtr_probe1() and find_turbo_adapters(). */
155static char pcchannelid[] __devinitdata = {
156	0x05, 0x00, 0x04, 0x09,
157	0x04, 0x03, 0x04, 0x0f,
158	0x03, 0x06, 0x03, 0x01,
159	0x03, 0x01, 0x03, 0x00,
160	0x03, 0x09, 0x03, 0x09,
161	0x03, 0x00, 0x02, 0x00
162};
163
/* Expected channel-ID nibble signature for MCA adapters. */
164static char mcchannelid[] __devinitdata = {
165	0x04, 0x0d, 0x04, 0x01,
166	0x05, 0x02, 0x05, 0x03,
167	0x03, 0x06, 0x03, 0x03,
168	0x05, 0x08, 0x03, 0x04,
169	0x03, 0x05, 0x03, 0x01,
170	0x03, 0x08, 0x02, 0x00
171};
172
/* Map the adapter-type nibble read from AIPADAPTYPE to a printable
 * description used in the probe banner.  Unknown nibbles get a generic
 * string rather than failing. */
173static char __devinit *adapter_def(char type)
174{
175	switch (type) {
176	case 0xF: return "PC Adapter | PC Adapter II | Adapter/A";
177	case 0xE: return "16/4 Adapter | 16/4 Adapter/A (long)";
178	case 0xD: return "16/4 Adapter/A (short) | 16/4 ISA-16 Adapter";
179	case 0xC: return "Auto 16/4 Adapter";
180	default: return "adapter (unknown type)";
181	}
182}	/* fix: dropped the stray ';' after the function body */
183
184#define TRC_INIT 0x01 /* Trace initialization & PROBEs */
185#define TRC_INITV 0x02 /* verbose init trace points */
186static unsigned char ibmtr_debug_trace = 0;
187
188static int ibmtr_probe1(struct net_device *dev, int ioaddr);
189static unsigned char get_sram_size(struct tok_info *adapt_info);
190static int trdev_init(struct net_device *dev);
191static int tok_open(struct net_device *dev);
192static int tok_init_card(struct net_device *dev);
193static void tok_open_adapter(unsigned long dev_addr);
194static void open_sap(unsigned char type, struct net_device *dev);
195static void tok_set_multicast_list(struct net_device *dev);
196static netdev_tx_t tok_send_packet(struct sk_buff *skb,
197 struct net_device *dev);
198static int tok_close(struct net_device *dev);
199static irqreturn_t tok_interrupt(int irq, void *dev_id);
200static void initial_tok_int(struct net_device *dev);
201static void tr_tx(struct net_device *dev);
202static void tr_rx(struct net_device *dev);
203static void ibmtr_reset_timer(struct timer_list*tmr,struct net_device *dev);
204static void tok_rerun(unsigned long dev_addr);
205static void ibmtr_readlog(struct net_device *dev);
206static int ibmtr_change_mtu(struct net_device *dev, int mtu);
207static void find_turbo_adapters(int *iolist);
208
209static int ibmtr_portlist[IBMTR_MAX_ADAPTERS+1] __devinitdata = {
210 0xa20, 0xa24, 0, 0, 0
211};
212static int __devinitdata turbo_io[IBMTR_MAX_ADAPTERS] = {0};
213static int __devinitdata turbo_irq[IBMTR_MAX_ADAPTERS] = {0};
214static int __devinitdata turbo_searched = 0;
215
216#ifndef PCMCIA
217static __u32 ibmtr_mem_base __devinitdata = 0xd0000;
218#endif
219
/* Print a 24-nibble channel-ID signature from host memory as hex digits,
 * sampling every 'stride'-th byte of pcid. */
220static void __devinit PrtChanID(char *pcid, short stride)
221{
222	short i, j;
223	for (i = 0, j = 0; i < 24; i++, j += stride)
224		printk("%1x", ((int) pcid[j]) & 0x0f);
225	printk("\n");
226}
227
/* Same as PrtChanID(), but reads the signature from adapter MMIO space
 * via readb() instead of host memory. */
228static void __devinit HWPrtChanID(void __iomem *pcid, short stride)
229{
230	short i, j;
231	for (i = 0, j = 0; i < 24; i++, j += stride)
232		printk("%1x", ((int) readb(pcid + j)) & 0x0f);
233	printk("\n");
234}
235
236/* We have to ioremap every checked address, because isa_readb is
237 * going away.
238 */
239
/*
 * Scan ISA shared-RAM space 0xC0000-0xDFFFF in 8K windows for IBM Turbo
 * 16/4 adapters: ioremap each window, match the ISA channel-ID nibble
 * signature, pull the I/O port and IRQ out of the adapter's interface
 * table, reset the adapter, and merge the discovered ports into iolist[]
 * so ibmtr_probe1() will find them.  Runs at most once (turbo_searched).
 *
 * Fixes vs. original: every early 'continue' now iounmap()s the window
 * first (the old code leaked the mapping), and the debug dump reads
 * through ram_mapped instead of the raw physical address.
 */
240static void __devinit find_turbo_adapters(int *iolist)
241{
242	int ram_addr;
243	int index=0;
244	void __iomem *chanid;
245	int found_turbo=0;
246	unsigned char *tchanid, ctemp;
247	int i, j;
248	unsigned long jif;
249	void __iomem *ram_mapped ;
250
251	if (turbo_searched == 1) return;
252	turbo_searched=1;
253	for (ram_addr=0xC0000; ram_addr < 0xE0000; ram_addr+=0x2000) {
254
255		__u32 intf_tbl=0;
256
257		found_turbo=1;
258		ram_mapped = ioremap((u32)ram_addr,0x1fff) ;
259		if (ram_mapped==NULL)
260 			continue ;
261		chanid=(CHANNEL_ID + ram_mapped);
262		tchanid=pcchannelid;
263		ctemp=readb(chanid) & 0x0f;
		/* fix: unmap before skipping this window, else the mapping leaks */
264		if (ctemp != *tchanid) {
			iounmap(ram_mapped);
			continue;
		}
265		for (i=2,j=1; i<=46; i=i+2,j++) {
266			if ((readb(chanid+i) & 0x0f) != tchanid[j]){
267				found_turbo=0;
268				break;
269			}
270		}
		/* fix: unmap before skipping this window, else the mapping leaks */
271		if (!found_turbo) {
			iounmap(ram_mapped);
			continue;
		}
272
273		writeb(0x90, ram_mapped+0x1E01);
274		for(i=2; i<0x0f; i++) {
275			writeb(0x00, ram_mapped+0x1E01+i);
276		}
277		writeb(0x00, ram_mapped+0x1E01);
		/* busy-wait for the adapter to settle before reading WRBR */
278		for(jif=jiffies+TR_BUSY_INTERVAL; time_before_eq(jiffies,jif););
279		intf_tbl=ntohs(readw(ram_mapped+ACA_OFFSET+ACA_RW+WRBR_EVEN));
280		if (intf_tbl) {
281#if IBMTR_DEBUG_MESSAGES
282			printk("ibmtr::find_turbo_adapters, Turbo found at "
283				"ram_addr %x\n",ram_addr);
284			printk("ibmtr::find_turbo_adapters, interface_table ");
285			for(i=0; i<6; i++) {
				/* fix: read through the mapping, not the raw address */
286				printk("%x:",readb(ram_mapped+intf_tbl+i));
287			}
288			printk("\n");
289#endif
290			turbo_io[index]=ntohs(readw(ram_mapped+intf_tbl+4));
291			turbo_irq[index]=readb(ram_mapped+intf_tbl+3);
292			outb(0, turbo_io[index] + ADAPTRESET);
293			for(jif=jiffies+TR_RST_TIME;time_before_eq(jiffies,jif););
294			outb(0, turbo_io[index] + ADAPTRESETREL);
295			index++;
			/* fix: done with this window, release the mapping */
			iounmap(ram_mapped);
296			continue;
297		}
298#if IBMTR_DEBUG_MESSAGES
299		printk("ibmtr::find_turbo_adapters, ibmtr card found at"
300			" %x but not a Turbo model\n",ram_addr);
301#endif
302		iounmap(ram_mapped) ;
303	} /* for */
	/* Fold discovered Turbo ports into the probe list, reusing a slot
	 * that already holds the same port or the first empty slot. */
304	for(i=0; i<IBMTR_MAX_ADAPTERS; i++) {
305		if(!turbo_io[i]) break;
306		for (j=0; j<IBMTR_MAX_ADAPTERS; j++) {
307			if ( iolist[j] && iolist[j] != turbo_io[i]) continue;
308			iolist[j]=turbo_io[i];
309			break;
310		}
311	}
312}
313
/* Tear down a probed adapter: pulse the hardware reset line, then (for
 * non-PCMCIA builds) release the IRQ, the PIO region, and both MMIO
 * mappings taken in ibmtr_probe1(). */
314static void ibmtr_cleanup_card(struct net_device *dev)
315{
316	if (dev->base_addr) {
317		outb(0,dev->base_addr+ADAPTRESET);
318		
319		schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */
320
321		outb(0,dev->base_addr+ADAPTRESETREL);
322	}
323
324#ifndef PCMCIA
325	free_irq(dev->irq, dev);
326	release_region(dev->base_addr, IBMTR_IO_EXTENT);
327
328	{ 
329		struct tok_info *ti = netdev_priv(dev);
330		iounmap(ti->mmio);
331		iounmap(ti->sram_virt);
332	}
333#endif		
334}
335
336/****************************************************************************
337 * ibmtr_probe(): Routine specified in the network device structure
338 * to probe for an IBM Token Ring Adapter. Routine outline:
339 * I. Interrogate hardware to determine if an adapter exists
340 * and what the speeds and feeds are
341 * II. Setup data structures to control execution based upon
342 * adapter characteristics.
343 *
344 * We expect ibmtr_probe to be called once for each device entry
345 * which references it.
346 ****************************************************************************/
347
/* Probe for an adapter.  A caller-supplied base_addr > 0x1ff is tried
 * exclusively; base_addr in (0, 0x1ff] means "don't probe"; base_addr 0
 * walks ibmtr_portlist[] (after folding in any Turbo adapters found).
 * Returns 0 on success, -ENXIO/-ENODEV otherwise. */
348static int __devinit ibmtr_probe(struct net_device *dev)
349{
350	int i;
351	int base_addr = dev->base_addr;
352
353	if (base_addr && base_addr <= 0x1ff) /* Don't probe at all. */
354		return -ENXIO;
355	if (base_addr > 0x1ff) { /* Check a single specified location.  */
356		if (!ibmtr_probe1(dev, base_addr)) return 0;
357		return -ENODEV;
358	}
359	find_turbo_adapters(ibmtr_portlist);
360	for (i = 0; ibmtr_portlist[i]; i++) {
361		int ioaddr = ibmtr_portlist[i];
362
363		if (!ibmtr_probe1(dev, ioaddr)) return 0;
364	}
365	return -ENODEV;
366}
367
/* Public entry (used by ibmtr_cs): probe the hardware, then register the
 * netdev; on registration failure undo the probe with ibmtr_cleanup_card().
 * Returns 0 on success or the probe/register error code. */
368int __devinit ibmtr_probe_card(struct net_device *dev)
369{
370	int err = ibmtr_probe(dev);
371	if (!err) {
372		err = register_netdev(dev);
373		if (err)
374			ibmtr_cleanup_card(dev);
375	}
376	return err;
377}
378
379/*****************************************************************************/
380
/* Probe one PIO address for an adapter: locate its MMIO window from the
 * PIO port, verify the ISA/MCA channel-ID signature, decode the IRQ and
 * shared-RAM geometry from adapter ROM, claim IRQ and PIO region (non-
 * PCMCIA), size the transmit/receive buffers, and finish with
 * trdev_init().  Returns 0 when a card is found, negative errno if not. */
381static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
382{
383
384	unsigned char segment, intr=0, irq=0, i, j, cardpresent=NOTOK, temp=0;
385	void __iomem * t_mmio = NULL;
386	struct tok_info *ti = netdev_priv(dev);
387	void __iomem *cd_chanid;
388	unsigned char *tchanid, ctemp;
389#ifndef PCMCIA
390	unsigned char t_irq=0;
391        unsigned long timeout;
392	static int version_printed;
393#endif
394
395	/*    Query the adapter PIO base port which will return
396	 *    indication of where MMIO was placed. We also have a
397	 *    coded interrupt number.
398	 */
399	segment = inb(PIOaddr);
400	if (segment < 0x40 || segment > 0xe0) {
401		/* Out of range values so we'll assume non-existent IO device
402		 * but this is not necessarily a problem, esp if a turbo
403		 * adapter is being used.  */
404#if IBMTR_DEBUG_MESSAGES
405		DPRINTK("ibmtr_probe1(): unhappy that inb(0x%X) == 0x%X, "
406			"Hardware Problem?\n",PIOaddr,segment);
407#endif
408		return -ENODEV;
409	}
410	/*
411	 *    Compute the linear base address of the MMIO area
412	 *    as LINUX doesn't care about segments
413	 */
414	t_mmio = ioremap(((__u32) (segment & 0xfc) << 11) + 0x80000,2048);
415	if (!t_mmio) { 
416		DPRINTK("Cannot remap mmiobase memory area") ;
417		return -ENODEV ;
418	} 
419	intr = segment & 0x03;	/* low bits is coded interrupt # */
420	if (ibmtr_debug_trace & TRC_INIT)
421		DPRINTK("PIOaddr: %4hx seg/intr: %2x mmio base: %p intr: %d\n"
422				, PIOaddr, (int) segment, t_mmio, (int) intr);
423
424	/*
425	 *    Now we will compare expected 'channelid' strings with
426	 *    what is there, to learn whether this is an ISA or MCA TR card
427	 */
428#ifdef PCMCIA
429	iounmap(t_mmio);
430	t_mmio = ti->mmio;	/*BMS to get virtual address */
431	irq = ti->irq;		/*BMS to display the irq!   */
432#endif
433	cd_chanid = (CHANNEL_ID + t_mmio);	/* for efficiency */
434	tchanid = pcchannelid;
435	cardpresent = TR_ISA;	/* try ISA */
436
437	/*    Suboptimize knowing first byte different */
438	ctemp = readb(cd_chanid) & 0x0f;
439	if (ctemp != *tchanid) {	/* NOT ISA card, try MCA */
440		tchanid = mcchannelid;
441		cardpresent = TR_MCA;
442		if (ctemp != *tchanid)	/* Neither ISA nor MCA */
443			cardpresent = NOTOK;
444	}
445	if (cardpresent != NOTOK) {
446		/*       Know presumed type, try rest of ID */
447		for (i = 2, j = 1; i <= 46; i = i + 2, j++) {
448			if( (readb(cd_chanid+i)&0x0f) == tchanid[j]) continue;
449			/* match failed, not TR card */
450			cardpresent = NOTOK;
451			break;
452		}
453	}
454	/* 
455	 *    If we have an ISA board check for the ISA P&P version,
456	 *    as it has different IRQ settings 
457	 */
458	if (cardpresent == TR_ISA && (readb(AIPFID + t_mmio) == 0x0e))
459		cardpresent = TR_ISAPNP;
460	if (cardpresent == NOTOK) {	/* "channel_id" did not match, report */
461		if (!(ibmtr_debug_trace & TRC_INIT)) {
462#ifndef PCMCIA
463			iounmap(t_mmio);
464#endif
465			return -ENODEV;
466		}
467		DPRINTK( "Channel ID string not found for PIOaddr: %4hx\n",
468			PIOaddr);
469		DPRINTK("Expected for ISA: ");
470		PrtChanID(pcchannelid, 1);
471		DPRINTK("           found: ");
472/* BMS Note that this can be misleading, when hardware is flaky, because you
473   are reading it a second time here. So with my flaky hardware, I'll see my-
474   self in this block, with the HW ID matching the ISA ID exactly! */
475		HWPrtChanID(cd_chanid, 2);
476		DPRINTK("Expected for MCA: ");
477		PrtChanID(mcchannelid, 1);
478	}
479	/* Now, setup some of the pl0 buffers for this driver.. */
480	/* If called from PCMCIA, it is already set up, so no need to 
481	   waste the memory, just use the existing structure */
482#ifndef PCMCIA
483	ti->mmio = t_mmio;
484	for (i = 0; i < IBMTR_MAX_ADAPTERS; i++) {
485		if (turbo_io[i] != PIOaddr)
486			continue;
487#if IBMTR_DEBUG_MESSAGES 
488		printk("ibmtr::tr_probe1, setting PIOaddr %x to Turbo\n",
489		       PIOaddr);
490#endif
491		ti->turbo = 1;
492		t_irq = turbo_irq[i];
493	}
494#endif /* !PCMCIA */
495	ti->readlog_pending = 0;
496	init_waitqueue_head(&ti->wait_for_reset);
497
498	/* if PCMCIA, the card can be recognized as either TR_ISA or TR_ISAPNP
499	 * depending which card is inserted.	*/
500	
501#ifndef PCMCIA
502	switch (cardpresent) {
503	case TR_ISA:
504		if (intr == 0) irq = 9;	/* irq2 really is irq9 */
505		if (intr == 1) irq = 3;
506		if (intr == 2) irq = 6;
507		if (intr == 3) irq = 7;
508		ti->adapter_int_enable = PIOaddr + ADAPTINTREL;
509		break;
510	case TR_MCA:
511		if (intr == 0) irq = 9;
512		if (intr == 1) irq = 3;
513		if (intr == 2) irq = 10;
514		if (intr == 3) irq = 11;
515		ti->global_int_enable = 0;
516		ti->adapter_int_enable = 0;
517		ti->sram_phys=(__u32)(inb(PIOaddr+ADAPTRESETREL) & 0xfe) << 12;
518		break;
519	case TR_ISAPNP:
520		if (!t_irq) {
521			if (intr == 0) irq = 9;
522			if (intr == 1) irq = 3;
523			if (intr == 2) irq = 10;
524			if (intr == 3) irq = 11;
525		} else
526			irq=t_irq;
527		timeout = jiffies + TR_SPIN_INTERVAL;
528		while (!readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)){
529			if (!time_after(jiffies, timeout)) continue;
530			DPRINTK( "Hardware timeout during initialization.\n");
531			iounmap(t_mmio);
532			return -ENODEV;
533		}
534		ti->sram_phys =
535		     ((__u32)readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_EVEN)<<12);
536		ti->adapter_int_enable = PIOaddr + ADAPTINTREL;
537		break;
538	} /*end switch (cardpresent) */
539#endif	/*not PCMCIA */
540
541	if (ibmtr_debug_trace & TRC_INIT) {	/* just report int */
542		DPRINTK("irq=%d", irq);
543		printk(", sram_phys=0x%x", ti->sram_phys);
544		if(ibmtr_debug_trace&TRC_INITV){ /* full chat in verbose only */
545			DPRINTK(", ti->mmio=%p", ti->mmio);
546			printk(", segment=%02X", segment);
547		}
548		printk(".\n");
549	}
550
551	/* Get hw address of token ring card */
	/* Each ROM byte holds one nibble of the MAC; pairs of nibbles are
	 * recombined into dev_addr bytes below. */
552	j = 0;
553	for (i = 0; i < 0x18; i = i + 2) {
554		/* technical reference states to do this */
555		temp = readb(ti->mmio + AIP + i) & 0x0f;
556		ti->hw_address[j] = temp;
557		if (j & 1)
558			dev->dev_addr[(j / 2)] =
559				ti->hw_address[j]+ (ti->hw_address[j - 1] << 4);
560		++j;
561	}
562	/* get Adapter type:  'F' = Adapter/A, 'E' = 16/4 Adapter II,... */
563	ti->adapter_type = readb(ti->mmio + AIPADAPTYPE);
564
565	/* get Data Rate:  F=4Mb, E=16Mb, D=4Mb & 16Mb ?? */
566	ti->data_rate = readb(ti->mmio + AIPDATARATE);
567
568	/* Get Early Token Release support?: F=no, E=4Mb, D=16Mb, C=4&16Mb */
569	ti->token_release = readb(ti->mmio + AIPEARLYTOKEN);
570
571	/* How much shared RAM is on adapter ? */
572	if (ti->turbo) {
573		ti->avail_shared_ram=127;
574	} else {
575		ti->avail_shared_ram = get_sram_size(ti);/*in 512 byte units */
576	}
577	/* We need to set or do a bunch of work here based on previous results*/
578	/* Support paging?  What sizes?:  F=no, E=16k, D=32k, C=16 & 32k */
579	ti->shared_ram_paging = readb(ti->mmio + AIPSHRAMPAGE);
580
581	/* Available DHB  4Mb size:   F=2048, E=4096, D=4464 */
582	switch (readb(ti->mmio + AIP4MBDHB)) {
583	case 0xe: ti->dhb_size4mb = 4096; break;
584	case 0xd: ti->dhb_size4mb = 4464; break;
585	default:  ti->dhb_size4mb = 2048; break;
586	}
587
588	/* Available DHB 16Mb size:  F=2048, E=4096, D=8192, C=16384, B=17960 */
589	switch (readb(ti->mmio + AIP16MBDHB)) {
590	case 0xe: ti->dhb_size16mb = 4096; break;
591	case 0xd: ti->dhb_size16mb = 8192; break;
592	case 0xc: ti->dhb_size16mb = 16384; break;
593	case 0xb: ti->dhb_size16mb = 17960; break;
594	default:  ti->dhb_size16mb = 2048; break;
595	}
596
597	/* We must figure out how much shared memory space this adapter
598	 * will occupy so that if there are two adapters we can fit both
599	 * in.  Given a choice, we will limit this adapter to 32K.  The
600	 * maximum space we will use for two adapters is 64K so if the
601	 * adapter we are working on demands 64K (it also doesn't support
602	 * paging), then only one adapter can be supported.  
603	 */
604
605	/*
606	 *    determine how much of total RAM is mapped into PC space 
607	 */
608	ti->mapped_ram_size= /*sixteen to onehundredtwentyeight 512byte blocks*/
609	    1<< ((readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03) + 4);
610	ti->page_mask = 0;
611	if (ti->turbo)  ti->page_mask=0xf0;
612	else if (ti->shared_ram_paging == 0xf);  /* No paging in adapter */
613	else {
614#ifdef ENABLE_PAGING
615		unsigned char pg_size = 0;
616		/* BMS:   page size: PCMCIA, use configuration register;
617		   ISAPNP, use LANAIDC config tool from www.ibm.com  */
618		switch (ti->shared_ram_paging) {
619		case 0xf:
620			break;
621		case 0xe:
622			ti->page_mask = (ti->mapped_ram_size == 32) ? 0xc0 : 0;
623			pg_size = 32;	/* 16KB page size */
624			break;
625		case 0xd:
626			ti->page_mask = (ti->mapped_ram_size == 64) ? 0x80 : 0;
627			pg_size = 64;	/* 32KB page size */
628			break;
629		case 0xc:
630			switch (ti->mapped_ram_size) {
631			case 32:
632				ti->page_mask = 0xc0;
633				pg_size = 32;
634				break;
635			case 64:
636				ti->page_mask = 0x80;
637				pg_size = 64;
638				break;
639			}
640			break;
641		default:
642			DPRINTK("Unknown shared ram paging info %01X\n",
643				ti->shared_ram_paging);
644			iounmap(t_mmio); 
645			return -ENODEV;
646			break;
647		} /*end switch shared_ram_paging */
648
649		if (ibmtr_debug_trace & TRC_INIT)
650			DPRINTK("Shared RAM paging code: %02X, "
651				"mapped RAM size: %dK, shared RAM size: %dK, "
652				"page mask: %02X\n:",
653				ti->shared_ram_paging, ti->mapped_ram_size / 2,
654				ti->avail_shared_ram / 2, ti->page_mask);
655#endif	/*ENABLE_PAGING */
656	}
657
658#ifndef PCMCIA
659	/* finish figuring the shared RAM address */
660	if (cardpresent == TR_ISA) {
661		static const __u32 ram_bndry_mask[] = {
662			0xffffe000, 0xffffc000, 0xffff8000, 0xffff0000
663		};
664		__u32 new_base, rrr_32, chk_base, rbm;
665
		/* Round ibmtr_mem_base up to the adapter's RAM boundary and
		 * check the window fits under the per-driver budget. */
666		rrr_32=readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03;
667		rbm = ram_bndry_mask[rrr_32];
668		new_base = (ibmtr_mem_base + (~rbm)) & rbm;/* up to boundary */
669		chk_base = new_base + (ti->mapped_ram_size << 9);
670		if (chk_base > (ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE)) {
671			DPRINTK("Shared RAM for this adapter (%05x) exceeds "
672				"driver limit (%05x), adapter not started.\n",
673				chk_base, ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE);
674			iounmap(t_mmio);
675			return -ENODEV;
676		} else {	/* seems cool, record what we have figured out */
677			ti->sram_base = new_base >> 12;
678			ibmtr_mem_base = chk_base;
679		}
680	}
681	else  ti->sram_base = ti->sram_phys >> 12;
682
683	/* The PCMCIA has already got the interrupt line and the io port, 
684	   so no chance of anybody else getting it - MLP */
685	if (request_irq(dev->irq = irq, tok_interrupt, 0, "ibmtr", dev) != 0) {
686		DPRINTK("Could not grab irq %d.  Halting Token Ring driver.\n",
687					irq);
688		iounmap(t_mmio);
689		return -ENODEV;
690	}
691	/*?? Now, allocate some of the PIO PORTs for this driver.. */
692	/* record PIOaddr range as busy */
693	if (!request_region(PIOaddr, IBMTR_IO_EXTENT, "ibmtr")) {
694		DPRINTK("Could not grab PIO range. Halting driver.\n");
695		free_irq(dev->irq, dev);
696		iounmap(t_mmio);
697		return -EBUSY;
698	}
699
700	if (!version_printed++) {
701		printk(version);
702	}
703#endif /* !PCMCIA */
704	DPRINTK("%s %s found\n",
705		channel_def[cardpresent - 1], adapter_def(ti->adapter_type));
706	DPRINTK("using irq %d, PIOaddr %hx, %dK shared RAM.\n",
707			irq, PIOaddr, ti->mapped_ram_size / 2);
708	DPRINTK("Hardware address : %pM\n", dev->dev_addr);
709	if (ti->page_mask)
710		DPRINTK("Shared RAM paging enabled. "
711			"Page size: %uK Shared Ram size %dK\n",
712			((ti->page_mask^0xff)+1) >>2, ti->avail_shared_ram / 2);
713	else
714		DPRINTK("Shared RAM paging disabled. ti->page_mask %x\n",
715				ti->page_mask);
716
717	/* Calculate the maximum DHB we can use */
718	/* two cases where avail_shared_ram doesn't equal mapped_ram_size:
719	    1. avail_shared_ram is 127 but mapped_ram_size is 128 (typical)
720	    2. user has configured adapter for less than avail_shared_ram
721	       but is not using paging (she should use paging, I believe)
722	*/
723	if (!ti->page_mask) {
724		ti->avail_shared_ram=
725				min(ti->mapped_ram_size,ti->avail_shared_ram);
726	}
727
	/* Pick DHB size and receive-buffer count/length per shared-RAM size
	 * (units of 512 bytes), separately for 4Mb and 16Mb ring speed. */
728	switch (ti->avail_shared_ram) {
729	case 16:		/* 8KB shared RAM */
730		ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)2048);
731		ti->rbuf_len4 = 1032;
732		ti->rbuf_cnt4=2;
733		ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)2048);
734		ti->rbuf_len16 = 1032;
735		ti->rbuf_cnt16=2;
736		break;
737	case 32:		/* 16KB shared RAM */
738		ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
739		ti->rbuf_len4 = 1032;
740		ti->rbuf_cnt4=4;
741		ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)4096);
742		ti->rbuf_len16 = 1032;	/*1024 usable */
743		ti->rbuf_cnt16=4;
744		break;
745	case 64:		/* 32KB shared RAM */
746		ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
747		ti->rbuf_len4 = 1032;
748		ti->rbuf_cnt4=6;
749		ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)10240);
750		ti->rbuf_len16 = 1032;
751		ti->rbuf_cnt16=6;
752		break;
753	case 127:		/* 63.5KB shared RAM */
754		ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
755		ti->rbuf_len4 = 1032;
756		ti->rbuf_cnt4=6;
757		ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)16384);
758		ti->rbuf_len16 = 1032;
759		ti->rbuf_cnt16=16;
760		break;
761	case 128:		/* 64KB shared RAM */
762		ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
763		ti->rbuf_len4 = 1032;
764		ti->rbuf_cnt4=6;
765		ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)17960);
766		ti->rbuf_len16 = 1032;
767		ti->rbuf_cnt16=16;
768		break;
769	default:
770		ti->dhb_size4mb = 2048;
771		ti->rbuf_len4 = 1032;
772		ti->rbuf_cnt4=2;
773		ti->dhb_size16mb = 2048;
774		ti->rbuf_len16 = 1032;
775		ti->rbuf_cnt16=2;
776		break;
777	}
778	/* this formula is not smart enough for the paging case
779	ti->rbuf_cnt<x> = (ti->avail_shared_ram * BLOCKSZ - ADAPT_PRIVATE -
780			ARBLENGTH - SSBLENGTH - DLC_MAX_SAP * SAPLENGTH -
781			DLC_MAX_STA * STALENGTH - ti->dhb_size<x>mb * NUM_DHB -
782			SRBLENGTH - ASBLENGTH) / ti->rbuf_len<x>;
783	*/
784	ti->maxmtu16 = (ti->rbuf_len16 - 8) * ti->rbuf_cnt16 - TR_HLEN;
785	ti->maxmtu4 = (ti->rbuf_len4 - 8) * ti->rbuf_cnt4 - TR_HLEN;
786	/*BMS assuming 18 bytes of Routing Information  (usually works) */
787	DPRINTK("Maximum Receive Internet Protocol MTU 16Mbps: %d, 4Mbps: %d\n",
788						     ti->maxmtu16, ti->maxmtu4);
789
790	dev->base_addr = PIOaddr;	/* set the value for device */
791	dev->mem_start = ti->sram_base << 12;
792	dev->mem_end = dev->mem_start + (ti->mapped_ram_size << 9) - 1;
793	trdev_init(dev);
794	return 0;   /* Return 0 to indicate we have found a Token Ring card. */
795}				/*ibmtr_probe1() */
796
797/*****************************************************************************/
798
799/* query the adapter for the size of shared RAM */
800/* the function returns the RAM size in units of 512 bytes */
801
/* Query the adapter ROM for the size of on-board shared RAM.
 * Returns the size in units of 512 bytes (16..128). */
802static unsigned char __devinit get_sram_size(struct tok_info *adapt_info)
803{
804	unsigned char avail_sram_code;
805	static unsigned char size_code[] = { 0, 16, 32, 64, 127, 128 };
806	/* Adapter gives
807	   'F' -- use RRR bits 3,2
808	   'E' -- 8kb   'D' -- 16kb
809	   'C' -- 32kb  'A' -- 64KB
810	   'B' - 64KB less 512 bytes at top
811	   (WARNING ... must zero top bytes in INIT */
812
813	avail_sram_code = 0xf - readb(adapt_info->mmio + AIPAVAILSHRAM);
814	if (avail_sram_code) return size_code[avail_sram_code];
815	else		/* for code 'F', must compute size from RRR(3,2) bits */
816		return 1 <<
817		 ((readb(adapt_info->mmio+ACA_OFFSET+ACA_RW+RRR_ODD)>>2&3)+4);
818}
819
820/*****************************************************************************/
821
/* net_device callbacks installed by trdev_init(). */
822static const struct net_device_ops trdev_netdev_ops = {
823	.ndo_open		= tok_open,
824	.ndo_stop		= tok_close,
825	.ndo_start_xmit		= tok_send_packet,
826	.ndo_set_multicast_list = tok_set_multicast_list,
827	.ndo_change_mtu		= ibmtr_change_mtu,
828};
829
/* Final per-device setup after a successful probe: select the SRB page,
 * clear the open-failure flag, and install the netdev ops.  Always 0. */
830static int __devinit trdev_init(struct net_device *dev)
831{
832	struct tok_info *ti = netdev_priv(dev);
833
834	SET_PAGE(ti->srb_page);
835        ti->open_failure = NO   ;
836	dev->netdev_ops = &trdev_netdev_ops;
837
838	return 0;
839}
840
841/*****************************************************************************/
842
/* Hard-reset the adapter and wait (up to 4s) for its first interrupt,
 * which tok_interrupt() handles specially via do_tok_int == FIRST_INT.
 * Returns 0 on success, -EAGAIN if the reset interrupt never arrives. */
843static int tok_init_card(struct net_device *dev)
844{
845	struct tok_info *ti;
846	short PIOaddr;
847	unsigned long i;
848
849	PIOaddr = dev->base_addr;
850	ti = netdev_priv(dev);
851	/* Special processing for first interrupt after reset */
852	ti->do_tok_int = FIRST_INT;
853	/* Reset adapter */
854	writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
855	outb(0, PIOaddr + ADAPTRESET);
856
857	schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */
858
859	outb(0, PIOaddr + ADAPTRESETREL);
860#ifdef ENABLE_PAGING
861	if (ti->page_mask)
862		writeb(SRPR_ENABLE_PAGING,ti->mmio+ACA_OFFSET+ACA_RW+SRPR_EVEN);
863#endif
864	writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
865	i = sleep_on_timeout(&ti->wait_for_reset, 4 * HZ);
866	return i? 0 : -EAGAIN;
867}
868
869/*****************************************************************************/
/* ndo_open: reset the card, then repeatedly issue DIR.OPEN.ADAPTER and
 * wait for both adapter and SAP to report OPEN, retrying every
 * TR_RETRY_INTERVAL until success or a signal interrupts the sleep.
 * Returns 0 once the queue is started, -EAGAIN otherwise. */
870static int tok_open(struct net_device *dev)
871{
872	struct tok_info *ti = netdev_priv(dev);
873	int i;
874
875	/*the case we were left in a failure state during a previous open */
876	if (ti->open_failure == YES) {
877		DPRINTK("Last time you were disconnected, how about now?\n");
878		printk("You can't insert with an ICS connector half-cocked.\n");
879	}
880
881	ti->open_status = CLOSED; /* CLOSED or OPEN      */
882	ti->sap_status  = CLOSED; /* CLOSED or OPEN      */
883	ti->open_failure =   NO; /* NO     or YES       */
884	ti->open_mode  = MANUAL; /* MANUAL or AUTOMATIC */
885
886	ti->sram_phys &= ~1; /* to reverse what we do in tok_close */
887	/* init the spinlock */
888	spin_lock_init(&ti->lock);
889	init_timer(&ti->tr_timer);
890	
891	i = tok_init_card(dev);
892	if (i) return i;
893
894	while (1){
895		tok_open_adapter((unsigned long) dev);
896		i= interruptible_sleep_on_timeout(&ti->wait_for_reset, 25 * HZ);
897		/* sig catch: estimate opening adapter takes more than .5 sec*/
898		if (i>(245*HZ)/10) break; /* fancier than if (i==25*HZ) */
899		if (i==0) break;
900		if (ti->open_status == OPEN && ti->sap_status==OPEN) {
901			netif_start_queue(dev);
902			DPRINTK("Adapter is up and running\n");
903			return 0;
904		}
905		i=schedule_timeout_interruptible(TR_RETRY_INTERVAL);
906							/* wait 30 seconds */
907		if(i!=0) break; /*prob. a signal, like the i>24*HZ case above */
908	}
909	outb(0, dev->base_addr + ADAPTRESET);/* kill pending interrupts*/
910	DPRINTK("TERMINATED via signal\n");	/*BMS useful */
911	return -EAGAIN;
912}
913
914/*****************************************************************************/
915
916#define COMMAND_OFST 0
917#define OPEN_OPTIONS_OFST 8
918#define NUM_RCV_BUF_OFST 24
919#define RCV_BUF_LEN_OFST 26
920#define DHB_LENGTH_OFST 28
921#define NUM_DHB_OFST 30
922#define DLC_MAX_SAP_OFST 32
923#define DLC_MAX_STA_OFST 33
924
/* Build a DIR.OPEN.ADAPTER SRB in shared RAM — buffer geometry chosen by
 * ring speed — then ring the adapter's CMD_IN_SRB doorbell.  Completion
 * arrives via tok_interrupt(); callers sleep on wait_for_reset. */
925static void tok_open_adapter(unsigned long dev_addr)
926{
927	struct net_device *dev = (struct net_device *) dev_addr;
928	struct tok_info *ti;
929	int i;
930
931	ti = netdev_priv(dev);
932	SET_PAGE(ti->init_srb_page); 
933	writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
934	for (i = 0; i < sizeof(struct dir_open_adapter); i++)
935		writeb(0, ti->init_srb + i);
936	writeb(DIR_OPEN_ADAPTER, ti->init_srb + COMMAND_OFST);
937	writew(htons(OPEN_PASS_BCON_MAC), ti->init_srb + OPEN_OPTIONS_OFST);
938	if (ti->ring_speed == 16) {
939		writew(htons(ti->dhb_size16mb), ti->init_srb + DHB_LENGTH_OFST);
940		writew(htons(ti->rbuf_cnt16), ti->init_srb + NUM_RCV_BUF_OFST);
941		writew(htons(ti->rbuf_len16), ti->init_srb + RCV_BUF_LEN_OFST);
942	} else {
943		writew(htons(ti->dhb_size4mb), ti->init_srb + DHB_LENGTH_OFST);
944		writew(htons(ti->rbuf_cnt4), ti->init_srb + NUM_RCV_BUF_OFST);
945		writew(htons(ti->rbuf_len4), ti->init_srb + RCV_BUF_LEN_OFST);
946	}
947	writeb(NUM_DHB, /* always 2 */ ti->init_srb + NUM_DHB_OFST);
948	writeb(DLC_MAX_SAP, ti->init_srb + DLC_MAX_SAP_OFST);
949	writeb(DLC_MAX_STA, ti->init_srb + DLC_MAX_STA_OFST);
950	ti->srb = ti->init_srb;	/* We use this one in the interrupt handler */
951	ti->srb_page = ti->init_srb_page;
952	DPRINTK("Opening adapter: Xmit bfrs: %d X %d, Rcv bfrs: %d X %d\n",
953		readb(ti->init_srb + NUM_DHB_OFST),
954		ntohs(readw(ti->init_srb + DHB_LENGTH_OFST)),
955		ntohs(readw(ti->init_srb + NUM_RCV_BUF_OFST)),
956		ntohs(readw(ti->init_srb + RCV_BUF_LEN_OFST)));
957	writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
958	writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
959}
960
961/*****************************************************************************/
962
/* Issue a DLC_OPEN_SAP command for the given SAP value (the driver
 * opens EXTENDED_SAP from dir_open_adapter()).  The response is
 * handled in tok_interrupt()'s SRB section, which records the
 * returned station id and marks sap_status OPEN.
 */
static void open_sap(unsigned char type, struct net_device *dev)
{
	int i;
	struct tok_info *ti = netdev_priv(dev);

	SET_PAGE(ti->srb_page);
	/* clear the SRB before filling in the request */
	for (i = 0; i < sizeof(struct dlc_open_sap); i++)
		writeb(0, ti->srb + i);

/* Byte offsets within the DLC_OPEN_SAP request block */
#define MAX_I_FIELD_OFST        14
#define SAP_VALUE_OFST          16
#define SAP_OPTIONS_OFST        17
#define STATION_COUNT_OFST      18

	writeb(DLC_OPEN_SAP, ti->srb + COMMAND_OFST);
	writew(htons(MAX_I_FIELD), ti->srb + MAX_I_FIELD_OFST);
	writeb(SAP_OPEN_IND_SAP | SAP_OPEN_PRIORITY, ti->srb+ SAP_OPTIONS_OFST);
	writeb(SAP_OPEN_STATION_CNT, ti->srb + STATION_COUNT_OFST);
	writeb(type, ti->srb + SAP_VALUE_OFST);
	/* doorbell: command is in the SRB */
	writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
}
984
985
986/*****************************************************************************/
987
/* Program the adapter's 4-byte token-ring functional address as the
 * bitwise OR of bytes 2..5 of every multicast address on the device
 * (token ring maps multicast onto functional-address bits).
 */
static void tok_set_multicast_list(struct net_device *dev)
{
	struct tok_info *ti = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	unsigned char address[4];

	int i;

	/*BMS the next line is CRUCIAL or you may be sad when you */
	/*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/
	if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
	address[0] = address[1] = address[2] = address[3] = 0;
	/* OR all multicast addresses into one functional-address mask */
	netdev_for_each_mc_addr(ha, dev) {
		address[0] |= ha->addr[2];
		address[1] |= ha->addr[3];
		address[2] |= ha->addr[4];
		address[3] |= ha->addr[5];
	}
	SET_PAGE(ti->srb_page);
	for (i = 0; i < sizeof(struct srb_set_funct_addr); i++)
		writeb(0, ti->srb + i);

/* Byte offset of the functional address field in the request block */
#define FUNCT_ADDRESS_OFST 6

	writeb(DIR_SET_FUNC_ADDR, ti->srb + COMMAND_OFST);
	for (i = 0; i < 4; i++)
		writeb(address[i], ti->srb + FUNCT_ADDRESS_OFST + i);
	writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
#if TR_VERBOSE
	DPRINTK("Setting functional address: ");
	for (i=0;i<4;i++) printk("%02X ", address[i]);
	printk("\n");
#endif
}
1022
1023/*****************************************************************************/
1024
#define STATION_ID_OFST 4

/* Start transmission of one skb.  The data is NOT copied here: we only
 * save the skb in ti->current_skb, stop the queue (single outstanding
 * frame) and queue an XMIT_UI_FRAME command.  The adapter later raises
 * an ARB XMIT_DATA_REQ interrupt and tr_tx() copies the payload into
 * the adapter-supplied buffer and restarts the queue.
 */
static netdev_tx_t tok_send_packet(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct tok_info *ti;
	unsigned long flags;
	ti = netdev_priv(dev);

	netif_stop_queue(dev);

	/* lock against other CPUs */
	spin_lock_irqsave(&(ti->lock), flags);

	/* Save skb; we'll need it when the adapter asks for the data */
	ti->current_skb = skb;
	SET_PAGE(ti->srb_page);
	writeb(XMIT_UI_FRAME, ti->srb + COMMAND_OFST);
	/* station id was captured raw from the DLC_OPEN_SAP response,
	 * so it is written back without byte-swapping */
	writew(ti->exsap_station_id, ti->srb + STATION_ID_OFST);
	writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
	spin_unlock_irqrestore(&(ti->lock), flags);
	return NETDEV_TX_OK;
}
1048
1049/*****************************************************************************/
1050
/* Bring the interface down: kill the retry timer, reset the adapter
 * and set bit 0 of sram_phys so tok_interrupt() ignores any late IRQ
 * (important for PCMCIA hot unplug).  Always returns 0.
 */
static int tok_close(struct net_device *dev)
{
	struct tok_info *ti = netdev_priv(dev);

	/* Important for PCMCIA hot unplug, otherwise, we'll pull the card, */
	/* unloading the module from memory, and then if a timer pops, ouch */
	del_timer_sync(&ti->tr_timer);
	outb(0, dev->base_addr + ADAPTRESET);
	/* flag "card gone" for the interrupt handler; cleared in tok_open */
	ti->sram_phys |= 1;
	ti->open_status = CLOSED;

	netif_stop_queue(dev);
	DPRINTK("Adapter is closed.\n");
	return 0;
}
1066
1067/*****************************************************************************/
1068
/* Byte offsets within the DIR_OPEN_ADAPTER response SRB */
#define RETCODE_OFST		2
#define OPEN_ERROR_CODE_OFST	6
#define ASB_ADDRESS_OFST	8
#define SRB_ADDRESS_OFST	10
#define ARB_ADDRESS_OFST	12
#define SSB_ADDRESS_OFST	14

/* Decode tables for the open-error code: high nibble selects the
 * phase, low nibble the error (see dir_open_adapter()) */
static char *printphase[]= {"Lobe media test","Physical insertion",
	"Address verification","Roll call poll","Request Parameters"};
static char *printerror[]={"Function failure","Signal loss","Reserved",
	"Frequency error","Timeout","Ring failure","Ring beaconing",
	"Duplicate node address",
	"Parameter request-retry count exceeded","Remove received",
	"IMPL force received","Duplicate modifier",
	"No monitor detected","Monitor contention failed for RPL"};
1084
1085static void __iomem *map_address(struct tok_info *ti, unsigned index, __u8 *page)
1086{
1087 if (ti->page_mask) {
1088 *page = (index >> 8) & ti->page_mask;
1089 index &= ~(ti->page_mask << 8);
1090 }
1091 return ti->sram_virt + index;
1092}
1093
/* Handle the SRB response to DIR_OPEN_ADAPTER (called from
 * tok_interrupt).  On success, record the adapter-assigned SRB/SSB/
 * ARB/ASB mailbox addresses and open the extended SAP.  On failure,
 * decode the error, then either retry via the timer (AUTOMATIC mode),
 * wake the sleeping opener (MANUAL mode), or give up entirely when
 * open_action was set to FAIL.
 */
static void dir_open_adapter (struct net_device *dev)
{
	struct tok_info *ti = netdev_priv(dev);
	unsigned char ret_code;
	__u16 err;

	/* the adapter tells us where its four mailboxes live */
	ti->srb = map_address(ti,
		ntohs(readw(ti->init_srb + SRB_ADDRESS_OFST)),
		&ti->srb_page);
	ti->ssb = map_address(ti,
		ntohs(readw(ti->init_srb + SSB_ADDRESS_OFST)),
		&ti->ssb_page);
	ti->arb = map_address(ti,
		ntohs(readw(ti->init_srb + ARB_ADDRESS_OFST)),
		&ti->arb_page);
	ti->asb = map_address(ti,
		ntohs(readw(ti->init_srb + ASB_ADDRESS_OFST)),
		&ti->asb_page);
	ti->current_skb = NULL;
	ret_code = readb(ti->init_srb + RETCODE_OFST);
	err = ntohs(readw(ti->init_srb + OPEN_ERROR_CODE_OFST));
	if (!ret_code) {
		ti->open_status = OPEN; /* TR adapter is now available */
		if (ti->open_mode == AUTOMATIC) {
			DPRINTK("Adapter reopened.\n");
		}
		writeb(~SRB_RESP_INT, ti->mmio+ACA_OFFSET+ACA_RESET+ISRP_ODD);
		open_sap(EXTENDED_SAP, dev);
		return;
	}
	ti->open_failure = YES;
	/* ret_code 7 means the open itself failed; decode the error word */
	if (ret_code == 7){
		if (err == 0x24) {
			/* speed mismatch; retryable only with auto speed-save */
			if (!ti->auto_speedsave) {
				DPRINTK("Open failed: Adapter speed must match "
					"ring speed if Automatic Ring Speed Save is "
					"disabled.\n");
				ti->open_action = FAIL;
			}else
				DPRINTK("Retrying open to adjust to "
					"ring speed, ");
		} else if (err == 0x2d) {
			DPRINTK("Physical Insertion: No Monitor Detected, ");
			printk("retrying after %ds delay...\n",
					TR_RETRY_INTERVAL/HZ);
		} else if (err == 0x11) {
			DPRINTK("Lobe Media Function Failure (0x11), ");
			printk(" retrying after %ds delay...\n",
					TR_RETRY_INTERVAL/HZ);
		} else {
			/* generic decode: phase in high nibble, error in low */
			char **prphase = printphase;
			char **prerror = printerror;
			int pnr = err / 16 - 1;
			int enr = err % 16 - 1;
			DPRINTK("TR Adapter misc open failure, error code = ");
			if (pnr < 0 || pnr >= ARRAY_SIZE(printphase) ||
					enr < 0 ||
					enr >= ARRAY_SIZE(printerror))
				printk("0x%x, invalid Phase/Error.", err);
			else
				printk("0x%x, Phase: %s, Error: %s\n", err,
						prphase[pnr], prerror[enr]);
			printk(" retrying after %ds delay...\n",
					TR_RETRY_INTERVAL/HZ);
		}
	} else DPRINTK("open failed: ret_code = %02X..., ", ret_code);
	if (ti->open_action != FAIL) {
		if (ti->open_mode==AUTOMATIC){
			ti->open_action = REOPEN;
			ibmtr_reset_timer(&(ti->tr_timer), dev);
			return;
		}
		wake_up(&ti->wait_for_reset);
		return;
	}
	DPRINTK("FAILURE, CAPUT\n");
}
1171
1172/******************************************************************************/
1173
1174static irqreturn_t tok_interrupt(int irq, void *dev_id)
1175{
1176 unsigned char status;
1177 /* unsigned char status_even ; */
1178 struct tok_info *ti;
1179 struct net_device *dev;
1180#ifdef ENABLE_PAGING
1181 unsigned char save_srpr;
1182#endif
1183
1184 dev = dev_id;
1185#if TR_VERBOSE
1186 DPRINTK("Int from tok_driver, dev : %p irq%d\n", dev,irq);
1187#endif
1188 ti = netdev_priv(dev);
1189 if (ti->sram_phys & 1)
1190 return IRQ_NONE; /* PCMCIA card extraction flag */
1191 spin_lock(&(ti->lock));
1192#ifdef ENABLE_PAGING
1193 save_srpr = readb(ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
1194#endif
1195
1196 /* Disable interrupts till processing is finished */
1197 writeb((~INT_ENABLE), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
1198
1199 /* Reset interrupt for ISA boards */
1200 if (ti->adapter_int_enable)
1201 outb(0, ti->adapter_int_enable);
1202 else /* used for PCMCIA cards */
1203 outb(0, ti->global_int_enable);
1204 if (ti->do_tok_int == FIRST_INT){
1205 initial_tok_int(dev);
1206#ifdef ENABLE_PAGING
1207 writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
1208#endif
1209 spin_unlock(&(ti->lock));
1210 return IRQ_HANDLED;
1211 }
1212 /* Begin interrupt handler HERE inline to avoid the extra
1213 levels of logic and call depth for the original solution. */
1214 status = readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_ODD);
1215 /*BMSstatus_even = readb (ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) */
1216 /*BMSdebugprintk("tok_interrupt: ISRP_ODD = 0x%x ISRP_EVEN = 0x%x\n", */
1217 /*BMS status,status_even); */
1218
1219 if (status & ADAP_CHK_INT) {
1220 int i;
1221 void __iomem *check_reason;
1222 __u8 check_reason_page = 0;
1223 check_reason = map_address(ti,
1224 ntohs(readw(ti->mmio+ ACA_OFFSET+ACA_RW + WWCR_EVEN)),
1225 &check_reason_page);
1226 SET_PAGE(check_reason_page);
1227
1228 DPRINTK("Adapter check interrupt\n");
1229 DPRINTK("8 reason bytes follow: ");
1230 for (i = 0; i < 8; i++, check_reason++)
1231 printk("%02X ", (int) readb(check_reason));
1232 printk("\n");
1233 writeb(~ADAP_CHK_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
1234 status = readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRA_EVEN);
1235 DPRINTK("ISRA_EVEN == 0x02%x\n",status);
1236 ti->open_status = CLOSED;
1237 ti->sap_status = CLOSED;
1238 ti->open_mode = AUTOMATIC;
1239 netif_carrier_off(dev);
1240 netif_stop_queue(dev);
1241 ti->open_action = RESTART;
1242 outb(0, dev->base_addr + ADAPTRESET);
1243 ibmtr_reset_timer(&(ti->tr_timer), dev);/*BMS try to reopen*/
1244 spin_unlock(&(ti->lock));
1245 return IRQ_HANDLED;
1246 }
1247 if (readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN)
1248 & (TCR_INT | ERR_INT | ACCESS_INT)) {
1249 DPRINTK("adapter error: ISRP_EVEN : %02x\n",
1250 (int)readb(ti->mmio+ ACA_OFFSET + ACA_RW + ISRP_EVEN));
1251 writeb(~(TCR_INT | ERR_INT | ACCESS_INT),
1252 ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
1253 status= readb(ti->mmio+ ACA_OFFSET + ACA_RW + ISRA_EVEN);/*BMS*/
1254 DPRINTK("ISRA_EVEN == 0x02%x\n",status);/*BMS*/
1255 writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
1256#ifdef ENABLE_PAGING
1257 writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
1258#endif
1259 spin_unlock(&(ti->lock));
1260 return IRQ_HANDLED;
1261 }
1262 if (status & SRB_RESP_INT) { /* SRB response */
1263 SET_PAGE(ti->srb_page);
1264#if TR_VERBOSE
1265 DPRINTK("SRB resp: cmd=%02X rsp=%02X\n",
1266 readb(ti->srb), readb(ti->srb + RETCODE_OFST));
1267#endif
1268 switch (readb(ti->srb)) { /* SRB command check */
1269 case XMIT_DIR_FRAME:{
1270 unsigned char xmit_ret_code;
1271 xmit_ret_code = readb(ti->srb + RETCODE_OFST);
1272 if (xmit_ret_code == 0xff) break;
1273 DPRINTK("error on xmit_dir_frame request: %02X\n",
1274 xmit_ret_code);
1275 if (ti->current_skb) {
1276 dev_kfree_skb_irq(ti->current_skb);
1277 ti->current_skb = NULL;
1278 }
1279 /*dev->tbusy = 0;*/
1280 netif_wake_queue(dev);
1281 if (ti->readlog_pending)
1282 ibmtr_readlog(dev);
1283 break;
1284 }
1285 case XMIT_UI_FRAME:{
1286 unsigned char xmit_ret_code;
1287
1288 xmit_ret_code = readb(ti->srb + RETCODE_OFST);
1289 if (xmit_ret_code == 0xff) break;
1290 DPRINTK("error on xmit_ui_frame request: %02X\n",
1291 xmit_ret_code);
1292 if (ti->current_skb) {
1293 dev_kfree_skb_irq(ti->current_skb);
1294 ti->current_skb = NULL;
1295 }
1296 netif_wake_queue(dev);
1297 if (ti->readlog_pending)
1298 ibmtr_readlog(dev);
1299 break;
1300 }
1301 case DIR_OPEN_ADAPTER:
1302 dir_open_adapter(dev);
1303 break;
1304 case DLC_OPEN_SAP:
1305 if (readb(ti->srb + RETCODE_OFST)) {
1306 DPRINTK("open_sap failed: ret_code = %02X, "
1307 "retrying\n",
1308 (int) readb(ti->srb + RETCODE_OFST));
1309 ti->open_action = REOPEN;
1310 ibmtr_reset_timer(&(ti->tr_timer), dev);
1311 break;
1312 }
1313 ti->exsap_station_id = readw(ti->srb + STATION_ID_OFST);
1314 ti->sap_status = OPEN;/* TR adapter is now available */
1315 if (ti->open_mode==MANUAL){
1316 wake_up(&ti->wait_for_reset);
1317 break;
1318 }
1319 netif_wake_queue(dev);
1320 netif_carrier_on(dev);
1321 break;
1322 case DIR_INTERRUPT:
1323 case DIR_MOD_OPEN_PARAMS:
1324 case DIR_SET_GRP_ADDR:
1325 case DIR_SET_FUNC_ADDR:
1326 case DLC_CLOSE_SAP:
1327 if (readb(ti->srb + RETCODE_OFST))
1328 DPRINTK("error on %02X: %02X\n",
1329 (int) readb(ti->srb + COMMAND_OFST),
1330 (int) readb(ti->srb + RETCODE_OFST));
1331 break;
1332 case DIR_READ_LOG:
1333 if (readb(ti->srb + RETCODE_OFST)){
1334 DPRINTK("error on dir_read_log: %02X\n",
1335 (int) readb(ti->srb + RETCODE_OFST));
1336 netif_wake_queue(dev);
1337 break;
1338 }
1339#if IBMTR_DEBUG_MESSAGES
1340
1341#define LINE_ERRORS_OFST 0
1342#define INTERNAL_ERRORS_OFST 1
1343#define BURST_ERRORS_OFST 2
1344#define AC_ERRORS_OFST 3
1345#define ABORT_DELIMITERS_OFST 4
1346#define LOST_FRAMES_OFST 6
1347#define RECV_CONGEST_COUNT_OFST 7
1348#define FRAME_COPIED_ERRORS_OFST 8
1349#define FREQUENCY_ERRORS_OFST 9
1350#define TOKEN_ERRORS_OFST 10
1351
1352 DPRINTK("Line errors %02X, Internal errors %02X, "
1353 "Burst errors %02X\n" "A/C errors %02X, "
1354 "Abort delimiters %02X, Lost frames %02X\n"
1355 "Receive congestion count %02X, "
1356 "Frame copied errors %02X\nFrequency errors %02X, "
1357 "Token errors %02X\n",
1358 (int) readb(ti->srb + LINE_ERRORS_OFST),
1359 (int) readb(ti->srb + INTERNAL_ERRORS_OFST),
1360 (int) readb(ti->srb + BURST_ERRORS_OFST),
1361 (int) readb(ti->srb + AC_ERRORS_OFST),
1362 (int) readb(ti->srb + ABORT_DELIMITERS_OFST),
1363 (int) readb(ti->srb + LOST_FRAMES_OFST),
1364 (int) readb(ti->srb + RECV_CONGEST_COUNT_OFST),
1365 (int) readb(ti->srb + FRAME_COPIED_ERRORS_OFST),
1366 (int) readb(ti->srb + FREQUENCY_ERRORS_OFST),
1367 (int) readb(ti->srb + TOKEN_ERRORS_OFST));
1368#endif
1369 netif_wake_queue(dev);
1370 break;
1371 default:
1372 DPRINTK("Unknown command %02X encountered\n",
1373 (int) readb(ti->srb));
1374 } /* end switch SRB command check */
1375 writeb(~SRB_RESP_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
1376 } /* if SRB response */
1377 if (status & ASB_FREE_INT) { /* ASB response */
1378 SET_PAGE(ti->asb_page);
1379#if TR_VERBOSE
1380 DPRINTK("ASB resp: cmd=%02X\n", readb(ti->asb));
1381#endif
1382
1383 switch (readb(ti->asb)) { /* ASB command check */
1384 case REC_DATA:
1385 case XMIT_UI_FRAME:
1386 case XMIT_DIR_FRAME:
1387 break;
1388 default:
1389 DPRINTK("unknown command in asb %02X\n",
1390 (int) readb(ti->asb));
1391 } /* switch ASB command check */
1392 if (readb(ti->asb + 2) != 0xff) /* checks ret_code */
1393 DPRINTK("ASB error %02X in cmd %02X\n",
1394 (int) readb(ti->asb + 2), (int) readb(ti->asb));
1395 writeb(~ASB_FREE_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
1396 } /* if ASB response */
1397
1398#define STATUS_OFST 6
1399#define NETW_STATUS_OFST 6
1400
1401 if (status & ARB_CMD_INT) { /* ARB response */
1402 SET_PAGE(ti->arb_page);
1403#if TR_VERBOSE
1404 DPRINTK("ARB resp: cmd=%02X\n", readb(ti->arb));
1405#endif
1406
1407 switch (readb(ti->arb)) { /* ARB command check */
1408 case DLC_STATUS:
1409 DPRINTK("DLC_STATUS new status: %02X on station %02X\n",
1410 ntohs(readw(ti->arb + STATUS_OFST)),
1411 ntohs(readw(ti->arb+ STATION_ID_OFST)));
1412 break;
1413 case REC_DATA:
1414 tr_rx(dev);
1415 break;
1416 case RING_STAT_CHANGE:{
1417 unsigned short ring_status;
1418 ring_status= ntohs(readw(ti->arb + NETW_STATUS_OFST));
1419 if (ibmtr_debug_trace & TRC_INIT)
1420 DPRINTK("Ring Status Change...(0x%x)\n",
1421 ring_status);
1422 if(ring_status& (REMOVE_RECV|AUTO_REMOVAL|LOBE_FAULT)){
1423 netif_stop_queue(dev);
1424 netif_carrier_off(dev);
1425 DPRINTK("Remove received, or Auto-removal error"
1426 ", or Lobe fault\n");
1427 DPRINTK("We'll try to reopen the closed adapter"
1428 " after a %d second delay.\n",
1429 TR_RETRY_INTERVAL/HZ);
1430 /*I was confused: I saw the TR reopening but */
1431 /*forgot:with an RJ45 in an RJ45/ICS adapter */
1432 /*but adapter not in the ring, the TR will */
1433 /* open, and then soon close and come here. */
1434 ti->open_mode = AUTOMATIC;
1435 ti->open_status = CLOSED; /*12/2000 BMS*/
1436 ti->open_action = REOPEN;
1437 ibmtr_reset_timer(&(ti->tr_timer), dev);
1438 } else if (ring_status & LOG_OVERFLOW) {
1439 if(netif_queue_stopped(dev))
1440 ti->readlog_pending = 1;
1441 else
1442 ibmtr_readlog(dev);
1443 }
1444 break;
1445 }
1446 case XMIT_DATA_REQ:
1447 tr_tx(dev);
1448 break;
1449 default:
1450 DPRINTK("Unknown command %02X in arb\n",
1451 (int) readb(ti->arb));
1452 break;
1453 } /* switch ARB command check */
1454 writeb(~ARB_CMD_INT, ti->mmio+ ACA_OFFSET+ACA_RESET + ISRP_ODD);
1455 writeb(ARB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
1456 } /* if ARB response */
1457 if (status & SSB_RESP_INT) { /* SSB response */
1458 unsigned char retcode;
1459 SET_PAGE(ti->ssb_page);
1460#if TR_VERBOSE
1461 DPRINTK("SSB resp: cmd=%02X rsp=%02X\n",
1462 readb(ti->ssb), readb(ti->ssb + 2));
1463#endif
1464
1465 switch (readb(ti->ssb)) { /* SSB command check */
1466 case XMIT_DIR_FRAME:
1467 case XMIT_UI_FRAME:
1468 retcode = readb(ti->ssb + 2);
1469 if (retcode && (retcode != 0x22))/* checks ret_code */
1470 DPRINTK("xmit ret_code: %02X xmit error code: "
1471 "%02X\n",
1472 (int)retcode, (int)readb(ti->ssb + 6));
1473 else
1474 dev->stats.tx_packets++;
1475 break;
1476 case XMIT_XID_CMD:
1477 DPRINTK("xmit xid ret_code: %02X\n",
1478 (int) readb(ti->ssb + 2));
1479 default:
1480 DPRINTK("Unknown command %02X in ssb\n",
1481 (int) readb(ti->ssb));
1482 } /* SSB command check */
1483 writeb(~SSB_RESP_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
1484 writeb(SSB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
1485 } /* if SSB response */
1486#ifdef ENABLE_PAGING
1487 writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
1488#endif
1489 writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
1490 spin_unlock(&(ti->lock));
1491 return IRQ_HANDLED;
1492} /*tok_interrupt */
1493
1494/*****************************************************************************/
1495
/* Byte offsets within the adapter's initialization response block */
#define INIT_STATUS_OFST 1
#define INIT_STATUS_2_OFST 2
#define ENCODED_ADDRESS_OFST 8

/* Handle the first interrupt after an adapter reset: assign the
 * shared-RAM window, locate the initial SRB, read the ring speed and
 * auto-speed-save capability, then either wake the sleeping opener
 * (MANUAL) or proceed straight to tok_open_adapter() (AUTOMATIC).
 */
static void initial_tok_int(struct net_device *dev)
{

	__u32 encoded_addr, hw_encoded_addr;
	struct tok_info *ti;
	unsigned char init_status; /*BMS 12/2000*/

	ti = netdev_priv(dev);

	ti->do_tok_int = NOT_FIRST;

	/* we assign the shared-ram address for ISA devices */
	writeb(ti->sram_base, ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN);
#ifndef PCMCIA
	ti->sram_virt = ioremap(((__u32)ti->sram_base << 12), ti->avail_shared_ram);
#endif
	ti->init_srb = map_address(ti,
		ntohs(readw(ti->mmio + ACA_OFFSET + WRBR_EVEN)),
		&ti->init_srb_page);
	if (ti->page_mask && ti->avail_shared_ram == 127) {
		void __iomem *last_512;
		__u8 last_512_page=0;
		int i;
		last_512 = map_address(ti, 0xfe00, &last_512_page);
		/* initialize high section of ram (if necessary) */
		SET_PAGE(last_512_page);
		for (i = 0; i < 512; i++)
			writeb(0, last_512 + i);
	}
	SET_PAGE(ti->init_srb_page);

#if TR_VERBOSE
	{
	int i;

	DPRINTK("ti->init_srb_page=0x%x\n", ti->init_srb_page);
	DPRINTK("init_srb(%p):", ti->init_srb );
	for (i = 0; i < 20; i++)
		printk("%02X ", (int) readb(ti->init_srb + i));
	printk("\n");
	}
#endif

	hw_encoded_addr = readw(ti->init_srb + ENCODED_ADDRESS_OFST);
	encoded_addr = ntohs(hw_encoded_addr);
	init_status= /*BMS 12/2000 check for shallow mode possibility (Turbo)*/
	    readb(ti->init_srb+offsetof(struct srb_init_response,init_status));
	/*printk("Initial interrupt: init_status= 0x%02x\n",init_status);*/
	/* bit 0 of init_status distinguishes 16 from 4 Mbps operation */
	ti->ring_speed = init_status & 0x01 ? 16 : 4;
	DPRINTK("Initial interrupt : %d Mbps, shared RAM base %08x.\n",
				ti->ring_speed, (unsigned int)dev->mem_start);
	ti->auto_speedsave = (readb(ti->init_srb+INIT_STATUS_2_OFST) & 4) != 0;

	if (ti->open_mode == MANUAL)	wake_up(&ti->wait_for_reset);
	else				tok_open_adapter((unsigned long)dev);

}			/*initial_tok_int() */
1557
1558/*****************************************************************************/
1559
/* Byte offsets used by the transmit path (ARB request / ASB response) */
#define CMD_CORRELATE_OFST 1
#define DHB_ADDRESS_OFST 6

#define FRAME_LENGTH_OFST 6
#define HEADER_LENGTH_OFST 8
#define RSAP_VALUE_OFST 9

/* Called from tok_interrupt() on an ARB XMIT_DATA_REQ: the adapter
 * has supplied a transmit buffer (DHB) in shared RAM; copy the skb
 * saved by tok_send_packet() into it (possibly spanning several
 * 16 KB shared-RAM pages), fill in the ASB response, free the skb
 * and restart the transmit queue.
 */
static void tr_tx(struct net_device *dev)
{
	struct tok_info *ti = netdev_priv(dev);
	struct trh_hdr *trhdr = (struct trh_hdr *) ti->current_skb->data;
	unsigned int hdr_len;
	__u32 dhb=0,dhb_base;
	void __iomem *dhbuf = NULL;
	unsigned char xmit_command;
	int i,dhb_len=0x4000,src_len,src_offset;
	struct trllc *llc;
	struct srb_xmit xsrb;
	__u8 dhb_page = 0;
	__u8 llc_ssap;

	SET_PAGE(ti->asb_page);

	if (readb(ti->asb+RETCODE_OFST) != 0xFF) DPRINTK("ASB not free !!!\n");

	/* in providing the transmit interrupts, is telling us it is ready for
	   data and providing a shared memory address for us to stuff with data.
	   Here we compute the effective address where we will place data.
	 */
	SET_PAGE(ti->arb_page);
	dhb=dhb_base=ntohs(readw(ti->arb + DHB_ADDRESS_OFST));
	if (ti->page_mask) {
		dhb_page = (dhb_base >> 8) & ti->page_mask;
		dhb=dhb_base & ~(ti->page_mask << 8);
	}
	dhbuf = ti->sram_virt + dhb;

	/* Figure out the size of the 802.5 header */
	if (!(trhdr->saddr[0] & 0x80))	/* RIF present? */
		hdr_len = sizeof(struct trh_hdr) - TR_MAXRIFLEN;
	else
		hdr_len = ((ntohs(trhdr->rcf) & TR_RCF_LEN_MASK) >> 8)
		    + sizeof(struct trh_hdr) - TR_MAXRIFLEN;

	llc = (struct trllc *) (ti->current_skb->data + hdr_len);

	llc_ssap = llc->ssap;
	SET_PAGE(ti->srb_page);
	memcpy_fromio(&xsrb, ti->srb, sizeof(xsrb));
	SET_PAGE(ti->asb_page);
	xmit_command = xsrb.command;

	writeb(xmit_command, ti->asb + COMMAND_OFST);
	writew(xsrb.station_id, ti->asb + STATION_ID_OFST);
	writeb(llc_ssap, ti->asb + RSAP_VALUE_OFST);
	writeb(xsrb.cmd_corr, ti->asb + CMD_CORRELATE_OFST);
	writeb(0, ti->asb + RETCODE_OFST);
	/* XID/TEST frames get a canned 17-byte body built in place */
	if ((xmit_command == XMIT_XID_CMD) || (xmit_command == XMIT_TEST_CMD)) {
		writew(htons(0x11), ti->asb + FRAME_LENGTH_OFST);
		writeb(0x0e, ti->asb + HEADER_LENGTH_OFST);
		SET_PAGE(dhb_page);
		writeb(AC, dhbuf);
		writeb(LLC_FRAME, dhbuf + 1);
		for (i = 0; i < TR_ALEN; i++)
			writeb((int) 0x0FF, dhbuf + i + 2);
		for (i = 0; i < TR_ALEN; i++)
			writeb(0, dhbuf + i + TR_ALEN + 2);
		writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
		return;
	}
	/*
	 * the token ring packet is copied from sk_buff to the adapter
	 * buffer identified in the command data received with the interrupt.
	 */
	writeb(hdr_len, ti->asb + HEADER_LENGTH_OFST);
	writew(htons(ti->current_skb->len), ti->asb + FRAME_LENGTH_OFST);
	src_len=ti->current_skb->len;
	src_offset=0;
	dhb=dhb_base;
	/* copy page-by-page: each shared-RAM page holds at most 0x4000
	 * bytes, so large frames are split across page boundaries */
	while(1) {
		if (ti->page_mask) {
			dhb_page=(dhb >> 8) & ti->page_mask;
			dhb=dhb & ~(ti->page_mask << 8);
			dhb_len=0x4000-dhb; /* remaining size of this page */
		}
		dhbuf = ti->sram_virt + dhb;
		SET_PAGE(dhb_page);
		if (src_len > dhb_len) {
			memcpy_toio(dhbuf,&ti->current_skb->data[src_offset],
					dhb_len);
			src_len -= dhb_len;
			src_offset += dhb_len;
			dhb_base+=dhb_len;
			dhb=dhb_base;
			continue;
		}
		memcpy_toio(dhbuf, &ti->current_skb->data[src_offset], src_len);
		break;
	}
	writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
	dev->stats.tx_bytes += ti->current_skb->len;
	dev_kfree_skb_irq(ti->current_skb);
	ti->current_skb = NULL;
	netif_wake_queue(dev);
	if (ti->readlog_pending)
		ibmtr_readlog(dev);
}			/*tr_tx */
1667
1668/*****************************************************************************/
1669
1670
/* Byte offsets used by the receive path (ARB request / ASB response) */
#define RECEIVE_BUFFER_OFST 6
#define LAN_HDR_LENGTH_OFST 8
#define DLC_HDR_LENGTH_OFST 9

/* Byte offsets within the received LLC header */
#define DSAP_OFST 0
#define SSAP_OFST 1
#define LLC_OFST 2
#define PROTID_OFST 3
#define ETHERTYPE_OFST 6

/* Called from tok_interrupt() on an ARB REC_DATA: walk the adapter's
 * chained receive buffers in shared RAM, assemble the frame into a
 * fresh skb and hand it to the stack.  For LLC UI frames on the
 * extended SAP the payload is checksummed while copying and the skb
 * is marked CHECKSUM_COMPLETE.
 */
static void tr_rx(struct net_device *dev)
{
	struct tok_info *ti = netdev_priv(dev);
	__u32 rbuffer;
	void __iomem *rbuf, *rbufdata, *llc;
	__u8 rbuffer_page = 0;
	unsigned char *data;
	unsigned int rbuffer_len, lan_hdr_len, hdr_len, ip_len, length;
	unsigned char dlc_hdr_len;
	struct sk_buff *skb;
	unsigned int skb_size = 0;
	int IPv4_p = 0;
	unsigned int chksum = 0;
	struct iphdr *iph;
	struct arb_rec_req rarb;

	SET_PAGE(ti->arb_page);
	memcpy_fromio(&rarb, ti->arb, sizeof(rarb));
	rbuffer = ntohs(rarb.rec_buf_addr) ;
	rbuf = map_address(ti, rbuffer, &rbuffer_page);

	SET_PAGE(ti->asb_page);

	if (readb(ti->asb + RETCODE_OFST) !=0xFF) DPRINTK("ASB not free !!!\n");

	/* pre-fill the ASB response so we only need to set ret_code later */
	writeb(REC_DATA, ti->asb + COMMAND_OFST);
	writew(rarb.station_id, ti->asb + STATION_ID_OFST);
	writew(rarb.rec_buf_addr, ti->asb + RECEIVE_BUFFER_OFST);

	lan_hdr_len = rarb.lan_hdr_len;
	if (lan_hdr_len > sizeof(struct trh_hdr)) {
		DPRINTK("Linux cannot handle greater than 18 bytes RIF\n");
		return;
	}			/*BMS I added this above just to be very safe */
	dlc_hdr_len = readb(ti->arb + DLC_HDR_LENGTH_OFST);
	hdr_len = lan_hdr_len + sizeof(struct trllc) + sizeof(struct iphdr);

	SET_PAGE(rbuffer_page);
	llc = rbuf + offsetof(struct rec_buf, data) + lan_hdr_len;

#if TR_VERBOSE
	DPRINTK("offsetof data: %02X lan_hdr_len: %02X\n",
	(__u32) offsetof(struct rec_buf, data), (unsigned int) lan_hdr_len);
	DPRINTK("llc: %08X rec_buf_addr: %04X dev->mem_start: %lX\n",
		llc, ntohs(rarb.rec_buf_addr), dev->mem_start);
	DPRINTK("dsap: %02X, ssap: %02X, llc: %02X, protid: %02X%02X%02X, "
		"ethertype: %04X\n",
		(int) readb(llc + DSAP_OFST), (int) readb(llc + SSAP_OFST),
		(int) readb(llc + LLC_OFST), (int) readb(llc + PROTID_OFST),
		(int) readb(llc+PROTID_OFST+1),(int)readb(llc+PROTID_OFST + 2),
		(int) ntohs(readw(llc + ETHERTYPE_OFST)));
#endif
	/* only LLC UI frames are accepted; everything else is dropped */
	if (readb(llc + offsetof(struct trllc, llc)) != UI_CMD) {
		SET_PAGE(ti->asb_page);
		writeb(DATA_LOST, ti->asb + RETCODE_OFST);
		dev->stats.rx_dropped++;
		writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
		return;
	}
	length = ntohs(rarb.frame_len);
	/* extended SAP on both ends + enough bytes -> treat as IPv4 */
	if (readb(llc + DSAP_OFST) == EXTENDED_SAP &&
	   readb(llc + SSAP_OFST) == EXTENDED_SAP &&
		length >= hdr_len)	IPv4_p = 1;
#if TR_VERBOSE
#define SADDR_OFST	8
#define DADDR_OFST	2

	if (!IPv4_p) {

		void __iomem *trhhdr = rbuf + offsetof(struct rec_buf, data);
		u8 saddr[6];
		u8 daddr[6];
		int i;
		for (i = 0 ; i < 6 ; i++)
			saddr[i] = readb(trhhdr + SADDR_OFST + i);
		for (i = 0 ; i < 6 ; i++)
			daddr[i] = readb(trhhdr + DADDR_OFST + i);
		DPRINTK("Probably non-IP frame received.\n");
		DPRINTK("ssap: %02X dsap: %02X "
			"saddr: %pM daddr: %pM\n",
			readb(llc + SSAP_OFST), readb(llc + DSAP_OFST),
			saddr, daddr);
	}
#endif

	/*BMS handle the case she comes in with few hops but leaves with many */
	skb_size=length-lan_hdr_len+sizeof(struct trh_hdr)+sizeof(struct trllc);

	if (!(skb = dev_alloc_skb(skb_size))) {
		DPRINTK("out of memory. frame dropped.\n");
		dev->stats.rx_dropped++;
		SET_PAGE(ti->asb_page);
		writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code));
		writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
		return;
	}
	/*BMS again, if she comes in with few but leaves with many */
	skb_reserve(skb, sizeof(struct trh_hdr) - lan_hdr_len);
	skb_put(skb, length);
	data = skb->data;
	rbuffer_len = ntohs(readw(rbuf + offsetof(struct rec_buf, buf_len)));
	rbufdata = rbuf + offsetof(struct rec_buf, data);

	if (IPv4_p) {
		/* Copy the headers without checksumming */
		memcpy_fromio(data, rbufdata, hdr_len);

		/* Watch for padded packets and bogons */
		iph= (struct iphdr *)(data+ lan_hdr_len + sizeof(struct trllc));
		ip_len = ntohs(iph->tot_len) - sizeof(struct iphdr);
		length -= hdr_len;
		if ((ip_len <= length) && (ip_len > 7))
			length = ip_len;
		data += hdr_len;
		rbuffer_len -= hdr_len;
		rbufdata += hdr_len;
	}
	/* Copy the payload... */
/* Byte offsets within each chained receive buffer */
#define BUFFER_POINTER_OFST	2
#define BUFFER_LENGTH_OFST	6
	/* follow the adapter's buffer chain until the next-pointer is 0 */
	for (;;) {
		if (ibmtr_debug_trace&TRC_INITV && length < rbuffer_len)
			DPRINTK("CURIOUS, length=%d < rbuffer_len=%d\n",
						length,rbuffer_len);
		if (IPv4_p)
			chksum=csum_partial_copy_nocheck((void*)rbufdata,
			    data,length<rbuffer_len?length:rbuffer_len,chksum);
		else
			memcpy_fromio(data, rbufdata, rbuffer_len);
		rbuffer = ntohs(readw(rbuf+BUFFER_POINTER_OFST)) ;
		if (!rbuffer)
			break;
		rbuffer -= 2;
		length -= rbuffer_len;
		data += rbuffer_len;
		rbuf = map_address(ti, rbuffer, &rbuffer_page);
		SET_PAGE(rbuffer_page);
		rbuffer_len = ntohs(readw(rbuf + BUFFER_LENGTH_OFST));
		rbufdata = rbuf + offsetof(struct rec_buf, data);
	}

	SET_PAGE(ti->asb_page);
	writeb(0, ti->asb + offsetof(struct asb_rec, ret_code));

	writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);

	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	skb->protocol = tr_type_trans(skb, dev);
	if (IPv4_p) {
		skb->csum = chksum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	netif_rx(skb);
}			/*tr_rx */
1837
1838/*****************************************************************************/
1839
/* Arm the retry timer to run tok_rerun() after TR_RETRY_INTERVAL.
 * NOTE(review): init_timer() is called after expires/data/function are
 * set; with the pre-hrtimer timer API init_timer() only initializes the
 * list linkage, so this ordering appears intentional -- confirm against
 * the kernel version this targets before reordering.
 */
static void ibmtr_reset_timer(struct timer_list *tmr, struct net_device *dev)
{
	tmr->expires = jiffies + TR_RETRY_INTERVAL;
	tmr->data = (unsigned long) dev;
	tmr->function = tok_rerun;
	init_timer(tmr);
	add_timer(tmr);
}
1848
1849/*****************************************************************************/
1850
/* Retry-timer callback.  If open_action is RESTART (set after an
 * adapter check), release the reset line and wait for the "first"
 * interrupt again; otherwise re-issue the open command.
 */
static void tok_rerun(unsigned long dev_addr)
{
	struct net_device *dev = (struct net_device *)dev_addr;
	struct tok_info *ti = netdev_priv(dev);

	if ( ti->open_action == RESTART){
		ti->do_tok_int = FIRST_INT;
		outb(0, dev->base_addr + ADAPTRESETREL);
#ifdef ENABLE_PAGING
		if (ti->page_mask)
			writeb(SRPR_ENABLE_PAGING,
				ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
#endif

		writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
	} else
		tok_open_adapter(dev_addr);
}
1869
1870/*****************************************************************************/
1871
/* Issue a DIR_READ_LOG command to fetch (and thereby clear) the
 * adapter's error-log counters.  The queue is stopped until the SRB
 * response is handled in tok_interrupt(), which prints the counters
 * and wakes the queue again.
 */
static void ibmtr_readlog(struct net_device *dev)
{
	struct tok_info *ti;

	ti = netdev_priv(dev);

	ti->readlog_pending = 0;
	SET_PAGE(ti->srb_page);
	writeb(DIR_READ_LOG, ti->srb);
	writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
	writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);

	netif_stop_queue(dev);

}
1887
1888/*****************************************************************************/
1889
1890static int ibmtr_change_mtu(struct net_device *dev, int mtu)
1891{
1892 struct tok_info *ti = netdev_priv(dev);
1893
1894 if (ti->ring_speed == 16 && mtu > ti->maxmtu16)
1895 return -EINVAL;
1896 if (ti->ring_speed == 4 && mtu > ti->maxmtu4)
1897 return -EINVAL;
1898 dev->mtu = mtu;
1899 return 0;
1900}
1901
1902/*****************************************************************************/
1903#ifdef MODULE
1904
/* 3COM 3C619C supports 8 interrupts, 32 I/O ports */
/* Per-adapter handles and module parameters: io[] defaults to the two
 * classic ISA base ports; irq/mem default to 0 (auto/unset). */
static struct net_device *dev_ibmtr[IBMTR_MAX_ADAPTERS];
static int io[IBMTR_MAX_ADAPTERS] = { 0xa20, 0xa24 };
static int irq[IBMTR_MAX_ADAPTERS];
static int mem[IBMTR_MAX_ADAPTERS];

MODULE_LICENSE("GPL");

module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(mem, int, NULL, 0);
1916
/* Module entry point: scan for Turbo adapters, then probe each io[]
 * base address until the list is exhausted.  Succeeds if at least one
 * adapter registered; returns -ENOMEM only when the very first netdev
 * allocation fails, -EIO when nothing was found.
 */
static int __init ibmtr_init(void)
{
	int i;
	int count=0;

	find_turbo_adapters(io);

	for (i = 0; i < IBMTR_MAX_ADAPTERS && io[i]; i++) {
		struct net_device *dev;
		irq[i] = 0;
		mem[i] = 0;
		dev = alloc_trdev(sizeof(struct tok_info));
		if (dev == NULL) {
			if (i == 0)
				return -ENOMEM;
			break;
		}
		dev->base_addr = io[i];
		dev->irq = irq[i];
		dev->mem_start = mem[i];

		if (ibmtr_probe_card(dev)) {
			free_netdev(dev);
			continue;
		}
		dev_ibmtr[i] = dev;
		count++;
	}
	if (count) return 0;
	printk("ibmtr: register_netdev() returned non-zero.\n");
	return -EIO;
}
module_init(ibmtr_init);
1950
1951static void __exit ibmtr_cleanup(void)
1952{
1953 int i;
1954
1955 for (i = 0; i < IBMTR_MAX_ADAPTERS; i++){
1956 if (!dev_ibmtr[i])
1957 continue;
1958 unregister_netdev(dev_ibmtr[i]);
1959 ibmtr_cleanup_card(dev_ibmtr[i]);
1960 free_netdev(dev_ibmtr[i]);
1961 }
1962}
1963module_exit(ibmtr_cleanup);
1964#endif
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
new file mode 100644
index 00000000000..9354ca9da57
--- /dev/null
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -0,0 +1,1918 @@
1/*
2 * lanstreamer.c -- driver for the IBM Auto LANStreamer PCI Adapter
3 *
4 * Written By: Mike Sullivan, IBM Corporation
5 *
6 * Copyright (C) 1999 IBM Corporation
7 *
8 * Linux driver for IBM PCI tokenring cards based on the LanStreamer MPC
9 * chipset.
10 *
11 * This driver is based on the olympic driver for IBM PCI TokenRing cards (Pit/Pit-Phy/Olympic
12 * chipsets) written by:
13 * 1999 Peter De Schrijver All Rights Reserved
14 * 1999 Mike Phillips (phillim@amtrak.com)
15 *
16 * Base Driver Skeleton:
17 * Written 1993-94 by Donald Becker.
18 *
19 * Copyright 1993 United States Government as represented by the
20 * Director, National Security Agency.
21 *
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License as published by
24 * the Free Software Foundation; either version 2 of the License, or
25 * (at your option) any later version.
26 *
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU General Public License for more details.
31 *
32 * NO WARRANTY
33 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
34 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
35 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
36 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
37 * solely responsible for determining the appropriateness of using and
38 * distributing the Program and assumes all risks associated with its
39 * exercise of rights under this Agreement, including but not limited to
40 * the risks and costs of program errors, damage to or loss of data,
41 * programs or equipment, and unavailability or interruption of operations.
42 *
43 * DISCLAIMER OF LIABILITY
44 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
45 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
47 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
48 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
49 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
50 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
51 *
52 * You should have received a copy of the GNU General Public License
53 * along with this program; if not, write to the Free Software
54 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
55 *
56 *
57 * 12/10/99 - Alpha Release 0.1.0
58 * First release to the public
59 * 03/03/00 - Merged to kernel, indented -kr -i8 -bri0, fixed some missing
60 * malloc free checks, reviewed code. <alan@redhat.com>
61 * 03/13/00 - Added spinlocks for smp
62 * 03/08/01 - Added support for module_init() and module_exit()
63 * 08/15/01 - Added ioctl() functionality for debugging, changed netif_*_queue
64 * calls and other incorrectness - Kent Yoder <yoder1@us.ibm.com>
65 * 11/05/01 - Restructured the interrupt function, added delays, reduced the
66 * the number of TX descriptors to 1, which together can prevent
67 * the card from locking up the box - <yoder1@us.ibm.com>
68 * 09/27/02 - New PCI interface + bug fix. - <yoder1@us.ibm.com>
69 * 11/13/02 - Removed free_irq calls which could cause a hang, added
70 * netif_carrier_{on|off} - <yoder1@us.ibm.com>
71 *
72 * To Do:
73 *
74 *
75 * If Problems do Occur
76 * Most problems can be rectified by either closing and opening the interface
77 * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
78 * if compiled into the kernel).
79 */
80
81/* Change STREAMER_DEBUG to 1 to get verbose, and I mean really verbose, messages */
82
83#define STREAMER_DEBUG 0
84#define STREAMER_DEBUG_PACKETS 0
85
86/* Change STREAMER_NETWORK_MONITOR to receive mac frames through the arb channel.
87 * Will also create a /proc/net/streamer_tr entry if proc_fs is compiled into the
88 * kernel.
89 * Intended to be used to create a ring-error reporting network module
90 * i.e. it will give you the source address of beaconers on the ring
91 */
92
93#define STREAMER_NETWORK_MONITOR 0
94
95/* #define CONFIG_PROC_FS */
96
97/*
98 * Allow or disallow ioctl's for debugging
99 */
100
101#define STREAMER_IOCTL 0
102
103#include <linux/module.h>
104#include <linux/kernel.h>
105#include <linux/errno.h>
106#include <linux/timer.h>
107#include <linux/in.h>
108#include <linux/ioport.h>
109#include <linux/string.h>
110#include <linux/proc_fs.h>
111#include <linux/ptrace.h>
112#include <linux/skbuff.h>
113#include <linux/interrupt.h>
114#include <linux/delay.h>
115#include <linux/netdevice.h>
116#include <linux/trdevice.h>
117#include <linux/stddef.h>
118#include <linux/init.h>
119#include <linux/pci.h>
120#include <linux/dma-mapping.h>
121#include <linux/spinlock.h>
122#include <linux/bitops.h>
123#include <linux/jiffies.h>
124#include <linux/slab.h>
125
126#include <net/net_namespace.h>
127#include <net/checksum.h>
128
129#include <asm/io.h>
130#include <asm/system.h>
131
132#include "lanstreamer.h"
133
134#if (BITS_PER_LONG == 64)
135#error broken on 64-bit: stores pointer to rx_ring->buffer in 32-bit int
136#endif
137
138
139/* I've got to put some intelligence into the version number so that Peter and I know
140 * which version of the code somebody has got.
141 * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
142 * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
143 *
144 * Official releases will only have an a.b.c version number format.
145 */
146
/* Version banner printed once per successful probe in streamer_init_one() */
static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n"
                        " v0.5.3 11/13/02 - Kent Yoder";

/* PCI IDs this driver binds to: IBM tokenring adapters with the
 * LanStreamer MPC chipset (any subvendor/subdevice). */
static DEFINE_PCI_DEVICE_TABLE(streamer_pci_tbl) = {
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,},
	{}	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci,streamer_pci_tbl);
155
156
/* Major open-failure stage names; indexed in streamer_open() by the high
 * nibble of the adapter's error code: (error_code & 0xf0) >> 4 */
static char *open_maj_error[] = {
	"No error", "Lobe Media Test", "Physical Insertion",
	"Address Verification", "Neighbor Notification (Ring Poll)",
	"Request Parameters", "FDX Registration Request",
	"FDX Lobe Media Test", "FDX Duplicate Address Check",
	"Unknown stage"
};
164
/* Minor open-failure reason names; indexed in streamer_open() by the low
 * nibble of the adapter's error code: error_code & 0x0f.
 * Fix: entry 14 read "Contention failer" — typo in a user-visible
 * error message, corrected to "failure". */
static char *open_min_error[] = {
	"No error", "Function Failure", "Signal Lost", "Wire Fault",
	"Ring Speed Mismatch", "Timeout", "Ring Failure", "Ring Beaconing",
	"Duplicate Node Address", "Request Parameters", "Remove Received",
	"Reserved", "Reserved", "No Monitor Detected for RPL",
	"Monitor Contention failure for RPL", "FDX Protocol Error"
};
172
173/* Module parameters */
174
175/* Ring Speed 0,4,16
176 * 0 = Autosense
177 * 4,16 = Selected speed only, no autosense
178 * This allows the card to be the first on the ring
179 * and become the active monitor.
180 *
181 * WARNING: Some hubs will allow you to insert
182 * at the wrong speed
183 */
184
/* Per-adapter ring speed selection: 0 = autosense, 4/16 = forced (see the
 * WARNING above about inserting at the wrong speed). */
static int ringspeed[STREAMER_MAX_ADAPTERS] = { 0, };

module_param_array(ringspeed, int, NULL, 0);

/* Packet buffer size */

/* Values outside 100..18000 (including the 0 default) fall back to
 * PKT_BUF_SZ in streamer_init_one(). */
static int pkt_buf_sz[STREAMER_MAX_ADAPTERS] = { 0, };

module_param_array(pkt_buf_sz, int, NULL, 0);

/* Message Level */

/* Non-zero enables the KERN_INFO progress messages; 0 keeps the driver quiet. */
static int message_level[STREAMER_MAX_ADAPTERS] = { 1, };

module_param_array(message_level, int, NULL, 0);
200
201#if STREAMER_IOCTL
202static int streamer_ioctl(struct net_device *, struct ifreq *, int);
203#endif
204
205static int streamer_reset(struct net_device *dev);
206static int streamer_open(struct net_device *dev);
207static netdev_tx_t streamer_xmit(struct sk_buff *skb,
208 struct net_device *dev);
209static int streamer_close(struct net_device *dev);
210static void streamer_set_rx_mode(struct net_device *dev);
211static irqreturn_t streamer_interrupt(int irq, void *dev_id);
212static int streamer_set_mac_address(struct net_device *dev, void *addr);
213static void streamer_arb_cmd(struct net_device *dev);
214static int streamer_change_mtu(struct net_device *dev, int mtu);
215static void streamer_srb_bh(struct net_device *dev);
216static void streamer_asb_bh(struct net_device *dev);
217#if STREAMER_NETWORK_MONITOR
218#ifdef CONFIG_PROC_FS
219static int streamer_proc_info(char *buffer, char **start, off_t offset,
220 int length, int *eof, void *data);
221static int sprintf_info(char *buffer, struct net_device *dev);
222struct streamer_private *dev_streamer=NULL;
223#endif
224#endif
225
/* net_device callback table, attached to each device in streamer_init_one().
 * The ioctl hook only exists when STREAMER_IOCTL debugging is compiled in. */
static const struct net_device_ops streamer_netdev_ops = {
	.ndo_open = streamer_open,
	.ndo_stop = streamer_close,
	.ndo_start_xmit = streamer_xmit,
	.ndo_change_mtu = streamer_change_mtu,
#if STREAMER_IOCTL
	.ndo_do_ioctl = streamer_ioctl,
#endif
	.ndo_set_multicast_list = streamer_set_rx_mode,
	.ndo_set_mac_address = streamer_set_mac_address,
};
237
238static int __devinit streamer_init_one(struct pci_dev *pdev,
239 const struct pci_device_id *ent)
240{
241 struct net_device *dev;
242 struct streamer_private *streamer_priv;
243 unsigned long pio_start, pio_end, pio_flags, pio_len;
244 unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
245 int rc = 0;
246 static int card_no=-1;
247 u16 pcr;
248
249#if STREAMER_DEBUG
250 printk("lanstreamer::streamer_init_one, entry pdev %p\n",pdev);
251#endif
252
253 card_no++;
254 dev = alloc_trdev(sizeof(*streamer_priv));
255 if (dev==NULL) {
256 printk(KERN_ERR "lanstreamer: out of memory.\n");
257 return -ENOMEM;
258 }
259
260 streamer_priv = netdev_priv(dev);
261
262#if STREAMER_NETWORK_MONITOR
263#ifdef CONFIG_PROC_FS
264 if (!dev_streamer)
265 create_proc_read_entry("streamer_tr", 0, init_net.proc_net,
266 streamer_proc_info, NULL);
267 streamer_priv->next = dev_streamer;
268 dev_streamer = streamer_priv;
269#endif
270#endif
271
272 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
273 if (rc) {
274 printk(KERN_ERR "%s: No suitable PCI mapping available.\n",
275 dev->name);
276 rc = -ENODEV;
277 goto err_out;
278 }
279
280 rc = pci_enable_device(pdev);
281 if (rc) {
282 printk(KERN_ERR "lanstreamer: unable to enable pci device\n");
283 rc=-EIO;
284 goto err_out;
285 }
286
287 pci_set_master(pdev);
288
289 rc = pci_set_mwi(pdev);
290 if (rc) {
291 printk(KERN_ERR "lanstreamer: unable to enable MWI on pci device\n");
292 goto err_out_disable;
293 }
294
295 pio_start = pci_resource_start(pdev, 0);
296 pio_end = pci_resource_end(pdev, 0);
297 pio_flags = pci_resource_flags(pdev, 0);
298 pio_len = pci_resource_len(pdev, 0);
299
300 mmio_start = pci_resource_start(pdev, 1);
301 mmio_end = pci_resource_end(pdev, 1);
302 mmio_flags = pci_resource_flags(pdev, 1);
303 mmio_len = pci_resource_len(pdev, 1);
304
305#if STREAMER_DEBUG
306 printk("lanstreamer: pio_start %x pio_end %x pio_len %x pio_flags %x\n",
307 pio_start, pio_end, pio_len, pio_flags);
308 printk("lanstreamer: mmio_start %x mmio_end %x mmio_len %x mmio_flags %x\n",
309 mmio_start, mmio_end, mmio_flags, mmio_len);
310#endif
311
312 if (!request_region(pio_start, pio_len, "lanstreamer")) {
313 printk(KERN_ERR "lanstreamer: unable to get pci io addr %lx\n",
314 pio_start);
315 rc= -EBUSY;
316 goto err_out_mwi;
317 }
318
319 if (!request_mem_region(mmio_start, mmio_len, "lanstreamer")) {
320 printk(KERN_ERR "lanstreamer: unable to get pci mmio addr %lx\n",
321 mmio_start);
322 rc= -EBUSY;
323 goto err_out_free_pio;
324 }
325
326 streamer_priv->streamer_mmio=ioremap(mmio_start, mmio_len);
327 if (streamer_priv->streamer_mmio == NULL) {
328 printk(KERN_ERR "lanstreamer: unable to remap MMIO %lx\n",
329 mmio_start);
330 rc= -EIO;
331 goto err_out_free_mmio;
332 }
333
334 init_waitqueue_head(&streamer_priv->srb_wait);
335 init_waitqueue_head(&streamer_priv->trb_wait);
336
337 dev->netdev_ops = &streamer_netdev_ops;
338 dev->irq = pdev->irq;
339 dev->base_addr=pio_start;
340 SET_NETDEV_DEV(dev, &pdev->dev);
341
342 streamer_priv->streamer_card_name = (char *)pdev->resource[0].name;
343 streamer_priv->pci_dev = pdev;
344
345 if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000))
346 streamer_priv->pkt_buf_sz = PKT_BUF_SZ;
347 else
348 streamer_priv->pkt_buf_sz = pkt_buf_sz[card_no];
349
350 streamer_priv->streamer_ring_speed = ringspeed[card_no];
351 streamer_priv->streamer_message_level = message_level[card_no];
352
353 pci_set_drvdata(pdev, dev);
354
355 spin_lock_init(&streamer_priv->streamer_lock);
356
357 pci_read_config_word (pdev, PCI_COMMAND, &pcr);
358 pcr |= PCI_COMMAND_SERR;
359 pci_write_config_word (pdev, PCI_COMMAND, pcr);
360
361 printk("%s\n", version);
362 printk("%s: %s. I/O at %hx, MMIO at %p, using irq %d\n",dev->name,
363 streamer_priv->streamer_card_name,
364 (unsigned int) dev->base_addr,
365 streamer_priv->streamer_mmio,
366 dev->irq);
367
368 if (streamer_reset(dev))
369 goto err_out_unmap;
370
371 rc = register_netdev(dev);
372 if (rc)
373 goto err_out_unmap;
374 return 0;
375
376err_out_unmap:
377 iounmap(streamer_priv->streamer_mmio);
378err_out_free_mmio:
379 release_mem_region(mmio_start, mmio_len);
380err_out_free_pio:
381 release_region(pio_start, pio_len);
382err_out_mwi:
383 pci_clear_mwi(pdev);
384err_out_disable:
385 pci_disable_device(pdev);
386err_out:
387 free_netdev(dev);
388#if STREAMER_DEBUG
389 printk("lanstreamer: Exit error %x\n",rc);
390#endif
391 return rc;
392}
393
/*
 * streamer_remove_one - PCI detach handler.
 *
 * Unwinds streamer_init_one() in reverse: drops the adapter from the
 * optional network-monitor proc list, unregisters the netdev, unmaps the
 * MMIO window, releases both PCI resource regions, clears MWI, disables
 * the device and frees the netdev.
 */
static void __devexit streamer_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev=pci_get_drvdata(pdev);
	struct streamer_private *streamer_priv;

#if STREAMER_DEBUG
	printk("lanstreamer::streamer_remove_one entry pdev %p\n",pdev);
#endif

	/* Defensive: drvdata should always be set by a successful probe */
	if (dev == NULL) {
		printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev is NULL\n");
		return;
	}

	streamer_priv=netdev_priv(dev);
	if (streamer_priv == NULL) {
		printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev->priv is NULL\n");
		return;
	}

#if STREAMER_NETWORK_MONITOR
#ifdef CONFIG_PROC_FS
	{
		struct streamer_private **p, **next;

		/* Unlink this adapter from the singly-linked dev_streamer
		 * list; the last one out removes the shared proc entry. */
		for (p = &dev_streamer; *p; p = next) {
			next = &(*p)->next;
			if (*p == streamer_priv) {
				*p = *next;
				break;
			}
		}
		if (!dev_streamer)
			remove_proc_entry("streamer_tr", init_net.proc_net);
	}
#endif
#endif

	unregister_netdev(dev);
	iounmap(streamer_priv->streamer_mmio);
	release_mem_region(pci_resource_start(pdev, 1), pci_resource_len(pdev,1));
	release_region(pci_resource_start(pdev, 0), pci_resource_len(pdev,0));
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
	pci_set_drvdata(pdev, NULL);
}
441
442
/*
 * streamer_reset - soft-reset the adapter and run its solo-init sequence.
 *
 * Pulses BCTL_SOFTRESET for one second, programs ring-speed selection into
 * GPR, optionally posts a single diagnostic rx buffer, then waits (up to
 * 40 seconds) for the SISR_SRB_REPLY that signals init completion.  On
 * success the burned-in MAC address and the on-chip address-table /
 * parameter-table offsets are read out through the LAPA/LAPD(INC)
 * windowed-access registers into dev->dev_addr and streamer_priv.
 *
 * Returns 0 on success, -1 on timeout or a non-zero adapter error code.
 */
static int streamer_reset(struct net_device *dev)
{
	struct streamer_private *streamer_priv;
	__u8 __iomem *streamer_mmio;
	unsigned long t;
	unsigned int uaa_addr;
	struct sk_buff *skb = NULL;
	__u16 misr;

	streamer_priv = netdev_priv(dev);
	streamer_mmio = streamer_priv->streamer_mmio;

	writew(readw(streamer_mmio + BCTL) | BCTL_SOFTRESET, streamer_mmio + BCTL);
	t = jiffies;	/* start of the 40 s init timeout tested below */
	/* Hold soft reset bit for a while */
	ssleep(1);

	writew(readw(streamer_mmio + BCTL) & ~BCTL_SOFTRESET,
	       streamer_mmio + BCTL);

#if STREAMER_DEBUG
	printk("BCTL: %x\n", readw(streamer_mmio + BCTL));
	printk("GPR: %x\n", readw(streamer_mmio + GPR));
	printk("SISRMASK: %x\n", readw(streamer_mmio + SISR_MASK));
#endif
	/* 8-byte rx/tx FIFO thresholds */
	writew(readw(streamer_mmio + BCTL) | (BCTL_RX_FIFO_8 | BCTL_TX_FIFO_8), streamer_mmio + BCTL );

	/* Ring speed from the per-adapter module parameter:
	 * 0 = autosense, 16 = force 16 Mbps, 4 = force 4 Mbps */
	if (streamer_priv->streamer_ring_speed == 0) {	/* Autosense */
		writew(readw(streamer_mmio + GPR) | GPR_AUTOSENSE,
		       streamer_mmio + GPR);
		if (streamer_priv->streamer_message_level)
			printk(KERN_INFO "%s: Ringspeed autosense mode on\n",
			       dev->name);
	} else if (streamer_priv->streamer_ring_speed == 16) {
		if (streamer_priv->streamer_message_level)
			printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n",
			       dev->name);
		writew(GPR_16MBPS, streamer_mmio + GPR);
	} else if (streamer_priv->streamer_ring_speed == 4) {
		if (streamer_priv->streamer_message_level)
			printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n",
			       dev->name);
		writew(0, streamer_mmio + GPR);
	}

	/* Best-effort: post one rx descriptor + 512-byte buffer (built
	 * inside a temporary skb) so the adapter's self-test can receive;
	 * init proceeds even if the allocation fails. */
	skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
	if (!skb) {
		printk(KERN_INFO "%s: skb allocation for diagnostics failed...proceeding\n",
		       dev->name);
	} else {
		struct streamer_rx_desc *rx_ring;
		u8 *data;

		rx_ring=(struct streamer_rx_desc *)skb->data;
		data=((u8 *)skb->data)+sizeof(struct streamer_rx_desc);
		rx_ring->forward=0;
		rx_ring->status=0;
		/* NOTE(review): these two pci_map_single() mappings are
		 * never unmapped before the skb is freed below — TODO
		 * confirm whether a pci_unmap_single pair is needed. */
		rx_ring->buffer=cpu_to_le32(pci_map_single(streamer_priv->pci_dev, data,
							512, PCI_DMA_FROMDEVICE));
		rx_ring->framelen_buflen=512;
		writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, rx_ring, 512, PCI_DMA_FROMDEVICE)),
			streamer_mmio+RXBDA);
	}

#if STREAMER_DEBUG
	printk("GPR = %x\n", readw(streamer_mmio + GPR));
#endif
	/* start solo init */
	writew(SISR_MI, streamer_mmio + SISR_MASK_SUM);

	/* Poll (sleeping 100 ms per iteration) for the SRB reply; t was
	 * sampled before the 1 s reset hold, so this is a ~40 s budget. */
	while (!((readw(streamer_mmio + SISR)) & SISR_SRB_REPLY)) {
		msleep_interruptible(100);
		if (time_after(jiffies, t + 40 * HZ)) {
			printk(KERN_ERR
			       "IBM PCI tokenring card not responding\n");
			/* NOTE(review): releases STREAMER_IO_SPACE bytes at
			 * base_addr, but streamer_init_one() requested
			 * pio_len and releases it again on its own error
			 * path — possible mismatch/double release; verify. */
			release_region(dev->base_addr, STREAMER_IO_SPACE);
			if (skb)
				dev_kfree_skb(skb);
			return -1;
		}
	}
	/* Acknowledge the SRB reply and any pending bus-master events */
	writew(~SISR_SRB_REPLY, streamer_mmio + SISR_RUM);
	misr = readw(streamer_mmio + MISR_RUM);
	writew(~misr, streamer_mmio + MISR_RUM);

	if (skb)
		dev_kfree_skb(skb);	/* release skb used for diagnostics */

#if STREAMER_DEBUG
	printk("LAPWWO: %x, LAPA: %x LAPE: %x\n",
	       readw(streamer_mmio + LAPWWO), readw(streamer_mmio + LAPA),
	       readw(streamer_mmio + LAPE));
#endif

#if STREAMER_DEBUG
	{
		int i;
		writew(readw(streamer_mmio + LAPWWO),
		       streamer_mmio + LAPA);
		printk("initialization response srb dump: ");
		for (i = 0; i < 10; i++)
			printk("%x:",
			       ntohs(readw(streamer_mmio + LAPDINC)));
		printk("\n");
	}
#endif

	/* Offset 6 of the init-response SRB holds the error code; non-zero
	 * means initialization failed. */
	writew(readw(streamer_mmio + LAPWWO) + 6, streamer_mmio + LAPA);
	if (readw(streamer_mmio + LAPD)) {
		printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",
		       ntohs(readw(streamer_mmio + LAPD)));
		/* NOTE(review): same release_region concern as the timeout
		 * path above. */
		release_region(dev->base_addr, STREAMER_IO_SPACE);
		return -1;
	}

	/* Offsets 8.. of the SRB: UAA address, (skipped) level/addr word,
	 * address-table offset, parameter-table offset. */
	writew(readw(streamer_mmio + LAPWWO) + 8, streamer_mmio + LAPA);
	uaa_addr = ntohs(readw(streamer_mmio + LAPDINC));
	readw(streamer_mmio + LAPDINC);	/* skip over Level.Addr field */
	streamer_priv->streamer_addr_table_addr = ntohs(readw(streamer_mmio + LAPDINC));
	streamer_priv->streamer_parms_addr = ntohs(readw(streamer_mmio + LAPDINC));

#if STREAMER_DEBUG
	printk("UAA resides at %x\n", uaa_addr);
#endif

	/* setup uaa area for access with LAPD */
	{
		int i;
		__u16 addr;
		/* Read the 6-byte burned-in address, two bytes per LAPDINC */
		writew(uaa_addr, streamer_mmio + LAPA);
		for (i = 0; i < 6; i += 2) {
			addr=ntohs(readw(streamer_mmio+LAPDINC));
			dev->dev_addr[i]= (addr >> 8) & 0xff;
			dev->dev_addr[i+1]= addr & 0xff;
		}
#if STREAMER_DEBUG
		printk("Adapter address: %pM\n", dev->dev_addr);
#endif
	}
	return 0;
}
584
585static int streamer_open(struct net_device *dev)
586{
587 struct streamer_private *streamer_priv = netdev_priv(dev);
588 __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
589 unsigned long flags;
590 char open_error[255];
591 int i, open_finished = 1;
592 __u16 srb_word;
593 __u16 srb_open;
594 int rc;
595
596 if (readw(streamer_mmio+BMCTL_SUM) & BMCTL_RX_ENABLED) {
597 rc=streamer_reset(dev);
598 }
599
600 if (request_irq(dev->irq, streamer_interrupt, IRQF_SHARED, "lanstreamer", dev)) {
601 return -EAGAIN;
602 }
603#if STREAMER_DEBUG
604 printk("BMCTL: %x\n", readw(streamer_mmio + BMCTL_SUM));
605 printk("pending ints: %x\n", readw(streamer_mmio + SISR));
606#endif
607
608 writew(SISR_MI | SISR_SRB_REPLY, streamer_mmio + SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */
609 writew(LISR_LIE, streamer_mmio + LISR); /* more ints later */
610
611 /* adapter is closed, so SRB is pointed to by LAPWWO */
612 writew(readw(streamer_mmio + LAPWWO), streamer_mmio + LAPA);
613
614#if STREAMER_DEBUG
615 printk("LAPWWO: %x, LAPA: %x\n", readw(streamer_mmio + LAPWWO),
616 readw(streamer_mmio + LAPA));
617 printk("LAPE: %x\n", readw(streamer_mmio + LAPE));
618 printk("SISR Mask = %04x\n", readw(streamer_mmio + SISR_MASK));
619#endif
620 do {
621 for (i = 0; i < SRB_COMMAND_SIZE; i += 2) {
622 writew(0, streamer_mmio + LAPDINC);
623 }
624
625 writew(readw(streamer_mmio+LAPWWO),streamer_mmio+LAPA);
626 writew(htons(SRB_OPEN_ADAPTER<<8),streamer_mmio+LAPDINC) ; /* open */
627 writew(htons(STREAMER_CLEAR_RET_CODE<<8),streamer_mmio+LAPDINC);
628 writew(STREAMER_CLEAR_RET_CODE, streamer_mmio + LAPDINC);
629
630 writew(readw(streamer_mmio + LAPWWO) + 8, streamer_mmio + LAPA);
631#if STREAMER_NETWORK_MONITOR
632 /* If Network Monitor, instruct card to copy MAC frames through the ARB */
633 writew(htons(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), streamer_mmio + LAPDINC); /* offset 8 word contains open options */
634#else
635 writew(htons(OPEN_ADAPTER_ENABLE_FDX), streamer_mmio + LAPDINC); /* Offset 8 word contains Open.Options */
636#endif
637
638 if (streamer_priv->streamer_laa[0]) {
639 writew(readw(streamer_mmio + LAPWWO) + 12, streamer_mmio + LAPA);
640 writew(htons((streamer_priv->streamer_laa[0] << 8) |
641 streamer_priv->streamer_laa[1]),streamer_mmio+LAPDINC);
642 writew(htons((streamer_priv->streamer_laa[2] << 8) |
643 streamer_priv->streamer_laa[3]),streamer_mmio+LAPDINC);
644 writew(htons((streamer_priv->streamer_laa[4] << 8) |
645 streamer_priv->streamer_laa[5]),streamer_mmio+LAPDINC);
646 memcpy(dev->dev_addr, streamer_priv->streamer_laa, dev->addr_len);
647 }
648
649 /* save off srb open offset */
650 srb_open = readw(streamer_mmio + LAPWWO);
651#if STREAMER_DEBUG
652 writew(readw(streamer_mmio + LAPWWO),
653 streamer_mmio + LAPA);
654 printk("srb open request:\n");
655 for (i = 0; i < 16; i++) {
656 printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
657 }
658 printk("\n");
659#endif
660 spin_lock_irqsave(&streamer_priv->streamer_lock, flags);
661 streamer_priv->srb_queued = 1;
662
663 /* signal solo that SRB command has been issued */
664 writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
665 spin_unlock_irqrestore(&streamer_priv->streamer_lock, flags);
666
667 while (streamer_priv->srb_queued) {
668 interruptible_sleep_on_timeout(&streamer_priv->srb_wait, 5 * HZ);
669 if (signal_pending(current)) {
670 printk(KERN_WARNING "%s: SRB timed out.\n", dev->name);
671 printk(KERN_WARNING "SISR=%x MISR=%x, LISR=%x\n",
672 readw(streamer_mmio + SISR),
673 readw(streamer_mmio + MISR_RUM),
674 readw(streamer_mmio + LISR));
675 streamer_priv->srb_queued = 0;
676 break;
677 }
678 }
679
680#if STREAMER_DEBUG
681 printk("SISR_MASK: %x\n", readw(streamer_mmio + SISR_MASK));
682 printk("srb open response:\n");
683 writew(srb_open, streamer_mmio + LAPA);
684 for (i = 0; i < 10; i++) {
685 printk("%x:",
686 ntohs(readw(streamer_mmio + LAPDINC)));
687 }
688#endif
689
690 /* If we get the same return response as we set, the interrupt wasn't raised and the open
691 * timed out.
692 */
693 writew(srb_open + 2, streamer_mmio + LAPA);
694 srb_word = ntohs(readw(streamer_mmio + LAPD)) >> 8;
695 if (srb_word == STREAMER_CLEAR_RET_CODE) {
696 printk(KERN_WARNING "%s: Adapter Open time out or error.\n",
697 dev->name);
698 return -EIO;
699 }
700
701 if (srb_word != 0) {
702 if (srb_word == 0x07) {
703 if (!streamer_priv->streamer_ring_speed && open_finished) { /* Autosense , first time around */
704 printk(KERN_WARNING "%s: Retrying at different ring speed\n",
705 dev->name);
706 open_finished = 0;
707 } else {
708 __u16 error_code;
709
710 writew(srb_open + 6, streamer_mmio + LAPA);
711 error_code = ntohs(readw(streamer_mmio + LAPD));
712 strcpy(open_error, open_maj_error[(error_code & 0xf0) >> 4]);
713 strcat(open_error, " - ");
714 strcat(open_error, open_min_error[(error_code & 0x0f)]);
715
716 if (!streamer_priv->streamer_ring_speed &&
717 ((error_code & 0x0f) == 0x0d))
718 {
719 printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name);
720 printk(KERN_WARNING "%s: Please try again with a specified ring speed\n", dev->name);
721 free_irq(dev->irq, dev);
722 return -EIO;
723 }
724
725 printk(KERN_WARNING "%s: %s\n",
726 dev->name, open_error);
727 free_irq(dev->irq, dev);
728 return -EIO;
729
730 } /* if autosense && open_finished */
731 } else {
732 printk(KERN_WARNING "%s: Bad OPEN response: %x\n",
733 dev->name, srb_word);
734 free_irq(dev->irq, dev);
735 return -EIO;
736 }
737 } else
738 open_finished = 1;
739 } while (!(open_finished)); /* Will only loop if ring speed mismatch re-open attempted && autosense is on */
740
741 writew(srb_open + 18, streamer_mmio + LAPA);
742 srb_word=ntohs(readw(streamer_mmio+LAPD)) >> 8;
743 if (srb_word & (1 << 3))
744 if (streamer_priv->streamer_message_level)
745 printk(KERN_INFO "%s: Opened in FDX Mode\n", dev->name);
746
747 if (srb_word & 1)
748 streamer_priv->streamer_ring_speed = 16;
749 else
750 streamer_priv->streamer_ring_speed = 4;
751
752 if (streamer_priv->streamer_message_level)
753 printk(KERN_INFO "%s: Opened in %d Mbps mode\n",
754 dev->name,
755 streamer_priv->streamer_ring_speed);
756
757 writew(srb_open + 8, streamer_mmio + LAPA);
758 streamer_priv->asb = ntohs(readw(streamer_mmio + LAPDINC));
759 streamer_priv->srb = ntohs(readw(streamer_mmio + LAPDINC));
760 streamer_priv->arb = ntohs(readw(streamer_mmio + LAPDINC));
761 readw(streamer_mmio + LAPDINC); /* offset 14 word is rsvd */
762 streamer_priv->trb = ntohs(readw(streamer_mmio + LAPDINC));
763
764 streamer_priv->streamer_receive_options = 0x00;
765 streamer_priv->streamer_copy_all_options = 0;
766
767 /* setup rx ring */
768 /* enable rx channel */
769 writew(~BMCTL_RX_DIS, streamer_mmio + BMCTL_RUM);
770
771 /* setup rx descriptors */
772 streamer_priv->streamer_rx_ring=
773 kmalloc( sizeof(struct streamer_rx_desc)*
774 STREAMER_RX_RING_SIZE,GFP_KERNEL);
775 if (!streamer_priv->streamer_rx_ring) {
776 printk(KERN_WARNING "%s ALLOC of streamer rx ring FAILED!!\n",dev->name);
777 return -EIO;
778 }
779
780 for (i = 0; i < STREAMER_RX_RING_SIZE; i++) {
781 struct sk_buff *skb;
782
783 skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
784 if (skb == NULL)
785 break;
786
787 skb->dev = dev;
788
789 streamer_priv->streamer_rx_ring[i].forward =
790 cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[i + 1],
791 sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE));
792 streamer_priv->streamer_rx_ring[i].status = 0;
793 streamer_priv->streamer_rx_ring[i].buffer =
794 cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data,
795 streamer_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
796 streamer_priv->streamer_rx_ring[i].framelen_buflen = streamer_priv->pkt_buf_sz;
797 streamer_priv->rx_ring_skb[i] = skb;
798 }
799 streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1].forward =
800 cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0],
801 sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE));
802
803 if (i == 0) {
804 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n", dev->name);
805 free_irq(dev->irq, dev);
806 return -EIO;
807 }
808
809 streamer_priv->rx_ring_last_received = STREAMER_RX_RING_SIZE - 1; /* last processed rx status */
810
811 writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0],
812 sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)),
813 streamer_mmio + RXBDA);
814 writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1],
815 sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)),
816 streamer_mmio + RXLBDA);
817
818 /* set bus master interrupt event mask */
819 writew(MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK);
820
821
822 /* setup tx ring */
823 streamer_priv->streamer_tx_ring=kmalloc(sizeof(struct streamer_tx_desc)*
824 STREAMER_TX_RING_SIZE,GFP_KERNEL);
825 if (!streamer_priv->streamer_tx_ring) {
826 printk(KERN_WARNING "%s ALLOC of streamer_tx_ring FAILED\n",dev->name);
827 return -EIO;
828 }
829
830 writew(~BMCTL_TX2_DIS, streamer_mmio + BMCTL_RUM); /* Enables TX channel 2 */
831 for (i = 0; i < STREAMER_TX_RING_SIZE; i++) {
832 streamer_priv->streamer_tx_ring[i].forward = cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
833 &streamer_priv->streamer_tx_ring[i + 1],
834 sizeof(struct streamer_tx_desc),
835 PCI_DMA_TODEVICE));
836 streamer_priv->streamer_tx_ring[i].status = 0;
837 streamer_priv->streamer_tx_ring[i].bufcnt_framelen = 0;
838 streamer_priv->streamer_tx_ring[i].buffer = 0;
839 streamer_priv->streamer_tx_ring[i].buflen = 0;
840 streamer_priv->streamer_tx_ring[i].rsvd1 = 0;
841 streamer_priv->streamer_tx_ring[i].rsvd2 = 0;
842 streamer_priv->streamer_tx_ring[i].rsvd3 = 0;
843 }
844 streamer_priv->streamer_tx_ring[STREAMER_TX_RING_SIZE - 1].forward =
845 cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_tx_ring[0],
846 sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE));
847
848 streamer_priv->free_tx_ring_entries = STREAMER_TX_RING_SIZE;
849 streamer_priv->tx_ring_free = 0; /* next entry in tx ring to use */
850 streamer_priv->tx_ring_last_status = STREAMER_TX_RING_SIZE - 1;
851
852 /* set Busmaster interrupt event mask (handle receives on interrupt only */
853 writew(MISR_TX2_EOF | MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK);
854 /* set system event interrupt mask */
855 writew(SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE, streamer_mmio + SISR_MASK_SUM);
856
857#if STREAMER_DEBUG
858 printk("BMCTL: %x\n", readw(streamer_mmio + BMCTL_SUM));
859 printk("SISR MASK: %x\n", readw(streamer_mmio + SISR_MASK));
860#endif
861
862#if STREAMER_NETWORK_MONITOR
863
864 writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA);
865 printk("%s: Node Address: %04x:%04x:%04x\n", dev->name,
866 ntohs(readw(streamer_mmio + LAPDINC)),
867 ntohs(readw(streamer_mmio + LAPDINC)),
868 ntohs(readw(streamer_mmio + LAPDINC)));
869 readw(streamer_mmio + LAPDINC);
870 readw(streamer_mmio + LAPDINC);
871 printk("%s: Functional Address: %04x:%04x\n", dev->name,
872 ntohs(readw(streamer_mmio + LAPDINC)),
873 ntohs(readw(streamer_mmio + LAPDINC)));
874
875 writew(streamer_priv->streamer_parms_addr + 4,
876 streamer_mmio + LAPA);
877 printk("%s: NAUN Address: %04x:%04x:%04x\n", dev->name,
878 ntohs(readw(streamer_mmio + LAPDINC)),
879 ntohs(readw(streamer_mmio + LAPDINC)),
880 ntohs(readw(streamer_mmio + LAPDINC)));
881#endif
882
883 netif_start_queue(dev);
884 netif_carrier_on(dev);
885 return 0;
886}
887
888/*
889 * When we enter the rx routine we do not know how many frames have been
890 * queued on the rx channel. Therefore we start at the next rx status
891 * position and travel around the receive ring until we have completed
892 * all the frames.
893 *
894 * This means that we may process the frame before we receive the end
895 * of frame interrupt. This is why we always test the status instead
896 * of blindly processing the next frame.
897 *
898 */
899static void streamer_rx(struct net_device *dev)
900{
901 struct streamer_private *streamer_priv =
902 netdev_priv(dev);
903 __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
904 struct streamer_rx_desc *rx_desc;
905 int rx_ring_last_received, length, frame_length, buffer_cnt = 0;
906 struct sk_buff *skb, *skb2;
907
908 /* setup the next rx descriptor to be received */
909 rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)];
910 rx_ring_last_received = streamer_priv->rx_ring_last_received;
911
912 while (rx_desc->status & 0x01000000) { /* While processed descriptors are available */
913 if (rx_ring_last_received != streamer_priv->rx_ring_last_received)
914 {
915 printk(KERN_WARNING "RX Error 1 rx_ring_last_received not the same %x %x\n",
916 rx_ring_last_received, streamer_priv->rx_ring_last_received);
917 }
918 streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);
919 rx_ring_last_received = streamer_priv->rx_ring_last_received;
920
921 length = rx_desc->framelen_buflen & 0xffff; /* buffer length */
922 frame_length = (rx_desc->framelen_buflen >> 16) & 0xffff;
923
924 if (rx_desc->status & 0x7E830000) { /* errors */
925 if (streamer_priv->streamer_message_level) {
926 printk(KERN_WARNING "%s: Rx Error %x\n",
927 dev->name, rx_desc->status);
928 }
929 } else { /* received without errors */
930 if (rx_desc->status & 0x80000000) { /* frame complete */
931 buffer_cnt = 1;
932 skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
933 } else {
934 skb = dev_alloc_skb(frame_length);
935 }
936
937 if (skb == NULL)
938 {
939 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n", dev->name);
940 dev->stats.rx_dropped++;
941 } else { /* we allocated an skb OK */
942 if (buffer_cnt == 1) {
943 /* release the DMA mapping */
944 pci_unmap_single(streamer_priv->pci_dev,
945 le32_to_cpu(streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer),
946 streamer_priv->pkt_buf_sz,
947 PCI_DMA_FROMDEVICE);
948 skb2 = streamer_priv->rx_ring_skb[rx_ring_last_received];
949#if STREAMER_DEBUG_PACKETS
950 {
951 int i;
952 printk("streamer_rx packet print: skb->data2 %p skb->head %p\n", skb2->data, skb2->head);
953 for (i = 0; i < frame_length; i++)
954 {
955 printk("%x:", skb2->data[i]);
956 if (((i + 1) % 16) == 0)
957 printk("\n");
958 }
959 printk("\n");
960 }
961#endif
962 skb_put(skb2, length);
963 skb2->protocol = tr_type_trans(skb2, dev);
964 /* recycle this descriptor */
965 streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0;
966 streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz;
967 streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer =
968 cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, streamer_priv->pkt_buf_sz,
969 PCI_DMA_FROMDEVICE));
970 streamer_priv->rx_ring_skb[rx_ring_last_received] = skb;
971 /* place recycled descriptor back on the adapter */
972 writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
973 &streamer_priv->streamer_rx_ring[rx_ring_last_received],
974 sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE)),
975 streamer_mmio + RXLBDA);
976 /* pass the received skb up to the protocol */
977 netif_rx(skb2);
978 } else {
979 do { /* Walk the buffers */
980 pci_unmap_single(streamer_priv->pci_dev, le32_to_cpu(rx_desc->buffer), length, PCI_DMA_FROMDEVICE),
981 memcpy(skb_put(skb, length), (void *)rx_desc->buffer, length); /* copy this fragment */
982 streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0;
983 streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz;
984
985 /* give descriptor back to the adapter */
986 writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
987 &streamer_priv->streamer_rx_ring[rx_ring_last_received],
988 length, PCI_DMA_FROMDEVICE)),
989 streamer_mmio + RXLBDA);
990
991 if (rx_desc->status & 0x80000000)
992 break; /* this descriptor completes the frame */
993
994 /* else get the next pending descriptor */
995 if (rx_ring_last_received!= streamer_priv->rx_ring_last_received)
996 {
997 printk("RX Error rx_ring_last_received not the same %x %x\n",
998 rx_ring_last_received,
999 streamer_priv->rx_ring_last_received);
1000 }
1001 rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE-1)];
1002
1003 length = rx_desc->framelen_buflen & 0xffff; /* buffer length */
1004 streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE - 1);
1005 rx_ring_last_received = streamer_priv->rx_ring_last_received;
1006 } while (1);
1007
1008 skb->protocol = tr_type_trans(skb, dev);
1009 /* send up to the protocol */
1010 netif_rx(skb);
1011 }
1012 dev->stats.rx_packets++;
1013 dev->stats.rx_bytes += length;
1014 } /* if skb == null */
1015 } /* end received without errors */
1016
1017 /* try the next one */
1018 rx_desc = &streamer_priv->streamer_rx_ring[(rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)];
1019 } /* end for all completed rx descriptors */
1020}
1021
/*
 * Top-level interrupt handler.  Reads the summary register (SISR) and
 * services one pending source per loop pass, in fixed priority order,
 * for at most MAX_INTR passes (guards against a wedged interrupt line).
 * Runs under streamer_lock so it cannot race streamer_xmit on the tx
 * ring.  Each source is acknowledged by writing its inverted bit to the
 * reset-under-mask register (SISR_RUM / MISR_RUM), followed by a dummy
 * read to flush the posted write.
 */
static irqreturn_t streamer_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct streamer_private *streamer_priv =
	    netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	__u16 sisr;
	__u16 misr;
	u8 max_intr = MAX_INTR;

	spin_lock(&streamer_priv->streamer_lock);
	sisr = readw(streamer_mmio + SISR);

	while((sisr & (SISR_MI | SISR_SRB_REPLY | SISR_ADAPTER_CHECK | SISR_ASB_FREE |
		       SISR_ARB_CMD | SISR_TRB_REPLY | SISR_PAR_ERR | SISR_SERR_ERR)) &&
	      (max_intr > 0)) {

		if(sisr & SISR_PAR_ERR) {
			/* PCI parity error: just acknowledge it */
			writew(~SISR_PAR_ERR, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);	/* flush posted write */
		}

		else if(sisr & SISR_SERR_ERR) {
			/* PCI system error: just acknowledge it */
			writew(~SISR_SERR_ERR, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);
		}

		else if(sisr & SISR_MI) {
			/* Miscellaneous interrupt: details in MISR */
			misr = readw(streamer_mmio + MISR_RUM);

			if (misr & MISR_TX2_EOF) {
				/* TX channel 2 end-of-frame: reclaim every
				 * completed descriptor, free its skb, update
				 * stats, then restart the stack's queue. */
				while(streamer_priv->streamer_tx_ring[(streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1)].status) {
					streamer_priv->tx_ring_last_status = (streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1);
					streamer_priv->free_tx_ring_entries++;
					dev->stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len;
					dev->stats.tx_packets++;
					dev_kfree_skb_irq(streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]);
					/* poison/clear the descriptor so a stale entry
					 * is recognizable and won't be reclaimed twice */
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buffer = 0xdeadbeef;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].status = 0;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].bufcnt_framelen = 0;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buflen = 0;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd1 = 0;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd2 = 0;
					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd3 = 0;
				}
				netif_wake_queue(dev);
			}

			if (misr & MISR_RX_EOF) {
				streamer_rx(dev);
			}
			/* MISR_RX_EOF */

			if (misr & MISR_RX_NOBUF) {
				/* According to the documentation, we don't have to do anything,
				 * but trapping it keeps it out of /var/log/messages.
				 */
			} /* SISR_RX_NOBUF */

			writew(~misr, streamer_mmio + MISR_RUM);
			(void)readw(streamer_mmio + MISR_RUM);
		}

		else if (sisr & SISR_SRB_REPLY) {
			/* SRB command finished: wake a sleeping issuer (1) or
			 * run the bottom half for a non-sleeping issuer (2). */
			if (streamer_priv->srb_queued == 1) {
				wake_up_interruptible(&streamer_priv->srb_wait);
			} else if (streamer_priv->srb_queued == 2) {
				streamer_srb_bh(dev);
			}
			streamer_priv->srb_queued = 0;

			writew(~SISR_SRB_REPLY, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);
		}

		else if (sisr & SISR_ADAPTER_CHECK) {
			/* Fatal adapter error: dump the 8-byte check block and
			 * take the interface down; no automatic recovery. */
			printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
			writel(readl(streamer_mmio + LAPWWO), streamer_mmio + LAPA);
			printk(KERN_WARNING "%s: Words %x:%x:%x:%x:\n",
			       dev->name, readw(streamer_mmio + LAPDINC),
			       ntohs(readw(streamer_mmio + LAPDINC)),
			       ntohs(readw(streamer_mmio + LAPDINC)),
			       ntohs(readw(streamer_mmio + LAPDINC)));
			netif_stop_queue(dev);
			netif_carrier_off(dev);
			printk(KERN_WARNING "%s: Adapter must be manually reset.\n", dev->name);
		}

		/* SISR_ADAPTER_CHECK */
		else if (sisr & SISR_ASB_FREE) {
			/* Wake up anything that is waiting for the asb response */
			if (streamer_priv->asb_queued) {
				streamer_asb_bh(dev);
			}
			writew(~SISR_ASB_FREE, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);
		}
		/* SISR_ASB_FREE */
		else if (sisr & SISR_ARB_CMD) {
			/* Adapter-initiated request (MAC frame / status change) */
			streamer_arb_cmd(dev);
			writew(~SISR_ARB_CMD, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);
		}
		/* SISR_ARB_CMD */
		else if (sisr & SISR_TRB_REPLY) {
			/* Wake up anything that is waiting for the trb response */
			if (streamer_priv->trb_queued) {
				wake_up_interruptible(&streamer_priv->
						      trb_wait);
			}
			streamer_priv->trb_queued = 0;
			writew(~SISR_TRB_REPLY, streamer_mmio + SISR_RUM);
			(void)readw(streamer_mmio + SISR_RUM);
		}
		/* SISR_TRB_REPLY */

		sisr = readw(streamer_mmio + SISR);
		max_intr--;
	} /* while() */

	spin_unlock(&streamer_priv->streamer_lock) ;
	return IRQ_HANDLED;
}
1145
1146static netdev_tx_t streamer_xmit(struct sk_buff *skb,
1147 struct net_device *dev)
1148{
1149 struct streamer_private *streamer_priv =
1150 netdev_priv(dev);
1151 __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
1152 unsigned long flags ;
1153
1154 spin_lock_irqsave(&streamer_priv->streamer_lock, flags);
1155
1156 if (streamer_priv->free_tx_ring_entries) {
1157 streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].status = 0;
1158 streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].bufcnt_framelen = 0x00020000 | skb->len;
1159 streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].buffer =
1160 cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE));
1161 streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd1 = skb->len;
1162 streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd2 = 0;
1163 streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd3 = 0;
1164 streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].buflen = skb->len;
1165
1166 streamer_priv->tx_ring_skb[streamer_priv->tx_ring_free] = skb;
1167 streamer_priv->free_tx_ring_entries--;
1168#if STREAMER_DEBUG_PACKETS
1169 {
1170 int i;
1171 printk("streamer_xmit packet print:\n");
1172 for (i = 0; i < skb->len; i++) {
1173 printk("%x:", skb->data[i]);
1174 if (((i + 1) % 16) == 0)
1175 printk("\n");
1176 }
1177 printk("\n");
1178 }
1179#endif
1180
1181 writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
1182 &streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free],
1183 sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE)),
1184 streamer_mmio + TX2LFDA);
1185 (void)readl(streamer_mmio + TX2LFDA);
1186
1187 streamer_priv->tx_ring_free = (streamer_priv->tx_ring_free + 1) & (STREAMER_TX_RING_SIZE - 1);
1188 spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags);
1189 return NETDEV_TX_OK;
1190 } else {
1191 netif_stop_queue(dev);
1192 spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags);
1193 return NETDEV_TX_BUSY;
1194 }
1195}
1196
1197
/*
 * Bring the interface down: issue SRB_CLOSE_ADAPTER, wait (up to 60s,
 * interruptible) for the SRB reply, then free every skb still parked
 * on the receive ring and release the interrupt line.
 */
static int streamer_close(struct net_device *dev)
{
	struct streamer_private *streamer_priv =
	    netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	unsigned long flags;
	int i;

	netif_stop_queue(dev);
	netif_carrier_off(dev);
	/* build the close SRB through the auto-incrementing LAPDINC window */
	writew(streamer_priv->srb, streamer_mmio + LAPA);
	writew(htons(SRB_CLOSE_ADAPTER << 8),streamer_mmio+LAPDINC);
	writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);

	spin_lock_irqsave(&streamer_priv->streamer_lock, flags);

	streamer_priv->srb_queued = 1;	/* 1 => interrupt handler wakes srb_wait */
	writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);

	spin_unlock_irqrestore(&streamer_priv->streamer_lock, flags);

	/* NOTE(review): interruptible_sleep_on_timeout is inherently racy
	 * (the reply may arrive between the srb_queued test and the sleep);
	 * the 60s timeout acts as the backstop — confirm before reuse. */
	while (streamer_priv->srb_queued)
	{
		interruptible_sleep_on_timeout(&streamer_priv->srb_wait,
					       jiffies + 60 * HZ);
		if (signal_pending(current))
		{
			printk(KERN_WARNING "%s: SRB timed out.\n", dev->name);
			printk(KERN_WARNING "SISR=%x MISR=%x LISR=%x\n",
			       readw(streamer_mmio + SISR),
			       readw(streamer_mmio + MISR_RUM),
			       readw(streamer_mmio + LISR));
			streamer_priv->srb_queued = 0;
			break;
		}
	}

	streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);

	/* walk the whole rx ring once, freeing any attached skbs */
	for (i = 0; i < STREAMER_RX_RING_SIZE; i++) {
		if (streamer_priv->rx_ring_skb[streamer_priv->rx_ring_last_received]) {
			dev_kfree_skb(streamer_priv->rx_ring_skb[streamer_priv->rx_ring_last_received]);
		}
		streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);
	}

	/* reset tx/rx fifo's and busmaster logic */

	/* TBD. Add graceful way to reset the LLC channel without doing a soft reset.
	   writel(readl(streamer_mmio+BCTL)|(3<<13),streamer_mmio+BCTL);
	   udelay(1);
	   writel(readl(streamer_mmio+BCTL)&~(3<<13),streamer_mmio+BCTL);
	 */

#if STREAMER_DEBUG
	writew(streamer_priv->srb, streamer_mmio + LAPA);
	printk("srb): ");
	for (i = 0; i < 2; i++) {
		printk("%x ", ntohs(readw(streamer_mmio + LAPDINC)));
	}
	printk("\n");
#endif
	free_irq(dev->irq, dev);
	return 0;
}
1263
/*
 * ndo_set_rx_mode handler: toggles promiscuous copy-all options and
 * programs the functional address mask from the multicast list.  Both
 * actions are performed by building an SRB in adapter memory; only one
 * SRB can be outstanding, so when the copy-all options change we issue
 * that SRB and return — the functional address is picked up on the
 * next invocation.
 */
static void streamer_set_rx_mode(struct net_device *dev)
{
	struct streamer_private *streamer_priv =
	    netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	__u8 options = 0;
	struct netdev_hw_addr *ha;
	unsigned char dev_mc_address[5];

	/* NOTE(review): LAPA is written with writel here but with writew
	 * elsewhere in this driver — confirm the intended access width. */
	writel(streamer_priv->srb, streamer_mmio + LAPA);
	options = streamer_priv->streamer_copy_all_options;

	if (dev->flags & IFF_PROMISC)
		options |= (3 << 5);	/* All LLC and MAC frames, all through the main rx channel */
	else
		options &= ~(3 << 5);

	/* Only issue the srb if there is a change in options */

	if ((options ^ streamer_priv->streamer_copy_all_options))
	{
		/* Now to issue the srb command to alter the copy.all.options */
		writew(htons(SRB_MODIFY_RECEIVE_OPTIONS << 8), streamer_mmio+LAPDINC);
		writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
		writew(htons((streamer_priv->streamer_receive_options << 8) | options),streamer_mmio+LAPDINC);
		/* the remaining words spell out a correlator/signature */
		writew(htons(0x4a41),streamer_mmio+LAPDINC);
		writew(htons(0x4d45),streamer_mmio+LAPDINC);
		writew(htons(0x5320),streamer_mmio+LAPDINC);
		writew(0x2020, streamer_mmio + LAPDINC);

		streamer_priv->srb_queued = 2;	/* Can't sleep, use srb_bh */

		writel(LISR_SRB_CMD, streamer_mmio + LISR_SUM);

		streamer_priv->streamer_copy_all_options = options;
		return;
	}

	/* Set the functional addresses we need for multicast */
	writel(streamer_priv->srb,streamer_mmio+LAPA);
	dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;

	/* OR together bytes 2-5 of every multicast MAC to form the mask */
	netdev_for_each_mc_addr(ha, dev) {
		dev_mc_address[0] |= ha->addr[2];
		dev_mc_address[1] |= ha->addr[3];
		dev_mc_address[2] |= ha->addr[4];
		dev_mc_address[3] |= ha->addr[5];
	}

	writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC);
	writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
	writew(0,streamer_mmio+LAPDINC);
	writew(htons( (dev_mc_address[0] << 8) | dev_mc_address[1]),streamer_mmio+LAPDINC);
	writew(htons( (dev_mc_address[2] << 8) | dev_mc_address[3]),streamer_mmio+LAPDINC);
	streamer_priv->srb_queued = 2 ;
	writel(LISR_SRB_CMD,streamer_mmio+LISR_SUM);
}
1321
/*
 * Bottom half for SRB completions issued with srb_queued == 2 (callers
 * that could not sleep).  The first reply byte is the command code and
 * the next read yields the return code; each readw(LAPDINC) advances
 * the window, so the reads here must stay in this exact order.  Mostly
 * this just logs the outcome.
 */
static void streamer_srb_bh(struct net_device *dev)
{
	struct streamer_private *streamer_priv = netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	__u16 srb_word;

	writew(streamer_priv->srb, streamer_mmio + LAPA);
	srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;	/* command code */

	switch (srb_word) {

	/* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
	 * At some point we should do something if we get an error, such as
	 * resetting the IFF_PROMISC flag in dev
	 */

	case SRB_MODIFY_RECEIVE_OPTIONS:
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;	/* return code */

		switch (srb_word) {
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		default:
			if (streamer_priv->streamer_message_level)
				printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",
				       dev->name,
				       streamer_priv->streamer_copy_all_options,
				       streamer_priv->streamer_receive_options);
			break;
		}		/* switch srb[2] */
		break;


	/* SRB_SET_GROUP_ADDRESS - Multicast group setting
	 */
	case SRB_SET_GROUP_ADDRESS:
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
		switch (srb_word) {
		case 0x00:
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		case 0x3c:
			printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n", dev->name);
			break;
		case 0x3e:	/* If we ever implement individual multicast addresses, will need to deal with this */
			printk(KERN_WARNING "%s: Group address registers full\n", dev->name);
			break;
		case 0x55:
			printk(KERN_INFO "%s: Group Address already set.\n", dev->name);
			break;
		default:
			break;
		}		/* switch srb[2] */
		break;


	/* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
	 */
	case SRB_RESET_GROUP_ADDRESS:
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
		switch (srb_word) {
		case 0x00:
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		case 0x39:	/* Must deal with this if individual multicast addresses used */
			printk(KERN_INFO "%s: Group address not found\n", dev->name);
			break;
		default:
			break;
		}		/* switch srb[2] */
		break;


	/* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
	 */

	case SRB_SET_FUNC_ADDRESS:
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
		switch (srb_word) {
		case 0x00:
			if (streamer_priv->streamer_message_level)
				printk(KERN_INFO "%s: Functional Address Mask Set\n", dev->name);
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		default:
			break;
		}		/* switch srb[2] */
		break;

	/* SRB_READ_LOG - Read and reset the adapter error counters
	 */

	case SRB_READ_LOG:
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
		switch (srb_word) {
		case 0x00:
			{
				int i;
				if (streamer_priv->streamer_message_level)
					printk(KERN_INFO "%s: Read Log command complete\n", dev->name);
				printk("Read Log statistics: ");
				/* counters live at srb+6 in adapter memory */
				writew(streamer_priv->srb + 6,
				       streamer_mmio + LAPA);
				for (i = 0; i < 5; i++) {
					printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
				}
				printk("\n");
			}
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;

		}		/* switch srb[2] */
		break;

	/* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */

	case SRB_READ_SR_COUNTERS:
		srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
		switch (srb_word) {
		case 0x00:
			if (streamer_priv->streamer_message_level)
				printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name);
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		default:
			break;
		}		/* switch srb[2] */
		break;

	default:
		printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n", dev->name);
		break;
	}			/* switch srb[0] */
}
1485
1486static int streamer_set_mac_address(struct net_device *dev, void *addr)
1487{
1488 struct sockaddr *saddr = addr;
1489 struct streamer_private *streamer_priv = netdev_priv(dev);
1490
1491 if (netif_running(dev))
1492 {
1493 printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name);
1494 return -EIO;
1495 }
1496
1497 memcpy(streamer_priv->streamer_laa, saddr->sa_data, dev->addr_len);
1498
1499 if (streamer_priv->streamer_message_level) {
1500 printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",
1501 dev->name, streamer_priv->streamer_laa[0],
1502 streamer_priv->streamer_laa[1],
1503 streamer_priv->streamer_laa[2],
1504 streamer_priv->streamer_laa[3],
1505 streamer_priv->streamer_laa[4],
1506 streamer_priv->streamer_laa[5]);
1507 }
1508 return 0;
1509}
1510
/*
 * Service an ARB (Adapter Request Block) command raised by the adapter.
 * Two commands are handled: ARB_RECEIVE_DATA (a received MAC frame,
 * copied out of adapter memory through the LAPA/LAPDINC window, then
 * acknowledged via an ASB reply) and ARB_LAN_CHANGE_STATUS (ring status
 * change, decoded and logged; may trigger READ.LOG / READ.SR.COUNTERS
 * SRBs on counter overflow).  All LAPDINC reads auto-increment the
 * window, so their order is significant.
 */
static void streamer_arb_cmd(struct net_device *dev)
{
	struct streamer_private *streamer_priv =
	    netdev_priv(dev);
	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
	__u8 header_len;
	__u16 frame_len, buffer_len;
	struct sk_buff *mac_frame;
	__u8 frame_data[256];
	__u16 buff_off;
	__u16 lan_status = 0, lan_status_diff;	/* Initialize to stop compiler warning */
	__u8 fdx_prot_error;
	__u16 next_ptr;
	__u16 arb_word;

#if STREAMER_NETWORK_MONITOR
	struct trh_hdr *mac_hdr;
#endif

	writew(streamer_priv->arb, streamer_mmio + LAPA);
	arb_word=ntohs(readw(streamer_mmio+LAPD)) >> 8;

	if (arb_word == ARB_RECEIVE_DATA) {	/* Receive.data, MAC frames */
		/* frame descriptor lives at arb+6: buffer offset, header
		 * length, frame length */
		writew(streamer_priv->arb + 6, streamer_mmio + LAPA);
		streamer_priv->mac_rx_buffer = buff_off = ntohs(readw(streamer_mmio + LAPDINC));
		header_len=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;	/* 802.5 Token-Ring Header Length */
		frame_len = ntohs(readw(streamer_mmio + LAPDINC));

#if STREAMER_DEBUG
		{
			int i;
			__u16 next;
			__u8 status;
			__u16 len;

			writew(ntohs(buff_off), streamer_mmio + LAPA);	/*setup window to frame data */
			next = htons(readw(streamer_mmio + LAPDINC));
			status =
			    ntohs(readw(streamer_mmio + LAPDINC)) & 0xff;
			len = ntohs(readw(streamer_mmio + LAPDINC));

			/* print out 1st 14 bytes of frame data */
			for (i = 0; i < 7; i++) {
				printk("Loc %d = %04x\n", i,
				       ntohs(readw
					     (streamer_mmio + LAPDINC)));
			}

			printk("next %04x, fs %02x, len %04x\n", next,
			       status, len);
		}
#endif
		if (!(mac_frame = dev_alloc_skb(frame_len))) {
			printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n",
			       dev->name);
			goto drop_frame;
		}
		/* Walk the buffer chain, creating the frame */

		/* Each buffer begins with: next-pointer word, status word,
		 * length word, then up to 256 bytes of data.  NOTE(review):
		 * fragments are appended without checking the running total
		 * against frame_len — assumes the adapter's chain never
		 * exceeds the reported frame length. */
		do {
			int i;
			__u16 rx_word;

			writew(htons(buff_off), streamer_mmio + LAPA);	/* setup window to frame data */
			next_ptr = ntohs(readw(streamer_mmio + LAPDINC));
			readw(streamer_mmio + LAPDINC);	/* read thru status word */
			buffer_len = ntohs(readw(streamer_mmio + LAPDINC));

			if (buffer_len > 256)
				break;

			i = 0;
			while (i < buffer_len) {
				rx_word=ntohs(readw(streamer_mmio+LAPDINC));
				frame_data[i]=rx_word >> 8;
				frame_data[i+1]=rx_word & 0xff;
				i += 2;
			}

			memcpy(skb_put(mac_frame, buffer_len),
			       frame_data, buffer_len);
		} while (next_ptr && (buff_off = next_ptr));

		mac_frame->protocol = tr_type_trans(mac_frame, dev);
#if STREAMER_NETWORK_MONITOR
		printk(KERN_WARNING "%s: Received MAC Frame, details:\n",
		       dev->name);
		mac_hdr = tr_hdr(mac_frame);
		printk(KERN_WARNING
		       "%s: MAC Frame Dest. Addr: %pM\n",
		       dev->name, mac_hdr->daddr);
		printk(KERN_WARNING
		       "%s: MAC Frame Srce. Addr: %pM\n",
		       dev->name, mac_hdr->saddr);
#endif
		netif_rx(mac_frame);

		/* Now tell the card we have dealt with the received frame */
drop_frame:
		/* Set LISR Bit 1 */
		writel(LISR_ARB_FREE, streamer_priv->streamer_mmio + LISR_SUM);

		/* Is the ASB free ? */

		if (!(readl(streamer_priv->streamer_mmio + SISR) & SISR_ASB_FREE))
		{
			/* ASB busy: defer the reply to streamer_asb_bh */
			streamer_priv->asb_queued = 1;
			writel(LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);
			return;
			/* Drop out and wait for the bottom half to be run */
		}


		/* build the receive.data ASB reply in place */
		writew(streamer_priv->asb, streamer_mmio + LAPA);
		writew(htons(ASB_RECEIVE_DATA << 8), streamer_mmio+LAPDINC);
		writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
		writew(0, streamer_mmio + LAPDINC);
		writew(htons(streamer_priv->mac_rx_buffer), streamer_mmio + LAPD);

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);

		streamer_priv->asb_queued = 2;	/* reply issued; check rc in bh */
		return;

	} else if (arb_word == ARB_LAN_CHANGE_STATUS) {	/* Lan.change.status */
		writew(streamer_priv->arb + 6, streamer_mmio + LAPA);
		lan_status = ntohs(readw(streamer_mmio + LAPDINC));
		fdx_prot_error = ntohs(readw(streamer_mmio+LAPD)) >> 8;

		/* Issue ARB Free */
		/* NOTE(review): the receive path above frees the ARB with
		 * writel; this path uses writew — confirm access width. */
		writew(LISR_ARB_FREE, streamer_priv->streamer_mmio + LISR_SUM);

		/* bits newly set since the last status report */
		lan_status_diff = (streamer_priv->streamer_lan_status ^ lan_status) &
		    lan_status;

		if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR))
		{
			if (lan_status_diff & LSC_LWF)
				printk(KERN_WARNING "%s: Short circuit detected on the lobe\n", dev->name);
			if (lan_status_diff & LSC_ARW)
				printk(KERN_WARNING "%s: Auto removal error\n", dev->name);
			if (lan_status_diff & LSC_FPE)
				printk(KERN_WARNING "%s: FDX Protocol Error\n", dev->name);
			if (lan_status_diff & LSC_RR)
				printk(KERN_WARNING "%s: Force remove MAC frame received\n", dev->name);

			/* Adapter has been closed by the hardware */

			/* reset tx/rx fifo's and busmaster logic */

			/* @TBD. no llc reset on autostreamer writel(readl(streamer_mmio+BCTL)|(3<<13),streamer_mmio+BCTL);
			   udelay(1);
			   writel(readl(streamer_mmio+BCTL)&~(3<<13),streamer_mmio+BCTL); */

			netif_stop_queue(dev);
			netif_carrier_off(dev);
			printk(KERN_WARNING "%s: Adapter must be manually reset.\n", dev->name);
		}
		/* If serious error */
		if (streamer_priv->streamer_message_level) {
			if (lan_status_diff & LSC_SIG_LOSS)
				printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
			if (lan_status_diff & LSC_HARD_ERR)
				printk(KERN_INFO "%s: Beaconing\n", dev->name);
			if (lan_status_diff & LSC_SOFT_ERR)
				printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
			if (lan_status_diff & LSC_TRAN_BCN)
				printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name);
			if (lan_status_diff & LSC_SS)
				printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
			if (lan_status_diff & LSC_RING_REC)
				printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name);
			if (lan_status_diff & LSC_FDX_MODE)
				printk(KERN_INFO "%s: Operating in FDX mode\n", dev->name);
		}

		if (lan_status_diff & LSC_CO) {
			if (streamer_priv->streamer_message_level)
				printk(KERN_INFO "%s: Counter Overflow\n", dev->name);

			/* Issue READ.LOG command */

			writew(streamer_priv->srb, streamer_mmio + LAPA);
			writew(htons(SRB_READ_LOG << 8),streamer_mmio+LAPDINC);
			writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
			writew(0, streamer_mmio + LAPDINC);
			streamer_priv->srb_queued = 2;	/* Can't sleep, use srb_bh */

			writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
		}

		if (lan_status_diff & LSC_SR_CO) {
			if (streamer_priv->streamer_message_level)
				printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);

			/* Issue a READ.SR.COUNTERS */
			writew(streamer_priv->srb, streamer_mmio + LAPA);
			writew(htons(SRB_READ_SR_COUNTERS << 8),
			       streamer_mmio+LAPDINC);
			writew(htons(STREAMER_CLEAR_RET_CODE << 8),
			       streamer_mmio+LAPDINC);
			streamer_priv->srb_queued = 2;	/* Can't sleep, use srb_bh */
			writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);

		}
		streamer_priv->streamer_lan_status = lan_status;
	}			/* Lan.change.status */
	else
		printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
}
1721
1722static void streamer_asb_bh(struct net_device *dev)
1723{
1724 struct streamer_private *streamer_priv =
1725 netdev_priv(dev);
1726 __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
1727
1728 if (streamer_priv->asb_queued == 1)
1729 {
1730 /* Dropped through the first time */
1731
1732 writew(streamer_priv->asb, streamer_mmio + LAPA);
1733 writew(htons(ASB_RECEIVE_DATA << 8),streamer_mmio+LAPDINC);
1734 writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
1735 writew(0, streamer_mmio + LAPDINC);
1736 writew(htons(streamer_priv->mac_rx_buffer), streamer_mmio + LAPD);
1737
1738 writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);
1739 streamer_priv->asb_queued = 2;
1740
1741 return;
1742 }
1743
1744 if (streamer_priv->asb_queued == 2) {
1745 __u8 rc;
1746 writew(streamer_priv->asb + 2, streamer_mmio + LAPA);
1747 rc=ntohs(readw(streamer_mmio+LAPD)) >> 8;
1748 switch (rc) {
1749 case 0x01:
1750 printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
1751 break;
1752 case 0x26:
1753 printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
1754 break;
1755 case 0xFF:
1756 /* Valid response, everything should be ok again */
1757 break;
1758 default:
1759 printk(KERN_WARNING "%s: Invalid return code in asb\n", dev->name);
1760 break;
1761 }
1762 }
1763 streamer_priv->asb_queued = 0;
1764}
1765
1766static int streamer_change_mtu(struct net_device *dev, int mtu)
1767{
1768 struct streamer_private *streamer_priv =
1769 netdev_priv(dev);
1770 __u16 max_mtu;
1771
1772 if (streamer_priv->streamer_ring_speed == 4)
1773 max_mtu = 4500;
1774 else
1775 max_mtu = 18000;
1776
1777 if (mtu > max_mtu)
1778 return -EINVAL;
1779 if (mtu < 100)
1780 return -EINVAL;
1781
1782 dev->mtu = mtu;
1783 streamer_priv->pkt_buf_sz = mtu + TR_HLEN;
1784
1785 return 0;
1786}
1787
1788#if STREAMER_NETWORK_MONITOR
1789#ifdef CONFIG_PROC_FS
/*
 * Legacy /proc read handler (read_proc interface): emits a banner and
 * then one sprintf_info() section per registered adapter, using the
 * classic begin/pos/offset bookkeeping to return only the slice of
 * output the caller asked for.
 */
static int streamer_proc_info(char *buffer, char **start, off_t offset,
			      int length, int *eof, void *data)
{
	struct streamer_private *sdev=NULL;
	struct pci_dev *pci_device = NULL;
	int len = 0;
	off_t begin = 0;
	off_t pos = 0;
	int size;

	struct net_device *dev;

	size = sprintf(buffer, "IBM LanStreamer/MPC Chipset Token Ring Adapters\n");

	pos += size;
	len += size;

	/* walk the driver's global adapter list */
	for(sdev=dev_streamer; sdev; sdev=sdev->next) {
		pci_device=sdev->pci_dev;
		dev=pci_get_drvdata(pci_device);

		size = sprintf_info(buffer + len, dev);
		len += size;
		pos = begin + len;

		if (pos < offset) {
			/* everything so far is before the requested window */
			len = 0;
			begin = pos;
		}
		if (pos > offset + length)
			break;	/* filled the requested window */
	}			/* for */

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);	/* Start slop */
	if (len > length)
		len = length;	/* Ending slop */
	return len;
}
1829
1830static int sprintf_info(char *buffer, struct net_device *dev)
1831{
1832 struct streamer_private *streamer_priv =
1833 netdev_priv(dev);
1834 __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
1835 struct streamer_adapter_addr_table sat;
1836 struct streamer_parameters_table spt;
1837 int size = 0;
1838 int i;
1839
1840 writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA);
1841 for (i = 0; i < 14; i += 2) {
1842 __u16 io_word;
1843 __u8 *datap = (__u8 *) & sat;
1844 io_word=ntohs(readw(streamer_mmio+LAPDINC));
1845 datap[size]=io_word >> 8;
1846 datap[size+1]=io_word & 0xff;
1847 }
1848 writew(streamer_priv->streamer_parms_addr, streamer_mmio + LAPA);
1849 for (i = 0; i < 68; i += 2) {
1850 __u16 io_word;
1851 __u8 *datap = (__u8 *) & spt;
1852 io_word=ntohs(readw(streamer_mmio+LAPDINC));
1853 datap[size]=io_word >> 8;
1854 datap[size+1]=io_word & 0xff;
1855 }
1856
1857 size = sprintf(buffer, "\n%6s: Adapter Address : Node Address : Functional Addr\n", dev->name);
1858
1859 size += sprintf(buffer + size,
1860 "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
1861 dev->name, dev->dev_addr, sat.node_addr,
1862 sat.func_addr[0], sat.func_addr[1],
1863 sat.func_addr[2], sat.func_addr[3]);
1864
1865 size += sprintf(buffer + size, "\n%6s: Token Ring Parameters Table:\n", dev->name);
1866
1867 size += sprintf(buffer + size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n", dev->name);
1868
1869 size += sprintf(buffer + size,
1870 "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n",
1871 dev->name, spt.phys_addr[0], spt.phys_addr[1],
1872 spt.phys_addr[2], spt.phys_addr[3],
1873 spt.up_node_addr, spt.poll_addr,
1874 ntohs(spt.acc_priority), ntohs(spt.auth_source_class),
1875 ntohs(spt.att_code));
1876
1877 size += sprintf(buffer + size, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n", dev->name);
1878
1879 size += sprintf(buffer + size,
1880 "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n",
1881 dev->name, spt.source_addr,
1882 ntohs(spt.beacon_type), ntohs(spt.major_vector),
1883 ntohs(spt.lan_status), ntohs(spt.local_ring),
1884 ntohs(spt.mon_error), ntohs(spt.frame_correl));
1885
1886 size += sprintf(buffer + size, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n",
1887 dev->name);
1888
1889 size += sprintf(buffer + size,
1890 "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n",
1891 dev->name, ntohs(spt.beacon_transmit),
1892 ntohs(spt.beacon_receive),
1893 spt.beacon_naun,
1894 spt.beacon_phys[0], spt.beacon_phys[1],
1895 spt.beacon_phys[2], spt.beacon_phys[3]);
1896 return size;
1897}
1898#endif
1899#endif
1900
/* PCI driver glue: binds streamer_init_one/streamer_remove_one to the
 * device IDs listed in streamer_pci_tbl. */
static struct pci_driver streamer_pci_driver = {
	.name = "lanstreamer",
	.id_table = streamer_pci_tbl,
	.probe = streamer_init_one,
	.remove = __devexit_p(streamer_remove_one),
};
1907
1908static int __init streamer_init_module(void) {
1909 return pci_register_driver(&streamer_pci_driver);
1910}
1911
1912static void __exit streamer_cleanup_module(void) {
1913 pci_unregister_driver(&streamer_pci_driver);
1914}
1915
/* Module registration and license declaration */
module_init(streamer_init_module);
module_exit(streamer_cleanup_module);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/tokenring/lanstreamer.h b/drivers/net/tokenring/lanstreamer.h
new file mode 100644
index 00000000000..3c58d6a3fbc
--- /dev/null
+++ b/drivers/net/tokenring/lanstreamer.h
@@ -0,0 +1,343 @@
1/*
2 * lanstreamer.h -- driver for the IBM Auto LANStreamer PCI Adapter
3 *
4 * Written By: Mike Sullivan, IBM Corporation
5 *
6 * Copyright (C) 1999 IBM Corporation
7 *
8 * Linux driver for IBM PCI tokenring cards based on the LanStreamer MPC
9 * chipset.
10 *
11 * This driver is based on the olympic driver for IBM PCI TokenRing cards (Pit/Pit-Phy/Olympic
12 * chipsets) written by:
13 * 1999 Peter De Schrijver All Rights Reserved
14 * 1999 Mike Phillips (phillim@amtrak.com)
15 *
16 * Base Driver Skeleton:
17 * Written 1993-94 by Donald Becker.
18 *
19 * Copyright 1993 United States Government as represented by the
20 * Director, National Security Agency.
21 *
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License as published by
24 * the Free Software Foundation; either version 2 of the License, or
25 * (at your option) any later version.
26 *
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU General Public License for more details.
31 *
32 * NO WARRANTY
33 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
34 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
35 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
36 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
37 * solely responsible for determining the appropriateness of using and
38 * distributing the Program and assumes all risks associated with its
39 * exercise of rights under this Agreement, including but not limited to
40 * the risks and costs of program errors, damage to or loss of data,
41 * programs or equipment, and unavailability or interruption of operations.
42 *
43 * DISCLAIMER OF LIABILITY
44 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
45 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
47 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
48 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
49 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
50 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
51 *
52 * You should have received a copy of the GNU General Public License
53 * along with this program; if not, write to the Free Software
54 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
55 *
56 *
57 * 12/10/99 - Alpha Release 0.1.0
58 * First release to the public
59 * 08/15/01 - Added ioctl() definitions and others - Kent Yoder <yoder1@us.ibm.com>
60 *
61 */
62
63/* MAX_INTR - the maximum number of times we can loop
64 * inside the interrupt function before returning
65 * control to the OS (maximum value is 256)
66 */
67#define MAX_INTR 5
68
69#define CLS 0x0C
70#define MLR 0x86
71#define LTR 0x0D
72
73#define BCTL 0x60
74#define BCTL_SOFTRESET (1<<15)
75#define BCTL_RX_FIFO_8 (1<<1)
76#define BCTL_TX_FIFO_8 (1<<3)
77
78#define GPR 0x4a
79#define GPR_AUTOSENSE (1<<2)
80#define GPR_16MBPS (1<<3)
81
82#define LISR 0x10
83#define LISR_SUM 0x12
84#define LISR_RUM 0x14
85
86#define LISR_LIE (1<<15)
87#define LISR_SLIM (1<<13)
88#define LISR_SLI (1<<12)
89#define LISR_BPEI (1<<9)
90#define LISR_BPE (1<<8)
91#define LISR_SRB_CMD (1<<5)
92#define LISR_ASB_REPLY (1<<4)
93#define LISR_ASB_FREE_REQ (1<<2)
94#define LISR_ARB_FREE (1<<1)
95#define LISR_TRB_FRAME (1<<0)
96
97#define SISR 0x16
98#define SISR_SUM 0x18
99#define SISR_RUM 0x1A
100#define SISR_MASK 0x54
101#define SISR_MASK_SUM 0x56
102#define SISR_MASK_RUM 0x58
103
104#define SISR_MI (1<<15)
105#define SISR_SERR_ERR (1<<14)
106#define SISR_TIMER (1<<11)
107#define SISR_LAP_PAR_ERR (1<<10)
108#define SISR_LAP_ACC_ERR (1<<9)
109#define SISR_PAR_ERR (1<<8)
110#define SISR_ADAPTER_CHECK (1<<6)
111#define SISR_SRB_REPLY (1<<5)
112#define SISR_ASB_FREE (1<<4)
113#define SISR_ARB_CMD (1<<3)
114#define SISR_TRB_REPLY (1<<2)
115
116#define MISR_RUM 0x5A
117#define MISR_MASK 0x5C
118#define MISR_MASK_RUM 0x5E
119
120#define MISR_TX2_IDLE (1<<15)
121#define MISR_TX2_NO_STATUS (1<<14)
122#define MISR_TX2_HALT (1<<13)
123#define MISR_TX2_EOF (1<<12)
124#define MISR_TX1_IDLE (1<<11)
125#define MISR_TX1_NO_STATUS (1<<10)
126#define MISR_TX1_HALT (1<<9)
127#define MISR_TX1_EOF (1<<8)
128#define MISR_RX_NOBUF (1<<5)
129#define MISR_RX_EOB (1<<4)
130#define MISR_RX_NO_STATUS (1<<2)
131#define MISR_RX_HALT (1<<1)
132#define MISR_RX_EOF (1<<0)
133
134#define LAPA 0x62
135#define LAPE 0x64
136#define LAPD 0x66
137#define LAPDINC 0x68
138#define LAPWWO 0x6A
139#define LAPWWC 0x6C
140#define LAPCTL 0x6E
141
142#define TIMER 0x4E4
143
144#define BMCTL_SUM 0x50
145#define BMCTL_RUM 0x52
146#define BMCTL_TX1_DIS (1<<14)
147#define BMCTL_TX2_DIS (1<<10)
148#define BMCTL_RX_DIS (1<<6)
149#define BMCTL_RX_ENABLED (1<<5)
150
151#define RXLBDA 0x90
152#define RXBDA 0x94
153#define RXSTAT 0x98
154#define RXDBA 0x9C
155
156#define TX1LFDA 0xA0
157#define TX1FDA 0xA4
158#define TX1STAT 0xA8
159#define TX1DBA 0xAC
160#define TX2LFDA 0xB0
161#define TX2FDA 0xB4
162#define TX2STAT 0xB8
163#define TX2DBA 0xBC
164
165#define STREAMER_IO_SPACE 256
166
167#define SRB_COMMAND_SIZE 50
168
169#define STREAMER_MAX_ADAPTERS 8 /* 0x08 __MODULE_STRING can't hand 0xnn */
170
171/* Defines for LAN STATUS CHANGE reports */
172#define LSC_SIG_LOSS 0x8000
173#define LSC_HARD_ERR 0x4000
174#define LSC_SOFT_ERR 0x2000
175#define LSC_TRAN_BCN 0x1000
176#define LSC_LWF 0x0800
177#define LSC_ARW 0x0400
178#define LSC_FPE 0x0200
179#define LSC_RR 0x0100
180#define LSC_CO 0x0080
181#define LSC_SS 0x0040
182#define LSC_RING_REC 0x0020
183#define LSC_SR_CO 0x0010
184#define LSC_FDX_MODE 0x0004
185
186/* Defines for OPEN ADAPTER command */
187
188#define OPEN_ADAPTER_EXT_WRAP (1<<15)
189#define OPEN_ADAPTER_DIS_HARDEE (1<<14)
190#define OPEN_ADAPTER_DIS_SOFTERR (1<<13)
191#define OPEN_ADAPTER_PASS_ADC_MAC (1<<12)
192#define OPEN_ADAPTER_PASS_ATT_MAC (1<<11)
193#define OPEN_ADAPTER_ENABLE_EC (1<<10)
194#define OPEN_ADAPTER_CONTENDER (1<<8)
195#define OPEN_ADAPTER_PASS_BEACON (1<<7)
196#define OPEN_ADAPTER_ENABLE_FDX (1<<6)
197#define OPEN_ADAPTER_ENABLE_RPL (1<<5)
198#define OPEN_ADAPTER_INHIBIT_ETR (1<<4)
199#define OPEN_ADAPTER_INTERNAL_WRAP (1<<3)
200
201
202/* Defines for SRB Commands */
203#define SRB_CLOSE_ADAPTER 0x04
204#define SRB_CONFIGURE_BRIDGE 0x0c
205#define SRB_CONFIGURE_HP_CHANNEL 0x13
206#define SRB_MODIFY_BRIDGE_PARMS 0x15
207#define SRB_MODIFY_OPEN_OPTIONS 0x01
208#define SRB_MODIFY_RECEIVE_OPTIONS 0x17
209#define SRB_NO_OPERATION 0x00
210#define SRB_OPEN_ADAPTER 0x03
211#define SRB_READ_LOG 0x08
212#define SRB_READ_SR_COUNTERS 0x16
213#define SRB_RESET_GROUP_ADDRESS 0x02
214#define SRB_RESET_TARGET_SEGMETN 0x14
215#define SRB_SAVE_CONFIGURATION 0x1b
216#define SRB_SET_BRIDGE_PARMS 0x09
217#define SRB_SET_FUNC_ADDRESS 0x07
218#define SRB_SET_GROUP_ADDRESS 0x06
219#define SRB_SET_TARGET_SEGMENT 0x05
220
221/* Clear return code */
222#define STREAMER_CLEAR_RET_CODE 0xfe
223
224/* ARB Commands */
225#define ARB_RECEIVE_DATA 0x81
226#define ARB_LAN_CHANGE_STATUS 0x84
227
228/* ASB Response commands */
229#define ASB_RECEIVE_DATA 0x81
230
231
232/* Streamer defaults for buffers */
233
234#define STREAMER_RX_RING_SIZE 16 /* should be a power of 2 */
235/* Setting the number of TX descriptors to 1 is a workaround for an
236 * undocumented hardware problem with the lanstreamer board. Setting
237 * this to something higher may slightly increase the throughput you
238 * can get from the card, but at the risk of locking up the box. -
239 * <yoder1@us.ibm.com>
240 */
241#define STREAMER_TX_RING_SIZE 1 /* should be a power of 2 */
242
243#define PKT_BUF_SZ 4096 /* Default packet size */
244
245/* Streamer data structures */
246
/* Transmit descriptor as consumed by the LanStreamer DMA engine.
 * NOTE(review): field meanings inferred from names; layout/endianness
 * should be confirmed against the MPC databook. */
struct streamer_tx_desc {
	__u32 forward;		/* link to next descriptor -- presumably a bus address */
	__u32 status;		/* completion status written back by hardware */
	__u32 bufcnt_framelen;	/* packed buffer count / frame length */
	__u32 buffer;		/* data buffer address */
	__u32 buflen;		/* data buffer length */
	__u32 rsvd1;
	__u32 rsvd2;
	__u32 rsvd3;
};
257
/* Receive descriptor for the LanStreamer DMA engine.
 * NOTE(review): field meanings inferred from names -- confirm
 * against the databook. */
struct streamer_rx_desc {
	__u32 forward;		/* link to next descriptor */
	__u32 status;		/* completion status written back by hardware */
	__u32 buffer;		/* data buffer address */
	__u32 framelen_buflen;	/* packed frame length / buffer length */
};
264
/* Layout of a MAC receive buffer as read out of adapter memory.
 * NOTE(review): 'next' is presumably an adapter-memory offset to the
 * following buffer in the chain -- verify against the readers of
 * streamer_private.mac_rx_buffer. */
struct mac_receive_buffer {
	__u16 next;
	__u8 padding;
	__u8 frame_status;
	__u16 buffer_length;
	__u8 frame_data;	/* first data byte; remainder follows in adapter memory */
};
272
/* Per-adapter driver state, kept in netdev_priv(). */
struct streamer_private {

	/* Adapter-memory offsets of the four shared command blocks */
	__u16 srb;
	__u16 trb;
	__u16 arb;
	__u16 asb;

	struct streamer_private *next;	/* driver-internal adapter list */
	struct pci_dev *pci_dev;
	__u8 __iomem *streamer_mmio;	/* ioremapped register window */
	char *streamer_card_name;

	spinlock_t streamer_lock;

	volatile int srb_queued;	/* True if an SRB is still posted */
	wait_queue_head_t srb_wait;	/* sleepers waiting for SRB completion */

	volatile int asb_queued;	/* True if an ASB is posted */

	volatile int trb_queued;	/* True if a TRB is posted */
	wait_queue_head_t trb_wait;

	/* DMA descriptor rings and the sk_buffs mapped into them */
	struct streamer_rx_desc *streamer_rx_ring;
	struct streamer_tx_desc *streamer_tx_ring;
	struct sk_buff *tx_ring_skb[STREAMER_TX_RING_SIZE],
	    *rx_ring_skb[STREAMER_RX_RING_SIZE];
	/* ring bookkeeping indices / free-entry count */
	int tx_ring_free, tx_ring_last_status, rx_ring_last_received,
	    free_tx_ring_entries;

	__u16 streamer_lan_status;	/* last LAN STATUS CHANGE bits (LSC_*) */
	__u8 streamer_ring_speed;
	__u16 pkt_buf_sz;		/* receive buffer size (see PKT_BUF_SZ) */
	__u8 streamer_receive_options, streamer_copy_all_options,
	    streamer_message_level;
	/* adapter-memory locations reported by the open response */
	__u16 streamer_addr_table_addr, streamer_parms_addr;
	__u16 mac_rx_buffer;
	__u8 streamer_laa[6];		/* locally administered address, if set */
};
311
/* Adapter address table image (read from adapter memory at
 * streamer_addr_table_addr). */
struct streamer_adapter_addr_table {

	__u8 node_addr[6];	/* station MAC address */
	__u8 reserved[4];
	__u8 func_addr[4];	/* token-ring functional address bits */
};
318
/* Station parameters table image (read from adapter memory at
 * streamer_parms_addr); consumed by the /proc reporting code, which
 * applies ntohs() to the 16-bit fields. */
struct streamer_parameters_table {

	__u8 phys_addr[4];
	__u8 up_node_addr[6];	/* upstream neighbour (NAUN) node address */
	__u8 up_phys_addr[4];
	__u8 poll_addr[6];
	__u16 reserved;
	__u16 acc_priority;
	__u16 auth_source_class;
	__u16 att_code;
	__u8 source_addr[6];
	__u16 beacon_type;
	__u16 major_vector;
	__u16 lan_status;
	__u16 soft_error_time;
	__u16 reserved1;
	__u16 local_ring;
	__u16 mon_error;
	__u16 beacon_transmit;
	__u16 beacon_receive;
	__u16 frame_correl;
	__u8 beacon_naun[6];
	__u32 reserved2;
	__u8 beacon_phys[4];
};
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
new file mode 100644
index 00000000000..6153cfd696b
--- /dev/null
+++ b/drivers/net/tokenring/madgemc.c
@@ -0,0 +1,763 @@
1/*
2 * madgemc.c: Driver for the Madge Smart 16/4 MC16 MCA token ring card.
3 *
4 * Written 2000 by Adam Fritzler
5 *
6 * This software may be used and distributed according to the terms
7 * of the GNU General Public License, incorporated herein by reference.
8 *
9 * This driver module supports the following cards:
10 * - Madge Smart 16/4 Ringnode MC16
11 * - Madge Smart 16/4 Ringnode MC32 (??)
12 *
13 * Maintainer(s):
14 * AF Adam Fritzler
15 *
16 * Modification History:
17 * 16-Jan-00 AF Created
18 *
19 */
20static const char version[] = "madgemc.c: v0.91 23/01/2000 by Adam Fritzler\n";
21
22#include <linux/module.h>
23#include <linux/mca.h>
24#include <linux/slab.h>
25#include <linux/kernel.h>
26#include <linux/errno.h>
27#include <linux/init.h>
28#include <linux/netdevice.h>
29#include <linux/trdevice.h>
30
31#include <asm/system.h>
32#include <asm/io.h>
33#include <asm/irq.h>
34
35#include "tms380tr.h"
36#include "madgemc.h" /* Madge-specific constants */
37
38#define MADGEMC_IO_EXTENT 32
39#define MADGEMC_SIF_OFFSET 0x08
40
41struct card_info {
42 /*
43 * These are read from the BIA ROM.
44 */
45 unsigned int manid;
46 unsigned int cardtype;
47 unsigned int cardrev;
48 unsigned int ramsize;
49
50 /*
51 * These are read from the MCA POS registers.
52 */
53 unsigned int burstmode:2;
54 unsigned int fairness:1; /* 0 = Fair, 1 = Unfair */
55 unsigned int arblevel:4;
56 unsigned int ringspeed:2; /* 0 = 4mb, 1 = 16, 2 = Auto/none */
57 unsigned int cabletype:1; /* 0 = RJ45, 1 = DB9 */
58};
59
60static int madgemc_open(struct net_device *dev);
61static int madgemc_close(struct net_device *dev);
62static int madgemc_chipset_init(struct net_device *dev);
63static void madgemc_read_rom(struct net_device *dev, struct card_info *card);
64static unsigned short madgemc_setnselout_pins(struct net_device *dev);
65static void madgemc_setcabletype(struct net_device *dev, int type);
66
67static int madgemc_mcaproc(char *buf, int slot, void *d);
68
69static void madgemc_setregpage(struct net_device *dev, int page);
70static void madgemc_setsifsel(struct net_device *dev, int val);
71static void madgemc_setint(struct net_device *dev, int val);
72
73static irqreturn_t madgemc_interrupt(int irq, void *dev_id);
74
75/*
76 * These work around paging, however they don't guarantee you're on the
77 * right page.
78 */
79#define SIFREADB(reg) (inb(dev->base_addr + ((reg<0x8)?reg:reg-0x8)))
80#define SIFWRITEB(val, reg) (outb(val, dev->base_addr + ((reg<0x8)?reg:reg-0x8)))
81#define SIFREADW(reg) (inw(dev->base_addr + ((reg<0x8)?reg:reg-0x8)))
82#define SIFWRITEW(val, reg) (outw(val, dev->base_addr + ((reg<0x8)?reg:reg-0x8)))
83
/*
 * Read an 8-bit SIF register. Registers at offset 0x8 and above live
 * on register page one, so flip the page around the access and flip
 * it back afterwards.
 */
static unsigned short madgemc_sifreadb(struct net_device *dev, unsigned short reg)
{
	unsigned short val;
	int paged = (reg >= 0x8);

	if (paged)
		madgemc_setregpage(dev, 1);
	val = SIFREADB(reg);
	if (paged)
		madgemc_setregpage(dev, 0);

	return val;
}
99
/*
 * Write an 8-bit SIF register, switching to register page one (and
 * back) when the register lives beyond the first eight offsets.
 */
static void madgemc_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
{
	int paged = (reg >= 0x8);

	if (paged)
		madgemc_setregpage(dev, 1);
	SIFWRITEB(val, reg);
	if (paged)
		madgemc_setregpage(dev, 0);
}
113
/*
 * Read a 16-bit SIF register, paging to register page one for
 * offsets 0x8 and above.
 */
static unsigned short madgemc_sifreadw(struct net_device *dev, unsigned short reg)
{
	unsigned short val;
	int paged = (reg >= 0x8);

	if (paged)
		madgemc_setregpage(dev, 1);
	val = SIFREADW(reg);
	if (paged)
		madgemc_setregpage(dev, 0);

	return val;
}
129
/*
 * Write a 16-bit SIF register, paging to register page one for
 * offsets 0x8 and above.
 */
static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
{
	int paged = (reg >= 0x8);

	if (paged)
		madgemc_setregpage(dev, 1);
	SIFWRITEW(val, reg);
	if (paged)
		madgemc_setregpage(dev, 0);
}
143
144static struct net_device_ops madgemc_netdev_ops __read_mostly;
145
146static int __devinit madgemc_probe(struct device *device)
147{
148 static int versionprinted;
149 struct net_device *dev;
150 struct net_local *tp;
151 struct card_info *card;
152 struct mca_device *mdev = to_mca_device(device);
153 int ret = 0;
154
155 if (versionprinted++ == 0)
156 printk("%s", version);
157
158 if(mca_device_claimed(mdev))
159 return -EBUSY;
160 mca_device_set_claim(mdev, 1);
161
162 dev = alloc_trdev(sizeof(struct net_local));
163 if (!dev) {
164 printk("madgemc: unable to allocate dev space\n");
165 mca_device_set_claim(mdev, 0);
166 ret = -ENOMEM;
167 goto getout;
168 }
169
170 dev->netdev_ops = &madgemc_netdev_ops;
171
172 card = kmalloc(sizeof(struct card_info), GFP_KERNEL);
173 if (card==NULL) {
174 printk("madgemc: unable to allocate card struct\n");
175 ret = -ENOMEM;
176 goto getout1;
177 }
178
179 /*
180 * Parse configuration information. This all comes
181 * directly from the publicly available @002d.ADF.
182 * Get it from Madge or your local ADF library.
183 */
184
185 /*
186 * Base address
187 */
188 dev->base_addr = 0x0a20 +
189 ((mdev->pos[2] & MC16_POS2_ADDR2)?0x0400:0) +
190 ((mdev->pos[0] & MC16_POS0_ADDR1)?0x1000:0) +
191 ((mdev->pos[3] & MC16_POS3_ADDR3)?0x2000:0);
192
193 /*
194 * Interrupt line
195 */
196 switch(mdev->pos[0] >> 6) { /* upper two bits */
197 case 0x1: dev->irq = 3; break;
198 case 0x2: dev->irq = 9; break; /* IRQ 2 = IRQ 9 */
199 case 0x3: dev->irq = 10; break;
200 default: dev->irq = 0; break;
201 }
202
203 if (dev->irq == 0) {
204 printk("%s: invalid IRQ\n", dev->name);
205 ret = -EBUSY;
206 goto getout2;
207 }
208
209 if (!request_region(dev->base_addr, MADGEMC_IO_EXTENT,
210 "madgemc")) {
211 printk(KERN_INFO "madgemc: unable to setup Smart MC in slot %d because of I/O base conflict at 0x%04lx\n", mdev->slot, dev->base_addr);
212 dev->base_addr += MADGEMC_SIF_OFFSET;
213 ret = -EBUSY;
214 goto getout2;
215 }
216 dev->base_addr += MADGEMC_SIF_OFFSET;
217
218 /*
219 * Arbitration Level
220 */
221 card->arblevel = ((mdev->pos[0] >> 1) & 0x7) + 8;
222
223 /*
224 * Burst mode and Fairness
225 */
226 card->burstmode = ((mdev->pos[2] >> 6) & 0x3);
227 card->fairness = ((mdev->pos[2] >> 4) & 0x1);
228
229 /*
230 * Ring Speed
231 */
232 if ((mdev->pos[1] >> 2)&0x1)
233 card->ringspeed = 2; /* not selected */
234 else if ((mdev->pos[2] >> 5) & 0x1)
235 card->ringspeed = 1; /* 16Mb */
236 else
237 card->ringspeed = 0; /* 4Mb */
238
239 /*
240 * Cable type
241 */
242 if ((mdev->pos[1] >> 6)&0x1)
243 card->cabletype = 1; /* STP/DB9 */
244 else
245 card->cabletype = 0; /* UTP/RJ-45 */
246
247
248 /*
249 * ROM Info. This requires us to actually twiddle
250 * bits on the card, so we must ensure above that
251 * the base address is free of conflict (request_region above).
252 */
253 madgemc_read_rom(dev, card);
254
255 if (card->manid != 0x4d) { /* something went wrong */
256 printk(KERN_INFO "%s: Madge MC ROM read failed (unknown manufacturer ID %02x)\n", dev->name, card->manid);
257 goto getout3;
258 }
259
260 if ((card->cardtype != 0x08) && (card->cardtype != 0x0d)) {
261 printk(KERN_INFO "%s: Madge MC ROM read failed (unknown card ID %02x)\n", dev->name, card->cardtype);
262 ret = -EIO;
263 goto getout3;
264 }
265
266 /* All cards except Rev 0 and 1 MC16's have 256kb of RAM */
267 if ((card->cardtype == 0x08) && (card->cardrev <= 0x01))
268 card->ramsize = 128;
269 else
270 card->ramsize = 256;
271
272 printk("%s: %s Rev %d at 0x%04lx IRQ %d\n",
273 dev->name,
274 (card->cardtype == 0x08)?MADGEMC16_CARDNAME:
275 MADGEMC32_CARDNAME, card->cardrev,
276 dev->base_addr, dev->irq);
277
278 if (card->cardtype == 0x0d)
279 printk("%s: Warning: MC32 support is experimental and highly untested\n", dev->name);
280
281 if (card->ringspeed==2) { /* Unknown */
282 printk("%s: Warning: Ring speed not set in POS -- Please run the reference disk and set it!\n", dev->name);
283 card->ringspeed = 1; /* default to 16mb */
284 }
285
286 printk("%s: RAM Size: %dKB\n", dev->name, card->ramsize);
287
288 printk("%s: Ring Speed: %dMb/sec on %s\n", dev->name,
289 (card->ringspeed)?16:4,
290 card->cabletype?"STP/DB9":"UTP/RJ-45");
291 printk("%s: Arbitration Level: %d\n", dev->name,
292 card->arblevel);
293
294 printk("%s: Burst Mode: ", dev->name);
295 switch(card->burstmode) {
296 case 0: printk("Cycle steal"); break;
297 case 1: printk("Limited burst"); break;
298 case 2: printk("Delayed release"); break;
299 case 3: printk("Immediate release"); break;
300 }
301 printk(" (%s)\n", (card->fairness)?"Unfair":"Fair");
302
303
304 /*
305 * Enable SIF before we assign the interrupt handler,
306 * just in case we get spurious interrupts that need
307 * handling.
308 */
309 outb(0, dev->base_addr + MC_CONTROL_REG0); /* sanity */
310 madgemc_setsifsel(dev, 1);
311 if (request_irq(dev->irq, madgemc_interrupt, IRQF_SHARED,
312 "madgemc", dev)) {
313 ret = -EBUSY;
314 goto getout3;
315 }
316
317 madgemc_chipset_init(dev); /* enables interrupts! */
318 madgemc_setcabletype(dev, card->cabletype);
319
320 /* Setup MCA structures */
321 mca_device_set_name(mdev, (card->cardtype == 0x08)?MADGEMC16_CARDNAME:MADGEMC32_CARDNAME);
322 mca_set_adapter_procfn(mdev->slot, madgemc_mcaproc, dev);
323
324 printk("%s: Ring Station Address: %pM\n",
325 dev->name, dev->dev_addr);
326
327 if (tmsdev_init(dev, device)) {
328 printk("%s: unable to get memory for dev->priv.\n",
329 dev->name);
330 ret = -ENOMEM;
331 goto getout4;
332 }
333 tp = netdev_priv(dev);
334
335 /*
336 * The MC16 is physically a 32bit card. However, Madge
337 * insists on calling it 16bit, so I'll assume here that
338 * they know what they're talking about. Cut off DMA
339 * at 16mb.
340 */
341 tp->setnselout = madgemc_setnselout_pins;
342 tp->sifwriteb = madgemc_sifwriteb;
343 tp->sifreadb = madgemc_sifreadb;
344 tp->sifwritew = madgemc_sifwritew;
345 tp->sifreadw = madgemc_sifreadw;
346 tp->DataRate = (card->ringspeed)?SPEED_16:SPEED_4;
347
348 memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1);
349
350 tp->tmspriv = card;
351 dev_set_drvdata(device, dev);
352
353 if (register_netdev(dev) == 0)
354 return 0;
355
356 dev_set_drvdata(device, NULL);
357 ret = -ENOMEM;
358getout4:
359 free_irq(dev->irq, dev);
360getout3:
361 release_region(dev->base_addr-MADGEMC_SIF_OFFSET,
362 MADGEMC_IO_EXTENT);
363getout2:
364 kfree(card);
365getout1:
366 free_netdev(dev);
367getout:
368 mca_device_set_claim(mdev, 0);
369 return ret;
370}
371
372/*
373 * Handle interrupts generated by the card
374 *
375 * The MicroChannel Madge cards need slightly more handling
376 * after an interrupt than other TMS380 cards do.
377 *
378 * First we must make sure it was this card that generated the
379 * interrupt (since interrupt sharing is allowed). Then,
380 * because we're using level-triggered interrupts (as is
381 * standard on MCA), we must toggle the interrupt line
382 * on the card in order to claim and acknowledge the interrupt.
383 * Once that is done, the interrupt should be handlable in
384 * the normal tms380tr_interrupt() routine.
385 *
386 * There's two ways we can check to see if the interrupt is ours,
387 * both with their own disadvantages...
388 *
389 * 1) Read in the SIFSTS register from the TMS controller. This
390 * is guaranteed to be accurate, however, there's a fairly
391 * large performance penalty for doing so: the Madge chips
392 * must request the register from the Eagle, the Eagle must
393 * read them from its internal bus, and then take the route
394 * back out again, for a 16bit read.
395 *
396 * 2) Use the MC_CONTROL_REG0_SINTR bit from the Madge ASICs.
397 * The major disadvantage here is that the accuracy of the
398 * bit is in question. However, it cuts out the extra read
399 * cycles it takes to read the Eagle's SIF, as its only an
400 * 8bit read, and theoretically the Madge bit is directly
401 * connected to the interrupt latch coming out of the Eagle
402 * hardware (that statement is not verified).
403 *
404 * I can't determine which of these methods has the best win. For now,
405 * we make a compromise. Use the Madge way for the first interrupt,
406 * which should be the fast-path, and then once we hit the first
407 * interrupt, keep on trying using the SIF method until we've
408 * exhausted all contiguous interrupts.
409 *
410 */
/* IRQ handler: claim, acknowledge, and drain all pending SIF
 * interrupts (see the strategy discussion in the comment above). */
static irqreturn_t madgemc_interrupt(int irq, void *dev_id)
{
	int pending,reg1;
	struct net_device *dev;

	if (!dev_id) {
		printk("madgemc_interrupt: was not passed a dev_id!\n");
		return IRQ_NONE;
	}

	dev = dev_id;

	/* Make sure its really us. -- the Madge way */
	pending = inb(dev->base_addr + MC_CONTROL_REG0);
	if (!(pending & MC_CONTROL_REG0_SINTR))
		return IRQ_NONE; /* not our interrupt */

	/*
	 * Since we're level-triggered, we may miss the rising edge
	 * of the next interrupt while we're off handling this one,
	 * so keep checking until the SIF verifies that it has nothing
	 * left for us to do.
	 */
	/* Seed the loop so the first iteration always runs the handler */
	pending = STS_SYSTEM_IRQ;
	do {
		if (pending & STS_SYSTEM_IRQ) {

			/* Toggle the interrupt to reset the latch on card */
			reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
			outb(reg1 ^ MC_CONTROL_REG1_SINTEN,
			     dev->base_addr + MC_CONTROL_REG1);
			outb(reg1, dev->base_addr + MC_CONTROL_REG1);

			/* Continue handling as normal */
			tms380tr_interrupt(irq, dev_id);

			pending = SIFREADW(SIFSTS); /* restart - the SIF way */

		} else
			return IRQ_HANDLED;
	} while (1);

	return IRQ_HANDLED; /* not reachable */
}
455
456/*
457 * Set the card to the preferred ring speed.
458 *
459 * Unlike newer cards, the MC16/32 have their speed selection
460 * circuit connected to the Madge ASICs and not to the TMS380
461 * NSELOUT pins. Set the ASIC bits correctly here, and return
462 * zero to leave the TMS NSELOUT bits unaffected.
463 *
464 */
465static unsigned short madgemc_setnselout_pins(struct net_device *dev)
466{
467 unsigned char reg1;
468 struct net_local *tp = netdev_priv(dev);
469
470 reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
471
472 if(tp->DataRate == SPEED_16)
473 reg1 |= MC_CONTROL_REG1_SPEED_SEL; /* add for 16mb */
474 else if (reg1 & MC_CONTROL_REG1_SPEED_SEL)
475 reg1 ^= MC_CONTROL_REG1_SPEED_SEL; /* remove for 4mb */
476 outb(reg1, dev->base_addr + MC_CONTROL_REG1);
477
478 return 0; /* no change */
479}
480
481/*
482 * Set the register page. This equates to the SRSX line
483 * on the TMS380Cx6.
484 *
485 * Register selection is normally done via three contiguous
486 * bits. However, some boards (such as the MC16/32) use only
487 * two bits, plus a separate bit in the glue chip. This
488 * sets the SRSX bit (the top bit). See page 4-17 in the
489 * Yellow Book for which registers are affected.
490 *
491 */
492static void madgemc_setregpage(struct net_device *dev, int page)
493{
494 static int reg1;
495
496 reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
497 if ((page == 0) && (reg1 & MC_CONTROL_REG1_SRSX)) {
498 outb(reg1 ^ MC_CONTROL_REG1_SRSX,
499 dev->base_addr + MC_CONTROL_REG1);
500 }
501 else if (page == 1) {
502 outb(reg1 | MC_CONTROL_REG1_SRSX,
503 dev->base_addr + MC_CONTROL_REG1);
504 }
505 reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
506}
507
508/*
509 * The SIF registers are not mapped into register space by default
510 * Set this to 1 to map them, 0 to map the BIA ROM.
511 *
512 */
513static void madgemc_setsifsel(struct net_device *dev, int val)
514{
515 unsigned int reg0;
516
517 reg0 = inb(dev->base_addr + MC_CONTROL_REG0);
518 if ((val == 0) && (reg0 & MC_CONTROL_REG0_SIFSEL)) {
519 outb(reg0 ^ MC_CONTROL_REG0_SIFSEL,
520 dev->base_addr + MC_CONTROL_REG0);
521 } else if (val == 1) {
522 outb(reg0 | MC_CONTROL_REG0_SIFSEL,
523 dev->base_addr + MC_CONTROL_REG0);
524 }
525 reg0 = inb(dev->base_addr + MC_CONTROL_REG0);
526}
527
528/*
529 * Enable SIF interrupts
530 *
531 * This does not enable interrupts in the SIF, but rather
532 * enables SIF interrupts to be passed onto the host.
533 *
534 */
535static void madgemc_setint(struct net_device *dev, int val)
536{
537 unsigned int reg1;
538
539 reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
540 if ((val == 0) && (reg1 & MC_CONTROL_REG1_SINTEN)) {
541 outb(reg1 ^ MC_CONTROL_REG1_SINTEN,
542 dev->base_addr + MC_CONTROL_REG1);
543 } else if (val == 1) {
544 outb(reg1 | MC_CONTROL_REG1_SINTEN,
545 dev->base_addr + MC_CONTROL_REG1);
546 }
547}
548
549/*
550 * Cable type is set via control register 7. Bit zero high
551 * for UTP, low for STP.
552 */
553static void madgemc_setcabletype(struct net_device *dev, int type)
554{
555 outb((type==0)?MC_CONTROL_REG7_CABLEUTP:MC_CONTROL_REG7_CABLESTP,
556 dev->base_addr + MC_CONTROL_REG7);
557}
558
559/*
560 * Enable the functions of the Madge chipset needed for
561 * full working order.
562 */
563static int madgemc_chipset_init(struct net_device *dev)
564{
565 outb(0, dev->base_addr + MC_CONTROL_REG1); /* pull SRESET low */
566 tms380tr_wait(100); /* wait for card to reset */
567
568 /* bring back into normal operating mode */
569 outb(MC_CONTROL_REG1_NSRESET, dev->base_addr + MC_CONTROL_REG1);
570
571 /* map SIF registers */
572 madgemc_setsifsel(dev, 1);
573
574 /* enable SIF interrupts */
575 madgemc_setint(dev, 1);
576
577 return 0;
578}
579
/*
 * Disable the board and return it to its power-up state.
 */
static void madgemc_chipset_close(struct net_device *dev)
{
	madgemc_setint(dev, 0);		/* mask SIF interrupts */
	madgemc_setsifsel(dev, 0);	/* map the BIA ROM back in */
}
590
591/*
592 * Read the card type (MC16 or MC32) from the card.
593 *
594 * The configuration registers are stored in two separate
595 * pages. Pages are flipped by clearing bit 3 of CONTROL_REG0 (PAGE)
596 * for page zero, or setting bit 3 for page one.
597 *
598 * Page zero contains the following data:
599 * Byte 0: Manufacturer ID (0x4D -- ASCII "M")
600 * Byte 1: Card type:
601 * 0x08 for MC16
602 * 0x0D for MC32
603 * Byte 2: Card revision
604 * Byte 3: Mirror of POS config register 0
605 * Byte 4: Mirror of POS 1
606 * Byte 5: Mirror of POS 2
607 *
608 * Page one contains the following data:
609 * Byte 0: Unused
610 * Byte 1-6: BIA, MSB to LSB.
611 *
612 * Note that to read the BIA, we must unmap the SIF registers
613 * by clearing bit 2 of CONTROL_REG0 (SIFSEL), as the data
614 * will reside in the same logical location. For this reason,
615 * _never_ read the BIA while the Eagle processor is running!
616 * The SIF will be completely inaccessible until the BIA operation
617 * is complete.
618 *
619 */
static void madgemc_read_rom(struct net_device *dev, struct card_info *card)
{
	unsigned long ioaddr;
	unsigned char reg0, reg1, tmpreg0, i;

	ioaddr = dev->base_addr;

	/* Save control register state so it can be restored afterwards */
	reg0 = inb(ioaddr + MC_CONTROL_REG0);
	reg1 = inb(ioaddr + MC_CONTROL_REG1);

	/* Switch to page zero and unmap SIF */
	tmpreg0 = reg0 & ~(MC_CONTROL_REG0_PAGE + MC_CONTROL_REG0_SIFSEL);
	outb(tmpreg0, ioaddr + MC_CONTROL_REG0);

	/* Page zero: manufacturer / card type / revision (see layout above) */
	card->manid = inb(ioaddr + MC_ROM_MANUFACTURERID);
	card->cardtype = inb(ioaddr + MC_ROM_ADAPTERID);
	card->cardrev = inb(ioaddr + MC_ROM_REVISION);

	/* Switch to rom page one */
	outb(tmpreg0 | MC_CONTROL_REG0_PAGE, ioaddr + MC_CONTROL_REG0);

	/* Read BIA (burned-in address), MSB first per the layout above */
	dev->addr_len = 6;
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = inb(ioaddr + MC_ROM_BIA_START + i);

	/* Restore original register values */
	outb(reg0, ioaddr + MC_CONTROL_REG0);
	outb(reg1, ioaddr + MC_CONTROL_REG1);
}
650
/* ndo_open: re-run the chipset bring-up in case a previous close (or
 * crash) left the glue logic in a bad state, then defer to the generic
 * TMS380 open path. */
static int madgemc_open(struct net_device *dev)
{
	madgemc_chipset_init(dev);
	tms380tr_open(dev);
	return 0;
}
661
/* ndo_stop: shut down the generic TMS380 side first, then power the
 * Madge glue chipset back down. */
static int madgemc_close(struct net_device *dev)
{
	tms380tr_close(dev);
	madgemc_chipset_close(dev);
	return 0;
}
668
669/*
670 * Give some details available from /proc/mca/slotX
671 */
/* /proc/mca/slotX callback: format the card's configuration into buf
 * and return the number of bytes written.
 * NOTE(review): unbounded sprintf into the caller-supplied buffer --
 * relies on the MCA proc core handing us a sufficiently large page. */
static int madgemc_mcaproc(char *buf, int slot, void *d)
{
	struct net_device *dev = (struct net_device *)d;
	struct net_local *tp = netdev_priv(dev);
	struct card_info *curcard = tp->tmspriv;
	int len = 0;

	len += sprintf(buf+len, "-------\n");
	if (curcard) {
		len += sprintf(buf+len, "Card Revision: %d\n", curcard->cardrev);
		len += sprintf(buf+len, "RAM Size: %dkb\n", curcard->ramsize);
		len += sprintf(buf+len, "Cable type: %s\n", (curcard->cabletype)?"STP/DB9":"UTP/RJ-45");
		len += sprintf(buf+len, "Configured ring speed: %dMb/sec\n", (curcard->ringspeed)?16:4);
		len += sprintf(buf+len, "Running ring speed: %dMb/sec\n", (tp->DataRate==SPEED_16)?16:4);
		len += sprintf(buf+len, "Device: %s\n", dev->name);
		len += sprintf(buf+len, "IO Port: 0x%04lx\n", dev->base_addr);
		len += sprintf(buf+len, "IRQ: %d\n", dev->irq);
		len += sprintf(buf+len, "Arbitration Level: %d\n", curcard->arblevel);
		len += sprintf(buf+len, "Burst Mode: ");
		switch(curcard->burstmode) {
		case 0: len += sprintf(buf+len, "Cycle steal"); break;
		case 1: len += sprintf(buf+len, "Limited burst"); break;
		case 2: len += sprintf(buf+len, "Delayed release"); break;
		case 3: len += sprintf(buf+len, "Immediate release"); break;
		}
		len += sprintf(buf+len, " (%s)\n", (curcard->fairness)?"Unfair":"Fair");

		len += sprintf(buf+len, "Ring Station Address: %pM\n",
			       dev->dev_addr);
	} else
		len += sprintf(buf+len, "Card not configured\n");

	return len;
}
706
707static int __devexit madgemc_remove(struct device *device)
708{
709 struct net_device *dev = dev_get_drvdata(device);
710 struct net_local *tp;
711 struct card_info *card;
712
713 BUG_ON(!dev);
714
715 tp = netdev_priv(dev);
716 card = tp->tmspriv;
717 kfree(card);
718 tp->tmspriv = NULL;
719
720 unregister_netdev(dev);
721 release_region(dev->base_addr-MADGEMC_SIF_OFFSET, MADGEMC_IO_EXTENT);
722 free_irq(dev->irq, dev);
723 tmsdev_term(dev);
724 free_netdev(dev);
725 dev_set_drvdata(device, NULL);
726
727 return 0;
728}
729
/* MCA POS adapter IDs this driver binds to (0x002d = Madge Smart 16/4
 * MC16/MC32, per @002d.ADF); the list is zero-terminated. */
static short madgemc_adapter_ids[] __initdata = {
	0x002d,
	0x0000
};
734
/* MCA bus glue: matches the adapter IDs above and wires the
 * probe/remove callbacks into the driver core. */
static struct mca_driver madgemc_driver = {
	.id_table = madgemc_adapter_ids,
	.driver = {
		.name = "madgemc",
		.bus = &mca_bus_type,
		.probe = madgemc_probe,
		.remove = __devexit_p(madgemc_remove),
	},
};
744
745static int __init madgemc_init (void)
746{
747 madgemc_netdev_ops = tms380tr_netdev_ops;
748 madgemc_netdev_ops.ndo_open = madgemc_open;
749 madgemc_netdev_ops.ndo_stop = madgemc_close;
750
751 return mca_register_driver (&madgemc_driver);
752}
753
754static void __exit madgemc_exit (void)
755{
756 mca_unregister_driver (&madgemc_driver);
757}
758
759module_init(madgemc_init);
760module_exit(madgemc_exit);
761
762MODULE_LICENSE("GPL");
763
diff --git a/drivers/net/tokenring/madgemc.h b/drivers/net/tokenring/madgemc.h
new file mode 100644
index 00000000000..fe88e272c53
--- /dev/null
+++ b/drivers/net/tokenring/madgemc.h
@@ -0,0 +1,70 @@
/*
 * madgemc.h: Header for the madgemc tms380tr module
 *
 * Register offsets and bit definitions for the Madge Smart 16/4
 * MC16/MC32 Ringnode MCA adapters.
 *
 * Authors:
 * - Adam Fritzler
 */

#ifndef __LINUX_MADGEMC_H
#define __LINUX_MADGEMC_H

#ifdef __KERNEL__

#define MADGEMC16_CARDNAME "Madge Smart 16/4 MC16 Ringnode"
#define MADGEMC32_CARDNAME "Madge Smart 16/4 MC32 Ringnode"

/*
 * Bit definitions for the POS config registers
 */
#define MC16_POS0_ADDR1 0x20
#define MC16_POS2_ADDR2 0x04
#define MC16_POS3_ADDR3 0x20

/*
 * Control/POS register offsets.  NOTE(review): these are negative
 * offsets, presumably applied relative to the SIF register base (cf.
 * madgemc.c's use of dev->base_addr - MADGEMC_SIF_OFFSET) -- confirm
 * against the adapter documentation.  The trailing comment on each
 * line is the register's absolute offset within the control block.
 */
#define MC_CONTROL_REG0		((long)-8)	/* 0x00 */
#define MC_CONTROL_REG1		((long)-7)	/* 0x01 */
#define MC_ADAPTER_POS_REG0	((long)-6)	/* 0x02 */
#define MC_ADAPTER_POS_REG1	((long)-5)	/* 0x03 */
#define MC_ADAPTER_POS_REG2	((long)-4)	/* 0x04 */
#define MC_ADAPTER_REG5_UNUSED	((long)-3)	/* 0x05 */
#define MC_ADAPTER_REG6_UNUSED	((long)-2)	/* 0x06 */
#define MC_CONTROL_REG7		((long)-1)	/* 0x07 */

#define MC_CONTROL_REG0_UNKNOWN1	0x01
#define MC_CONTROL_REG0_UNKNOWN2	0x02
#define MC_CONTROL_REG0_SIFSEL		0x04
#define MC_CONTROL_REG0_PAGE		0x08
#define MC_CONTROL_REG0_TESTINTERRUPT	0x10
#define MC_CONTROL_REG0_UNKNOWN20	0x20
#define MC_CONTROL_REG0_SINTR		0x40
#define MC_CONTROL_REG0_UNKNOWN80	0x80

#define MC_CONTROL_REG1_SINTEN		0x01
#define MC_CONTROL_REG1_BITOFDEATH	0x02
#define MC_CONTROL_REG1_NSRESET		0x04
#define MC_CONTROL_REG1_UNKNOWN8	0x08
#define MC_CONTROL_REG1_UNKNOWN10	0x10
#define MC_CONTROL_REG1_UNKNOWN20	0x20
#define MC_CONTROL_REG1_SRSX		0x40
#define MC_CONTROL_REG1_SPEED_SEL	0x80

#define MC_CONTROL_REG7_CABLESTP	0x00
#define MC_CONTROL_REG7_CABLEUTP	0x01

/*
 * ROM Page Zero
 */
#define MC_ROM_MANUFACTURERID	0x00
#define MC_ROM_ADAPTERID	0x01
#define MC_ROM_REVISION		0x02
#define MC_ROM_CONFIG0		0x03
#define MC_ROM_CONFIG1		0x04
#define MC_ROM_CONFIG2		0x05

/*
 * ROM Page One
 */
#define MC_ROM_UNUSED_BYTE	0x00
#define MC_ROM_BIA_START	0x01

#endif /* __KERNEL__ */
#endif /* __LINUX_MADGEMC_H */
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
new file mode 100644
index 00000000000..e3855aeb13d
--- /dev/null
+++ b/drivers/net/tokenring/olympic.c
@@ -0,0 +1,1750 @@
1/*
2 * olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
3 * 1999/2000 Mike Phillips (mikep@linuxtr.net)
4 *
5 * Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
6 * chipset.
7 *
8 * Base Driver Skeleton:
9 * Written 1993-94 by Donald Becker.
10 *
11 * Copyright 1993 United States Government as represented by the
12 * Director, National Security Agency.
13 *
14 * Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their
15 * assistance and perserverance with the testing of this driver.
16 *
17 * This software may be used and distributed according to the terms
18 * of the GNU General Public License, incorporated herein by reference.
19 *
20 * 4/27/99 - Alpha Release 0.1.0
21 * First release to the public
22 *
23 * 6/8/99 - Official Release 0.2.0
24 * Merged into the kernel code
25 * 8/18/99 - Updated driver for 2.3.13 kernel to use new pci
26 * resource. Driver also reports the card name returned by
27 * the pci resource.
28 * 1/11/00 - Added spinlocks for smp
29 * 2/23/00 - Updated to dev_kfree_irq
30 * 3/10/00 - Fixed FDX enable which triggered other bugs also
31 * squashed.
32 * 5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes.
33 * The odd thing about the changes is that the fix for
34 * endian issues with the big-endian data in the arb, asb...
35 * was to always swab() the bytes, no matter what CPU.
36 * That's because the read[wl]() functions always swap the
37 * bytes on the way in on PPC.
38 * Fixing the hardware descriptors was another matter,
39 * because they weren't going through read[wl](), there all
40 * the results had to be in memory in le32 values. kdaaker
41 *
42 * 12/23/00 - Added minimal Cardbus support (Thanks Donald).
43 *
44 * 03/09/01 - Add new pci api, dev_base_lock, general clean up.
45 *
46 * 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev
47 * Change proc_fs behaviour, now one entry per adapter.
48 *
49 * 04/09/01 - Couple of bug fixes to the dma unmaps and ejecting the
50 * adapter when live does not take the system down with it.
51 *
52 * 06/02/01 - Clean up, copy skb for small packets
53 *
54 * 06/22/01 - Add EISR error handling routines
55 *
56 * 07/19/01 - Improve bad LAA reporting, strip out freemem
57 * into a separate function, its called from 3
58 * different places now.
59 * 02/09/02 - Replaced sleep_on.
60 * 03/01/02 - Replace access to several registers from 32 bit to
61 * 16 bit. Fixes alignment errors on PPC 64 bit machines.
62 * Thanks to Al Trautman for this one.
63 * 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was
64 * silently ignored until the error checking code
65 * went into version 1.0.0
66 * 06/04/02 - Add correct start up sequence for the cardbus adapters.
67 * Required for strict compliance with pci power mgmt specs.
68 * To Do:
69 *
70 * Wake on lan
71 *
72 * If Problems do Occur
73 * Most problems can be rectified by either closing and opening the interface
74 * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
75 * if compiled into the kernel).
76 */
77
78/* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */
79
80#define OLYMPIC_DEBUG 0
81
82
83#include <linux/module.h>
84#include <linux/kernel.h>
85#include <linux/errno.h>
86#include <linux/timer.h>
87#include <linux/in.h>
88#include <linux/ioport.h>
89#include <linux/seq_file.h>
90#include <linux/string.h>
91#include <linux/proc_fs.h>
92#include <linux/ptrace.h>
93#include <linux/skbuff.h>
94#include <linux/interrupt.h>
95#include <linux/delay.h>
96#include <linux/netdevice.h>
97#include <linux/trdevice.h>
98#include <linux/stddef.h>
99#include <linux/init.h>
100#include <linux/pci.h>
101#include <linux/spinlock.h>
102#include <linux/bitops.h>
103#include <linux/jiffies.h>
104
105#include <net/checksum.h>
106#include <net/net_namespace.h>
107
108#include <asm/io.h>
109#include <asm/system.h>
110
111#include "olympic.h"
112
113/* I've got to put some intelligence into the version number so that Peter and I know
114 * which version of the code somebody has got.
115 * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
116 * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
117 *
118 * Official releases will only have an a.b.c version number format.
119 */
120
/* Driver version banner printed once from olympic_init(). */
static char version[] =
"Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ;

/* Open-failure phase names, indexed by the high nibble of the SRB error byte. */
static char *open_maj_error[]  = {"No error", "Lobe Media Test", "Physical Insertion",
				   "Address Verification", "Neighbor Notification (Ring Poll)",
				   "Request Parameters","FDX Registration Request",
				   "FDX Duplicate Address Check", "Station registration Query Wait",
				   "Unknown stage"};

/* Open-failure cause names, indexed by the low nibble of the SRB error byte.
 * Fix: "failer" -> "failure" in the RPL monitor-contention message. */
static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault",
				   "Ring Speed Mismatch", "Timeout","Ring Failure","Ring Beaconing",
				   "Duplicate Node Address","Request Parameters","Remove Received",
				   "Reserved", "Reserved", "No Monitor Detected for RPL",
				   "Monitor Contention failure for RPL", "FDX Protocol Error"};
135
/* Module parameters (one entry per adapter, indexed by probe order) */

MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;

/* Ring Speed 0,4,16,100
 * 0 = Autosense
 * 4,16 = Selected speed only, no autosense
 * This allows the card to be the first on the ring
 * and become the active monitor.
 * 100 = Nothing at present, 100mbps is autodetected
 * if FDX is turned on. May be implemented in the future to
 * fail if 100mpbs is not detected.
 *
 * WARNING: Some hubs will allow you to insert
 * at the wrong speed
 */

static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ;
module_param_array(ringspeed, int, NULL, 0);

/* Packet buffer size */

static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ;
module_param_array(pkt_buf_sz, int, NULL, 0) ;

/* Message Level */

static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ;
module_param_array(message_level, int, NULL, 0) ;

/* Change network_monitor to receive mac frames through the arb channel.
 * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr
 * device, i.e. tr0, tr1 etc.
 * Intended to be used to create a ring-error reporting network module
 * i.e. it will give you the source address of beaconers on the ring
 */
static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
module_param_array(network_monitor, int, NULL, 0);

/* PCI IDs this driver binds to: IBM Pit/Pit-Phy/Olympic based adapters. */
static DEFINE_PCI_DEVICE_TABLE(olympic_pci_tbl) = {
	{PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
	{ } 	/* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ;
181
182
/* Forward declarations for the driver entry points defined below. */
static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static int olympic_init(struct net_device *dev);
static int olympic_open(struct net_device *dev);
static netdev_tx_t olympic_xmit(struct sk_buff *skb,
				      struct net_device *dev);
static int olympic_close(struct net_device *dev);
static void olympic_set_rx_mode(struct net_device *dev);
static void olympic_freemem(struct net_device *dev) ;
static irqreturn_t olympic_interrupt(int irq, void *dev_id);
static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
static void olympic_arb_cmd(struct net_device *dev);
static int olympic_change_mtu(struct net_device *dev, int mtu);
static void olympic_srb_bh(struct net_device *dev) ;
static void olympic_asb_bh(struct net_device *dev) ;
static const struct file_operations olympic_proc_ops;

/* net_device callbacks shared by every adapter instance. */
static const struct net_device_ops olympic_netdev_ops = {
	.ndo_open		= olympic_open,
	.ndo_stop		= olympic_close,
	.ndo_start_xmit		= olympic_xmit,
	.ndo_change_mtu		= olympic_change_mtu,
	.ndo_set_multicast_list	= olympic_set_rx_mode,
	.ndo_set_mac_address	= olympic_set_mac_address,
};
207
208static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
209{
210 struct net_device *dev ;
211 struct olympic_private *olympic_priv;
212 static int card_no = -1 ;
213 int i ;
214
215 card_no++ ;
216
217 if ((i = pci_enable_device(pdev))) {
218 return i ;
219 }
220
221 pci_set_master(pdev);
222
223 if ((i = pci_request_regions(pdev,"olympic"))) {
224 goto op_disable_dev;
225 }
226
227 dev = alloc_trdev(sizeof(struct olympic_private)) ;
228 if (!dev) {
229 i = -ENOMEM;
230 goto op_release_dev;
231 }
232
233 olympic_priv = netdev_priv(dev) ;
234
235 spin_lock_init(&olympic_priv->olympic_lock) ;
236
237 init_waitqueue_head(&olympic_priv->srb_wait);
238 init_waitqueue_head(&olympic_priv->trb_wait);
239#if OLYMPIC_DEBUG
240 printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, netdev_priv(dev));
241#endif
242 dev->irq=pdev->irq;
243 dev->base_addr=pci_resource_start(pdev, 0);
244 olympic_priv->olympic_card_name = pci_name(pdev);
245 olympic_priv->pdev = pdev;
246 olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256);
247 olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048);
248 if (!olympic_priv->olympic_mmio || !olympic_priv->olympic_lap) {
249 goto op_free_iomap;
250 }
251
252 if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
253 olympic_priv->pkt_buf_sz = PKT_BUF_SZ ;
254 else
255 olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
256
257 dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ;
258 olympic_priv->olympic_ring_speed = ringspeed[card_no] ;
259 olympic_priv->olympic_message_level = message_level[card_no] ;
260 olympic_priv->olympic_network_monitor = network_monitor[card_no];
261
262 if ((i = olympic_init(dev))) {
263 goto op_free_iomap;
264 }
265
266 dev->netdev_ops = &olympic_netdev_ops;
267 SET_NETDEV_DEV(dev, &pdev->dev);
268
269 pci_set_drvdata(pdev,dev) ;
270 register_netdev(dev) ;
271 printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name);
272 if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */
273 char proc_name[20] ;
274 strcpy(proc_name,"olympic_") ;
275 strcat(proc_name,dev->name) ;
276 proc_create_data(proc_name, 0, init_net.proc_net, &olympic_proc_ops, dev);
277 printk("Olympic: Network Monitor information: /proc/%s\n",proc_name);
278 }
279 return 0 ;
280
281op_free_iomap:
282 if (olympic_priv->olympic_mmio)
283 iounmap(olympic_priv->olympic_mmio);
284 if (olympic_priv->olympic_lap)
285 iounmap(olympic_priv->olympic_lap);
286
287 free_netdev(dev);
288op_release_dev:
289 pci_release_regions(pdev);
290
291op_disable_dev:
292 pci_disable_device(pdev);
293 return i;
294}
295
296static int olympic_init(struct net_device *dev)
297{
298 struct olympic_private *olympic_priv;
299 u8 __iomem *olympic_mmio, *init_srb,*adapter_addr;
300 unsigned long t;
301 unsigned int uaa_addr;
302
303 olympic_priv=netdev_priv(dev);
304 olympic_mmio=olympic_priv->olympic_mmio;
305
306 printk("%s\n", version);
307 printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
308
309 writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
310 t=jiffies;
311 while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
312 schedule();
313 if(time_after(jiffies, t + 40*HZ)) {
314 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
315 return -ENODEV;
316 }
317 }
318
319
320 /* Needed for cardbus */
321 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
322 writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK);
323 }
324
325#if OLYMPIC_DEBUG
326 printk("BCTL: %x\n",readl(olympic_mmio+BCTL));
327 printk("GPR: %x\n",readw(olympic_mmio+GPR));
328 printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK));
329#endif
330 /* Aaaahhh, You have got to be real careful setting GPR, the card
331 holds the previous values from flash memory, including autosense
332 and ring speed */
333
334 writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL);
335
336 if (olympic_priv->olympic_ring_speed == 0) { /* Autosense */
337 writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR);
338 if (olympic_priv->olympic_message_level)
339 printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name);
340 } else if (olympic_priv->olympic_ring_speed == 16) {
341 if (olympic_priv->olympic_message_level)
342 printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name);
343 writew(GPR_16MBPS, olympic_mmio+GPR);
344 } else if (olympic_priv->olympic_ring_speed == 4) {
345 if (olympic_priv->olympic_message_level)
346 printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ;
347 writew(0, olympic_mmio+GPR);
348 }
349
350 writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR);
351
352#if OLYMPIC_DEBUG
353 printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ;
354#endif
355 /* Solo has been paused to meet the Cardbus power
356 * specs if the adapter is cardbus. Check to
357 * see its been paused and then restart solo. The
358 * adapter should set the pause bit within 1 second.
359 */
360
361 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
362 t=jiffies;
363 while (!(readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE)) {
364 schedule() ;
365 if(time_after(jiffies, t + 2*HZ)) {
366 printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ;
367 return -ENODEV;
368 }
369 }
370 writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ;
371 }
372
373 /* start solo init */
374 writel((1<<15),olympic_mmio+SISR_MASK_SUM);
375
376 t=jiffies;
377 while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
378 schedule();
379 if(time_after(jiffies, t + 15*HZ)) {
380 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
381 return -ENODEV;
382 }
383 }
384
385 writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
386
387#if OLYMPIC_DEBUG
388 printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
389#endif
390
391 init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
392
393#if OLYMPIC_DEBUG
394{
395 int i;
396 printk("init_srb(%p): ",init_srb);
397 for(i=0;i<20;i++)
398 printk("%x ",readb(init_srb+i));
399 printk("\n");
400}
401#endif
402 if(readw(init_srb+6)) {
403 printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",readw(init_srb+6));
404 return -ENODEV;
405 }
406
407 if (olympic_priv->olympic_message_level) {
408 if ( readb(init_srb +2) & 0x40) {
409 printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ;
410 } else {
411 printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n");
412 }
413 }
414
415 uaa_addr=swab16(readw(init_srb+8));
416
417#if OLYMPIC_DEBUG
418 printk("UAA resides at %x\n",uaa_addr);
419#endif
420
421 writel(uaa_addr,olympic_mmio+LAPA);
422 adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800));
423
424 memcpy_fromio(&dev->dev_addr[0], adapter_addr,6);
425
426#if OLYMPIC_DEBUG
427 printk("adapter address: %pM\n", dev->dev_addr);
428#endif
429
430 olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12));
431 olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14));
432
433 return 0;
434
435}
436
/*
 * Bring the interface up: re-run the hardware init, claim the IRQ,
 * issue the SRB OPEN_ADAPTER command (retrying once on an autosense
 * ring-speed mismatch), then build the rx/tx descriptor rings, enable
 * the DMA channels and unmask the interrupt sources.
 *
 * Returns 0 on success, -EAGAIN if the IRQ cannot be claimed, -EIO if
 * the open command fails or no rx buffer could be allocated.
 */
static int olympic_open(struct net_device *dev)
{
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
	unsigned long flags, t;
	int i, open_finished = 1 ;
	u8 resp, err;

	DECLARE_WAITQUEUE(wait,current) ;

	olympic_init(dev);

	if (request_irq(dev->irq, olympic_interrupt, IRQF_SHARED , "olympic",
			dev))
		return -EAGAIN;

#if OLYMPIC_DEBUG
	printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
	printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR));
#endif

	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);

	writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */

	writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */

	/* adapter is closed, so SRB is pointed to by LAPWWO */

	writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
	init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));

#if OLYMPIC_DEBUG
	printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
	printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
	printk("Before the open command\n");
#endif
	/* Open-command retry loop: loops only when autosense hits a
	 * ring-speed mismatch (resp 0x07) on the first attempt. */
	do {
		memset_io(init_srb,0,SRB_COMMAND_SIZE);

		writeb(SRB_OPEN_ADAPTER,init_srb) ; 	/* open */
		writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2);

		/* If Network Monitor, instruct card to copy MAC frames through the ARB */
		if (olympic_priv->olympic_network_monitor)
			writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8);
		else
			writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8);

		/* Test OR of first 3 bytes as its totally possible for
		 * someone to set the first 2 bytes to be zero, although this
		 * is an error, the first byte must have bit 6 set to 1  */

		if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) {
			writeb(olympic_priv->olympic_laa[0],init_srb+12);
			writeb(olympic_priv->olympic_laa[1],init_srb+13);
			writeb(olympic_priv->olympic_laa[2],init_srb+14);
			writeb(olympic_priv->olympic_laa[3],init_srb+15);
			writeb(olympic_priv->olympic_laa[4],init_srb+16);
			writeb(olympic_priv->olympic_laa[5],init_srb+17);
			memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ;
		}
		writeb(1,init_srb+30);

		spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
		olympic_priv->srb_queued=1;

		writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);

		t = jiffies ;

		/* Sleep until the interrupt handler clears srb_queued, a
		 * signal arrives, or the 10 second timeout expires. */
		add_wait_queue(&olympic_priv->srb_wait,&wait) ;
		set_current_state(TASK_INTERRUPTIBLE) ;

		while(olympic_priv->srb_queued) {
			schedule() ;
			if(signal_pending(current))	{
				printk(KERN_WARNING "%s: Signal received in open.\n",
					dev->name);
				printk(KERN_WARNING "SISR=%x LISR=%x\n",
					readl(olympic_mmio+SISR),
					readl(olympic_mmio+LISR));
				olympic_priv->srb_queued=0;
				break;
			}
			if (time_after(jiffies, t + 10*HZ)) {
				printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
				olympic_priv->srb_queued=0;
				break ;
			}
			set_current_state(TASK_INTERRUPTIBLE) ;
		}
		remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
		set_current_state(TASK_RUNNING) ;
		olympic_priv->srb_queued = 0 ;
#if OLYMPIC_DEBUG
		printk("init_srb(%p): ",init_srb);
		for(i=0;i<20;i++)
			printk("%02x ",readb(init_srb+i));
		printk("\n");
#endif

		/* If we get the same return response as we set, the interrupt wasn't raised and the open
		 * timed out.
		 */

		switch (resp = readb(init_srb+2)) {
		case OLYMPIC_CLEAR_RET_CODE:
			printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ;
			goto out;
		case 0:
			open_finished = 1;
			break;
		case 0x07:
			if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
				printk(KERN_WARNING "%s: Retrying at different ring speed\n", dev->name);
				open_finished = 0 ;
				continue;
			}

			err = readb(init_srb+7);

			if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
				printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
				printk(KERN_WARNING "%s: Please try again with a specified ring speed\n",dev->name);
			} else {
				printk(KERN_WARNING "%s: %s - %s\n", dev->name,
					open_maj_error[(err & 0xf0) >> 4],
					open_min_error[(err & 0x0f)]);
			}
			goto out;

		case 0x32:
			printk(KERN_WARNING "%s: Invalid LAA: %pM\n",
			       dev->name, olympic_priv->olympic_laa);
			goto out;

		default:
			printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name, resp);
			goto out;

		}
	} while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */

	if (readb(init_srb+18) & (1<<3))
		if (olympic_priv->olympic_message_level)
			printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name);

	/* Decode the negotiated ring speed from the open response. */
	if (readb(init_srb+18) & (1<<1))
		olympic_priv->olympic_ring_speed = 100 ;
	else if (readb(init_srb+18) & 1)
		olympic_priv->olympic_ring_speed = 16 ;
	else
		olympic_priv->olympic_ring_speed = 4 ;

	if (olympic_priv->olympic_message_level)
		printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed);

	/* Cache the on-card channel offsets reported in the open reply. */
	olympic_priv->asb = swab16(readw(init_srb+8));
	olympic_priv->srb = swab16(readw(init_srb+10));
	olympic_priv->arb = swab16(readw(init_srb+12));
	olympic_priv->trb = swab16(readw(init_srb+16));

	olympic_priv->olympic_receive_options = 0x01 ;
	olympic_priv->olympic_copy_all_options = 0 ;

	/* setup rx ring */

	writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */

	writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this the enables RX channel */

	for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {

		struct sk_buff *skb;

		skb=dev_alloc_skb(olympic_priv->pkt_buf_sz);
		if(skb == NULL)
			break;

		skb->dev = dev;

		olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev,
							  skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ;
		olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz);
		olympic_priv->rx_ring_skb[i]=skb;
	}

	/* A partially filled ring is usable (i buffers); zero buffers is fatal. */
	if (i==0) {
		printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
		goto out;
	}

	olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring,
					 sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
	writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
	writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
	writew(i, olympic_mmio+RXDESCQCNT);

	olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring,
						sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
	writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
	writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);

	olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1;	/* last processed rx status */
	olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;

	writew(i, olympic_mmio+RXSTATQCNT);

#if OLYMPIC_DEBUG
	printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
	printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
	printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) );
	printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) );
	printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]) );

	printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
	printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",
		olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ;
#endif

	writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ);

#if OLYMPIC_DEBUG
	printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
	printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
	printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
#endif

	writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM);

	/* setup tx ring */

	writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
	for(i=0;i<OLYMPIC_TX_RING_SIZE;i++)
		olympic_priv->olympic_tx_ring[i].buffer=cpu_to_le32(0xdeadbeef);

	olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE;
	olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring,
					 sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
	writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
	writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
	writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);

	olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
						sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
	writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1);
	writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1);
	writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1);

	olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */
	olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */

	writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */
	writel(0,olympic_mmio+EISR) ;
	writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */
	writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM);

#if OLYMPIC_DEBUG
	printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
	printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK));
#endif

	/* Network monitor mode: dump the on-card address/parameter tables. */
	if (olympic_priv->olympic_network_monitor) {
		u8 __iomem *oat;
		u8 __iomem *opt;
		u8 addr[6];
		oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr);
		opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr);

		for (i = 0; i < 6; i++)
			addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+i);
		printk("%s: Node Address: %pM\n", dev->name, addr);
		printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name,
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));

		for (i = 0; i < 6; i++)
			addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+i);
		printk("%s: NAUN Address: %pM\n", dev->name, addr);
	}

	netif_start_queue(dev);
	return 0;

out:
	free_irq(dev->irq, dev);
	return -EIO;
}
729
730/*
731 * When we enter the rx routine we do not know how many frames have been
732 * queued on the rx channel. Therefore we start at the next rx status
733 * position and travel around the receive ring until we have completed
734 * all the frames.
735 *
736 * This means that we may process the frame before we receive the end
737 * of frame interrupt. This is why we always test the status instead
738 * of blindly processing the next frame.
739 *
740 * We also remove the last 4 bytes from the packet as well, these are
741 * just token ring trailer info and upset protocols that don't check
742 * their own length, i.e. SNA.
743 *
744 */
/*
 * olympic_rx - drain the adapter's receive status ring.
 *
 * Runs in hard-irq context, called from olympic_interrupt() (under
 * olympic_lock) when SISR_RX_STATUS is raised.  For every completed
 * status entry the frame is pushed to the stack with netif_rx().
 * Delivery strategy, per the comment below: a single-buffer frame
 * longer than 1500 bytes is handed up zero-copy by swapping a freshly
 * allocated skb into the ring; everything else is copied out of the
 * (possibly multi-fragment) ring buffers.  Consumed buffers are given
 * back to the adapter through the RXENQ register at the end of each
 * iteration.
 */
static void olympic_rx(struct net_device *dev)
{
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
	struct olympic_rx_status *rx_status;
	struct olympic_rx_desc *rx_desc ;
	int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
	struct sk_buff *skb, *skb2;
	int i;

	rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;

	/* A zero status_buffercnt means the adapter has not completed this
	 * entry yet; the loop stops there. */
	while (rx_status->status_buffercnt) {
		u32 l_status_buffercnt;

		olympic_priv->rx_status_last_received++ ;
		olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
#if OLYMPIC_DEBUG
		printk("rx status: %x rx len: %x\n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
#endif
		length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
		buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
		i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */
		frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16;

#if OLYMPIC_DEBUG
		printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
#endif
		l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
		/* Top two bits set => frame complete; bits in 0x3B000000 flag
		 * receive errors which are logged and counted, not delivered. */
		if(l_status_buffercnt & 0xC0000000) {
			if (l_status_buffercnt & 0x3B000000) {
				if (olympic_priv->olympic_message_level) {
					if (l_status_buffercnt & (1<<29))  /* Rx Frame Truncated */
						printk(KERN_WARNING "%s: Rx Frame Truncated\n",dev->name);
					if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
						printk(KERN_WARNING "%s: Rx Frame Receive overrun\n",dev->name);
					if (l_status_buffercnt & (1<<27)) /* No receive buffers */
						printk(KERN_WARNING "%s: No receive buffers\n",dev->name);
					if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
						printk(KERN_WARNING "%s: Receive frame error detect\n",dev->name);
					if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
						printk(KERN_WARNING "%s: Received Error Detect\n",dev->name);
				}
				/* Skip the i buffers belonging to the bad frame. */
				olympic_priv->rx_ring_last_received += i ;
				olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
				dev->stats.rx_errors++;
			} else {

				if (buffer_cnt == 1) {
					skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ;
				} else {
					skb = dev_alloc_skb(length) ;
				}

				if (skb == NULL) {
					printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n",dev->name) ;
					dev->stats.rx_dropped++;
					/* Update counters even though we don't transfer the frame */
					olympic_priv->rx_ring_last_received += i ;
					olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
				} else  {
					/* Optimise based upon number of buffers used.
			   	   	   If only one buffer is used we can simply swap the buffers around.
			   	   	   If more than one then we must use the new buffer and copy the information
			   	   	   first. Ideally all frames would be in a single buffer, this can be tuned by
                               	   	   altering the buffer size. If the length of the packet is less than
				   	   1500 bytes we're going to copy it over anyway to stop packets getting
				   	   dropped from sockets with buffers smaller than our pkt_buf_sz. */

 					if (buffer_cnt==1) {
						olympic_priv->rx_ring_last_received++ ;
						olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
						rx_ring_last_received = olympic_priv->rx_ring_last_received ;
						if (length > 1500) {
							/* Zero-copy: hand the mapped ring skb (skb2)
							 * to the stack and park the new skb in the
							 * ring in its place. */
							skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
							/* unmap buffer */
							pci_unmap_single(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
							skb_put(skb2,length-4);
							skb2->protocol = tr_type_trans(skb2,dev);
							olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
								cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
								olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
							olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
								cpu_to_le32(olympic_priv->pkt_buf_sz);
							olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
							netif_rx(skb2) ;
						} else {
							/* Copy path: sync for CPU, copy out minus
							 * the 4-byte trailer, sync back for device. */
							pci_dma_sync_single_for_cpu(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
							skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
								      skb_put(skb,length - 4),
								      length - 4);
							pci_dma_sync_single_for_device(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
							skb->protocol = tr_type_trans(skb,dev) ;
							netif_rx(skb) ;
						}
					} else {
						/* Multi-buffer frame: gather every fragment
						 * into the new skb.  The last fragment uses
						 * frag_len, the rest use their descriptor's
						 * reserved length. */
						do { /* Walk the buffers */
							olympic_priv->rx_ring_last_received++ ;
							olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
							rx_ring_last_received = olympic_priv->rx_ring_last_received ;
							pci_dma_sync_single_for_cpu(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
							rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
							cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
							skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
								      skb_put(skb, cpy_length),
								      cpy_length);
							pci_dma_sync_single_for_device(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
						} while (--i) ;
						skb_trim(skb,skb->len-4) ;
						skb->protocol = tr_type_trans(skb,dev);
						netif_rx(skb) ;
					}
					dev->stats.rx_packets++ ;
					dev->stats.rx_bytes += length ;
				} /* if skb == null */
			} /* If status & 0x3b */

		} else { /*if buffercnt & 0xC */
			/* Incomplete status: skip the buffers but deliver nothing. */
			olympic_priv->rx_ring_last_received += i ;
			olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ;
		}

		/* Clear the status entry so the adapter can reuse it, then
		 * advance to the next one. */
		rx_status->fragmentcnt_framelen = 0 ;
		rx_status->status_buffercnt = 0 ;
		rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);

		/* Return buffer_cnt buffers to the adapter; the high bit of
		 * RXENQ is an ownership toggle that must be flipped each time. */
		writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt , olympic_mmio+RXENQ);
	} /* while */

}
885
/*
 * olympic_freemem - free receive skbs and unmap every DMA ring.
 *
 * Walks the whole rx ring from rx_status_last_received, freeing any
 * skb still attached and unmapping its buffer; the sentinel value
 * 0xdeadbeef in a descriptor marks "no buffer mapped" and is skipped.
 * Afterwards the four ring structures themselves (rx/tx status and
 * descriptor rings) are unmapped.  Called from olympic_close() with
 * interrupts potentially disabled, hence dev_kfree_skb_irq().
 */
static void olympic_freemem(struct net_device *dev)
{
	struct olympic_private *olympic_priv=netdev_priv(dev);
	int i;

	for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
		if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) {
			dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
			olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL;
		}
		if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != cpu_to_le32(0xdeadbeef)) {
			pci_unmap_single(olympic_priv->pdev,
			le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
			olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
		}
		olympic_priv->rx_status_last_received++;
		olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
	}
	/* unmap rings */
	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
		sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
		sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);

	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
		sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
		sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);

	return ;
}
917
/*
 * olympic_interrupt - main interrupt service routine.
 *
 * Reads SISR twice: first non-destructively to check SISR_MI (shared
 * IRQ line — bail with IRQ_NONE if the bit is clear), then through the
 * read-and-reset register once the interrupt is known to be ours.
 * Dispatches on the latched bits: SRB/TRB/ASB handshakes, tx
 * completions, rx status, adapter check, and fatal bus errors.
 * All work runs under olympic_lock.
 */
static irqreturn_t olympic_interrupt(int irq, void *dev_id)
{
	struct net_device *dev= (struct net_device *)dev_id;
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
	u32 sisr;
	u8 __iomem *adapter_check_area ;

	/*
	 *  Read sisr but don't reset it yet.
	 *  The indication bit may have been set but the interrupt latch
	 *  bit may not be set, so we'd lose the interrupt later.
	 */
	sisr=readl(olympic_mmio+SISR) ;
	if (!(sisr & SISR_MI))	/* Interrupt isn't for us */
		return IRQ_NONE;
	sisr=readl(olympic_mmio+SISR_RR) ;  /* Read & Reset sisr */

	spin_lock(&olympic_priv->olympic_lock);

	/* Hotswap gives us this on removal */
	if (sisr == 0xffffffff) {
		printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ;
		spin_unlock(&olympic_priv->olympic_lock) ;
		return IRQ_NONE;
	}

	if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |
			SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {

		/* If we ever get this the adapter is seriously dead. Only a reset is going to
		 * bring it back to life. We're talking pci bus errors and such like :( */
		if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
			printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ;
			printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ;
			printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ;
			printk(KERN_ERR "or the linux-tr mailing list.\n") ;
			wake_up_interruptible(&olympic_priv->srb_wait);
			spin_unlock(&olympic_priv->olympic_lock) ;
			return IRQ_HANDLED;
		} /* SISR_ERR */

		if(sisr & SISR_SRB_REPLY) {
			/* srb_queued==1: a task is sleeping on srb_wait;
			 * srb_queued==2: completion is handled in the bottom half. */
			if(olympic_priv->srb_queued==1) {
				wake_up_interruptible(&olympic_priv->srb_wait);
			} else if (olympic_priv->srb_queued==2) {
				olympic_srb_bh(dev) ;
			}
			olympic_priv->srb_queued=0;
		} /* SISR_SRB_REPLY */

		/* We shouldn't ever miss the Tx interrupt, but the you never know, hence the loop to ensure
		   we get all tx completions. */
		if (sisr & SISR_TX1_EOF) {
			while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) {
				olympic_priv->tx_ring_last_status++;
				olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
				olympic_priv->free_tx_ring_entries++;
				dev->stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
				dev->stats.tx_packets++ ;
				pci_unmap_single(olympic_priv->pdev,
					le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
					olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
				olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=cpu_to_le32(0xdeadbeef);
				olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
			}
			netif_wake_queue(dev);
		} /* SISR_TX1_EOF */

		if (sisr & SISR_RX_STATUS) {
			olympic_rx(dev);
		} /* SISR_RX_STATUS */

		if (sisr & SISR_ADAPTER_CHECK) {
			/* Adapter internal failure: dump the 8-byte check area
			 * and leave the queue stopped — no recovery here. */
			netif_stop_queue(dev);
			printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
			writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
			adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
			printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
			spin_unlock(&olympic_priv->olympic_lock) ;
			return IRQ_HANDLED;
		} /* SISR_ADAPTER_CHECK */

		if (sisr & SISR_ASB_FREE) {
			/* Wake up anything that is waiting for the asb response */
			if (olympic_priv->asb_queued) {
				olympic_asb_bh(dev) ;
			}
		} /* SISR_ASB_FREE */

		if (sisr & SISR_ARB_CMD) {
			olympic_arb_cmd(dev) ;
		} /* SISR_ARB_CMD */

		if (sisr & SISR_TRB_REPLY) {
			/* Wake up anything that is waiting for the trb response */
			if (olympic_priv->trb_queued) {
				wake_up_interruptible(&olympic_priv->trb_wait);
			}
			olympic_priv->trb_queued = 0 ;
		} /* SISR_TRB_REPLY */

		if (sisr & SISR_RX_NOBUF) {
			/* According to the documentation, we don't have to do anything, but trapping it keeps it out of
  			   /var/log/messages. */
		} /* SISR_RX_NOBUF */
	} else {
		printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
		printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
	} /* One if the interrupts we want */

	/* Re-enable the master interrupt before dropping the lock. */
	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);

	spin_unlock(&olympic_priv->olympic_lock) ;
	return IRQ_HANDLED;
}
1034
1035static netdev_tx_t olympic_xmit(struct sk_buff *skb,
1036 struct net_device *dev)
1037{
1038 struct olympic_private *olympic_priv=netdev_priv(dev);
1039 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
1040 unsigned long flags ;
1041
1042 spin_lock_irqsave(&olympic_priv->olympic_lock, flags);
1043
1044 netif_stop_queue(dev);
1045
1046 if(olympic_priv->free_tx_ring_entries) {
1047 olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer =
1048 cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE));
1049 olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
1050 olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb;
1051 olympic_priv->free_tx_ring_entries--;
1052
1053 olympic_priv->tx_ring_free++;
1054 olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
1055 writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1);
1056 netif_wake_queue(dev);
1057 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1058 return NETDEV_TX_OK;
1059 } else {
1060 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1061 return NETDEV_TX_BUSY;
1062 }
1063
1064}
1065
1066
/*
 * olympic_close - shut the adapter down.
 *
 * Issues a CLOSE.ADAPTER command through the SRB and sleeps (up to
 * 60s per wait, interruptible) until the interrupt handler clears
 * srb_queued.  A pending signal abandons the wait with a warning.
 * Afterwards the rx/tx rings are freed, the FIFOs and busmaster logic
 * are reset via BCTL bits 13-14, and the IRQ is released.
 */
static int olympic_close(struct net_device *dev)
{
	struct olympic_private *olympic_priv=netdev_priv(dev);
    	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*srb;
	unsigned long t,flags;

	DECLARE_WAITQUEUE(wait,current) ;

	netif_stop_queue(dev);

	/* Point LAPA at the SRB and build the close command in place. */
	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));

    	writeb(SRB_CLOSE_ADAPTER,srb+0);
	writeb(0,srb+1);
	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);

	/* Must be on the waitqueue before the command is issued so the
	 * SRB-reply interrupt cannot race past us. */
	add_wait_queue(&olympic_priv->srb_wait,&wait) ;
	set_current_state(TASK_INTERRUPTIBLE) ;

	spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
	olympic_priv->srb_queued=1;

	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
	spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);

	while(olympic_priv->srb_queued) {

		t = schedule_timeout_interruptible(60*HZ);

        	if(signal_pending(current))	{
			printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
            		printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
            		olympic_priv->srb_queued=0;
            		break;
        	}

		if (t == 0) {
			printk(KERN_WARNING "%s: SRB timed out. May not be fatal.\n",dev->name);
		}
		olympic_priv->srb_queued=0;
    	}
	remove_wait_queue(&olympic_priv->srb_wait,&wait) ;

	olympic_priv->rx_status_last_received++;
	olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;

	olympic_freemem(dev) ;

	/* reset tx/rx fifo's and busmaster logic */

	writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
	udelay(1);
	writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);

#if OLYMPIC_DEBUG
	{
	int i ;
	printk("srb(%p): ",srb);
	for(i=0;i<4;i++)
		printk("%x ",readb(srb+i));
	printk("\n");
	}
#endif
	free_irq(dev->irq,dev);

	return 0;

}
1136
/*
 * olympic_set_rx_mode - program promiscuous mode and multicast filter.
 *
 * Only one SRB can be outstanding at a time, so this issues at most
 * one command per call: if the copy-all options changed (promiscuous
 * toggled), a MODIFY.RECEIVE.OPTIONS SRB is sent and the function
 * returns early — the functional-address update then happens on a
 * subsequent call.  Otherwise the multicast list is OR-ed into a
 * 4-byte functional address and a SET.FUNC.ADDRESS SRB is issued.
 * Completion is handled asynchronously by olympic_srb_bh()
 * (srb_queued == 2).
 */
static void olympic_set_rx_mode(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
   	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
	u8 options = 0;
	u8 __iomem *srb;
	struct netdev_hw_addr *ha;
	unsigned char dev_mc_address[4] ;

	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
	options = olympic_priv->olympic_copy_all_options;

	if (dev->flags&IFF_PROMISC)
		options |= 0x61 ;
	else
		options &= ~0x61 ;

	/* Only issue the srb if there is a change in options */

	if ((options ^ olympic_priv->olympic_copy_all_options)) {

		/* Now to issue the srb command to alter the copy.all.options */

		writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb);
		writeb(0,srb+1);
		writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
		writeb(0,srb+3);
		writeb(olympic_priv->olympic_receive_options,srb+4);
		writeb(options,srb+5);

		olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

		writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		olympic_priv->olympic_copy_all_options = options ;

		return ;
	}

	/* Set the functional addresses we need for multicast */

	dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;

	netdev_for_each_mc_addr(ha, dev) {
		dev_mc_address[0] |= ha->addr[2];
		dev_mc_address[1] |= ha->addr[3];
		dev_mc_address[2] |= ha->addr[4];
		dev_mc_address[3] |= ha->addr[5];
	}

	writeb(SRB_SET_FUNC_ADDRESS,srb+0);
	writeb(0,srb+1);
	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
	writeb(0,srb+3);
	writeb(0,srb+4);
	writeb(0,srb+5);
	writeb(dev_mc_address[0],srb+6);
	writeb(dev_mc_address[1],srb+7);
	writeb(dev_mc_address[2],srb+8);
	writeb(dev_mc_address[3],srb+9);

	olympic_priv->srb_queued = 2 ;
	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

}
1203
/*
 * olympic_srb_bh - bottom half for asynchronously issued SRB commands.
 *
 * Called from the interrupt handler when an SRB completes with
 * srb_queued == 2 (the issuer could not sleep).  Decodes the command
 * byte (srb+0) and its return code (srb+2), logging warnings for
 * failures.  Purely diagnostic: no state is changed on error — see
 * the note below about IFF_PROMISC.
 */
static void olympic_srb_bh(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
   	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
	u8 __iomem *srb;

	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));

	switch (readb(srb)) {

		/* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
                 * At some point we should do something if we get an error, such as
                 * resetting the IFF_PROMISC flag in dev
		 */

		case SRB_MODIFY_RECEIVE_OPTIONS:
			switch (readb(srb+2)) {
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
					break ;
				default:
					if (olympic_priv->olympic_message_level)
						printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ;
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_SET_GROUP_ADDRESS - Multicast group setting
		 */

		case SRB_SET_GROUP_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
					break ;
				case 0x3c:
					printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ;
					break ;
				case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
					printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ;
					break ;
				case 0x55:
					printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
		 */

		case SRB_RESET_GROUP_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				case 0x39: /* Must deal with this if individual multicast addresses used */
					printk(KERN_INFO "%s: Group address not found\n",dev->name);
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;


		/* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
		 */

		case SRB_SET_FUNC_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Functional Address Mask Set\n",dev->name);
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_READ_LOG - Read and reset the adapter error counters
		 */

		case SRB_READ_LOG:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;

			} /* switch srb[2] */
			break ;

		/* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */

		case SRB_READ_SR_COUNTERS:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		default:
			printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name);
			break ;
	} /* switch srb[0] */

}
1348
1349static int olympic_set_mac_address (struct net_device *dev, void *addr)
1350{
1351 struct sockaddr *saddr = addr ;
1352 struct olympic_private *olympic_priv = netdev_priv(dev);
1353
1354 if (netif_running(dev)) {
1355 printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
1356 return -EIO ;
1357 }
1358
1359 memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ;
1360
1361 if (olympic_priv->olympic_message_level) {
1362 printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0],
1363 olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2],
1364 olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4],
1365 olympic_priv->olympic_laa[5]);
1366 }
1367
1368 return 0 ;
1369}
1370
/*
 * olympic_arb_cmd - handle an adapter-initiated ARB request.
 *
 * Two request types are decoded from the ARB area:
 *  - ARB_RECEIVE_DATA: a MAC frame has arrived in shared adapter
 *    memory.  The buffer chain is copied into an skb and passed up;
 *    the adapter is then acknowledged through the ASB.  If the ASB is
 *    busy, asb_queued is set and olympic_asb_bh() finishes the
 *    handshake later.
 *  - ARB_LAN_CHANGE_STATUS: ring status changed.  Differences against
 *    the cached lan status are logged; serious errors (lobe fault,
 *    auto-removal, FDX protocol error, force-remove) mean the hardware
 *    has closed the adapter, so the FIFOs are reset and the queue
 *    stopped.  Counter overflows trigger READ.LOG /
 *    READ.SR.COUNTERS SRBs handled by olympic_srb_bh().
 */
static void olympic_arb_cmd(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
	u8 __iomem *arb_block, *asb_block, *srb  ;
	u8 header_len ;
	u16 frame_len, buffer_len ;
	struct sk_buff *mac_frame ;
	u8 __iomem *buf_ptr ;
	u8 __iomem *frame_data ;
	u16 buff_off ;
	u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */
	u8 fdx_prot_error ;
	u16 next_ptr;

	arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
	asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
	srb = (olympic_priv->olympic_lap + olympic_priv->srb) ;

	if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */

		header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */
		frame_len = swab16(readw(arb_block + 10)) ;

		buff_off = swab16(readw(arb_block + 6)) ;

		buf_ptr = olympic_priv->olympic_lap + buff_off ;

#if OLYMPIC_DEBUG
{
		int i;
		frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;

		for (i=0 ;  i < 14 ; i++) {
			printk("Loc %d = %02x\n",i,readb(frame_data + i));
		}

		printk("next %04x, fs %02x, len %04x\n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
}
#endif
		mac_frame = dev_alloc_skb(frame_len) ;
		if (!mac_frame) {
			printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name);
			/* Still must acknowledge the adapter below. */
			goto drop_frame;
		}

		/* Walk the buffer chain, creating the frame */

		do {
			frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
			buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
			memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ;
			next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next));
		} while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + swab16(next_ptr)));

		mac_frame->protocol = tr_type_trans(mac_frame, dev);

		if (olympic_priv->olympic_network_monitor) {
			struct trh_hdr *mac_hdr;
			printk(KERN_WARNING "%s: Received MAC Frame, details:\n",dev->name);
			mac_hdr = tr_hdr(mac_frame);
			printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n",
			       dev->name, mac_hdr->daddr);
			printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %pM\n",
			       dev->name, mac_hdr->saddr);
		}
		netif_rx(mac_frame);

drop_frame:
		/* Now tell the card we have dealt with the received frame */

		/* Set LISR Bit 1 */
		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM);

		/* Is the ASB free ? */

		if (readb(asb_block + 2) != 0xff) {
			/* ASB busy: olympic_asb_bh() will send the reply when
			 * SISR_ASB_FREE fires. */
			olympic_priv->asb_queued = 1 ;
			writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
			return ;
			/* Drop out and wait for the bottom half to be run */
		}

		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);

		olympic_priv->asb_queued = 2 ;

		return ;

	} else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
		lan_status = swab16(readw(arb_block+6));
		fdx_prot_error = readb(arb_block+8) ;

		/* Issue ARB Free */
		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM);

		/* Only bits that changed since the last event are acted on. */
		lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ;

		if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
			if (lan_status_diff & LSC_LWF)
					printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
			if (lan_status_diff & LSC_ARW)
					printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
			if (lan_status_diff & LSC_FPE)
					printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
			if (lan_status_diff & LSC_RR)
					printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);

			/* Adapter has been closed by the hardware */

			/* reset tx/rx fifo's and busmaster logic */

			writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
			udelay(1);
			writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
			netif_stop_queue(dev);
			olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
			printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
		} /* If serious error */

		if (olympic_priv->olympic_message_level) {
			if (lan_status_diff & LSC_SIG_LOSS)
					printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
			if (lan_status_diff & LSC_HARD_ERR)
					printk(KERN_INFO "%s: Beaconing\n",dev->name);
			if (lan_status_diff & LSC_SOFT_ERR)
					printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
			if (lan_status_diff & LSC_TRAN_BCN)
					printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
			if (lan_status_diff & LSC_SS)
					printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
			if (lan_status_diff & LSC_RING_REC)
					printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
			if (lan_status_diff & LSC_FDX_MODE)
					printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
		}

		if (lan_status_diff & LSC_CO) {

				if (olympic_priv->olympic_message_level)
					printk(KERN_INFO "%s: Counter Overflow\n", dev->name);

				/* Issue READ.LOG command */

				writeb(SRB_READ_LOG, srb);
				writeb(0,srb+1);
				writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
				writeb(0,srb+3);
				writeb(0,srb+4);
				writeb(0,srb+5);

				olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

				writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		}

		if (lan_status_diff & LSC_SR_CO) {

				if (olympic_priv->olympic_message_level)
					printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);

				/* Issue a READ.SR.COUNTERS */

				writeb(SRB_READ_SR_COUNTERS,srb);
				writeb(0,srb+1);
				writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
				writeb(0,srb+3);

				olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

				writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		}

		olympic_priv->olympic_lan_status = lan_status ;

	}  /* Lan.change.status */
	else
		printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
}
1557
/*
 * olympic_asb_bh - finish a deferred ASB handshake.
 *
 * asb_queued == 1: the ASB was busy when olympic_arb_cmd() wanted to
 * acknowledge a received MAC frame; send that reply now.
 * asb_queued == 2: the reply was already sent; decode the adapter's
 * return code and log anything unexpected.  Clears asb_queued in the
 * second phase.
 */
static void olympic_asb_bh(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *arb_block, *asb_block ;

	arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
	asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;

	if (olympic_priv->asb_queued == 1) {   /* Dropped through the first time */

		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
		olympic_priv->asb_queued = 2 ;

		return ;
	}

	if (olympic_priv->asb_queued == 2) {
		switch (readb(asb_block+2)) {
			case 0x01:
				printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
				break ;
			case 0x26:
				printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
				break ;
			case 0xFF:
				/* Valid response, everything should be ok again */
				break ;
			default:
				printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name);
				break ;
		}
	}
	olympic_priv->asb_queued = 0 ;
}
1597
1598static int olympic_change_mtu(struct net_device *dev, int mtu)
1599{
1600 struct olympic_private *olympic_priv = netdev_priv(dev);
1601 u16 max_mtu ;
1602
1603 if (olympic_priv->olympic_ring_speed == 4)
1604 max_mtu = 4500 ;
1605 else
1606 max_mtu = 18000 ;
1607
1608 if (mtu > max_mtu)
1609 return -EINVAL ;
1610 if (mtu < 100)
1611 return -EINVAL ;
1612
1613 dev->mtu = mtu ;
1614 olympic_priv->pkt_buf_sz = mtu + TR_HLEN ;
1615
1616 return 0 ;
1617}
1618
/*
 * olympic_proc_show - emit /proc/net/olympic_<dev> contents.
 *
 * Dumps the adapter address table and the Token Ring parameters table
 * straight from adapter shared memory (LAP window).  Multi-byte
 * on-adapter fields are big-endian, hence the swab16() on each readw().
 * MAC-style addresses are staged into local arrays so %pM can format
 * them.  Only registered when olympic_network_monitor is enabled.
 */
static int olympic_proc_show(struct seq_file *m, void *v)
{
	struct net_device *dev = m->private;
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
	u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
	u8 addr[6];
	u8 addr2[6];
	int i;

	seq_printf(m,
		   "IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
	seq_printf(m, "\n%6s: Adapter Address   : Node Address      : Functional Addr\n",
 	   dev->name);

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i);

	seq_printf(m, "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
	   dev->name,
	   dev->dev_addr, addr,
	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));

	seq_printf(m, "\n%6s: Token Ring Parameters Table:\n", dev->name);

	seq_printf(m, "%6s: Physical Addr : Up Node Address   : Poll Address      : AccPri : Auth Src : Att Code :\n",
	  dev->name) ;

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr) + i);
	for (i = 0 ; i < 6 ; i++)
		addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i);

	seq_printf(m, "%6s: %02x:%02x:%02x:%02x   : %pM : %pM : %04x   : %04x     :  %04x    :\n",
	  dev->name,
	  readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
	  readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
	  readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
	  readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
	  addr, addr2,
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));

	seq_printf(m, "%6s: Source Address    : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
	  dev->name) ;

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i);
	seq_printf(m, "%6s: %pM : %04x  : %04x   : %04x   : %04x   : %04x    :     %04x     : \n",
	  dev->name, addr,
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));

	seq_printf(m, "%6s: Beacon Details :  Tx  :  Rx  : NAUN Node Address : NAUN Node Phys : \n",
	  dev->name) ;

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i);
	seq_printf(m, "%6s:                :  %02x  :  %02x  : %pM : %02x:%02x:%02x:%02x    : \n",
	  dev->name,
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
	  addr,
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));

	return 0;
}
1697
/* seq_file open hook: bind olympic_proc_show to the net_device stashed
 * in the proc entry's data pointer. */
static int olympic_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, olympic_proc_show, PDE(inode)->data);
}
1702
/* File operations for the /proc/net/olympic_<dev> entry (seq_file). */
static const struct file_operations olympic_proc_ops = {
	.open		= olympic_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1709
1710static void __devexit olympic_remove_one(struct pci_dev *pdev)
1711{
1712 struct net_device *dev = pci_get_drvdata(pdev) ;
1713 struct olympic_private *olympic_priv=netdev_priv(dev);
1714
1715 if (olympic_priv->olympic_network_monitor) {
1716 char proc_name[20] ;
1717 strcpy(proc_name,"olympic_") ;
1718 strcat(proc_name,dev->name) ;
1719 remove_proc_entry(proc_name,init_net.proc_net);
1720 }
1721 unregister_netdev(dev) ;
1722 iounmap(olympic_priv->olympic_mmio) ;
1723 iounmap(olympic_priv->olympic_lap) ;
1724 pci_release_regions(pdev) ;
1725 pci_set_drvdata(pdev,NULL) ;
1726 free_netdev(dev) ;
1727}
1728
/* PCI driver glue: probe/remove entry points and the device ID table. */
static struct pci_driver olympic_driver = {
	.name		= "olympic",
	.id_table	= olympic_pci_tbl,
	.probe		= olympic_probe,
	.remove		= __devexit_p(olympic_remove_one),
};
1735
/* Module init: register the PCI driver; probing happens per device. */
static int __init olympic_pci_init(void)
{
	return pci_register_driver(&olympic_driver) ;
}
1740
/* Module exit: unregister the PCI driver, detaching all devices. */
static void __exit olympic_pci_cleanup(void)
{
	pci_unregister_driver(&olympic_driver) ;
}
1745
1746
1747module_init(olympic_pci_init) ;
1748module_exit(olympic_pci_cleanup) ;
1749
1750MODULE_LICENSE("GPL");
diff --git a/drivers/net/tokenring/olympic.h b/drivers/net/tokenring/olympic.h
new file mode 100644
index 00000000000..30631bae4c9
--- /dev/null
+++ b/drivers/net/tokenring/olympic.h
@@ -0,0 +1,321 @@
1/*
2 * olympic.h (c) 1999 Peter De Schrijver All Rights Reserved
3 * 1999,2000 Mike Phillips (mikep@linuxtr.net)
4 *
5 * Linux driver for IBM PCI tokenring cards based on the olympic and the PIT/PHY chipset.
6 *
7 * Base Driver Skeleton:
8 * Written 1993-94 by Donald Becker.
9 *
10 * Copyright 1993 United States Government as represented by the
11 * Director, National Security Agency.
12 *
13 * This software may be used and distributed according to the terms
14 * of the GNU General Public License, incorporated herein by reference.
15 */
16
17#define CID 0x4e
18
19#define BCTL 0x70
20#define BCTL_SOFTRESET (1<<15)
21#define BCTL_MIMREB (1<<6)
22#define BCTL_MODE_INDICATOR (1<<5)
23
24#define GPR 0x4a
25#define GPR_OPTI_BF (1<<6)
26#define GPR_NEPTUNE_BF (1<<4)
27#define GPR_AUTOSENSE (1<<2)
28#define GPR_16MBPS (1<<3)
29
30#define PAG 0x85
31#define LBC 0x8e
32
33#define LISR 0x10
34#define LISR_SUM 0x14
35#define LISR_RWM 0x18
36
37#define LISR_LIE (1<<15)
38#define LISR_SLIM (1<<13)
39#define LISR_SLI (1<<12)
40#define LISR_PCMSRMASK (1<<11)
41#define LISR_PCMSRINT (1<<10)
42#define LISR_WOLMASK (1<<9)
43#define LISR_WOL (1<<8)
44#define LISR_SRB_CMD (1<<5)
45#define LISR_ASB_REPLY (1<<4)
46#define LISR_ASB_FREE_REQ (1<<2)
47#define LISR_ARB_FREE (1<<1)
48#define LISR_TRB_FRAME (1<<0)
49
50#define SISR 0x20
51#define SISR_SUM 0x24
52#define SISR_RWM 0x28
53#define SISR_RR 0x2C
54#define SISR_RESMASK 0x30
55#define SISR_MASK 0x54
56#define SISR_MASK_SUM 0x58
57#define SISR_MASK_RWM 0x5C
58
59#define SISR_TX2_IDLE (1<<31)
60#define SISR_TX2_HALT (1<<29)
61#define SISR_TX2_EOF (1<<28)
62#define SISR_TX1_IDLE (1<<27)
63#define SISR_TX1_HALT (1<<25)
64#define SISR_TX1_EOF (1<<24)
65#define SISR_TIMEOUT (1<<23)
66#define SISR_RX_NOBUF (1<<22)
67#define SISR_RX_STATUS (1<<21)
68#define SISR_RX_HALT (1<<18)
69#define SISR_RX_EOF_EARLY (1<<16)
70#define SISR_MI (1<<15)
71#define SISR_PI (1<<13)
72#define SISR_ERR (1<<9)
73#define SISR_ADAPTER_CHECK (1<<6)
74#define SISR_SRB_REPLY (1<<5)
75#define SISR_ASB_FREE (1<<4)
76#define SISR_ARB_CMD (1<<3)
77#define SISR_TRB_REPLY (1<<2)
78
79#define EISR 0x34
80#define EISR_RWM 0x38
81#define EISR_MASK 0x3c
82#define EISR_MASK_OPTIONS 0x001FFF7F
83
84#define LAPA 0x60
85#define LAPWWO 0x64
86#define LAPWWC 0x68
87#define LAPCTL 0x6C
88#define LAIPD 0x78
89#define LAIPDDINC 0x7C
90
91#define TIMER 0x50
92
93#define CLKCTL 0x74
94#define CLKCTL_PAUSE (1<<15)
95
96#define PM_CON 0x4
97
98#define BMCTL_SUM 0x40
99#define BMCTL_RWM 0x44
100#define BMCTL_TX2_DIS (1<<30)
101#define BMCTL_TX1_DIS (1<<26)
102#define BMCTL_RX_DIS (1<<22)
103
104#define BMASR 0xcc
105
106#define RXDESCQ 0x90
107#define RXDESCQCNT 0x94
108#define RXCDA 0x98
109#define RXENQ 0x9C
110#define RXSTATQ 0xA0
111#define RXSTATQCNT 0xA4
112#define RXCSA 0xA8
113#define RXCLEN 0xAC
114#define RXHLEN 0xAE
115
116#define TXDESCQ_1 0xb0
117#define TXDESCQ_2 0xd0
118#define TXDESCQCNT_1 0xb4
119#define TXDESCQCNT_2 0xd4
120#define TXCDA_1 0xb8
121#define TXCDA_2 0xd8
122#define TXENQ_1 0xbc
123#define TXENQ_2 0xdc
124#define TXSTATQ_1 0xc0
125#define TXSTATQ_2 0xe0
126#define TXSTATQCNT_1 0xc4
127#define TXSTATQCNT_2 0xe4
128#define TXCSA_1 0xc8
129#define TXCSA_2 0xe8
130/* Cardbus */
131#define FERMASK 0xf4
132#define FERMASK_INT_BIT (1<<15)
133
134#define OLYMPIC_IO_SPACE 256
135
136#define SRB_COMMAND_SIZE 50
137
138#define OLYMPIC_MAX_ADAPTERS 8 /* 0x08 __MODULE_STRING can't handle 0xnn */
139
140/* Defines for LAN STATUS CHANGE reports */
141#define LSC_SIG_LOSS 0x8000
142#define LSC_HARD_ERR 0x4000
143#define LSC_SOFT_ERR 0x2000
144#define LSC_TRAN_BCN 0x1000
145#define LSC_LWF 0x0800
146#define LSC_ARW 0x0400
147#define LSC_FPE 0x0200
148#define LSC_RR 0x0100
149#define LSC_CO 0x0080
150#define LSC_SS 0x0040
151#define LSC_RING_REC 0x0020
152#define LSC_SR_CO 0x0010
153#define LSC_FDX_MODE 0x0004
154
155/* Defines for OPEN ADAPTER command */
156
157#define OPEN_ADAPTER_EXT_WRAP (1<<15)
158#define OPEN_ADAPTER_DIS_HARDEE (1<<14)
159#define OPEN_ADAPTER_DIS_SOFTERR (1<<13)
160#define OPEN_ADAPTER_PASS_ADC_MAC (1<<12)
161#define OPEN_ADAPTER_PASS_ATT_MAC (1<<11)
162#define OPEN_ADAPTER_ENABLE_EC (1<<10)
163#define OPEN_ADAPTER_CONTENDER (1<<8)
164#define OPEN_ADAPTER_PASS_BEACON (1<<7)
165#define OPEN_ADAPTER_ENABLE_FDX (1<<6)
166#define OPEN_ADAPTER_ENABLE_RPL (1<<5)
167#define OPEN_ADAPTER_INHIBIT_ETR (1<<4)
168#define OPEN_ADAPTER_INTERNAL_WRAP (1<<3)
169#define OPEN_ADAPTER_USE_OPTS2 (1<<0)
170
171#define OPEN_ADAPTER_2_ENABLE_ONNOW (1<<15)
172
173/* Defines for SRB Commands */
174
175#define SRB_ACCESS_REGISTER 0x1f
176#define SRB_CLOSE_ADAPTER 0x04
177#define SRB_CONFIGURE_BRIDGE 0x0c
178#define SRB_CONFIGURE_WAKEUP_EVENT 0x1a
179#define SRB_MODIFY_BRIDGE_PARMS 0x15
180#define SRB_MODIFY_OPEN_OPTIONS 0x01
181#define SRB_MODIFY_RECEIVE_OPTIONS 0x17
182#define SRB_NO_OPERATION 0x00
183#define SRB_OPEN_ADAPTER 0x03
184#define SRB_READ_LOG 0x08
185#define SRB_READ_SR_COUNTERS 0x16
186#define SRB_RESET_GROUP_ADDRESS 0x02
187#define SRB_SAVE_CONFIGURATION 0x1b
188#define SRB_SET_BRIDGE_PARMS 0x09
189#define SRB_SET_BRIDGE_TARGETS 0x10
190#define SRB_SET_FUNC_ADDRESS 0x07
191#define SRB_SET_GROUP_ADDRESS 0x06
192#define SRB_SET_GROUP_ADDR_OPTIONS 0x11
193#define SRB_UPDATE_WAKEUP_PATTERN 0x19
194
195/* Clear return code */
196
197#define OLYMPIC_CLEAR_RET_CODE 0xfe
198
199/* ARB Commands */
200#define ARB_RECEIVE_DATA 0x81
201#define ARB_LAN_CHANGE_STATUS 0x84
202/* ASB Response commands */
203
204#define ASB_RECEIVE_DATA 0x81
205
206
207/* Olympic defaults for buffers */
208
209#define OLYMPIC_RX_RING_SIZE 16 /* should be a power of 2 */
210#define OLYMPIC_TX_RING_SIZE 8 /* should be a power of 2 */
211
212#define PKT_BUF_SZ 4096 /* Default packet size */
213
214/* Olympic data structures */
215
216/* xxxx These structures are all little endian in hardware. */
217
/* Hardware transmit descriptor: DMA buffer address plus a combined
 * status/length word.  Little endian, as noted above. */
struct olympic_tx_desc {
	__le32 buffer;
	__le32 status_length;
};
222
/* Hardware transmit completion status word (little endian). */
struct olympic_tx_status {
	__le32 status;
};
226
/* Hardware receive descriptor: DMA buffer address plus a combined
 * reserved/length word (little endian). */
struct olympic_rx_desc {
	__le32 buffer;
	__le32 res_length;
};
231
/* Hardware receive completion entry: fragment count / frame length in
 * one word, status / buffer count in the other (little endian). */
struct olympic_rx_status {
	__le32 fragmentcnt_framelen;
	__le32 status_buffercnt;
};
236/* xxxx END These structures are all little endian in hardware. */
237/* xxxx There may be more, but I'm pretty sure about these */
238
/* Layout of an on-adapter receive buffer for MAC frames (little
 * endian).  NOTE(review): frame_data appears to mark the first byte of
 * the frame contents, i.e. it is effectively a variable-length tail --
 * confirm against the adapter documentation before relying on sizeof. */
struct mac_receive_buffer {
	__le16 next ;
	u8 padding ;
	u8 frame_status ;
	__le16 buffer_length ;
	u8 frame_data ;
};
246
/* Per-adapter driver state, hung off the net_device private area. */
struct olympic_private {

	/* NOTE(review): these look like adapter-memory offsets of the
	 * shared SRB/TRB/ARB/ASB command blocks -- confirm against the
	 * probe code that initializes them. */
	u16 srb;      /* be16 */
	u16 trb;      /* be16 */
	u16 arb;      /* be16 */
	u16 asb;      /* be16 */

	u8 __iomem *olympic_mmio;          /* ioremapped register window */
	u8 __iomem *olympic_lap;           /* ioremapped LAP window */
	struct pci_dev *pdev ;
	const char *olympic_card_name;

	spinlock_t olympic_lock ;

	volatile int srb_queued;    /* True if an SRB is still posted */
	wait_queue_head_t srb_wait;

	volatile int asb_queued;    /* True if an ASB is posted */

	volatile int trb_queued;    /* True if a TRB is posted */
	wait_queue_head_t trb_wait ;

	/* These must be on a 4 byte boundary. */
	struct olympic_rx_desc olympic_rx_ring[OLYMPIC_RX_RING_SIZE];
	struct olympic_tx_desc olympic_tx_ring[OLYMPIC_TX_RING_SIZE];
	struct olympic_rx_status olympic_rx_status_ring[OLYMPIC_RX_RING_SIZE];
	struct olympic_tx_status olympic_tx_status_ring[OLYMPIC_TX_RING_SIZE];

	/* skbs currently owned by each ring slot, and ring cursors. */
	struct sk_buff *tx_ring_skb[OLYMPIC_TX_RING_SIZE], *rx_ring_skb[OLYMPIC_RX_RING_SIZE];
	int tx_ring_free, tx_ring_last_status, rx_ring_last_received,rx_status_last_received, free_tx_ring_entries;

	u16 olympic_lan_status ;
	u8 olympic_ring_speed ;
	u16 pkt_buf_sz ;                   /* receive buffer size */
	u8 olympic_receive_options, olympic_copy_all_options,olympic_message_level, olympic_network_monitor;
	u16 olympic_addr_table_addr, olympic_parms_addr ;
	u8 olympic_laa[6] ;                /* locally administered address */
	/* Bus (DMA) addresses of the four rings declared above. */
	u32 rx_ring_dma_addr;
	u32 rx_status_ring_dma_addr;
	u32 tx_ring_dma_addr;
	u32 tx_status_ring_dma_addr;
};
289
/* Layout of the adapter's address table (node and functional address)
 * as read through the LAP window at olympic_addr_table_addr. */
struct olympic_adapter_addr_table {

	u8 node_addr[6] ;
	u8 reserved[4] ;
	u8 func_addr[4] ;
} ;
296
/* Layout of the adapter's parameter table.  Never instantiated by the
 * driver: it is used only via offsetof() to compute readb() offsets
 * into the LAP window (see the /proc dump code), so field order and
 * sizes must exactly mirror the adapter's layout. */
struct olympic_parameters_table {

	u8  phys_addr[4] ;
	u8  up_node_addr[6] ;
	u8  up_phys_addr[4] ;
	u8  poll_addr[6] ;
	u16 reserved ;
	u16 acc_priority ;
	u16 auth_source_class ;
	u16 att_code ;
	u8  source_addr[6] ;
	u16 beacon_type ;
	u16 major_vector ;
	u16 lan_status ;
	u16 soft_error_time ;
	u16 reserved1 ;
	u16 local_ring ;
	u16 mon_error ;
	u16 beacon_transmit ;
	u16 beacon_receive ;
	u16 frame_correl ;
	u8  beacon_naun[6] ;
	u32 reserved2 ;
	u8  beacon_phys[4] ;
};
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
new file mode 100644
index 00000000000..8d362e64a40
--- /dev/null
+++ b/drivers/net/tokenring/proteon.c
@@ -0,0 +1,423 @@
1/*
2 * proteon.c: A network driver for Proteon ISA token ring cards.
3 *
4 * Based on tmspci written 1999 by Adam Fritzler
5 *
6 * Written 2003 by Jochen Friedrich
7 *
8 * This software may be used and distributed according to the terms
9 * of the GNU General Public License, incorporated herein by reference.
10 *
11 * This driver module supports the following cards:
12 * - Proteon 1392, 1392+
13 *
14 * Maintainer(s):
15 * AF Adam Fritzler
16 * JF Jochen Friedrich jochen@scram.de
17 *
18 * Modification History:
19 * 02-Jan-03 JF Created
20 *
21 */
22static const char version[] = "proteon.c: v1.00 02/01/2003 by Jochen Friedrich\n";
23
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/delay.h>
27#include <linux/errno.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/trdevice.h>
32#include <linux/platform_device.h>
33
34#include <asm/system.h>
35#include <asm/io.h>
36#include <asm/irq.h>
37#include <asm/pci.h>
38#include <asm/dma.h>
39
40#include "tms380tr.h"
41
42#define PROTEON_IO_EXTENT 32
43
44/* A zero-terminated list of I/O addresses to be probed. */
45static unsigned int portlist[] __initdata = {
46 0x0A20, 0x0E20, 0x1A20, 0x1E20, 0x2A20, 0x2E20, 0x3A20, 0x3E20,// Prot.
47 0x4A20, 0x4E20, 0x5A20, 0x5E20, 0x6A20, 0x6E20, 0x7A20, 0x7E20,// Prot.
48 0x8A20, 0x8E20, 0x9A20, 0x9E20, 0xAA20, 0xAE20, 0xBA20, 0xBE20,// Prot.
49 0xCA20, 0xCE20, 0xDA20, 0xDE20, 0xEA20, 0xEE20, 0xFA20, 0xFE20,// Prot.
50 0
51};
52
53/* A zero-terminated list of IRQs to be probed. */
54static unsigned short irqlist[] = {
55 7, 6, 5, 4, 3, 12, 11, 10, 9,
56 0
57};
58
59/* A zero-terminated list of DMAs to be probed. */
60static int dmalist[] __initdata = {
61 5, 6, 7,
62 0
63};
64
65static char cardname[] = "Proteon 1392\0";
66static u64 dma_mask = ISA_MAX_ADDRESS;
67static int proteon_open(struct net_device *dev);
68static void proteon_read_eeprom(struct net_device *dev);
69static unsigned short proteon_setnselout_pins(struct net_device *dev);
70
71static unsigned short proteon_sifreadb(struct net_device *dev, unsigned short reg)
72{
73 return inb(dev->base_addr + reg);
74}
75
76static unsigned short proteon_sifreadw(struct net_device *dev, unsigned short reg)
77{
78 return inw(dev->base_addr + reg);
79}
80
81static void proteon_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
82{
83 outb(val, dev->base_addr + reg);
84}
85
86static void proteon_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
87{
88 outw(val, dev->base_addr + reg);
89}
90
91static int __init proteon_probe1(struct net_device *dev, int ioaddr)
92{
93 unsigned char chk1, chk2;
94 int i;
95
96 if (!request_region(ioaddr, PROTEON_IO_EXTENT, cardname))
97 return -ENODEV;
98
99
100 chk1 = inb(ioaddr + 0x1f); /* Get Proteon ID reg 1 */
101 if (chk1 != 0x1f)
102 goto nodev;
103
104 chk1 = inb(ioaddr + 0x1e) & 0x07; /* Get Proteon ID reg 0 */
105 for (i=0; i<16; i++) {
106 chk2 = inb(ioaddr + 0x1e) & 0x07;
107 if (((chk1 + 1) & 0x07) != chk2)
108 goto nodev;
109 chk1 = chk2;
110 }
111
112 dev->base_addr = ioaddr;
113 return 0;
114nodev:
115 release_region(ioaddr, PROTEON_IO_EXTENT);
116 return -ENODEV;
117}
118
119static struct net_device_ops proteon_netdev_ops __read_mostly;
120
/*
 * Locate and initialise one Proteon adapter.
 *
 * If dev->base_addr was supplied (module parameter) only that address
 * is probed; otherwise every address in portlist[] is tried.  On
 * success the tms380tr core is initialised, the MAC address read from
 * the board, the IRQ and DMA channel auto-selected or validated
 * against the supported lists, and the net device registered.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is rolled back through the chained labels below.
 */
static int __init setup_card(struct net_device *dev, struct device *pdev)
{
	struct net_local *tp;
	static int versionprinted;	/* print the version banner only once */
	const unsigned *port;
	int j,err = 0;

	if (!dev)
		return -ENOMEM;

	if (dev->base_addr)	/* probe specific location */
		err = proteon_probe1(dev, dev->base_addr);
	else {
		for (port = portlist; *port; port++) {
			err = proteon_probe1(dev, *port);
			if (!err)
				break;
		}
	}
	if (err)
		goto out5;

	/* At this point we have found a valid card. */

	if (versionprinted++ == 0)
		printk(KERN_DEBUG "%s", version);

	err = -EIO;
	/* ISA bus-master DMA cannot address memory above ISA_MAX_ADDRESS. */
	pdev->dma_mask = &dma_mask;
	if (tmsdev_init(dev, pdev))
		goto out4;

	dev->base_addr &= ~3;

	proteon_read_eeprom(dev);

	printk(KERN_DEBUG "proteon.c: Ring Station Address: %pM\n",
	       dev->dev_addr);

	/* Hook our port-I/O SIF accessors into the tms380tr core. */
	tp = netdev_priv(dev);
	tp->setnselout = proteon_setnselout_pins;

	tp->sifreadb = proteon_sifreadb;
	tp->sifreadw = proteon_sifreadw;
	tp->sifwriteb = proteon_sifwriteb;
	tp->sifwritew = proteon_sifwritew;

	memcpy(tp->ProductID, cardname, PROD_ID_SIZE + 1);

	tp->tmspriv = NULL;

	dev->netdev_ops = &proteon_netdev_ops;

	/* IRQ: auto-probe the supported list, or validate the given one. */
	if (dev->irq == 0)
	{
		for(j = 0; irqlist[j] != 0; j++)
		{
			dev->irq = irqlist[j];
			if (!request_irq(dev->irq, tms380tr_interrupt, 0,
				cardname, dev))
				break;
		}

		if(irqlist[j] == 0)
		{
			printk(KERN_INFO "proteon.c: AutoSelect no IRQ available\n");
			goto out3;
		}
	}
	else
	{
		for(j = 0; irqlist[j] != 0; j++)
			if (irqlist[j] == dev->irq)
				break;
		if (irqlist[j] == 0)
		{
			printk(KERN_INFO "proteon.c: Illegal IRQ %d specified\n",
				dev->irq);
			goto out3;
		}
		if (request_irq(dev->irq, tms380tr_interrupt, 0,
			cardname, dev))
		{
			printk(KERN_INFO "proteon.c: Selected IRQ %d not available\n",
				dev->irq);
			goto out3;
		}
	}

	/* DMA: same auto-probe / validate dance as for the IRQ. */
	if (dev->dma == 0)
	{
		for(j = 0; dmalist[j] != 0; j++)
		{
			dev->dma = dmalist[j];
			if (!request_dma(dev->dma, cardname))
				break;
		}

		if(dmalist[j] == 0)
		{
			printk(KERN_INFO "proteon.c: AutoSelect no DMA available\n");
			goto out2;
		}
	}
	else
	{
		for(j = 0; dmalist[j] != 0; j++)
			if (dmalist[j] == dev->dma)
				break;
		if (dmalist[j] == 0)
		{
			printk(KERN_INFO "proteon.c: Illegal DMA %d specified\n",
				dev->dma);
			goto out2;
		}
		if (request_dma(dev->dma, cardname))
		{
			printk(KERN_INFO "proteon.c: Selected DMA %d not available\n",
				dev->dma);
			goto out2;
		}
	}

	err = register_netdev(dev);
	if (err)
		goto out;

	printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
	       dev->name, dev->base_addr, dev->irq, dev->dma);

	return 0;
	/* Unwind in reverse order of acquisition. */
out:
	free_dma(dev->dma);
out2:
	free_irq(dev->irq, dev);
out3:
	tmsdev_term(dev);
out4:
	release_region(dev->base_addr, PROTEON_IO_EXTENT);
out5:
	return err;
}
263
264/*
265 * Reads MAC address from adapter RAM, which should've read it from
266 * the onboard ROM.
267 *
268 * Calling this on a board that does not support it can be a very
269 * dangerous thing. The Madge board, for instance, will lock your
270 * machine hard when this is called. Luckily, it's supported in a
271 * separate driver. --ASF
272 */
273static void proteon_read_eeprom(struct net_device *dev)
274{
275 int i;
276
277 /* Address: 0000:0000 */
278 proteon_sifwritew(dev, 0, SIFADX);
279 proteon_sifwritew(dev, 0, SIFADR);
280
281 /* Read six byte MAC address data */
282 dev->addr_len = 6;
283 for(i = 0; i < 6; i++)
284 dev->dev_addr[i] = proteon_sifreadw(dev, SIFINC) >> 8;
285}
286
/* NSELOUT pin hook required by the tms380tr core; the Proteon boards
 * need no extra bits set, so always report 0. */
static unsigned short proteon_setnselout_pins(struct net_device *dev)
{
	return 0;
}
291
/*
 * Bring the interface up: run the board's reset sequence, program the
 * control/status register for the configured ring speed, encode the
 * IRQ (as its index within irqlist[]) and the DMA channel into
 * register 0x13, then hand over to the tms380tr core.
 */
static int proteon_open(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned short val = 0;
	int i;

	/* Proteon reset sequence: pulse the reset bit with settle delays. */
	outb(0, dev->base_addr + 0x11);
	mdelay(20);
	outb(0x04, dev->base_addr + 0x11);
	mdelay(20);
	outb(0, dev->base_addr + 0x11);
	mdelay(100);

	/* set control/status reg
	 * NOTE(review): the 0x78/0xf9/0x20 masks come from the Proteon
	 * register layout, which is not documented here -- confirm
	 * against the hardware reference before changing. */
	val = inb(dev->base_addr + 0x11);
	val |= 0x78;
	val &= 0xf9;
	if(tp->DataRate == SPEED_4)
		val |= 0x20;
	else
		val &= ~0x20;

	outb(val, dev->base_addr + 0x11);
	outb(0xff, dev->base_addr + 0x12);
	/* Encode the IRQ as its index in the supported-IRQ list. */
	for(i = 0; irqlist[i] != 0; i++)
	{
		if(irqlist[i] == dev->irq)
			break;
	}
	val = i;
	/* DMA channel select lives in the high nibble. */
	i = (7 - dev->dma) << 4;
	val |= i;
	outb(val, dev->base_addr + 0x13);

	return tms380tr_open(dev);
}
329
330#define ISATR_MAX_ADAPTERS 3
331
332static int io[ISATR_MAX_ADAPTERS];
333static int irq[ISATR_MAX_ADAPTERS];
334static int dma[ISATR_MAX_ADAPTERS];
335
336MODULE_LICENSE("GPL");
337
338module_param_array(io, int, NULL, 0);
339module_param_array(irq, int, NULL, 0);
340module_param_array(dma, int, NULL, 0);
341
342static struct platform_device *proteon_dev[ISATR_MAX_ADAPTERS];
343
344static struct platform_driver proteon_driver = {
345 .driver = {
346 .name = "proteon",
347 },
348};
349
/*
 * Module entry point: clone the generic tms380tr netdev ops with our
 * open/stop handlers, then attempt to set up ISATR_MAX_ADAPTERS
 * adapters, each bound to its own platform device.  The io[]/irq[]/
 * dma[] module parameters seed each probe (0 means auto-select).
 * Fails with -ENODEV when no card at all was found.
 */
static int __init proteon_init(void)
{
	struct net_device *dev;
	struct platform_device *pdev;
	int i, num = 0, err = 0;

	proteon_netdev_ops = tms380tr_netdev_ops;
	proteon_netdev_ops.ndo_open = proteon_open;
	proteon_netdev_ops.ndo_stop = tms380tr_close;

	err = platform_driver_register(&proteon_driver);
	if (err)
		return err;

	/* Probe for cards. */
	for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
		dev = alloc_trdev(sizeof(struct net_local));
		if (!dev)
			continue;

		dev->base_addr = io[i];
		dev->irq = irq[i];
		dev->dma = dma[i];
		pdev = platform_device_register_simple("proteon",
			i, NULL, 0);
		if (IS_ERR(pdev)) {
			free_netdev(dev);
			continue;
		}
		err = setup_card(dev, &pdev->dev);
		if (!err) {
			proteon_dev[i] = pdev;
			platform_set_drvdata(pdev, dev);
			++num;
		} else {
			/* Probe failed: drop this slot's devices. */
			platform_device_unregister(pdev);
			free_netdev(dev);
		}
	}

	printk(KERN_NOTICE "proteon.c: %d cards found.\n", num);
	if (num == 0) {
		printk(KERN_NOTICE "proteon.c: No cards found.\n");
		platform_driver_unregister(&proteon_driver);
		return -ENODEV;
	}
	return 0;
}
398
/*
 * Module exit: for each adapter registered at init time, unregister
 * the net device and release its resources in reverse order of
 * acquisition, then drop the platform devices and the driver itself.
 */
static void __exit proteon_cleanup(void)
{
	struct net_device *dev;
	int i;

	for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
		struct platform_device *pdev = proteon_dev[i];

		if (!pdev)	/* slot never got a working card */
			continue;
		dev = platform_get_drvdata(pdev);
		unregister_netdev(dev);
		release_region(dev->base_addr, PROTEON_IO_EXTENT);
		free_irq(dev->irq, dev);
		free_dma(dev->dma);
		tmsdev_term(dev);
		free_netdev(dev);
		platform_set_drvdata(pdev, NULL);
		platform_device_unregister(pdev);
	}
	platform_driver_unregister(&proteon_driver);
}
421
422module_init(proteon_init);
423module_exit(proteon_cleanup);
diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
new file mode 100644
index 00000000000..46db5c5395b
--- /dev/null
+++ b/drivers/net/tokenring/skisa.c
@@ -0,0 +1,433 @@
1/*
2 * skisa.c: A network driver for SK-NET TMS380-based ISA token ring cards.
3 *
4 * Based on tmspci written 1999 by Adam Fritzler
5 *
6 * Written 2000 by Jochen Friedrich
7 * Dedicated to my girlfriend Steffi Bopp
8 *
9 * This software may be used and distributed according to the terms
10 * of the GNU General Public License, incorporated herein by reference.
11 *
12 * This driver module supports the following cards:
13 * - SysKonnect TR4/16(+) ISA (SK-4190)
14 *
15 * Maintainer(s):
16 * AF Adam Fritzler
17 * JF Jochen Friedrich jochen@scram.de
18 *
19 * Modification History:
20 * 14-Jan-01 JF Created
21 * 28-Oct-02 JF Fixed probe of card for static compilation.
22 * Fixed module init to not make hotplug go wild.
23 * 09-Nov-02 JF Fixed early bail out on out of memory
24 * situations if multiple cards are found.
25 * Cleaned up some unnecessary console SPAM.
26 * 09-Dec-02 JF Fixed module reference counting.
27 * 02-Jan-03 JF Renamed to skisa.c
28 *
29 */
30static const char version[] = "skisa.c: v1.03 09/12/2002 by Jochen Friedrich\n";
31
32#include <linux/module.h>
33#include <linux/kernel.h>
34#include <linux/errno.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/netdevice.h>
38#include <linux/trdevice.h>
39#include <linux/platform_device.h>
40
41#include <asm/system.h>
42#include <asm/io.h>
43#include <asm/irq.h>
44#include <asm/pci.h>
45#include <asm/dma.h>
46
47#include "tms380tr.h"
48
49#define SK_ISA_IO_EXTENT 32
50
51/* A zero-terminated list of I/O addresses to be probed. */
52static unsigned int portlist[] __initdata = {
53 0x0A20, 0x1A20, 0x0B20, 0x1B20, 0x0980, 0x1980, 0x0900, 0x1900,// SK
54 0
55};
56
57/* A zero-terminated list of IRQs to be probed.
58 * Used again after initial probe for sktr_chipset_init, called from sktr_open.
59 */
60static const unsigned short irqlist[] = {
61 3, 5, 9, 10, 11, 12, 15,
62 0
63};
64
65/* A zero-terminated list of DMAs to be probed. */
66static int dmalist[] __initdata = {
67 5, 6, 7,
68 0
69};
70
71static char isa_cardname[] = "SK NET TR 4/16 ISA\0";
72static u64 dma_mask = ISA_MAX_ADDRESS;
73static int sk_isa_open(struct net_device *dev);
74static void sk_isa_read_eeprom(struct net_device *dev);
75static unsigned short sk_isa_setnselout_pins(struct net_device *dev);
76
77static unsigned short sk_isa_sifreadb(struct net_device *dev, unsigned short reg)
78{
79 return inb(dev->base_addr + reg);
80}
81
82static unsigned short sk_isa_sifreadw(struct net_device *dev, unsigned short reg)
83{
84 return inw(dev->base_addr + reg);
85}
86
87static void sk_isa_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
88{
89 outb(val, dev->base_addr + reg);
90}
91
92static void sk_isa_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
93{
94 outw(val, dev->base_addr + reg);
95}
96
97
/*
 * Probe one candidate I/O address for an SK-4190.
 *
 * Walks all 128 even values of the SIF address register: each value is
 * written, the SIFADD shadow read back, complemented, rewritten and
 * re-read; a genuine card reproduces every value exactly.  The
 * original SIFADR content is restored afterwards.  On success the I/O
 * region stays reserved and dev->base_addr is recorded.
 */
static int __init sk_isa_probe1(struct net_device *dev, int ioaddr)
{
	unsigned char old, chk1, chk2;

	if (!request_region(ioaddr, SK_ISA_IO_EXTENT, isa_cardname))
		return -ENODEV;

	old = inb(ioaddr + SIFADR);	/* Get the old SIFADR value */

	chk1 = 0;	/* Begin with check value 0 */
	do {
		/* Write new SIFADR value */
		outb(chk1, ioaddr + SIFADR);

		/* Read, invert and write */
		chk2 = inb(ioaddr + SIFADD);
		chk2 ^= 0x0FE;
		outb(chk2, ioaddr + SIFADR);

		/* Read, invert and compare */
		chk2 = inb(ioaddr + SIFADD);
		chk2 ^= 0x0FE;

		if(chk1 != chk2) {
			/* Mismatch: no card here. */
			release_region(ioaddr, SK_ISA_IO_EXTENT);
			return -ENODEV;
		}

		chk1 -= 2;	/* unsigned wrap: 0, 254, 252, ... back to 0 */
	} while(chk1 != 0);	/* Repeat 128 times (all byte values) */

	/* Restore the SIFADR value */
	outb(old, ioaddr + SIFADR);

	dev->base_addr = ioaddr;
	return 0;
}
135
136static struct net_device_ops sk_isa_netdev_ops __read_mostly;
137
/*
 * Locate and initialise one SK-NET ISA adapter.
 *
 * If dev->base_addr was supplied (module parameter) only that address
 * is probed; otherwise every address in portlist[] is tried.  On
 * success the tms380tr core is initialised, the MAC address read from
 * the board, the IRQ and DMA channel auto-selected or validated
 * against the supported lists, and the net device registered.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is rolled back through the chained labels below.
 */
static int __init setup_card(struct net_device *dev, struct device *pdev)
{
	struct net_local *tp;
	static int versionprinted;	/* print the version banner only once */
	const unsigned *port;
	int j, err = 0;

	if (!dev)
		return -ENOMEM;

	if (dev->base_addr)	/* probe specific location */
		err = sk_isa_probe1(dev, dev->base_addr);
	else {
		for (port = portlist; *port; port++) {
			err = sk_isa_probe1(dev, *port);
			if (!err)
				break;
		}
	}
	if (err)
		goto out5;

	/* At this point we have found a valid card. */

	if (versionprinted++ == 0)
		printk(KERN_DEBUG "%s", version);

	err = -EIO;
	/* ISA bus-master DMA cannot address memory above ISA_MAX_ADDRESS. */
	pdev->dma_mask = &dma_mask;
	if (tmsdev_init(dev, pdev))
		goto out4;

	dev->base_addr &= ~3;

	sk_isa_read_eeprom(dev);

	printk(KERN_DEBUG "skisa.c: Ring Station Address: %pM\n",
	       dev->dev_addr);

	/* Hook our port-I/O SIF accessors into the tms380tr core. */
	tp = netdev_priv(dev);
	tp->setnselout = sk_isa_setnselout_pins;

	tp->sifreadb = sk_isa_sifreadb;
	tp->sifreadw = sk_isa_sifreadw;
	tp->sifwriteb = sk_isa_sifwriteb;
	tp->sifwritew = sk_isa_sifwritew;

	memcpy(tp->ProductID, isa_cardname, PROD_ID_SIZE + 1);

	tp->tmspriv = NULL;

	dev->netdev_ops = &sk_isa_netdev_ops;

	/* IRQ: auto-probe the supported list, or validate the given one. */
	if (dev->irq == 0)
	{
		for(j = 0; irqlist[j] != 0; j++)
		{
			dev->irq = irqlist[j];
			if (!request_irq(dev->irq, tms380tr_interrupt, 0,
				isa_cardname, dev))
				break;
		}

		if(irqlist[j] == 0)
		{
			printk(KERN_INFO "skisa.c: AutoSelect no IRQ available\n");
			goto out3;
		}
	}
	else
	{
		for(j = 0; irqlist[j] != 0; j++)
			if (irqlist[j] == dev->irq)
				break;
		if (irqlist[j] == 0)
		{
			printk(KERN_INFO "skisa.c: Illegal IRQ %d specified\n",
				dev->irq);
			goto out3;
		}
		if (request_irq(dev->irq, tms380tr_interrupt, 0,
			isa_cardname, dev))
		{
			printk(KERN_INFO "skisa.c: Selected IRQ %d not available\n",
				dev->irq);
			goto out3;
		}
	}

	/* DMA: same auto-probe / validate dance as for the IRQ. */
	if (dev->dma == 0)
	{
		for(j = 0; dmalist[j] != 0; j++)
		{
			dev->dma = dmalist[j];
			if (!request_dma(dev->dma, isa_cardname))
				break;
		}

		if(dmalist[j] == 0)
		{
			printk(KERN_INFO "skisa.c: AutoSelect no DMA available\n");
			goto out2;
		}
	}
	else
	{
		for(j = 0; dmalist[j] != 0; j++)
			if (dmalist[j] == dev->dma)
				break;
		if (dmalist[j] == 0)
		{
			printk(KERN_INFO "skisa.c: Illegal DMA %d specified\n",
				dev->dma);
			goto out2;
		}
		if (request_dma(dev->dma, isa_cardname))
		{
			printk(KERN_INFO "skisa.c: Selected DMA %d not available\n",
				dev->dma);
			goto out2;
		}
	}

	err = register_netdev(dev);
	if (err)
		goto out;

	printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
	       dev->name, dev->base_addr, dev->irq, dev->dma);

	return 0;
	/* Unwind in reverse order of acquisition. */
out:
	free_dma(dev->dma);
out2:
	free_irq(dev->irq, dev);
out3:
	tmsdev_term(dev);
out4:
	release_region(dev->base_addr, SK_ISA_IO_EXTENT);
out5:
	return err;
}
280
281/*
282 * Reads MAC address from adapter RAM, which should've read it from
283 * the onboard ROM.
284 *
285 * Calling this on a board that does not support it can be a very
286 * dangerous thing. The Madge board, for instance, will lock your
287 * machine hard when this is called. Luckily, it's supported in a
288 * separate driver. --ASF
289 */
290static void sk_isa_read_eeprom(struct net_device *dev)
291{
292 int i;
293
294 /* Address: 0000:0000 */
295 sk_isa_sifwritew(dev, 0, SIFADX);
296 sk_isa_sifwritew(dev, 0, SIFADR);
297
298 /* Read six byte MAC address data */
299 dev->addr_len = 6;
300 for(i = 0; i < 6; i++)
301 dev->dev_addr[i] = sk_isa_sifreadw(dev, SIFINC) >> 8;
302}
303
/* NSELOUT pin hook required by the tms380tr core; the SK ISA boards
 * need no extra bits set, so always report 0. */
static unsigned short sk_isa_setnselout_pins(struct net_device *dev)
{
	return 0;
}
308
/*
 * Bring the interface up: compose the board's POS register value from
 * the cycle time, the index of the configured IRQ within irqlist[],
 * the DMA channel select and the ring speed, then hand over to the
 * tms380tr core.
 */
static int sk_isa_open(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned short val = 0;
	unsigned short oldval;
	int i;

	val = 0;
	/* Encode the IRQ as its index in the supported-IRQ list. */
	for(i = 0; irqlist[i] != 0; i++)
	{
		if(irqlist[i] == dev->irq)
			break;
	}

	val |= CYCLE_TIME << 2;
	val |= i << 4;
	i = dev->dma - 5;	/* DMA channels 5..7 encode as 0..2 */
	val |= i;
	if(tp->DataRate == SPEED_4)
		val |= LINE_SPEED_BIT;
	else
		val &= ~LINE_SPEED_BIT;
	oldval = sk_isa_sifreadb(dev, POSREG);
	/* Leave cycle bits alone */
	oldval |= 0xf3;
	val &= oldval;
	sk_isa_sifwriteb(dev, val, POSREG);

	return tms380tr_open(dev);
}
339
340#define ISATR_MAX_ADAPTERS 3
341
342static int io[ISATR_MAX_ADAPTERS];
343static int irq[ISATR_MAX_ADAPTERS];
344static int dma[ISATR_MAX_ADAPTERS];
345
346MODULE_LICENSE("GPL");
347
348module_param_array(io, int, NULL, 0);
349module_param_array(irq, int, NULL, 0);
350module_param_array(dma, int, NULL, 0);
351
352static struct platform_device *sk_isa_dev[ISATR_MAX_ADAPTERS];
353
354static struct platform_driver sk_isa_driver = {
355 .driver = {
356 .name = "skisa",
357 },
358};
359
360static int __init sk_isa_init(void)
361{
362 struct net_device *dev;
363 struct platform_device *pdev;
364 int i, num = 0, err = 0;
365
366 sk_isa_netdev_ops = tms380tr_netdev_ops;
367 sk_isa_netdev_ops.ndo_open = sk_isa_open;
368 sk_isa_netdev_ops.ndo_stop = tms380tr_close;
369
370 err = platform_driver_register(&sk_isa_driver);
371 if (err)
372 return err;
373
374 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
375 dev = alloc_trdev(sizeof(struct net_local));
376 if (!dev)
377 continue;
378
379 dev->base_addr = io[i];
380 dev->irq = irq[i];
381 dev->dma = dma[i];
382 pdev = platform_device_register_simple("skisa",
383 i, NULL, 0);
384 if (IS_ERR(pdev)) {
385 free_netdev(dev);
386 continue;
387 }
388 err = setup_card(dev, &pdev->dev);
389 if (!err) {
390 sk_isa_dev[i] = pdev;
391 platform_set_drvdata(sk_isa_dev[i], dev);
392 ++num;
393 } else {
394 platform_device_unregister(pdev);
395 free_netdev(dev);
396 }
397 }
398
399 printk(KERN_NOTICE "skisa.c: %d cards found.\n", num);
400 /* Probe for cards. */
401 if (num == 0) {
402 printk(KERN_NOTICE "skisa.c: No cards found.\n");
403 platform_driver_unregister(&sk_isa_driver);
404 return -ENODEV;
405 }
406 return 0;
407}
408
/*
 * Module exit: for each adapter registered at init time, unregister
 * the net device and release its resources in reverse order of
 * acquisition, then drop the platform devices and the driver itself.
 */
static void __exit sk_isa_cleanup(void)
{
	struct net_device *dev;
	int i;

	for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
		struct platform_device *pdev = sk_isa_dev[i];

		if (!pdev)	/* slot never got a working card */
			continue;
		dev = platform_get_drvdata(pdev);
		unregister_netdev(dev);
		release_region(dev->base_addr, SK_ISA_IO_EXTENT);
		free_irq(dev->irq, dev);
		free_dma(dev->dma);
		tmsdev_term(dev);
		free_netdev(dev);
		platform_set_drvdata(pdev, NULL);
		platform_device_unregister(pdev);
	}
	platform_driver_unregister(&sk_isa_driver);
}
431
432module_init(sk_isa_init);
433module_exit(sk_isa_cleanup);
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
new file mode 100644
index 00000000000..d9044aba7af
--- /dev/null
+++ b/drivers/net/tokenring/smctr.c
@@ -0,0 +1,5718 @@
1/*
2 * smctr.c: A network driver for the SMC Token Ring Adapters.
3 *
4 * Written by Jay Schulist <jschlst@samba.org>
5 *
6 * This software may be used and distributed according to the terms
7 * of the GNU General Public License, incorporated herein by reference.
8 *
9 * This device driver works with the following SMC adapters:
10 * - SMC TokenCard Elite (8115T, chips 825/584)
11 * - SMC TokenCard Elite/A MCA (8115T/A, chips 825/594)
12 *
13 * Source(s):
14 * - SMC TokenCard SDK.
15 *
16 * Maintainer(s):
17 * JS Jay Schulist <jschlst@samba.org>
18 *
19 * Changes:
20 * 07102000 JS Fixed a timing problem in smctr_wait_cmd();
 *			Also added a bit more descriptive error msgs.
22 * 07122000 JS Fixed problem with detecting a card with
23 * module io/irq/mem specified.
24 *
25 * To do:
26 * 1. Multicast support.
27 *
28 * Initial 2.5 cleanup Alan Cox <alan@lxorguk.ukuu.org.uk> 2002/10/28
29 */
30
31#include <linux/module.h>
32#include <linux/kernel.h>
33#include <linux/types.h>
34#include <linux/fcntl.h>
35#include <linux/interrupt.h>
36#include <linux/ptrace.h>
37#include <linux/ioport.h>
38#include <linux/in.h>
39#include <linux/string.h>
40#include <linux/time.h>
41#include <linux/errno.h>
42#include <linux/init.h>
43#include <linux/mca-legacy.h>
44#include <linux/delay.h>
45#include <linux/netdevice.h>
46#include <linux/etherdevice.h>
47#include <linux/skbuff.h>
48#include <linux/trdevice.h>
49#include <linux/bitops.h>
50#include <linux/firmware.h>
51
52#include <asm/system.h>
53#include <asm/io.h>
54#include <asm/dma.h>
55#include <asm/irq.h>
56
57#if BITS_PER_LONG == 64
58#error FIXME: driver does not support 64-bit platforms
59#endif
60
61#include "smctr.h" /* Our Stuff */
62
63static const char version[] __initdata =
64 KERN_INFO "smctr.c: v1.4 7/12/00 by jschlst@samba.org\n";
65static const char cardname[] = "smctr";
66
67
68#define SMCTR_IO_EXTENT 20
69
70#ifdef CONFIG_MCA_LEGACY
71static unsigned int smctr_posid = 0x6ec6;
72#endif
73
74static int ringspeed;
75
76/* SMC Name of the Adapter. */
77static char smctr_name[] = "SMC TokenCard";
78static char *smctr_model = "Unknown";
79
80/* Use 0 for production, 1 for verification, 2 for debug, and
81 * 3 for very verbose debug.
82 */
83#ifndef SMCTR_DEBUG
84#define SMCTR_DEBUG 1
85#endif
86static unsigned int smctr_debug = SMCTR_DEBUG;
87
/* smctr.c prototypes and functions are arranged alphabetically
 * for clarity, maintainability and pure old-fashioned fun.
 */
91/* A */
92static int smctr_alloc_shared_memory(struct net_device *dev);
93
94/* B */
95static int smctr_bypass_state(struct net_device *dev);
96
97/* C */
98static int smctr_checksum_firmware(struct net_device *dev);
99static int __init smctr_chk_isa(struct net_device *dev);
100static int smctr_chg_rx_mask(struct net_device *dev);
101static int smctr_clear_int(struct net_device *dev);
102static int smctr_clear_trc_reset(int ioaddr);
103static int smctr_close(struct net_device *dev);
104
105/* D */
106static int smctr_decode_firmware(struct net_device *dev,
107 const struct firmware *fw);
108static int smctr_disable_16bit(struct net_device *dev);
109static int smctr_disable_adapter_ctrl_store(struct net_device *dev);
110static int smctr_disable_bic_int(struct net_device *dev);
111
112/* E */
113static int smctr_enable_16bit(struct net_device *dev);
114static int smctr_enable_adapter_ctrl_store(struct net_device *dev);
115static int smctr_enable_adapter_ram(struct net_device *dev);
116static int smctr_enable_bic_int(struct net_device *dev);
117
118/* G */
119static int __init smctr_get_boardid(struct net_device *dev, int mca);
120static int smctr_get_group_address(struct net_device *dev);
121static int smctr_get_functional_address(struct net_device *dev);
122static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev);
123static int smctr_get_physical_drop_number(struct net_device *dev);
124static __u8 *smctr_get_rx_pointer(struct net_device *dev, short queue);
125static int smctr_get_station_id(struct net_device *dev);
126static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
127 __u16 bytes_count);
128static int smctr_get_upstream_neighbor_addr(struct net_device *dev);
129
130/* H */
131static int smctr_hardware_send_packet(struct net_device *dev,
132 struct net_local *tp);
133/* I */
134static int smctr_init_acbs(struct net_device *dev);
135static int smctr_init_adapter(struct net_device *dev);
136static int smctr_init_card_real(struct net_device *dev);
137static int smctr_init_rx_bdbs(struct net_device *dev);
138static int smctr_init_rx_fcbs(struct net_device *dev);
139static int smctr_init_shared_memory(struct net_device *dev);
140static int smctr_init_tx_bdbs(struct net_device *dev);
141static int smctr_init_tx_fcbs(struct net_device *dev);
142static int smctr_internal_self_test(struct net_device *dev);
143static irqreturn_t smctr_interrupt(int irq, void *dev_id);
144static int smctr_issue_enable_int_cmd(struct net_device *dev,
145 __u16 interrupt_enable_mask);
146static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code,
147 __u16 ibits);
148static int smctr_issue_init_timers_cmd(struct net_device *dev);
149static int smctr_issue_init_txrx_cmd(struct net_device *dev);
150static int smctr_issue_insert_cmd(struct net_device *dev);
151static int smctr_issue_read_ring_status_cmd(struct net_device *dev);
152static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt);
153static int smctr_issue_remove_cmd(struct net_device *dev);
154static int smctr_issue_resume_acb_cmd(struct net_device *dev);
155static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue);
156static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue);
157static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue);
158static int smctr_issue_test_internal_rom_cmd(struct net_device *dev);
159static int smctr_issue_test_hic_cmd(struct net_device *dev);
160static int smctr_issue_test_mac_reg_cmd(struct net_device *dev);
161static int smctr_issue_trc_loopback_cmd(struct net_device *dev);
162static int smctr_issue_tri_loopback_cmd(struct net_device *dev);
163static int smctr_issue_write_byte_cmd(struct net_device *dev,
164 short aword_cnt, void *byte);
165static int smctr_issue_write_word_cmd(struct net_device *dev,
166 short aword_cnt, void *word);
167
168/* J */
169static int smctr_join_complete_state(struct net_device *dev);
170
171/* L */
172static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev);
173static int smctr_load_firmware(struct net_device *dev);
174static int smctr_load_node_addr(struct net_device *dev);
175static int smctr_lobe_media_test(struct net_device *dev);
176static int smctr_lobe_media_test_cmd(struct net_device *dev);
177static int smctr_lobe_media_test_state(struct net_device *dev);
178
179/* M */
180static int smctr_make_8025_hdr(struct net_device *dev,
181 MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc);
182static int smctr_make_access_pri(struct net_device *dev,
183 MAC_SUB_VECTOR *tsv);
184static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv);
185static int smctr_make_auth_funct_class(struct net_device *dev,
186 MAC_SUB_VECTOR *tsv);
187static int smctr_make_corr(struct net_device *dev,
188 MAC_SUB_VECTOR *tsv, __u16 correlator);
189static int smctr_make_funct_addr(struct net_device *dev,
190 MAC_SUB_VECTOR *tsv);
191static int smctr_make_group_addr(struct net_device *dev,
192 MAC_SUB_VECTOR *tsv);
193static int smctr_make_phy_drop_num(struct net_device *dev,
194 MAC_SUB_VECTOR *tsv);
195static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv);
196static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv);
197static int smctr_make_ring_station_status(struct net_device *dev,
198 MAC_SUB_VECTOR *tsv);
199static int smctr_make_ring_station_version(struct net_device *dev,
200 MAC_SUB_VECTOR *tsv);
201static int smctr_make_tx_status_code(struct net_device *dev,
202 MAC_SUB_VECTOR *tsv, __u16 tx_fstatus);
203static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
204 MAC_SUB_VECTOR *tsv);
205static int smctr_make_wrap_data(struct net_device *dev,
206 MAC_SUB_VECTOR *tsv);
207
208/* O */
209static int smctr_open(struct net_device *dev);
210static int smctr_open_tr(struct net_device *dev);
211
212/* P */
213struct net_device *smctr_probe(int unit);
214static int __init smctr_probe1(struct net_device *dev, int ioaddr);
215static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
216 struct net_device *dev, __u16 rx_status);
217
218/* R */
219static int smctr_ram_memory_test(struct net_device *dev);
220static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
221 __u16 *correlator);
222static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
223 __u16 *correlator);
224static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf);
225static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
226 MAC_HEADER *rmf, __u16 *correlator);
227static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
228 __u16 *correlator);
229static int smctr_reset_adapter(struct net_device *dev);
230static int smctr_restart_tx_chain(struct net_device *dev, short queue);
231static int smctr_ring_status_chg(struct net_device *dev);
232static int smctr_rx_frame(struct net_device *dev);
233
234/* S */
235static int smctr_send_dat(struct net_device *dev);
236static netdev_tx_t smctr_send_packet(struct sk_buff *skb,
237 struct net_device *dev);
238static int smctr_send_lobe_media_test(struct net_device *dev);
239static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
240 __u16 correlator);
241static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
242 __u16 correlator);
243static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
244 __u16 correlator);
245static int smctr_send_rpt_tx_forward(struct net_device *dev,
246 MAC_HEADER *rmf, __u16 tx_fstatus);
247static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
248 __u16 rcode, __u16 correlator);
249static int smctr_send_rq_init(struct net_device *dev);
250static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
251 __u16 *tx_fstatus);
252static int smctr_set_auth_access_pri(struct net_device *dev,
253 MAC_SUB_VECTOR *rsv);
254static int smctr_set_auth_funct_class(struct net_device *dev,
255 MAC_SUB_VECTOR *rsv);
256static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv,
257 __u16 *correlator);
258static int smctr_set_error_timer_value(struct net_device *dev,
259 MAC_SUB_VECTOR *rsv);
260static int smctr_set_frame_forward(struct net_device *dev,
261 MAC_SUB_VECTOR *rsv, __u8 dc_sc);
262static int smctr_set_local_ring_num(struct net_device *dev,
263 MAC_SUB_VECTOR *rsv);
264static unsigned short smctr_set_ctrl_attention(struct net_device *dev);
265static void smctr_set_multicast_list(struct net_device *dev);
266static int smctr_set_page(struct net_device *dev, __u8 *buf);
267static int smctr_set_phy_drop(struct net_device *dev,
268 MAC_SUB_VECTOR *rsv);
269static int smctr_set_ring_speed(struct net_device *dev);
270static int smctr_set_rx_look_ahead(struct net_device *dev);
271static int smctr_set_trc_reset(int ioaddr);
272static int smctr_setup_single_cmd(struct net_device *dev,
273 __u16 command, __u16 subcommand);
274static int smctr_setup_single_cmd_w_data(struct net_device *dev,
275 __u16 command, __u16 subcommand);
276static char *smctr_malloc(struct net_device *dev, __u16 size);
277static int smctr_status_chg(struct net_device *dev);
278
279/* T */
280static void smctr_timeout(struct net_device *dev);
281static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
282 __u16 queue);
283static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue);
284static unsigned short smctr_tx_move_frame(struct net_device *dev,
285 struct sk_buff *skb, __u8 *pbuff, unsigned int bytes);
286
287/* U */
288static int smctr_update_err_stats(struct net_device *dev);
289static int smctr_update_rx_chain(struct net_device *dev, __u16 queue);
290static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
291 __u16 queue);
292
293/* W */
294static int smctr_wait_cmd(struct net_device *dev);
295static int smctr_wait_while_cbusy(struct net_device *dev);
296
297#define TO_256_BYTE_BOUNDRY(X) (((X + 0xff) & 0xff00) - X)
298#define TO_PARAGRAPH_BOUNDRY(X) (((X + 0x0f) & 0xfff0) - X)
299#define PARAGRAPH_BOUNDRY(X) smctr_malloc(dev, TO_PARAGRAPH_BOUNDRY(X))
300
301/* Allocate Adapter Shared Memory.
302 * IMPORTANT NOTE: Any changes to this function MUST be mirrored in the
303 * function "get_num_rx_bdbs" below!!!
304 *
305 * Order of memory allocation:
306 *
307 * 0. Initial System Configuration Block Pointer
308 * 1. System Configuration Block
309 * 2. System Control Block
310 * 3. Action Command Block
311 * 4. Interrupt Status Block
312 *
313 * 5. MAC TX FCB'S
314 * 6. NON-MAC TX FCB'S
315 * 7. MAC TX BDB'S
316 * 8. NON-MAC TX BDB'S
317 * 9. MAC RX FCB'S
318 * 10. NON-MAC RX FCB'S
319 * 11. MAC RX BDB'S
320 * 12. NON-MAC RX BDB'S
321 * 13. MAC TX Data Buffer( 1, 256 byte buffer)
322 * 14. MAC RX Data Buffer( 1, 256 byte buffer)
323 *
324 * 15. NON-MAC TX Data Buffer
325 * 16. NON-MAC RX Data Buffer
326 */
/* Carve the adapter's shared RAM into the fixed layout documented
 * above; each smctr_malloc() advances tp->sh_mem_used, and the
 * PARAGRAPH_BOUNDRY calls pad the cursor to a 16-byte boundary.
 * Always returns 0.
 */
static int smctr_alloc_shared_memory(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_alloc_shared_memory\n", dev->name);

	/* Allocate initial System Control Block pointer.
	 * This pointer is located in the last page, last offset - 4.
	 */
	tp->iscpb_ptr = (ISCPBlock *)(tp->ram_access + ((__u32)64 * 0x400)
		- (long)ISCP_BLOCK_SIZE);

	/* Allocate System Control Blocks. */
	tp->scgb_ptr = (SCGBlock *)smctr_malloc(dev, sizeof(SCGBlock));
	PARAGRAPH_BOUNDRY(tp->sh_mem_used);

	tp->sclb_ptr = (SCLBlock *)smctr_malloc(dev, sizeof(SCLBlock));
	PARAGRAPH_BOUNDRY(tp->sh_mem_used);

	tp->acb_head = (ACBlock *)smctr_malloc(dev,
		sizeof(ACBlock)*tp->num_acbs);
	PARAGRAPH_BOUNDRY(tp->sh_mem_used);

	tp->isb_ptr = (ISBlock *)smctr_malloc(dev, sizeof(ISBlock));
	PARAGRAPH_BOUNDRY(tp->sh_mem_used);

	tp->misc_command_data = (__u16 *)smctr_malloc(dev, MISC_DATA_SIZE);
	PARAGRAPH_BOUNDRY(tp->sh_mem_used);

	/* Allocate transmit FCBs. */
	tp->tx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
		sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE]);

	tp->tx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
		sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE]);

	tp->tx_fcb_head[BUG_QUEUE] = (FCBlock *)smctr_malloc(dev,
		sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE]);

	/* Allocate transmit BDBs. */
	tp->tx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
		sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE]);

	tp->tx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
		sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE]);

	tp->tx_bdb_head[BUG_QUEUE] = (BDBlock *)smctr_malloc(dev,
		sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE]);

	/* Allocate receive FCBs. */
	tp->rx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
		sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE]);

	tp->rx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
		sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE]);

	/* Allocate receive BDBs.  The zero-length mallocs record the
	 * current cursor as the end-of-ring marker for each queue.
	 */
	tp->rx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
		sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE]);

	tp->rx_bdb_end[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0);

	tp->rx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
		sizeof(BDBlock) * tp->num_rx_bdbs[NON_MAC_QUEUE]);

	tp->rx_bdb_end[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0);

	/* Allocate MAC transmit buffers.
	 * MAC Tx buffers don't have to be on an ODD boundary.
	 */
	tp->tx_buff_head[MAC_QUEUE]
		= (__u16 *)smctr_malloc(dev, tp->tx_buff_size[MAC_QUEUE]);
	tp->tx_buff_curr[MAC_QUEUE] = tp->tx_buff_head[MAC_QUEUE];
	tp->tx_buff_end [MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);

	/* Allocate BUG transmit buffers. */
	tp->tx_buff_head[BUG_QUEUE]
		= (__u16 *)smctr_malloc(dev, tp->tx_buff_size[BUG_QUEUE]);
	tp->tx_buff_curr[BUG_QUEUE] = tp->tx_buff_head[BUG_QUEUE];
	tp->tx_buff_end[BUG_QUEUE] = (__u16 *)smctr_malloc(dev, 0);

	/* Allocate MAC receive data buffers.
	 * MAC Rx buffer doesn't have to be on a 256 byte boundary.
	 */
	tp->rx_buff_head[MAC_QUEUE] = (__u16 *)smctr_malloc(dev,
		RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE]);
	tp->rx_buff_end[MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);

	/* Allocate Non-MAC transmit buffers.
	 * ?? For maximum Netware performance, put Tx Buffers on
	 * ODD boundary and then restore malloc to even boundaries.
	 */
	smctr_malloc(dev, 1L);
	tp->tx_buff_head[NON_MAC_QUEUE]
		= (__u16 *)smctr_malloc(dev, tp->tx_buff_size[NON_MAC_QUEUE]);
	tp->tx_buff_curr[NON_MAC_QUEUE] = tp->tx_buff_head[NON_MAC_QUEUE];
	tp->tx_buff_end [NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
	smctr_malloc(dev, 1L);

	/* Allocate Non-MAC receive data buffers.
	 * To guarantee a minimum of 256 contiguous memory to
	 * UM_Receive_Packet's lookahead pointer, before a page
	 * change or ring end is encountered, place each rx buffer on
	 * a 256 byte boundary.
	 */
	smctr_malloc(dev, TO_256_BYTE_BOUNDRY(tp->sh_mem_used));
	tp->rx_buff_head[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev,
		RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[NON_MAC_QUEUE]);
	tp->rx_buff_end[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);

	return 0;
}
440
441/* Enter Bypass state. */
442static int smctr_bypass_state(struct net_device *dev)
443{
444 int err;
445
446 if(smctr_debug > 10)
447 printk(KERN_DEBUG "%s: smctr_bypass_state\n", dev->name);
448
449 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, JS_BYPASS_STATE);
450
451 return err;
452}
453
454static int smctr_checksum_firmware(struct net_device *dev)
455{
456 struct net_local *tp = netdev_priv(dev);
457 __u16 i, checksum = 0;
458
459 if(smctr_debug > 10)
460 printk(KERN_DEBUG "%s: smctr_checksum_firmware\n", dev->name);
461
462 smctr_enable_adapter_ctrl_store(dev);
463
464 for(i = 0; i < CS_RAM_SIZE; i += 2)
465 checksum += *((__u16 *)(tp->ram_access + i));
466
467 tp->microcode_version = *(__u16 *)(tp->ram_access
468 + CS_RAM_VERSION_OFFSET);
469 tp->microcode_version >>= 8;
470
471 smctr_disable_adapter_ctrl_store(dev);
472
473 if(checksum)
474 return checksum;
475
476 return 0;
477}
478
479static int __init smctr_chk_mca(struct net_device *dev)
480{
481#ifdef CONFIG_MCA_LEGACY
482 struct net_local *tp = netdev_priv(dev);
483 int current_slot;
484 __u8 r1, r2, r3, r4, r5;
485
486 current_slot = mca_find_unused_adapter(smctr_posid, 0);
487 if(current_slot == MCA_NOTFOUND)
488 return -ENODEV;
489
490 mca_set_adapter_name(current_slot, smctr_name);
491 mca_mark_as_used(current_slot);
492 tp->slot_num = current_slot;
493
494 r1 = mca_read_stored_pos(tp->slot_num, 2);
495 r2 = mca_read_stored_pos(tp->slot_num, 3);
496
497 if(tp->slot_num)
498 outb(CNFG_POS_CONTROL_REG, (__u8)((tp->slot_num - 1) | CNFG_SLOT_ENABLE_BIT));
499 else
500 outb(CNFG_POS_CONTROL_REG, (__u8)((tp->slot_num) | CNFG_SLOT_ENABLE_BIT));
501
502 r1 = inb(CNFG_POS_REG1);
503 r2 = inb(CNFG_POS_REG0);
504
505 tp->bic_type = BIC_594_CHIP;
506
507 /* IO */
508 r2 = mca_read_stored_pos(tp->slot_num, 2);
509 r2 &= 0xF0;
510 dev->base_addr = ((__u16)r2 << 8) + (__u16)0x800;
511 request_region(dev->base_addr, SMCTR_IO_EXTENT, smctr_name);
512
513 /* IRQ */
514 r5 = mca_read_stored_pos(tp->slot_num, 5);
515 r5 &= 0xC;
516 switch(r5)
517 {
518 case 0:
519 dev->irq = 3;
520 break;
521
522 case 0x4:
523 dev->irq = 4;
524 break;
525
526 case 0x8:
527 dev->irq = 10;
528 break;
529
530 default:
531 dev->irq = 15;
532 break;
533 }
534 if (request_irq(dev->irq, smctr_interrupt, IRQF_SHARED, smctr_name, dev)) {
535 release_region(dev->base_addr, SMCTR_IO_EXTENT);
536 return -ENODEV;
537 }
538
539 /* Get RAM base */
540 r3 = mca_read_stored_pos(tp->slot_num, 3);
541 tp->ram_base = ((__u32)(r3 & 0x7) << 13) + 0x0C0000;
542 if (r3 & 0x8)
543 tp->ram_base += 0x010000;
544 if (r3 & 0x80)
545 tp->ram_base += 0xF00000;
546
547 /* Get Ram Size */
548 r3 &= 0x30;
549 r3 >>= 4;
550
551 tp->ram_usable = (__u16)CNFG_SIZE_8KB << r3;
552 tp->ram_size = (__u16)CNFG_SIZE_64KB;
553 tp->board_id |= TOKEN_MEDIA;
554
555 r4 = mca_read_stored_pos(tp->slot_num, 4);
556 tp->rom_base = ((__u32)(r4 & 0x7) << 13) + 0x0C0000;
557 if (r4 & 0x8)
558 tp->rom_base += 0x010000;
559
560 /* Get ROM size. */
561 r4 >>= 4;
562 switch (r4) {
563 case 0:
564 tp->rom_size = CNFG_SIZE_8KB;
565 break;
566 case 1:
567 tp->rom_size = CNFG_SIZE_16KB;
568 break;
569 case 2:
570 tp->rom_size = CNFG_SIZE_32KB;
571 break;
572 default:
573 tp->rom_size = ROM_DISABLE;
574 }
575
576 /* Get Media Type. */
577 r5 = mca_read_stored_pos(tp->slot_num, 5);
578 r5 &= CNFG_MEDIA_TYPE_MASK;
579 switch(r5)
580 {
581 case (0):
582 tp->media_type = MEDIA_STP_4;
583 break;
584
585 case (1):
586 tp->media_type = MEDIA_STP_16;
587 break;
588
589 case (3):
590 tp->media_type = MEDIA_UTP_16;
591 break;
592
593 default:
594 tp->media_type = MEDIA_UTP_4;
595 break;
596 }
597 tp->media_menu = 14;
598
599 r2 = mca_read_stored_pos(tp->slot_num, 2);
600 if(!(r2 & 0x02))
601 tp->mode_bits |= EARLY_TOKEN_REL;
602
603 /* Disable slot */
604 outb(CNFG_POS_CONTROL_REG, 0);
605
606 tp->board_id = smctr_get_boardid(dev, 1);
607 switch(tp->board_id & 0xffff)
608 {
609 case WD8115TA:
610 smctr_model = "8115T/A";
611 break;
612
613 case WD8115T:
614 if(tp->extra_info & CHIP_REV_MASK)
615 smctr_model = "8115T rev XE";
616 else
617 smctr_model = "8115T rev XD";
618 break;
619
620 default:
621 smctr_model = "Unknown";
622 break;
623 }
624
625 return 0;
626#else
627 return -1;
628#endif /* CONFIG_MCA_LEGACY */
629}
630
631static int smctr_chg_rx_mask(struct net_device *dev)
632{
633 struct net_local *tp = netdev_priv(dev);
634 int err = 0;
635
636 if(smctr_debug > 10)
637 printk(KERN_DEBUG "%s: smctr_chg_rx_mask\n", dev->name);
638
639 smctr_enable_16bit(dev);
640 smctr_set_page(dev, (__u8 *)tp->ram_access);
641
642 if(tp->mode_bits & LOOPING_MODE_MASK)
643 tp->config_word0 |= RX_OWN_BIT;
644 else
645 tp->config_word0 &= ~RX_OWN_BIT;
646
647 if(tp->receive_mask & PROMISCUOUS_MODE)
648 tp->config_word0 |= PROMISCUOUS_BIT;
649 else
650 tp->config_word0 &= ~PROMISCUOUS_BIT;
651
652 if(tp->receive_mask & ACCEPT_ERR_PACKETS)
653 tp->config_word0 |= SAVBAD_BIT;
654 else
655 tp->config_word0 &= ~SAVBAD_BIT;
656
657 if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)
658 tp->config_word0 |= RXATMAC;
659 else
660 tp->config_word0 &= ~RXATMAC;
661
662 if(tp->receive_mask & ACCEPT_MULTI_PROM)
663 tp->config_word1 |= MULTICAST_ADDRESS_BIT;
664 else
665 tp->config_word1 &= ~MULTICAST_ADDRESS_BIT;
666
667 if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING)
668 tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS;
669 else
670 {
671 if(tp->receive_mask & ACCEPT_SOURCE_ROUTING)
672 tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT;
673 else
674 tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS;
675 }
676
677 if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_0,
678 &tp->config_word0)))
679 {
680 return err;
681 }
682
683 if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_1,
684 &tp->config_word1)))
685 {
686 return err;
687 }
688
689 smctr_disable_16bit(dev);
690
691 return 0;
692}
693
694static int smctr_clear_int(struct net_device *dev)
695{
696 struct net_local *tp = netdev_priv(dev);
697
698 outb((tp->trc_mask | CSR_CLRTINT), dev->base_addr + CSR);
699
700 return 0;
701}
702
703static int smctr_clear_trc_reset(int ioaddr)
704{
705 __u8 r;
706
707 r = inb(ioaddr + MSR);
708 outb(~MSR_RST & r, ioaddr + MSR);
709
710 return 0;
711}
712
713/*
714 * The inverse routine to smctr_open().
715 */
716static int smctr_close(struct net_device *dev)
717{
718 struct net_local *tp = netdev_priv(dev);
719 struct sk_buff *skb;
720 int err;
721
722 netif_stop_queue(dev);
723
724 tp->cleanup = 1;
725
726 /* Check to see if adapter is already in a closed state. */
727 if(tp->status != OPEN)
728 return 0;
729
730 smctr_enable_16bit(dev);
731 smctr_set_page(dev, (__u8 *)tp->ram_access);
732
733 if((err = smctr_issue_remove_cmd(dev)))
734 {
735 smctr_disable_16bit(dev);
736 return err;
737 }
738
739 for(;;)
740 {
741 skb = skb_dequeue(&tp->SendSkbQueue);
742 if(skb == NULL)
743 break;
744 tp->QueueSkb++;
745 dev_kfree_skb(skb);
746 }
747
748
749 return 0;
750}
751
/*
 * Decompress the Huffman-encoded microcode image in @fw into the
 * adapter's control store at tp->ram_access.
 *
 * The firmware blob carries a total bit count (weight), a decode-tree
 * size and the tree itself, followed by the encoded bit stream.  Each
 * walk from ROOT to a LEAF yields one 4-bit value; four of them are
 * packed high-nibble-first into a 16-bit word, which is byte-swapped
 * before being stored.  Always returns 0.
 */
static int smctr_decode_firmware(struct net_device *dev,
		const struct firmware *fw)
{
	struct net_local *tp = netdev_priv(dev);
	short bit = 0x80, shift = 12;
	DECODE_TREE_NODE *tree;
	short branch, tsize;
	__u16 buff = 0;
	long weight;
	__u8 *ucode;
	__u16 *mem;

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_decode_firmware\n", dev->name);

	weight	= *(long *)(fw->data + WEIGHT_OFFSET);
	tsize	= *(__u8 *)(fw->data + TREE_SIZE_OFFSET);
	tree	= (DECODE_TREE_NODE *)(fw->data + TREE_OFFSET);
	/* Encoded stream starts immediately after the decode tree. */
	ucode	= (__u8 *)(fw->data + TREE_OFFSET
			+ (tsize * sizeof(DECODE_TREE_NODE)));
	mem	= (__u16 *)(tp->ram_access);

	while(weight)
	{
		branch	= ROOT;
		while((tree + branch)->tag != LEAF && weight)
		{
			/* Set bit takes the left link, clear the right. */
			branch = *ucode & bit ? (tree + branch)->llink
				: (tree + branch)->rlink;

			bit >>= 1;
			weight--;

			if(bit == 0)
			{
				/* Consumed all eight bits of this byte. */
				bit = 0x80;
				ucode++;
			}
		}

		buff |= (tree + branch)->info << shift;
		shift -= 4;

		if(shift < 0)
		{
			/* Four nibbles collected: emit one word. */
			*(mem++) = SWAP_BYTES(buff);
			buff	= 0;
			shift	= 12;
		}
	}

	/* The following assumes the Control Store Memory has
	 * been initialized to zero. If the last partial word
	 * is zero, it will not be written.
	 */
	if(buff)
		*(mem++) = SWAP_BYTES(buff);

	return 0;
}
812
/* Intentionally a no-op: the 16-bit LAAR latch set by
 * smctr_enable_16bit() is never switched back off here.
 * NOTE(review): callers use this as the symmetric bookend; leaving
 * LAAR_MEM16ENB set is presumably harmless -- confirm against the
 * 584 BIC datasheet.
 */
static int smctr_disable_16bit(struct net_device *dev)
{
	return 0;
}
817
818/*
819 * On Exit, Adapter is:
820 * 1. TRC is in a reset state and un-initialized.
821 * 2. Adapter memory is enabled.
822 * 3. Control Store memory is out of context (-WCSS is 1).
823 */
824static int smctr_disable_adapter_ctrl_store(struct net_device *dev)
825{
826 struct net_local *tp = netdev_priv(dev);
827 int ioaddr = dev->base_addr;
828
829 if(smctr_debug > 10)
830 printk(KERN_DEBUG "%s: smctr_disable_adapter_ctrl_store\n", dev->name);
831
832 tp->trc_mask |= CSR_WCSS;
833 outb(tp->trc_mask, ioaddr + CSR);
834
835 return 0;
836}
837
838static int smctr_disable_bic_int(struct net_device *dev)
839{
840 struct net_local *tp = netdev_priv(dev);
841 int ioaddr = dev->base_addr;
842
843 tp->trc_mask = CSR_MSK_ALL | CSR_MSKCBUSY
844 | CSR_MSKTINT | CSR_WCSS;
845 outb(tp->trc_mask, ioaddr + CSR);
846
847 return 0;
848}
849
850static int smctr_enable_16bit(struct net_device *dev)
851{
852 struct net_local *tp = netdev_priv(dev);
853 __u8 r;
854
855 if(tp->adapter_bus == BUS_ISA16_TYPE)
856 {
857 r = inb(dev->base_addr + LAAR);
858 outb((r | LAAR_MEM16ENB), dev->base_addr + LAAR);
859 }
860
861 return 0;
862}
863
864/*
865 * To enable the adapter control store memory:
866 * 1. Adapter must be in a RESET state.
867 * 2. Adapter memory must be enabled.
868 * 3. Control Store Memory is in context (-WCSS is 0).
869 */
870static int smctr_enable_adapter_ctrl_store(struct net_device *dev)
871{
872 struct net_local *tp = netdev_priv(dev);
873 int ioaddr = dev->base_addr;
874
875 if(smctr_debug > 10)
876 printk(KERN_DEBUG "%s: smctr_enable_adapter_ctrl_store\n", dev->name);
877
878 smctr_set_trc_reset(ioaddr);
879 smctr_enable_adapter_ram(dev);
880
881 tp->trc_mask &= ~CSR_WCSS;
882 outb(tp->trc_mask, ioaddr + CSR);
883
884 return 0;
885}
886
887static int smctr_enable_adapter_ram(struct net_device *dev)
888{
889 int ioaddr = dev->base_addr;
890 __u8 r;
891
892 if(smctr_debug > 10)
893 printk(KERN_DEBUG "%s: smctr_enable_adapter_ram\n", dev->name);
894
895 r = inb(ioaddr + MSR);
896 outb(MSR_MEMB | r, ioaddr + MSR);
897
898 return 0;
899}
900
901static int smctr_enable_bic_int(struct net_device *dev)
902{
903 struct net_local *tp = netdev_priv(dev);
904 int ioaddr = dev->base_addr;
905 __u8 r;
906
907 switch(tp->bic_type)
908 {
909 case (BIC_584_CHIP):
910 tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS;
911 outb(tp->trc_mask, ioaddr + CSR);
912 r = inb(ioaddr + IRR);
913 outb(r | IRR_IEN, ioaddr + IRR);
914 break;
915
916 case (BIC_594_CHIP):
917 tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS;
918 outb(tp->trc_mask, ioaddr + CSR);
919 r = inb(ioaddr + IMCCR);
920 outb(r | IMCCR_EIL, ioaddr + IMCCR);
921 break;
922 }
923
924 return 0;
925}
926
/*
 * Probe an ISA 8115T adapter at dev->base_addr: verify the node
 * address checksum and board ID, then decode the shared RAM base,
 * IRQ, ROM window, wait-state and media settings from the 583/584
 * interface chip's configuration registers.
 * Returns 0 on success, -EBUSY if the I/O region is taken, -ENODEV
 * otherwise.
 */
static int __init smctr_chk_isa(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	__u8 r1, r2, b, chksum = 0;
	__u16 r;
	int i;
	int err = -ENODEV;

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_chk_isa %#4x\n", dev->name, ioaddr);

	/* Valid I/O bases are 32-byte aligned. */
	if((ioaddr & 0x1F) != 0)
		goto out;

	/* Grab the region so that no one else tries to probe our ioports. */
	if (!request_region(ioaddr, SMCTR_IO_EXTENT, smctr_name)) {
		err = -EBUSY;
		goto out;
	}

	/* Checksum SMC node address */
	for(i = 0; i < 8; i++)
	{
		b = inb(ioaddr + LAR0 + i);
		chksum += b;
	}

	if (chksum != NODE_ADDR_CKSUM)
		goto out2;

	b = inb(ioaddr + BDID);
	if(b != BRD_ID_8115T)
	{
		printk(KERN_ERR "%s: The adapter found is not supported\n", dev->name);
		goto out2;
	}

	/* Check for 8115T Board ID */
	r2 = 0;
	for(r = 0; r < 8; r++)
	{
		r1 = inb(ioaddr + 0x8 + r);
		r2 += r1;
	}

	/* value of RegF adds up the sum to 0xFF */
	if((r2 != 0xFF) && (r2 != 0xEE))
		goto out2;

	/* Get adapter ID */
	tp->board_id = smctr_get_boardid(dev, 0);
	switch(tp->board_id & 0xffff)
	{
		case WD8115TA:
			smctr_model = "8115T/A";
			break;

		case WD8115T:
			if(tp->extra_info & CHIP_REV_MASK)
				smctr_model = "8115T rev XE";
			else
				smctr_model = "8115T rev XD";
			break;

		default:
			smctr_model = "Unknown";
			break;
	}

	/* Store BIC type. */
	tp->bic_type = BIC_584_CHIP;
	tp->nic_type = NIC_825_CHIP;

	/* Copy Ram Size */
	tp->ram_usable = CNFG_SIZE_16KB;
	tp->ram_size = CNFG_SIZE_64KB;

	/* Get 58x Ram Base */
	r1 = inb(ioaddr);
	r1 &= 0x3F;

	r2 = inb(ioaddr + CNFG_LAAR_584);
	r2 &= CNFG_LAAR_MASK;
	r2 <<= 3;
	r2 |= ((r1 & 0x38) >> 3);

	tp->ram_base = ((__u32)r2 << 16) + (((__u32)(r1 & 0x7)) << 13);

	/* Get 584 Irq */
	r1 = 0;
	r1 = inb(ioaddr + CNFG_ICR_583);
	r1 &= CNFG_ICR_IR2_584;

	r2 = inb(ioaddr + CNFG_IRR_583);
	r2 &= CNFG_IRR_IRQS;	/* 0x60 */
	r2 >>= 5;

	/* Map the two IRQ-select bits (r2) plus the IR2 bank bit (r1)
	 * to a host IRQ number.
	 */
	switch(r2)
	{
		case 0:
			if(r1 == 0)
				dev->irq = 2;
			else
				dev->irq = 10;
			break;

		case 1:
			if(r1 == 0)
				dev->irq = 3;
			else
				dev->irq = 11;
			break;

		case 2:
			if(r1 == 0)
			{
				if(tp->extra_info & ALTERNATE_IRQ_BIT)
					dev->irq = 5;
				else
					dev->irq = 4;
			}
			else
				dev->irq = 15;
			break;

		case 3:
			if(r1 == 0)
				dev->irq = 7;
			else
				dev->irq = 4;
			break;

		default:
			printk(KERN_ERR "%s: No IRQ found aborting\n", dev->name);
			goto out2;
	}

	if (request_irq(dev->irq, smctr_interrupt, IRQF_SHARED, smctr_name, dev))
		goto out2;

	/* Get 58x Rom Base */
	r1 = inb(ioaddr + CNFG_BIO_583);
	r1 &= 0x3E;
	r1 |= 0x40;

	tp->rom_base = (__u32)r1 << 13;

	/* Get 58x Rom Size */
	r1 = inb(ioaddr + CNFG_BIO_583);
	r1 &= 0xC0;
	if(r1 == 0)
		tp->rom_size = ROM_DISABLE;
	else
	{
		r1 >>= 6;
		tp->rom_size = (__u16)CNFG_SIZE_8KB << r1;
	}

	/* Get 58x Boot Status */
	r1 = inb(ioaddr + CNFG_GP2);

	tp->mode_bits &= (~BOOT_STATUS_MASK);

	if(r1 & CNFG_GP2_BOOT_NIBBLE)
		tp->mode_bits |= BOOT_TYPE_1;

	/* Get 58x Zero Wait State */
	tp->mode_bits &= (~ZERO_WAIT_STATE_MASK);

	r1 = inb(ioaddr + CNFG_IRR_583);

	if(r1 & CNFG_IRR_ZWS)
		tp->mode_bits |= ZERO_WAIT_STATE_8_BIT;

	if(tp->board_id & BOARD_16BIT)
	{
		r1 = inb(ioaddr + CNFG_LAAR_584);

		if(r1 & CNFG_LAAR_ZWS)
			tp->mode_bits |= ZERO_WAIT_STATE_16_BIT;
	}

	/* Get 584 Media Menu */
	tp->media_menu = 14;
	r1 = inb(ioaddr + CNFG_IRR_583);

	tp->mode_bits &= 0xf8ff;	/* (~CNFG_INTERFACE_TYPE_MASK) */
	if((tp->board_id & TOKEN_MEDIA) == TOKEN_MEDIA)
	{
		/* Get Advanced Features: IRR bits 2-1 select the media. */
		if(((r1 & 0x6) >> 1) == 0x3)
			tp->media_type |= MEDIA_UTP_16;
		else
		{
			if(((r1 & 0x6) >> 1) == 0x2)
				tp->media_type |= MEDIA_STP_16;
			else
			{
				if(((r1 & 0x6) >> 1) == 0x1)
					tp->media_type |= MEDIA_UTP_4;

				else
					tp->media_type |= MEDIA_STP_4;
			}
		}

		r1 = inb(ioaddr + CNFG_GP2);
		if(!(r1 & 0x2) )	/* GP2_ETRD */
			tp->mode_bits |= EARLY_TOKEN_REL;

		/* see if the chip is corrupted
		if(smctr_read_584_chksum(ioaddr))
		{
			printk(KERN_ERR "%s: EEPROM Checksum Failure\n", dev->name);
			free_irq(dev->irq, dev);
			goto out2;
		}
		*/
	}

	return 0;

out2:
	release_region(ioaddr, SMCTR_IO_EXTENT);
out:
	return err;
}
1155
/* Probe the adapter's board-ID registers and build a feature mask
 * describing what a supported board should look like on this bus.
 *
 * Side effects: records the bus attachment in tp->adapter_bus and
 * ORs chip/slot details (including the chip revision read from the
 * 584's engineering EEPROM page) into tp->extra_info.
 *
 * Returns the expected board-id mask, or -1 if the ID byte reports
 * a major version above 1.0 (unsupported board).
 *
 * NOTE(review): the register pokes below follow the SMC 58x BID
 * handshake; the exact inb/outb ordering and the recall busy-waits
 * are significant — do not reorder.
 */
static int __init smctr_get_boardid(struct net_device *dev, int mca)
{
	struct net_local *tp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	__u8 r, r1, IdByte;
	__u16 BoardIdMask;

	tp->board_id = BoardIdMask = 0;

	/* Feature bits a supported board must advertise, per bus type. */
	if(mca)
	{
		BoardIdMask |= (MICROCHANNEL+INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT);
		tp->extra_info |= (INTERFACE_594_CHIP+RAM_SIZE_64K+NIC_825_BIT+ALTERNATE_IRQ_BIT+SLOT_16BIT);
	}
	else
	{
		BoardIdMask|=(INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT);
		tp->extra_info |= (INTERFACE_584_CHIP + RAM_SIZE_64K
			+ NIC_825_BIT + ALTERNATE_IRQ_BIT);
	}

	if(!mca)
	{
		/* Detect whether the card sits in a 16-bit ISA slot. */
		r = inb(ioaddr + BID_REG_1);
		r &= 0x0c;
		outb(r, ioaddr + BID_REG_1);
		r = inb(ioaddr + BID_REG_1);

		if(r & BID_SIXTEEN_BIT_BIT)
		{
			tp->extra_info |= SLOT_16BIT;
			tp->adapter_bus = BUS_ISA16_TYPE;
		}
		else
			tp->adapter_bus = BUS_ISA8_TYPE;
	}
	else
		tp->adapter_bus = BUS_MCA_TYPE;

	/* Get Board Id Byte */
	IdByte = inb(ioaddr + BID_BOARD_ID_BYTE);

	/* if Major version > 1.0 then
	 *	return;
	 */
	if(IdByte & 0xF8)
		return -1;

	/* Select the EEPROM engineering page so the chip revision
	 * byte becomes readable.
	 */
	r1 = inb(ioaddr + BID_REG_1);
	r1 &= BID_ICR_MASK;
	r1 |= BID_OTHER_BIT;

	outb(r1, ioaddr + BID_REG_1);
	r1 = inb(ioaddr + BID_REG_3);

	r1 &= BID_EAR_MASK;
	r1 |= BID_ENGR_PAGE;

	outb(r1, ioaddr + BID_REG_3);
	r1 = inb(ioaddr + BID_REG_1);
	r1 &= BID_ICR_MASK;
	r1 |= (BID_RLA | BID_OTHER_BIT);

	outb(r1, ioaddr + BID_REG_1);

	/* Busy-wait until the recall-done bit clears. */
	r1 = inb(ioaddr + BID_REG_1);
	while(r1 & BID_RECALL_DONE_MASK)
		r1 = inb(ioaddr + BID_REG_1);

	r = inb(ioaddr + BID_LAR_0 + BID_REG_6);

	/* clear chip rev bits */
	tp->extra_info &= ~CHIP_REV_MASK;
	tp->extra_info |= ((r & BID_EEPROM_CHIP_REV_MASK) << 6);

	/* Switch back to the normal EEPROM page (EA6) and recall
	 * again to restore the chip's usual register view.
	 */
	r1 = inb(ioaddr + BID_REG_1);
	r1 &= BID_ICR_MASK;
	r1 |= BID_OTHER_BIT;

	outb(r1, ioaddr + BID_REG_1);
	r1 = inb(ioaddr + BID_REG_3);

	r1 &= BID_EAR_MASK;
	r1 |= BID_EA6;

	outb(r1, ioaddr + BID_REG_3);
	r1 = inb(ioaddr + BID_REG_1);

	r1 &= BID_ICR_MASK;
	r1 |= BID_RLA;

	outb(r1, ioaddr + BID_REG_1);
	r1 = inb(ioaddr + BID_REG_1);

	/* Busy-wait until the recall-done bit clears again. */
	while(r1 & BID_RECALL_DONE_MASK)
		r1 = inb(ioaddr + BID_REG_1);

	return BoardIdMask;
}
1255
1256static int smctr_get_group_address(struct net_device *dev)
1257{
1258 smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_GROUP_ADDR);
1259
1260 return smctr_wait_cmd(dev);
1261}
1262
1263static int smctr_get_functional_address(struct net_device *dev)
1264{
1265 smctr_issue_read_word_cmd(dev, RW_FUNCTIONAL_ADDR);
1266
1267 return smctr_wait_cmd(dev);
1268}
1269
1270/* Calculate number of Non-MAC receive BDB's and data buffers.
1271 * This function must simulate allocateing shared memory exactly
1272 * as the allocate_shared_memory function above.
1273 */
1274static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev)
1275{
1276 struct net_local *tp = netdev_priv(dev);
1277 unsigned int mem_used = 0;
1278
1279 /* Allocate System Control Blocks. */
1280 mem_used += sizeof(SCGBlock);
1281
1282 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1283 mem_used += sizeof(SCLBlock);
1284
1285 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1286 mem_used += sizeof(ACBlock) * tp->num_acbs;
1287
1288 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1289 mem_used += sizeof(ISBlock);
1290
1291 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1292 mem_used += MISC_DATA_SIZE;
1293
1294 /* Allocate transmit FCB's. */
1295 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1296
1297 mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE];
1298 mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE];
1299 mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE];
1300
1301 /* Allocate transmit BDBs. */
1302 mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE];
1303 mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE];
1304 mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE];
1305
1306 /* Allocate receive FCBs. */
1307 mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE];
1308 mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE];
1309
1310 /* Allocate receive BDBs. */
1311 mem_used += sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE];
1312
1313 /* Allocate MAC transmit buffers.
1314 * MAC transmit buffers don't have to be on an ODD Boundary.
1315 */
1316 mem_used += tp->tx_buff_size[MAC_QUEUE];
1317
1318 /* Allocate BUG transmit buffers. */
1319 mem_used += tp->tx_buff_size[BUG_QUEUE];
1320
1321 /* Allocate MAC receive data buffers.
1322 * MAC receive buffers don't have to be on a 256 byte boundary.
1323 */
1324 mem_used += RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE];
1325
1326 /* Allocate Non-MAC transmit buffers.
1327 * For maximum Netware performance, put Tx Buffers on
1328 * ODD Boundary,and then restore malloc to Even Boundrys.
1329 */
1330 mem_used += 1L;
1331 mem_used += tp->tx_buff_size[NON_MAC_QUEUE];
1332 mem_used += 1L;
1333
1334 /* CALCULATE NUMBER OF NON-MAC RX BDB'S
1335 * AND NON-MAC RX DATA BUFFERS
1336 *
1337 * Make sure the mem_used offset at this point is the
1338 * same as in allocate_shared memory or the following
1339 * boundary adjustment will be incorrect (i.e. not allocating
1340 * the non-mac receive buffers above cannot change the 256
1341 * byte offset).
1342 *
1343 * Since this cannot be guaranteed, adding the full 256 bytes
1344 * to the amount of shared memory used at this point will guaranteed
1345 * that the rx data buffers do not overflow shared memory.
1346 */
1347 mem_used += 0x100;
1348
1349 return (0xffff - mem_used) / (RX_DATA_BUFFER_SIZE + sizeof(BDBlock));
1350}
1351
1352static int smctr_get_physical_drop_number(struct net_device *dev)
1353{
1354 smctr_issue_read_word_cmd(dev, RW_PHYSICAL_DROP_NUMBER);
1355
1356 return smctr_wait_cmd(dev);
1357}
1358
/* Resolve the data buffer of the current receive FCB on @queue.
 *
 * Translates the FCB's on-card BDB offset (trc_bdb_ptr) into a host
 * pointer relative to tp->ram_access, caches it in the FCB's
 * bdb_ptr, and returns the BDB's data-block pointer.
 *
 * NOTE(review): the __u32 casts assume host pointers fit in 32 bits
 * — true for the ISA systems this driver targets, but worth
 * confirming before reuse elsewhere.
 */
static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue)
{
	struct net_local *tp = netdev_priv(dev);
	BDBlock *bdb;

	bdb = (BDBlock *)((__u32)tp->ram_access
		+ (__u32)(tp->rx_fcb_curr[queue]->trc_bdb_ptr));

	tp->rx_fcb_curr[queue]->bdb_ptr = bdb;

	return (__u8 *)bdb->data_block_ptr;
}
1371
1372static int smctr_get_station_id(struct net_device *dev)
1373{
1374 smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_MAC_ADDRESS);
1375
1376 return smctr_wait_cmd(dev);
1377}
1378
1379/*
1380 * Get the current statistics. This may be called with the card open
1381 * or closed.
1382 */
1383static struct net_device_stats *smctr_get_stats(struct net_device *dev)
1384{
1385 struct net_local *tp = netdev_priv(dev);
1386
1387 return (struct net_device_stats *)&tp->MacStat;
1388}
1389
/* Allocate a transmit FCB plus @bytes_count bytes of tx buffer
 * space from @queue's circular buffer.
 *
 * On success returns the FCB with its BDB pointed at the allocated
 * buffer; returns (FCBlock *)(-1L) if no FCB is free or there is
 * not enough buffer space.
 *
 * Buffer space is carved from a ring: when the (even-rounded)
 * request would reach tx_buff_end, the unused tail of the ring is
 * charged against tx_buff_used as waste and the allocation restarts
 * at tx_buff_head.
 */
static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
	__u16 bytes_count)
{
	struct net_local *tp = netdev_priv(dev);
	FCBlock *pFCB;
	BDBlock *pbdb;
	unsigned short alloc_size;
	unsigned short *temp;

	if(smctr_debug > 20)
		printk(KERN_DEBUG "smctr_get_tx_fcb\n");

	/* check if there is enough FCB blocks */
	if(tp->num_tx_fcbs_used[queue] >= tp->num_tx_fcbs[queue])
		return (FCBlock *)(-1L);

	/* round off the input pkt size to the nearest even number */
	alloc_size = (bytes_count + 1) & 0xfffe;

	/* check if enough mem */
	if((tp->tx_buff_used[queue] + alloc_size) > tp->tx_buff_size[queue])
		return (FCBlock *)(-1L);

	/* check if past the end ;
	 * if exactly enough mem to end of ring, alloc from front.
	 * this avoids update of curr when curr = end
	 */
	if(((unsigned long)(tp->tx_buff_curr[queue]) + alloc_size)
		>= (unsigned long)(tp->tx_buff_end[queue]))
	{
		/* check if enough memory from ring head */
		alloc_size = alloc_size +
			(__u16)((__u32)tp->tx_buff_end[queue]
			- (__u32)tp->tx_buff_curr[queue]);

		if((tp->tx_buff_used[queue] + alloc_size)
			> tp->tx_buff_size[queue])
		{
			return (FCBlock *)(-1L);
		}

		/* ring wrap */
		tp->tx_buff_curr[queue] = tp->tx_buff_head[queue];
	}

	tp->tx_buff_used[queue] += alloc_size;
	tp->num_tx_fcbs_used[queue]++;
	tp->tx_fcb_curr[queue]->frame_length = bytes_count;
	tp->tx_fcb_curr[queue]->memory_alloc = alloc_size;
	/* Advance curr by the rounded frame size only; after a wrap,
	 * alloc_size is larger because it also accounts the skipped
	 * ring tail against tx_buff_used.
	 */
	temp = tp->tx_buff_curr[queue];
	tp->tx_buff_curr[queue]
		= (__u16 *)((__u32)temp + (__u32)((bytes_count + 1) & 0xfffe));

	/* Point the FCB's BDB at the freshly carved buffer (both the
	 * host pointer and the card-relative TRC pointer).
	 */
	pbdb = tp->tx_fcb_curr[queue]->bdb_ptr;
	pbdb->buffer_length = bytes_count;
	pbdb->data_block_ptr = temp;
	pbdb->trc_data_block_ptr = TRC_POINTER(temp);

	pFCB = tp->tx_fcb_curr[queue];
	tp->tx_fcb_curr[queue] = tp->tx_fcb_curr[queue]->next_ptr;

	return pFCB;
}
1453
1454static int smctr_get_upstream_neighbor_addr(struct net_device *dev)
1455{
1456 smctr_issue_read_word_cmd(dev, RW_UPSTREAM_NEIGHBOR_ADDRESS);
1457
1458 return smctr_wait_cmd(dev);
1459}
1460
1461static int smctr_hardware_send_packet(struct net_device *dev,
1462 struct net_local *tp)
1463{
1464 struct tr_statistics *tstat = &tp->MacStat;
1465 struct sk_buff *skb;
1466 FCBlock *fcb;
1467
1468 if(smctr_debug > 10)
1469 printk(KERN_DEBUG"%s: smctr_hardware_send_packet\n", dev->name);
1470
1471 if(tp->status != OPEN)
1472 return -1;
1473
1474 if(tp->monitor_state_ready != 1)
1475 return -1;
1476
1477 for(;;)
1478 {
1479 /* Send first buffer from queue */
1480 skb = skb_dequeue(&tp->SendSkbQueue);
1481 if(skb == NULL)
1482 return -1;
1483
1484 tp->QueueSkb++;
1485
1486 if(skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size)
1487 return -1;
1488
1489 smctr_enable_16bit(dev);
1490 smctr_set_page(dev, (__u8 *)tp->ram_access);
1491
1492 if((fcb = smctr_get_tx_fcb(dev, NON_MAC_QUEUE, skb->len))
1493 == (FCBlock *)(-1L))
1494 {
1495 smctr_disable_16bit(dev);
1496 return -1;
1497 }
1498
1499 smctr_tx_move_frame(dev, skb,
1500 (__u8 *)fcb->bdb_ptr->data_block_ptr, skb->len);
1501
1502 smctr_set_page(dev, (__u8 *)fcb);
1503
1504 smctr_trc_send_packet(dev, fcb, NON_MAC_QUEUE);
1505 dev_kfree_skb(skb);
1506
1507 tstat->tx_packets++;
1508
1509 smctr_disable_16bit(dev);
1510 }
1511
1512 return 0;
1513}
1514
1515static int smctr_init_acbs(struct net_device *dev)
1516{
1517 struct net_local *tp = netdev_priv(dev);
1518 unsigned int i;
1519 ACBlock *acb;
1520
1521 if(smctr_debug > 10)
1522 printk(KERN_DEBUG "%s: smctr_init_acbs\n", dev->name);
1523
1524 acb = tp->acb_head;
1525 acb->cmd_done_status = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL);
1526 acb->cmd_info = ACB_CHAIN_END;
1527 acb->cmd = 0;
1528 acb->subcmd = 0;
1529 acb->data_offset_lo = 0;
1530 acb->data_offset_hi = 0;
1531 acb->next_ptr
1532 = (ACBlock *)(((char *)acb) + sizeof(ACBlock));
1533 acb->trc_next_ptr = TRC_POINTER(acb->next_ptr);
1534
1535 for(i = 1; i < tp->num_acbs; i++)
1536 {
1537 acb = acb->next_ptr;
1538 acb->cmd_done_status
1539 = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL);
1540 acb->cmd_info = ACB_CHAIN_END;
1541 acb->cmd = 0;
1542 acb->subcmd = 0;
1543 acb->data_offset_lo = 0;
1544 acb->data_offset_hi = 0;
1545 acb->next_ptr
1546 = (ACBlock *)(((char *)acb) + sizeof(ACBlock));
1547 acb->trc_next_ptr = TRC_POINTER(acb->next_ptr);
1548 }
1549
1550 acb->next_ptr = tp->acb_head;
1551 acb->trc_next_ptr = TRC_POINTER(tp->acb_head);
1552 tp->acb_next = tp->acb_head->next_ptr;
1553 tp->acb_curr = tp->acb_head->next_ptr;
1554 tp->num_acbs_used = 0;
1555
1556 return 0;
1557}
1558
/* Bring the adapter from reset to an initialized, interrupt-enabled
 * state: sanitize the configured addresses, verify the previously
 * loaded firmware, test RAM, run the internal self test, and program
 * the TRC (twice — the self test clobbers its registers).
 *
 * Returns 0 on success or a negative error code.
 */
static int smctr_init_adapter(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	int err;

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_init_adapter\n", dev->name);

	tp->status = CLOSED;
	tp->page_offset_mask = (tp->ram_usable * 1024) - 1;
	skb_queue_head_init(&tp->SendSkbQueue);
	tp->QueueSkb = MAX_TX_QUEUE;

	/* NOTE(review): forces the high bits of the group/functional
	 * addresses on — presumably required by the TRC's address
	 * format; confirm against the 825 documentation.
	 */
	if(!(tp->group_address_0 & 0x0080))
		tp->group_address_0 |= 0x00C0;

	if(!(tp->functional_address_0 & 0x00C0))
		tp->functional_address_0 |= 0x00C0;

	tp->functional_address[0] &= 0xFF7F;

	/* Fall back to permissive defaults when unconfigured. */
	if(tp->authorized_function_classes == 0)
		tp->authorized_function_classes = 0x7FFF;

	if(tp->authorized_access_priority == 0)
		tp->authorized_access_priority = 0x06;

	smctr_disable_bic_int(dev);
	smctr_set_trc_reset(dev->base_addr);

	smctr_enable_16bit(dev);
	smctr_set_page(dev, (__u8 *)tp->ram_access);

	if(smctr_checksum_firmware(dev))
	{
		printk(KERN_ERR "%s: Previously loaded firmware is missing\n",dev->name);
		return -ENOENT;
	}

	if((err = smctr_ram_memory_test(dev)))
	{
		printk(KERN_ERR "%s: RAM memory test failed.\n", dev->name);
		return -EIO;
	}

	smctr_set_rx_look_ahead(dev);
	smctr_load_node_addr(dev);

	/* Initialize adapter for Internal Self Test. */
	smctr_reset_adapter(dev);
	if((err = smctr_init_card_real(dev)))
	{
		printk(KERN_ERR "%s: Initialization of card failed (%d)\n",
			dev->name, err);
		return -EINVAL;
	}

	/* This routine clobbers the TRC's internal registers. */
	if((err = smctr_internal_self_test(dev)))
	{
		printk(KERN_ERR "%s: Card failed internal self test (%d)\n",
			dev->name, err);
		return -EINVAL;
	}

	/* Re-Initialize adapter's internal registers */
	smctr_reset_adapter(dev);
	if((err = smctr_init_card_real(dev)))
	{
		printk(KERN_ERR "%s: Initialization of card failed (%d)\n",
			dev->name, err);
		return -EINVAL;
	}

	smctr_enable_bic_int(dev);

	if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
		return err;

	smctr_disable_16bit(dev);

	return 0;
}
1642
1643static int smctr_init_card_real(struct net_device *dev)
1644{
1645 struct net_local *tp = netdev_priv(dev);
1646 int err = 0;
1647
1648 if(smctr_debug > 10)
1649 printk(KERN_DEBUG "%s: smctr_init_card_real\n", dev->name);
1650
1651 tp->sh_mem_used = 0;
1652 tp->num_acbs = NUM_OF_ACBS;
1653
1654 /* Range Check Max Packet Size */
1655 if(tp->max_packet_size < 256)
1656 tp->max_packet_size = 256;
1657 else
1658 {
1659 if(tp->max_packet_size > NON_MAC_TX_BUFFER_MEMORY)
1660 tp->max_packet_size = NON_MAC_TX_BUFFER_MEMORY;
1661 }
1662
1663 tp->num_of_tx_buffs = (NON_MAC_TX_BUFFER_MEMORY
1664 / tp->max_packet_size) - 1;
1665
1666 if(tp->num_of_tx_buffs > NUM_NON_MAC_TX_FCBS)
1667 tp->num_of_tx_buffs = NUM_NON_MAC_TX_FCBS;
1668 else
1669 {
1670 if(tp->num_of_tx_buffs == 0)
1671 tp->num_of_tx_buffs = 1;
1672 }
1673
1674 /* Tx queue constants */
1675 tp->num_tx_fcbs [BUG_QUEUE] = NUM_BUG_TX_FCBS;
1676 tp->num_tx_bdbs [BUG_QUEUE] = NUM_BUG_TX_BDBS;
1677 tp->tx_buff_size [BUG_QUEUE] = BUG_TX_BUFFER_MEMORY;
1678 tp->tx_buff_used [BUG_QUEUE] = 0;
1679 tp->tx_queue_status [BUG_QUEUE] = NOT_TRANSMITING;
1680
1681 tp->num_tx_fcbs [MAC_QUEUE] = NUM_MAC_TX_FCBS;
1682 tp->num_tx_bdbs [MAC_QUEUE] = NUM_MAC_TX_BDBS;
1683 tp->tx_buff_size [MAC_QUEUE] = MAC_TX_BUFFER_MEMORY;
1684 tp->tx_buff_used [MAC_QUEUE] = 0;
1685 tp->tx_queue_status [MAC_QUEUE] = NOT_TRANSMITING;
1686
1687 tp->num_tx_fcbs [NON_MAC_QUEUE] = NUM_NON_MAC_TX_FCBS;
1688 tp->num_tx_bdbs [NON_MAC_QUEUE] = NUM_NON_MAC_TX_BDBS;
1689 tp->tx_buff_size [NON_MAC_QUEUE] = NON_MAC_TX_BUFFER_MEMORY;
1690 tp->tx_buff_used [NON_MAC_QUEUE] = 0;
1691 tp->tx_queue_status [NON_MAC_QUEUE] = NOT_TRANSMITING;
1692
1693 /* Receive Queue Constants */
1694 tp->num_rx_fcbs[MAC_QUEUE] = NUM_MAC_RX_FCBS;
1695 tp->num_rx_bdbs[MAC_QUEUE] = NUM_MAC_RX_BDBS;
1696
1697 if(tp->extra_info & CHIP_REV_MASK)
1698 tp->num_rx_fcbs[NON_MAC_QUEUE] = 78; /* 825 Rev. XE */
1699 else
1700 tp->num_rx_fcbs[NON_MAC_QUEUE] = 7; /* 825 Rev. XD */
1701
1702 tp->num_rx_bdbs[NON_MAC_QUEUE] = smctr_get_num_rx_bdbs(dev);
1703
1704 smctr_alloc_shared_memory(dev);
1705 smctr_init_shared_memory(dev);
1706
1707 if((err = smctr_issue_init_timers_cmd(dev)))
1708 return err;
1709
1710 if((err = smctr_issue_init_txrx_cmd(dev)))
1711 {
1712 printk(KERN_ERR "%s: Hardware failure\n", dev->name);
1713 return err;
1714 }
1715
1716 return 0;
1717}
1718
/* Build, for each receive queue, a circular doubly linked list of
 * BDBs with a RX_DATA_BUFFER_SIZE data buffer attached to each.
 *
 * The first BDB of a queue is marked BDB_CHAIN_END while the rest
 * are BDB_NOT_CHAIN_END; non-MAC queue data buffers are translated
 * with RX_BUFF_TRC_POINTER, all others with TRC_POINTER.  On exit
 * rx_bdb_curr points at the second BDB of each ring.
 */
static int smctr_init_rx_bdbs(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned int i, j;
	BDBlock *bdb;
	__u16 *buf;

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_init_rx_bdbs\n", dev->name);

	for(i = 0; i < NUM_RX_QS_USED; i++)
	{
		/* Head BDB: only this one carries BDB_CHAIN_END. */
		bdb = tp->rx_bdb_head[i];
		buf = tp->rx_buff_head[i];
		bdb->info = (BDB_CHAIN_END | BDB_NO_WARNING);
		bdb->buffer_length = RX_DATA_BUFFER_SIZE;
		bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
		bdb->data_block_ptr = buf;
		bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);

		if(i == NON_MAC_QUEUE)
			bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf);
		else
			bdb->trc_data_block_ptr = TRC_POINTER(buf);

		/* Remaining BDBs: contiguous in memory, back-linked. */
		for(j = 1; j < tp->num_rx_bdbs[i]; j++)
		{
			bdb->next_ptr->back_ptr = bdb;
			bdb = bdb->next_ptr;
			buf = (__u16 *)((char *)buf + RX_DATA_BUFFER_SIZE);
			bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
			bdb->buffer_length = RX_DATA_BUFFER_SIZE;
			bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
			bdb->data_block_ptr = buf;
			bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);

			if(i == NON_MAC_QUEUE)
				bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf);
			else
				bdb->trc_data_block_ptr = TRC_POINTER(buf);
		}

		/* Close the ring back to the head. */
		bdb->next_ptr = tp->rx_bdb_head[i];
		bdb->trc_next_ptr = TRC_POINTER(tp->rx_bdb_head[i]);

		tp->rx_bdb_head[i]->back_ptr = bdb;
		tp->rx_bdb_curr[i] = tp->rx_bdb_head[i]->next_ptr;
	}

	return 0;
}
1770
/* Build, for each receive queue, a circular doubly linked list of
 * receive FCBs.
 *
 * The head FCB is marked FCB_CHAIN_END and the rest FCB_WARNING;
 * non-MAC queue links are translated with RX_FCB_TRC_POINTER, all
 * others with TRC_POINTER.  On exit rx_fcb_curr points at the
 * second FCB of each ring.
 */
static int smctr_init_rx_fcbs(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned int i, j;
	FCBlock *fcb;

	for(i = 0; i < NUM_RX_QS_USED; i++)
	{
		/* Head FCB: only this one carries FCB_CHAIN_END. */
		fcb = tp->rx_fcb_head[i];
		fcb->frame_status = 0;
		fcb->frame_length = 0;
		fcb->info = FCB_CHAIN_END;
		fcb->next_ptr = (FCBlock *)(((char*)fcb) + sizeof(FCBlock));
		if(i == NON_MAC_QUEUE)
			fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr);
		else
			fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);

		/* Remaining FCBs: contiguous in memory, back-linked. */
		for(j = 1; j < tp->num_rx_fcbs[i]; j++)
		{
			fcb->next_ptr->back_ptr = fcb;
			fcb = fcb->next_ptr;
			fcb->frame_status = 0;
			fcb->frame_length = 0;
			fcb->info = FCB_WARNING;
			fcb->next_ptr
				= (FCBlock *)(((char *)fcb) + sizeof(FCBlock));

			if(i == NON_MAC_QUEUE)
				fcb->trc_next_ptr
					= RX_FCB_TRC_POINTER(fcb->next_ptr);
			else
				fcb->trc_next_ptr
					= TRC_POINTER(fcb->next_ptr);
		}

		/* Close the ring back to the head. */
		fcb->next_ptr = tp->rx_fcb_head[i];

		if(i == NON_MAC_QUEUE)
			fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr);
		else
			fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);

		tp->rx_fcb_head[i]->back_ptr = fcb;
		tp->rx_fcb_curr[i] = tp->rx_fcb_head[i]->next_ptr;
	}

	return 0;
}
1820
/* Lay down the adapter's shared-memory control structures: the
 * ISCP -> SCGB -> (SCLB, ACB chain, ISB) pointer hierarchy, then
 * all transmit/receive FCB and BDB rings.
 *
 * Note the page switches: the ISCP lives on its own page, so the
 * SCGB pointer is written through PAGE_POINTER before switching
 * back to the ram_access page for everything else.
 */
static int smctr_init_shared_memory(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned int i;
	__u32 *iscpb;

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_init_shared_memory\n", dev->name);

	smctr_set_page(dev, (__u8 *)(unsigned int)tp->iscpb_ptr);

	/* Initialize Initial System Configuration Point. (ISCP) */
	/* NOTE(review): SWAP_WORDS suggests the card expects the two
	 * 16-bit halves of the TRC pointer in swapped order — confirm
	 * against the TRC documentation.
	 */
	iscpb = (__u32 *)PAGE_POINTER(&tp->iscpb_ptr->trc_scgb_ptr);
	*iscpb = (__u32)(SWAP_WORDS(TRC_POINTER(tp->scgb_ptr)));

	smctr_set_page(dev, (__u8 *)tp->ram_access);

	/* Initialize System Configuration Pointers. (SCP) */
	tp->scgb_ptr->config = (SCGB_ADDRESS_POINTER_FORMAT
		| SCGB_MULTI_WORD_CONTROL | SCGB_DATA_FORMAT
		| SCGB_BURST_LENGTH);

	tp->scgb_ptr->trc_sclb_ptr = TRC_POINTER(tp->sclb_ptr);
	tp->scgb_ptr->trc_acb_ptr = TRC_POINTER(tp->acb_head);
	tp->scgb_ptr->trc_isb_ptr = TRC_POINTER(tp->isb_ptr);
	tp->scgb_ptr->isbsiz = (sizeof(ISBlock)) - 2;

	/* Initialize System Control Block. (SCB) */
	tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_NOP;
	tp->sclb_ptr->iack_code = 0;
	tp->sclb_ptr->resume_control = 0;
	tp->sclb_ptr->int_mask_control = 0;
	tp->sclb_ptr->int_mask_state = 0;

	/* Initialize Interrupt Status Block. (ISB) */
	/* IType 0xf0 marks an ISB slot as "consumed"; the interrupt
	 * handler treats entries with the high nibble set as empty.
	 */
	for(i = 0; i < NUM_OF_INTERRUPTS; i++)
	{
		tp->isb_ptr->IStatus[i].IType = 0xf0;
		tp->isb_ptr->IStatus[i].ISubtype = 0;
	}

	tp->current_isb_index = 0;

	/* Initialize Action Command Block. (ACB) */
	smctr_init_acbs(dev);

	/* Initialize transmit FCB's and BDB's. */
	smctr_link_tx_fcbs_to_bdbs(dev);
	smctr_init_tx_bdbs(dev);
	smctr_init_tx_fcbs(dev);

	/* Initialize receive FCB's and BDB's. */
	smctr_init_rx_bdbs(dev);
	smctr_init_rx_fcbs(dev);

	return 0;
}
1878
1879static int smctr_init_tx_bdbs(struct net_device *dev)
1880{
1881 struct net_local *tp = netdev_priv(dev);
1882 unsigned int i, j;
1883 BDBlock *bdb;
1884
1885 for(i = 0; i < NUM_TX_QS_USED; i++)
1886 {
1887 bdb = tp->tx_bdb_head[i];
1888 bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
1889 bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
1890 bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
1891
1892 for(j = 1; j < tp->num_tx_bdbs[i]; j++)
1893 {
1894 bdb->next_ptr->back_ptr = bdb;
1895 bdb = bdb->next_ptr;
1896 bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
1897 bdb->next_ptr
1898 = (BDBlock *)(((char *)bdb) + sizeof( BDBlock)); bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
1899 }
1900
1901 bdb->next_ptr = tp->tx_bdb_head[i];
1902 bdb->trc_next_ptr = TRC_POINTER(tp->tx_bdb_head[i]);
1903 tp->tx_bdb_head[i]->back_ptr = bdb;
1904 }
1905
1906 return 0;
1907}
1908
1909static int smctr_init_tx_fcbs(struct net_device *dev)
1910{
1911 struct net_local *tp = netdev_priv(dev);
1912 unsigned int i, j;
1913 FCBlock *fcb;
1914
1915 for(i = 0; i < NUM_TX_QS_USED; i++)
1916 {
1917 fcb = tp->tx_fcb_head[i];
1918 fcb->frame_status = 0;
1919 fcb->frame_length = 0;
1920 fcb->info = FCB_CHAIN_END;
1921 fcb->next_ptr = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
1922 fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
1923
1924 for(j = 1; j < tp->num_tx_fcbs[i]; j++)
1925 {
1926 fcb->next_ptr->back_ptr = fcb;
1927 fcb = fcb->next_ptr;
1928 fcb->frame_status = 0;
1929 fcb->frame_length = 0;
1930 fcb->info = FCB_CHAIN_END;
1931 fcb->next_ptr
1932 = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
1933 fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
1934 }
1935
1936 fcb->next_ptr = tp->tx_fcb_head[i];
1937 fcb->trc_next_ptr = TRC_POINTER(tp->tx_fcb_head[i]);
1938
1939 tp->tx_fcb_head[i]->back_ptr = fcb;
1940 tp->tx_fcb_end[i] = tp->tx_fcb_head[i]->next_ptr;
1941 tp->tx_fcb_curr[i] = tp->tx_fcb_head[i]->next_ptr;
1942 tp->num_tx_fcbs_used[i] = 0;
1943 }
1944
1945 return 0;
1946}
1947
1948static int smctr_internal_self_test(struct net_device *dev)
1949{
1950 struct net_local *tp = netdev_priv(dev);
1951 int err;
1952
1953 if((err = smctr_issue_test_internal_rom_cmd(dev)))
1954 return err;
1955
1956 if((err = smctr_wait_cmd(dev)))
1957 return err;
1958
1959 if(tp->acb_head->cmd_done_status & 0xff)
1960 return -1;
1961
1962 if((err = smctr_issue_test_hic_cmd(dev)))
1963 return err;
1964
1965 if((err = smctr_wait_cmd(dev)))
1966 return err;
1967
1968 if(tp->acb_head->cmd_done_status & 0xff)
1969 return -1;
1970
1971 if((err = smctr_issue_test_mac_reg_cmd(dev)))
1972 return err;
1973
1974 if((err = smctr_wait_cmd(dev)))
1975 return err;
1976
1977 if(tp->acb_head->cmd_done_status & 0xff)
1978 return -1;
1979
1980 return 0;
1981}
1982
1983/*
1984 * The typical workload of the driver: Handle the network interface interrupts.
1985 */
1986static irqreturn_t smctr_interrupt(int irq, void *dev_id)
1987{
1988 struct net_device *dev = dev_id;
1989 struct net_local *tp;
1990 int ioaddr;
1991 __u16 interrupt_unmask_bits = 0, interrupt_ack_code = 0xff00;
1992 __u16 err1, err = NOT_MY_INTERRUPT;
1993 __u8 isb_type, isb_subtype;
1994 __u16 isb_index;
1995
1996 ioaddr = dev->base_addr;
1997 tp = netdev_priv(dev);
1998
1999 if(tp->status == NOT_INITIALIZED)
2000 return IRQ_NONE;
2001
2002 spin_lock(&tp->lock);
2003
2004 smctr_disable_bic_int(dev);
2005 smctr_enable_16bit(dev);
2006
2007 smctr_clear_int(dev);
2008
2009 /* First read the LSB */
2010 while((tp->isb_ptr->IStatus[tp->current_isb_index].IType & 0xf0) == 0)
2011 {
2012 isb_index = tp->current_isb_index;
2013 isb_type = tp->isb_ptr->IStatus[isb_index].IType;
2014 isb_subtype = tp->isb_ptr->IStatus[isb_index].ISubtype;
2015
2016 (tp->current_isb_index)++;
2017 if(tp->current_isb_index == NUM_OF_INTERRUPTS)
2018 tp->current_isb_index = 0;
2019
2020 if(isb_type >= 0x10)
2021 {
2022 smctr_disable_16bit(dev);
2023 spin_unlock(&tp->lock);
2024 return IRQ_HANDLED;
2025 }
2026
2027 err = HARDWARE_FAILED;
2028 interrupt_ack_code = isb_index;
2029 tp->isb_ptr->IStatus[isb_index].IType |= 0xf0;
2030
2031 interrupt_unmask_bits |= (1 << (__u16)isb_type);
2032
2033 switch(isb_type)
2034 {
2035 case ISB_IMC_MAC_TYPE_3:
2036 smctr_disable_16bit(dev);
2037
2038 switch(isb_subtype)
2039 {
2040 case 0:
2041 tp->monitor_state = MS_MONITOR_FSM_INACTIVE;
2042 break;
2043
2044 case 1:
2045 tp->monitor_state = MS_REPEAT_BEACON_STATE;
2046 break;
2047
2048 case 2:
2049 tp->monitor_state = MS_REPEAT_CLAIM_TOKEN_STATE;
2050 break;
2051
2052 case 3:
2053 tp->monitor_state = MS_TRANSMIT_CLAIM_TOKEN_STATE; break;
2054
2055 case 4:
2056 tp->monitor_state = MS_STANDBY_MONITOR_STATE;
2057 break;
2058
2059 case 5:
2060 tp->monitor_state = MS_TRANSMIT_BEACON_STATE;
2061 break;
2062
2063 case 6:
2064 tp->monitor_state = MS_ACTIVE_MONITOR_STATE;
2065 break;
2066
2067 case 7:
2068 tp->monitor_state = MS_TRANSMIT_RING_PURGE_STATE;
2069 break;
2070
2071 case 8: /* diagnostic state */
2072 break;
2073
2074 case 9:
2075 tp->monitor_state = MS_BEACON_TEST_STATE;
2076 if(smctr_lobe_media_test(dev))
2077 {
2078 tp->ring_status_flags = RING_STATUS_CHANGED;
2079 tp->ring_status = AUTO_REMOVAL_ERROR;
2080 smctr_ring_status_chg(dev);
2081 smctr_bypass_state(dev);
2082 }
2083 else
2084 smctr_issue_insert_cmd(dev);
2085 break;
2086
2087 /* case 0x0a-0xff, illegal states */
2088 default:
2089 break;
2090 }
2091
2092 tp->ring_status_flags = MONITOR_STATE_CHANGED;
2093 err = smctr_ring_status_chg(dev);
2094
2095 smctr_enable_16bit(dev);
2096 break;
2097
2098 /* Type 0x02 - MAC Error Counters Interrupt
2099 * One or more MAC Error Counter is half full
2100 * MAC Error Counters
2101 * Lost_FR_Error_Counter
2102 * RCV_Congestion_Counter
2103 * FR_copied_Error_Counter
2104 * FREQ_Error_Counter
2105 * Token_Error_Counter
2106 * Line_Error_Counter
2107 * Internal_Error_Count
2108 */
2109 case ISB_IMC_MAC_ERROR_COUNTERS:
2110 /* Read 802.5 Error Counters */
2111 err = smctr_issue_read_ring_status_cmd(dev);
2112 break;
2113
2114 /* Type 0x04 - MAC Type 2 Interrupt
2115 * HOST needs to enqueue MAC Frame for transmission
2116 * SubType Bit 15 - RQ_INIT_PDU( Request Initialization) * Changed from RQ_INIT_PDU to
2117 * TRC_Status_Changed_Indicate
2118 */
2119 case ISB_IMC_MAC_TYPE_2:
2120 err = smctr_issue_read_ring_status_cmd(dev);
2121 break;
2122
2123
2124 /* Type 0x05 - TX Frame Interrupt (FI). */
2125 case ISB_IMC_TX_FRAME:
2126 /* BUG QUEUE for TRC stuck receive BUG */
2127 if(isb_subtype & TX_PENDING_PRIORITY_2)
2128 {
2129 if((err = smctr_tx_complete(dev, BUG_QUEUE)) != SUCCESS)
2130 break;
2131 }
2132
2133 /* NON-MAC frames only */
2134 if(isb_subtype & TX_PENDING_PRIORITY_1)
2135 {
2136 if((err = smctr_tx_complete(dev, NON_MAC_QUEUE)) != SUCCESS)
2137 break;
2138 }
2139
2140 /* MAC frames only */
2141 if(isb_subtype & TX_PENDING_PRIORITY_0)
2142 err = smctr_tx_complete(dev, MAC_QUEUE); break;
2143
2144 /* Type 0x06 - TX END OF QUEUE (FE) */
2145 case ISB_IMC_END_OF_TX_QUEUE:
2146 /* BUG queue */
2147 if(isb_subtype & TX_PENDING_PRIORITY_2)
2148 {
2149 /* ok to clear Receive FIFO overrun
2150 * imask send_BUG now completes.
2151 */
2152 interrupt_unmask_bits |= 0x800;
2153
2154 tp->tx_queue_status[BUG_QUEUE] = NOT_TRANSMITING;
2155 if((err = smctr_tx_complete(dev, BUG_QUEUE)) != SUCCESS)
2156 break;
2157 if((err = smctr_restart_tx_chain(dev, BUG_QUEUE)) != SUCCESS)
2158 break;
2159 }
2160
2161 /* NON-MAC queue only */
2162 if(isb_subtype & TX_PENDING_PRIORITY_1)
2163 {
2164 tp->tx_queue_status[NON_MAC_QUEUE] = NOT_TRANSMITING;
2165 if((err = smctr_tx_complete(dev, NON_MAC_QUEUE)) != SUCCESS)
2166 break;
2167 if((err = smctr_restart_tx_chain(dev, NON_MAC_QUEUE)) != SUCCESS)
2168 break;
2169 }
2170
2171 /* MAC queue only */
2172 if(isb_subtype & TX_PENDING_PRIORITY_0)
2173 {
2174 tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
2175 if((err = smctr_tx_complete(dev, MAC_QUEUE)) != SUCCESS)
2176 break;
2177
2178 err = smctr_restart_tx_chain(dev, MAC_QUEUE);
2179 }
2180 break;
2181
2182 /* Type 0x07 - NON-MAC RX Resource Interrupt
2183 * Subtype bit 12 - (BW) BDB warning
2184 * Subtype bit 13 - (FW) FCB warning
2185 * Subtype bit 14 - (BE) BDB End of chain
2186 * Subtype bit 15 - (FE) FCB End of chain
2187 */
2188 case ISB_IMC_NON_MAC_RX_RESOURCE:
2189 tp->rx_fifo_overrun_count = 0;
2190 tp->receive_queue_number = NON_MAC_QUEUE;
2191 err1 = smctr_rx_frame(dev);
2192
2193 if(isb_subtype & NON_MAC_RX_RESOURCE_FE)
2194 {
2195 if((err = smctr_issue_resume_rx_fcb_cmd( dev, NON_MAC_QUEUE)) != SUCCESS) break;
2196
2197 if(tp->ptr_rx_fcb_overruns)
2198 (*tp->ptr_rx_fcb_overruns)++;
2199 }
2200
2201 if(isb_subtype & NON_MAC_RX_RESOURCE_BE)
2202 {
2203 if((err = smctr_issue_resume_rx_bdb_cmd( dev, NON_MAC_QUEUE)) != SUCCESS) break;
2204
2205 if(tp->ptr_rx_bdb_overruns)
2206 (*tp->ptr_rx_bdb_overruns)++;
2207 }
2208 err = err1;
2209 break;
2210
2211 /* Type 0x08 - MAC RX Resource Interrupt
2212 * Subtype bit 12 - (BW) BDB warning
2213 * Subtype bit 13 - (FW) FCB warning
2214 * Subtype bit 14 - (BE) BDB End of chain
2215 * Subtype bit 15 - (FE) FCB End of chain
2216 */
2217 case ISB_IMC_MAC_RX_RESOURCE:
2218 tp->receive_queue_number = MAC_QUEUE;
2219 err1 = smctr_rx_frame(dev);
2220
2221 if(isb_subtype & MAC_RX_RESOURCE_FE)
2222 {
2223 if((err = smctr_issue_resume_rx_fcb_cmd( dev, MAC_QUEUE)) != SUCCESS)
2224 break;
2225
2226 if(tp->ptr_rx_fcb_overruns)
2227 (*tp->ptr_rx_fcb_overruns)++;
2228 }
2229
2230 if(isb_subtype & MAC_RX_RESOURCE_BE)
2231 {
2232 if((err = smctr_issue_resume_rx_bdb_cmd( dev, MAC_QUEUE)) != SUCCESS)
2233 break;
2234
2235 if(tp->ptr_rx_bdb_overruns)
2236 (*tp->ptr_rx_bdb_overruns)++;
2237 }
2238 err = err1;
2239 break;
2240
2241 /* Type 0x09 - NON_MAC RX Frame Interrupt */
2242 case ISB_IMC_NON_MAC_RX_FRAME:
2243 tp->rx_fifo_overrun_count = 0;
2244 tp->receive_queue_number = NON_MAC_QUEUE;
2245 err = smctr_rx_frame(dev);
2246 break;
2247
2248 /* Type 0x0A - MAC RX Frame Interrupt */
2249 case ISB_IMC_MAC_RX_FRAME:
2250 tp->receive_queue_number = MAC_QUEUE;
2251 err = smctr_rx_frame(dev);
2252 break;
2253
2254 /* Type 0x0B - TRC status
2255 * TRC has encountered an error condition
2256 * subtype bit 14 - transmit FIFO underrun
2257 * subtype bit 15 - receive FIFO overrun
2258 */
2259 case ISB_IMC_TRC_FIFO_STATUS:
2260 if(isb_subtype & TRC_FIFO_STATUS_TX_UNDERRUN)
2261 {
2262 if(tp->ptr_tx_fifo_underruns)
2263 (*tp->ptr_tx_fifo_underruns)++;
2264 }
2265
2266 if(isb_subtype & TRC_FIFO_STATUS_RX_OVERRUN)
2267 {
2268 /* update overrun stuck receive counter
2269 * if >= 3, has to clear it by sending
2270 * back to back frames. We pick
2271 * DAT(duplicate address MAC frame)
2272 */
2273 tp->rx_fifo_overrun_count++;
2274
2275 if(tp->rx_fifo_overrun_count >= 3)
2276 {
2277 tp->rx_fifo_overrun_count = 0;
2278
2279 /* delay clearing fifo overrun
2280 * imask till send_BUG tx
2281 * complete posted
2282 */
2283 interrupt_unmask_bits &= (~0x800);
2284 printk(KERN_CRIT "Jay please send bug\n");// smctr_send_bug(dev);
2285 }
2286
2287 if(tp->ptr_rx_fifo_overruns)
2288 (*tp->ptr_rx_fifo_overruns)++;
2289 }
2290
2291 err = SUCCESS;
2292 break;
2293
2294 /* Type 0x0C - Action Command Status Interrupt
2295 * Subtype bit 14 - CB end of command chain (CE)
2296 * Subtype bit 15 - CB command interrupt (CI)
2297 */
2298 case ISB_IMC_COMMAND_STATUS:
2299 err = SUCCESS;
2300 if(tp->acb_head->cmd == ACB_CMD_HIC_NOP)
2301 {
2302 printk(KERN_ERR "i1\n");
2303 smctr_disable_16bit(dev);
2304
2305 /* XXXXXXXXXXXXXXXXX */
2306 /* err = UM_Interrupt(dev); */
2307
2308 smctr_enable_16bit(dev);
2309 }
2310 else
2311 {
2312 if((tp->acb_head->cmd
2313 == ACB_CMD_READ_TRC_STATUS) &&
2314 (tp->acb_head->subcmd
2315 == RW_TRC_STATUS_BLOCK))
2316 {
2317 if(tp->ptr_bcn_type)
2318 {
2319 *(tp->ptr_bcn_type)
2320 = (__u32)((SBlock *)tp->misc_command_data)->BCN_Type;
2321 }
2322
2323 if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & ERROR_COUNTERS_CHANGED)
2324 {
2325 smctr_update_err_stats(dev);
2326 }
2327
2328 if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & TI_NDIS_RING_STATUS_CHANGED)
2329 {
2330 tp->ring_status
2331 = ((SBlock*)tp->misc_command_data)->TI_NDIS_Ring_Status;
2332 smctr_disable_16bit(dev);
2333 err = smctr_ring_status_chg(dev);
2334 smctr_enable_16bit(dev);
2335 if((tp->ring_status & REMOVE_RECEIVED) &&
2336 (tp->config_word0 & NO_AUTOREMOVE))
2337 {
2338 smctr_issue_remove_cmd(dev);
2339 }
2340
2341 if(err != SUCCESS)
2342 {
2343 tp->acb_pending = 0;
2344 break;
2345 }
2346 }
2347
2348 if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & UNA_CHANGED)
2349 {
2350 if(tp->ptr_una)
2351 {
2352 tp->ptr_una[0] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[0]);
2353 tp->ptr_una[1] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[1]);
2354 tp->ptr_una[2] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[2]);
2355 }
2356
2357 }
2358
2359 if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & READY_TO_SEND_RQ_INIT) {
2360 err = smctr_send_rq_init(dev);
2361 }
2362 }
2363 }
2364
2365 tp->acb_pending = 0;
2366 break;
2367
2368 /* Type 0x0D - MAC Type 1 interrupt
2369 * Subtype -- 00 FR_BCN received at S12
2370 * 01 FR_BCN received at S21
2371 * 02 FR_DAT(DA=MA, A<>0) received at S21
2372 * 03 TSM_EXP at S21
2373 * 04 FR_REMOVE received at S42
2374 * 05 TBR_EXP, BR_FLAG_SET at S42
2375 * 06 TBT_EXP at S53
2376 */
2377 case ISB_IMC_MAC_TYPE_1:
2378 if(isb_subtype > 8)
2379 {
2380 err = HARDWARE_FAILED;
2381 break;
2382 }
2383
2384 err = SUCCESS;
2385 switch(isb_subtype)
2386 {
2387 case 0:
2388 tp->join_state = JS_BYPASS_STATE;
2389 if(tp->status != CLOSED)
2390 {
2391 tp->status = CLOSED;
2392 err = smctr_status_chg(dev);
2393 }
2394 break;
2395
2396 case 1:
2397 tp->join_state = JS_LOBE_TEST_STATE;
2398 break;
2399
2400 case 2:
2401 tp->join_state = JS_DETECT_MONITOR_PRESENT_STATE;
2402 break;
2403
2404 case 3:
2405 tp->join_state = JS_AWAIT_NEW_MONITOR_STATE;
2406 break;
2407
2408 case 4:
2409 tp->join_state = JS_DUPLICATE_ADDRESS_TEST_STATE;
2410 break;
2411
2412 case 5:
2413 tp->join_state = JS_NEIGHBOR_NOTIFICATION_STATE;
2414 break;
2415
2416 case 6:
2417 tp->join_state = JS_REQUEST_INITIALIZATION_STATE;
2418 break;
2419
2420 case 7:
2421 tp->join_state = JS_JOIN_COMPLETE_STATE;
2422 tp->status = OPEN;
2423 err = smctr_status_chg(dev);
2424 break;
2425
2426 case 8:
2427 tp->join_state = JS_BYPASS_WAIT_STATE;
2428 break;
2429 }
2430 break ;
2431
2432 /* Type 0x0E - TRC Initialization Sequence Interrupt
2433 * Subtype -- 00-FF Initializatin sequence complete
2434 */
2435 case ISB_IMC_TRC_INTRNL_TST_STATUS:
2436 tp->status = INITIALIZED;
2437 smctr_disable_16bit(dev);
2438 err = smctr_status_chg(dev);
2439 smctr_enable_16bit(dev);
2440 break;
2441
2442 /* other interrupt types, illegal */
2443 default:
2444 break;
2445 }
2446
2447 if(err != SUCCESS)
2448 break;
2449 }
2450
2451 /* Checking the ack code instead of the unmask bits here is because :
2452 * while fixing the stuck receive, DAT frame are sent and mask off
2453 * FIFO overrun interrupt temporarily (interrupt_unmask_bits = 0)
2454 * but we still want to issue ack to ISB
2455 */
2456 if(!(interrupt_ack_code & 0xff00))
2457 smctr_issue_int_ack(dev, interrupt_ack_code, interrupt_unmask_bits);
2458
2459 smctr_disable_16bit(dev);
2460 smctr_enable_bic_int(dev);
2461 spin_unlock(&tp->lock);
2462
2463 return IRQ_HANDLED;
2464}
2465
2466static int smctr_issue_enable_int_cmd(struct net_device *dev,
2467 __u16 interrupt_enable_mask)
2468{
2469 struct net_local *tp = netdev_priv(dev);
2470 int err;
2471
2472 if((err = smctr_wait_while_cbusy(dev)))
2473 return err;
2474
2475 tp->sclb_ptr->int_mask_control = interrupt_enable_mask;
2476 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK;
2477
2478 smctr_set_ctrl_attention(dev);
2479
2480 return 0;
2481}
2482
2483static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ibits)
2484{
2485 struct net_local *tp = netdev_priv(dev);
2486
2487 if(smctr_wait_while_cbusy(dev))
2488 return -1;
2489
2490 tp->sclb_ptr->int_mask_control = ibits;
2491 tp->sclb_ptr->iack_code = iack_code << 1; /* use the offset from base */ tp->sclb_ptr->resume_control = 0;
2492 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_IACK_CODE_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK;
2493
2494 smctr_set_ctrl_attention(dev);
2495
2496 return 0;
2497}
2498
/* Build the TRC timer/configuration parameter block in misc_command_data
 * and issue ACB_CMD_INIT_TRC_TIMERS.  The block is a fixed-layout array
 * of __u16 written strictly in order: config words, timer limits, node
 * address, group/functional addresses, ring numbers, drop number and
 * product instance ID.  Do not reorder the *pTimer_Struc++ stores.
 *
 * Returns 0 on success or the error from the busy/command waits.
 */
static int smctr_issue_init_timers_cmd(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned int i;
	int err;
	/* Cursor into the shared command-data buffer. */
	__u16 *pTimer_Struc = (__u16 *)tp->misc_command_data;

	if((err = smctr_wait_while_cbusy(dev)))
		return err;

	if((err = smctr_wait_cmd(dev)))
		return err;

	/* Base configuration; feature bits are OR'd in below. */
	tp->config_word0 = THDREN | DMA_TRIGGER | USETPT | NO_AUTOREMOVE;
	tp->config_word1 = 0;

	/* Any 16 Mbps media variant selects the 16MB frequency bit. */
	if((tp->media_type == MEDIA_STP_16) ||
	   (tp->media_type == MEDIA_UTP_16) ||
	   (tp->media_type == MEDIA_STP_16_UTP_16))
	{
		tp->config_word0 |= FREQ_16MB_BIT;
	}

	if(tp->mode_bits & EARLY_TOKEN_REL)
		tp->config_word0 |= ETREN;

	/* Loopback mode receives our own transmissions. */
	if(tp->mode_bits & LOOPING_MODE_MASK)
		tp->config_word0 |= RX_OWN_BIT;
	else
		tp->config_word0 &= ~RX_OWN_BIT;

	if(tp->receive_mask & PROMISCUOUS_MODE)
		tp->config_word0 |= PROMISCUOUS_BIT;
	else
		tp->config_word0 &= ~PROMISCUOUS_BIT;

	/* SAVBAD: also deliver frames received with errors. */
	if(tp->receive_mask & ACCEPT_ERR_PACKETS)
		tp->config_word0 |= SAVBAD_BIT;
	else
		tp->config_word0 &= ~SAVBAD_BIT;

	if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)
		tp->config_word0 |= RXATMAC;
	else
		tp->config_word0 &= ~RXATMAC;

	if(tp->receive_mask & ACCEPT_MULTI_PROM)
		tp->config_word1 |= MULTICAST_ADDRESS_BIT;
	else
		tp->config_word1 &= ~MULTICAST_ADDRESS_BIT;

	/* Spanning implies explorer; otherwise explorer alone or neither. */
	if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING)
		tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS;
	else
	{
		if(tp->receive_mask & ACCEPT_SOURCE_ROUTING)
			tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT;
		else
			tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS;
	}

	if((tp->media_type == MEDIA_STP_16) ||
	   (tp->media_type == MEDIA_UTP_16) ||
	   (tp->media_type == MEDIA_STP_16_UTP_16))
	{
		tp->config_word1 |= INTERFRAME_SPACING_16;
	}
	else
		tp->config_word1 |= INTERFRAME_SPACING_4;

	*pTimer_Struc++ = tp->config_word0;
	*pTimer_Struc++ = tp->config_word1;

	/* Timer limits differ for 4 Mbps vs 16 Mbps rings.  Values are in
	 * adapter ticks; meanings follow the per-entry comments.
	 */
	if((tp->media_type == MEDIA_STP_4) ||
	   (tp->media_type == MEDIA_UTP_4) ||
	   (tp->media_type == MEDIA_STP_4_UTP_4))
	{
		*pTimer_Struc++ = 0x00FA;	/* prescale */
		*pTimer_Struc++ = 0x2710;	/* TPT_limit */
		*pTimer_Struc++ = 0x2710;	/* TQP_limit */
		*pTimer_Struc++ = 0x0A28;	/* TNT_limit */
		*pTimer_Struc++ = 0x3E80;	/* TBT_limit */
		*pTimer_Struc++ = 0x3A98;	/* TSM_limit */
		*pTimer_Struc++ = 0x1B58;	/* TAM_limit */
		*pTimer_Struc++ = 0x00C8;	/* TBR_limit */
		*pTimer_Struc++ = 0x07D0;	/* TER_limit */
		*pTimer_Struc++ = 0x000A;	/* TGT_limit */
		*pTimer_Struc++ = 0x1162;	/* THT_limit */
		*pTimer_Struc++ = 0x07D0;	/* TRR_limit */
		*pTimer_Struc++ = 0x1388;	/* TVX_limit */
		*pTimer_Struc++ = 0x0000;	/* reserved */
	}
	else
	{
		*pTimer_Struc++ = 0x03E8;	/* prescale */
		*pTimer_Struc++ = 0x9C40;	/* TPT_limit */
		*pTimer_Struc++ = 0x9C40;	/* TQP_limit */
		*pTimer_Struc++ = 0x0A28;	/* TNT_limit */
		*pTimer_Struc++ = 0x3E80;	/* TBT_limit */
		*pTimer_Struc++ = 0x3A98;	/* TSM_limit */
		*pTimer_Struc++ = 0x1B58;	/* TAM_limit */
		*pTimer_Struc++ = 0x00C8;	/* TBR_limit */
		*pTimer_Struc++ = 0x07D0;	/* TER_limit */
		*pTimer_Struc++ = 0x000A;	/* TGT_limit */
		*pTimer_Struc++ = 0x4588;	/* THT_limit */
		*pTimer_Struc++ = 0x1F40;	/* TRR_limit */
		*pTimer_Struc++ = 0x4E20;	/* TVX_limit */
		*pTimer_Struc++ = 0x0000;	/* reserved */
	}

	/* Set node address. */
	/* Each word packs two address bytes big-endian. */
	*pTimer_Struc++ = dev->dev_addr[0] << 8
		| (dev->dev_addr[1] & 0xFF);
	*pTimer_Struc++ = dev->dev_addr[2] << 8
		| (dev->dev_addr[3] & 0xFF);
	*pTimer_Struc++ = dev->dev_addr[4] << 8
		| (dev->dev_addr[5] & 0xFF);

	/* Set group address. */
	/* Words are byte-swapped into adapter (big-endian) order. */
	*pTimer_Struc++ = tp->group_address_0 << 8
		| tp->group_address_0 >> 8;
	*pTimer_Struc++ = tp->group_address[0] << 8
		| tp->group_address[0] >> 8;
	*pTimer_Struc++ = tp->group_address[1] << 8
		| tp->group_address[1] >> 8;

	/* Set functional address. */
	*pTimer_Struc++ = tp->functional_address_0 << 8
		| tp->functional_address_0 >> 8;
	*pTimer_Struc++ = tp->functional_address[0] << 8
		| tp->functional_address[0] >> 8;
	*pTimer_Struc++ = tp->functional_address[1] << 8
		| tp->functional_address[1] >> 8;

	/* Set Bit-Wise group address. */
	*pTimer_Struc++ = tp->bitwise_group_address[0] << 8
		| tp->bitwise_group_address[0] >> 8;
	*pTimer_Struc++ = tp->bitwise_group_address[1] << 8
		| tp->bitwise_group_address[1] >> 8;

	/* Set ring number address. */
	*pTimer_Struc++ = tp->source_ring_number;
	*pTimer_Struc++ = tp->target_ring_number;

	/* Physical drop number. */
	*pTimer_Struc++ = (unsigned short)0;
	*pTimer_Struc++ = (unsigned short)0;

	/* Product instance ID. */
	for(i = 0; i < 9; i++)
		*pTimer_Struc++ = (unsigned short)0;

	err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TRC_TIMERS, 0);

	return err;
}
2655
/* Build the TX/RX queue pointer table in misc_command_data and issue
 * ACB_CMD_INIT_TX_RX.  Layout is fixed: MAX_TX_QS transmit FCB pointers
 * followed by MAX_RX_QS (FCB, BDB) pointer pairs; unused slots are zero.
 * Pointers are converted to adapter-relative form via TRC_POINTER().
 */
static int smctr_issue_init_txrx_cmd(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned int i;
	int err;
	void **txrx_ptrs = (void *)tp->misc_command_data;

	if((err = smctr_wait_while_cbusy(dev)))
		return err;

	if((err = smctr_wait_cmd(dev)))
	{
		printk(KERN_ERR "%s: Hardware failure\n", dev->name);
		return err;
	}

	/* Initialize Transmit Queue Pointers that are used, to point to
	 * a single FCB.
	 */
	for(i = 0; i < NUM_TX_QS_USED; i++)
		*txrx_ptrs++ = (void *)TRC_POINTER(tp->tx_fcb_head[i]);

	/* Initialize Transmit Queue Pointers that are NOT used to ZERO. */
	for(; i < MAX_TX_QS; i++)
		*txrx_ptrs++ = (void *)0;

	/* Initialize Receive Queue Pointers (MAC and Non-MAC) that are
	 * used, to point to a single FCB and a BDB chain of buffers.
	 */
	for(i = 0; i < NUM_RX_QS_USED; i++)
	{
		*txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_fcb_head[i]);
		*txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_bdb_head[i]);
	}

	/* Initialize Receive Queue Pointers that are NOT used to ZERO. */
	for(; i < MAX_RX_QS; i++)
	{
		*txrx_ptrs++ = (void *)0;
		*txrx_ptrs++ = (void *)0;
	}

	err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TX_RX, 0);

	return err;
}
2702
2703static int smctr_issue_insert_cmd(struct net_device *dev)
2704{
2705 int err;
2706
2707 err = smctr_setup_single_cmd(dev, ACB_CMD_INSERT, ACB_SUB_CMD_NOP);
2708
2709 return err;
2710}
2711
2712static int smctr_issue_read_ring_status_cmd(struct net_device *dev)
2713{
2714 int err;
2715
2716 if((err = smctr_wait_while_cbusy(dev)))
2717 return err;
2718
2719 if((err = smctr_wait_cmd(dev)))
2720 return err;
2721
2722 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_READ_TRC_STATUS,
2723 RW_TRC_STATUS_BLOCK);
2724
2725 return err;
2726}
2727
2728static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt)
2729{
2730 int err;
2731
2732 if((err = smctr_wait_while_cbusy(dev)))
2733 return err;
2734
2735 if((err = smctr_wait_cmd(dev)))
2736 return err;
2737
2738 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_READ_VALUE,
2739 aword_cnt);
2740
2741 return err;
2742}
2743
2744static int smctr_issue_remove_cmd(struct net_device *dev)
2745{
2746 struct net_local *tp = netdev_priv(dev);
2747 int err;
2748
2749 if((err = smctr_wait_while_cbusy(dev)))
2750 return err;
2751
2752 tp->sclb_ptr->resume_control = 0;
2753 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_REMOVE;
2754
2755 smctr_set_ctrl_attention(dev);
2756
2757 return 0;
2758}
2759
2760static int smctr_issue_resume_acb_cmd(struct net_device *dev)
2761{
2762 struct net_local *tp = netdev_priv(dev);
2763 int err;
2764
2765 if((err = smctr_wait_while_cbusy(dev)))
2766 return err;
2767
2768 tp->sclb_ptr->resume_control = SCLB_RC_ACB;
2769 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
2770
2771 tp->acb_pending = 1;
2772
2773 smctr_set_ctrl_attention(dev);
2774
2775 return 0;
2776}
2777
2778static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue)
2779{
2780 struct net_local *tp = netdev_priv(dev);
2781 int err;
2782
2783 if((err = smctr_wait_while_cbusy(dev)))
2784 return err;
2785
2786 if(queue == MAC_QUEUE)
2787 tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_BDB;
2788 else
2789 tp->sclb_ptr->resume_control = SCLB_RC_RX_NON_MAC_BDB;
2790
2791 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
2792
2793 smctr_set_ctrl_attention(dev);
2794
2795 return 0;
2796}
2797
2798static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue)
2799{
2800 struct net_local *tp = netdev_priv(dev);
2801
2802 if(smctr_debug > 10)
2803 printk(KERN_DEBUG "%s: smctr_issue_resume_rx_fcb_cmd\n", dev->name);
2804
2805 if(smctr_wait_while_cbusy(dev))
2806 return -1;
2807
2808 if(queue == MAC_QUEUE)
2809 tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_FCB;
2810 else
2811 tp->sclb_ptr->resume_control = SCLB_RC_RX_NON_MAC_FCB;
2812
2813 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
2814
2815 smctr_set_ctrl_attention(dev);
2816
2817 return 0;
2818}
2819
2820static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue)
2821{
2822 struct net_local *tp = netdev_priv(dev);
2823
2824 if(smctr_debug > 10)
2825 printk(KERN_DEBUG "%s: smctr_issue_resume_tx_fcb_cmd\n", dev->name);
2826
2827 if(smctr_wait_while_cbusy(dev))
2828 return -1;
2829
2830 tp->sclb_ptr->resume_control = (SCLB_RC_TFCB0 << queue);
2831 tp->sclb_ptr->valid_command = SCLB_RESUME_CONTROL_VALID | SCLB_VALID;
2832
2833 smctr_set_ctrl_attention(dev);
2834
2835 return 0;
2836}
2837
2838static int smctr_issue_test_internal_rom_cmd(struct net_device *dev)
2839{
2840 int err;
2841
2842 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2843 TRC_INTERNAL_ROM_TEST);
2844
2845 return err;
2846}
2847
2848static int smctr_issue_test_hic_cmd(struct net_device *dev)
2849{
2850 int err;
2851
2852 err = smctr_setup_single_cmd(dev, ACB_CMD_HIC_TEST,
2853 TRC_HOST_INTERFACE_REG_TEST);
2854
2855 return err;
2856}
2857
2858static int smctr_issue_test_mac_reg_cmd(struct net_device *dev)
2859{
2860 int err;
2861
2862 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2863 TRC_MAC_REGISTERS_TEST);
2864
2865 return err;
2866}
2867
2868static int smctr_issue_trc_loopback_cmd(struct net_device *dev)
2869{
2870 int err;
2871
2872 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2873 TRC_INTERNAL_LOOPBACK);
2874
2875 return err;
2876}
2877
2878static int smctr_issue_tri_loopback_cmd(struct net_device *dev)
2879{
2880 int err;
2881
2882 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2883 TRC_TRI_LOOPBACK);
2884
2885 return err;
2886}
2887
2888static int smctr_issue_write_byte_cmd(struct net_device *dev,
2889 short aword_cnt, void *byte)
2890{
2891 struct net_local *tp = netdev_priv(dev);
2892 unsigned int iword, ibyte;
2893 int err;
2894
2895 if((err = smctr_wait_while_cbusy(dev)))
2896 return err;
2897
2898 if((err = smctr_wait_cmd(dev)))
2899 return err;
2900
2901 for(iword = 0, ibyte = 0; iword < (unsigned int)(aword_cnt & 0xff);
2902 iword++, ibyte += 2)
2903 {
2904 tp->misc_command_data[iword] = (*((__u8 *)byte + ibyte) << 8)
2905 | (*((__u8 *)byte + ibyte + 1));
2906 }
2907
2908 return smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
2909 aword_cnt);
2910}
2911
2912static int smctr_issue_write_word_cmd(struct net_device *dev,
2913 short aword_cnt, void *word)
2914{
2915 struct net_local *tp = netdev_priv(dev);
2916 unsigned int i, err;
2917
2918 if((err = smctr_wait_while_cbusy(dev)))
2919 return err;
2920
2921 if((err = smctr_wait_cmd(dev)))
2922 return err;
2923
2924 for(i = 0; i < (unsigned int)(aword_cnt & 0xff); i++)
2925 tp->misc_command_data[i] = *((__u16 *)word + i);
2926
2927 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
2928 aword_cnt);
2929
2930 return err;
2931}
2932
2933static int smctr_join_complete_state(struct net_device *dev)
2934{
2935 int err;
2936
2937 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
2938 JS_JOIN_COMPLETE_STATE);
2939
2940 return err;
2941}
2942
2943static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev)
2944{
2945 struct net_local *tp = netdev_priv(dev);
2946 unsigned int i, j;
2947 FCBlock *fcb;
2948 BDBlock *bdb;
2949
2950 for(i = 0; i < NUM_TX_QS_USED; i++)
2951 {
2952 fcb = tp->tx_fcb_head[i];
2953 bdb = tp->tx_bdb_head[i];
2954
2955 for(j = 0; j < tp->num_tx_fcbs[i]; j++)
2956 {
2957 fcb->bdb_ptr = bdb;
2958 fcb->trc_bdb_ptr = TRC_POINTER(bdb);
2959 fcb = (FCBlock *)((char *)fcb + sizeof(FCBlock));
2960 bdb = (BDBlock *)((char *)bdb + sizeof(BDBlock));
2961 }
2962 }
2963
2964 return 0;
2965}
2966
/* Fetch "tr_smctr.bin" via request_firmware(), validate it (version byte
 * and a zero-sum 16-bit checksum), and upload it into adapter control
 * store.  May only run once per adapter reset (tp->microcode_version
 * acts as the guard).  Returns 0, UCODE_PRESENT, UCODE_NOT_PRESENT or
 * HARDWARE_FAILED.
 */
static int smctr_load_firmware(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	const struct firmware *fw;
	__u16 i, checksum = 0;
	int err = 0;

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_load_firmware\n", dev->name);

	if (request_firmware(&fw, "tr_smctr.bin", &dev->dev)) {
		printk(KERN_ERR "%s: firmware not found\n", dev->name);
		return UCODE_NOT_PRESENT;
	}

	tp->num_of_tx_buffs = 4;
	tp->mode_bits |= UMAC;
	tp->receive_mask = 0;
	tp->max_packet_size = 4177;

	/* Can only upload the firmware once per adapter reset. */
	if (tp->microcode_version != 0) {
		err = (UCODE_PRESENT);
		goto out;
	}

	/* Verify the firmware exists and is there in the right amount. */
	if (!fw->data ||
	    (*(fw->data + UCODE_VERSION_OFFSET) < UCODE_VERSION))
	{
		err = (UCODE_NOT_PRESENT);
		goto out;
	}

	/* UCODE_SIZE is not included in Checksum. */
	/* A valid image sums to zero over 16-bit words. */
	for(i = 0; i < *((__u16 *)(fw->data + UCODE_SIZE_OFFSET)); i += 2)
		checksum += *((__u16 *)(fw->data + 2 + i));
	if (checksum) {
		err = (UCODE_NOT_PRESENT);
		goto out;
	}

	/* At this point we have a valid firmware image, lets kick it on up. */
	smctr_enable_adapter_ram(dev);
	smctr_enable_16bit(dev);
	smctr_set_page(dev, (__u8 *)tp->ram_access);

	/* Upload only if the on-board image is absent/corrupt or older. */
	if((smctr_checksum_firmware(dev)) ||
	   (*(fw->data + UCODE_VERSION_OFFSET) > tp->microcode_version))
	{
		smctr_enable_adapter_ctrl_store(dev);

		/* Zero out ram space for firmware. */
		for(i = 0; i < CS_RAM_SIZE; i += 2)
			*((__u16 *)(tp->ram_access + i)) = 0;

		smctr_decode_firmware(dev, fw);

		/* Record the version and its checksum complement in RAM. */
		tp->microcode_version = *(fw->data + UCODE_VERSION_OFFSET); *((__u16 *)(tp->ram_access + CS_RAM_VERSION_OFFSET))
			= (tp->microcode_version << 8);
		*((__u16 *)(tp->ram_access + CS_RAM_CHECKSUM_OFFSET))
			= ~(tp->microcode_version << 8) + 1;

		smctr_disable_adapter_ctrl_store(dev);

		/* Re-verify what we just wrote. */
		if(smctr_checksum_firmware(dev))
			err = HARDWARE_FAILED;
	}
	else
		err = UCODE_PRESENT;

	smctr_disable_16bit(dev);
 out:
	release_firmware(fw);
	return err;
}
3043
3044static int smctr_load_node_addr(struct net_device *dev)
3045{
3046 int ioaddr = dev->base_addr;
3047 unsigned int i;
3048 __u8 r;
3049
3050 for(i = 0; i < 6; i++)
3051 {
3052 r = inb(ioaddr + LAR0 + i);
3053 dev->dev_addr[i] = (char)r;
3054 }
3055 dev->addr_len = 6;
3056
3057 return 0;
3058}
3059
3060/* Lobe Media Test.
3061 * During the transmission of the initial 1500 lobe media MAC frames,
3062 * the phase lock loop in the 805 chip may lock, and then un-lock, causing
3063 * the 825 to go into a PURGE state. When performing a PURGE, the MCT
3064 * microcode will not transmit any frames given to it by the host, and
3065 * will consequently cause a timeout.
3066 *
3067 * NOTE 1: If the monitor_state is MS_BEACON_TEST_STATE, all transmit
3068 * queues other than the one used for the lobe_media_test should be
3069 * disabled.!?
3070 *
3071 * NOTE 2: If the monitor_state is MS_BEACON_TEST_STATE and the receive_mask
3072 * has any multi-cast or promiscuous bits set, the receive_mask needs to
3073 * be changed to clear the multi-cast or promiscuous mode bits, the lobe_test
3074 * run, and then the receive mask set back to its original value if the test
3075 * is successful.
3076 */
/* Run the lobe media test: clear the receive mask, send 1500 test MAC
 * frames (allowing one retry of the test-command setup on a send
 * failure), then DAT frames, and verify nothing was received during the
 * test.  On any failure the adapter is reset and the state set CLOSED.
 */
static int smctr_lobe_media_test(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned int i, perror = 0;
	unsigned short saved_rcv_mask;

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_lobe_media_test\n", dev->name);

	/* Clear receive mask for lobe test. */
	saved_rcv_mask = tp->receive_mask;
	tp->receive_mask = 0;

	smctr_chg_rx_mask(dev);

	/* Setup the lobe media test. */
	smctr_lobe_media_test_cmd(dev);
	if(smctr_wait_cmd(dev))
		goto err;

	/* Tx lobe media test frames. */
	for(i = 0; i < 1500; ++i)
	{
		if(smctr_send_lobe_media_test(dev))
		{
			/* First failure: re-issue the test command and
			 * retry; a second failure is fatal.
			 */
			if(perror)
				goto err;
			else
			{
				perror = 1;
				if(smctr_lobe_media_test_cmd(dev))
					goto err;
			}
		}
	}

	/* DAT (duplicate address test); one retry allowed. */
	if(smctr_send_dat(dev))
	{
		if(smctr_send_dat(dev))
			goto err;
	}

	/* Check if any frames received during test. */
	if((tp->rx_fcb_curr[MAC_QUEUE]->frame_status) ||
	   (tp->rx_fcb_curr[NON_MAC_QUEUE]->frame_status))
		goto err;

	/* Set receive mask to "Promisc" mode. */
	tp->receive_mask = saved_rcv_mask;

	smctr_chg_rx_mask(dev);

	return 0;
err:
	smctr_reset_adapter(dev);
	tp->status = CLOSED;
	return LOBE_MEDIA_TEST_FAILED;
}
3135
3136static int smctr_lobe_media_test_cmd(struct net_device *dev)
3137{
3138 struct net_local *tp = netdev_priv(dev);
3139 int err;
3140
3141 if(smctr_debug > 10)
3142 printk(KERN_DEBUG "%s: smctr_lobe_media_test_cmd\n", dev->name);
3143
3144 /* Change to lobe media test state. */
3145 if(tp->monitor_state != MS_BEACON_TEST_STATE)
3146 {
3147 smctr_lobe_media_test_state(dev);
3148 if(smctr_wait_cmd(dev))
3149 {
3150 printk(KERN_ERR "Lobe Failed test state\n");
3151 return LOBE_MEDIA_TEST_FAILED;
3152 }
3153 }
3154
3155 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
3156 TRC_LOBE_MEDIA_TEST);
3157
3158 return err;
3159}
3160
3161static int smctr_lobe_media_test_state(struct net_device *dev)
3162{
3163 int err;
3164
3165 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
3166 JS_LOBE_TEST_STATE);
3167
3168 return err;
3169}
3170
3171static int smctr_make_8025_hdr(struct net_device *dev,
3172 MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc)
3173{
3174 tmf->ac = MSB(ac_fc); /* msb is access control */
3175 tmf->fc = LSB(ac_fc); /* lsb is frame control */
3176
3177 tmf->sa[0] = dev->dev_addr[0];
3178 tmf->sa[1] = dev->dev_addr[1];
3179 tmf->sa[2] = dev->dev_addr[2];
3180 tmf->sa[3] = dev->dev_addr[3];
3181 tmf->sa[4] = dev->dev_addr[4];
3182 tmf->sa[5] = dev->dev_addr[5];
3183
3184 switch(tmf->vc)
3185 {
3186 /* Send RQ_INIT to RPS */
3187 case RQ_INIT:
3188 tmf->da[0] = 0xc0;
3189 tmf->da[1] = 0x00;
3190 tmf->da[2] = 0x00;
3191 tmf->da[3] = 0x00;
3192 tmf->da[4] = 0x00;
3193 tmf->da[5] = 0x02;
3194 break;
3195
3196 /* Send RPT_TX_FORWARD to CRS */
3197 case RPT_TX_FORWARD:
3198 tmf->da[0] = 0xc0;
3199 tmf->da[1] = 0x00;
3200 tmf->da[2] = 0x00;
3201 tmf->da[3] = 0x00;
3202 tmf->da[4] = 0x00;
3203 tmf->da[5] = 0x10;
3204 break;
3205
3206 /* Everything else goes to sender */
3207 default:
3208 tmf->da[0] = rmf->sa[0];
3209 tmf->da[1] = rmf->sa[1];
3210 tmf->da[2] = rmf->sa[2];
3211 tmf->da[3] = rmf->sa[3];
3212 tmf->da[4] = rmf->sa[4];
3213 tmf->da[5] = rmf->sa[5];
3214 break;
3215 }
3216
3217 return 0;
3218}
3219
3220static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3221{
3222 struct net_local *tp = netdev_priv(dev);
3223
3224 tsv->svi = AUTHORIZED_ACCESS_PRIORITY;
3225 tsv->svl = S_AUTHORIZED_ACCESS_PRIORITY;
3226
3227 tsv->svv[0] = MSB(tp->authorized_access_priority);
3228 tsv->svv[1] = LSB(tp->authorized_access_priority);
3229
3230 return 0;
3231}
3232
3233static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3234{
3235 tsv->svi = ADDRESS_MODIFER;
3236 tsv->svl = S_ADDRESS_MODIFER;
3237
3238 tsv->svv[0] = 0;
3239 tsv->svv[1] = 0;
3240
3241 return 0;
3242}
3243
3244static int smctr_make_auth_funct_class(struct net_device *dev,
3245 MAC_SUB_VECTOR *tsv)
3246{
3247 struct net_local *tp = netdev_priv(dev);
3248
3249 tsv->svi = AUTHORIZED_FUNCTION_CLASS;
3250 tsv->svl = S_AUTHORIZED_FUNCTION_CLASS;
3251
3252 tsv->svv[0] = MSB(tp->authorized_function_classes);
3253 tsv->svv[1] = LSB(tp->authorized_function_classes);
3254
3255 return 0;
3256}
3257
3258static int smctr_make_corr(struct net_device *dev,
3259 MAC_SUB_VECTOR *tsv, __u16 correlator)
3260{
3261 tsv->svi = CORRELATOR;
3262 tsv->svl = S_CORRELATOR;
3263
3264 tsv->svv[0] = MSB(correlator);
3265 tsv->svv[1] = LSB(correlator);
3266
3267 return 0;
3268}
3269
3270static int smctr_make_funct_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3271{
3272 struct net_local *tp = netdev_priv(dev);
3273
3274 smctr_get_functional_address(dev);
3275
3276 tsv->svi = FUNCTIONAL_ADDRESS;
3277 tsv->svl = S_FUNCTIONAL_ADDRESS;
3278
3279 tsv->svv[0] = MSB(tp->misc_command_data[0]);
3280 tsv->svv[1] = LSB(tp->misc_command_data[0]);
3281
3282 tsv->svv[2] = MSB(tp->misc_command_data[1]);
3283 tsv->svv[3] = LSB(tp->misc_command_data[1]);
3284
3285 return 0;
3286}
3287
3288static int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3289{
3290 struct net_local *tp = netdev_priv(dev);
3291
3292 smctr_get_group_address(dev);
3293
3294 tsv->svi = GROUP_ADDRESS;
3295 tsv->svl = S_GROUP_ADDRESS;
3296
3297 tsv->svv[0] = MSB(tp->misc_command_data[0]);
3298 tsv->svv[1] = LSB(tp->misc_command_data[0]);
3299
3300 tsv->svv[2] = MSB(tp->misc_command_data[1]);
3301 tsv->svv[3] = LSB(tp->misc_command_data[1]);
3302
3303 /* Set Group Address Sub-vector to all zeros if only the
3304 * Group Address/Functional Address Indicator is set.
3305 */
3306 if(tsv->svv[0] == 0x80 && tsv->svv[1] == 0x00 &&
3307 tsv->svv[2] == 0x00 && tsv->svv[3] == 0x00)
3308 tsv->svv[0] = 0x00;
3309
3310 return 0;
3311}
3312
3313static int smctr_make_phy_drop_num(struct net_device *dev,
3314 MAC_SUB_VECTOR *tsv)
3315{
3316 struct net_local *tp = netdev_priv(dev);
3317
3318 smctr_get_physical_drop_number(dev);
3319
3320 tsv->svi = PHYSICAL_DROP;
3321 tsv->svl = S_PHYSICAL_DROP;
3322
3323 tsv->svv[0] = MSB(tp->misc_command_data[0]);
3324 tsv->svv[1] = LSB(tp->misc_command_data[0]);
3325
3326 tsv->svv[2] = MSB(tp->misc_command_data[1]);
3327 tsv->svv[3] = LSB(tp->misc_command_data[1]);
3328
3329 return 0;
3330}
3331
3332static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3333{
3334 int i;
3335
3336 tsv->svi = PRODUCT_INSTANCE_ID;
3337 tsv->svl = S_PRODUCT_INSTANCE_ID;
3338
3339 for(i = 0; i < 18; i++)
3340 tsv->svv[i] = 0xF0;
3341
3342 return 0;
3343}
3344
3345static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3346{
3347 struct net_local *tp = netdev_priv(dev);
3348
3349 smctr_get_station_id(dev);
3350
3351 tsv->svi = STATION_IDENTIFER;
3352 tsv->svl = S_STATION_IDENTIFER;
3353
3354 tsv->svv[0] = MSB(tp->misc_command_data[0]);
3355 tsv->svv[1] = LSB(tp->misc_command_data[0]);
3356
3357 tsv->svv[2] = MSB(tp->misc_command_data[1]);
3358 tsv->svv[3] = LSB(tp->misc_command_data[1]);
3359
3360 tsv->svv[4] = MSB(tp->misc_command_data[2]);
3361 tsv->svv[5] = LSB(tp->misc_command_data[2]);
3362
3363 return 0;
3364}
3365
3366static int smctr_make_ring_station_status(struct net_device *dev,
3367 MAC_SUB_VECTOR * tsv)
3368{
3369 tsv->svi = RING_STATION_STATUS;
3370 tsv->svl = S_RING_STATION_STATUS;
3371
3372 tsv->svv[0] = 0;
3373 tsv->svv[1] = 0;
3374 tsv->svv[2] = 0;
3375 tsv->svv[3] = 0;
3376 tsv->svv[4] = 0;
3377 tsv->svv[5] = 0;
3378
3379 return 0;
3380}
3381
3382static int smctr_make_ring_station_version(struct net_device *dev,
3383 MAC_SUB_VECTOR *tsv)
3384{
3385 struct net_local *tp = netdev_priv(dev);
3386
3387 tsv->svi = RING_STATION_VERSION_NUMBER;
3388 tsv->svl = S_RING_STATION_VERSION_NUMBER;
3389
3390 tsv->svv[0] = 0xe2; /* EBCDIC - S */
3391 tsv->svv[1] = 0xd4; /* EBCDIC - M */
3392 tsv->svv[2] = 0xc3; /* EBCDIC - C */
3393 tsv->svv[3] = 0x40; /* EBCDIC - */
3394 tsv->svv[4] = 0xe5; /* EBCDIC - V */
3395 tsv->svv[5] = 0xF0 + (tp->microcode_version >> 4);
3396 tsv->svv[6] = 0xF0 + (tp->microcode_version & 0x0f);
3397 tsv->svv[7] = 0x40; /* EBCDIC - */
3398 tsv->svv[8] = 0xe7; /* EBCDIC - X */
3399
3400 if(tp->extra_info & CHIP_REV_MASK)
3401 tsv->svv[9] = 0xc5; /* EBCDIC - E */
3402 else
3403 tsv->svv[9] = 0xc4; /* EBCDIC - D */
3404
3405 return 0;
3406}
3407
3408static int smctr_make_tx_status_code(struct net_device *dev,
3409 MAC_SUB_VECTOR *tsv, __u16 tx_fstatus)
3410{
3411 tsv->svi = TRANSMIT_STATUS_CODE;
3412 tsv->svl = S_TRANSMIT_STATUS_CODE;
3413
3414 tsv->svv[0] = ((tx_fstatus & 0x0100 >> 6) | IBM_PASS_SOURCE_ADDR);
3415
3416 /* Stripped frame status of Transmitted Frame */
3417 tsv->svv[1] = tx_fstatus & 0xff;
3418
3419 return 0;
3420}
3421
3422static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
3423 MAC_SUB_VECTOR *tsv)
3424{
3425 struct net_local *tp = netdev_priv(dev);
3426
3427 smctr_get_upstream_neighbor_addr(dev);
3428
3429 tsv->svi = UPSTREAM_NEIGHBOR_ADDRESS;
3430 tsv->svl = S_UPSTREAM_NEIGHBOR_ADDRESS;
3431
3432 tsv->svv[0] = MSB(tp->misc_command_data[0]);
3433 tsv->svv[1] = LSB(tp->misc_command_data[0]);
3434
3435 tsv->svv[2] = MSB(tp->misc_command_data[1]);
3436 tsv->svv[3] = LSB(tp->misc_command_data[1]);
3437
3438 tsv->svv[4] = MSB(tp->misc_command_data[2]);
3439 tsv->svv[5] = LSB(tp->misc_command_data[2]);
3440
3441 return 0;
3442}
3443
/* Fill in a WRAP_DATA subvector header for the lobe media test frame.
 * Only the subvector ID and length are set; the payload bytes are left
 * as whatever is in the transmit buffer (the wrap test checks the path,
 * not the data pattern).
 */
static int smctr_make_wrap_data(struct net_device *dev, MAC_SUB_VECTOR *tsv)
{
        tsv->svi = WRAP_DATA;
        tsv->svl = S_WRAP_DATA;

        return 0;
}
3451
3452/*
3453 * Open/initialize the board. This is called sometime after
3454 * booting when the 'ifconfig' program is run.
3455 *
3456 * This routine should set everything up anew at each open, even
3457 * registers that "should" only need to be set once at boot, so that
 * there is a non-reboot way to recover if something goes wrong.
3459 */
3460static int smctr_open(struct net_device *dev)
3461{
3462 int err;
3463
3464 if(smctr_debug > 10)
3465 printk(KERN_DEBUG "%s: smctr_open\n", dev->name);
3466
3467 err = smctr_init_adapter(dev);
3468 if(err < 0)
3469 return err;
3470
3471 return err;
3472}
3473
/* Interrupt driven open of Token card.
 *
 * Resumes the receive FCB/BDB chains for both the MAC and non-MAC queues
 * and then either inserts into the ring or enters one of three loopback
 * modes, depending on tp->mode_bits.  Returns 0 on success, -1 if the
 * adapter has not been initialized, or an adapter error code.
 */
static int smctr_open_tr(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        unsigned long flags;
        int err;

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_open_tr\n", dev->name);

        /* Now we can actually open the adapter. */
        if(tp->status == OPEN)
                return 0;
        if(tp->status != INITIALIZED)
                return -1;

        /* FIXME: it would work a lot better if we masked the irq sources
           on the card here, then we could skip the locking and poll nicely */
        spin_lock_irqsave(&tp->lock, flags);

        smctr_set_page(dev, (__u8 *)tp->ram_access);

        /* Restart all four receive chains; any failure aborts the open
         * with the adapter left out of the ring.
         */
        if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)MAC_QUEUE)))
                goto out;

        if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)MAC_QUEUE)))
                goto out;

        if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)NON_MAC_QUEUE)))
                goto out;

        if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)NON_MAC_QUEUE)))
                goto out;

        tp->status = CLOSED;

        /* Insert into the Ring or Enter Loopback Mode. */
        if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_1)
        {
                tp->status = CLOSED;

                /* TRC internal loopback: status only becomes OPEN once the
                 * command completes successfully.
                 */
                if(!(err = smctr_issue_trc_loopback_cmd(dev)))
                {
                        if(!(err = smctr_wait_cmd(dev)))
                                tp->status = OPEN;
                }

                smctr_status_chg(dev);
        }
        else
        {
                if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_2)
                {
                        tp->status = CLOSED;
                        if(!(err = smctr_issue_tri_loopback_cmd(dev)))
                        {
                                if(!(err = smctr_wait_cmd(dev)))
                                        tp->status = OPEN;
                        }

                        smctr_status_chg(dev);
                }
                else
                {
                        if((tp->mode_bits & LOOPING_MODE_MASK)
                                == LOOPBACK_MODE_3)
                        {
                                tp->status = CLOSED;
                                if(!(err = smctr_lobe_media_test_cmd(dev)))
                                {
                                        if(!(err = smctr_wait_cmd(dev)))
                                                tp->status = OPEN;
                                }
                                smctr_status_chg(dev);
                        }
                        else
                        {
                                /* Normal path: lobe test, then ring insert. */
                                if(!(err = smctr_lobe_media_test(dev)))
                                        err = smctr_issue_insert_cmd(dev);
                                else
                                {
                                        if(err == LOBE_MEDIA_TEST_FAILED)
                                                printk(KERN_WARNING "%s: Lobe Media Test Failure - Check cable?\n", dev->name);
                                }
                        }
                }
        }

out:
        spin_unlock_irqrestore(&tp->lock, flags);

        return err;
}
3567
/* Check for a network adapter of this type,
 * and return device structure if one exists.
 *
 * Probes either the single user-specified I/O base, or the table of
 * standard ISA addresses, then registers the netdev.  Returns the device
 * on success or an ERR_PTR() on failure.
 */
struct net_device __init *smctr_probe(int unit)
{
        struct net_device *dev = alloc_trdev(sizeof(struct net_local));
        static const unsigned ports[] = {
                0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0, 0x300,
                0x320, 0x340, 0x360, 0x380, 0
        };
        const unsigned *port;
        int err = 0;

        if (!dev)
                return ERR_PTR(-ENOMEM);

        /* A non-negative unit pins the interface name ("tr<unit>") and
         * pulls in any boot-time parameters (io, irq, mem) for it.
         */
        if (unit >= 0) {
                sprintf(dev->name, "tr%d", unit);
                netdev_boot_setup_check(dev);
        }

        if (dev->base_addr > 0x1ff)    /* Check a single specified location. */
                err = smctr_probe1(dev, dev->base_addr);
        else if(dev->base_addr != 0)  /* Don't probe at all. */
                err =-ENXIO;
        else {
                /* Autoprobe every standard ISA base address in turn. */
                for (port = ports; *port; port++) {
                        err = smctr_probe1(dev, *port);
                        if (!err)
                                break;
                }
        }
        if (err)
                goto out;
        err = register_netdev(dev);
        if (err)
                goto out1;
        return dev;
out1:
        /* Registration failed: undo everything probe1 claimed. */
#ifdef CONFIG_MCA_LEGACY
        { struct net_local *tp = netdev_priv(dev);
          if (tp->slot_num)
                mca_mark_as_unused(tp->slot_num);
        }
#endif
        release_region(dev->base_addr, SMCTR_IO_EXTENT);
        free_irq(dev->irq, dev);
out:
        free_netdev(dev);
        return ERR_PTR(err);
}
3619
/* Network stack entry points for this driver. */
static const struct net_device_ops smctr_netdev_ops = {
        .ndo_open               = smctr_open,
        .ndo_stop               = smctr_close,
        .ndo_start_xmit         = smctr_send_packet,
        .ndo_tx_timeout         = smctr_timeout,
        .ndo_get_stats          = smctr_get_stats,
        .ndo_set_multicast_list = smctr_set_multicast_list,
};
3628
/* Probe a single I/O base address for an adapter: detect ISA then MCA,
 * map the shared RAM window, load the firmware, and fill in the netdev.
 * Returns 0 on success or a negative errno.
 */
static int __init smctr_probe1(struct net_device *dev, int ioaddr)
{
        static unsigned version_printed;
        struct net_local *tp = netdev_priv(dev);
        int err;
        __u32 *ram;

        if(smctr_debug && version_printed++ == 0)
                printk(version);

        spin_lock_init(&tp->lock);
        dev->base_addr = ioaddr;

        /* Actually detect an adapter now. */
        err = smctr_chk_isa(dev);
        if(err < 0)
        {
                /* Not ISA; fall back to an MCA bus probe. */
                if ((err = smctr_chk_mca(dev)) < 0) {
                        err = -ENODEV;
                        goto out;
                }
        }

        tp = netdev_priv(dev);
        dev->mem_start = tp->ram_base;
        dev->mem_end = dev->mem_start + 0x10000;
        ram = (__u32 *)phys_to_virt(dev->mem_start);
        /* NOTE(review): this stores the virtual address of the RAM window
         * as an integer in ram_access ("*(__u32 *)&ram" is just the value
         * of the pointer) — assumes a 32-bit pointer; confirm on 64-bit.
         */
        tp->ram_access = *(__u32 *)&ram;
        tp->status = NOT_INITIALIZED;

        err = smctr_load_firmware(dev);
        if(err != UCODE_PRESENT && err != SUCCESS)
        {
                printk(KERN_ERR "%s: Firmware load failed (%d)\n", dev->name, err);
                err = -EIO;
                goto out;
        }

        /* Allow user to specify ring speed on module insert. */
        if(ringspeed == 4)
                tp->media_type = MEDIA_UTP_4;
        else
                tp->media_type = MEDIA_UTP_16;

        printk(KERN_INFO "%s: %s %s at Io %#4x, Irq %d, Rom %#4x, Ram %#4x.\n",
                dev->name, smctr_name, smctr_model,
                (unsigned int)dev->base_addr,
                dev->irq, tp->rom_base, tp->ram_base);

        dev->netdev_ops = &smctr_netdev_ops;
        dev->watchdog_timeo = HZ;
        return 0;

out:
        return err;
}
3685
/* Process one received MAC frame.
 *
 * When the destination address matched (we are the target), dispatch on
 * the vector class: frames handled by the ring station (INIT, CHG_PARM,
 * RQ_*, TX_FORWARD) get a response sent; frames for CRS/REM/RPS are left
 * for the host; anything else is treated as an attention/unknown frame.
 * When the DA did not match (promiscuous receive), only classify the
 * frame as MAC (xframe=1) vs extended MAC (xframe=0).  Finally the frame
 * is copied into an skb and passed up if the receive mask accepts its
 * class.  Returns 0, a negative errno, or an adapter error code.
 */
static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
        struct net_device *dev, __u16 rx_status)
{
        struct net_local *tp = netdev_priv(dev);
        struct sk_buff *skb;
        __u16 rcode, correlator;
        int err = 0;
        __u8 xframe = 1;        /* 1 = ordinary MAC frame, 0 = ext/attention */

        /* Vector length arrives byte-swapped; fix it for parsing. */
        rmf->vl = SWAP_BYTES(rmf->vl);
        if(rx_status & FCB_RX_STATUS_DA_MATCHED)
        {
                switch(rmf->vc)
                {
                        /* Received MAC Frames Processed by RS. */
                        case INIT:
                                if((rcode = smctr_rcv_init(dev, rmf, &correlator)) == HARDWARE_FAILED)
                                {
                                        return rcode;
                                }

                                if((err = smctr_send_rsp(dev, rmf, rcode,
                                        correlator)))
                                {
                                        return err;
                                }
                                break;

                        case CHG_PARM:
                                if((rcode = smctr_rcv_chg_param(dev, rmf,
                                        &correlator)) ==HARDWARE_FAILED)
                                {
                                        return rcode;
                                }

                                if((err = smctr_send_rsp(dev, rmf, rcode,
                                        correlator)))
                                {
                                        return err;
                                }
                                break;

                        case RQ_ADDR:
                                /* Non-positive ack: send an error response
                                 * instead of the report (unless the hardware
                                 * itself failed).
                                 */
                                if((rcode = smctr_rcv_rq_addr_state_attch(dev,
                                        rmf, &correlator)) != POSITIVE_ACK)
                                {
                                        if(rcode == HARDWARE_FAILED)
                                                return rcode;
                                        else
                                                return smctr_send_rsp(dev, rmf,
                                                        rcode, correlator);
                                }

                                if((err = smctr_send_rpt_addr(dev, rmf,
                                        correlator)))
                                {
                                        return err;
                                }
                                break;

                        case RQ_ATTCH:
                                if((rcode = smctr_rcv_rq_addr_state_attch(dev,
                                        rmf, &correlator)) != POSITIVE_ACK)
                                {
                                        if(rcode == HARDWARE_FAILED)
                                                return rcode;
                                        else
                                                return smctr_send_rsp(dev, rmf,
                                                        rcode,
                                                        correlator);
                                }

                                if((err = smctr_send_rpt_attch(dev, rmf,
                                        correlator)))
                                {
                                        return err;
                                }
                                break;

                        case RQ_STATE:
                                if((rcode = smctr_rcv_rq_addr_state_attch(dev,
                                        rmf, &correlator)) != POSITIVE_ACK)
                                {
                                        if(rcode == HARDWARE_FAILED)
                                                return rcode;
                                        else
                                                return smctr_send_rsp(dev, rmf,
                                                        rcode,
                                                        correlator);
                                }

                                if((err = smctr_send_rpt_state(dev, rmf,
                                        correlator)))
                                {
                                        return err;
                                }
                                break;

                        case TX_FORWARD: {
                                __u16 uninitialized_var(tx_fstatus);

                                if((rcode = smctr_rcv_tx_forward(dev, rmf))
                                        != POSITIVE_ACK)
                                {
                                        if(rcode == HARDWARE_FAILED)
                                                return rcode;
                                        else
                                                return smctr_send_rsp(dev, rmf,
                                                        rcode,
                                                        correlator);
                                }

                                if((err = smctr_send_tx_forward(dev, rmf,
                                        &tx_fstatus)) == HARDWARE_FAILED)
                                {
                                        return err;
                                }

                                if(err == A_FRAME_WAS_FORWARDED)
                                {
                                        if((err = smctr_send_rpt_tx_forward(dev,
                                                rmf, tx_fstatus))
                                                == HARDWARE_FAILED)
                                        {
                                                return err;
                                        }
                                }
                                break;
                        }

                        /* Received MAC Frames Processed by CRS/REM/RPS. */
                        case RSP:
                        case RQ_INIT:
                        case RPT_NEW_MON:
                        case RPT_SUA_CHG:
                        case RPT_ACTIVE_ERR:
                        case RPT_NN_INCMP:
                        case RPT_ERROR:
                        case RPT_ATTCH:
                        case RPT_STATE:
                        case RPT_ADDR:
                                break;

                        /* Rcvd Att. MAC Frame (if RXATMAC set) or UNKNOWN */
                        default:
                                xframe = 0;
                                if(!(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES))
                                {
                                        rcode = smctr_rcv_unknown(dev, rmf,
                                                &correlator);
                                        if((err = smctr_send_rsp(dev, rmf,rcode,
                                                correlator)))
                                        {
                                                return err;
                                        }
                                }

                                break;
                }
        }
        else
        {
                /* 1. DA doesn't match (Promiscuous Mode).
                 * 2. Parse for Extended MAC Frame Type.
                 */
                switch(rmf->vc)
                {
                        case RSP:
                        case INIT:
                        case RQ_INIT:
                        case RQ_ADDR:
                        case RQ_ATTCH:
                        case RQ_STATE:
                        case CHG_PARM:
                        case RPT_ADDR:
                        case RPT_ERROR:
                        case RPT_ATTCH:
                        case RPT_STATE:
                        case RPT_NEW_MON:
                        case RPT_SUA_CHG:
                        case RPT_NN_INCMP:
                        case RPT_ACTIVE_ERR:
                                break;

                        default:
                                xframe = 0;
                                break;
                }
        }

        /* NOTE: UNKNOWN MAC frames will NOT be passed up unless
         * ACCEPT_ATT_MAC_FRAMES is set.
         */
        if(((tp->receive_mask & ACCEPT_ATT_MAC_FRAMES) &&
                (xframe == (__u8)0)) ||
                ((tp->receive_mask & ACCEPT_EXT_MAC_FRAMES) &&
                (xframe == (__u8)1)))
        {
                /* Restore the on-wire byte order before handing the frame
                 * to the stack.
                 */
                rmf->vl = SWAP_BYTES(rmf->vl);

                if (!(skb = dev_alloc_skb(size)))
                        return -ENOMEM;
                skb->len = size;

                /* Slide data into a sleek skb. */
                skb_put(skb, skb->len);
                skb_copy_to_linear_data(skb, rmf, skb->len);

                /* Update Counters */
                tp->MacStat.rx_packets++;
                tp->MacStat.rx_bytes += skb->len;

                /* Kick the packet on up. */
                skb->protocol = tr_type_trans(skb, dev);
                netif_rx(skb);
                err = 0;
        }

        return err;
}
3906
3907/* Adapter RAM test. Incremental word ODD boundary data test. */
3908static int smctr_ram_memory_test(struct net_device *dev)
3909{
3910 struct net_local *tp = netdev_priv(dev);
3911 __u16 page, pages_of_ram, start_pattern = 0, word_pattern = 0,
3912 word_read = 0, err_word = 0, err_pattern = 0;
3913 unsigned int err_offset;
3914 __u32 j, pword;
3915 __u8 err = 0;
3916
3917 if(smctr_debug > 10)
3918 printk(KERN_DEBUG "%s: smctr_ram_memory_test\n", dev->name);
3919
3920 start_pattern = 0x0001;
3921 pages_of_ram = tp->ram_size / tp->ram_usable;
3922 pword = tp->ram_access;
3923
3924 /* Incremental word ODD boundary test. */
3925 for(page = 0; (page < pages_of_ram) && (~err);
3926 page++, start_pattern += 0x8000)
3927 {
3928 smctr_set_page(dev, (__u8 *)(tp->ram_access
3929 + (page * tp->ram_usable * 1024) + 1));
3930 word_pattern = start_pattern;
3931
3932 for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1; j += 2)
3933 *(__u16 *)(pword + j) = word_pattern++;
3934
3935 word_pattern = start_pattern;
3936
3937 for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1 && (~err);
3938 j += 2, word_pattern++)
3939 {
3940 word_read = *(__u16 *)(pword + j);
3941 if(word_read != word_pattern)
3942 {
3943 err = (__u8)1;
3944 err_offset = j;
3945 err_word = word_read;
3946 err_pattern = word_pattern;
3947 return RAM_TEST_FAILED;
3948 }
3949 }
3950 }
3951
3952 /* Zero out memory. */
3953 for(page = 0; page < pages_of_ram && (~err); page++)
3954 {
3955 smctr_set_page(dev, (__u8 *)(tp->ram_access
3956 + (page * tp->ram_usable * 1024)));
3957 word_pattern = 0;
3958
3959 for(j = 0; j < (__u32)tp->ram_usable * 1024; j +=2)
3960 *(__u16 *)(pword + j) = word_pattern;
3961
3962 for(j =0; j < (__u32)tp->ram_usable * 1024 && (~err); j += 2)
3963 {
3964 word_read = *(__u16 *)(pword + j);
3965 if(word_read != word_pattern)
3966 {
3967 err = (__u8)1;
3968 err_offset = j;
3969 err_word = word_read;
3970 err_pattern = word_pattern;
3971 return RAM_TEST_FAILED;
3972 }
3973 }
3974 }
3975
3976 smctr_set_page(dev, (__u8 *)tp->ram_access);
3977
3978 return 0;
3979}
3980
/* Parse a received Change Parameters MAC frame (must come from the CRS).
 * Walks the subvector list applying each recognized parameter, tracks
 * which subvectors were seen, and validates the vector length.  Returns
 * POSITIVE_ACK or an E_* response code for smctr_send_rsp().
 */
static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
        __u16 *correlator)
{
        MAC_SUB_VECTOR *rsv;
        signed short vlen;
        __u16 rcode = POSITIVE_ACK;
        unsigned int svectors = F_NO_SUB_VECTORS_FOUND;

        /* This Frame can only come from a CRS */
        if((rmf->dc_sc & SC_MASK) != SC_CRS)
                return E_INAPPROPRIATE_SOURCE_CLASS;

        /* Remove MVID Length from total length. */
        vlen = (signed short)rmf->vl - 4;

        /* Point to First SVID */
        rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));

        /* Search for Appropriate SVID's. */
        while((vlen > 0) && (rcode == POSITIVE_ACK))
        {
                switch(rsv->svi)
                {
                        case CORRELATOR:
                                svectors |= F_CORRELATOR;
                                rcode = smctr_set_corr(dev, rsv, correlator);
                                break;

                        case LOCAL_RING_NUMBER:
                                svectors |= F_LOCAL_RING_NUMBER;
                                rcode = smctr_set_local_ring_num(dev, rsv);
                                break;

                        case ASSIGN_PHYSICAL_DROP:
                                svectors |= F_ASSIGN_PHYSICAL_DROP;
                                rcode = smctr_set_phy_drop(dev, rsv);
                                break;

                        case ERROR_TIMER_VALUE:
                                svectors |= F_ERROR_TIMER_VALUE;
                                rcode = smctr_set_error_timer_value(dev, rsv);
                                break;

                        case AUTHORIZED_FUNCTION_CLASS:
                                svectors |= F_AUTHORIZED_FUNCTION_CLASS;
                                rcode = smctr_set_auth_funct_class(dev, rsv);
                                break;

                        case AUTHORIZED_ACCESS_PRIORITY:
                                svectors |= F_AUTHORIZED_ACCESS_PRIORITY;
                                rcode = smctr_set_auth_access_pri(dev, rsv);
                                break;

                        default:
                                rcode = E_SUB_VECTOR_UNKNOWN;
                                break;
                }

                /* Let Sender Know if SUM of SV length's is
                 * larger then length in MVID length field
                 */
                if((vlen -= rsv->svl) < 0)
                        rcode = E_VECTOR_LENGTH_ERROR;

                rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
        }

        if(rcode == POSITIVE_ACK)
        {
                /* Let Sender Know if MVID length field
                 * is larger then SUM of SV length's
                 */
                if(vlen != 0)
                        rcode = E_VECTOR_LENGTH_ERROR;
                else
                {
                        /* Let Sender Know if Expected SVID Missing */
                        if((svectors & R_CHG_PARM) ^ R_CHG_PARM)
                                rcode = E_MISSING_SUB_VECTOR;
                }
        }

        return rcode;
}
4065
/* Parse a received Initialize MAC frame (must come from the RPS).
 * Same subvector-walk structure as smctr_rcv_chg_param(), but with the
 * smaller set of subvectors an RPS initialize frame may carry.  Returns
 * POSITIVE_ACK or an E_* response code.
 */
static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
        __u16 *correlator)
{
        MAC_SUB_VECTOR *rsv;
        signed short vlen;
        __u16 rcode = POSITIVE_ACK;
        unsigned int svectors = F_NO_SUB_VECTORS_FOUND;

        /* This Frame can only come from a RPS */
        if((rmf->dc_sc & SC_MASK) != SC_RPS)
                return E_INAPPROPRIATE_SOURCE_CLASS;

        /* Remove MVID Length from total length. */
        vlen = (signed short)rmf->vl - 4;

        /* Point to First SVID */
        rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));

        /* Search for Appropriate SVID's */
        while((vlen > 0) && (rcode == POSITIVE_ACK))
        {
                switch(rsv->svi)
                {
                        case CORRELATOR:
                                svectors |= F_CORRELATOR;
                                rcode = smctr_set_corr(dev, rsv, correlator);
                                break;

                        case LOCAL_RING_NUMBER:
                                svectors |= F_LOCAL_RING_NUMBER;
                                rcode = smctr_set_local_ring_num(dev, rsv);
                                break;

                        case ASSIGN_PHYSICAL_DROP:
                                svectors |= F_ASSIGN_PHYSICAL_DROP;
                                rcode = smctr_set_phy_drop(dev, rsv);
                                break;

                        case ERROR_TIMER_VALUE:
                                svectors |= F_ERROR_TIMER_VALUE;
                                rcode = smctr_set_error_timer_value(dev, rsv);
                                break;

                        default:
                                rcode = E_SUB_VECTOR_UNKNOWN;
                                break;
                }

                /* Let Sender Know if SUM of SV length's is
                 * larger then length in MVID length field
                 */
                if((vlen -= rsv->svl) < 0)
                        rcode = E_VECTOR_LENGTH_ERROR;

                rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
        }

        if(rcode == POSITIVE_ACK)
        {
                /* Let Sender Know if MVID length field
                 * is larger then SUM of SV length's
                 */
                if(vlen != 0)
                        rcode = E_VECTOR_LENGTH_ERROR;
                else
                {
                        /* Let Sender Know if Expected SV Missing */
                        if((svectors & R_INIT) ^ R_INIT)
                                rcode = E_MISSING_SUB_VECTOR;
                }
        }

        return rcode;
}
4140
/* Parse a received Transmit Forward MAC frame (must come from the CRS).
 * The only expected subvector is FRAME_FORWARD, which supplies the frame
 * to be forwarded.  Returns POSITIVE_ACK or an E_* response code.
 */
static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf)
{
        MAC_SUB_VECTOR *rsv;
        signed short vlen;
        __u16 rcode = POSITIVE_ACK;
        unsigned int svectors = F_NO_SUB_VECTORS_FOUND;

        /* This Frame can only come from a CRS */
        if((rmf->dc_sc & SC_MASK) != SC_CRS)
                return E_INAPPROPRIATE_SOURCE_CLASS;

        /* Remove MVID Length from total length */
        vlen = (signed short)rmf->vl - 4;

        /* Point to First SVID */
        rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));

        /* Search for Appropriate SVID's */
        while((vlen > 0) && (rcode == POSITIVE_ACK))
        {
                switch(rsv->svi)
                {
                        case FRAME_FORWARD:
                                svectors |= F_FRAME_FORWARD;
                                rcode = smctr_set_frame_forward(dev, rsv,
                                        rmf->dc_sc);
                                break;

                        default:
                                rcode = E_SUB_VECTOR_UNKNOWN;
                                break;
                }

                /* Let Sender Know if SUM of SV length's is
                 * larger then length in MVID length field
                 */
                if((vlen -= rsv->svl) < 0)
                        rcode = E_VECTOR_LENGTH_ERROR;

                rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
        }

        if(rcode == POSITIVE_ACK)
        {
                /* Let Sender Know if MVID length field
                 * is larger then SUM of SV length's
                 */
                if(vlen != 0)
                        rcode = E_VECTOR_LENGTH_ERROR;
                else
                {
                        /* Let Sender Know if Expected SV Missing */
                        if((svectors & R_TX_FORWARD) ^ R_TX_FORWARD)
                                rcode = E_MISSING_SUB_VECTOR;
                }
        }

        return rcode;
}
4200
/* Parse a received Request Address / Request State / Request Attachments
 * MAC frame.  These frames share the same layout: the only expected
 * subvector is CORRELATOR, which must be echoed in the report.  Returns
 * POSITIVE_ACK or an E_* response code.
 */
static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
        MAC_HEADER *rmf, __u16 *correlator)
{
        MAC_SUB_VECTOR *rsv;
        signed short vlen;
        __u16 rcode = POSITIVE_ACK;
        unsigned int svectors = F_NO_SUB_VECTORS_FOUND;

        /* Remove MVID Length from total length */
        vlen = (signed short)rmf->vl - 4;

        /* Point to First SVID */
        rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));

        /* Search for Appropriate SVID's */
        while((vlen > 0) && (rcode == POSITIVE_ACK))
        {
                switch(rsv->svi)
                {
                        case CORRELATOR:
                                svectors |= F_CORRELATOR;
                                rcode = smctr_set_corr(dev, rsv, correlator);
                                break;

                        default:
                                rcode = E_SUB_VECTOR_UNKNOWN;
                                break;
                }

                /* Let Sender Know if SUM of SV length's is
                 * larger then length in MVID length field
                 */
                if((vlen -= rsv->svl) < 0)
                        rcode = E_VECTOR_LENGTH_ERROR;

                rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
        }

        if(rcode == POSITIVE_ACK)
        {
                /* Let Sender Know if MVID length field
                 * is larger then SUM of SV length's
                 */
                if(vlen != 0)
                        rcode = E_VECTOR_LENGTH_ERROR;
                else
                {
                        /* Let Sender Know if Expected SVID Missing */
                        if((svectors & R_RQ_ATTCH_STATE_ADDR)
                                ^ R_RQ_ATTCH_STATE_ADDR)
                                rcode = E_MISSING_SUB_VECTOR;
                }
        }

        return rcode;
}
4257
4258static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
4259 __u16 *correlator)
4260{
4261 MAC_SUB_VECTOR *rsv;
4262 signed short vlen;
4263
4264 *correlator = 0;
4265
4266 /* Remove MVID Length from total length */
4267 vlen = (signed short)rmf->vl - 4;
4268
4269 /* Point to First SVID */
4270 rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));
4271
4272 /* Search for CORRELATOR for RSP to UNKNOWN */
4273 while((vlen > 0) && (*correlator == 0))
4274 {
4275 switch(rsv->svi)
4276 {
4277 case CORRELATOR:
4278 smctr_set_corr(dev, rsv, correlator);
4279 break;
4280
4281 default:
4282 break;
4283 }
4284
4285 vlen -= rsv->svl;
4286 rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
4287 }
4288
4289 return E_UNRECOGNIZED_VECTOR_ID;
4290}
4291
4292/*
4293 * Reset the 825 NIC and exit w:
4294 * 1. The NIC reset cleared (non-reset state), halted and un-initialized.
4295 * 2. TINT masked.
4296 * 3. CBUSY masked.
4297 * 4. TINT clear.
4298 * 5. CBUSY clear.
4299 */
4300static int smctr_reset_adapter(struct net_device *dev)
4301{
4302 struct net_local *tp = netdev_priv(dev);
4303 int ioaddr = dev->base_addr;
4304
4305 /* Reseting the NIC will put it in a halted and un-initialized state. */ smctr_set_trc_reset(ioaddr);
4306 mdelay(200); /* ~2 ms */
4307
4308 smctr_clear_trc_reset(ioaddr);
4309 mdelay(200); /* ~2 ms */
4310
4311 /* Remove any latched interrupts that occurred prior to reseting the
4312 * adapter or possibily caused by line glitches due to the reset.
4313 */
4314 outb(tp->trc_mask | CSR_CLRTINT | CSR_CLRCBUSY, ioaddr + CSR);
4315
4316 return 0;
4317}
4318
4319static int smctr_restart_tx_chain(struct net_device *dev, short queue)
4320{
4321 struct net_local *tp = netdev_priv(dev);
4322 int err = 0;
4323
4324 if(smctr_debug > 10)
4325 printk(KERN_DEBUG "%s: smctr_restart_tx_chain\n", dev->name);
4326
4327 if(tp->num_tx_fcbs_used[queue] != 0 &&
4328 tp->tx_queue_status[queue] == NOT_TRANSMITING)
4329 {
4330 tp->tx_queue_status[queue] = TRANSMITING;
4331 err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
4332 }
4333
4334 return err;
4335}
4336
4337static int smctr_ring_status_chg(struct net_device *dev)
4338{
4339 struct net_local *tp = netdev_priv(dev);
4340
4341 if(smctr_debug > 10)
4342 printk(KERN_DEBUG "%s: smctr_ring_status_chg\n", dev->name);
4343
4344 /* Check for ring_status_flag: whenever MONITOR_STATE_BIT
4345 * Bit is set, check value of monitor_state, only then we
4346 * enable and start transmit/receive timeout (if and only
4347 * if it is MS_ACTIVE_MONITOR_STATE or MS_STANDBY_MONITOR_STATE)
4348 */
4349 if(tp->ring_status_flags == MONITOR_STATE_CHANGED)
4350 {
4351 if((tp->monitor_state == MS_ACTIVE_MONITOR_STATE) ||
4352 (tp->monitor_state == MS_STANDBY_MONITOR_STATE))
4353 {
4354 tp->monitor_state_ready = 1;
4355 }
4356 else
4357 {
4358 /* if adapter is NOT in either active monitor
4359 * or standby monitor state => Disable
4360 * transmit/receive timeout.
4361 */
4362 tp->monitor_state_ready = 0;
4363
4364 /* Ring speed problem, switching to auto mode. */
4365 if(tp->monitor_state == MS_MONITOR_FSM_INACTIVE &&
4366 !tp->cleanup)
4367 {
4368 printk(KERN_INFO "%s: Incorrect ring speed switching.\n",
4369 dev->name);
4370 smctr_set_ring_speed(dev);
4371 }
4372 }
4373 }
4374
4375 if(!(tp->ring_status_flags & RING_STATUS_CHANGED))
4376 return 0;
4377
4378 switch(tp->ring_status)
4379 {
4380 case RING_RECOVERY:
4381 printk(KERN_INFO "%s: Ring Recovery\n", dev->name);
4382 break;
4383
4384 case SINGLE_STATION:
4385 printk(KERN_INFO "%s: Single Statinon\n", dev->name);
4386 break;
4387
4388 case COUNTER_OVERFLOW:
4389 printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
4390 break;
4391
4392 case REMOVE_RECEIVED:
4393 printk(KERN_INFO "%s: Remove Received\n", dev->name);
4394 break;
4395
4396 case AUTO_REMOVAL_ERROR:
4397 printk(KERN_INFO "%s: Auto Remove Error\n", dev->name);
4398 break;
4399
4400 case LOBE_WIRE_FAULT:
4401 printk(KERN_INFO "%s: Lobe Wire Fault\n", dev->name);
4402 break;
4403
4404 case TRANSMIT_BEACON:
4405 printk(KERN_INFO "%s: Transmit Beacon\n", dev->name);
4406 break;
4407
4408 case SOFT_ERROR:
4409 printk(KERN_INFO "%s: Soft Error\n", dev->name);
4410 break;
4411
4412 case HARD_ERROR:
4413 printk(KERN_INFO "%s: Hard Error\n", dev->name);
4414 break;
4415
4416 case SIGNAL_LOSS:
4417 printk(KERN_INFO "%s: Signal Loss\n", dev->name);
4418 break;
4419
4420 default:
4421 printk(KERN_INFO "%s: Unknown ring status change\n",
4422 dev->name);
4423 break;
4424 }
4425
4426 return 0;
4427}
4428
/* Drain completed receive FCBs for the current receive queue.  Non-MAC
 * frames are copied into skbs and handed to the stack; MAC frames go
 * through smctr_process_rx_packet().  Returns 0 (SUCCESS) when the chain
 * is drained, or HARDWARE_FAILED carried over from the last frame status.
 */
static int smctr_rx_frame(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        __u16 queue, status, rx_size, err = 0;
        __u8 *pbuff;

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_rx_frame\n", dev->name);

        queue = tp->receive_queue_number;

        while((status = tp->rx_fcb_curr[queue]->frame_status) != SUCCESS)
        {
                err = HARDWARE_FAILED;

                /* Accept clean frames, or any frame if error packets are
                 * explicitly accepted.
                 */
                if(((status & 0x007f) == 0) ||
                        ((tp->receive_mask & ACCEPT_ERR_PACKETS) != 0))
                {
                        /* frame length less the CRC (4 bytes) + FS (1 byte) */
                        rx_size = tp->rx_fcb_curr[queue]->frame_length - 5;

                        pbuff = smctr_get_rx_pointer(dev, queue);

                        smctr_set_page(dev, pbuff);
                        smctr_disable_16bit(dev);

                        /* pbuff points to addr within one page */
                        pbuff = (__u8 *)PAGE_POINTER(pbuff);

                        if(queue == NON_MAC_QUEUE)
                        {
                                struct sk_buff *skb;

                                skb = dev_alloc_skb(rx_size);
                                if (skb) {
                                        skb_put(skb, rx_size);

                                        skb_copy_to_linear_data(skb, pbuff, rx_size);

                                        /* Update Counters */
                                        tp->MacStat.rx_packets++;
                                        tp->MacStat.rx_bytes += skb->len;

                                        /* Kick the packet on up. */
                                        skb->protocol = tr_type_trans(skb, dev);
                                        netif_rx(skb);
                                } else {
                                        /* skb allocation failed: the frame
                                         * is silently dropped (no drop
                                         * counter is maintained here).
                                         */
                                }
                        }
                        else
                                smctr_process_rx_packet((MAC_HEADER *)pbuff,
                                        rx_size, dev, status);
                }

                smctr_enable_16bit(dev);
                smctr_set_page(dev, (__u8 *)tp->ram_access);
                smctr_update_rx_chain(dev, queue);

                if(err != SUCCESS)
                        break;
        }

        return err;
}
4493
/* Transmit a Duplicate Address Test (DAT) MAC frame addressed to our own
 * station address, then busy-wait (up to ~10 s) for completion.  Returns
 * 0 on a good transmit, OUT_OF_RESOURCES if no FCB is free, or
 * INITIALIZE_FAILED if the frame did not go out cleanly.
 */
static int smctr_send_dat(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        unsigned int i, err;
        MAC_HEADER *tmf;
        FCBlock *fcb;

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_send_dat\n", dev->name);

        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE,
                sizeof(MAC_HEADER))) == (FCBlock *)(-1L))
        {
                return OUT_OF_RESOURCES;
        }

        /* Initialize DAT Data Fields. */
        tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
        tmf->ac = MSB(AC_FC_DAT);
        tmf->fc = LSB(AC_FC_DAT);

        /* DAT frames are sent to our own address: source == destination. */
        for(i = 0; i < 6; i++)
        {
                tmf->sa[i] = dev->dev_addr[i];
                tmf->da[i] = dev->dev_addr[i];

        }

        tmf->vc = DAT;
        tmf->dc_sc = DC_RS | SC_RS;
        tmf->vl = 4;
        tmf->vl = SWAP_BYTES(tmf->vl);

        /* Start Transmit. */
        if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
                return err;

        /* Wait for Transmit to Complete */
        for(i = 0; i < 10000; i++)
        {
                if(fcb->frame_status & FCB_COMMAND_DONE)
                        break;
                mdelay(1);
        }

        /* Check if GOOD frame Tx'ed. */
        if(!(fcb->frame_status & FCB_COMMAND_DONE) ||
                fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
        {
                return INITIALIZE_FAILED;
        }

        /* De-allocated Tx FCB and Frame Buffer
         * The FCB must be de-allocated manually if executing with
         * interrupts disabled, other wise the ISR (LM_Service_Events)
         * will de-allocate it when the interrupt occurs.
         */
        tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
        smctr_update_tx_chain(dev, fcb, MAC_QUEUE);

        return 0;
}
4556
/* Netdev tx-timeout callback (.ndo_tx_timeout). */
static void smctr_timeout(struct net_device *dev)
{
        /*
         * If we get here, some higher level has decided we are broken.
         * There should really be a "kick me" function call instead.
         *
         * Resetting the token ring adapter takes a long time so just
         * fake transmission time and go on trying. Our own timeout
         * routine is in sktr_timer_chk()
         */
        dev->trans_start = jiffies; /* prevent tx timeout */
        netif_wake_queue(dev);
}
4570
/*
 * Gets skb from system, queues it and checks if it can be sent
 * (.ndo_start_xmit).  QueueSkb counts free slots; the netif queue is
 * stopped up front to block a transmit overlap and only re-woken when
 * there is still room after this frame is queued.
 */
static netdev_tx_t smctr_send_packet(struct sk_buff *skb,
        struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);

        if(smctr_debug > 10)
                printk(KERN_DEBUG "%s: smctr_send_packet\n", dev->name);

        /*
         * Block a transmit overlap
         */

        netif_stop_queue(dev);

        if(tp->QueueSkb == 0)
                return NETDEV_TX_BUSY; /* Return with tbusy set: queue full */

        tp->QueueSkb--;
        skb_queue_tail(&tp->SendSkbQueue, skb);
        smctr_hardware_send_packet(dev, tp);
        if(tp->QueueSkb > 0)
                netif_wake_queue(dev);

        return NETDEV_TX_OK;
}
4599
/* Transmit a Lobe Media Test MAC frame (null DA, two WRAP_DATA
 * subvectors) and busy-wait up to ~10 s for completion.  Returns 0 on a
 * good transmit, OUT_OF_RESOURCES if no FCB is free, or
 * LOBE_MEDIA_TEST_FAILED if the frame did not wrap cleanly.
 */
static int smctr_send_lobe_media_test(struct net_device *dev)
{
        struct net_local *tp = netdev_priv(dev);
        MAC_SUB_VECTOR *tsv;
        MAC_HEADER *tmf;
        FCBlock *fcb;
        __u32 i;
        int err;

        if(smctr_debug > 15)
                printk(KERN_DEBUG "%s: smctr_send_lobe_media_test\n", dev->name);

        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(struct trh_hdr)
                + S_WRAP_DATA + S_WRAP_DATA)) == (FCBlock *)(-1L))
        {
                return OUT_OF_RESOURCES;
        }

        /* Initialize DAT Data Fields. */
        tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
        tmf->ac = MSB(AC_FC_LOBE_MEDIA_TEST);
        tmf->fc = LSB(AC_FC_LOBE_MEDIA_TEST);

        /* Destination is the null address; source is our own. */
        for(i = 0; i < 6; i++)
        {
                tmf->da[i] = 0;
                tmf->sa[i] = dev->dev_addr[i];
        }

        tmf->vc = LOBE_MEDIA_TEST;
        tmf->dc_sc = DC_RS | SC_RS;
        tmf->vl = 4;

        /* Append two WRAP_DATA subvectors, growing the vector length. */
        tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
        smctr_make_wrap_data(dev, tsv);
        tmf->vl += tsv->svl;

        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_wrap_data(dev, tsv);
        tmf->vl += tsv->svl;

        /* Start Transmit. */
        tmf->vl = SWAP_BYTES(tmf->vl);
        if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
                return err;

        /* Wait for Transmit to Complete. (10 ms). */
        for(i=0; i < 10000; i++)
        {
                if(fcb->frame_status & FCB_COMMAND_DONE)
                        break;
                mdelay(1);
        }

        /* Check if GOOD frame Tx'ed */
        if(!(fcb->frame_status & FCB_COMMAND_DONE) ||
                fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
        {
                return LOBE_MEDIA_TEST_FAILED;
        }

        /* De-allocated Tx FCB and Frame Buffer
         * The FCB must be de-allocated manually if executing with
         * interrupts disabled, other wise the ISR (LM_Service_Events)
         * will de-allocate it when the interrupt occurs.
         */
        tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
        smctr_update_tx_chain(dev, fcb, MAC_QUEUE);

        return 0;
}
4671
/* Build and transmit a Report Address MAC frame in reply to a Request
 * Address: correlator, physical drop number, upstream neighbor address,
 * address modifier, group address and functional address subvectors.
 * Returns the transmit result, or 0 if no FCB was available (the
 * request is silently dropped in that case).
 */
static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
        __u16 correlator)
{
        MAC_HEADER *tmf;
        MAC_SUB_VECTOR *tsv;
        FCBlock *fcb;

        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
                + S_CORRELATOR + S_PHYSICAL_DROP + S_UPSTREAM_NEIGHBOR_ADDRESS
                + S_ADDRESS_MODIFER + S_GROUP_ADDRESS + S_FUNCTIONAL_ADDRESS))
                == (FCBlock *)(-1L))
        {
                return 0;
        }

        tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
        tmf->vc = RPT_ADDR;
        /* Reply goes back to the requester's source class. */
        tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
        tmf->vl = 4;

        smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_ADDR);

        /* Append each subvector, accumulating the total vector length. */
        tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
        smctr_make_corr(dev, tsv, correlator);

        tmf->vl += tsv->svl;
        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_phy_drop_num(dev, tsv);

        tmf->vl += tsv->svl;
        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_upstream_neighbor_addr(dev, tsv);

        tmf->vl += tsv->svl;
        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_addr_mod(dev, tsv);

        tmf->vl += tsv->svl;
        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_group_addr(dev, tsv);

        tmf->vl += tsv->svl;
        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
        smctr_make_funct_addr(dev, tsv);

        tmf->vl += tsv->svl;

        /* Subtract out MVID and MVL which is
         * include in both vl and MAC_HEADER
         */
/*      fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
        fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
*/
        tmf->vl = SWAP_BYTES(tmf->vl);

        return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
}
4729
/* Build and transmit a Report Station Attachments (RPT_ATTCH) MAC frame
 * in reply to the received MAC frame @rmf.  The reply carries the
 * correlator, product instance ID, functional address, authorized
 * function class and authorized access priority sub-vectors.
 *
 * Returns 0 when no transmit FCB is available (the reply is silently
 * dropped), otherwise the result of smctr_trc_send_packet().
 */
static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
	__u16 correlator)
{
	MAC_HEADER *tmf;
	MAC_SUB_VECTOR *tsv;
	FCBlock *fcb;

	/* (-1L) from smctr_get_tx_fcb() means out of tx resources. */
	if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
		+ S_CORRELATOR + S_PRODUCT_INSTANCE_ID + S_FUNCTIONAL_ADDRESS
		+ S_AUTHORIZED_FUNCTION_CLASS + S_AUTHORIZED_ACCESS_PRIORITY))
		== (FCBlock *)(-1L))
	{
		return 0;
	}

	tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
	tmf->vc = RPT_ATTCH;
	/* Reply to the requester's class (source class -> dest class). */
	tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
	tmf->vl = 4;	/* MVID + MVL; sub-vector lengths added below. */

	smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_ATTCH);

	/* Append the sub-vectors back-to-back, growing vl as we go. */
	tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
	smctr_make_corr(dev, tsv, correlator);

	tmf->vl += tsv->svl;
	tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
	smctr_make_product_id(dev, tsv);

	tmf->vl += tsv->svl;
	tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
	smctr_make_funct_addr(dev, tsv);

	tmf->vl += tsv->svl;
	tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
	smctr_make_auth_funct_class(dev, tsv);

	tmf->vl += tsv->svl;
	tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
	smctr_make_access_pri(dev, tsv);

	tmf->vl += tsv->svl;

	/* Subtract out MVID and MVL which is
	 * include in both vl and MAC_HEADER
	 */
/*	fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
	fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
*/
	/* Vector length goes on the wire big-endian. */
	tmf->vl = SWAP_BYTES(tmf->vl);

	return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
}
4783
/* Build and transmit a Report Station State (RPT_STATE) MAC frame in
 * reply to the received MAC frame @rmf.  The reply carries the
 * correlator, ring station version, ring station status and station
 * identifier sub-vectors.
 *
 * Returns 0 when no transmit FCB is available (the reply is silently
 * dropped), otherwise the result of smctr_trc_send_packet().
 */
static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
	__u16 correlator)
{
	MAC_HEADER *tmf;
	MAC_SUB_VECTOR *tsv;
	FCBlock *fcb;

	/* (-1L) from smctr_get_tx_fcb() means out of tx resources. */
	if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
		+ S_CORRELATOR + S_RING_STATION_VERSION_NUMBER
		+ S_RING_STATION_STATUS + S_STATION_IDENTIFER))
		== (FCBlock *)(-1L))
	{
		return 0;
	}

	tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
	tmf->vc = RPT_STATE;
	/* Reply to the requester's class (source class -> dest class). */
	tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
	tmf->vl = 4;	/* MVID + MVL; sub-vector lengths added below. */

	smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_STATE);

	/* Append the sub-vectors back-to-back, growing vl as we go. */
	tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
	smctr_make_corr(dev, tsv, correlator);

	tmf->vl += tsv->svl;
	tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
	smctr_make_ring_station_version(dev, tsv);

	tmf->vl += tsv->svl;
	tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
	smctr_make_ring_station_status(dev, tsv);

	tmf->vl += tsv->svl;
	tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
	smctr_make_station_id(dev, tsv);

	tmf->vl += tsv->svl;

	/* Subtract out MVID and MVL which is
	 * include in both vl and MAC_HEADER
	 */
/*	fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
	fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
*/
	/* Vector length goes on the wire big-endian. */
	tmf->vl = SWAP_BYTES(tmf->vl);

	return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
}
4833
/* Build and transmit a Report Transmit Forward (RPT_TX_FORWARD) MAC
 * frame in reply to @rmf, carrying only a transmit status code
 * sub-vector built from @tx_fstatus.
 *
 * Returns 0 when no transmit FCB is available (the reply is silently
 * dropped), otherwise the result of smctr_trc_send_packet().
 */
static int smctr_send_rpt_tx_forward(struct net_device *dev,
	MAC_HEADER *rmf, __u16 tx_fstatus)
{
	MAC_HEADER *tmf;
	MAC_SUB_VECTOR *tsv;
	FCBlock *fcb;

	/* (-1L) from smctr_get_tx_fcb() means out of tx resources. */
	if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
		+ S_TRANSMIT_STATUS_CODE)) == (FCBlock *)(-1L))
	{
		return 0;
	}

	tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
	tmf->vc = RPT_TX_FORWARD;
	/* Reply to the requester's class (source class -> dest class). */
	tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
	tmf->vl = 4;	/* MVID + MVL; sub-vector length added below. */

	smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_TX_FORWARD);

	tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
	smctr_make_tx_status_code(dev, tsv, tx_fstatus);

	tmf->vl += tsv->svl;

	/* Subtract out MVID and MVL which is
	 * include in both vl and MAC_HEADER
	 */
/*	fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
	fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
*/
	/* Vector length goes on the wire big-endian. */
	tmf->vl = SWAP_BYTES(tmf->vl);

	return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
}
4869
/* Build a Response (RSP) MAC frame replying to @rmf.
 *
 * NOTE(review): this function looks unfinished.  @rcode is accepted but
 * never written into the frame even though S_RESPONSE_CODE space is
 * reserved in the FCB allocation, tmf->vl is never byte-swapped (the
 * sibling smctr_send_rpt_* builders all do SWAP_BYTES before sending),
 * and the frame is never handed to smctr_trc_send_packet() -- the
 * allocated FCB is simply abandoned.  Confirm intended behavior before
 * relying on this path.
 */
static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
	__u16 rcode, __u16 correlator)
{
	MAC_HEADER *tmf;
	MAC_SUB_VECTOR *tsv;
	FCBlock *fcb;

	/* (-1L) from smctr_get_tx_fcb() means out of tx resources. */
	if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
		+ S_CORRELATOR + S_RESPONSE_CODE)) == (FCBlock *)(-1L))
	{
		return 0;
	}

	tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
	tmf->vc = RSP;
	/* Reply to the requester's class (source class -> dest class). */
	tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
	tmf->vl = 4;	/* MVID + MVL only; see NOTE above. */

	smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RSP);

	tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
	smctr_make_corr(dev, tsv, correlator);

	return 0;
}
4895
/* Build and transmit a Request Initialization (RQ_INIT) MAC frame to
 * the Ring Parameter Server, carrying the product ID, upstream
 * neighbor address, ring station version and address modifier
 * sub-vectors.  The transmit is retried until either four frames have
 * gone out without the error bit set or a frame comes back with all
 * AC bits set (someone copied it).
 *
 * Returns 0 if no transmit FCB is available, HARDWARE_FAILED if a
 * transmit never completes, an error from smctr_trc_send_packet(),
 * or the result of smctr_join_complete_state().
 */
static int smctr_send_rq_init(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	MAC_HEADER *tmf;
	MAC_SUB_VECTOR *tsv;
	FCBlock *fcb;
	unsigned int i, count = 0;
	__u16 fstatus;
	int err;

	do {
		/* (-1L) means out of transmit resources. */
		if(((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
			+ S_PRODUCT_INSTANCE_ID + S_UPSTREAM_NEIGHBOR_ADDRESS
			+ S_RING_STATION_VERSION_NUMBER + S_ADDRESS_MODIFER))
			== (FCBlock *)(-1L)))
		{
			return 0;
		}

		tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
		tmf->vc = RQ_INIT;
		/* Sourced by this ring station, addressed to the RPS. */
		tmf->dc_sc = DC_RPS | SC_RS;
		tmf->vl = 4;	/* MVID + MVL; grown per sub-vector. */

		smctr_make_8025_hdr(dev, NULL, tmf, AC_FC_RQ_INIT);

		/* Append the sub-vectors back-to-back. */
		tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
		smctr_make_product_id(dev, tsv);

		tmf->vl += tsv->svl;
		tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
		smctr_make_upstream_neighbor_addr(dev, tsv);

		tmf->vl += tsv->svl;
		tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
		smctr_make_ring_station_version(dev, tsv);

		tmf->vl += tsv->svl;
		tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
		smctr_make_addr_mod(dev, tsv);

		tmf->vl += tsv->svl;

		/* Subtract out MVID and MVL which is
		 * include in both vl and MAC_HEADER
		 */
/*		fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
		fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
*/
		/* Vector length goes on the wire big-endian. */
		tmf->vl = SWAP_BYTES(tmf->vl);

		if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
			return err;

		/* Wait for Transmit to Complete (up to ~10 s, 1 ms polls) */
		for(i = 0; i < 10000; i++)
		{
			if(fcb->frame_status & FCB_COMMAND_DONE)
				break;
			mdelay(1);
		}

		/* Check if GOOD frame Tx'ed */
		fstatus = fcb->frame_status;

		if(!(fstatus & FCB_COMMAND_DONE))
			return HARDWARE_FAILED;

		/* Count only error-free transmissions toward the limit. */
		if(!(fstatus & FCB_TX_STATUS_E))
			count++;

		/* De-allocated Tx FCB and Frame Buffer
		 * The FCB must be de-allocated manually if executing with
		 * interrupts disabled, other wise the ISR (LM_Service_Events)
		 * will de-allocate it when the interrupt occurs.
		 */
		tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
		smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
	} while(count < 4 && ((fstatus & FCB_TX_AC_BITS) ^ FCB_TX_AC_BITS));

	return smctr_join_complete_state(dev);
}
4978
/* Forward the data portion of a received TX Forward MAC frame @rmf
 * back onto the ring, reusing the received buffer as the transmit
 * buffer (zero-copy: only an FCB is allocated).
 *
 * On success stores the transmit frame status in *@tx_fstatus and
 * returns A_FRAME_WAS_FORWARDED.  Returns 0 when @rmf is the end of
 * the forward chain or no FCB is available, HARDWARE_FAILED if the
 * transmit never completes, or an error from the issue/send helpers.
 *
 * NOTE(review): tp appears unused in this body, but TRC_POINTER() is a
 * macro (cf. SMC_PAGE_OFFSET in smctr.h) that may expand to a use of
 * tp -- verify before removing the local.
 */
static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
	__u16 *tx_fstatus)
{
	struct net_local *tp = netdev_priv(dev);
	FCBlock *fcb;
	unsigned int i;
	int err;

	/* Check if this is the END POINT of the Transmit Forward Chain. */
	if(rmf->vl <= 18)
		return 0;

	/* Allocate Transmit FCB only by requesting 0 bytes
	 * of data buffer.
	 */
	if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, 0)) == (FCBlock *)(-1L))
		return 0;

	/* Set pointer to Transmit Frame Buffer to the data
	 * portion of the received TX Forward frame, making
	 * sure to skip over the Vector Code (vc) and Vector
	 * length (vl).
	 */
	fcb->bdb_ptr->trc_data_block_ptr = TRC_POINTER((__u32)rmf
		+ sizeof(MAC_HEADER) + 2);
	fcb->bdb_ptr->data_block_ptr = (__u16 *)((__u32)rmf
		+ sizeof(MAC_HEADER) + 2);

	/* Frame length = vector length minus MVID/MVL (4) and vc/vl (2). */
	fcb->frame_length = rmf->vl - 4 - 2;
	fcb->bdb_ptr->buffer_length = rmf->vl - 4 - 2;

	if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
		return err;

	/* Wait for Transmit to Complete (up to ~10 s, 1 ms polls) */
	for(i = 0; i < 10000; i++)
	{
		if(fcb->frame_status & FCB_COMMAND_DONE)
			break;
		mdelay(1);
	}

	/* Check if GOOD frame Tx'ed; if not, kick the queue once and
	 * wait again before declaring a hardware failure. */
	if(!(fcb->frame_status & FCB_COMMAND_DONE))
	{
		if((err = smctr_issue_resume_tx_fcb_cmd(dev, MAC_QUEUE)))
			return err;

		for(i = 0; i < 10000; i++)
		{
			if(fcb->frame_status & FCB_COMMAND_DONE)
				break;
			mdelay(1);
		}

		if(!(fcb->frame_status & FCB_COMMAND_DONE))
			return HARDWARE_FAILED;
	}

	*tx_fstatus = fcb->frame_status;

	return A_FRAME_WAS_FORWARDED;
}
5042
5043static int smctr_set_auth_access_pri(struct net_device *dev,
5044 MAC_SUB_VECTOR *rsv)
5045{
5046 struct net_local *tp = netdev_priv(dev);
5047
5048 if(rsv->svl != S_AUTHORIZED_ACCESS_PRIORITY)
5049 return E_SUB_VECTOR_LENGTH_ERROR;
5050
5051 tp->authorized_access_priority = (rsv->svv[0] << 8 | rsv->svv[1]);
5052
5053 return POSITIVE_ACK;
5054}
5055
5056static int smctr_set_auth_funct_class(struct net_device *dev,
5057 MAC_SUB_VECTOR *rsv)
5058{
5059 struct net_local *tp = netdev_priv(dev);
5060
5061 if(rsv->svl != S_AUTHORIZED_FUNCTION_CLASS)
5062 return E_SUB_VECTOR_LENGTH_ERROR;
5063
5064 tp->authorized_function_classes = (rsv->svv[0] << 8 | rsv->svv[1]);
5065
5066 return POSITIVE_ACK;
5067}
5068
5069static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv,
5070 __u16 *correlator)
5071{
5072 if(rsv->svl != S_CORRELATOR)
5073 return E_SUB_VECTOR_LENGTH_ERROR;
5074
5075 *correlator = (rsv->svv[0] << 8 | rsv->svv[1]);
5076
5077 return POSITIVE_ACK;
5078}
5079
/* Program the soft-error report timer from CRS sub-vector @rsv.  The
 * two-byte big-endian value is multiplied by 10 before being written
 * to the RW_TER_THRESHOLD adapter register.
 *
 * Returns E_SUB_VECTOR_LENGTH_ERROR on a malformed sub-vector, an
 * error from smctr_wait_cmd(), or POSITIVE_ACK.
 *
 * NOTE(review): the return value of smctr_issue_write_word_cmd() is
 * ignored; a failed issue is only caught indirectly by wait_cmd.
 */
static int smctr_set_error_timer_value(struct net_device *dev,
	MAC_SUB_VECTOR *rsv)
{
	__u16 err_tval;
	int err;

	if(rsv->svl != S_ERROR_TIMER_VALUE)
		return E_SUB_VECTOR_LENGTH_ERROR;

	err_tval = (rsv->svv[0] << 8 | rsv->svv[1])*10;

	smctr_issue_write_word_cmd(dev, RW_TER_THRESHOLD, &err_tval);

	if((err = smctr_wait_cmd(dev)))
		return err;

	return POSITIVE_ACK;
}
5098
5099static int smctr_set_frame_forward(struct net_device *dev,
5100 MAC_SUB_VECTOR *rsv, __u8 dc_sc)
5101{
5102 if((rsv->svl < 2) || (rsv->svl > S_FRAME_FORWARD))
5103 return E_SUB_VECTOR_LENGTH_ERROR;
5104
5105 if((dc_sc & DC_MASK) != DC_CRS)
5106 {
5107 if(rsv->svl >= 2 && rsv->svl < 20)
5108 return E_TRANSMIT_FORWARD_INVALID;
5109
5110 if((rsv->svv[0] != 0) || (rsv->svv[1] != 0))
5111 return E_TRANSMIT_FORWARD_INVALID;
5112 }
5113
5114 return POSITIVE_ACK;
5115}
5116
5117static int smctr_set_local_ring_num(struct net_device *dev,
5118 MAC_SUB_VECTOR *rsv)
5119{
5120 struct net_local *tp = netdev_priv(dev);
5121
5122 if(rsv->svl != S_LOCAL_RING_NUMBER)
5123 return E_SUB_VECTOR_LENGTH_ERROR;
5124
5125 if(tp->ptr_local_ring_num)
5126 *(__u16 *)(tp->ptr_local_ring_num)
5127 = (rsv->svv[0] << 8 | rsv->svv[1]);
5128
5129 return POSITIVE_ACK;
5130}
5131
/* Assert Control Attention toward the TRC so it notices newly posted
 * work.  On the 585 BIC the CA bit is written through the HWR
 * register; on other BICs it is pulsed through CSR (set then clear,
 * preserving the interrupt mask in tp->trc_mask).  Always returns 0.
 */
static unsigned short smctr_set_ctrl_attention(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	if(tp->bic_type == BIC_585_CHIP)
		outb((tp->trc_mask | HWR_CA), ioaddr + HWR);
	else
	{
		/* Pulse CA: set it, then restore the bare mask. */
		outb((tp->trc_mask | CSR_CA), ioaddr + CSR);
		outb(tp->trc_mask, ioaddr + CSR);
	}

	return 0;
}
5147
/* net_device set_multicast_list hook.  This driver does not implement
 * multicast filtering; the hook only emits a debug trace.
 */
static void smctr_set_multicast_list(struct net_device *dev)
{
	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_set_multicast_list\n", dev->name);
}
5153
/* Map the shared-RAM page containing @buf into the host window: take
 * @buf's offset from the RAM window base, mask out the page bits and
 * write them to the PR (page register) port.  Always returns 0.
 */
static int smctr_set_page(struct net_device *dev, __u8 *buf)
{
	struct net_local *tp = netdev_priv(dev);
	__u8 amask;
	__u32 tptr;

	tptr = (__u32)buf - (__u32)tp->ram_access;
	amask = (__u8)((tptr & PR_PAGE_MASK) >> 8);
	outb(amask, dev->base_addr + PR);

	return 0;
}
5166
/* Write the physical drop number from CRS sub-vector @rsv into the
 * adapter's RW_PHYSICAL_DROP_NUMBER register.  Returns
 * E_SUB_VECTOR_LENGTH_ERROR on a malformed sub-vector, an error from
 * smctr_wait_cmd(), or POSITIVE_ACK.
 */
static int smctr_set_phy_drop(struct net_device *dev, MAC_SUB_VECTOR *rsv)
{
	int err;

	if(rsv->svl != S_PHYSICAL_DROP)
		return E_SUB_VECTOR_LENGTH_ERROR;

	smctr_issue_write_byte_cmd(dev, RW_PHYSICAL_DROP_NUMBER, &rsv->svv[0]);
	if((err = smctr_wait_cmd(dev)))
		return err;

	return POSITIVE_ACK;
}
5180
5181/* Reset the ring speed to the opposite of what it was. This auto-pilot
5182 * mode requires a complete reset and re-init of the adapter.
5183 */
static int smctr_set_ring_speed(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	int err;

	/* Toggle between 16 Mbps and 4 Mbps UTP media types. */
	if(tp->media_type == MEDIA_UTP_16)
		tp->media_type = MEDIA_UTP_4;
	else
		tp->media_type = MEDIA_UTP_16;

	smctr_enable_16bit(dev);

	/* Re-Initialize adapter's internal registers */
	smctr_reset_adapter(dev);

	if((err = smctr_init_card_real(dev)))
		return err;

	/* Re-arm board and TRC interrupts after the reset. */
	smctr_enable_bic_int(dev);

	if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
		return err;

	smctr_disable_16bit(dev);

	return 0;
}
5211
/* Enable receive lookahead and, on 16-bit ISA adapters, probe whether
 * shared RAM is really reachable with 16-bit accesses: write a test
 * pattern in 16-bit mode, read it back in 8-bit mode, and if the
 * readback differs flag FORCED_16BIT_MODE.  The probed word is saved
 * and restored, so shared RAM is left untouched.  Always returns 0.
 */
static int smctr_set_rx_look_ahead(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	__u16 sword, rword;

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_set_rx_look_ahead_flag\n", dev->name);

	tp->adapter_flags &= ~(FORCED_16BIT_MODE);
	tp->adapter_flags |= RX_VALID_LOOKAHEAD;

	if(tp->adapter_bus == BUS_ISA16_TYPE)
	{
		/* Save the first word of shared RAM, then plant a
		 * recognizable pattern. */
		sword = *((__u16 *)(tp->ram_access));
		*((__u16 *)(tp->ram_access)) = 0x1234;

		smctr_disable_16bit(dev);
		rword = *((__u16 *)(tp->ram_access));
		smctr_enable_16bit(dev);

		if(rword != 0x1234)
			tp->adapter_flags |= FORCED_16BIT_MODE;

		/* Restore the saved word. */
		*((__u16 *)(tp->ram_access)) = sword;
	}

	return 0;
}
5240
/* Put the TRC into reset: read-modify-write the MSR register at
 * @ioaddr, setting the MSR_RST bit while preserving the other bits.
 * Always returns 0.
 */
static int smctr_set_trc_reset(int ioaddr)
{
	__u8 r;

	r = inb(ioaddr + MSR);
	outb(MSR_RST | r, ioaddr + MSR);

	return 0;
}
5250
5251/*
5252 * This function can be called if the adapter is busy or not.
5253 */
static int smctr_setup_single_cmd(struct net_device *dev,
	__u16 command, __u16 subcommand)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned int err;

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_setup_single_cmd\n", dev->name);

	/* Wait for any in-flight command to drain before posting a new
	 * one (this entry point is safe to call while busy). */
	if((err = smctr_wait_while_cbusy(dev)))
		return err;

	if((err = (unsigned int)smctr_wait_cmd(dev)))
		return err;

	/* Post @command/@subcommand in the ACB head and kick the TRC. */
	tp->acb_head->cmd_done_status = 0;
	tp->acb_head->cmd = command;
	tp->acb_head->subcmd = subcommand;

	err = smctr_issue_resume_acb_cmd(dev);

	return err;
}
5277
5278/*
5279 * This function can not be called with the adapter busy.
5280 */
static int smctr_setup_single_cmd_w_data(struct net_device *dev,
	__u16 command, __u16 subcommand)
{
	struct net_local *tp = netdev_priv(dev);

	/* Post @command/@subcommand with the shared misc_command_data
	 * buffer as the data area (TRC-relative address), then kick the
	 * TRC.  Caller must ensure the adapter is not busy. */
	tp->acb_head->cmd_done_status = ACB_COMMAND_NOT_DONE;
	tp->acb_head->cmd = command;
	tp->acb_head->subcmd = subcommand;
	tp->acb_head->data_offset_lo
		= (__u16)TRC_POINTER(tp->misc_command_data);

	return smctr_issue_resume_acb_cmd(dev);
}
5294
5295static char *smctr_malloc(struct net_device *dev, __u16 size)
5296{
5297 struct net_local *tp = netdev_priv(dev);
5298 char *m;
5299
5300 m = (char *)(tp->ram_access + tp->sh_mem_used);
5301 tp->sh_mem_used += (__u32)size;
5302
5303 return m;
5304}
5305
5306static int smctr_status_chg(struct net_device *dev)
5307{
5308 struct net_local *tp = netdev_priv(dev);
5309
5310 if(smctr_debug > 10)
5311 printk(KERN_DEBUG "%s: smctr_status_chg\n", dev->name);
5312
5313 switch(tp->status)
5314 {
5315 case OPEN:
5316 break;
5317
5318 case CLOSED:
5319 break;
5320
5321 /* Interrupt driven open() completion. XXX */
5322 case INITIALIZED:
5323 tp->group_address_0 = 0;
5324 tp->group_address[0] = 0;
5325 tp->group_address[1] = 0;
5326 tp->functional_address_0 = 0;
5327 tp->functional_address[0] = 0;
5328 tp->functional_address[1] = 0;
5329 smctr_open_tr(dev);
5330 break;
5331
5332 default:
5333 printk(KERN_INFO "%s: status change unknown %x\n",
5334 dev->name, tp->status);
5335 break;
5336 }
5337
5338 return 0;
5339}
5340
/* Hand a prepared transmit FCB @fcb on @queue to the TRC.  Marks @fcb
 * as the end of the chain, enables an interrupt on the previous FCB
 * when the queue has more than one, and issues a resume-transmit
 * command if the queue was idle.  Returns 0 or an error from
 * smctr_issue_resume_tx_fcb_cmd().
 */
static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
	__u16 queue)
{
	struct net_local *tp = netdev_priv(dev);
	int err = 0;

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_trc_send_packet\n", dev->name);

	fcb->info = FCB_CHAIN_END | FCB_ENABLE_TFS;
	if(tp->num_tx_fcbs[queue] != 1)
		fcb->back_ptr->info = FCB_INTERRUPT_ENABLE | FCB_ENABLE_TFS;

	/* Only kick the TRC if the queue was idle; otherwise it will
	 * pick this FCB up by following the chain. */
	if(tp->tx_queue_status[queue] == NOT_TRANSMITING)
	{
		tp->tx_queue_status[queue] = TRANSMITING;
		err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
	}

	return err;
}
5362
/* Reap completed transmit FCBs from @queue: walk from the queue's end
 * pointer while frame_status is non-SUCCESS, reclaiming each FCB via
 * smctr_update_tx_chain().  Returns SUCCESS (0), or HARDWARE_FAILED
 * on an error status (bits 0x7e00) or a reclaim failure.
 *
 * NOTE(review): cstatus is computed from the AR/CR/E status bits but
 * never consumed -- looks like dead code left from a status-reporting
 * path; verify before removing.
 */
static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue)
{
	struct net_local *tp = netdev_priv(dev);
	__u16 status, err = 0;
	int cstatus;

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_tx_complete\n", dev->name);

	while((status = tp->tx_fcb_end[queue]->frame_status) != SUCCESS)
	{
		/* Any of the hardware error bits set? */
		if(status & 0x7e00 )
		{
			err = HARDWARE_FAILED;
			break;
		}

		if((err = smctr_update_tx_chain(dev, tp->tx_fcb_end[queue],
			queue)) != SUCCESS)
			break;

		smctr_disable_16bit(dev);

		if(tp->mode_bits & UMAC)
		{
			/* Derive a completion status from the address
			 * recognized (AR) and frame copied (CR) bits. */
			if(!(status & (FCB_TX_STATUS_AR1 | FCB_TX_STATUS_AR2)))
				cstatus = NO_SUCH_DESTINATION;
			else
			{
				if(!(status & (FCB_TX_STATUS_CR1 | FCB_TX_STATUS_CR2)))
					cstatus = DEST_OUT_OF_RESOURCES;
				else
				{
					if(status & FCB_TX_STATUS_E)
						cstatus = MAX_COLLISIONS;
					else
						cstatus = SUCCESS;
				}
			}
		}
		else
			cstatus = SUCCESS;

		if(queue == BUG_QUEUE)
			err = SUCCESS;

		smctr_enable_16bit(dev);
		if(err != SUCCESS)
			break;
	}

	return err;
}
5416
/* Copy up to @bytes of @skb's data into paged shared RAM starting at
 * the shared-RAM address @pbuff.  The copy is split at page
 * boundaries: each iteration selects the page containing the current
 * destination, copies as much as fits in the remaining window, and
 * advances.  Always returns 0.
 */
static unsigned short smctr_tx_move_frame(struct net_device *dev,
	struct sk_buff *skb, __u8 *pbuff, unsigned int bytes)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned int ram_usable;
	__u32 flen, len, offset = 0;
	__u8 *frag, *page;

	if(smctr_debug > 10)
		printk(KERN_DEBUG "%s: smctr_tx_move_frame\n", dev->name);

	/* Usable window size in bytes (ram_usable is in KB). */
	ram_usable = ((unsigned int)tp->ram_usable) << 10;
	frag = skb->data;
	flen = skb->len;

	while(flen > 0 && bytes > 0)
	{
		smctr_set_page(dev, pbuff);

		offset = SMC_PAGE_OFFSET(pbuff);

		/* Clip the copy to the end of the current page, the
		 * remaining frame data, and the caller's byte budget. */
		if(offset + flen > ram_usable)
			len = ram_usable - offset;
		else
			len = flen;

		if(len > bytes)
			len = bytes;

		page = (char *) (offset + tp->ram_access);
		memcpy(page, frag, len);

		flen -=len;
		bytes -= len;
		frag += len;
		pbuff += len;
	}

	return 0;
}
5457
5458/* Update the error statistic counters for this adapter. */
5459static int smctr_update_err_stats(struct net_device *dev)
5460{
5461 struct net_local *tp = netdev_priv(dev);
5462 struct tr_statistics *tstat = &tp->MacStat;
5463
5464 if(tstat->internal_errors)
5465 tstat->internal_errors
5466 += *(tp->misc_command_data + 0) & 0x00ff;
5467
5468 if(tstat->line_errors)
5469 tstat->line_errors += *(tp->misc_command_data + 0) >> 8;
5470
5471 if(tstat->A_C_errors)
5472 tstat->A_C_errors += *(tp->misc_command_data + 1) & 0x00ff;
5473
5474 if(tstat->burst_errors)
5475 tstat->burst_errors += *(tp->misc_command_data + 1) >> 8;
5476
5477 if(tstat->abort_delimiters)
5478 tstat->abort_delimiters += *(tp->misc_command_data + 2) >> 8;
5479
5480 if(tstat->recv_congest_count)
5481 tstat->recv_congest_count
5482 += *(tp->misc_command_data + 3) & 0x00ff;
5483
5484 if(tstat->lost_frames)
5485 tstat->lost_frames
5486 += *(tp->misc_command_data + 3) >> 8;
5487
5488 if(tstat->frequency_errors)
5489 tstat->frequency_errors += *(tp->misc_command_data + 4) & 0x00ff;
5490
5491 if(tstat->frame_copied_errors)
5492 tstat->frame_copied_errors
5493 += *(tp->misc_command_data + 4) >> 8;
5494
5495 if(tstat->token_errors)
5496 tstat->token_errors += *(tp->misc_command_data + 5) >> 8;
5497
5498 return 0;
5499}
5500
/* Recycle the current receive FCB on @queue after its frame has been
 * consumed, and advance the BDB ring past the buffers the frame used
 * (rounding the frame length up to whole buffer descriptors and
 * handling wrap-around of the BDB ring).  Always returns 0.
 */
static int smctr_update_rx_chain(struct net_device *dev, __u16 queue)
{
	struct net_local *tp = netdev_priv(dev);
	FCBlock *fcb;
	BDBlock *bdb;
	__u16 size, len;

	fcb = tp->rx_fcb_curr[queue];
	len = fcb->frame_length;

	/* Return the FCB to the hardware and advance the FCB cursor. */
	fcb->frame_status = 0;
	fcb->info = FCB_CHAIN_END;
	fcb->back_ptr->info = FCB_WARNING;

	tp->rx_fcb_curr[queue] = tp->rx_fcb_curr[queue]->next_ptr;

	/* update RX BDBs: round len up to whole data buffers. */
	size = (len >> RX_BDB_SIZE_SHIFT);
	if(len & RX_DATA_BUFFER_SIZE_MASK)
		size += sizeof(BDBlock);
	size &= (~RX_BDB_SIZE_MASK);

	/* check if wrap around */
	bdb = (BDBlock *)((__u32)(tp->rx_bdb_curr[queue]) + (__u32)(size));
	if((__u32)bdb >= (__u32)tp->rx_bdb_end[queue])
	{
		bdb = (BDBlock *)((__u32)(tp->rx_bdb_head[queue])
			+ (__u32)(bdb) - (__u32)(tp->rx_bdb_end[queue]));
	}

	/* Move the chain-end marker forward to the new cursor. */
	bdb->back_ptr->info = BDB_CHAIN_END;
	tp->rx_bdb_curr[queue]->back_ptr->info = BDB_NOT_CHAIN_END;
	tp->rx_bdb_curr[queue] = bdb;

	return 0;
}
5537
/* Reclaim a completed transmit FCB @fcb on @queue: release its buffer
 * allocation, advance the queue end pointer and wake the netdev
 * queue.  Returns 0 on success or HARDWARE_FAILED when the queue's
 * bookkeeping is inconsistent (no FCBs in use, or the buffer usage
 * underflows -- in which case usage is reset to 0).
 */
static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
	__u16 queue)
{
	struct net_local *tp = netdev_priv(dev);

	if(smctr_debug > 20)
		printk(KERN_DEBUG "smctr_update_tx_chain\n");

	if(tp->num_tx_fcbs_used[queue] <= 0)
		return HARDWARE_FAILED;
	else
	{
		if(tp->tx_buff_used[queue] < fcb->memory_alloc)
		{
			tp->tx_buff_used[queue] = 0;
			return HARDWARE_FAILED;
		}

		tp->tx_buff_used[queue] -= fcb->memory_alloc;

		/* if all transmit buffer are cleared
		 * need to set the tx_buff_curr[] to tx_buff_head[]
		 * otherwise, tx buffer will be segregate and cannot
		 * accommodate and buffer greater than (curr - head) and
		 * (end - curr) since we do not allow wrap around allocation.
		 */
		if(tp->tx_buff_used[queue] == 0)
			tp->tx_buff_curr[queue] = tp->tx_buff_head[queue];

		tp->num_tx_fcbs_used[queue]--;
		fcb->frame_status = 0;
		tp->tx_fcb_end[queue] = fcb->next_ptr;
		netif_wake_queue(dev);
		return 0;
	}
}
5574
5575static int smctr_wait_cmd(struct net_device *dev)
5576{
5577 struct net_local *tp = netdev_priv(dev);
5578 unsigned int loop_count = 0x20000;
5579
5580 if(smctr_debug > 10)
5581 printk(KERN_DEBUG "%s: smctr_wait_cmd\n", dev->name);
5582
5583 while(loop_count)
5584 {
5585 if(tp->acb_head->cmd_done_status & ACB_COMMAND_DONE)
5586 break;
5587 udelay(1);
5588 loop_count--;
5589 }
5590
5591 if(loop_count == 0)
5592 return HARDWARE_FAILED;
5593
5594 if(tp->acb_head->cmd_done_status & 0xff)
5595 return HARDWARE_FAILED;
5596
5597 return 0;
5598}
5599
5600static int smctr_wait_while_cbusy(struct net_device *dev)
5601{
5602 struct net_local *tp = netdev_priv(dev);
5603 unsigned int timeout = 0x20000;
5604 int ioaddr = dev->base_addr;
5605 __u8 r;
5606
5607 if(tp->bic_type == BIC_585_CHIP)
5608 {
5609 while(timeout)
5610 {
5611 r = inb(ioaddr + HWR);
5612 if((r & HWR_CBUSY) == 0)
5613 break;
5614 timeout--;
5615 }
5616 }
5617 else
5618 {
5619 while(timeout)
5620 {
5621 r = inb(ioaddr + CSR);
5622 if((r & CSR_CBUSY) == 0)
5623 break;
5624 timeout--;
5625 }
5626 }
5627
5628 if(timeout)
5629 return 0;
5630 else
5631 return HARDWARE_FAILED;
5632}
5633
5634#ifdef MODULE
5635
5636static struct net_device* dev_smctr[SMCTR_MAX_ADAPTERS];
5637static int io[SMCTR_MAX_ADAPTERS];
5638static int irq[SMCTR_MAX_ADAPTERS];
5639
5640MODULE_LICENSE("GPL");
5641MODULE_FIRMWARE("tr_smctr.bin");
5642
5643module_param_array(io, int, NULL, 0);
5644module_param_array(irq, int, NULL, 0);
5645module_param(ringspeed, int, 0);
5646
/* Allocate, probe and register the @n-th token-ring device using the
 * module parameters io[n] and irq[n].  Returns the registered
 * net_device or an ERR_PTR; on failure all acquired resources (MCA
 * slot, I/O region, IRQ, netdev) are released.
 */
static struct net_device * __init setup_card(int n)
{
	struct net_device *dev = alloc_trdev(sizeof(struct net_local));
	int err;

	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->irq = irq[n];
	err = smctr_probe1(dev, io[n]);
	if (err)
		goto out;

	err = register_netdev(dev);
	if (err)
		goto out1;
	return dev;
 out1:
#ifdef CONFIG_MCA_LEGACY
	/* Release the MCA slot claimed by the probe. */
	{ struct net_local *tp = netdev_priv(dev);
	  if (tp->slot_num)
		mca_mark_as_unused(tp->slot_num);
	}
#endif
	release_region(dev->base_addr, SMCTR_IO_EXTENT);
	free_irq(dev->irq, dev);
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
5677
/* Module entry point: bring up each of the SMCTR_MAX_ADAPTERS slots.
 * When the first io= parameter was given, every slot is probed at its
 * explicit io[]/irq[] values via setup_card(); otherwise the driver
 * autoprobes with smctr_probe(-1).  Returns 0 if at least one adapter
 * was found, -ENODEV otherwise.
 */
int __init init_module(void)
{
	int i, found = 0;
	struct net_device *dev;

	for(i = 0; i < SMCTR_MAX_ADAPTERS; i++) {
		dev = io[0]? setup_card(i) : smctr_probe(-1);
		if (!IS_ERR(dev)) {
			++found;
			dev_smctr[i] = dev;
		}
	}

	return found ? 0 : -ENODEV;
}
5693
/* Module exit point: unregister every device recorded in dev_smctr[]
 * and release its MCA slot, I/O region, IRQ and netdev memory.
 */
void __exit cleanup_module(void)
{
	int i;

	for(i = 0; i < SMCTR_MAX_ADAPTERS; i++) {
		struct net_device *dev = dev_smctr[i];

		if (dev) {

			unregister_netdev(dev);
#ifdef CONFIG_MCA_LEGACY
			/* Release the MCA slot claimed by the probe. */
			{ struct net_local *tp = netdev_priv(dev);
			  if (tp->slot_num)
				mca_mark_as_unused(tp->slot_num);
			}
#endif
			release_region(dev->base_addr, SMCTR_IO_EXTENT);
			if (dev->irq)
				free_irq(dev->irq, dev);

			free_netdev(dev);
		}
	}
}
5718#endif /* MODULE */
diff --git a/drivers/net/tokenring/smctr.h b/drivers/net/tokenring/smctr.h
new file mode 100644
index 00000000000..6e5700ab4fc
--- /dev/null
+++ b/drivers/net/tokenring/smctr.h
@@ -0,0 +1,1585 @@
1/* smctr.h: SMC Token Ring driver header for Linux
2 *
3 * Authors:
4 * - Jay Schulist <jschlst@samba.org>
5 */
6
7#ifndef __LINUX_SMCTR_H
8#define __LINUX_SMCTR_H
9
10#ifdef __KERNEL__
11
12#define MAX_TX_QUEUE 10
13
14#define SMC_HEADER_SIZE 14
15
16#define SMC_PAGE_OFFSET(X) (((unsigned long)(X) - tp->ram_access) & tp->page_offset_mask)
17
18#define INIT 0x0D
19#define RQ_ATTCH 0x10
20#define RQ_STATE 0x0F
21#define RQ_ADDR 0x0E
22#define CHG_PARM 0x0C
23#define RSP 0x00
24#define TX_FORWARD 0x09
25
26#define AC_FC_DAT ((3<<13) | 1)
27#define DAT 0x07
28
29#define RPT_NEW_MON 0x25
30#define RPT_SUA_CHG 0x26
31#define RPT_ACTIVE_ERR 0x28
32#define RPT_NN_INCMP 0x27
33#define RPT_ERROR 0x29
34
35#define RQ_INIT 0x20
36#define RPT_ATTCH 0x24
37#define RPT_STATE 0x23
38#define RPT_ADDR 0x22
39
40#define POSITIVE_ACK 0x0001
41#define A_FRAME_WAS_FORWARDED 0x8888
42
43#define GROUP_ADDRESS 0x2B
44#define PHYSICAL_DROP 0x0B
45#define AUTHORIZED_ACCESS_PRIORITY 0x07
46#define AUTHORIZED_FUNCTION_CLASS 0x06
47#define FUNCTIONAL_ADDRESS 0x2C
48#define RING_STATION_STATUS 0x29
49#define TRANSMIT_STATUS_CODE 0x2A
50#define IBM_PASS_SOURCE_ADDR 0x01
51#define AC_FC_RPT_TX_FORWARD ((0<<13) | 0)
52#define AC_FC_RPT_STATE ((0<<13) | 0)
53#define AC_FC_RPT_ADDR ((0<<13) | 0)
54#define CORRELATOR 0x09
55
56#define POSITIVE_ACK 0x0001 /* */
57#define E_MAC_DATA_INCOMPLETE 0x8001 /* not used */
58#define E_VECTOR_LENGTH_ERROR 0x8002 /* */
59#define E_UNRECOGNIZED_VECTOR_ID 0x8003 /* */
60#define E_INAPPROPRIATE_SOURCE_CLASS 0x8004 /* */
61#define E_SUB_VECTOR_LENGTH_ERROR 0x8005 /* */
62#define E_TRANSMIT_FORWARD_INVALID 0x8006 /* def. by IBM */
63#define E_MISSING_SUB_VECTOR 0x8007 /* */
64#define E_SUB_VECTOR_UNKNOWN 0x8008 /* */
65#define E_MAC_HEADER_TOO_LONG 0x8009 /* */
66#define E_FUNCTION_DISABLED 0x800A /* not used */
67
68#define A_FRAME_WAS_FORWARDED 0x8888 /* used by send_TX_FORWARD */
69
70#define UPSTREAM_NEIGHBOR_ADDRESS 0x02
71#define LOCAL_RING_NUMBER 0x03
72#define ASSIGN_PHYSICAL_DROP 0x04
73#define ERROR_TIMER_VALUE 0x05
74#define AUTHORIZED_FUNCTION_CLASS 0x06
75#define AUTHORIZED_ACCESS_PRIORITY 0x07
76#define CORRELATOR 0x09
77#define PHYSICAL_DROP 0x0B
78#define RESPONSE_CODE 0x20
79#define ADDRESS_MODIFER 0x21
80#define PRODUCT_INSTANCE_ID 0x22
81#define RING_STATION_VERSION_NUMBER 0x23
82#define WRAP_DATA 0x26
83#define FRAME_FORWARD 0x27
84#define STATION_IDENTIFER 0x28
85#define RING_STATION_STATUS 0x29
86#define TRANSMIT_STATUS_CODE 0x2A
87#define GROUP_ADDRESS 0x2B
88#define FUNCTIONAL_ADDRESS 0x2C
89
90#define F_NO_SUB_VECTORS_FOUND 0x0000
91#define F_UPSTREAM_NEIGHBOR_ADDRESS 0x0001
92#define F_LOCAL_RING_NUMBER 0x0002
93#define F_ASSIGN_PHYSICAL_DROP 0x0004
94#define F_ERROR_TIMER_VALUE 0x0008
95#define F_AUTHORIZED_FUNCTION_CLASS 0x0010
96#define F_AUTHORIZED_ACCESS_PRIORITY 0x0020
97#define F_CORRELATOR 0x0040
98#define F_PHYSICAL_DROP 0x0080
99#define F_RESPONSE_CODE 0x0100
100#define F_PRODUCT_INSTANCE_ID 0x0200
101#define F_RING_STATION_VERSION_NUMBER 0x0400
102#define F_STATION_IDENTIFER 0x0800
103#define F_RING_STATION_STATUS 0x1000
104#define F_GROUP_ADDRESS 0x2000
105#define F_FUNCTIONAL_ADDRESS 0x4000
106#define F_FRAME_FORWARD 0x8000
107
108#define R_INIT 0x00
109#define R_RQ_ATTCH_STATE_ADDR 0x00
110#define R_CHG_PARM 0x00
111#define R_TX_FORWARD F_FRAME_FORWARD
112
113
114#define UPSTREAM_NEIGHBOR_ADDRESS 0x02
115#define ADDRESS_MODIFER 0x21
116#define RING_STATION_VERSION_NUMBER 0x23
117#define PRODUCT_INSTANCE_ID 0x22
118
119#define RPT_TX_FORWARD 0x2A
120
/*
 * AC/FC words for transmitted MAC frames.  Every expansion is now fully
 * parenthesized: the original forms such as "(3<<13) | 0" bound
 * incorrectly when the macro appeared inside a larger expression
 * (e.g. next to &, == or ?:).  AC_FC_RQ_INIT already used the
 * parenthesized form, showing the intent.  Values are unchanged.
 */
#define AC_FC_INIT		((3<<13) | 0)	/* */
#define AC_FC_RQ_INIT		((3<<13) | 0)	/* */
#define AC_FC_RQ_ATTCH		((3<<13) | 0)	/* DC = SC of rx frame */
#define AC_FC_RQ_STATE		((3<<13) | 0)	/* DC = SC of rx frame */
#define AC_FC_RQ_ADDR		((3<<13) | 0)	/* DC = SC of rx frame */
#define AC_FC_CHG_PARM		((3<<13) | 0)	/* */
#define AC_FC_RSP		((0<<13) | 0)	/* DC = SC of rx frame */
#define AC_FC_RPT_ATTCH		((0<<13) | 0)
129
/*
 * MAC sub-vector sizes: payload length plus the 2-byte sub-vector
 * header.  Each value is now parenthesized -- the original bare
 * "6 + 2" expansions miscomputed inside any multiplicative or
 * shift expression (e.g. "S_CORRELATOR * 2" gave 6, not 8).
 * Values are unchanged.
 */
#define S_UPSTREAM_NEIGHBOR_ADDRESS	(6 + 2)
#define S_LOCAL_RING_NUMBER		(2 + 2)
#define S_ASSIGN_PHYSICAL_DROP		(4 + 2)
#define S_ERROR_TIMER_VALUE		(2 + 2)
#define S_AUTHORIZED_FUNCTION_CLASS	(2 + 2)
#define S_AUTHORIZED_ACCESS_PRIORITY	(2 + 2)
#define S_CORRELATOR			(2 + 2)
#define S_PHYSICAL_DROP			(4 + 2)
#define S_RESPONSE_CODE			(4 + 2)
#define S_ADDRESS_MODIFER		(2 + 2)
#define S_PRODUCT_INSTANCE_ID		(18 + 2)
#define S_RING_STATION_VERSION_NUMBER	(10 + 2)
#define S_STATION_IDENTIFER		(6 + 2)
#define S_RING_STATION_STATUS		(6 + 2)
#define S_GROUP_ADDRESS			(4 + 2)
#define S_FUNCTIONAL_ADDRESS		(4 + 2)
#define S_FRAME_FORWARD			(252 + 2)
#define S_TRANSMIT_STATUS_CODE		(2 + 2)
149#define ISB_IMC_RES0 0x0000 /* */
150#define ISB_IMC_MAC_TYPE_3 0x0001 /* MAC_ARC_INDICATE */
151#define ISB_IMC_MAC_ERROR_COUNTERS 0x0002 /* */
152#define ISB_IMC_RES1 0x0003 /* */
153#define ISB_IMC_MAC_TYPE_2 0x0004 /* QUE_MAC_INDICATE */
154#define ISB_IMC_TX_FRAME 0x0005 /* */
155#define ISB_IMC_END_OF_TX_QUEUE 0x0006 /* */
156#define ISB_IMC_NON_MAC_RX_RESOURCE 0x0007 /* */
157#define ISB_IMC_MAC_RX_RESOURCE 0x0008 /* */
158#define ISB_IMC_NON_MAC_RX_FRAME 0x0009 /* */
159#define ISB_IMC_MAC_RX_FRAME 0x000A /* */
160#define ISB_IMC_TRC_FIFO_STATUS 0x000B /* */
161#define ISB_IMC_COMMAND_STATUS 0x000C /* */
162#define ISB_IMC_MAC_TYPE_1 0x000D /* Self Removed */
163#define ISB_IMC_TRC_INTRNL_TST_STATUS 0x000E /* */
164#define ISB_IMC_RES2 0x000F /* */
165
166#define NON_MAC_RX_RESOURCE_BW 0x10 /* shifted right 8 bits */
167#define NON_MAC_RX_RESOURCE_FW 0x20 /* shifted right 8 bits */
168#define NON_MAC_RX_RESOURCE_BE 0x40 /* shifted right 8 bits */
169#define NON_MAC_RX_RESOURCE_FE 0x80 /* shifted right 8 bits */
170#define RAW_NON_MAC_RX_RESOURCE_BW 0x1000 /* */
171#define RAW_NON_MAC_RX_RESOURCE_FW 0x2000 /* */
172#define RAW_NON_MAC_RX_RESOURCE_BE 0x4000 /* */
173#define RAW_NON_MAC_RX_RESOURCE_FE 0x8000 /* */
174
175#define MAC_RX_RESOURCE_BW 0x10 /* shifted right 8 bits */
176#define MAC_RX_RESOURCE_FW 0x20 /* shifted right 8 bits */
177#define MAC_RX_RESOURCE_BE 0x40 /* shifted right 8 bits */
178#define MAC_RX_RESOURCE_FE 0x80 /* shifted right 8 bits */
179#define RAW_MAC_RX_RESOURCE_BW 0x1000 /* */
180#define RAW_MAC_RX_RESOURCE_FW 0x2000 /* */
181#define RAW_MAC_RX_RESOURCE_BE 0x4000 /* */
182#define RAW_MAC_RX_RESOURCE_FE 0x8000 /* */
183
184#define TRC_FIFO_STATUS_TX_UNDERRUN 0x40 /* shifted right 8 bits */
185#define TRC_FIFO_STATUS_RX_OVERRUN 0x80 /* shifted right 8 bits */
186#define RAW_TRC_FIFO_STATUS_TX_UNDERRUN 0x4000 /* */
187#define RAW_TRC_FIFO_STATUS_RX_OVERRUN 0x8000 /* */
188
189#define CSR_CLRTINT 0x08
190
/* High/low byte of a 16-bit value.  The argument is now parenthesized:
 * in the original, MSB(a + b) expanded to ((__u16)a + b >> 8) because
 * the cast bound only to "a".  Results for simple arguments are
 * unchanged. */
#define MSB(X)			((__u8)((__u16)(X) >> 8))
#define LSB(X)			((__u8)((__u16)(X) & 0xff))
193
/* Lobe media test constants.  S_WRAP_DATA is now parenthesized so it
 * evaluates correctly inside larger expressions; its value (250) is
 * unchanged. */
#define AC_FC_LOBE_MEDIA_TEST	((3<<13) | 0)
#define S_WRAP_DATA		(248 + 2)	/* 500 + 2 */
#define WRAP_DATA		0x26
#define LOBE_MEDIA_TEST		0x08
198
199/* Destination Class (dc) */
200
201#define DC_MASK 0xF0
202#define DC_RS 0x00
203#define DC_CRS 0x40
204#define DC_RPS 0x50
205#define DC_REM 0x60
206
207/* Source Classes (sc) */
208
209#define SC_MASK 0x0F
210#define SC_RS 0x00
211#define SC_CRS 0x04
212#define SC_RPS 0x05
213#define SC_REM 0x06
214
215#define PR 0x11
216#define PR_PAGE_MASK 0x0C000
217
218#define MICROCHANNEL 0x0008
219#define INTERFACE_CHIP 0x0010
220#define BOARD_16BIT 0x0040
221#define PAGED_RAM 0x0080
222#define WD8115TA (TOKEN_MEDIA | MICROCHANNEL | INTERFACE_CHIP | PAGED_RAM)
223#define WD8115T (TOKEN_MEDIA | INTERFACE_CHIP | BOARD_16BIT | PAGED_RAM)
224
225#define BRD_ID_8316 0x50
226
227#define r587_SER 0x001
228#define SER_DIN 0x80
229#define SER_DOUT 0x40
230#define SER_CLK 0x20
231#define SER_ECS 0x10
232#define SER_E806 0x08
233#define SER_PNP 0x04
234#define SER_BIO 0x02
235#define SER_16B 0x01
236
237#define r587_IDR 0x004
238#define IDR_IRQ_MASK 0x0F0
239#define IDR_DCS_MASK 0x007
240#define IDR_RWS 0x008
241
242
243#define r587_BIO 0x003
244#define BIO_ENB 0x080
245#define BIO_MASK 0x03F
246
247#define r587_PCR 0x005
248#define PCR_RAMS 0x040
249
250
251
252#define NUM_ADDR_BITS 8
253
254#define ISA_MAX_ADDRESS 0x00ffffff
255
256#define SMCTR_MAX_ADAPTERS 7
257
258#define MC_TABLE_ENTRIES 16
259
260#define MAXFRAGMENTS 32
261
262#define CHIP_REV_MASK 0x3000
263
264#define MAX_TX_QS 8
265#define NUM_TX_QS_USED 3
266
267#define MAX_RX_QS 2
268#define NUM_RX_QS_USED 2
269
#define INTEL_DATA_FORMAT		0x4000
#define INTEL_ADDRESS_POINTER_FORMAT	0x8000
/* Map a host pointer back into the current shared-RAM page window
 * ("tp" is the enclosing driver's struct net_local). */
#define PAGE_POINTER(X)		((((unsigned long)(X) - tp->ram_access) & tp->page_offset_mask) + tp->ram_access)
/* Swap the 16-bit halves of a 32-bit value.  The argument is now
 * parenthesized: the original "(X & 0xFFFF)" miscomputed for compound
 * arguments like SWAP_WORDS(a | b).  Values for simple arguments are
 * unchanged. */
#define SWAP_WORDS(X)		((((X) & 0xFFFF) << 16) | ((X) >> 16))
274
275#define INTERFACE_CHIP 0x0010 /* Soft Config Adapter */
276#define ADVANCED_FEATURES 0x0020 /* Adv. netw. interface features */
277#define BOARD_16BIT 0x0040 /* 16 bit capability */
278#define PAGED_RAM 0x0080 /* Adapter has paged RAM */
279
280#define PAGED_ROM 0x0100 /* Adapter has paged ROM */
281
282#define RAM_SIZE_UNKNOWN 0x0000 /* Unknown RAM size */
283#define RAM_SIZE_0K 0x0001 /* 0K RAM */
284#define RAM_SIZE_8K 0x0002 /* 8k RAM */
285#define RAM_SIZE_16K 0x0003 /* 16k RAM */
286#define RAM_SIZE_32K 0x0004 /* 32k RAM */
287#define RAM_SIZE_64K 0x0005 /* 64k RAM */
288#define RAM_SIZE_RESERVED_6 0x0006 /* Reserved RAM size */
289#define RAM_SIZE_RESERVED_7 0x0007 /* Reserved RAM size */
290#define RAM_SIZE_MASK 0x0007 /* Isolates RAM Size */
291
292#define TOKEN_MEDIA 0x0005
293
294#define BID_REG_0 0x00
295#define BID_REG_1 0x01
296#define BID_REG_2 0x02
297#define BID_REG_3 0x03
298#define BID_REG_4 0x04
299#define BID_REG_5 0x05
300#define BID_REG_6 0x06
301#define BID_REG_7 0x07
302#define BID_LAR_0 0x08
303#define BID_LAR_1 0x09
304#define BID_LAR_2 0x0A
305#define BID_LAR_3 0x0B
306#define BID_LAR_4 0x0C
307#define BID_LAR_5 0x0D
308
309#define BID_BOARD_ID_BYTE 0x0E
310#define BID_CHCKSM_BYTE 0x0F
311#define BID_LAR_OFFSET 0x08
312
313#define BID_MSZ_583_BIT 0x08
314#define BID_SIXTEEN_BIT_BIT 0x01
315
316#define BID_BOARD_REV_MASK 0x1E
317
318#define BID_MEDIA_TYPE_BIT 0x01
319#define BID_SOFT_CONFIG_BIT 0x20
320#define BID_RAM_SIZE_BIT 0x40
321#define BID_BUS_TYPE_BIT 0x80
322
323#define BID_CR 0x10
324
325#define BID_TXP 0x04 /* Transmit Packet Command */
326
327#define BID_TCR_DIFF 0x0D /* Transmit Configuration Register */
328
329#define BID_TCR_VAL 0x18 /* Value to Test 8390 or 690 */
330#define BID_PS0 0x00 /* Register Page Select 0 */
331#define BID_PS1 0x40 /* Register Page Select 1 */
332#define BID_PS2 0x80 /* Register Page Select 2 */
333#define BID_PS_MASK 0x3F /* For Masking Off Page Select Bits */
334
335#define BID_EEPROM_0 0x08
336#define BID_EEPROM_1 0x09
337#define BID_EEPROM_2 0x0A
338#define BID_EEPROM_3 0x0B
339#define BID_EEPROM_4 0x0C
340#define BID_EEPROM_5 0x0D
341#define BID_EEPROM_6 0x0E
342#define BID_EEPROM_7 0x0F
343
344#define BID_OTHER_BIT 0x02
345#define BID_ICR_MASK 0x0C
346#define BID_EAR_MASK 0x0F
347#define BID_ENGR_PAGE 0x0A0
348#define BID_RLA 0x10
349#define BID_EA6 0x80
350#define BID_RECALL_DONE_MASK 0x10
351#define BID_BID_EEPROM_OVERRIDE 0xFFB0
352#define BID_EXTRA_EEPROM_OVERRIDE 0xFFD0
353#define BID_EEPROM_MEDIA_MASK 0x07
354#define BID_STARLAN_TYPE 0x00
355#define BID_ETHERNET_TYPE 0x01
356#define BID_TP_TYPE 0x02
357#define BID_EW_TYPE 0x03
358#define BID_TOKEN_RING_TYPE 0x04
359#define BID_UTP2_TYPE 0x05
360#define BID_EEPROM_IRQ_MASK 0x18
361#define BID_PRIMARY_IRQ 0x00
362#define BID_ALTERNATE_IRQ_1 0x08
363#define BID_ALTERNATE_IRQ_2 0x10
364#define BID_ALTERNATE_IRQ_3 0x18
365#define BID_EEPROM_RAM_SIZE_MASK 0xE0
366#define BID_EEPROM_RAM_SIZE_RES1 0x00
367#define BID_EEPROM_RAM_SIZE_RES2 0x20
368#define BID_EEPROM_RAM_SIZE_8K 0x40
369#define BID_EEPROM_RAM_SIZE_16K 0x60
370#define BID_EEPROM_RAM_SIZE_32K 0x80
371#define BID_EEPROM_RAM_SIZE_64K 0xA0
372#define BID_EEPROM_RAM_SIZE_RES3 0xC0
373#define BID_EEPROM_RAM_SIZE_RES4 0xE0
374#define BID_EEPROM_BUS_TYPE_MASK 0x07
375#define BID_EEPROM_BUS_TYPE_AT 0x00
376#define BID_EEPROM_BUS_TYPE_MCA 0x01
377#define BID_EEPROM_BUS_TYPE_EISA 0x02
378#define BID_EEPROM_BUS_TYPE_NEC 0x03
379#define BID_EEPROM_BUS_SIZE_MASK 0x18
380#define BID_EEPROM_BUS_SIZE_8BIT 0x00
381#define BID_EEPROM_BUS_SIZE_16BIT 0x08
382#define BID_EEPROM_BUS_SIZE_32BIT 0x10
383#define BID_EEPROM_BUS_SIZE_64BIT 0x18
384#define BID_EEPROM_BUS_MASTER 0x20
385#define BID_EEPROM_RAM_PAGING 0x40
386#define BID_EEPROM_ROM_PAGING 0x80
387#define BID_EEPROM_PAGING_MASK 0xC0
388#define BID_EEPROM_LOW_COST 0x08
389#define BID_EEPROM_IO_MAPPED 0x10
390#define BID_EEPROM_HMI 0x01
391#define BID_EEPROM_AUTO_MEDIA_DETECT 0x01
392#define BID_EEPROM_CHIP_REV_MASK 0x0C
393
394#define BID_EEPROM_LAN_ADDR 0x30
395
396#define BID_EEPROM_MEDIA_OPTION 0x54
397#define BID_EEPROM_MEDIA_UTP 0x01
398#define BID_EEPROM_4MB_RING 0x08
399#define BID_EEPROM_16MB_RING 0x10
400#define BID_EEPROM_MEDIA_STP 0x40
401
402#define BID_EEPROM_MISC_DATA 0x56
403#define BID_EEPROM_EARLY_TOKEN_RELEASE 0x02
404
405#define CNFG_ID_8003E 0x6fc0
406#define CNFG_ID_8003S 0x6fc1
407#define CNFG_ID_8003W 0x6fc2
408#define CNFG_ID_8115TRA 0x6ec6
409#define CNFG_ID_8013E 0x61C8
410#define CNFG_ID_8013W 0x61C9
411#define CNFG_ID_BISTRO03E 0xEFE5
412#define CNFG_ID_BISTRO13E 0xEFD5
413#define CNFG_ID_BISTRO13W 0xEFD4
414#define CNFG_MSR_583 0x0
415#define CNFG_ICR_583 0x1
416#define CNFG_IAR_583 0x2
417#define CNFG_BIO_583 0x3
418#define CNFG_EAR_583 0x3
419#define CNFG_IRR_583 0x4
420#define CNFG_LAAR_584 0x5
421#define CNFG_GP2 0x7
422#define CNFG_LAAR_MASK 0x1F
423#define CNFG_LAAR_ZWS 0x20
424#define CNFG_LAAR_L16E 0x40
425#define CNFG_ICR_IR2_584 0x04
426#define CNFG_ICR_MASK 0x08
427#define CNFG_ICR_MSZ 0x08
428#define CNFG_ICR_RLA 0x10
429#define CNFG_ICR_STO 0x80
430#define CNFG_IRR_IRQS 0x60
431#define CNFG_IRR_IEN 0x80
432#define CNFG_IRR_ZWS 0x01
433#define CNFG_GP2_BOOT_NIBBLE 0x0F
434#define CNFG_IRR_OUT2 0x04
435#define CNFG_IRR_OUT1 0x02
436
437#define CNFG_SIZE_8KB 8
438#define CNFG_SIZE_16KB 16
439#define CNFG_SIZE_32KB 32
440#define CNFG_SIZE_64KB 64
441#define CNFG_SIZE_128KB 128
442#define CNFG_SIZE_256KB 256
443#define ROM_DISABLE 0x0
444
445#define CNFG_SLOT_ENABLE_BIT 0x08
446
447#define CNFG_POS_CONTROL_REG 0x096
448#define CNFG_POS_REG0 0x100
449#define CNFG_POS_REG1 0x101
450#define CNFG_POS_REG2 0x102
451#define CNFG_POS_REG3 0x103
452#define CNFG_POS_REG4 0x104
453#define CNFG_POS_REG5 0x105
454
455#define CNFG_ADAPTER_TYPE_MASK 0x0e
456
457#define SLOT_16BIT 0x0008
458#define INTERFACE_5X3_CHIP 0x0000 /* 0000 = 583 or 593 chips */
459#define NIC_690_BIT 0x0010 /* NIC is 690 */
460#define ALTERNATE_IRQ_BIT 0x0020 /* Alternate IRQ is used */
461#define INTERFACE_584_CHIP 0x0040 /* 0001 = 584 chip */
462#define INTERFACE_594_CHIP 0x0080 /* 0010 = 594 chip */
463#define INTERFACE_585_CHIP 0x0100 /* 0100 = 585/790 chip */
464#define INTERFACE_CHIP_MASK 0x03C0 /* Isolates Intfc Chip Type */
465
466#define BOARD_16BIT 0x0040
467#define NODE_ADDR_CKSUM 0xEE
468#define BRD_ID_8115T 0x04
469
470#define NIC_825_BIT 0x0400 /* TRC 83C825 NIC */
471#define NIC_790_BIT 0x0800 /* NIC is 83C790 Ethernet */
472
473#define CHIP_REV_MASK 0x3000
474
475#define HWR_CBUSY 0x02
476#define HWR_CA 0x01
477
478#define MAC_QUEUE 0
479#define NON_MAC_QUEUE 1
480#define BUG_QUEUE 2 /* NO RECEIVE QUEUE, ONLY TX */
481
482#define NUM_MAC_TX_FCBS 8
483#define NUM_MAC_TX_BDBS NUM_MAC_TX_FCBS
484#define NUM_MAC_RX_FCBS 7
485#define NUM_MAC_RX_BDBS 8
486
487#define NUM_NON_MAC_TX_FCBS 6
488#define NUM_NON_MAC_TX_BDBS NUM_NON_MAC_TX_FCBS
489
490#define NUM_NON_MAC_RX_BDBS 0 /* CALCULATED DYNAMICALLY */
491
492#define NUM_BUG_TX_FCBS 8
493#define NUM_BUG_TX_BDBS NUM_BUG_TX_FCBS
494
495#define MAC_TX_BUFFER_MEMORY 1024
496#define NON_MAC_TX_BUFFER_MEMORY (20 * 1024)
497#define BUG_TX_BUFFER_MEMORY (NUM_BUG_TX_FCBS * 32)
498
499#define RX_BUFFER_MEMORY 0 /* CALCULATED DYNAMICALLY */
500#define RX_DATA_BUFFER_SIZE 256
501#define RX_BDB_SIZE_SHIFT 3 /* log2(RX_DATA_BUFFER_SIZE)-log2(sizeof(BDBlock)) */
502#define RX_BDB_SIZE_MASK (sizeof(BDBlock) - 1)
503#define RX_DATA_BUFFER_SIZE_MASK (RX_DATA_BUFFER_SIZE-1)
504
505#define NUM_OF_INTERRUPTS 0x20
506
507#define NOT_TRANSMITING 0
508#define TRANSMITING 1
509
510#define TRC_INTERRUPT_ENABLE_MASK 0x7FF6
511
512#define UCODE_VERSION 0x58
513
514#define UCODE_SIZE_OFFSET 0x0000 /* WORD */
515#define UCODE_CHECKSUM_OFFSET 0x0002 /* WORD */
516#define UCODE_VERSION_OFFSET 0x0004 /* BYTE */
517
518#define CS_RAM_SIZE 0X2000
519#define CS_RAM_CHECKSUM_OFFSET 0x1FFE /* WORD 1FFE(MSB)-1FFF(LSB)*/
520#define CS_RAM_VERSION_OFFSET 0x1FFC /* WORD 1FFC(MSB)-1FFD(LSB)*/
521
522#define MISC_DATA_SIZE 128
523#define NUM_OF_ACBS 1
524
525#define ACB_COMMAND_NOT_DONE 0x0000 /* Init, command not done */
526#define ACB_COMMAND_DONE 0x8000 /* TRC says command done */
527#define ACB_COMMAND_STATUS_MASK 0x00FF /* low byte is status */
528#define ACB_COMMAND_SUCCESSFUL 0x0000 /* means cmd was successful */
529#define ACB_NOT_CHAIN_END 0x0000 /* tell TRC more CBs in chain */
530#define ACB_CHAIN_END 0x8000 /* tell TRC last CB in chain */
531#define ACB_COMMAND_NO_INTERRUPT 0x0000 /* tell TRC no INT after CB */
532#define ACB_COMMAND_INTERRUPT 0x2000 /* tell TRC to INT after CB */
533#define ACB_SUB_CMD_NOP 0x0000
534#define ACB_CMD_HIC_NOP 0x0080
535#define ACB_CMD_MCT_NOP 0x0000
536#define ACB_CMD_MCT_TEST 0x0001
537#define ACB_CMD_HIC_TEST 0x0081
538#define ACB_CMD_INSERT 0x0002
539#define ACB_CMD_REMOVE 0x0003
540#define ACB_CMD_MCT_WRITE_VALUE 0x0004
541#define ACB_CMD_HIC_WRITE_VALUE 0x0084
542#define ACB_CMD_MCT_READ_VALUE 0x0005
543#define ACB_CMD_HIC_READ_VALUE 0x0085
544#define ACB_CMD_INIT_TX_RX 0x0086
545#define ACB_CMD_INIT_TRC_TIMERS 0x0006
546#define ACB_CMD_READ_TRC_STATUS 0x0007
547#define ACB_CMD_CHANGE_JOIN_STATE 0x0008
548#define ACB_CMD_RESERVED_9 0x0009
549#define ACB_CMD_RESERVED_A 0x000A
550#define ACB_CMD_RESERVED_B 0x000B
551#define ACB_CMD_RESERVED_C 0x000C
552#define ACB_CMD_RESERVED_D 0x000D
553#define ACB_CMD_RESERVED_E 0x000E
554#define ACB_CMD_RESERVED_F 0x000F
555
556#define TRC_MAC_REGISTERS_TEST 0x0000
557#define TRC_INTERNAL_LOOPBACK 0x0001
558#define TRC_TRI_LOOPBACK 0x0002
559#define TRC_INTERNAL_ROM_TEST 0x0003
560#define TRC_LOBE_MEDIA_TEST 0x0004
561#define TRC_ANALOG_TEST 0x0005
562#define TRC_HOST_INTERFACE_REG_TEST 0x0003
563
564#define TEST_DMA_1 0x0000
565#define TEST_DMA_2 0x0001
566#define TEST_MCT_ROM 0x0002
567#define HIC_INTERNAL_DIAG 0x0003
568
569#define ABORT_TRANSMIT_PRIORITY_0 0x0001
570#define ABORT_TRANSMIT_PRIORITY_1 0x0002
571#define ABORT_TRANSMIT_PRIORITY_2 0x0004
572#define ABORT_TRANSMIT_PRIORITY_3 0x0008
573#define ABORT_TRANSMIT_PRIORITY_4 0x0010
574#define ABORT_TRANSMIT_PRIORITY_5 0x0020
575#define ABORT_TRANSMIT_PRIORITY_6 0x0040
576#define ABORT_TRANSMIT_PRIORITY_7 0x0080
577
578#define TX_PENDING_PRIORITY_0 0x0001
579#define TX_PENDING_PRIORITY_1 0x0002
580#define TX_PENDING_PRIORITY_2 0x0004
581#define TX_PENDING_PRIORITY_3 0x0008
582#define TX_PENDING_PRIORITY_4 0x0010
583#define TX_PENDING_PRIORITY_5 0x0020
584#define TX_PENDING_PRIORITY_6 0x0040
585#define TX_PENDING_PRIORITY_7 0x0080
586
587#define FCB_FRAME_LENGTH 0x100
588#define FCB_COMMAND_DONE 0x8000 /* FCB Word 0 */
589#define FCB_NOT_CHAIN_END 0x0000 /* FCB Word 1 */
590#define FCB_CHAIN_END 0x8000
591#define FCB_NO_WARNING 0x0000
592#define FCB_WARNING 0x4000
593#define FCB_INTERRUPT_DISABLE 0x0000
594#define FCB_INTERRUPT_ENABLE 0x2000
595
596#define FCB_ENABLE_IMA 0x0008
597#define FCB_ENABLE_TES 0x0004 /* Guarantee Tx before Int */
598#define FCB_ENABLE_TFS 0x0002 /* Post Tx Frame Status */
599#define FCB_ENABLE_NTC 0x0001 /* No Tx CRC */
600
601#define FCB_TX_STATUS_CR2 0x0004
602#define FCB_TX_STATUS_AR2 0x0008
603#define FCB_TX_STATUS_CR1 0x0040
604#define FCB_TX_STATUS_AR1 0x0080
605#define FCB_TX_AC_BITS (FCB_TX_STATUS_AR1+FCB_TX_STATUS_AR2+FCB_TX_STATUS_CR1+FCB_TX_STATUS_CR2)
606#define FCB_TX_STATUS_E 0x0100
607
608#define FCB_RX_STATUS_ANY_ERROR 0x0001
609#define FCB_RX_STATUS_FCS_ERROR 0x0002
610
611#define FCB_RX_STATUS_IA_MATCHED 0x0400
612#define FCB_RX_STATUS_IGA_BSGA_MATCHED 0x0500
613#define FCB_RX_STATUS_FA_MATCHED 0x0600
614#define FCB_RX_STATUS_BA_MATCHED 0x0700
615#define FCB_RX_STATUS_DA_MATCHED 0x0400
616#define FCB_RX_STATUS_SOURCE_ROUTING 0x0800
617
618#define BDB_BUFFER_SIZE 0x100
619#define BDB_NOT_CHAIN_END 0x0000
620#define BDB_CHAIN_END 0x8000
621#define BDB_NO_WARNING 0x0000
622#define BDB_WARNING 0x4000
623
624#define ERROR_COUNTERS_CHANGED 0x0001
625#define TI_NDIS_RING_STATUS_CHANGED 0x0002
626#define UNA_CHANGED 0x0004
627#define READY_TO_SEND_RQ_INIT 0x0008
628
629#define SCGB_ADDRESS_POINTER_FORMAT INTEL_ADDRESS_POINTER_FORMAT
630#define SCGB_DATA_FORMAT INTEL_DATA_FORMAT
631#define SCGB_MULTI_WORD_CONTROL 0
632#define SCGB_BURST_LENGTH 0x000E /* DMA Burst Length */
633
634#define SCGB_CONFIG (INTEL_ADDRESS_POINTER_FORMAT+INTEL_DATA_FORMAT+SCGB_BURST_LENGTH)
635
636#define ISCP_BLOCK_SIZE 0x0A
637#define RAM_SIZE 0x10000
638#define INIT_SYS_CONFIG_PTR_OFFSET (RAM_SIZE-ISCP_BLOCK_SIZE)
639#define SCGP_BLOCK_OFFSET 0
640
641#define SCLB_NOT_VALID 0x0000 /* Initially, SCLB not valid */
642#define SCLB_VALID 0x8000 /* Host tells TRC SCLB valid */
643#define SCLB_PROCESSED 0x0000 /* TRC says SCLB processed */
644#define SCLB_RESUME_CONTROL_NOT_VALID 0x0000 /* Initially, RC not valid */
645#define SCLB_RESUME_CONTROL_VALID 0x4000 /* Host tells TRC RC valid */
646#define SCLB_IACK_CODE_NOT_VALID 0x0000 /* Initially, IACK not valid */
647#define SCLB_IACK_CODE_VALID 0x2000 /* Host tells TRC IACK valid */
648#define SCLB_CMD_NOP 0x0000
649#define SCLB_CMD_REMOVE 0x0001
650#define SCLB_CMD_SUSPEND_ACB_CHAIN 0x0002
651#define SCLB_CMD_SET_INTERRUPT_MASK 0x0003
652#define SCLB_CMD_CLEAR_INTERRUPT_MASK 0x0004
653#define SCLB_CMD_RESERVED_5 0x0005
654#define SCLB_CMD_RESERVED_6 0x0006
655#define SCLB_CMD_RESERVED_7 0x0007
656#define SCLB_CMD_RESERVED_8 0x0008
657#define SCLB_CMD_RESERVED_9 0x0009
658#define SCLB_CMD_RESERVED_A 0x000A
659#define SCLB_CMD_RESERVED_B 0x000B
660#define SCLB_CMD_RESERVED_C 0x000C
661#define SCLB_CMD_RESERVED_D 0x000D
662#define SCLB_CMD_RESERVED_E 0x000E
663#define SCLB_CMD_RESERVED_F 0x000F
664
665#define SCLB_RC_ACB 0x0001 /* Action Command Block Chain */
666#define SCLB_RC_RES0 0x0002 /* Always Zero */
667#define SCLB_RC_RES1 0x0004 /* Always Zero */
668#define SCLB_RC_RES2 0x0008 /* Always Zero */
669#define SCLB_RC_RX_MAC_FCB 0x0010 /* RX_MAC_FCB Chain */
670#define SCLB_RC_RX_MAC_BDB 0x0020 /* RX_MAC_BDB Chain */
671#define SCLB_RC_RX_NON_MAC_FCB 0x0040 /* RX_NON_MAC_FCB Chain */
672#define SCLB_RC_RX_NON_MAC_BDB 0x0080 /* RX_NON_MAC_BDB Chain */
673#define SCLB_RC_TFCB0 0x0100 /* TX Priority 0 FCB Chain */
674#define SCLB_RC_TFCB1 0x0200 /* TX Priority 1 FCB Chain */
675#define SCLB_RC_TFCB2 0x0400 /* TX Priority 2 FCB Chain */
676#define SCLB_RC_TFCB3 0x0800 /* TX Priority 3 FCB Chain */
677#define SCLB_RC_TFCB4 0x1000 /* TX Priority 4 FCB Chain */
678#define SCLB_RC_TFCB5 0x2000 /* TX Priority 5 FCB Chain */
679#define SCLB_RC_TFCB6 0x4000 /* TX Priority 6 FCB Chain */
680#define SCLB_RC_TFCB7 0x8000 /* TX Priority 7 FCB Chain */
681
682#define SCLB_IMC_RES0 0x0001 /* */
683#define SCLB_IMC_MAC_TYPE_3 0x0002 /* MAC_ARC_INDICATE */
684#define SCLB_IMC_MAC_ERROR_COUNTERS 0x0004 /* */
685#define SCLB_IMC_RES1 0x0008 /* */
686#define SCLB_IMC_MAC_TYPE_2 0x0010 /* QUE_MAC_INDICATE */
687#define SCLB_IMC_TX_FRAME 0x0020 /* */
688#define SCLB_IMC_END_OF_TX_QUEUE 0x0040 /* */
689#define SCLB_IMC_NON_MAC_RX_RESOURCE 0x0080 /* */
690#define SCLB_IMC_MAC_RX_RESOURCE 0x0100 /* */
691#define SCLB_IMC_NON_MAC_RX_FRAME 0x0200 /* */
692#define SCLB_IMC_MAC_RX_FRAME 0x0400 /* */
693#define SCLB_IMC_TRC_FIFO_STATUS 0x0800 /* */
694#define SCLB_IMC_COMMAND_STATUS 0x1000 /* */
695#define SCLB_IMC_MAC_TYPE_1 0x2000 /* Self Removed */
696#define SCLB_IMC_TRC_INTRNL_TST_STATUS 0x4000 /* */
697#define SCLB_IMC_RES2 0x8000 /* */
698
699#define DMA_TRIGGER 0x0004
700#define FREQ_16MB_BIT 0x0010
701#define THDREN 0x0020
702#define CFG0_RSV1 0x0040
703#define CFG0_RSV2 0x0080
704#define ETREN 0x0100
705#define RX_OWN_BIT 0x0200
706#define RXATMAC 0x0400
707#define PROMISCUOUS_BIT 0x0800
708#define USETPT 0x1000
709#define SAVBAD_BIT 0x2000
710#define ONEQUE 0x4000
711#define NO_AUTOREMOVE 0x8000
712
713#define RX_FCB_AREA_8316 0x00000000
714#define RX_BUFF_AREA_8316 0x00000000
715
716#define TRC_POINTER(X) ((unsigned long)(X) - tp->ram_access)
717#define RX_FCB_TRC_POINTER(X) ((unsigned long)(X) - tp->ram_access + RX_FCB_AREA_8316)
718#define RX_BUFF_TRC_POINTER(X) ((unsigned long)(X) - tp->ram_access + RX_BUFF_AREA_8316)
719
720// Offset 0: MSR - Memory Select Register
721//
722#define r587_MSR 0x000 // Register Offset
723//#define MSR_RST 0x080 // LAN Controller Reset
724#define MSR_MENB 0x040 // Shared Memory Enable
725#define MSR_RA18 0x020 // Ram Address bit 18 (583, 584, 587)
726#define MSR_RA17 0x010 // Ram Address bit 17 (583, 584, 585/790)
727#define MSR_RA16 0x008 // Ram Address bit 16 (583, 584, 585/790)
728#define MSR_RA15 0x004 // Ram Address bit 15 (583, 584, 585/790)
729#define MSR_RA14 0x002 // Ram Address bit 14 (583, 584, 585/790)
730#define MSR_RA13 0x001 // Ram Address bit 13 (583, 584, 585/790)
731
732#define MSR_MASK 0x03F // Mask for Address bits RA18-RA13 (583, 584, 587)
733
734#define MSR 0x00
735#define IRR 0x04
736#define HWR 0x04
737#define LAAR 0x05
738#define IMCCR 0x05
739#define LAR0 0x08
740#define BDID 0x0E // Adapter ID byte register offset
741#define CSR 0x10
742#define PR 0x11
743
744#define MSR_RST 0x80
745#define MSR_MEMB 0x40
746#define MSR_0WS 0x20
747
748#define FORCED_16BIT_MODE 0x0002
749
750#define INTERFRAME_SPACING_16 0x0003 /* 6 bytes */
751#define INTERFRAME_SPACING_4 0x0001 /* 2 bytes */
752#define MULTICAST_ADDRESS_BIT 0x0010
753#define NON_SRC_ROUTING_BIT 0x0020
754
755#define LOOPING_MODE_MASK 0x0007
756
/*
 * Decode firmware defines.
 */
/* Byte-swap a 16-bit value.  Now fully parenthesized: the original
 * expansion "((X & 0xff) << 8) | (X >> 8)" had no outer parentheses,
 * so e.g. "2 * SWAP_BYTES(x)" multiplied only the first half.  The
 * value of a stand-alone expansion is unchanged. */
#define SWAP_BYTES(X)		((((X) & 0xff) << 8) | ((X) >> 8))
#define WEIGHT_OFFSET		5
#define TREE_SIZE_OFFSET	9
#define TREE_OFFSET		11
764
765/* The Huffman Encoding Tree is constructed of these nodes. */
/* One node of the firmware-decode Huffman tree.  tag is LEAF or BRANCH
 * (defines below); llink/rlink are presumably left/right child indices
 * and info the decoded value at a leaf -- confirm against the decode
 * routine in smctr.c. */
766typedef struct {
767	__u8 llink;	/* Short version of above node. */
768	__u8 tag;
769	__u8 info;	/* This node is used on decodes. */
770	__u8 rlink;
771} DECODE_TREE_NODE;
772
773#define ROOT 0 /* Branch value. */
774#define LEAF 0 /* Tag field value. */
775#define BRANCH 1 /* Tag field value. */
776
777/*
778 * Multicast Table Structure
779 */
/* One multicast-table entry: a 48-bit MAC address plus a reference
 * count of how many times it was added (table holds MC_TABLE_ENTRIES
 * of these). */
780typedef struct {
781	__u8 address[6];
782	__u8 instance_count;
783} McTable;
784
785/*
786 * Fragment Descriptor Definition
787 */
/* One scatter/gather fragment: host pointer plus length in bytes. */
788typedef struct {
789	__u8 *fragment_ptr;
790	__u32 fragment_length;
791} FragmentStructure;
792
793/*
794 * Data Buffer Structure Definition
795 */
/* A buffer described as up to MAXFRAGMENTS fragments; fragment_count
 * gives how many entries of fragment_list are valid. */
796typedef struct {
797	__u32 fragment_count;
798	FragmentStructure fragment_list[MAXFRAGMENTS];
799} DataBufferStructure;
800
/* One ISB entry.  IType is presumably an ISB_IMC_* interrupt code
 * (defined above) with ISubtype carrying the per-type detail bits --
 * confirm against the ISR in smctr.c.  Packed: layout is shared with
 * the adapter. */
801#pragma pack(1)
802typedef struct {
803	__u8 IType;
804	__u8 ISubtype;
805} Interrupt_Status_Word;
806
/* Buffer Descriptor Block, shared with the TRC.  The trc_* fields are
 * adapter-side pointers; the host-side pointers below them mirror the
 * chain for the driver.  Trailing comments are cumulative byte
 * offsets.  NOTE(review): those offsets count each host pointer as 4
 * bytes (16->20->24), so the packed 32-byte layout only holds on
 * 32-bit hosts -- confirm. */
807#pragma pack(1)
808typedef struct BDBlockType {
809	__u16 info;			/* 02 */
810	__u32 trc_next_ptr;		/* 06 */
811	__u32 trc_data_block_ptr;	/* 10 */
812	__u16 buffer_length;		/* 12 */
813
814	__u16 *data_block_ptr;		/* 16 */
815	struct BDBlockType *next_ptr;	/* 20 */
816	struct BDBlockType *back_ptr;	/* 24 */
817	__u8 filler[8];			/* 32 */
818} BDBlock;
819
/* Frame Control Block, shared with the TRC: one per queued frame, with
 * adapter-side (trc_*) and host-side chain pointers plus the BDB list
 * for the frame's buffers.  Trailing comments are cumulative byte
 * offsets; as with BDBlock they assume 4-byte host pointers --
 * 32-bit-host layout, confirm. */
820#pragma pack(1)
821typedef struct FCBlockType {
822	__u16 frame_status;			/* 02 */
823	__u16 info;				/* 04 */
824	__u32 trc_next_ptr;			/* 08 */
825	__u32 trc_bdb_ptr;			/* 12 */
826	__u16 frame_length;			/* 14 */
827
828	BDBlock *bdb_ptr;			/* 18 */
829	struct FCBlockType *next_ptr;		/* 22 */
830	struct FCBlockType *back_ptr;		/* 26 */
831	__u16 memory_alloc;			/* 28 */
832	__u8 filler[4];				/* 32 */
833
834} FCBlock;
835
/* Status Block returned by the adapter: token-ring error counters
 * followed by ring-status words (layout fixed by the TRC firmware,
 * hence packed). */
836#pragma pack(1)
837typedef struct SBlockType{
838	__u8 Internal_Error_Count;
839	__u8 Line_Error_Count;
840	__u8 AC_Error_Count;
841	__u8 Burst_Error_Count;
842	__u8 RESERVED_COUNTER_0;
843	__u8 AD_TRANS_Count;
844	__u8 RCV_Congestion_Count;
845	__u8 Lost_FR_Error_Count;
846	__u8 FREQ_Error_Count;
847	__u8 FR_Copied_Error_Count;
848	__u8 RESERVED_COUNTER_1;
849	__u8 Token_Error_Count;
850
851	__u16 TI_NDIS_Ring_Status;
852	__u16 BCN_Type;
853	__u16 Error_Code;
854	__u16 SA_of_Last_AMP_SMP[3];
855	__u16 UNA[3];
856	__u16 Ucode_Version_Number;
857	__u16 Status_CHG_Indicate;
858	__u16 RESERVED_STATUS_0;
859} SBlock;
860
/* Action Command Block: one ACB_CMD_* command (plus subcommand and a
 * 32-bit data offset split lo/hi) queued to the TRC.  Trailing
 * comments are cumulative byte offsets; next_ptr is counted as 4
 * bytes, so the 32-byte layout assumes a 32-bit host -- confirm. */
861#pragma pack(1)
862typedef struct ACBlockType {
863	__u16 cmd_done_status;		/* 02 */
864	__u16 cmd_info;			/* 04 */
865	__u32 trc_next_ptr;		/* 08 */
866	__u16 cmd;			/* 10 */
867	__u16 subcmd;			/* 12 */
868	__u16 data_offset_lo;		/* 14 */
869	__u16 data_offset_hi;		/* 16 */
870
871	struct ACBlockType *next_ptr;	/* 20 */
872
873	__u8 filler[12];		/* 32 */
874} ACBlock;
875
876#define NUM_OF_INTERRUPTS 0x20
877
/* Interrupt Status Block: a ring of NUM_OF_INTERRUPTS (0x20) status
 * words filled in by the adapter and consumed by the ISR. */
878#pragma pack(1)
879typedef struct {
880	Interrupt_Status_Word IStatus[NUM_OF_INTERRUPTS];
881} ISBlock;
882
/* System Control Block (host -> TRC): command word validated by
 * SCLB_VALID, interrupt-acknowledge code, resume-control bits
 * (SCLB_RC_*) and the interrupt mask (SCLB_IMC_*).  Trailing comments
 * are cumulative byte offsets; packed to 16 bytes. */
883#pragma pack(1)
884typedef struct {
885	__u16 valid_command;		/* 02 */
886	__u16 iack_code;		/* 04 */
887	__u16 resume_control;		/* 06 */
888	__u16 int_mask_control;		/* 08 */
889	__u16 int_mask_state;		/* 10 */
890
891	__u8 filler[6];			/* 16 */
892} SCLBlock;
893
/* System Configuration Block: anchors the SCLB, ACB chain and ISB for
 * the adapter (trc_* fields) and for the host (typed pointers).
 * Trailing comments are cumulative byte offsets, again counting host
 * pointers as 4 bytes -- 32-bit-host layout, confirm. */
894#pragma pack(1)
895typedef struct
896{
897	__u16 config;		/* 02 */
898	__u32 trc_sclb_ptr;	/* 06 */
899	__u32 trc_acb_ptr;	/* 10 */
900	__u32 trc_isb_ptr;	/* 14 */
901	__u16 isbsiz;		/* 16 */
902
903	SCLBlock *sclb_ptr;	/* 20 */
904	ACBlock *acb_ptr;	/* 24 */
905	ISBlock *isb_ptr;	/* 28 */
906
907	__u16 Non_Mac_Rx_Bdbs;	/* 30 DEBUG */
908	__u8 filler[2];		/* 32 */
909
910} SCGBlock;
911
/* Initial System Configuration Pointer: lives at a fixed offset in
 * shared RAM (INIT_SYS_CONFIG_PTR_OFFSET) and points at the SCGBlock,
 * once for the adapter (trc_scgb_ptr) and once for the host. */
912#pragma pack(1)
913typedef struct
914{
915	__u32 trc_scgb_ptr;
916	SCGBlock *scgb_ptr;
917} ISCPBlock;
918#pragma pack()
919
/* Per-adapter private state, reached via netdev_priv(dev).  Groups:
 * shared-control-block pointers, ACB chain, per-queue FCB/BDB/buffer
 * rings (indexed MAC_QUEUE / NON_MAC_QUEUE, and by TX priority),
 * ring-protocol state, and board-identification data filled in at
 * probe time. */
920typedef struct net_local {
	/* Host-side views of the shared control structures above. */
921	ISCPBlock *iscpb_ptr;
922	SCGBlock *scgb_ptr;
923	SCLBlock *sclb_ptr;
924	ISBlock *isb_ptr;
925
926	ACBlock *acb_head;
927	ACBlock *acb_curr;
928	ACBlock *acb_next;
929
930	__u8 adapter_name[12];
931
932	__u16 num_rx_bdbs [NUM_RX_QS_USED];
933	__u16 num_rx_fcbs [NUM_RX_QS_USED];
934
935	__u16 num_tx_bdbs [NUM_TX_QS_USED];
936	__u16 num_tx_fcbs [NUM_TX_QS_USED];
937
938	__u16 num_of_tx_buffs;
939
940	__u16 tx_buff_size [NUM_TX_QS_USED];
941	__u16 tx_buff_used [NUM_TX_QS_USED];
942	__u16 tx_queue_status [NUM_TX_QS_USED];
943
	/* Per-TX-queue descriptor rings and buffer windows. */
944	FCBlock *tx_fcb_head[NUM_TX_QS_USED];
945	FCBlock *tx_fcb_curr[NUM_TX_QS_USED];
946	FCBlock *tx_fcb_end[NUM_TX_QS_USED];
947	BDBlock *tx_bdb_head[NUM_TX_QS_USED];
948	__u16 *tx_buff_head[NUM_TX_QS_USED];
949	__u16 *tx_buff_end[NUM_TX_QS_USED];
950	__u16 *tx_buff_curr[NUM_TX_QS_USED];
951	__u16 num_tx_fcbs_used[NUM_TX_QS_USED];
952
	/* Per-RX-queue descriptor rings and buffer windows. */
953	FCBlock *rx_fcb_head[NUM_RX_QS_USED];
954	FCBlock *rx_fcb_curr[NUM_RX_QS_USED];
955	BDBlock *rx_bdb_head[NUM_RX_QS_USED];
956	BDBlock *rx_bdb_curr[NUM_RX_QS_USED];
957	BDBlock *rx_bdb_end[NUM_RX_QS_USED];
958	__u16 *rx_buff_head[NUM_RX_QS_USED];
959	__u16 *rx_buff_end[NUM_RX_QS_USED];
960
961	__u32 *ptr_local_ring_num;
962
963	__u32 sh_mem_used;
964
965	__u16 page_offset_mask;
966
967	__u16 authorized_function_classes;
968	__u16 authorized_access_priority;
969
970	__u16 num_acbs;
971	__u16 num_acbs_used;
972	__u16 acb_pending;
973
974	__u16 current_isb_index;
975
	/* Token-ring protocol / join state machine. */
976	__u8 monitor_state;
977	__u8 monitor_state_ready;
978	__u16 ring_status;
979	__u8 ring_status_flags;
980	__u8 state;
981
982	__u8 join_state;
983
	/* MCA slot number (0 when not an MCA board; see setup_card()). */
984	__u8 slot_num;
985	__u16 pos_id;
986
987	__u32 *ptr_una;
988	__u32 *ptr_bcn_type;
989	__u32 *ptr_tx_fifo_underruns;
990	__u32 *ptr_rx_fifo_underruns;
991	__u32 *ptr_rx_fifo_overruns;
992	__u32 *ptr_tx_fifo_overruns;
993	__u32 *ptr_tx_fcb_overruns;
994	__u32 *ptr_rx_fcb_overruns;
995	__u32 *ptr_tx_bdb_overruns;
996	__u32 *ptr_rx_bdb_overruns;
997
998	__u16 receive_queue_number;
999
1000	__u8 rx_fifo_overrun_count;
1001	__u8 tx_fifo_overrun_count;
1002
1003	__u16 adapter_flags;
1004	__u16 adapter_flags1;
1005	__u16 *misc_command_data;
1006	__u16 max_packet_size;
1007
1008	__u16 config_word0;
1009	__u16 config_word1;
1010
1011	__u8 trc_mask;
1012
1013	__u16 source_ring_number;
1014	__u16 target_ring_number;
1015
1016	__u16 microcode_version;
1017
	/* Board identification, filled in by the probe routines. */
1018	__u16 bic_type;
1019	__u16 nic_type;
1020	__u16 board_id;
1021
1022	__u16 rom_size;
1023	__u32 rom_base;
1024	__u16 ram_size;
1025	__u16 ram_usable;
1026	__u32 ram_base;
1027	__u32 ram_access;
1028
1029	__u16 extra_info;
1030	__u16 mode_bits;
1031	__u16 media_menu;
1032	__u16 media_type;
1033	__u16 adapter_bus;
1034
1035	__u16 status;
1036	__u16 receive_mask;
1037
1038	__u16 group_address_0;
1039	__u16 group_address[2];
1040	__u16 functional_address_0;
1041	__u16 functional_address[2];
1042	__u16 bitwise_group_address[2];
1043
1044	__u8 cleanup;
1045
1046	struct sk_buff_head SendSkbQueue;
1047	__u16 QueueSkb;
1048
1049	struct tr_statistics MacStat;	/* MAC statistics structure */
1050
1051	spinlock_t lock;
1052} NET_LOCAL;
1053
1054/************************************
1055 * SNMP-ON-BOARD Agent Link Structure
1056 ************************************/
1057
/*
 * Link table handed to an on-board SNMP agent.  The agent locates it by
 * scanning for the literal signature "SmcLinkTable" and then calls the
 * driver through the function pointers below.
 * NOTE(review): pointer fields are untyped (void *) by design -- the
 * agent side defines the calling conventions; confirm against the SMC
 * agent spec before tightening these types.
 */
typedef struct {
        __u8 LnkSigStr[12];     /* signature string "SmcLinkTable" */
        __u8 LnkDrvTyp;         /* 1=Redbox ODI, 2=ODI DOS, 3=ODI OS/2, 4=NDIS DOS */
        __u8 LnkFlg;            /* 0 if no agent linked, 1 if agent linked */
        void *LnkNfo;           /* routine which returns pointer to NIC info */
        void *LnkAgtRcv;        /* pointer to agent receive trap entry */
        void *LnkAgtXmt;        /* pointer to agent transmit trap entry */
        void *LnkGet;           /* pointer to NIC receive data copy routine */
        void *LnkSnd;           /* pointer to NIC send routine */
        void *LnkRst;           /* pointer to NIC driver reset routine */
        void *LnkMib;           /* pointer to MIB data base */
        void *LnkMibAct;        /* pointer to MIB action routine list */
        __u16 LnkCntOffset;     /* offset to error counters */
        __u16 LnkCntNum;        /* number of error counters */
        __u16 LnkCntSize;       /* size of error counters i.e. 32 = 32 bits */
        void *LnkISR;           /* pointer to interrupt vector */
        __u8 LnkFrmTyp;         /* 1=Ethernet, 2=Token Ring */
        __u8 LnkDrvVer1;        /* driver major version */
        __u8 LnkDrvVer2;        /* driver minor version */
} AgentLink;
1082
/*
 * Definitions for pcm_card_flags(bit_mapped)
 */
#define REG_COMPLETE            0x0001
#define INSERTED                0x0002
#define PCC_INSERTED            0x0004  /* 1=currently inserted, 0=cur removed */

/*
 * Adapter RAM test patterns (alternating-bit walks used by the RAM test)
 */
#define RAM_PATTERN_1           0x55AA
#define RAM_PATTERN_2           0x9249
#define RAM_PATTERN_3           0xDB6D

/*
 * definitions for RAM test
 */
#define ROM_SIGNATURE           0xAA55
#define MIN_ROM_SIZE            0x2000

/*
 * Return Codes
 */
#define SUCCESS                 0x0000
#define ADAPTER_AND_CONFIG      0x0001
#define ADAPTER_NO_CONFIG       0x0002
#define NOT_MY_INTERRUPT        0x0003
#define FRAME_REJECTED          0x0004
#define EVENTS_DISABLED         0x0005
#define OUT_OF_RESOURCES        0x0006
#define INVALID_PARAMETER       0x0007
#define INVALID_FUNCTION        0x0008
#define INITIALIZE_FAILED       0x0009
#define CLOSE_FAILED            0x000A
#define MAX_COLLISIONS          0x000B
#define NO_SUCH_DESTINATION     0x000C
#define BUFFER_TOO_SMALL_ERROR  0x000D
#define ADAPTER_CLOSED          0x000E
#define UCODE_NOT_PRESENT       0x000F
#define FIFO_UNDERRUN           0x0010
#define DEST_OUT_OF_RESOURCES   0x0011
#define ADAPTER_NOT_INITIALIZED 0x0012
#define PENDING                 0x0013
#define UCODE_PRESENT           0x0014
#define NOT_INIT_BY_BRIDGE      0x0015

/* hard-failure return codes start at 0x0080 */
#define OPEN_FAILED             0x0080
#define HARDWARE_FAILED         0x0081
#define SELF_TEST_FAILED        0x0082
#define RAM_TEST_FAILED         0x0083
#define RAM_CONFLICT            0x0084
#define ROM_CONFLICT            0x0085
#define UNKNOWN_ADAPTER         0x0086
#define CONFIG_ERROR            0x0087
#define CONFIG_WARNING          0x0088
#define NO_FIXED_CNFG           0x0089
#define EEROM_CKSUM_ERROR       0x008A
#define ROM_SIGNATURE_ERROR     0x008B
#define ROM_CHECKSUM_ERROR      0x008C
#define ROM_SIZE_ERROR          0x008D
#define UNSUPPORTED_NIC_CHIP    0x008E
#define NIC_REG_ERROR           0x008F
#define BIC_REG_ERROR           0x0090
#define MICROCODE_TEST_ERROR    0x0091
#define LOBE_MEDIA_TEST_FAILED  0x0092

#define ADAPTER_FOUND_LAN_CORRUPT 0x009B

#define ADAPTER_NOT_FOUND       0xFFFF

#define ILLEGAL_FUNCTION        INVALID_FUNCTION

/* Errors (bit-mapped configuration errors) */
#define IO_BASE_INVALID         0x0001
#define IO_BASE_RANGE           0x0002
#define IRQ_INVALID             0x0004
#define IRQ_RANGE               0x0008
#define RAM_BASE_INVALID        0x0010
#define RAM_BASE_RANGE          0x0020
#define RAM_SIZE_RANGE          0x0040
#define MEDIA_INVALID           0x0800

/* Warnings (share the same bit space as the errors above) */
#define IRQ_MISMATCH            0x0080
#define RAM_BASE_MISMATCH       0x0100
#define RAM_SIZE_MISMATCH       0x0200
#define BUS_MODE_MISMATCH       0x0400

#define RX_CRC_ERROR            0x01
#define RX_ALIGNMENT_ERROR      0x02
#define RX_HW_FAILED            0x80

/*
 * Definitions for the field RING_STATUS_FLAGS
 * (note the uppercase 0X spelling is historical; values are plain hex)
 */
#define RING_STATUS_CHANGED     0X01
#define MONITOR_STATE_CHANGED   0X02
#define JOIN_STATE_CHANGED      0X04

/*
 * Definitions for the field JOIN_STATE
 */
#define JS_BYPASS_STATE                 0x00
#define JS_LOBE_TEST_STATE              0x01
#define JS_DETECT_MONITOR_PRESENT_STATE 0x02
#define JS_AWAIT_NEW_MONITOR_STATE      0x03
#define JS_DUPLICATE_ADDRESS_TEST_STATE 0x04
#define JS_NEIGHBOR_NOTIFICATION_STATE  0x05
#define JS_REQUEST_INITIALIZATION_STATE 0x06
#define JS_JOIN_COMPLETE_STATE          0x07
#define JS_BYPASS_WAIT_STATE            0x08

/*
 * Definitions for the field MONITOR_STATE
 * (0x08 is unused in this enumeration)
 */
#define MS_MONITOR_FSM_INACTIVE         0x00
#define MS_REPEAT_BEACON_STATE          0x01
#define MS_REPEAT_CLAIM_TOKEN_STATE     0x02
#define MS_TRANSMIT_CLAIM_TOKEN_STATE   0x03
#define MS_STANDBY_MONITOR_STATE        0x04
#define MS_TRANSMIT_BEACON_STATE        0x05
#define MS_ACTIVE_MONITOR_STATE         0x06
#define MS_TRANSMIT_RING_PURGE_STATE    0x07
#define MS_BEACON_TEST_STATE            0x09

/*
 * Definitions for the bit-field RING_STATUS
 * (0x0200 is unused in this bit-field)
 */
#define SIGNAL_LOSS             0x8000
#define HARD_ERROR              0x4000
#define SOFT_ERROR              0x2000
#define TRANSMIT_BEACON         0x1000
#define LOBE_WIRE_FAULT         0x0800
#define AUTO_REMOVAL_ERROR      0x0400
#define REMOVE_RECEIVED         0x0100
#define COUNTER_OVERFLOW        0x0080
#define SINGLE_STATION          0x0040
#define RING_RECOVERY           0x0020

/*
 * Definitions for the field BUS_TYPE
 */
#define AT_BUS                  0x00
#define MCA_BUS                 0x01
#define EISA_BUS                0x02
#define PCI_BUS                 0x03
#define PCMCIA_BUS              0x04

/*
 * Definitions for adapter_flags
 */
#define RX_VALID_LOOKAHEAD      0x0001
#define FORCED_16BIT_MODE       0x0002
#define ADAPTER_DISABLED        0x0004
#define TRANSMIT_CHAIN_INT      0x0008
#define EARLY_RX_FRAME          0x0010
#define EARLY_TX                0x0020
#define EARLY_RX_COPY           0x0040
#define USES_PHYSICAL_ADDR      0x0080  /* Rsvd for DEC PCI and 9232 */
#define NEEDS_PHYSICAL_ADDR     0x0100  /* Reserved*/
#define RX_STATUS_PENDING       0x0200
#define ERX_DISABLED            0x0400  /* EARLY_RX_ENABLE rcv_mask */
#define ENABLE_TX_PENDING       0x0800
#define ENABLE_RX_PENDING       0x1000
#define PERM_CLOSE              0x2000
#define IO_MAPPED               0x4000  /* IOmapped bus interface 795 */
#define ETX_DISABLED            0x8000


/*
 * Definitions for adapter_flags1
 */
#define TX_PHY_RX_VIRT          0x0001
#define NEEDS_HOST_RAM          0x0002
#define NEEDS_MEDIA_TYPE        0x0004
#define EARLY_RX_DONE           0x0008
#define PNP_BOOT_BIT            0x0010  /* activates PnP & config on power-up */
                                        /* clear => regular PnP operation */
#define PNP_ENABLE              0x0020  /* regular PnP operation clear => */
                                        /* no PnP, overrides PNP_BOOT_BIT */
#define SATURN_ENABLE           0x0040

#define ADAPTER_REMOVABLE       0x0080  /* adapter is hot swappable */
#define TX_PHY                  0x0100  /* Uses physical address for tx bufs */
#define RX_PHY                  0x0200  /* Uses physical address for rx bufs */
#define TX_VIRT                 0x0400  /* Uses virtual addr for tx bufs */
#define RX_VIRT                 0x0800
#define NEEDS_SERVICE           0x1000

/*
 * Adapter Status Codes
 * (0x0004 is unused in this enumeration)
 */
#define OPEN                    0x0001
#define INITIALIZED             0x0002
#define CLOSED                  0x0003
#define FAILED                  0x0005
#define NOT_INITIALIZED         0x0006
#define IO_CONFLICT             0x0007
#define CARD_REMOVED            0x0008
#define CARD_INSERTED           0x0009

/*
 * Mode Bit Definitions
 */
#define INTERRUPT_STATUS_BIT    0x8000  /* PC Interrupt Line: 0 = Not Enabled */
#define BOOT_STATUS_MASK        0x6000  /* Mask to isolate BOOT_STATUS */
#define BOOT_INHIBIT            0x0000  /* BOOT_STATUS is 'inhibited' */
#define BOOT_TYPE_1             0x2000  /* Unused BOOT_STATUS value */
#define BOOT_TYPE_2             0x4000  /* Unused BOOT_STATUS value */
#define BOOT_TYPE_3             0x6000  /* Unused BOOT_STATUS value */
#define ZERO_WAIT_STATE_MASK    0x1800  /* Mask to isolate Wait State flags */
#define ZERO_WAIT_STATE_8_BIT   0x1000  /* 0 = Disabled (Inserts Wait States) */
#define ZERO_WAIT_STATE_16_BIT  0x0800  /* 0 = Disabled (Inserts Wait States) */
#define LOOPING_MODE_MASK       0x0007
#define LOOPBACK_MODE_0         0x0000
#define LOOPBACK_MODE_1         0x0001
#define LOOPBACK_MODE_2         0x0002
#define LOOPBACK_MODE_3         0x0003
#define LOOPBACK_MODE_4         0x0004
#define LOOPBACK_MODE_5         0x0005
#define LOOPBACK_MODE_6         0x0006
#define LOOPBACK_MODE_7         0x0007
#define AUTO_MEDIA_DETECT       0x0008
#define MANUAL_CRC              0x0010
#define EARLY_TOKEN_REL         0x0020  /* Early Token Release for Token Ring */
#define UMAC                    0x0040
#define UTP2_PORT               0x0080  /* For 8216T2, 0=port A, 1=Port B. */
#define BNC_10BT_INTERFACE      0x0600  /* BNC and UTP current media set */
#define UTP_INTERFACE           0x0500  /* Ethernet UTP Only. */
#define BNC_INTERFACE           0x0400
#define AUI_INTERFACE           0x0300
#define AUI_10BT_INTERFACE      0x0200
#define STARLAN_10_INTERFACE    0x0100
#define INTERFACE_TYPE_MASK     0x0700

/*
 * Media Type Bit Definitions
 *
 * legend:      TP = Twisted Pair
 *              STP = Shielded twisted pair
 *              UTP = Unshielded twisted pair
 */

#define CNFG_MEDIA_TYPE_MASK    0x001e  /* POS Register 3 Mask */

#define MEDIA_S10               0x0000  /* Ethernet adapter, TP. */
#define MEDIA_AUI_UTP           0x0001  /* Ethernet adapter, AUI/UTP media */
#define MEDIA_BNC               0x0002  /* Ethernet adapter, BNC media. */
#define MEDIA_AUI               0x0003  /* Ethernet Adapter, AUI media. */
#define MEDIA_STP_16            0x0004  /* TokenRing adap, 16Mbit STP. */
#define MEDIA_STP_4             0x0005  /* TokenRing adap, 4Mbit STP. */
#define MEDIA_UTP_16            0x0006  /* TokenRing adap, 16Mbit UTP. */
#define MEDIA_UTP_4             0x0007  /* TokenRing adap, 4Mbit UTP. */
#define MEDIA_UTP               0x0008  /* Ethernet adapter, UTP media (no AUI) */
#define MEDIA_BNC_UTP           0x0010  /* Ethernet adapter, BNC/UTP media */
#define MEDIA_UTPFD             0x0011  /* Ethernet adapter, TP full duplex */
#define MEDIA_UTPNL             0x0012  /* Ethernet adapter, TP with link integrity test disabled */
#define MEDIA_AUI_BNC           0x0013  /* Ethernet adapter, AUI/BNC media */
#define MEDIA_AUI_BNC_UTP       0x0014  /* Ethernet adapter, AUI_BNC/UTP */
#define MEDIA_UTPA              0x0015  /* Ethernet UTP-10Mbps Ports A */
#define MEDIA_UTPB              0x0016  /* Ethernet UTP-10Mbps Ports B */
#define MEDIA_STP_16_UTP_16     0x0017  /* Token Ring STP-16Mbps/UTP-16Mbps */
#define MEDIA_STP_4_UTP_4       0x0018  /* Token Ring STP-4Mbps/UTP-4Mbps */

#define MEDIA_STP100_UTP100     0x0020  /* Ethernet STP-100Mbps/UTP-100Mbps */
#define MEDIA_UTP100FD          0x0021  /* Ethernet UTP-100Mbps, full duplex */
#define MEDIA_UTP100            0x0022  /* Ethernet UTP-100Mbps */


#define MEDIA_UNKNOWN           0xFFFF  /* Unknown adapter/media type */

/*
 * Definitions for the field:
 * media_type2
 */
#define MEDIA_TYPE_MII              0x0001
#define MEDIA_TYPE_UTP              0x0002
#define MEDIA_TYPE_BNC              0x0004
#define MEDIA_TYPE_AUI              0x0008
#define MEDIA_TYPE_S10              0x0010
#define MEDIA_TYPE_AUTO_SENSE       0x1000
#define MEDIA_TYPE_AUTO_DETECT      0x4000
#define MEDIA_TYPE_AUTO_NEGOTIATE   0x8000

/*
 * Definitions for the field:
 * line_speed
 */
#define LINE_SPEED_UNKNOWN      0x0000
#define LINE_SPEED_4            0x0001
#define LINE_SPEED_10           0x0002
#define LINE_SPEED_16           0x0004
#define LINE_SPEED_100          0x0008
#define LINE_SPEED_T4           0x0008  /* 100BaseT4 aliased for 9332BVT */
#define LINE_SPEED_FULL_DUPLEX  0x8000

/*
 * Definitions for the field:
 * bic_type (Bus interface chip type)
 */
#define BIC_NO_CHIP             0x0000  /* Bus interface chip not implemented */
#define BIC_583_CHIP            0x0001  /* 83C583 bus interface chip */
#define BIC_584_CHIP            0x0002  /* 83C584 bus interface chip */
#define BIC_585_CHIP            0x0003  /* 83C585 bus interface chip */
#define BIC_593_CHIP            0x0004  /* 83C593 bus interface chip */
#define BIC_594_CHIP            0x0005  /* 83C594 bus interface chip */
#define BIC_564_CHIP            0x0006  /* PCMCIA Bus interface chip */
#define BIC_790_CHIP            0x0007  /* 83C790 bus i-face/Ethernet NIC chip */
#define BIC_571_CHIP            0x0008  /* 83C571 EISA bus master i-face */
#define BIC_587_CHIP            0x0009  /* Token Ring AT bus master i-face */
#define BIC_574_CHIP            0x0010  /* FEAST bus interface chip */
#define BIC_8432_CHIP           0x0011  /* 8432 bus i-face/Ethernet NIC(DEC PCI) */
#define BIC_9332_CHIP           0x0012  /* 9332 bus i-face/100Mbps Ether NIC(DEC PCI) */
#define BIC_8432E_CHIP          0x0013  /* 8432 Enhanced bus iface/Ethernet NIC(DEC) */
#define BIC_EPIC100_CHIP        0x0014  /* EPIC/100 10/100 Mbps Ethernet BIC/NIC */
#define BIC_C94_CHIP            0x0015  /* 91C94 bus i-face in PCMCIA mode */
#define BIC_X8020_CHIP          0x0016  /* Xilinx PCMCIA multi-func i-face */

/*
 * Definitions for the field:
 * nic_type (Bus interface chip type)
 */
#define NIC_UNK_CHIP            0x0000  /* Unknown NIC chip */
#define NIC_8390_CHIP           0x0001  /* DP8390 Ethernet NIC */
#define NIC_690_CHIP            0x0002  /* 83C690 Ethernet NIC */
#define NIC_825_CHIP            0x0003  /* 83C825 Token Ring NIC */
/* #define NIC_???_CHIP         0x0004 */ /* Not used */
/* #define NIC_???_CHIP         0x0005 */ /* Not used */
/* #define NIC_???_CHIP         0x0006 */ /* Not used */
#define NIC_790_CHIP            0x0007  /* 83C790 bus i-face/Ethernet NIC chip */
#define NIC_C100_CHIP           0x0010  /* FEAST 100Mbps Ethernet NIC */
#define NIC_8432_CHIP           0x0011  /* 8432 bus i-face/Ethernet NIC(DEC PCI) */
#define NIC_9332_CHIP           0x0012  /* 9332 bus i-face/100Mbps Ether NIC(DEC PCI) */
#define NIC_8432E_CHIP          0x0013  /* 8432 enhanced bus iface/Ethernet NIC(DEC) */
#define NIC_EPIC100_CHIP        0x0014  /* EPIC/100 10/100 Mbps Ethernet BIC/NIC */
#define NIC_C94_CHIP            0x0015  /* 91C94 PC Card with multi func */

/*
 * Definitions for the field:
 * adapter_type The adapter_type field describes the adapter/bus
 * configuration.
 */
#define BUS_ISA16_TYPE          0x0001  /* 16 bit adap in 16 bit (E)ISA slot */
#define BUS_ISA8_TYPE           0x0002  /* 8/16b adap in 8 bit XT/(E)ISA slot */
#define BUS_MCA_TYPE            0x0003  /* Micro Channel adapter */

/*
 * Receive Mask definitions
 */
#define ACCEPT_MULTICAST                0x0001
#define ACCEPT_BROADCAST                0x0002
#define PROMISCUOUS_MODE                0x0004
#define ACCEPT_SOURCE_ROUTING           0x0008
#define ACCEPT_ERR_PACKETS              0x0010
#define ACCEPT_ATT_MAC_FRAMES           0x0020
#define ACCEPT_MULTI_PROM               0x0040
#define TRANSMIT_ONLY                   0x0080
#define ACCEPT_EXT_MAC_FRAMES           0x0100
#define EARLY_RX_ENABLE                 0x0200
#define PKT_SIZE_NOT_NEEDED             0x0400
/* 0x0808 = ACCEPT_SOURCE_ROUTING | 0x0800 (spanning bit) */
#define ACCEPT_SOURCE_ROUTING_SPANNING  0x0808

/* 0x0120 = ACCEPT_EXT_MAC_FRAMES | ACCEPT_ATT_MAC_FRAMES */
#define ACCEPT_ALL_MAC_FRAMES           0x0120

/*
 * config_mode defs
 */
#define STORE_EEROM             0x0001  /* Store config in EEROM. */
#define STORE_REGS              0x0002  /* Store config in register set. */

/*
 * equates for lmac_flags in adapter structure (Ethernet)
 */
#define MEM_DISABLE             0x0001
#define RX_STATUS_POLL          0x0002
#define USE_RE_BIT              0x0004
/*#define RESERVED              0x0008 */
/*#define RESERVED              0x0010 */
/*#define RESERVED              0x0020 */
/*#define RESERVED              0x0040 */
/*#define RESERVED              0x0080 */
/*#define RESERVED              0x0100 */
/*#define RESERVED              0x0200 */
/*#define RESERVED              0x0400 */
/*#define RESERVED              0x0800 */
/*#define RESERVED              0x1000 */
/*#define RESERVED              0x2000 */
/*#define RESERVED              0x4000 */
/*#define RESERVED              0x8000 */

/* media_opts & media_set Fields bit defs for Ethernet ... */
#define MED_OPT_BNC             0x01
#define MED_OPT_UTP             0x02
#define MED_OPT_AUI             0x04
#define MED_OPT_10MB            0x08
#define MED_OPT_100MB           0x10
#define MED_OPT_S10             0x20

/* media_opts & media_set Fields bit defs for Token Ring ... */
#define MED_OPT_4MB             0x08
#define MED_OPT_16MB            0x10
#define MED_OPT_STP             0x40

#define MAX_8023_SIZE           1500    /* Max 802.3 size of frame. */
#define DEFAULT_ERX_VALUE       4       /* Number of 16-byte blocks for 790B early Rx. */
#define DEFAULT_ETX_VALUE       32      /* Number of bytes for 790B early Tx. */
#define DEFAULT_TX_RETRIES      3       /* Number of transmit retries */
#define LPBK_FRAME_SIZE         1024    /* Default loopback frame for Rx calibration test. */
#define MAX_LOOKAHEAD_SIZE      252     /* Max lookahead size for ethernet. */

/* TRC read/write command codes.
 * NOTE(review): the low byte appears to encode a word count and the high
 * byte an address/opcode -- confirm against the SMC TRC documentation.
 */
#define RW_MAC_STATE                    0x1101
#define RW_SA_OF_LAST_AMP_OR_SMP        0x2803
#define RW_PHYSICAL_DROP_NUMBER         0x3B02
#define RW_UPSTREAM_NEIGHBOR_ADDRESS    0x3E03
#define RW_PRODUCT_INSTANCE_ID          0x4B09

#define RW_TRC_STATUS_BLOCK             0x5412

#define RW_MAC_ERROR_COUNTERS_NO_CLEAR  0x8006
#define RW_MAC_ERROR_COUNTER_CLEAR      0x7A06
#define RW_CONFIG_REGISTER_0            0xA001
#define RW_CONFIG_REGISTER_1            0xA101
#define RW_PRESCALE_TIMER_THRESHOLD     0xA201
#define RW_TPT_THRESHOLD                0xA301
#define RW_TQP_THRESHOLD                0xA401
#define RW_TNT_THRESHOLD                0xA501
#define RW_TBT_THRESHOLD                0xA601
#define RW_TSM_THRESHOLD                0xA701
#define RW_TAM_THRESHOLD                0xA801
#define RW_TBR_THRESHOLD                0xA901
#define RW_TER_THRESHOLD                0xAA01
#define RW_TGT_THRESHOLD                0xAB01
#define RW_THT_THRESHOLD                0xAC01
#define RW_TRR_THRESHOLD                0xAD01
#define RW_TVX_THRESHOLD                0xAE01
#define RW_INDIVIDUAL_MAC_ADDRESS       0xB003

#define RW_INDIVIDUAL_GROUP_ADDRESS     0xB303  /* all of group addr */
#define RW_INDIVIDUAL_GROUP_ADDR_WORD_0 0xB301  /* 1st word of group addr */
#define RW_INDIVIDUAL_GROUP_ADDR        0xB402  /* 2nd-3rd word of group addr */
#define RW_FUNCTIONAL_ADDRESS           0xB603  /* all of functional addr */
#define RW_FUNCTIONAL_ADDR_WORD_0       0xB601  /* 1st word of func addr */
#define RW_FUNCTIONAL_ADDR              0xB702  /* 2nd-3rd word func addr */

#define RW_BIT_SIGNIFICANT_GROUP_ADDR   0xB902
#define RW_SOURCE_RING_BRIDGE_NUMBER    0xBB01
#define RW_TARGET_RING_NUMBER           0xBC01

#define RW_HIC_INTERRUPT_MASK           0xC601

#define SOURCE_ROUTING_SPANNING_BITS    0x00C0  /* Spanning Tree Frames */
#define SOURCE_ROUTING_EXPLORER_BIT     0x0040  /* Explorer and Single Route */

        /* CSR bits, write direction */

#define CSR_MSK_ALL     0x80    // Bic 587 Only
#define CSR_MSKTINT     0x20
#define CSR_MSKCBUSY    0x10
#define CSR_CLRTINT     0x08
#define CSR_CLRCBUSY    0x04
#define CSR_WCSS        0x02
#define CSR_CA          0x01

        /* CSR bits, read direction */

#define CSR_TINT        0x20
#define CSR_CINT        0x10
#define CSR_TSTAT       0x08
#define CSR_CSTAT       0x04
#define CSR_FAULT       0x02
#define CSR_CBUSY       0x01

#define LAAR_MEM16ENB   0x80
#define Zws16           0x20

#define IRR_IEN         0x80
#define Zws8            0x01

#define IMCCR_EIL       0x04
1563
/* Token Ring MAC frame header as laid out on the wire.
 * NOTE(review): overlaying this struct on frame data assumes the compiler
 * adds no padding before the __u16 vl field -- confirm for the target ABI.
 */
typedef struct {
        __u8 ac;        /* Access Control */
        __u8 fc;        /* Frame Control */
        __u8 da[6];     /* Dest Addr */
        __u8 sa[6];     /* Source Addr */

        __u16 vl;       /* Vector Length */
        __u8 dc_sc;     /* Dest/Source Class */
        __u8 vc;        /* Vector Code */
        } MAC_HEADER;
1574
/* Payload room left in a receive buffer after the MAC header and the
 * 2-byte vector length/code pair. */
#define MAX_SUB_VECTOR_INFO (RX_DATA_BUFFER_SIZE - sizeof(MAC_HEADER) - 2)

/* One sub-vector of a Token Ring MAC frame: length, code, then data. */
typedef struct
        {
        __u8 svl;       /* Sub-vector Length */
        __u8 svi;       /* Sub-vector Code */
        __u8 svv[MAX_SUB_VECTOR_INFO];  /* Sub-vector Info */
        } MAC_SUB_VECTOR;

#endif /* __KERNEL__ */
#endif /* __LINUX_SMCTR_H */
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
new file mode 100644
index 00000000000..793020347e5
--- /dev/null
+++ b/drivers/net/tokenring/tms380tr.c
@@ -0,0 +1,2352 @@
1/*
2 * tms380tr.c: A network driver library for Texas Instruments TMS380-based
3 * Token Ring Adapters.
4 *
5 * Originally sktr.c: Written 1997 by Christoph Goos
6 *
7 * A fine result of the Linux Systems Network Architecture Project.
8 * http://www.vanheusden.com/sna/
9 *
10 * This software may be used and distributed according to the terms
11 * of the GNU General Public License, incorporated herein by reference.
12 *
13 * The following modules are currently available for card support:
14 * - tmspci (Generic PCI card support)
15 * - abyss (Madge PCI support)
16 * - tmsisa (SysKonnect TR4/16 ISA)
17 *
18 * Sources:
 19 * - The hardware related parts of this driver are taken from
20 * the SysKonnect Token Ring driver for Windows NT.
21 * - I used the IBM Token Ring driver 'ibmtr.c' as a base for this
22 * driver, as well as the 'skeleton.c' driver by Donald Becker.
23 * - Also various other drivers in the linux source tree were taken
24 * as samples for some tasks.
25 * - TI TMS380 Second-Generation Token Ring User's Guide
26 * - TI datasheets for respective chips
27 * - David Hein at Texas Instruments
28 * - Various Madge employees
29 *
30 * Maintainer(s):
31 * JS Jay Schulist jschlst@samba.org
32 * CG Christoph Goos cgoos@syskonnect.de
33 * AF Adam Fritzler
34 * MLP Mike Phillips phillim@amtrak.com
35 * JF Jochen Friedrich jochen@scram.de
36 *
37 * Modification History:
38 * 29-Aug-97 CG Created
39 * 04-Apr-98 CG Fixed problems caused by tok_timer_check
40 * 10-Apr-98 CG Fixed lockups at cable disconnection
 41 * 27-May-98 JS Formatted to Linux Kernel Format
42 * 31-May-98 JS Hacked in PCI support
43 * 16-Jun-98 JS Modulized for multiple cards with one driver
44 * Sep-99 AF Renamed to tms380tr (supports more than SK's)
45 * 23-Sep-99 AF Added Compaq and Thomas-Conrad PCI support
46 * Fixed a bug causing double copies on PCI
47 * Fixed for new multicast stuff (2.2/2.3)
 48 * 25-Sep-99 AF Upped TPL_NUM from 3 to 9
49 * Removed extraneous 'No free TPL'
50 * 22-Dec-99 AF Added Madge PCI Mk2 support and generalized
 51 * parts of the initialization procedure.
52 * 30-Dec-99 AF Turned tms380tr into a library ala 8390.
53 * Madge support is provided in the abyss module
54 * Generic PCI support is in the tmspci module.
55 * 30-Nov-00 JF Updated PCI code to support IO MMU via
56 * pci_map_static(). Alpha uses this MMU for ISA
57 * as well.
58 * 14-Jan-01 JF Fix DMA on ifdown/ifup sequences. Some
59 * cleanup.
60 * 13-Jan-02 JF Add spinlock to fix race condition.
61 * 09-Nov-02 JF Fixed printks to not SPAM the console during
62 * normal operation.
63 * 30-Dec-02 JF Removed incorrect __init from
64 * tms380tr_init_card.
65 * 22-Jul-05 JF Converted to dma-mapping.
66 *
67 * To do:
68 * 1. Multi/Broadcast packet handling (this may have fixed itself)
69 * 2. Write a sktrisa module that includes the old ISA support (done)
70 * 3. Allow modules to load their own microcode
71 * 4. Speed up the BUD process -- freezing the kernel for 3+sec is
72 * quite unacceptable.
73 * 5. Still a few remaining stalls when the cable is unplugged.
74 */
75
#ifdef MODULE
/* Version banner; only needed when built as a module. */
static const char version[] = "tms380tr.c: v1.10 30/12/2002 by Christoph Goos, Adam Fritzler\n";
#endif
79
80#include <linux/module.h>
81#include <linux/kernel.h>
82#include <linux/types.h>
83#include <linux/fcntl.h>
84#include <linux/interrupt.h>
85#include <linux/ptrace.h>
86#include <linux/ioport.h>
87#include <linux/in.h>
88#include <linux/string.h>
89#include <linux/time.h>
90#include <linux/errno.h>
91#include <linux/init.h>
92#include <linux/dma-mapping.h>
93#include <linux/delay.h>
94#include <linux/netdevice.h>
95#include <linux/etherdevice.h>
96#include <linux/skbuff.h>
97#include <linux/trdevice.h>
98#include <linux/firmware.h>
99#include <linux/bitops.h>
100
101#include <asm/system.h>
102#include <asm/io.h>
103#include <asm/dma.h>
104#include <asm/irq.h>
105#include <asm/uaccess.h>
106
107#include "tms380tr.h" /* Our Stuff */
108
/* Use 0 for production, 1 for verification, 2 for debug, and
 * 3 for very verbose debug.
 * Overridable at build time by defining TMS380TR_DEBUG.
 */
#ifndef TMS380TR_DEBUG
#define TMS380TR_DEBUG 0
#endif
/* Runtime copy of the compile-time debug level. */
static unsigned int tms380tr_debug = TMS380TR_DEBUG;
116
/* Index to functions, as function prototypes.
 * Alphabetical by function name.
 */

/* "A" */
/* "B" */
static int      tms380tr_bringup_diags(struct net_device *dev);
/* "C" */
static void     tms380tr_cancel_tx_queue(struct net_local* tp);
static int      tms380tr_chipset_init(struct net_device *dev);
static void     tms380tr_chk_irq(struct net_device *dev);
static void     tms380tr_chk_outstanding_cmds(struct net_device *dev);
static void     tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr);
static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqType);
int             tms380tr_close(struct net_device *dev);
static void     tms380tr_cmd_status_irq(struct net_device *dev);
/* "D" */
static void     tms380tr_disable_interrupts(struct net_device *dev);
#if TMS380TR_DEBUG > 0
static void     tms380tr_dump(unsigned char *Data, int length);
#endif
/* "E" */
static void     tms380tr_enable_interrupts(struct net_device *dev);
static void     tms380tr_exec_cmd(struct net_device *dev, unsigned short Command);
static void     tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue);
/* "F" */
/* "G" */
static struct net_device_stats *tms380tr_get_stats(struct net_device *dev);
/* "H" */
static netdev_tx_t tms380tr_hardware_send_packet(struct sk_buff *skb,
                        struct net_device *dev);
/* "I" */
static int      tms380tr_init_adapter(struct net_device *dev);
static void     tms380tr_init_ipb(struct net_local *tp);
static void     tms380tr_init_net_local(struct net_device *dev);
static void     tms380tr_init_opb(struct net_device *dev);
/* "M" */
/* "O" */
int             tms380tr_open(struct net_device *dev);
static void     tms380tr_open_adapter(struct net_device *dev);
/* "P" */
/* "R" */
static void     tms380tr_rcv_status_irq(struct net_device *dev);
static int      tms380tr_read_ptr(struct net_device *dev);
static void     tms380tr_read_ram(struct net_device *dev, unsigned char *Data,
                        unsigned short Address, int Length);
static int      tms380tr_reset_adapter(struct net_device *dev);
static void     tms380tr_reset_interrupt(struct net_device *dev);
static void     tms380tr_ring_status_irq(struct net_device *dev);
/* "S" */
static netdev_tx_t tms380tr_send_packet(struct sk_buff *skb,
                                        struct net_device *dev);
static void     tms380tr_set_multicast_list(struct net_device *dev);
static int      tms380tr_set_mac_address(struct net_device *dev, void *addr);
/* "T" */
static void     tms380tr_timer_chk(unsigned long data);
static void     tms380tr_timer_end_wait(unsigned long data);
static void     tms380tr_tx_status_irq(struct net_device *dev);
/* "U" */
static void     tms380tr_update_rcv_stats(struct net_local *tp,
                        unsigned char DataPtr[], unsigned int Length);
/* "W" */
void            tms380tr_wait(unsigned long time);
static void     tms380tr_write_rpl_status(RPL *rpl, unsigned int Status);
static void     tms380tr_write_tpl_status(TPL *tpl, unsigned int Status);

/* SIF register accessors: each call dispatches through a function
 * pointer stored in the device's net_local, so the card-specific
 * modules (abyss, tmspci, tmsisa, ...) supply their own I/O routines.
 * All of them expect 'dev' to be in scope at the call site.
 */
#define SIFREADB(reg) \
        (((struct net_local *)netdev_priv(dev))->sifreadb(dev, reg))
#define SIFWRITEB(val, reg) \
        (((struct net_local *)netdev_priv(dev))->sifwriteb(dev, val, reg))
#define SIFREADW(reg) \
        (((struct net_local *)netdev_priv(dev))->sifreadw(dev, reg))
#define SIFWRITEW(val, reg) \
        (((struct net_local *)netdev_priv(dev))->sifwritew(dev, val, reg))
191
192
193
#if 0 /* TMS380TR_DEBUG > 0 */
/* Disabled debug-only probe: walks every even byte value through the
 * SIFADR register (via both Madge register pages) and verifies each
 * readback, restoring the original SIFADR afterwards.  Returns 0 if
 * the SIF responds consistently, -1 otherwise.  Kept (under #if 0)
 * as a reference for bringing up new Madge MC hardware.
 */
static int madgemc_sifprobe(struct net_device *dev)
{
        unsigned char old, chk1, chk2;

        old = SIFREADB(SIFADR);  /* Get the old SIFADR value */

        chk1 = 0;       /* Begin with check value 0 */
        do {
                madgemc_setregpage(dev, 0);
                /* Write new SIFADR value */
                SIFWRITEB(chk1, SIFADR);
                chk2 = SIFREADB(SIFADR);
                if (chk2 != chk1)
                        return -1;

                madgemc_setregpage(dev, 1);
                /* Read, invert and write */
                chk2 = SIFREADB(SIFADD);
                if (chk2 != chk1)
                        return -1;

                madgemc_setregpage(dev, 0);
                chk2 ^= 0x0FE;
                SIFWRITEB(chk2, SIFADR);

                /* Read, invert and compare */
                madgemc_setregpage(dev, 1);
                chk2 = SIFREADB(SIFADD);
                madgemc_setregpage(dev, 0);
                chk2 ^= 0x0FE;

                if(chk1 != chk2)
                        return -1; /* No adapter */
                chk1 -= 2;
        } while(chk1 != 0);     /* Repeat 128 times (all byte values) */

        madgemc_setregpage(dev, 0); /* sanity */
        /* Restore the SIFADR value */
        SIFWRITEB(old, SIFADR);

        return 0;
}
#endif
238
239/*
240 * Open/initialize the board. This is called sometime after
241 * booting when the 'ifconfig' program is run.
242 *
243 * This routine should set everything up anew at each open, even
244 * registers that "should" only need to be set once at boot, so that
 245 * there is a non-reboot way to recover if something goes wrong.
246 */
/*
 * Open/initialize the board: run the chipset through reset, bringup
 * diagnostics and init, issue the adapter-open command, then sleep until
 * the completion interrupt arrives or the 30-second guard timer fires.
 * Returns 0 on success.
 * NOTE(review): failure paths return a bare -1 rather than a proper
 * -Exxx errno -- confirm callers before changing this.
 */
int tms380tr_open(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	int err;

	/* init the spinlock */
	spin_lock_init(&tp->lock);
	init_timer(&tp->timer);

	/* Reset the hardware here. Don't forget to set the station address. */

#ifdef CONFIG_ISA
	/* ISA cards with a DMA channel: program it for cascade mode under
	 * the DMA lock before touching the chipset.
	 */
	if(dev->dma > 0)
	{
		unsigned long flags=claim_dma_lock();
		disable_dma(dev->dma);
		set_dma_mode(dev->dma, DMA_MODE_CASCADE);
		enable_dma(dev->dma);
		release_dma_lock(flags);
	}
#endif
	
	err = tms380tr_chipset_init(dev);
	if(err)
	{
		printk(KERN_INFO "%s: Chipset initialization error\n", 
			dev->name);
		return -1;
	}

	/* Guard timer: tms380tr_timer_end_wait() wakes the sleep below
	 * if the adapter never interrupts within 30 seconds.
	 */
	tp->timer.expires = jiffies + 30*HZ;
	tp->timer.function = tms380tr_timer_end_wait;
	tp->timer.data = (unsigned long)dev;
	add_timer(&tp->timer);

	printk(KERN_DEBUG "%s: Adapter RAM size: %dK\n", 
	       dev->name, tms380tr_read_ptr(dev));

	tms380tr_enable_interrupts(dev);
	tms380tr_open_adapter(dev);

	netif_start_queue(dev);
	
	/* Wait for interrupt from hardware. If interrupt does not come,
	 * there will be a timeout from the timer.
	 * NOTE(review): interruptible_sleep_on() is inherently racy (a
	 * wakeup between setting Sleeping and sleeping is lost) and was
	 * removed in later kernels; the guard timer bounds the hang, but
	 * this should migrate to wait_event_interruptible().
	 */
	tp->Sleeping = 1;
	interruptible_sleep_on(&tp->wait_for_tok_int);
	del_timer(&tp->timer);

	/* If AdapterVirtOpenFlag is 1, the adapter is now open for use */
	if(tp->AdapterVirtOpenFlag == 0)
	{
		tms380tr_disable_interrupts(dev);
		return -1;
	}

	tp->StartTime = jiffies;

	/* Start function control timer */
	tp->timer.expires = jiffies + 2*HZ;
	tp->timer.function = tms380tr_timer_chk;
	tp->timer.data = (unsigned long)dev;
	add_timer(&tp->timer);

	return 0;
}
314
315/*
316 * Timeout function while waiting for event
317 */
318static void tms380tr_timer_end_wait(unsigned long data)
319{
320 struct net_device *dev = (struct net_device*)data;
321 struct net_local *tp = netdev_priv(dev);
322
323 if(tp->Sleeping)
324 {
325 tp->Sleeping = 0;
326 wake_up_interruptible(&tp->wait_for_tok_int);
327 }
328}
329
330/*
331 * Initialize the chipset
332 */
333static int tms380tr_chipset_init(struct net_device *dev)
334{
335 struct net_local *tp = netdev_priv(dev);
336 int err;
337
338 tms380tr_init_ipb(tp);
339 tms380tr_init_opb(dev);
340 tms380tr_init_net_local(dev);
341
342 if(tms380tr_debug > 3)
343 printk(KERN_DEBUG "%s: Resetting adapter...\n", dev->name);
344 err = tms380tr_reset_adapter(dev);
345 if(err < 0)
346 return -1;
347
348 if(tms380tr_debug > 3)
349 printk(KERN_DEBUG "%s: Bringup diags...\n", dev->name);
350 err = tms380tr_bringup_diags(dev);
351 if(err < 0)
352 return -1;
353
354 if(tms380tr_debug > 3)
355 printk(KERN_DEBUG "%s: Init adapter...\n", dev->name);
356 err = tms380tr_init_adapter(dev);
357 if(err < 0)
358 return -1;
359
360 if(tms380tr_debug > 3)
361 printk(KERN_DEBUG "%s: Done!\n", dev->name);
362 return 0;
363}
364
/*
 * Initializes the net_local structure.
 *
 * Resets the SCB/SSB mailboxes and all driver state flags, then builds
 * the circular transmit (TPL) and receive (RPL) descriptor chains.
 * Descriptor addresses handed to the adapter are offsets within the
 * net_local area rebased onto tp->dmabuffer (its bus/DMA address), in
 * big-endian byte order (htonl).
 */
static void tms380tr_init_net_local(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	int i;
	dma_addr_t dmabuf;

	/* Clear the system command block and system status block. */
	tp->scb.CMD = 0;
	tp->scb.Parm[0] = 0;
	tp->scb.Parm[1] = 0;

	tp->ssb.STS = 0;
	tp->ssb.Parm[0] = 0;
	tp->ssb.Parm[1] = 0;
	tp->ssb.Parm[2] = 0;

	tp->CMDqueue = 0;

	/* Reset all driver state flags to "adapter closed". */
	tp->AdapterOpenFlag = 0;
	tp->AdapterVirtOpenFlag = 0;
	tp->ScbInUse = 0;
	tp->OpenCommandIssued = 0;
	tp->ReOpenInProgress = 0;
	tp->HaltInProgress = 0;
	tp->TransmitHaltScheduled = 0;
	tp->LobeWireFaultLogged = 0;
	tp->LastOpenStatus = 0;
	tp->MaxPacketSize = DEFAULT_PACKET_SIZE;

	/* Create circular chain of transmit lists */
	for (i = 0; i < TPL_NUM; i++)
	{
		tp->Tpl[i].NextTPLAddr = htonl(((char *)(&tp->Tpl[(i+1) % TPL_NUM]) - (char *)tp) + tp->dmabuffer); /* DMA buffer may be MMU driven */
		tp->Tpl[i].Status = 0;
		tp->Tpl[i].FrameSize = 0;
		tp->Tpl[i].FragList[0].DataCount = 0;
		tp->Tpl[i].FragList[0].DataAddr = 0;
		tp->Tpl[i].NextTPLPtr = &tp->Tpl[(i+1) % TPL_NUM];
		tp->Tpl[i].MData = NULL;
		tp->Tpl[i].TPLIndex = i;
		tp->Tpl[i].DMABuff = 0;
		tp->Tpl[i].BusyFlag = 0;
	}

	tp->TplFree = tp->TplBusy = &tp->Tpl[0];

	/* Create circular chain of receive lists */
	for (i = 0; i < RPL_NUM; i++)
	{
		tp->Rpl[i].NextRPLAddr = htonl(((char *)(&tp->Rpl[(i+1) % RPL_NUM]) - (char *)tp) + tp->dmabuffer); /* DMA buffer may be MMU driven */
		tp->Rpl[i].Status = (RX_VALID | RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ);
		tp->Rpl[i].FrameSize = 0;
		tp->Rpl[i].FragList[0].DataCount = cpu_to_be16((unsigned short)tp->MaxPacketSize);

		/* Alloc skb and point adapter to data area */
		tp->Rpl[i].Skb = dev_alloc_skb(tp->MaxPacketSize);
		tp->Rpl[i].DMABuff = 0;

		/* skb == NULL ? then use local buffer */
		if(tp->Rpl[i].Skb == NULL)
		{
			/* Fall back to the pre-allocated bounce buffer;
			 * received data will be copied out later. */
			tp->Rpl[i].SkbStat = SKB_UNAVAILABLE;
			tp->Rpl[i].FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[i] - (char *)tp) + tp->dmabuffer);
			tp->Rpl[i].MData = tp->LocalRxBuffers[i];
		}
		else	/* SKB != NULL */
		{
			tp->Rpl[i].Skb->dev = dev;
			skb_put(tp->Rpl[i].Skb, tp->MaxPacketSize);

			/* data unreachable for DMA ? then use local buffer */
			dmabuf = dma_map_single(tp->pdev, tp->Rpl[i].Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
			if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
			{
				/* Mapped address is beyond what the adapter can
				 * reach; receive into the bounce buffer instead. */
				tp->Rpl[i].SkbStat = SKB_DATA_COPY;
				tp->Rpl[i].FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[i] - (char *)tp) + tp->dmabuffer);
				tp->Rpl[i].MData = tp->LocalRxBuffers[i];
			}
			else	/* DMA directly in skb->data */
			{
				tp->Rpl[i].SkbStat = SKB_DMA_DIRECT;
				tp->Rpl[i].FragList[0].DataAddr = htonl(dmabuf);
				tp->Rpl[i].MData = tp->Rpl[i].Skb->data;
				tp->Rpl[i].DMABuff = dmabuf;
			}
		}

		tp->Rpl[i].NextRPLPtr = &tp->Rpl[(i+1) % RPL_NUM];
		tp->Rpl[i].RPLIndex = i;
	}

	/* Tail element has RX_VALID cleared so the adapter stops there
	 * instead of wrapping past the driver. */
	tp->RplHead = &tp->Rpl[0];
	tp->RplTail = &tp->Rpl[RPL_NUM-1];
	tp->RplTail->Status = (RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ);
}
462
463/*
464 * Initializes the initialisation parameter block.
465 */
466static void tms380tr_init_ipb(struct net_local *tp)
467{
468 tp->ipb.Init_Options = BURST_MODE;
469 tp->ipb.CMD_Status_IV = 0;
470 tp->ipb.TX_IV = 0;
471 tp->ipb.RX_IV = 0;
472 tp->ipb.Ring_Status_IV = 0;
473 tp->ipb.SCB_Clear_IV = 0;
474 tp->ipb.Adapter_CHK_IV = 0;
475 tp->ipb.RX_Burst_Size = BURST_SIZE;
476 tp->ipb.TX_Burst_Size = BURST_SIZE;
477 tp->ipb.DMA_Abort_Thrhld = DMA_RETRIES;
478 tp->ipb.SCB_Addr = 0;
479 tp->ipb.SSB_Addr = 0;
480}
481
/*
 * Initializes the open parameter block (OPB).
 *
 * Fills tp->ocpl with the receive/transmit list sizes, buffer size,
 * the station (node) address copied from dev->dev_addr, and the
 * DMA-relative address of the ProductID area.  The block is consumed
 * by the adapter when the OPEN command executes.
 */
static void tms380tr_init_opb(struct net_device *dev)
{
	struct net_local *tp;
	unsigned long Addr;
	unsigned short RplSize = RPL_SIZE;
	unsigned short TplSize = TPL_SIZE;
	unsigned short BufferSize = BUFFER_SIZE;
	int i;

	tp = netdev_priv(dev);

	/* Allow full-duplex selection, but initially request it off. */
	tp->ocpl.OPENOptions = 0;
	tp->ocpl.OPENOptions |= ENABLE_FULL_DUPLEX_SELECTION;
	tp->ocpl.FullDuplex = 0;
	tp->ocpl.FullDuplex |= OPEN_FULL_DUPLEX_OFF;

	/*
	 * Set node address
	 *
	 * We go ahead and put it in the OPB even though on
	 * most of the generic adapters this isn't required.
	 * Its simpler this way. -- ASF
	 */
	for (i=0;i<6;i++)
		tp->ocpl.NodeAddr[i] = ((unsigned char *)dev->dev_addr)[i];

	/* Sizes are big-endian on the wire to the adapter. */
	tp->ocpl.GroupAddr = 0;
	tp->ocpl.FunctAddr = 0;
	tp->ocpl.RxListSize = cpu_to_be16((unsigned short)RplSize);
	tp->ocpl.TxListSize = cpu_to_be16((unsigned short)TplSize);
	tp->ocpl.BufSize = cpu_to_be16((unsigned short)BufferSize);
	tp->ocpl.Reserved = 0;
	tp->ocpl.TXBufMin = TX_BUF_MIN;
	tp->ocpl.TXBufMax = TX_BUF_MAX;

	/* ProductID address, DMA-relative, split into two 16-bit words. */
	Addr = htonl(((char *)tp->ProductID - (char *)tp) + tp->dmabuffer);

	tp->ocpl.ProdIDAddr[0] = LOWORD(Addr);
	tp->ocpl.ProdIDAddr[1] = HIWORD(Addr);
}
525
526/*
527 * Send OPEN command to adapter
528 */
529static void tms380tr_open_adapter(struct net_device *dev)
530{
531 struct net_local *tp = netdev_priv(dev);
532
533 if(tp->OpenCommandIssued)
534 return;
535
536 tp->OpenCommandIssued = 1;
537 tms380tr_exec_cmd(dev, OC_OPEN);
538}
539
/*
 * Clear the adapter's interrupt flag. Clear system interrupt enable
 * (SINTEN): disable adapter to system interrupts.
 */
static void tms380tr_disable_interrupts(struct net_device *dev)
{
	/* Writing 0 to SIFACL clears ACL_SINTEN (among other bits),
	 * so the adapter can no longer raise system interrupts. */
	SIFWRITEB(0, SIFACL);
}
548
/*
 * Set the adapter's interrupt flag. Set system interrupt enable
 * (SINTEN): enable adapter to system interrupts.
 */
static void tms380tr_enable_interrupts(struct net_device *dev)
{
	/* NOTE(review): this write sets ONLY ACL_SINTEN in SIFACL,
	 * clearing the other ACL bits -- presumably intended, confirm. */
	SIFWRITEB(ACL_SINTEN, SIFACL);
}
557
/*
 * Put command in command queue, try to execute it.
 *
 * Commands are OC_* bit flags OR'ed into tp->CMDqueue;
 * tms380tr_chk_outstanding_cmds() dispatches them as soon as the SCB
 * mailbox is free.
 */
static void tms380tr_exec_cmd(struct net_device *dev, unsigned short Command)
{
	struct net_local *tp = netdev_priv(dev);

	tp->CMDqueue |= Command;
	tms380tr_chk_outstanding_cmds(dev);
}
568
/*
 * net_device tx_timeout hook: the network stack decided a transmit hung.
 */
static void tms380tr_timeout(struct net_device *dev)
{
	/*
	 * If we get here, some higher level has decided we are broken.
	 * There should really be a "kick me" function call instead.
	 *
	 * Resetting the token ring adapter takes a long time so just
	 * fake transmission time and go on trying. Our own timeout
	 * routine is in tms380tr_timer_chk()
	 */
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}
582
583/*
584 * Gets skb from system, queues it and checks if it can be sent
585 */
586static netdev_tx_t tms380tr_send_packet(struct sk_buff *skb,
587 struct net_device *dev)
588{
589 struct net_local *tp = netdev_priv(dev);
590 netdev_tx_t rc;
591
592 rc = tms380tr_hardware_send_packet(skb, dev);
593 if(tp->TplFree->NextTPLPtr->BusyFlag)
594 netif_stop_queue(dev);
595 return rc;
596}
597
/*
 * Move frames into adapter tx queue
 *
 * Claims the next free TPL under tp->lock, DMA-maps the frame (or
 * copies it to a per-TPL bounce buffer when the mapping lands beyond
 * tp->dmalimit), fills in the TPL and kicks the adapter.  Returns
 * NETDEV_TX_BUSY when no TPL is free, NETDEV_TX_OK otherwise; the skb
 * is kept until transmit completion for delayed return to the stack.
 */
static netdev_tx_t tms380tr_hardware_send_packet(struct sk_buff *skb,
						 struct net_device *dev)
{
	TPL *tpl;
	short length;
	unsigned char *buf;
	unsigned long flags;
	int i;
	dma_addr_t dmabuf, newbuf;
	struct net_local *tp = netdev_priv(dev);

	/* Try to get a free TPL from the chain.
	 *
	 * NOTE: We *must* always leave one unused TPL in the chain,
	 * because otherwise the adapter might send frames twice.
	 */
	spin_lock_irqsave(&tp->lock, flags);
	if(tp->TplFree->NextTPLPtr->BusyFlag) { /* No free TPL */
		if (tms380tr_debug > 0)
			printk(KERN_DEBUG "%s: No free TPL\n", dev->name);
		spin_unlock_irqrestore(&tp->lock, flags);
		return NETDEV_TX_BUSY;
	}

	dmabuf = 0;

	/* Is buffer reachable for Busmaster-DMA? */

	length	= skb->len;
	dmabuf = dma_map_single(tp->pdev, skb->data, length, DMA_TO_DEVICE);
	if(tp->dmalimit && (dmabuf + length > tp->dmalimit)) {
		/* Copy frame to local buffer */
		dma_unmap_single(tp->pdev, dmabuf, length, DMA_TO_DEVICE);
		dmabuf  = 0;
		i 	= tp->TplFree->TPLIndex;
		buf 	= tp->LocalTxBuffers[i];
		skb_copy_from_linear_data(skb, buf, length);
		/* Bounce buffer's bus address is an offset from the
		 * DMA-mapped net_local base. */
		newbuf 	= ((char *)buf - (char *)tp) + tp->dmabuffer;
	}
	else {
		/* Send direct from skb->data */
		newbuf	= dmabuf;
		buf	= skb->data;
	}
	/* Source address in packet? */
	tms380tr_chk_src_addr(buf, dev->dev_addr);
	tp->LastSendTime = jiffies;	/* watched by tms380tr_timer_chk() */
	tpl = tp->TplFree;		/* Get the "free" TPL */
	tpl->BusyFlag = 1;		/* Mark TPL as busy */
	tp->TplFree = tpl->NextTPLPtr;

	/* Save the skb for delayed return of skb to system */
	tpl->Skb = skb;
	tpl->DMABuff = dmabuf;
	tpl->FragList[0].DataCount = cpu_to_be16((unsigned short)length);
	tpl->FragList[0].DataAddr = htonl(newbuf);

	/* Write the data length in the transmit list. */
	tpl->FrameSize = cpu_to_be16((unsigned short)length);
	tpl->MData = buf;

	/* Transmit the frame and set the status values. */
	tms380tr_write_tpl_status(tpl, TX_VALID | TX_START_FRAME
				| TX_END_FRAME | TX_PASS_SRC_ADDR
				| TX_FRAME_IRQ);

	/* Let adapter send the frame. */
	tms380tr_exec_sifcmd(dev, CMD_TX_VALID);
	spin_unlock_irqrestore(&tp->lock, flags);

	return NETDEV_TX_OK;
}
673
/*
 * Write the given value to the 'Status' field of the specified TPL.
 * NOTE: This function should be used whenever the status of any TPL must be
 * modified by the driver, because the compiler may otherwise change the
 * order of instructions such that writing the TPL status may be executed at
 * an undesirable time. When this function is used, the status is always
 * written when the function is called.
 *
 * NOTE(review): there is no explicit memory barrier here; ordering relies
 * on the function-call boundary alone -- confirm this suffices on
 * weakly-ordered architectures.
 */
static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status)
{
	tpl->Status = Status;
}
686
/*
 * Stamp our own MAC address into the source-address field of an
 * outgoing frame (bytes 8..13), preserving the source-routing
 * indicator (top bit of byte 8).  The frame is left untouched unless
 * the source address looks blank: only byte 8 (ignoring the SR bit)
 * and byte 12 are actually tested for zero.
 */
static void tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr)
{
	unsigned char sr_flag;

	if ((frame[8] & 0x7F) != 0 || frame[12] != 0)
		return;	/* a source address is already present */

	sr_flag = frame[8] & 0x80;
	memcpy(&frame[8], hw_addr, 6);
	frame[8] |= sr_flag;
}
700
/*
 * The timer routine: Check if adapter still open and working, reopen if not.
 *
 * Runs every 2 seconds ('data' is the net_device cast to unsigned long).
 * Also issues a CLOSE (which triggers an automatic reopen) when frames
 * have been queued but stalled longer than SEND_TIMEOUT.
 */
static void tms380tr_timer_chk(unsigned long data)
{
	struct net_device *dev = (struct net_device*)data;
	struct net_local *tp = netdev_priv(dev);

	if(tp->HaltInProgress)
		return;

	tms380tr_chk_outstanding_cmds(dev);
	if(time_before(tp->LastSendTime + SEND_TIMEOUT, jiffies) &&
	   (tp->TplFree != tp->TplBusy))
	{
		/* Anything to send, but stalled too long */
		tp->LastSendTime = jiffies;
		tms380tr_exec_cmd(dev, OC_CLOSE);	/* Does reopen automatically */
	}

	/* Re-arm ourselves for the next 2-second check. */
	tp->timer.expires = jiffies + 2*HZ;
	add_timer(&tp->timer);

	/* If the adapter dropped closed and no reopen is in flight, start one. */
	if(tp->AdapterOpenFlag || tp->ReOpenInProgress)
		return;
	tp->ReOpenInProgress = 1;
	tms380tr_open_adapter(dev);
}
729
/*
 * The typical workload of the driver: Handle the network interface interrupts.
 *
 * Loops while the adapter asserts STS_SYSTEM_IRQ, dispatching each
 * interrupt code to its handler.  The SSB validity is checked first
 * (tms380tr_chk_ssb); a stale SSB indicates the "Data Late" condition
 * and aborts the loop.  RX/TX status handlers reset the system
 * interrupt themselves before touching the SSB; all other codes get it
 * reset afterwards.
 */
irqreturn_t tms380tr_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *tp;
	unsigned short irq_type;
	int handled = 0;

	tp = netdev_priv(dev);

	irq_type = SIFREADW(SIFSTS);

	while(irq_type & STS_SYSTEM_IRQ) {
		handled = 1;
		irq_type &= STS_IRQ_MASK;

		if(!tms380tr_chk_ssb(tp, irq_type)) {
			printk(KERN_DEBUG "%s: DATA LATE occurred\n", dev->name);
			break;
		}

		switch(irq_type) {
		case STS_IRQ_RECEIVE_STATUS:
			tms380tr_reset_interrupt(dev);
			tms380tr_rcv_status_irq(dev);
			break;

		case STS_IRQ_TRANSMIT_STATUS:
			/* Check if TRANSMIT.HALT command is complete */
			if(tp->ssb.Parm[0] & COMMAND_COMPLETE) {
				tp->TransmitCommandActive = 0;
				tp->TransmitHaltScheduled = 0;

				/* Issue a new transmit command. */
				tms380tr_exec_cmd(dev, OC_TRANSMIT);
			}

			tms380tr_reset_interrupt(dev);
			tms380tr_tx_status_irq(dev);
			break;

		case STS_IRQ_COMMAND_STATUS:
			/* The SSB contains status of last command
			 * other than receive/transmit.
			 */
			tms380tr_cmd_status_irq(dev);
			break;

		case STS_IRQ_SCB_CLEAR:
			/* The SCB is free for another command. */
			tp->ScbInUse = 0;
			tms380tr_chk_outstanding_cmds(dev);
			break;

		case STS_IRQ_RING_STATUS:
			tms380tr_ring_status_irq(dev);
			break;

		case STS_IRQ_ADAPTER_CHECK:
			tms380tr_chk_irq(dev);
			break;

		case STS_IRQ_LLC_STATUS:
			printk(KERN_DEBUG "tms380tr: unexpected LLC status IRQ\n");
			break;

		case STS_IRQ_TIMER:
			printk(KERN_DEBUG "tms380tr: unexpected Timer IRQ\n");
			break;

		case STS_IRQ_RECEIVE_PENDING:
			printk(KERN_DEBUG "tms380tr: unexpected Receive Pending IRQ\n");
			break;

		default:
			printk(KERN_DEBUG "Unknown Token Ring IRQ (0x%04x)\n", irq_type);
			break;
		}

		/* Reset system interrupt if not already done. */
		if(irq_type != STS_IRQ_TRANSMIT_STATUS &&
		   irq_type != STS_IRQ_RECEIVE_STATUS) {
			tms380tr_reset_interrupt(dev);
		}

		/* Re-read status: another interrupt may already be pending. */
		irq_type = SIFREADW(SIFSTS);
	}

	return IRQ_RETVAL(handled);
}
822
823/*
824 * Reset the INTERRUPT SYSTEM bit and issue SSB CLEAR command.
825 */
826static void tms380tr_reset_interrupt(struct net_device *dev)
827{
828 struct net_local *tp = netdev_priv(dev);
829 SSB *ssb = &tp->ssb;
830
831 /*
832 * [Workaround for "Data Late"]
833 * Set all fields of the SSB to well-defined values so we can
834 * check if the adapter has written the SSB.
835 */
836
837 ssb->STS = (unsigned short) -1;
838 ssb->Parm[0] = (unsigned short) -1;
839 ssb->Parm[1] = (unsigned short) -1;
840 ssb->Parm[2] = (unsigned short) -1;
841
842 /* Free SSB by issuing SSB_CLEAR command after reading IRQ code
843 * and clear STS_SYSTEM_IRQ bit: enable adapter for further interrupts.
844 */
845 tms380tr_exec_sifcmd(dev, CMD_SSB_CLEAR | CMD_CLEAR_SYSTEM_IRQ);
846}
847
848/*
849 * Check if the SSB has actually been written by the adapter.
850 */
851static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqType)
852{
853 SSB *ssb = &tp->ssb; /* The address of the SSB. */
854
855 /* C 0 1 2 INTERRUPT CODE
856 * - - - - --------------
857 * 1 1 1 1 TRANSMIT STATUS
858 * 1 1 1 1 RECEIVE STATUS
859 * 1 ? ? 0 COMMAND STATUS
860 * 0 0 0 0 SCB CLEAR
861 * 1 1 0 0 RING STATUS
862 * 0 0 0 0 ADAPTER CHECK
863 *
864 * 0 = SSB field not affected by interrupt
865 * 1 = SSB field is affected by interrupt
866 *
867 * C = SSB ADDRESS +0: COMMAND
868 * 0 = SSB ADDRESS +2: STATUS 0
869 * 1 = SSB ADDRESS +4: STATUS 1
870 * 2 = SSB ADDRESS +6: STATUS 2
871 */
872
873 /* Check if this interrupt does use the SSB. */
874
875 if(IrqType != STS_IRQ_TRANSMIT_STATUS &&
876 IrqType != STS_IRQ_RECEIVE_STATUS &&
877 IrqType != STS_IRQ_COMMAND_STATUS &&
878 IrqType != STS_IRQ_RING_STATUS)
879 {
880 return 1; /* SSB not involved. */
881 }
882
883 /* Note: All fields of the SSB have been set to all ones (-1) after it
884 * has last been used by the software (see DriverIsr()).
885 *
886 * Check if the affected SSB fields are still unchanged.
887 */
888
889 if(ssb->STS == (unsigned short) -1)
890 return 0; /* Command field not yet available. */
891 if(IrqType == STS_IRQ_COMMAND_STATUS)
892 return 1; /* Status fields not always affected. */
893 if(ssb->Parm[0] == (unsigned short) -1)
894 return 0; /* Status 1 field not yet available. */
895 if(IrqType == STS_IRQ_RING_STATUS)
896 return 1; /* Status 2 & 3 fields not affected. */
897
898 /* Note: At this point, the interrupt is either TRANSMIT or RECEIVE. */
899 if(ssb->Parm[1] == (unsigned short) -1)
900 return 0; /* Status 2 field not yet available. */
901 if(ssb->Parm[2] == (unsigned short) -1)
902 return 0; /* Status 3 field not yet available. */
903
904 return 1; /* All SSB fields have been written by the adapter. */
905}
906
/*
 * Evaluates the command results status in the SSB status field.
 *
 * Two commands are handled: OPEN (success starts TRANSMIT/RECEIVE and
 * marks the adapter open; failure decodes and logs the open phase and
 * error code, retrying automatically on lobe wire fault) and
 * READ_ERROR_LOG (folds the adapter's error counters into MacStat).
 * All other command codes are ignored.
 */
static void tms380tr_cmd_status_irq(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned short ssb_cmd, ssb_parm_0;
	unsigned short ssb_parm_1;
	char *open_err = "Open error -";
	char *code_err = "Open code -";

	/* Copy the ssb values to local variables */
	ssb_cmd    = tp->ssb.STS;
	ssb_parm_0 = tp->ssb.Parm[0];
	ssb_parm_1 = tp->ssb.Parm[1];

	if(ssb_cmd == OPEN)
	{
		/* Wake whoever sleeps in tms380tr_open() waiting for this. */
		tp->Sleeping = 0;
		if(!tp->ReOpenInProgress)
	    		wake_up_interruptible(&tp->wait_for_tok_int);

		tp->OpenCommandIssued = 0;
		tp->ScbInUse = 0;

		if((ssb_parm_0 & 0x00FF) == GOOD_COMPLETION)
		{
			/* Success, the adapter is open. */
			tp->LobeWireFaultLogged	= 0;
			tp->AdapterOpenFlag 	= 1;
			tp->AdapterVirtOpenFlag = 1;
			tp->TransmitCommandActive = 0;
			tms380tr_exec_cmd(dev, OC_TRANSMIT);
			tms380tr_exec_cmd(dev, OC_RECEIVE);

			if(tp->ReOpenInProgress)
				tp->ReOpenInProgress = 0;

			return;
		}
		else 	/* The adapter did not open. */
		{
			/* Log each individual parameter-check failure bit. */
	    		if(ssb_parm_0 & NODE_ADDR_ERROR)
				printk(KERN_INFO "%s: Node address error\n",
					dev->name);
	    		if(ssb_parm_0 & LIST_SIZE_ERROR)
				printk(KERN_INFO "%s: List size error\n",
					dev->name);
	    		if(ssb_parm_0 & BUF_SIZE_ERROR)
				printk(KERN_INFO "%s: Buffer size error\n",
					dev->name);
	    		if(ssb_parm_0 & TX_BUF_COUNT_ERROR)
				printk(KERN_INFO "%s: Tx buffer count error\n",
					dev->name);
	    		if(ssb_parm_0 & INVALID_OPEN_OPTION)
				printk(KERN_INFO "%s: Invalid open option\n",
					dev->name);
	    		if(ssb_parm_0 & OPEN_ERROR)
			{
				/* Show the open phase. */
				switch(ssb_parm_0 & OPEN_PHASES_MASK)
				{
					case LOBE_MEDIA_TEST:
						if(!tp->LobeWireFaultLogged)
						{
							tp->LobeWireFaultLogged = 1;
							printk(KERN_INFO "%s: %s Lobe wire fault (check cable !).\n", dev->name, open_err);
		    				}
						/* Cable fault: schedule an automatic reopen. */
						tp->ReOpenInProgress	= 1;
						tp->AdapterOpenFlag 	= 0;
						tp->AdapterVirtOpenFlag = 1;
						tms380tr_open_adapter(dev);
						return;

					case PHYSICAL_INSERTION:
						printk(KERN_INFO "%s: %s Physical insertion.\n", dev->name, open_err);
						break;

					case ADDRESS_VERIFICATION:
						printk(KERN_INFO "%s: %s Address verification.\n", dev->name, open_err);
						break;

					case PARTICIPATION_IN_RING_POLL:
						printk(KERN_INFO "%s: %s Participation in ring poll.\n", dev->name, open_err);
						break;

					case REQUEST_INITIALISATION:
						printk(KERN_INFO "%s: %s Request initialisation.\n", dev->name, open_err);
						break;

					case FULLDUPLEX_CHECK:
						printk(KERN_INFO "%s: %s Full duplex check.\n", dev->name, open_err);
						break;

					default:
						printk(KERN_INFO "%s: %s Unknown open phase\n", dev->name, open_err);
						break;
				}

				/* Show the open errors. */
				switch(ssb_parm_0 & OPEN_ERROR_CODES_MASK)
				{
					case OPEN_FUNCTION_FAILURE:
						printk(KERN_INFO "%s: %s OPEN_FUNCTION_FAILURE", dev->name, code_err);
						tp->LastOpenStatus =
							OPEN_FUNCTION_FAILURE;
						break;

					case OPEN_SIGNAL_LOSS:
						printk(KERN_INFO "%s: %s OPEN_SIGNAL_LOSS\n", dev->name, code_err);
						tp->LastOpenStatus =
							OPEN_SIGNAL_LOSS;
						break;

					case OPEN_TIMEOUT:
						printk(KERN_INFO "%s: %s OPEN_TIMEOUT\n", dev->name, code_err);
						tp->LastOpenStatus =
							OPEN_TIMEOUT;
						break;

					case OPEN_RING_FAILURE:
						printk(KERN_INFO "%s: %s OPEN_RING_FAILURE\n", dev->name, code_err);
						tp->LastOpenStatus =
							OPEN_RING_FAILURE;
						break;

					case OPEN_RING_BEACONING:
						printk(KERN_INFO "%s: %s OPEN_RING_BEACONING\n", dev->name, code_err);
						tp->LastOpenStatus =
							OPEN_RING_BEACONING;
						break;

					case OPEN_DUPLICATE_NODEADDR:
						printk(KERN_INFO "%s: %s OPEN_DUPLICATE_NODEADDR\n", dev->name, code_err);
						tp->LastOpenStatus =
							OPEN_DUPLICATE_NODEADDR;
						break;

					case OPEN_REQUEST_INIT:
						printk(KERN_INFO "%s: %s OPEN_REQUEST_INIT\n", dev->name, code_err);
						tp->LastOpenStatus =
							OPEN_REQUEST_INIT;
						break;

					case OPEN_REMOVE_RECEIVED:
						printk(KERN_INFO "%s: %s OPEN_REMOVE_RECEIVED", dev->name, code_err);
						tp->LastOpenStatus =
							OPEN_REMOVE_RECEIVED;
						break;

					case OPEN_FULLDUPLEX_SET:
						printk(KERN_INFO "%s: %s OPEN_FULLDUPLEX_SET\n", dev->name, code_err);
						tp->LastOpenStatus =
							OPEN_FULLDUPLEX_SET;
						break;

					default:
						printk(KERN_INFO "%s: %s Unknown open err code", dev->name, code_err);
						tp->LastOpenStatus =
							OPEN_FUNCTION_FAILURE;
						break;
				}
			}

			tp->AdapterOpenFlag 	= 0;
			tp->AdapterVirtOpenFlag = 0;

			return;
		}
	}
	else
	{
		if(ssb_cmd != READ_ERROR_LOG)
			return;

		/* Add values from the error log table to the MAC
		 * statistics counters and update the errorlogtable
		 * memory.
		 */
		tp->MacStat.line_errors += tp->errorlogtable.Line_Error;
		tp->MacStat.burst_errors += tp->errorlogtable.Burst_Error;
		tp->MacStat.A_C_errors += tp->errorlogtable.ARI_FCI_Error;
		tp->MacStat.lost_frames += tp->errorlogtable.Lost_Frame_Error;
		tp->MacStat.recv_congest_count += tp->errorlogtable.Rx_Congest_Error;
		tp->MacStat.rx_errors += tp->errorlogtable.Rx_Congest_Error;
		tp->MacStat.frame_copied_errors += tp->errorlogtable.Frame_Copied_Error;
		tp->MacStat.token_errors += tp->errorlogtable.Token_Error;
		tp->MacStat.dummy1 += tp->errorlogtable.DMA_Bus_Error;
		tp->MacStat.dummy1 += tp->errorlogtable.DMA_Parity_Error;
		tp->MacStat.abort_delimiters += tp->errorlogtable.AbortDelimeters;
		tp->MacStat.frequency_errors += tp->errorlogtable.Frequency_Error;
		tp->MacStat.internal_errors += tp->errorlogtable.Internal_Error;
	}
}
1101
/*
 * The inverse routine to tms380tr_open().
 *
 * Stops the queue, issues CLOSE and sleeps (with a 1-second timer as a
 * fallback wake-up) until the adapter acknowledges, then disables
 * interrupts, releases any ISA DMA channel, resets the SIF and returns
 * all queued tx skbs to the stack.
 */
int tms380tr_close(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	netif_stop_queue(dev);

	del_timer(&tp->timer);

	/* Flush the Tx and disable Rx here. */

	tp->HaltInProgress 	= 1;
	tms380tr_exec_cmd(dev, OC_CLOSE);
	tp->timer.expires	= jiffies + 1*HZ;
	tp->timer.function 	= tms380tr_timer_end_wait;
	tp->timer.data 		= (unsigned long)dev;
	add_timer(&tp->timer);

	tms380tr_enable_interrupts(dev);

	/* NOTE(review): interruptible_sleep_on() is inherently racy if the
	 * wake-up fires before the sleep starts -- confirm acceptable here. */
	tp->Sleeping = 1;
	interruptible_sleep_on(&tp->wait_for_tok_int);
	tp->TransmitCommandActive = 0;

	del_timer(&tp->timer);
	tms380tr_disable_interrupts(dev);

#ifdef CONFIG_ISA
	if(dev->dma > 0)
	{
		unsigned long flags=claim_dma_lock();
		disable_dma(dev->dma);
		release_dma_lock(flags);
	}
#endif

	SIFWRITEW(0xFF00, SIFCMD);
#if 0
	if(dev->dma > 0) /* what the? */
		SIFWRITEB(0xff, POSREG);
#endif
	tms380tr_cancel_tx_queue(tp);

	return 0;
}
1148
1149/*
1150 * Get the current statistics. This may be called with the card open
1151 * or closed.
1152 */
1153static struct net_device_stats *tms380tr_get_stats(struct net_device *dev)
1154{
1155 struct net_local *tp = netdev_priv(dev);
1156
1157 return (struct net_device_stats *)&tp->MacStat;
1158}
1159
/*
 * Set or clear the multicast filter for this adapter.
 *
 * Promiscuous mode copies all MAC and non-MAC frames; IFF_ALLMULTI
 * sets the functional address to all-ones; otherwise the functional
 * address is built by OR-ing bytes 2..5 of every configured multicast
 * address.  The updated open options are pushed to the adapter with
 * MODIFY_OPEN_PARMS (and SET_FUNCT_ADDR for the functional address).
 */
static void tms380tr_set_multicast_list(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned int OpenOptions;

	/* Start from the current options with all copy/pass bits cleared. */
	OpenOptions = tp->ocpl.OPENOptions &
		~(PASS_ADAPTER_MAC_FRAMES
		  | PASS_ATTENTION_FRAMES
		  | PASS_BEACON_MAC_FRAMES
		  | COPY_ALL_MAC_FRAMES
		  | COPY_ALL_NON_MAC_FRAMES);

	tp->ocpl.FunctAddr = 0;

	if(dev->flags & IFF_PROMISC)
		/* Enable promiscuous mode */
		OpenOptions |= COPY_ALL_NON_MAC_FRAMES |
			COPY_ALL_MAC_FRAMES;
	else
	{
		if(dev->flags & IFF_ALLMULTI)
		{
			/* Disable promiscuous mode, use normal mode. */
			tp->ocpl.FunctAddr = 0xFFFFFFFF;
		}
		else
		{
			struct netdev_hw_addr *ha;

			/* Accumulate the functional address from the
			 * low four bytes of each multicast address. */
			netdev_for_each_mc_addr(ha, dev) {
				((char *)(&tp->ocpl.FunctAddr))[0] |=
					ha->addr[2];
				((char *)(&tp->ocpl.FunctAddr))[1] |=
					ha->addr[3];
				((char *)(&tp->ocpl.FunctAddr))[2] |=
					ha->addr[4];
				((char *)(&tp->ocpl.FunctAddr))[3] |=
					ha->addr[5];
			}
		}
		tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR);
	}

	tp->ocpl.OPENOptions = OpenOptions;
	tms380tr_exec_cmd(dev, OC_MODIFY_OPEN_PARMS);
}
1209
/*
 * Wait for some time (microseconds)
 *
 * NOTE(review): with the #if 0 branch disabled this is a busy-wait
 * mdelay(time/1000), i.e. millisecond granularity; arguments below
 * 1000us round down to no delay at all -- confirm callers never rely
 * on sub-millisecond waits.
 */
void tms380tr_wait(unsigned long time)
{
#if 0
	long tmp;

	tmp = jiffies + time/(1000000/HZ);
	do {
		tmp = schedule_timeout_interruptible(tmp);
	} while(time_after(tmp, jiffies));
#else
	mdelay(time / 1000);
#endif
}
1226
1227/*
1228 * Write a command value to the SIFCMD register
1229 */
1230static void tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue)
1231{
1232 unsigned short cmd;
1233 unsigned short SifStsValue;
1234 unsigned long loop_counter;
1235
1236 WriteValue = ((WriteValue ^ CMD_SYSTEM_IRQ) | CMD_INTERRUPT_ADAPTER);
1237 cmd = (unsigned short)WriteValue;
1238 loop_counter = 0,5 * 800000;
1239 do {
1240 SifStsValue = SIFREADW(SIFSTS);
1241 } while((SifStsValue & CMD_INTERRUPT_ADAPTER) && loop_counter--);
1242 SIFWRITEW(cmd, SIFCMD);
1243}
1244
/*
 * Processes adapter hardware reset, halts adapter and downloads firmware,
 * clears the halt bit.
 *
 * Firmware image "tms380tr.bin" is a sequence of records, each:
 * 16-bit address-extension word, 16-bit address word, 16-bit length,
 * then <length> data words; a zero length terminates the download.
 * Returns 1 on success, -1 on missing firmware or a truncated image.
 */
static int tms380tr_reset_adapter(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned short *fw_ptr;
	unsigned short count, c, count2;
	const struct firmware *fw_entry = NULL;

	if (request_firmware(&fw_entry, "tms380tr.bin", tp->pdev) != 0) {
		printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n",
			dev->name, "tms380tr.bin");
		return -1;
	}

	fw_ptr = (unsigned short *)fw_entry->data;
	count2 = fw_entry->size / 2;	/* remaining 16-bit words */

	/* Hardware adapter reset */
	SIFWRITEW(ACL_ARESET, SIFACL);
	tms380tr_wait(40);

	c = SIFREADW(SIFACL);
	tms380tr_wait(20);

	if(dev->dma == 0)	/* For PCI adapters */
	{
		c &= ~(ACL_NSELOUT0 | ACL_NSELOUT1);	/* Clear bits */
		if(tp->setnselout)
		  c |= (*tp->setnselout)(dev);
	}

	/* In case a command is pending - forget it */
	tp->ScbInUse = 0;

	c &= ~ACL_ARESET;		/* Clear adapter reset bit */
	c |=  ACL_CPHALT;		/* Halt adapter CPU, allow download */
	c |= ACL_BOOT;
	c |= ACL_SINTEN;
	c &= ~ACL_PSDMAEN;		/* Clear pseudo dma bit */
	SIFWRITEW(c, SIFACL);
	tms380tr_wait(40);

	count = 0;
	/* Download firmware via DIO interface: */
	do {
		/* NOTE(review): on a truncated image these 'continue's jump to
		 * the while(count == 0) test with count possibly still 0, which
		 * loops here forever rather than failing -- confirm intended. */
		if (count2 < 3) continue;

		/* Download first address part */
		SIFWRITEW(*fw_ptr, SIFADX);
		fw_ptr++;
		count2--;
		/* Download second address part */
		SIFWRITEW(*fw_ptr, SIFADD);
		fw_ptr++;
		count2--;

		if((count = *fw_ptr) != 0)	/* Load loop counter */
		{
			fw_ptr++;	/* Download block data */
			count2--;
			if (count > count2) continue;

			for(; count > 0; count--)
			{
				SIFWRITEW(*fw_ptr, SIFINC);
				fw_ptr++;
				count2--;
			}
		}
		else	/* Stop, if last block downloaded */
		{
			c = SIFREADW(SIFACL);
			/* NOTE(review): mask is (~ACL_CPHALT | ACL_SINTEN),
			 * which equals ~ACL_CPHALT; perhaps ~(ACL_CPHALT |
			 * ACL_SINTEN) was meant -- behavior kept as-is. */
			c &= (~ACL_CPHALT | ACL_SINTEN);

			/* Clear CPHALT and start BUD */
			SIFWRITEW(c, SIFACL);
			release_firmware(fw_entry);
			return 1;
		}
	} while(count == 0);

	release_firmware(fw_entry);
	printk(KERN_INFO "%s: Adapter Download Failed\n", dev->name);
	return -1;
}
1333
1334MODULE_FIRMWARE("tms380tr.bin");
1335
/*
 * Starts bring up diagnostics of token ring adapter and evaluates
 * diagnostic results.
 *
 * Soft-resets the adapter and polls SIFSTS (half-second steps, up to
 * BUD_MAX_LOOPCNT polls per attempt, BUD_MAX_RETRIES attempts) until
 * the Bring-Up Diagnostics report STS_INITIALIZE.  Returns 1 on
 * success, -1 after logging the hardware error code.
 */
static int tms380tr_bringup_diags(struct net_device *dev)
{
	int loop_cnt, retry_cnt;
	unsigned short Status;

	tms380tr_wait(HALF_SECOND);
	tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET);
	tms380tr_wait(HALF_SECOND);

	retry_cnt = BUD_MAX_RETRIES;	/* maximal number of retrys */

	do {
		retry_cnt--;
		if(tms380tr_debug > 3)
			printk(KERN_DEBUG "BUD-Status: ");
		loop_cnt = BUD_MAX_LOOPCNT;	/* maximum: three seconds*/
		do {			/* Inspect BUD results */
			loop_cnt--;
			tms380tr_wait(HALF_SECOND);
			Status = SIFREADW(SIFSTS);
			Status &= STS_MASK;

			if(tms380tr_debug > 3)
				printk(KERN_DEBUG " %04X\n", Status);
			/* BUD successfully completed */
			if(Status == STS_INITIALIZE)
				return 1;
		/* Unrecoverable hardware error, BUD not completed? */
		} while((loop_cnt > 0) && ((Status & (STS_ERROR | STS_TEST))
			!= (STS_ERROR | STS_TEST)));

		/* Error preventing completion of BUD */
		if(retry_cnt > 0)
		{
			printk(KERN_INFO "%s: Adapter Software Reset.\n",
				dev->name);
			tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET);
			tms380tr_wait(HALF_SECOND);
		}
	} while(retry_cnt > 0);

	Status = SIFREADW(SIFSTS);

	printk(KERN_INFO "%s: Hardware error\n", dev->name);
	/* Hardware error occurred! Decode the low five status bits. */
	Status &= 0x001f;
	if (Status & 0x0010)
		printk(KERN_INFO "%s: BUD Error: Timeout\n", dev->name);
	else if ((Status & 0x000f) > 6)
		printk(KERN_INFO "%s: BUD Error: Illegal Failure\n", dev->name);
	else
		printk(KERN_INFO "%s: Bring Up Diagnostics Error (%04X) occurred\n", dev->name, Status & 0x000f);

	return -1;
}
1395
/*
 * Copy initialisation data to adapter memory, beginning at address
 * 1:0A00; Starting DMA test and evaluating result bits.
 *
 * Downloads the 11-word IPB through the DIO window, then waits for the
 * adapter to DMA known test patterns into the SCB and SSB.  A pattern
 * mismatch means the busmaster DMA path is broken.  Returns 1 on
 * success, -1 on DMA failure, status error, or retry exhaustion.
 */
static int tms380tr_init_adapter(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);

	/* Expected DMA test patterns, written by the adapter on success. */
	const unsigned char SCB_Test[6] = {0x00, 0x00, 0xC1, 0xE2, 0xD4, 0x8B};
	const unsigned char SSB_Test[8] = {0xFF, 0xFF, 0xD1, 0xD7,
						0xC5, 0xD9, 0xC3, 0xD4};
	void *ptr = (void *)&tp->ipb;
	unsigned short *ipb_ptr = (unsigned short *)ptr;
	unsigned char *cb_ptr = (unsigned char *) &tp->scb;
	unsigned char *sb_ptr = (unsigned char *) &tp->ssb;
	unsigned short Status;
	int i, loop_cnt, retry_cnt;

	/* Normalize: byte order low/high, word order high/low! (only IPB!) */
	tp->ipb.SCB_Addr = SWAPW(((char *)&tp->scb - (char *)tp) + tp->dmabuffer);
	tp->ipb.SSB_Addr = SWAPW(((char *)&tp->ssb - (char *)tp) + tp->dmabuffer);

	if(tms380tr_debug > 3)
	{
		printk(KERN_DEBUG "%s: buffer (real): %lx\n", dev->name, (long) &tp->scb);
		printk(KERN_DEBUG "%s: buffer (virt): %lx\n", dev->name, (long) ((char *)&tp->scb - (char *)tp) + (long) tp->dmabuffer);
		printk(KERN_DEBUG "%s: buffer (DMA) : %lx\n", dev->name, (long) tp->dmabuffer);
		printk(KERN_DEBUG "%s: buffer (tp)  : %lx\n", dev->name, (long) tp);
	}
	/* Maximum: three initialization retries */
	retry_cnt = INIT_MAX_RETRIES;

	do {
		retry_cnt--;

		/* Transfer initialization block */
		SIFWRITEW(0x0001, SIFADX);

		/* To address 0001:0A00 of adapter RAM */
		SIFWRITEW(0x0A00, SIFADD);

		/* Write 11 words to adapter RAM */
		for(i = 0; i < 11; i++)
			SIFWRITEW(ipb_ptr[i], SIFINC);

		/* Execute SCB adapter command */
		tms380tr_exec_sifcmd(dev, CMD_EXECUTE);

		loop_cnt = INIT_MAX_LOOPCNT;	/* Maximum: 11 seconds */

		/* While remaining retries, no error and not completed */
		do {
			Status = 0;
			loop_cnt--;
			tms380tr_wait(HALF_SECOND);

			/* Mask interesting status bits */
			Status = SIFREADW(SIFSTS);
			Status &= STS_MASK;
		} while(((Status &(STS_INITIALIZE | STS_ERROR | STS_TEST)) != 0) &&
			((Status & STS_ERROR) == 0) && (loop_cnt != 0));

		if((Status & (STS_INITIALIZE | STS_ERROR | STS_TEST)) == 0)
		{
			/* Initialization completed without error */
			i = 0;
			do {	/* Test if contents of SCB is valid */
				if(SCB_Test[i] != *(cb_ptr + i))
				{
					printk(KERN_INFO "%s: DMA failed\n", dev->name);
					/* DMA data error: wrong data in SCB */
					return -1;
				}
				i++;
			} while(i < 6);

			i = 0;
			do {	/* Test if contents of SSB is valid */
				if(SSB_Test[i] != *(sb_ptr + i))
					/* DMA data error: wrong data in SSB */
					return -1;
				i++;
			} while (i < 8);

			return 1;	/* Adapter successfully initialized */
		}
		else
		{
			if((Status & STS_ERROR) != 0)
			{
				/* Initialization error occurred */
				Status = SIFREADW(SIFSTS);
				Status &= STS_ERROR_MASK;
				/* ShowInitialisationErrorCode(Status); */
				printk(KERN_INFO "%s: Status error: %d\n", dev->name, Status);
				return -1; /* Unrecoverable error */
			}
			else
			{
				if(retry_cnt > 0)
				{
					/* Reset adapter and try init again */
					tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET);
					tms380tr_wait(HALF_SECOND);
				}
			}
		}
	} while(retry_cnt > 0);

	printk(KERN_INFO "%s: Retry exceeded\n", dev->name);
	return -1;
}
1508
1509/*
1510 * Check for outstanding commands in command queue and tries to execute
1511 * command immediately. Corresponding command flag in command queue is cleared.
1512 */
1513static void tms380tr_chk_outstanding_cmds(struct net_device *dev)
1514{
1515 struct net_local *tp = netdev_priv(dev);
1516 unsigned long Addr = 0;
1517
1518 if(tp->CMDqueue == 0)
1519 return; /* No command execution */
1520
1521 /* If SCB in use: no command */
1522 if(tp->ScbInUse == 1)
1523 return;
1524
1525 /* Check if adapter is opened, avoiding COMMAND_REJECT
1526 * interrupt by the adapter!
1527 */
1528 if(tp->AdapterOpenFlag == 0)
1529 {
1530 if(tp->CMDqueue & OC_OPEN)
1531 {
1532 /* Execute OPEN command */
1533 tp->CMDqueue ^= OC_OPEN;
1534
1535 Addr = htonl(((char *)&tp->ocpl - (char *)tp) + tp->dmabuffer);
1536 tp->scb.Parm[0] = LOWORD(Addr);
1537 tp->scb.Parm[1] = HIWORD(Addr);
1538 tp->scb.CMD = OPEN;
1539 }
1540 else
1541 /* No OPEN command queued, but adapter closed. Note:
1542 * We'll try to re-open the adapter in DriverPoll()
1543 */
1544 return; /* No adapter command issued */
1545 }
1546 else
1547 {
1548 /* Adapter is open; evaluate command queue: try to execute
1549 * outstanding commands (depending on priority!) CLOSE
1550 * command queued
1551 */
1552 if(tp->CMDqueue & OC_CLOSE)
1553 {
1554 tp->CMDqueue ^= OC_CLOSE;
1555 tp->AdapterOpenFlag = 0;
1556 tp->scb.Parm[0] = 0; /* Parm[0], Parm[1] are ignored */
1557 tp->scb.Parm[1] = 0; /* but should be set to zero! */
1558 tp->scb.CMD = CLOSE;
1559 if(!tp->HaltInProgress)
1560 tp->CMDqueue |= OC_OPEN; /* re-open adapter */
1561 else
1562 tp->CMDqueue = 0; /* no more commands */
1563 }
1564 else
1565 {
1566 if(tp->CMDqueue & OC_RECEIVE)
1567 {
1568 tp->CMDqueue ^= OC_RECEIVE;
1569 Addr = htonl(((char *)tp->RplHead - (char *)tp) + tp->dmabuffer);
1570 tp->scb.Parm[0] = LOWORD(Addr);
1571 tp->scb.Parm[1] = HIWORD(Addr);
1572 tp->scb.CMD = RECEIVE;
1573 }
1574 else
1575 {
1576 if(tp->CMDqueue & OC_TRANSMIT_HALT)
1577 {
1578 /* NOTE: TRANSMIT.HALT must be checked
1579 * before TRANSMIT.
1580 */
1581 tp->CMDqueue ^= OC_TRANSMIT_HALT;
1582 tp->scb.CMD = TRANSMIT_HALT;
1583
1584 /* Parm[0] and Parm[1] are ignored
1585 * but should be set to zero!
1586 */
1587 tp->scb.Parm[0] = 0;
1588 tp->scb.Parm[1] = 0;
1589 }
1590 else
1591 {
1592 if(tp->CMDqueue & OC_TRANSMIT)
1593 {
1594 /* NOTE: TRANSMIT must be
1595 * checked after TRANSMIT.HALT
1596 */
1597 if(tp->TransmitCommandActive)
1598 {
1599 if(!tp->TransmitHaltScheduled)
1600 {
1601 tp->TransmitHaltScheduled = 1;
1602 tms380tr_exec_cmd(dev, OC_TRANSMIT_HALT) ;
1603 }
1604 tp->TransmitCommandActive = 0;
1605 return;
1606 }
1607
1608 tp->CMDqueue ^= OC_TRANSMIT;
1609 tms380tr_cancel_tx_queue(tp);
1610 Addr = htonl(((char *)tp->TplBusy - (char *)tp) + tp->dmabuffer);
1611 tp->scb.Parm[0] = LOWORD(Addr);
1612 tp->scb.Parm[1] = HIWORD(Addr);
1613 tp->scb.CMD = TRANSMIT;
1614 tp->TransmitCommandActive = 1;
1615 }
1616 else
1617 {
1618 if(tp->CMDqueue & OC_MODIFY_OPEN_PARMS)
1619 {
1620 tp->CMDqueue ^= OC_MODIFY_OPEN_PARMS;
1621 tp->scb.Parm[0] = tp->ocpl.OPENOptions; /* new OPEN options*/
1622 tp->scb.Parm[0] |= ENABLE_FULL_DUPLEX_SELECTION;
1623 tp->scb.Parm[1] = 0; /* is ignored but should be zero */
1624 tp->scb.CMD = MODIFY_OPEN_PARMS;
1625 }
1626 else
1627 {
1628 if(tp->CMDqueue & OC_SET_FUNCT_ADDR)
1629 {
1630 tp->CMDqueue ^= OC_SET_FUNCT_ADDR;
1631 tp->scb.Parm[0] = LOWORD(tp->ocpl.FunctAddr);
1632 tp->scb.Parm[1] = HIWORD(tp->ocpl.FunctAddr);
1633 tp->scb.CMD = SET_FUNCT_ADDR;
1634 }
1635 else
1636 {
1637 if(tp->CMDqueue & OC_SET_GROUP_ADDR)
1638 {
1639 tp->CMDqueue ^= OC_SET_GROUP_ADDR;
1640 tp->scb.Parm[0] = LOWORD(tp->ocpl.GroupAddr);
1641 tp->scb.Parm[1] = HIWORD(tp->ocpl.GroupAddr);
1642 tp->scb.CMD = SET_GROUP_ADDR;
1643 }
1644 else
1645 {
1646 if(tp->CMDqueue & OC_READ_ERROR_LOG)
1647 {
1648 tp->CMDqueue ^= OC_READ_ERROR_LOG;
1649 Addr = htonl(((char *)&tp->errorlogtable - (char *)tp) + tp->dmabuffer);
1650 tp->scb.Parm[0] = LOWORD(Addr);
1651 tp->scb.Parm[1] = HIWORD(Addr);
1652 tp->scb.CMD = READ_ERROR_LOG;
1653 }
1654 else
1655 {
1656 printk(KERN_WARNING "CheckForOutstandingCommand: unknown Command\n");
1657 tp->CMDqueue = 0;
1658 return;
1659 }
1660 }
1661 }
1662 }
1663 }
1664 }
1665 }
1666 }
1667 }
1668
1669 tp->ScbInUse = 1; /* Set semaphore: SCB in use. */
1670
1671 /* Execute SCB and generate IRQ when done. */
1672 tms380tr_exec_sifcmd(dev, CMD_EXECUTE | CMD_SCB_REQUEST);
1673}
1674
1675/*
1676 * IRQ conditions: signal loss on the ring, transmit or receive of beacon
1677 * frames (disabled if bit 1 of OPEN option is set); report error MAC
1678 * frame transmit (disabled if bit 2 of OPEN option is set); open or short
1679 * circuit fault on the lobe is detected; remove MAC frame received;
1680 * error counter overflow (255); opened adapter is the only station in ring.
1681 * After some of the IRQs the adapter is closed!
1682 */
static void tms380tr_ring_status_irq(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);

	/* SSB Parm[0] carries the ring status word as written by the
	 * adapter; keep a CPU-order copy for later reporting. */
	tp->CurrentRingStatus = be16_to_cpu((unsigned short)tp->ssb.Parm[0]);

	/* First: fill up statistics */
	/* NOTE(review): the bit tests below are applied to the raw
	 * (unswapped) Parm[0]; the mask constants are presumably defined
	 * in adapter byte order -- confirm against tms380tr.h. */
	if(tp->ssb.Parm[0] & SIGNAL_LOSS)
	{
		printk(KERN_INFO "%s: Signal Loss\n", dev->name);
		tp->MacStat.line_errors++;
	}

	/* Adapter is closed, but initialized */
	if(tp->ssb.Parm[0] & LOBE_WIRE_FAULT)
	{
		printk(KERN_INFO "%s: Lobe Wire Fault, Reopen Adapter\n",
			dev->name);
		tp->MacStat.line_errors++;
	}

	if(tp->ssb.Parm[0] & RING_RECOVERY)
		printk(KERN_INFO "%s: Ring Recovery\n", dev->name);

	/* Counter overflow: read error log */
	if(tp->ssb.Parm[0] & COUNTER_OVERFLOW)
	{
		printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
		/* Queue a READ_ERROR_LOG command to pick up (and thereby
		 * reset) the adapter's error counters. */
		tms380tr_exec_cmd(dev, OC_READ_ERROR_LOG);
	}

	/* Adapter is closed, but initialized */
	if(tp->ssb.Parm[0] & REMOVE_RECEIVED)
		printk(KERN_INFO "%s: Remove Received, Reopen Adapter\n",
			dev->name);

	/* Adapter is closed, but initialized */
	if(tp->ssb.Parm[0] & AUTO_REMOVAL_ERROR)
		printk(KERN_INFO "%s: Auto Removal Error, Reopen Adapter\n",
			dev->name);

	if(tp->ssb.Parm[0] & HARD_ERROR)
		printk(KERN_INFO "%s: Hard Error\n", dev->name);

	if(tp->ssb.Parm[0] & SOFT_ERROR)
		printk(KERN_INFO "%s: Soft Error\n", dev->name);

	if(tp->ssb.Parm[0] & TRANSMIT_BEACON)
		printk(KERN_INFO "%s: Transmit Beacon\n", dev->name);

	if(tp->ssb.Parm[0] & SINGLE_STATION)
		printk(KERN_INFO "%s: Single Station\n", dev->name);

	/* Check if adapter has been closed */
	if(tp->ssb.Parm[0] & ADAPTER_CLOSED)
	{
		printk(KERN_INFO "%s: Adapter closed (Reopening),"
			"CurrentRingStat %x\n",
			dev->name, tp->CurrentRingStatus);
		/* Mark closed and immediately attempt a re-open. */
		tp->AdapterOpenFlag = 0;
		tms380tr_open_adapter(dev);
	}
}
1746
1747/*
1748 * Issued if adapter has encountered an unrecoverable hardware
1749 * or software error.
1750 */
1751static void tms380tr_chk_irq(struct net_device *dev)
1752{
1753 int i;
1754 unsigned short AdapterCheckBlock[4];
1755 struct net_local *tp = netdev_priv(dev);
1756
1757 tp->AdapterOpenFlag = 0; /* Adapter closed now */
1758
1759 /* Page number of adapter memory */
1760 SIFWRITEW(0x0001, SIFADX);
1761 /* Address offset */
1762 SIFWRITEW(CHECKADDR, SIFADR);
1763
1764 /* Reading 8 byte adapter check block. */
1765 for(i = 0; i < 4; i++)
1766 AdapterCheckBlock[i] = SIFREADW(SIFINC);
1767
1768 if(tms380tr_debug > 3)
1769 {
1770 printk(KERN_DEBUG "%s: AdapterCheckBlock: ", dev->name);
1771 for (i = 0; i < 4; i++)
1772 printk("%04X", AdapterCheckBlock[i]);
1773 printk("\n");
1774 }
1775
1776 switch(AdapterCheckBlock[0])
1777 {
1778 case DIO_PARITY:
1779 printk(KERN_INFO "%s: DIO parity error\n", dev->name);
1780 break;
1781
1782 case DMA_READ_ABORT:
1783 printk(KERN_INFO "%s DMA read operation aborted:\n",
1784 dev->name);
1785 switch (AdapterCheckBlock[1])
1786 {
1787 case 0:
1788 printk(KERN_INFO "Timeout\n");
1789 printk(KERN_INFO "Address: %04X %04X\n",
1790 AdapterCheckBlock[2],
1791 AdapterCheckBlock[3]);
1792 break;
1793
1794 case 1:
1795 printk(KERN_INFO "Parity error\n");
1796 printk(KERN_INFO "Address: %04X %04X\n",
1797 AdapterCheckBlock[2],
1798 AdapterCheckBlock[3]);
1799 break;
1800
1801 case 2:
1802 printk(KERN_INFO "Bus error\n");
1803 printk(KERN_INFO "Address: %04X %04X\n",
1804 AdapterCheckBlock[2],
1805 AdapterCheckBlock[3]);
1806 break;
1807
1808 default:
1809 printk(KERN_INFO "Unknown error.\n");
1810 break;
1811 }
1812 break;
1813
1814 case DMA_WRITE_ABORT:
1815 printk(KERN_INFO "%s: DMA write operation aborted:\n",
1816 dev->name);
1817 switch (AdapterCheckBlock[1])
1818 {
1819 case 0:
1820 printk(KERN_INFO "Timeout\n");
1821 printk(KERN_INFO "Address: %04X %04X\n",
1822 AdapterCheckBlock[2],
1823 AdapterCheckBlock[3]);
1824 break;
1825
1826 case 1:
1827 printk(KERN_INFO "Parity error\n");
1828 printk(KERN_INFO "Address: %04X %04X\n",
1829 AdapterCheckBlock[2],
1830 AdapterCheckBlock[3]);
1831 break;
1832
1833 case 2:
1834 printk(KERN_INFO "Bus error\n");
1835 printk(KERN_INFO "Address: %04X %04X\n",
1836 AdapterCheckBlock[2],
1837 AdapterCheckBlock[3]);
1838 break;
1839
1840 default:
1841 printk(KERN_INFO "Unknown error.\n");
1842 break;
1843 }
1844 break;
1845
1846 case ILLEGAL_OP_CODE:
1847 printk(KERN_INFO "%s: Illegal operation code in firmware\n",
1848 dev->name);
1849 /* Parm[0-3]: adapter internal register R13-R15 */
1850 break;
1851
1852 case PARITY_ERRORS:
1853 printk(KERN_INFO "%s: Adapter internal bus parity error\n",
1854 dev->name);
1855 /* Parm[0-3]: adapter internal register R13-R15 */
1856 break;
1857
1858 case RAM_DATA_ERROR:
1859 printk(KERN_INFO "%s: RAM data error\n", dev->name);
1860 /* Parm[0-1]: MSW/LSW address of RAM location. */
1861 break;
1862
1863 case RAM_PARITY_ERROR:
1864 printk(KERN_INFO "%s: RAM parity error\n", dev->name);
1865 /* Parm[0-1]: MSW/LSW address of RAM location. */
1866 break;
1867
1868 case RING_UNDERRUN:
1869 printk(KERN_INFO "%s: Internal DMA underrun detected\n",
1870 dev->name);
1871 break;
1872
1873 case INVALID_IRQ:
1874 printk(KERN_INFO "%s: Unrecognized interrupt detected\n",
1875 dev->name);
1876 /* Parm[0-3]: adapter internal register R13-R15 */
1877 break;
1878
1879 case INVALID_ERROR_IRQ:
1880 printk(KERN_INFO "%s: Unrecognized error interrupt detected\n",
1881 dev->name);
1882 /* Parm[0-3]: adapter internal register R13-R15 */
1883 break;
1884
1885 case INVALID_XOP:
1886 printk(KERN_INFO "%s: Unrecognized XOP request detected\n",
1887 dev->name);
1888 /* Parm[0-3]: adapter internal register R13-R15 */
1889 break;
1890
1891 default:
1892 printk(KERN_INFO "%s: Unknown status", dev->name);
1893 break;
1894 }
1895
1896 if(tms380tr_chipset_init(dev) == 1)
1897 {
1898 /* Restart of firmware successful */
1899 tp->AdapterOpenFlag = 1;
1900 }
1901}
1902
1903/*
1904 * Internal adapter pointer to RAM data are copied from adapter into
1905 * host system.
1906 */
1907static int tms380tr_read_ptr(struct net_device *dev)
1908{
1909 struct net_local *tp = netdev_priv(dev);
1910 unsigned short adapterram;
1911
1912 tms380tr_read_ram(dev, (unsigned char *)&tp->intptrs.BurnedInAddrPtr,
1913 ADAPTER_INT_PTRS, 16);
1914 tms380tr_read_ram(dev, (unsigned char *)&adapterram,
1915 cpu_to_be16((unsigned short)tp->intptrs.AdapterRAMPtr), 2);
1916 return be16_to_cpu(adapterram);
1917}
1918
1919/*
1920 * Reads a number of bytes from adapter to system memory.
1921 */
1922static void tms380tr_read_ram(struct net_device *dev, unsigned char *Data,
1923 unsigned short Address, int Length)
1924{
1925 int i;
1926 unsigned short old_sifadx, old_sifadr, InWord;
1927
1928 /* Save the current values */
1929 old_sifadx = SIFREADW(SIFADX);
1930 old_sifadr = SIFREADW(SIFADR);
1931
1932 /* Page number of adapter memory */
1933 SIFWRITEW(0x0001, SIFADX);
1934 /* Address offset in adapter RAM */
1935 SIFWRITEW(Address, SIFADR);
1936
1937 /* Copy len byte from adapter memory to system data area. */
1938 i = 0;
1939 for(;;)
1940 {
1941 InWord = SIFREADW(SIFINC);
1942
1943 *(Data + i) = HIBYTE(InWord); /* Write first byte */
1944 if(++i == Length) /* All is done break */
1945 break;
1946
1947 *(Data + i) = LOBYTE(InWord); /* Write second byte */
1948 if (++i == Length) /* All is done break */
1949 break;
1950 }
1951
1952 /* Restore original values */
1953 SIFWRITEW(old_sifadx, SIFADX);
1954 SIFWRITEW(old_sifadr, SIFADR);
1955}
1956
1957/*
1958 * Cancel all queued packets in the transmission queue.
1959 */
1960static void tms380tr_cancel_tx_queue(struct net_local* tp)
1961{
1962 TPL *tpl;
1963
1964 /*
1965 * NOTE: There must not be an active TRANSMIT command pending, when
1966 * this function is called.
1967 */
1968 if(tp->TransmitCommandActive)
1969 return;
1970
1971 for(;;)
1972 {
1973 tpl = tp->TplBusy;
1974 if(!tpl->BusyFlag)
1975 break;
1976 /* "Remove" TPL from busy list. */
1977 tp->TplBusy = tpl->NextTPLPtr;
1978 tms380tr_write_tpl_status(tpl, 0); /* Clear VALID bit */
1979 tpl->BusyFlag = 0; /* "free" TPL */
1980
1981 printk(KERN_INFO "Cancel tx (%08lXh).\n", (unsigned long)tpl);
1982 if (tpl->DMABuff)
1983 dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
1984 dev_kfree_skb_any(tpl->Skb);
1985 }
1986}
1987
1988/*
1989 * This function is called whenever a transmit interrupt is generated by the
1990 * adapter. For a command complete interrupt, it is checked if we have to
1991 * issue a new transmit command or not.
1992 */
1993static void tms380tr_tx_status_irq(struct net_device *dev)
1994{
1995 struct net_local *tp = netdev_priv(dev);
1996 unsigned char HighByte, HighAc, LowAc;
1997 TPL *tpl;
1998
1999 /* NOTE: At this point the SSB from TRANSMIT STATUS is no longer
2000 * available, because the CLEAR SSB command has already been issued.
2001 *
2002 * Process all complete transmissions.
2003 */
2004
2005 for(;;)
2006 {
2007 tpl = tp->TplBusy;
2008 if(!tpl->BusyFlag || (tpl->Status
2009 & (TX_VALID | TX_FRAME_COMPLETE))
2010 != TX_FRAME_COMPLETE)
2011 {
2012 break;
2013 }
2014
2015 /* "Remove" TPL from busy list. */
2016 tp->TplBusy = tpl->NextTPLPtr ;
2017
2018 /* Check the transmit status field only for directed frames*/
2019 if(DIRECTED_FRAME(tpl) && (tpl->Status & TX_ERROR) == 0)
2020 {
2021 HighByte = GET_TRANSMIT_STATUS_HIGH_BYTE(tpl->Status);
2022 HighAc = GET_FRAME_STATUS_HIGH_AC(HighByte);
2023 LowAc = GET_FRAME_STATUS_LOW_AC(HighByte);
2024
2025 if((HighAc != LowAc) || (HighAc == AC_NOT_RECOGNIZED))
2026 {
2027 printk(KERN_DEBUG "%s: (DA=%08lX not recognized)\n",
2028 dev->name,
2029 *(unsigned long *)&tpl->MData[2+2]);
2030 }
2031 else
2032 {
2033 if(tms380tr_debug > 3)
2034 printk(KERN_DEBUG "%s: Directed frame tx'd\n",
2035 dev->name);
2036 }
2037 }
2038 else
2039 {
2040 if(!DIRECTED_FRAME(tpl))
2041 {
2042 if(tms380tr_debug > 3)
2043 printk(KERN_DEBUG "%s: Broadcast frame tx'd\n",
2044 dev->name);
2045 }
2046 }
2047
2048 tp->MacStat.tx_packets++;
2049 if (tpl->DMABuff)
2050 dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
2051 dev_kfree_skb_irq(tpl->Skb);
2052 tpl->BusyFlag = 0; /* "free" TPL */
2053 }
2054
2055 if(!tp->TplFree->NextTPLPtr->BusyFlag)
2056 netif_wake_queue(dev);
2057}
2058
2059/*
2060 * Called if a frame receive interrupt is generated by the adapter.
2061 * Check if the frame is valid and indicate it to system.
2062 */
static void tms380tr_rcv_status_irq(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned char *ReceiveDataPtr;
	struct sk_buff *skb;
	unsigned int Length, Length2;
	RPL *rpl;
	RPL *SaveHead;
	dma_addr_t dmabuf;

	/* NOTE: At this point the SSB from RECEIVE STATUS is no longer
	 * available, because the CLEAR SSB command has already been issued.
	 *
	 * Process all complete receives.
	 */

	for(;;)
	{
		rpl = tp->RplHead;
		if(rpl->Status & RX_VALID)
			break; /* RPL still in use by adapter */

		/* Forward RPLHead pointer to next list.  SaveHead lets us
		 * roll back if the FrameSize turns out to be unstable
		 * (see workaround below). */
		SaveHead = tp->RplHead;
		tp->RplHead = rpl->NextRPLPtr;

		/* Get the frame size (Byte swap for Intel).
		 * Do this early (see workaround comment below)
		 */
		Length = be16_to_cpu(rpl->FrameSize);

		/* Check if the Frame_Start, Frame_End and
		 * Frame_Complete bits are set.
		 */
		if((rpl->Status & VALID_SINGLE_BUFFER_FRAME)
			== VALID_SINGLE_BUFFER_FRAME)
		{
			ReceiveDataPtr = rpl->MData;

			/* Workaround for delayed write of FrameSize on ISA
			 * (FrameSize is false but valid-bit is reset)
			 * Frame size is set to zero when the RPL is freed.
			 * Length2 is there because there have also been
			 * cases where the FrameSize was partially written
			 */
			Length2 = be16_to_cpu(rpl->FrameSize);

			if(Length == 0 || Length != Length2)
			{
				/* Unstable size: put the RPL back and retry
				 * on the next interrupt. */
				tp->RplHead = SaveHead;
				break; /* Return to tms380tr_interrupt */
			}
			tms380tr_update_rcv_stats(tp,ReceiveDataPtr,Length);

			if(tms380tr_debug > 3)
				printk(KERN_DEBUG "%s: Packet Length %04X (%d)\n",
					dev->name, Length, Length);

			/* Indicate the received frame to system the
			 * adapter does the Source-Routing padding for
			 * us. See: OpenOptions in tms380tr_init_opb()
			 */
			skb = rpl->Skb;
			if(rpl->SkbStat == SKB_UNAVAILABLE)
			{
				/* Try again to allocate skb */
				skb = dev_alloc_skb(tp->MaxPacketSize);
				if(skb == NULL)
				{
					/* Update Stats ?? */
				}
				else
				{
					/* Frame data lives in the local
					 * buffer; copy path below will be
					 * used. */
					skb_put(skb, tp->MaxPacketSize);
					rpl->SkbStat = SKB_DATA_COPY;
					ReceiveDataPtr = rpl->MData;
				}
			}

			if(skb && (rpl->SkbStat == SKB_DATA_COPY ||
				rpl->SkbStat == SKB_DMA_DIRECT))
			{
				if(rpl->SkbStat == SKB_DATA_COPY)
					skb_copy_to_linear_data(skb, ReceiveDataPtr,
						Length);

				/* Deliver frame to system */
				rpl->Skb = NULL;
				skb_trim(skb,Length);
				skb->protocol = tr_type_trans(skb,dev);
				netif_rx(skb);
			}
		}
		else /* Invalid frame */
		{
			if(rpl->Skb != NULL)
				dev_kfree_skb_irq(rpl->Skb);

			/* Skip list. */
			if(rpl->Status & RX_START_FRAME)
				/* Frame start bit is set -> overflow. */
				tp->MacStat.rx_errors++;
		}
		/* NOTE(review): this unmap uses DMA_TO_DEVICE although the
		 * buffer was mapped DMA_FROM_DEVICE below -- looks
		 * inconsistent; confirm against the DMA API rules. */
		if (rpl->DMABuff)
			dma_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, DMA_TO_DEVICE);
		rpl->DMABuff = 0;

		/* Allocate new skb for rpl */
		rpl->Skb = dev_alloc_skb(tp->MaxPacketSize);
		/* skb == NULL ? then use local buffer */
		if(rpl->Skb == NULL)
		{
			rpl->SkbStat = SKB_UNAVAILABLE;
			rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer);
			rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex];
		}
		else /* skb != NULL */
		{
			rpl->Skb->dev = dev;
			skb_put(rpl->Skb, tp->MaxPacketSize);

			/* Data unreachable for DMA ? then use local buffer */
			dmabuf = dma_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
			if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
			{
				rpl->SkbStat = SKB_DATA_COPY;
				rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer);
				rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex];
			}
			else
			{
				/* DMA directly in skb->data */
				rpl->SkbStat = SKB_DMA_DIRECT;
				rpl->FragList[0].DataAddr = htonl(dmabuf);
				rpl->MData = rpl->Skb->data;
				rpl->DMABuff = dmabuf;
			}
		}

		rpl->FragList[0].DataCount = cpu_to_be16((unsigned short)tp->MaxPacketSize);
		/* Zero FrameSize so the delayed-write workaround above can
		 * detect a not-yet-written size next time round. */
		rpl->FrameSize = 0;

		/* Pass the last RPL back to the adapter */
		tp->RplTail->FrameSize = 0;

		/* Reset the CSTAT field in the list. */
		tms380tr_write_rpl_status(tp->RplTail, RX_VALID | RX_FRAME_IRQ);

		/* Current RPL becomes last one in list. */
		tp->RplTail = tp->RplTail->NextRPLPtr;

		/* Inform adapter about RPL valid. */
		tms380tr_exec_sifcmd(dev, CMD_RX_VALID);
	}
}
2218
2219/*
2220 * This function should be used whenever the status of any RPL must be
2221 * modified by the driver, because the compiler may otherwise change the
2222 * order of instructions such that writing the RPL status may be executed
2223 * at an undesirable time. When this function is used, the status is
2224 * always written when the function is called.
2225 */
static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status)
{
	/* Deliberately a plain store wrapped in a function: keeping the
	 * write out-of-line prevents the compiler from reordering it
	 * relative to the caller's surrounding code (see comment above). */
	rpl->Status = Status;
}
2230
2231/*
2232 * The function updates the statistic counters in mac->MacStat.
2233 * It differtiates between directed and broadcast/multicast ( ==functional)
2234 * frames.
2235 */
2236static void tms380tr_update_rcv_stats(struct net_local *tp, unsigned char DataPtr[],
2237 unsigned int Length)
2238{
2239 tp->MacStat.rx_packets++;
2240 tp->MacStat.rx_bytes += Length;
2241
2242 /* Test functional bit */
2243 if(DataPtr[2] & GROUP_BIT)
2244 tp->MacStat.multicast++;
2245}
2246
2247static int tms380tr_set_mac_address(struct net_device *dev, void *addr)
2248{
2249 struct net_local *tp = netdev_priv(dev);
2250 struct sockaddr *saddr = addr;
2251
2252 if (tp->AdapterOpenFlag || tp->AdapterVirtOpenFlag) {
2253 printk(KERN_WARNING "%s: Cannot set MAC/LAA address while card is open\n", dev->name);
2254 return -EIO;
2255 }
2256 memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
2257 return 0;
2258}
2259
2260#if TMS380TR_DEBUG > 0
2261/*
2262 * Dump Packet (data)
2263 */
2264static void tms380tr_dump(unsigned char *Data, int length)
2265{
2266 int i, j;
2267
2268 for (i = 0, j = 0; i < length / 8; i++, j += 8)
2269 {
2270 printk(KERN_DEBUG "%02x %02x %02x %02x %02x %02x %02x %02x\n",
2271 Data[j+0],Data[j+1],Data[j+2],Data[j+3],
2272 Data[j+4],Data[j+5],Data[j+6],Data[j+7]);
2273 }
2274}
2275#endif
2276
2277void tmsdev_term(struct net_device *dev)
2278{
2279 struct net_local *tp;
2280
2281 tp = netdev_priv(dev);
2282 dma_unmap_single(tp->pdev, tp->dmabuffer, sizeof(struct net_local),
2283 DMA_BIDIRECTIONAL);
2284}
2285
/* Standard net_device callbacks shared by all TMS380-based boards;
 * exported so the bus-specific front-end drivers can install them. */
const struct net_device_ops tms380tr_netdev_ops = {
	.ndo_open		= tms380tr_open,
	.ndo_stop		= tms380tr_close,
	.ndo_start_xmit		= tms380tr_send_packet,
	.ndo_tx_timeout		= tms380tr_timeout,
	.ndo_get_stats		= tms380tr_get_stats,
	.ndo_set_multicast_list = tms380tr_set_multicast_list,
	.ndo_set_mac_address	= tms380tr_set_mac_address,
};
EXPORT_SYMBOL(tms380tr_netdev_ops);
2296
2297int tmsdev_init(struct net_device *dev, struct device *pdev)
2298{
2299 struct net_local *tms_local;
2300
2301 memset(netdev_priv(dev), 0, sizeof(struct net_local));
2302 tms_local = netdev_priv(dev);
2303 init_waitqueue_head(&tms_local->wait_for_tok_int);
2304 if (pdev->dma_mask)
2305 tms_local->dmalimit = *pdev->dma_mask;
2306 else
2307 return -ENOMEM;
2308 tms_local->pdev = pdev;
2309 tms_local->dmabuffer = dma_map_single(pdev, (void *)tms_local,
2310 sizeof(struct net_local), DMA_BIDIRECTIONAL);
2311 if (tms_local->dmabuffer + sizeof(struct net_local) >
2312 tms_local->dmalimit)
2313 {
2314 printk(KERN_INFO "%s: Memory not accessible for DMA\n",
2315 dev->name);
2316 tmsdev_term(dev);
2317 return -ENOMEM;
2318 }
2319
2320 dev->netdev_ops = &tms380tr_netdev_ops;
2321 dev->watchdog_timeo = HZ;
2322
2323 return 0;
2324}
2325
2326EXPORT_SYMBOL(tms380tr_open);
2327EXPORT_SYMBOL(tms380tr_close);
2328EXPORT_SYMBOL(tms380tr_interrupt);
2329EXPORT_SYMBOL(tmsdev_init);
2330EXPORT_SYMBOL(tmsdev_term);
2331EXPORT_SYMBOL(tms380tr_wait);
2332
2333#ifdef MODULE
2334
/* Reference to ourselves while the module is loaded. */
static struct module *TMS380_module = NULL;

int init_module(void)
{
	/* Announce the driver version string at load time. */
	printk(KERN_DEBUG "%s", version);

	TMS380_module = &__this_module;
	return 0;
}

void cleanup_module(void)
{
	/* Drop the self-reference taken in init_module(). */
	TMS380_module = NULL;
}
2349#endif
2350
2351MODULE_LICENSE("GPL");
2352
diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h
new file mode 100644
index 00000000000..e5a617c586c
--- /dev/null
+++ b/drivers/net/tokenring/tms380tr.h
@@ -0,0 +1,1141 @@
1/*
2 * tms380tr.h: TI TMS380 Token Ring driver for Linux
3 *
4 * Authors:
5 * - Christoph Goos <cgoos@syskonnect.de>
6 * - Adam Fritzler
7 */
8
9#ifndef __LINUX_TMS380TR_H
10#define __LINUX_TMS380TR_H
11
12#ifdef __KERNEL__
13
14#include <linux/interrupt.h>
15
16/* module prototypes */
17extern const struct net_device_ops tms380tr_netdev_ops;
18int tms380tr_open(struct net_device *dev);
19int tms380tr_close(struct net_device *dev);
20irqreturn_t tms380tr_interrupt(int irq, void *dev_id);
21int tmsdev_init(struct net_device *dev, struct device *pdev);
22void tmsdev_term(struct net_device *dev);
23void tms380tr_wait(unsigned long time);
24
25#define TMS380TR_MAX_ADAPTERS 7
26
27#define SEND_TIMEOUT 10*HZ
28
29#define TR_RCF_LONGEST_FRAME_MASK 0x0070
30#define TR_RCF_FRAME4K 0x0030
31
32/*------------------------------------------------------------------*/
33/* Bit order for adapter communication with DMA */
34/* -------------------------------------------------------------- */
35/* Bit 8 | 9| 10| 11|| 12| 13| 14| 15|| 0| 1| 2| 3|| 4| 5| 6| 7| */
36/* -------------------------------------------------------------- */
37/* The bytes in a word must be byte swapped. Also, if a double */
38/* word is used for storage, then the words, as well as the bytes, */
39/* must be swapped. */
40/* Bit order for adapter communication with DIO */
41/* -------------------------------------------------------------- */
42/* Bit 0 | 1| 2| 3|| 4| 5| 6| 7|| 8| 9| 10| 11|| 12| 13| 14| 15| */
43/* -------------------------------------------------------------- */
44/*------------------------------------------------------------------*/
45
46/* Swap words of a long. */
47#define SWAPW(x) (((x) << 16) | ((x) >> 16))
48
49/* Get the low byte of a word. */
50#define LOBYTE(w) ((unsigned char)(w))
51
52/* Get the high byte of a word. */
53#define HIBYTE(w) ((unsigned char)((unsigned short)(w) >> 8))
54
55/* Get the low word of a long. */
56#define LOWORD(l) ((unsigned short)(l))
57
58/* Get the high word of a long. */
59#define HIWORD(l) ((unsigned short)((unsigned long)(l) >> 16))
60
61
62
63/* Token ring adapter I/O addresses for normal mode. */
64
65/*
66 * The SIF registers. Common to all adapters.
67 */
68/* Basic SIF (SRSX = 0) */
69#define SIFDAT 0x00 /* SIF/DMA data. */
70#define SIFINC 0x02 /* IO Word data with auto increment. */
71#define SIFINH 0x03 /* IO Byte data with auto increment. */
72#define SIFADR 0x04 /* SIF/DMA Address. */
73#define SIFCMD 0x06 /* SIF Command. */
74#define SIFSTS 0x06 /* SIF Status. */
75
76/* "Extended" SIF (SRSX = 1) */
77#define SIFACL 0x08 /* SIF Adapter Control Register. */
78#define SIFADD 0x0a /* SIF/DMA Address. -- 0x0a */
79#define SIFADX 0x0c /* 0x0c */
80#define DMALEN 0x0e /* SIF DMA length. -- 0x0e */
81
82/*
83 * POS Registers. Only for ISA Adapters.
84 */
85#define POSREG 0x10 /* Adapter Program Option Select (POS)
86 * Register: base IO address + 16 byte.
87 */
88#define POSREG_2 24L /* only for TR4/16+ adapter
89 * base IO address + 24 byte. -- 0x18
90 */
91
92/* SIFCMD command codes (high-low) */
93#define CMD_INTERRUPT_ADAPTER 0x8000 /* Cause internal adapter interrupt */
94#define CMD_ADAPTER_RESET 0x4000 /* Hardware reset of adapter */
95#define CMD_SSB_CLEAR 0x2000 /* Acknowledge to adapter to
96 * system interrupts.
97 */
98#define CMD_EXECUTE 0x1000 /* Execute SCB command */
99#define CMD_SCB_REQUEST 0x0800 /* Request adapter to interrupt
100 * system when SCB is available for
101 * another command.
102 */
103#define CMD_RX_CONTINUE 0x0400 /* Continue receive after odd pointer
104 * stop. (odd pointer receive method)
105 */
106#define CMD_RX_VALID 0x0200 /* Now actual RPL is valid. */
107#define CMD_TX_VALID 0x0100 /* Now actual TPL is valid. (valid
108 * bit receive/transmit method)
109 */
110#define CMD_SYSTEM_IRQ 0x0080 /* Adapter-to-attached-system
111 * interrupt is reset.
112 */
113#define CMD_CLEAR_SYSTEM_IRQ 0x0080 /* Clear SYSTEM_INTERRUPT bit.
114 * (write: 1=ignore, 0=reset)
115 */
116#define EXEC_SOFT_RESET 0xFF00 /* adapter soft reset. (restart
117 * adapter after hardware reset)
118 */
119
120
121/* ACL commands (high-low) */
122#define ACL_SWHLDA 0x0800 /* Software hold acknowledge. */
123#define ACL_SWDDIR 0x0400 /* Data transfer direction. */
124#define ACL_SWHRQ 0x0200 /* Pseudo DMA operation. */
125#define ACL_PSDMAEN 0x0100 /* Enable pseudo system DMA. */
126#define ACL_ARESET 0x0080 /* Adapter hardware reset command.
127 * (held in reset condition as
128 * long as bit is set)
129 */
130#define ACL_CPHALT 0x0040 /* Communication processor halt.
131 * (can only be set while ACL_ARESET
132 * bit is set; prevents adapter
133 * processor from executing code while
134 * downloading firmware)
135 */
136#define ACL_BOOT 0x0020
137#define ACL_SINTEN 0x0008 /* System interrupt enable/disable
138 * (1/0): can be written if ACL_ARESET
139 * is zero.
140 */
141#define ACL_PEN 0x0004
142
143#define ACL_NSELOUT0 0x0002
144#define ACL_NSELOUT1 0x0001 /* NSELOUTx have a card-specific
145 * meaning for setting ring speed.
146 */
147
148#define PS_DMA_MASK (ACL_SWHRQ | ACL_PSDMAEN)
149
150
151/* SIFSTS register return codes (high-low) */
152#define STS_SYSTEM_IRQ 0x0080 /* Adapter-to-attached-system
153 * interrupt is valid.
154 */
155#define STS_INITIALIZE 0x0040 /* INITIALIZE status. (ready to
156 * initialize)
157 */
158#define STS_TEST 0x0020 /* TEST status. (BUD not completed) */
159#define STS_ERROR 0x0010 /* ERROR status. (unrecoverable
160 * HW error occurred)
161 */
162#define STS_MASK 0x00F0 /* Mask interesting status bits. */
163#define STS_ERROR_MASK 0x000F /* Get Error Code by masking the
164 * interrupt code bits.
165 */
166#define ADAPTER_INT_PTRS 0x0A00 /* Address offset of adapter internal
167 * pointers 01:0a00 (high-low) have to
168 * be read after init and before open.
169 */
170
171
172/* Interrupt Codes (only MAC IRQs) */
173#define STS_IRQ_ADAPTER_CHECK 0x0000 /* unrecoverable hardware or
174 * software error.
175 */
176#define STS_IRQ_RING_STATUS 0x0004 /* SSB is updated with ring status. */
177#define STS_IRQ_LLC_STATUS 0x0005 /* Not used in MAC-only microcode */
178#define STS_IRQ_SCB_CLEAR 0x0006 /* SCB clear, following an
179 * SCB_REQUEST IRQ.
180 */
181#define STS_IRQ_TIMER 0x0007 /* Not normally used in MAC ucode */
182#define STS_IRQ_COMMAND_STATUS 0x0008 /* SSB is updated with command
183 * status.
184 */
185#define STS_IRQ_RECEIVE_STATUS 0x000A /* SSB is updated with receive
186 * status.
187 */
188#define STS_IRQ_TRANSMIT_STATUS 0x000C /* SSB is updated with transmit
189 * status
190 */
191#define STS_IRQ_RECEIVE_PENDING 0x000E /* Not used in MAC-only microcode */
192#define STS_IRQ_MASK 0x000F /* = STS_ERROR_MASK. */
193
194
195/* TRANSMIT_STATUS completion code: (SSB.Parm[0]) */
196#define COMMAND_COMPLETE 0x0080 /* TRANSMIT command completed
197 * (avoid this!) issue another transmit
198 * to send additional frames.
199 */
200#define FRAME_COMPLETE 0x0040 /* Frame has been transmitted;
201 * INTERRUPT_FRAME bit was set in the
202 * CSTAT request; indication of possibly
203 * more than one frame transmissions!
204 * SSB.Parm[0-1]: 32 bit pointer to
205 * TPL of last frame.
206 */
207#define LIST_ERROR 0x0020 /* Error in one of the TPLs that
208 * compose the frame; TRANSMIT
209 * terminated; Parm[1-2]: 32bit pointer
210 * to TPL which starts the error
211 * frame; error details in bits 8-13.
212 * (14?)
213 */
214#define FRAME_SIZE_ERROR 0x8000 /* FRAME_SIZE does not equal the sum of
215 * the valid DATA_COUNT fields;
216 * FRAME_SIZE less than header plus
217 * information field. (15 bytes +
218 * routing field) Or if FRAME_SIZE
219 * was specified as zero in one list.
220 */
221#define TX_THRESHOLD 0x4000 /* FRAME_SIZE greater than (BUFFER_SIZE
222 * - 9) * TX_BUF_MAX.
223 */
224#define ODD_ADDRESS 0x2000 /* Odd forward pointer value is
225 * read on a list without END_FRAME
226 * indication.
227 */
228#define FRAME_ERROR 0x1000 /* START_FRAME bit (not) anticipated,
229 * but (not) set.
230 */
231#define ACCESS_PRIORITY_ERROR 0x0800 /* Access priority requested has not
232 * been allowed.
233 */
234#define UNENABLED_MAC_FRAME 0x0400 /* MAC frame has source class of zero
235 * or MAC frame PCF ATTN field is
236 * greater than one.
237 */
238#define ILLEGAL_FRAME_FORMAT 0x0200 /* Bit 0 or FC field was set to one. */
239
240
241/*
242 * Since we need to support some functions even if the adapter is in a
243 * CLOSED state, we have a (pseudo-) command queue which holds commands
244 * that are outstanding to be executed.
245 *
246 * Each time a command completes, an interrupt occurs and the next
247 * command is executed. The command queue is actually a simple word with
248 * a bit for each outstanding command. Therefore the commands will not be
249 * executed in the order they have been queued.
250 *
251 * The following defines the command code bits and the command queue:
252 */
253#define OC_OPEN 0x0001 /* OPEN command */
254#define OC_TRANSMIT 0x0002 /* TRANSMIT command */
255#define OC_TRANSMIT_HALT 0x0004 /* TRANSMIT_HALT command */
256#define OC_RECEIVE 0x0008 /* RECEIVE command */
257#define OC_CLOSE 0x0010 /* CLOSE command */
258#define OC_SET_GROUP_ADDR 0x0020 /* SET_GROUP_ADDR command */
259#define OC_SET_FUNCT_ADDR 0x0040 /* SET_FUNCT_ADDR command */
260#define OC_READ_ERROR_LOG 0x0080 /* READ_ERROR_LOG command */
261#define OC_READ_ADAPTER 0x0100 /* READ_ADAPTER command */
262#define OC_MODIFY_OPEN_PARMS 0x0400 /* MODIFY_OPEN_PARMS command */
263#define OC_RESTORE_OPEN_PARMS 0x0800 /* RESTORE_OPEN_PARMS command */
264#define OC_SET_FIRST_16_GROUP 0x1000 /* SET_FIRST_16_GROUP command */
265#define OC_SET_BRIDGE_PARMS 0x2000 /* SET_BRIDGE_PARMS command */
266#define OC_CONFIG_BRIDGE_PARMS 0x4000 /* CONFIG_BRIDGE_PARMS command */
267
268#define OPEN 0x0300 /* C: open command. S: completion. */
269#define TRANSMIT 0x0400 /* C: transmit command. S: completion
270 * status. (reject: COMMAND_REJECT if
271 * adapter not opened, TRANSMIT already
272 * issued or address passed in the SCB
273 * not word aligned)
274 */
275#define TRANSMIT_HALT 0x0500 /* C: interrupt TX TPL chain; if no
276 * TRANSMIT command issued, the command
277 * is ignored (completion with TRANSMIT
278 * status (0x0400)!)
279 */
280#define RECEIVE 0x0600 /* C: receive command. S: completion
281 * status. (reject: COMMAND_REJECT if
282 * adapter not opened, RECEIVE already
283 * issued or address passed in the SCB
284 * not word aligned)
285 */
286#define CLOSE 0x0700 /* C: close adapter. S: completion.
287 * (COMMAND_REJECT if adapter not open)
288 */
289#define SET_GROUP_ADDR 0x0800 /* C: alter adapter group address after
290 * OPEN. S: completion. (COMMAND_REJECT
291 * if adapter not open)
292 */
293#define SET_FUNCT_ADDR 0x0900 /* C: alter adapter functional address
294 * after OPEN. S: completion.
295 * (COMMAND_REJECT if adapter not open)
296 */
297#define READ_ERROR_LOG 0x0A00 /* C: read adapter error counters.
298 * S: completion. (command ignored
299 * if adapter not open!)
300 */
301#define READ_ADAPTER 0x0B00 /* C: read data from adapter memory.
302 * (important: after init and before
303 * open!) S: completion. (ADAPTER_CHECK
304 * interrupt if undefined storage area
305 * read)
306 */
307#define MODIFY_OPEN_PARMS 0x0D00 /* C: modify some adapter operational
308 * parameters. (bit corresponding to
309 * WRAP_INTERFACE is ignored)
310 * S: completion. (reject:
311 * COMMAND_REJECT)
312 */
313#define RESTORE_OPEN_PARMS 0x0E00 /* C: modify some adapter operational
314 * parameters. (bit corresponding
315 * to WRAP_INTERFACE is ignored)
316 * S: completion. (reject:
317 * COMMAND_REJECT)
318 */
319#define SET_FIRST_16_GROUP 0x0F00 /* C: alter the first two bytes in
320 * adapter group address.
321 * S: completion. (reject:
322 * COMMAND_REJECT)
323 */
324#define SET_BRIDGE_PARMS 0x1000 /* C: values and conditions for the
325 * adapter hardware to use when frames
326 * are copied for forwarding.
327 * S: completion. (reject:
328 * COMMAND_REJECT)
329 */
330#define CONFIG_BRIDGE_PARMS 0x1100 /* C: ..
331 * S: completion. (reject:
332 * COMMAND_REJECT)
333 */
334
335#define SPEED_4 4
336#define SPEED_16 16 /* Default transmission speed */
337
338
339/* Initialization Parameter Block (IPB); word alignment necessary! */
340#define BURST_SIZE 0x0018 /* Default burst size */
341#define BURST_MODE 0x9F00 /* Burst mode enable */
342#define DMA_RETRIES 0x0505 /* Magic DMA retry number... */
343
344#define CYCLE_TIME 3 /* Default AT-bus cycle time: 500 ns
345 * (later adapter version: fix cycle time!)
346 */
347#define LINE_SPEED_BIT 0x80
348
349/* Macro definition for the wait function. */
350#define ONE_SECOND_TICKS 1000000
351#define HALF_SECOND (ONE_SECOND_TICKS / 2)
352#define ONE_SECOND (ONE_SECOND_TICKS)
353#define TWO_SECONDS (ONE_SECOND_TICKS * 2)
354#define THREE_SECONDS (ONE_SECOND_TICKS * 3)
355#define FOUR_SECONDS (ONE_SECOND_TICKS * 4)
356#define FIVE_SECONDS (ONE_SECOND_TICKS * 5)
357
358#define BUFFER_SIZE 2048 /* Buffers on Adapter */
359
#pragma pack(1)
/*
 * Initialization Parameter Block (IPB).  Byte-packed: this layout is
 * read by the adapter itself, so no compiler padding is allowed (word
 * alignment of the block is still required — see note above).
 */
typedef struct {
	unsigned short Init_Options;	/* Initialize with burst mode;
					 * LLC disabled. (MAC only)
					 */

	/* Interrupt vectors the adapter places on attached system bus. */
	u_int8_t CMD_Status_IV;		/* Interrupt vector: command status. */
	u_int8_t TX_IV;			/* Interrupt vector: transmit. */
	u_int8_t RX_IV;			/* Interrupt vector: receive. */
	u_int8_t Ring_Status_IV;	/* Interrupt vector: ring status. */
	u_int8_t SCB_Clear_IV;		/* Interrupt vector: SCB clear. */
	u_int8_t Adapter_CHK_IV;	/* Interrupt vector: adapter check. */

	u_int16_t RX_Burst_Size;	/* Max. number of transfer cycles. */
	u_int16_t TX_Burst_Size;	/* During DMA burst; even value! */
	u_int16_t DMA_Abort_Thrhld;	/* Number of DMA retries. */

	u_int32_t SCB_Addr;	/* SCB address: even, word aligned, high-low */
	u_int32_t SSB_Addr;	/* SSB address: even, word aligned, high-low */
} IPB, *IPB_Ptr;
#pragma pack()
382
383/*
384 * OPEN Command Parameter List (OCPL) (can be reused, if the adapter has to
385 * be reopened)
386 */
387#define BUFFER_SIZE 2048 /* Buffers on Adapter. */
/*
 * Size in bytes of one Transmit Parameter List: 8-byte fixed part plus
 * 6 bytes per fragment.  Parenthesized so the macro expands safely in
 * arithmetic contexts (the old `8+6*TX_FRAG_NUM` mis-expanded under a
 * leading multiplier).
 */
#define TPL_SIZE (8+6*TX_FRAG_NUM) /* Depending on fragments per TPL. */
389#define RPL_SIZE 14 /* (with TI firmware v2.26 handling
390 * up to nine fragments possible)
391 */
392#define TX_BUF_MIN 20 /* ??? (Stephan: calculation with */
393#define TX_BUF_MAX 40 /* BUFFER_SIZE and MAX_FRAME_SIZE) ???
394 */
395#define DISABLE_EARLY_TOKEN_RELEASE 0x1000
396
397/* OPEN Options (high-low) */
398#define WRAP_INTERFACE 0x0080 /* Inserting omitted for test
399 * purposes; transmit data appears
400 * as receive data. (useful for
401 * testing; change: CLOSE necessary)
402 */
403#define DISABLE_HARD_ERROR 0x0040 /* On HARD_ERROR & TRANSMIT_BEACON
404 * no RING.STATUS interrupt.
405 */
406#define DISABLE_SOFT_ERROR 0x0020 /* On SOFT_ERROR, no RING.STATUS
407 * interrupt.
408 */
409#define PASS_ADAPTER_MAC_FRAMES 0x0010 /* Passing unsupported MAC frames
410 * to system.
411 */
412#define PASS_ATTENTION_FRAMES 0x0008 /* All changed attention MAC frames are
413 * passed to the system.
414 */
415#define PAD_ROUTING_FIELD 0x0004 /* Routing field is padded to 18
416 * bytes.
417 */
418#define FRAME_HOLD 0x0002 /*Adapter waits for entire frame before
419 * initiating DMA transfer; otherwise:
420 * DMA transfer initiation if internal
421 * buffer filled.
422 */
423#define CONTENDER 0x0001 /* Adapter participates in the monitor
424 * contention process.
425 */
426#define PASS_BEACON_MAC_FRAMES 0x8000 /* Adapter passes beacon MAC frames
427 * to the system.
428 */
429#define EARLY_TOKEN_RELEASE 0x1000 /* Only valid in 16 Mbps operation;
430 * 0 = ETR. (no effect in 4 Mbps
431 * operation)
432 */
433#define COPY_ALL_MAC_FRAMES 0x0400 /* All MAC frames are copied to
434 * the system. (after OPEN: duplicate
435 * address test (DAT) MAC frame is
436 * first received frame copied to the
437 * system)
438 */
439#define COPY_ALL_NON_MAC_FRAMES 0x0200 /* All non MAC frames are copied to
440 * the system.
441 */
442#define PASS_FIRST_BUF_ONLY 0x0100 /* Passes only first internal buffer
443 * of each received frame; FrameSize
444 * of RPLs must contain internal
445 * BUFFER_SIZE bits for promiscuous mode.
446 */
447#define ENABLE_FULL_DUPLEX_SELECTION 0x2000
448 /* Enable the use of full-duplex
449 * settings with bits in byte 22 in
450 * ocpl. (new feature in firmware
451 * version 3.09)
452 */
453
454/* Full-duplex settings */
455#define OPEN_FULL_DUPLEX_OFF 0x0000
456#define OPEN_FULL_DUPLEX_ON 0x00c0
457#define OPEN_FULL_DUPLEX_AUTO 0x0080
458
459#define PROD_ID_SIZE 18 /* Length of product ID. */
460
461#define TX_FRAG_NUM 3 /* Number of fragments used in one TPL. */
462#define TX_MORE_FRAGMENTS 0x8000 /* Bit set in DataCount to indicate more
463 * fragments following.
464 */
465
466/* XXX is there some better way to do this? */
467#define ISA_MAX_ADDRESS 0x00ffffff
468#define PCI_MAX_ADDRESS 0xffffffff
469
#pragma pack(1)
/*
 * OPEN Command Parameter List (OPB): parameters passed to the adapter
 * with the OPEN command.  Byte-packed adapter-visible layout; field
 * order must not change.
 */
typedef struct {
	u_int16_t OPENOptions;		/* OPEN option bits defined above. */
	u_int8_t NodeAddr[6];	/* Adapter node address; use ROM
				 * address
				 */
	u_int32_t GroupAddr;	/* Multicast: high order
				 * bytes = 0xC000
				 */
	u_int32_t FunctAddr;	/* High order bytes = 0xC000 */
	__be16 RxListSize;	/* RPL size: 0 (=26), 14, 20 or
				 * 26 bytes read by the adapter.
				 * (Depending on the number of
				 * fragments/list)
				 */
	__be16 TxListSize;	/* TPL size */
	__be16 BufSize;		/* Is automatically rounded up to the
				 * nearest nK boundary.
				 */
	u_int16_t FullDuplex;	/* Presumably one of the
				 * OPEN_FULL_DUPLEX_* values above —
				 * confirm against callers.
				 */
	u_int16_t Reserved;
	u_int8_t TXBufMin;	/* Number of adapter buffers reserved
				 * for transmission a minimum of 2
				 * buffers must be allocated.
				 */
	u_int8_t TXBufMax;	/* Maximum number of adapter buffers
				 * for transmit; a minimum of 2 buffers
				 * must be available for receive.
				 * Default: 6
				 */
	u_int16_t ProdIDAddr[2];/* Pointer to product ID. */
} OPB, *OPB_Ptr;
#pragma pack()
503
504/*
505 * SCB: adapter commands enabled by the host system started by writing
506 * CMD_INTERRUPT_ADAPTER | CMD_EXECUTE (|SCB_REQUEST) to the SIFCMD IO
507 * register. (special case: | CMD_SYSTEM_IRQ for initialization)
508 */
#pragma pack(1)
/*
 * System Command Block: host-to-adapter command, fetched by the adapter
 * via DMA (see comment above for how it is kicked off through SIFCMD).
 */
typedef struct {
	u_int16_t CMD;		/* Command code */
	u_int16_t Parm[2];	/* Pointer to Command Parameter Block */
} SCB;	/* System Command Block (32 bit physical address; big endian)*/
#pragma pack()
515
516/*
517 * SSB: adapter command return status can be evaluated after COMMAND_STATUS
518 * adapter to system interrupt after reading SSB, the availability of the SSB
519 * has to be told the adapter by writing CMD_INTERRUPT_ADAPTER | CMD_SSB_CLEAR
520 * in the SIFCMD IO register.
521 */
#pragma pack(1)
/*
 * System Status Block: adapter-to-host command completion status;
 * acknowledged with CMD_SSB_CLEAR (see comment above).
 */
typedef struct {
	u_int16_t STS;		/* Status code */
	u_int16_t Parm[3];	/* Parameter or pointer to Status Parameter
				 * Block.
				 */
} SSB;	/* System Status Block (big endian - physical address) */
#pragma pack()
530
/*
 * Adapter internal pointers, read from adapter memory at offset
 * ADAPTER_INT_PTRS after init and before open (see define above).
 */
typedef struct {
	unsigned short BurnedInAddrPtr;	/* Pointer to adapter burned in
					 * address. (BIA)
					 */
	unsigned short SoftwareLevelPtr;/* Pointer to software level data. */
	unsigned short AdapterAddrPtr;	/* Pointer to adapter addresses. */
	unsigned short AdapterParmsPtr;	/* Pointer to adapter parameters. */
	unsigned short MACBufferPtr;	/* Pointer to MAC buffer. (internal) */
	unsigned short LLCCountersPtr;	/* Pointer to LLC counters. */
	unsigned short SpeedFlagPtr;	/* Pointer to data rate flag.
					 * (4/16 Mbps)
					 */
	unsigned short AdapterRAMPtr;	/* Pointer to adapter RAM found. (KB) */
} INTPTRS;	/* Adapter internal pointers */
545
#pragma pack(1)
/*
 * Adapter error counters as returned by the READ_ERROR_LOG command;
 * byte-packed because the layout mirrors adapter memory.
 */
typedef struct {
	u_int8_t Line_Error;		/* Line error: code violation in
					 * frame or in a token, or FCS error.
					 */
	u_int8_t Internal_Error;	/* IBM specific. (Reserved_1) */
	u_int8_t Burst_Error;
	u_int8_t ARI_FCI_Error;		/* ARI/FCI bit zero in AMP or
					 * SMP MAC frame.
					 */
	u_int8_t AbortDelimeters;	/* IBM specific. (Reserved_2) */
	u_int8_t Reserved_3;
	u_int8_t Lost_Frame_Error;	/* Receive of end of transmitted
					 * frame failed.
					 */
	u_int8_t Rx_Congest_Error;	/* Adapter in repeat mode has not
					 * enough buffer space to copy incoming
					 * frame.
					 */
	u_int8_t Frame_Copied_Error;	/* ARI bit not zero in frame
					 * addressed to adapter.
					 */
	u_int8_t Frequency_Error;	/* IBM specific. (Reserved_4) */
	u_int8_t Token_Error;		/* (active only in monitor station) */
	u_int8_t Reserved_5;
	u_int8_t DMA_Bus_Error;		/* DMA bus errors not exceeding the
					 * abort thresholds.
					 */
	u_int8_t DMA_Parity_Error;	/* DMA parity errors not exceeding
					 * the abort thresholds.
					 */
} ERRORTAB;	/* Adapter error counters */
#pragma pack()
579
580
581/*--------------------- Send and Receive definitions -------------------*/
#pragma pack(1)
/*
 * One scatter/gather fragment as seen by the adapter inside a TPL/RPL
 * fragment list (big-endian fields, byte-packed).
 */
typedef struct {
	__be16 DataCount;	/* Value 0, even and odd values are
				 * permitted; value is unaltered most
				 * significant bit set: following
				 * fragments last fragment: most
				 * significant bit is not evaluated.
				 * (???)
				 */
	__be32 DataAddr;	/* Pointer to frame data fragment;
				 * even or odd.
				 */
} Fragment;
#pragma pack()
596
597#define MAX_FRAG_NUMBERS 9 /* Maximal number of fragments possible to use
598 * in one RPL/TPL. (depending on TI firmware
599 * version)
600 */
601
602/*
603 * AC (1), FC (1), Dst (6), Src (6), RIF (18), Data (4472) = 4504
604 * The packet size can be one of the follows: 548, 1502, 2084, 4504, 8176,
605 * 11439, 17832. Refer to TMS380 Second Generation Token Ring User's Guide
606 * Page 2-27.
607 */
608#define HEADER_SIZE (1 + 1 + 6 + 6)
609#define SRC_SIZE 18
610#define MIN_DATA_SIZE 516
611#define DEFAULT_DATA_SIZE 4472
612#define MAX_DATA_SIZE 17800
613
614#define DEFAULT_PACKET_SIZE (HEADER_SIZE + SRC_SIZE + DEFAULT_DATA_SIZE)
615#define MIN_PACKET_SIZE (HEADER_SIZE + SRC_SIZE + MIN_DATA_SIZE)
616#define MAX_PACKET_SIZE (HEADER_SIZE + SRC_SIZE + MAX_DATA_SIZE)
617
618/*
619 * Macros to deal with the frame status field.
620 */
621#define AC_NOT_RECOGNIZED 0x00
622#define GROUP_BIT 0x80
623#define GET_TRANSMIT_STATUS_HIGH_BYTE(Ts) ((unsigned char)((Ts) >> 8))
624#define GET_FRAME_STATUS_HIGH_AC(Fs) ((unsigned char)(((Fs) & 0xC0) >> 6))
625#define GET_FRAME_STATUS_LOW_AC(Fs) ((unsigned char)(((Fs) & 0x0C) >> 2))
626#define DIRECTED_FRAME(Context) (!((Context)->MData[2] & GROUP_BIT))
627
628
629/*--------------------- Send Functions ---------------------------------*/
630/* define TX_CSTAT _REQUEST (R) and _COMPLETE (C) values (high-low) */
631
632#define TX_VALID 0x0080 /* R: set via TRANSMIT.VALID interrupt.
633 * C: always reset to zero!
634 */
635#define TX_FRAME_COMPLETE 0x0040 /* R: must be reset to zero.
636 * C: set to one.
637 */
638#define TX_START_FRAME 0x0020 /* R: start of a frame: 1
639 * C: unchanged.
640 */
641#define TX_END_FRAME 0x0010 /* R: end of a frame: 1
642 * C: unchanged.
643 */
644#define TX_FRAME_IRQ 0x0008 /* R: request interrupt generation
645 * after transmission.
646 * C: unchanged.
647 */
648#define TX_ERROR 0x0004 /* R: reserved.
649 * C: set to one if Error occurred.
650 */
651#define TX_INTERFRAME_WAIT 0x0004
652#define TX_PASS_CRC 0x0002 /* R: set if CRC value is already
653 * calculated. (valid only in
654 * FRAME_START TPL)
655 * C: unchanged.
656 */
657#define TX_PASS_SRC_ADDR 0x0001 /* R: adapter uses explicit frame
658 * source address and does not overwrite
659 * with the adapter node address.
660 * (valid only in FRAME_START TPL)
661 *
662 * C: unchanged.
663 */
664#define TX_STRIP_FS 0xFF00 /* R: reserved.
665 * C: if no Transmission Error,
666 * field contains copy of FS byte after
667 * stripping of frame.
668 */
669
670/*
671 * Structure of Transmit Parameter Lists (TPLs) (only one frame every TPL,
672 * but possibly multiple TPLs for one frame) the length of the TPLs has to be
673 * initialized in the OPL. (OPEN parameter list)
674 */
675#define TPL_NUM 3 /* Number of Transmit Parameter Lists.
676 * !! MUST BE >= 3 !!
677 */
678
#pragma pack(1)
typedef struct s_TPL TPL;

struct s_TPL {	/* Transmit Parameter List (align on even word boundaries) */
	__be32 NextTPLAddr;		/* Pointer to next TPL in chain; if
					 * pointer is odd: this is the last
					 * TPL. Pointing to itself can cause
					 * problems!
					 */
	volatile u_int16_t Status;	/* Initialized by the adapter:
					 * CSTAT_REQUEST important: update least
					 * significant bit first! Set by the
					 * adapter: CSTAT_COMPLETE status.
					 */
	__be16 FrameSize;		/* Number of bytes to be transmitted
					 * as a frame including AC/FC,
					 * Destination, Source, Routing field
					 * not including CRC, FS, End Delimiter
					 * (valid only if START_FRAME bit in
					 * CSTAT nonzero) must not be zero in
					 * any list; maximum value: (BUFFER_SIZE
					 * - 8) * TX_BUF_MAX sum of DataCount
					 * values in FragmentList must equal
					 * Frame_Size value in START_FRAME TPL!
					 * frame data fragment list.
					 */

	/* TPL/RPL size in OPEN parameter list depending on maximal
	 * numbers of fragments used in one parameter list.
	 */
	Fragment FragList[TX_FRAG_NUM];	/* Maximum: nine frame fragments in one
					 * TPL actual version of firmware: 9
					 * fragments possible.
					 */
/* NOTE(review): packing is switched off here, in the middle of the
 * struct.  The fields above form the adapter-visible part; the host-only
 * bookkeeping below gets default alignment.  Looks intentional, but
 * confirm against the TPL size used on the adapter side before touching.
 */
#pragma pack()

	/* Special proprietary data and precalculations */

	TPL *NextTPLPtr;		/* Pointer to next TPL in chain. */
	unsigned char *MData;		/* Presumably the frame data in host
					 * (virtual) memory — verify in
					 * tms380tr.c.
					 */
	struct sk_buff *Skb;		/* sk_buff associated with this TPL. */
	unsigned char TPLIndex;
	volatile unsigned char BusyFlag;/* Flag: TPL busy? */
	dma_addr_t DMABuff;		/* DMA IO bus address from dma_map */
};
724
725/* ---------------------Receive Functions-------------------------------*
726 * define RECEIVE_CSTAT_REQUEST (R) and RECEIVE_CSTAT_COMPLETE (C) values.
727 * (high-low)
728 */
729#define RX_VALID 0x0080 /* R: set; tell adapter with
730 * RECEIVE.VALID interrupt.
731 * C: reset to zero.
732 */
733#define RX_FRAME_COMPLETE 0x0040 /* R: must be reset to zero,
734 * C: set to one.
735 */
736#define RX_START_FRAME 0x0020 /* R: must be reset to zero.
737 * C: set to one on the list.
738 */
739#define RX_END_FRAME 0x0010 /* R: must be reset to zero.
740 * C: set to one on the list
741 * that ends the frame.
742 */
743#define RX_FRAME_IRQ 0x0008 /* R: request interrupt generation
744 * after receive.
745 * C: unchanged.
746 */
747#define RX_INTERFRAME_WAIT 0x0004 /* R: after receiving a frame:
748 * interrupt and wait for a
749 * RECEIVE.CONTINUE.
750 * C: unchanged.
751 */
752#define RX_PASS_CRC 0x0002 /* R: if set, the adapter includes
753 * the CRC in data passed. (last four
754 * bytes; valid only if FRAME_START is
755 * set)
756 * C: set, if CRC is included in
757 * received data.
758 */
759#define RX_PASS_SRC_ADDR 0x0001 /* R: adapter uses explicit frame
760 * source address and does not
761 * overwrite with the adapter node
762 * address. (valid only if FRAME_START
763 * is set)
764 * C: unchanged.
765 */
766#define RX_RECEIVE_FS 0xFC00 /* R: reserved; must be reset to zero.
767 * C: on lists with START_FRAME, field
768 * contains frame status field from
769 * received frame; otherwise cleared.
770 */
771#define RX_ADDR_MATCH 0x0300 /* R: reserved; must be reset to zero.
772 * C: address match code mask.
773 */
774#define RX_STATUS_MASK 0x00FF /* Mask for receive status bits. */
775
776#define RX_INTERN_ADDR_MATCH 0x0100 /* C: internally address match. */
777#define RX_EXTERN_ADDR_MATCH 0x0200 /* C: externally matched via
778 * XMATCH/XFAIL interface.
779 */
780#define RX_INTEXT_ADDR_MATCH 0x0300 /* C: internally and externally
781 * matched.
782 */
783#define RX_READY (RX_VALID | RX_FRAME_IRQ) /* Ready for receive. */
784
785/* Constants for Command Status Interrupt.
786 * COMMAND_REJECT status field bit functions (SSB.Parm[0])
787 */
788#define ILLEGAL_COMMAND 0x0080 /* Set if an unknown command
789 * is issued to the adapter
790 */
791#define ADDRESS_ERROR 0x0040 /* Set if any address field in
792 * the SCB is odd. (not word aligned)
793 */
794#define ADAPTER_OPEN 0x0020 /* Command issued illegal with
795 * open adapter.
796 */
797#define ADAPTER_CLOSE 0x0010 /* Command issued illegal with
798 * closed adapter.
799 */
800#define SAME_COMMAND 0x0008 /* Command issued with same command
801 * already executing.
802 */
803
804/* OPEN_COMPLETION values (SSB.Parm[0], MSB) */
805#define NODE_ADDR_ERROR 0x0040 /* Wrong address or BIA read
806 * zero address.
807 */
808#define LIST_SIZE_ERROR 0x0020 /* If List_Size value not in 0,
809 * 14, 20, 26.
810 */
811#define BUF_SIZE_ERROR 0x0010 /* Not enough available memory for
812 * two buffers.
813 */
814#define TX_BUF_COUNT_ERROR 0x0004 /* Remaining receive buffers less than
815 * two.
816 */
817#define OPEN_ERROR 0x0002 /* Error during ring insertion; more
818 * information in bits 8-15.
819 */
820
821/* Standard return codes */
822#define GOOD_COMPLETION 0x0080 /* =OPEN_SUCCESSFULL */
823#define INVALID_OPEN_OPTION 0x0001 /* OPEN options are not supported by
824 * the adapter.
825 */
826
827/* OPEN phases; details of OPEN_ERROR (SSB.Parm[0], LSB) */
828#define OPEN_PHASES_MASK 0xF000 /* Check only the bits 8-11. */
829#define LOBE_MEDIA_TEST 0x1000
830#define PHYSICAL_INSERTION 0x2000
831#define ADDRESS_VERIFICATION 0x3000
832#define PARTICIPATION_IN_RING_POLL 0x4000
833#define REQUEST_INITIALISATION 0x5000
834#define FULLDUPLEX_CHECK 0x6000
835
836/* OPEN error codes; details of OPEN_ERROR (SSB.Parm[0], LSB) */
837#define OPEN_ERROR_CODES_MASK 0x0F00 /* Check only the bits 12-15. */
838#define OPEN_FUNCTION_FAILURE 0x0100 /* Unable to transmit to itself or
839 * frames received before insertion.
840 */
841#define OPEN_SIGNAL_LOSS 0x0200 /* Signal loss condition detected at
842 * receiver.
843 */
844#define OPEN_TIMEOUT 0x0500 /* Insertion timer expired before
845 * logical insertion.
846 */
847#define OPEN_RING_FAILURE 0x0600 /* Unable to receive own ring purge
848 * MAC frames.
849 */
850#define OPEN_RING_BEACONING 0x0700 /* Beacon MAC frame received after
851 * ring insertion.
852 */
853#define OPEN_DUPLICATE_NODEADDR 0x0800 /* Other station in ring found
854 * with the same address.
855 */
856#define OPEN_REQUEST_INIT 0x0900 /* RPS present but does not respond. */
857#define OPEN_REMOVE_RECEIVED 0x0A00 /* Adapter received a remove adapter
858 * MAC frame.
859 */
860#define OPEN_FULLDUPLEX_SET 0x0D00 /* Got this with full duplex on when
861 * trying to connect to a normal ring.
862 */
863
864/* SET_BRIDGE_PARMS return codes: */
865#define BRIDGE_INVALID_MAX_LEN 0x4000 /* MAX_ROUTING_FIELD_LENGTH odd,
866 * less than 6 or > 30.
867 */
868#define BRIDGE_INVALID_SRC_RING 0x2000 /* SOURCE_RING number zero, too large
869 * or = TARGET_RING.
870 */
871#define BRIDGE_INVALID_TRG_RING 0x1000 /* TARGET_RING number zero, too large
872 * or = SOURCE_RING.
873 */
874#define BRIDGE_INVALID_BRDGE_NO 0x0800 /* BRIDGE_NUMBER too large. */
875#define BRIDGE_INVALID_OPTIONS 0x0400 /* Invalid bridge options. */
876#define BRIDGE_DIAGS_FAILED 0x0200 /* Diagnostics of TMS380SRA failed. */
877#define BRIDGE_NO_SRA 0x0100 /* The TMS380SRA does not exist in HW
878 * configuration.
879 */
880
881/*
882 * Bring Up Diagnostics error codes.
883 */
884#define BUD_INITIAL_ERROR 0x0
885#define BUD_CHECKSUM_ERROR 0x1
886#define BUD_ADAPTER_RAM_ERROR 0x2
887#define BUD_INSTRUCTION_ERROR 0x3
888#define BUD_CONTEXT_ERROR 0x4
889#define BUD_PROTOCOL_ERROR 0x5
890#define BUD_INTERFACE_ERROR 0x6
891
892/* BUD constants */
893#define BUD_MAX_RETRIES 3
894#define BUD_MAX_LOOPCNT 6
895#define BUD_TIMEOUT 3000
896
897/* Initialization constants */
898#define INIT_MAX_RETRIES 3 /* Maximum three retries. */
899#define INIT_MAX_LOOPCNT 22 /* Maximum loop counts. */
900
/* RING STATUS field values (high/low halves of the 16-bit status word). */
#define SIGNAL_LOSS		0x0080	/* Loss of signal detected on the ring. */
#define HARD_ERROR		0x0040	/* Transmitting or receiving beacon frames. */
#define SOFT_ERROR		0x0020	/* Report error MAC frame transmitted. */
#define TRANSMIT_BEACON		0x0010	/* Transmitting beacon frames on the ring. */
#define LOBE_WIRE_FAULT		0x0008	/* Open/short circuit in the cable to the
					 * concentrator; adapter closed.
					 */
#define AUTO_REMOVAL_ERROR	0x0004	/* Lobe wrap test failed, deinserted;
					 * adapter closed.
					 */
#define REMOVE_RECEIVED		0x0001	/* Received a remove ring station MAC frame
					 * request; adapter closed.
					 */
#define COUNTER_OVERFLOW	0x8000	/* One of the adapter error counters
					 * overflowed; use READ.ERROR.LOG.
					 */
#define SINGLE_STATION		0x4000	/* Adapter is the only station on the ring. */
#define RING_RECOVERY		0x2000	/* Claim token MAC frames on the ring;
					 * reset after a ring purge frame.
					 */

/* Any of these ring-status bits means the adapter has closed. */
#define ADAPTER_CLOSED	(LOBE_WIRE_FAULT | AUTO_REMOVAL_ERROR | REMOVE_RECEIVED)
936
/* Adapter_check_block.Status field bit assignments: */
#define DIO_PARITY		0x8000	/* Bad parity detected on direct I/O access. */
#define DMA_READ_ABORT		0x4000	/* DMA read from system aborted. Parm[0]:
					 * 0=timeout, 1=parity error, 2=bus error;
					 * Parm[1]: 32-bit host address at failure.
					 */
#define DMA_WRITE_ABORT		0x2000	/* DMA write to system aborted.
					 * (parameters analogous to DMA_READ_ABORT)
					 */
#define ILLEGAL_OP_CODE		0x1000	/* Illegal opcode in the adapter firmware.
					 * Parm[0-2]: communications processor
					 * registers R13-R15.
					 */
#define PARITY_ERRORS		0x0800	/* Internal bus parity error detected. */
#define RAM_DATA_ERROR		0x0080	/* Valid only during RAM testing; RAM data
					 * error. Parm[0-1]: 32-bit RAM address.
					 */
#define RAM_PARITY_ERROR	0x0040	/* Valid only during RAM testing; RAM parity
					 * error. Parm[0-1]: 32-bit RAM address.
					 */
#define RING_UNDERRUN		0x0020	/* Internal DMA underrun while transmitting
					 * onto the ring.
					 */
#define INVALID_IRQ		0x0008	/* Unrecognized adapter-internal interrupt.
					 * Parm[0-2]: adapter registers R13-R15.
					 */
#define INVALID_ERROR_IRQ	0x0004	/* Unrecognized error interrupt generated.
					 * Parm[0-2]: adapter registers R13-R15.
					 */
#define INVALID_XOP		0x0002	/* Unrecognized XOP request in the
					 * communication processor. Parm[0-2]:
					 * adapter registers R13-R15.
					 */
#define CHECKADDR		0x05E0	/* Offset of adapter check status info. */
#define ROM_PAGE_0		0x0000	/* Adapter ROM page 0. */
986
/*
 * RECEIVE.STATUS interrupt result SSB values: (high-low)
 * (RECEIVE_COMPLETE field bit definitions in SSB.Parm[0])
 */
#define RX_COMPLETE	0x0080	/* SSB.Parm[0]; SSB.Parm[1]: 32-bit pointer
				 * to the last RPL.
				 */
#define RX_SUSPENDED	0x0040	/* SSB.Parm[0]; SSB.Parm[1]: 32-bit pointer
				 * to the RPL with an odd forward pointer.
				 */

/* Valid receive CSTAT bit combinations: */
#define RX_FRAME_CONTROL_BITS	(RX_VALID | RX_START_FRAME | RX_END_FRAME | \
				 RX_FRAME_COMPLETE)
#define VALID_SINGLE_BUFFER_FRAME	(RX_START_FRAME | RX_END_FRAME | \
					 RX_FRAME_COMPLETE)
1004
/* State of the socket buffer attached to a receive parameter list. */
typedef enum SKB_STAT {
	SKB_UNAVAILABLE,	/* no skb attached */
	SKB_DMA_DIRECT,		/* skb mapped for direct DMA */
	SKB_DATA_COPY		/* frame data copied into a local buffer */
} SKB_STAT;
1011
/* Receive Parameter List (RPL).  The length of the RPLs has to be initialized
 * in the OPL (OPEN parameter list).
 */
#define RPL_NUM 3

#define RX_FRAG_NUM 1	/* Maximal number of used fragments in one RPL.
			 * (up to firmware v2.24: 3, now: up to 9)
			 */

/* The adapter reads this structure via DMA, so the wire-visible fields must
 * be byte-packed exactly as the firmware expects them.
 */
#pragma pack(1)
typedef struct s_RPL RPL;
struct s_RPL {	/* Receive Parameter List */
	__be32 NextRPLAddr;		/* Pointer to next RPL in chain
					 * (normalized = physical 32 bit
					 * address) if pointer is odd: this
					 * is last RPL. Pointing to itself can
					 * cause problems!
					 */
	volatile u_int16_t Status;	/* Set by creation of Receive Parameter
					 * List RECEIVE_CSTAT_COMPLETE set by
					 * adapter in lists that start or end
					 * a frame.  volatile: written by the
					 * adapter behind the CPU's back.
					 */
	volatile __be16 FrameSize;	/* Number of bytes received as a
					 * frame including AC/FC, Destination,
					 * Source, Routing field not including
					 * CRC, FS (Frame Status), End Delimiter
					 * (valid only if START_FRAME bit in
					 * CSTAT nonzero) must not be zero in
					 * any list; maximum value: (BUFFER_SIZE
					 * - 8) * TX_BUF_MAX sum of DataCount
					 * values in FragmentList must equal
					 * Frame_Size value in START_FRAME TPL!
					 * frame data fragment list
					 */

	/* TPL/RPL size in OPEN parameter list depending on maximal numbers
	 * of fragments used in one parameter list.
	 */
	Fragment FragList[RX_FRAG_NUM];	/* Maximum: nine frame fragments in
					 * one TPL. Actual version of firmware:
					 * 9 fragments possible.
					 */
/* NOTE(review): #pragma pack() is restored here, *inside* the struct, so only
 * the fields above are packed; the host-only bookkeeping fields below use
 * natural alignment.  Mid-struct pragma placement is compiler-specific —
 * confirm the layout on any new toolchain before touching this.
 */
#pragma pack()

	/* Special proprietary data and precalculations (host-side only,
	 * never read by the adapter).
	 */
	RPL *NextRPLPtr;		/* Logical pointer to next RPL in chain. */
	unsigned char *MData;		/* Virtual address of the receive buffer. */
	struct sk_buff *Skb;		/* skb backing this RPL, if any. */
	SKB_STAT SkbStat;		/* How Skb/MData relate (see enum SKB_STAT). */
	int RPLIndex;			/* Index of this RPL in net_local.Rpl[]. */
	dma_addr_t DMABuff;		/* DMA IO bus address from dma_map */
};
1065
/* Information that need to be kept for each board (per-device private data,
 * reached via netdev_priv()).
 */
typedef struct net_local {
/* The adapter DMAs these control blocks directly, so they must be packed
 * to match the firmware's expected layout.
 */
#pragma pack(1)
	IPB ipb;	/* Initialization Parameter Block. */
	SCB scb;	/* System Command Block: system to adapter
			 * communication.
			 */
	SSB ssb;	/* System Status Block: adapter to system
			 * communication.
			 */
	OPB ocpl;	/* Open Options Parameter Block. */

	ERRORTAB errorlogtable;	/* Adapter statistic error counters.
				 * (read from adapter memory)
				 */
	unsigned char ProductID[PROD_ID_SIZE + 1];	/* Product ID */
#pragma pack()

	/* Transmit parameter lists and their local bounce buffers. */
	TPL Tpl[TPL_NUM];
	TPL *TplFree;		/* Next free TPL in the ring. */
	TPL *TplBusy;		/* Oldest TPL still owned by the adapter. */
	unsigned char LocalTxBuffers[TPL_NUM][DEFAULT_PACKET_SIZE];

	/* Receive parameter lists and their local bounce buffers. */
	RPL Rpl[RPL_NUM];
	RPL *RplHead;		/* Next RPL the adapter will fill. */
	RPL *RplTail;		/* Last RPL in the receive chain. */
	unsigned char LocalRxBuffers[RPL_NUM][DEFAULT_PACKET_SIZE];

	struct device *pdev;	/* Parent bus device (used for DMA mapping). */
	int DataRate;		/* Ring speed: SPEED_4 or SPEED_16. */
	unsigned char ScbInUse;	/* Nonzero while a command occupies the SCB. */
	unsigned short CMDqueue;	/* Bitmask of commands waiting for the SCB. */

	/* Driver state flags, packed into one word. */
	unsigned long AdapterOpenFlag:1;
	unsigned long AdapterVirtOpenFlag:1;
	unsigned long OpenCommandIssued:1;
	unsigned long TransmitCommandActive:1;
	unsigned long TransmitHaltScheduled:1;
	unsigned long HaltInProgress:1;
	unsigned long LobeWireFaultLogged:1;
	unsigned long ReOpenInProgress:1;
	unsigned long Sleeping:1;

	unsigned long LastOpenStatus;		/* Result of the most recent OPEN. */
	unsigned short CurrentRingStatus;	/* Latest RING.STATUS bits. */
	unsigned long MaxPacketSize;

	unsigned long StartTime;	/* jiffies timestamps — presumably used
					 * for open/transmit timeouts; confirm
					 * against tms380tr.c.
					 */
	unsigned long LastSendTime;

	struct tr_statistics MacStat;	/* MAC statistics structure */

	unsigned long dmalimit;	/* the max DMA address (ie, ISA) */
	dma_addr_t dmabuffer;	/* the DMA bus address corresponding to
				   priv. Might be different from virt_to_bus()
				   for architectures with IO MMU (Alpha) */

	struct timer_list timer;

	wait_queue_head_t wait_for_tok_int;

	INTPTRS intptrs;	/* Internal adapter pointer. Must be read
				 * before OPEN command.
				 */
	/* Bus-specific accessors supplied by the interface driver
	 * (e.g. tmspci, abyss, madgemc) so the generic tms380tr core
	 * stays bus-agnostic.
	 */
	unsigned short (*setnselout)(struct net_device *);
	unsigned short (*sifreadb)(struct net_device *, unsigned short);
	void (*sifwriteb)(struct net_device *, unsigned short, unsigned short);
	unsigned short (*sifreadw)(struct net_device *, unsigned short);
	void (*sifwritew)(struct net_device *, unsigned short, unsigned short);

	spinlock_t lock;	/* SMP protection */
	void *tmspriv;		/* Interface-driver private data. */
} NET_LOCAL;
1139
1140#endif /* __KERNEL__ */
1141#endif /* __LINUX_TMS380TR_H */
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
new file mode 100644
index 00000000000..d3e788a9cd1
--- /dev/null
+++ b/drivers/net/tokenring/tmspci.c
@@ -0,0 +1,249 @@
1/*
2 * tmspci.c: A generic network driver for TMS380-based PCI token ring cards.
3 *
4 * Written 1999 by Adam Fritzler
5 *
6 * This software may be used and distributed according to the terms
7 * of the GNU General Public License, incorporated herein by reference.
8 *
9 * This driver module supports the following cards:
10 * - SysKonnect TR4/16(+) PCI (SK-4590)
11 * - SysKonnect TR4/16 PCI (SK-4591)
12 * - Compaq TR 4/16 PCI
13 * - Thomas-Conrad TC4048 4/16 PCI
14 * - 3Com 3C339 Token Link Velocity
15 *
16 * Maintainer(s):
17 * AF Adam Fritzler
18 *
19 * Modification History:
20 * 30-Dec-99 AF Split off from the tms380tr driver.
21 * 22-Jan-00 AF Updated to use indirect read/writes
22 * 23-Nov-00 JG New PCI API, cleanups
23 *
24 * TODO:
25 * 1. See if we can use MMIO instead of port accesses
26 *
27 */
28
29#include <linux/module.h>
30#include <linux/kernel.h>
31#include <linux/errno.h>
32#include <linux/pci.h>
33#include <linux/init.h>
34#include <linux/netdevice.h>
35#include <linux/trdevice.h>
36
37#include <asm/system.h>
38#include <asm/io.h>
39#include <asm/irq.h>
40
41#include "tms380tr.h"
42
/* Printed once, on the first successful probe. */
static char version[] __devinitdata =
"tmspci.c: v1.02 23/11/2000 by Adam Fritzler\n";

/* Size of the SIF I/O port window claimed per adapter. */
#define TMS_PCI_IO_EXTENT 32

/* Per-board quirks, selected by the driver_data index in tmspci_pci_tbl. */
struct card_info {
	unsigned char nselout[2]; /* NSELOUT vals for 4mb([0]) and 16mb([1]) */
	char *name;               /* Human-readable adapter name. */
};
52
/* Indexed by pci_device_id.driver_data; order must match tmspci_pci_tbl. */
static struct card_info card_info_table[] = {
	{ {0x03, 0x01}, "Compaq 4/16 TR PCI"},
	{ {0x03, 0x01}, "SK NET TR 4/16 PCI"},
	{ {0x03, 0x01}, "Thomas-Conrad TC4048 PCI 4/16"},
	{ {0x03, 0x01}, "3Com Token Link Velocity"},
};
59
/* Supported PCI IDs.  The trailing driver_data field is an index into
 * card_info_table[] for the matching board.
 */
static DEFINE_PCI_DEVICE_TABLE(tmspci_pci_tbl) = {
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C339, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
	{ }			/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, tmspci_pci_tbl);
68
MODULE_LICENSE("GPL");

/* Forward declarations (definitions follow the probe routine). */
static void tms_pci_read_eeprom(struct net_device *dev);
static unsigned short tms_pci_setnselout_pins(struct net_device *dev);
73
74static unsigned short tms_pci_sifreadb(struct net_device *dev, unsigned short reg)
75{
76 return inb(dev->base_addr + reg);
77}
78
79static unsigned short tms_pci_sifreadw(struct net_device *dev, unsigned short reg)
80{
81 return inw(dev->base_addr + reg);
82}
83
84static void tms_pci_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
85{
86 outb(val, dev->base_addr + reg);
87}
88
89static void tms_pci_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
90{
91 outw(val, dev->base_addr + reg);
92}
93
/*
 * PCI probe: bring up one TMS380 adapter.
 *
 * Claims the I/O region, reads the MAC address from adapter RAM, wires
 * the bus-specific SIF accessors into the generic tms380tr core, and
 * registers the net device.  Resources are released in reverse order on
 * any failure (goto unwind chain at the bottom).
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int versionprinted;	/* print the banner only once */
	struct net_device *dev;
	struct net_local *tp;
	int ret;
	unsigned int pci_irq_line;
	unsigned long pci_ioaddr;
	/* driver_data was set up in tmspci_pci_tbl as an index into
	 * card_info_table[].
	 */
	struct card_info *cardinfo = &card_info_table[ent->driver_data];

	if (versionprinted++ == 0)
		printk("%s", version);

	/* NOTE(review): no pci_disable_device() on the error paths or in
	 * detach — the device stays enabled once this succeeds; confirm
	 * whether that is intentional for this driver.
	 */
	if (pci_enable_device(pdev))
		return -EIO;

	/* Remove I/O space marker in bit 0. */
	pci_irq_line = pdev->irq;
	pci_ioaddr = pci_resource_start (pdev, 0);

	/* At this point we have found a valid card. */
	dev = alloc_trdev(sizeof(struct net_local));
	if (!dev)
		return -ENOMEM;

	/* NOTE(review): dev->name is still the "tr%d" template here (it is
	 * only formatted by register_netdev below), so /proc/ioports shows
	 * the unformatted name — confirm if this matters.
	 */
	if (!request_region(pci_ioaddr, TMS_PCI_IO_EXTENT, dev->name)) {
		ret = -EBUSY;
		goto err_out_trdev;
	}

	dev->base_addr	= pci_ioaddr;
	dev->irq	= pci_irq_line;
	dev->dma	= 0;		/* PCI bus-mastering; no ISA DMA channel. */

	dev_info(&pdev->dev, "%s\n", cardinfo->name);
	dev_info(&pdev->dev, "    IO: %#4lx  IRQ: %d\n", dev->base_addr, dev->irq);

	/* Pull the MAC address out of adapter RAM before the core init. */
	tms_pci_read_eeprom(dev);

	dev_info(&pdev->dev, "    Ring Station Address: %pM\n", dev->dev_addr);

	ret = tmsdev_init(dev, &pdev->dev);
	if (ret) {
		dev_info(&pdev->dev, "unable to get memory for dev->priv.\n");
		goto err_out_region;
	}

	/* Hook the PCI-specific register accessors into the generic core. */
	tp = netdev_priv(dev);
	tp->setnselout = tms_pci_setnselout_pins;

	tp->sifreadb = tms_pci_sifreadb;
	tp->sifreadw = tms_pci_sifreadw;
	tp->sifwriteb = tms_pci_sifwriteb;
	tp->sifwritew = tms_pci_sifwritew;

	memcpy(tp->ProductID, cardinfo->name, PROD_ID_SIZE + 1);

	tp->tmspriv = cardinfo;

	dev->netdev_ops = &tms380tr_netdev_ops;

	ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		goto err_out_tmsdev;

	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto err_out_irq;

	return 0;

	/* Unwind in strict reverse order of acquisition. */
err_out_irq:
	free_irq(pdev->irq, dev);
err_out_tmsdev:
	pci_set_drvdata(pdev, NULL);
	tmsdev_term(dev);
err_out_region:
	release_region(pci_ioaddr, TMS_PCI_IO_EXTENT);
err_out_trdev:
	free_netdev(dev);
	return ret;
}
180
181/*
182 * Reads MAC address from adapter RAM, which should've read it from
183 * the onboard ROM.
184 *
185 * Calling this on a board that does not support it can be a very
186 * dangerous thing. The Madge board, for instance, will lock your
187 * machine hard when this is called. Luckily, its supported in a
188 * separate driver. --ASF
189 */
190static void tms_pci_read_eeprom(struct net_device *dev)
191{
192 int i;
193
194 /* Address: 0000:0000 */
195 tms_pci_sifwritew(dev, 0, SIFADX);
196 tms_pci_sifwritew(dev, 0, SIFADR);
197
198 /* Read six byte MAC address data */
199 dev->addr_len = 6;
200 for(i = 0; i < 6; i++)
201 dev->dev_addr[i] = tms_pci_sifreadw(dev, SIFINC) >> 8;
202}
203
204static unsigned short tms_pci_setnselout_pins(struct net_device *dev)
205{
206 unsigned short val = 0;
207 struct net_local *tp = netdev_priv(dev);
208 struct card_info *cardinfo = tp->tmspriv;
209
210 if(tp->DataRate == SPEED_4)
211 val |= cardinfo->nselout[0]; /* Set 4Mbps */
212 else
213 val |= cardinfo->nselout[1]; /* Set 16Mbps */
214 return val;
215}
216
217static void __devexit tms_pci_detach (struct pci_dev *pdev)
218{
219 struct net_device *dev = pci_get_drvdata(pdev);
220
221 BUG_ON(!dev);
222 unregister_netdev(dev);
223 release_region(dev->base_addr, TMS_PCI_IO_EXTENT);
224 free_irq(dev->irq, dev);
225 tmsdev_term(dev);
226 free_netdev(dev);
227 pci_set_drvdata(pdev, NULL);
228}
229
/* PCI driver glue: match table plus probe/remove callbacks. */
static struct pci_driver tms_pci_driver = {
	.name		= "tmspci",
	.id_table	= tmspci_pci_tbl,
	.probe		= tms_pci_attach,
	.remove		= __devexit_p(tms_pci_detach),
};
236
237static int __init tms_pci_init (void)
238{
239 return pci_register_driver(&tms_pci_driver);
240}
241
242static void __exit tms_pci_rmmod (void)
243{
244 pci_unregister_driver (&tms_pci_driver);
245}
246
247module_init(tms_pci_init);
248module_exit(tms_pci_rmmod);
249