Diffstat (limited to 'drivers/atm/fore200e.c')
-rw-r--r--  drivers/atm/fore200e.c  3249
1 file changed, 3249 insertions, 0 deletions
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
new file mode 100644
index 000000000000..196b33644627
--- /dev/null
+++ b/drivers/atm/fore200e.c
@@ -0,0 +1,3249 @@
1/*
2 $Id: fore200e.c,v 1.5 2000/04/14 10:10:34 davem Exp $
3
4 A FORE Systems 200E-series driver for ATM on Linux.
5 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
6
7 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
8
9 This driver simultaneously supports PCA-200E and SBA-200E adapters
10 on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
11
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
16
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25*/
26
27
28#include <linux/config.h>
29#include <linux/kernel.h>
30#include <linux/slab.h>
31#include <linux/init.h>
32#include <linux/capability.h>
33#include <linux/sched.h>
34#include <linux/interrupt.h>
35#include <linux/bitops.h>
36#include <linux/pci.h>
37#include <linux/module.h>
38#include <linux/atmdev.h>
39#include <linux/sonet.h>
40#include <linux/atm_suni.h>
41#include <linux/dma-mapping.h>
42#include <linux/delay.h>
43#include <asm/io.h>
44#include <asm/string.h>
45#include <asm/page.h>
46#include <asm/irq.h>
47#include <asm/dma.h>
48#include <asm/byteorder.h>
49#include <asm/uaccess.h>
50#include <asm/atomic.h>
51
52#ifdef CONFIG_ATM_FORE200E_SBA
53#include <asm/idprom.h>
54#include <asm/sbus.h>
55#include <asm/openprom.h>
56#include <asm/oplib.h>
57#include <asm/pgtable.h>
58#endif
59
60#if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
61#define FORE200E_USE_TASKLET
62#endif
63
64#if 0 /* enable the debugging code of the buffer supply queues */
65#define FORE200E_BSQ_DEBUG
66#endif
67
68#if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
69#define FORE200E_52BYTE_AAL0_SDU
70#endif
71
72#include "fore200e.h"
73#include "suni.h"
74
75#define FORE200E_VERSION "0.3e"
76
77#define FORE200E "fore200e: "
78
79#if 0 /* override .config */
80#define CONFIG_ATM_FORE200E_DEBUG 1
81#endif
82#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
83#define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
84 printk(FORE200E format, ##args); } while (0)
85#else
86#define DPRINTK(level, format, args...) do {} while (0)
87#endif
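/* illustrative note: a call such as DPRINTK(2, "device %s mapped\n", fore200e->name)
   results in a printk() prefixed with "fore200e: " whenever CONFIG_ATM_FORE200E_DEBUG >= 2,
   and compiles down to the empty do {} while (0) when debugging is disabled. */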
88
89
90#define FORE200E_ALIGN(addr, alignment) \
91 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
92
93#define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))
94
95#define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
96
97#define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo))
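/* illustrative examples: with alloc_addr ending in 0x06 and alignment = 32, FORE200E_ALIGN()
   yields ((0x06 + 0x1f) & ~0x1f) - 0x06 = 26, the padding needed to reach the next 32-byte
   boundary; FORE200E_NEXT_ENTRY(i, QUEUE_SIZE_TX) advances a ring index, wrapping back to 0
   after the last entry. */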
98
99#if 1
100#define ASSERT(expr) if (!(expr)) { \
101 printk(FORE200E "assertion failed! %s[%d]: %s\n", \
102 __FUNCTION__, __LINE__, #expr); \
103 panic(FORE200E "%s", __FUNCTION__); \
104 }
105#else
106#define ASSERT(expr) do {} while (0)
107#endif
108
109
110static const struct atmdev_ops fore200e_ops;
111static const struct fore200e_bus fore200e_bus[];
112
113static LIST_HEAD(fore200e_boards);
114
115
116MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
117MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
118MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
119
120
121static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
122 { BUFFER_S1_NBR, BUFFER_L1_NBR },
123 { BUFFER_S2_NBR, BUFFER_L2_NBR }
124};
125
126static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
127 { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
128 { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
129};
130
131
132#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
133static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
134#endif
135
136
137#if 0 /* currently unused */
138static int
139fore200e_fore2atm_aal(enum fore200e_aal aal)
140{
141 switch(aal) {
142 case FORE200E_AAL0: return ATM_AAL0;
143 case FORE200E_AAL34: return ATM_AAL34;
144 case FORE200E_AAL5: return ATM_AAL5;
145 }
146
147 return -EINVAL;
148}
149#endif
150
151
152static enum fore200e_aal
153fore200e_atm2fore_aal(int aal)
154{
155 switch(aal) {
156 case ATM_AAL0: return FORE200E_AAL0;
157 case ATM_AAL34: return FORE200E_AAL34;
158 case ATM_AAL1:
159 case ATM_AAL2:
160 case ATM_AAL5: return FORE200E_AAL5;
161 }
162
163 return -EINVAL;
164}
165
166
167static char*
168fore200e_irq_itoa(int irq)
169{
170#if defined(__sparc_v9__)
171 return __irq_itoa(irq);
172#else
173 static char str[8];
174 sprintf(str, "%d", irq);
175 return str;
176#endif
177}
178
179
180static void*
181fore200e_kmalloc(int size, int flags)
182{
183 void* chunk = kmalloc(size, flags);
184
185 if (chunk)
186 memset(chunk, 0x00, size);
187 else
188 printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags);
189
190 return chunk;
191}
192
193
194static void
195fore200e_kfree(void* chunk)
196{
197 kfree(chunk);
198}
199
200
201/* allocate and align a chunk of memory intended to hold the data being exchanged
202 between the driver and the adapter (using streaming DVMA) */
203
204static int
205fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
206{
207 unsigned long offset = 0;
208
209 if (alignment <= sizeof(int))
210 alignment = 0;
211
212 chunk->alloc_size = size + alignment;
213 chunk->align_size = size;
214 chunk->direction = direction;
215
216 chunk->alloc_addr = fore200e_kmalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
217 if (chunk->alloc_addr == NULL)
218 return -ENOMEM;
219
220 if (alignment > 0)
221 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
222
223 chunk->align_addr = chunk->alloc_addr + offset;
224
225 chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
226
227 return 0;
228}
229
230
231/* free a chunk of memory */
232
233static void
234fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
235{
236 fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
237
238 fore200e_kfree(chunk->alloc_addr);
239}
240
241
242static void
243fore200e_spin(int msecs)
244{
245 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
246 while (time_before(jiffies, timeout));
247}
248
249
250static int
251fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
252{
253 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
254 int ok;
255
256 mb();
257 do {
258 if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
259 break;
260
261 } while (time_before(jiffies, timeout));
262
263#if 1
264 if (!ok) {
265 printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
266 *addr, val);
267 }
268#endif
269
270 return ok;
271}
272
273
274static int
275fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
276{
277 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
278 int ok;
279
280 do {
281 if ((ok = (fore200e->bus->read(addr) == val)))
282 break;
283
284 } while (time_before(jiffies, timeout));
285
286#if 1
287 if (!ok) {
288 printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
289 fore200e->bus->read(addr), val);
290 }
291#endif
292
293 return ok;
294}
295
296
297static void
298fore200e_free_rx_buf(struct fore200e* fore200e)
299{
300 int scheme, magn, nbr;
301 struct buffer* buffer;
302
303 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
304 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
305
306 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
307
308 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
309
310 struct chunk* data = &buffer[ nbr ].data;
311
312 if (data->alloc_addr != NULL)
313 fore200e_chunk_free(fore200e, data);
314 }
315 }
316 }
317 }
318}
319
320
321static void
322fore200e_uninit_bs_queue(struct fore200e* fore200e)
323{
324 int scheme, magn;
325
326 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
327 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
328
329 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
330 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
331
332 if (status->alloc_addr)
333 fore200e->bus->dma_chunk_free(fore200e, status);
334
335 if (rbd_block->alloc_addr)
336 fore200e->bus->dma_chunk_free(fore200e, rbd_block);
337 }
338 }
339}
340
341
342static int
343fore200e_reset(struct fore200e* fore200e, int diag)
344{
345 int ok;
346
347 fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
348
349 fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
350
351 fore200e->bus->reset(fore200e);
352
353 if (diag) {
354 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
355 if (ok == 0) {
356
357 printk(FORE200E "device %s self-test failed\n", fore200e->name);
358 return -ENODEV;
359 }
360
361 printk(FORE200E "device %s self-test passed\n", fore200e->name);
362
363 fore200e->state = FORE200E_STATE_RESET;
364 }
365
366 return 0;
367}
368
369
370static void
371fore200e_shutdown(struct fore200e* fore200e)
372{
373 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
374 fore200e->name, fore200e->phys_base,
375 fore200e_irq_itoa(fore200e->irq));
376
377 if (fore200e->state > FORE200E_STATE_RESET) {
378 /* first, reset the board to prevent further interrupts or data transfers */
379 fore200e_reset(fore200e, 0);
380 }
381
382 /* then, release all allocated resources */
383 switch(fore200e->state) {
384
385 case FORE200E_STATE_COMPLETE:
386 if (fore200e->stats)
387 kfree(fore200e->stats);
388
389 case FORE200E_STATE_IRQ:
390 free_irq(fore200e->irq, fore200e->atm_dev);
391
392 case FORE200E_STATE_ALLOC_BUF:
393 fore200e_free_rx_buf(fore200e);
394
395 case FORE200E_STATE_INIT_BSQ:
396 fore200e_uninit_bs_queue(fore200e);
397
398 case FORE200E_STATE_INIT_RXQ:
399 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
400 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
401
402 case FORE200E_STATE_INIT_TXQ:
403 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
404 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
405
406 case FORE200E_STATE_INIT_CMDQ:
407 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
408
409 case FORE200E_STATE_INITIALIZE:
410 /* nothing to do for that state */
411
412 case FORE200E_STATE_START_FW:
413 /* nothing to do for that state */
414
415 case FORE200E_STATE_LOAD_FW:
416 /* nothing to do for that state */
417
418 case FORE200E_STATE_RESET:
419 /* nothing to do for that state */
420
421 case FORE200E_STATE_MAP:
422 fore200e->bus->unmap(fore200e);
423
424 case FORE200E_STATE_CONFIGURE:
425 /* nothing to do for that state */
426
427 case FORE200E_STATE_REGISTER:
428 /* XXX shouldn't we *start* by deregistering the device? */
429 atm_dev_deregister(fore200e->atm_dev);
430
431 case FORE200E_STATE_BLANK:
432 /* nothing to do for that state */
433 break;
434 }
435}
436
437
438#ifdef CONFIG_ATM_FORE200E_PCA
439
440static u32 fore200e_pca_read(volatile u32 __iomem *addr)
441{
442 /* on big-endian hosts, the board is configured to convert
443 the endianness of slave RAM accesses */
444 return le32_to_cpu(readl(addr));
445}
446
447
448static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
449{
450 /* on big-endian hosts, the board is configured to convert
451 the endianness of slave RAM accesses */
452 writel(cpu_to_le32(val), addr);
453}
454
455
456static u32
457fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
458{
459 u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
460
461 DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
462 virt_addr, size, direction, dma_addr);
463
464 return dma_addr;
465}
466
467
468static void
469fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
470{
471 DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
472 dma_addr, size, direction);
473
474 pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
475}
476
477
478static void
479fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
480{
481 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
482
483 pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
484}
485
486static void
487fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
488{
489 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
490
491 pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
492}
493
494
495/* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
496 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
497
498static int
499fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
500 int size, int nbr, int alignment)
501{
502 /* returned chunks are page-aligned */
503 chunk->alloc_size = size * nbr;
504 chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
505 chunk->alloc_size,
506 &chunk->dma_addr);
507
508 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
509 return -ENOMEM;
510
511 chunk->align_addr = chunk->alloc_addr;
512
513 return 0;
514}
515
516
517/* free a DMA consistent chunk of memory */
518
519static void
520fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
521{
522 pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
523 chunk->alloc_size,
524 chunk->alloc_addr,
525 chunk->dma_addr);
526}
527
528
529static int
530fore200e_pca_irq_check(struct fore200e* fore200e)
531{
532 /* this is a 1 bit register */
533 int irq_posted = readl(fore200e->regs.pca.psr);
534
535#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
536 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
537 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
538 }
539#endif
540
541 return irq_posted;
542}
543
544
545static void
546fore200e_pca_irq_ack(struct fore200e* fore200e)
547{
548 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
549}
550
551
552static void
553fore200e_pca_reset(struct fore200e* fore200e)
554{
555 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
556 fore200e_spin(10);
557 writel(0, fore200e->regs.pca.hcr);
558}
559
560
561static int __init
562fore200e_pca_map(struct fore200e* fore200e)
563{
564 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
565
566 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
567
568 if (fore200e->virt_base == NULL) {
569 printk(FORE200E "can't map device %s\n", fore200e->name);
570 return -EFAULT;
571 }
572
573 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
574
575 /* gain access to the PCA specific registers */
576 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
577 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
578 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
579
580 fore200e->state = FORE200E_STATE_MAP;
581 return 0;
582}
583
584
585static void
586fore200e_pca_unmap(struct fore200e* fore200e)
587{
588 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
589
590 if (fore200e->virt_base != NULL)
591 iounmap(fore200e->virt_base);
592}
593
594
595static int __init
596fore200e_pca_configure(struct fore200e* fore200e)
597{
598 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
599 u8 master_ctrl, latency;
600
601 DPRINTK(2, "device %s being configured\n", fore200e->name);
602
603 if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
604 printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
605 return -EIO;
606 }
607
608 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
609
610 master_ctrl = master_ctrl
611#if defined(__BIG_ENDIAN)
612 /* request the PCA board to convert the endianness of slave RAM accesses */
613 | PCA200E_CTRL_CONVERT_ENDIAN
614#endif
615#if 0
616 | PCA200E_CTRL_DIS_CACHE_RD
617 | PCA200E_CTRL_DIS_WRT_INVAL
618 | PCA200E_CTRL_ENA_CONT_REQ_MODE
619 | PCA200E_CTRL_2_CACHE_WRT_INVAL
620#endif
621 | PCA200E_CTRL_LARGE_PCI_BURSTS;
622
623 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
624
625 /* raise latency from 32 (default) to 192, as this seems to prevent NIC
626 lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
627 this may impact the performance of other PCI devices on the same bus, though */
628 latency = 192;
629 pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
630
631 fore200e->state = FORE200E_STATE_CONFIGURE;
632 return 0;
633}
634
635
636static int __init
637fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
638{
639 struct host_cmdq* cmdq = &fore200e->host_cmdq;
640 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
641 struct prom_opcode opcode;
642 int ok;
643 u32 prom_dma;
644
645 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
646
647 opcode.opcode = OPCODE_GET_PROM;
648 opcode.pad = 0;
649
650 prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
651
652 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
653
654 *entry->status = STATUS_PENDING;
655
656 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
657
658 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
659
660 *entry->status = STATUS_FREE;
661
662 fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
663
664 if (ok == 0) {
665 printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
666 return -EIO;
667 }
668
669#if defined(__BIG_ENDIAN)
670
671#define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
672
673 /* MAC address is stored as little-endian */
674 swap_here(&prom->mac_addr[0]);
675 swap_here(&prom->mac_addr[4]);
676#endif
677
678 return 0;
679}
680
681
682static int
683fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
684{
685 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
686
687 return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
688 pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
689}
690
691#endif /* CONFIG_ATM_FORE200E_PCA */
692
693
694#ifdef CONFIG_ATM_FORE200E_SBA
695
696static u32
697fore200e_sba_read(volatile u32 __iomem *addr)
698{
699 return sbus_readl(addr);
700}
701
702
703static void
704fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
705{
706 sbus_writel(val, addr);
707}
708
709
710static u32
711fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
712{
713 u32 dma_addr = sbus_map_single((struct sbus_dev*)fore200e->bus_dev, virt_addr, size, direction);
714
715 DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
716 virt_addr, size, direction, dma_addr);
717
718 return dma_addr;
719}
720
721
722static void
723fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
724{
725 DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
726 dma_addr, size, direction);
727
728 sbus_unmap_single((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
729}
730
731
732static void
733fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
734{
735 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
736
737 sbus_dma_sync_single_for_cpu((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
738}
739
740static void
741fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
742{
743 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
744
745 sbus_dma_sync_single_for_device((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
746}
747
748
749/* allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
750 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
751
752static int
753fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
754 int size, int nbr, int alignment)
755{
756 chunk->alloc_size = chunk->align_size = size * nbr;
757
758 /* returned chunks are page-aligned */
759 chunk->alloc_addr = sbus_alloc_consistent((struct sbus_dev*)fore200e->bus_dev,
760 chunk->alloc_size,
761 &chunk->dma_addr);
762
763 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
764 return -ENOMEM;
765
766 chunk->align_addr = chunk->alloc_addr;
767
768 return 0;
769}
770
771
772/* free a DVMA consistent chunk of memory */
773
774static void
775fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
776{
777 sbus_free_consistent((struct sbus_dev*)fore200e->bus_dev,
778 chunk->alloc_size,
779 chunk->alloc_addr,
780 chunk->dma_addr);
781}
782
783
784static void
785fore200e_sba_irq_enable(struct fore200e* fore200e)
786{
787 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
788 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
789}
790
791
792static int
793fore200e_sba_irq_check(struct fore200e* fore200e)
794{
795 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
796}
797
798
799static void
800fore200e_sba_irq_ack(struct fore200e* fore200e)
801{
802 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
803 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
804}
805
806
807static void
808fore200e_sba_reset(struct fore200e* fore200e)
809{
810 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
811 fore200e_spin(10);
812 fore200e->bus->write(0, fore200e->regs.sba.hcr);
813}
814
815
816static int __init
817fore200e_sba_map(struct fore200e* fore200e)
818{
819 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
820 unsigned int bursts;
821
822 /* gain access to the SBA specific registers */
823 fore200e->regs.sba.hcr = sbus_ioremap(&sbus_dev->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
824 fore200e->regs.sba.bsr = sbus_ioremap(&sbus_dev->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
825 fore200e->regs.sba.isr = sbus_ioremap(&sbus_dev->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
826 fore200e->virt_base = sbus_ioremap(&sbus_dev->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
827
828 if (fore200e->virt_base == NULL) {
829 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
830 return -EFAULT;
831 }
832
833 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
834
835 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
836
837 /* get the supported DVMA burst sizes */
838 bursts = prom_getintdefault(sbus_dev->bus->prom_node, "burst-sizes", 0x00);
839
840 if (sbus_can_dma_64bit(sbus_dev))
841 sbus_set_sbus64(sbus_dev, bursts);
842
843 fore200e->state = FORE200E_STATE_MAP;
844 return 0;
845}
846
847
848static void
849fore200e_sba_unmap(struct fore200e* fore200e)
850{
851 sbus_iounmap(fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
852 sbus_iounmap(fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
853 sbus_iounmap(fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
854 sbus_iounmap(fore200e->virt_base, SBA200E_RAM_LENGTH);
855}
856
857
858static int __init
859fore200e_sba_configure(struct fore200e* fore200e)
860{
861 fore200e->state = FORE200E_STATE_CONFIGURE;
862 return 0;
863}
864
865
866static struct fore200e* __init
867fore200e_sba_detect(const struct fore200e_bus* bus, int index)
868{
869 struct fore200e* fore200e;
870 struct sbus_bus* sbus_bus;
871 struct sbus_dev* sbus_dev = NULL;
872
873 unsigned int count = 0;
874
875 for_each_sbus (sbus_bus) {
876 for_each_sbusdev (sbus_dev, sbus_bus) {
877 if (strcmp(sbus_dev->prom_name, SBA200E_PROM_NAME) == 0) {
878 if (count >= index)
879 goto found;
880 count++;
881 }
882 }
883 }
884 return NULL;
885
886 found:
887 if (sbus_dev->num_registers != 4) {
888 printk(FORE200E "this %s device has %d instead of 4 registers\n",
889 bus->model_name, sbus_dev->num_registers);
890 return NULL;
891 }
892
893 fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
894 if (fore200e == NULL)
895 return NULL;
896
897 fore200e->bus = bus;
898 fore200e->bus_dev = sbus_dev;
899 fore200e->irq = sbus_dev->irqs[ 0 ];
900
901 fore200e->phys_base = (unsigned long)sbus_dev;
902
903 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
904
905 return fore200e;
906}
907
908
909static int __init
910fore200e_sba_prom_read(struct fore200e* fore200e, struct prom_data* prom)
911{
912 struct sbus_dev* sbus_dev = (struct sbus_dev*) fore200e->bus_dev;
913 int len;
914
915 len = prom_getproperty(sbus_dev->prom_node, "macaddrlo2", &prom->mac_addr[ 4 ], 4);
916 if (len < 0)
917 return -EBUSY;
918
919 len = prom_getproperty(sbus_dev->prom_node, "macaddrhi4", &prom->mac_addr[ 2 ], 4);
920 if (len < 0)
921 return -EBUSY;
922
923 prom_getproperty(sbus_dev->prom_node, "serialnumber",
924 (char*)&prom->serial_number, sizeof(prom->serial_number));
925
926 prom_getproperty(sbus_dev->prom_node, "promversion",
927 (char*)&prom->hw_revision, sizeof(prom->hw_revision));
928
929 return 0;
930}
931
932
933static int
934fore200e_sba_proc_read(struct fore200e* fore200e, char *page)
935{
936 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
937
938 return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", sbus_dev->slot, sbus_dev->prom_name);
939}
940#endif /* CONFIG_ATM_FORE200E_SBA */
941
942
943static void
944fore200e_tx_irq(struct fore200e* fore200e)
945{
946 struct host_txq* txq = &fore200e->host_txq;
947 struct host_txq_entry* entry;
948 struct atm_vcc* vcc;
949 struct fore200e_vc_map* vc_map;
950
951 if (fore200e->host_txq.txing == 0)
952 return;
953
954 for (;;) {
955
956 entry = &txq->host_entry[ txq->tail ];
957
958 if ((*entry->status & STATUS_COMPLETE) == 0) {
959 break;
960 }
961
962 DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
963 entry, txq->tail, entry->vc_map, entry->skb);
964
965 /* free copy of misaligned data */
966 if (entry->data)
967 kfree(entry->data);
968
969 /* remove DMA mapping */
970 fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
971 DMA_TO_DEVICE);
972
973 vc_map = entry->vc_map;
974
975 /* vcc closed since the time the entry was submitted for tx? */
976 if ((vc_map->vcc == NULL) ||
977 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
978
979 DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
980 fore200e->atm_dev->number);
981
982 dev_kfree_skb_any(entry->skb);
983 }
984 else {
985 ASSERT(vc_map->vcc);
986
987 /* vcc closed then immediately re-opened? */
988 if (vc_map->incarn != entry->incarn) {
989
990 /* when a vcc is closed, some PDUs may still be pending in the tx queue.
991 if the same vcc is immediately re-opened, those pending PDUs must
992 not be popped after the completion of their emission, as they refer
993 to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
994 would be decremented by the size of the (unrelated) skb, possibly
995 leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
996 we thus bind the tx entry to the current incarnation of the vcc
997 when the entry is submitted for tx. When the tx later completes,
998 if the incarnation number of the tx entry does not match the one
999 of the vcc, then this implies that the vcc has been closed then re-opened.
1000 we thus just drop the skb here. */
1001
1002 DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
1003 fore200e->atm_dev->number);
1004
1005 dev_kfree_skb_any(entry->skb);
1006 }
1007 else {
1008 vcc = vc_map->vcc;
1009 ASSERT(vcc);
1010
1011 /* notify tx completion */
1012 if (vcc->pop) {
1013 vcc->pop(vcc, entry->skb);
1014 }
1015 else {
1016 dev_kfree_skb_any(entry->skb);
1017 }
1018#if 1
1019 /* race fixed by the above incarnation mechanism, but... */
1020 if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
1021 atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
1022 }
1023#endif
1024 /* check error condition */
1025 if (*entry->status & STATUS_ERROR)
1026 atomic_inc(&vcc->stats->tx_err);
1027 else
1028 atomic_inc(&vcc->stats->tx);
1029 }
1030 }
1031
1032 *entry->status = STATUS_FREE;
1033
1034 fore200e->host_txq.txing--;
1035
1036 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
1037 }
1038}
1039
1040
1041#ifdef FORE200E_BSQ_DEBUG
1042int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
1043{
1044 struct buffer* buffer;
1045 int count = 0;
1046
1047 buffer = bsq->freebuf;
1048 while (buffer) {
1049
1050 if (buffer->supplied) {
1051 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
1052 where, scheme, magn, buffer->index);
1053 }
1054
1055 if (buffer->magn != magn) {
1056 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
1057 where, scheme, magn, buffer->index, buffer->magn);
1058 }
1059
1060 if (buffer->scheme != scheme) {
1061 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
1062 where, scheme, magn, buffer->index, buffer->scheme);
1063 }
1064
1065 if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
1066 printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
1067 where, scheme, magn, buffer->index);
1068 }
1069
1070 count++;
1071 buffer = buffer->next;
1072 }
1073
1074 if (count != bsq->freebuf_count) {
1075 printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
1076 where, scheme, magn, count, bsq->freebuf_count);
1077 }
1078 return 0;
1079}
1080#endif
1081
1082
1083static void
1084fore200e_supply(struct fore200e* fore200e)
1085{
1086 int scheme, magn, i;
1087
1088 struct host_bsq* bsq;
1089 struct host_bsq_entry* entry;
1090 struct buffer* buffer;
1091
1092 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1093 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1094
1095 bsq = &fore200e->host_bsq[ scheme ][ magn ];
1096
1097#ifdef FORE200E_BSQ_DEBUG
1098 bsq_audit(1, bsq, scheme, magn);
1099#endif
1100 while (bsq->freebuf_count >= RBD_BLK_SIZE) {
1101
1102 DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
1103 RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
1104
1105 entry = &bsq->host_entry[ bsq->head ];
1106
1107 for (i = 0; i < RBD_BLK_SIZE; i++) {
1108
1109 /* take the first buffer in the free buffer list */
1110 buffer = bsq->freebuf;
1111 if (!buffer) {
1112 printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
1113 scheme, magn, bsq->freebuf_count);
1114 return;
1115 }
1116 bsq->freebuf = buffer->next;
1117
1118#ifdef FORE200E_BSQ_DEBUG
1119 if (buffer->supplied)
1120 printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
1121 scheme, magn, buffer->index);
1122 buffer->supplied = 1;
1123#endif
1124 entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
1125 entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer);
1126 }
1127
1128 FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
1129
1130 /* decrease the number of free rx buffers accordingly */
1131 bsq->freebuf_count -= RBD_BLK_SIZE;
1132
1133 *entry->status = STATUS_PENDING;
1134 fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
1135 }
1136 }
1137 }
1138}
1139
1140
1141static int
1142fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
1143{
1144 struct sk_buff* skb;
1145 struct buffer* buffer;
1146 struct fore200e_vcc* fore200e_vcc;
1147 int i, pdu_len = 0;
1148#ifdef FORE200E_52BYTE_AAL0_SDU
1149 u32 cell_header = 0;
1150#endif
1151
1152 ASSERT(vcc);
1153
1154 fore200e_vcc = FORE200E_VCC(vcc);
1155 ASSERT(fore200e_vcc);
1156
1157#ifdef FORE200E_52BYTE_AAL0_SDU
1158 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
1159
1160 cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
1161 (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
1162 (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
1163 (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
1164 rpd->atm_header.clp;
1165 pdu_len = 4;
1166 }
1167#endif
1168
1169 /* compute total PDU length */
1170 for (i = 0; i < rpd->nseg; i++)
1171 pdu_len += rpd->rsd[ i ].length;
1172
1173 skb = alloc_skb(pdu_len, GFP_ATOMIC);
1174 if (skb == NULL) {
1175 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
1176
1177 atomic_inc(&vcc->stats->rx_drop);
1178 return -ENOMEM;
1179 }
1180
1181 do_gettimeofday(&skb->stamp);
1182
1183#ifdef FORE200E_52BYTE_AAL0_SDU
1184 if (cell_header) {
1185 *((u32*)skb_put(skb, 4)) = cell_header;
1186 }
1187#endif
1188
1189 /* reassemble segments */
1190 for (i = 0; i < rpd->nseg; i++) {
1191
1192 /* rebuild rx buffer address from rsd handle */
1193 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1194
1195 /* Make device DMA transfer visible to CPU. */
1196 fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1197
1198 memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);
1199
1200 /* Now let the device get at it again. */
1201 fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1202 }
1203
1204 DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
1205
1206 if (pdu_len < fore200e_vcc->rx_min_pdu)
1207 fore200e_vcc->rx_min_pdu = pdu_len;
1208 if (pdu_len > fore200e_vcc->rx_max_pdu)
1209 fore200e_vcc->rx_max_pdu = pdu_len;
1210 fore200e_vcc->rx_pdu++;
1211
1212 /* push PDU */
1213 if (atm_charge(vcc, skb->truesize) == 0) {
1214
1215 DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1216 vcc->itf, vcc->vpi, vcc->vci);
1217
1218 dev_kfree_skb_any(skb);
1219
1220 atomic_inc(&vcc->stats->rx_drop);
1221 return -ENOMEM;
1222 }
1223
1224 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1225
1226 vcc->push(vcc, skb);
1227 atomic_inc(&vcc->stats->rx);
1228
1229 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1230
1231 return 0;
1232}
1233
1234
1235static void
1236fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1237{
1238 struct host_bsq* bsq;
1239 struct buffer* buffer;
1240 int i;
1241
1242 for (i = 0; i < rpd->nseg; i++) {
1243
1244 /* rebuild rx buffer address from rsd handle */
1245 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1246
1247 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1248
1249#ifdef FORE200E_BSQ_DEBUG
1250 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1251
1252 if (buffer->supplied == 0)
1253 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1254 buffer->scheme, buffer->magn, buffer->index);
1255 buffer->supplied = 0;
1256#endif
1257
1258 /* re-insert the buffer into the free buffer list */
1259 buffer->next = bsq->freebuf;
1260 bsq->freebuf = buffer;
1261
1262 /* then increment the number of free rx buffers */
1263 bsq->freebuf_count++;
1264 }
1265}
1266
1267
1268static void
1269fore200e_rx_irq(struct fore200e* fore200e)
1270{
1271 struct host_rxq* rxq = &fore200e->host_rxq;
1272 struct host_rxq_entry* entry;
1273 struct atm_vcc* vcc;
1274 struct fore200e_vc_map* vc_map;
1275
1276 for (;;) {
1277
1278 entry = &rxq->host_entry[ rxq->head ];
1279
1280 /* no more received PDUs */
1281 if ((*entry->status & STATUS_COMPLETE) == 0)
1282 break;
1283
1284 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1285
1286 if ((vc_map->vcc == NULL) ||
1287 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1288
1289 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1290 fore200e->atm_dev->number,
1291 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1292 }
1293 else {
1294 vcc = vc_map->vcc;
1295 ASSERT(vcc);
1296
1297 if ((*entry->status & STATUS_ERROR) == 0) {
1298
1299 fore200e_push_rpd(fore200e, vcc, entry->rpd);
1300 }
1301 else {
1302 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1303 fore200e->atm_dev->number,
1304 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1305 atomic_inc(&vcc->stats->rx_err);
1306 }
1307 }
1308
1309 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
1310
1311 fore200e_collect_rpd(fore200e, entry->rpd);
1312
1313 /* rewrite the rpd address to ack the received PDU */
1314 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1315 *entry->status = STATUS_FREE;
1316
1317 fore200e_supply(fore200e);
1318 }
1319}
1320
1321
1322#ifndef FORE200E_USE_TASKLET
1323static void
1324fore200e_irq(struct fore200e* fore200e)
1325{
1326 unsigned long flags;
1327
1328 spin_lock_irqsave(&fore200e->q_lock, flags);
1329 fore200e_rx_irq(fore200e);
1330 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1331
1332 spin_lock_irqsave(&fore200e->q_lock, flags);
1333 fore200e_tx_irq(fore200e);
1334 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1335}
1336#endif
1337
1338
1339static irqreturn_t
1340fore200e_interrupt(int irq, void* dev, struct pt_regs* regs)
1341{
1342 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1343
1344 if (fore200e->bus->irq_check(fore200e) == 0) {
1345
1346 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1347 return IRQ_NONE;
1348 }
1349 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1350
1351#ifdef FORE200E_USE_TASKLET
1352 tasklet_schedule(&fore200e->tx_tasklet);
1353 tasklet_schedule(&fore200e->rx_tasklet);
1354#else
1355 fore200e_irq(fore200e);
1356#endif
1357
1358 fore200e->bus->irq_ack(fore200e);
1359 return IRQ_HANDLED;
1360}
1361
1362
1363#ifdef FORE200E_USE_TASKLET
1364static void
1365fore200e_tx_tasklet(unsigned long data)
1366{
1367 struct fore200e* fore200e = (struct fore200e*) data;
1368 unsigned long flags;
1369
1370 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1371
1372 spin_lock_irqsave(&fore200e->q_lock, flags);
1373 fore200e_tx_irq(fore200e);
1374 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1375}
1376
1377
1378static void
1379fore200e_rx_tasklet(unsigned long data)
1380{
1381 struct fore200e* fore200e = (struct fore200e*) data;
1382 unsigned long flags;
1383
1384 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1385
1386 spin_lock_irqsave(&fore200e->q_lock, flags);
1387 fore200e_rx_irq((struct fore200e*) data);
1388 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1389}
1390#endif
1391
1392
1393static int
1394fore200e_select_scheme(struct atm_vcc* vcc)
1395{
1396 /* fairly balance the VCs over (identical) buffer schemes */
1397 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1398
1399 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1400 vcc->itf, vcc->vpi, vcc->vci, scheme);
1401
1402 return scheme;
1403}
1404
1405
1406static int
1407fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1408{
1409 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1410 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1411 struct activate_opcode activ_opcode;
1412 struct deactivate_opcode deactiv_opcode;
1413 struct vpvc vpvc;
1414 int ok;
1415 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);
1416
1417 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1418
1419 if (activate) {
1420 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1421
1422 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1423 activ_opcode.aal = aal;
1424 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1425 activ_opcode.pad = 0;
1426 }
1427 else {
1428 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1429 deactiv_opcode.pad = 0;
1430 }
1431
1432 vpvc.vci = vcc->vci;
1433 vpvc.vpi = vcc->vpi;
1434
1435 *entry->status = STATUS_PENDING;
1436
1437 if (activate) {
1438
1439#ifdef FORE200E_52BYTE_AAL0_SDU
1440 mtu = 48;
1441#endif
1442 /* the MTU is not used by the cp, except in the case of AAL0 */
1443 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
1444 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1445 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1446 }
1447 else {
1448 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1449 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1450 }
1451
1452 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1453
1454 *entry->status = STATUS_FREE;
1455
1456 if (ok == 0) {
1457 printk(FORE200E "unable to %s VC %d.%d.%d\n",
1458 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1459 return -EIO;
1460 }
1461
1462 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1463 activate ? "open" : "clos");
1464
1465 return 0;
1466}
1467
1468
1469#define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
1470
1471static void
1472fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1473{
1474 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1475
1476 /* compute the data cells to idle cells ratio from the tx PCR */
1477 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1478 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1479 }
1480 else {
1481 /* disable rate control */
1482 rate->data_cells = rate->idle_cells = 0;
1483 }
1484}
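/* worked example (assuming ATM_OC3_PCR evaluates to 353207 cells/s): a requested max_pcr of
   100000 gives data_cells = 100000 * 255 / 353207 = 72 and idle_cells = 183, i.e. a
   data-to-idle ratio of 72:183, which works out to about 99700 cells/s, just under the
   requested PCR. */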
1485
1486
1487static int
1488fore200e_open(struct atm_vcc *vcc)
1489{
1490 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1491 struct fore200e_vcc* fore200e_vcc;
1492 struct fore200e_vc_map* vc_map;
1493 unsigned long flags;
1494 int vci = vcc->vci;
1495 short vpi = vcc->vpi;
1496
1497 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1498 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1499
1500 spin_lock_irqsave(&fore200e->q_lock, flags);
1501
1502 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1503 if (vc_map->vcc) {
1504
1505 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1506
1507 printk(FORE200E "VC %d.%d.%d already in use\n",
1508 fore200e->atm_dev->number, vpi, vci);
1509
1510 return -EINVAL;
1511 }
1512
1513 vc_map->vcc = vcc;
1514
1515 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1516
1517 fore200e_vcc = fore200e_kmalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1518 if (fore200e_vcc == NULL) {
1519 vc_map->vcc = NULL;
1520 return -ENOMEM;
1521 }
1522
1523 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1524 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1525 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1526 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1527 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1528 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1529 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1530
1531 /* pseudo-CBR bandwidth requested? */
1532 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1533
1534 down(&fore200e->rate_sf);
1535 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1536 up(&fore200e->rate_sf);
1537
1538 fore200e_kfree(fore200e_vcc);
1539 vc_map->vcc = NULL;
1540 return -EAGAIN;
1541 }
1542
1543 /* reserve bandwidth */
1544 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1545 up(&fore200e->rate_sf);
1546 }
1547
1548 vcc->itf = vcc->dev->number;
1549
1550 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1551 set_bit(ATM_VF_ADDR, &vcc->flags);
1552
1553 vcc->dev_data = fore200e_vcc;
1554
1555 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1556
1557 vc_map->vcc = NULL;
1558
1559 clear_bit(ATM_VF_ADDR, &vcc->flags);
1560 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1561
1562 vcc->dev_data = NULL;
1563
1564 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1565
1566 fore200e_kfree(fore200e_vcc);
1567 return -EINVAL;
1568 }
1569
1570 /* compute rate control parameters */
1571 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1572
1573 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1574 set_bit(ATM_VF_HASQOS, &vcc->flags);
1575
1576 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1577 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1578 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1579 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1580 }
1581
1582 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1583 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1584 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1585
1586 /* new incarnation of the vcc */
1587 vc_map->incarn = ++fore200e->incarn_count;
1588
1589 /* VC unusable before this flag is set */
1590 set_bit(ATM_VF_READY, &vcc->flags);
1591
1592 return 0;
1593}
1594
1595
1596static void
1597fore200e_close(struct atm_vcc* vcc)
1598{
1599 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1600 struct fore200e_vcc* fore200e_vcc;
1601 struct fore200e_vc_map* vc_map;
1602 unsigned long flags;
1603
1604 ASSERT(vcc);
1605 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1606 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1607
1608 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
1609
1610 clear_bit(ATM_VF_READY, &vcc->flags);
1611
1612 fore200e_activate_vcin(fore200e, 0, vcc, 0);
1613
1614 spin_lock_irqsave(&fore200e->q_lock, flags);
1615
1616 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1617
1618 /* the vc is no longer considered as "in use" by fore200e_open() */
1619 vc_map->vcc = NULL;
1620
1621 vcc->itf = vcc->vci = vcc->vpi = 0;
1622
1623 fore200e_vcc = FORE200E_VCC(vcc);
1624 vcc->dev_data = NULL;
1625
1626 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1627
1628 /* release reserved bandwidth, if any */
1629 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1630
1631 down(&fore200e->rate_sf);
1632 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1633 up(&fore200e->rate_sf);
1634
1635 clear_bit(ATM_VF_HASQOS, &vcc->flags);
1636 }
1637
1638 clear_bit(ATM_VF_ADDR, &vcc->flags);
1639 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1640
1641 ASSERT(fore200e_vcc);
1642 fore200e_kfree(fore200e_vcc);
1643}
1644
1645
1646static int
1647fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1648{
1649 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1650 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1651 struct fore200e_vc_map* vc_map;
1652 struct host_txq* txq = &fore200e->host_txq;
1653 struct host_txq_entry* entry;
1654 struct tpd* tpd;
1655 struct tpd_haddr tpd_haddr;
1656 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1657 int tx_copy = 0;
1658 int tx_len = skb->len;
1659 u32* cell_header = NULL;
1660 unsigned char* skb_data;
1661 int skb_len;
1662 unsigned char* data;
1663 unsigned long flags;
1664
1665 ASSERT(vcc);
1666 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1667 ASSERT(fore200e);
1668 ASSERT(fore200e_vcc);
1669
1670 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1671 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vci);
1672 dev_kfree_skb_any(skb);
1673 return -EINVAL;
1674 }
1675
1676#ifdef FORE200E_52BYTE_AAL0_SDU
1677 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1678 cell_header = (u32*) skb->data;
1679 skb_data = skb->data + 4; /* skip 4-byte cell header */
1680 skb_len = tx_len = skb->len - 4;
1681
1682 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1683 }
1684 else
1685#endif
1686 {
1687 skb_data = skb->data;
1688 skb_len = skb->len;
1689 }
1690
1691 if (((unsigned long)skb_data) & 0x3) {
1692
1693 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1694 tx_copy = 1;
1695 tx_len = skb_len;
1696 }
1697
1698 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1699
1700 /* this simply NUKES the PCA board */
1701 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1702 tx_copy = 1;
1703 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1704 }
1705
1706 if (tx_copy) {
1707 data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
1708 if (data == NULL) {
1709 if (vcc->pop) {
1710 vcc->pop(vcc, skb);
1711 }
1712 else {
1713 dev_kfree_skb_any(skb);
1714 }
1715 return -ENOMEM;
1716 }
1717
1718 memcpy(data, skb_data, skb_len);
1719 if (skb_len < tx_len)
1720 memset(data + skb_len, 0x00, tx_len - skb_len);
1721 }
1722 else {
1723 data = skb_data;
1724 }
1725
1726 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1727 ASSERT(vc_map->vcc == vcc);
1728
1729 retry_here:
1730
1731 spin_lock_irqsave(&fore200e->q_lock, flags);
1732
1733 entry = &txq->host_entry[ txq->head ];
1734
1735 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1736
1737 /* try to free completed tx queue entries */
1738 fore200e_tx_irq(fore200e);
1739
1740 if (*entry->status != STATUS_FREE) {
1741
1742 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1743
1744 /* retry once again? */
1745 if (--retry > 0) {
1746 udelay(50);
1747 goto retry_here;
1748 }
1749
1750 atomic_inc(&vcc->stats->tx_err);
1751
1752 fore200e->tx_sat++;
1753 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1754 fore200e->name, fore200e->cp_queues->heartbeat);
1755 if (vcc->pop) {
1756 vcc->pop(vcc, skb);
1757 }
1758 else {
1759 dev_kfree_skb_any(skb);
1760 }
1761
1762 if (tx_copy)
1763 kfree(data);
1764
1765 return -ENOBUFS;
1766 }
1767 }
1768
1769 entry->incarn = vc_map->incarn;
1770 entry->vc_map = vc_map;
1771 entry->skb = skb;
1772 entry->data = tx_copy ? data : NULL;
1773
1774 tpd = entry->tpd;
1775 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
1776 tpd->tsd[ 0 ].length = tx_len;
1777
1778 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1779 txq->txing++;
1780
1781 /* The dma_map call above implies a dma_sync so the device can use it,
1782 * thus no explicit dma_sync call is necessary here.
1783 */
1784
1785 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1786 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1787 tpd->tsd[0].length, skb_len);
1788
1789 if (skb_len < fore200e_vcc->tx_min_pdu)
1790 fore200e_vcc->tx_min_pdu = skb_len;
1791 if (skb_len > fore200e_vcc->tx_max_pdu)
1792 fore200e_vcc->tx_max_pdu = skb_len;
1793 fore200e_vcc->tx_pdu++;
1794
1795 /* set tx rate control information */
1796 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1797 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1798
1799 if (cell_header) {
1800 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1801 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1802 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1803 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1804 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1805 }
1806 else {
1807 /* set the ATM header, common to all cells conveying the PDU */
1808 tpd->atm_header.clp = 0;
1809 tpd->atm_header.plt = 0;
1810 tpd->atm_header.vci = vcc->vci;
1811 tpd->atm_header.vpi = vcc->vpi;
1812 tpd->atm_header.gfc = 0;
1813 }
1814
1815 tpd->spec.length = tx_len;
1816 tpd->spec.nseg = 1;
1817 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1818 tpd->spec.intr = 1;
1819
1820 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */
1821 tpd_haddr.pad = 0;
1822 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
1823
1824 *entry->status = STATUS_PENDING;
1825 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1826
1827 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1828
1829 return 0;
1830}
1831
1832
1833static int
1834fore200e_getstats(struct fore200e* fore200e)
1835{
1836 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1837 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1838 struct stats_opcode opcode;
1839 int ok;
1840 u32 stats_dma_addr;
1841
1842 if (fore200e->stats == NULL) {
1843 fore200e->stats = fore200e_kmalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
1844 if (fore200e->stats == NULL)
1845 return -ENOMEM;
1846 }
1847
1848 stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
1849 sizeof(struct stats), DMA_FROM_DEVICE);
1850
1851 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1852
1853 opcode.opcode = OPCODE_GET_STATS;
1854 opcode.pad = 0;
1855
1856 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1857
1858 *entry->status = STATUS_PENDING;
1859
1860 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1861
1862 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1863
1864 *entry->status = STATUS_FREE;
1865
1866 fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1867
1868 if (ok == 0) {
1869 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
1870 return -EIO;
1871 }
1872
1873 return 0;
1874}
1875
1876
1877static int
1878fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1879{
1880 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1881
1882 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1883 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1884
1885 return -EINVAL;
1886}
1887
1888
1889static int
1890fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1891{
1892 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1893
1894 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1895 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1896
1897 return -EINVAL;
1898}
1899
1900
1901#if 0 /* currently unused */
1902static int
1903fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1904{
1905 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1906 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1907 struct oc3_opcode opcode;
1908 int ok;
1909 u32 oc3_regs_dma_addr;
1910
1911 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1912
1913 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1914
1915 opcode.opcode = OPCODE_GET_OC3;
1916 opcode.reg = 0;
1917 opcode.value = 0;
1918 opcode.mask = 0;
1919
1920 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1921
1922 *entry->status = STATUS_PENDING;
1923
1924 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1925
1926 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1927
1928 *entry->status = STATUS_FREE;
1929
1930 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1931
1932 if (ok == 0) {
1933 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1934 return -EIO;
1935 }
1936
1937 return 0;
1938}
1939#endif
1940
1941
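/* update an OC-3 (SUNI) register through the firmware: OPCODE_SET_OC3 carries a
   register index, a value and a mask (presumably so that only the masked bits
   are changed by the cp) */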
1942static int
1943fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1944{
1945 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1946 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1947 struct oc3_opcode opcode;
1948 int ok;
1949
1950 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1951
1952 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1953
1954 opcode.opcode = OPCODE_SET_OC3;
1955 opcode.reg = reg;
1956 opcode.value = value;
1957 opcode.mask = mask;
1958
1959 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1960
1961 *entry->status = STATUS_PENDING;
1962
1963 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1964
1965 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1966
1967 *entry->status = STATUS_FREE;
1968
1969 if (ok == 0) {
1970 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1971 return -EIO;
1972 }
1973
1974 return 0;
1975}
1976
1977
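/* map the generic ATM loopback modes onto the SUNI master control register:
   ATM_LM_LOC_PHY selects SUNI_MCT_DLE, ATM_LM_RMT_PHY selects SUNI_MCT_LLE,
   and ATM_LM_NONE clears both bits */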
1978static int
1979fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1980{
1981 u32 mct_value, mct_mask;
1982 int error;
1983
1984 if (!capable(CAP_NET_ADMIN))
1985 return -EPERM;
1986
1987 switch (loop_mode) {
1988
1989 case ATM_LM_NONE:
1990 mct_value = 0;
1991 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
1992 break;
1993
1994 case ATM_LM_LOC_PHY:
1995 mct_value = mct_mask = SUNI_MCT_DLE;
1996 break;
1997
1998 case ATM_LM_RMT_PHY:
1999 mct_value = mct_mask = SUNI_MCT_LLE;
2000 break;
2001
2002 default:
2003 return -EINVAL;
2004 }
2005
2006 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
2007 if (error == 0)
2008 fore200e->loop_mode = loop_mode;
2009
2010 return error;
2011}
2012
2013
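/* the statistics counters maintained by the firmware appear to be stored
   big-endian; convert them to host byte order (a byte swap is only needed
   on little-endian hosts) */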
2014static inline unsigned int
2015fore200e_swap(unsigned int in)
2016{
2017#if defined(__LITTLE_ENDIAN)
2018 return swab32(in);
2019#else
2020 return in;
2021#endif
2022}
2023
2024
2025static int
2026fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
2027{
2028 struct sonet_stats tmp;
2029
2030 if (fore200e_getstats(fore200e) < 0)
2031 return -EIO;
2032
2033 tmp.section_bip = fore200e_swap(fore200e->stats->oc3.section_bip8_errors);
2034 tmp.line_bip = fore200e_swap(fore200e->stats->oc3.line_bip24_errors);
2035 tmp.path_bip = fore200e_swap(fore200e->stats->oc3.path_bip8_errors);
2036 tmp.line_febe = fore200e_swap(fore200e->stats->oc3.line_febe_errors);
2037 tmp.path_febe = fore200e_swap(fore200e->stats->oc3.path_febe_errors);
2038 tmp.corr_hcs = fore200e_swap(fore200e->stats->oc3.corr_hcs_errors);
2039 tmp.uncorr_hcs = fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors);
2040 tmp.tx_cells = fore200e_swap(fore200e->stats->aal0.cells_transmitted) +
2041 fore200e_swap(fore200e->stats->aal34.cells_transmitted) +
2042 fore200e_swap(fore200e->stats->aal5.cells_transmitted);
2043 tmp.rx_cells = fore200e_swap(fore200e->stats->aal0.cells_received) +
2044 fore200e_swap(fore200e->stats->aal34.cells_received) +
2045 fore200e_swap(fore200e->stats->aal5.cells_received);
2046
2047 if (arg)
2048 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
2049
2050 return 0;
2051}
2052
2053
2054static int
2055fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
2056{
2057 struct fore200e* fore200e = FORE200E_DEV(dev);
2058
2059 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
2060
2061 switch (cmd) {
2062
2063 case SONET_GETSTAT:
2064 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
2065
2066 case SONET_GETDIAG:
2067 return put_user(0, (int __user *)arg) ? -EFAULT : 0;
2068
2069 case ATM_SETLOOP:
2070 return fore200e_setloop(fore200e, (int)(unsigned long)arg);
2071
2072 case ATM_GETLOOP:
2073 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
2074
2075 case ATM_QUERYLOOP:
2076 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
2077 }
2078
2079 return -ENOSYS; /* not implemented */
2080}
2081
2082
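/* QoS changes are only honoured for the CBR transmit direction: the requested
   peak cell rate is checked against, and accounted into, the adapter-wide
   available_cell_rate budget under the rate_sf semaphore */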
2083static int
2084fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
2085{
2086 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
2087 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
2088
2089 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
2090 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
2091 return -EINVAL;
2092 }
2093
2094 DPRINTK(2, "change_qos %d.%d.%d, "
2095 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
2096 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
2097 "available_cell_rate = %u",
2098 vcc->itf, vcc->vpi, vcc->vci,
2099 fore200e_traffic_class[ qos->txtp.traffic_class ],
2100 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
2101 fore200e_traffic_class[ qos->rxtp.traffic_class ],
2102 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2103 flags, fore200e->available_cell_rate);
2104
2105 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
2106
2107 down(&fore200e->rate_sf);
2108 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
2109 up(&fore200e->rate_sf);
2110 return -EAGAIN;
2111 }
2112
2113 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
2114 fore200e->available_cell_rate -= qos->txtp.max_pcr;
2115
2116 up(&fore200e->rate_sf);
2117
2118 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
2119
2120 /* update rate control parameters */
2121 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2122
2123 set_bit(ATM_VF_HASQOS, &vcc->flags);
2124
2125 return 0;
2126 }
2127
2128 return -EINVAL;
2129}
2130
2131
2132static int __init
2133fore200e_irq_request(struct fore200e* fore200e)
2134{
2135 if (request_irq(fore200e->irq, fore200e_interrupt, SA_SHIRQ, fore200e->name, fore200e->atm_dev) < 0) {
2136
2137 printk(FORE200E "unable to reserve IRQ %s for device %s\n",
2138 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2139 return -EBUSY;
2140 }
2141
2142 printk(FORE200E "IRQ %s reserved for device %s\n",
2143 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2144
2145#ifdef FORE200E_USE_TASKLET
2146 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
2147 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
2148#endif
2149
2150 fore200e->state = FORE200E_STATE_IRQ;
2151 return 0;
2152}
2153
2154
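/* read the board PROM through the bus-specific prom_read handler and copy the
   end system identifier (ESI) into both the driver and the ATM device */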
2155static int __init
2156fore200e_get_esi(struct fore200e* fore200e)
2157{
2158 struct prom_data* prom = fore200e_kmalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
2159 int ok, i;
2160
2161 if (!prom)
2162 return -ENOMEM;
2163
2164 ok = fore200e->bus->prom_read(fore200e, prom);
2165 if (ok < 0) {
2166 fore200e_kfree(prom);
2167 return -EBUSY;
2168 }
2169
2170 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
2171 fore200e->name,
2172 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
2173 prom->serial_number & 0xFFFF,
2174 prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
2175 prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
2176
2177 for (i = 0; i < ESI_LEN; i++) {
2178 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
2179 }
2180
2181 fore200e_kfree(prom);
2182
2183 return 0;
2184}
2185
2186
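/* allocate the receive buffer pools: one pool per (buffer scheme, buffer
   magnitude) pair, each buffer being chained onto the free list of the
   corresponding buffer supply queue */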
2187static int __init
2188fore200e_alloc_rx_buf(struct fore200e* fore200e)
2189{
2190 int scheme, magn, nbr, size, i;
2191
2192 struct host_bsq* bsq;
2193 struct buffer* buffer;
2194
2195 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2196 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2197
2198 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2199
2200 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
2201 size = fore200e_rx_buf_size[ scheme ][ magn ];
2202
2203 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2204
2205 /* allocate the array of receive buffers */
2206 buffer = bsq->buffer = fore200e_kmalloc(nbr * sizeof(struct buffer), GFP_KERNEL);
2207
2208 if (buffer == NULL)
2209 return -ENOMEM;
2210
2211 bsq->freebuf = NULL;
2212
2213 for (i = 0; i < nbr; i++) {
2214
2215 buffer[ i ].scheme = scheme;
2216 buffer[ i ].magn = magn;
2217#ifdef FORE200E_BSQ_DEBUG
2218 buffer[ i ].index = i;
2219 buffer[ i ].supplied = 0;
2220#endif
2221
2222 /* allocate the receive buffer body */
2223 if (fore200e_chunk_alloc(fore200e,
2224 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2225 DMA_FROM_DEVICE) < 0) {
2226
2227 while (i > 0)
2228 fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2229 fore200e_kfree(buffer);
2230
2231 return -ENOMEM;
2232 }
2233
2234 /* insert the buffer into the free buffer list */
2235 buffer[ i ].next = bsq->freebuf;
2236 bsq->freebuf = &buffer[ i ];
2237 }
2238 /* all the buffers are free, initially */
2239 bsq->freebuf_count = nbr;
2240
2241#ifdef FORE200E_BSQ_DEBUG
2242 bsq_audit(3, bsq, scheme, magn);
2243#endif
2244 }
2245 }
2246
2247 fore200e->state = FORE200E_STATE_ALLOC_BUF;
2248 return 0;
2249}
2250
2251
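/* set up the buffer supply queues: for each (scheme, magnitude) pair, allocate
   the host resident status words and receive buffer descriptor blocks, then
   bind them to the matching cp resident queue entries */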
2252static int __init
2253fore200e_init_bs_queue(struct fore200e* fore200e)
2254{
2255 int scheme, magn, i;
2256
2257 struct host_bsq* bsq;
2258 struct cp_bsq_entry __iomem * cp_entry;
2259
2260 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2261 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2262
2263 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2264
2265 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2266
2267 /* allocate and align the array of status words */
2268 if (fore200e->bus->dma_chunk_alloc(fore200e,
2269 &bsq->status,
2270 sizeof(enum status),
2271 QUEUE_SIZE_BS,
2272 fore200e->bus->status_alignment) < 0) {
2273 return -ENOMEM;
2274 }
2275
2276 /* allocate and align the array of receive buffer descriptors */
2277 if (fore200e->bus->dma_chunk_alloc(fore200e,
2278 &bsq->rbd_block,
2279 sizeof(struct rbd_block),
2280 QUEUE_SIZE_BS,
2281 fore200e->bus->descr_alignment) < 0) {
2282
2283 fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
2284 return -ENOMEM;
2285 }
2286
2287 /* get the base address of the cp resident buffer supply queue entries */
2288 cp_entry = fore200e->virt_base +
2289 fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2290
2291 /* fill the host resident and cp resident buffer supply queue entries */
2292 for (i = 0; i < QUEUE_SIZE_BS; i++) {
2293
2294 bsq->host_entry[ i ].status =
2295 FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2296 bsq->host_entry[ i ].rbd_block =
2297 FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2298 bsq->host_entry[ i ].rbd_block_dma =
2299 FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2300 bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2301
2302 *bsq->host_entry[ i ].status = STATUS_FREE;
2303
2304 fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2305 &cp_entry[ i ].status_haddr);
2306 }
2307 }
2308 }
2309
2310 fore200e->state = FORE200E_STATE_INIT_BSQ;
2311 return 0;
2312}
2313
2314
2315static int __init
2316fore200e_init_rx_queue(struct fore200e* fore200e)
2317{
2318 struct host_rxq* rxq = &fore200e->host_rxq;
2319 struct cp_rxq_entry __iomem * cp_entry;
2320 int i;
2321
2322 DPRINTK(2, "receive queue is being initialized\n");
2323
2324 /* allocate and align the array of status words */
2325 if (fore200e->bus->dma_chunk_alloc(fore200e,
2326 &rxq->status,
2327 sizeof(enum status),
2328 QUEUE_SIZE_RX,
2329 fore200e->bus->status_alignment) < 0) {
2330 return -ENOMEM;
2331 }
2332
2333 /* allocate and align the array of receive PDU descriptors */
2334 if (fore200e->bus->dma_chunk_alloc(fore200e,
2335 &rxq->rpd,
2336 sizeof(struct rpd),
2337 QUEUE_SIZE_RX,
2338 fore200e->bus->descr_alignment) < 0) {
2339
2340 fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
2341 return -ENOMEM;
2342 }
2343
2344 /* get the base address of the cp resident rx queue entries */
2345 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2346
2347 /* fill the host resident and cp resident rx entries */
2348 for (i=0; i < QUEUE_SIZE_RX; i++) {
2349
2350 rxq->host_entry[ i ].status =
2351 FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2352 rxq->host_entry[ i ].rpd =
2353 FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2354 rxq->host_entry[ i ].rpd_dma =
2355 FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2356 rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2357
2358 *rxq->host_entry[ i ].status = STATUS_FREE;
2359
2360 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2361 &cp_entry[ i ].status_haddr);
2362
2363 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2364 &cp_entry[ i ].rpd_haddr);
2365 }
2366
2367 /* set the head entry of the queue */
2368 rxq->head = 0;
2369
2370 fore200e->state = FORE200E_STATE_INIT_RXQ;
2371 return 0;
2372}
2373
2374
2375static int __init
2376fore200e_init_tx_queue(struct fore200e* fore200e)
2377{
2378 struct host_txq* txq = &fore200e->host_txq;
2379 struct cp_txq_entry __iomem * cp_entry;
2380 int i;
2381
2382 DPRINTK(2, "transmit queue is being initialized\n");
2383
2384 /* allocate and align the array of status words */
2385 if (fore200e->bus->dma_chunk_alloc(fore200e,
2386 &txq->status,
2387 sizeof(enum status),
2388 QUEUE_SIZE_TX,
2389 fore200e->bus->status_alignment) < 0) {
2390 return -ENOMEM;
2391 }
2392
2393 /* allocate and align the array of transmit PDU descriptors */
2394 if (fore200e->bus->dma_chunk_alloc(fore200e,
2395 &txq->tpd,
2396 sizeof(struct tpd),
2397 QUEUE_SIZE_TX,
2398 fore200e->bus->descr_alignment) < 0) {
2399
2400 fore200e->bus->dma_chunk_free(fore200e, &txq->status);
2401 return -ENOMEM;
2402 }
2403
2404 /* get the base address of the cp resident tx queue entries */
2405 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2406
2407 /* fill the host resident and cp resident tx entries */
2408 for (i=0; i < QUEUE_SIZE_TX; i++) {
2409
2410 txq->host_entry[ i ].status =
2411 FORE200E_INDEX(txq->status.align_addr, enum status, i);
2412 txq->host_entry[ i ].tpd =
2413 FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2414 txq->host_entry[ i ].tpd_dma =
2415 FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2416 txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2417
2418 *txq->host_entry[ i ].status = STATUS_FREE;
2419
2420 fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2421 &cp_entry[ i ].status_haddr);
2422
2423 /* although there is a one-to-one mapping between tx queue entries and tpds,
2424 we do not write the DMA (physical) base address of each tpd into the
2425 related cp resident entry here, because the cp relies on that very write
2426 operation to detect that a new pdu has been submitted for tx */
2427 }
2428
2429 /* set the head and tail entries of the queue */
2430 txq->head = 0;
2431 txq->tail = 0;
2432
2433 fore200e->state = FORE200E_STATE_INIT_TXQ;
2434 return 0;
2435}
2436
2437
2438static int __init
2439fore200e_init_cmd_queue(struct fore200e* fore200e)
2440{
2441 struct host_cmdq* cmdq = &fore200e->host_cmdq;
2442 struct cp_cmdq_entry __iomem * cp_entry;
2443 int i;
2444
2445 DPRINTK(2, "command queue is being initialized\n");
2446
2447 /* allocate and align the array of status words */
2448 if (fore200e->bus->dma_chunk_alloc(fore200e,
2449 &cmdq->status,
2450 sizeof(enum status),
2451 QUEUE_SIZE_CMD,
2452 fore200e->bus->status_alignment) < 0) {
2453 return -ENOMEM;
2454 }
2455
2456 /* get the base address of the cp resident cmd queue entries */
2457 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2458
2459 /* fill the host resident and cp resident cmd entries */
2460 for (i=0; i < QUEUE_SIZE_CMD; i++) {
2461
2462 cmdq->host_entry[ i ].status =
2463 FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2464 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2465
2466 *cmdq->host_entry[ i ].status = STATUS_FREE;
2467
2468 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2469 &cp_entry[ i ].status_haddr);
2470 }
2471
2472 /* set the head entry of the queue */
2473 cmdq->head = 0;
2474
2475 fore200e->state = FORE200E_STATE_INIT_CMDQ;
2476 return 0;
2477}
2478
2479
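/* write the parameters of one buffer supply queue (queue length, buffer size,
   pool size, supply block size) into the cp resident initialization block */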
2480static void __init
2481fore200e_param_bs_queue(struct fore200e* fore200e,
2482 enum buffer_scheme scheme, enum buffer_magn magn,
2483 int queue_length, int pool_size, int supply_blksize)
2484{
2485 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2486
2487 fore200e->bus->write(queue_length, &bs_spec->queue_length);
2488 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2489 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2490 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
2491}
2492
2493
2494static int __init
2495fore200e_initialize(struct fore200e* fore200e)
2496{
2497 struct cp_queues __iomem * cpq;
2498 int ok, scheme, magn;
2499
2500 DPRINTK(2, "device %s being initialized\n", fore200e->name);
2501
2502 init_MUTEX(&fore200e->rate_sf);
2503 spin_lock_init(&fore200e->q_lock);
2504
2505 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2506
2507 /* enable cp to host interrupts */
2508 fore200e->bus->write(1, &cpq->imask);
2509
2510 if (fore200e->bus->irq_enable)
2511 fore200e->bus->irq_enable(fore200e);
2512
2513 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2514
2515 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2516 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len);
2517 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len);
2518
2519 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
2520 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);
2521
2522 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2523 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2524 fore200e_param_bs_queue(fore200e, scheme, magn,
2525 QUEUE_SIZE_BS,
2526 fore200e_rx_buf_nbr[ scheme ][ magn ],
2527 RBD_BLK_SIZE);
2528
2529 /* issue the initialize command */
2530 fore200e->bus->write(STATUS_PENDING, &cpq->init.status);
2531 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2532
2533 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2534 if (ok == 0) {
2535 printk(FORE200E "device %s initialization failed\n", fore200e->name);
2536 return -ENODEV;
2537 }
2538
2539 printk(FORE200E "device %s initialized\n", fore200e->name);
2540
2541 fore200e->state = FORE200E_STATE_INITIALIZE;
2542 return 0;
2543}
2544
2545
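/* the i960 boot monitor is driven through a software UART: a character is sent
   by writing it to the 'send' mailbox with the AVAIL bit set, and received by
   polling the 'recv' mailbox for AVAIL and acknowledging it with FREE */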
2546static void __init
2547fore200e_monitor_putc(struct fore200e* fore200e, char c)
2548{
2549 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2550
2551#if 0
2552 printk("%c", c);
2553#endif
2554 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2555}
2556
2557
2558static int __init
2559fore200e_monitor_getc(struct fore200e* fore200e)
2560{
2561 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2562 unsigned long timeout = jiffies + msecs_to_jiffies(50);
2563 int c;
2564
2565 while (time_before(jiffies, timeout)) {
2566
2567 c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2568
2569 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2570
2571 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2572#if 0
2573 printk("%c", c & 0xFF);
2574#endif
2575 return c & 0xFF;
2576 }
2577 }
2578
2579 return -1;
2580}
2581
2582
2583static void __init
2584fore200e_monitor_puts(struct fore200e* fore200e, char* str)
2585{
2586 while (*str) {
2587
2588 /* the i960 monitor doesn't accept any new character if it has something to say */
2589 while (fore200e_monitor_getc(fore200e) >= 0);
2590
2591 fore200e_monitor_putc(fore200e, *str++);
2592 }
2593
2594 while (fore200e_monitor_getc(fore200e) >= 0);
2595}
2596
2597
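/* start the downloaded firmware by sending a "go <start_offset>" command to the
   boot monitor, then poll the boot status word until the cp reports running */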
2598static int __init
2599fore200e_start_fw(struct fore200e* fore200e)
2600{
2601 int ok;
2602 char cmd[ 48 ];
2603 struct fw_header* fw_header = (struct fw_header*) fore200e->bus->fw_data;
2604
2605 DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2606
2607#if defined(__sparc_v9__)
2608 /* reported to be required by SBA cards on some sparc64 hosts */
2609 fore200e_spin(100);
2610#endif
2611
2612 sprintf(cmd, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2613
2614 fore200e_monitor_puts(fore200e, cmd);
2615
2616 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000);
2617 if (ok == 0) {
2618 printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2619 return -ENODEV;
2620 }
2621
2622 printk(FORE200E "device %s firmware started\n", fore200e->name);
2623
2624 fore200e->state = FORE200E_STATE_START_FW;
2625 return 0;
2626}
2627
2628
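/* copy the embedded firmware image, one 32 bit word at a time, into the board
   memory at the load offset given by the image header, after checking the
   header magic */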
2629static int __init
2630fore200e_load_fw(struct fore200e* fore200e)
2631{
2632 u32* fw_data = (u32*) fore200e->bus->fw_data;
2633 u32 fw_size = (u32) *fore200e->bus->fw_size / sizeof(u32);
2634
2635 struct fw_header* fw_header = (struct fw_header*) fw_data;
2636
2637 u32 __iomem *load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2638
2639 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2640 fore200e->name, load_addr, fw_size);
2641
2642 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2643 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2644 return -ENODEV;
2645 }
2646
2647 for (; fw_size--; fw_data++, load_addr++)
2648 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2649
2650 fore200e->state = FORE200E_STATE_LOAD_FW;
2651 return 0;
2652}
2653
2654
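/* register the adapter with the ATM stack; available_cell_rate is seeded with
   the full OC-3 peak cell rate and later consumed by CBR reservations (see
   fore200e_change_qos) */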
2655static int __init
2656fore200e_register(struct fore200e* fore200e)
2657{
2658 struct atm_dev* atm_dev;
2659
2660 DPRINTK(2, "device %s being registered\n", fore200e->name);
2661
2662 atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1,
2663 NULL);
2664 if (atm_dev == NULL) {
2665 printk(FORE200E "unable to register device %s\n", fore200e->name);
2666 return -ENODEV;
2667 }
2668
2669 atm_dev->dev_data = fore200e;
2670 fore200e->atm_dev = atm_dev;
2671
2672 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2673 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2674
2675 fore200e->available_cell_rate = ATM_OC3_PCR;
2676
2677 fore200e->state = FORE200E_STATE_REGISTER;
2678 return 0;
2679}
2680
2681
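/* bus-independent bring-up sequence: register the device, map the board, reset
   it, load and start the firmware, initialize the cp, set up the queues and
   receive buffers, read the ESI and finally hook up the interrupt handler */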
2682static int __init
2683fore200e_init(struct fore200e* fore200e)
2684{
2685 if (fore200e_register(fore200e) < 0)
2686 return -ENODEV;
2687
2688 if (fore200e->bus->configure(fore200e) < 0)
2689 return -ENODEV;
2690
2691 if (fore200e->bus->map(fore200e) < 0)
2692 return -ENODEV;
2693
2694 if (fore200e_reset(fore200e, 1) < 0)
2695 return -ENODEV;
2696
2697 if (fore200e_load_fw(fore200e) < 0)
2698 return -ENODEV;
2699
2700 if (fore200e_start_fw(fore200e) < 0)
2701 return -ENODEV;
2702
2703 if (fore200e_initialize(fore200e) < 0)
2704 return -ENODEV;
2705
2706 if (fore200e_init_cmd_queue(fore200e) < 0)
2707 return -ENOMEM;
2708
2709 if (fore200e_init_tx_queue(fore200e) < 0)
2710 return -ENOMEM;
2711
2712 if (fore200e_init_rx_queue(fore200e) < 0)
2713 return -ENOMEM;
2714
2715 if (fore200e_init_bs_queue(fore200e) < 0)
2716 return -ENOMEM;
2717
2718 if (fore200e_alloc_rx_buf(fore200e) < 0)
2719 return -ENOMEM;
2720
2721 if (fore200e_get_esi(fore200e) < 0)
2722 return -EIO;
2723
2724 if (fore200e_irq_request(fore200e) < 0)
2725 return -EBUSY;
2726
2727 fore200e_supply(fore200e);
2728
2729 /* all done, board initialization is now complete */
2730 fore200e->state = FORE200E_STATE_COMPLETE;
2731 return 0;
2732}
2733
2734
2735static int __devinit
2736fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
2737{
2738 const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
2739 struct fore200e* fore200e;
2740 int err = 0;
2741 static int index = 0;
2742
2743 if (pci_enable_device(pci_dev)) {
2744 err = -EINVAL;
2745 goto out;
2746 }
2747
2748 fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
2749 if (fore200e == NULL) {
2750 err = -ENOMEM;
2751 goto out_disable;
2752 }
2753
2754 fore200e->bus = bus;
2755 fore200e->bus_dev = pci_dev;
2756 fore200e->irq = pci_dev->irq;
2757 fore200e->phys_base = pci_resource_start(pci_dev, 0);
2758
2760
2761 pci_set_master(pci_dev);
2762
2763 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2764 fore200e->bus->model_name,
2765 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2766
2767 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2768
2769 err = fore200e_init(fore200e);
2770 if (err < 0) {
2771 fore200e_shutdown(fore200e);
2772 goto out_free;
2773 }
2774
2775 ++index;
2776 pci_set_drvdata(pci_dev, fore200e);
2777
2778out:
2779 return err;
2780
2781out_free:
2782 kfree(fore200e);
2783out_disable:
2784 pci_disable_device(pci_dev);
2785 goto out;
2786}
2787
2788
2789static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev)
2790{
2791 struct fore200e *fore200e;
2792
2793 fore200e = pci_get_drvdata(pci_dev);
2794
2795 list_del(&fore200e->entry);
2796
2797 fore200e_shutdown(fore200e);
2798 kfree(fore200e);
2799 pci_disable_device(pci_dev);
2800}
2801
2802
2803#ifdef CONFIG_ATM_FORE200E_PCA
2804static struct pci_device_id fore200e_pca_tbl[] = {
2805 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
2806 0, 0, (unsigned long) &fore200e_bus[0] },
2807 { 0, }
2808};
2809
2810MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2811
2812static struct pci_driver fore200e_pca_driver = {
2813 .name = "fore_200e",
2814 .probe = fore200e_pca_detect,
2815 .remove = __devexit_p(fore200e_pca_remove_one),
2816 .id_table = fore200e_pca_tbl,
2817};
2818#endif
2819
2820
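/* module entry point: walk the static bus table and probe the buses that
   provide their own detect handler (SBA), then register the PCI driver so that
   PCA boards are probed through the PCI core */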
2821static int __init
2822fore200e_module_init(void)
2823{
2824 const struct fore200e_bus* bus;
2825 struct fore200e* fore200e;
2826 int index;
2827
2828 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2829
2830 /* for each configured bus interface */
2831 for (bus = fore200e_bus; bus->model_name; bus++) {
2832
2833 /* detect all boards present on that bus */
2834 for (index = 0; bus->detect && (fore200e = bus->detect(bus, index)); index++) {
2835
2836 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2837 fore200e->bus->model_name,
2838 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2839
2840 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2841
2842 if (fore200e_init(fore200e) < 0) {
2843
2844 fore200e_shutdown(fore200e);
2845 break;
2846 }
2847
2848 list_add(&fore200e->entry, &fore200e_boards);
2849 }
2850 }
2851
2852#ifdef CONFIG_ATM_FORE200E_PCA
2853 if (!pci_module_init(&fore200e_pca_driver))
2854 return 0;
2855#endif
2856
2857 if (!list_empty(&fore200e_boards))
2858 return 0;
2859
2860 return -ENODEV;
2861}
2862
2863
2864static void __exit
2865fore200e_module_cleanup(void)
2866{
2867 struct fore200e *fore200e, *next;
2868
2869#ifdef CONFIG_ATM_FORE200E_PCA
2870 pci_unregister_driver(&fore200e_pca_driver);
2871#endif
2872
2873 list_for_each_entry_safe(fore200e, next, &fore200e_boards, entry) {
2874 fore200e_shutdown(fore200e);
2875 kfree(fore200e);
2876 }
2877 DPRINTK(1, "module being removed\n");
2878}
2879
2880
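/* /proc/atm read handler: each call returns a single block of text selected by
   *pos, which the 'left' countdown below maps onto the successive sections of
   the report (device info, buffers, cp state, statistics, VCC table) */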
2881static int
2882fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2883{
2884 struct fore200e* fore200e = FORE200E_DEV(dev);
2885 struct fore200e_vcc* fore200e_vcc;
2886 struct atm_vcc* vcc;
2887 int i, len, left = *pos;
2888 unsigned long flags;
2889
2890 if (!left--) {
2891
2892 if (fore200e_getstats(fore200e) < 0)
2893 return -EIO;
2894
2895 len = sprintf(page,"\n"
2896 " device:\n"
2897 " internal name:\t\t%s\n", fore200e->name);
2898
2899 /* print bus-specific information */
2900 if (fore200e->bus->proc_read)
2901 len += fore200e->bus->proc_read(fore200e, page + len);
2902
2903 len += sprintf(page + len,
2904 " interrupt line:\t\t%s\n"
2905 " physical base address:\t0x%p\n"
2906 " virtual base address:\t0x%p\n"
2907 " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
2908 " board serial number:\t\t%d\n\n",
2909 fore200e_irq_itoa(fore200e->irq),
2910 (void*)fore200e->phys_base,
2911 fore200e->virt_base,
2912 fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
2913 fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
2914 fore200e->esi[4] * 256 + fore200e->esi[5]);
2915
2916 return len;
2917 }
2918
2919 if (!left--)
2920 return sprintf(page,
2921 " free small bufs, scheme 1:\t%d\n"
2922 " free large bufs, scheme 1:\t%d\n"
2923 " free small bufs, scheme 2:\t%d\n"
2924 " free large bufs, scheme 2:\t%d\n",
2925 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2926 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2927 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2928 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2929
2930 if (!left--) {
2931 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2932
2933 len = sprintf(page,"\n\n"
2934 " cell processor:\n"
2935 " heartbeat state:\t\t");
2936
2937 if (hb >> 16 != 0xDEAD)
2938 len += sprintf(page + len, "0x%08x\n", hb);
2939 else
2940 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2941
2942 return len;
2943 }
2944
2945 if (!left--) {
2946 static const char* media_name[] = {
2947 "unshielded twisted pair",
2948 "multimode optical fiber ST",
2949 "multimode optical fiber SC",
2950 "single-mode optical fiber ST",
2951 "single-mode optical fiber SC",
2952 "unknown"
2953 };
2954
2955 static const char* oc3_mode[] = {
2956 "normal operation",
2957 "diagnostic loopback",
2958 "line loopback",
2959 "unknown"
2960 };
2961
2962 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2963 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2964 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2965 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2966 u32 oc3_index;
2967
2968 if (media_index > 4) /* media_index is unsigned, so only the upper bound needs checking */
2969 media_index = 5;
2970
2971 switch (fore200e->loop_mode) {
2972 case ATM_LM_NONE: oc3_index = 0;
2973 break;
2974 case ATM_LM_LOC_PHY: oc3_index = 1;
2975 break;
2976 case ATM_LM_RMT_PHY: oc3_index = 2;
2977 break;
2978 default: oc3_index = 3;
2979 }
2980
2981 return sprintf(page,
2982 " firmware release:\t\t%d.%d.%d\n"
2983 " monitor release:\t\t%d.%d\n"
2984 " media type:\t\t\t%s\n"
2985 " OC-3 revision:\t\t0x%x\n"
2986 " OC-3 mode:\t\t\t%s",
2987 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
2988 mon960_release >> 16, mon960_release << 16 >> 16,
2989 media_name[ media_index ],
2990 oc3_revision,
2991 oc3_mode[ oc3_index ]);
2992 }
2993
2994 if (!left--) {
2995 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2996
2997 return sprintf(page,
2998 "\n\n"
2999 " monitor:\n"
3000 " version number:\t\t%d\n"
3001 " boot status word:\t\t0x%08x\n",
3002 fore200e->bus->read(&cp_monitor->mon_version),
3003 fore200e->bus->read(&cp_monitor->bstat));
3004 }
3005
3006 if (!left--)
3007 return sprintf(page,
3008 "\n"
3009 " device statistics:\n"
3010 " 4b5b:\n"
3011 " crc_header_errors:\t\t%10u\n"
3012 " framing_errors:\t\t%10u\n",
3013 fore200e_swap(fore200e->stats->phy.crc_header_errors),
3014 fore200e_swap(fore200e->stats->phy.framing_errors));
3015
3016 if (!left--)
3017 return sprintf(page, "\n"
3018 " OC-3:\n"
3019 " section_bip8_errors:\t%10u\n"
3020 " path_bip8_errors:\t\t%10u\n"
3021 " line_bip24_errors:\t\t%10u\n"
3022 " line_febe_errors:\t\t%10u\n"
3023 " path_febe_errors:\t\t%10u\n"
3024 " corr_hcs_errors:\t\t%10u\n"
3025 " ucorr_hcs_errors:\t\t%10u\n",
3026 fore200e_swap(fore200e->stats->oc3.section_bip8_errors),
3027 fore200e_swap(fore200e->stats->oc3.path_bip8_errors),
3028 fore200e_swap(fore200e->stats->oc3.line_bip24_errors),
3029 fore200e_swap(fore200e->stats->oc3.line_febe_errors),
3030 fore200e_swap(fore200e->stats->oc3.path_febe_errors),
3031 fore200e_swap(fore200e->stats->oc3.corr_hcs_errors),
3032 fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors));
3033
3034 if (!left--)
3035 return sprintf(page,"\n"
3036 " ATM:\t\t\t\t cells\n"
3037 " TX:\t\t\t%10u\n"
3038 " RX:\t\t\t%10u\n"
3039 " vpi out of range:\t\t%10u\n"
3040 " vpi no conn:\t\t%10u\n"
3041 " vci out of range:\t\t%10u\n"
3042 " vci no conn:\t\t%10u\n",
3043 fore200e_swap(fore200e->stats->atm.cells_transmitted),
3044 fore200e_swap(fore200e->stats->atm.cells_received),
3045 fore200e_swap(fore200e->stats->atm.vpi_bad_range),
3046 fore200e_swap(fore200e->stats->atm.vpi_no_conn),
3047 fore200e_swap(fore200e->stats->atm.vci_bad_range),
3048 fore200e_swap(fore200e->stats->atm.vci_no_conn));
3049
3050 if (!left--)
3051 return sprintf(page,"\n"
3052 " AAL0:\t\t\t cells\n"
3053 " TX:\t\t\t%10u\n"
3054 " RX:\t\t\t%10u\n"
3055 " dropped:\t\t\t%10u\n",
3056 fore200e_swap(fore200e->stats->aal0.cells_transmitted),
3057 fore200e_swap(fore200e->stats->aal0.cells_received),
3058 fore200e_swap(fore200e->stats->aal0.cells_dropped));
3059
3060 if (!left--)
3061 return sprintf(page,"\n"
3062 " AAL3/4:\n"
3063 " SAR sublayer:\t\t cells\n"
3064 " TX:\t\t\t%10u\n"
3065 " RX:\t\t\t%10u\n"
3066 " dropped:\t\t\t%10u\n"
3067 " CRC errors:\t\t%10u\n"
3068 " protocol errors:\t\t%10u\n\n"
3069 " CS sublayer:\t\t PDUs\n"
3070 " TX:\t\t\t%10u\n"
3071 " RX:\t\t\t%10u\n"
3072 " dropped:\t\t\t%10u\n"
3073 " protocol errors:\t\t%10u\n",
3074 fore200e_swap(fore200e->stats->aal34.cells_transmitted),
3075 fore200e_swap(fore200e->stats->aal34.cells_received),
3076 fore200e_swap(fore200e->stats->aal34.cells_dropped),
3077 fore200e_swap(fore200e->stats->aal34.cells_crc_errors),
3078 fore200e_swap(fore200e->stats->aal34.cells_protocol_errors),
3079 fore200e_swap(fore200e->stats->aal34.cspdus_transmitted),
3080 fore200e_swap(fore200e->stats->aal34.cspdus_received),
3081 fore200e_swap(fore200e->stats->aal34.cspdus_dropped),
3082 fore200e_swap(fore200e->stats->aal34.cspdus_protocol_errors));
3083
3084 if (!left--)
3085 return sprintf(page,"\n"
3086 " AAL5:\n"
3087 " SAR sublayer:\t\t cells\n"
3088 " TX:\t\t\t%10u\n"
3089 " RX:\t\t\t%10u\n"
3090 " dropped:\t\t\t%10u\n"
3091 " congestions:\t\t%10u\n\n"
3092 " CS sublayer:\t\t PDUs\n"
3093 " TX:\t\t\t%10u\n"
3094 " RX:\t\t\t%10u\n"
3095 " dropped:\t\t\t%10u\n"
3096 " CRC errors:\t\t%10u\n"
3097 " protocol errors:\t\t%10u\n",
3098 fore200e_swap(fore200e->stats->aal5.cells_transmitted),
3099 fore200e_swap(fore200e->stats->aal5.cells_received),
3100 fore200e_swap(fore200e->stats->aal5.cells_dropped),
3101 fore200e_swap(fore200e->stats->aal5.congestion_experienced),
3102 fore200e_swap(fore200e->stats->aal5.cspdus_transmitted),
3103 fore200e_swap(fore200e->stats->aal5.cspdus_received),
3104 fore200e_swap(fore200e->stats->aal5.cspdus_dropped),
3105 fore200e_swap(fore200e->stats->aal5.cspdus_crc_errors),
3106 fore200e_swap(fore200e->stats->aal5.cspdus_protocol_errors));
3107
3108 if (!left--)
3109 return sprintf(page,"\n"
3110 " AUX:\t\t allocation failures\n"
3111 " small b1:\t\t\t%10u\n"
3112 " large b1:\t\t\t%10u\n"
3113 " small b2:\t\t\t%10u\n"
3114 " large b2:\t\t\t%10u\n"
3115 " RX PDUs:\t\t\t%10u\n"
3116 " TX PDUs:\t\t\t%10lu\n",
3117 fore200e_swap(fore200e->stats->aux.small_b1_failed),
3118 fore200e_swap(fore200e->stats->aux.large_b1_failed),
3119 fore200e_swap(fore200e->stats->aux.small_b2_failed),
3120 fore200e_swap(fore200e->stats->aux.large_b2_failed),
3121 fore200e_swap(fore200e->stats->aux.rpd_alloc_failed),
3122 fore200e->tx_sat);
3123
3124 if (!left--)
3125 return sprintf(page,"\n"
3126 " receive carrier:\t\t\t%s\n",
3127 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
3128
3129 if (!left--) {
3130 return sprintf(page,"\n"
3131 " VCCs:\n address VPI VCI AAL "
3132 "TX PDUs TX min/max size RX PDUs RX min/max size\n");
3133 }
3134
3135 for (i = 0; i < NBR_CONNECT; i++) {
3136
3137 vcc = fore200e->vc_map[i].vcc;
3138
3139 if (vcc == NULL)
3140 continue;
3141
3142 spin_lock_irqsave(&fore200e->q_lock, flags);
3143
3144 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
3145
3146 fore200e_vcc = FORE200E_VCC(vcc);
3147 ASSERT(fore200e_vcc);
3148
3149 len = sprintf(page,
3150 " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
3151 (u32)(unsigned long)vcc,
3152 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
3153 fore200e_vcc->tx_pdu,
3154 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
3155 fore200e_vcc->tx_max_pdu,
3156 fore200e_vcc->rx_pdu,
3157 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
3158 fore200e_vcc->rx_max_pdu);
3159
3160 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3161 return len;
3162 }
3163
3164 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3165 }
3166
3167 return 0;
3168}
3169
3170module_init(fore200e_module_init);
3171module_exit(fore200e_module_cleanup);
3172
3173
3174static const struct atmdev_ops fore200e_ops =
3175{
3176 .open = fore200e_open,
3177 .close = fore200e_close,
3178 .ioctl = fore200e_ioctl,
3179 .getsockopt = fore200e_getsockopt,
3180 .setsockopt = fore200e_setsockopt,
3181 .send = fore200e_send,
3182 .change_qos = fore200e_change_qos,
3183 .proc_read = fore200e_proc_read,
3184 .owner = THIS_MODULE
3185};
3186
3187
3188#ifdef CONFIG_ATM_FORE200E_PCA
3189extern const unsigned char _fore200e_pca_fw_data[];
3190extern const unsigned int _fore200e_pca_fw_size;
3191#endif
3192#ifdef CONFIG_ATM_FORE200E_SBA
3193extern const unsigned char _fore200e_sba_fw_data[];
3194extern const unsigned int _fore200e_sba_fw_size;
3195#endif
3196
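/* per-bus operations and parameters (model and proc names, alignment
   constraints, firmware image, and the bus-specific handlers); the positional
   initializers below must match the field order of struct fore200e_bus
   declared in fore200e.h */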
3197static const struct fore200e_bus fore200e_bus[] = {
3198#ifdef CONFIG_ATM_FORE200E_PCA
3199 { "PCA-200E", "pca200e", 32, 4, 32,
3200 _fore200e_pca_fw_data, &_fore200e_pca_fw_size,
3201 fore200e_pca_read,
3202 fore200e_pca_write,
3203 fore200e_pca_dma_map,
3204 fore200e_pca_dma_unmap,
3205 fore200e_pca_dma_sync_for_cpu,
3206 fore200e_pca_dma_sync_for_device,
3207 fore200e_pca_dma_chunk_alloc,
3208 fore200e_pca_dma_chunk_free,
3209 NULL,
3210 fore200e_pca_configure,
3211 fore200e_pca_map,
3212 fore200e_pca_reset,
3213 fore200e_pca_prom_read,
3214 fore200e_pca_unmap,
3215 NULL,
3216 fore200e_pca_irq_check,
3217 fore200e_pca_irq_ack,
3218 fore200e_pca_proc_read,
3219 },
3220#endif
3221#ifdef CONFIG_ATM_FORE200E_SBA
3222 { "SBA-200E", "sba200e", 32, 64, 32,
3223 _fore200e_sba_fw_data, &_fore200e_sba_fw_size,
3224 fore200e_sba_read,
3225 fore200e_sba_write,
3226 fore200e_sba_dma_map,
3227 fore200e_sba_dma_unmap,
3228 fore200e_sba_dma_sync_for_cpu,
3229 fore200e_sba_dma_sync_for_device,
3230 fore200e_sba_dma_chunk_alloc,
3231 fore200e_sba_dma_chunk_free,
3232 fore200e_sba_detect,
3233 fore200e_sba_configure,
3234 fore200e_sba_map,
3235 fore200e_sba_reset,
3236 fore200e_sba_prom_read,
3237 fore200e_sba_unmap,
3238 fore200e_sba_irq_enable,
3239 fore200e_sba_irq_check,
3240 fore200e_sba_irq_ack,
3241 fore200e_sba_proc_read,
3242 },
3243#endif
3244 {}
3245};
3246
3247#ifdef MODULE_LICENSE
3248MODULE_LICENSE("GPL");
3249#endif