Diffstat (limited to 'drivers/atm/he.c')
 drivers/atm/he.c | 3091 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 3091 insertions(+), 0 deletions(-)
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
new file mode 100644
index 000000000000..c2c31a5f4513
--- /dev/null
+++ b/drivers/atm/he.c
@@ -0,0 +1,3091 @@
/* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#define USE_TASKLET
#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
#define USE_RBPS
#undef USE_RBPS_POOL			/* if memory is tight try this */
#undef USE_RBPL_POOL			/* if memory is tight try this */
#define USE_TPD_POOL
/* #undef CONFIG_ATM_HE_USE_SUNI */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* version definition */

static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static int disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static int irq_coalesce = 1;
static int sdh = 0;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,	/* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH	/* 1 */
};
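/*
 * Each CLK_LOW/CLK_HIGH pair in readtab clocks one opcode bit out on
 * the serial input line (SI_HIGH for a one), most significant bit
 * first, spelling the EEPROM READ instruction 0000 0011b noted above.
 */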

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};

static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
							unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);	/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}
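/*
 * The CON_DAT read-back above forces the posted data write to complete
 * before the command word is issued, and the CON_CTL_BUSY poll keeps
 * back-to-back connection memory accesses from overlapping.
 */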

#define he_writel_rcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)


/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
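/*
 * For example (illustrative values): with (dev)->vcibits = 10,
 * vpi = 1 and vci = 32 give cid = ((1 << 10) | 32) & 0x1fff = 0x420.
 */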

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)
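/*
 * With byte enables 0, 1 and 2 disabled, only the most significant
 * byte (bits 31:24) is actually written, which is what the note above
 * requires while the transmit connection is active.
 */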

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}

static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "he: %s\n", version);

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, HE_DMA_MASK) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = (struct he_dev *) kmalloc(sizeof(struct he_dev),
							GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	memset(he_dev, 0, sizeof(struct he_dev));

	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	if (he_dev)
		kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void __devexit
he_remove_one (struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}


static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
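/*
 * Worked example: rate_to_atmf(10) computes 10 << 9 = 5120, halves it
 * three times down to 640 (so exp = 3) and returns
 * NONZERO | (3 << 9) | 128, which decodes as 2^3 * (1 + 128/512) = 10
 * cells per second.
 */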

static void __init
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}
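/*
 * Each local buffer descriptor is two RCM words (buffer address and
 * next-descriptor index).  Pool 0 above uses the even descriptor
 * indices and pool 1 below the odd ones, hence both step the index by
 * 2 and the RCM offset by 4 words; the transmit pool is not
 * interleaved and steps by 1 and 2 respectively.
 */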

static void __init
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void __init
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}

static int __init
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

static void __init
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}

static int __init
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connections */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

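	/*
	 * illustrative numbers: on an OC-3 link (link_rate = 353207 cps)
	 * row 0 runs from 353207 down in steps of delta = 11037, rows
	 * 1 through 14 each halve the row above, and row 15 quarters
	 * row 14.
	 */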
	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
		   instead of '/ 512', use '>> 9' to prevent a call
		   to divdu3 on x86 platforms
		*/
		rate_cps = ((unsigned long long) 1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	/* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is not pretty, but it avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int __init
he_init_group(struct he_dev *he_dev, int group)
{
	int i;

#ifdef USE_RBPS
	/* small buffer pool */
#ifdef USE_RBPS_POOL
	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
			CONFIG_RBPS_BUFSIZE, 8, 0);
	if (he_dev->rbps_pool == NULL) {
		hprintk("unable to create rbps pages\n");
		return -ENOMEM;
	}
#else /* !USE_RBPS_POOL */
	he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
	if (he_dev->rbps_pages == NULL) {
		hprintk("unable to create rbps page pool\n");
		return -ENOMEM;
	}
#endif /* USE_RBPS_POOL */

	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
	if (he_dev->rbps_base == NULL) {
		hprintk("failed to alloc rbps\n");
		return -ENOMEM;
	}
	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbps_virt == NULL) {
		hprintk("failed to alloc rbps_virt\n");
		return -ENOMEM;
	}

	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPS_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
		dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
#endif

		he_dev->rbps_virt[i].virt = cpuaddr;
		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
		he_dev->rbps_base[i].phys = dma_handle;

	}
	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];

	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
						G0_RBPS_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
						G0_RBPS_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPS_THRESH) |
			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPS_QI + (group * 32));
#else /* !USE_RBPS */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));
#endif /* USE_RBPS */

	/* large buffer pool */
#ifdef USE_RBPL_POOL
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
			CONFIG_RBPL_BUFSIZE, 8, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		return -ENOMEM;
	}
#else /* !USE_RBPL_POOL */
	he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
	if (he_dev->rbpl_pages == NULL) {
		hprintk("unable to create rbpl pages\n");
		return -ENOMEM;
	}
#endif /* USE_RBPL_POOL */

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl\n");
		return -ENOMEM;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbpl_virt == NULL) {
		hprintk("failed to alloc rbpl_virt\n");
		return -ENOMEM;
	}

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPL_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
		dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
#endif

		he_dev->rbpl_virt[i].virt = cpuaddr;
		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
		he_dev->rbpl_base[i].phys = dma_handle;
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;
}

static int __init
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, SA_INTERRUPT|SA_SHIRQ, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int __init
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
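/* i.e. 1 + 16 + 192 = 209, taking the worst-case 16-clock first-data
 * latency and the 192-clock burst from the table above */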
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
				he_dev->media & 0x40 ? "SM" : "MM",
				dev->esi[0],
				dev->esi[1],
				dev->esi[2],
				dev->esi[3],
				dev->esi[4],
				dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianness */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

#ifdef USE_TASKLET
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
#endif
	spin_lock_init(&he_dev->global_lock);

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023       bytes  0 _______________________2047 bytes
	 *         |            |                  |                   |   |
	 *         |  utility   |                  |        rx0        |   |
	 *        5|____________|             255  |___________________| u |
	 *        6|            |             256  |                   | t |
	 *         |            |                  |                   | i |
	 *         |    rx0     |     row          |        tx         | l |
	 *         |            |                  |                   | i |
	 *         |            |             767  |___________________| t |
	 *      517|____________|             768  |                   | y |
	 * row  518|            |                  |        rx1        |   |
	 *         |            |            1023  |___________________|___|
	 *         |            |
	 *         |     tx     |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits), RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |      rx0/1        |
	 *             |       LBM         |   link lists of local
	 *             |      tx           |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

#ifdef USE_TPD_POOL
	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
#else
	he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
			CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
	if (!he_dev->tpd_base)
		return -ENOMEM;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
		he_dev->tpd_base[i].inuse = 0;
	}

	he_dev->tpd_head = he_dev->tpd_base;
	he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
#endif

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

#ifdef USE_TASKLET
		tasklet_disable(&he_dev->tasklet);
#endif

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
#ifdef USE_RBPL_POOL
		int i;

		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
			void *cpuaddr = he_dev->rbpl_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

#ifdef USE_RBPL_POOL
	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);
#endif

#ifdef USE_RBPS
	if (he_dev->rbps_base) {
#ifdef USE_RBPS_POOL
		int i;

		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
			void *cpuaddr = he_dev->rbps_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;

			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
	}

#ifdef USE_RBPS_POOL
	if (he_dev->rbps_pool)
		pci_pool_destroy(he_dev->rbps_pool);
#endif

#endif /* USE_RBPS */

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

#ifdef USE_TPD_POOL
	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);
#else
	if (he_dev->tpd_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
							he_dev->tpd_base, he_dev->tpd_base_phys);
#endif

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
#ifdef USE_TPD_POOL
	struct he_tpd *tpd;
	dma_addr_t dma_handle;

	tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(dma_handle);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
#else
	int i;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		++he_dev->tpd_head;
		if (he_dev->tpd_head > he_dev->tpd_end) {
			he_dev->tpd_head = he_dev->tpd_base;
		}

		if (!he_dev->tpd_head->inuse) {
			he_dev->tpd_head->inuse = 1;
			he_dev->tpd_head->status &= TPD_MASK;
			he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
			he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
			he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
			return he_dev->tpd_head;
		}
	}
	hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
	return NULL;
#endif
}

#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))
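/*
 * The AAL5 CPCS trailer fills the last 8 bytes of the reassembled pdu:
 * CPCS-UU (1 byte), CPI (1 byte), Length (2 bytes, big-endian) and
 * CRC-32 (4 bytes), so bytes len-6 and len-5 hold the payload length
 * extracted above.
 */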
1768
1769/* 2.10.1.2 receive
1770 *
1771 * aal5 packets can optionally return the tcp checksum in the lower
1772 * 16 bits of the crc (RSR0_TCP_CKSUM)
1773 */
1774
1775#define TCP_CKSUM(buf,len) \
1776 ((((unsigned char *)(buf))[(len)-2] << 8) | \
1777 (((unsigned char *)(buf))[(len-1)]))
1778
1779static int
1780he_service_rbrq(struct he_dev *he_dev, int group)
1781{
1782 struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1783 ((unsigned long)he_dev->rbrq_base |
1784 he_dev->hsp->group[group].rbrq_tail);
1785 struct he_rbp *rbp = NULL;
1786 unsigned cid, lastcid = -1;
1787 unsigned buf_len = 0;
1788 struct sk_buff *skb;
1789 struct atm_vcc *vcc = NULL;
1790 struct he_vcc *he_vcc;
1791 struct he_iovec *iov;
1792 int pdus_assembled = 0;
1793 int updated = 0;
1794
1795 read_lock(&vcc_sklist_lock);
1796 while (he_dev->rbrq_head != rbrq_tail) {
1797 ++updated;
1798
1799 HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1800 he_dev->rbrq_head, group,
1801 RBRQ_ADDR(he_dev->rbrq_head),
1802 RBRQ_BUFLEN(he_dev->rbrq_head),
1803 RBRQ_CID(he_dev->rbrq_head),
1804 RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1805 RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1806 RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1807 RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1808 RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1809 RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1810
1811#ifdef USE_RBPS
1812 if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
1813 rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1814 else
1815#endif
1816 rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1817
1818 buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1819 cid = RBRQ_CID(he_dev->rbrq_head);
1820
1821 if (cid != lastcid)
1822 vcc = __find_vcc(he_dev, cid);
1823 lastcid = cid;
1824
1825 if (vcc == NULL) {
1826 hprintk("vcc == NULL (cid 0x%x)\n", cid);
1827 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1828 rbp->status &= ~RBP_LOANED;
1829
1830 goto next_rbrq_entry;
1831 }
1832
1833 he_vcc = HE_VCC(vcc);
1834 if (he_vcc == NULL) {
1835 hprintk("he_vcc == NULL (cid 0x%x)\n", cid);
1836 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1837 rbp->status &= ~RBP_LOANED;
1838 goto next_rbrq_entry;
1839 }
1840
1841 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1842 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
1843 atomic_inc(&vcc->stats->rx_drop);
1844 goto return_host_buffers;
1845 }
1846
1847 he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
1848 he_vcc->iov_tail->iov_len = buf_len;
1849 he_vcc->pdu_len += buf_len;
1850 ++he_vcc->iov_tail;
1851
1852 if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1853 lastcid = -1;
1854 HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid);
1855 wake_up(&he_vcc->rx_waitq);
1856 goto return_host_buffers;
1857 }
1858
1859#ifdef notdef
1860 if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
1861 hprintk("iovec full! cid 0x%x\n", cid);
1862 goto return_host_buffers;
1863 }
1864#endif
1865 if (!RBRQ_END_PDU(he_dev->rbrq_head))
1866 goto next_rbrq_entry;
1867
1868 if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1869 || RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1870 HPRINTK("%s%s (%d.%d)\n",
1871 RBRQ_CRC_ERR(he_dev->rbrq_head)
1872 ? "CRC_ERR " : "",
1873 RBRQ_LEN_ERR(he_dev->rbrq_head)
1874 ? "LEN_ERR" : "",
1875 vcc->vpi, vcc->vci);
1876 atomic_inc(&vcc->stats->rx_err);
1877 goto return_host_buffers;
1878 }
1879
1880 skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1881 GFP_ATOMIC);
1882 if (!skb) {
1883 HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1884 goto return_host_buffers;
1885 }
1886
1887 if (rx_skb_reserve > 0)
1888 skb_reserve(skb, rx_skb_reserve);
1889
1890 do_gettimeofday(&skb->stamp);
1891
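		/*
		 * gather the pdu: copy each loaned buffer recorded in the
		 * iovec into the fresh skb; bit RBP_SMALLBUF of the dma
		 * address tells us which pool (small or large) to look up
		 * the matching virtual address in.
		 */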
1892 for (iov = he_vcc->iov_head;
1893 iov < he_vcc->iov_tail; ++iov) {
1894#ifdef USE_RBPS
1895 if (iov->iov_base & RBP_SMALLBUF)
1896 memcpy(skb_put(skb, iov->iov_len),
1897 he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1898 else
1899#endif
1900 memcpy(skb_put(skb, iov->iov_len),
1901 he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1902 }
1903
1904 switch (vcc->qos.aal) {
1905 case ATM_AAL0:
1906 /* 2.10.1.5 raw cell receive */
1907 skb->len = ATM_AAL0_SDU;
1908 skb->tail = skb->data + skb->len;
1909 break;
1910 case ATM_AAL5:
1911 /* 2.10.1.2 aal5 receive */
1912
1913 skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1914 skb->tail = skb->data + skb->len;
1915#ifdef USE_CHECKSUM_HW
1916 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1917 skb->ip_summed = CHECKSUM_HW;
1918 skb->csum = TCP_CKSUM(skb->data,
1919 he_vcc->pdu_len);
1920 }
1921#endif
1922 break;
1923 }
1924
1925#ifdef should_never_happen
1926 if (skb->len > vcc->qos.rxtp.max_sdu)
1927 hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1928#endif
1929
1930#ifdef notdef
1931 ATM_SKB(skb)->vcc = vcc;
1932#endif
1933 vcc->push(vcc, skb);
1934
1935 atomic_inc(&vcc->stats->rx);
1936
1937return_host_buffers:
1938 ++pdus_assembled;
1939
1940 for (iov = he_vcc->iov_head;
1941 iov < he_vcc->iov_tail; ++iov) {
1942#ifdef USE_RBPS
1943 if (iov->iov_base & RBP_SMALLBUF)
1944 rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
1945 else
1946#endif
1947 rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
1948
1949 rbp->status &= ~RBP_LOANED;
1950 }
1951
1952 he_vcc->iov_tail = he_vcc->iov_head;
1953 he_vcc->pdu_len = 0;
1954
1955next_rbrq_entry:
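		/*
		 * the rbrq is sized and aligned so that OR-ing the masked,
		 * incremented pointer back into the base wraps the head in
		 * place (this assumes CONFIG_RBRQ_SIZE is a power of two,
		 * as RBRQ_MASK requires).
		 */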
1956 he_dev->rbrq_head = (struct he_rbrq *)
1957 ((unsigned long) he_dev->rbrq_base |
1958 RBRQ_MASK(++he_dev->rbrq_head));
1959
1960 }
1961 read_unlock(&vcc_sklist_lock);
1962
1963 if (updated) {
1964 if (updated > he_dev->rbrq_peak)
1965 he_dev->rbrq_peak = updated;
1966
1967 he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1968 G0_RBRQ_H + (group * 16));
1969 }
1970
1971 return pdus_assembled;
1972}
1973
1974static void
1975he_service_tbrq(struct he_dev *he_dev, int group)
1976{
1977 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1978 ((unsigned long)he_dev->tbrq_base |
1979 he_dev->hsp->group[group].tbrq_tail);
1980 struct he_tpd *tpd;
1981 int slot, updated = 0;
1982#ifdef USE_TPD_POOL
1983 struct he_tpd *__tpd;
1984#endif
1985
1986 /* 2.1.6 transmit buffer return queue */
1987
1988 while (he_dev->tbrq_head != tbrq_tail) {
1989 ++updated;
1990
1991 HPRINTK("tbrq%d 0x%x%s%s\n",
1992 group,
1993 TBRQ_TPD(he_dev->tbrq_head),
1994 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1995 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1996#ifdef USE_TPD_POOL
1997 tpd = NULL;
1998 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1999 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
2000 tpd = __tpd;
2001 list_del(&__tpd->entry);
2002 break;
2003 }
2004 }
2005
2006 if (tpd == NULL) {
2007 hprintk("unable to locate tpd for dma buffer %x\n",
2008 TBRQ_TPD(he_dev->tbrq_head));
2009 goto next_tbrq_entry;
2010 }
2011#else
2012 tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
2013#endif
2014
2015		if (TBRQ_EOS(he_dev->tbrq_head)) {
2016			if (tpd->vcc) {
2017				HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
2018					he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
2019				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
2020			}
2021			goto next_tbrq_entry;
2022		}
2023
2024 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2025 if (tpd->iovec[slot].addr)
2026 pci_unmap_single(he_dev->pci_dev,
2027 tpd->iovec[slot].addr,
2028 tpd->iovec[slot].len & TPD_LEN_MASK,
2029 PCI_DMA_TODEVICE);
2030			if (tpd->iovec[slot].len & TPD_LST)
2031				break;
2032		}
2034
2035 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
2036 if (tpd->vcc && tpd->vcc->pop)
2037 tpd->vcc->pop(tpd->vcc, tpd->skb);
2038 else
2039 dev_kfree_skb_any(tpd->skb);
2040 }
2041
2042next_tbrq_entry:
2043#ifdef USE_TPD_POOL
2044 if (tpd)
2045 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2046#else
2047 tpd->inuse = 0;
2048#endif
2049 he_dev->tbrq_head = (struct he_tbrq *)
2050 ((unsigned long) he_dev->tbrq_base |
2051 TBRQ_MASK(++he_dev->tbrq_head));
2052 }
2053
2054 if (updated) {
2055 if (updated > he_dev->tbrq_peak)
2056 he_dev->tbrq_peak = updated;
2057
2058 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
2059 G0_TBRQ_H + (group * 16));
2060 }
2061}
2062
2063
2064static void
2065he_service_rbpl(struct he_dev *he_dev, int group)
2066{
2067 struct he_rbp *newtail;
2068 struct he_rbp *rbpl_head;
2069 int moved = 0;
2070
2071 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2072 RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
2073
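	/*
	 * replenish the large buffer pool: walk the ring re-loaning any
	 * buffer whose RBP_LOANED bit was cleared when he_service_rbrq()
	 * consumed it, stopping at the first buffer still on loan or just
	 * short of the adapter's head pointer.
	 */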
2074 for (;;) {
2075 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2076 RBPL_MASK(he_dev->rbpl_tail+1));
2077
2078 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
2079 if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
2080 break;
2081
2082 newtail->status |= RBP_LOANED;
2083 he_dev->rbpl_tail = newtail;
2084 ++moved;
2085 }
2086
2087 if (moved)
2088 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
2089}
2090
2091#ifdef USE_RBPS
2092static void
2093he_service_rbps(struct he_dev *he_dev, int group)
2094{
2095 struct he_rbp *newtail;
2096 struct he_rbp *rbps_head;
2097 int moved = 0;
2098
2099 rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2100 RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
2101
2102 for (;;) {
2103 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2104 RBPS_MASK(he_dev->rbps_tail+1));
2105
2106 /* table 3.42 -- rbps_tail should never be set to rbps_head */
2107 if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
2108 break;
2109
2110 newtail->status |= RBP_LOANED;
2111 he_dev->rbps_tail = newtail;
2112 ++moved;
2113 }
2114
2115 if (moved)
2116 he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
2117}
2118#endif /* USE_RBPS */
2119
2120static void
2121he_tasklet(unsigned long data)
2122{
2123 unsigned long flags;
2124 struct he_dev *he_dev = (struct he_dev *) data;
2125 int group, type;
2126 int updated = 0;
2127
2128 HPRINTK("tasklet (0x%lx)\n", data);
2129#ifdef USE_TASKLET
2130 spin_lock_irqsave(&he_dev->global_lock, flags);
2131#endif
2132
2133 while (he_dev->irq_head != he_dev->irq_tail) {
2134 ++updated;
2135
2136 type = ITYPE_TYPE(he_dev->irq_head->isw);
2137 group = ITYPE_GROUP(he_dev->irq_head->isw);
2138
2139 switch (type) {
2140 case ITYPE_RBRQ_THRESH:
2141 HPRINTK("rbrq%d threshold\n", group);
2142 /* fall through */
2143 case ITYPE_RBRQ_TIMER:
2144 if (he_service_rbrq(he_dev, group)) {
2145 he_service_rbpl(he_dev, group);
2146#ifdef USE_RBPS
2147 he_service_rbps(he_dev, group);
2148#endif /* USE_RBPS */
2149 }
2150 break;
2151 case ITYPE_TBRQ_THRESH:
2152 HPRINTK("tbrq%d threshold\n", group);
2153 /* fall through */
2154 case ITYPE_TPD_COMPLETE:
2155 he_service_tbrq(he_dev, group);
2156 break;
2157 case ITYPE_RBPL_THRESH:
2158 he_service_rbpl(he_dev, group);
2159 break;
2160 case ITYPE_RBPS_THRESH:
2161#ifdef USE_RBPS
2162 he_service_rbps(he_dev, group);
2163#endif /* USE_RBPS */
2164 break;
2165 case ITYPE_PHY:
2166 HPRINTK("phy interrupt\n");
2167#ifdef CONFIG_ATM_HE_USE_SUNI
2168 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2169 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
2170 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
2171 spin_lock_irqsave(&he_dev->global_lock, flags);
2172#endif
2173 break;
2174 case ITYPE_OTHER:
2175 switch (type|group) {
2176 case ITYPE_PARITY:
2177 hprintk("parity error\n");
2178 break;
2179 case ITYPE_ABORT:
2180 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
2181 break;
2182 }
2183 break;
2184 case ITYPE_TYPE(ITYPE_INVALID):
2185 /* see 8.1.1 -- check all queues */
2186
2187 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2188
2189 he_service_rbrq(he_dev, 0);
2190 he_service_rbpl(he_dev, 0);
2191#ifdef USE_RBPS
2192 he_service_rbps(he_dev, 0);
2193#endif /* USE_RBPS */
2194 he_service_tbrq(he_dev, 0);
2195 break;
2196 default:
2197 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2198 }
2199
2200 he_dev->irq_head->isw = ITYPE_INVALID;
2201
2202 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2203 }
2204
2205 if (updated) {
2206 if (updated > he_dev->irq_peak)
2207 he_dev->irq_peak = updated;
2208
2209 he_writel(he_dev,
2210 IRQ_SIZE(CONFIG_IRQ_SIZE) |
2211 IRQ_THRESH(CONFIG_IRQ_THRESH) |
2212 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2213 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2214 }
2215#ifdef USE_TASKLET
2216 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2217#endif
2218}
2219
2220static irqreturn_t
2221he_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
2222{
2223 unsigned long flags;
2224 struct he_dev *he_dev = (struct he_dev * )dev_id;
2225 int handled = 0;
2226
2227 if (he_dev == NULL)
2228 return IRQ_NONE;
2229
2230 spin_lock_irqsave(&he_dev->global_lock, flags);
2231
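	/*
	 * the adapter posts its current queue offset (in words) into host
	 * memory at *he_dev->irq_tailoffset; shift it to a byte offset and
	 * fold it into the ring base to recover the tail pointer.
	 */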
2232 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2233 (*he_dev->irq_tailoffset << 2));
2234
2235 if (he_dev->irq_tail == he_dev->irq_head) {
2236 HPRINTK("tailoffset not updated?\n");
2237 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2238 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2239 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
2240 }
2241
2242#ifdef DEBUG
2243 if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2244 hprintk("spurious (or shared) interrupt?\n");
2245#endif
2246
2247 if (he_dev->irq_head != he_dev->irq_tail) {
2248 handled = 1;
2249#ifdef USE_TASKLET
2250 tasklet_schedule(&he_dev->tasklet);
2251#else
2252 he_tasklet((unsigned long) he_dev);
2253#endif
2254 he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */
2255 (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */
2256 }
2257 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2258 return IRQ_RETVAL(handled);
2259
2260}
2261
2262static __inline__ void
2263__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2264{
2265 struct he_tpdrq *new_tail;
2266
2267 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2268 tpd, cid, he_dev->tpdrq_tail);
2269
2271 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2272 TPDRQ_MASK(he_dev->tpdrq_tail+1));
2273
2274 /*
2275 * check to see if we are about to set the tail == head
2276 * if true, update the head pointer from the adapter
2277 * to see if this is really the case (reading the queue
2278 * head for every enqueue would be unnecessarily slow)
2279 */
2280
2281 if (new_tail == he_dev->tpdrq_head) {
2282 he_dev->tpdrq_head = (struct he_tpdrq *)
2283 (((unsigned long)he_dev->tpdrq_base) |
2284 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2285
2286 if (new_tail == he_dev->tpdrq_head) {
2287 hprintk("tpdrq full (cid 0x%x)\n", cid);
2288 /*
2289 * FIXME
2290 * push tpd onto a transmit backlog queue
2291 * after service_tbrq, service the backlog
2292 * for now, we just drop the pdu
2293 */
2294 if (tpd->skb) {
2295 if (tpd->vcc->pop)
2296 tpd->vcc->pop(tpd->vcc, tpd->skb);
2297 else
2298 dev_kfree_skb_any(tpd->skb);
2299 atomic_inc(&tpd->vcc->stats->tx_err);
2300 }
2301#ifdef USE_TPD_POOL
2302 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2303#else
2304 tpd->inuse = 0;
2305#endif
2306 return;
2307 }
2308 }
2309
2310 /* 2.1.5 transmit packet descriptor ready queue */
2311#ifdef USE_TPD_POOL
2312 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2313 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2314#else
2315 he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
2316 (TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
2317#endif
2318 he_dev->tpdrq_tail->cid = cid;
2319 wmb();
2320
2321 he_dev->tpdrq_tail = new_tail;
2322
2323 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2324 (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
2325}
2326
2327static int
2328he_open(struct atm_vcc *vcc)
2329{
2330 unsigned long flags;
2331 struct he_dev *he_dev = HE_DEV(vcc->dev);
2332 struct he_vcc *he_vcc;
2333 int err = 0;
2334 unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2335 short vpi = vcc->vpi;
2336 int vci = vcc->vci;
2337
2338 if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2339 return 0;
2340
2341 HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2342
2343 set_bit(ATM_VF_ADDR, &vcc->flags);
2344
2345 cid = he_mkcid(he_dev, vpi, vci);
2346
2347	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2348 if (he_vcc == NULL) {
2349 hprintk("unable to allocate he_vcc during open\n");
2350 return -ENOMEM;
2351 }
2352
2353 he_vcc->iov_tail = he_vcc->iov_head;
2354 he_vcc->pdu_len = 0;
2355 he_vcc->rc_index = -1;
2356
2357 init_waitqueue_head(&he_vcc->rx_waitq);
2358 init_waitqueue_head(&he_vcc->tx_waitq);
2359
2360 vcc->dev_data = he_vcc;
2361
2362 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2363 int pcr_goal;
2364
2365 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2366 if (pcr_goal == 0)
2367 pcr_goal = he_dev->atm_dev->link_rate;
2368 if (pcr_goal < 0) /* means round down, technically */
2369 pcr_goal = -pcr_goal;
2370
2371 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2372
2373 switch (vcc->qos.aal) {
2374 case ATM_AAL5:
2375 tsr0_aal = TSR0_AAL5;
2376 tsr4 = TSR4_AAL5;
2377 break;
2378 case ATM_AAL0:
2379 tsr0_aal = TSR0_AAL0_SDU;
2380 tsr4 = TSR4_AAL0_SDU;
2381 break;
2382 default:
2383 err = -EINVAL;
2384 goto open_failed;
2385 }
2386
2387 spin_lock_irqsave(&he_dev->global_lock, flags);
2388 tsr0 = he_readl_tsr0(he_dev, cid);
2389 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2390
2391 if (TSR0_CONN_STATE(tsr0) != 0) {
2392 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2393 err = -EBUSY;
2394 goto open_failed;
2395 }
2396
2397 switch (vcc->qos.txtp.traffic_class) {
2398 case ATM_UBR:
2399 /* 2.3.3.1 open connection ubr */
2400
2401 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2402 TSR0_USE_WMIN | TSR0_UPDATE_GER;
2403 break;
2404
2405 case ATM_CBR:
2406 /* 2.3.3.2 open connection cbr */
2407
2408 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2409			if ((he_dev->total_bw + pcr_goal)
2410			    > (he_dev->atm_dev->link_rate * 9 / 10)) {
2412 err = -EBUSY;
2413 goto open_failed;
2414 }
2415
2416 spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */
2417
2418 /* find an unused cs_stper register */
2419 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2420 if (he_dev->cs_stper[reg].inuse == 0 ||
2421 he_dev->cs_stper[reg].pcr == pcr_goal)
2422 break;
2423
2424 if (reg == HE_NUM_CS_STPER) {
2425 err = -EBUSY;
2426 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2427 goto open_failed;
2428 }
2429
2430 he_dev->total_bw += pcr_goal;
2431
2432 he_vcc->rc_index = reg;
2433 ++he_dev->cs_stper[reg].inuse;
2434 he_dev->cs_stper[reg].pcr = pcr_goal;
2435
2436 clock = he_is622(he_dev) ? 66667000 : 50000000;
2437 period = clock / pcr_goal;
2438
2439 HPRINTK("rc_index = %d period = %d\n",
2440 reg, period);
2441
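			/*
			 * illustrative numbers (not from the manual): a 622
			 * card clocks the scheduler at 66667000 Hz, so a
			 * pcr_goal of 10000 cells/s gives period = 6666
			 * ticks per cell; rate_to_atmf(period/2) converts
			 * the half-period into the rate format the CS_STPER
			 * register takes.
			 */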
2442 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2443 CS_STPER0 + reg);
2444 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2445
2446 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2447 TSR0_RC_INDEX(reg);
2448
2449 break;
2450 default:
2451 err = -EINVAL;
2452 goto open_failed;
2453 }
2454
2455 spin_lock_irqsave(&he_dev->global_lock, flags);
2456
2457 he_writel_tsr0(he_dev, tsr0, cid);
2458 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2459 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2460 TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2461 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2462 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2463
2464 he_writel_tsr3(he_dev, 0x0, cid);
2465 he_writel_tsr5(he_dev, 0x0, cid);
2466 he_writel_tsr6(he_dev, 0x0, cid);
2467 he_writel_tsr7(he_dev, 0x0, cid);
2468 he_writel_tsr8(he_dev, 0x0, cid);
2469 he_writel_tsr10(he_dev, 0x0, cid);
2470 he_writel_tsr11(he_dev, 0x0, cid);
2471 he_writel_tsr12(he_dev, 0x0, cid);
2472 he_writel_tsr13(he_dev, 0x0, cid);
2473 he_writel_tsr14(he_dev, 0x0, cid);
2474 (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */
2475 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2476 }
2477
2478 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2479 unsigned aal;
2480
2481 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2482 &HE_VCC(vcc)->rx_waitq);
2483
2484 switch (vcc->qos.aal) {
2485 case ATM_AAL5:
2486 aal = RSR0_AAL5;
2487 break;
2488 case ATM_AAL0:
2489 aal = RSR0_RAWCELL;
2490 break;
2491 default:
2492 err = -EINVAL;
2493 goto open_failed;
2494 }
2495
2496 spin_lock_irqsave(&he_dev->global_lock, flags);
2497
2498 rsr0 = he_readl_rsr0(he_dev, cid);
2499 if (rsr0 & RSR0_OPEN_CONN) {
2500 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2501
2502 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2503 err = -EBUSY;
2504 goto open_failed;
2505 }
2506
2507#ifdef USE_RBPS
2508 rsr1 = RSR1_GROUP(0);
2509 rsr4 = RSR4_GROUP(0);
2510#else /* !USE_RBPS */
2511 rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
2512 rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
2513#endif /* USE_RBPS */
2514 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2515 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2516
2517#ifdef USE_CHECKSUM_HW
2518 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2519 rsr0 |= RSR0_TCP_CKSUM;
2520#endif
2521
2522 he_writel_rsr4(he_dev, rsr4, cid);
2523 he_writel_rsr1(he_dev, rsr1, cid);
2524 /* 5.1.11 last parameter initialized should be
2525 the open/closed indication in rsr0 */
2526 he_writel_rsr0(he_dev,
2527 rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2528 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2529
2530 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2531 }
2532
2533open_failed:
2534
2535	if (err) {
2536		kfree(he_vcc);
2537		clear_bit(ATM_VF_ADDR, &vcc->flags);
2538	} else
2539		set_bit(ATM_VF_READY, &vcc->flags);
2542
2543 return err;
2544}
2545
2546static void
2547he_close(struct atm_vcc *vcc)
2548{
2549 unsigned long flags;
2550 DECLARE_WAITQUEUE(wait, current);
2551 struct he_dev *he_dev = HE_DEV(vcc->dev);
2552 struct he_tpd *tpd;
2553 unsigned cid;
2554 struct he_vcc *he_vcc = HE_VCC(vcc);
2555#define MAX_RETRY 30
2556 int retry = 0, sleep = 1, tx_inuse;
2557
2558 HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2559
2560 clear_bit(ATM_VF_READY, &vcc->flags);
2561 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2562
2563 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2564 int timeout;
2565
2566 HPRINTK("close rx cid 0x%x\n", cid);
2567
2568 /* 2.7.2.2 close receive operation */
2569
2570 /* wait for previous close (if any) to finish */
2571
2572 spin_lock_irqsave(&he_dev->global_lock, flags);
2573 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2574 HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2575 udelay(250);
2576 }
2577
2578 set_current_state(TASK_UNINTERRUPTIBLE);
2579 add_wait_queue(&he_vcc->rx_waitq, &wait);
2580
2581 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2582 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2583 he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2584 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2585
2586 timeout = schedule_timeout(30*HZ);
2587
2588 remove_wait_queue(&he_vcc->rx_waitq, &wait);
2589 set_current_state(TASK_RUNNING);
2590
2591 if (timeout == 0)
2592 hprintk("close rx timeout cid 0x%x\n", cid);
2593
2594 HPRINTK("close rx cid 0x%x complete\n", cid);
2595
2596 }
2597
2598 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2599		unsigned tsr4, tsr0;
2600 int timeout;
2601
2602 HPRINTK("close tx cid 0x%x\n", cid);
2603
2604 /* 2.1.2
2605 *
2606 * ... the host must first stop queueing packets to the TPDRQ
2607 * on the connection to be closed, then wait for all outstanding
2608 * packets to be transmitted and their buffers returned to the
2609 * TBRQ. When the last packet on the connection arrives in the
2610 * TBRQ, the host issues the close command to the adapter.
2611 */
2612
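		/*
		 * drain outstanding tx with exponential backoff: sleeps of
		 * 1, 2, 4, ... ms capped at 256 ms, at most MAX_RETRY
		 * iterations -- on the order of six seconds before we give
		 * up and flush the connection anyway.
		 */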
2613 while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
2614 (retry < MAX_RETRY)) {
2615 msleep(sleep);
2616 if (sleep < 250)
2617 sleep = sleep * 2;
2618
2619 ++retry;
2620 }
2621
2622 if (tx_inuse)
2623 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2624
2625 /* 2.3.1.1 generic close operations with flush */
2626
2627 spin_lock_irqsave(&he_dev->global_lock, flags);
2628 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2629 /* also clears TSR4_SESSION_ENDED */
2630
2631 switch (vcc->qos.txtp.traffic_class) {
2632 case ATM_UBR:
2633 he_writel_tsr1(he_dev,
2634 TSR1_MCR(rate_to_atmf(200000))
2635 | TSR1_PCR(0), cid);
2636 break;
2637 case ATM_CBR:
2638 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2639 break;
2640 }
2641 (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */
2642
2643 tpd = __alloc_tpd(he_dev);
2644 if (tpd == NULL) {
2645			hprintk("close tx __alloc_tpd failed cid 0x%x\n", cid);
2646 goto close_tx_incomplete;
2647 }
2648 tpd->status |= TPD_EOS | TPD_INT;
2649 tpd->skb = NULL;
2650 tpd->vcc = vcc;
2651 wmb();
2652
2653 set_current_state(TASK_UNINTERRUPTIBLE);
2654 add_wait_queue(&he_vcc->tx_waitq, &wait);
2655 __enqueue_tpd(he_dev, tpd, cid);
2656 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2657
2658 timeout = schedule_timeout(30*HZ);
2659
2660 remove_wait_queue(&he_vcc->tx_waitq, &wait);
2661 set_current_state(TASK_RUNNING);
2662
2663 spin_lock_irqsave(&he_dev->global_lock, flags);
2664
2665 if (timeout == 0) {
2666 hprintk("close tx timeout cid 0x%x\n", cid);
2667 goto close_tx_incomplete;
2668 }
2669
2670 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2671 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2672 udelay(250);
2673 }
2674
2675 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2676 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2677 udelay(250);
2678 }
2679
2680close_tx_incomplete:
2681
2682 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2683 int reg = he_vcc->rc_index;
2684
2685 HPRINTK("cs_stper reg = %d\n", reg);
2686
2687 if (he_dev->cs_stper[reg].inuse == 0)
2688 hprintk("cs_stper[%d].inuse = 0!\n", reg);
2689 else
2690 --he_dev->cs_stper[reg].inuse;
2691
2692 he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2693 }
2694 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2695
2696 HPRINTK("close tx cid 0x%x complete\n", cid);
2697 }
2698
2699 kfree(he_vcc);
2700
2701 clear_bit(ATM_VF_ADDR, &vcc->flags);
2702}
2703
2704static int
2705he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2706{
2707 unsigned long flags;
2708 struct he_dev *he_dev = HE_DEV(vcc->dev);
2709 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2710 struct he_tpd *tpd;
2711#ifdef USE_SCATTERGATHER
2712 int i, slot = 0;
2713#endif
2714
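/*
 * presumably 0xffff because the TPD length field is 16 bits wide (an
 * assumption -- compare TPD_LEN_MASK stripping the flag bits in the
 * tbrq unmap path above)
 */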
2715#define HE_TPD_BUFSIZE 0xffff
2716
2717 HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2718
2719 if ((skb->len > HE_TPD_BUFSIZE) ||
2720 ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2721		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
2722 if (vcc->pop)
2723 vcc->pop(vcc, skb);
2724 else
2725 dev_kfree_skb_any(skb);
2726 atomic_inc(&vcc->stats->tx_err);
2727 return -EINVAL;
2728 }
2729
2730#ifndef USE_SCATTERGATHER
2731 if (skb_shinfo(skb)->nr_frags) {
2732 hprintk("no scatter/gather support\n");
2733 if (vcc->pop)
2734 vcc->pop(vcc, skb);
2735 else
2736 dev_kfree_skb_any(skb);
2737 atomic_inc(&vcc->stats->tx_err);
2738 return -EINVAL;
2739 }
2740#endif
2741 spin_lock_irqsave(&he_dev->global_lock, flags);
2742
2743 tpd = __alloc_tpd(he_dev);
2744 if (tpd == NULL) {
2745 if (vcc->pop)
2746 vcc->pop(vcc, skb);
2747 else
2748 dev_kfree_skb_any(skb);
2749 atomic_inc(&vcc->stats->tx_err);
2750 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2751 return -ENOMEM;
2752 }
2753
2754 if (vcc->qos.aal == ATM_AAL5)
2755 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2756 else {
2757 char *pti_clp = (void *) (skb->data + 3);
2758 int clp, pti;
2759
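		/*
		 * raw cells arrive as full 53-byte SDUs: the fourth header
		 * octet carries PTI (bits 1-3) and CLP (bit 0), and the
		 * 5-byte header (including HEC) is stripped below, since
		 * the adapter presumably rebuilds it from the connection
		 * state and the cell type we set here.
		 */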
2760 pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2761 clp = (*pti_clp & ATM_HDR_CLP);
2762 tpd->status |= TPD_CELLTYPE(pti);
2763 if (clp)
2764 tpd->status |= TPD_CLP;
2765
2766 skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2767 }
2768
2769#ifdef USE_SCATTERGATHER
2770 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2771 skb->len - skb->data_len, PCI_DMA_TODEVICE);
2772 tpd->iovec[slot].len = skb->len - skb->data_len;
2773 ++slot;
2774
2775 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2776 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2777
2778 if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
2779 tpd->vcc = vcc;
2780			tpd->skb = NULL;	/* not the last fragment
2781						   so don't ->push() yet */
2782 wmb();
2783
2784 __enqueue_tpd(he_dev, tpd, cid);
2785 tpd = __alloc_tpd(he_dev);
2786 if (tpd == NULL) {
2787 if (vcc->pop)
2788 vcc->pop(vcc, skb);
2789 else
2790 dev_kfree_skb_any(skb);
2791 atomic_inc(&vcc->stats->tx_err);
2792 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2793 return -ENOMEM;
2794 }
2795 tpd->status |= TPD_USERCELL;
2796 slot = 0;
2797 }
2798
2799 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2800 (void *) page_address(frag->page) + frag->page_offset,
2801 frag->size, PCI_DMA_TODEVICE);
2802 tpd->iovec[slot].len = frag->size;
2803 ++slot;
2804
2805 }
2806
2807 tpd->iovec[slot - 1].len |= TPD_LST;
2808#else
2809 tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2810 tpd->length0 = skb->len | TPD_LST;
2811#endif
2812 tpd->status |= TPD_INT;
2813
2814 tpd->vcc = vcc;
2815 tpd->skb = skb;
2816 wmb();
2817 ATM_SKB(skb)->vcc = vcc;
2818
2819 __enqueue_tpd(he_dev, tpd, cid);
2820 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2821
2822 atomic_inc(&vcc->stats->tx);
2823
2824 return 0;
2825}
2826
2827static int
2828he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2829{
2830 unsigned long flags;
2831 struct he_dev *he_dev = HE_DEV(atm_dev);
2832 struct he_ioctl_reg reg;
2833 int err = 0;
2834
2835 switch (cmd) {
2836 case HE_GET_REG:
2837 if (!capable(CAP_NET_ADMIN))
2838 return -EPERM;
2839
2840 if (copy_from_user(&reg, arg,
2841 sizeof(struct he_ioctl_reg)))
2842 return -EFAULT;
2843
2844 spin_lock_irqsave(&he_dev->global_lock, flags);
2845 switch (reg.type) {
2846 case HE_REGTYPE_PCI:
2847 reg.val = he_readl(he_dev, reg.addr);
2848 break;
2849		case HE_REGTYPE_RCM:
2850			reg.val = he_readl_rcm(he_dev, reg.addr);
2851			break;
2852		case HE_REGTYPE_TCM:
2853			reg.val = he_readl_tcm(he_dev, reg.addr);
2854			break;
2855		case HE_REGTYPE_MBOX:
2856			reg.val = he_readl_mbox(he_dev, reg.addr);
2857			break;
2861 default:
2862 err = -EINVAL;
2863 break;
2864 }
2865 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2866 if (err == 0)
2867 if (copy_to_user(arg, &reg,
2868 sizeof(struct he_ioctl_reg)))
2869 return -EFAULT;
2870 break;
2871 default:
2872#ifdef CONFIG_ATM_HE_USE_SUNI
2873 if (atm_dev->phy && atm_dev->phy->ioctl)
2874 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2875#else /* CONFIG_ATM_HE_USE_SUNI */
2876 err = -EINVAL;
2877#endif /* CONFIG_ATM_HE_USE_SUNI */
2878 break;
2879 }
2880
2881 return err;
2882}
2883
2884static void
2885he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2886{
2887 unsigned long flags;
2888 struct he_dev *he_dev = HE_DEV(atm_dev);
2889
2890 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2891
2892 spin_lock_irqsave(&he_dev->global_lock, flags);
2893 he_writel(he_dev, val, FRAMER + (addr*4));
2894 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
2895 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2896}
2897
2898
2899static unsigned char
2900he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2901{
2902 unsigned long flags;
2903 struct he_dev *he_dev = HE_DEV(atm_dev);
2904 unsigned reg;
2905
2906 spin_lock_irqsave(&he_dev->global_lock, flags);
2907 reg = he_readl(he_dev, FRAMER + (addr*4));
2908 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2909
2910	HPRINTK("phy_get(addr 0x%lx) = 0x%x\n", addr, reg);
2911 return reg;
2912}
2913
2914static int
2915he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2916{
2917 unsigned long flags;
2918 struct he_dev *he_dev = HE_DEV(dev);
2919 int left, i;
2920#ifdef notdef
2921	struct he_rbrq *rbrq_tail;
2922	struct he_tpdrq *tpdrq_head;
2923	int rbpl_head, rbpl_tail, inuse;
2924#endif
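	/*
	 * running totals, since the counter registers are presumably
	 * clear-on-read; note these statics are shared by every he
	 * adapter in the system.
	 */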
2925 static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2926
2928 left = *pos;
2929 if (!left--)
2930 return sprintf(page, "%s\n", version);
2931
2932 if (!left--)
2933 return sprintf(page, "%s%s\n\n",
2934 he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2935
2936 if (!left--)
2937 return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
2938
2939 spin_lock_irqsave(&he_dev->global_lock, flags);
2940 mcc += he_readl(he_dev, MCC);
2941 oec += he_readl(he_dev, OEC);
2942 dcc += he_readl(he_dev, DCC);
2943 cec += he_readl(he_dev, CEC);
2944 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2945
2946 if (!left--)
2947 return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
2948 mcc, oec, dcc, cec);
2949
2950 if (!left--)
2951 return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
2952 CONFIG_IRQ_SIZE, he_dev->irq_peak);
2953
2954 if (!left--)
2955 return sprintf(page, "tpdrq_size = %d inuse = ?\n",
2956 CONFIG_TPDRQ_SIZE);
2957
2958 if (!left--)
2959 return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
2960 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2961
2962 if (!left--)
2963 return sprintf(page, "tbrq_size = %d peak = %d\n",
2964 CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2965
2967#ifdef notdef
2968 rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2969 rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2970
2971 inuse = rbpl_head - rbpl_tail;
2972 if (inuse < 0)
2973 inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2974 inuse /= sizeof(struct he_rbp);
2975
2976 if (!left--)
2977 return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
2978 CONFIG_RBPL_SIZE, inuse);
2979#endif
2980
2981 if (!left--)
2982 return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");
2983
2984 for (i = 0; i < HE_NUM_CS_STPER; ++i)
2985 if (!left--)
2986 return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
2987 he_dev->cs_stper[i].pcr,
2988 he_dev->cs_stper[i].inuse);
2989
2990 if (!left--)
2991		return sprintf(page, "total bw (cbr): %d (limit %d)\n",
2992			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2993
2994 return 0;
2995}
2996
2997/* eeprom routines -- see 4.7 */
2998
2999u8
3000read_prom_byte(struct he_dev *he_dev, int addr)
3001{
3002 u32 val = 0, tmp_read = 0;
3003 int i, j = 0;
3004 u8 byte_read = 0;
3005
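	/*
	 * bit-banged read from a serial EEPROM, 93C46-style (an
	 * assumption -- the exact command encoding comes from readtab[]
	 * and clocktab[]): raise write enable, clock out the READ
	 * opcode, clock out the 8-bit address msb first, drop write
	 * enable, then clock the 8 data bits in through ID_DOUT.
	 */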
3006 val = readl(he_dev->membase + HOST_CNTL);
3007 val &= 0xFFFFE0FF;
3008
3009 /* Turn on write enable */
3010 val |= 0x800;
3011 he_writel(he_dev, val, HOST_CNTL);
3012
3013 /* Send READ instruction */
3014 for (i = 0; i < sizeof(readtab)/sizeof(readtab[0]); i++) {
3015 he_writel(he_dev, val | readtab[i], HOST_CNTL);
3016 udelay(EEPROM_DELAY);
3017 }
3018
3019 /* Next, we need to send the byte address to read from */
3020 for (i = 7; i >= 0; i--) {
3021 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3022 udelay(EEPROM_DELAY);
3023 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3024 udelay(EEPROM_DELAY);
3025 }
3026
3027 j = 0;
3028
3029 val &= 0xFFFFF7FF; /* Turn off write enable */
3030 he_writel(he_dev, val, HOST_CNTL);
3031
3032 /* Now, we can read data from the EEPROM by clocking it in */
3033 for (i = 7; i >= 0; i--) {
3034 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3035 udelay(EEPROM_DELAY);
3036 tmp_read = he_readl(he_dev, HOST_CNTL);
3037 byte_read |= (unsigned char)
3038 ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
3039 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3040 udelay(EEPROM_DELAY);
3041 }
3042
3043 he_writel(he_dev, val | ID_CS, HOST_CNTL);
3044 udelay(EEPROM_DELAY);
3045
3046 return byte_read;
3047}
3048
3049MODULE_LICENSE("GPL");
3050MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
3051MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
3052module_param(disable64, bool, 0);
3053MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
3054module_param(nvpibits, short, 0);
3055MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
3056module_param(nvcibits, short, 0);
3057MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
3058module_param(rx_skb_reserve, short, 0);
3059MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
3060module_param(irq_coalesce, bool, 0);
3061MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
3062module_param(sdh, bool, 0);
3063MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
3064
3065static struct pci_device_id he_pci_tbl[] = {
3066 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
3067 0, 0, 0 },
3068 { 0, }
3069};
3070
3071MODULE_DEVICE_TABLE(pci, he_pci_tbl);
3072
3073static struct pci_driver he_driver = {
3074 .name = "he",
3075 .probe = he_init_one,
3076 .remove = __devexit_p(he_remove_one),
3077 .id_table = he_pci_tbl,
3078};
3079
3080static int __init he_init(void)
3081{
3082 return pci_register_driver(&he_driver);
3083}
3084
3085static void __exit he_cleanup(void)
3086{
3087 pci_unregister_driver(&he_driver);
3088}
3089
3090module_init(he_init);
3091module_exit(he_cleanup);