Diffstat (limited to 'drivers/ieee1394/pcilynx.c')
 -rw-r--r--  drivers/ieee1394/pcilynx.c | 1982
 1 file changed, 1982 insertions(+), 0 deletions(-)
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
new file mode 100644
index 000000000000..a261d2b0e5ac
--- /dev/null
+++ b/drivers/ieee1394/pcilynx.c
@@ -0,0 +1,1982 @@
1/*
2 * pcilynx.c - Texas Instruments PCILynx driver
3 * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
4 * Stephan Linz <linz@mazet.de>
5 * Manfred Weihs <weihs@ict.tuwien.ac.at>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22/*
23 * Contributions:
24 *
25 * Manfred Weihs <weihs@ict.tuwien.ac.at>
26 * reading bus info block (containing GUID) from serial
27 * eeprom via i2c and storing it in config ROM
28 * Reworked code for initiating bus resets
29 * (long, short, with or without hold-off)
30 * Enhancements in async and iso send code
31 */
32
33#include <linux/config.h>
34#include <linux/kernel.h>
35#include <linux/slab.h>
36#include <linux/interrupt.h>
37#include <linux/wait.h>
38#include <linux/errno.h>
39#include <linux/module.h>
40#include <linux/moduleparam.h>
41#include <linux/init.h>
42#include <linux/pci.h>
43#include <linux/fs.h>
44#include <linux/poll.h>
45#include <linux/kdev_t.h>
46#include <asm/byteorder.h>
47#include <asm/atomic.h>
48#include <asm/io.h>
49#include <asm/uaccess.h>
50#include <asm/irq.h>
51
52#include "csr1212.h"
53#include "ieee1394.h"
54#include "ieee1394_types.h"
55#include "hosts.h"
56#include "ieee1394_core.h"
57#include "highlevel.h"
58#include "pcilynx.h"
59
60#include <linux/i2c.h>
61#include <linux/i2c-algo-bit.h>
62
63/* print general (card independent) information */
64#define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
65/* print card specific information */
66#define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
67
68#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
69#define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
70#define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
71#else
72#define PRINT_GD(level, fmt, args...) do {} while (0)
73#define PRINTD(level, card, fmt, args...) do {} while (0)
74#endif
75
76
77/* Module Parameters */
78static int skip_eeprom = 0;
79module_param(skip_eeprom, int, 0444);
80MODULE_PARM_DESC(skip_eeprom, "Use generic bus info block instead of serial eeprom (default = 0).");
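/* For example (not part of the original source), loading the driver with
 * "modprobe pcilynx skip_eeprom=1" makes it use the generic bus info block
 * instead of reading the GUID from the serial EEPROM. */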
81
82
83static struct hpsb_host_driver lynx_driver;
84static unsigned int card_id;
85
86
87
88/*
89 * I2C stuff
90 */
91
92/* the i2c stuff was inspired by i2c-philips-par.c */
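/* In SERIAL_EEPROM_CONTROL, bit 0x00000040 is used as SCL and bit 0x00000010
 * as SDA (see the accessors below); lynx->i2c_driven_state keeps a shadow
 * copy of the value currently driven onto the register. */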
93
94static void bit_setscl(void *data, int state)
95{
96 if (state) {
97 ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000040;
98 } else {
99 ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000040;
100 }
101 reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
102}
103
104static void bit_setsda(void *data, int state)
105{
106 if (state) {
107 ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000010;
108 } else {
109 ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000010;
110 }
111 reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
112}
113
114static int bit_getscl(void *data)
115{
116 return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000040;
117}
118
119static int bit_getsda(void *data)
120{
121 return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
122}
123
124static int bit_reg(struct i2c_client *client)
125{
126 return 0;
127}
128
129static int bit_unreg(struct i2c_client *client)
130{
131 return 0;
132}
133
134static struct i2c_algo_bit_data bit_data = {
135 .setsda = bit_setsda,
136 .setscl = bit_setscl,
137 .getsda = bit_getsda,
138 .getscl = bit_getscl,
139 .udelay = 5,
140 .mdelay = 5,
141 .timeout = 100,
142};
143
144static struct i2c_adapter bit_ops = {
145 .id = 0xAA, //FIXME: probably we should get an id in i2c-id.h
146 .client_register = bit_reg,
147 .client_unregister = bit_unreg,
148 .name = "PCILynx I2C",
149};
150
151
152
153/*
154 * PCL handling functions.
155 */
156
157static pcl_t alloc_pcl(struct ti_lynx *lynx)
158{
159 u8 m;
160 int i, j;
161
162 spin_lock(&lynx->lock);
163 /* FIXME - use ffz() to make this readable */
164 for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
165 m = lynx->pcl_bmap[i];
166 for (j = 0; j < 8; j++) {
167 if (m & 1<<j) {
168 continue;
169 }
170 m |= 1<<j;
171 lynx->pcl_bmap[i] = m;
172 spin_unlock(&lynx->lock);
173 return 8 * i + j;
174 }
175 }
176 spin_unlock(&lynx->lock);
177
178 return -1;
179}
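/* A possible ffz()-based variant of the allocation loop above, as suggested
 * by the FIXME (untested sketch, assuming the same one-bit-per-PCL bitmap):
 *
 *	for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
 *		j = ffz(lynx->pcl_bmap[i]);
 *		if (j < 8) {
 *			lynx->pcl_bmap[i] |= 1 << j;
 *			spin_unlock(&lynx->lock);
 *			return 8 * i + j;
 *		}
 *	}
 */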
180
181
182#if 0
183static void free_pcl(struct ti_lynx *lynx, pcl_t pclid)
184{
185 int off, bit;
186
187 off = pclid / 8;
188 bit = pclid % 8;
189
190 if (pclid < 0) {
191 return;
192 }
193
194 spin_lock(&lynx->lock);
195 if (lynx->pcl_bmap[off] & 1<<bit) {
196 lynx->pcl_bmap[off] &= ~(1<<bit);
197 } else {
198 PRINT(KERN_ERR, lynx->id,
199 "attempted to free unallocated PCL %d", pclid);
200 }
201 spin_unlock(&lynx->lock);
202}
203
204/* functions useful for debugging */
205static void pretty_print_pcl(const struct ti_pcl *pcl)
206{
207 int i;
208
209 printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
210 pcl->next, pcl->user_data, pcl->pcl_status,
211 pcl->remaining_transfer_count, pcl->next_data_buffer);
212
213 printk("PCL");
214 for (i=0; i<13; i++) {
215 printk(" c%x:%08x d%x:%08x",
216 i, pcl->buffer[i].control, i, pcl->buffer[i].pointer);
217 if (!(i & 0x3) && (i != 12)) printk("\nPCL");
218 }
219 printk("\n");
220}
221
222static void print_pcl(const struct ti_lynx *lynx, pcl_t pclid)
223{
224 struct ti_pcl pcl;
225
226 get_pcl(lynx, pclid, &pcl);
227 pretty_print_pcl(&pcl);
228}
229#endif
230
231
232
233/***********************************
234 * IEEE-1394 functionality section *
235 ***********************************/
236
237
238static int get_phy_reg(struct ti_lynx *lynx, int addr)
239{
240 int retval;
241 int i = 0;
242
243 unsigned long flags;
244
245 if (addr > 15) {
246 PRINT(KERN_ERR, lynx->id,
247 "%s: PHY register address %d out of range",
248 __FUNCTION__, addr);
249 return -1;
250 }
251
252 spin_lock_irqsave(&lynx->phy_reg_lock, flags);
253
254 reg_write(lynx, LINK_PHY, LINK_PHY_READ | LINK_PHY_ADDR(addr));
255 do {
256 retval = reg_read(lynx, LINK_PHY);
257
258 if (i > 10000) {
259 PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
260 __FUNCTION__);
261 retval = -1;
262 break;
263 }
264 i++;
265 } while ((retval & 0xf00) != LINK_PHY_RADDR(addr));
266
267 reg_write(lynx, LINK_INT_STATUS, LINK_INT_PHY_REG_RCVD);
268 spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
269
270 if (retval != -1) {
271 return retval & 0xff;
272 } else {
273 return -1;
274 }
275}
276
277static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
278{
279 unsigned long flags;
280
281 if (addr > 15) {
282 PRINT(KERN_ERR, lynx->id,
283 "%s: PHY register address %d out of range", __FUNCTION__, addr);
284 return -1;
285 }
286
287 if (val > 0xff) {
288 PRINT(KERN_ERR, lynx->id,
289 "%s: PHY register value %d out of range", __FUNCTION__, val);
290 return -1;
291 }
292
293 spin_lock_irqsave(&lynx->phy_reg_lock, flags);
294
295 reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr)
296 | LINK_PHY_WDATA(val));
297
298 spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
299
300 return 0;
301}
302
303static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
304{
305 int reg;
306
307 if (page > 7) {
308 PRINT(KERN_ERR, lynx->id,
309 "%s: PHY page %d out of range", __FUNCTION__, page);
310 return -1;
311 }
312
313 reg = get_phy_reg(lynx, 7);
314 if (reg != -1) {
315 reg &= 0x1f;
316 reg |= (page << 5);
317 set_phy_reg(lynx, 7, reg);
318 return 0;
319 } else {
320 return -1;
321 }
322}
323
324#if 0 /* not needed at this time */
325static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
326{
327 int reg;
328
329 if (port > 15) {
330 PRINT(KERN_ERR, lynx->id,
331 "%s: PHY port %d out of range", __FUNCTION__, port);
332 return -1;
333 }
334
335 reg = get_phy_reg(lynx, 7);
336 if (reg != -1) {
337 reg &= 0xf0;
338 reg |= port;
339 set_phy_reg(lynx, 7, reg);
340 return 0;
341 } else {
342 return -1;
343 }
344}
345#endif
346
347static u32 get_phy_vendorid(struct ti_lynx *lynx)
348{
349 u32 pvid = 0;
350 sel_phy_reg_page(lynx, 1);
351 pvid |= (get_phy_reg(lynx, 10) << 16);
352 pvid |= (get_phy_reg(lynx, 11) << 8);
353 pvid |= get_phy_reg(lynx, 12);
354 PRINT(KERN_INFO, lynx->id, "PHY vendor id 0x%06x", pvid);
355 return pvid;
356}
357
358static u32 get_phy_productid(struct ti_lynx *lynx)
359{
360 u32 id = 0;
361 sel_phy_reg_page(lynx, 1);
362 id |= (get_phy_reg(lynx, 13) << 16);
363 id |= (get_phy_reg(lynx, 14) << 8);
364 id |= get_phy_reg(lynx, 15);
365 PRINT(KERN_INFO, lynx->id, "PHY product id 0x%06x", id);
366 return id;
367}
368
369static quadlet_t generate_own_selfid(struct ti_lynx *lynx,
370 struct hpsb_host *host)
371{
372 quadlet_t lsid;
373 char phyreg[7];
374 int i;
375
376 phyreg[0] = lynx->phy_reg0;
377 for (i = 1; i < 7; i++) {
378 phyreg[i] = get_phy_reg(lynx, i);
379 }
380
381 /* FIXME? We assume a TSB21LV03A phy here. This code doesn't support
382 more than 3 ports on the PHY anyway. */
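	/* Layout of the self-ID quadlet assembled below, as far as can be read
	 * from this code (it matches the IEEE 1394-1995 self-ID packet zero
	 * format): bits 31-30 = "10", 29-24 = phy_id, 22 = link active,
	 * 21-16 = gap count, 15-14 = speed, 11 = contender,
	 * 7-2 = port status p0-p2, 1 = initiated reset. */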
383
384 lsid = 0x80400000 | ((phyreg[0] & 0xfc) << 22);
385 lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
386 lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
387 if (!hpsb_disable_irm)
388 lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dependent) */
389 /* lsid |= 1 << 11; *//* set contender (hack) */
390 lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */
391
392 for (i = 0; i < (phyreg[2] & 0xf); i++) { /* ports */
393 if (phyreg[3 + i] & 0x4) {
394 lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
395 << (6 - i*2);
396 } else {
397 lsid |= 1 << (6 - i*2);
398 }
399 }
400
401 cpu_to_be32s(&lsid);
402 PRINT(KERN_DEBUG, lynx->id, "generated own selfid 0x%x", lsid);
403 return lsid;
404}
405
406static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
407{
408 quadlet_t *q = lynx->rcv_page;
409 int phyid, isroot, size;
410 quadlet_t lsid = 0;
411 int i;
412
413 if (lynx->phy_reg0 == -1 || lynx->selfid_size == -1) return;
414
415 size = lynx->selfid_size;
416 phyid = lynx->phy_reg0;
417
418 i = (size > 16 ? 16 : size) / 4 - 1;
419 while (i >= 0) {
420 cpu_to_be32s(&q[i]);
421 i--;
422 }
423
424 if (!lynx->phyic.reg_1394a) {
425 lsid = generate_own_selfid(lynx, host);
426 }
427
428 isroot = (phyid & 2) != 0;
429 phyid >>= 2;
430 PRINT(KERN_INFO, lynx->id, "SelfID process finished (phyid %d, %s)",
431 phyid, (isroot ? "root" : "not root"));
432 reg_write(lynx, LINK_ID, (0xffc0 | phyid) << 16);
433
434 if (!lynx->phyic.reg_1394a && !size) {
435 hpsb_selfid_received(host, lsid);
436 }
437
438 while (size > 0) {
439 struct selfid *sid = (struct selfid *)q;
440
441 if (!lynx->phyic.reg_1394a && !sid->extended
442 && (sid->phy_id == (phyid + 1))) {
443 hpsb_selfid_received(host, lsid);
444 }
445
446 if (q[0] == ~q[1]) {
447 PRINT(KERN_DEBUG, lynx->id, "SelfID packet 0x%x rcvd",
448 q[0]);
449 hpsb_selfid_received(host, q[0]);
450 } else {
451 PRINT(KERN_INFO, lynx->id,
452 "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
453 }
454 q += 2;
455 size -= 8;
456 }
457
458 if (!lynx->phyic.reg_1394a && isroot && phyid != 0) {
459 hpsb_selfid_received(host, lsid);
460 }
461
462 hpsb_selfid_complete(host, phyid, isroot);
463
464 if (host->in_bus_reset) return; /* in bus reset again */
465
 466 if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER); //FIXME: I do not think we need this here
467 reg_set_bits(lynx, LINK_CONTROL,
468 LINK_CONTROL_RCV_CMP_VALID | LINK_CONTROL_TX_ASYNC_EN
469 | LINK_CONTROL_RX_ASYNC_EN | LINK_CONTROL_CYCTIMEREN);
470}
471
472
473
474/* This must be called with the respective queue_lock held. */
475static void send_next(struct ti_lynx *lynx, int what)
476{
477 struct ti_pcl pcl;
478 struct lynx_send_data *d;
479 struct hpsb_packet *packet;
480
481 d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async);
482 if (!list_empty(&d->pcl_queue)) {
483 PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo");
484 BUG();
485 }
486
487 packet = driver_packet(d->queue.next);
488 list_move_tail(&packet->driver_list, &d->pcl_queue);
489
490 d->header_dma = pci_map_single(lynx->dev, packet->header,
491 packet->header_size, PCI_DMA_TODEVICE);
492 if (packet->data_size) {
493 d->data_dma = pci_map_single(lynx->dev, packet->data,
494 packet->data_size,
495 PCI_DMA_TODEVICE);
496 } else {
497 d->data_dma = 0;
498 }
499
500 pcl.next = PCL_NEXT_INVALID;
501 pcl.async_error_next = PCL_NEXT_INVALID;
502 pcl.pcl_status = 0;
503 pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
504#ifndef __BIG_ENDIAN
505 pcl.buffer[0].control |= PCL_BIGENDIAN;
506#endif
507 pcl.buffer[0].pointer = d->header_dma;
508 pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
509 pcl.buffer[1].pointer = d->data_dma;
510
511 switch (packet->type) {
512 case hpsb_async:
513 pcl.buffer[0].control |= PCL_CMD_XMT;
514 break;
515 case hpsb_iso:
516 pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
517 break;
518 case hpsb_raw:
519 pcl.buffer[0].control |= PCL_CMD_UNFXMT;
520 break;
521 }
522
523 put_pcl(lynx, d->pcl, &pcl);
524 run_pcl(lynx, d->pcl_start, d->channel);
525}
526
527
528/* called from subsystem core */
529static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
530{
531 struct ti_lynx *lynx = host->hostdata;
532 struct lynx_send_data *d;
533 unsigned long flags;
534
535 if (packet->data_size >= 4096) {
536 PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%Zd)",
537 packet->data_size);
538 return -EOVERFLOW;
539 }
540
541 switch (packet->type) {
542 case hpsb_async:
543 case hpsb_raw:
544 d = &lynx->async;
545 break;
546 case hpsb_iso:
547 d = &lynx->iso_send;
548 break;
549 default:
550 PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
551 packet->type);
552 return -EINVAL;
553 }
554
555 if (packet->tcode == TCODE_WRITEQ
556 || packet->tcode == TCODE_READQ_RESPONSE) {
557 cpu_to_be32s(&packet->header[3]);
558 }
559
560 spin_lock_irqsave(&d->queue_lock, flags);
561
562 list_add_tail(&packet->driver_list, &d->queue);
563 if (list_empty(&d->pcl_queue))
564 send_next(lynx, packet->type);
565
566 spin_unlock_irqrestore(&d->queue_lock, flags);
567
568 return 0;
569}
570
571
572/* called from subsystem core */
573static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
574{
575 struct ti_lynx *lynx = host->hostdata;
576 int retval = 0;
577 struct hpsb_packet *packet;
578 LIST_HEAD(packet_list);
579 unsigned long flags;
580 int phy_reg;
581
582 switch (cmd) {
583 case RESET_BUS:
584 if (reg_read(lynx, LINK_INT_STATUS) & LINK_INT_PHY_BUSRESET) {
585 retval = 0;
586 break;
587 }
588
589 switch (arg) {
590 case SHORT_RESET:
591 if (lynx->phyic.reg_1394a) {
592 phy_reg = get_phy_reg(lynx, 5);
593 if (phy_reg == -1) {
 594 PRINT(KERN_ERR, lynx->id, "cannot reset bus because reading the PHY register failed");
595 retval = -1;
596 break;
597 }
598 phy_reg |= 0x40;
599
600 PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset) on request");
601
602 lynx->selfid_size = -1;
603 lynx->phy_reg0 = -1;
604 set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
605 break;
606 } else {
 607 PRINT(KERN_INFO, lynx->id, "cannot do short bus reset because of old PHY");
608 /* fall through to long bus reset */
609 }
610 case LONG_RESET:
611 phy_reg = get_phy_reg(lynx, 1);
612 if (phy_reg == -1) {
 613 PRINT(KERN_ERR, lynx->id, "cannot reset bus because reading the PHY register failed");
614 retval = -1;
615 break;
616 }
617 phy_reg |= 0x40;
618
619 PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset) on request");
620
621 lynx->selfid_size = -1;
622 lynx->phy_reg0 = -1;
623 set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
624 break;
625 case SHORT_RESET_NO_FORCE_ROOT:
626 if (lynx->phyic.reg_1394a) {
627 phy_reg = get_phy_reg(lynx, 1);
628 if (phy_reg == -1) {
 629 PRINT(KERN_ERR, lynx->id, "cannot reset bus because reading the PHY register failed");
630 retval = -1;
631 break;
632 }
633 if (phy_reg & 0x80) {
634 phy_reg &= ~0x80;
635 set_phy_reg(lynx, 1, phy_reg); /* clear RHB */
636 }
637
638 phy_reg = get_phy_reg(lynx, 5);
639 if (phy_reg == -1) {
 640 PRINT(KERN_ERR, lynx->id, "cannot reset bus because reading the PHY register failed");
641 retval = -1;
642 break;
643 }
644 phy_reg |= 0x40;
645
646 PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, no force_root) on request");
647
648 lynx->selfid_size = -1;
649 lynx->phy_reg0 = -1;
650 set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
651 break;
652 } else {
 653 PRINT(KERN_INFO, lynx->id, "cannot do short bus reset because of old PHY");
654 /* fall through to long bus reset */
655 }
656 case LONG_RESET_NO_FORCE_ROOT:
657 phy_reg = get_phy_reg(lynx, 1);
658 if (phy_reg == -1) {
 659 PRINT(KERN_ERR, lynx->id, "cannot reset bus because reading the PHY register failed");
660 retval = -1;
661 break;
662 }
663 phy_reg &= ~0x80;
664 phy_reg |= 0x40;
665
666 PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, no force_root) on request");
667
668 lynx->selfid_size = -1;
669 lynx->phy_reg0 = -1;
670 set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
671 break;
672 case SHORT_RESET_FORCE_ROOT:
673 if (lynx->phyic.reg_1394a) {
674 phy_reg = get_phy_reg(lynx, 1);
675 if (phy_reg == -1) {
 676 PRINT(KERN_ERR, lynx->id, "cannot reset bus because reading the PHY register failed");
677 retval = -1;
678 break;
679 }
680 if (!(phy_reg & 0x80)) {
681 phy_reg |= 0x80;
682 set_phy_reg(lynx, 1, phy_reg); /* set RHB */
683 }
684
685 phy_reg = get_phy_reg(lynx, 5);
686 if (phy_reg == -1) {
 687 PRINT(KERN_ERR, lynx->id, "cannot reset bus because reading the PHY register failed");
688 retval = -1;
689 break;
690 }
691 phy_reg |= 0x40;
692
693 PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, force_root set) on request");
694
695 lynx->selfid_size = -1;
696 lynx->phy_reg0 = -1;
697 set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
698 break;
699 } else {
 700 PRINT(KERN_INFO, lynx->id, "cannot do short bus reset because of old PHY");
701 /* fall through to long bus reset */
702 }
703 case LONG_RESET_FORCE_ROOT:
704 phy_reg = get_phy_reg(lynx, 1);
705 if (phy_reg == -1) {
 706 PRINT(KERN_ERR, lynx->id, "cannot reset bus because reading the PHY register failed");
707 retval = -1;
708 break;
709 }
710 phy_reg |= 0xc0;
711
712 PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, force_root set) on request");
713
714 lynx->selfid_size = -1;
715 lynx->phy_reg0 = -1;
716 set_phy_reg(lynx, 1, phy_reg); /* set IBR and RHB */
717 break;
718 default:
719 PRINT(KERN_ERR, lynx->id, "unknown argument for reset_bus command %d", arg);
720 retval = -1;
721 }
722
723 break;
724
725 case GET_CYCLE_COUNTER:
726 retval = reg_read(lynx, CYCLE_TIMER);
727 break;
728
729 case SET_CYCLE_COUNTER:
730 reg_write(lynx, CYCLE_TIMER, arg);
731 break;
732
733 case SET_BUS_ID:
734 reg_write(lynx, LINK_ID,
735 (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000));
736 break;
737
738 case ACT_CYCLE_MASTER:
739 if (arg) {
740 reg_set_bits(lynx, LINK_CONTROL,
741 LINK_CONTROL_CYCMASTER);
742 } else {
743 reg_clear_bits(lynx, LINK_CONTROL,
744 LINK_CONTROL_CYCMASTER);
745 }
746 break;
747
748 case CANCEL_REQUESTS:
749 spin_lock_irqsave(&lynx->async.queue_lock, flags);
750
751 reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
752 list_splice(&lynx->async.queue, &packet_list);
753 INIT_LIST_HEAD(&lynx->async.queue);
754
755 if (list_empty(&lynx->async.pcl_queue)) {
756 spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
757 PRINTD(KERN_DEBUG, lynx->id, "no async packet in PCL to cancel");
758 } else {
759 struct ti_pcl pcl;
760 u32 ack;
761 struct hpsb_packet *packet;
762
 763 PRINT(KERN_INFO, lynx->id, "cancelling async packet that was already in PCL");
764
765 get_pcl(lynx, lynx->async.pcl, &pcl);
766
767 packet = driver_packet(lynx->async.pcl_queue.next);
768 list_del_init(&packet->driver_list);
769
770 pci_unmap_single(lynx->dev, lynx->async.header_dma,
771 packet->header_size, PCI_DMA_TODEVICE);
772 if (packet->data_size) {
773 pci_unmap_single(lynx->dev, lynx->async.data_dma,
774 packet->data_size, PCI_DMA_TODEVICE);
775 }
776
777 spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
778
779 if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
780 if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
781 ack = (pcl.pcl_status >> 15) & 0xf;
782 PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
783 ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
784 } else {
785 ack = (pcl.pcl_status >> 15) & 0xf;
786 }
787 } else {
788 PRINT(KERN_INFO, lynx->id, "async packet was not completed");
789 ack = ACKX_ABORTED;
790 }
791 hpsb_packet_sent(host, packet, ack);
792 }
793
794 while (!list_empty(&packet_list)) {
795 packet = driver_packet(packet_list.next);
796 list_del_init(&packet->driver_list);
797 hpsb_packet_sent(host, packet, ACKX_ABORTED);
798 }
799
800 break;
801
802 case ISO_LISTEN_CHANNEL:
803 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
804
805 if (lynx->iso_rcv.chan_count++ == 0) {
806 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
807 DMA_WORD1_CMP_ENABLE_MASTER);
808 }
809
810 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
811 break;
812
813 case ISO_UNLISTEN_CHANNEL:
814 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
815
816 if (--lynx->iso_rcv.chan_count == 0) {
817 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
818 0);
819 }
820
821 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
822 break;
823
824 default:
825 PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
826 retval = -1;
827 }
828
829 return retval;
830}
831
832
833/***************************************
834 * IEEE-1394 functionality section END *
835 ***************************************/
836
837#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
838/* VFS functions for local bus / aux device access. Access to these is
839 * implemented as character devices instead of block devices because no
840 * buffering is wanted for this. Therefore llseek (from VFS) can be used
841 * for these char devices with the obvious effects.
842 */
843static int mem_open(struct inode*, struct file*);
844static int mem_release(struct inode*, struct file*);
845static unsigned int aux_poll(struct file*, struct poll_table_struct*);
846static loff_t mem_llseek(struct file*, loff_t, int);
847static ssize_t mem_read (struct file*, char*, size_t, loff_t*);
848static ssize_t mem_write(struct file*, const char*, size_t, loff_t*);
849
850
851static struct file_operations aux_ops = {
852 .owner = THIS_MODULE,
853 .read = mem_read,
854 .write = mem_write,
855 .poll = aux_poll,
856 .llseek = mem_llseek,
857 .open = mem_open,
858 .release = mem_release,
859};
860
861
862static void aux_setup_pcls(struct ti_lynx *lynx)
863{
864 struct ti_pcl pcl;
865
866 pcl.next = PCL_NEXT_INVALID;
867 pcl.user_data = pcl_bus(lynx, lynx->dmem_pcl);
868 put_pcl(lynx, lynx->dmem_pcl, &pcl);
869}
870
871static int mem_open(struct inode *inode, struct file *file)
872{
873 int cid = iminor(inode);
874 enum { t_rom, t_aux, t_ram } type;
875 struct memdata *md;
876
877 if (cid < PCILYNX_MINOR_AUX_START) {
878 /* just for completeness */
879 return -ENXIO;
880 } else if (cid < PCILYNX_MINOR_ROM_START) {
881 cid -= PCILYNX_MINOR_AUX_START;
882 if (cid >= num_of_cards || !cards[cid].aux_port)
883 return -ENXIO;
884 type = t_aux;
885 } else if (cid < PCILYNX_MINOR_RAM_START) {
886 cid -= PCILYNX_MINOR_ROM_START;
887 if (cid >= num_of_cards || !cards[cid].local_rom)
888 return -ENXIO;
889 type = t_rom;
890 } else {
891 /* WARNING: Know what you are doing when opening RAM.
892 * It is currently used inside the driver! */
893 cid -= PCILYNX_MINOR_RAM_START;
894 if (cid >= num_of_cards || !cards[cid].local_ram)
895 return -ENXIO;
896 type = t_ram;
897 }
898
899 md = (struct memdata *)kmalloc(sizeof(struct memdata), SLAB_KERNEL);
900 if (md == NULL)
901 return -ENOMEM;
902
903 md->lynx = &cards[cid];
904 md->cid = cid;
905
906 switch (type) {
907 case t_rom:
908 md->type = rom;
909 break;
910 case t_ram:
911 md->type = ram;
912 break;
913 case t_aux:
914 atomic_set(&md->aux_intr_last_seen,
915 atomic_read(&cards[cid].aux_intr_seen));
916 md->type = aux;
917 break;
918 }
919
920 file->private_data = md;
921
922 return 0;
923}
924
925static int mem_release(struct inode *inode, struct file *file)
926{
927 kfree(file->private_data);
928 return 0;
929}
930
931static unsigned int aux_poll(struct file *file, poll_table *pt)
932{
933 struct memdata *md = (struct memdata *)file->private_data;
934 int cid = md->cid;
935 unsigned int mask;
936
937 /* reading and writing is always allowed */
938 mask = POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
939
940 if (md->type == aux) {
941 poll_wait(file, &cards[cid].aux_intr_wait, pt);
942
943 if (atomic_read(&md->aux_intr_last_seen)
944 != atomic_read(&cards[cid].aux_intr_seen)) {
945 mask |= POLLPRI;
946 atomic_inc(&md->aux_intr_last_seen);
947 }
948 }
949
950 return mask;
951}
952
953loff_t mem_llseek(struct file *file, loff_t offs, int orig)
954{
955 loff_t newoffs;
956
957 switch (orig) {
958 case 0:
959 newoffs = offs;
960 break;
961 case 1:
962 newoffs = offs + file->f_pos;
963 break;
964 case 2:
965 newoffs = PCILYNX_MAX_MEMORY + 1 + offs;
966 break;
967 default:
968 return -EINVAL;
969 }
970
971 if (newoffs < 0 || newoffs > PCILYNX_MAX_MEMORY + 1) return -EINVAL;
972
973 file->f_pos = newoffs;
974 return newoffs;
975}
976
977/*
978 * do not DMA if count is too small because this will have a serious impact
979 * on performance - the value 2400 was found by experiment and may not work
 980 * as well everywhere as it does here - use the mem_mindma module option to change it
981 */
982static short mem_mindma = 2400;
983module_param(mem_mindma, short, 0444);
984MODULE_PARM_DESC(mem_mindma, "Minimum amount of data required to use DMA");
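/* For example (not part of the original source), "modprobe pcilynx
 * mem_mindma=4096" makes reads shorter than 4096 bytes use memcpy_fromio()
 * instead of local bus DMA (see mem_read() below). */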
985
986static ssize_t mem_dmaread(struct memdata *md, u32 physbuf, ssize_t count,
987 int offset)
988{
989 pcltmp_t pcltmp;
990 struct ti_pcl *pcl;
991 size_t retval;
992 int i;
993 DECLARE_WAITQUEUE(wait, current);
994
995 count &= ~3;
996 count = min(count, 53196);
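	/* 53196 = 13 PCL buffer descriptors * 4092 bytes each, the most that
	 * one PCL program can transfer in a single run */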
997 retval = count;
998
999 if (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
1000 & DMA_CHAN_CTRL_BUSY) {
1001 PRINT(KERN_WARNING, md->lynx->id, "DMA ALREADY ACTIVE!");
1002 }
1003
1004 reg_write(md->lynx, LBUS_ADDR, md->type | offset);
1005
1006 pcl = edit_pcl(md->lynx, md->lynx->dmem_pcl, &pcltmp);
1007 pcl->buffer[0].control = PCL_CMD_LBUS_TO_PCI | min(count, 4092);
1008 pcl->buffer[0].pointer = physbuf;
1009 count -= 4092;
1010
1011 i = 0;
1012 while (count > 0) {
1013 i++;
1014 pcl->buffer[i].control = min(count, 4092);
1015 pcl->buffer[i].pointer = physbuf + i * 4092;
1016 count -= 4092;
1017 }
1018 pcl->buffer[i].control |= PCL_LAST_BUFF;
1019 commit_pcl(md->lynx, md->lynx->dmem_pcl, &pcltmp);
1020
1021 set_current_state(TASK_INTERRUPTIBLE);
1022 add_wait_queue(&md->lynx->mem_dma_intr_wait, &wait);
1023 run_sub_pcl(md->lynx, md->lynx->dmem_pcl, 2, CHANNEL_LOCALBUS);
1024
1025 schedule();
1026 while (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
1027 & DMA_CHAN_CTRL_BUSY) {
1028 if (signal_pending(current)) {
1029 retval = -EINTR;
1030 break;
1031 }
1032 schedule();
1033 }
1034
1035 reg_write(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS), 0);
1036 remove_wait_queue(&md->lynx->mem_dma_intr_wait, &wait);
1037
1038 if (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
1039 & DMA_CHAN_CTRL_BUSY) {
1040 PRINT(KERN_ERR, md->lynx->id, "DMA STILL ACTIVE!");
1041 }
1042
1043 return retval;
1044}
1045
1046static ssize_t mem_read(struct file *file, char *buffer, size_t count,
1047 loff_t *offset)
1048{
1049 struct memdata *md = (struct memdata *)file->private_data;
1050 ssize_t bcount;
1051 size_t alignfix;
1052 loff_t off = *offset; /* avoid useless 64bit-arithmetic */
1053 ssize_t retval;
1054 void *membase;
1055
1056 if ((off + count) > PCILYNX_MAX_MEMORY+1) {
1057 count = PCILYNX_MAX_MEMORY+1 - off;
1058 }
1059 if (count == 0 || off > PCILYNX_MAX_MEMORY) {
1060 return -ENOSPC;
1061 }
1062
1063 switch (md->type) {
1064 case rom:
1065 membase = md->lynx->local_rom;
1066 break;
1067 case ram:
1068 membase = md->lynx->local_ram;
1069 break;
1070 case aux:
1071 membase = md->lynx->aux_port;
1072 break;
1073 default:
1074 panic("pcilynx%d: unsupported md->type %d in %s",
1075 md->lynx->id, md->type, __FUNCTION__);
1076 }
1077
1078 down(&md->lynx->mem_dma_mutex);
1079
1080 if (count < mem_mindma) {
1081 memcpy_fromio(md->lynx->mem_dma_buffer, membase+off, count);
1082 goto out;
1083 }
1084
1085 bcount = count;
1086 alignfix = 4 - (off % 4);
1087 if (alignfix != 4) {
1088 if (bcount < alignfix) {
1089 alignfix = bcount;
1090 }
1091 memcpy_fromio(md->lynx->mem_dma_buffer, membase+off,
1092 alignfix);
1093 if (bcount == alignfix) {
1094 goto out;
1095 }
1096 bcount -= alignfix;
1097 off += alignfix;
1098 }
1099
1100 while (bcount >= 4) {
1101 retval = mem_dmaread(md, md->lynx->mem_dma_buffer_dma
1102 + count - bcount, bcount, off);
 1103 if (retval < 0) { up(&md->lynx->mem_dma_mutex); return retval; }
1104
1105 bcount -= retval;
1106 off += retval;
1107 }
1108
1109 if (bcount) {
1110 memcpy_fromio(md->lynx->mem_dma_buffer + count - bcount,
1111 membase+off, bcount);
1112 }
1113
1114 out:
1115 retval = copy_to_user(buffer, md->lynx->mem_dma_buffer, count);
1116 up(&md->lynx->mem_dma_mutex);
1117
1118 if (retval) return -EFAULT;
1119 *offset += count;
1120 return count;
1121}
1122
1123
1124static ssize_t mem_write(struct file *file, const char *buffer, size_t count,
1125 loff_t *offset)
1126{
1127 struct memdata *md = (struct memdata *)file->private_data;
1128
1129 if (((*offset) + count) > PCILYNX_MAX_MEMORY+1) {
1130 count = PCILYNX_MAX_MEMORY+1 - *offset;
1131 }
1132 if (count == 0 || *offset > PCILYNX_MAX_MEMORY) {
1133 return -ENOSPC;
1134 }
1135
1136 /* FIXME: dereferencing pointers to PCI mem doesn't work everywhere */
1137 switch (md->type) {
1138 case aux:
1139 if (copy_from_user(md->lynx->aux_port+(*offset), buffer, count))
1140 return -EFAULT;
1141 break;
1142 case ram:
1143 if (copy_from_user(md->lynx->local_ram+(*offset), buffer, count))
1144 return -EFAULT;
1145 break;
1146 case rom:
1147 /* the ROM may be writeable */
1148 if (copy_from_user(md->lynx->local_rom+(*offset), buffer, count))
1149 return -EFAULT;
1150 break;
1151 }
1152
1153 file->f_pos += count;
1154 return count;
1155}
1156#endif /* CONFIG_IEEE1394_PCILYNX_PORTS */
1157
1158
1159/********************************************************
1160 * Global stuff (interrupt handler, init/shutdown code) *
1161 ********************************************************/
1162
1163
1164static irqreturn_t lynx_irq_handler(int irq, void *dev_id,
1165 struct pt_regs *regs_are_unused)
1166{
1167 struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
1168 struct hpsb_host *host = lynx->host;
1169 u32 intmask;
1170 u32 linkint;
1171
1172 linkint = reg_read(lynx, LINK_INT_STATUS);
1173 intmask = reg_read(lynx, PCI_INT_STATUS);
1174
1175 if (!(intmask & PCI_INT_INT_PEND))
1176 return IRQ_NONE;
1177
1178 PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
1179 linkint);
1180
1181 reg_write(lynx, LINK_INT_STATUS, linkint);
1182 reg_write(lynx, PCI_INT_STATUS, intmask);
1183
1184#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1185 if (intmask & PCI_INT_AUX_INT) {
1186 atomic_inc(&lynx->aux_intr_seen);
1187 wake_up_interruptible(&lynx->aux_intr_wait);
1188 }
1189
1190 if (intmask & PCI_INT_DMA_HLT(CHANNEL_LOCALBUS)) {
1191 wake_up_interruptible(&lynx->mem_dma_intr_wait);
1192 }
1193#endif
1194
1195
1196 if (intmask & PCI_INT_1394) {
1197 if (linkint & LINK_INT_PHY_TIMEOUT) {
1198 PRINT(KERN_INFO, lynx->id, "PHY timeout occurred");
1199 }
1200 if (linkint & LINK_INT_PHY_BUSRESET) {
1201 PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
1202 lynx->selfid_size = -1;
1203 lynx->phy_reg0 = -1;
1204 if (!host->in_bus_reset)
1205 hpsb_bus_reset(host);
1206 }
1207 if (linkint & LINK_INT_PHY_REG_RCVD) {
1208 u32 reg;
1209
1210 spin_lock(&lynx->phy_reg_lock);
1211 reg = reg_read(lynx, LINK_PHY);
1212 spin_unlock(&lynx->phy_reg_lock);
1213
1214 if (!host->in_bus_reset) {
1215 PRINT(KERN_INFO, lynx->id,
1216 "phy reg received without reset");
1217 } else if (reg & 0xf00) {
1218 PRINT(KERN_INFO, lynx->id,
1219 "unsolicited phy reg %d received",
1220 (reg >> 8) & 0xf);
1221 } else {
1222 lynx->phy_reg0 = reg & 0xff;
1223 handle_selfid(lynx, host);
1224 }
1225 }
1226 if (linkint & LINK_INT_ISO_STUCK) {
1227 PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
1228 }
1229 if (linkint & LINK_INT_ASYNC_STUCK) {
1230 PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
1231 }
1232 if (linkint & LINK_INT_SENT_REJECT) {
1233 PRINT(KERN_INFO, lynx->id, "sent reject");
1234 }
1235 if (linkint & LINK_INT_TX_INVALID_TC) {
1236 PRINT(KERN_INFO, lynx->id, "invalid transaction code");
1237 }
1238 if (linkint & LINK_INT_GRF_OVERFLOW) {
1239 /* flush FIFO if overflow happens during reset */
1240 if (host->in_bus_reset)
1241 reg_write(lynx, FIFO_CONTROL,
1242 FIFO_CONTROL_GRF_FLUSH);
1243 PRINT(KERN_INFO, lynx->id, "GRF overflow");
1244 }
1245 if (linkint & LINK_INT_ITF_UNDERFLOW) {
1246 PRINT(KERN_INFO, lynx->id, "ITF underflow");
1247 }
1248 if (linkint & LINK_INT_ATF_UNDERFLOW) {
1249 PRINT(KERN_INFO, lynx->id, "ATF underflow");
1250 }
1251 }
1252
1253 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
1254 PRINTD(KERN_DEBUG, lynx->id, "iso receive");
1255
1256 spin_lock(&lynx->iso_rcv.lock);
1257
1258 lynx->iso_rcv.stat[lynx->iso_rcv.next] =
1259 reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));
1260
1261 lynx->iso_rcv.used++;
1262 lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;
1263
1264 if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
1265 || !lynx->iso_rcv.chan_count) {
1266 PRINTD(KERN_DEBUG, lynx->id, "stopped");
1267 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
1268 }
1269
1270 run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
1271 CHANNEL_ISO_RCV);
1272
1273 spin_unlock(&lynx->iso_rcv.lock);
1274
1275 tasklet_schedule(&lynx->iso_rcv.tq);
1276 }
1277
1278 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
1279 PRINTD(KERN_DEBUG, lynx->id, "async sent");
1280 spin_lock(&lynx->async.queue_lock);
1281
1282 if (list_empty(&lynx->async.pcl_queue)) {
1283 spin_unlock(&lynx->async.queue_lock);
1284 PRINT(KERN_WARNING, lynx->id, "async dma halted, but no queued packet (maybe it was cancelled)");
1285 } else {
1286 struct ti_pcl pcl;
1287 u32 ack;
1288 struct hpsb_packet *packet;
1289
1290 get_pcl(lynx, lynx->async.pcl, &pcl);
1291
1292 packet = driver_packet(lynx->async.pcl_queue.next);
1293 list_del_init(&packet->driver_list);
1294
1295 pci_unmap_single(lynx->dev, lynx->async.header_dma,
1296 packet->header_size, PCI_DMA_TODEVICE);
1297 if (packet->data_size) {
1298 pci_unmap_single(lynx->dev, lynx->async.data_dma,
1299 packet->data_size, PCI_DMA_TODEVICE);
1300 }
1301
1302 if (!list_empty(&lynx->async.queue)) {
1303 send_next(lynx, hpsb_async);
1304 }
1305
1306 spin_unlock(&lynx->async.queue_lock);
1307
1308 if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
1309 if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
1310 ack = (pcl.pcl_status >> 15) & 0xf;
1311 PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
1312 ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
1313 } else {
1314 ack = (pcl.pcl_status >> 15) & 0xf;
1315 }
1316 } else {
1317 PRINT(KERN_INFO, lynx->id, "async packet was not completed");
1318 ack = ACKX_SEND_ERROR;
1319 }
1320 hpsb_packet_sent(host, packet, ack);
1321 }
1322 }
1323
1324 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
1325 PRINTD(KERN_DEBUG, lynx->id, "iso sent");
1326 spin_lock(&lynx->iso_send.queue_lock);
1327
1328 if (list_empty(&lynx->iso_send.pcl_queue)) {
1329 spin_unlock(&lynx->iso_send.queue_lock);
1330 PRINT(KERN_ERR, lynx->id, "iso send dma halted, but no queued packet");
1331 } else {
1332 struct ti_pcl pcl;
1333 u32 ack;
1334 struct hpsb_packet *packet;
1335
1336 get_pcl(lynx, lynx->iso_send.pcl, &pcl);
1337
1338 packet = driver_packet(lynx->iso_send.pcl_queue.next);
1339 list_del_init(&packet->driver_list);
1340
1341 pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
1342 packet->header_size, PCI_DMA_TODEVICE);
1343 if (packet->data_size) {
1344 pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
1345 packet->data_size, PCI_DMA_TODEVICE);
1346 }
1347
1348 if (!list_empty(&lynx->iso_send.queue)) {
1349 send_next(lynx, hpsb_iso);
1350 }
1351
1352 spin_unlock(&lynx->iso_send.queue_lock);
1353
1354 if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
1355 if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
1356 ack = (pcl.pcl_status >> 15) & 0xf;
1357 PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
1358 ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
1359 } else {
1360 ack = (pcl.pcl_status >> 15) & 0xf;
1361 }
1362 } else {
1363 PRINT(KERN_INFO, lynx->id, "iso send packet was not completed");
1364 ack = ACKX_SEND_ERROR;
1365 }
1366
1367 hpsb_packet_sent(host, packet, ack); //FIXME: maybe we should just use ACK_COMPLETE and ACKX_SEND_ERROR
1368 }
1369 }
1370
1371 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
1372 /* general receive DMA completed */
1373 int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));
1374
1375 PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
1376 stat & 0x1fff);
1377
1378 if (stat & DMA_CHAN_STAT_SELFID) {
1379 lynx->selfid_size = stat & 0x1fff;
1380 handle_selfid(lynx, host);
1381 } else {
1382 quadlet_t *q_data = lynx->rcv_page;
1383 if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
1384 || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
1385 cpu_to_be32s(q_data + 3);
1386 }
1387 hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
1388 }
1389
1390 run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
1391 }
1392
1393 return IRQ_HANDLED;
1394}
1395
1396
1397static void iso_rcv_bh(struct ti_lynx *lynx)
1398{
1399 unsigned int idx;
1400 quadlet_t *data;
1401 unsigned long flags;
1402
1403 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
1404
1405 while (lynx->iso_rcv.used) {
1406 idx = lynx->iso_rcv.last;
1407 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
1408
1409 data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
1410 + (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;
1411
1412 if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
1413 PRINT(KERN_ERR, lynx->id,
1414 "iso length mismatch 0x%08x/0x%08x", *data,
1415 lynx->iso_rcv.stat[idx]);
1416 }
1417
1418 if (lynx->iso_rcv.stat[idx]
1419 & (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
1420 PRINT(KERN_INFO, lynx->id,
1421 "iso receive error on %d to 0x%p", idx, data);
1422 } else {
1423 hpsb_packet_received(lynx->host, data,
1424 lynx->iso_rcv.stat[idx] & 0x1fff,
1425 0);
1426 }
1427
1428 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
1429 lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
1430 lynx->iso_rcv.used--;
1431 }
1432
1433 if (lynx->iso_rcv.chan_count) {
1434 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
1435 DMA_WORD1_CMP_ENABLE_MASTER);
1436 }
1437 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
1438}
1439
1440
1441static void remove_card(struct pci_dev *dev)
1442{
1443 struct ti_lynx *lynx;
1444 struct device *lynx_dev;
1445 int i;
1446
1447 lynx = pci_get_drvdata(dev);
1448 if (!lynx) return;
1449 pci_set_drvdata(dev, NULL);
1450
1451 lynx_dev = get_device(&lynx->host->device);
1452
1453 switch (lynx->state) {
1454 case is_host:
1455 reg_write(lynx, PCI_INT_ENABLE, 0);
1456 hpsb_remove_host(lynx->host);
1457 case have_intr:
1458 reg_write(lynx, PCI_INT_ENABLE, 0);
1459 free_irq(lynx->dev->irq, lynx);
1460
1461 /* Disable IRM Contender and LCtrl */
1462 if (lynx->phyic.reg_1394a)
1463 set_phy_reg(lynx, 4, ~0xc0 & get_phy_reg(lynx, 4));
1464
1465 /* Let all other nodes know to ignore us */
1466 lynx_devctl(lynx->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
1467
1468 case have_iomappings:
1469 reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
1470 /* Fix buggy cards with autoboot pin not tied low: */
1471 reg_write(lynx, DMA0_CHAN_CTRL, 0);
1472 iounmap(lynx->registers);
1473 iounmap(lynx->local_rom);
1474 iounmap(lynx->local_ram);
1475 iounmap(lynx->aux_port);
1476 case have_1394_buffers:
1477 for (i = 0; i < ISORCV_PAGES; i++) {
1478 if (lynx->iso_rcv.page[i]) {
1479 pci_free_consistent(lynx->dev, PAGE_SIZE,
1480 lynx->iso_rcv.page[i],
1481 lynx->iso_rcv.page_dma[i]);
1482 }
1483 }
1484 pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
1485 lynx->rcv_page_dma);
1486 case have_aux_buf:
1487#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1488 pci_free_consistent(lynx->dev, 65536, lynx->mem_dma_buffer,
1489 lynx->mem_dma_buffer_dma);
1490#endif
1491 case have_pcl_mem:
1492#ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
1493 pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
1494 lynx->pcl_mem_dma);
1495#endif
1496 case clear:
1497 /* do nothing - already freed */
1498 ;
1499 }
1500
1501 tasklet_kill(&lynx->iso_rcv.tq);
1502
1503 if (lynx_dev)
1504 put_device(lynx_dev);
1505}
1506
1507
1508static int __devinit add_card(struct pci_dev *dev,
1509 const struct pci_device_id *devid_is_unused)
1510{
1511#define FAIL(fmt, args...) do { \
1512 PRINT_G(KERN_ERR, fmt , ## args); \
1513 remove_card(dev); \
1514 return error; \
1515 } while (0)
1516
1517 char irq_buf[16];
1518 struct hpsb_host *host;
1519 struct ti_lynx *lynx; /* shortcut to currently handled device */
1520 struct ti_pcl pcl;
1521 u32 *pcli;
1522 int i;
1523 int error;
1524
1525 error = -ENXIO;
1526
1527 if (pci_set_dma_mask(dev, 0xffffffff))
1528 FAIL("DMA address limits not supported for PCILynx hardware");
1529 if (pci_enable_device(dev))
1530 FAIL("failed to enable PCILynx hardware");
1531 pci_set_master(dev);
1532
1533 error = -ENOMEM;
1534
1535 host = hpsb_alloc_host(&lynx_driver, sizeof(struct ti_lynx), &dev->dev);
1536 if (!host) FAIL("failed to allocate control structure memory");
1537
1538 lynx = host->hostdata;
1539 lynx->id = card_id++;
1540 lynx->dev = dev;
1541 lynx->state = clear;
1542 lynx->host = host;
1543 host->pdev = dev;
1544 pci_set_drvdata(dev, lynx);
1545
1546 spin_lock_init(&lynx->lock);
1547 spin_lock_init(&lynx->phy_reg_lock);
1548
1549#ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
1550 lynx->pcl_mem = pci_alloc_consistent(dev, LOCALRAM_SIZE,
1551 &lynx->pcl_mem_dma);
1552
1553 if (lynx->pcl_mem != NULL) {
1554 lynx->state = have_pcl_mem;
1555 PRINT(KERN_INFO, lynx->id,
1556 "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
1557 lynx->pcl_mem);
1558 } else {
1559 FAIL("failed to allocate PCL memory area");
1560 }
1561#endif
1562
1563#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1564 lynx->mem_dma_buffer = pci_alloc_consistent(dev, 65536,
1565 &lynx->mem_dma_buffer_dma);
1566 if (lynx->mem_dma_buffer == NULL) {
1567 FAIL("failed to allocate DMA buffer for aux");
1568 }
1569 lynx->state = have_aux_buf;
1570#endif
1571
1572 lynx->rcv_page = pci_alloc_consistent(dev, PAGE_SIZE,
1573 &lynx->rcv_page_dma);
1574 if (lynx->rcv_page == NULL) {
1575 FAIL("failed to allocate receive buffer");
1576 }
1577 lynx->state = have_1394_buffers;
1578
1579 for (i = 0; i < ISORCV_PAGES; i++) {
1580 lynx->iso_rcv.page[i] =
1581 pci_alloc_consistent(dev, PAGE_SIZE,
1582 &lynx->iso_rcv.page_dma[i]);
1583 if (lynx->iso_rcv.page[i] == NULL) {
1584 FAIL("failed to allocate iso receive buffers");
1585 }
1586 }
1587
1588 lynx->registers = ioremap_nocache(pci_resource_start(dev,0),
1589 PCILYNX_MAX_REGISTER);
1590 lynx->local_ram = ioremap(pci_resource_start(dev,1), PCILYNX_MAX_MEMORY);
1591 lynx->aux_port = ioremap(pci_resource_start(dev,2), PCILYNX_MAX_MEMORY);
1592 lynx->local_rom = ioremap(pci_resource_start(dev,PCI_ROM_RESOURCE),
1593 PCILYNX_MAX_MEMORY);
1594 lynx->state = have_iomappings;
1595
1596 if (lynx->registers == NULL) {
1597 FAIL("failed to remap registers - card not accessible");
1598 }
1599
1600#ifdef CONFIG_IEEE1394_PCILYNX_LOCALRAM
1601 if (lynx->local_ram == NULL) {
1602 FAIL("failed to remap local RAM which is required for "
1603 "operation");
1604 }
1605#endif
1606
1607 reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
1608 /* Fix buggy cards with autoboot pin not tied low: */
1609 reg_write(lynx, DMA0_CHAN_CTRL, 0);
1610
1611#ifndef __sparc__
1612 sprintf (irq_buf, "%d", dev->irq);
1613#else
1614 sprintf (irq_buf, "%s", __irq_itoa(dev->irq));
1615#endif
1616
1617 if (!request_irq(dev->irq, lynx_irq_handler, SA_SHIRQ,
1618 PCILYNX_DRIVER_NAME, lynx)) {
1619 PRINT(KERN_INFO, lynx->id, "allocated interrupt %s", irq_buf);
1620 lynx->state = have_intr;
1621 } else {
1622 FAIL("failed to allocate shared interrupt %s", irq_buf);
1623 }
1624
 1625 /* alloc_pcl return values are not checked; it is expected that the
1626 * provided PCL space is sufficient for the initial allocations */
1627#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1628 if (lynx->aux_port != NULL) {
1629 lynx->dmem_pcl = alloc_pcl(lynx);
1630 aux_setup_pcls(lynx);
1631 sema_init(&lynx->mem_dma_mutex, 1);
1632 }
1633#endif
1634 lynx->rcv_pcl = alloc_pcl(lynx);
1635 lynx->rcv_pcl_start = alloc_pcl(lynx);
1636 lynx->async.pcl = alloc_pcl(lynx);
1637 lynx->async.pcl_start = alloc_pcl(lynx);
1638 lynx->iso_send.pcl = alloc_pcl(lynx);
1639 lynx->iso_send.pcl_start = alloc_pcl(lynx);
1640
1641 for (i = 0; i < NUM_ISORCV_PCL; i++) {
1642 lynx->iso_rcv.pcl[i] = alloc_pcl(lynx);
1643 }
1644 lynx->iso_rcv.pcl_start = alloc_pcl(lynx);
1645
1646 /* all allocations successful - simple init stuff follows */
1647
1648 reg_write(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
1649
1650#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1651 reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_AUX_INT);
1652 init_waitqueue_head(&lynx->mem_dma_intr_wait);
1653 init_waitqueue_head(&lynx->aux_intr_wait);
1654#endif
1655
1656 tasklet_init(&lynx->iso_rcv.tq, (void (*)(unsigned long))iso_rcv_bh,
1657 (unsigned long)lynx);
1658
1659 spin_lock_init(&lynx->iso_rcv.lock);
1660
1661 spin_lock_init(&lynx->async.queue_lock);
1662 lynx->async.channel = CHANNEL_ASYNC_SEND;
1663 spin_lock_init(&lynx->iso_send.queue_lock);
1664 lynx->iso_send.channel = CHANNEL_ISO_SEND;
1665
1666 PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
1667 "ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
1668 lynx->local_ram, lynx->aux_port);
1669
1670 /* now, looking for PHY register set */
1671 if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
1672 lynx->phyic.reg_1394a = 1;
1673 PRINT(KERN_INFO, lynx->id,
1674 "found 1394a conform PHY (using extended register set)");
1675 lynx->phyic.vendor = get_phy_vendorid(lynx);
1676 lynx->phyic.product = get_phy_productid(lynx);
1677 } else {
1678 lynx->phyic.reg_1394a = 0;
1679 PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
1680 }
1681
1682 lynx->selfid_size = -1;
1683 lynx->phy_reg0 = -1;
1684
1685 INIT_LIST_HEAD(&lynx->async.queue);
1686 INIT_LIST_HEAD(&lynx->async.pcl_queue);
1687 INIT_LIST_HEAD(&lynx->iso_send.queue);
1688 INIT_LIST_HEAD(&lynx->iso_send.pcl_queue);
1689
1690 pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
1691 put_pcl(lynx, lynx->rcv_pcl_start, &pcl);
1692
1693 pcl.next = PCL_NEXT_INVALID;
1694 pcl.async_error_next = PCL_NEXT_INVALID;
1695
1696 pcl.buffer[0].control = PCL_CMD_RCV | 16;
1697#ifndef __BIG_ENDIAN
1698 pcl.buffer[0].control |= PCL_BIGENDIAN;
1699#endif
1700 pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
1701
1702 pcl.buffer[0].pointer = lynx->rcv_page_dma;
1703 pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
1704 put_pcl(lynx, lynx->rcv_pcl, &pcl);
1705
1706 pcl.next = pcl_bus(lynx, lynx->async.pcl);
1707 pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
1708 put_pcl(lynx, lynx->async.pcl_start, &pcl);
1709
1710 pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
1711 pcl.async_error_next = PCL_NEXT_INVALID;
1712 put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);
1713
1714 pcl.next = PCL_NEXT_INVALID;
1715 pcl.async_error_next = PCL_NEXT_INVALID;
1716 pcl.buffer[0].control = PCL_CMD_RCV | 4;
1717#ifndef __BIG_ENDIAN
1718 pcl.buffer[0].control |= PCL_BIGENDIAN;
1719#endif
1720 pcl.buffer[1].control = PCL_LAST_BUFF | 2044;
1721
1722 for (i = 0; i < NUM_ISORCV_PCL; i++) {
1723 int page = i / ISORCV_PER_PAGE;
1724 int sec = i % ISORCV_PER_PAGE;
1725
1726 pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page]
1727 + sec * MAX_ISORCV_SIZE;
1728 pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
1729 put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
1730 }
1731
1732 pcli = (u32 *)&pcl;
1733 for (i = 0; i < NUM_ISORCV_PCL; i++) {
1734 pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
1735 }
1736 put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);
1737
1738 /* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
1739 reg_write(lynx, FIFO_SIZES, 0x003030a0);
1740 /* 20 byte threshold before triggering PCI transfer */
1741 reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
1742 /* threshold on both send FIFOs before transmitting:
1743 FIFO size - cache line size - 1 */
1744 i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
1745 i = 0x30 - i - 1;
1746 reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);
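	/* e.g. with a PCI cache line size of 8, the threshold is
	 * 0x30 - 8 - 1 = 0x27, written into both bytes of FIFO_XMIT_THRESHOLD */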
1747
1748 reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);
1749
1750 reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
1751 | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET
1752 | LINK_INT_ISO_STUCK | LINK_INT_ASYNC_STUCK
1753 | LINK_INT_SENT_REJECT | LINK_INT_TX_INVALID_TC
1754 | LINK_INT_GRF_OVERFLOW | LINK_INT_ITF_UNDERFLOW
1755 | LINK_INT_ATF_UNDERFLOW);
1756
1757 reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
1758 reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
1759 reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
1760 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
1761 DMA_WORD1_CMP_MATCH_LOCAL_NODE | DMA_WORD1_CMP_MATCH_BROADCAST
1762 | DMA_WORD1_CMP_MATCH_EXACT | DMA_WORD1_CMP_MATCH_BUS_BCAST
1763 | DMA_WORD1_CMP_ENABLE_SELF_ID | DMA_WORD1_CMP_ENABLE_MASTER);
1764
1765 run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
1766
1767 reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
1768 reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
1769 reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
1770 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
1771
1772 run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);
1773
1774 reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
1775 | LINK_CONTROL_TX_ISO_EN | LINK_CONTROL_RX_ISO_EN
1776 | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
1777 | LINK_CONTROL_RESET_TX | LINK_CONTROL_RESET_RX);
1778
1779 if (!lynx->phyic.reg_1394a) {
1780 if (!hpsb_disable_irm) {
1781 /* attempt to enable contender bit -FIXME- would this
1782 * work elsewhere? */
1783 reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
1784 reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1);
1785 }
1786 } else {
1787 /* set the contender (if appropriate) and LCtrl bit in the
1788 * extended PHY register set. (Should check that PHY_02_EXTENDED
1789 * is set in register 2?)
1790 */
1791 i = get_phy_reg(lynx, 4);
1792 i |= PHY_04_LCTRL;
1793 if (hpsb_disable_irm)
 1794 i &= ~PHY_04_CONTENDER;
1795 else
1796 i |= PHY_04_CONTENDER;
1797 if (i != -1) set_phy_reg(lynx, 4, i);
1798 }
1799
1800 if (!skip_eeprom)
1801 {
1802 /* needed for i2c communication with serial eeprom */
1803 struct i2c_adapter *i2c_ad;
1804 struct i2c_algo_bit_data i2c_adapter_data;
1805
1806 error = -ENOMEM;
1807 i2c_ad = kmalloc(sizeof(struct i2c_adapter), SLAB_KERNEL);
1808 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
1809
1810 memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
1811 i2c_adapter_data = bit_data;
1812 i2c_ad->algo_data = &i2c_adapter_data;
1813 i2c_adapter_data.data = lynx;
1814
1815 PRINTD(KERN_DEBUG, lynx->id,"original eeprom control: %d",
1816 reg_read(lynx, SERIAL_EEPROM_CONTROL));
1817
1818 /* reset hardware to sane state */
1819 lynx->i2c_driven_state = 0x00000070;
1820 reg_write(lynx, SERIAL_EEPROM_CONTROL, lynx->i2c_driven_state);
1821
1822 if (i2c_bit_add_bus(i2c_ad) < 0)
1823 {
1824 kfree(i2c_ad);
1825 error = -ENXIO;
1826 FAIL("unable to register i2c");
1827 }
1828 else
1829 {
1830 /* do i2c stuff */
1831 unsigned char i2c_cmd = 0x10;
1832 struct i2c_msg msg[2] = { { 0x50, 0, 1, &i2c_cmd },
1833 { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
1834 };
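			/* The first message writes the one-byte EEPROM offset 0x10
			 * (presumably where the bus info block starts) to the device
			 * at i2c address 0x50; the second reads back 20 bytes, i.e.
			 * the five bus info block quadlets. */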
1835
1836
1837#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
1838 union i2c_smbus_data data;
1839
1840 if (i2c_smbus_xfer(i2c_ad, 80, 0, I2C_SMBUS_WRITE, 0, I2C_SMBUS_BYTE,NULL))
1841 PRINT(KERN_ERR, lynx->id,"eeprom read start has failed");
1842 else
1843 {
1844 u16 addr;
1845 for (addr=0x00; addr < 0x100; addr++) {
1846 if (i2c_smbus_xfer(i2c_ad, 80, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE,& data)) {
1847 PRINT(KERN_ERR, lynx->id, "unable to read i2c %x", addr);
1848 break;
1849 }
1850 else
1851 PRINT(KERN_DEBUG, lynx->id,"got serial eeprom data at %x: %x",addr, data.byte);
1852 }
1853 }
1854#endif
1855
 1856 /* we use i2c_transfer because i2c_smbus_read_block_data does not work properly, and
 1857 one transaction is more efficient than several separate reads */
1858 if (i2c_transfer(i2c_ad, msg, 2) < 0) {
1859 PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
1860 } else {
1861 int i;
1862
1863 PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
 1864 /* FIXME: probably we should rewrite the max_rec, max_ROM(1394a),
 1865 * generation(1394a) and link_spd(1394a) fields and recalculate
1866 * the CRC */
1867
1868 for (i = 0; i < 5 ; i++)
 1869 PRINTD(KERN_DEBUG, lynx->id, "Bus info block quadlet %i: %08x",
1870 i, be32_to_cpu(lynx->bus_info_block[i]));
1871
 1872 /* check info_length, crc_length and the 1394 magic number to verify that it is really a bus info block */
1873 if (((be32_to_cpu(lynx->bus_info_block[0]) & 0xffff0000) == 0x04040000) &&
1874 (lynx->bus_info_block[1] == __constant_cpu_to_be32(0x31333934)))
1875 {
 1876 PRINT(KERN_DEBUG, lynx->id, "read a valid bus info block from serial eeprom");
1877 } else {
 1878 i2c_bit_del_bus(i2c_ad); kfree(i2c_ad);
1879 error = -ENXIO;
1880 FAIL("read something from serial eeprom, but it does not seem to be a valid bus info block");
1881 }
1882
1883 }
1884
1885 i2c_bit_del_bus(i2c_ad);
1886 kfree(i2c_ad);
1887 }
1888 }
1889
1890 host->csr.guid_hi = be32_to_cpu(lynx->bus_info_block[3]);
1891 host->csr.guid_lo = be32_to_cpu(lynx->bus_info_block[4]);
1892 host->csr.cyc_clk_acc = (be32_to_cpu(lynx->bus_info_block[2]) >> 16) & 0xff;
1893 host->csr.max_rec = (be32_to_cpu(lynx->bus_info_block[2]) >> 12) & 0xf;
1894 if (!lynx->phyic.reg_1394a)
1895 host->csr.lnk_spd = (get_phy_reg(lynx, 2) & 0xc0) >> 6;
1896 else
1897 host->csr.lnk_spd = be32_to_cpu(lynx->bus_info_block[2]) & 0x7;
1898
1899 if (hpsb_add_host(host)) {
1900 error = -ENOMEM;
1901 FAIL("Failed to register host with highlevel");
1902 }
1903
1904 lynx->state = is_host;
1905
1906 return 0;
1907#undef FAIL
1908}
1909
1910
1911static struct pci_device_id pci_table[] = {
1912 {
1913 .vendor = PCI_VENDOR_ID_TI,
1914 .device = PCI_DEVICE_ID_TI_PCILYNX,
1915 .subvendor = PCI_ANY_ID,
1916 .subdevice = PCI_ANY_ID,
1917 },
1918 { } /* Terminating entry */
1919};
1920
1921static struct pci_driver lynx_pci_driver = {
1922 .name = PCILYNX_DRIVER_NAME,
1923 .id_table = pci_table,
1924 .probe = add_card,
1925 .remove = remove_card,
1926};
1927
1928static struct hpsb_host_driver lynx_driver = {
1929 .owner = THIS_MODULE,
1930 .name = PCILYNX_DRIVER_NAME,
1931 .set_hw_config_rom = NULL,
1932 .transmit_packet = lynx_transmit,
1933 .devctl = lynx_devctl,
1934 .isoctl = NULL,
1935};
1936
1937MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
1938MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
1939MODULE_LICENSE("GPL");
1940MODULE_SUPPORTED_DEVICE("pcilynx");
1941MODULE_DEVICE_TABLE(pci, pci_table);
1942
1943static int __init pcilynx_init(void)
1944{
1945 int ret;
1946
1947#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1948 if (register_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME, &aux_ops)) {
1949 PRINT_G(KERN_ERR, "allocation of char major number %d failed",
1950 PCILYNX_MAJOR);
1951 return -EBUSY;
1952 }
1953#endif
1954
1955 ret = pci_register_driver(&lynx_pci_driver);
1956 if (ret < 0) {
1957 PRINT_G(KERN_ERR, "PCI module init failed");
1958 goto free_char_dev;
1959 }
1960
1961 return 0;
1962
1963 free_char_dev:
1964#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1965 unregister_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME);
1966#endif
1967
1968 return ret;
1969}
1970
1971static void __exit pcilynx_cleanup(void)
1972{
1973 pci_unregister_driver(&lynx_pci_driver);
1974
1975#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1976 unregister_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME);
1977#endif
1978}
1979
1980
1981module_init(pcilynx_init);
1982module_exit(pcilynx_cleanup);