author    Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/ieee1394
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/ieee1394')
-rw-r--r--  drivers/ieee1394/Kconfig | 188
-rw-r--r--  drivers/ieee1394/Makefile | 26
-rw-r--r--  drivers/ieee1394/amdtp.c | 1300
-rw-r--r--  drivers/ieee1394/amdtp.h | 84
-rw-r--r--  drivers/ieee1394/cmp.c | 311
-rw-r--r--  drivers/ieee1394/cmp.h | 31
-rw-r--r--  drivers/ieee1394/config_roms.c | 236
-rw-r--r--  drivers/ieee1394/config_roms.h | 27
-rw-r--r--  drivers/ieee1394/csr.c | 857
-rw-r--r--  drivers/ieee1394/csr.h | 96
-rw-r--r--  drivers/ieee1394/csr1212.c | 1612
-rw-r--r--  drivers/ieee1394/csr1212.h | 727
-rw-r--r--  drivers/ieee1394/dma.c | 260
-rw-r--r--  drivers/ieee1394/dma.h | 78
-rw-r--r--  drivers/ieee1394/dv1394-private.h | 587
-rw-r--r--  drivers/ieee1394/dv1394.c | 2663
-rw-r--r--  drivers/ieee1394/dv1394.h | 305
-rw-r--r--  drivers/ieee1394/eth1394.c | 1801
-rw-r--r--  drivers/ieee1394/eth1394.h | 236
-rw-r--r--  drivers/ieee1394/highlevel.c | 704
-rw-r--r--  drivers/ieee1394/highlevel.h | 190
-rw-r--r--  drivers/ieee1394/hosts.c | 233
-rw-r--r--  drivers/ieee1394/hosts.h | 215
-rw-r--r--  drivers/ieee1394/ieee1394-ioctl.h | 111
-rw-r--r--  drivers/ieee1394/ieee1394.h | 202
-rw-r--r--  drivers/ieee1394/ieee1394_core.c | 1330
-rw-r--r--  drivers/ieee1394/ieee1394_core.h | 228
-rw-r--r--  drivers/ieee1394/ieee1394_hotplug.h | 33
-rw-r--r--  drivers/ieee1394/ieee1394_transactions.c | 601
-rw-r--r--  drivers/ieee1394/ieee1394_transactions.h | 64
-rw-r--r--  drivers/ieee1394/ieee1394_types.h | 101
-rw-r--r--  drivers/ieee1394/iso.c | 451
-rw-r--r--  drivers/ieee1394/iso.h | 201
-rw-r--r--  drivers/ieee1394/nodemgr.c | 1732
-rw-r--r--  drivers/ieee1394/nodemgr.h | 207
-rw-r--r--  drivers/ieee1394/ohci1394.c | 3705
-rw-r--r--  drivers/ieee1394/ohci1394.h | 456
-rw-r--r--  drivers/ieee1394/oui.db | 7048
-rw-r--r--  drivers/ieee1394/oui2c.sh | 23
-rw-r--r--  drivers/ieee1394/pcilynx.c | 1982
-rw-r--r--  drivers/ieee1394/pcilynx.h | 516
-rw-r--r--  drivers/ieee1394/raw1394-private.h | 86
-rw-r--r--  drivers/ieee1394/raw1394.c | 2958
-rw-r--r--  drivers/ieee1394/raw1394.h | 181
-rw-r--r--  drivers/ieee1394/sbp2.c | 2864
-rw-r--r--  drivers/ieee1394/sbp2.h | 484
-rw-r--r--  drivers/ieee1394/video1394.c | 1527
-rw-r--r--  drivers/ieee1394/video1394.h | 67
48 files changed, 39925 insertions, 0 deletions
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
new file mode 100644
index 000000000000..78b201fb5e8a
--- /dev/null
+++ b/drivers/ieee1394/Kconfig
@@ -0,0 +1,188 @@
1# -*- shell-script -*-
2
3menu "IEEE 1394 (FireWire) support"
4
5config IEEE1394
6 tristate "IEEE 1394 (FireWire) support"
7 depends on PCI || BROKEN
8 select NET
9 help
10 IEEE 1394 describes a high performance serial bus, which is also
11 known as FireWire(tm) or i.Link(tm) and is used for connecting all
12 sorts of devices (most notably digital video cameras) to your
13 computer.
14
15 If you have FireWire hardware and want to use it, say Y here. This
16 is the core support only, you will also need to select a driver for
17 your IEEE 1394 adapter.
18
19 To compile this driver as a module, say M here: the
20 module will be called ieee1394.
21
22comment "Subsystem Options"
23 depends on IEEE1394
24
25config IEEE1394_VERBOSEDEBUG
26 bool "Excessive debugging output"
27 depends on IEEE1394
28 help
29 If you say Y here, you will get very verbose debugging logs from
30 the subsystem, including a dump of the header of every sent
31 and received packet. This can add up to a large amount of data
32 collected in a very short time, which is usually also saved to
33 disk by the system logging daemons.
34
35 Say Y if you really want or need the debugging output; everyone
36 else should say N.
37
38config IEEE1394_OUI_DB
39 bool "OUI Database built-in"
40 depends on IEEE1394
41 help
42 If you say Y here, then an OUI list (vendor unique IDs) will be
43 compiled into the ieee1394 module. This doesn't do much beyond
44 letting the subsystem display the vendor of a hardware node. The
45 downside is that it adds about 300k to the size of the module
46 or kernel (depending on whether you compile ieee1394 as a
47 module or statically into the kernel).
48
49 This option is not needed for userspace programs like gscanbus
50 to show this information.
51
52config IEEE1394_EXTRA_CONFIG_ROMS
53 bool "Build in extra config rom entries for certain functionality"
54 depends on IEEE1394
55 help
56 Some IEEE1394 functionality depends on extra config rom entries
57 being available in the host adapter's CSR. These options let
58 you choose which ones to build in.
59
60config IEEE1394_CONFIG_ROM_IP1394
61 bool "IP-1394 Entry"
62 depends on IEEE1394_EXTRA_CONFIG_ROMS && IEEE1394
63 help
64 Adds an entry for using IP-over-1394. If you want to use your
65 IEEE1394 bus as a network for IP systems (including interacting
66 with MacOSX and WinXP IP-over-1394), enable this option and the
67 eth1394 option below.
68
69comment "Device Drivers"
70 depends on IEEE1394
71
72comment "Texas Instruments PCILynx requires I2C"
73 depends on IEEE1394 && I2C=n
74
75config IEEE1394_PCILYNX
76 tristate "Texas Instruments PCILynx support"
77 depends on PCI && IEEE1394 && I2C
78 select I2C_ALGOBIT
79 help
80 Say Y here if you have an IEEE-1394 controller with the Texas
81 Instruments PCILynx chip. Note: this driver is written for revision
82 2 of this chip and may not work with revision 0.
83
84 To compile this driver as a module, say M here: the
85 module will be called pcilynx.
86
87# Non-maintained pcilynx options
88# if [ "$CONFIG_IEEE1394_PCILYNX" != "n" ]; then
89# bool ' Use PCILynx local RAM' CONFIG_IEEE1394_PCILYNX_LOCALRAM
90# bool ' Support for non-IEEE1394 local ports' CONFIG_IEEE1394_PCILYNX_PORTS
91# fi
92config IEEE1394_OHCI1394
93 tristate "OHCI-1394 support"
94 depends on PCI && IEEE1394
95 help
96 Enable this driver if you have an IEEE 1394 controller based on the
97 OHCI-1394 specification. The current driver is only tested with OHCI
98 chipsets made by Texas Instruments and NEC. Most third-party vendors
99 use one of these chipsets. It should work with any OHCI-1394
100 compliant card, however.
101
102 To compile this driver as a module, say M here: the
103 module will be called ohci1394.
104
105comment "Protocol Drivers"
106 depends on IEEE1394
107
108config IEEE1394_VIDEO1394
109 tristate "OHCI-1394 Video support"
110 depends on IEEE1394 && IEEE1394_OHCI1394
111 help
112 This option enables video device usage for OHCI-1394 cards. Enable
113 this option only if you have an IEEE 1394 video device connected to
114 an OHCI-1394 card.
115
116config IEEE1394_SBP2
117 tristate "SBP-2 support (Harddisks etc.)"
118 depends on IEEE1394 && SCSI && (PCI || BROKEN)
119 help
120 This option enables you to use SBP-2 devices connected to your IEEE
121 1394 bus. SBP-2 devices include harddrives and DVD devices.
122
123config IEEE1394_SBP2_PHYS_DMA
124 bool "Enable Phys DMA support for SBP2 (Debug)"
125 depends on IEEE1394 && IEEE1394_SBP2
126
127config IEEE1394_ETH1394
128 tristate "Ethernet over 1394"
129 depends on IEEE1394 && EXPERIMENTAL && INET
130 select IEEE1394_CONFIG_ROM_IP1394
131 select IEEE1394_EXTRA_CONFIG_ROMS
132 help
133 This driver implements a functional majority of RFC 2734: IPv4 over
134 1394. It will provide IP connectivity with implementations of RFC
135 2734 found on other operating systems. It will not communicate with
136 older versions of this driver found in stock kernels prior to 2.6.3.
137 This driver is still considered experimental. It does not yet support
138 MCAP, therefore multicast support is significantly limited.
139
140config IEEE1394_DV1394
141 tristate "OHCI-DV I/O support"
142 depends on IEEE1394 && IEEE1394_OHCI1394
143 help
144 This driver allows you to transmit and receive DV (digital video)
145 streams on an OHCI-1394 card using a simple frame-oriented
146 interface.
147
148 The user-space API for dv1394 is documented in dv1394.h.
149
150 To compile this driver as a module, say M here: the
151 module will be called dv1394.
152
153config IEEE1394_RAWIO
154 tristate "Raw IEEE1394 I/O support"
155 depends on IEEE1394
156 help
157 Say Y here if you want support for the raw device. This is
158 generally a good idea. The raw device enables direct
159 communication of user programs with the IEEE 1394 bus and
160 thus with the attached peripherals.
161
162 To compile this driver as a module, say M here: the
163 module will be called raw1394.
164
165config IEEE1394_CMP
166 tristate "IEC61883-1 Plug support"
167 depends on IEEE1394
168 help
169 This option enables the Connection Management Procedures
170 (IEC61883-1) driver, which implements input and output plugs.
171
172 To compile this driver as a module, say M here: the
173 module will be called cmp.
174
175config IEEE1394_AMDTP
176 tristate "IEC61883-6 (Audio transmission) support"
177 depends on IEEE1394 && IEEE1394_OHCI1394 && IEEE1394_CMP
178 help
179 This option enables the Audio & Music Data Transmission Protocol
180 (IEC61883-6) driver, which implements audio transmission over
181 IEEE1394.
182
183 The userspace interface is documented in amdtp.h.
184
185 To compile this driver as a module, say M here: the
186 module will be called amdtp.
187
188endmenu
diff --git a/drivers/ieee1394/Makefile b/drivers/ieee1394/Makefile
new file mode 100644
index 000000000000..e8b4d48d376e
--- /dev/null
+++ b/drivers/ieee1394/Makefile
@@ -0,0 +1,26 @@
1#
2# Makefile for the Linux IEEE 1394 implementation
3#
4
5ieee1394-objs := ieee1394_core.o ieee1394_transactions.o hosts.o \
6 highlevel.o csr.o nodemgr.o oui.o dma.o iso.o \
7 csr1212.o config_roms.o
8
9obj-$(CONFIG_IEEE1394) += ieee1394.o
10obj-$(CONFIG_IEEE1394_PCILYNX) += pcilynx.o
11obj-$(CONFIG_IEEE1394_OHCI1394) += ohci1394.o
12obj-$(CONFIG_IEEE1394_VIDEO1394) += video1394.o
13obj-$(CONFIG_IEEE1394_RAWIO) += raw1394.o
14obj-$(CONFIG_IEEE1394_SBP2) += sbp2.o
15obj-$(CONFIG_IEEE1394_DV1394) += dv1394.o
16obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o
17obj-$(CONFIG_IEEE1394_AMDTP) += amdtp.o
18obj-$(CONFIG_IEEE1394_CMP) += cmp.o
19
20quiet_cmd_oui2c = OUI2C $@
21 cmd_oui2c = $(CONFIG_SHELL) $(srctree)/$(src)/oui2c.sh < $< > $@
22
23targets := oui.c
24$(obj)/oui.o: $(obj)/oui.c
25$(obj)/oui.c: $(src)/oui.db $(src)/oui2c.sh FORCE
26 $(call if_changed,oui2c)
diff --git a/drivers/ieee1394/amdtp.c b/drivers/ieee1394/amdtp.c
new file mode 100644
index 000000000000..84ae027b021a
--- /dev/null
+++ b/drivers/ieee1394/amdtp.c
@@ -0,0 +1,1300 @@
1/* -*- c-basic-offset: 8 -*-
2 *
3 * amdtp.c - Audio and Music Data Transmission Protocol Driver
4 * Copyright (C) 2001 Kristian Høgsberg
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/* OVERVIEW
22 * --------
23 *
24 * The AMDTP driver is designed to expose the IEEE1394 bus as a
25 * regular OSS soundcard, i.e. you can link /dev/dsp to /dev/amdtp and
26 * then your favourite MP3 player, game or whatever sound program will
27 * output to an IEEE1394 isochronous channel. The signal destination
28 * could be a set of IEEE1394 loudspeakers (if and when such things
29 * become available) or an amplifier with IEEE1394 input (like the
30 * Sony STR-LSA1). The driver only handles the actual streaming; some
31 * connection management is also required for this to actually work.
32 * That is outside the scope of this driver, and furthermore it is not
33 * really standardized yet.
34 *
35 * The Audio and Music Data Transmission Protocol is available at
36 *
37 * http://www.1394ta.org/Download/Technology/Specifications/2001/AM20Final-jf2.pdf
38 *
39 *
40 * TODO
41 * ----
42 *
43 * - We should be able to change input sample format between LE/BE, as
44 * we already shift the bytes around when we construct the iso
45 * packets.
46 *
47 * - Fix DMA stop after bus reset!
48 *
49 * - Clean up iso context handling in ohci1394.
50 *
51 *
52 * MAYBE TODO
53 * ----------
54 *
55 * - Receive data for local playback or recording. Playback requires
56 * soft syncing with the sound card.
57 *
58 * - Signal processing, i.e. receive packets, do some processing, and
59 * transmit them again using the same packet structure and timestamps
60 * offset by processing time.
61 *
62 * - Maybe make an ALSA interface, that is, create a file_ops
63 * implementation that recognizes ALSA ioctls and uses defaults for
64 * things that can't be controlled through ALSA (iso channel).
65 *
66 * Changes:
67 *
68 * - Audit copy_from_user in amdtp_write.
69 * Daniele Bellucci <bellucda@tiscali.it>
70 *
71 */
72
73#include <linux/module.h>
74#include <linux/list.h>
75#include <linux/sched.h>
76#include <linux/types.h>
77#include <linux/fs.h>
78#include <linux/ioctl.h>
79#include <linux/wait.h>
80#include <linux/pci.h>
81#include <linux/interrupt.h>
82#include <linux/poll.h>
83#include <linux/ioctl32.h>
84#include <linux/compat.h>
85#include <linux/cdev.h>
86#include <asm/uaccess.h>
87#include <asm/atomic.h>
88
89#include "hosts.h"
90#include "highlevel.h"
91#include "ieee1394.h"
92#include "ieee1394_core.h"
93#include "ohci1394.h"
94
95#include "amdtp.h"
96#include "cmp.h"
97
98#define FMT_AMDTP 0x10
99#define FDF_AM824 0x00
100#define FDF_SFC_32KHZ 0x00
101#define FDF_SFC_44K1HZ 0x01
102#define FDF_SFC_48KHZ 0x02
103#define FDF_SFC_88K2HZ 0x03
104#define FDF_SFC_96KHZ 0x04
105#define FDF_SFC_176K4HZ 0x05
106#define FDF_SFC_192KHZ 0x06
107
108struct descriptor_block {
109 struct output_more_immediate {
110 u32 control;
111 u32 pad0;
112 u32 skip;
113 u32 pad1;
114 u32 header[4];
115 } header_desc;
116
117 struct output_last {
118 u32 control;
119 u32 data_address;
120 u32 branch;
121 u32 status;
122 } payload_desc;
123};
124
125struct packet {
126 struct descriptor_block *db;
127 dma_addr_t db_bus;
128 struct iso_packet *payload;
129 dma_addr_t payload_bus;
130};
131
132#include <asm/byteorder.h>
133
134#if defined __BIG_ENDIAN_BITFIELD
135
136struct iso_packet {
137 /* First quadlet */
138 unsigned int dbs : 8;
139 unsigned int eoh0 : 2;
140 unsigned int sid : 6;
141
142 unsigned int dbc : 8;
143 unsigned int fn : 2;
144 unsigned int qpc : 3;
145 unsigned int sph : 1;
146 unsigned int reserved : 2;
147
148 /* Second quadlet */
149 unsigned int fdf : 8;
150 unsigned int eoh1 : 2;
151 unsigned int fmt : 6;
152
153 unsigned int syt : 16;
154
155 quadlet_t data[0];
156};
157
158#elif defined __LITTLE_ENDIAN_BITFIELD
159
160struct iso_packet {
161 /* First quadlet */
162 unsigned int sid : 6;
163 unsigned int eoh0 : 2;
164 unsigned int dbs : 8;
165
166 unsigned int reserved : 2;
167 unsigned int sph : 1;
168 unsigned int qpc : 3;
169 unsigned int fn : 2;
170 unsigned int dbc : 8;
171
172 /* Second quadlet */
173 unsigned int fmt : 6;
174 unsigned int eoh1 : 2;
175 unsigned int fdf : 8;
176
177 unsigned int syt : 16;
178
179 quadlet_t data[0];
180};
181
182#else
183
184#error Unknown bitfield type
185
186#endif
187
188struct fraction {
189 int integer;
190 int numerator;
191 int denominator;
192};
193
194#define PACKET_LIST_SIZE 256
195#define MAX_PACKET_LISTS 4
196
197struct packet_list {
198 struct list_head link;
199 int last_cycle_count;
200 struct packet packets[PACKET_LIST_SIZE];
201};
202
203#define BUFFER_SIZE 128
204
205/* This implements a circular buffer for incoming samples. */
206
207struct buffer {
208 size_t head, tail, length, size;
209 unsigned char data[0];
210};
211
212struct stream {
213 int iso_channel;
214 int format;
215 int rate;
216 int dimension;
217 int fdf;
218 int mode;
219 int sample_format;
220 struct cmp_pcr *opcr;
221
222 /* Input samples are copied here. */
223 struct buffer *input;
224
225 /* ISO Packer state */
226 unsigned char dbc;
227 struct packet_list *current_packet_list;
228 int current_packet;
229 struct fraction ready_samples, samples_per_cycle;
230
231 /* We use these to generate control bits when we are packing
232 * iec958 data.
233 */
234 int iec958_frame_count;
235 int iec958_rate_code;
236
237 /* The cycle_count and cycle_offset fields are used for the
238 * synchronization timestamps (syt) in the cip header. They
239 * are incremented by at least a cycle every time we put a
240 * time stamp in a packet. As we don't time stamp all
241 * packets, cycle_count isn't updated in every cycle, and
242 * sometimes it's incremented by 2. Thus, we have
243 * cycle_count2, which is simply incremented by one with each
244 * packet, so we can compare it to the transmission time
245 * written back in the dma programs.
246 */
247 atomic_t cycle_count, cycle_count2;
248 struct fraction cycle_offset, ticks_per_syt_offset;
249 int syt_interval;
250 int stale_count;
251
252 /* These fields control the sample output to the DMA engine.
253 * The dma_packet_lists list holds packet lists currently
254 * queued for dma; the head of the list is currently being
255 * processed. The last program in a packet list generates an
256 * interrupt, which removes the head from dma_packet_lists and
257 * puts it back on the free list.
258 */
259 struct list_head dma_packet_lists;
260 struct list_head free_packet_lists;
261 wait_queue_head_t packet_list_wait;
262 spinlock_t packet_list_lock;
263 struct ohci1394_iso_tasklet iso_tasklet;
264 struct pci_pool *descriptor_pool, *packet_pool;
265
266 /* Streams at a host controller are chained through this field. */
267 struct list_head link;
268 struct amdtp_host *host;
269};
270
271struct amdtp_host {
272 struct hpsb_host *host;
273 struct ti_ohci *ohci;
274 struct list_head stream_list;
275 spinlock_t stream_list_lock;
276};
277
278static struct hpsb_highlevel amdtp_highlevel;
279
280
281/* FIXME: This doesn't belong here... */
282
283#define OHCI1394_CONTEXT_CYCLE_MATCH 0x80000000
284#define OHCI1394_CONTEXT_RUN 0x00008000
285#define OHCI1394_CONTEXT_WAKE 0x00001000
286#define OHCI1394_CONTEXT_DEAD 0x00000800
287#define OHCI1394_CONTEXT_ACTIVE 0x00000400
288
289static void ohci1394_start_it_ctx(struct ti_ohci *ohci, int ctx,
290 dma_addr_t first_cmd, int z, int cycle_match)
291{
292 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << ctx);
293 reg_write(ohci, OHCI1394_IsoXmitCommandPtr + ctx * 16, first_cmd | z);
294 reg_write(ohci, OHCI1394_IsoXmitContextControlClear + ctx * 16, ~0);
295 wmb();
296 reg_write(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16,
297 OHCI1394_CONTEXT_CYCLE_MATCH | (cycle_match << 16) |
298 OHCI1394_CONTEXT_RUN);
299}
300
301static void ohci1394_wake_it_ctx(struct ti_ohci *ohci, int ctx)
302{
303 reg_write(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16,
304 OHCI1394_CONTEXT_WAKE);
305}
306
307static void ohci1394_stop_it_ctx(struct ti_ohci *ohci, int ctx, int synchronous)
308{
309 u32 control;
310 int wait;
311
312 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << ctx);
313 reg_write(ohci, OHCI1394_IsoXmitContextControlClear + ctx * 16,
314 OHCI1394_CONTEXT_RUN);
315 wmb();
316
317 if (synchronous) {
318 for (wait = 0; wait < 5; wait++) {
319 control = reg_read(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16);
320 if ((control & OHCI1394_CONTEXT_ACTIVE) == 0)
321 break;
322
323 set_current_state(TASK_INTERRUPTIBLE);
324 schedule_timeout(1);
325 }
326 }
327}
328
329/* Note: we can test if free_packet_lists is empty without acquiring
330 * the packet_list_lock. The interrupt handler only adds to the free
331 * list, there is no race condition between testing the list non-empty
332 * and acquiring the lock.
333 */
334
335static struct packet_list *stream_get_free_packet_list(struct stream *s)
336{
337 struct packet_list *pl;
338 unsigned long flags;
339
340 if (list_empty(&s->free_packet_lists))
341 return NULL;
342
343 spin_lock_irqsave(&s->packet_list_lock, flags);
344 pl = list_entry(s->free_packet_lists.next, struct packet_list, link);
345 list_del(&pl->link);
346 spin_unlock_irqrestore(&s->packet_list_lock, flags);
347
348 return pl;
349}
350
351static void stream_start_dma(struct stream *s, struct packet_list *pl)
352{
353 u32 syt_cycle, cycle_count, start_cycle;
354
355 cycle_count = reg_read(s->host->ohci,
356 OHCI1394_IsochronousCycleTimer) >> 12;
357 syt_cycle = (pl->last_cycle_count - PACKET_LIST_SIZE + 1) & 0x0f;
358
359 /* We program the DMA controller to start transmission at
360 * least 17 cycles from now - this happens when the lower four
361 * bits of cycle_count is 0x0f and syt_cycle is 0, in this
362 * case the start cycle is cycle_count - 15 + 32. */
363 start_cycle = (cycle_count & ~0x0f) + 32 + syt_cycle;
364 if ((start_cycle & 0x1fff) >= 8000)
365 start_cycle = start_cycle - 8000 + 0x2000;
366
367 ohci1394_start_it_ctx(s->host->ohci, s->iso_tasklet.context,
368 pl->packets[0].db_bus, 3,
369 start_cycle & 0x7fff);
370}
371
372static void stream_put_dma_packet_list(struct stream *s,
373 struct packet_list *pl)
374{
375 unsigned long flags;
376 struct packet_list *prev;
377
378 /* Remember the cycle_count used for timestamping the last packet. */
379 pl->last_cycle_count = atomic_read(&s->cycle_count2) - 1;
380 pl->packets[PACKET_LIST_SIZE - 1].db->payload_desc.branch = 0;
381
382 spin_lock_irqsave(&s->packet_list_lock, flags);
383 list_add_tail(&pl->link, &s->dma_packet_lists);
384 spin_unlock_irqrestore(&s->packet_list_lock, flags);
385
386 prev = list_entry(pl->link.prev, struct packet_list, link);
387 if (pl->link.prev != &s->dma_packet_lists) {
388 struct packet *last = &prev->packets[PACKET_LIST_SIZE - 1];
389 last->db->payload_desc.branch = pl->packets[0].db_bus | 3;
390 last->db->header_desc.skip = pl->packets[0].db_bus | 3;
391 ohci1394_wake_it_ctx(s->host->ohci, s->iso_tasklet.context);
392 }
393 else
394 stream_start_dma(s, pl);
395}
396
397static void stream_shift_packet_lists(unsigned long l)
398{
399 struct stream *s = (struct stream *) l;
400 struct packet_list *pl;
401 struct packet *last;
402 int diff;
403
404 if (list_empty(&s->dma_packet_lists)) {
405 HPSB_ERR("empty dma_packet_lists in %s", __FUNCTION__);
406 return;
407 }
408
409 /* Now that we know the list is non-empty, we can get the head
410 * of the list without locking, because the process context
411 * only adds to the tail.
412 */
413 pl = list_entry(s->dma_packet_lists.next, struct packet_list, link);
414 last = &pl->packets[PACKET_LIST_SIZE - 1];
415
416 /* This is weird... if we stop dma processing in the middle of
417 * a packet list, the dma context immediately generates an
418 * interrupt if we enable it again later. This only happens
419 * when amdtp_release is interrupted while waiting for dma to
420 * complete, though. Anyway, we detect this by seeing that
421 * the status of the dma descriptor that we expected an
422 * interrupt from is still 0.
423 */
424 if (last->db->payload_desc.status == 0) {
425 HPSB_INFO("weird interrupt...");
426 return;
427 }
428
429 /* If the last descriptor block does not specify a branch
430 * address, we have a sample underflow.
431 */
432 if (last->db->payload_desc.branch == 0)
433 HPSB_INFO("FIXME: sample underflow...");
434
435 /* Here we check when (which cycle) the last packet was sent
436 * and compare it to what the iso packer was using at the
437 * time. If there is a mismatch, we adjust the cycle count in
438 * the iso packer. However, there are still up to
439 * MAX_PACKET_LISTS packet lists queued with bad time stamps,
440 * so we disable time stamp monitoring for the next
441 * MAX_PACKET_LISTS packet lists.
442 */
443 diff = (last->db->payload_desc.status - pl->last_cycle_count) & 0xf;
444 if (diff > 0 && s->stale_count == 0) {
445 atomic_add(diff, &s->cycle_count);
446 atomic_add(diff, &s->cycle_count2);
447 s->stale_count = MAX_PACKET_LISTS;
448 }
449
450 if (s->stale_count > 0)
451 s->stale_count--;
452
453 /* Finally, we move the packet list that was just processed
454 * back to the free list, and notify any waiters.
455 */
456 spin_lock(&s->packet_list_lock);
457 list_del(&pl->link);
458 list_add_tail(&pl->link, &s->free_packet_lists);
459 spin_unlock(&s->packet_list_lock);
460
461 wake_up_interruptible(&s->packet_list_wait);
462}
463
464static struct packet *stream_current_packet(struct stream *s)
465{
466 if (s->current_packet_list == NULL &&
467 (s->current_packet_list = stream_get_free_packet_list(s)) == NULL)
468 return NULL;
469
470 return &s->current_packet_list->packets[s->current_packet];
471}
472
473static void stream_queue_packet(struct stream *s)
474{
475 s->current_packet++;
476 if (s->current_packet == PACKET_LIST_SIZE) {
477 stream_put_dma_packet_list(s, s->current_packet_list);
478 s->current_packet_list = NULL;
479 s->current_packet = 0;
480 }
481}
482
483/* Integer fractional math. When we transmit a 44k1Hz signal we must
484 * send 5 41/80 samples per isochronous cycle, as these occur 8000
485 * times a second. Of course, we must send an integral number of
486 * samples in a packet, so we use the integer math to alternate
487 * between sending 5 and 6 samples per packet.
488 */
489
490static void fraction_init(struct fraction *f, int numerator, int denominator)
491{
492 f->integer = numerator / denominator;
493 f->numerator = numerator % denominator;
494 f->denominator = denominator;
495}
496
497static __inline__ void fraction_add(struct fraction *dst,
498 struct fraction *src1,
499 struct fraction *src2)
500{
501 /* assert: src1->denominator == src2->denominator */
502
503 int sum, denom;
504
505 /* We use these two local variables to allow gcc to optimize
506 * the division and the modulo into only one division. */
507
508 sum = src1->numerator + src2->numerator;
509 denom = src1->denominator;
510 dst->integer = src1->integer + src2->integer + sum / denom;
511 dst->numerator = sum % denom;
512 dst->denominator = denom;
513}
514
515static __inline__ void fraction_sub_int(struct fraction *dst,
516 struct fraction *src, int integer)
517{
518 dst->integer = src->integer - integer;
519 dst->numerator = src->numerator;
520 dst->denominator = src->denominator;
521}
522
523static __inline__ int fraction_floor(struct fraction *frac)
524{
525 return frac->integer;
526}
527
528static __inline__ int fraction_ceil(struct fraction *frac)
529{
530 return frac->integer + (frac->numerator > 0 ? 1 : 0);
531}
532
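/* Editor's note: the block below is an illustrative sketch and is NOT part
 * of the original commit.  It exercises the fraction helpers above to show
 * how the packer ends up alternating between 5- and 6-sample packets for a
 * 44.1 kHz stream: 44100 / 8000 = 5 41/80 samples per isochronous cycle,
 * so the accumulated remainder periodically pushes the floor up to 6.  The
 * function name is hypothetical and the code is guarded out.
 */
#if 0
static void fraction_example(void)
{
	struct fraction per_cycle, ready, next;
	int cycle, nevents;

	fraction_init(&per_cycle, 44100, 8000);	/* 5 + 41/80 samples/cycle */
	fraction_init(&ready, 0, 8000);		/* leftover from last cycle */

	for (cycle = 0; cycle < 8; cycle++) {
		fraction_add(&next, &ready, &per_cycle);
		nevents = fraction_floor(&next);	/* mostly 5s and 6s */
		fraction_sub_int(&ready, &next, nevents);
	}
}
#endif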
533static void packet_initialize(struct packet *p, struct packet *next)
534{
535 /* Here we initialize the dma descriptor block for
536 * transferring one iso packet. We use two descriptors per
537 * packet: an OUTPUT_MORE_IMMEDIATE descriptor for the
538 * IEEE1394 iso packet header and an OUTPUT_LAST descriptor
539 * for the payload.
540 */
541
542 p->db->header_desc.control =
543 DMA_CTL_OUTPUT_MORE | DMA_CTL_IMMEDIATE | 8;
544
545 if (next) {
546 p->db->payload_desc.control =
547 DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH;
548 p->db->payload_desc.branch = next->db_bus | 3;
549 p->db->header_desc.skip = next->db_bus | 3;
550 }
551 else {
552 p->db->payload_desc.control =
553 DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH |
554 DMA_CTL_UPDATE | DMA_CTL_IRQ;
555 p->db->payload_desc.branch = 0;
556 p->db->header_desc.skip = 0;
557 }
558 p->db->payload_desc.data_address = p->payload_bus;
559 p->db->payload_desc.status = 0;
560}
561
562static struct packet_list *packet_list_alloc(struct stream *s)
563{
564 int i;
565 struct packet_list *pl;
566 struct packet *next;
567
568 pl = kmalloc(sizeof *pl, SLAB_KERNEL);
569 if (pl == NULL)
570 return NULL;
571
572 for (i = 0; i < PACKET_LIST_SIZE; i++) {
573 struct packet *p = &pl->packets[i];
574 p->db = pci_pool_alloc(s->descriptor_pool, SLAB_KERNEL,
575 &p->db_bus);
576 p->payload = pci_pool_alloc(s->packet_pool, SLAB_KERNEL,
577 &p->payload_bus);
578 }
579
580 for (i = 0; i < PACKET_LIST_SIZE; i++) {
581 if (i < PACKET_LIST_SIZE - 1)
582 next = &pl->packets[i + 1];
583 else
584 next = NULL;
585 packet_initialize(&pl->packets[i], next);
586 }
587
588 return pl;
589}
590
591static void packet_list_free(struct packet_list *pl, struct stream *s)
592{
593 int i;
594
595 for (i = 0; i < PACKET_LIST_SIZE; i++) {
596 struct packet *p = &pl->packets[i];
597 pci_pool_free(s->descriptor_pool, p->db, p->db_bus);
598 pci_pool_free(s->packet_pool, p->payload, p->payload_bus);
599 }
600 kfree(pl);
601}
602
603static struct buffer *buffer_alloc(int size)
604{
605 struct buffer *b;
606
607 b = kmalloc(sizeof *b + size, SLAB_KERNEL);
608 if (b == NULL)
609 return NULL;
610 b->head = 0;
611 b->tail = 0;
612 b->length = 0;
613 b->size = size;
614
615 return b;
616}
617
618static unsigned char *buffer_get_bytes(struct buffer *buffer, int size)
619{
620 unsigned char *p;
621
622 if (buffer->head + size > buffer->size)
623 BUG();
624
625 p = &buffer->data[buffer->head];
626 buffer->head += size;
627 if (buffer->head == buffer->size)
628 buffer->head = 0;
629 buffer->length -= size;
630
631 return p;
632}
633
634static unsigned char *buffer_put_bytes(struct buffer *buffer,
635 size_t max, size_t *actual)
636{
637 size_t length;
638 unsigned char *p;
639
640 p = &buffer->data[buffer->tail];
641 length = min(buffer->size - buffer->length, max);
642 if (buffer->tail + length < buffer->size) {
643 *actual = length;
644 buffer->tail += length;
645 }
646 else {
647 *actual = buffer->size - buffer->tail;
648 buffer->tail = 0;
649 }
650
651 buffer->length += *actual;
652 return p;
653}
654
655static u32 get_iec958_header_bits(struct stream *s, int sub_frame, u32 sample)
656{
657 int csi, parity, shift;
658 int block_start;
659 u32 bits;
660
661 switch (s->iec958_frame_count) {
662 case 1:
663 csi = s->format == AMDTP_FORMAT_IEC958_AC3;
664 break;
665 case 2:
666 case 9:
667 csi = 1;
668 break;
669 case 24 ... 27:
670 csi = (s->iec958_rate_code >> (27 - s->iec958_frame_count)) & 0x01;
671 break;
672 default:
673 csi = 0;
674 break;
675 }
676
677 block_start = (s->iec958_frame_count == 0 && sub_frame == 0);
678
679 /* The parity bit is the xor of the sample bits and the
680 * channel status info bit. */
681 for (shift = 16, parity = sample ^ csi; shift > 0; shift >>= 1)
682 parity ^= (parity >> shift);
683
684 bits = (block_start << 5) | /* Block start bit */
685 ((sub_frame == 0) << 4) | /* Subframe bit */
686 ((parity & 1) << 3) | /* Parity bit */
687 (csi << 2); /* Channel status info bit */
688
689 return bits;
690}
691
692static u32 get_header_bits(struct stream *s, int sub_frame, u32 sample)
693{
694 switch (s->format) {
695 case AMDTP_FORMAT_IEC958_PCM:
696 case AMDTP_FORMAT_IEC958_AC3:
697 return get_iec958_header_bits(s, sub_frame, sample);
698
699 case AMDTP_FORMAT_RAW:
700 return 0x40;
701
702 default:
703 return 0;
704 }
705}
706
707static void fill_payload_le16(struct stream *s, quadlet_t *data, int nevents)
708{
709 quadlet_t *event, sample, bits;
710 unsigned char *p;
711 int i, j;
712
713 for (i = 0, event = data; i < nevents; i++) {
714
715 for (j = 0; j < s->dimension; j++) {
716 p = buffer_get_bytes(s->input, 2);
717 sample = (p[1] << 16) | (p[0] << 8);
718 bits = get_header_bits(s, j, sample);
719 event[j] = cpu_to_be32((bits << 24) | sample);
720 }
721
722 event += s->dimension;
723 if (++s->iec958_frame_count == 192)
724 s->iec958_frame_count = 0;
725 }
726}
727
728static void fill_packet(struct stream *s, struct packet *packet, int nevents)
729{
730 int syt_index, syt, size;
731 u32 control;
732
733 size = (nevents * s->dimension + 2) * sizeof(quadlet_t);
734
735 /* Update DMA descriptors */
736 packet->db->payload_desc.status = 0;
737 control = packet->db->payload_desc.control & 0xffff0000;
738 packet->db->payload_desc.control = control | size;
739
740 /* Fill IEEE1394 headers */
741 packet->db->header_desc.header[0] =
742 (IEEE1394_SPEED_100 << 16) | (0x01 << 14) |
743 (s->iso_channel << 8) | (TCODE_ISO_DATA << 4);
744 packet->db->header_desc.header[1] = size << 16;
745
746 /* Calculate synchronization timestamp (syt). First we
747 * determine syt_index, that is, the index in the packet of
748 * the sample for which the timestamp is valid. */
749 syt_index = (s->syt_interval - s->dbc) & (s->syt_interval - 1);
750 if (syt_index < nevents) {
751 syt = ((atomic_read(&s->cycle_count) << 12) |
752 s->cycle_offset.integer) & 0xffff;
753 fraction_add(&s->cycle_offset,
754 &s->cycle_offset, &s->ticks_per_syt_offset);
755
756 /* This next addition should be modulo 8000 (0x1f40),
757 * but we only use the lower 4 bits of cycle_count, so
758 * we don't need the modulo. */
759 atomic_add(s->cycle_offset.integer / 3072, &s->cycle_count);
760 s->cycle_offset.integer %= 3072;
761 }
762 else
763 syt = 0xffff;
764
765 atomic_inc(&s->cycle_count2);
766
767 /* Fill cip header */
768 packet->payload->eoh0 = 0;
769 packet->payload->sid = s->host->host->node_id & 0x3f;
770 packet->payload->dbs = s->dimension;
771 packet->payload->fn = 0;
772 packet->payload->qpc = 0;
773 packet->payload->sph = 0;
774 packet->payload->reserved = 0;
775 packet->payload->dbc = s->dbc;
776 packet->payload->eoh1 = 2;
777 packet->payload->fmt = FMT_AMDTP;
778 packet->payload->fdf = s->fdf;
779 packet->payload->syt = cpu_to_be16(syt);
780
781 switch (s->sample_format) {
782 case AMDTP_INPUT_LE16:
783 fill_payload_le16(s, packet->payload->data, nevents);
784 break;
785 }
786
787 s->dbc += nevents;
788}
789
790static void stream_flush(struct stream *s)
791{
792 struct packet *p;
793 int nevents;
794 struct fraction next;
795
796 /* The AMDTP specifies two transmission modes: blocking and
797 * non-blocking. In blocking mode you always transfer
798 * syt_interval or zero samples, whereas in non-blocking mode
799 * you send as many samples as you have available at transfer
800 * time.
801 *
802 * The fraction samples_per_cycle specifies the number of
803 * samples that become available per cycle. We add this to
804 * the fraction ready_samples, which specifies the number of
805 * leftover samples from the previous transmission. The sum,
806 * stored in the fraction next, specifies the number of
807 * samples available for transmission, and from this we
808 * determine the number of samples to actually transmit.
809 */
810
811 while (1) {
812 fraction_add(&next, &s->ready_samples, &s->samples_per_cycle);
813 if (s->mode == AMDTP_MODE_BLOCKING) {
814 if (fraction_floor(&next) >= s->syt_interval)
815 nevents = s->syt_interval;
816 else
817 nevents = 0;
818 }
819 else
820 nevents = fraction_floor(&next);
821
822 p = stream_current_packet(s);
823 if (s->input->length < nevents * s->dimension * 2 || p == NULL)
824 break;
825
826 fill_packet(s, p, nevents);
827 stream_queue_packet(s);
828
829 /* Now that we have successfully queued the packet for
830 * transmission, we update the fraction ready_samples. */
831 fraction_sub_int(&s->ready_samples, &next, nevents);
832 }
833}
834
835static int stream_alloc_packet_lists(struct stream *s)
836{
837 int max_nevents, max_packet_size, i;
838
839 if (s->mode == AMDTP_MODE_BLOCKING)
840 max_nevents = s->syt_interval;
841 else
842 max_nevents = fraction_ceil(&s->samples_per_cycle);
843
844 max_packet_size = max_nevents * s->dimension * 4 + 8;
845 s->packet_pool = pci_pool_create("packet pool", s->host->ohci->dev,
846 max_packet_size, 0, 0);
847
848 if (s->packet_pool == NULL)
849 return -1;
850
851 INIT_LIST_HEAD(&s->free_packet_lists);
852 INIT_LIST_HEAD(&s->dma_packet_lists);
853 for (i = 0; i < MAX_PACKET_LISTS; i++) {
854 struct packet_list *pl = packet_list_alloc(s);
855 if (pl == NULL)
856 break;
857 list_add_tail(&pl->link, &s->free_packet_lists);
858 }
859
860 return i < MAX_PACKET_LISTS ? -1 : 0;
861}
862
863static void stream_free_packet_lists(struct stream *s)
864{
865 struct packet_list *packet_l, *packet_l_next;
866
867 if (s->current_packet_list != NULL)
868 packet_list_free(s->current_packet_list, s);
869 list_for_each_entry_safe(packet_l, packet_l_next, &s->dma_packet_lists, link)
870 packet_list_free(packet_l, s);
871 list_for_each_entry_safe(packet_l, packet_l_next, &s->free_packet_lists, link)
872 packet_list_free(packet_l, s);
873 if (s->packet_pool != NULL)
874 pci_pool_destroy(s->packet_pool);
875
876 s->current_packet_list = NULL;
877 INIT_LIST_HEAD(&s->free_packet_lists);
878 INIT_LIST_HEAD(&s->dma_packet_lists);
879 s->packet_pool = NULL;
880}
881
882static void plug_update(struct cmp_pcr *plug, void *data)
883{
884 struct stream *s = data;
885
886 HPSB_INFO("plug update: p2p_count=%d, channel=%d",
887 plug->p2p_count, plug->channel);
888 s->iso_channel = plug->channel;
889 if (plug->p2p_count > 0) {
890 struct packet_list *pl;
891
892 pl = list_entry(s->dma_packet_lists.next, struct packet_list, link);
893 stream_start_dma(s, pl);
894 }
895 else {
896 ohci1394_stop_it_ctx(s->host->ohci, s->iso_tasklet.context, 0);
897 }
898}
899
900static int stream_configure(struct stream *s, int cmd, struct amdtp_ioctl *cfg)
901{
902 const int transfer_delay = 9000;
903
904 if (cfg->format <= AMDTP_FORMAT_IEC958_AC3)
905 s->format = cfg->format;
906 else
907 return -EINVAL;
908
909 switch (cfg->rate) {
910 case 32000:
911 s->syt_interval = 8;
912 s->fdf = FDF_SFC_32KHZ;
913 s->iec958_rate_code = 0x0c;
914 break;
915 case 44100:
916 s->syt_interval = 8;
917 s->fdf = FDF_SFC_44K1HZ;
918 s->iec958_rate_code = 0x00;
919 break;
920 case 48000:
921 s->syt_interval = 8;
922 s->fdf = FDF_SFC_48KHZ;
923 s->iec958_rate_code = 0x04;
924 break;
925 case 88200:
926 s->syt_interval = 16;
927 s->fdf = FDF_SFC_88K2HZ;
928 s->iec958_rate_code = 0x00;
929 break;
930 case 96000:
931 s->syt_interval = 16;
932 s->fdf = FDF_SFC_96KHZ;
933 s->iec958_rate_code = 0x00;
934 break;
935 case 176400:
936 s->syt_interval = 32;
937 s->fdf = FDF_SFC_176K4HZ;
938 s->iec958_rate_code = 0x00;
939 break;
940 case 192000:
941 s->syt_interval = 32;
942 s->fdf = FDF_SFC_192KHZ;
943 s->iec958_rate_code = 0x00;
944 break;
945
946 default:
947 return -EINVAL;
948 }
949
950 s->rate = cfg->rate;
951 fraction_init(&s->samples_per_cycle, s->rate, 8000);
952 fraction_init(&s->ready_samples, 0, 8000);
953
954 /* The ticks_per_syt_offset is initialized to the number of
955 * ticks between syt_interval events. The number of ticks per
956 * second is 24.576e6, so the number of ticks between
957 * syt_interval events is 24.576e6 * syt_interval / rate.
958 */
959 fraction_init(&s->ticks_per_syt_offset,
960 24576000 * s->syt_interval, s->rate);
961 fraction_init(&s->cycle_offset, (transfer_delay % 3072) * s->rate, s->rate);
962 atomic_set(&s->cycle_count, transfer_delay / 3072);
963 atomic_set(&s->cycle_count2, 0);
964
965 s->mode = cfg->mode;
966 s->sample_format = AMDTP_INPUT_LE16;
967
968 /* When using the AM824 raw subformat we can stream signals of
969 * any dimension. The IEC958 subformat, however, only
970 * supports 2 channels.
971 */
972 if (s->format == AMDTP_FORMAT_RAW || cfg->dimension == 2)
973 s->dimension = cfg->dimension;
974 else
975 return -EINVAL;
976
977 if (s->opcr != NULL) {
978 cmp_unregister_opcr(s->host->host, s->opcr);
979 s->opcr = NULL;
980 }
981
982 switch(cmd) {
983 case AMDTP_IOC_PLUG:
984 s->opcr = cmp_register_opcr(s->host->host, cfg->u.plug,
985 /*payload*/ 12, plug_update, s);
986 if (s->opcr == NULL)
987 return -EINVAL;
988 s->iso_channel = s->opcr->channel;
989 break;
990
991 case AMDTP_IOC_CHANNEL:
992 if (cfg->u.channel >= 0 && cfg->u.channel < 64)
993 s->iso_channel = cfg->u.channel;
994 else
995 return -EINVAL;
996 break;
997 }
998
999 /* The ioctl settings were all valid, so we realloc the packet
1000 * lists to make sure the packet size is big enough.
1001 */
1002 if (s->packet_pool != NULL)
1003 stream_free_packet_lists(s);
1004
1005 if (stream_alloc_packet_lists(s) < 0) {
1006 stream_free_packet_lists(s);
1007 return -ENOMEM;
1008 }
1009
1010 return 0;
1011}
1012
1013static struct stream *stream_alloc(struct amdtp_host *host)
1014{
1015 struct stream *s;
1016 unsigned long flags;
1017
1018 s = kmalloc(sizeof(struct stream), SLAB_KERNEL);
1019 if (s == NULL)
1020 return NULL;
1021
1022 memset(s, 0, sizeof(struct stream));
1023 s->host = host;
1024
1025 s->input = buffer_alloc(BUFFER_SIZE);
1026 if (s->input == NULL) {
1027 kfree(s);
1028 return NULL;
1029 }
1030
1031 s->descriptor_pool = pci_pool_create("descriptor pool", host->ohci->dev,
1032 sizeof(struct descriptor_block),
1033 16, 0);
1034
1035 if (s->descriptor_pool == NULL) {
1036 kfree(s->input);
1037 kfree(s);
1038 return NULL;
1039 }
1040
1041 INIT_LIST_HEAD(&s->free_packet_lists);
1042 INIT_LIST_HEAD(&s->dma_packet_lists);
1043
1044 init_waitqueue_head(&s->packet_list_wait);
1045 spin_lock_init(&s->packet_list_lock);
1046
1047 ohci1394_init_iso_tasklet(&s->iso_tasklet, OHCI_ISO_TRANSMIT,
1048 stream_shift_packet_lists,
1049 (unsigned long) s);
1050
1051 if (ohci1394_register_iso_tasklet(host->ohci, &s->iso_tasklet) < 0) {
1052 pci_pool_destroy(s->descriptor_pool);
1053 kfree(s->input);
1054 kfree(s);
1055 return NULL;
1056 }
1057
1058 spin_lock_irqsave(&host->stream_list_lock, flags);
1059 list_add_tail(&s->link, &host->stream_list);
1060 spin_unlock_irqrestore(&host->stream_list_lock, flags);
1061
1062 return s;
1063}
1064
1065static void stream_free(struct stream *s)
1066{
1067 unsigned long flags;
1068
1069 /* Stop the DMA. We wait for the dma packet list to become
1070 * empty and let the dma controller run out of programs. This
1071 * seems to be more reliable than stopping it directly, since
1072 * that sometimes generates an it transmit interrupt if we
1073 * later re-enable the context.
1074 */
1075 wait_event_interruptible(s->packet_list_wait,
1076 list_empty(&s->dma_packet_lists));
1077
1078 ohci1394_stop_it_ctx(s->host->ohci, s->iso_tasklet.context, 1);
1079 ohci1394_unregister_iso_tasklet(s->host->ohci, &s->iso_tasklet);
1080
1081 if (s->opcr != NULL)
1082 cmp_unregister_opcr(s->host->host, s->opcr);
1083
1084 spin_lock_irqsave(&s->host->stream_list_lock, flags);
1085 list_del(&s->link);
1086 spin_unlock_irqrestore(&s->host->stream_list_lock, flags);
1087
1088 kfree(s->input);
1089
1090 stream_free_packet_lists(s);
1091 pci_pool_destroy(s->descriptor_pool);
1092
1093 kfree(s);
1094}
1095
1096/* File operations */
1097
1098static ssize_t amdtp_write(struct file *file, const char __user *buffer, size_t count,
1099 loff_t *offset_is_ignored)
1100{
1101 struct stream *s = file->private_data;
1102 unsigned char *p;
1103 int i;
1104 size_t length;
1105
1106 if (s->packet_pool == NULL)
1107 return -EBADFD;
1108
1109 /* Fill the circular buffer from the input buffer and call the
1110 * iso packer when the buffer is full. The iso packer may
1111 * leave bytes in the buffer for two reasons: either the
1112 * remaining bytes weren't enough to build a new packet, or
1113 * there were no free packet lists. In the first case we
1114 * re-fill the buffer and call the iso packer again or return
1115 * if we used all the data from userspace. In the second
1116 * case, the wait_event_interruptible will block until the irq
1117 * handler frees a packet list.
1118 */
1119
1120 for (i = 0; i < count; i += length) {
1121 p = buffer_put_bytes(s->input, count - i, &length);
1122 if (copy_from_user(p, buffer + i, length))
1123 return -EFAULT;
1124 if (s->input->length < s->input->size)
1125 continue;
1126
1127 stream_flush(s);
1128
1129 if (s->current_packet_list != NULL)
1130 continue;
1131
1132 if (file->f_flags & O_NONBLOCK)
1133 return i + length > 0 ? i + length : -EAGAIN;
1134
1135 if (wait_event_interruptible(s->packet_list_wait,
1136 !list_empty(&s->free_packet_lists)))
1137 return -EINTR;
1138 }
1139
1140 return count;
1141}
1142
1143static long amdtp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1144{
1145 struct stream *s = file->private_data;
1146 struct amdtp_ioctl cfg;
1147 int err;
1148 lock_kernel();
1149 switch(cmd)
1150 {
1151 case AMDTP_IOC_PLUG:
1152 case AMDTP_IOC_CHANNEL:
1153 if (copy_from_user(&cfg, (struct amdtp_ioctl __user *) arg, sizeof cfg))
1154 err = -EFAULT;
1155 else
1156 err = stream_configure(s, cmd, &cfg);
1157 break;
1158
1159 default:
1160 err = -EINVAL;
1161 break;
1162 }
1163 unlock_kernel();
1164 return err;
1165}
1166
1167static unsigned int amdtp_poll(struct file *file, poll_table *pt)
1168{
1169 struct stream *s = file->private_data;
1170
1171 poll_wait(file, &s->packet_list_wait, pt);
1172
1173 if (!list_empty(&s->free_packet_lists))
1174 return POLLOUT | POLLWRNORM;
1175 else
1176 return 0;
1177}
1178
1179static int amdtp_open(struct inode *inode, struct file *file)
1180{
1181 struct amdtp_host *host;
1182 int i = ieee1394_file_to_instance(file);
1183
1184 host = hpsb_get_hostinfo_bykey(&amdtp_highlevel, i);
1185 if (host == NULL)
1186 return -ENODEV;
1187
1188 file->private_data = stream_alloc(host);
1189 if (file->private_data == NULL)
1190 return -ENOMEM;
1191
1192 return 0;
1193}
1194
1195static int amdtp_release(struct inode *inode, struct file *file)
1196{
1197 struct stream *s = file->private_data;
1198
1199 stream_free(s);
1200
1201 return 0;
1202}
1203
1204static struct cdev amdtp_cdev;
1205static struct file_operations amdtp_fops =
1206{
1207 .owner = THIS_MODULE,
1208 .write = amdtp_write,
1209 .poll = amdtp_poll,
1210 .unlocked_ioctl = amdtp_ioctl,
1211 .compat_ioctl = amdtp_ioctl, /* All amdtp ioctls are compatible */
1212 .open = amdtp_open,
1213 .release = amdtp_release
1214};
1215
1216/* IEEE1394 Subsystem functions */
1217
1218static void amdtp_add_host(struct hpsb_host *host)
1219{
1220 struct amdtp_host *ah;
1221 int minor;
1222
1223 if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME) != 0)
1224 return;
1225
1226 ah = hpsb_create_hostinfo(&amdtp_highlevel, host, sizeof(*ah));
1227 if (!ah) {
1228 HPSB_ERR("amdtp: Unable to alloc hostinfo");
1229 return;
1230 }
1231
1232 ah->host = host;
1233 ah->ohci = host->hostdata;
1234
1235 hpsb_set_hostinfo_key(&amdtp_highlevel, host, ah->host->id);
1236
1237 minor = IEEE1394_MINOR_BLOCK_AMDTP * 16 + ah->host->id;
1238
1239 INIT_LIST_HEAD(&ah->stream_list);
1240 spin_lock_init(&ah->stream_list_lock);
1241
1242 devfs_mk_cdev(MKDEV(IEEE1394_MAJOR, minor),
1243 S_IFCHR|S_IRUSR|S_IWUSR, "amdtp/%d", ah->host->id);
1244}
1245
1246static void amdtp_remove_host(struct hpsb_host *host)
1247{
1248 struct amdtp_host *ah = hpsb_get_hostinfo(&amdtp_highlevel, host);
1249
1250 if (ah)
1251 devfs_remove("amdtp/%d", ah->host->id);
1252
1253 return;
1254}
1255
1256static struct hpsb_highlevel amdtp_highlevel = {
1257 .name = "amdtp",
1258 .add_host = amdtp_add_host,
1259 .remove_host = amdtp_remove_host,
1260};
1261
1262/* Module interface */
1263
1264MODULE_AUTHOR("Kristian Hogsberg <hogsberg@users.sf.net>");
1265MODULE_DESCRIPTION("Driver for Audio & Music Data Transmission Protocol "
1266 "on OHCI boards.");
1267MODULE_SUPPORTED_DEVICE("amdtp");
1268MODULE_LICENSE("GPL");
1269
1270static int __init amdtp_init_module (void)
1271{
1272 cdev_init(&amdtp_cdev, &amdtp_fops);
1273 amdtp_cdev.owner = THIS_MODULE;
1274 kobject_set_name(&amdtp_cdev.kobj, "amdtp");
1275 if (cdev_add(&amdtp_cdev, IEEE1394_AMDTP_DEV, 16)) {
1276 HPSB_ERR("amdtp: unable to add char device");
1277 return -EIO;
1278 }
1279
1280 devfs_mk_dir("amdtp");
1281
1282 hpsb_register_highlevel(&amdtp_highlevel);
1283
1284 HPSB_INFO("Loaded AMDTP driver");
1285
1286 return 0;
1287}
1288
1289static void __exit amdtp_exit_module (void)
1290{
1291 hpsb_unregister_highlevel(&amdtp_highlevel);
1292 devfs_remove("amdtp");
1293 cdev_del(&amdtp_cdev);
1294
1295 HPSB_INFO("Unloaded AMDTP driver");
1296}
1297
1298module_init(amdtp_init_module);
1299module_exit(amdtp_exit_module);
1300MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_AMDTP * 16);
diff --git a/drivers/ieee1394/amdtp.h b/drivers/ieee1394/amdtp.h
new file mode 100644
index 000000000000..531f28e3ab50
--- /dev/null
+++ b/drivers/ieee1394/amdtp.h
@@ -0,0 +1,84 @@
1/* -*- c-basic-offset: 8 -*- */
2
3#ifndef __AMDTP_H
4#define __AMDTP_H
5
6#include <asm/types.h>
7#include "ieee1394-ioctl.h"
8
9/* The userspace interface for the Audio & Music Data Transmission
10 * Protocol driver is really simple. First, open /dev/amdtp, use the
11 * ioctl to configure format, rate, dimension and either plug or
12 * channel, then start writing samples.
13 *
14 * The formats supported by the driver are listed below.
15 * AMDTP_FORMAT_RAW corresponds to the AM824 raw format, which can
16 * carry any number of channels, so use this if you're streaming
17 * multichannel audio. AMDTP_FORMAT_IEC958_PCM corresponds to the
18 * AM824 IEC958 encapsulation without the IEC958 data bit set;
19 * AMDTP_FORMAT_IEC958_AC3 transmits the samples with the data bit
20 * set, suitable for transmitting compressed AC-3 audio.
21 *
22 * The rate field specifies the transmission rate; supported values
23 * are 32000, 44100, 48000, 88200, 96000, 176400 and 192000.
24 *
25 * The dimension field specifies the dimension of the signal, that is,
26 * the number of audio channels. Only AMDTP_FORMAT_RAW supports
27 * settings greater than 2.
28 *
29 * The mode field specifies which transmission mode to use. The AMDTP
30 * specifies two different transmission modes: blocking and
31 * non-blocking. The blocking transmission mode always sends a fixed
32 * number of samples, typically 8, 16 or 32. To exactly match the
33 * transmission rate, the driver alternates between sending empty and
34 * non-empty packets. In non-blocking mode, the driver transmits the
35 * smallest packets possible. For example, for a transmission rate of
36 * 44100Hz, the driver should send 5 41/80 samples in every cycle, but
37 * this is not possible so instead the driver alternates between
38 * sending 5 and 6 samples.
39 *
40 * The last thing to specify is either the isochronous channel to use
41 * or the output plug to connect to. If you know what channel the
42 * destination device will listen on, you can specify the channel
43 * directly and use the AMDTP_IOC_CHANNEL ioctl. However, if the
44 * destination device chooses the channel and uses the IEC61883-1 plug
45 * mechanism, you can specify an output plug to connect to. The
46 * driver will pick up the channel number from the plug once the
47 * destination device locks the output plug control register. In this
48 * case set the plug field and use the AMDTP_IOC_PLUG ioctl.
49 *
50 * Having configured the interface, the driver now accepts writes of
51 * regular 16 bit signed little endian samples, with the channels
52 * interleaved. For example, 4 channels would look like:
53 *
54 * | sample 0 | sample 1 ...
55 * | ch. 0 | ch. 1 | ch. 2 | ch. 3 | ch. 0 | ...
56 * | lsb | msb | lsb | msb | lsb | msb | lsb | msb | lsb | msb | ...
57 *
58 */
59
60enum {
61 AMDTP_FORMAT_RAW,
62 AMDTP_FORMAT_IEC958_PCM,
63 AMDTP_FORMAT_IEC958_AC3
64};
65
66enum {
67 AMDTP_MODE_BLOCKING,
68 AMDTP_MODE_NON_BLOCKING,
69};
70
71enum {
72 AMDTP_INPUT_LE16,
73 AMDTP_INPUT_BE16,
74};
75
76struct amdtp_ioctl {
77 __u32 format;
78 __u32 rate;
79 __u32 dimension;
80 __u32 mode;
81 union { __u32 channel; __u32 plug; } u;
82};
83
84#endif /* __AMDTP_H */
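Editor's note: to make the interface comment above concrete, here is a minimal user-space sketch of the workflow it describes: open the amdtp character device, configure format/rate/dimension/mode plus a fixed isochronous channel with the AMDTP_IOC_CHANNEL ioctl, then write interleaved 16-bit little-endian samples. It is not part of the commit; the device path /dev/amdtp/0 assumes the devfs node created in amdtp_add_host() is present, the channel number 10 is arbitrary, and error handling is kept minimal.

/* Hypothetical user-space example (editor's sketch, not in this commit). */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "amdtp.h"	/* struct amdtp_ioctl, AMDTP_* constants */

int main(void)
{
	struct amdtp_ioctl cfg;
	int16_t frame[2] = { 0, 0 };	/* one stereo frame of silence, LE16 */
	int fd, i;

	fd = open("/dev/amdtp/0", O_WRONLY);	/* devfs node, one per host */
	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof cfg);
	cfg.format = AMDTP_FORMAT_RAW;		/* AM824 raw, any dimension */
	cfg.rate = 44100;
	cfg.dimension = 2;			/* stereo, channels interleaved */
	cfg.mode = AMDTP_MODE_NON_BLOCKING;
	cfg.u.channel = 10;			/* arbitrary fixed iso channel */

	if (ioctl(fd, AMDTP_IOC_CHANNEL, &cfg) < 0)
		return 1;

	/* One second of silence: lsb-first 16-bit samples, as documented above. */
	for (i = 0; i < 44100; i++)
		write(fd, frame, sizeof frame);

	close(fd);
	return 0;
}

With AMDTP_IOC_PLUG instead, the same struct would carry cfg.u.plug and the driver would pick up the channel from the oPCR once the receiving device locks it, as described in the header comment.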
diff --git a/drivers/ieee1394/cmp.c b/drivers/ieee1394/cmp.c
new file mode 100644
index 000000000000..69aed26e83a1
--- /dev/null
+++ b/drivers/ieee1394/cmp.c
@@ -0,0 +1,311 @@
1/* -*- c-basic-offset: 8 -*-
2 *
3 * cmp.c - Connection Management Procedures
4 * Copyright (C) 2001 Kristian Høgsberg
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/* TODO
22 * ----
23 *
24 * - Implement IEC61883-1 output plugs and connection management.
25 * This should probably be part of the general subsystem, as it could
26 * be shared with dv1394.
27 *
28 * - Add IEC61883 unit directory when loading this module. This
29 * requires a run-time changeable config rom.
30 */
31
32#include <linux/module.h>
33#include <linux/list.h>
34#include <linux/sched.h>
35#include <linux/types.h>
36#include <linux/wait.h>
37#include <linux/interrupt.h>
38
39#include "hosts.h"
40#include "highlevel.h"
41#include "ieee1394.h"
42#include "ieee1394_core.h"
43#include "cmp.h"
44
45struct plug {
46 union {
47 struct cmp_pcr pcr;
48 quadlet_t quadlet;
49 } u;
50 void (*update)(struct cmp_pcr *plug, void *data);
51 void *data;
52};
53
54struct cmp_host {
55 struct hpsb_host *host;
56
57 union {
58 struct cmp_mpr ompr;
59 quadlet_t ompr_quadlet;
60 } u;
61 struct plug opcr[2];
62
63 union {
64 struct cmp_mpr impr;
65 quadlet_t impr_quadlet;
66 } v;
67 struct plug ipcr[2];
68};
69
70enum {
71 CMP_P2P_CONNECTION,
72 CMP_BC_CONNECTION
73};
74
75#define CSR_PCR_MAP 0x900
76#define CSR_PCR_MAP_END 0x9fc
77
78static struct hpsb_highlevel cmp_highlevel;
79
80static void cmp_add_host(struct hpsb_host *host);
81static void cmp_host_reset(struct hpsb_host *host);
82static int pcr_read(struct hpsb_host *host, int nodeid, quadlet_t *buf,
83 u64 addr, size_t length, u16 flags);
84static int pcr_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
85 u64 addr, quadlet_t data, quadlet_t arg, int extcode, u16 flags);
86
87static struct hpsb_highlevel cmp_highlevel = {
88 .name = "cmp",
89 .add_host = cmp_add_host,
90 .host_reset = cmp_host_reset,
91};
92
93static struct hpsb_address_ops pcr_ops = {
94 .read = pcr_read,
95 .lock = pcr_lock,
96};
97
98
99struct cmp_pcr *
100cmp_register_opcr(struct hpsb_host *host, int opcr_number, int payload,
101 void (*update)(struct cmp_pcr *pcr, void *data),
102 void *data)
103{
104 struct cmp_host *ch;
105 struct plug *plug;
106
107 ch = hpsb_get_hostinfo(&cmp_highlevel, host);
108
109 if (opcr_number >= ch->u.ompr.nplugs ||
110 ch->opcr[opcr_number].update != NULL)
111 return NULL;
112
113 plug = &ch->opcr[opcr_number];
114 plug->u.pcr.online = 1;
115 plug->u.pcr.bcast_count = 0;
116 plug->u.pcr.p2p_count = 0;
117 plug->u.pcr.overhead = 0;
118 plug->u.pcr.payload = payload;
119 plug->update = update;
120 plug->data = data;
121
122 return &plug->u.pcr;
123}
124
125void cmp_unregister_opcr(struct hpsb_host *host, struct cmp_pcr *opcr)
126{
127 struct cmp_host *ch;
128 struct plug *plug;
129
130 ch = hpsb_get_hostinfo(&cmp_highlevel, host);
131 plug = (struct plug *)opcr;
132 if (plug - ch->opcr >= ch->u.ompr.nplugs) BUG();
133
134 plug->u.pcr.online = 0;
135 plug->update = NULL;
136}
137
138static void reset_plugs(struct cmp_host *ch)
139{
140 int i;
141
142 ch->u.ompr.non_persistent_ext = 0xff;
143 for (i = 0; i < ch->u.ompr.nplugs; i++) {
144 ch->opcr[i].u.pcr.bcast_count = 0;
145 ch->opcr[i].u.pcr.p2p_count = 0;
146 ch->opcr[i].u.pcr.overhead = 0;
147 }
148}
149
150static void cmp_add_host(struct hpsb_host *host)
151{
152 struct cmp_host *ch = hpsb_create_hostinfo(&cmp_highlevel, host, sizeof (*ch));
153
154 if (ch == NULL) {
155 HPSB_ERR("Failed to allocate cmp_host");
156 return;
157 }
158
159 hpsb_register_addrspace(&cmp_highlevel, host, &pcr_ops,
160 CSR_REGISTER_BASE + CSR_PCR_MAP,
161 CSR_REGISTER_BASE + CSR_PCR_MAP_END);
162
163 ch->host = host;
164 ch->u.ompr.rate = IEEE1394_SPEED_100;
165 ch->u.ompr.bcast_channel_base = 63;
166 ch->u.ompr.nplugs = 2;
167
168 reset_plugs(ch);
169}
170
171static void cmp_host_reset(struct hpsb_host *host)
172{
173 struct cmp_host *ch;
174
175 ch = hpsb_get_hostinfo(&cmp_highlevel, host);
176 if (ch == NULL) {
177 HPSB_ERR("cmp: Tried to reset unknown host");
178 return;
179 }
180
181 reset_plugs(ch);
182}
183
184static int pcr_read(struct hpsb_host *host, int nodeid, quadlet_t *buf,
185 u64 addr, size_t length, u16 flags)
186{
187 int csraddr = addr - CSR_REGISTER_BASE;
188 int plug;
189 struct cmp_host *ch;
190
191 if (length != 4)
192 return RCODE_TYPE_ERROR;
193
194 ch = hpsb_get_hostinfo(&cmp_highlevel, host);
195 if (csraddr == 0x900) {
196 *buf = cpu_to_be32(ch->u.ompr_quadlet);
197 return RCODE_COMPLETE;
198 }
199 else if (csraddr < 0x904 + ch->u.ompr.nplugs * 4) {
200 plug = (csraddr - 0x904) / 4;
201 *buf = cpu_to_be32(ch->opcr[plug].u.quadlet);
202 return RCODE_COMPLETE;
203 }
204 else if (csraddr < 0x980) {
205 return RCODE_ADDRESS_ERROR;
206 }
207 else if (csraddr == 0x980) {
208 *buf = cpu_to_be32(ch->v.impr_quadlet);
209 return RCODE_COMPLETE;
210 }
211 else if (csraddr < 0x984 + ch->v.impr.nplugs * 4) {
212 plug = (csraddr - 0x984) / 4;
213 *buf = cpu_to_be32(ch->ipcr[plug].u.quadlet);
214 return RCODE_COMPLETE;
215 }
216 else
217 return RCODE_ADDRESS_ERROR;
218}
219
220static int pcr_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
221 u64 addr, quadlet_t data, quadlet_t arg, int extcode, u16 flags)
222{
223 int csraddr = addr - CSR_REGISTER_BASE;
224 int plug;
225 struct cmp_host *ch;
226
227 ch = hpsb_get_hostinfo(&cmp_highlevel, host);
228
229 if (extcode != EXTCODE_COMPARE_SWAP)
230 return RCODE_TYPE_ERROR;
231
232 if (csraddr == 0x900) {
233 /* FIXME: Ignore writes to bits 30-31 and 0-7 */
234 *store = cpu_to_be32(ch->u.ompr_quadlet);
235 if (arg == cpu_to_be32(ch->u.ompr_quadlet))
236 ch->u.ompr_quadlet = be32_to_cpu(data);
237
238 return RCODE_COMPLETE;
239 }
240 if (csraddr < 0x904 + ch->u.ompr.nplugs * 4) {
241 plug = (csraddr - 0x904) / 4;
242 *store = cpu_to_be32(ch->opcr[plug].u.quadlet);
243
244 if (arg == *store)
245 ch->opcr[plug].u.quadlet = be32_to_cpu(data);
246
247 if (be32_to_cpu(*store) != ch->opcr[plug].u.quadlet &&
248 ch->opcr[plug].update != NULL)
249 ch->opcr[plug].update(&ch->opcr[plug].u.pcr,
250 ch->opcr[plug].data);
251
252 return RCODE_COMPLETE;
253 }
254 else if (csraddr < 0x980) {
255 return RCODE_ADDRESS_ERROR;
256 }
257 else if (csraddr == 0x980) {
258 /* FIXME: Ignore writes to bits 24-31 and 0-7 */
259		*store = cpu_to_be32(ch->v.impr_quadlet);
260		if (arg == cpu_to_be32(ch->v.impr_quadlet))
261			ch->v.impr_quadlet = be32_to_cpu(data);
262
263 return RCODE_COMPLETE;
264 }
265 else if (csraddr < 0x984 + ch->v.impr.nplugs * 4) {
266 plug = (csraddr - 0x984) / 4;
267 *store = cpu_to_be32(ch->ipcr[plug].u.quadlet);
268
269 if (arg == *store)
270 ch->ipcr[plug].u.quadlet = be32_to_cpu(data);
271
272 if (be32_to_cpu(*store) != ch->ipcr[plug].u.quadlet &&
273 ch->ipcr[plug].update != NULL)
274 ch->ipcr[plug].update(&ch->ipcr[plug].u.pcr,
275 ch->ipcr[plug].data);
276
277 return RCODE_COMPLETE;
278 }
279 else
280 return RCODE_ADDRESS_ERROR;
281}
282
283
284/* Module interface */
285
286MODULE_AUTHOR("Kristian Hogsberg <hogsberg@users.sf.net>");
287MODULE_DESCRIPTION("Connection Management Procedures (CMP)");
288MODULE_SUPPORTED_DEVICE("cmp");
289MODULE_LICENSE("GPL");
290
291EXPORT_SYMBOL(cmp_register_opcr);
292EXPORT_SYMBOL(cmp_unregister_opcr);
293
294static int __init cmp_init_module (void)
295{
296 hpsb_register_highlevel (&cmp_highlevel);
297
298 HPSB_INFO("Loaded CMP driver");
299
300 return 0;
301}
302
303static void __exit cmp_exit_module (void)
304{
305 hpsb_unregister_highlevel(&cmp_highlevel);
306
307 HPSB_INFO("Unloaded CMP driver");
308}
309
310module_init(cmp_init_module);
311module_exit(cmp_exit_module);
diff --git a/drivers/ieee1394/cmp.h b/drivers/ieee1394/cmp.h
new file mode 100644
index 000000000000..f9288bfcd494
--- /dev/null
+++ b/drivers/ieee1394/cmp.h
@@ -0,0 +1,31 @@
1#ifndef __CMP_H
2#define __CMP_H
3
4struct cmp_mpr {
5 u32 nplugs:5;
6 u32 reserved:3;
7 u32 persistent_ext:8;
8 u32 non_persistent_ext:8;
9 u32 bcast_channel_base:6;
10 u32 rate:2;
11} __attribute__((packed));
12
13struct cmp_pcr {
14 u32 payload:10;
15 u32 overhead:4;
16 u32 speed:2;
17 u32 channel:6;
18 u32 reserved:2;
19 u32 p2p_count:6;
20 u32 bcast_count:1;
21 u32 online:1;
22} __attribute__((packed));
23
24struct cmp_pcr *cmp_register_opcr(struct hpsb_host *host, int plug,
25 int payload,
26 void (*update)(struct cmp_pcr *plug,
27 void *data),
28 void *data);
29void cmp_unregister_opcr(struct hpsb_host *host, struct cmp_pcr *plug);
30
31#endif /* __CMP_H */
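For orientation, here is a minimal sketch of how a client driver might use the oPCR interface declared above. The host pointer, the plug number, the payload value and the callback body are assumptions made only for this example; the update callback itself is invoked from pcr_lock() in cmp.c after a remote node has changed the plug with a compare-swap lock request.

#include <linux/errno.h>
#include "hosts.h"
#include "cmp.h"

static struct cmp_pcr *example_opcr;	/* hypothetical client state */

/* Called after a remote node has updated the plug via a lock request. */
static void example_opcr_update(struct cmp_pcr *pcr, void *data)
{
	if (pcr->p2p_count > 0) {
		/* a point-to-point connection exists: the client could start
		 * transmitting on pcr->channel at speed pcr->speed here */
	}
}

static int example_connect(struct hpsb_host *host)
{
	/* claim output plug 0 with an assumed payload of 125 quadlets */
	example_opcr = cmp_register_opcr(host, 0, 125,
					 example_opcr_update, NULL);
	return example_opcr ? 0 : -EBUSY;
}

static void example_disconnect(struct hpsb_host *host)
{
	cmp_unregister_opcr(host, example_opcr);
}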
diff --git a/drivers/ieee1394/config_roms.c b/drivers/ieee1394/config_roms.c
new file mode 100644
index 000000000000..1017fd717248
--- /dev/null
+++ b/drivers/ieee1394/config_roms.c
@@ -0,0 +1,236 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * ConfigROM entries
5 *
6 * Copyright (C) 2004 Ben Collins
7 *
8 * This code is licensed under the GPL. See the file COPYING in the root
9 * directory of the kernel sources for details.
10 */
11
12#include <linux/config.h>
13#include <linux/types.h>
14
15#include "csr1212.h"
16#include "ieee1394.h"
17#include "ieee1394_types.h"
18#include "hosts.h"
19#include "ieee1394_core.h"
20#include "highlevel.h"
21#include "csr.h"
22#include "config_roms.h"
23
24struct hpsb_config_rom_entry {
25 const char *name;
26
27 /* Base initialization, called at module load */
28 int (*init)(void);
29
30 /* Add entry to specified host */
31 int (*add)(struct hpsb_host *host);
32
33 /* Remove entry from specified host */
34 void (*remove)(struct hpsb_host *host);
35
36 /* Cleanup called at module exit */
37 void (*cleanup)(void);
38
39 /* The flag added to host->config_roms */
40 unsigned int flag;
41};
42
43
44int hpsb_default_host_entry(struct hpsb_host *host)
45{
46 struct csr1212_keyval *root;
47 struct csr1212_keyval *vend_id = NULL;
48 struct csr1212_keyval *text = NULL;
49 char csr_name[128];
50 int ret;
51
52 sprintf(csr_name, "Linux - %s", host->driver->name);
53 root = host->csr.rom->root_kv;
54
55 vend_id = csr1212_new_immediate(CSR1212_KV_ID_VENDOR, host->csr.guid_hi >> 8);
56 text = csr1212_new_string_descriptor_leaf(csr_name);
57
58 if (!vend_id || !text) {
59 if (vend_id)
60 csr1212_release_keyval(vend_id);
61 if (text)
62 csr1212_release_keyval(text);
63 csr1212_destroy_csr(host->csr.rom);
64 return -ENOMEM;
65 }
66
67 ret = csr1212_associate_keyval(vend_id, text);
68 csr1212_release_keyval(text);
69 ret |= csr1212_attach_keyval_to_directory(root, vend_id);
70 csr1212_release_keyval(vend_id);
71 if (ret != CSR1212_SUCCESS) {
72 csr1212_destroy_csr(host->csr.rom);
73 return -ENOMEM;
74 }
75
76 host->update_config_rom = 1;
77
78 return 0;
79}
80
81
82#ifdef CONFIG_IEEE1394_CONFIG_ROM_IP1394
83#include "eth1394.h"
84
85static struct csr1212_keyval *ip1394_ud;
86
87static int config_rom_ip1394_init(void)
88{
89 struct csr1212_keyval *spec_id = NULL;
90 struct csr1212_keyval *spec_desc = NULL;
91 struct csr1212_keyval *ver = NULL;
92 struct csr1212_keyval *ver_desc = NULL;
93 int ret = -ENOMEM;
94
95 ip1394_ud = csr1212_new_directory(CSR1212_KV_ID_UNIT);
96
97 spec_id = csr1212_new_immediate(CSR1212_KV_ID_SPECIFIER_ID,
98 ETHER1394_GASP_SPECIFIER_ID);
99 spec_desc = csr1212_new_string_descriptor_leaf("IANA");
100 ver = csr1212_new_immediate(CSR1212_KV_ID_VERSION,
101 ETHER1394_GASP_VERSION);
102 ver_desc = csr1212_new_string_descriptor_leaf("IPv4");
103
104 if (!ip1394_ud || !spec_id || !spec_desc || !ver || !ver_desc)
105 goto ip1394_fail;
106
107 if (csr1212_associate_keyval(spec_id, spec_desc) == CSR1212_SUCCESS &&
108 csr1212_associate_keyval(ver, ver_desc) == CSR1212_SUCCESS &&
109 csr1212_attach_keyval_to_directory(ip1394_ud, spec_id) == CSR1212_SUCCESS &&
110 csr1212_attach_keyval_to_directory(ip1394_ud, ver) == CSR1212_SUCCESS)
111 ret = 0;
112
113ip1394_fail:
114 if (ret && ip1394_ud) {
115 csr1212_release_keyval(ip1394_ud);
116 ip1394_ud = NULL;
117 }
118
119 if (spec_id)
120 csr1212_release_keyval(spec_id);
121 if (spec_desc)
122 csr1212_release_keyval(spec_desc);
123 if (ver)
124 csr1212_release_keyval(ver);
125 if (ver_desc)
126 csr1212_release_keyval(ver_desc);
127
128 return ret;
129}
130
131static void config_rom_ip1394_cleanup(void)
132{
133 if (ip1394_ud) {
134 csr1212_release_keyval(ip1394_ud);
135 ip1394_ud = NULL;
136 }
137}
138
139static int config_rom_ip1394_add(struct hpsb_host *host)
140{
141 if (!ip1394_ud)
142 return -ENODEV;
143
144 if (csr1212_attach_keyval_to_directory(host->csr.rom->root_kv,
145 ip1394_ud) != CSR1212_SUCCESS)
146 return -ENOMEM;
147
148 return 0;
149}
150
151static void config_rom_ip1394_remove(struct hpsb_host *host)
152{
153 csr1212_detach_keyval_from_directory(host->csr.rom->root_kv, ip1394_ud);
154}
155
156static struct hpsb_config_rom_entry ip1394_entry = {
157 .name = "ip1394",
158 .init = config_rom_ip1394_init,
159 .add = config_rom_ip1394_add,
160 .remove = config_rom_ip1394_remove,
161 .cleanup = config_rom_ip1394_cleanup,
162 .flag = HPSB_CONFIG_ROM_ENTRY_IP1394,
163};
164#endif /* CONFIG_IEEE1394_CONFIG_ROM_IP1394 */
165
166
167static struct hpsb_config_rom_entry *const config_rom_entries[] = {
168#ifdef CONFIG_IEEE1394_CONFIG_ROM_IP1394
169 &ip1394_entry,
170#endif
171 NULL,
172};
173
174
175int hpsb_init_config_roms(void)
176{
177 int i, error = 0;
178
179 for (i = 0; config_rom_entries[i]; i++) {
180 if (!config_rom_entries[i]->init)
181 continue;
182
183 if (config_rom_entries[i]->init()) {
184 HPSB_ERR("Failed to initialize config rom entry `%s'",
185 config_rom_entries[i]->name);
186 error = -1;
187 } else
188 HPSB_DEBUG("Initialized config rom entry `%s'",
189 config_rom_entries[i]->name);
190 }
191
192 return error;
193}
194
195void hpsb_cleanup_config_roms(void)
196{
197 int i;
198
199 for (i = 0; config_rom_entries[i]; i++) {
200 if (config_rom_entries[i]->cleanup)
201 config_rom_entries[i]->cleanup();
202 }
203}
204
205int hpsb_add_extra_config_roms(struct hpsb_host *host)
206{
207 int i, error = 0;
208
209 for (i = 0; config_rom_entries[i]; i++) {
210 if (config_rom_entries[i]->add(host)) {
211 HPSB_ERR("fw-host%d: Failed to attach config rom entry `%s'",
212 host->id, config_rom_entries[i]->name);
213 error = -1;
214 } else {
215 host->config_roms |= config_rom_entries[i]->flag;
216 host->update_config_rom = 1;
217 }
218 }
219
220 return error;
221}
222
223void hpsb_remove_extra_config_roms(struct hpsb_host *host)
224{
225 int i;
226
227 for (i = 0; config_rom_entries[i]; i++) {
228 if (!(host->config_roms & config_rom_entries[i]->flag))
229 continue;
230
231 config_rom_entries[i]->remove(host);
232
233 host->config_roms &= ~config_rom_entries[i]->flag;
234 host->update_config_rom = 1;
235 }
236}
diff --git a/drivers/ieee1394/config_roms.h b/drivers/ieee1394/config_roms.h
new file mode 100644
index 000000000000..0a70544cfe65
--- /dev/null
+++ b/drivers/ieee1394/config_roms.h
@@ -0,0 +1,27 @@
1#ifndef _IEEE1394_CONFIG_ROMS_H
2#define _IEEE1394_CONFIG_ROMS_H
3
4#include "ieee1394_types.h"
5#include "hosts.h"
6
7/* The default host entry. This must succeed. */
8int hpsb_default_host_entry(struct hpsb_host *host);
9
10/* Initialize all config roms */
11int hpsb_init_config_roms(void);
12
13/* Cleanup all config roms */
14void hpsb_cleanup_config_roms(void);
15
16/* Add extra config roms to specified host */
17int hpsb_add_extra_config_roms(struct hpsb_host *host);
18
19/* Remove extra config roms from specified host */
20void hpsb_remove_extra_config_roms(struct hpsb_host *host);
21
22
23/* List of flags to check if a host contains a certain extra config rom
24 * entry. Available in the host->config_roms member. */
25#define HPSB_CONFIG_ROM_ENTRY_IP1394 0x00000001
26
27#endif /* _IEEE1394_CONFIG_ROMS_H */
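Read together with the per-hook comments in config_roms.c, the declarations above are meant to be driven in roughly the following order by the 1394 core. The helper functions below are a sketch for orientation only and do not appear in the sources.

#include "hosts.h"
#include "config_roms.h"

/* Hypothetical sequence a host-adding path might follow; the real call
 * sites live in the 1394 core, which is not part of this file. */
static int example_bring_up_host(struct hpsb_host *host)
{
	int ret;

	ret = hpsb_default_host_entry(host);	/* "must succeed" per the header */
	if (ret)
		return ret;

	/* attach whatever optional unit directories were prepared at module
	 * load by hpsb_init_config_roms(), e.g. the ip1394 entry; individual
	 * failures are logged by the function itself */
	hpsb_add_extra_config_roms(host);
	return 0;
}

static void example_tear_down_host(struct hpsb_host *host)
{
	hpsb_remove_extra_config_roms(host);
}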
diff --git a/drivers/ieee1394/csr.c b/drivers/ieee1394/csr.c
new file mode 100644
index 000000000000..1b98684aebcd
--- /dev/null
+++ b/drivers/ieee1394/csr.c
@@ -0,0 +1,857 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * CSR implementation, iso/bus manager implementation.
5 *
6 * Copyright (C) 1999 Andreas E. Bombe
7 * 2002 Manfred Weihs <weihs@ict.tuwien.ac.at>
8 *
9 * This code is licensed under the GPL. See the file COPYING in the root
10 * directory of the kernel sources for details.
11 *
12 *
13 * Contributions:
14 *
15 * Manfred Weihs <weihs@ict.tuwien.ac.at>
16 * configuration ROM manipulation
17 *
18 */
19
20#include <linux/string.h>
21#include <linux/module.h>
22#include <linux/moduleparam.h>
23#include <linux/param.h>
24#include <linux/spinlock.h>
25
26#include "csr1212.h"
27#include "ieee1394_types.h"
28#include "hosts.h"
29#include "ieee1394.h"
30#include "highlevel.h"
31
32/* Module Parameters */
33/* this module parameter can be used to disable mapping of the FCP registers */
34
35static int fcp = 1;
36module_param(fcp, int, 0444);
37MODULE_PARM_DESC(fcp, "Map FCP registers (default = 1, disable = 0).");
38
39static struct csr1212_keyval *node_cap = NULL;
40
41static void add_host(struct hpsb_host *host);
42static void remove_host(struct hpsb_host *host);
43static void host_reset(struct hpsb_host *host);
44static int read_maps(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
45 u64 addr, size_t length, u16 fl);
46static int write_fcp(struct hpsb_host *host, int nodeid, int dest,
47 quadlet_t *data, u64 addr, size_t length, u16 flags);
48static int read_regs(struct hpsb_host *host, int nodeid, quadlet_t *buf,
49 u64 addr, size_t length, u16 flags);
50static int write_regs(struct hpsb_host *host, int nodeid, int destid,
51 quadlet_t *data, u64 addr, size_t length, u16 flags);
52static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
53 u64 addr, quadlet_t data, quadlet_t arg, int extcode, u16 fl);
54static int lock64_regs(struct hpsb_host *host, int nodeid, octlet_t * store,
55 u64 addr, octlet_t data, octlet_t arg, int extcode, u16 fl);
56static int read_config_rom(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
57 u64 addr, size_t length, u16 fl);
58static u64 allocate_addr_range(u64 size, u32 alignment, void *__host);
59static void release_addr_range(u64 addr, void *__host);
60
61static struct hpsb_highlevel csr_highlevel = {
62 .name = "standard registers",
63 .add_host = add_host,
64 .remove_host = remove_host,
65 .host_reset = host_reset,
66};
67
68static struct hpsb_address_ops map_ops = {
69 .read = read_maps,
70};
71
72static struct hpsb_address_ops fcp_ops = {
73 .write = write_fcp,
74};
75
76static struct hpsb_address_ops reg_ops = {
77 .read = read_regs,
78 .write = write_regs,
79 .lock = lock_regs,
80 .lock64 = lock64_regs,
81};
82
83static struct hpsb_address_ops config_rom_ops = {
84 .read = read_config_rom,
85};
86
87struct csr1212_bus_ops csr_bus_ops = {
88 .allocate_addr_range = allocate_addr_range,
89 .release_addr = release_addr_range,
90};
91
92
93static u16 csr_crc16(unsigned *data, int length)
94{
95 int check=0, i;
96 int shift, sum, next=0;
97
98 for (i = length; i; i--) {
99 for (next = check, shift = 28; shift >= 0; shift -= 4 ) {
100 sum = ((next >> 12) ^ (be32_to_cpu(*data) >> shift)) & 0xf;
101 next = (next << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
102 }
103 check = next & 0xffff;
104 data++;
105 }
106
107 return check;
108}
109
110static void host_reset(struct hpsb_host *host)
111{
112 host->csr.state &= 0x300;
113
114 host->csr.bus_manager_id = 0x3f;
115 host->csr.bandwidth_available = 4915;
116 host->csr.channels_available_hi = 0xfffffffe; /* pre-alloc ch 31 per 1394a-2000 */
117 host->csr.channels_available_lo = ~0;
118 host->csr.broadcast_channel = 0x80000000 | 31;
119
120 if (host->is_irm) {
121 if (host->driver->hw_csr_reg) {
122 host->driver->hw_csr_reg(host, 2, 0xfffffffe, ~0);
123 }
124 }
125
126 host->csr.node_ids = host->node_id << 16;
127
128 if (!host->is_root) {
129 /* clear cmstr bit */
130 host->csr.state &= ~0x100;
131 }
132
133 host->csr.topology_map[1] =
134 cpu_to_be32(be32_to_cpu(host->csr.topology_map[1]) + 1);
135 host->csr.topology_map[2] = cpu_to_be32(host->node_count << 16
136 | host->selfid_count);
137 host->csr.topology_map[0] =
138 cpu_to_be32((host->selfid_count + 2) << 16
139 | csr_crc16(host->csr.topology_map + 1,
140 host->selfid_count + 2));
141
142 host->csr.speed_map[1] =
143 cpu_to_be32(be32_to_cpu(host->csr.speed_map[1]) + 1);
144 host->csr.speed_map[0] = cpu_to_be32(0x3f1 << 16
145 | csr_crc16(host->csr.speed_map+1,
146 0x3f1));
147}
148
149/*
150 * HI == seconds (bits 0:2)
151 * LO == fraction units of 1/8000 of a second, as per 1394 (bits 19:31)
152 *
153 * Convert to units and then to HZ, for comparison to jiffies.
154 *
155 * By default this will end up being 800 units, or 100ms (125usec per
156 * unit).
157 *
158 * NOTE: The spec says 1/8000, but also says we can compute based on 1/8192
159 * like CSR specifies. Should make our math less complex.
160 */
161static inline void calculate_expire(struct csr_control *csr)
162{
163 unsigned long units;
164
165 /* Take the seconds, and convert to units */
166 units = (unsigned long)(csr->split_timeout_hi & 0x07) << 13;
167
168 /* Add in the fractional units */
169 units += (unsigned long)(csr->split_timeout_lo >> 19);
170
171 /* Convert to jiffies */
172 csr->expire = (unsigned long)(units * HZ) >> 13UL;
173
174 /* Just to keep from rounding low */
175 csr->expire++;
176
177 HPSB_VERBOSE("CSR: setting expire to %lu, HZ=%u", csr->expire, HZ);
178}
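As a concrete check of the conversion described in the comment above, the following stand-alone snippet replays the default case (split_timeout_hi = 0, split_timeout_lo = 800 << 19). The tick rate of 1000 is an assumption used only for this illustration.

#include <stdio.h>

int main(void)
{
	const unsigned long hz = 1000;			/* assumed tick rate */
	unsigned long split_timeout_hi = 0;		/* driver default */
	unsigned long split_timeout_lo = 800UL << 19;	/* driver default */
	unsigned long units, expire;

	units  = (split_timeout_hi & 0x07) << 13;	/* seconds -> 1/8192 units */
	units += split_timeout_lo >> 19;		/* fractional part: 800 units */
	expire = (units * hz) >> 13;			/* units -> jiffies: 97 */
	expire++;					/* round up, as above: 98 */

	/* 98 jiffies at 1000 Hz is about 98 ms, i.e. roughly the 100 ms
	 * (800 x 125 us) mentioned in the comment above */
	printf("units=%lu expire=%lu jiffies\n", units, expire);
	return 0;
}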
179
180
181static void add_host(struct hpsb_host *host)
182{
183 struct csr1212_keyval *root;
184 quadlet_t bus_info[CSR_BUS_INFO_SIZE];
185
186 hpsb_register_addrspace(&csr_highlevel, host, &reg_ops,
187 CSR_REGISTER_BASE,
188 CSR_REGISTER_BASE + CSR_CONFIG_ROM);
189 hpsb_register_addrspace(&csr_highlevel, host, &config_rom_ops,
190 CSR_REGISTER_BASE + CSR_CONFIG_ROM,
191 CSR_REGISTER_BASE + CSR_CONFIG_ROM_END);
192 if (fcp) {
193 hpsb_register_addrspace(&csr_highlevel, host, &fcp_ops,
194 CSR_REGISTER_BASE + CSR_FCP_COMMAND,
195 CSR_REGISTER_BASE + CSR_FCP_END);
196 }
197 hpsb_register_addrspace(&csr_highlevel, host, &map_ops,
198 CSR_REGISTER_BASE + CSR_TOPOLOGY_MAP,
199 CSR_REGISTER_BASE + CSR_TOPOLOGY_MAP_END);
200 hpsb_register_addrspace(&csr_highlevel, host, &map_ops,
201 CSR_REGISTER_BASE + CSR_SPEED_MAP,
202 CSR_REGISTER_BASE + CSR_SPEED_MAP_END);
203
204 spin_lock_init(&host->csr.lock);
205
206 host->csr.state = 0;
207 host->csr.node_ids = 0;
208 host->csr.split_timeout_hi = 0;
209 host->csr.split_timeout_lo = 800 << 19;
210 calculate_expire(&host->csr);
211 host->csr.cycle_time = 0;
212 host->csr.bus_time = 0;
213 host->csr.bus_manager_id = 0x3f;
214 host->csr.bandwidth_available = 4915;
215 host->csr.channels_available_hi = 0xfffffffe; /* pre-alloc ch 31 per 1394a-2000 */
216 host->csr.channels_available_lo = ~0;
217 host->csr.broadcast_channel = 0x80000000 | 31;
218
219 if (host->is_irm) {
220 if (host->driver->hw_csr_reg) {
221 host->driver->hw_csr_reg(host, 2, 0xfffffffe, ~0);
222 }
223 }
224
225 if (host->csr.max_rec >= 9)
226 host->csr.max_rom = 2;
227 else if (host->csr.max_rec >= 5)
228 host->csr.max_rom = 1;
229 else
230 host->csr.max_rom = 0;
231
232 host->csr.generation = 2;
233
234 bus_info[1] = __constant_cpu_to_be32(0x31333934);
235 bus_info[2] = cpu_to_be32((1 << CSR_IRMC_SHIFT) |
236 (1 << CSR_CMC_SHIFT) |
237 (1 << CSR_ISC_SHIFT) |
238 (0 << CSR_BMC_SHIFT) |
239 (0 << CSR_PMC_SHIFT) |
240 (host->csr.cyc_clk_acc << CSR_CYC_CLK_ACC_SHIFT) |
241 (host->csr.max_rec << CSR_MAX_REC_SHIFT) |
242 (host->csr.max_rom << CSR_MAX_ROM_SHIFT) |
243 (host->csr.generation << CSR_GENERATION_SHIFT) |
244 host->csr.lnk_spd);
245
246 bus_info[3] = cpu_to_be32(host->csr.guid_hi);
247 bus_info[4] = cpu_to_be32(host->csr.guid_lo);
248
249 /* The hardware copy of the bus info block will be set later when a
250 * bus reset is issued. */
251
252 csr1212_init_local_csr(host->csr.rom, bus_info, host->csr.max_rom);
253
254 root = host->csr.rom->root_kv;
255
256 if(csr1212_attach_keyval_to_directory(root, node_cap) != CSR1212_SUCCESS) {
257 HPSB_ERR("Failed to attach Node Capabilities to root directory");
258 }
259
260 host->update_config_rom = 1;
261}
262
263static void remove_host(struct hpsb_host *host)
264{
265 quadlet_t bus_info[CSR_BUS_INFO_SIZE];
266
267 bus_info[1] = __constant_cpu_to_be32(0x31333934);
268 bus_info[2] = cpu_to_be32((0 << CSR_IRMC_SHIFT) |
269 (0 << CSR_CMC_SHIFT) |
270 (0 << CSR_ISC_SHIFT) |
271 (0 << CSR_BMC_SHIFT) |
272 (0 << CSR_PMC_SHIFT) |
273 (host->csr.cyc_clk_acc << CSR_CYC_CLK_ACC_SHIFT) |
274 (host->csr.max_rec << CSR_MAX_REC_SHIFT) |
275 (0 << CSR_MAX_ROM_SHIFT) |
276 (0 << CSR_GENERATION_SHIFT) |
277 host->csr.lnk_spd);
278
279 bus_info[3] = cpu_to_be32(host->csr.guid_hi);
280 bus_info[4] = cpu_to_be32(host->csr.guid_lo);
281
282 csr1212_detach_keyval_from_directory(host->csr.rom->root_kv, node_cap);
283
284 csr1212_init_local_csr(host->csr.rom, bus_info, 0);
285 host->update_config_rom = 1;
286}
287
288
289int hpsb_update_config_rom(struct hpsb_host *host, const quadlet_t *new_rom,
290 size_t buffersize, unsigned char rom_version)
291{
292 unsigned long flags;
293 int ret;
294
295 HPSB_NOTICE("hpsb_update_config_rom() is deprecated");
296
297 spin_lock_irqsave(&host->csr.lock, flags);
298 if (rom_version != host->csr.generation)
299 ret = -1;
300 else if (buffersize > host->csr.rom->cache_head->size)
301 ret = -2;
302 else {
303 /* Just overwrite the generated ConfigROM image with new data,
304		 * as it can be regenerated later. */
305 memcpy(host->csr.rom->cache_head->data, new_rom, buffersize);
306 host->csr.rom->cache_head->len = buffersize;
307
308 if (host->driver->set_hw_config_rom)
309 host->driver->set_hw_config_rom(host, host->csr.rom->bus_info_data);
310 /* Increment the generation number to keep some sort of sync
311 * with the newer ConfigROM manipulation method. */
312 host->csr.generation++;
313 if (host->csr.generation > 0xf || host->csr.generation < 2)
314 host->csr.generation = 2;
315 ret=0;
316 }
317 spin_unlock_irqrestore(&host->csr.lock, flags);
318 return ret;
319}
320
321
322/* Read topology / speed maps and configuration ROM */
323static int read_maps(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
324 u64 addr, size_t length, u16 fl)
325{
326 unsigned long flags;
327 int csraddr = addr - CSR_REGISTER_BASE;
328 const char *src;
329
330 spin_lock_irqsave(&host->csr.lock, flags);
331
332 if (csraddr < CSR_SPEED_MAP) {
333 src = ((char *)host->csr.topology_map) + csraddr
334 - CSR_TOPOLOGY_MAP;
335 } else {
336 src = ((char *)host->csr.speed_map) + csraddr - CSR_SPEED_MAP;
337 }
338
339 memcpy(buffer, src, length);
340 spin_unlock_irqrestore(&host->csr.lock, flags);
341 return RCODE_COMPLETE;
342}
343
344
345#define out if (--length == 0) break
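/* Used by read_regs()/write_regs() below: consume one quadlet of the
 * request and leave the switch when the whole request has been handled,
 * otherwise fall through to the next register. */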
346
347static int read_regs(struct hpsb_host *host, int nodeid, quadlet_t *buf,
348 u64 addr, size_t length, u16 flags)
349{
350 int csraddr = addr - CSR_REGISTER_BASE;
351 int oldcycle;
352 quadlet_t ret;
353
354 if ((csraddr | length) & 0x3)
355 return RCODE_TYPE_ERROR;
356
357 length /= 4;
358
359 switch (csraddr) {
360 case CSR_STATE_CLEAR:
361 *(buf++) = cpu_to_be32(host->csr.state);
362 out;
363 case CSR_STATE_SET:
364 *(buf++) = cpu_to_be32(host->csr.state);
365 out;
366 case CSR_NODE_IDS:
367 *(buf++) = cpu_to_be32(host->csr.node_ids);
368 out;
369
370 case CSR_RESET_START:
371 return RCODE_TYPE_ERROR;
372
373 /* address gap - handled by default below */
374
375 case CSR_SPLIT_TIMEOUT_HI:
376 *(buf++) = cpu_to_be32(host->csr.split_timeout_hi);
377 out;
378 case CSR_SPLIT_TIMEOUT_LO:
379 *(buf++) = cpu_to_be32(host->csr.split_timeout_lo);
380 out;
381
382 /* address gap */
383 return RCODE_ADDRESS_ERROR;
384
385 case CSR_CYCLE_TIME:
386 oldcycle = host->csr.cycle_time;
387 host->csr.cycle_time =
388 host->driver->devctl(host, GET_CYCLE_COUNTER, 0);
389
390 if (oldcycle > host->csr.cycle_time) {
391 /* cycle time wrapped around */
392 host->csr.bus_time += 1 << 7;
393 }
394 *(buf++) = cpu_to_be32(host->csr.cycle_time);
395 out;
396 case CSR_BUS_TIME:
397 oldcycle = host->csr.cycle_time;
398 host->csr.cycle_time =
399 host->driver->devctl(host, GET_CYCLE_COUNTER, 0);
400
401 if (oldcycle > host->csr.cycle_time) {
402 /* cycle time wrapped around */
403 host->csr.bus_time += (1 << 7);
404 }
405 *(buf++) = cpu_to_be32(host->csr.bus_time
406 | (host->csr.cycle_time >> 25));
407 out;
408
409 /* address gap */
410 return RCODE_ADDRESS_ERROR;
411
412 case CSR_BUSY_TIMEOUT:
413 /* not yet implemented */
414 return RCODE_ADDRESS_ERROR;
415
416 case CSR_BUS_MANAGER_ID:
417 if (host->driver->hw_csr_reg)
418 ret = host->driver->hw_csr_reg(host, 0, 0, 0);
419 else
420 ret = host->csr.bus_manager_id;
421
422 *(buf++) = cpu_to_be32(ret);
423 out;
424 case CSR_BANDWIDTH_AVAILABLE:
425 if (host->driver->hw_csr_reg)
426 ret = host->driver->hw_csr_reg(host, 1, 0, 0);
427 else
428 ret = host->csr.bandwidth_available;
429
430 *(buf++) = cpu_to_be32(ret);
431 out;
432 case CSR_CHANNELS_AVAILABLE_HI:
433 if (host->driver->hw_csr_reg)
434 ret = host->driver->hw_csr_reg(host, 2, 0, 0);
435 else
436 ret = host->csr.channels_available_hi;
437
438 *(buf++) = cpu_to_be32(ret);
439 out;
440 case CSR_CHANNELS_AVAILABLE_LO:
441 if (host->driver->hw_csr_reg)
442 ret = host->driver->hw_csr_reg(host, 3, 0, 0);
443 else
444 ret = host->csr.channels_available_lo;
445
446 *(buf++) = cpu_to_be32(ret);
447 out;
448
449 case CSR_BROADCAST_CHANNEL:
450 *(buf++) = cpu_to_be32(host->csr.broadcast_channel);
451 out;
452
453 /* address gap to end - fall through to default */
454 default:
455 return RCODE_ADDRESS_ERROR;
456 }
457
458 return RCODE_COMPLETE;
459}
460
461static int write_regs(struct hpsb_host *host, int nodeid, int destid,
462 quadlet_t *data, u64 addr, size_t length, u16 flags)
463{
464 int csraddr = addr - CSR_REGISTER_BASE;
465
466 if ((csraddr | length) & 0x3)
467 return RCODE_TYPE_ERROR;
468
469 length /= 4;
470
471 switch (csraddr) {
472 case CSR_STATE_CLEAR:
473 /* FIXME FIXME FIXME */
474 printk("doh, someone wants to mess with state clear\n");
475 out;
476 case CSR_STATE_SET:
477 printk("doh, someone wants to mess with state set\n");
478 out;
479
480 case CSR_NODE_IDS:
481 host->csr.node_ids &= NODE_MASK << 16;
482 host->csr.node_ids |= be32_to_cpu(*(data++)) & (BUS_MASK << 16);
483 host->node_id = host->csr.node_ids >> 16;
484 host->driver->devctl(host, SET_BUS_ID, host->node_id >> 6);
485 out;
486
487 case CSR_RESET_START:
488 /* FIXME - perform command reset */
489 out;
490
491 /* address gap */
492 return RCODE_ADDRESS_ERROR;
493
494 case CSR_SPLIT_TIMEOUT_HI:
495 host->csr.split_timeout_hi =
496 be32_to_cpu(*(data++)) & 0x00000007;
497 calculate_expire(&host->csr);
498 out;
499 case CSR_SPLIT_TIMEOUT_LO:
500 host->csr.split_timeout_lo =
501 be32_to_cpu(*(data++)) & 0xfff80000;
502 calculate_expire(&host->csr);
503 out;
504
505 /* address gap */
506 return RCODE_ADDRESS_ERROR;
507
508 case CSR_CYCLE_TIME:
509 /* should only be set by cycle start packet, automatically */
510 host->csr.cycle_time = be32_to_cpu(*data);
511 host->driver->devctl(host, SET_CYCLE_COUNTER,
512 be32_to_cpu(*(data++)));
513 out;
514 case CSR_BUS_TIME:
515 host->csr.bus_time = be32_to_cpu(*(data++)) & 0xffffff80;
516 out;
517
518 /* address gap */
519 return RCODE_ADDRESS_ERROR;
520
521 case CSR_BUSY_TIMEOUT:
522 /* not yet implemented */
523 return RCODE_ADDRESS_ERROR;
524
525 case CSR_BUS_MANAGER_ID:
526 case CSR_BANDWIDTH_AVAILABLE:
527 case CSR_CHANNELS_AVAILABLE_HI:
528 case CSR_CHANNELS_AVAILABLE_LO:
529 /* these are not writable, only lockable */
530 return RCODE_TYPE_ERROR;
531
532 case CSR_BROADCAST_CHANNEL:
533 /* only the valid bit can be written */
534 host->csr.broadcast_channel = (host->csr.broadcast_channel & ~0x40000000)
535 | (be32_to_cpu(*data) & 0x40000000);
536 out;
537
538 /* address gap to end - fall through */
539 default:
540 return RCODE_ADDRESS_ERROR;
541 }
542
543 return RCODE_COMPLETE;
544}
545
546#undef out
547
548
549static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
550 u64 addr, quadlet_t data, quadlet_t arg, int extcode, u16 fl)
551{
552 int csraddr = addr - CSR_REGISTER_BASE;
553 unsigned long flags;
554 quadlet_t *regptr = NULL;
555
556 if (csraddr & 0x3)
557 return RCODE_TYPE_ERROR;
558
559 if (csraddr < CSR_BUS_MANAGER_ID || csraddr > CSR_CHANNELS_AVAILABLE_LO
560 || extcode != EXTCODE_COMPARE_SWAP)
561 goto unsupported_lockreq;
562
563 data = be32_to_cpu(data);
564 arg = be32_to_cpu(arg);
565
566 /* Is somebody releasing the broadcast_channel on us? */
567 if (csraddr == CSR_CHANNELS_AVAILABLE_HI && (data & 0x1)) {
568		/* Note: this may not be the right way to handle
569 * the problem, so we should look into the proper way
570 * eventually. */
571 HPSB_WARN("Node [" NODE_BUS_FMT "] wants to release "
572 "broadcast channel 31. Ignoring.",
573 NODE_BUS_ARGS(host, nodeid));
574
575 data &= ~0x1; /* keep broadcast channel allocated */
576 }
577
578 if (host->driver->hw_csr_reg) {
579 quadlet_t old;
580
581 old = host->driver->
582 hw_csr_reg(host, (csraddr - CSR_BUS_MANAGER_ID) >> 2,
583 data, arg);
584
585 *store = cpu_to_be32(old);
586 return RCODE_COMPLETE;
587 }
588
589 spin_lock_irqsave(&host->csr.lock, flags);
590
591 switch (csraddr) {
592 case CSR_BUS_MANAGER_ID:
593 regptr = &host->csr.bus_manager_id;
594 *store = cpu_to_be32(*regptr);
595 if (*regptr == arg)
596 *regptr = data;
597 break;
598
599 case CSR_BANDWIDTH_AVAILABLE:
600 {
601 quadlet_t bandwidth;
602 quadlet_t old;
603 quadlet_t new;
604
605 regptr = &host->csr.bandwidth_available;
606 old = *regptr;
607
608 /* bandwidth available algorithm adapted from IEEE 1394a-2000 spec */
609 if (arg > 0x1fff) {
610 *store = cpu_to_be32(old); /* change nothing */
611 break;
612 }
613 data &= 0x1fff;
614 if (arg >= data) {
615 /* allocate bandwidth */
616 bandwidth = arg - data;
617 if (old >= bandwidth) {
618 new = old - bandwidth;
619 *store = cpu_to_be32(arg);
620 *regptr = new;
621 } else {
622 *store = cpu_to_be32(old);
623 }
624 } else {
625 /* deallocate bandwidth */
626 bandwidth = data - arg;
627 if (old + bandwidth < 0x2000) {
628 new = old + bandwidth;
629 *store = cpu_to_be32(arg);
630 *regptr = new;
631 } else {
632 *store = cpu_to_be32(old);
633 }
634 }
635 break;
636 }
637
638 case CSR_CHANNELS_AVAILABLE_HI:
639 {
640 /* Lock algorithm for CHANNELS_AVAILABLE as recommended by 1394a-2000 */
641 quadlet_t affected_channels = arg ^ data;
642
643 regptr = &host->csr.channels_available_hi;
644
645 if ((arg & affected_channels) == (*regptr & affected_channels)) {
646 *regptr ^= affected_channels;
647 *store = cpu_to_be32(arg);
648 } else {
649 *store = cpu_to_be32(*regptr);
650 }
651
652 break;
653 }
654
655 case CSR_CHANNELS_AVAILABLE_LO:
656 {
657 /* Lock algorithm for CHANNELS_AVAILABLE as recommended by 1394a-2000 */
658 quadlet_t affected_channels = arg ^ data;
659
660 regptr = &host->csr.channels_available_lo;
661
662 if ((arg & affected_channels) == (*regptr & affected_channels)) {
663 *regptr ^= affected_channels;
664 *store = cpu_to_be32(arg);
665 } else {
666 *store = cpu_to_be32(*regptr);
667 }
668 break;
669 }
670 }
671
672 spin_unlock_irqrestore(&host->csr.lock, flags);
673
674 return RCODE_COMPLETE;
675
676 unsupported_lockreq:
677 switch (csraddr) {
678 case CSR_STATE_CLEAR:
679 case CSR_STATE_SET:
680 case CSR_RESET_START:
681 case CSR_NODE_IDS:
682 case CSR_SPLIT_TIMEOUT_HI:
683 case CSR_SPLIT_TIMEOUT_LO:
684 case CSR_CYCLE_TIME:
685 case CSR_BUS_TIME:
686 case CSR_BROADCAST_CHANNEL:
687 return RCODE_TYPE_ERROR;
688
689 case CSR_BUSY_TIMEOUT:
690 /* not yet implemented - fall through */
691 default:
692 return RCODE_ADDRESS_ERROR;
693 }
694}
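To make the BANDWIDTH_AVAILABLE branch above concrete, here is a stand-alone sketch that replays one allocation round with made-up numbers (a requester that read 4915 and wants 1000 allocation units). It mirrors the lock_regs() logic for that register but is not driver code; all names are local to the example.

#include <stdio.h>

/* Mirrors the allocate/deallocate branch of lock_regs() for
 * CSR_BANDWIDTH_AVAILABLE; 'reg' stands in for host->csr.bandwidth_available. */
static unsigned int bw_compare_swap(unsigned int *reg, unsigned int arg,
				    unsigned int data)
{
	unsigned int old = *reg;

	if (arg > 0x1fff)
		return old;			/* change nothing */
	data &= 0x1fff;
	if (arg >= data) {			/* allocate arg - data units */
		if (old >= arg - data) {
			*reg = old - (arg - data);
			return arg;		/* requester sees its own arg: success */
		}
	} else {				/* deallocate data - arg units */
		if (old + (data - arg) < 0x2000) {
			*reg = old + (data - arg);
			return arg;
		}
	}
	return old;				/* mismatch: requester must retry */
}

int main(void)
{
	unsigned int reg = 4915;		/* initial value set in add_host() */
	unsigned int arg = 4915;		/* value the requester read */
	unsigned int data = arg - 1000;		/* it wants 1000 units */
	unsigned int store = bw_compare_swap(&reg, arg, data);

	printf("returned %u (%s), register now %u\n",
	       store, store == arg ? "allocation succeeded" : "retry needed", reg);
	return 0;
}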
695
696static int lock64_regs(struct hpsb_host *host, int nodeid, octlet_t * store,
697 u64 addr, octlet_t data, octlet_t arg, int extcode, u16 fl)
698{
699 int csraddr = addr - CSR_REGISTER_BASE;
700 unsigned long flags;
701
702 data = be64_to_cpu(data);
703 arg = be64_to_cpu(arg);
704
705 if (csraddr & 0x3)
706 return RCODE_TYPE_ERROR;
707
708 if (csraddr != CSR_CHANNELS_AVAILABLE
709 || extcode != EXTCODE_COMPARE_SWAP)
710 goto unsupported_lock64req;
711
712 /* Is somebody releasing the broadcast_channel on us? */
713 if (csraddr == CSR_CHANNELS_AVAILABLE_HI && (data & 0x100000000ULL)) {
714		/* Note: this may not be the right way to handle
715 * the problem, so we should look into the proper way
716 * eventually. */
717 HPSB_WARN("Node [" NODE_BUS_FMT "] wants to release "
718 "broadcast channel 31. Ignoring.",
719 NODE_BUS_ARGS(host, nodeid));
720
721 data &= ~0x100000000ULL; /* keep broadcast channel allocated */
722 }
723
724 if (host->driver->hw_csr_reg) {
725 quadlet_t data_hi, data_lo;
726 quadlet_t arg_hi, arg_lo;
727 quadlet_t old_hi, old_lo;
728
729 data_hi = data >> 32;
730 data_lo = data & 0xFFFFFFFF;
731 arg_hi = arg >> 32;
732 arg_lo = arg & 0xFFFFFFFF;
733
734 old_hi = host->driver->hw_csr_reg(host, (csraddr - CSR_BUS_MANAGER_ID) >> 2,
735 data_hi, arg_hi);
736
737 old_lo = host->driver->hw_csr_reg(host, ((csraddr + 4) - CSR_BUS_MANAGER_ID) >> 2,
738 data_lo, arg_lo);
739
740 *store = cpu_to_be64(((octlet_t)old_hi << 32) | old_lo);
741 } else {
742 octlet_t old;
743 octlet_t affected_channels = arg ^ data;
744
745 spin_lock_irqsave(&host->csr.lock, flags);
746
747 old = ((octlet_t)host->csr.channels_available_hi << 32) | host->csr.channels_available_lo;
748
749 if ((arg & affected_channels) == (old & affected_channels)) {
750 host->csr.channels_available_hi ^= (affected_channels >> 32);
751 host->csr.channels_available_lo ^= (affected_channels & 0xffffffff);
752 *store = cpu_to_be64(arg);
753 } else {
754 *store = cpu_to_be64(old);
755 }
756
757 spin_unlock_irqrestore(&host->csr.lock, flags);
758 }
759
760 /* Is somebody erroneously releasing the broadcast_channel on us? */
761 if (host->csr.channels_available_hi & 0x1)
762 host->csr.channels_available_hi &= ~0x1;
763
764 return RCODE_COMPLETE;
765
766 unsupported_lock64req:
767 switch (csraddr) {
768 case CSR_STATE_CLEAR:
769 case CSR_STATE_SET:
770 case CSR_RESET_START:
771 case CSR_NODE_IDS:
772 case CSR_SPLIT_TIMEOUT_HI:
773 case CSR_SPLIT_TIMEOUT_LO:
774 case CSR_CYCLE_TIME:
775 case CSR_BUS_TIME:
776 case CSR_BUS_MANAGER_ID:
777 case CSR_BROADCAST_CHANNEL:
778 case CSR_BUSY_TIMEOUT:
779 case CSR_BANDWIDTH_AVAILABLE:
780 return RCODE_TYPE_ERROR;
781
782 default:
783 return RCODE_ADDRESS_ERROR;
784 }
785}
786
787static int write_fcp(struct hpsb_host *host, int nodeid, int dest,
788 quadlet_t *data, u64 addr, size_t length, u16 flags)
789{
790 int csraddr = addr - CSR_REGISTER_BASE;
791
792 if (length > 512)
793 return RCODE_TYPE_ERROR;
794
795 switch (csraddr) {
796 case CSR_FCP_COMMAND:
797 highlevel_fcp_request(host, nodeid, 0, (u8 *)data, length);
798 break;
799 case CSR_FCP_RESPONSE:
800 highlevel_fcp_request(host, nodeid, 1, (u8 *)data, length);
801 break;
802 default:
803 return RCODE_TYPE_ERROR;
804 }
805
806 return RCODE_COMPLETE;
807}
808
809static int read_config_rom(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
810 u64 addr, size_t length, u16 fl)
811{
812 u32 offset = addr - CSR1212_REGISTER_SPACE_BASE;
813
814 if (csr1212_read(host->csr.rom, offset, buffer, length) == CSR1212_SUCCESS)
815 return RCODE_COMPLETE;
816 else
817 return RCODE_ADDRESS_ERROR;
818}
819
820static u64 allocate_addr_range(u64 size, u32 alignment, void *__host)
821{
822 struct hpsb_host *host = (struct hpsb_host*)__host;
823
824 return hpsb_allocate_and_register_addrspace(&csr_highlevel,
825 host,
826 &config_rom_ops,
827 size, alignment,
828 CSR1212_UNITS_SPACE_BASE,
829 CSR1212_UNITS_SPACE_END);
830}
831
832static void release_addr_range(u64 addr, void *__host)
833{
834 struct hpsb_host *host = (struct hpsb_host*)__host;
835 hpsb_unregister_addrspace(&csr_highlevel, host, addr);
836}
837
838
839int init_csr(void)
840{
841 node_cap = csr1212_new_immediate(CSR1212_KV_ID_NODE_CAPABILITIES, 0x0083c0);
842 if (!node_cap) {
843		HPSB_ERR("Failed to allocate memory for Node Capabilities ConfigROM entry!");
844 return -ENOMEM;
845 }
846
847 hpsb_register_highlevel(&csr_highlevel);
848
849 return 0;
850}
851
852void cleanup_csr(void)
853{
854 if (node_cap)
855 csr1212_release_keyval(node_cap);
856 hpsb_unregister_highlevel(&csr_highlevel);
857}
diff --git a/drivers/ieee1394/csr.h b/drivers/ieee1394/csr.h
new file mode 100644
index 000000000000..ea9aa4f53ab6
--- /dev/null
+++ b/drivers/ieee1394/csr.h
@@ -0,0 +1,96 @@
1
2#ifndef _IEEE1394_CSR_H
3#define _IEEE1394_CSR_H
4
5#ifdef CONFIG_PREEMPT
6#include <linux/sched.h>
7#endif
8
9#include "csr1212.h"
10
11#define CSR_REGISTER_BASE 0xfffff0000000ULL
12
13/* register offsets relative to CSR_REGISTER_BASE */
14#define CSR_STATE_CLEAR 0x0
15#define CSR_STATE_SET 0x4
16#define CSR_NODE_IDS 0x8
17#define CSR_RESET_START 0xc
18#define CSR_SPLIT_TIMEOUT_HI 0x18
19#define CSR_SPLIT_TIMEOUT_LO 0x1c
20#define CSR_CYCLE_TIME 0x200
21#define CSR_BUS_TIME 0x204
22#define CSR_BUSY_TIMEOUT 0x210
23#define CSR_BUS_MANAGER_ID 0x21c
24#define CSR_BANDWIDTH_AVAILABLE 0x220
25#define CSR_CHANNELS_AVAILABLE 0x224
26#define CSR_CHANNELS_AVAILABLE_HI 0x224
27#define CSR_CHANNELS_AVAILABLE_LO 0x228
28#define CSR_BROADCAST_CHANNEL 0x234
29#define CSR_CONFIG_ROM 0x400
30#define CSR_CONFIG_ROM_END 0x800
31#define CSR_FCP_COMMAND 0xB00
32#define CSR_FCP_RESPONSE 0xD00
33#define CSR_FCP_END 0xF00
34#define CSR_TOPOLOGY_MAP 0x1000
35#define CSR_TOPOLOGY_MAP_END 0x1400
36#define CSR_SPEED_MAP 0x2000
37#define CSR_SPEED_MAP_END 0x3000
38
39/* IEEE 1394 bus specific Configuration ROM Key IDs */
40#define IEEE1394_KV_ID_POWER_REQUIREMENTS (0x30)
41
42/* IEEE 1394 Bus Information Block specifics */
43#define CSR_BUS_INFO_SIZE (5 * sizeof(quadlet_t))
44
45#define CSR_IRMC_SHIFT 31
46#define CSR_CMC_SHIFT 30
47#define CSR_ISC_SHIFT 29
48#define CSR_BMC_SHIFT 28
49#define CSR_PMC_SHIFT 27
50#define CSR_CYC_CLK_ACC_SHIFT 16
51#define CSR_MAX_REC_SHIFT 12
52#define CSR_MAX_ROM_SHIFT 8
53#define CSR_GENERATION_SHIFT 4
54
55#define CSR_SET_BUS_INFO_GENERATION(csr, gen) \
56 ((csr)->bus_info_data[2] = \
57 cpu_to_be32((be32_to_cpu((csr)->bus_info_data[2]) & \
58 ~(0xf << CSR_GENERATION_SHIFT)) | \
59 (gen) << CSR_GENERATION_SHIFT))
60
61struct csr_control {
62 spinlock_t lock;
63
64 quadlet_t state;
65 quadlet_t node_ids;
66 quadlet_t split_timeout_hi, split_timeout_lo;
67 unsigned long expire; // Calculated from split_timeout
68 quadlet_t cycle_time;
69 quadlet_t bus_time;
70 quadlet_t bus_manager_id;
71 quadlet_t bandwidth_available;
72 quadlet_t channels_available_hi, channels_available_lo;
73 quadlet_t broadcast_channel;
74
75 /* Bus Info */
76 quadlet_t guid_hi, guid_lo;
77 u8 cyc_clk_acc;
78 u8 max_rec;
79 u8 max_rom;
80 u8 generation; /* Only use values between 0x2 and 0xf */
81 u8 lnk_spd;
82
83 unsigned long gen_timestamp[16];
84
85 struct csr1212_csr *rom;
86
87 quadlet_t topology_map[256];
88 quadlet_t speed_map[1024];
89};
90
91extern struct csr1212_bus_ops csr_bus_ops;
92
93int init_csr(void);
94void cleanup_csr(void);
95
96#endif /* _IEEE1394_CSR_H */
diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c
new file mode 100644
index 000000000000..7c4330e2e875
--- /dev/null
+++ b/drivers/ieee1394/csr1212.c
@@ -0,0 +1,1612 @@
1/*
2 * csr1212.c -- IEEE 1212 Control and Status Register support for Linux
3 *
4 * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
5 * Steve Kinneberg <kinnebergsteve@acmsystems.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
21 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
27 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30
31/* TODO List:
32 * - Verify interface consistency: i.e., public functions that take a size
33 * parameter expect size to be in bytes.
34 * - Convenience functions for reading a block of data from a given offset.
35 */
36
37#ifndef __KERNEL__
38#include <string.h>
39#endif
40
41#include "csr1212.h"
42
43
44/* Permitted key type for each key id */
45#define __I (1 << CSR1212_KV_TYPE_IMMEDIATE)
46#define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
47#define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
48#define __L (1 << CSR1212_KV_TYPE_LEAF)
49static const u_int8_t csr1212_key_id_type_map[0x30] = {
50 0, /* Reserved */
51 __D | __L, /* Descriptor */
52 __I | __D | __L, /* Bus_Dependent_Info */
53 __I | __D | __L, /* Vendor */
54 __I, /* Hardware_Version */
55 0, 0, /* Reserved */
56 __D | __L, /* Module */
57 0, 0, 0, 0, /* Reserved */
58 __I, /* Node_Capabilities */
59 __L, /* EUI_64 */
60 0, 0, 0, /* Reserved */
61 __D, /* Unit */
62 __I, /* Specifier_ID */
63 __I, /* Version */
64 __I | __C | __D | __L, /* Dependent_Info */
65 __L, /* Unit_Location */
66 0, /* Reserved */
67 __I, /* Model */
68 __D, /* Instance */
69 __L, /* Keyword */
70 __D, /* Feature */
71 __L, /* Extended_ROM */
72 __I, /* Extended_Key_Specifier_ID */
73 __I, /* Extended_Key */
74 __I | __C | __D | __L, /* Extended_Data */
75 __L, /* Modifiable_Descriptor */
76 __I, /* Directory_ID */
77 __I, /* Revision */
78};
79#undef __I
80#undef __C
81#undef __D
82#undef __L
83
84
85#define quads_to_bytes(_q) ((_q) * sizeof(u_int32_t))
86#define bytes_to_quads(_b) (((_b) + sizeof(u_int32_t) - 1) / sizeof(u_int32_t))
87
88static inline void free_keyval(struct csr1212_keyval *kv)
89{
90 if ((kv->key.type == CSR1212_KV_TYPE_LEAF) &&
91 (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM))
92 CSR1212_FREE(kv->value.leaf.data);
93
94 CSR1212_FREE(kv);
95}
96
97static u_int16_t csr1212_crc16(const u_int32_t *buffer, size_t length)
98{
99 int shift;
100 u_int32_t data;
101 u_int16_t sum, crc = 0;
102
103 for (; length; length--) {
104 data = CSR1212_BE32_TO_CPU(*buffer);
105 buffer++;
106 for (shift = 28; shift >= 0; shift -= 4 ) {
107 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
108 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
109 }
110 crc &= 0xffff;
111 }
112
113 return CSR1212_CPU_TO_BE16(crc);
114}
115
116#if 0
117/* Microsoft computes the CRC with the bytes in reverse order. Therefore we
118 * have a special version of the CRC algorithm to account for their buggy
119 * software. */
120static u_int16_t csr1212_msft_crc16(const u_int32_t *buffer, size_t length)
121{
122 int shift;
123 u_int32_t data;
124 u_int16_t sum, crc = 0;
125
126 for (; length; length--) {
127 data = CSR1212_LE32_TO_CPU(*buffer);
128 buffer++;
129 for (shift = 28; shift >= 0; shift -= 4 ) {
130 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
131 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
132 }
133 crc &= 0xffff;
134 }
135
136 return CSR1212_CPU_TO_BE16(crc);
137}
138#endif
139
140static inline struct csr1212_dentry *csr1212_find_keyval(struct csr1212_keyval *dir,
141 struct csr1212_keyval *kv)
142{
143 struct csr1212_dentry *pos;
144
145 for (pos = dir->value.directory.dentries_head;
146 pos != NULL; pos = pos->next) {
147 if (pos->kv == kv)
148 return pos;
149 }
150 return NULL;
151}
152
153
154static inline struct csr1212_keyval *csr1212_find_keyval_offset(struct csr1212_keyval *kv_list,
155 u_int32_t offset)
156{
157 struct csr1212_keyval *kv;
158
159 for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next) {
160 if (kv->offset == offset)
161 return kv;
162 }
163 return NULL;
164}
165
166
167/* Creation Routines */
168struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
169 size_t bus_info_size, void *private)
170{
171 struct csr1212_csr *csr;
172
173 csr = CSR1212_MALLOC(sizeof(*csr));
174 if (!csr)
175 return NULL;
176
177 csr->cache_head =
178 csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
179 CSR1212_CONFIG_ROM_SPACE_SIZE);
180 if (!csr->cache_head) {
181 CSR1212_FREE(csr);
182 return NULL;
183 }
184
185 /* The keyval key id is not used for the root node, but a valid key id
186 * that can be used for a directory needs to be passed to
187 * csr1212_new_directory(). */
188 csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR);
189 if (!csr->root_kv) {
190 CSR1212_FREE(csr->cache_head);
191 CSR1212_FREE(csr);
192 return NULL;
193 }
194
195 csr->bus_info_data = csr->cache_head->data;
196 csr->bus_info_len = bus_info_size;
197 csr->crc_len = bus_info_size;
198 csr->ops = ops;
199 csr->private = private;
200 csr->cache_tail = csr->cache_head;
201
202 return csr;
203}
204
205
206
207void csr1212_init_local_csr(struct csr1212_csr *csr,
208 const u_int32_t *bus_info_data, int max_rom)
209{
210 static const int mr_map[] = { 4, 64, 1024, 0 };
211
212 csr->max_rom = mr_map[max_rom];
213 memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
214}
215
216
217static struct csr1212_keyval *csr1212_new_keyval(u_int8_t type, u_int8_t key)
218{
219 struct csr1212_keyval *kv;
220
221 if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0))
222 return NULL;
223
224 kv = CSR1212_MALLOC(sizeof(*kv));
225 if (!kv)
226 return NULL;
227
228 kv->key.type = type;
229 kv->key.id = key;
230
231 kv->associate = NULL;
232 kv->refcnt = 1;
233
234 kv->next = NULL;
235 kv->prev = NULL;
236 kv->offset = 0;
237 kv->valid = 0;
238 return kv;
239}
240
241struct csr1212_keyval *csr1212_new_immediate(u_int8_t key, u_int32_t value)
242{
243 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
244
245 if (!kv)
246 return NULL;
247
248 kv->value.immediate = value;
249 kv->valid = 1;
250 return kv;
251}
252
253struct csr1212_keyval *csr1212_new_leaf(u_int8_t key, const void *data, size_t data_len)
254{
255 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
256
257 if (!kv)
258 return NULL;
259
260 if (data_len > 0) {
261 kv->value.leaf.data = CSR1212_MALLOC(data_len);
262 if (!kv->value.leaf.data) {
263 CSR1212_FREE(kv);
264 return NULL;
265 }
266
267 if (data)
268 memcpy(kv->value.leaf.data, data, data_len);
269 } else {
270 kv->value.leaf.data = NULL;
271 }
272
273 kv->value.leaf.len = bytes_to_quads(data_len);
274 kv->offset = 0;
275 kv->valid = 1;
276
277 return kv;
278}
279
280struct csr1212_keyval *csr1212_new_csr_offset(u_int8_t key, u_int32_t csr_offset)
281{
282 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
283
284 if (!kv)
285 return NULL;
286
287 kv->value.csr_offset = csr_offset;
288
289 kv->offset = 0;
290 kv->valid = 1;
291 return kv;
292}
293
294struct csr1212_keyval *csr1212_new_directory(u_int8_t key)
295{
296 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
297
298 if (!kv)
299 return NULL;
300
301 kv->value.directory.len = 0;
302 kv->offset = 0;
303 kv->value.directory.dentries_head = NULL;
304 kv->value.directory.dentries_tail = NULL;
305 kv->valid = 1;
306 return kv;
307}
308
309int csr1212_associate_keyval(struct csr1212_keyval *kv,
310 struct csr1212_keyval *associate)
311{
312 if (!kv || !associate)
313 return CSR1212_EINVAL;
314
315 if (kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
316 (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
317 associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
318 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
319 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
320 associate->key.id < 0x30))
321 return CSR1212_EINVAL;
322
323 if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
324 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY)
325 return CSR1212_EINVAL;
326
327 if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
328 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA)
329 return CSR1212_EINVAL;
330
331 if (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
332 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID)
333 return CSR1212_EINVAL;
334
335 if (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
336 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY)
337 return CSR1212_EINVAL;
338
339 if (kv->associate)
340 csr1212_release_keyval(kv->associate);
341
342 associate->refcnt++;
343 kv->associate = associate;
344
345 return CSR1212_SUCCESS;
346}
347
348int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
349 struct csr1212_keyval *kv)
350{
351 struct csr1212_dentry *dentry;
352
353 if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
354 return CSR1212_EINVAL;
355
356 dentry = CSR1212_MALLOC(sizeof(*dentry));
357 if (!dentry)
358 return CSR1212_ENOMEM;
359
360 dentry->kv = kv;
361
362 kv->refcnt++;
363
364 dentry->next = NULL;
365 dentry->prev = dir->value.directory.dentries_tail;
366
367 if (!dir->value.directory.dentries_head)
368 dir->value.directory.dentries_head = dentry;
369
370 if (dir->value.directory.dentries_tail)
371 dir->value.directory.dentries_tail->next = dentry;
372 dir->value.directory.dentries_tail = dentry;
373
374 return CSR1212_SUCCESS;
375}
376
377struct csr1212_keyval *csr1212_new_extended_immediate(u_int32_t spec, u_int32_t key,
378 u_int32_t value)
379{
380 struct csr1212_keyval *kvs, *kvk, *kvv;
381
382 kvs = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID, spec);
383 kvk = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY, key);
384 kvv = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_DATA, value);
385
386 if (!kvs || !kvk || !kvv) {
387 if (kvs)
388 free_keyval(kvs);
389 if (kvk)
390 free_keyval(kvk);
391 if (kvv)
392 free_keyval(kvv);
393 return NULL;
394 }
395
396 /* Don't keep a local reference to the extended key or value. */
397 kvk->refcnt = 0;
398 kvv->refcnt = 0;
399
400 csr1212_associate_keyval(kvk, kvv);
401 csr1212_associate_keyval(kvs, kvk);
402
403 return kvs;
404}
405
406struct csr1212_keyval *csr1212_new_extended_leaf(u_int32_t spec, u_int32_t key,
407 const void *data, size_t data_len)
408{
409 struct csr1212_keyval *kvs, *kvk, *kvv;
410
411 kvs = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID, spec);
412 kvk = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY, key);
413 kvv = csr1212_new_leaf(CSR1212_KV_ID_EXTENDED_DATA, data, data_len);
414
415 if (!kvs || !kvk || !kvv) {
416 if (kvs)
417 free_keyval(kvs);
418 if (kvk)
419 free_keyval(kvk);
420 if (kvv)
421 free_keyval(kvv);
422 return NULL;
423 }
424
425 /* Don't keep a local reference to the extended key or value. */
426 kvk->refcnt = 0;
427 kvv->refcnt = 0;
428
429 csr1212_associate_keyval(kvk, kvv);
430 csr1212_associate_keyval(kvs, kvk);
431
432 return kvs;
433}
434
435struct csr1212_keyval *csr1212_new_descriptor_leaf(u_int8_t dtype, u_int32_t specifier_id,
436 const void *data, size_t data_len)
437{
438 struct csr1212_keyval *kv;
439
440 kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL,
441 data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD);
442 if (!kv)
443 return NULL;
444
445 CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
446 CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
447
448 if (data) {
449 memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);
450 }
451
452 return kv;
453}
454
455
456struct csr1212_keyval *csr1212_new_textual_descriptor_leaf(u_int8_t cwidth,
457 u_int16_t cset,
458 u_int16_t language,
459 const void *data,
460 size_t data_len)
461{
462 struct csr1212_keyval *kv;
463 char *lstr;
464
465 kv = csr1212_new_descriptor_leaf(0, 0, NULL, data_len +
466 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
467 if (!kv)
468 return NULL;
469
470 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, cwidth);
471 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, cset);
472 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
473
474 lstr = (char*)CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
475
476 /* make sure last quadlet is zeroed out */
477 *((u_int32_t*)&(lstr[(data_len - 1) & ~0x3])) = 0;
478
479 /* don't copy the NUL terminator */
480 memcpy(lstr, data, data_len);
481
482 return kv;
483}
484
485static int csr1212_check_minimal_ascii(const char *s)
486{
487 static const char minimal_ascii_table[] = {
488 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
489 0x00, 0x00, 0x0a, 0x00, 0x0C, 0x0D, 0x00, 0x00,
490 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
491 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
492 0x20, 0x21, 0x22, 0x00, 0x00, 0x25, 0x26, 0x27,
493 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
494 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
495 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
496 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
497 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
498 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
499 0x58, 0x59, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x5f,
500 0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
501 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
502 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
503 0x78, 0x79, 0x7a, 0x00, 0x00, 0x00, 0x00, 0x00,
504 };
505 for (; *s; s++) {
506 if (minimal_ascii_table[*s & 0x7F] != *s)
507 return -1; /* failed */
508 }
509 /* String conforms to minimal-ascii, as specified by IEEE 1212,
510 * par. 7.4 */
511 return 0;
512}
513
514struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
515{
516	/* Check if the string conforms to the minimal_ascii format */
517 if (csr1212_check_minimal_ascii(s))
518 return NULL;
519
520 /* IEEE 1212, par. 7.5.4.1 Textual descriptors (minimal ASCII) */
521 return csr1212_new_textual_descriptor_leaf(0, 0, 0, s, strlen(s));
522}
523
524struct csr1212_keyval *csr1212_new_icon_descriptor_leaf(u_int32_t version,
525 u_int8_t palette_depth,
526 u_int8_t color_space,
527 u_int16_t language,
528 u_int16_t hscan,
529 u_int16_t vscan,
530 u_int32_t *palette,
531 u_int32_t *pixels)
532{
533 static const int pd[4] = { 0, 4, 16, 256 };
534 static const int cs[16] = { 4, 2 };
535 struct csr1212_keyval *kv;
536 int palette_size = pd[palette_depth] * cs[color_space];
537 int pixel_size = (hscan * vscan + 3) & ~0x3;
538
539 if ((palette_depth && !palette) || !pixels)
540 return NULL;
541
542 kv = csr1212_new_descriptor_leaf(1, 0, NULL,
543 palette_size + pixel_size +
544 CSR1212_ICON_DESCRIPTOR_LEAF_OVERHEAD);
545 if (!kv)
546 return NULL;
547
548 CSR1212_ICON_DESCRIPTOR_LEAF_SET_VERSION(kv, version);
549 CSR1212_ICON_DESCRIPTOR_LEAF_SET_PALETTE_DEPTH(kv, palette_depth);
550 CSR1212_ICON_DESCRIPTOR_LEAF_SET_COLOR_SPACE(kv, color_space);
551 CSR1212_ICON_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
552 CSR1212_ICON_DESCRIPTOR_LEAF_SET_HSCAN(kv, hscan);
553 CSR1212_ICON_DESCRIPTOR_LEAF_SET_VSCAN(kv, vscan);
554
555 if (palette_size)
556 memcpy(CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE(kv), palette,
557 palette_size);
558
559 memcpy(CSR1212_ICON_DESCRIPTOR_LEAF_PIXELS(kv), pixels, pixel_size);
560
561 return kv;
562}
563
564struct csr1212_keyval *csr1212_new_modifiable_descriptor_leaf(u_int16_t max_size,
565 u_int64_t address)
566{
567 struct csr1212_keyval *kv;
568
569 /* IEEE 1212, par. 7.5.4.3 Modifiable descriptors */
570 kv = csr1212_new_leaf(CSR1212_KV_ID_MODIFIABLE_DESCRIPTOR, NULL, sizeof(u_int64_t));
571 if(!kv)
572 return NULL;
573
574 CSR1212_MODIFIABLE_DESCRIPTOR_SET_MAX_SIZE(kv, max_size);
575 CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_HI(kv, address);
576 CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_LO(kv, address);
577
578 return kv;
579}
580
581static int csr1212_check_keyword(const char *s)
582{
583 for (; *s; s++) {
584
585 if (('A' <= *s) && (*s <= 'Z'))
586 continue;
587 if (('0' <= *s) && (*s <= '9'))
588 continue;
589 if (*s == '-')
590 continue;
591
592 return -1; /* failed */
593 }
594 /* String conforms to keyword, as specified by IEEE 1212,
595 * par. 7.6.5 */
596 return CSR1212_SUCCESS;
597}
598
599struct csr1212_keyval *csr1212_new_keyword_leaf(int strc, const char *strv[])
600{
601 struct csr1212_keyval *kv;
602 char *buffer;
603 int i, data_len = 0;
604
605 /* Check all keywords to see if they conform to restrictions:
606	 * Only the following characters are allowed ['A'..'Z','0'..'9','-']
607 * Each word is zero-terminated.
608 * Also calculate the total length of the keywords.
609 */
610 for (i = 0; i < strc; i++) {
611 if (!strv[i] || csr1212_check_keyword(strv[i])) {
612 return NULL;
613 }
614 data_len += strlen(strv[i]) + 1; /* Add zero-termination char. */
615 }
616
617 /* IEEE 1212, par. 7.6.5 Keyword leaves */
618 kv = csr1212_new_leaf(CSR1212_KV_ID_KEYWORD, NULL, data_len);
619 if (!kv)
620 return NULL;
621
622 buffer = (char *)kv->value.leaf.data;
623
624 /* make sure last quadlet is zeroed out */
625 *((u_int32_t*)&(buffer[(data_len - 1) & ~0x3])) = 0;
626
627 /* Copy keyword(s) into leaf data buffer */
628 for (i = 0; i < strc; i++) {
629 int len = strlen(strv[i]) + 1;
630 memcpy(buffer, strv[i], len);
631 buffer += len;
632 }
633 return kv;
634}
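/*
 * Usage sketch (not part of the original file): building a Keyword leaf and
 * attaching it to a hypothetical unit directory.  The keyword strings and
 * the "unit_dir" keyval are assumptions.
 */
#if 0
static int example_add_keywords(struct csr1212_keyval *unit_dir)
{
	static const char *keywords[] = { "CAMERA", "DV" };
	struct csr1212_keyval *kw;

	/* Keywords may only contain 'A'..'Z', '0'..'9' and '-'. */
	kw = csr1212_new_keyword_leaf(2, keywords);
	if (!kw)
		return CSR1212_ENOMEM;

	if (csr1212_attach_keyval_to_directory(unit_dir, kw) != CSR1212_SUCCESS) {
		csr1212_release_keyval(kw);
		return CSR1212_ENOMEM;
	}

	/* The directory now holds its own reference, so drop ours. */
	csr1212_release_keyval(kw);
	return CSR1212_SUCCESS;
}
#endif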
635
636
637/* Destruction Routines */
638
639void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
640 struct csr1212_keyval *kv)
641{
642 struct csr1212_dentry *dentry;
643
644 if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
645 return;
646
647 dentry = csr1212_find_keyval(dir, kv);
648
649 if (!dentry)
650 return;
651
652 if (dentry->prev)
653 dentry->prev->next = dentry->next;
654 if (dentry->next)
655 dentry->next->prev = dentry->prev;
656 if (dir->value.directory.dentries_head == dentry)
657 dir->value.directory.dentries_head = dentry->next;
658 if (dir->value.directory.dentries_tail == dentry)
659 dir->value.directory.dentries_tail = dentry->prev;
660
661 CSR1212_FREE(dentry);
662
663 csr1212_release_keyval(kv);
664}
665
666
667void csr1212_disassociate_keyval(struct csr1212_keyval *kv)
668{
669 if (kv->associate) {
670 csr1212_release_keyval(kv->associate);
671 }
672
673 kv->associate = NULL;
674}
675
676
677/* This function is used to free the memory taken by a keyval. If the given
678 * keyval is a directory type, then any keyvals contained in that directory
679 * will be destroyed as well if their respective refcnts are 0. By means of
680 * list manipulation, this routine will descend a directory structure in a
681 * non-recursive manner. */
682void _csr1212_destroy_keyval(struct csr1212_keyval *kv)
683{
684 struct csr1212_keyval *k, *a;
685 struct csr1212_dentry dentry;
686 struct csr1212_dentry *head, *tail;
687
688 dentry.kv = kv;
689 dentry.next = NULL;
690 dentry.prev = NULL;
691
692 head = &dentry;
693 tail = head;
694
695 while (head) {
696 k = head->kv;
697
698 while (k) {
699 k->refcnt--;
700
701 if (k->refcnt > 0)
702 break;
703
704 a = k->associate;
705
706 if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
707 /* If the current entry is a directory, then move all
708 * the entries to the destruction list. */
709 if (k->value.directory.dentries_head) {
710 tail->next = k->value.directory.dentries_head;
711 k->value.directory.dentries_head->prev = tail;
712 tail = k->value.directory.dentries_tail;
713 }
714 }
715 free_keyval(k);
716 k = a;
717 }
718
719 head = head->next;
720 if (head) {
721 if (head->prev && head->prev != &dentry) {
722 CSR1212_FREE(head->prev);
723 }
724 head->prev = NULL;
725 } else if (tail != &dentry)
726 CSR1212_FREE(tail);
727 }
728}
729
730
731void csr1212_destroy_csr(struct csr1212_csr *csr)
732{
733 struct csr1212_csr_rom_cache *c, *oc;
734 struct csr1212_cache_region *cr, *ocr;
735
736 csr1212_release_keyval(csr->root_kv);
737
738 c = csr->cache_head;
739 while (c) {
740 oc = c;
741 cr = c->filled_head;
742 while (cr) {
743 ocr = cr;
744 cr = cr->next;
745 CSR1212_FREE(ocr);
746 }
747 c = c->next;
748 CSR1212_FREE(oc);
749 }
750
751 CSR1212_FREE(csr);
752}
753
754
755
756/* CSR Image Creation */
757
758static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
759{
760 struct csr1212_csr_rom_cache *cache;
761 u_int64_t csr_addr;
762
763 if (!csr || !csr->ops->allocate_addr_range ||
764 !csr->ops->release_addr)
765 return CSR1212_ENOMEM;
766
767 /* ROM size must be a multiple of csr->max_rom */
768 romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);
769
770 csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom, csr->private);
771 if (csr_addr == ~0ULL) {
772 return CSR1212_ENOMEM;
773 }
774 if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
775 /* Invalid address returned from allocate_addr_range(). */
776 csr->ops->release_addr(csr_addr, csr->private);
777 return CSR1212_ENOMEM;
778 }
779
780 cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE, romsize);
781 if (!cache) {
782 csr->ops->release_addr(csr_addr, csr->private);
783 return CSR1212_ENOMEM;
784 }
785
786 cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, CSR1212_KV_ID_EXTENDED_ROM);
787 if (!cache->ext_rom) {
788 csr->ops->release_addr(csr_addr, csr->private);
789 CSR1212_FREE(cache);
790 return CSR1212_ENOMEM;
791 }
792
793 if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) != CSR1212_SUCCESS) {
794 csr1212_release_keyval(cache->ext_rom);
795 csr->ops->release_addr(csr_addr, csr->private);
796 CSR1212_FREE(cache);
797 return CSR1212_ENOMEM;
798 }
799 cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
800 cache->ext_rom->value.leaf.len = -1;
801 cache->ext_rom->value.leaf.data = cache->data;
802
803 /* Add cache to tail of cache list */
804 cache->prev = csr->cache_tail;
805 csr->cache_tail->next = cache;
806 csr->cache_tail = cache;
807 return CSR1212_SUCCESS;
808}
809
810static inline void csr1212_remove_cache(struct csr1212_csr *csr,
811 struct csr1212_csr_rom_cache *cache)
812{
813 if (csr->cache_head == cache)
814 csr->cache_head = cache->next;
815 if (csr->cache_tail == cache)
816 csr->cache_tail = cache->prev;
817
818 if (cache->prev)
819 cache->prev->next = cache->next;
820 if (cache->next)
821 cache->next->prev = cache->prev;
822
823 if (cache->ext_rom) {
824 csr1212_detach_keyval_from_directory(csr->root_kv, cache->ext_rom);
825 csr1212_release_keyval(cache->ext_rom);
826 }
827
828 CSR1212_FREE(cache);
829}
830
831static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
832 struct csr1212_keyval **layout_tail)
833{
834 struct csr1212_dentry *dentry;
835 struct csr1212_keyval *dkv;
836 struct csr1212_keyval *last_extkey_spec = NULL;
837 struct csr1212_keyval *last_extkey = NULL;
838 int num_entries = 0;
839
840 for (dentry = dir->value.directory.dentries_head; dentry;
841 dentry = dentry->next) {
842 for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
843 /* Special Case: Extended Key Specifier_ID */
844 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
845 if (last_extkey_spec == NULL) {
846 last_extkey_spec = dkv;
847 } else if (dkv->value.immediate != last_extkey_spec->value.immediate) {
848 last_extkey_spec = dkv;
849 } else {
850 continue;
851 }
852 /* Special Case: Extended Key */
853 } else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
854 if (last_extkey == NULL) {
855 last_extkey = dkv;
856 } else if (dkv->value.immediate != last_extkey->value.immediate) {
857 last_extkey = dkv;
858 } else {
859 continue;
860 }
861 }
862
863 num_entries += 1;
864
865 switch(dkv->key.type) {
866 default:
867 case CSR1212_KV_TYPE_IMMEDIATE:
868 case CSR1212_KV_TYPE_CSR_OFFSET:
869 break;
870 case CSR1212_KV_TYPE_LEAF:
871 case CSR1212_KV_TYPE_DIRECTORY:
872 /* Remove from list */
873 if (dkv->prev && (dkv->prev->next == dkv))
874 dkv->prev->next = dkv->next;
875 if (dkv->next && (dkv->next->prev == dkv))
876 dkv->next->prev = dkv->prev;
877 //if (dkv == *layout_tail)
878 // *layout_tail = dkv->prev;
879
880				/* Special case: Extended ROM leaves */
881 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
882 dkv->value.leaf.len = -1;
883					/* Don't add Extended ROM leaves to the layout list;
884					 * they are handled differently. */
885 break;
886 }
887
888 /* Add to tail of list */
889 dkv->next = NULL;
890 dkv->prev = *layout_tail;
891 (*layout_tail)->next = dkv;
892 *layout_tail = dkv;
893 break;
894 }
895 }
896 }
897 return num_entries;
898}
899
900size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
901{
902 struct csr1212_keyval *ltail = kv;
903 size_t agg_size = 0;
904
905 while(kv) {
906 switch(kv->key.type) {
907 case CSR1212_KV_TYPE_LEAF:
908 /* Add 1 quadlet for crc/len field */
909 agg_size += kv->value.leaf.len + 1;
910 break;
911
912 case CSR1212_KV_TYPE_DIRECTORY:
913 kv->value.directory.len = csr1212_generate_layout_subdir(kv, &ltail);
914 /* Add 1 quadlet for crc/len field */
915 agg_size += kv->value.directory.len + 1;
916 break;
917 }
918 kv = kv->next;
919 }
920 return quads_to_bytes(agg_size);
921}
922
923struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
924 struct csr1212_keyval *start_kv,
925 int start_pos)
926{
927 struct csr1212_keyval *kv = start_kv;
928 struct csr1212_keyval *okv = start_kv;
929 int pos = start_pos;
930 int kv_len = 0, okv_len = 0;
931
932 cache->layout_head = kv;
933
934 while(kv && pos < cache->size) {
935		/* Special case: Extended ROM leaves */
936 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
937 kv->offset = cache->offset + pos;
938 }
939
940 switch(kv->key.type) {
941 case CSR1212_KV_TYPE_LEAF:
942 kv_len = kv->value.leaf.len;
943 break;
944
945 case CSR1212_KV_TYPE_DIRECTORY:
946 kv_len = kv->value.directory.len;
947 break;
948
949 default:
950 /* Should never get here */
951 break;
952 }
953
954 pos += quads_to_bytes(kv_len + 1);
955
956 if (pos <= cache->size) {
957 okv = kv;
958 okv_len = kv_len;
959 kv = kv->next;
960 }
961 }
962
963 cache->layout_tail = okv;
964 cache->len = (okv->offset - cache->offset) + quads_to_bytes(okv_len + 1);
965
966 return kv;
967}
968
969static void csr1212_generate_tree_subdir(struct csr1212_keyval *dir,
970 u_int32_t *data_buffer)
971{
972 struct csr1212_dentry *dentry;
973 struct csr1212_keyval *last_extkey_spec = NULL;
974 struct csr1212_keyval *last_extkey = NULL;
975 int index = 0;
976
977 for (dentry = dir->value.directory.dentries_head; dentry; dentry = dentry->next) {
978 struct csr1212_keyval *a;
979
980 for (a = dentry->kv; a; a = a->associate) {
981 u_int32_t value = 0;
982
983 /* Special Case: Extended Key Specifier_ID */
984 if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
985 if (last_extkey_spec == NULL) {
986 last_extkey_spec = a;
987 } else if (a->value.immediate != last_extkey_spec->value.immediate) {
988 last_extkey_spec = a;
989 } else {
990 continue;
991 }
992 /* Special Case: Extended Key */
993 } else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
994 if (last_extkey == NULL) {
995 last_extkey = a;
996 } else if (a->value.immediate != last_extkey->value.immediate) {
997 last_extkey = a;
998 } else {
999 continue;
1000 }
1001 }
1002
1003 switch(a->key.type) {
1004 case CSR1212_KV_TYPE_IMMEDIATE:
1005 value = a->value.immediate;
1006 break;
1007 case CSR1212_KV_TYPE_CSR_OFFSET:
1008 value = a->value.csr_offset;
1009 break;
1010 case CSR1212_KV_TYPE_LEAF:
1011 value = a->offset;
1012 value -= dir->offset + quads_to_bytes(1+index);
1013 value = bytes_to_quads(value);
1014 break;
1015 case CSR1212_KV_TYPE_DIRECTORY:
1016 value = a->offset;
1017 value -= dir->offset + quads_to_bytes(1+index);
1018 value = bytes_to_quads(value);
1019 break;
1020 default:
1021 /* Should never get here */
1022 break; /* GDB breakpoint */
1023 }
1024
1025 value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) << CSR1212_KV_KEY_SHIFT;
1026 value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
1027 (CSR1212_KV_KEY_SHIFT + CSR1212_KV_KEY_TYPE_SHIFT);
1028 data_buffer[index] = CSR1212_CPU_TO_BE32(value);
1029 index++;
1030 }
1031 }
1032}
1033
1034void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
1035{
1036 struct csr1212_keyval *kv, *nkv;
1037 struct csr1212_keyval_img *kvi;
1038
1039 for (kv = cache->layout_head; kv != cache->layout_tail->next; kv = nkv) {
1040 kvi = (struct csr1212_keyval_img *)
1041 (cache->data + bytes_to_quads(kv->offset - cache->offset));
1042 switch(kv->key.type) {
1043 default:
1044 case CSR1212_KV_TYPE_IMMEDIATE:
1045 case CSR1212_KV_TYPE_CSR_OFFSET:
1046 /* Should never get here */
1047 break; /* GDB breakpoint */
1048
1049 case CSR1212_KV_TYPE_LEAF:
1050 /* Don't copy over Extended ROM areas, they are
1051 * already filled out! */
1052 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
1053 memcpy(kvi->data, kv->value.leaf.data,
1054 quads_to_bytes(kv->value.leaf.len));
1055
1056 kvi->length = CSR1212_CPU_TO_BE16(kv->value.leaf.len);
1057 kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
1058 break;
1059
1060 case CSR1212_KV_TYPE_DIRECTORY:
1061 csr1212_generate_tree_subdir(kv, kvi->data);
1062
1063 kvi->length = CSR1212_CPU_TO_BE16(kv->value.directory.len);
1064 kvi->crc = csr1212_crc16(kvi->data, kv->value.directory.len);
1065 break;
1066 }
1067
1068 nkv = kv->next;
1069 if (kv->prev)
1070 kv->prev->next = NULL;
1071 if (kv->next)
1072 kv->next->prev = NULL;
1073 kv->prev = NULL;
1074 kv->next = NULL;
1075 }
1076}
1077
1078int csr1212_generate_csr_image(struct csr1212_csr *csr)
1079{
1080 struct csr1212_bus_info_block_img *bi;
1081 struct csr1212_csr_rom_cache *cache;
1082 struct csr1212_keyval *kv;
1083 size_t agg_size;
1084 int ret;
1085 int init_offset;
1086
1087 if (!csr)
1088 return CSR1212_EINVAL;
1089
1090 cache = csr->cache_head;
1091
1092 bi = (struct csr1212_bus_info_block_img*)cache->data;
1093
1094 bi->length = bytes_to_quads(csr->bus_info_len) - 1;
1095 bi->crc_length = bi->length;
1096 bi->crc = csr1212_crc16(bi->data, bi->crc_length);
1097
1098 csr->root_kv->next = NULL;
1099 csr->root_kv->prev = NULL;
1100
1101 agg_size = csr1212_generate_layout_order(csr->root_kv);
1102
1103 init_offset = csr->bus_info_len;
1104
1105 for (kv = csr->root_kv, cache = csr->cache_head; kv; cache = cache->next) {
1106 if (!cache) {
1107 /* Estimate approximate number of additional cache
1108 * regions needed (it assumes that the cache holding
1109 * the first 1K Config ROM space always exists). */
1110 int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
1111 (2 * sizeof(u_int32_t))) + 1;
1112
1113 /* Add additional cache regions, extras will be
1114 * removed later */
1115 for (; est_c; est_c--) {
1116 ret = csr1212_append_new_cache(csr, CSR1212_EXTENDED_ROM_SIZE);
1117 if (ret != CSR1212_SUCCESS)
1118 return ret;
1119 }
1120 /* Need to re-layout for additional cache regions */
1121 agg_size = csr1212_generate_layout_order(csr->root_kv);
1122 kv = csr->root_kv;
1123 cache = csr->cache_head;
1124 init_offset = csr->bus_info_len;
1125 }
1126 kv = csr1212_generate_positions(cache, kv, init_offset);
1127 agg_size -= cache->len;
1128 init_offset = sizeof(u_int32_t);
1129 }
1130
1131 /* Remove unused, excess cache regions */
1132 while (cache) {
1133 struct csr1212_csr_rom_cache *oc = cache;
1134
1135 cache = cache->next;
1136 csr1212_remove_cache(csr, oc);
1137 }
1138
1139 /* Go through the list backward so that when done, the correct CRC
1140 * will be calculated for the Extended ROM areas. */
1141 for(cache = csr->cache_tail; cache; cache = cache->prev) {
1142 /* Only Extended ROM caches should have this set. */
1143 if (cache->ext_rom) {
1144 int leaf_size;
1145
1146 /* Make sure the Extended ROM leaf is a multiple of
1147 * max_rom in size. */
1148 leaf_size = (cache->len + (csr->max_rom - 1)) &
1149 ~(csr->max_rom - 1);
1150
1151 /* Zero out the unused ROM region */
1152 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1153 leaf_size - cache->len);
1154
1155 /* Subtract leaf header */
1156 leaf_size -= sizeof(u_int32_t);
1157
1158 /* Update the Extended ROM leaf length */
1159 cache->ext_rom->value.leaf.len =
1160 bytes_to_quads(leaf_size);
1161 } else {
1162 /* Zero out the unused ROM region */
1163 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1164 cache->size - cache->len);
1165 }
1166
1167 /* Copy the data into the cache buffer */
1168 csr1212_fill_cache(cache);
1169
1170 if (cache != csr->cache_head) {
1171 /* Set the length and CRC of the extended ROM. */
1172 struct csr1212_keyval_img *kvi =
1173 (struct csr1212_keyval_img*)cache->data;
1174
1175 kvi->length = CSR1212_CPU_TO_BE16(bytes_to_quads(cache->len) - 1);
1176 kvi->crc = csr1212_crc16(kvi->data,
1177 bytes_to_quads(cache->len) - 1);
1178
1179 }
1180 }
1181
1182 return CSR1212_SUCCESS;
1183}
1184
1185int csr1212_read(struct csr1212_csr *csr, u_int32_t offset, void *buffer, u_int32_t len)
1186{
1187 struct csr1212_csr_rom_cache *cache;
1188
1189 for (cache = csr->cache_head; cache; cache = cache->next) {
1190 if (offset >= cache->offset &&
1191 (offset + len) <= (cache->offset + cache->size)) {
1192 memcpy(buffer,
1193 &cache->data[bytes_to_quads(offset - cache->offset)],
1194 len);
1195 return CSR1212_SUCCESS;
1196 }
1197 }
1198 return CSR1212_ENOENT;
1199}
1200
1201
1202
1203/* Parse a chunk of data as a Config ROM */
1204
1205static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
1206{
1207 struct csr1212_bus_info_block_img *bi;
1208 struct csr1212_cache_region *cr;
1209 int i;
1210 int ret;
1211
1212 /* IEEE 1212 says that the entire bus info block should be readable in
1213 * a single transaction regardless of the max_rom value.
1214 * Unfortunately, many IEEE 1394 devices do not abide by that, so the
1215 * bus info block will be read 1 quadlet at a time. The rest of the
1216 * ConfigROM will be read according to the max_rom field. */
1217 for (i = 0; i < csr->bus_info_len; i += sizeof(csr1212_quad_t)) {
1218 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1219 sizeof(csr1212_quad_t),
1220 &csr->cache_head->data[bytes_to_quads(i)],
1221 csr->private);
1222 if (ret != CSR1212_SUCCESS)
1223 return ret;
1224 }
1225
1226 bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data;
1227 csr->crc_len = quads_to_bytes(bi->crc_length);
1228
1229 /* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that is not
1230 * always the case, so read the rest of the crc area 1 quadlet at a time. */
1231 for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(csr1212_quad_t)) {
1232 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1233 sizeof(csr1212_quad_t),
1234 &csr->cache_head->data[bytes_to_quads(i)],
1235 csr->private);
1236 if (ret != CSR1212_SUCCESS)
1237 return ret;
1238 }
1239
1240 if (bytes_to_quads(csr->bus_info_len - sizeof(csr1212_quad_t)) != bi->length)
1241 return CSR1212_EINVAL;
1242
1243#if 0
1244	/* Apparently there are so many different wrong implementations of the
1245 * CRC algorithm that verifying them is moot. */
1246 if ((csr1212_crc16(bi->data, bi->crc_length) != bi->crc) &&
1247 (csr1212_msft_crc16(bi->data, bi->crc_length) != bi->crc))
1248 return CSR1212_EINVAL;
1249#endif
1250
1251 cr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
1252 if (!cr)
1253 return CSR1212_ENOMEM;
1254
1255 cr->next = NULL;
1256 cr->prev = NULL;
1257 cr->offset_start = 0;
1258 cr->offset_end = csr->crc_len + 4;
1259
1260 csr->cache_head->filled_head = cr;
1261 csr->cache_head->filled_tail = cr;
1262
1263 return CSR1212_SUCCESS;
1264}
1265
1266static int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
1267 csr1212_quad_t ki,
1268 u_int32_t kv_pos)
1269{
1270 int ret = CSR1212_SUCCESS;
1271 struct csr1212_keyval *k = NULL;
1272 u_int32_t offset;
1273
1274 switch(CSR1212_KV_KEY_TYPE(ki)) {
1275 case CSR1212_KV_TYPE_IMMEDIATE:
1276 k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
1277 CSR1212_KV_VAL(ki));
1278 if (!k) {
1279 ret = CSR1212_ENOMEM;
1280 goto fail;
1281 }
1282
1283 k->refcnt = 0; /* Don't keep local reference when parsing. */
1284 break;
1285
1286 case CSR1212_KV_TYPE_CSR_OFFSET:
1287 k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
1288 CSR1212_KV_VAL(ki));
1289 if (!k) {
1290 ret = CSR1212_ENOMEM;
1291 goto fail;
1292 }
1293 k->refcnt = 0; /* Don't keep local reference when parsing. */
1294 break;
1295
1296 default:
1297 /* Compute the offset from 0xffff f000 0000. */
1298 offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos;
1299 if (offset == kv_pos) {
1300 /* Uh-oh. Can't have a relative offset of 0 for Leaves
1301 * or Directories. The Config ROM image is most likely
1302 * messed up, so we'll just abort here. */
1303 ret = CSR1212_EIO;
1304 goto fail;
1305 }
1306
1307 k = csr1212_find_keyval_offset(dir, offset);
1308
1309 if (k)
1310 break; /* Found it. */
1311
1312 if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY) {
1313 k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
1314 } else {
1315 k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);
1316 }
1317 if (!k) {
1318 ret = CSR1212_ENOMEM;
1319 goto fail;
1320 }
1321 k->refcnt = 0; /* Don't keep local reference when parsing. */
1322 k->valid = 0; /* Contents not read yet so it's not valid. */
1323 k->offset = offset;
1324
1325 k->prev = dir;
1326 k->next = dir->next;
1327 dir->next->prev = k;
1328 dir->next = k;
1329 }
1330 ret = csr1212_attach_keyval_to_directory(dir, k);
1331
1332fail:
1333 if (ret != CSR1212_SUCCESS) {
1334 if (k)
1335 free_keyval(k);
1336 }
1337 return ret;
1338}
1339
1340
1341int csr1212_parse_keyval(struct csr1212_keyval *kv,
1342 struct csr1212_csr_rom_cache *cache)
1343{
1344 struct csr1212_keyval_img *kvi;
1345 int i;
1346 int ret = CSR1212_SUCCESS;
1347 int kvi_len;
1348
1349 kvi = (struct csr1212_keyval_img*)&cache->data[bytes_to_quads(kv->offset -
1350 cache->offset)];
1351 kvi_len = CSR1212_BE16_TO_CPU(kvi->length);
1352
1353#if 0
1354	/* Apparently there are so many different wrong implementations of the
1355 * CRC algorithm that verifying them is moot. */
1356 if ((csr1212_crc16(kvi->data, kvi_len) != kvi->crc) &&
1357 (csr1212_msft_crc16(kvi->data, kvi_len) != kvi->crc)) {
1358 ret = CSR1212_EINVAL;
1359 goto fail;
1360 }
1361#endif
1362
1363 switch(kv->key.type) {
1364 case CSR1212_KV_TYPE_DIRECTORY:
1365 for (i = 0; i < kvi_len; i++) {
1366 csr1212_quad_t ki = kvi->data[i];
1367
1368 /* Some devices put null entries in their unit
1369 * directories. If we come across such an entry,
1370 * then skip it. */
1371 if (ki == 0x0)
1372 continue;
1373 ret = csr1212_parse_dir_entry(kv, ki,
1374 (kv->offset +
1375 quads_to_bytes(i + 1)));
1376 }
1377 kv->value.directory.len = kvi_len;
1378 break;
1379
1380 case CSR1212_KV_TYPE_LEAF:
1381 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
1382 kv->value.leaf.data = CSR1212_MALLOC(quads_to_bytes(kvi_len));
1383 if (!kv->value.leaf.data)
1384 {
1385 ret = CSR1212_ENOMEM;
1386 goto fail;
1387 }
1388
1389 kv->value.leaf.len = kvi_len;
1390 memcpy(kv->value.leaf.data, kvi->data, quads_to_bytes(kvi_len));
1391 }
1392 break;
1393 }
1394
1395 kv->valid = 1;
1396
1397fail:
1398 return ret;
1399}
1400
1401
1402int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1403{
1404 struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
1405 struct csr1212_keyval_img *kvi = NULL;
1406 struct csr1212_csr_rom_cache *cache;
1407 int cache_index;
1408 u_int64_t addr;
1409 u_int32_t *cache_ptr;
1410 u_int16_t kv_len = 0;
1411
1412 if (!csr || !kv)
1413 return CSR1212_EINVAL;
1414
1415 /* First find which cache the data should be in (or go in if not read
1416 * yet). */
1417 for (cache = csr->cache_head; cache; cache = cache->next) {
1418 if (kv->offset >= cache->offset &&
1419 kv->offset < (cache->offset + cache->size))
1420 break;
1421 }
1422
1423 if (!cache) {
1424 csr1212_quad_t q;
1425 u_int32_t cache_size;
1426
1427 /* Only create a new cache for Extended ROM leaves. */
1428 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
1429 return CSR1212_EINVAL;
1430
1431 if (csr->ops->bus_read(csr,
1432 CSR1212_REGISTER_SPACE_BASE + kv->offset,
1433 sizeof(csr1212_quad_t), &q, csr->private)) {
1434 return CSR1212_EIO;
1435 }
1436
1437 kv->value.leaf.len = CSR1212_BE32_TO_CPU(q) >> 16;
1438
1439 cache_size = (quads_to_bytes(kv->value.leaf.len + 1) +
1440 (csr->max_rom - 1)) & ~(csr->max_rom - 1);
1441
1442 cache = csr1212_rom_cache_malloc(kv->offset, cache_size);
1443 if (!cache)
1444 return CSR1212_ENOMEM;
1445
1446 kv->value.leaf.data = &cache->data[1];
1447 csr->cache_tail->next = cache;
1448 cache->prev = csr->cache_tail;
1449 cache->next = NULL;
1450 csr->cache_tail = cache;
1451 cache->filled_head =
1452 CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
1453 if (!cache->filled_head) {
1454 return CSR1212_ENOMEM;
1455 }
1456
1457 cache->filled_head->offset_start = 0;
1458 cache->filled_head->offset_end = sizeof(csr1212_quad_t);
1459 cache->filled_tail = cache->filled_head;
1460 cache->filled_head->next = NULL;
1461 cache->filled_head->prev = NULL;
1462 cache->data[0] = q;
1463
1464 /* Don't read the entire extended ROM now. Pieces of it will
1465 * be read when entries inside it are read. */
1466 return csr1212_parse_keyval(kv, cache);
1467 }
1468
1469 cache_index = kv->offset - cache->offset;
1470
1471	/* Now search the read portions of the cache to see if it is there. */
1472 for (cr = cache->filled_head; cr; cr = cr->next) {
1473 if (cache_index < cr->offset_start) {
1474 newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
1475 if (!newcr)
1476 return CSR1212_ENOMEM;
1477
1478 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1479 newcr->offset_end = newcr->offset_start;
1480 newcr->next = cr;
1481 newcr->prev = cr->prev;
1482 cr->prev = newcr;
1483 cr = newcr;
1484 break;
1485 } else if ((cache_index >= cr->offset_start) &&
1486 (cache_index < cr->offset_end)) {
1487 kvi = (struct csr1212_keyval_img*)
1488 (&cache->data[bytes_to_quads(cache_index)]);
1489 kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) +
1490 1);
1491 break;
1492 } else if (cache_index == cr->offset_end)
1493 break;
1494 }
1495
1496 if (!cr) {
1497 cr = cache->filled_tail;
1498 newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
1499 if (!newcr)
1500 return CSR1212_ENOMEM;
1501
1502 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1503 newcr->offset_end = newcr->offset_start;
1504 newcr->prev = cr;
1505 newcr->next = cr->next;
1506 cr->next = newcr;
1507 cr = newcr;
1508 cache->filled_tail = newcr;
1509 }
1510
1511 while(!kvi || cr->offset_end < cache_index + kv_len) {
1512 cache_ptr = &cache->data[bytes_to_quads(cr->offset_end &
1513 ~(csr->max_rom - 1))];
1514
1515 addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
1516 cr->offset_end) & ~(csr->max_rom - 1);
1517
1518 if (csr->ops->bus_read(csr, addr, csr->max_rom, cache_ptr,
1519 csr->private)) {
1520 if (csr->max_rom == 4)
1521 /* We've got problems! */
1522 return CSR1212_EIO;
1523
1524			/* Apparently the max_rom value was a lie, set it to
1525 * do quadlet reads and try again. */
1526 csr->max_rom = 4;
1527 continue;
1528 }
1529
1530 cr->offset_end += csr->max_rom - (cr->offset_end &
1531 (csr->max_rom - 1));
1532
1533 if (!kvi && (cr->offset_end > cache_index)) {
1534 kvi = (struct csr1212_keyval_img*)
1535 (&cache->data[bytes_to_quads(cache_index)]);
1536 kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) +
1537 1);
1538 }
1539
1540 if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
1541 /* The Leaf or Directory claims its length extends
1542 * beyond the ConfigROM image region and thus beyond the
1543 * end of our cache region. Therefore, we abort now
1544 * rather than seg faulting later. */
1545 return CSR1212_EIO;
1546 }
1547
1548 ncr = cr->next;
1549
1550 if (ncr && (cr->offset_end >= ncr->offset_start)) {
1551 /* consolidate region entries */
1552 ncr->offset_start = cr->offset_start;
1553
1554 if (cr->prev)
1555 cr->prev->next = cr->next;
1556 ncr->prev = cr->prev;
1557 if (cache->filled_head == cr)
1558 cache->filled_head = ncr;
1559 CSR1212_FREE(cr);
1560 cr = ncr;
1561 }
1562 }
1563
1564 return csr1212_parse_keyval(kv, cache);
1565}
1566
1567
1568
1569int csr1212_parse_csr(struct csr1212_csr *csr)
1570{
1571 static const int mr_map[] = { 4, 64, 1024, 0 };
1572 struct csr1212_dentry *dentry;
1573 int ret;
1574
1575 if (!csr || !csr->ops->bus_read)
1576 return CSR1212_EINVAL;
1577
1578 ret = csr1212_parse_bus_info_block(csr);
1579 if (ret != CSR1212_SUCCESS)
1580 return ret;
1581
1582 if (!csr->ops->get_max_rom)
1583 csr->max_rom = mr_map[0]; /* default value */
1584 else
1585 csr->max_rom = mr_map[csr->ops->get_max_rom(csr->bus_info_data,
1586 csr->private)];
1587
1588 csr->cache_head->layout_head = csr->root_kv;
1589 csr->cache_head->layout_tail = csr->root_kv;
1590
1591 csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) +
1592 csr->bus_info_len;
1593
1594 csr->root_kv->valid = 0;
1595 csr->root_kv->next = csr->root_kv;
1596 csr->root_kv->prev = csr->root_kv;
1597 csr1212_get_keyval(csr, csr->root_kv);
1598
1599 /* Scan through the Root directory finding all extended ROM regions
1600 * and make cache regions for them */
1601 for (dentry = csr->root_kv->value.directory.dentries_head;
1602 dentry; dentry = dentry->next) {
1603 if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
1604			if (!csr1212_get_keyval(csr, dentry->kv)) {
1605				/* Reading the Extended ROM leaf failed. */
1606				return CSR1212_EIO;
1607			}
1608 }
1609 }
1610
1611 return CSR1212_SUCCESS;
1612}
diff --git a/drivers/ieee1394/csr1212.h b/drivers/ieee1394/csr1212.h
new file mode 100644
index 000000000000..e6734263a1d3
--- /dev/null
+++ b/drivers/ieee1394/csr1212.h
@@ -0,0 +1,727 @@
1/*
2 * csr1212.h -- IEEE 1212 Control and Status Register support for Linux
3 *
4 * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
5 * Steve Kinneberg <kinnebergsteve@acmsystems.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
21 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
27 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30#ifndef __CSR1212_H__
31#define __CSR1212_H__
32
33
34/* Compatibility layer */
35#ifdef __KERNEL__
36
37#include <linux/types.h>
38#include <linux/slab.h>
39#include <linux/interrupt.h>
40#include <linux/sched.h>
41#include <linux/vmalloc.h>
42#include <asm/pgalloc.h>
43
44#define CSR1212_MALLOC(size) vmalloc((size))
45#define CSR1212_FREE(ptr) vfree(ptr)
46#define CSR1212_BE16_TO_CPU(quad) be16_to_cpu(quad)
47#define CSR1212_CPU_TO_BE16(quad) cpu_to_be16(quad)
48#define CSR1212_BE32_TO_CPU(quad) be32_to_cpu(quad)
49#define CSR1212_CPU_TO_BE32(quad) cpu_to_be32(quad)
50#define CSR1212_BE64_TO_CPU(quad) be64_to_cpu(quad)
51#define CSR1212_CPU_TO_BE64(quad) cpu_to_be64(quad)
52
53#define CSR1212_LE16_TO_CPU(quad) le16_to_cpu(quad)
54#define CSR1212_CPU_TO_LE16(quad) cpu_to_le16(quad)
55#define CSR1212_LE32_TO_CPU(quad) le32_to_cpu(quad)
56#define CSR1212_CPU_TO_LE32(quad) cpu_to_le32(quad)
57#define CSR1212_LE64_TO_CPU(quad) le64_to_cpu(quad)
58#define CSR1212_CPU_TO_LE64(quad) cpu_to_le64(quad)
59
60#include <linux/errno.h>
61#define CSR1212_SUCCESS (0)
62#define CSR1212_EINVAL (-EINVAL)
63#define CSR1212_ENOMEM (-ENOMEM)
64#define CSR1212_ENOENT (-ENOENT)
65#define CSR1212_EIO (-EIO)
66#define CSR1212_EBUSY (-EBUSY)
67
68#else /* Userspace */
69
70#include <sys/types.h>
71#include <malloc.h>
72#define CSR1212_MALLOC(size) malloc(size)
73#define CSR1212_FREE(ptr) free(ptr)
74#include <endian.h>
75#if __BYTE_ORDER == __LITTLE_ENDIAN
76#include <byteswap.h>
77#define CSR1212_BE16_TO_CPU(quad) bswap_16(quad)
78#define CSR1212_CPU_TO_BE16(quad) bswap_16(quad)
79#define CSR1212_BE32_TO_CPU(quad) bswap_32(quad)
80#define CSR1212_CPU_TO_BE32(quad) bswap_32(quad)
81#define CSR1212_BE64_TO_CPU(quad) bswap_64(quad)
82#define CSR1212_CPU_TO_BE64(quad) bswap_64(quad)
83
84#define CSR1212_LE16_TO_CPU(quad) (quad)
85#define CSR1212_CPU_TO_LE16(quad) (quad)
86#define CSR1212_LE32_TO_CPU(quad) (quad)
87#define CSR1212_CPU_TO_LE32(quad) (quad)
88#define CSR1212_LE64_TO_CPU(quad) (quad)
89#define CSR1212_CPU_TO_LE64(quad) (quad)
90#else
91#define CSR1212_BE16_TO_CPU(quad) (quad)
92#define CSR1212_CPU_TO_BE16(quad) (quad)
93#define CSR1212_BE32_TO_CPU(quad) (quad)
94#define CSR1212_CPU_TO_BE32(quad) (quad)
95#define CSR1212_BE64_TO_CPU(quad) (quad)
96#define CSR1212_CPU_TO_BE64(quad) (quad)
97
98#define CSR1212_LE16_TO_CPU(quad) bswap_16(quad)
99#define CSR1212_CPU_TO_LE16(quad) bswap_16(quad)
100#define CSR1212_LE32_TO_CPU(quad) bswap_32(quad)
101#define CSR1212_CPU_TO_LE32(quad) bswap_32(quad)
102#define CSR1212_LE64_TO_CPU(quad) bswap_64(quad)
103#define CSR1212_CPU_TO_LE64(quad) bswap_64(quad)
104#endif
105
106#include <errno.h>
107#define CSR1212_SUCCESS (0)
108#define CSR1212_EINVAL (EINVAL)
109#define CSR1212_ENOMEM (ENOMEM)
110#define CSR1212_ENOENT (ENOENT)
111#define CSR1212_EIO (EIO)
112#define CSR1212_EBUSY (EBUSY)
113
114#endif
115
116
117#define CSR1212_KV_VAL_MASK 0xffffff
118#define CSR1212_KV_KEY_SHIFT 24
119#define CSR1212_KV_KEY_TYPE_SHIFT 6
120#define CSR1212_KV_KEY_ID_MASK 0x3f
121#define CSR1212_KV_KEY_TYPE_MASK 0x3 /* After shift */
122
123
124/* CSR 1212 key types */
125#define CSR1212_KV_TYPE_IMMEDIATE 0
126#define CSR1212_KV_TYPE_CSR_OFFSET 1
127#define CSR1212_KV_TYPE_LEAF 2
128#define CSR1212_KV_TYPE_DIRECTORY 3
129
130
131/* CSR 1212 key ids */
132#define CSR1212_KV_ID_DESCRIPTOR 0x01
133#define CSR1212_KV_ID_BUS_DEPENDENT_INFO 0x02
134#define CSR1212_KV_ID_VENDOR 0x03
135#define CSR1212_KV_ID_HARDWARE_VERSION 0x04
136#define CSR1212_KV_ID_MODULE 0x07
137#define CSR1212_KV_ID_NODE_CAPABILITIES 0x0C
138#define CSR1212_KV_ID_EUI_64 0x0D
139#define CSR1212_KV_ID_UNIT 0x11
140#define CSR1212_KV_ID_SPECIFIER_ID 0x12
141#define CSR1212_KV_ID_VERSION 0x13
142#define CSR1212_KV_ID_DEPENDENT_INFO 0x14
143#define CSR1212_KV_ID_UNIT_LOCATION 0x15
144#define CSR1212_KV_ID_MODEL 0x17
145#define CSR1212_KV_ID_INSTANCE 0x18
146#define CSR1212_KV_ID_KEYWORD 0x19
147#define CSR1212_KV_ID_FEATURE 0x1A
148#define CSR1212_KV_ID_EXTENDED_ROM 0x1B
149#define CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID 0x1C
150#define CSR1212_KV_ID_EXTENDED_KEY 0x1D
151#define CSR1212_KV_ID_EXTENDED_DATA 0x1E
152#define CSR1212_KV_ID_MODIFIABLE_DESCRIPTOR 0x1F
153#define CSR1212_KV_ID_DIRECTORY_ID 0x20
154#define CSR1212_KV_ID_REVISION 0x21
155
156
157/* IEEE 1212 Address space map */
158#define CSR1212_ALL_SPACE_BASE (0x000000000000ULL)
159#define CSR1212_ALL_SPACE_SIZE (1ULL << 48)
160#define CSR1212_ALL_SPACE_END (CSR1212_ALL_SPACE_BASE + CSR1212_ALL_SPACE_SIZE)
161
162#define CSR1212_MEMORY_SPACE_BASE (0x000000000000ULL)
163#define CSR1212_MEMORY_SPACE_SIZE ((256ULL * (1ULL << 40)) - (512ULL * (1ULL << 20)))
164#define CSR1212_MEMORY_SPACE_END (CSR1212_MEMORY_SPACE_BASE + CSR1212_MEMORY_SPACE_SIZE)
165
166#define CSR1212_PRIVATE_SPACE_BASE (0xffffe0000000ULL)
167#define CSR1212_PRIVATE_SPACE_SIZE (256ULL * (1ULL << 20))
168#define CSR1212_PRIVATE_SPACE_END (CSR1212_PRIVATE_SPACE_BASE + CSR1212_PRIVATE_SPACE_SIZE)
169
170#define CSR1212_REGISTER_SPACE_BASE (0xfffff0000000ULL)
171#define CSR1212_REGISTER_SPACE_SIZE (256ULL * (1ULL << 20))
172#define CSR1212_REGISTER_SPACE_END (CSR1212_REGISTER_SPACE_BASE + CSR1212_REGISTER_SPACE_SIZE)
173
174#define CSR1212_CSR_ARCH_REG_SPACE_BASE (0xfffff0000000ULL)
175#define CSR1212_CSR_ARCH_REG_SPACE_SIZE (512)
176#define CSR1212_CSR_ARCH_REG_SPACE_END (CSR1212_CSR_ARCH_REG_SPACE_BASE + CSR1212_CSR_ARCH_REG_SPACE_SIZE)
177#define CSR1212_CSR_ARCH_REG_SPACE_OFFSET (CSR1212_CSR_ARCH_REG_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
178
179#define CSR1212_CSR_BUS_DEP_REG_SPACE_BASE (0xfffff0000200ULL)
180#define CSR1212_CSR_BUS_DEP_REG_SPACE_SIZE (512)
181#define CSR1212_CSR_BUS_DEP_REG_SPACE_END (CSR1212_CSR_BUS_DEP_REG_SPACE_BASE + CSR1212_CSR_BUS_DEP_REG_SPACE_SIZE)
182#define CSR1212_CSR_BUS_DEP_REG_SPACE_OFFSET (CSR1212_CSR_BUS_DEP_REG_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
183
184#define CSR1212_CONFIG_ROM_SPACE_BASE (0xfffff0000400ULL)
185#define CSR1212_CONFIG_ROM_SPACE_SIZE (1024)
186#define CSR1212_CONFIG_ROM_SPACE_END (CSR1212_CONFIG_ROM_SPACE_BASE + CSR1212_CONFIG_ROM_SPACE_SIZE)
187#define CSR1212_CONFIG_ROM_SPACE_OFFSET (CSR1212_CONFIG_ROM_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
188
189#define CSR1212_UNITS_SPACE_BASE (0xfffff0000800ULL)
190#define CSR1212_UNITS_SPACE_SIZE ((256ULL * (1ULL << 20)) - 2048)
191#define CSR1212_UNITS_SPACE_END (CSR1212_UNITS_SPACE_BASE + CSR1212_UNITS_SPACE_SIZE)
192#define CSR1212_UNITS_SPACE_OFFSET (CSR1212_UNITS_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
193
194#define CSR1212_EXTENDED_ROM_SIZE (0x10000 * sizeof(u_int32_t))
195
196
197/* Config ROM image structures */
198struct csr1212_bus_info_block_img {
199 u_int8_t length;
200 u_int8_t crc_length;
201 u_int16_t crc;
202
203 /* Must be last */
204 u_int32_t data[0]; /* older gcc can't handle [] which is standard */
205};
206
207#define CSR1212_KV_KEY(quad) (CSR1212_BE32_TO_CPU(quad) >> CSR1212_KV_KEY_SHIFT)
208#define CSR1212_KV_KEY_TYPE(quad) (CSR1212_KV_KEY(quad) >> CSR1212_KV_KEY_TYPE_SHIFT)
209#define CSR1212_KV_KEY_ID(quad) (CSR1212_KV_KEY(quad) & CSR1212_KV_KEY_ID_MASK)
210#define CSR1212_KV_VAL(quad) (CSR1212_BE32_TO_CPU(quad) & CSR1212_KV_VAL_MASK)
211
212#define CSR1212_SET_KV_KEY(quad, key) ((quad) = \
213 CSR1212_CPU_TO_BE32(CSR1212_KV_VAL(quad) | ((key) << CSR1212_KV_KEY_SHIFT)))
214#define CSR1212_SET_KV_VAL(quad, val) ((quad) = \
215 CSR1212_CPU_TO_BE32((CSR1212_KV_KEY(quad) << CSR1212_KV_KEY_SHIFT) | (val)))
216#define CSR1212_SET_KV_TYPEID(quad, type, id) ((quad) = \
217 CSR1212_CPU_TO_BE32(CSR1212_KV_VAL(quad) | \
218 (((((type) & CSR1212_KV_KEY_TYPE_MASK) << CSR1212_KV_KEY_TYPE_SHIFT) | \
219 ((id) & CSR1212_KV_KEY_ID_MASK)) << CSR1212_KV_KEY_SHIFT)))
220
221typedef u_int32_t csr1212_quad_t;
222
223
224struct csr1212_keyval_img {
225 u_int16_t length;
226 u_int16_t crc;
227
228 /* Must be last */
229 csr1212_quad_t data[0]; /* older gcc can't handle [] which is standard */
230};
231
232struct csr1212_leaf {
233 int len;
234 u_int32_t *data;
235};
236
237struct csr1212_dentry {
238 struct csr1212_dentry *next, *prev;
239 struct csr1212_keyval *kv;
240};
241
242struct csr1212_directory {
243 int len;
244 struct csr1212_dentry *dentries_head, *dentries_tail;
245};
246
247struct csr1212_keyval {
248 struct {
249 u_int8_t type;
250 u_int8_t id;
251 } key;
252 union {
253 u_int32_t immediate;
254 u_int32_t csr_offset;
255 struct csr1212_leaf leaf;
256 struct csr1212_directory directory;
257 } value;
258 struct csr1212_keyval *associate;
259 int refcnt;
260
261 /* used in generating and/or parsing CSR image */
262 struct csr1212_keyval *next, *prev; /* flat list of CSR elements */
263 u_int32_t offset; /* position in CSR from 0xffff f000 0000 */
264 u_int8_t valid; /* flag indicating keyval has valid data*/
265};
266
267
268struct csr1212_cache_region {
269 struct csr1212_cache_region *next, *prev;
270 u_int32_t offset_start; /* inclusive */
271 u_int32_t offset_end; /* exclusive */
272};
273
274struct csr1212_csr_rom_cache {
275 struct csr1212_csr_rom_cache *next, *prev;
276 struct csr1212_cache_region *filled_head, *filled_tail;
277 struct csr1212_keyval *layout_head, *layout_tail;
278 size_t size;
279 u_int32_t offset;
280 struct csr1212_keyval *ext_rom;
281 size_t len;
282
283 /* Must be last */
284 u_int32_t data[0]; /* older gcc can't handle [] which is standard */
285};
286
287struct csr1212_csr {
288 size_t bus_info_len; /* bus info block length in bytes */
289 size_t crc_len; /* crc length in bytes */
290 u_int32_t *bus_info_data; /* bus info data incl bus name and EUI */
291
292 void *private; /* private, bus specific data */
293 struct csr1212_bus_ops *ops;
294
295 struct csr1212_keyval *root_kv;
296
297 int max_rom; /* max bytes readable in Config ROM region */
298
299 /* Items below used for image parsing and generation */
300 struct csr1212_csr_rom_cache *cache_head, *cache_tail;
301};
302
303struct csr1212_bus_ops {
304 /* This function is used by csr1212 to read additional information
305 * from remote nodes when parsing a Config ROM (i.e., read Config ROM
306	 * entries located in the Units Space). Must return 0 on success;
307	 * anything else indicates an error. */
308 int (*bus_read) (struct csr1212_csr *csr, u_int64_t addr,
309 u_int16_t length, void *buffer, void *private);
310
311 /* This function is used by csr1212 to allocate a region in units space
312 * in the event that Config ROM entries don't all fit in the predefined
313	 * 1K region. The void *private parameter is the private member of
314	 * struct csr1212_csr. */
315 u_int64_t (*allocate_addr_range) (u_int64_t size, u_int32_t alignment,
316 void *private);
317
318
319 /* This function is used by csr1212 to release a region in units space
320 * that is no longer needed. */
321 void (*release_addr) (u_int64_t addr, void *private);
322
323 /* This function is used by csr1212 to determine the max read request
324 * supported by a remote node when reading the ConfigROM space. Must
325 * return 0, 1, or 2 per IEEE 1212. */
326 int (*get_max_rom) (u_int32_t *bus_info, void *private);
327};
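/*
 * Minimal sketch (not part of the original interface) of a csr1212_bus_ops
 * implementation.  The example_* functions are hypothetical stand-ins for a
 * real bus driver's primitives.
 */
#if 0
static int example_bus_read(struct csr1212_csr *csr, u_int64_t addr,
			    u_int16_t length, void *buffer, void *private)
{
	/* Read 'length' bytes at bus address 'addr' into 'buffer';
	 * return 0 on success, non-zero on error. */
	return 0;
}

static u_int64_t example_allocate_addr_range(u_int64_t size, u_int32_t alignment,
					     void *private)
{
	/* Reserve 'size' bytes of units space; return ~0ULL on failure. */
	return CSR1212_UNITS_SPACE_BASE;
}

static void example_release_addr(u_int64_t addr, void *private)
{
	/* Hand the reserved units-space range back to the bus driver. */
}

static int example_get_max_rom(u_int32_t *bus_info, void *private)
{
	return 0;	/* 0, 1 or 2, mapped to 4, 64 or 1024 byte reads */
}

static struct csr1212_bus_ops example_bus_ops = {
	.bus_read		= example_bus_read,
	.allocate_addr_range	= example_allocate_addr_range,
	.release_addr		= example_release_addr,
	.get_max_rom		= example_get_max_rom,
};
#endif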
328
329
330
331
332/* Descriptor Leaf manipulation macros */
333#define CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT 24
334#define CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK 0xffffff
335#define CSR1212_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u_int32_t))
336
337#define CSR1212_DESCRIPTOR_LEAF_TYPE(kv) \
338 (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[0]) >> CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)
339#define CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) \
340 (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[0]) & \
341 CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)
342#define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
343 (&((kv)->value.leaf.data[1]))
344
345#define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
346 ((kv)->value.leaf.data[0] = \
347 CSR1212_CPU_TO_BE32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
348 ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
349#define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
350 ((kv)->value.leaf.data[0] = \
351 CSR1212_CPU_TO_BE32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
352 CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
353 ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))
354
355/* Text Descriptor Leaf manipulation macros */
356#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT 28
357#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK 0xf /* after shift */
358#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT 16
359#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK 0xfff /* after shift */
360#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK 0xffff
361#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u_int32_t))
362
363#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) \
364 (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]) >> \
365 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT)
366#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) \
367 ((CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]) >> \
368 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT) & \
369 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK)
370#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) \
371 (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]) & \
372 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK)
373#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv) \
374 (&((kv)->value.leaf.data[2]))
375
376#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, width) \
377 ((kv)->value.leaf.data[1] = \
378 ((kv)->value.leaf.data[1] & \
379 CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK << \
380 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))) | \
381 CSR1212_CPU_TO_BE32(((width) & \
382 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK) << \
383 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))
384#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, char_set) \
385 ((kv)->value.leaf.data[1] = \
386 ((kv)->value.leaf.data[1] & \
387 CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK << \
388 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))) | \
389 CSR1212_CPU_TO_BE32(((char_set) & \
390 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK) << \
391 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))
392#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language) \
393 ((kv)->value.leaf.data[1] = \
394 ((kv)->value.leaf.data[1] & \
395 CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK))) | \
396 CSR1212_CPU_TO_BE32(((language) & \
397 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK)))
398
399
400/* Icon Descriptor Leaf manipulation macros */
401#define CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK 0xffffff
402#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT 30
403#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_MASK 0x3 /* after shift */
404#define CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT 16
405#define CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK 0xf /* after shift */
406#define CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK 0xffff
407#define CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT 16
408#define CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_MASK 0xffff /* after shift */
409#define CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK 0xffff
410#define CSR1212_ICON_DESCRIPTOR_LEAF_OVERHEAD (3 * sizeof(u_int32_t))
411
412#define CSR1212_ICON_DESCRIPTOR_LEAF_VERSION(kv) \
413 (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[2]) & \
414 CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK)
415
416#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH(kv) \
417 (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[3]) >> \
418 CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT)
419
420#define CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE(kv) \
421 ((CSR1212_BE32_TO_CPU((kv)->value.leaf.data[3]) >> \
422 CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT) & \
423 CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK)
424
425#define CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE(kv) \
426 (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[3]) & \
427 CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK)
428
429#define CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN(kv) \
430 ((CSR1212_BE32_TO_CPU((kv)->value.leaf.data[4]) >> \
431	 CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT) & \
432	 CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_MASK)
433
434#define CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN(kv) \
435 (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[4]) & \
436 CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK)
437
438#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE(kv) \
439 (&((kv)->value.leaf.data[5]))
440
441static inline u_int32_t *CSR1212_ICON_DESCRIPTOR_LEAF_PIXELS(struct csr1212_keyval *kv)
442{
443 static const int pd[4] = { 0, 4, 16, 256 };
444 static const int cs[16] = { 4, 2 };
445 int ps = pd[CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH(kv)];
446
447 return &kv->value.leaf.data[5 +
448 (ps * cs[CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE(kv)]) /
449 sizeof(u_int32_t)];
450}
451
452#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_VERSION(kv, version) \
453 ((kv)->value.leaf.data[2] = \
454 ((kv)->value.leaf.data[2] & \
455 CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK))) | \
456 CSR1212_CPU_TO_BE32(((version) & \
457 CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK)))
458
459#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_PALETTE_DEPTH(kv, palette_depth) \
460 ((kv)->value.leaf.data[3] = \
461 ((kv)->value.leaf.data[3] & \
462 CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_MASK << \
463 CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT))) | \
464 CSR1212_CPU_TO_BE32(((palette_depth) & \
465 CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_MASK) << \
466 CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT))
467
468#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_COLOR_SPACE(kv, color_space) \
469 ((kv)->value.leaf.data[3] = \
470 ((kv)->value.leaf.data[3] & \
471 CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK << \
472 CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT))) | \
473 CSR1212_CPU_TO_BE32(((color_space) & \
474 CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK) << \
475 CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT))
476
477#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language) \
478 ((kv)->value.leaf.data[3] = \
479 ((kv)->value.leaf.data[3] & \
480 CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK))) | \
481 CSR1212_CPU_TO_BE32(((language) & \
482 CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK)))
483
484#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_HSCAN(kv, hscan) \
485 ((kv)->value.leaf.data[4] = \
486 ((kv)->value.leaf.data[4] & \
487 CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_MASK << \
488 CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT))) | \
489 CSR1212_CPU_TO_BE32(((hscan) & \
490 CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_MASK) << \
491 CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT))
492
493#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_VSCAN(kv, vscan) \
494 ((kv)->value.leaf.data[4] = \
495 (((kv)->value.leaf.data[4] & \
496 CSR1212_CPU_TO_BE32(~CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK))) | \
497 CSR1212_CPU_TO_BE32(((vscan) & \
498 CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK)))
499
500
501/* Modifiable Descriptor Leaf manipulation macros */
502#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_SHIFT 16
503#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_MASK 0xffff
504#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_SHIFT 32
505#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_MASK 0xffff
506#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_LO_MASK 0xffffffffULL
507
508#define CSR1212_MODIFIABLE_DESCRIPTOR_MAX_SIZE(kv) \
509	CSR1212_BE16_TO_CPU((kv)->value.leaf.data[0] >> CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_SHIFT)
510
511#define CSR1212_MODIFIABLE_DESCRIPTOR_ADDRESS(kv) \
512 (CSR1212_BE16_TO_CPU(((u_int64_t)((kv)->value.leaf.data[0])) << \
513	 CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_SHIFT) | \
514 CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]))
515
516#define CSR1212_MODIFIABLE_DESCRIPTOR_SET_MAX_SIZE(kv, size) \
517 ((kv)->value.leaf.data[0] = \
518 ((kv)->value.leaf.data[0] & \
519 CSR1212_CPU_TO_BE32(~(CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_MASK << \
520 CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_SHIFT))) | \
521 CSR1212_CPU_TO_BE32(((size) & \
522 CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_MASK) << \
523 CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_SHIFT))
524
525#define CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_HI(kv, addr) \
526 ((kv)->value.leaf.data[0] = \
527 ((kv)->value.leaf.data[0] & \
528 CSR1212_CPU_TO_BE32(~(CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_MASK))) | \
529 CSR1212_CPU_TO_BE32(((addr) & \
530 CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_MASK)))
531
532#define CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_LO(kv, addr) \
533 ((kv)->value.leaf.data[1] = \
534 CSR1212_CPU_TO_BE32(addr & CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_LO_MASK))
535
536
537
538/* The following two functions are for creating new Configuration ROM trees. The
539 * first function is used for both creating local trees and parsing remote
540 * trees. The second function adds pertinent information to local Configuration
541 * ROM trees - namely data for the bus information block. */
542extern struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
543 size_t bus_info_size,
544 void *private);
545extern void csr1212_init_local_csr(struct csr1212_csr *csr,
546 const u_int32_t *bus_info_data, int max_rom);
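/*
 * Usage sketch (not part of the original header): creating a local CSR.
 * 'example_bus_ops' refers to the hypothetical ops sketched above; the bus
 * info contents and the meaning of the max_rom argument (assumed here to be
 * the maximum readable request size in bytes) are assumptions.
 */
#if 0
static struct csr1212_csr *example_create_local_csr(void)
{
	extern struct csr1212_bus_ops example_bus_ops;	/* hypothetical */
	static u_int32_t example_bus_info[5];	/* e.g. bus name + EUI-64 */
	struct csr1212_csr *csr;

	csr = csr1212_create_csr(&example_bus_ops,
				 sizeof(example_bus_info), NULL);
	if (!csr)
		return NULL;

	csr1212_init_local_csr(csr, example_bus_info, 1024);
	return csr;
}
#endif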
547
548
549/* The following function destroys a Configuration ROM tree and releases all
550 * memory taken by the tree. */
551extern void csr1212_destroy_csr(struct csr1212_csr *csr);
552
553
554/* The following set of functions is for creating new keyvals for placement in
555 * a Configuration ROM tree. Code that creates new keyvals with these functions
556 * must release those keyvals with csr1212_release_keyval() when they are no
557 * longer needed. */
558extern struct csr1212_keyval *csr1212_new_immediate(u_int8_t key, u_int32_t value);
559extern struct csr1212_keyval *csr1212_new_leaf(u_int8_t key, const void *data,
560 size_t data_len);
561extern struct csr1212_keyval *csr1212_new_csr_offset(u_int8_t key,
562 u_int32_t csr_offset);
563extern struct csr1212_keyval *csr1212_new_directory(u_int8_t key);
564extern struct csr1212_keyval *csr1212_new_extended_immediate(u_int32_t spec,
565 u_int32_t key,
566 u_int32_t value);
567extern struct csr1212_keyval *csr1212_new_extended_leaf(u_int32_t spec,
568 u_int32_t key,
569 const void *data,
570 size_t data_len);
571extern struct csr1212_keyval *csr1212_new_descriptor_leaf(u_int8_t dtype,
572 u_int32_t specifier_id,
573 const void *data,
574 size_t data_len);
575extern struct csr1212_keyval *csr1212_new_textual_descriptor_leaf(u_int8_t cwidth,
576 u_int16_t cset,
577 u_int16_t language,
578 const void *data,
579 size_t data_len);
580extern struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s);
581extern struct csr1212_keyval *csr1212_new_icon_descriptor_leaf(u_int32_t version,
582 u_int8_t palette_depth,
583 u_int8_t color_space,
584 u_int16_t language,
585 u_int16_t hscan,
586 u_int16_t vscan,
587 u_int32_t *palette,
588 u_int32_t *pixels);
589extern struct csr1212_keyval *csr1212_new_modifiable_descriptor_leaf(u_int16_t max_size,
590 u_int64_t address);
591extern struct csr1212_keyval *csr1212_new_keyword_leaf(int strc,
592 const char *strv[]);
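/*
 * Usage sketch (not part of the original header): creating a Vendor
 * immediate entry plus a textual descriptor for it.  The vendor ID and name
 * are placeholders; csr1212_release_keyval() is defined further down in
 * this header.
 */
#if 0
static int example_new_vendor(struct csr1212_keyval **id,
			      struct csr1212_keyval **name)
{
	*id = csr1212_new_immediate(CSR1212_KV_ID_VENDOR, 0x123456);
	*name = csr1212_new_string_descriptor_leaf("Example Vendor");

	if (!*id || !*name) {
		if (*id)
			csr1212_release_keyval(*id);
		if (*name)
			csr1212_release_keyval(*name);
		return CSR1212_ENOMEM;
	}
	return CSR1212_SUCCESS;
}
#endif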
593
594
595/* The following functions manage association between keyvals. Typically,
596 * Descriptor Leaves and Directories will be associated with another keyval and
597 * it is desirable for the Descriptor keyval to be placed immediately after the
598 * keyval that it is associated with. */
599extern int csr1212_associate_keyval(struct csr1212_keyval *kv,
600 struct csr1212_keyval *associate);
601extern void csr1212_disassociate_keyval(struct csr1212_keyval *kv);
602
603
604/* The following functions manage the association of a keyval and directories.
605 * A keyval may be attached to more than one directory. */
606extern int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
607 struct csr1212_keyval *kv);
608extern void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
609 struct csr1212_keyval *kv);
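/*
 * Usage sketch (not part of the original header): associating the
 * descriptor with the Vendor entry and publishing both in the root
 * directory.  'csr', 'id' and 'name' are assumed to come from the sketches
 * above; csr1212_release_keyval() is defined further down in this header.
 */
#if 0
static int example_publish_vendor(struct csr1212_csr *csr,
				  struct csr1212_keyval *id,
				  struct csr1212_keyval *name)
{
	int ret;

	/* The descriptor will be laid out right after the Vendor entry. */
	ret = csr1212_associate_keyval(id, name);
	if (ret != CSR1212_SUCCESS)
		return ret;
	csr1212_release_keyval(name);	/* the association keeps a reference */

	ret = csr1212_attach_keyval_to_directory(csr->root_kv, id);
	csr1212_release_keyval(id);	/* the directory keeps a reference */
	return ret;
}
#endif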
610
611
612/* The following functions create a Configuration ROM image from the tree of
613 * keyvals provided. csr1212_generate_csr_image() creates a complete image in
614 * the list of caches available via csr->cache_head. The other functions are
615 * provided should there be a need to create a flat image without restrictions
616 * placed by IEEE 1212. */
617extern struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
618 struct csr1212_keyval *start_kv,
619 int start_pos);
620extern size_t csr1212_generate_layout_order(struct csr1212_keyval *kv);
621extern void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache);
622extern int csr1212_generate_csr_image(struct csr1212_csr *csr);
623
624
625/* This is a convenience function for reading a block of data out of one of the
626 * caches in the csr->cache_head list. */
627extern int csr1212_read(struct csr1212_csr *csr, u_int32_t offset, void *buffer,
628 u_int32_t len);
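/*
 * Usage sketch (not part of the original header): generating the ROM image
 * and copying the first kilobyte out of the cache list.  Offsets are
 * relative to 0xffff f000 0000; the example assumes the image fits in the
 * initial 1K Config ROM cache, and 'rom_buf' is a hypothetical buffer.
 */
#if 0
static int example_emit_rom(struct csr1212_csr *csr, u_int32_t *rom_buf)
{
	int ret;

	ret = csr1212_generate_csr_image(csr);
	if (ret != CSR1212_SUCCESS)
		return ret;

	return csr1212_read(csr, CSR1212_CONFIG_ROM_SPACE_OFFSET,
			    rom_buf, CSR1212_CONFIG_ROM_SPACE_SIZE);
}
#endif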
629
630
631/* The following functions are in place for parsing Configuration ROM images.
632 * csr1212_parse_keyval() is used should there be a need to parse a
633 * Configuration ROM directly. */
634extern int csr1212_parse_keyval(struct csr1212_keyval *kv,
635 struct csr1212_csr_rom_cache *cache);
636extern int csr1212_parse_csr(struct csr1212_csr *csr);
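/*
 * Usage sketch (not part of the original header): parsing a remote node's
 * Config ROM.  'example_bus_ops' is the hypothetical ops sketch above; the
 * 20-byte bus info length matches IEEE 1394's five bus info quadlets.
 */
#if 0
static struct csr1212_csr *example_parse_remote(void *bus_private)
{
	extern struct csr1212_bus_ops example_bus_ops;	/* hypothetical */
	struct csr1212_csr *csr;

	csr = csr1212_create_csr(&example_bus_ops, 20, bus_private);
	if (!csr)
		return NULL;

	if (csr1212_parse_csr(csr) != CSR1212_SUCCESS) {
		csr1212_destroy_csr(csr);
		return NULL;
	}
	return csr;	/* csr->root_kv now mirrors the remote root directory */
}
#endif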
637
638/* These are internal functions referenced by inline functions below. */
639extern int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv);
640extern void _csr1212_destroy_keyval(struct csr1212_keyval *kv);
641
642
643/* This function allocates a new cache which may be used for either parsing or
644 * generating sub-sets of Configuration ROM images. */
645static inline struct csr1212_csr_rom_cache *csr1212_rom_cache_malloc(u_int32_t offset,
646 size_t size)
647{
648 struct csr1212_csr_rom_cache *cache;
649
650 cache = CSR1212_MALLOC(sizeof(struct csr1212_csr_rom_cache) + size);
651 if (!cache)
652 return NULL;
653
654 cache->next = NULL;
655 cache->prev = NULL;
656 cache->filled_head = NULL;
657 cache->filled_tail = NULL;
658 cache->layout_head = NULL;
659 cache->layout_tail = NULL;
660 cache->offset = offset;
661 cache->size = size;
662 cache->ext_rom = NULL;
663
664 return cache;
665}
666
667
668/* This function ensures that a keyval contains data when referencing a keyval
669 * created by parsing a Configuration ROM. */
670static inline struct csr1212_keyval *csr1212_get_keyval(struct csr1212_csr *csr,
671 struct csr1212_keyval *kv)
672{
673 if (!kv)
674 return NULL;
675 if (!kv->valid)
676 if (_csr1212_read_keyval(csr, kv) != CSR1212_SUCCESS)
677 return NULL;
678 return kv;
679}
680
681
682/* This function increments the reference count for a keyval should there be a
683 * need for code to retain a keyval that has been parsed. */
684static inline void csr1212_keep_keyval(struct csr1212_keyval *kv)
685{
686 kv->refcnt++;
687}
688
689
690/* This function decrements a keyval's reference count and will destroy the
691 * keyval when there are no more users of the keyval. This should be called by
692 * any code that calls csr1212_keep_keyval() or any of the keyval creation
693 * routines csr1212_new_*(). */
694static inline void csr1212_release_keyval(struct csr1212_keyval *kv)
695{
696 if (kv->refcnt > 1)
697 kv->refcnt--;
698 else
699 _csr1212_destroy_keyval(kv);
700}
701
702
703/*
704 * This macro allows for looping over the keyval entries in a directory and it
705 * ensures that keyvals from remote ConfigROMs are parsed properly.
706 *
707 * _csr is a struct csr1212_csr * that points to the CSR associated with _dir.
708 * _kv is a struct csr1212_keyval * that'll point to the current keyval (loop index).
709 * _dir is a struct csr1212_keyval * that points to the directory to be looped.
710 * _pos is a struct csr1212_dentry * that is used internally for indexing.
711 *
712 * _kv will be NULL upon exit of the loop.
713 */
714#define csr1212_for_each_dir_entry(_csr, _kv, _dir, _pos) \
715 for (csr1212_get_keyval((_csr), (_dir)), \
716 _pos = (_dir)->value.directory.dentries_head, \
717 _kv = (_pos) ? csr1212_get_keyval((_csr), _pos->kv) : NULL; \
718 (_kv) && (_pos); \
719 (_kv->associate == NULL) ? \
720 ((_pos = _pos->next), \
721 (_kv = (_pos) ? csr1212_get_keyval((_csr), _pos->kv) : \
722 NULL)) : \
723 (_kv = csr1212_get_keyval((_csr), _kv->associate)))
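/* A minimal iteration sketch, assuming a parsed csr whose root_kv is a
 * directory (variable names are illustrative only):
 *
 *	struct csr1212_dentry *pos;
 *	struct csr1212_keyval *kv;
 *
 *	csr1212_for_each_dir_entry(csr, kv, csr->root_kv, pos) {
 *		csr1212_keep_keyval(kv);	hold a reference past the loop body
 *		... inspect kv ...
 *		csr1212_release_keyval(kv);
 *	}
 */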
724
725
726
727#endif /* __CSR1212_H__ */
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
new file mode 100644
index 000000000000..758819d1999d
--- /dev/null
+++ b/drivers/ieee1394/dma.c
@@ -0,0 +1,260 @@
1/*
2 * DMA region bookkeeping routines
3 *
4 * Copyright (C) 2002 Maas Digital LLC
5 *
6 * This code is licensed under the GPL. See the file COPYING in the root
7 * directory of the kernel sources for details.
8 */
9
10#include <linux/module.h>
11#include <linux/vmalloc.h>
12#include <linux/slab.h>
13#include <linux/mm.h>
14#include "dma.h"
15
16/* dma_prog_region */
17
18void dma_prog_region_init(struct dma_prog_region *prog)
19{
20 prog->kvirt = NULL;
21 prog->dev = NULL;
22 prog->n_pages = 0;
23 prog->bus_addr = 0;
24}
25
26int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev)
27{
28 /* round up to page size */
29 n_bytes = PAGE_ALIGN(n_bytes);
30
31 prog->n_pages = n_bytes >> PAGE_SHIFT;
32
33 prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
34 if (!prog->kvirt) {
35 printk(KERN_ERR "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
36 dma_prog_region_free(prog);
37 return -ENOMEM;
38 }
39
40 prog->dev = dev;
41
42 return 0;
43}
44
45void dma_prog_region_free(struct dma_prog_region *prog)
46{
47 if (prog->kvirt) {
48 pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT, prog->kvirt, prog->bus_addr);
49 }
50
51 prog->kvirt = NULL;
52 prog->dev = NULL;
53 prog->n_pages = 0;
54 prog->bus_addr = 0;
55}
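/* A minimal lifecycle sketch for the three helpers above (hypothetical caller;
 * "pdev" stands in for whatever struct pci_dev the driver already holds):
 *
 *	struct dma_prog_region prog;
 *
 *	dma_prog_region_init(&prog);
 *	if (dma_prog_region_alloc(&prog, 16 * 1024, pdev))
 *		return -ENOMEM;
 *	... build the DMA program in prog.kvirt, point the controller at
 *	    dma_prog_region_offset_to_bus(&prog, 0) ...
 *	dma_prog_region_free(&prog);
 */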
56
57/* dma_region */
58
59void dma_region_init(struct dma_region *dma)
60{
61 dma->kvirt = NULL;
62 dma->dev = NULL;
63 dma->n_pages = 0;
64 dma->n_dma_pages = 0;
65 dma->sglist = NULL;
66}
67
68int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction)
69{
70 unsigned int i;
71
72 /* round up to page size */
73 n_bytes = PAGE_ALIGN(n_bytes);
74
75 dma->n_pages = n_bytes >> PAGE_SHIFT;
76
77 dma->kvirt = vmalloc_32(n_bytes);
78 if (!dma->kvirt) {
79 printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
80 goto err;
81 }
82
83	/* Clear the RAM out, no junk to the user */
84 memset(dma->kvirt, 0, n_bytes);
85
86 /* allocate scatter/gather list */
87 dma->sglist = vmalloc(dma->n_pages * sizeof(*dma->sglist));
88 if (!dma->sglist) {
89 printk(KERN_ERR "dma_region_alloc: vmalloc(sglist) failed\n");
90 goto err;
91 }
92
93 /* just to be safe - this will become unnecessary once sglist->address goes away */
94 memset(dma->sglist, 0, dma->n_pages * sizeof(*dma->sglist));
95
96 /* fill scatter/gather list with pages */
97 for (i = 0; i < dma->n_pages; i++) {
98 unsigned long va = (unsigned long) dma->kvirt + (i << PAGE_SHIFT);
99
100 dma->sglist[i].page = vmalloc_to_page((void *)va);
101 dma->sglist[i].length = PAGE_SIZE;
102 }
103
104 /* map sglist to the IOMMU */
105 dma->n_dma_pages = pci_map_sg(dev, dma->sglist, dma->n_pages, direction);
106
107 if (dma->n_dma_pages == 0) {
108 printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
109 goto err;
110 }
111
112 dma->dev = dev;
113 dma->direction = direction;
114
115 return 0;
116
117err:
118 dma_region_free(dma);
119 return -ENOMEM;
120}
121
122void dma_region_free(struct dma_region *dma)
123{
124 if (dma->n_dma_pages) {
125 pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages, dma->direction);
126 dma->n_dma_pages = 0;
127 dma->dev = NULL;
128 }
129
130 vfree(dma->sglist);
131 dma->sglist = NULL;
132
133 vfree(dma->kvirt);
134 dma->kvirt = NULL;
135 dma->n_pages = 0;
136}
137
138/* find the scatterlist index and remaining offset corresponding to a
139 given offset from the beginning of the buffer */
140static inline int dma_region_find(struct dma_region *dma, unsigned long offset, unsigned long *rem)
141{
142 int i;
143 unsigned long off = offset;
144
145 for (i = 0; i < dma->n_dma_pages; i++) {
146 if (off < sg_dma_len(&dma->sglist[i])) {
147 *rem = off;
148 break;
149 }
150
151 off -= sg_dma_len(&dma->sglist[i]);
152 }
153
154 BUG_ON(i >= dma->n_dma_pages);
155
156 return i;
157}
158
159dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset)
160{
161 unsigned long rem;
162
163 struct scatterlist *sg = &dma->sglist[dma_region_find(dma, offset, &rem)];
164 return sg_dma_address(sg) + rem;
165}
166
167void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsigned long len)
168{
169 int first, last;
170 unsigned long rem;
171
172 if (!len)
173 len = 1;
174
175 first = dma_region_find(dma, offset, &rem);
176 last = dma_region_find(dma, offset + len - 1, &rem);
177
178 pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1, dma->direction);
179}
180
181void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, unsigned long len)
182{
183 int first, last;
184 unsigned long rem;
185
186 if (!len)
187 len = 1;
188
189 first = dma_region_find(dma, offset, &rem);
190 last = dma_region_find(dma, offset + len - 1, &rem);
191
192 pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first], last - first + 1, dma->direction);
193}
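/* Sketch of the streaming pattern the helpers above are meant for (hypothetical
 * caller; direction and sizes chosen only for illustration):
 *
 *	struct dma_region buf;
 *
 *	dma_region_init(&buf);
 *	if (dma_region_alloc(&buf, 64 * 1024, pdev, PCI_DMA_FROMDEVICE))
 *		return -ENOMEM;
 *	... hand dma_region_offset_to_bus(&buf, off) to the device ...
 *	dma_region_sync_for_cpu(&buf, off, len);	before the CPU reads
 *	... read via buf.kvirt + off ...
 *	dma_region_free(&buf);
 */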
194
195#ifdef CONFIG_MMU
196
197/* nopage() handler for mmap access */
198
199static struct page*
200dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int *type)
201{
202 unsigned long offset;
203 unsigned long kernel_virt_addr;
204 struct page *ret = NOPAGE_SIGBUS;
205
206 struct dma_region *dma = (struct dma_region*) area->vm_private_data;
207
208 if (!dma->kvirt)
209 goto out;
210
211 if ( (address < (unsigned long) area->vm_start) ||
212 (address > (unsigned long) area->vm_start + (dma->n_pages << PAGE_SHIFT)) )
213 goto out;
214
215 if (type)
216 *type = VM_FAULT_MINOR;
217 offset = address - area->vm_start;
218 kernel_virt_addr = (unsigned long) dma->kvirt + offset;
219 ret = vmalloc_to_page((void*) kernel_virt_addr);
220 get_page(ret);
221out:
222 return ret;
223}
224
225static struct vm_operations_struct dma_region_vm_ops = {
226 .nopage = dma_region_pagefault,
227};
228
229int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
230{
231 unsigned long size;
232
233 if (!dma->kvirt)
234 return -EINVAL;
235
236 /* must be page-aligned */
237 if (vma->vm_pgoff != 0)
238 return -EINVAL;
239
240 /* check the length */
241 size = vma->vm_end - vma->vm_start;
242 if (size > (dma->n_pages << PAGE_SHIFT))
243 return -EINVAL;
244
245 vma->vm_ops = &dma_region_vm_ops;
246 vma->vm_private_data = dma;
247 vma->vm_file = file;
248 vma->vm_flags |= VM_RESERVED;
249
250 return 0;
251}
252
253#else /* CONFIG_MMU */
254
255int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
256{
257 return -EINVAL;
258}
259
260#endif /* CONFIG_MMU */
diff --git a/drivers/ieee1394/dma.h b/drivers/ieee1394/dma.h
new file mode 100644
index 000000000000..061550a6fb99
--- /dev/null
+++ b/drivers/ieee1394/dma.h
@@ -0,0 +1,78 @@
1/*
2 * DMA region bookkeeping routines
3 *
4 * Copyright (C) 2002 Maas Digital LLC
5 *
6 * This code is licensed under the GPL. See the file COPYING in the root
7 * directory of the kernel sources for details.
8 */
9
10#ifndef IEEE1394_DMA_H
11#define IEEE1394_DMA_H
12
13#include <linux/pci.h>
14#include <asm/scatterlist.h>
15
16/* struct dma_prog_region
17
18 a small, physically-contiguous DMA buffer with random-access,
19 synchronous usage characteristics
20*/
21
22struct dma_prog_region {
23 unsigned char *kvirt; /* kernel virtual address */
24 struct pci_dev *dev; /* PCI device */
25 unsigned int n_pages; /* # of kernel pages */
26 dma_addr_t bus_addr; /* base bus address */
27};
28
29/* clear out all fields but do not allocate any memory */
30void dma_prog_region_init(struct dma_prog_region *prog);
31int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev);
32void dma_prog_region_free(struct dma_prog_region *prog);
33
34static inline dma_addr_t dma_prog_region_offset_to_bus(struct dma_prog_region *prog, unsigned long offset)
35{
36 return prog->bus_addr + offset;
37}
38
39/* struct dma_region
40
41 a large, non-physically-contiguous DMA buffer with streaming,
42 asynchronous usage characteristics
43*/
44
45struct dma_region {
46 unsigned char *kvirt; /* kernel virtual address */
47 struct pci_dev *dev; /* PCI device */
48 unsigned int n_pages; /* # of kernel pages */
49 unsigned int n_dma_pages; /* # of IOMMU pages */
50 struct scatterlist *sglist; /* IOMMU mapping */
51 int direction; /* PCI_DMA_TODEVICE, etc */
52};
53
54/* clear out all fields but do not allocate anything */
55void dma_region_init(struct dma_region *dma);
56
57/* allocate the buffer and map it to the IOMMU */
58int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction);
59
60/* unmap and free the buffer */
61void dma_region_free(struct dma_region *dma);
62
63/* sync the CPU's view of the buffer */
64void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsigned long len);
65/* sync the IO bus' view of the buffer */
66void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, unsigned long len);
67
68/* map the buffer into a user space process */
69int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma);
70
71/* macro to index into a DMA region (or dma_prog_region) */
72#define dma_region_i(_dma, _type, _index) ( ((_type*) ((_dma)->kvirt)) + (_index) )
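/* usage sketch: dma_region_i(&buf, u32, n) yields a CPU pointer to the n'th
   u32 in the region; note that it performs no bounds checking */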
73
74/* return the DMA bus address of the byte with the given offset
75 relative to the beginning of the dma_region */
76dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset);
77
78#endif /* IEEE1394_DMA_H */
diff --git a/drivers/ieee1394/dv1394-private.h b/drivers/ieee1394/dv1394-private.h
new file mode 100644
index 000000000000..80b5ac7fe383
--- /dev/null
+++ b/drivers/ieee1394/dv1394-private.h
@@ -0,0 +1,587 @@
1/*
2 * dv1394-private.h - DV input/output over IEEE 1394 on OHCI chips
3 * Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
4 * receive by Dan Dennedy <dan@dennedy.org>
5 *
6 * based on:
7 * video1394.h - driver for OHCI 1394 boards
8 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
9 * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software Foundation,
23 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 */
25
26#ifndef _DV_1394_PRIVATE_H
27#define _DV_1394_PRIVATE_H
28
29#include "ieee1394.h"
30#include "ohci1394.h"
31#include "dma.h"
32
33/* data structures private to the dv1394 driver */
34/* none of this is exposed to user-space */
35
36
37/*
38 the 8-byte CIP (Common Isochronous Packet) header that precedes
39 each packet of DV data.
40
41 See the IEC 61883 standard.
42*/
43
44struct CIP_header { unsigned char b[8]; };
45
46static inline void fill_cip_header(struct CIP_header *cip,
47 unsigned char source_node_id,
48 unsigned long counter,
49 enum pal_or_ntsc format,
50 unsigned long timestamp)
51{
52 cip->b[0] = source_node_id;
53 cip->b[1] = 0x78; /* packet size in quadlets (480/4) - even for empty packets! */
54 cip->b[2] = 0x00;
55 cip->b[3] = counter;
56
57 cip->b[4] = 0x80; /* const */
58
59 switch(format) {
60 case DV1394_PAL:
61 cip->b[5] = 0x80;
62 break;
63 case DV1394_NTSC:
64 cip->b[5] = 0x00;
65 break;
66 }
67
68 cip->b[6] = timestamp >> 8;
69 cip->b[7] = timestamp & 0xFF;
70}
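/* For example (value chosen only for illustration), a SYT timestamp of 0x1234
   is split across the last two header bytes as b[6] = 0x12 and b[7] = 0x34. */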
71
72
73
74/*
75 DMA commands used to program the OHCI's DMA engine
76
77 See the Texas Instruments OHCI 1394 chipset documentation.
78*/
79
80struct output_more_immediate { u32 q[8]; };
81struct output_more { u32 q[4]; };
82struct output_last { u32 q[4]; };
83struct input_more { u32 q[4]; };
84struct input_last { u32 q[4]; };
85
86/* outputs */
87
88static inline void fill_output_more_immediate(struct output_more_immediate *omi,
89 unsigned char tag,
90 unsigned char channel,
91 unsigned char sync_tag,
92 unsigned int payload_size)
93{
94 omi->q[0] = cpu_to_le32(0x02000000 | 8); /* OUTPUT_MORE_IMMEDIATE; 8 is the size of the IT header */
95 omi->q[1] = 0;
96 omi->q[2] = 0;
97 omi->q[3] = 0;
98
99 /* IT packet header */
100 omi->q[4] = cpu_to_le32( (0x0 << 16) /* IEEE1394_SPEED_100 */
101 | (tag << 14)
102 | (channel << 8)
103 | (TCODE_ISO_DATA << 4)
104 | (sync_tag) );
105
106 /* reserved field; mimic behavior of my Sony DSR-40 */
107 omi->q[5] = cpu_to_le32((payload_size << 16) | (0x7F << 8) | 0xA0);
108
109 omi->q[6] = 0;
110 omi->q[7] = 0;
111}
112
113static inline void fill_output_more(struct output_more *om,
114 unsigned int data_size,
115 unsigned long data_phys_addr)
116{
117 om->q[0] = cpu_to_le32(data_size);
118 om->q[1] = cpu_to_le32(data_phys_addr);
119 om->q[2] = 0;
120 om->q[3] = 0;
121}
122
123static inline void fill_output_last(struct output_last *ol,
124 int want_timestamp,
125 int want_interrupt,
126 unsigned int data_size,
127 unsigned long data_phys_addr)
128{
129 u32 temp = 0;
130 temp |= 1 << 28; /* OUTPUT_LAST */
131
132 if (want_timestamp) /* controller will update timestamp at DMA time */
133 temp |= 1 << 27;
134
135 if (want_interrupt)
136 temp |= 3 << 20;
137
138 temp |= 3 << 18; /* must take branch */
139 temp |= data_size;
140
141 ol->q[0] = cpu_to_le32(temp);
142 ol->q[1] = cpu_to_le32(data_phys_addr);
143 ol->q[2] = 0;
144 ol->q[3] = 0;
145}
146
147/* inputs */
148
149static inline void fill_input_more(struct input_more *im,
150 int want_interrupt,
151 unsigned int data_size,
152 unsigned long data_phys_addr)
153{
154 u32 temp = 2 << 28; /* INPUT_MORE */
155 temp |= 8 << 24; /* s = 1, update xferStatus and resCount */
156 if (want_interrupt)
157 temp |= 0 << 20; /* interrupts, i=0 in packet-per-buffer mode */
158 temp |= 0x0 << 16; /* disable branch to address for packet-per-buffer mode */
159 /* disable wait on sync field, not used in DV :-( */
160 temp |= data_size;
161
162 im->q[0] = cpu_to_le32(temp);
163 im->q[1] = cpu_to_le32(data_phys_addr);
164	im->q[2] = 0; /* branchAddress and Z not used in packet-per-buffer mode */
165	im->q[3] = 0; /* xferStatus & resCount, resCount must be initialized to data_size */
166}
167
168static inline void fill_input_last(struct input_last *il,
169 int want_interrupt,
170 unsigned int data_size,
171 unsigned long data_phys_addr)
172{
173 u32 temp = 3 << 28; /* INPUT_LAST */
174 temp |= 8 << 24; /* s = 1, update xferStatus and resCount */
175 if (want_interrupt)
176 temp |= 3 << 20; /* enable interrupts */
177 temp |= 0xC << 16; /* enable branch to address */
178 /* disable wait on sync field, not used in DV :-( */
179 temp |= data_size;
180
181 il->q[0] = cpu_to_le32(temp);
182 il->q[1] = cpu_to_le32(data_phys_addr);
183 il->q[2] = cpu_to_le32(1); /* branchAddress (filled in later) and Z = 1 descriptor in next block */
184	il->q[3] = cpu_to_le32(data_size); /* xferStatus & resCount, resCount must be initialized to data_size */
185}
186
187
188
189/*
190 A "DMA descriptor block" consists of several contiguous DMA commands.
191 struct DMA_descriptor_block encapsulates all of the commands necessary
192 to send one packet of DV data.
193
194 There are three different types of these blocks:
195
196 1) command to send an empty packet (CIP header only, no DV data):
197
198 OUTPUT_MORE-Immediate <-- contains the iso header in-line
199 OUTPUT_LAST <-- points to the CIP header
200
201 2) command to send a full packet when the DV data payload does NOT
202 cross a page boundary:
203
204 OUTPUT_MORE-Immediate <-- contains the iso header in-line
205 OUTPUT_MORE <-- points to the CIP header
206 OUTPUT_LAST <-- points to entire DV data payload
207
208 3) command to send a full packet when the DV payload DOES cross
209 a page boundary:
210
211 OUTPUT_MORE-Immediate <-- contains the iso header in-line
212 OUTPUT_MORE <-- points to the CIP header
213 OUTPUT_MORE <-- points to first part of DV data payload
214 OUTPUT_LAST <-- points to second part of DV data payload
215
216 This struct describes all three block types using unions.
217
218 !!! It is vital that an even number of these descriptor blocks fit on one
219 page of memory, since a block cannot cross a page boundary !!!
220
221 */
222
223struct DMA_descriptor_block {
224
225 union {
226 struct {
227 /* iso header, common to all output block types */
228 struct output_more_immediate omi;
229
230 union {
231 /* empty packet */
232 struct {
233 struct output_last ol; /* CIP header */
234 } empty;
235
236 /* full packet */
237 struct {
238 struct output_more om; /* CIP header */
239
240 union {
241 /* payload does not cross page boundary */
242 struct {
243 struct output_last ol; /* data payload */
244 } nocross;
245
246 /* payload crosses page boundary */
247 struct {
248 struct output_more om; /* data payload */
249 struct output_last ol; /* data payload */
250 } cross;
251 } u;
252
253 } full;
254 } u;
255 } out;
256
257 struct {
258 struct input_last il;
259 } in;
260
261 } u;
262
263 /* ensure that PAGE_SIZE % sizeof(struct DMA_descriptor_block) == 0
264 by padding out to 128 bytes */
265 u32 __pad__[12];
266};
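/* Size check (assuming 4-byte u32 and 4 KB pages): the largest union arm is
   omi (32 bytes) + full.om (16) + cross.om and cross.ol (32) = 80 bytes, and
   the 12-quadlet pad brings sizeof(struct DMA_descriptor_block) to 128, so
   exactly 32 blocks fit on one page and none can straddle a page boundary. */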
267
268
269/* struct frame contains all data associated with one frame in the
270   ringbuffer. These are allocated when the DMA context is initialized
271   in do_dv1394_init(). They are re-used after the card finishes
272 transmitting the frame. */
273
274struct video_card; /* forward declaration */
275
276struct frame {
277
278 /* points to the struct video_card that owns this frame */
279 struct video_card *video;
280
281 /* index of this frame in video_card->frames[] */
282 unsigned int frame_num;
283
284 /* FRAME_CLEAR - DMA program not set up, waiting for data
285 FRAME_READY - DMA program written, ready to transmit
286
287 Changes to these should be locked against the interrupt
288 */
289 enum {
290 FRAME_CLEAR = 0,
291 FRAME_READY
292 } state;
293
294 /* whether this frame has been DMA'ed already; used only from
295 the IRQ handler to determine whether the frame can be reset */
296 int done;
297
298
299 /* kernel virtual pointer to the start of this frame's data in
300 the user ringbuffer. Use only for CPU access; to get the DMA
301 bus address you must go through the video->user_dma mapping */
302 unsigned long data;
303
304 /* Max # of packets per frame */
305#define MAX_PACKETS 500
306
307
308 /* a PAGE_SIZE memory pool for allocating CIP headers
309 !header_pool must be aligned to PAGE_SIZE! */
310 struct CIP_header *header_pool;
311 dma_addr_t header_pool_dma;
312
313
314 /* a physically contiguous memory pool for allocating DMA
315 descriptor blocks; usually around 64KB in size
316 !descriptor_pool must be aligned to PAGE_SIZE! */
317 struct DMA_descriptor_block *descriptor_pool;
318 dma_addr_t descriptor_pool_dma;
319 unsigned long descriptor_pool_size;
320
321
322 /* # of packets allocated for this frame */
323 unsigned int n_packets;
324
325
326 /* below are several pointers (kernel virtual addresses, not
327 DMA bus addresses) to parts of the DMA program. These are
328 set each time the DMA program is written in
329 frame_prepare(). They are used later on, e.g. from the
330 interrupt handler, to check the status of the frame */
331
332 /* points to status/timestamp field of first DMA packet */
333 /* (we'll check it later to monitor timestamp accuracy) */
334 u32 *frame_begin_timestamp;
335
336 /* the timestamp we assigned to the first packet in the frame */
337 u32 assigned_timestamp;
338
339 /* pointer to the first packet's CIP header (where the timestamp goes) */
340 struct CIP_header *cip_syt1;
341
342 /* pointer to the second packet's CIP header
343 (only set if the first packet was empty) */
344 struct CIP_header *cip_syt2;
345
346 /* in order to figure out what caused an interrupt,
347 store pointers to the status fields of the two packets
348 that can cause interrupts. We'll check these from the
349 interrupt handler.
350 */
351 u32 *mid_frame_timestamp;
352 u32 *frame_end_timestamp;
353
354 /* branch address field of final packet. This is effectively
355 the "tail" in the chain of DMA descriptor blocks.
356 We will fill it with the address of the first DMA descriptor
357 block in the subsequent frame, once it is ready.
358 */
359 u32 *frame_end_branch;
360
361 /* the number of descriptors in the first descriptor block
362 of the frame. Needed to start DMA */
363 int first_n_descriptors;
364};
365
366
367struct packet {
368 u16 timestamp;
369 u16 invalid;
370 u16 iso_header;
371 u16 data_length;
372 u32 cip_h1;
373 u32 cip_h2;
374 unsigned char data[480];
375 unsigned char padding[16]; /* force struct size =512 for page alignment */
376};
377
378
379/* allocate/free a frame */
380static struct frame* frame_new(unsigned int frame_num, struct video_card *video);
381static void frame_delete(struct frame *f);
382
383/* reset f so that it can be used again */
384static void frame_reset(struct frame *f);
385
386/* struct video_card contains all data associated with one instance
387 of the dv1394 driver
388*/
389enum modes {
390 MODE_RECEIVE,
391 MODE_TRANSMIT
392};
393
394struct video_card {
395
396 /* ohci card to which this instance corresponds */
397 struct ti_ohci *ohci;
398
399 /* OHCI card id; the link between the VFS inode and a specific video_card
400 (essentially the device minor number) */
401 int id;
402
403 /* entry in dv1394_cards */
404 struct list_head list;
405
406 /* OHCI card IT DMA context number, -1 if not in use */
407 int ohci_it_ctx;
408 struct ohci1394_iso_tasklet it_tasklet;
409
410 /* register offsets for current IT DMA context, 0 if not in use */
411 u32 ohci_IsoXmitContextControlSet;
412 u32 ohci_IsoXmitContextControlClear;
413 u32 ohci_IsoXmitCommandPtr;
414
415 /* OHCI card IR DMA context number, -1 if not in use */
416 struct ohci1394_iso_tasklet ir_tasklet;
417 int ohci_ir_ctx;
418
419 /* register offsets for current IR DMA context, 0 if not in use */
420 u32 ohci_IsoRcvContextControlSet;
421 u32 ohci_IsoRcvContextControlClear;
422 u32 ohci_IsoRcvCommandPtr;
423 u32 ohci_IsoRcvContextMatch;
424
425
426 /* CONCURRENCY CONTROL */
427
428 /* there are THREE levels of locking associated with video_card. */
429
430 /*
431 1) the 'open' flag - this prevents more than one process from
432 opening the device. (the driver currently assumes only one opener).
433 This is a regular int, but use test_and_set_bit() (on bit zero)
434 for atomicity.
435 */
436 unsigned long open;
437
438 /*
439 2) the spinlock - this provides mutual exclusion between the interrupt
440 handler and process-context operations. Generally you must take the
441 spinlock under the following conditions:
442 1) DMA (and hence the interrupt handler) may be running
443 AND
444 2) you need to operate on the video_card, especially active_frame
445
446 It is OK to play with video_card without taking the spinlock if
447 you are certain that DMA is not running. Even if DMA is running,
448 it is OK to *read* active_frame with the lock, then drop it
449 immediately. This is safe because the interrupt handler will never
450 advance active_frame onto a frame that is not READY (and the spinlock
451 must be held while marking a frame READY).
452
453 spinlock is also used to protect ohci_it_ctx and ohci_ir_ctx,
454 which can be accessed from both process and interrupt context
455 */
456 spinlock_t spinlock;
457
458 /* flag to prevent spurious interrupts (which OHCI seems to
459 generate a lot :) from accessing the struct */
460 int dma_running;
461
462 /*
463 3) the sleeping semaphore 'sem' - this is used from process context only,
464 to serialize various operations on the video_card. Even though only one
465 open() is allowed, we still need to prevent multiple threads of execution
466 from entering calls like read, write, ioctl, etc.
467
468 I honestly can't think of a good reason to use dv1394 from several threads
469 at once, but we need to serialize anyway to prevent oopses =).
470
471 NOTE: if you need both spinlock and sem, take sem first to avoid deadlock!
472 */
473 struct semaphore sem;
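	/* A minimal sketch of the ordering rule above (illustrative only):
	 *
	 *	if (down_interruptible(&video->sem))
	 *		return -EINTR;
	 *	spin_lock_irqsave(&video->spinlock, flags);
	 *	... touch active_frame, ohci_it_ctx, etc. ...
	 *	spin_unlock_irqrestore(&video->spinlock, flags);
	 *	up(&video->sem);
	 */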
474
475 /* people waiting for buffer space, please form a line here... */
476 wait_queue_head_t waitq;
477
478 /* support asynchronous I/O signals (SIGIO) */
479 struct fasync_struct *fasync;
480
481 /* the large, non-contiguous (rvmalloc()) ringbuffer for DV
482 data, exposed to user-space via mmap() */
483 unsigned long dv_buf_size;
484 struct dma_region dv_buf;
485
486 /* next byte in the ringbuffer that a write() call will fill */
487 size_t write_off;
488
489 struct frame *frames[DV1394_MAX_FRAMES];
490
491 /* n_frames also serves as an indicator that this struct video_card is
492 initialized and ready to run DMA buffers */
493
494 int n_frames;
495
496 /* this is the frame that is currently "owned" by the OHCI DMA controller
497 (set to -1 iff DMA is not running)
498
499 ! must lock against the interrupt handler when accessing it !
500
501 RULES:
502
503 Only the interrupt handler may change active_frame if DMA
504 is running; if not, process may change it
505
506 If the next frame is READY, the interrupt handler will advance
507 active_frame when the current frame is finished.
508
509 If the next frame is CLEAR, the interrupt handler will re-transmit
510 the current frame, and the dropped_frames counter will be incremented.
511
512 The interrupt handler will NEVER advance active_frame to a
513 frame that is not READY.
514 */
515 int active_frame;
516 int first_run;
517
518 /* the same locking rules apply to these three fields also: */
519
520	/* altered ONLY from process context. Must check frames[first_clear_frame]->state;
521 if it's READY, that means the ringbuffer is full with READY frames;
522 if it's CLEAR, that means one or more ringbuffer frames are CLEAR */
523 unsigned int first_clear_frame;
524
525 /* altered both by process and interrupt */
526 unsigned int n_clear_frames;
527
528 /* only altered by the interrupt */
529 unsigned int dropped_frames;
530
531
532
533 /* the CIP accumulator and continuity counter are properties
534 of the DMA stream as a whole (not a single frame), so they
535 are stored here in the video_card */
536
537 unsigned long cip_accum;
538 unsigned long cip_n, cip_d;
539 unsigned int syt_offset;
540 unsigned int continuity_counter;
541
542 enum pal_or_ntsc pal_or_ntsc;
543
544 /* redundant, but simplifies the code somewhat */
545 unsigned int frame_size; /* in bytes */
546
547 /* the isochronous channel to use, -1 if video card is inactive */
548 int channel;
549
550
551 /* physically contiguous packet ringbuffer for receive */
552 struct dma_region packet_buf;
553 unsigned long packet_buf_size;
554
555 unsigned int current_packet;
556 int first_frame; /* received first start frame marker? */
557 enum modes mode;
558};
559
560/*
561 if the video_card is not initialized, then the ONLY fields that are valid are:
562 ohci
563 open
564 n_frames
565*/
566
567static inline int video_card_initialized(struct video_card *v)
568{
569 return v->n_frames > 0;
570}
571
572static int do_dv1394_init(struct video_card *video, struct dv1394_init *init);
573static int do_dv1394_init_default(struct video_card *video);
574static void do_dv1394_shutdown(struct video_card *video, int free_user_buf);
575
576
577/* NTSC empty packet rate accurate to within 0.01%,
578 calibrated against a Sony DSR-40 DVCAM deck */
579
580#define CIP_N_NTSC 68000000
581#define CIP_D_NTSC 1068000000
582
583#define CIP_N_PAL 1
584#define CIP_D_PAL 16
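/* These feed the rate accumulator in the transmit path (frame_prepare() in
   dv1394.c): each full packet adds CIP_N, each empty packet subtracts
   (CIP_D - CIP_N), so the long-run fraction of empty packets converges to
   CIP_N/CIP_D (for PAL, 1/16, i.e. roughly one empty packet per 15 full
   packets). */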
585
586#endif /* _DV_1394_PRIVATE_H */
587
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
new file mode 100644
index 000000000000..68c7a5f07842
--- /dev/null
+++ b/drivers/ieee1394/dv1394.c
@@ -0,0 +1,2663 @@
1/*
2 * dv1394.c - DV input/output over IEEE 1394 on OHCI chips
3 * Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
4 * receive by Dan Dennedy <dan@dennedy.org>
5 *
6 * based on:
7 * video1394.c - video driver for OHCI 1394 boards
8 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software Foundation,
22 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 */
24
25/*
26 OVERVIEW
27
28 I designed dv1394 as a "pipe" that you can use to shoot DV onto a
29 FireWire bus. In transmission mode, dv1394 does the following:
30
31 1. accepts contiguous frames of DV data from user-space, via write()
32 or mmap() (see dv1394.h for the complete API)
33 2. wraps IEC 61883 packets around the DV data, inserting
34 empty synchronization packets as necessary
35 3. assigns accurate SYT timestamps to the outgoing packets
36 4. shoots them out using the OHCI card's IT DMA engine
37
38 Thanks to Dan Dennedy, we now have a receive mode that does the following:
39
40 1. accepts raw IEC 61883 packets from the OHCI card
41 2. re-assembles the DV data payloads into contiguous frames,
42 discarding empty packets
43 3. sends the DV data to user-space via read() or mmap()
44*/
45
46/*
47 TODO:
48
49 - tunable frame-drop behavior: either loop last frame, or halt transmission
50
51 - use a scatter/gather buffer for DMA programs (f->descriptor_pool)
52 so that we don't rely on allocating 64KB of contiguous kernel memory
53 via pci_alloc_consistent()
54
55 DONE:
56 - during reception, better handling of dropped frames and continuity errors
57 - during reception, prevent DMA from bypassing the irq tasklets
58 - reduce irq rate during reception (1/250 packets).
59 - add many more internal buffers during reception with scatter/gather dma.
60 - add dbc (continuity) checking on receive, increment status.dropped_frames
61 if not continuous.
62 - restart IT DMA after a bus reset
63 - safely obtain and release ISO Tx channels in cooperation with OHCI driver
64 - map received DIF blocks to their proper location in DV frame (ensure
65 recovery if dropped packet)
66 - handle bus resets gracefully (OHCI card seems to take care of this itself(!))
67 - do not allow resizing the user_buf once allocated; eliminate nuke_buffer_mappings
68 - eliminated #ifdef DV1394_DEBUG_LEVEL by inventing macros debug_printk and irq_printk
69 - added wmb() and mb() to places where PCI read/write ordering needs to be enforced
70 - set video->id correctly
71 - store video_cards in an array indexed by OHCI card ID, rather than a list
72 - implement DMA context allocation to cooperate with other users of the OHCI
73 - fix all XXX showstoppers
74 - disable IR/IT DMA interrupts on shutdown
75 - flush pci writes to the card by issuing a read
76 - devfs and character device dispatching (* needs testing with Linux 2.2.x)
77 - switch over to the new kernel DMA API (pci_map_*()) (* needs testing on platforms with IOMMU!)
78 - keep all video_cards in a list (for open() via chardev), set file->private_data = video
79 - dv1394_poll should indicate POLLIN when receiving buffers are available
80 - add proc fs interface to set cip_n, cip_d, syt_offset, and video signal
81 - expose xmit and recv as separate devices (not exclusive)
82 - expose NTSC and PAL as separate devices (can be overridden)
83
84*/
85
86#include <linux/config.h>
87#include <linux/kernel.h>
88#include <linux/list.h>
89#include <linux/slab.h>
90#include <linux/interrupt.h>
91#include <linux/wait.h>
92#include <linux/errno.h>
93#include <linux/module.h>
94#include <linux/init.h>
95#include <linux/pci.h>
96#include <linux/fs.h>
97#include <linux/poll.h>
98#include <linux/smp_lock.h>
99#include <linux/bitops.h>
100#include <asm/byteorder.h>
101#include <asm/atomic.h>
102#include <asm/io.h>
103#include <asm/uaccess.h>
104#include <linux/delay.h>
105#include <asm/pgtable.h>
106#include <asm/page.h>
107#include <linux/sched.h>
108#include <linux/types.h>
109#include <linux/vmalloc.h>
110#include <linux/string.h>
111#include <linux/ioctl32.h>
112#include <linux/compat.h>
113#include <linux/cdev.h>
114
115#include "ieee1394.h"
116#include "ieee1394_types.h"
117#include "nodemgr.h"
118#include "hosts.h"
119#include "ieee1394_core.h"
120#include "highlevel.h"
121#include "dv1394.h"
122#include "dv1394-private.h"
123
124#include "ohci1394.h"
125
126#ifndef virt_to_page
127#define virt_to_page(x) MAP_NR(x)
128#endif
129
130#ifndef vmalloc_32
131#define vmalloc_32(x) vmalloc(x)
132#endif
133
134
135/* DEBUG LEVELS:
136 0 - no debugging messages
137 1 - some debugging messages, but none during DMA frame transmission
138 2 - lots of messages, including during DMA frame transmission
139   (will cause underflows if your machine is too slow!)
140*/
141
142#define DV1394_DEBUG_LEVEL 0
143
144/* for debugging use ONLY: allow more than one open() of the device */
145/* #define DV1394_ALLOW_MORE_THAN_ONE_OPEN 1 */
146
147#if DV1394_DEBUG_LEVEL >= 2
148#define irq_printk( args... ) printk( args )
149#else
150#define irq_printk( args... )
151#endif
152
153#if DV1394_DEBUG_LEVEL >= 1
154#define debug_printk( args... ) printk( args)
155#else
156#define debug_printk( args... )
157#endif
158
159/* issue a dummy PCI read to force the preceding write
160 to be posted to the PCI bus immediately */
161
162static inline void flush_pci_write(struct ti_ohci *ohci)
163{
164 mb();
165 reg_read(ohci, OHCI1394_IsochronousCycleTimer);
166}
167
168static void it_tasklet_func(unsigned long data);
169static void ir_tasklet_func(unsigned long data);
170
171#ifdef CONFIG_COMPAT
172static long dv1394_compat_ioctl(struct file *file, unsigned int cmd,
173 unsigned long arg);
174#endif
175
176/* GLOBAL DATA */
177
178/* list of all video_cards */
179static LIST_HEAD(dv1394_cards);
180static DEFINE_SPINLOCK(dv1394_cards_lock);
181
182/* translate from a struct file* to the corresponding struct video_card* */
183
184static inline struct video_card* file_to_video_card(struct file *file)
185{
186 return (struct video_card*) file->private_data;
187}
188
189/*** FRAME METHODS *********************************************************/
190
191static void frame_reset(struct frame *f)
192{
193 f->state = FRAME_CLEAR;
194 f->done = 0;
195 f->n_packets = 0;
196 f->frame_begin_timestamp = NULL;
197 f->assigned_timestamp = 0;
198 f->cip_syt1 = NULL;
199 f->cip_syt2 = NULL;
200 f->mid_frame_timestamp = NULL;
201 f->frame_end_timestamp = NULL;
202 f->frame_end_branch = NULL;
203}
204
205static struct frame* frame_new(unsigned int frame_num, struct video_card *video)
206{
207 struct frame *f = kmalloc(sizeof(*f), GFP_KERNEL);
208 if (!f)
209 return NULL;
210
211 f->video = video;
212 f->frame_num = frame_num;
213
214 f->header_pool = pci_alloc_consistent(f->video->ohci->dev, PAGE_SIZE, &f->header_pool_dma);
215 if (!f->header_pool) {
216 printk(KERN_ERR "dv1394: failed to allocate CIP header pool\n");
217 kfree(f);
218 return NULL;
219 }
220
221 debug_printk("dv1394: frame_new: allocated CIP header pool at virt 0x%08lx (contig) dma 0x%08lx size %ld\n",
222 (unsigned long) f->header_pool, (unsigned long) f->header_pool_dma, PAGE_SIZE);
223
224 f->descriptor_pool_size = MAX_PACKETS * sizeof(struct DMA_descriptor_block);
225 /* make it an even # of pages */
226 f->descriptor_pool_size += PAGE_SIZE - (f->descriptor_pool_size%PAGE_SIZE);
227
228 f->descriptor_pool = pci_alloc_consistent(f->video->ohci->dev,
229 f->descriptor_pool_size,
230 &f->descriptor_pool_dma);
231 if (!f->descriptor_pool) {
232 pci_free_consistent(f->video->ohci->dev, PAGE_SIZE, f->header_pool, f->header_pool_dma);
233 kfree(f);
234 return NULL;
235 }
236
237 debug_printk("dv1394: frame_new: allocated DMA program memory at virt 0x%08lx (contig) dma 0x%08lx size %ld\n",
238 (unsigned long) f->descriptor_pool, (unsigned long) f->descriptor_pool_dma, f->descriptor_pool_size);
239
240 f->data = 0;
241 frame_reset(f);
242
243 return f;
244}
245
246static void frame_delete(struct frame *f)
247{
248 pci_free_consistent(f->video->ohci->dev, PAGE_SIZE, f->header_pool, f->header_pool_dma);
249 pci_free_consistent(f->video->ohci->dev, f->descriptor_pool_size, f->descriptor_pool, f->descriptor_pool_dma);
250 kfree(f);
251}
252
253
254
255
256/*
257 frame_prepare() - build the DMA program for transmitting
258
259 Frame_prepare() must be called OUTSIDE the video->spinlock.
260 However, frame_prepare() must still be serialized, so
261 it should be called WITH the video->sem taken.
262 */
263
264static void frame_prepare(struct video_card *video, unsigned int this_frame)
265{
266 struct frame *f = video->frames[this_frame];
267 int last_frame;
268
269 struct DMA_descriptor_block *block;
270 dma_addr_t block_dma;
271 struct CIP_header *cip;
272 dma_addr_t cip_dma;
273
274 unsigned int n_descriptors, full_packets, packets_per_frame, payload_size;
275
276 /* these flags denote packets that need special attention */
277 int empty_packet, first_packet, last_packet, mid_packet;
278
279 u32 *branch_address, *last_branch_address = NULL;
280 unsigned long data_p;
281 int first_packet_empty = 0;
282 u32 cycleTimer, ct_sec, ct_cyc, ct_off;
283 unsigned long irq_flags;
284
285 irq_printk("frame_prepare( %d ) ---------------------\n", this_frame);
286
287 full_packets = 0;
288
289
290
291 if (video->pal_or_ntsc == DV1394_PAL)
292 packets_per_frame = DV1394_PAL_PACKETS_PER_FRAME;
293 else
294 packets_per_frame = DV1394_NTSC_PACKETS_PER_FRAME;
295
296 while ( full_packets < packets_per_frame ) {
297 empty_packet = first_packet = last_packet = mid_packet = 0;
298
299 data_p = f->data + full_packets * 480;
300
301 /************************************************/
302 /* allocate a descriptor block and a CIP header */
303 /************************************************/
304
305 /* note: these should NOT cross a page boundary (DMA restriction) */
306
307 if (f->n_packets >= MAX_PACKETS) {
308 printk(KERN_ERR "dv1394: FATAL ERROR: max packet count exceeded\n");
309 return;
310 }
311
312 /* the block surely won't cross a page boundary,
313 since an even number of descriptor_blocks fit on a page */
314 block = &(f->descriptor_pool[f->n_packets]);
315
316 /* DMA address of the block = offset of block relative
317 to the kernel base address of the descriptor pool
318 + DMA base address of the descriptor pool */
319 block_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
320
321
322 /* the whole CIP pool fits on one page, so no worries about boundaries */
323 if ( ((unsigned long) &(f->header_pool[f->n_packets]) - (unsigned long) f->header_pool)
324 > PAGE_SIZE) {
325 printk(KERN_ERR "dv1394: FATAL ERROR: no room to allocate CIP header\n");
326 return;
327 }
328
329 cip = &(f->header_pool[f->n_packets]);
330
331 /* DMA address of the CIP header = offset of cip
332 relative to kernel base address of the header pool
333 + DMA base address of the header pool */
334 cip_dma = (unsigned long) cip % PAGE_SIZE + f->header_pool_dma;
335
336 /* is this an empty packet? */
337
338 if (video->cip_accum > (video->cip_d - video->cip_n)) {
339 empty_packet = 1;
340 payload_size = 8;
341 video->cip_accum -= (video->cip_d - video->cip_n);
342 } else {
343 payload_size = 488;
344 video->cip_accum += video->cip_n;
345 }
346
347 /* there are three important packets each frame:
348
349 the first packet in the frame - we ask the card to record the timestamp when
350 this packet is actually sent, so we can monitor
351 how accurate our timestamps are. Also, the first
352 packet serves as a semaphore to let us know that
353 it's OK to free the *previous* frame's DMA buffer
354
355 the last packet in the frame - this packet is used to detect buffer underflows.
356 if this is the last ready frame, the last DMA block
357 will have a branch back to the beginning of the frame
358 (so that the card will re-send the frame on underflow).
359 if this branch gets taken, we know that at least one
360 frame has been dropped. When the next frame is ready,
361 the branch is pointed to its first packet, and the
362 semaphore is disabled.
363
364 a "mid" packet slightly before the end of the frame - this packet should trigger
365 an interrupt so we can go and assign a timestamp to the first packet
366 in the next frame. We don't use the very last packet in the frame
367 for this purpose, because that would leave very little time to set
368 the timestamp before DMA starts on the next frame.
369 */
370
371 if (f->n_packets == 0) {
372 first_packet = 1;
373 } else if ( full_packets == (packets_per_frame-1) ) {
374 last_packet = 1;
375 } else if (f->n_packets == packets_per_frame) {
376 mid_packet = 1;
377 }
378
379
380 /********************/
381 /* setup CIP header */
382 /********************/
383
384 /* the timestamp will be written later from the
385 mid-frame interrupt handler. For now we just
386 store the address of the CIP header(s) that
387 need a timestamp. */
388
389 /* first packet in the frame needs a timestamp */
390 if (first_packet) {
391 f->cip_syt1 = cip;
392 if (empty_packet)
393 first_packet_empty = 1;
394
395 } else if (first_packet_empty && (f->n_packets == 1) ) {
396 /* if the first packet was empty, the second
397 packet's CIP header also needs a timestamp */
398 f->cip_syt2 = cip;
399 }
400
401 fill_cip_header(cip,
402 /* the node ID number of the OHCI card */
403 reg_read(video->ohci, OHCI1394_NodeID) & 0x3F,
404 video->continuity_counter,
405 video->pal_or_ntsc,
406 0xFFFF /* the timestamp is filled in later */);
407
408 /* advance counter, only for full packets */
409 if ( ! empty_packet )
410 video->continuity_counter++;
411
412 /******************************/
413 /* setup DMA descriptor block */
414 /******************************/
415
416 /* first descriptor - OUTPUT_MORE_IMMEDIATE, for the controller's IT header */
417 fill_output_more_immediate( &(block->u.out.omi), 1, video->channel, 0, payload_size);
418
419 if (empty_packet) {
420 /* second descriptor - OUTPUT_LAST for CIP header */
421 fill_output_last( &(block->u.out.u.empty.ol),
422
423 /* want completion status on all interesting packets */
424 (first_packet || mid_packet || last_packet) ? 1 : 0,
425
426 /* want interrupts on all interesting packets */
427 (first_packet || mid_packet || last_packet) ? 1 : 0,
428
429 sizeof(struct CIP_header), /* data size */
430 cip_dma);
431
432 if (first_packet)
433 f->frame_begin_timestamp = &(block->u.out.u.empty.ol.q[3]);
434 else if (mid_packet)
435 f->mid_frame_timestamp = &(block->u.out.u.empty.ol.q[3]);
436 else if (last_packet) {
437 f->frame_end_timestamp = &(block->u.out.u.empty.ol.q[3]);
438 f->frame_end_branch = &(block->u.out.u.empty.ol.q[2]);
439 }
440
441 branch_address = &(block->u.out.u.empty.ol.q[2]);
442 n_descriptors = 3;
443 if (first_packet)
444 f->first_n_descriptors = n_descriptors;
445
446 } else { /* full packet */
447
448 /* second descriptor - OUTPUT_MORE for CIP header */
449 fill_output_more( &(block->u.out.u.full.om),
450 sizeof(struct CIP_header), /* data size */
451 cip_dma);
452
453
454 /* third (and possibly fourth) descriptor - for DV data */
455 /* the 480-byte payload can cross a page boundary; if so,
456 we need to split it into two DMA descriptors */
457
458 /* does the 480-byte data payload cross a page boundary? */
459 if ( (PAGE_SIZE- ((unsigned long)data_p % PAGE_SIZE) ) < 480 ) {
460
461 /* page boundary crossed */
462
463 fill_output_more( &(block->u.out.u.full.u.cross.om),
464 /* data size - how much of data_p fits on the first page */
465 PAGE_SIZE - (data_p % PAGE_SIZE),
466
467 /* DMA address of data_p */
468 dma_region_offset_to_bus(&video->dv_buf,
469 data_p - (unsigned long) video->dv_buf.kvirt));
470
471 fill_output_last( &(block->u.out.u.full.u.cross.ol),
472
473 /* want completion status on all interesting packets */
474 (first_packet || mid_packet || last_packet) ? 1 : 0,
475
476 /* want interrupt on all interesting packets */
477 (first_packet || mid_packet || last_packet) ? 1 : 0,
478
479 /* data size - remaining portion of data_p */
480 480 - (PAGE_SIZE - (data_p % PAGE_SIZE)),
481
482 /* DMA address of data_p + PAGE_SIZE - (data_p % PAGE_SIZE) */
483 dma_region_offset_to_bus(&video->dv_buf,
484 data_p + PAGE_SIZE - (data_p % PAGE_SIZE) - (unsigned long) video->dv_buf.kvirt));
485
486 if (first_packet)
487 f->frame_begin_timestamp = &(block->u.out.u.full.u.cross.ol.q[3]);
488 else if (mid_packet)
489 f->mid_frame_timestamp = &(block->u.out.u.full.u.cross.ol.q[3]);
490 else if (last_packet) {
491 f->frame_end_timestamp = &(block->u.out.u.full.u.cross.ol.q[3]);
492 f->frame_end_branch = &(block->u.out.u.full.u.cross.ol.q[2]);
493 }
494
495 branch_address = &(block->u.out.u.full.u.cross.ol.q[2]);
496
497 n_descriptors = 5;
498 if (first_packet)
499 f->first_n_descriptors = n_descriptors;
500
501 full_packets++;
502
503 } else {
504 /* fits on one page */
505
506 fill_output_last( &(block->u.out.u.full.u.nocross.ol),
507
508 /* want completion status on all interesting packets */
509 (first_packet || mid_packet || last_packet) ? 1 : 0,
510
511 /* want interrupt on all interesting packets */
512 (first_packet || mid_packet || last_packet) ? 1 : 0,
513
514 480, /* data size (480 bytes of DV data) */
515
516
517 /* DMA address of data_p */
518 dma_region_offset_to_bus(&video->dv_buf,
519 data_p - (unsigned long) video->dv_buf.kvirt));
520
521 if (first_packet)
522 f->frame_begin_timestamp = &(block->u.out.u.full.u.nocross.ol.q[3]);
523 else if (mid_packet)
524 f->mid_frame_timestamp = &(block->u.out.u.full.u.nocross.ol.q[3]);
525 else if (last_packet) {
526 f->frame_end_timestamp = &(block->u.out.u.full.u.nocross.ol.q[3]);
527 f->frame_end_branch = &(block->u.out.u.full.u.nocross.ol.q[2]);
528 }
529
530 branch_address = &(block->u.out.u.full.u.nocross.ol.q[2]);
531
532 n_descriptors = 4;
533 if (first_packet)
534 f->first_n_descriptors = n_descriptors;
535
536 full_packets++;
537 }
538 }
539
540 /* link this descriptor block into the DMA program by filling in
541 the branch address of the previous block */
542
543 /* note: we are not linked into the active DMA chain yet */
544
545 if (last_branch_address) {
546 *(last_branch_address) = cpu_to_le32(block_dma | n_descriptors);
547 }
548
549 last_branch_address = branch_address;
550
551
552 f->n_packets++;
553
554 }
555
556 /* when we first assemble a new frame, set the final branch
557 to loop back up to the top */
558 *(f->frame_end_branch) = cpu_to_le32(f->descriptor_pool_dma | f->first_n_descriptors);
559
560 /* make the latest version of this frame visible to the PCI card */
561 dma_region_sync_for_device(&video->dv_buf, f->data - (unsigned long) video->dv_buf.kvirt, video->frame_size);
562
563 /* lock against DMA interrupt */
564 spin_lock_irqsave(&video->spinlock, irq_flags);
565
566 f->state = FRAME_READY;
567
568 video->n_clear_frames--;
569
570 last_frame = video->first_clear_frame - 1;
571 if (last_frame == -1)
572 last_frame = video->n_frames-1;
573
574 video->first_clear_frame = (video->first_clear_frame + 1) % video->n_frames;
575
576 irq_printk(" frame %d prepared, active_frame = %d, n_clear_frames = %d, first_clear_frame = %d\n last=%d\n",
577 this_frame, video->active_frame, video->n_clear_frames, video->first_clear_frame, last_frame);
578
579 irq_printk(" begin_ts %08lx mid_ts %08lx end_ts %08lx end_br %08lx\n",
580 (unsigned long) f->frame_begin_timestamp,
581 (unsigned long) f->mid_frame_timestamp,
582 (unsigned long) f->frame_end_timestamp,
583 (unsigned long) f->frame_end_branch);
584
585 if (video->active_frame != -1) {
586
587 /* if DMA is already active, we are almost done */
588 /* just link us onto the active DMA chain */
589 if (video->frames[last_frame]->frame_end_branch) {
590 u32 temp;
591
592 /* point the previous frame's tail to this frame's head */
593 *(video->frames[last_frame]->frame_end_branch) = cpu_to_le32(f->descriptor_pool_dma | f->first_n_descriptors);
594
595 /* this write MUST precede the next one, or we could silently drop frames */
596 wmb();
597
598 /* disable the want_status semaphore on the last packet */
599 temp = le32_to_cpu(*(video->frames[last_frame]->frame_end_branch - 2));
600 temp &= 0xF7CFFFFF;
601 *(video->frames[last_frame]->frame_end_branch - 2) = cpu_to_le32(temp);
602
603 /* flush these writes to memory ASAP */
604 flush_pci_write(video->ohci);
605
606 /* NOTE:
607 ideally the writes should be "atomic": if
608 the OHCI card reads the want_status flag in
609 between them, we'll falsely report a
610 dropped frame. Hopefully this window is too
611 small to really matter, and the consequence
612 is rather harmless. */
613
614
615 irq_printk(" new frame %d linked onto DMA chain\n", this_frame);
616
617 } else {
618 printk(KERN_ERR "dv1394: last frame not ready???\n");
619 }
620
621 } else {
622
623 u32 transmit_sec, transmit_cyc;
624 u32 ts_cyc, ts_off;
625
626 /* DMA is stopped, so this is the very first frame */
627 video->active_frame = this_frame;
628
629 /* set CommandPtr to address and size of first descriptor block */
630 reg_write(video->ohci, video->ohci_IsoXmitCommandPtr,
631 video->frames[video->active_frame]->descriptor_pool_dma |
632 f->first_n_descriptors);
633
634 /* assign a timestamp based on the current cycle time...
635 We'll tell the card to begin DMA 100 cycles from now,
636 and assign a timestamp 103 cycles from now */
637
638 cycleTimer = reg_read(video->ohci, OHCI1394_IsochronousCycleTimer);
639
640 ct_sec = cycleTimer >> 25;
641 ct_cyc = (cycleTimer >> 12) & 0x1FFF;
642 ct_off = cycleTimer & 0xFFF;
643
644 transmit_sec = ct_sec;
645 transmit_cyc = ct_cyc + 100;
646
647 transmit_sec += transmit_cyc/8000;
648 transmit_cyc %= 8000;
649
650 ts_off = ct_off;
651 ts_cyc = transmit_cyc + 3;
652 ts_cyc %= 8000;
653
654 f->assigned_timestamp = (ts_cyc&0xF) << 12;
655
656 /* now actually write the timestamp into the appropriate CIP headers */
657 if (f->cip_syt1) {
658 f->cip_syt1->b[6] = f->assigned_timestamp >> 8;
659 f->cip_syt1->b[7] = f->assigned_timestamp & 0xFF;
660 }
661 if (f->cip_syt2) {
662 f->cip_syt2->b[6] = f->assigned_timestamp >> 8;
663 f->cip_syt2->b[7] = f->assigned_timestamp & 0xFF;
664 }
665
666 /* --- start DMA --- */
667
668 /* clear all bits in ContextControl register */
669
670 reg_write(video->ohci, video->ohci_IsoXmitContextControlClear, 0xFFFFFFFF);
671 wmb();
672
673 /* the OHCI card has the ability to start ISO transmission on a
674 particular cycle (start-on-cycle). This way we can ensure that
675 the first DV frame will have an accurate timestamp.
676
677 However, start-on-cycle only appears to work if the OHCI card
678 is cycle master! Since the consequences of messing up the first
679 timestamp are minimal*, just disable start-on-cycle for now.
680
681 * my DV deck drops the first few frames before it "locks in;"
682 so the first frame having an incorrect timestamp is inconsequential.
683 */
684
685#if 0
686 reg_write(video->ohci, video->ohci_IsoXmitContextControlSet,
687 (1 << 31) /* enable start-on-cycle */
688 | ( (transmit_sec & 0x3) << 29)
689 | (transmit_cyc << 16));
690 wmb();
691#endif
692
693 video->dma_running = 1;
694
695 /* set the 'run' bit */
696 reg_write(video->ohci, video->ohci_IsoXmitContextControlSet, 0x8000);
697 flush_pci_write(video->ohci);
698
699 /* --- DMA should be running now --- */
700
701 debug_printk(" Cycle = %4u ContextControl = %08x CmdPtr = %08x\n",
702 (reg_read(video->ohci, OHCI1394_IsochronousCycleTimer) >> 12) & 0x1FFF,
703 reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
704 reg_read(video->ohci, video->ohci_IsoXmitCommandPtr));
705
706 debug_printk(" DMA start - current cycle %4u, transmit cycle %4u (%2u), assigning ts cycle %2u\n",
707 ct_cyc, transmit_cyc, transmit_cyc & 0xF, ts_cyc & 0xF);
708
709#if DV1394_DEBUG_LEVEL >= 2
710 {
711 /* check if DMA is really running */
712 int i = 0;
713 while (i < 20) {
714 mb();
715 mdelay(1);
716 if (reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & (1 << 10)) {
717 printk("DMA ACTIVE after %d msec\n", i);
718 break;
719 }
720 i++;
721 }
722
723 printk("set = %08x, cmdPtr = %08x\n",
724 reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
725 reg_read(video->ohci, video->ohci_IsoXmitCommandPtr)
726 );
727
728 if ( ! (reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & (1 << 10)) ) {
729 printk("DMA did NOT go active after 20ms, event = %x\n",
730 reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & 0x1F);
731 } else
732 printk("DMA is RUNNING!\n");
733 }
734#endif
735
736 }
737
738
739 spin_unlock_irqrestore(&video->spinlock, irq_flags);
740}
741
742
743
744/*** RECEIVE FUNCTIONS *****************************************************/
745
746/*
747 frame method put_packet
748
749 map and copy the packet data to its location in the frame
750 based upon DIF section and sequence
751*/
752
753static void inline
754frame_put_packet (struct frame *f, struct packet *p)
755{
756 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
757 int dif_sequence = p->data[1] >> 4; /* dif sequence number is in bits 4 - 7 */
758 int dif_block = p->data[2];
759
760 /* sanity check */
761 if (dif_sequence > 11 || dif_block > 149) return;
762
763 switch (section_type) {
764 case 0: /* 1 Header block */
765 memcpy( (void *) f->data + dif_sequence * 150 * 80, p->data, 480);
766 break;
767
768 case 1: /* 2 Subcode blocks */
769 memcpy( (void *) f->data + dif_sequence * 150 * 80 + (1 + dif_block) * 80, p->data, 480);
770 break;
771
772 case 2: /* 3 VAUX blocks */
773 memcpy( (void *) f->data + dif_sequence * 150 * 80 + (3 + dif_block) * 80, p->data, 480);
774 break;
775
776 case 3: /* 9 Audio blocks interleaved with video */
777 memcpy( (void *) f->data + dif_sequence * 150 * 80 + (6 + dif_block * 16) * 80, p->data, 480);
778 break;
779
780 case 4: /* 135 Video blocks interleaved with audio */
781 memcpy( (void *) f->data + dif_sequence * 150 * 80 + (7 + (dif_block / 15) + dif_block) * 80, p->data, 480);
782 break;
783
784 default: /* we can not handle any other data */
785 break;
786 }
787}
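/* Worked example of the mapping above (values chosen only for illustration):
   a video block (section_type 4) with dif_sequence 0 and dif_block 15 is
   copied to byte offset (7 + 15/15 + 15) * 80 = 1840 from the start of the
   frame data. */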
788
789
790static void start_dma_receive(struct video_card *video)
791{
792 if (video->first_run == 1) {
793 video->first_run = 0;
794
795 /* start DMA once all of the frames are READY */
796 video->n_clear_frames = 0;
797 video->first_clear_frame = -1;
798 video->current_packet = 0;
799 video->active_frame = 0;
800
801 /* reset iso recv control register */
802 reg_write(video->ohci, video->ohci_IsoRcvContextControlClear, 0xFFFFFFFF);
803 wmb();
804
805 /* clear bufferFill, set isochHeader and speed (0=100) */
806 reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, 0x40000000);
807
808 /* match on all tags, listen on channel */
809 reg_write(video->ohci, video->ohci_IsoRcvContextMatch, 0xf0000000 | video->channel);
810
811 /* address and first descriptor block + Z=1 */
812 reg_write(video->ohci, video->ohci_IsoRcvCommandPtr,
813 video->frames[0]->descriptor_pool_dma | 1); /* Z=1 */
814 wmb();
815
816 video->dma_running = 1;
817
818 /* run */
819 reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, 0x8000);
820 flush_pci_write(video->ohci);
821
822 debug_printk("dv1394: DMA started\n");
823
824#if DV1394_DEBUG_LEVEL >= 2
825 {
826 int i;
827
828 for (i = 0; i < 1000; ++i) {
829 mdelay(1);
830 if (reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 10)) {
831 printk("DMA ACTIVE after %d msec\n", i);
832 break;
833 }
834 }
835 if ( reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 11) ) {
836 printk("DEAD, event = %x\n",
837 reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & 0x1F);
838 } else
839 printk("RUNNING!\n");
840 }
841#endif
842 } else if ( reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 11) ) {
843 debug_printk("DEAD, event = %x\n",
844 reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & 0x1F);
845
846 /* wake */
847 reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 12));
848 }
849}
850
851
852/*
853 receive_packets() - build the DMA program for receiving
854*/
855
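/* The "DMA program" built here is one INPUT_LAST descriptor per expected
   packet (filled in by fill_input_last()): q[0] holds the control word,
   q[1] the bus address of the packet buffer, q[2] the branch address with
   the Z count in its low bits, and q[3] xferStatus/resCount.  Each frame's
   descriptors live in its descriptor_pool, and frame_end_branch remembers
   the last branch word so successive frames can be chained into one big
   ring of packet buffers. */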
856static void receive_packets(struct video_card *video)
857{
858 struct DMA_descriptor_block *block = NULL;
859 dma_addr_t block_dma = 0;
860 struct packet *data = NULL;
861 dma_addr_t data_dma = 0;
862 u32 *last_branch_address = NULL;
863 unsigned long irq_flags;
864 int want_interrupt = 0;
865 struct frame *f = NULL;
866 int i, j;
867
868 spin_lock_irqsave(&video->spinlock, irq_flags);
869
870 for (j = 0; j < video->n_frames; j++) {
871
872 /* connect frames */
873 if (j > 0 && f != NULL && f->frame_end_branch != NULL)
874 *(f->frame_end_branch) = cpu_to_le32(video->frames[j]->descriptor_pool_dma | 1); /* set Z=1 */
875
876 f = video->frames[j];
877
878 for (i = 0; i < MAX_PACKETS; i++) {
879 /* locate a descriptor block and packet from the buffer */
880 block = &(f->descriptor_pool[i]);
881 block_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
882
883 data = ((struct packet*)video->packet_buf.kvirt) + f->frame_num * MAX_PACKETS + i;
884 data_dma = dma_region_offset_to_bus( &video->packet_buf,
885 ((unsigned long) data - (unsigned long) video->packet_buf.kvirt) );
886
887 /* setup DMA descriptor block */
888 want_interrupt = ((i % (MAX_PACKETS/2)) == 0 || i == (MAX_PACKETS-1));
889 fill_input_last( &(block->u.in.il), want_interrupt, 512, data_dma);
890
891 /* link descriptors */
892 last_branch_address = f->frame_end_branch;
893
894 if (last_branch_address != NULL)
895 *(last_branch_address) = cpu_to_le32(block_dma | 1); /* set Z=1 */
896
897 f->frame_end_branch = &(block->u.in.il.q[2]);
898 }
899
900 } /* next j */
901
902 spin_unlock_irqrestore(&video->spinlock, irq_flags);
903
904}
905
906
907
908/*** MANAGEMENT FUNCTIONS **************************************************/
909
910static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
911{
912 unsigned long flags, new_buf_size;
913 int i;
914 u64 chan_mask;
915 int retval = -EINVAL;
916
917 debug_printk("dv1394: initialising %d\n", video->id);
918 if (init->api_version != DV1394_API_VERSION)
919 return -EINVAL;
920
921 /* first sanitize all the parameters */
922 if ( (init->n_frames < 2) || (init->n_frames > DV1394_MAX_FRAMES) )
923 return -EINVAL;
924
925 if ( (init->format != DV1394_NTSC) && (init->format != DV1394_PAL) )
926 return -EINVAL;
927
928 if ( (init->syt_offset == 0) || (init->syt_offset > 50) )
929 /* default SYT offset is 3 cycles */
930 init->syt_offset = 3;
931
932 if ( (init->channel > 63) || (init->channel < 0) )
933 init->channel = 63;
934
935 chan_mask = (u64)1 << init->channel;
936
937 /* calculate what size DMA buffer is needed */
938 if (init->format == DV1394_NTSC)
939 new_buf_size = DV1394_NTSC_FRAME_SIZE * init->n_frames;
940 else
941 new_buf_size = DV1394_PAL_FRAME_SIZE * init->n_frames;
942
943 /* round up to PAGE_SIZE */
944 if (new_buf_size % PAGE_SIZE) new_buf_size += PAGE_SIZE - (new_buf_size % PAGE_SIZE);
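	/* (equivalent to PAGE_ALIGN(new_buf_size): the frames are packed
	   back-to-back in one dma_region, which is allocated and mmap()ed
	   in whole pages) */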
945
946 /* don't allow the user to allocate the DMA buffer more than once */
947 if (video->dv_buf.kvirt && video->dv_buf_size != new_buf_size) {
948		printk(KERN_ERR "dv1394: re-sizing the DMA buffer is not allowed\n");
949 return -EINVAL;
950 }
951
952 /* shutdown the card if it's currently active */
953 /* (the card should not be reset if the parameters are screwy) */
954
955 do_dv1394_shutdown(video, 0);
956
957 /* try to claim the ISO channel */
958 spin_lock_irqsave(&video->ohci->IR_channel_lock, flags);
959 if (video->ohci->ISO_channel_usage & chan_mask) {
960 spin_unlock_irqrestore(&video->ohci->IR_channel_lock, flags);
961 retval = -EBUSY;
962 goto err;
963 }
964 video->ohci->ISO_channel_usage |= chan_mask;
965 spin_unlock_irqrestore(&video->ohci->IR_channel_lock, flags);
966
967 video->channel = init->channel;
968
969 /* initialize misc. fields of video */
970 video->n_frames = init->n_frames;
971 video->pal_or_ntsc = init->format;
972
973 video->cip_accum = 0;
974 video->continuity_counter = 0;
975
976 video->active_frame = -1;
977 video->first_clear_frame = 0;
978 video->n_clear_frames = video->n_frames;
979 video->dropped_frames = 0;
980
981 video->write_off = 0;
982
983 video->first_run = 1;
984 video->current_packet = -1;
985 video->first_frame = 0;
986
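	/* cip_n/cip_d form the fraction used by the CIP timing logic to decide
	   how often an "empty" (CIP-header-only) packet has to be inserted,
	   which paces the DV data against the fixed 8000 packets/sec
	   isochronous cycle rate; 0/0 means "use the built-in default rate"
	   for the chosen video system. */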
987 if (video->pal_or_ntsc == DV1394_NTSC) {
988 video->cip_n = init->cip_n != 0 ? init->cip_n : CIP_N_NTSC;
989 video->cip_d = init->cip_d != 0 ? init->cip_d : CIP_D_NTSC;
990 video->frame_size = DV1394_NTSC_FRAME_SIZE;
991 } else {
992 video->cip_n = init->cip_n != 0 ? init->cip_n : CIP_N_PAL;
993 video->cip_d = init->cip_d != 0 ? init->cip_d : CIP_D_PAL;
994 video->frame_size = DV1394_PAL_FRAME_SIZE;
995 }
996
997 video->syt_offset = init->syt_offset;
998
999 /* find and claim DMA contexts on the OHCI card */
1000
1001 if (video->ohci_it_ctx == -1) {
1002 ohci1394_init_iso_tasklet(&video->it_tasklet, OHCI_ISO_TRANSMIT,
1003 it_tasklet_func, (unsigned long) video);
1004
1005 if (ohci1394_register_iso_tasklet(video->ohci, &video->it_tasklet) < 0) {
1006 printk(KERN_ERR "dv1394: could not find an available IT DMA context\n");
1007 retval = -EBUSY;
1008 goto err;
1009 }
1010
1011 video->ohci_it_ctx = video->it_tasklet.context;
1012 debug_printk("dv1394: claimed IT DMA context %d\n", video->ohci_it_ctx);
1013 }
1014
1015 if (video->ohci_ir_ctx == -1) {
1016 ohci1394_init_iso_tasklet(&video->ir_tasklet, OHCI_ISO_RECEIVE,
1017 ir_tasklet_func, (unsigned long) video);
1018
1019 if (ohci1394_register_iso_tasklet(video->ohci, &video->ir_tasklet) < 0) {
1020 printk(KERN_ERR "dv1394: could not find an available IR DMA context\n");
1021 retval = -EBUSY;
1022 goto err;
1023 }
1024 video->ohci_ir_ctx = video->ir_tasklet.context;
1025 debug_printk("dv1394: claimed IR DMA context %d\n", video->ohci_ir_ctx);
1026 }
1027
1028 /* allocate struct frames */
1029 for (i = 0; i < init->n_frames; i++) {
1030 video->frames[i] = frame_new(i, video);
1031
1032 if (!video->frames[i]) {
1033 printk(KERN_ERR "dv1394: Cannot allocate frame structs\n");
1034 retval = -ENOMEM;
1035 goto err;
1036 }
1037 }
1038
1039 if (!video->dv_buf.kvirt) {
1040 /* allocate the ringbuffer */
1041 retval = dma_region_alloc(&video->dv_buf, new_buf_size, video->ohci->dev, PCI_DMA_TODEVICE);
1042 if (retval)
1043 goto err;
1044
1045 video->dv_buf_size = new_buf_size;
1046
1047 debug_printk("dv1394: Allocated %d frame buffers, total %u pages (%u DMA pages), %lu bytes\n",
1048 video->n_frames, video->dv_buf.n_pages,
1049 video->dv_buf.n_dma_pages, video->dv_buf_size);
1050 }
1051
1052 /* set up the frame->data pointers */
1053 for (i = 0; i < video->n_frames; i++)
1054 video->frames[i]->data = (unsigned long) video->dv_buf.kvirt + i * video->frame_size;
1055
1056 if (!video->packet_buf.kvirt) {
1057 /* allocate packet buffer */
1058 video->packet_buf_size = sizeof(struct packet) * video->n_frames * MAX_PACKETS;
1059 if (video->packet_buf_size % PAGE_SIZE)
1060 video->packet_buf_size += PAGE_SIZE - (video->packet_buf_size % PAGE_SIZE);
1061
1062 retval = dma_region_alloc(&video->packet_buf, video->packet_buf_size,
1063 video->ohci->dev, PCI_DMA_FROMDEVICE);
1064 if (retval)
1065 goto err;
1066
1067 debug_printk("dv1394: Allocated %d packets in buffer, total %u pages (%u DMA pages), %lu bytes\n",
1068 video->n_frames*MAX_PACKETS, video->packet_buf.n_pages,
1069 video->packet_buf.n_dma_pages, video->packet_buf_size);
1070 }
1071
1072 /* set up register offsets for IT context */
1073 /* IT DMA context registers are spaced 16 bytes apart */
1074 video->ohci_IsoXmitContextControlSet = OHCI1394_IsoXmitContextControlSet+16*video->ohci_it_ctx;
1075 video->ohci_IsoXmitContextControlClear = OHCI1394_IsoXmitContextControlClear+16*video->ohci_it_ctx;
1076 video->ohci_IsoXmitCommandPtr = OHCI1394_IsoXmitCommandPtr+16*video->ohci_it_ctx;
1077
1078 /* enable interrupts for IT context */
1079 reg_write(video->ohci, OHCI1394_IsoXmitIntMaskSet, (1 << video->ohci_it_ctx));
1080 debug_printk("dv1394: interrupts enabled for IT context %d\n", video->ohci_it_ctx);
1081
1082 /* set up register offsets for IR context */
1083 /* IR DMA context registers are spaced 32 bytes apart */
1084 video->ohci_IsoRcvContextControlSet = OHCI1394_IsoRcvContextControlSet+32*video->ohci_ir_ctx;
1085 video->ohci_IsoRcvContextControlClear = OHCI1394_IsoRcvContextControlClear+32*video->ohci_ir_ctx;
1086 video->ohci_IsoRcvCommandPtr = OHCI1394_IsoRcvCommandPtr+32*video->ohci_ir_ctx;
1087 video->ohci_IsoRcvContextMatch = OHCI1394_IsoRcvContextMatch+32*video->ohci_ir_ctx;
1088
1089 /* enable interrupts for IR context */
1090 reg_write(video->ohci, OHCI1394_IsoRecvIntMaskSet, (1 << video->ohci_ir_ctx) );
1091 debug_printk("dv1394: interrupts enabled for IR context %d\n", video->ohci_ir_ctx);
1092
1093 return 0;
1094
1095err:
1096 do_dv1394_shutdown(video, 1);
1097 return retval;
1098}
1099
1100/* if the user doesn't bother to call ioctl(INIT) before starting
1101   mmap() or read()/write(), just fall back to some sensible default values */
1102
1103static int do_dv1394_init_default(struct video_card *video)
1104{
1105 struct dv1394_init init;
1106
1107 init.api_version = DV1394_API_VERSION;
1108 init.n_frames = DV1394_MAX_FRAMES / 4;
1109 /* the following are now set via devfs */
1110 init.channel = video->channel;
1111 init.format = video->pal_or_ntsc;
1112 init.cip_n = video->cip_n;
1113 init.cip_d = video->cip_d;
1114 init.syt_offset = video->syt_offset;
1115
1116 return do_dv1394_init(video, &init);
1117}
1118
1119/* do NOT call from interrupt context */
1120static void stop_dma(struct video_card *video)
1121{
1122 unsigned long flags;
1123 int i;
1124
1125 /* no interrupts */
1126 spin_lock_irqsave(&video->spinlock, flags);
1127
1128 video->dma_running = 0;
1129
1130 if ( (video->ohci_it_ctx == -1) && (video->ohci_ir_ctx == -1) )
1131 goto out;
1132
1133 /* stop DMA if in progress */
1134 if ( (video->active_frame != -1) ||
1135 (reg_read(video->ohci, video->ohci_IsoXmitContextControlClear) & (1 << 10)) ||
1136 (reg_read(video->ohci, video->ohci_IsoRcvContextControlClear) & (1 << 10)) ) {
1137
1138 /* clear the .run bits */
1139 reg_write(video->ohci, video->ohci_IsoXmitContextControlClear, (1 << 15));
1140 reg_write(video->ohci, video->ohci_IsoRcvContextControlClear, (1 << 15));
1141 flush_pci_write(video->ohci);
1142
1143 video->active_frame = -1;
1144 video->first_run = 1;
1145
1146 /* wait until DMA really stops */
1147 i = 0;
1148 while (i < 1000) {
1149
1150 /* wait 0.1 millisecond */
1151 udelay(100);
1152
1153 if ( (reg_read(video->ohci, video->ohci_IsoXmitContextControlClear) & (1 << 10)) ||
1154 (reg_read(video->ohci, video->ohci_IsoRcvContextControlClear) & (1 << 10)) ) {
1155 /* still active */
1156 debug_printk("dv1394: stop_dma: DMA not stopped yet\n" );
1157 mb();
1158 } else {
1159 debug_printk("dv1394: stop_dma: DMA stopped safely after %d ms\n", i/10);
1160 break;
1161 }
1162
1163 i++;
1164 }
1165
1166 if (i == 1000) {
1167 printk(KERN_ERR "dv1394: stop_dma: DMA still going after %d ms!\n", i/10);
1168 }
1169 }
1170 else
1171 debug_printk("dv1394: stop_dma: already stopped.\n");
1172
1173out:
1174 spin_unlock_irqrestore(&video->spinlock, flags);
1175}
1176
1177
1178
1179static void do_dv1394_shutdown(struct video_card *video, int free_dv_buf)
1180{
1181 int i;
1182
1183 debug_printk("dv1394: shutdown...\n");
1184
1185 /* stop DMA if in progress */
1186 stop_dma(video);
1187
1188 /* release the DMA contexts */
1189 if (video->ohci_it_ctx != -1) {
1190 video->ohci_IsoXmitContextControlSet = 0;
1191 video->ohci_IsoXmitContextControlClear = 0;
1192 video->ohci_IsoXmitCommandPtr = 0;
1193
1194 /* disable interrupts for IT context */
1195 reg_write(video->ohci, OHCI1394_IsoXmitIntMaskClear, (1 << video->ohci_it_ctx));
1196
1197 /* remove tasklet */
1198 ohci1394_unregister_iso_tasklet(video->ohci, &video->it_tasklet);
1199 debug_printk("dv1394: IT context %d released\n", video->ohci_it_ctx);
1200 video->ohci_it_ctx = -1;
1201 }
1202
1203 if (video->ohci_ir_ctx != -1) {
1204 video->ohci_IsoRcvContextControlSet = 0;
1205 video->ohci_IsoRcvContextControlClear = 0;
1206 video->ohci_IsoRcvCommandPtr = 0;
1207 video->ohci_IsoRcvContextMatch = 0;
1208
1209 /* disable interrupts for IR context */
1210 reg_write(video->ohci, OHCI1394_IsoRecvIntMaskClear, (1 << video->ohci_ir_ctx));
1211
1212 /* remove tasklet */
1213 ohci1394_unregister_iso_tasklet(video->ohci, &video->ir_tasklet);
1214 debug_printk("dv1394: IR context %d released\n", video->ohci_ir_ctx);
1215 video->ohci_ir_ctx = -1;
1216 }
1217
1218 /* release the ISO channel */
1219 if (video->channel != -1) {
1220 u64 chan_mask;
1221 unsigned long flags;
1222
1223 chan_mask = (u64)1 << video->channel;
1224
1225 spin_lock_irqsave(&video->ohci->IR_channel_lock, flags);
1226 video->ohci->ISO_channel_usage &= ~(chan_mask);
1227 spin_unlock_irqrestore(&video->ohci->IR_channel_lock, flags);
1228
1229 video->channel = -1;
1230 }
1231
1232 /* free the frame structs */
1233 for (i = 0; i < DV1394_MAX_FRAMES; i++) {
1234 if (video->frames[i])
1235 frame_delete(video->frames[i]);
1236 video->frames[i] = NULL;
1237 }
1238
1239 video->n_frames = 0;
1240
1241 /* we can't free the DMA buffer unless it is guaranteed that
1242 no more user-space mappings exist */
1243
1244 if (free_dv_buf) {
1245 dma_region_free(&video->dv_buf);
1246 video->dv_buf_size = 0;
1247 }
1248
1249 /* free packet buffer */
1250 dma_region_free(&video->packet_buf);
1251 video->packet_buf_size = 0;
1252
1253 debug_printk("dv1394: shutdown OK\n");
1254}
1255
1256/*
1257 **********************************
1258 *** MMAP() THEORY OF OPERATION ***
1259 **********************************
1260
1261 The ringbuffer cannot be re-allocated or freed while
1262 a user program maintains a mapping of it. (note that a mapping
1263 can persist even after the device fd is closed!)
1264
1265 So, only let the user process allocate the DMA buffer once.
1266 To resize or deallocate it, you must close the device file
1267 and open it again.
1268
1269 Previously Dan M. hacked out a scheme that allowed the DMA
1270 buffer to change by forcefully unmapping it from the user's
1271 address space. It was prone to error because it's very hard to
1272 track all the places the buffer could have been mapped (we
1273 would have had to walk the vma list of every process in the
1274 system to be sure we found all the mappings!). Instead, we
1275 force the user to choose one buffer size and stick with
1276 it. This small sacrifice is worth the huge reduction in
1277 error-prone code in dv1394.
1278*/
1279
1280static int dv1394_mmap(struct file *file, struct vm_area_struct *vma)
1281{
1282 struct video_card *video = file_to_video_card(file);
1283 int retval = -EINVAL;
1284
1285 /* serialize mmap */
1286 down(&video->sem);
1287
1288 if ( ! video_card_initialized(video) ) {
1289 retval = do_dv1394_init_default(video);
1290 if (retval)
1291 goto out;
1292 }
1293
1294 retval = dma_region_mmap(&video->dv_buf, file, vma);
1295out:
1296 up(&video->sem);
1297 return retval;
1298}
1299
1300/*** DEVICE FILE INTERFACE *************************************************/
1301
1302/* no need to serialize, multiple threads OK */
1303static unsigned int dv1394_poll(struct file *file, struct poll_table_struct *wait)
1304{
1305 struct video_card *video = file_to_video_card(file);
1306 unsigned int mask = 0;
1307 unsigned long flags;
1308
1309 poll_wait(file, &video->waitq, wait);
1310
1311 spin_lock_irqsave(&video->spinlock, flags);
1312 if ( video->n_frames == 0 ) {
1313
1314 } else if ( video->active_frame == -1 ) {
1315 /* nothing going on */
1316 mask |= POLLOUT;
1317 } else {
1318 /* any clear/ready buffers? */
1319 if (video->n_clear_frames >0)
1320 mask |= POLLOUT | POLLIN;
1321 }
1322 spin_unlock_irqrestore(&video->spinlock, flags);
1323
1324 return mask;
1325}
1326
1327static int dv1394_fasync(int fd, struct file *file, int on)
1328{
1329 /* I just copied this code verbatim from Alan Cox's mouse driver example
1330 (Documentation/DocBook/) */
1331
1332 struct video_card *video = file_to_video_card(file);
1333
1334 int retval = fasync_helper(fd, file, on, &video->fasync);
1335
1336 if (retval < 0)
1337 return retval;
1338 return 0;
1339}
1340
1341static ssize_t dv1394_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
1342{
1343 struct video_card *video = file_to_video_card(file);
1344 DECLARE_WAITQUEUE(wait, current);
1345 ssize_t ret;
1346 size_t cnt;
1347 unsigned long flags;
1348 int target_frame;
1349
1350 /* serialize this to prevent multi-threaded mayhem */
1351 if (file->f_flags & O_NONBLOCK) {
1352 if (down_trylock(&video->sem))
1353 return -EAGAIN;
1354 } else {
1355 if (down_interruptible(&video->sem))
1356 return -ERESTARTSYS;
1357 }
1358
1359 if ( !video_card_initialized(video) ) {
1360 ret = do_dv1394_init_default(video);
1361 if (ret) {
1362 up(&video->sem);
1363 return ret;
1364 }
1365 }
1366
1367 ret = 0;
1368 add_wait_queue(&video->waitq, &wait);
1369
1370 while (count > 0) {
1371
1372 /* must set TASK_INTERRUPTIBLE *before* checking for free
1373 buffers; otherwise we could miss a wakeup if the interrupt
1374 fires between the check and the schedule() */
1375
1376 set_current_state(TASK_INTERRUPTIBLE);
1377
1378 spin_lock_irqsave(&video->spinlock, flags);
1379
1380 target_frame = video->first_clear_frame;
1381
1382 spin_unlock_irqrestore(&video->spinlock, flags);
1383
1384 if (video->frames[target_frame]->state == FRAME_CLEAR) {
1385
1386 /* how much room is left in the target frame buffer */
1387 cnt = video->frame_size - (video->write_off - target_frame * video->frame_size);
1388
1389 } else {
1390 /* buffer is already used */
1391 cnt = 0;
1392 }
1393
1394 if (cnt > count)
1395 cnt = count;
1396
1397 if (cnt <= 0) {
1398 /* no room left, gotta wait */
1399 if (file->f_flags & O_NONBLOCK) {
1400 if (!ret)
1401 ret = -EAGAIN;
1402 break;
1403 }
1404 if (signal_pending(current)) {
1405 if (!ret)
1406 ret = -ERESTARTSYS;
1407 break;
1408 }
1409
1410 schedule();
1411
1412 continue; /* start over from 'while(count > 0)...' */
1413 }
1414
1415 if (copy_from_user(video->dv_buf.kvirt + video->write_off, buffer, cnt)) {
1416 if (!ret)
1417 ret = -EFAULT;
1418 break;
1419 }
1420
1421 video->write_off = (video->write_off + cnt) % (video->n_frames * video->frame_size);
1422
1423 count -= cnt;
1424 buffer += cnt;
1425 ret += cnt;
1426
1427 if (video->write_off == video->frame_size * ((target_frame + 1) % video->n_frames))
1428 frame_prepare(video, target_frame);
1429 }
1430
1431 remove_wait_queue(&video->waitq, &wait);
1432 set_current_state(TASK_RUNNING);
1433 up(&video->sem);
1434 return ret;
1435}
1436
1437
1438static ssize_t dv1394_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
1439{
1440 struct video_card *video = file_to_video_card(file);
1441 DECLARE_WAITQUEUE(wait, current);
1442 ssize_t ret;
1443 size_t cnt;
1444 unsigned long flags;
1445 int target_frame;
1446
1447 /* serialize this to prevent multi-threaded mayhem */
1448 if (file->f_flags & O_NONBLOCK) {
1449 if (down_trylock(&video->sem))
1450 return -EAGAIN;
1451 } else {
1452 if (down_interruptible(&video->sem))
1453 return -ERESTARTSYS;
1454 }
1455
1456 if ( !video_card_initialized(video) ) {
1457 ret = do_dv1394_init_default(video);
1458 if (ret) {
1459 up(&video->sem);
1460 return ret;
1461 }
1462 video->continuity_counter = -1;
1463
1464 receive_packets(video);
1465
1466 start_dma_receive(video);
1467 }
1468
1469 ret = 0;
1470 add_wait_queue(&video->waitq, &wait);
1471
1472 while (count > 0) {
1473
1474 /* must set TASK_INTERRUPTIBLE *before* checking for free
1475 buffers; otherwise we could miss a wakeup if the interrupt
1476 fires between the check and the schedule() */
1477
1478 set_current_state(TASK_INTERRUPTIBLE);
1479
1480 spin_lock_irqsave(&video->spinlock, flags);
1481
1482 target_frame = video->first_clear_frame;
1483
1484 spin_unlock_irqrestore(&video->spinlock, flags);
1485
1486 if (target_frame >= 0 &&
1487 video->n_clear_frames > 0 &&
1488 video->frames[target_frame]->state == FRAME_CLEAR) {
1489
1490 /* how much room is left in the target frame buffer */
1491 cnt = video->frame_size - (video->write_off - target_frame * video->frame_size);
1492
1493 } else {
1494 /* buffer is already used */
1495 cnt = 0;
1496 }
1497
1498 if (cnt > count)
1499 cnt = count;
1500
1501 if (cnt <= 0) {
1502 /* no room left, gotta wait */
1503 if (file->f_flags & O_NONBLOCK) {
1504 if (!ret)
1505 ret = -EAGAIN;
1506 break;
1507 }
1508 if (signal_pending(current)) {
1509 if (!ret)
1510 ret = -ERESTARTSYS;
1511 break;
1512 }
1513
1514 schedule();
1515
1516 continue; /* start over from 'while(count > 0)...' */
1517 }
1518
1519 if (copy_to_user(buffer, video->dv_buf.kvirt + video->write_off, cnt)) {
1520 if (!ret)
1521 ret = -EFAULT;
1522 break;
1523 }
1524
1525 video->write_off = (video->write_off + cnt) % (video->n_frames * video->frame_size);
1526
1527 count -= cnt;
1528 buffer += cnt;
1529 ret += cnt;
1530
1531 if (video->write_off == video->frame_size * ((target_frame + 1) % video->n_frames)) {
1532 spin_lock_irqsave(&video->spinlock, flags);
1533 video->n_clear_frames--;
1534 video->first_clear_frame = (video->first_clear_frame + 1) % video->n_frames;
1535 spin_unlock_irqrestore(&video->spinlock, flags);
1536 }
1537 }
1538
1539 remove_wait_queue(&video->waitq, &wait);
1540 set_current_state(TASK_RUNNING);
1541 up(&video->sem);
1542 return ret;
1543}
1544
1545
1546/*** DEVICE IOCTL INTERFACE ************************************************/
1547
1548static long dv1394_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1549{
1550 struct video_card *video;
1551 unsigned long flags;
1552 int ret = -EINVAL;
1553 void __user *argp = (void __user *)arg;
1554
1555 DECLARE_WAITQUEUE(wait, current);
1556
1557 lock_kernel();
1558 video = file_to_video_card(file);
1559
1560 /* serialize this to prevent multi-threaded mayhem */
1561 if (file->f_flags & O_NONBLOCK) {
1562 if (down_trylock(&video->sem)) {
1563 unlock_kernel();
1564 return -EAGAIN;
1565 }
1566 } else {
1567 if (down_interruptible(&video->sem)) {
1568 unlock_kernel();
1569 return -ERESTARTSYS;
1570 }
1571 }
1572
1573 switch(cmd)
1574 {
1575 case DV1394_IOC_SUBMIT_FRAMES: {
1576 unsigned int n_submit;
1577
1578 if ( !video_card_initialized(video) ) {
1579 ret = do_dv1394_init_default(video);
1580 if (ret)
1581 goto out;
1582 }
1583
1584 n_submit = (unsigned int) arg;
1585
1586 if (n_submit > video->n_frames) {
1587 ret = -EINVAL;
1588 goto out;
1589 }
1590
1591 while (n_submit > 0) {
1592
1593 add_wait_queue(&video->waitq, &wait);
1594 set_current_state(TASK_INTERRUPTIBLE);
1595
1596 spin_lock_irqsave(&video->spinlock, flags);
1597
1598 /* wait until video->first_clear_frame is really CLEAR */
1599 while (video->frames[video->first_clear_frame]->state != FRAME_CLEAR) {
1600
1601 spin_unlock_irqrestore(&video->spinlock, flags);
1602
1603 if (signal_pending(current)) {
1604 remove_wait_queue(&video->waitq, &wait);
1605 set_current_state(TASK_RUNNING);
1606 ret = -EINTR;
1607 goto out;
1608 }
1609
1610 schedule();
1611 set_current_state(TASK_INTERRUPTIBLE);
1612
1613 spin_lock_irqsave(&video->spinlock, flags);
1614 }
1615 spin_unlock_irqrestore(&video->spinlock, flags);
1616
1617 remove_wait_queue(&video->waitq, &wait);
1618 set_current_state(TASK_RUNNING);
1619
1620 frame_prepare(video, video->first_clear_frame);
1621
1622 n_submit--;
1623 }
1624
1625 ret = 0;
1626 break;
1627 }
1628
1629 case DV1394_IOC_WAIT_FRAMES: {
1630 unsigned int n_wait;
1631
1632 if ( !video_card_initialized(video) ) {
1633 ret = -EINVAL;
1634 goto out;
1635 }
1636
1637 n_wait = (unsigned int) arg;
1638
1639 /* since we re-run the last frame on underflow, we will
1640 never actually have n_frames clear frames; at most only
1641 n_frames - 1 */
1642
1643 if (n_wait > (video->n_frames-1) ) {
1644 ret = -EINVAL;
1645 goto out;
1646 }
1647
1648 add_wait_queue(&video->waitq, &wait);
1649 set_current_state(TASK_INTERRUPTIBLE);
1650
1651 spin_lock_irqsave(&video->spinlock, flags);
1652
1653 while (video->n_clear_frames < n_wait) {
1654
1655 spin_unlock_irqrestore(&video->spinlock, flags);
1656
1657 if (signal_pending(current)) {
1658 remove_wait_queue(&video->waitq, &wait);
1659 set_current_state(TASK_RUNNING);
1660 ret = -EINTR;
1661 goto out;
1662 }
1663
1664 schedule();
1665 set_current_state(TASK_INTERRUPTIBLE);
1666
1667 spin_lock_irqsave(&video->spinlock, flags);
1668 }
1669
1670 spin_unlock_irqrestore(&video->spinlock, flags);
1671
1672 remove_wait_queue(&video->waitq, &wait);
1673 set_current_state(TASK_RUNNING);
1674 ret = 0;
1675 break;
1676 }
1677
1678 case DV1394_IOC_RECEIVE_FRAMES: {
1679 unsigned int n_recv;
1680
1681 if ( !video_card_initialized(video) ) {
1682 ret = -EINVAL;
1683 goto out;
1684 }
1685
1686 n_recv = (unsigned int) arg;
1687
1688 /* at least one frame must be active */
1689 if (n_recv > (video->n_frames-1) ) {
1690 ret = -EINVAL;
1691 goto out;
1692 }
1693
1694 spin_lock_irqsave(&video->spinlock, flags);
1695
1696 /* release the clear frames */
1697 video->n_clear_frames -= n_recv;
1698
1699 /* advance the clear frame cursor */
1700 video->first_clear_frame = (video->first_clear_frame + n_recv) % video->n_frames;
1701
1702 /* reset dropped_frames */
1703 video->dropped_frames = 0;
1704
1705 spin_unlock_irqrestore(&video->spinlock, flags);
1706
1707 ret = 0;
1708 break;
1709 }
1710
1711 case DV1394_IOC_START_RECEIVE: {
1712 if ( !video_card_initialized(video) ) {
1713 ret = do_dv1394_init_default(video);
1714 if (ret)
1715 goto out;
1716 }
1717
1718 video->continuity_counter = -1;
1719
1720 receive_packets(video);
1721
1722 start_dma_receive(video);
1723
1724 ret = 0;
1725 break;
1726 }
1727
1728 case DV1394_IOC_INIT: {
1729 struct dv1394_init init;
1730 if (!argp) {
1731 ret = do_dv1394_init_default(video);
1732 } else {
1733 if (copy_from_user(&init, argp, sizeof(init))) {
1734 ret = -EFAULT;
1735 goto out;
1736 }
1737 ret = do_dv1394_init(video, &init);
1738 }
1739 break;
1740 }
1741
1742 case DV1394_IOC_SHUTDOWN:
1743 do_dv1394_shutdown(video, 0);
1744 ret = 0;
1745 break;
1746
1747
1748 case DV1394_IOC_GET_STATUS: {
1749 struct dv1394_status status;
1750
1751 if ( !video_card_initialized(video) ) {
1752 ret = -EINVAL;
1753 goto out;
1754 }
1755
1756 status.init.api_version = DV1394_API_VERSION;
1757 status.init.channel = video->channel;
1758 status.init.n_frames = video->n_frames;
1759 status.init.format = video->pal_or_ntsc;
1760 status.init.cip_n = video->cip_n;
1761 status.init.cip_d = video->cip_d;
1762 status.init.syt_offset = video->syt_offset;
1763
1764 status.first_clear_frame = video->first_clear_frame;
1765
1766 /* the rest of the fields need to be locked against the interrupt */
1767 spin_lock_irqsave(&video->spinlock, flags);
1768
1769 status.active_frame = video->active_frame;
1770 status.n_clear_frames = video->n_clear_frames;
1771
1772 status.dropped_frames = video->dropped_frames;
1773
1774 /* reset dropped_frames */
1775 video->dropped_frames = 0;
1776
1777 spin_unlock_irqrestore(&video->spinlock, flags);
1778
1779 if (copy_to_user(argp, &status, sizeof(status))) {
1780 ret = -EFAULT;
1781 goto out;
1782 }
1783
1784 ret = 0;
1785 break;
1786 }
1787
1788 default:
1789 break;
1790 }
1791
1792 out:
1793 up(&video->sem);
1794 unlock_kernel();
1795 return ret;
1796}
1797
1798/*** DEVICE FILE INTERFACE CONTINUED ***************************************/
1799
1800static int dv1394_open(struct inode *inode, struct file *file)
1801{
1802 struct video_card *video = NULL;
1803
1804 /* if the device was opened through devfs, then file->private_data
1805 has already been set to video by devfs */
1806 if (file->private_data) {
1807 video = (struct video_card*) file->private_data;
1808
1809 } else {
1810 /* look up the card by ID */
1811 unsigned long flags;
1812
1813 spin_lock_irqsave(&dv1394_cards_lock, flags);
1814 if (!list_empty(&dv1394_cards)) {
1815 struct video_card *p;
1816 list_for_each_entry(p, &dv1394_cards, list) {
1817 if ((p->id) == ieee1394_file_to_instance(file)) {
1818 video = p;
1819 break;
1820 }
1821 }
1822 }
1823 spin_unlock_irqrestore(&dv1394_cards_lock, flags);
1824
1825 if (!video) {
1826			debug_printk("dv1394: OHCI card %d not found\n", ieee1394_file_to_instance(file));
1827 return -ENODEV;
1828 }
1829
1830 file->private_data = (void*) video;
1831 }
1832
1833#ifndef DV1394_ALLOW_MORE_THAN_ONE_OPEN
1834
1835 if ( test_and_set_bit(0, &video->open) ) {
1836 /* video is already open by someone else */
1837 return -EBUSY;
1838 }
1839
1840#endif
1841
1842 return 0;
1843}
1844
1845
1846static int dv1394_release(struct inode *inode, struct file *file)
1847{
1848 struct video_card *video = file_to_video_card(file);
1849
1850 /* OK to free the DMA buffer, no more mappings can exist */
1851 do_dv1394_shutdown(video, 1);
1852
1853 /* clean up async I/O users */
1854 dv1394_fasync(-1, file, 0);
1855
1856 /* give someone else a turn */
1857 clear_bit(0, &video->open);
1858
1859 return 0;
1860}
1861
1862
1863/*** DEVICE DRIVER HANDLERS ************************************************/
1864
1865static void it_tasklet_func(unsigned long data)
1866{
1867 int wake = 0;
1868 struct video_card *video = (struct video_card*) data;
1869
1870 spin_lock(&video->spinlock);
1871
1872 if (!video->dma_running)
1873 goto out;
1874
1875 irq_printk("ContextControl = %08x, CommandPtr = %08x\n",
1876 reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
1877 reg_read(video->ohci, video->ohci_IsoXmitCommandPtr)
1878 );
1879
1880
1881 if ( (video->ohci_it_ctx != -1) &&
1882 (reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & (1 << 10)) ) {
1883
1884 struct frame *f;
1885 unsigned int frame, i;
1886
1887
1888 if (video->active_frame == -1)
1889 frame = 0;
1890 else
1891 frame = video->active_frame;
1892
1893 /* check all the DMA-able frames */
1894 for (i = 0; i < video->n_frames; i++, frame = (frame+1) % video->n_frames) {
1895
1896 irq_printk("IRQ checking frame %d...", frame);
1897 f = video->frames[frame];
1898 if (f->state != FRAME_READY) {
1899 irq_printk("clear, skipping\n");
1900 /* we don't own this frame */
1901 continue;
1902 }
1903
1904 irq_printk("DMA\n");
1905
1906 /* check the frame begin semaphore to see if we can free the previous frame */
1907 if ( *(f->frame_begin_timestamp) ) {
1908 int prev_frame;
1909 struct frame *prev_f;
1910
1911
1912
1913 /* don't reset, need this later *(f->frame_begin_timestamp) = 0; */
1914 irq_printk(" BEGIN\n");
1915
1916 prev_frame = frame - 1;
1917 if (prev_frame == -1)
1918 prev_frame += video->n_frames;
1919 prev_f = video->frames[prev_frame];
1920
1921 /* make sure we can actually garbage collect
1922 this frame */
1923 if ( (prev_f->state == FRAME_READY) &&
1924 prev_f->done && (!f->done) )
1925 {
1926 frame_reset(prev_f);
1927 video->n_clear_frames++;
1928 wake = 1;
1929 video->active_frame = frame;
1930
1931 irq_printk(" BEGIN - freeing previous frame %d, new active frame is %d\n", prev_frame, frame);
1932 } else {
1933 irq_printk(" BEGIN - can't free yet\n");
1934 }
1935
1936 f->done = 1;
1937 }
1938
1939
1940 /* see if we need to set the timestamp for the next frame */
1941 if ( *(f->mid_frame_timestamp) ) {
1942 struct frame *next_frame;
1943 u32 begin_ts, ts_cyc, ts_off;
1944
1945 *(f->mid_frame_timestamp) = 0;
1946
1947 begin_ts = le32_to_cpu(*(f->frame_begin_timestamp));
1948
1949 irq_printk(" MIDDLE - first packet was sent at cycle %4u (%2u), assigned timestamp was (%2u) %4u\n",
1950 begin_ts & 0x1FFF, begin_ts & 0xF,
1951 f->assigned_timestamp >> 12, f->assigned_timestamp & 0xFFF);
1952
1953 /* prepare next frame and assign timestamp */
1954 next_frame = video->frames[ (frame+1) % video->n_frames ];
1955
1956 if (next_frame->state == FRAME_READY) {
1957 irq_printk(" MIDDLE - next frame is ready, good\n");
1958 } else {
1959 debug_printk("dv1394: Underflow! At least one frame has been dropped.\n");
1960 next_frame = f;
1961 }
1962
1963 /* set the timestamp to the timestamp of the last frame sent,
1964 plus the length of the last frame sent, plus the syt latency */
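				/* (the SYT timestamp is a 16-bit cycle-timer
				   snapshot: the low 4 bits of the cycle count
				   go in bits 15..12 and the 12-bit cycle
				   offset in bits 11..0; one 125us cycle is
				   3072 offset ticks, hence the /3072 and
				   %3072 below) */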
1965 ts_cyc = begin_ts & 0xF;
1966 /* advance one frame, plus syt latency (typically 2-3) */
1967				ts_cyc += f->n_packets + video->syt_offset;
1968
1969 ts_off = 0;
1970
1971 ts_cyc += ts_off/3072;
1972 ts_off %= 3072;
1973
1974 next_frame->assigned_timestamp = ((ts_cyc&0xF) << 12) + ts_off;
1975 if (next_frame->cip_syt1) {
1976 next_frame->cip_syt1->b[6] = next_frame->assigned_timestamp >> 8;
1977 next_frame->cip_syt1->b[7] = next_frame->assigned_timestamp & 0xFF;
1978 }
1979 if (next_frame->cip_syt2) {
1980 next_frame->cip_syt2->b[6] = next_frame->assigned_timestamp >> 8;
1981 next_frame->cip_syt2->b[7] = next_frame->assigned_timestamp & 0xFF;
1982 }
1983
1984 }
1985
1986 /* see if the frame looped */
1987 if ( *(f->frame_end_timestamp) ) {
1988
1989 *(f->frame_end_timestamp) = 0;
1990
1991 debug_printk(" END - the frame looped at least once\n");
1992
1993 video->dropped_frames++;
1994 }
1995
1996 } /* for (each frame) */
1997 }
1998
1999 if (wake) {
2000 kill_fasync(&video->fasync, SIGIO, POLL_OUT);
2001
2002 /* wake readers/writers/ioctl'ers */
2003 wake_up_interruptible(&video->waitq);
2004 }
2005
2006out:
2007 spin_unlock(&video->spinlock);
2008}
2009
2010static void ir_tasklet_func(unsigned long data)
2011{
2012 int wake = 0;
2013 struct video_card *video = (struct video_card*) data;
2014
2015 spin_lock(&video->spinlock);
2016
2017 if (!video->dma_running)
2018 goto out;
2019
2020 if ( (video->ohci_ir_ctx != -1) &&
2021 (reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 10)) ) {
2022
2023 int sof=0; /* start-of-frame flag */
2024 struct frame *f;
2025 u16 packet_length, packet_time;
2026 int i, dbc=0;
2027 struct DMA_descriptor_block *block = NULL;
2028 u16 xferstatus;
2029
2030 int next_i, prev_i;
2031 struct DMA_descriptor_block *next = NULL;
2032 dma_addr_t next_dma = 0;
2033 struct DMA_descriptor_block *prev = NULL;
2034
2035 /* loop over all descriptors in all frames */
2036 for (i = 0; i < video->n_frames*MAX_PACKETS; i++) {
2037 struct packet *p = dma_region_i(&video->packet_buf, struct packet, video->current_packet);
2038
2039 /* make sure we are seeing the latest changes to p */
2040 dma_region_sync_for_cpu(&video->packet_buf,
2041 (unsigned long) p - (unsigned long) video->packet_buf.kvirt,
2042 sizeof(struct packet));
2043
2044 packet_length = le16_to_cpu(p->data_length);
2045 packet_time = le16_to_cpu(p->timestamp);
2046
2047 irq_printk("received packet %02d, timestamp=%04x, length=%04x, sof=%02x%02x\n", video->current_packet,
2048 packet_time, packet_length,
2049 p->data[0], p->data[1]);
2050
2051 /* get the descriptor based on packet_buffer cursor */
2052 f = video->frames[video->current_packet / MAX_PACKETS];
2053 block = &(f->descriptor_pool[video->current_packet % MAX_PACKETS]);
2054 xferstatus = le32_to_cpu(block->u.in.il.q[3]) >> 16;
2055 xferstatus &= 0x1F;
2056 irq_printk("ir_tasklet_func: xferStatus/resCount [%d] = 0x%08x\n", i, le32_to_cpu(block->u.in.il.q[3]) );
2057
2058 /* get the current frame */
2059 f = video->frames[video->active_frame];
2060
2061 /* exclude empty packet */
2062 if (packet_length > 8 && xferstatus == 0x11) {
2063 /* check for start of frame */
2064 /* DRD> Changed to check section type ([0]>>5==0)
2065 and dif sequence ([1]>>4==0) */
2066 sof = ( (p->data[0] >> 5) == 0 && (p->data[1] >> 4) == 0);
2067
2068 dbc = (int) (p->cip_h1 >> 24);
2069 if ( video->continuity_counter != -1 && dbc > ((video->continuity_counter + 1) % 256) )
2070 {
2071 printk(KERN_WARNING "dv1394: discontinuity detected, dropping all frames\n" );
2072 video->dropped_frames += video->n_clear_frames + 1;
2073 video->first_frame = 0;
2074 video->n_clear_frames = 0;
2075 video->first_clear_frame = -1;
2076 }
2077 video->continuity_counter = dbc;
2078
2079 if (!video->first_frame) {
2080 if (sof) {
2081 video->first_frame = 1;
2082 }
2083
2084 } else if (sof) {
2085 /* close current frame */
2086 frame_reset(f); /* f->state = STATE_CLEAR */
2087 video->n_clear_frames++;
2088 if (video->n_clear_frames > video->n_frames) {
2089 video->dropped_frames++;
2090 printk(KERN_WARNING "dv1394: dropped a frame during reception\n" );
2091 video->n_clear_frames = video->n_frames-1;
2092 video->first_clear_frame = (video->first_clear_frame + 1) % video->n_frames;
2093 }
2094 if (video->first_clear_frame == -1)
2095 video->first_clear_frame = video->active_frame;
2096
2097 /* get the next frame */
2098 video->active_frame = (video->active_frame + 1) % video->n_frames;
2099 f = video->frames[video->active_frame];
2100 irq_printk(" frame received, active_frame = %d, n_clear_frames = %d, first_clear_frame = %d\n",
2101 video->active_frame, video->n_clear_frames, video->first_clear_frame);
2102 }
2103 if (video->first_frame) {
2104 if (sof) {
2105 /* open next frame */
2106 f->state = FRAME_READY;
2107 }
2108
2109 /* copy to buffer */
2110 if (f->n_packets > (video->frame_size / 480)) {
2111					printk(KERN_ERR "dv1394: frame buffer overflow during receive\n");
2112 }
2113
2114 frame_put_packet(f, p);
2115
2116 } /* first_frame */
2117 }
2118
2119 /* stop, end of ready packets */
2120 else if (xferstatus == 0) {
2121 break;
2122 }
2123
2124 /* reset xferStatus & resCount */
2125 block->u.in.il.q[3] = cpu_to_le32(512);
2126
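			/* keep the descriptor ring "open" at the consumer's
			   position: clear the branch word of the descriptor at
			   the cursor so the IR context stalls there instead of
			   overwriting packets we have not processed yet, then
			   re-link the previous descriptor to point at it */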
2127 /* terminate dma chain at this (next) packet */
2128 next_i = video->current_packet;
2129 f = video->frames[next_i / MAX_PACKETS];
2130 next = &(f->descriptor_pool[next_i % MAX_PACKETS]);
2131 next_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
2132 next->u.in.il.q[0] |= 3 << 20; /* enable interrupt */
2133 next->u.in.il.q[2] = 0; /* disable branch */
2134
2135 /* link previous to next */
2136 prev_i = (next_i == 0) ? (MAX_PACKETS * video->n_frames - 1) : (next_i - 1);
2137 f = video->frames[prev_i / MAX_PACKETS];
2138 prev = &(f->descriptor_pool[prev_i % MAX_PACKETS]);
2139 if (prev_i % (MAX_PACKETS/2)) {
2140 prev->u.in.il.q[0] &= ~(3 << 20); /* no interrupt */
2141 } else {
2142 prev->u.in.il.q[0] |= 3 << 20; /* enable interrupt */
2143 }
2144 prev->u.in.il.q[2] = cpu_to_le32(next_dma | 1); /* set Z=1 */
2145 wmb();
2146
2147 /* wake up DMA in case it fell asleep */
2148 reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 12));
2149
2150 /* advance packet_buffer cursor */
2151 video->current_packet = (video->current_packet + 1) % (MAX_PACKETS * video->n_frames);
2152
2153 } /* for all packets */
2154
2155 wake = 1; /* why the hell not? */
2156
2157 } /* receive interrupt */
2158
2159 if (wake) {
2160 kill_fasync(&video->fasync, SIGIO, POLL_IN);
2161
2162 /* wake readers/writers/ioctl'ers */
2163 wake_up_interruptible(&video->waitq);
2164 }
2165
2166out:
2167 spin_unlock(&video->spinlock);
2168}
2169
2170static struct cdev dv1394_cdev;
2171static struct file_operations dv1394_fops =
2172{
2173 .owner = THIS_MODULE,
2174 .poll = dv1394_poll,
2175 .unlocked_ioctl = dv1394_ioctl,
2176#ifdef CONFIG_COMPAT
2177 .compat_ioctl = dv1394_compat_ioctl,
2178#endif
2179 .mmap = dv1394_mmap,
2180 .open = dv1394_open,
2181 .write = dv1394_write,
2182 .read = dv1394_read,
2183 .release = dv1394_release,
2184 .fasync = dv1394_fasync,
2185};
2186
2187
2188/*** HOTPLUG STUFF **********************************************************/
2189/*
2190 * Export information about protocols/devices supported by this driver.
2191 */
2192static struct ieee1394_device_id dv1394_id_table[] = {
2193 {
2194 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2195 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
2196 .version = AVC_SW_VERSION_ENTRY & 0xffffff
2197 },
2198 { }
2199};
2200
2201MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table);
2202
2203static struct hpsb_protocol_driver dv1394_driver = {
2204 .name = "DV/1394 Driver",
2205 .id_table = dv1394_id_table,
2206 .driver = {
2207 .name = "dv1394",
2208 .bus = &ieee1394_bus_type,
2209 },
2210};
2211
2212
2213/*** IEEE1394 HPSB CALLBACKS ***********************************************/
2214
2215static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes mode)
2216{
2217 struct video_card *video;
2218 unsigned long flags;
2219 int i;
2220
2221 video = kmalloc(sizeof(struct video_card), GFP_KERNEL);
2222 if (!video) {
2223 printk(KERN_ERR "dv1394: cannot allocate video_card\n");
2224 goto err;
2225 }
2226
2227 memset(video, 0, sizeof(struct video_card));
2228
2229 video->ohci = ohci;
2230 /* lower 2 bits of id indicate which of four "plugs"
2231 per host */
2232 video->id = ohci->host->id << 2;
2233 if (format == DV1394_NTSC)
2234 video->id |= mode;
2235 else
2236 video->id |= 2 + mode;
2237
2238 video->ohci_it_ctx = -1;
2239 video->ohci_ir_ctx = -1;
2240
2241 video->ohci_IsoXmitContextControlSet = 0;
2242 video->ohci_IsoXmitContextControlClear = 0;
2243 video->ohci_IsoXmitCommandPtr = 0;
2244
2245 video->ohci_IsoRcvContextControlSet = 0;
2246 video->ohci_IsoRcvContextControlClear = 0;
2247 video->ohci_IsoRcvCommandPtr = 0;
2248 video->ohci_IsoRcvContextMatch = 0;
2249
2250 video->n_frames = 0; /* flag that video is not initialized */
2251 video->channel = 63; /* default to broadcast channel */
2252 video->active_frame = -1;
2253
2254 /* initialize the following */
2255 video->pal_or_ntsc = format;
2256 video->cip_n = 0; /* 0 = use builtin default */
2257 video->cip_d = 0;
2258 video->syt_offset = 0;
2259 video->mode = mode;
2260
2261 for (i = 0; i < DV1394_MAX_FRAMES; i++)
2262 video->frames[i] = NULL;
2263
2264 dma_region_init(&video->dv_buf);
2265 video->dv_buf_size = 0;
2266 dma_region_init(&video->packet_buf);
2267 video->packet_buf_size = 0;
2268
2269 clear_bit(0, &video->open);
2270 spin_lock_init(&video->spinlock);
2271 video->dma_running = 0;
2272 init_MUTEX(&video->sem);
2273 init_waitqueue_head(&video->waitq);
2274 video->fasync = NULL;
2275
2276 spin_lock_irqsave(&dv1394_cards_lock, flags);
2277 INIT_LIST_HEAD(&video->list);
2278 list_add_tail(&video->list, &dv1394_cards);
2279 spin_unlock_irqrestore(&dv1394_cards_lock, flags);
2280
2281 if (devfs_mk_cdev(MKDEV(IEEE1394_MAJOR,
2282 IEEE1394_MINOR_BLOCK_DV1394*16 + video->id),
2283 S_IFCHR|S_IRUGO|S_IWUGO,
2284 "ieee1394/dv/host%d/%s/%s",
2285 (video->id>>2),
2286 (video->pal_or_ntsc == DV1394_NTSC ? "NTSC" : "PAL"),
2287 (video->mode == MODE_RECEIVE ? "in" : "out")) < 0)
2288 goto err_free;
2289
2290 debug_printk("dv1394: dv1394_init() OK on ID %d\n", video->id);
2291
2292 return 0;
2293
2294 err_free:
2295 kfree(video);
2296 err:
2297 return -1;
2298}
2299
2300static void dv1394_un_init(struct video_card *video)
2301{
2302 char buf[32];
2303
2304 /* obviously nobody has the driver open at this point */
2305 do_dv1394_shutdown(video, 1);
2306 snprintf(buf, sizeof(buf), "dv/host%d/%s/%s", (video->id >> 2),
2307 (video->pal_or_ntsc == DV1394_NTSC ? "NTSC" : "PAL"),
2308 (video->mode == MODE_RECEIVE ? "in" : "out")
2309 );
2310
2311 devfs_remove("ieee1394/%s", buf);
2312 kfree(video);
2313}
2314
2315
2316static void dv1394_remove_host (struct hpsb_host *host)
2317{
2318 struct video_card *video;
2319 unsigned long flags;
2320 int id = host->id;
2321
2322 /* We only work with the OHCI-1394 driver */
2323 if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
2324 return;
2325
2326 /* find the corresponding video_cards */
2327 do {
2328 struct video_card *tmp_vid;
2329
2330 video = NULL;
2331
2332 spin_lock_irqsave(&dv1394_cards_lock, flags);
2333 list_for_each_entry(tmp_vid, &dv1394_cards, list) {
2334 if ((tmp_vid->id >> 2) == id) {
2335 list_del(&tmp_vid->list);
2336 video = tmp_vid;
2337 break;
2338 }
2339 }
2340 spin_unlock_irqrestore(&dv1394_cards_lock, flags);
2341
2342 if (video)
2343 dv1394_un_init(video);
2344 } while (video != NULL);
2345
2346 class_simple_device_remove(MKDEV(
2347 IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16 + (id<<2)));
2348 devfs_remove("ieee1394/dv/host%d/NTSC", id);
2349 devfs_remove("ieee1394/dv/host%d/PAL", id);
2350 devfs_remove("ieee1394/dv/host%d", id);
2351}
2352
2353static void dv1394_add_host (struct hpsb_host *host)
2354{
2355 struct ti_ohci *ohci;
2356 int id = host->id;
2357
2358 /* We only work with the OHCI-1394 driver */
2359 if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
2360 return;
2361
2362 ohci = (struct ti_ohci *)host->hostdata;
2363
2364 class_simple_device_add(hpsb_protocol_class, MKDEV(
2365 IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16 + (id<<2)),
2366 NULL, "dv1394-%d", id);
2367 devfs_mk_dir("ieee1394/dv/host%d", id);
2368 devfs_mk_dir("ieee1394/dv/host%d/NTSC", id);
2369 devfs_mk_dir("ieee1394/dv/host%d/PAL", id);
2370
2371 dv1394_init(ohci, DV1394_NTSC, MODE_RECEIVE);
2372 dv1394_init(ohci, DV1394_NTSC, MODE_TRANSMIT);
2373 dv1394_init(ohci, DV1394_PAL, MODE_RECEIVE);
2374 dv1394_init(ohci, DV1394_PAL, MODE_TRANSMIT);
2375}
2376
2377
2378/* Bus reset handler. In the event of a bus reset, we may need to
2379 re-start the DMA contexts - otherwise the user program would
2380 end up waiting forever.
2381*/
2382
2383static void dv1394_host_reset(struct hpsb_host *host)
2384{
2385 struct ti_ohci *ohci;
2386 struct video_card *video = NULL, *tmp_vid;
2387 unsigned long flags;
2388
2389 /* We only work with the OHCI-1394 driver */
2390 if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
2391 return;
2392
2393 ohci = (struct ti_ohci *)host->hostdata;
2394
2395
2396 /* find the corresponding video_cards */
2397 spin_lock_irqsave(&dv1394_cards_lock, flags);
2398 list_for_each_entry(tmp_vid, &dv1394_cards, list) {
2399 if ((tmp_vid->id >> 2) == host->id) {
2400 video = tmp_vid;
2401 break;
2402 }
2403 }
2404 spin_unlock_irqrestore(&dv1394_cards_lock, flags);
2405
2406 if (!video)
2407 return;
2408
2409
2410 spin_lock_irqsave(&video->spinlock, flags);
2411
2412 if (!video->dma_running)
2413 goto out;
2414
2415 /* check IT context */
2416 if (video->ohci_it_ctx != -1) {
2417 u32 ctx;
2418
2419 ctx = reg_read(video->ohci, video->ohci_IsoXmitContextControlSet);
2420
2421 /* if (RUN but not ACTIVE) */
2422 if ( (ctx & (1<<15)) &&
2423 !(ctx & (1<<10)) ) {
2424
2425 debug_printk("dv1394: IT context stopped due to bus reset; waking it up\n");
2426
2427 /* to be safe, assume a frame has been dropped. User-space programs
2428 should handle this condition like an underflow. */
2429 video->dropped_frames++;
2430
2431 /* for some reason you must clear, then re-set the RUN bit to restart DMA */
2432
2433 /* clear RUN */
2434 reg_write(video->ohci, video->ohci_IsoXmitContextControlClear, (1 << 15));
2435 flush_pci_write(video->ohci);
2436
2437 /* set RUN */
2438 reg_write(video->ohci, video->ohci_IsoXmitContextControlSet, (1 << 15));
2439 flush_pci_write(video->ohci);
2440
2441 /* set the WAKE bit (just in case; this isn't strictly necessary) */
2442 reg_write(video->ohci, video->ohci_IsoXmitContextControlSet, (1 << 12));
2443 flush_pci_write(video->ohci);
2444
2445 irq_printk("dv1394: AFTER IT restart ctx 0x%08x ptr 0x%08x\n",
2446 reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
2447 reg_read(video->ohci, video->ohci_IsoXmitCommandPtr));
2448 }
2449 }
2450
2451 /* check IR context */
2452 if (video->ohci_ir_ctx != -1) {
2453 u32 ctx;
2454
2455 ctx = reg_read(video->ohci, video->ohci_IsoRcvContextControlSet);
2456
2457 /* if (RUN but not ACTIVE) */
2458 if ( (ctx & (1<<15)) &&
2459 !(ctx & (1<<10)) ) {
2460
2461 debug_printk("dv1394: IR context stopped due to bus reset; waking it up\n");
2462
2463 /* to be safe, assume a frame has been dropped. User-space programs
2464 should handle this condition like an overflow. */
2465 video->dropped_frames++;
2466
2467 /* for some reason you must clear, then re-set the RUN bit to restart DMA */
2468 /* XXX this doesn't work for me, I can't get IR DMA to restart :[ */
2469
2470 /* clear RUN */
2471 reg_write(video->ohci, video->ohci_IsoRcvContextControlClear, (1 << 15));
2472 flush_pci_write(video->ohci);
2473
2474 /* set RUN */
2475 reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 15));
2476 flush_pci_write(video->ohci);
2477
2478 /* set the WAKE bit (just in case; this isn't strictly necessary) */
2479 reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 12));
2480 flush_pci_write(video->ohci);
2481
2482 irq_printk("dv1394: AFTER IR restart ctx 0x%08x ptr 0x%08x\n",
2483 reg_read(video->ohci, video->ohci_IsoRcvContextControlSet),
2484 reg_read(video->ohci, video->ohci_IsoRcvCommandPtr));
2485 }
2486 }
2487
2488out:
2489 spin_unlock_irqrestore(&video->spinlock, flags);
2490
2491 /* wake readers/writers/ioctl'ers */
2492 wake_up_interruptible(&video->waitq);
2493}
2494
2495static struct hpsb_highlevel dv1394_highlevel = {
2496 .name = "dv1394",
2497 .add_host = dv1394_add_host,
2498 .remove_host = dv1394_remove_host,
2499 .host_reset = dv1394_host_reset,
2500};
2501
2502#ifdef CONFIG_COMPAT
2503
2504#define DV1394_IOC32_INIT _IOW('#', 0x06, struct dv1394_init32)
2505#define DV1394_IOC32_GET_STATUS _IOR('#', 0x0c, struct dv1394_status32)
2506
2507struct dv1394_init32 {
2508 u32 api_version;
2509 u32 channel;
2510 u32 n_frames;
2511 u32 format;
2512 u32 cip_n;
2513 u32 cip_d;
2514 u32 syt_offset;
2515};
2516
2517struct dv1394_status32 {
2518 struct dv1394_init32 init;
2519 s32 active_frame;
2520 u32 first_clear_frame;
2521 u32 n_clear_frames;
2522 u32 dropped_frames;
2523};
2524
2525/* RED-PEN: this should use compat_alloc_user_space() instead */
2526
2527static int handle_dv1394_init(struct file *file, unsigned int cmd, unsigned long arg)
2528{
2529 struct dv1394_init32 dv32;
2530 struct dv1394_init dv;
2531 mm_segment_t old_fs;
2532 int ret;
2533
2534 if (file->f_op->unlocked_ioctl != dv1394_ioctl)
2535 return -EFAULT;
2536
2537 if (copy_from_user(&dv32, (void __user *)arg, sizeof(dv32)))
2538 return -EFAULT;
2539
2540 dv.api_version = dv32.api_version;
2541 dv.channel = dv32.channel;
2542 dv.n_frames = dv32.n_frames;
2543 dv.format = dv32.format;
2544 dv.cip_n = (unsigned long)dv32.cip_n;
2545 dv.cip_d = (unsigned long)dv32.cip_d;
2546 dv.syt_offset = dv32.syt_offset;
2547
2548 old_fs = get_fs();
2549 set_fs(KERNEL_DS);
2550 ret = dv1394_ioctl(file, DV1394_IOC_INIT, (unsigned long)&dv);
2551 set_fs(old_fs);
2552
2553 return ret;
2554}
2555
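/* A minimal sketch of the compat_alloc_user_space() approach the RED-PEN
 * note above asks for; it is not wired into dv1394_compat_ioctl() and the
 * _sketch name is purely illustrative.  It assumes <linux/compat.h> is
 * available for compat_alloc_user_space().  The 32-bit struct is widened
 * field by field into a struct dv1394_init placed in (compat) user space,
 * so dv1394_ioctl() can be called without the set_fs(KERNEL_DS) dance.
 */
static int handle_dv1394_init_sketch(struct file *file, unsigned long arg)
{
	struct dv1394_init32 dv32;
	struct dv1394_init __user *dv = compat_alloc_user_space(sizeof(*dv));

	if (copy_from_user(&dv32, (void __user *)arg, sizeof(dv32)))
		return -EFAULT;

	if (put_user(dv32.api_version, &dv->api_version) ||
	    put_user(dv32.channel, &dv->channel) ||
	    put_user(dv32.n_frames, &dv->n_frames) ||
	    put_user(dv32.format, &dv->format) ||
	    put_user((unsigned long)dv32.cip_n, &dv->cip_n) ||
	    put_user((unsigned long)dv32.cip_d, &dv->cip_d) ||
	    put_user(dv32.syt_offset, &dv->syt_offset))
		return -EFAULT;

	return dv1394_ioctl(file, DV1394_IOC_INIT, (unsigned long)dv);
}
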
2556static int handle_dv1394_get_status(struct file *file, unsigned int cmd, unsigned long arg)
2557{
2558 struct dv1394_status32 dv32;
2559 struct dv1394_status dv;
2560 mm_segment_t old_fs;
2561 int ret;
2562
2563 if (file->f_op->unlocked_ioctl != dv1394_ioctl)
2564 return -EFAULT;
2565
2566 old_fs = get_fs();
2567 set_fs(KERNEL_DS);
2568 ret = dv1394_ioctl(file, DV1394_IOC_GET_STATUS, (unsigned long)&dv);
2569 set_fs(old_fs);
2570
2571 if (!ret) {
2572 dv32.init.api_version = dv.init.api_version;
2573 dv32.init.channel = dv.init.channel;
2574 dv32.init.n_frames = dv.init.n_frames;
2575 dv32.init.format = dv.init.format;
2576 dv32.init.cip_n = (u32)dv.init.cip_n;
2577 dv32.init.cip_d = (u32)dv.init.cip_d;
2578 dv32.init.syt_offset = dv.init.syt_offset;
2579 dv32.active_frame = dv.active_frame;
2580 dv32.first_clear_frame = dv.first_clear_frame;
2581 dv32.n_clear_frames = dv.n_clear_frames;
2582 dv32.dropped_frames = dv.dropped_frames;
2583
2584 if (copy_to_user((struct dv1394_status32 __user *)arg, &dv32, sizeof(dv32)))
2585 ret = -EFAULT;
2586 }
2587
2588 return ret;
2589}
2590
2591
2592
2593static long dv1394_compat_ioctl(struct file *file, unsigned int cmd,
2594 unsigned long arg)
2595{
2596 switch (cmd) {
2597 case DV1394_IOC_SHUTDOWN:
2598 case DV1394_IOC_SUBMIT_FRAMES:
2599 case DV1394_IOC_WAIT_FRAMES:
2600 case DV1394_IOC_RECEIVE_FRAMES:
2601 case DV1394_IOC_START_RECEIVE:
2602 return dv1394_ioctl(file, cmd, arg);
2603
2604 case DV1394_IOC32_INIT:
2605 return handle_dv1394_init(file, cmd, arg);
2606 case DV1394_IOC32_GET_STATUS:
2607 return handle_dv1394_get_status(file, cmd, arg);
2608 default:
2609 return -ENOIOCTLCMD;
2610 }
2611}
2612
2613#endif /* CONFIG_COMPAT */
2614
2615
2616/*** KERNEL MODULE HANDLERS ************************************************/
2617
2618MODULE_AUTHOR("Dan Maas <dmaas@dcine.com>, Dan Dennedy <dan@dennedy.org>");
2619MODULE_DESCRIPTION("driver for DV input/output on OHCI board");
2620MODULE_SUPPORTED_DEVICE("dv1394");
2621MODULE_LICENSE("GPL");
2622
2623static void __exit dv1394_exit_module(void)
2624{
2625 hpsb_unregister_protocol(&dv1394_driver);
2626
2627 hpsb_unregister_highlevel(&dv1394_highlevel);
2628 cdev_del(&dv1394_cdev);
2629 devfs_remove("ieee1394/dv");
2630}
2631
2632static int __init dv1394_init_module(void)
2633{
2634 int ret;
2635
2636 cdev_init(&dv1394_cdev, &dv1394_fops);
2637 dv1394_cdev.owner = THIS_MODULE;
2638 kobject_set_name(&dv1394_cdev.kobj, "dv1394");
2639 ret = cdev_add(&dv1394_cdev, IEEE1394_DV1394_DEV, 16);
2640 if (ret) {
2641 printk(KERN_ERR "dv1394: unable to register character device\n");
2642 return ret;
2643 }
2644
2645 devfs_mk_dir("ieee1394/dv");
2646
2647 hpsb_register_highlevel(&dv1394_highlevel);
2648
2649 ret = hpsb_register_protocol(&dv1394_driver);
2650 if (ret) {
2651 printk(KERN_ERR "dv1394: failed to register protocol\n");
2652 hpsb_unregister_highlevel(&dv1394_highlevel);
2653 devfs_remove("ieee1394/dv");
2654 cdev_del(&dv1394_cdev);
2655 return ret;
2656 }
2657
2658 return 0;
2659}
2660
2661module_init(dv1394_init_module);
2662module_exit(dv1394_exit_module);
2663MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16);
diff --git a/drivers/ieee1394/dv1394.h b/drivers/ieee1394/dv1394.h
new file mode 100644
index 000000000000..5807f5289810
--- /dev/null
+++ b/drivers/ieee1394/dv1394.h
@@ -0,0 +1,305 @@
1/*
2 * dv1394.h - DV input/output over IEEE 1394 on OHCI chips
3 * Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
4 * receive by Dan Dennedy <dan@dennedy.org>
5 *
6 * based on:
7 * video1394.h - driver for OHCI 1394 boards
8 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
9 * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software Foundation,
23 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 */
25
26#ifndef _DV_1394_H
27#define _DV_1394_H
28
29/* This is the public user-space interface. Try not to break it. */
30
31#define DV1394_API_VERSION 0x20011127
32
33/* ********************
34 ** **
35 ** DV1394 API **
36 ** **
37 ********************
38
39 There are two methods of operating the DV1394 DV output device.
40
41 1)
42
43 The simplest is an interface based on write(): simply write
44 full DV frames of data to the device, and they will be transmitted
45 as quickly as possible. The FD may be set for non-blocking I/O,
46 in which case you can use select() or poll() to wait for output
47 buffer space.
48
49 To set the DV output parameters (e.g. whether you want NTSC or PAL
50 video), use the DV1394_INIT ioctl, passing in the parameters you
51 want in a struct dv1394_init.
52
53 Example 1:
54 To play a raw .DV file: cat foo.DV > /dev/dv1394
55 (cat will use write() internally)
56
57 Example 2:
58 static struct dv1394_init init = {
59	   DV1394_API_VERSION, 0x63,  (api version, broadcast channel)
60 4, (four-frame ringbuffer)
61 DV1394_NTSC, (send NTSC video)
62 0, 0 (default empty packet rate)
63 }
64
65 ioctl(fd, DV1394_INIT, &init);
66
67 while (1) {
68 read( <a raw DV file>, buf, DV1394_NTSC_FRAME_SIZE );
69 write( <the dv1394 FD>, buf, DV1394_NTSC_FRAME_SIZE );
70 }
71
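   For concreteness, here is a minimal, untested sketch of method 1 in C.
   The device node path, the input file name and the complete absence of
   error checking are assumptions of this sketch; adapt them to your setup.

       #include <fcntl.h>
       #include <string.h>
       #include <sys/ioctl.h>
       #include <unistd.h>
       #include "dv1394.h"

       int main(void)
       {
           static unsigned char buf[DV1394_NTSC_FRAME_SIZE];
           struct dv1394_init init;
           int dv = open("/dev/dv1394", O_RDWR);  // device path is an assumption
           int in = open("foo.DV", O_RDONLY);     // a raw DV file

           memset(&init, 0, sizeof(init));        // zero selects default cip/syt values
           init.api_version = DV1394_API_VERSION;
           init.channel     = 63;                 // broadcast channel
           init.n_frames    = 4;                  // four-frame ringbuffer
           init.format      = DV1394_NTSC;
           ioctl(dv, DV1394_INIT, &init);

           while (read(in, buf, sizeof(buf)) == (ssize_t) sizeof(buf))
               write(dv, buf, sizeof(buf));       // blocks until ringbuffer space frees
           return 0;
       }
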
72 2)
73
74 For more control over buffering, and to avoid unnecessary copies
75 of the DV data, you can use the more sophisticated mmap() interface.
76 First, call the DV1394_INIT ioctl to specify your parameters,
77 including the number of frames in the ringbuffer. Then, calling mmap()
78 on the dv1394 device will give you direct access to the ringbuffer
79 from which the DV card reads your frame data.
80
81 The ringbuffer is simply one large, contiguous region of memory
82 containing two or more frames of packed DV data. Each frame of DV data
83 is 120000 bytes (NTSC) or 144000 bytes (PAL).
84
85 Fill one or more frames in the ringbuffer, then use the DV1394_SUBMIT_FRAMES
86 ioctl to begin I/O. You can use either the DV1394_WAIT_FRAMES ioctl
87 or select()/poll() to wait until the frames are transmitted. Next, you'll
88 need to call the DV1394_GET_STATUS ioctl to determine which ringbuffer
89 frames are clear (ready to be filled with new DV data). Finally, use
90 DV1394_SUBMIT_FRAMES again to send the new data to the DV output.
91
92
93 Example: here is what a four-frame ringbuffer might look like
94 during DV transmission:
95
96
97 frame 0 frame 1 frame 2 frame 3
98
99 *--------------------------------------*
100 | CLEAR | DV data | DV data | CLEAR |
101 *--------------------------------------*
102 <ACTIVE>
103
104 transmission goes in this direction --->>>
105
106
107 The DV hardware is currently transmitting the data in frame 1.
108 Once frame 1 is finished, it will automatically transmit frame 2.
109 (if frame 2 finishes before frame 3 is submitted, the device
110 will continue to transmit frame 2, and will increase the dropped_frames
111 counter each time it repeats the transmission).
112
113
114 If you called DV1394_GET_STATUS at this instant, you would
115 receive the following values:
116
117 n_frames = 4
118 active_frame = 1
119 first_clear_frame = 3
120 n_clear_frames = 2
121
122 At this point, you should write new DV data into frame 3 and optionally
123 frame 0. Then call DV1394_SUBMIT_FRAMES to inform the device that
124 it may transmit the new frames.
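
   A hedged sketch of one mmap-mode refill step follows, reusing the file
   descriptor "dv" and the "init" struct from the sketch under method 1
   (it additionally needs <sys/mman.h>). The ringbuffer layout (frame 0 at
   offset 0, NTSC frames of DV1394_NTSC_FRAME_SIZE bytes) follows the
   description above; the argument of DV1394_SUBMIT_FRAMES, taken here to
   be a count of newly filled frames, and the data source next_dv_frame()
   are assumptions.

       size_t frame_size = DV1394_NTSC_FRAME_SIZE;
       unsigned char *ring = mmap(NULL, init.n_frames * frame_size,
                                  PROT_READ | PROT_WRITE, MAP_SHARED, dv, 0);
       struct dv1394_status st;

       ioctl(dv, DV1394_GET_STATUS, &st);
       for (unsigned int i = 0; i < st.n_clear_frames; i++) {
           unsigned int f = (st.first_clear_frame + i) % st.init.n_frames;
           memcpy(ring + f * frame_size, next_dv_frame(), frame_size);
       }
       ioctl(dv, DV1394_SUBMIT_FRAMES, st.n_clear_frames);  // argument is assumed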
125
126 ERROR HANDLING
127
128 An error (buffer underflow/overflow or a break in the DV stream due
129 to a 1394 bus reset) can be detected by checking the dropped_frames
130 field of struct dv1394_status (obtained through the
131 DV1394_GET_STATUS ioctl).
132
133 The best way to recover from such an error is to re-initialize
134 dv1394, either by using the DV1394_INIT ioctl call, or closing the
135 file descriptor and opening it again. (note that you must unmap all
136 ringbuffer mappings when closing the file descriptor, or else
137 dv1394 will still be considered 'in use').
138
139 MAIN LOOP
140
141 For maximum efficiency and robustness against bus errors, you are
142 advised to model the main loop of your application after the
143 following pseudo-code example:
144
145 (checks of system call return values omitted for brevity; always
146 check return values in your code!)
147
148 while ( frames left ) {
149
150 struct pollfd *pfd = ...;
151
152 pfd->fd = dv1394_fd;
153 pfd->revents = 0;
154 pfd->events = POLLOUT | POLLIN; (OUT for transmit, IN for receive)
155
156 (add other sources of I/O here)
157
158 poll(pfd, 1, -1); (or select(); add a timeout if you want)
159
160 if (pfd->revents) {
161 struct dv1394_status status;
162
163 ioctl(dv1394_fd, DV1394_GET_STATUS, &status);
164
165 if (status.dropped_frames > 0) {
166 reset_dv1394();
167 } else {
168 for (int i = 0; i < status.n_clear_frames; i++) {
169 copy_DV_frame();
170 }
171 }
172 }
173 }
174
175 where copy_DV_frame() reads or writes on the dv1394 file descriptor
176 (read/write mode) or copies data to/from the mmap ringbuffer and
177 then calls ioctl(DV1394_SUBMIT_FRAMES) to notify dv1394 that new
178 frames are available (mmap mode).
179
180 reset_dv1394() is called in the event of a buffer
181 underflow/overflow or a halt in the DV stream (e.g. due to a 1394
182 bus reset). To guarantee recovery from the error, this function
183 should close the dv1394 file descriptor (and munmap() all
184 ringbuffer mappings, if you are using them), then re-open the
185 dv1394 device (and re-map the ringbuffer).
186
187*/
188
189
190/* maximum number of frames in the ringbuffer */
191#define DV1394_MAX_FRAMES 32
192
193/* number of *full* isochronous packets per DV frame */
194#define DV1394_NTSC_PACKETS_PER_FRAME 250
195#define DV1394_PAL_PACKETS_PER_FRAME 300
196
197/* size of one frame's worth of DV data, in bytes */
198#define DV1394_NTSC_FRAME_SIZE (480 * DV1394_NTSC_PACKETS_PER_FRAME)
199#define DV1394_PAL_FRAME_SIZE (480 * DV1394_PAL_PACKETS_PER_FRAME)
200
201
202/* ioctl() commands */
203#include "ieee1394-ioctl.h"
204
205
206enum pal_or_ntsc {
207 DV1394_NTSC = 0,
208 DV1394_PAL
209};
210
211
212
213
214/* this is the argument to DV1394_INIT */
215struct dv1394_init {
216 /* DV1394_API_VERSION */
217 unsigned int api_version;
218
219 /* isochronous transmission channel to use */
220 unsigned int channel;
221
222 /* number of frames in the ringbuffer. Must be at least 2
223 and at most DV1394_MAX_FRAMES. */
224 unsigned int n_frames;
225
226 /* send/receive PAL or NTSC video format */
227 enum pal_or_ntsc format;
228
229 /* the following are used only for transmission */
230
231 /* set these to zero unless you want a
232 non-default empty packet rate (see below) */
233 unsigned long cip_n;
234 unsigned long cip_d;
235
236 /* set this to zero unless you want a
237 non-default SYT cycle offset (default = 3 cycles) */
238 unsigned int syt_offset;
239};
240
241/* NOTE: you may only allocate the DV frame ringbuffer once each time
242 you open the dv1394 device. DV1394_INIT will fail if you call it a
243 second time with different 'n_frames' or 'format' arguments (which
244 would imply a different size for the ringbuffer). If you need a
245 different buffer size, simply close and re-open the device, then
246 initialize it with your new settings. */
247
248/* Q: What are cip_n and cip_d? */
249
250/*
251 A: DV video streams do not utilize 100% of the potential bandwidth offered
252 by IEEE 1394 (FireWire). To achieve the correct rate of data transmission,
253 DV devices must periodically insert empty packets into the 1394 data stream.
254 Typically there is one empty packet per 14-16 data-carrying packets.
255
256 Some DV devices will accept a wide range of empty packet rates, while others
257 require a precise rate. If the dv1394 driver produces empty packets at
258 a rate that your device does not accept, you may see ugly patterns on the
259 DV output, or even no output at all.
260
261 The default empty packet insertion rate seems to work for many people; if
262 your DV output is stable, you can simply ignore this discussion. However,
263 we have exposed the empty packet rate as a parameter to support devices that
264 do not work with the default rate.
265
266 The decision to insert an empty packet is made with a numerator/denominator
267 algorithm. Empty packets are produced at an average rate of CIP_N / CIP_D.
268 You can alter the empty packet rate by passing non-zero values for cip_n
269 and cip_d to the INIT ioctl.
270
271 */
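
/* Illustration only: a generic numerator/denominator pacing accumulator,
 * in the spirit of the description above.  This is NOT the driver's actual
 * code; it only shows how a cip_n/cip_d pair yields an average empty-packet
 * rate of cip_n / cip_d. */
static unsigned long cip_accum;

static int time_for_empty_packet(unsigned long cip_n, unsigned long cip_d)
{
	cip_accum += cip_n;
	if (cip_accum >= cip_d) {
		cip_accum -= cip_d;
		return 1;	/* insert an empty packet this cycle */
	}
	return 0;		/* send a data-carrying packet */
}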
272
273
274
275struct dv1394_status {
276 /* this embedded init struct returns the current dv1394
277 parameters in use */
278 struct dv1394_init init;
279
280 /* the ringbuffer frame that is currently being
281 displayed. (-1 if the device is not transmitting anything) */
282 int active_frame;
283
284 /* index of the first buffer (ahead of active_frame) that
285 is ready to be filled with data */
286 unsigned int first_clear_frame;
287
288 /* how many buffers, including first_clear_frame, are
289 ready to be filled with data */
290 unsigned int n_clear_frames;
291
292 /* how many times the DV stream has underflowed, overflowed,
293 or otherwise encountered an error, since the previous call
294 to DV1394_GET_STATUS */
295 unsigned int dropped_frames;
296
297 /* N.B. The dropped_frames counter is only a lower bound on the actual
298 number of dropped frames, with the special case that if dropped_frames
299 is zero, then it is guaranteed that NO frames have been dropped
300 since the last call to DV1394_GET_STATUS.
301 */
302};
303
304
305#endif /* _DV_1394_H */
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
new file mode 100644
index 000000000000..654da76bf811
--- /dev/null
+++ b/drivers/ieee1394/eth1394.c
@@ -0,0 +1,1801 @@
1/*
2 * eth1394.c -- Ethernet driver for Linux IEEE-1394 Subsystem
3 *
4 * Copyright (C) 2001-2003 Ben Collins <bcollins@debian.org>
5 * 2000 Bonin Franck <boninf@free.fr>
6 * 2003 Steve Kinneberg <kinnebergsteve@acmsystems.com>
7 *
8 * Mainly based on work by Emanuel Pirker and Andreas E. Bombe
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software Foundation,
22 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 */
24
25/* This driver intends to support RFC 2734, which describes a method for
26 * transporting IPv4 datagrams over IEEE-1394 serial busses. This driver
27 * will ultimately support that method, but currently falls short in
28 * several areas.
29 *
30 * TODO:
31 * RFC 2734 related:
32 * - Add MCAP. Multicast support is currently limited to 224.0.0.1 and 224.0.0.2.
33 *
34 * Non-RFC 2734 related:
35 * - Handle fragmented skb's coming from the networking layer.
36 * - Move generic GASP reception to core 1394 code
37 * - Convert kmalloc/kfree for link fragments to use kmem_cache_* instead
38 * - Stability improvements
39 * - Performance enhancements
40 * - Consider garbage collecting old partial datagrams after X amount of time
41 */
42
43
44#include <linux/module.h>
45
46#include <linux/sched.h>
47#include <linux/kernel.h>
48#include <linux/slab.h>
49#include <linux/errno.h>
50#include <linux/types.h>
51#include <linux/delay.h>
52#include <linux/init.h>
53
54#include <linux/netdevice.h>
55#include <linux/inetdevice.h>
56#include <linux/etherdevice.h>
57#include <linux/if_arp.h>
58#include <linux/if_ether.h>
59#include <linux/ip.h>
60#include <linux/in.h>
61#include <linux/tcp.h>
62#include <linux/skbuff.h>
63#include <linux/bitops.h>
64#include <linux/ethtool.h>
65#include <asm/uaccess.h>
66#include <asm/delay.h>
67#include <asm/semaphore.h>
68#include <net/arp.h>
69
70#include "csr1212.h"
71#include "ieee1394_types.h"
72#include "ieee1394_core.h"
73#include "ieee1394_transactions.h"
74#include "ieee1394.h"
75#include "highlevel.h"
76#include "iso.h"
77#include "nodemgr.h"
78#include "eth1394.h"
79#include "config_roms.h"
80
81#define ETH1394_PRINT_G(level, fmt, args...) \
82 printk(level "%s: " fmt, driver_name, ## args)
83
84#define ETH1394_PRINT(level, dev_name, fmt, args...) \
85 printk(level "%s: %s: " fmt, driver_name, dev_name, ## args)
86
87#define DEBUG(fmt, args...) \
88 printk(KERN_ERR "%s:%s[%d]: " fmt "\n", driver_name, __FUNCTION__, __LINE__, ## args)
89#define TRACE() printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__)
90
91static char version[] __devinitdata =
92 "$Rev: 1247 $ Ben Collins <bcollins@debian.org>";
93
94struct fragment_info {
95 struct list_head list;
96 int offset;
97 int len;
98};
99
100struct partial_datagram {
101 struct list_head list;
102 u16 dgl;
103 u16 dg_size;
104 u16 ether_type;
105 struct sk_buff *skb;
106 char *pbuf;
107 struct list_head frag_info;
108};
109
110struct pdg_list {
111 struct list_head list; /* partial datagram list per node */
112 unsigned int sz; /* partial datagram list size per node */
113 spinlock_t lock; /* partial datagram lock */
114};
115
116struct eth1394_host_info {
117 struct hpsb_host *host;
118 struct net_device *dev;
119};
120
121struct eth1394_node_ref {
122 struct unit_directory *ud;
123 struct list_head list;
124};
125
126struct eth1394_node_info {
127 u16 maxpayload; /* Max payload */
128 u8 sspd; /* Max speed */
129 u64 fifo; /* FIFO address */
130 struct pdg_list pdg; /* partial RX datagram lists */
131 int dgl; /* Outgoing datagram label */
132};
133
134/* Our ieee1394 highlevel driver */
135#define ETH1394_DRIVER_NAME "eth1394"
136static const char driver_name[] = ETH1394_DRIVER_NAME;
137
138static kmem_cache_t *packet_task_cache;
139
140static struct hpsb_highlevel eth1394_highlevel;
141
142/* Use common.lf to determine header len */
143static const int hdr_type_len[] = {
144 sizeof (struct eth1394_uf_hdr),
145 sizeof (struct eth1394_ff_hdr),
146 sizeof (struct eth1394_sf_hdr),
147 sizeof (struct eth1394_sf_hdr)
148};
149
150/* Change this to IEEE1394_SPEED_S100 to make testing easier */
151#define ETH1394_SPEED_DEF IEEE1394_SPEED_MAX
152
153/* For now, this needs to be 1500, so that XP works with us */
154#define ETH1394_DATA_LEN ETH_DATA_LEN
155
156static const u16 eth1394_speedto_maxpayload[] = {
157/* S100, S200, S400, S800, S1600, S3200 */
158 512, 1024, 2048, 4096, 4096, 4096
159};
160
161MODULE_AUTHOR("Ben Collins (bcollins@debian.org)");
162MODULE_DESCRIPTION("IEEE 1394 IPv4 Driver (IPv4-over-1394 as per RFC 2734)");
163MODULE_LICENSE("GPL");
164
165/* The max_partial_datagrams parameter is the maximum number of fragmented
166 * datagrams per node that eth1394 will keep in memory. Providing an upper
167 * bound allows us to limit the amount of memory that partial datagrams
168 * consume in the event that some partial datagrams are never completed.
169 */
170static int max_partial_datagrams = 25;
171module_param(max_partial_datagrams, int, S_IRUGO | S_IWUSR);
172MODULE_PARM_DESC(max_partial_datagrams,
173 "Maximum number of partially received fragmented datagrams "
174 "(default = 25).");
175
176
177static int ether1394_header(struct sk_buff *skb, struct net_device *dev,
178 unsigned short type, void *daddr, void *saddr,
179 unsigned len);
180static int ether1394_rebuild_header(struct sk_buff *skb);
181static int ether1394_header_parse(struct sk_buff *skb, unsigned char *haddr);
182static int ether1394_header_cache(struct neighbour *neigh, struct hh_cache *hh);
183static void ether1394_header_cache_update(struct hh_cache *hh,
184 struct net_device *dev,
185 unsigned char * haddr);
186static int ether1394_mac_addr(struct net_device *dev, void *p);
187
188static void purge_partial_datagram(struct list_head *old);
189static int ether1394_tx(struct sk_buff *skb, struct net_device *dev);
190static void ether1394_iso(struct hpsb_iso *iso);
191
192static struct ethtool_ops ethtool_ops;
193
194static int ether1394_write(struct hpsb_host *host, int srcid, int destid,
195 quadlet_t *data, u64 addr, size_t len, u16 flags);
196static void ether1394_add_host (struct hpsb_host *host);
197static void ether1394_remove_host (struct hpsb_host *host);
198static void ether1394_host_reset (struct hpsb_host *host);
199
200/* Function for incoming 1394 packets */
201static struct hpsb_address_ops addr_ops = {
202 .write = ether1394_write,
203};
204
205/* Ieee1394 highlevel driver functions */
206static struct hpsb_highlevel eth1394_highlevel = {
207 .name = driver_name,
208 .add_host = ether1394_add_host,
209 .remove_host = ether1394_remove_host,
210 .host_reset = ether1394_host_reset,
211};
212
213
214/* This is called after an "ifup" */
215static int ether1394_open (struct net_device *dev)
216{
217 struct eth1394_priv *priv = netdev_priv(dev);
218 int ret = 0;
219
220 /* Something bad happened, don't even try */
221 if (priv->bc_state == ETHER1394_BC_ERROR) {
222 /* we'll try again */
223 priv->iso = hpsb_iso_recv_init(priv->host,
224 ETHER1394_GASP_BUFFERS * 2 *
225 (1 << (priv->host->csr.max_rec +
226 1)),
227 ETHER1394_GASP_BUFFERS,
228 priv->broadcast_channel,
229 HPSB_ISO_DMA_PACKET_PER_BUFFER,
230 1, ether1394_iso);
231 if (priv->iso == NULL) {
232 ETH1394_PRINT(KERN_ERR, dev->name,
233 "Could not allocate isochronous receive "
234 "context for the broadcast channel\n");
235 priv->bc_state = ETHER1394_BC_ERROR;
236 ret = -EAGAIN;
237 } else {
238 if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0)
239 priv->bc_state = ETHER1394_BC_STOPPED;
240 else
241 priv->bc_state = ETHER1394_BC_RUNNING;
242 }
243 }
244
245 if (ret)
246 return ret;
247
248 netif_start_queue (dev);
249 return 0;
250}
251
252/* This is called after an "ifdown" */
253static int ether1394_stop (struct net_device *dev)
254{
255 netif_stop_queue (dev);
256 return 0;
257}
258
259/* Return statistics to the caller */
260static struct net_device_stats *ether1394_stats (struct net_device *dev)
261{
262 return &(((struct eth1394_priv *)netdev_priv(dev))->stats);
263}
264
265/* What to do if we timeout. I think a host reset is probably in order, so
266 * that's what we do. Should we increment the stat counters too? */
267static void ether1394_tx_timeout (struct net_device *dev)
268{
269 ETH1394_PRINT (KERN_ERR, dev->name, "Timeout, resetting host %s\n",
270 ((struct eth1394_priv *)netdev_priv(dev))->host->driver->name);
271
272 highlevel_host_reset (((struct eth1394_priv *)netdev_priv(dev))->host);
273
274 netif_wake_queue (dev);
275}
276
277static int ether1394_change_mtu(struct net_device *dev, int new_mtu)
278{
279 struct eth1394_priv *priv = netdev_priv(dev);
280
281 if ((new_mtu < 68) ||
282 (new_mtu > min(ETH1394_DATA_LEN,
283 (int)((1 << (priv->host->csr.max_rec + 1)) -
284 (sizeof(union eth1394_hdr) +
285 ETHER1394_GASP_OVERHEAD)))))
286 return -EINVAL;
287 dev->mtu = new_mtu;
288 return 0;
289}
290
291static void purge_partial_datagram(struct list_head *old)
292{
293 struct partial_datagram *pd = list_entry(old, struct partial_datagram, list);
294 struct list_head *lh, *n;
295
296 list_for_each_safe(lh, n, &pd->frag_info) {
297 struct fragment_info *fi = list_entry(lh, struct fragment_info, list);
298 list_del(lh);
299 kfree(fi);
300 }
301 list_del(old);
302 kfree_skb(pd->skb);
303 kfree(pd);
304}
305
306/******************************************
307 * 1394 bus activity functions
308 ******************************************/
309
310static struct eth1394_node_ref *eth1394_find_node(struct list_head *inl,
311 struct unit_directory *ud)
312{
313 struct eth1394_node_ref *node;
314
315 list_for_each_entry(node, inl, list)
316 if (node->ud == ud)
317 return node;
318
319 return NULL;
320}
321
322static struct eth1394_node_ref *eth1394_find_node_guid(struct list_head *inl,
323 u64 guid)
324{
325 struct eth1394_node_ref *node;
326
327 list_for_each_entry(node, inl, list)
328 if (node->ud->ne->guid == guid)
329 return node;
330
331 return NULL;
332}
333
334static struct eth1394_node_ref *eth1394_find_node_nodeid(struct list_head *inl,
335 nodeid_t nodeid)
336{
337 struct eth1394_node_ref *node;
338 list_for_each_entry(node, inl, list) {
339 if (node->ud->ne->nodeid == nodeid)
340 return node;
341 }
342
343 return NULL;
344}
345
346static int eth1394_probe(struct device *dev)
347{
348 struct unit_directory *ud;
349 struct eth1394_host_info *hi;
350 struct eth1394_priv *priv;
351 struct eth1394_node_ref *new_node;
352 struct eth1394_node_info *node_info;
353
354 ud = container_of(dev, struct unit_directory, device);
355
356 hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
357 if (!hi)
358 return -ENOENT;
359
360 new_node = kmalloc(sizeof(struct eth1394_node_ref),
361 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
362 if (!new_node)
363 return -ENOMEM;
364
365 node_info = kmalloc(sizeof(struct eth1394_node_info),
366 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
367 if (!node_info) {
368 kfree(new_node);
369 return -ENOMEM;
370 }
371
372 spin_lock_init(&node_info->pdg.lock);
373 INIT_LIST_HEAD(&node_info->pdg.list);
374 node_info->pdg.sz = 0;
375 node_info->fifo = ETHER1394_INVALID_ADDR;
376
377 ud->device.driver_data = node_info;
378 new_node->ud = ud;
379
380 priv = netdev_priv(hi->dev);
381 list_add_tail(&new_node->list, &priv->ip_node_list);
382
383 return 0;
384}
385
386static int eth1394_remove(struct device *dev)
387{
388 struct unit_directory *ud;
389 struct eth1394_host_info *hi;
390 struct eth1394_priv *priv;
391 struct eth1394_node_ref *old_node;
392 struct eth1394_node_info *node_info;
393 struct list_head *lh, *n;
394 unsigned long flags;
395
396 ud = container_of(dev, struct unit_directory, device);
397 hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
398 if (!hi)
399 return -ENOENT;
400
401 priv = netdev_priv(hi->dev);
402
403 old_node = eth1394_find_node(&priv->ip_node_list, ud);
404
405 if (old_node) {
406 list_del(&old_node->list);
407 kfree(old_node);
408
409 node_info = (struct eth1394_node_info*)ud->device.driver_data;
410
411 spin_lock_irqsave(&node_info->pdg.lock, flags);
412 /* The partial datagram list should be empty, but we'll just
413 * make sure anyway... */
414 list_for_each_safe(lh, n, &node_info->pdg.list) {
415 purge_partial_datagram(lh);
416 }
417 spin_unlock_irqrestore(&node_info->pdg.lock, flags);
418
419 kfree(node_info);
420 ud->device.driver_data = NULL;
421 }
422 return 0;
423}
424
425static int eth1394_update(struct unit_directory *ud)
426{
427 struct eth1394_host_info *hi;
428 struct eth1394_priv *priv;
429 struct eth1394_node_ref *node;
430 struct eth1394_node_info *node_info;
431
432 hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
433 if (!hi)
434 return -ENOENT;
435
436 priv = netdev_priv(hi->dev);
437
438 node = eth1394_find_node(&priv->ip_node_list, ud);
439
440 if (!node) {
441 node = kmalloc(sizeof(struct eth1394_node_ref),
442 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
443 if (!node)
444 return -ENOMEM;
445
446 node_info = kmalloc(sizeof(struct eth1394_node_info),
447 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
448 if (!node_info) {
449 kfree(node);
450 return -ENOMEM;
451 }
452
453 spin_lock_init(&node_info->pdg.lock);
454 INIT_LIST_HEAD(&node_info->pdg.list);
455 node_info->pdg.sz = 0;
456
457 ud->device.driver_data = node_info;
458 node->ud = ud;
459
460 priv = netdev_priv(hi->dev);
461 list_add_tail(&node->list, &priv->ip_node_list);
462 }
463
464 return 0;
465}
466
467
468static struct ieee1394_device_id eth1394_id_table[] = {
469 {
470 .match_flags = (IEEE1394_MATCH_SPECIFIER_ID |
471 IEEE1394_MATCH_VERSION),
472 .specifier_id = ETHER1394_GASP_SPECIFIER_ID,
473 .version = ETHER1394_GASP_VERSION,
474 },
475 {}
476};
477
478MODULE_DEVICE_TABLE(ieee1394, eth1394_id_table);
479
480static struct hpsb_protocol_driver eth1394_proto_driver = {
481 .name = "IPv4 over 1394 Driver",
482 .id_table = eth1394_id_table,
483 .update = eth1394_update,
484 .driver = {
485 .name = ETH1394_DRIVER_NAME,
486 .bus = &ieee1394_bus_type,
487 .probe = eth1394_probe,
488 .remove = eth1394_remove,
489 },
490};
491
492
493static void ether1394_reset_priv (struct net_device *dev, int set_mtu)
494{
495 unsigned long flags;
496 int i;
497 struct eth1394_priv *priv = netdev_priv(dev);
498 struct hpsb_host *host = priv->host;
499 u64 guid = *((u64*)&(host->csr.rom->bus_info_data[3]));
500 u16 maxpayload = 1 << (host->csr.max_rec + 1);
501 int max_speed = IEEE1394_SPEED_MAX;
502
503 spin_lock_irqsave (&priv->lock, flags);
504
505 memset(priv->ud_list, 0, sizeof(struct node_entry*) * ALL_NODES);
506 priv->bc_maxpayload = 512;
507
508 /* Determine speed limit */
509 for (i = 0; i < host->node_count; i++)
510 if (max_speed > host->speed_map[NODEID_TO_NODE(host->node_id) *
511 64 + i])
512 max_speed = host->speed_map[NODEID_TO_NODE(host->node_id) *
513 64 + i];
514 priv->bc_sspd = max_speed;
515
516 /* We'll use our maxpayload as the default mtu */
517 if (set_mtu) {
518 dev->mtu = min(ETH1394_DATA_LEN,
519 (int)(maxpayload -
520 (sizeof(union eth1394_hdr) +
521 ETHER1394_GASP_OVERHEAD)));
522
523 /* Set our hardware address while we're at it */
524 *(u64*)dev->dev_addr = guid;
525 *(u64*)dev->broadcast = ~0x0ULL;
526 }
527
528 spin_unlock_irqrestore (&priv->lock, flags);
529}
530
531/* This function is called right before register_netdev */
532static void ether1394_init_dev (struct net_device *dev)
533{
534 /* Our functions */
535 dev->open = ether1394_open;
536 dev->stop = ether1394_stop;
537 dev->hard_start_xmit = ether1394_tx;
538 dev->get_stats = ether1394_stats;
539 dev->tx_timeout = ether1394_tx_timeout;
540 dev->change_mtu = ether1394_change_mtu;
541
542 dev->hard_header = ether1394_header;
543 dev->rebuild_header = ether1394_rebuild_header;
544 dev->hard_header_cache = ether1394_header_cache;
545 dev->header_cache_update= ether1394_header_cache_update;
546 dev->hard_header_parse = ether1394_header_parse;
547 dev->set_mac_address = ether1394_mac_addr;
548 SET_ETHTOOL_OPS(dev, &ethtool_ops);
549
550 /* Some constants */
551 dev->watchdog_timeo = ETHER1394_TIMEOUT;
552 dev->flags = IFF_BROADCAST | IFF_MULTICAST;
553 dev->features = NETIF_F_HIGHDMA;
554 dev->addr_len = ETH1394_ALEN;
555 dev->hard_header_len = ETH1394_HLEN;
556 dev->type = ARPHRD_IEEE1394;
557
558 ether1394_reset_priv (dev, 1);
559}
560
561/*
562 * This function is called every time a card is found. It is generally called
563 * when the module is installed. This is where we add all of our ethernet
564 * devices. One for each host.
565 */
566static void ether1394_add_host (struct hpsb_host *host)
567{
568 struct eth1394_host_info *hi = NULL;
569 struct net_device *dev = NULL;
570 struct eth1394_priv *priv;
571 static int version_printed = 0;
572 u64 fifo_addr;
573
574 if (!(host->config_roms & HPSB_CONFIG_ROM_ENTRY_IP1394))
575 return;
576
577 fifo_addr = hpsb_allocate_and_register_addrspace(&eth1394_highlevel,
578 host,
579 &addr_ops,
580 ETHER1394_REGION_ADDR_LEN,
581 ETHER1394_REGION_ADDR_LEN,
582 -1, -1);
583 if (fifo_addr == ~0ULL)
584 goto out;
585
586 if (version_printed++ == 0)
587 ETH1394_PRINT_G (KERN_INFO, "%s\n", version);
588
589 /* We should really have our own alloc_hpsbdev() function in
590 * net_init.c instead of calling the one for ethernet then hijacking
591 * it for ourselves. That way we'd be a real networking device. */
592 dev = alloc_etherdev(sizeof (struct eth1394_priv));
593
594 if (dev == NULL) {
595 ETH1394_PRINT_G (KERN_ERR, "Out of memory trying to allocate "
596 "etherdevice for IEEE 1394 device %s-%d\n",
597 host->driver->name, host->id);
598 goto out;
599 }
600
601 SET_MODULE_OWNER(dev);
602 SET_NETDEV_DEV(dev, &host->device);
603
604 priv = netdev_priv(dev);
605
606 INIT_LIST_HEAD(&priv->ip_node_list);
607
608 spin_lock_init(&priv->lock);
609 priv->host = host;
610 priv->local_fifo = fifo_addr;
611
612 hi = hpsb_create_hostinfo(&eth1394_highlevel, host, sizeof(*hi));
613
614 if (hi == NULL) {
615 ETH1394_PRINT_G (KERN_ERR, "Out of memory trying to create "
616 "hostinfo for IEEE 1394 device %s-%d\n",
617 host->driver->name, host->id);
618 goto out;
619 }
620
621 ether1394_init_dev(dev);
622
623 if (register_netdev (dev)) {
624 ETH1394_PRINT (KERN_ERR, dev->name, "Error registering network driver\n");
625 goto out;
626 }
627
628 ETH1394_PRINT (KERN_INFO, dev->name, "IEEE-1394 IPv4 over 1394 Ethernet (fw-host%d)\n",
629 host->id);
630
631 hi->host = host;
632 hi->dev = dev;
633
634 /* Ignore validity in hopes that it will be set in the future. It'll
635 * be checked when the eth device is opened. */
636 priv->broadcast_channel = host->csr.broadcast_channel & 0x3f;
637
638 priv->iso = hpsb_iso_recv_init(host, (ETHER1394_GASP_BUFFERS * 2 *
639 (1 << (host->csr.max_rec + 1))),
640 ETHER1394_GASP_BUFFERS,
641 priv->broadcast_channel,
642 HPSB_ISO_DMA_PACKET_PER_BUFFER,
643 1, ether1394_iso);
644 if (priv->iso == NULL) {
645 ETH1394_PRINT(KERN_ERR, dev->name,
646 "Could not allocate isochronous receive context "
647 "for the broadcast channel\n");
648 priv->bc_state = ETHER1394_BC_ERROR;
649 } else {
650 if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0)
651 priv->bc_state = ETHER1394_BC_STOPPED;
652 else
653 priv->bc_state = ETHER1394_BC_RUNNING;
654 }
655
656 return;
657
658out:
659 if (dev != NULL)
660 free_netdev(dev);
661 if (hi)
662 hpsb_destroy_hostinfo(&eth1394_highlevel, host);
663
664 return;
665}
666
667/* Remove a card from our list */
668static void ether1394_remove_host (struct hpsb_host *host)
669{
670 struct eth1394_host_info *hi;
671
672 hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
673 if (hi != NULL) {
674 struct eth1394_priv *priv = netdev_priv(hi->dev);
675
676 hpsb_unregister_addrspace(&eth1394_highlevel, host,
677 priv->local_fifo);
678
679 if (priv->iso != NULL)
680 hpsb_iso_shutdown(priv->iso);
681
682 if (hi->dev) {
683 unregister_netdev (hi->dev);
684 free_netdev(hi->dev);
685 }
686 }
687
688 return;
689}
690
691/* A reset has just arisen */
692static void ether1394_host_reset (struct hpsb_host *host)
693{
694 struct eth1394_host_info *hi;
695 struct eth1394_priv *priv;
696 struct net_device *dev;
697 struct list_head *lh, *n;
698 struct eth1394_node_ref *node;
699 struct eth1394_node_info *node_info;
700 unsigned long flags;
701
702 hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
703
704 /* This can happen for hosts that we don't use */
705 if (hi == NULL)
706 return;
707
708 dev = hi->dev;
709 priv = netdev_priv(dev);
710
711 /* Reset our private host data, but not our mtu */
712 netif_stop_queue (dev);
713 ether1394_reset_priv (dev, 0);
714
715 list_for_each_entry(node, &priv->ip_node_list, list) {
716 node_info = (struct eth1394_node_info*)node->ud->device.driver_data;
717
718 spin_lock_irqsave(&node_info->pdg.lock, flags);
719
720 list_for_each_safe(lh, n, &node_info->pdg.list) {
721 purge_partial_datagram(lh);
722 }
723
724 INIT_LIST_HEAD(&(node_info->pdg.list));
725 node_info->pdg.sz = 0;
726
727 spin_unlock_irqrestore(&node_info->pdg.lock, flags);
728 }
729
730 netif_wake_queue (dev);
731}
732
733/******************************************
734 * HW Header net device functions
735 ******************************************/
736/* These functions have been adapted from net/ethernet/eth.c */
737
738
739/* Create a fake MAC header for an arbitrary protocol layer.
740 * saddr=NULL means use device source address
741 * daddr=NULL means leave destination address (eg unresolved arp). */
742static int ether1394_header(struct sk_buff *skb, struct net_device *dev,
743 unsigned short type, void *daddr, void *saddr,
744 unsigned len)
745{
746 struct eth1394hdr *eth = (struct eth1394hdr *)skb_push(skb, ETH1394_HLEN);
747
748 eth->h_proto = htons(type);
749
750 if (dev->flags & (IFF_LOOPBACK|IFF_NOARP)) {
751 memset(eth->h_dest, 0, dev->addr_len);
752 return(dev->hard_header_len);
753 }
754
755 if (daddr) {
756 memcpy(eth->h_dest,daddr,dev->addr_len);
757 return dev->hard_header_len;
758 }
759
760 return -dev->hard_header_len;
761
762}
763
764
765/* Rebuild the faked MAC header. This is called after an ARP
766 * (or in future other address resolution) has completed on this
767 * sk_buff. We now let ARP fill in the other fields.
768 *
769 * This routine CANNOT use cached dst->neigh!
770 * Really, it is used only when dst->neigh is wrong.
771 */
772static int ether1394_rebuild_header(struct sk_buff *skb)
773{
774 struct eth1394hdr *eth = (struct eth1394hdr *)skb->data;
775 struct net_device *dev = skb->dev;
776
777 switch (eth->h_proto) {
778
779#ifdef CONFIG_INET
780 case __constant_htons(ETH_P_IP):
781 return arp_find((unsigned char*)&eth->h_dest, skb);
782#endif
783 default:
784 ETH1394_PRINT(KERN_DEBUG, dev->name,
785 "unable to resolve type %04x addresses.\n",
786 eth->h_proto);
787 break;
788 }
789
790 return 0;
791}
792
793static int ether1394_header_parse(struct sk_buff *skb, unsigned char *haddr)
794{
795 struct net_device *dev = skb->dev;
796 memcpy(haddr, dev->dev_addr, ETH1394_ALEN);
797 return ETH1394_ALEN;
798}
799
800
801static int ether1394_header_cache(struct neighbour *neigh, struct hh_cache *hh)
802{
803 unsigned short type = hh->hh_type;
804 struct eth1394hdr *eth = (struct eth1394hdr*)(((u8*)hh->hh_data) +
805 (16 - ETH1394_HLEN));
806 struct net_device *dev = neigh->dev;
807
808 if (type == __constant_htons(ETH_P_802_3)) {
809 return -1;
810 }
811
812 eth->h_proto = type;
813 memcpy(eth->h_dest, neigh->ha, dev->addr_len);
814
815 hh->hh_len = ETH1394_HLEN;
816 return 0;
817}
818
819/* Called by Address Resolution module to notify changes in address. */
820static void ether1394_header_cache_update(struct hh_cache *hh,
821 struct net_device *dev,
822 unsigned char * haddr)
823{
824 memcpy(((u8*)hh->hh_data) + (16 - ETH1394_HLEN), haddr, dev->addr_len);
825}
826
827static int ether1394_mac_addr(struct net_device *dev, void *p)
828{
829 if (netif_running(dev))
830 return -EBUSY;
831
832 /* Not going to allow setting the MAC address, we really need to use
833 * the real one supplied by the hardware */
834 return -EINVAL;
835 }
836
837
838
839/******************************************
840 * Datagram reception code
841 ******************************************/
842
843/* Copied from net/ethernet/eth.c */
844static inline u16 ether1394_type_trans(struct sk_buff *skb,
845 struct net_device *dev)
846{
847 struct eth1394hdr *eth;
848 unsigned char *rawp;
849
850 skb->mac.raw = skb->data;
851 skb_pull (skb, ETH1394_HLEN);
852 eth = eth1394_hdr(skb);
853
854 if (*eth->h_dest & 1) {
855 if (memcmp(eth->h_dest, dev->broadcast, dev->addr_len)==0)
856 skb->pkt_type = PACKET_BROADCAST;
857#if 0
858 else
859 skb->pkt_type = PACKET_MULTICAST;
860#endif
861 } else {
862 if (memcmp(eth->h_dest, dev->dev_addr, dev->addr_len))
863 skb->pkt_type = PACKET_OTHERHOST;
864 }
865
866 if (ntohs (eth->h_proto) >= 1536)
867 return eth->h_proto;
868
869 rawp = skb->data;
870
871 if (*(unsigned short *)rawp == 0xFFFF)
872 return htons (ETH_P_802_3);
873
874 return htons (ETH_P_802_2);
875}
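/* Note on the classification above: h_proto values >= 1536 are EtherTypes;
 * smaller values are 802.3 length fields, and a payload starting with
 * 0xFFFF is treated as a raw 802.3 frame (logic lifted from
 * net/ethernet/eth.c, as noted above). */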
876
877/* Parse an encapsulated IP1394 header into an ethernet frame packet.
878 * We also perform ARP translation here, if need be. */
879static inline u16 ether1394_parse_encap(struct sk_buff *skb,
880 struct net_device *dev,
881 nodeid_t srcid, nodeid_t destid,
882 u16 ether_type)
883{
884 struct eth1394_priv *priv = netdev_priv(dev);
885 u64 dest_hw;
886 unsigned short ret = 0;
887
888 /* Setup our hw addresses. We use these to build the
889 * ethernet header. */
890 if (destid == (LOCAL_BUS | ALL_NODES))
891 dest_hw = ~0ULL; /* broadcast */
892 else
893 dest_hw = cpu_to_be64((((u64)priv->host->csr.guid_hi) << 32) |
894 priv->host->csr.guid_lo);
895
896 /* If this is an ARP packet, convert it. First, we want to make
897 * use of some of the fields, since they tell us a little bit
898 * about the sending machine. */
899 if (ether_type == __constant_htons (ETH_P_ARP)) {
900 struct eth1394_arp *arp1394 = (struct eth1394_arp*)skb->data;
901 struct arphdr *arp = (struct arphdr *)skb->data;
902 unsigned char *arp_ptr = (unsigned char *)(arp + 1);
903 u64 fifo_addr = (u64)ntohs(arp1394->fifo_hi) << 32 |
904 ntohl(arp1394->fifo_lo);
905 u8 max_rec = min(priv->host->csr.max_rec,
906 (u8)(arp1394->max_rec));
907 int sspd = arp1394->sspd;
908 u16 maxpayload;
909 struct eth1394_node_ref *node;
910 struct eth1394_node_info *node_info;
911
912 /* Sanity check. MacOSX seems to be sending us 131 in this
913 * field (at least on my Panther G5). Not sure why. */
914 if (sspd > 5 || sspd < 0)
915 sspd = 0;
916
917 maxpayload = min(eth1394_speedto_maxpayload[sspd], (u16)(1 << (max_rec + 1)));
918
919 node = eth1394_find_node_guid(&priv->ip_node_list,
920 be64_to_cpu(arp1394->s_uniq_id));
921 if (!node) {
922 return 0;
923 }
924
925 node_info = (struct eth1394_node_info*)node->ud->device.driver_data;
926
927 /* Update our speed/payload/fifo_offset table */
928 node_info->maxpayload = maxpayload;
929 node_info->sspd = sspd;
930 node_info->fifo = fifo_addr;
931
932 /* Now that we're done with the 1394 specific stuff, we'll
933 * need to alter some of the data. Believe it or not, all
934 * that needs to be done is sender_IP_address needs to be
935 * moved, the destination hardware address gets stuffed
936 * in and the hardware address length set to 8.
937 *
938 * IMPORTANT: The code below overwrites 1394 specific data
939 * needed above so keep the munging of the data for the
940 * higher level IP stack last. */
941
942 arp->ar_hln = 8;
943 arp_ptr += arp->ar_hln; /* skip over sender unique id */
944 *(u32*)arp_ptr = arp1394->sip; /* move sender IP addr */
945 arp_ptr += arp->ar_pln; /* skip over sender IP addr */
946
947 if (arp->ar_op == 1)
948 /* just set ARP req target unique ID to 0 */
949 *((u64*)arp_ptr) = 0;
950 else
951 *((u64*)arp_ptr) = *((u64*)dev->dev_addr);
952 }
953
954 /* Now add the ethernet header. */
955 if (dev->hard_header (skb, dev, __constant_ntohs (ether_type),
956 &dest_hw, NULL, skb->len) >= 0)
957 ret = ether1394_type_trans(skb, dev);
958
959 return ret;
960}
961
962static inline int fragment_overlap(struct list_head *frag_list, int offset, int len)
963{
964 struct fragment_info *fi;
965
966 list_for_each_entry(fi, frag_list, list) {
967 if ( ! ((offset > (fi->offset + fi->len - 1)) ||
968 ((offset + len - 1) < fi->offset)))
969 return 1;
970 }
971 return 0;
972}
973
974static inline struct list_head *find_partial_datagram(struct list_head *pdgl, int dgl)
975{
976 struct partial_datagram *pd;
977
978 list_for_each_entry(pd, pdgl, list) {
979 if (pd->dgl == dgl)
980 return &pd->list;
981 }
982 return NULL;
983}
984
985/* Assumes that new fragment does not overlap any existing fragments */
986static inline int new_fragment(struct list_head *frag_info, int offset, int len)
987{
988 struct list_head *lh;
989 struct fragment_info *fi, *fi2, *new;
990
991 list_for_each(lh, frag_info) {
992 fi = list_entry(lh, struct fragment_info, list);
993 if ((fi->offset + fi->len) == offset) {
994 /* The new fragment can be tacked on to the end */
995 fi->len += len;
996 /* Did the new fragment plug a hole? */
997 fi2 = list_entry(lh->next, struct fragment_info, list);
998 if ((fi->offset + fi->len) == fi2->offset) {
999 /* glue fragments together */
1000 fi->len += fi2->len;
1001 list_del(lh->next);
1002 kfree(fi2);
1003 }
1004 return 0;
1005 } else if ((offset + len) == fi->offset) {
1006 /* The new fragment can be tacked on to the beginning */
1007 fi->offset = offset;
1008 fi->len += len;
1009 /* Did the new fragment plug a hole? */
1010 fi2 = list_entry(lh->prev, struct fragment_info, list);
1011 if ((fi2->offset + fi2->len) == fi->offset) {
1012 /* glue fragments together */
1013 fi2->len += fi->len;
1014 list_del(lh);
1015 kfree(fi);
1016 }
1017 return 0;
1018 } else if (offset > (fi->offset + fi->len)) {
1019 break;
1020 } else if ((offset + len) < fi->offset) {
1021 lh = lh->prev;
1022 break;
1023 }
1024 }
1025
1026 new = kmalloc(sizeof(struct fragment_info), GFP_ATOMIC);
1027 if (!new)
1028 return -ENOMEM;
1029
1030 new->offset = offset;
1031 new->len = len;
1032
1033 list_add(&new->list, lh);
1034
1035 return 0;
1036}
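/* Worked example of the coalescing above: with existing fragment entries
 * covering bytes [0,1023] and [2048,3071], a new fragment at offset 1024
 * with len 1024 first extends the [0,1023] entry to [0,2047] and is then
 * glued to its neighbour, leaving a single [0,3071] entry. */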
1037
1038static inline int new_partial_datagram(struct net_device *dev,
1039 struct list_head *pdgl, int dgl,
1040 int dg_size, char *frag_buf,
1041 int frag_off, int frag_len)
1042{
1043 struct partial_datagram *new;
1044
1045 new = kmalloc(sizeof(struct partial_datagram), GFP_ATOMIC);
1046 if (!new)
1047 return -ENOMEM;
1048
1049 INIT_LIST_HEAD(&new->frag_info);
1050
1051 if (new_fragment(&new->frag_info, frag_off, frag_len) < 0) {
1052 kfree(new);
1053 return -ENOMEM;
1054 }
1055
1056 new->dgl = dgl;
1057 new->dg_size = dg_size;
1058
1059 new->skb = dev_alloc_skb(dg_size + dev->hard_header_len + 15);
1060 if (!new->skb) {
1061 struct fragment_info *fi = list_entry(new->frag_info.next,
1062 struct fragment_info,
1063 list);
1064 kfree(fi);
1065 kfree(new);
1066 return -ENOMEM;
1067 }
1068
1069 skb_reserve(new->skb, (dev->hard_header_len + 15) & ~15);
1070 new->pbuf = skb_put(new->skb, dg_size);
1071 memcpy(new->pbuf + frag_off, frag_buf, frag_len);
1072
1073 list_add(&new->list, pdgl);
1074
1075 return 0;
1076}
1077
1078static inline int update_partial_datagram(struct list_head *pdgl, struct list_head *lh,
1079 char *frag_buf, int frag_off, int frag_len)
1080{
1081 struct partial_datagram *pd = list_entry(lh, struct partial_datagram, list);
1082
1083 if (new_fragment(&pd->frag_info, frag_off, frag_len) < 0) {
1084 return -ENOMEM;
1085 }
1086
1087 memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
1088
1089 /* Move list entry to beginning of list so that oldest partial
1090 * datagrams percolate to the end of the list */
1091 list_del(lh);
1092 list_add(lh, pdgl);
1093
1094 return 0;
1095}
1096
1097static inline int is_datagram_complete(struct list_head *lh, int dg_size)
1098{
1099 struct partial_datagram *pd = list_entry(lh, struct partial_datagram, list);
1100 struct fragment_info *fi = list_entry(pd->frag_info.next,
1101 struct fragment_info, list);
1102
1103 return (fi->len == dg_size);
1104}
1105
1106/* Packet reception. We convert the IP1394 encapsulation header to an
1107 * ethernet header, and fill it with some of our other fields. This is
1108 * an incoming packet from the 1394 bus. */
1109static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
1110 char *buf, int len)
1111{
1112 struct sk_buff *skb;
1113 unsigned long flags;
1114 struct eth1394_priv *priv = netdev_priv(dev);
1115 union eth1394_hdr *hdr = (union eth1394_hdr *)buf;
1116 u16 ether_type = 0; /* initialized to clear warning */
1117 int hdr_len;
1118 struct unit_directory *ud = priv->ud_list[NODEID_TO_NODE(srcid)];
1119 struct eth1394_node_info *node_info;
1120
1121 if (!ud) {
1122 struct eth1394_node_ref *node;
1123 node = eth1394_find_node_nodeid(&priv->ip_node_list, srcid);
1124 if (!node) {
1125 HPSB_PRINT(KERN_ERR, "ether1394 rx: sender nodeid "
1126 "lookup failure: " NODE_BUS_FMT,
1127 NODE_BUS_ARGS(priv->host, srcid));
1128 priv->stats.rx_dropped++;
1129 return -1;
1130 }
1131 ud = node->ud;
1132
1133 priv->ud_list[NODEID_TO_NODE(srcid)] = ud;
1134 }
1135
1136 node_info = (struct eth1394_node_info*)ud->device.driver_data;
1137
1138 /* First, did we receive a fragmented or unfragmented datagram? */
1139 hdr->words.word1 = ntohs(hdr->words.word1);
1140
1141 hdr_len = hdr_type_len[hdr->common.lf];
1142
1143 if (hdr->common.lf == ETH1394_HDR_LF_UF) {
1144 /* An unfragmented datagram has been received by the ieee1394
1145 * bus. Build an skbuff around it so we can pass it to the
1146 * high level network layer. */
1147
1148 skb = dev_alloc_skb(len + dev->hard_header_len + 15);
1149 if (!skb) {
1150 HPSB_PRINT (KERN_ERR, "ether1394 rx: low on mem\n");
1151 priv->stats.rx_dropped++;
1152 return -1;
1153 }
1154 skb_reserve(skb, (dev->hard_header_len + 15) & ~15);
1155 memcpy(skb_put(skb, len - hdr_len), buf + hdr_len, len - hdr_len);
1156 ether_type = hdr->uf.ether_type;
1157 } else {
1158 /* A datagram fragment has been received, now the fun begins. */
1159
1160 struct list_head *pdgl, *lh;
1161 struct partial_datagram *pd;
1162 int fg_off;
1163 int fg_len = len - hdr_len;
1164 int dg_size;
1165 int dgl;
1166 int retval;
1167 struct pdg_list *pdg = &(node_info->pdg);
1168
1169 hdr->words.word3 = ntohs(hdr->words.word3);
1170 /* The 4th header word is reserved so no need to do ntohs() */
1171
1172 if (hdr->common.lf == ETH1394_HDR_LF_FF) {
1173 ether_type = hdr->ff.ether_type;
1174 dgl = hdr->ff.dgl;
1175 dg_size = hdr->ff.dg_size + 1;
1176 fg_off = 0;
1177 } else {
1178 hdr->words.word2 = ntohs(hdr->words.word2);
1179 dgl = hdr->sf.dgl;
1180 dg_size = hdr->sf.dg_size + 1;
1181 fg_off = hdr->sf.fg_off;
1182 }
1183 spin_lock_irqsave(&pdg->lock, flags);
1184
1185 pdgl = &(pdg->list);
1186 lh = find_partial_datagram(pdgl, dgl);
1187
1188 if (lh == NULL) {
1189 while (pdg->sz >= max_partial_datagrams) {
1190 /* remove the oldest */
1191 purge_partial_datagram(pdgl->prev);
1192 pdg->sz--;
1193 }
1194
1195 retval = new_partial_datagram(dev, pdgl, dgl, dg_size,
1196 buf + hdr_len, fg_off,
1197 fg_len);
1198 if (retval < 0) {
1199 spin_unlock_irqrestore(&pdg->lock, flags);
1200 goto bad_proto;
1201 }
1202 pdg->sz++;
1203 lh = find_partial_datagram(pdgl, dgl);
1204 } else {
1205 struct partial_datagram *pd;
1206
1207 pd = list_entry(lh, struct partial_datagram, list);
1208
1209 if (fragment_overlap(&pd->frag_info, fg_off, fg_len)) {
1210 /* Overlapping fragments, obliterate old
1211 * datagram and start new one. */
1212 purge_partial_datagram(lh);
1213 retval = new_partial_datagram(dev, pdgl, dgl,
1214 dg_size,
1215 buf + hdr_len,
1216 fg_off, fg_len);
1217 if (retval < 0) {
1218 pdg->sz--;
1219 spin_unlock_irqrestore(&pdg->lock, flags);
1220 goto bad_proto;
1221 }
1222 } else {
1223 retval = update_partial_datagram(pdgl, lh,
1224 buf + hdr_len,
1225 fg_off, fg_len);
1226 if (retval < 0) {
1227 /* Couldn't save off fragment anyway
1228 * so might as well obliterate the
1229 * datagram now. */
1230 purge_partial_datagram(lh);
1231 pdg->sz--;
1232 spin_unlock_irqrestore(&pdg->lock, flags);
1233 goto bad_proto;
1234 }
1235 } /* fragment overlap */
1236 } /* new datagram or add to existing one */
1237
1238 pd = list_entry(lh, struct partial_datagram, list);
1239
1240 if (hdr->common.lf == ETH1394_HDR_LF_FF) {
1241 pd->ether_type = ether_type;
1242 }
1243
1244 if (is_datagram_complete(lh, dg_size)) {
1245 ether_type = pd->ether_type;
1246 pdg->sz--;
1247 skb = skb_get(pd->skb);
1248 purge_partial_datagram(lh);
1249 spin_unlock_irqrestore(&pdg->lock, flags);
1250 } else {
1251 /* Datagram is not complete, we're done for the
1252 * moment. */
1253 spin_unlock_irqrestore(&pdg->lock, flags);
1254 return 0;
1255 }
1256 } /* unfragmented datagram or fragmented one */
1257
1258 /* Write metadata, and then pass to the receive level */
1259 skb->dev = dev;
1260 skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
1261
1262 /* Parse the encapsulation header. This actually does the job of
1263 * converting to an ethernet frame header, as well as ARP
1264 * conversion if needed. ARP conversion is easier in this
1265 * direction, since we are using ethernet as our backend. */
1266 skb->protocol = ether1394_parse_encap(skb, dev, srcid, destid,
1267 ether_type);
1268
1269
1270 spin_lock_irqsave(&priv->lock, flags);
1271 if (!skb->protocol) {
1272 priv->stats.rx_errors++;
1273 priv->stats.rx_dropped++;
1274 dev_kfree_skb_any(skb);
1275 goto bad_proto;
1276 }
1277
1278 if (netif_rx(skb) == NET_RX_DROP) {
1279 priv->stats.rx_errors++;
1280 priv->stats.rx_dropped++;
1281 goto bad_proto;
1282 }
1283
1284 /* Statistics */
1285 priv->stats.rx_packets++;
1286 priv->stats.rx_bytes += skb->len;
1287
1288bad_proto:
1289 if (netif_queue_stopped(dev))
1290 netif_wake_queue(dev);
1291 spin_unlock_irqrestore(&priv->lock, flags);
1292
1293 dev->last_rx = jiffies;
1294
1295 return 0;
1296}
1297
1298static int ether1394_write(struct hpsb_host *host, int srcid, int destid,
1299 quadlet_t *data, u64 addr, size_t len, u16 flags)
1300{
1301 struct eth1394_host_info *hi;
1302
1303 hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
1304 if (hi == NULL) {
1305 ETH1394_PRINT_G(KERN_ERR, "Could not find net device for host %s\n",
1306 host->driver->name);
1307 return RCODE_ADDRESS_ERROR;
1308 }
1309
1310 if (ether1394_data_handler(hi->dev, srcid, destid, (char*)data, len))
1311 return RCODE_ADDRESS_ERROR;
1312 else
1313 return RCODE_COMPLETE;
1314}
1315
1316static void ether1394_iso(struct hpsb_iso *iso)
1317{
1318 quadlet_t *data;
1319 char *buf;
1320 struct eth1394_host_info *hi;
1321 struct net_device *dev;
1322 struct eth1394_priv *priv;
1323 unsigned int len;
1324 u32 specifier_id;
1325 u16 source_id;
1326 int i;
1327 int nready;
1328
1329 hi = hpsb_get_hostinfo(&eth1394_highlevel, iso->host);
1330 if (hi == NULL) {
1331 ETH1394_PRINT_G(KERN_ERR, "Could not find net device for host %s\n",
1332 iso->host->driver->name);
1333 return;
1334 }
1335
1336 dev = hi->dev;
1337
1338 nready = hpsb_iso_n_ready(iso);
1339 for (i = 0; i < nready; i++) {
1340 struct hpsb_iso_packet_info *info =
1341 &iso->infos[(iso->first_packet + i) % iso->buf_packets];
1342 data = (quadlet_t*) (iso->data_buf.kvirt + info->offset);
1343
1344 /* skip over GASP header */
1345 buf = (char *)data + 8;
1346 len = info->len - 8;
1347
1348 specifier_id = (((be32_to_cpu(data[0]) & 0xffff) << 8) |
1349 ((be32_to_cpu(data[1]) & 0xff000000) >> 24));
1350 source_id = be32_to_cpu(data[0]) >> 16;
1351
1352 priv = netdev_priv(dev);
1353
1354 if (info->channel != (iso->host->csr.broadcast_channel & 0x3f) ||
1355 specifier_id != ETHER1394_GASP_SPECIFIER_ID) {
1356 /* This packet is not for us */
1357 continue;
1358 }
1359 ether1394_data_handler(dev, source_id, LOCAL_BUS | ALL_NODES,
1360 buf, len);
1361 }
1362
1363 hpsb_iso_recv_release_packets(iso, i);
1364
1365 dev->last_rx = jiffies;
1366}
1367
1368/******************************************
1369 * Datagram transmission code
1370 ******************************************/
1371
1372/* Convert a standard ARP packet to 1394 ARP. The first 8 bytes (the entire
1373 * arphdr) are in the same format as the ip1394 header, so they overlap. The rest
1374 * needs to be munged a bit. The remainder of the arphdr is formatted based
1375 * on hwaddr len and ipaddr len. We know what they'll be, so it's easy to
1376 * judge.
1377 *
1378 * Now that the EUI is used for the hardware address all we need to do to make
1379 * this work for 1394 is to insert 2 quadlets that contain max_rec size,
1380 * speed, and unicast FIFO address information between the sender_unique_id
1381 * and the IP addresses.
1382 */
1383static inline void ether1394_arp_to_1394arp(struct sk_buff *skb,
1384 struct net_device *dev)
1385{
1386 struct eth1394_priv *priv = netdev_priv(dev);
1387
1388 struct arphdr *arp = (struct arphdr *)skb->data;
1389 unsigned char *arp_ptr = (unsigned char *)(arp + 1);
1390 struct eth1394_arp *arp1394 = (struct eth1394_arp *)skb->data;
1391
1392 /* Believe it or not, all that needs to happen is that the sender IP gets moved
1393 * and hw_addr_len, max_rec, sspd, fifo_hi and fifo_lo get set. */
1394 arp1394->hw_addr_len = 16;
1395 arp1394->sip = *(u32*)(arp_ptr + ETH1394_ALEN);
1396 arp1394->max_rec = priv->host->csr.max_rec;
1397 arp1394->sspd = priv->host->csr.lnk_spd;
1398 arp1394->fifo_hi = htons (priv->local_fifo >> 32);
1399 arp1394->fifo_lo = htonl (priv->local_fifo & ~0x0);
1400
1401 return;
1402}
1403
1404/* We need to encapsulate the standard header with our own. We use the
1405 * ethernet header's proto for our own. */
1406static inline unsigned int ether1394_encapsulate_prep(unsigned int max_payload,
1407 int proto,
1408 union eth1394_hdr *hdr,
1409 u16 dg_size, u16 dgl)
1410{
1411 unsigned int adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_UF];
1412
1413 /* Does it all fit in one packet? */
1414 if (dg_size <= adj_max_payload) {
1415 hdr->uf.lf = ETH1394_HDR_LF_UF;
1416 hdr->uf.ether_type = proto;
1417 } else {
1418 hdr->ff.lf = ETH1394_HDR_LF_FF;
1419 hdr->ff.ether_type = proto;
1420 hdr->ff.dg_size = dg_size - 1;
1421 hdr->ff.dgl = dgl;
1422 adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_FF];
1423 }
1424 return((dg_size + (adj_max_payload - 1)) / adj_max_payload);
1425}
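/* Worked example of the fragment count returned above (the header sizes
 * are illustrative assumptions; the real values come from hdr_type_len[]):
 * with max_payload = 2048 and an 8-byte fragmented-frame header,
 * adj_max_payload = 2040, so a 3000-byte datagram needs
 * (3000 + 2039) / 2040 = 2 fragments, while a datagram that fits in
 * max_payload minus the unfragmented header goes out as a single
 * ETH1394_HDR_LF_UF packet. */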
1426
1427static inline unsigned int ether1394_encapsulate(struct sk_buff *skb,
1428 unsigned int max_payload,
1429 union eth1394_hdr *hdr)
1430{
1431 union eth1394_hdr *bufhdr;
1432 int ftype = hdr->common.lf;
1433 int hdrsz = hdr_type_len[ftype];
1434 unsigned int adj_max_payload = max_payload - hdrsz;
1435
1436 switch(ftype) {
1437 case ETH1394_HDR_LF_UF:
1438 bufhdr = (union eth1394_hdr *)skb_push(skb, hdrsz);
1439 bufhdr->words.word1 = htons(hdr->words.word1);
1440 bufhdr->words.word2 = hdr->words.word2;
1441 break;
1442
1443 case ETH1394_HDR_LF_FF:
1444 bufhdr = (union eth1394_hdr *)skb_push(skb, hdrsz);
1445 bufhdr->words.word1 = htons(hdr->words.word1);
1446 bufhdr->words.word2 = hdr->words.word2;
1447 bufhdr->words.word3 = htons(hdr->words.word3);
1448 bufhdr->words.word4 = 0;
1449
1450 /* Set frag type here for future interior fragments */
1451 hdr->common.lf = ETH1394_HDR_LF_IF;
1452 hdr->sf.fg_off = 0;
1453 break;
1454
1455 default:
1456 hdr->sf.fg_off += adj_max_payload;
1457 bufhdr = (union eth1394_hdr *)skb_pull(skb, adj_max_payload);
1458 if (max_payload >= skb->len)
1459 hdr->common.lf = ETH1394_HDR_LF_LF;
1460 bufhdr->words.word1 = htons(hdr->words.word1);
1461 bufhdr->words.word2 = htons(hdr->words.word2);
1462 bufhdr->words.word3 = htons(hdr->words.word3);
1463 bufhdr->words.word4 = 0;
1464 }
1465
1466 return min(max_payload, skb->len);
1467}
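/* Note: ether1394_encapsulate() is called once per outgoing fragment.  The
 * first call (from ether1394_tx) pushes the UF or FF header; each later
 * call (driven from ether1394_complete_cb) pulls the previous payload off
 * the skb and writes the next interior/last-fragment header in place, so
 * the skb is consumed front to back as the datagram is transmitted. */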
1468
1469static inline struct hpsb_packet *ether1394_alloc_common_packet(struct hpsb_host *host)
1470{
1471 struct hpsb_packet *p;
1472
1473 p = hpsb_alloc_packet(0);
1474 if (p) {
1475 p->host = host;
1476 p->generation = get_hpsb_generation(host);
1477 p->type = hpsb_async;
1478 }
1479 return p;
1480}
1481
1482static inline int ether1394_prep_write_packet(struct hpsb_packet *p,
1483 struct hpsb_host *host,
1484 nodeid_t node, u64 addr,
1485 void * data, int tx_len)
1486{
1487 p->node_id = node;
1488 p->data = NULL;
1489
1490 p->tcode = TCODE_WRITEB;
1491 p->header[1] = (host->node_id << 16) | (addr >> 32);
1492 p->header[2] = addr & 0xffffffff;
1493
1494 p->header_size = 16;
1495 p->expect_response = 1;
1496
1497 if (hpsb_get_tlabel(p)) {
1498 ETH1394_PRINT_G(KERN_ERR, "No more tlabels left while sending "
1499 "to node " NODE_BUS_FMT "\n", NODE_BUS_ARGS(host, node));
1500 return -1;
1501 }
1502 p->header[0] = (p->node_id << 16) | (p->tlabel << 10)
1503 | (1 << 8) | (TCODE_WRITEB << 4);
1504
1505 p->header[3] = tx_len << 16;
1506 p->data_size = (tx_len + 3) & ~3;
1507 p->data = (quadlet_t*)data;
1508
1509 return 0;
1510}
1511
1512static inline void ether1394_prep_gasp_packet(struct hpsb_packet *p,
1513 struct eth1394_priv *priv,
1514 struct sk_buff *skb, int length)
1515{
1516 p->header_size = 4;
1517 p->tcode = TCODE_STREAM_DATA;
1518
1519 p->header[0] = (length << 16) | (3 << 14)
1520 | ((priv->broadcast_channel) << 8)
1521 | (TCODE_STREAM_DATA << 4);
1522 p->data_size = length;
1523 p->data = ((quadlet_t*)skb->data) - 2;
1524 p->data[0] = cpu_to_be32((priv->host->node_id << 16) |
1525 ETHER1394_GASP_SPECIFIER_ID_HI);
1526 p->data[1] = __constant_cpu_to_be32((ETHER1394_GASP_SPECIFIER_ID_LO << 24) |
1527 ETHER1394_GASP_VERSION);
1528
1529 /* Setting the node id to ALL_NODES (not LOCAL_BUS | ALL_NODES)
1530 * prevents hpsb_send_packet() from setting the speed to an arbitrary
1531 * value based on packet->node_id if packet->node_id is not set. */
1532 p->node_id = ALL_NODES;
1533 p->speed_code = priv->bc_sspd;
1534}
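/* Note: the two quadlets prepended just before skb->data above form the
 * GASP header (source node ID and specifier ID high bits in the first
 * quadlet, specifier ID low byte and version in the second); these are the
 * same 8 bytes that ether1394_iso() skips on receive. */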
1535
1536static inline void ether1394_free_packet(struct hpsb_packet *packet)
1537{
1538 if (packet->tcode != TCODE_STREAM_DATA)
1539 hpsb_free_tlabel(packet);
1540 hpsb_free_packet(packet);
1541}
1542
1543static void ether1394_complete_cb(void *__ptask);
1544
1545static int ether1394_send_packet(struct packet_task *ptask, unsigned int tx_len)
1546{
1547 struct eth1394_priv *priv = ptask->priv;
1548 struct hpsb_packet *packet = NULL;
1549
1550 packet = ether1394_alloc_common_packet(priv->host);
1551 if (!packet)
1552 return -1;
1553
1554 if (ptask->tx_type == ETH1394_GASP) {
1555 int length = tx_len + (2 * sizeof(quadlet_t));
1556
1557 ether1394_prep_gasp_packet(packet, priv, ptask->skb, length);
1558 } else if (ether1394_prep_write_packet(packet, priv->host,
1559 ptask->dest_node,
1560 ptask->addr, ptask->skb->data,
1561 tx_len)) {
1562 hpsb_free_packet(packet);
1563 return -1;
1564 }
1565
1566 ptask->packet = packet;
1567 hpsb_set_packet_complete_task(ptask->packet, ether1394_complete_cb,
1568 ptask);
1569
1570 if (hpsb_send_packet(packet) < 0) {
1571 ether1394_free_packet(packet);
1572 return -1;
1573 }
1574
1575 return 0;
1576}
1577
1578
1579/* Task function to be run when a datagram transmission is completed */
1580static inline void ether1394_dg_complete(struct packet_task *ptask, int fail)
1581{
1582 struct sk_buff *skb = ptask->skb;
1583 struct net_device *dev = skb->dev;
1584 struct eth1394_priv *priv = netdev_priv(dev);
1585 unsigned long flags;
1586
1587 /* Statistics */
1588 spin_lock_irqsave(&priv->lock, flags);
1589 if (fail) {
1590 priv->stats.tx_dropped++;
1591 priv->stats.tx_errors++;
1592 } else {
1593 priv->stats.tx_bytes += skb->len;
1594 priv->stats.tx_packets++;
1595 }
1596 spin_unlock_irqrestore(&priv->lock, flags);
1597
1598 dev_kfree_skb_any(skb);
1599 kmem_cache_free(packet_task_cache, ptask);
1600}
1601
1602
1603/* Callback for when a packet has been sent and the status of that packet is
1604 * known */
1605static void ether1394_complete_cb(void *__ptask)
1606{
1607 struct packet_task *ptask = (struct packet_task *)__ptask;
1608 struct hpsb_packet *packet = ptask->packet;
1609 int fail = 0;
1610
1611 if (packet->tcode != TCODE_STREAM_DATA)
1612 fail = hpsb_packet_success(packet);
1613
1614 ether1394_free_packet(packet);
1615
1616 ptask->outstanding_pkts--;
1617 if (ptask->outstanding_pkts > 0 && !fail) {
1618 int tx_len;
1619
1620 /* Add the encapsulation header to the fragment */
1621 tx_len = ether1394_encapsulate(ptask->skb, ptask->max_payload,
1622 &ptask->hdr);
1623 if (ether1394_send_packet(ptask, tx_len))
1624 ether1394_dg_complete(ptask, 1);
1625 } else {
1626 ether1394_dg_complete(ptask, fail);
1627 }
1628}
1629
1630
1631
1632/* Transmit a packet (called by kernel) */
1633static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
1634{
1635 int kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
1636 struct eth1394hdr *eth;
1637 struct eth1394_priv *priv = netdev_priv(dev);
1638 int proto;
1639 unsigned long flags;
1640 nodeid_t dest_node;
1641 eth1394_tx_type tx_type;
1642 int ret = 0;
1643 unsigned int tx_len;
1644 unsigned int max_payload;
1645 u16 dg_size;
1646 u16 dgl;
1647 struct packet_task *ptask;
1648 struct eth1394_node_ref *node;
1649 struct eth1394_node_info *node_info = NULL;
1650
1651 ptask = kmem_cache_alloc(packet_task_cache, kmflags);
1652 if (ptask == NULL) {
1653 ret = -ENOMEM;
1654 goto fail;
1655 }
1656
1657 /* XXX Ignore this for now. Noticed that when MacOSX is the IRM,
1658 * it does not set our validity bit. We need to compensate for
1659 * that somewhere else, but not in eth1394. */
1660#if 0
1661 if ((priv->host->csr.broadcast_channel & 0xc0000000) != 0xc0000000) {
1662 ret = -EAGAIN;
1663 goto fail;
1664 }
1665#endif
1666
1667 if ((skb = skb_share_check (skb, kmflags)) == NULL) {
1668 ret = -ENOMEM;
1669 goto fail;
1670 }
1671
1672 /* Get rid of the fake eth1394 header, but save a pointer */
1673 eth = (struct eth1394hdr*)skb->data;
1674 skb_pull(skb, ETH1394_HLEN);
1675
1676 proto = eth->h_proto;
1677 dg_size = skb->len;
1678
1679 /* Set the transmission type for the packet. ARP packets and IP
1680 * broadcast packets are sent via GASP. */
1681 if (memcmp(eth->h_dest, dev->broadcast, ETH1394_ALEN) == 0 ||
1682 proto == __constant_htons(ETH_P_ARP) ||
1683 (proto == __constant_htons(ETH_P_IP) &&
1684 IN_MULTICAST(__constant_ntohl(skb->nh.iph->daddr)))) {
1685 tx_type = ETH1394_GASP;
1686 dest_node = LOCAL_BUS | ALL_NODES;
1687 max_payload = priv->bc_maxpayload - ETHER1394_GASP_OVERHEAD;
1688 BUG_ON(max_payload < (512 - ETHER1394_GASP_OVERHEAD));
1689 dgl = priv->bc_dgl;
1690 if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
1691 priv->bc_dgl++;
1692 } else {
1693 node = eth1394_find_node_guid(&priv->ip_node_list,
1694 be64_to_cpu(*(u64*)eth->h_dest));
1695 if (!node) {
1696 ret = -EAGAIN;
1697 goto fail;
1698 }
1699 node_info = (struct eth1394_node_info*)node->ud->device.driver_data;
1700 if (node_info->fifo == ETHER1394_INVALID_ADDR) {
1701 ret = -EAGAIN;
1702 goto fail;
1703 }
1704
1705 dest_node = node->ud->ne->nodeid;
1706 max_payload = node_info->maxpayload;
1707 BUG_ON(max_payload < (512 - ETHER1394_GASP_OVERHEAD));
1708
1709 dgl = node_info->dgl;
1710 if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
1711 node_info->dgl++;
1712 tx_type = ETH1394_WRREQ;
1713 }
1714
1715 /* If this is an ARP packet, convert it */
1716 if (proto == __constant_htons (ETH_P_ARP))
1717 ether1394_arp_to_1394arp (skb, dev);
1718
1719 ptask->hdr.words.word1 = 0;
1720 ptask->hdr.words.word2 = 0;
1721 ptask->hdr.words.word3 = 0;
1722 ptask->hdr.words.word4 = 0;
1723 ptask->skb = skb;
1724 ptask->priv = priv;
1725 ptask->tx_type = tx_type;
1726
1727 if (tx_type != ETH1394_GASP) {
1728 u64 addr;
1729
1730 spin_lock_irqsave(&priv->lock, flags);
1731 addr = node_info->fifo;
1732 spin_unlock_irqrestore(&priv->lock, flags);
1733
1734 ptask->addr = addr;
1735 ptask->dest_node = dest_node;
1736 }
1737
1738 ptask->tx_type = tx_type;
1739 ptask->max_payload = max_payload;
1740 ptask->outstanding_pkts = ether1394_encapsulate_prep(max_payload, proto,
1741 &ptask->hdr, dg_size,
1742 dgl);
1743
1744 /* Add the encapsulation header to the fragment */
1745 tx_len = ether1394_encapsulate(skb, max_payload, &ptask->hdr);
1746 dev->trans_start = jiffies;
1747 if (ether1394_send_packet(ptask, tx_len))
1748 goto fail;
1749
1750 netif_wake_queue(dev);
1751 return 0;
1752fail:
1753 if (ptask)
1754 kmem_cache_free(packet_task_cache, ptask);
1755
1756 if (skb != NULL)
1757 dev_kfree_skb(skb);
1758
1759 spin_lock_irqsave (&priv->lock, flags);
1760 priv->stats.tx_dropped++;
1761 priv->stats.tx_errors++;
1762 spin_unlock_irqrestore (&priv->lock, flags);
1763
1764 if (netif_queue_stopped(dev))
1765 netif_wake_queue(dev);
1766
1767 return 0; /* returning non-zero causes serious problems */
1768}
1769
1770static void ether1394_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1771{
1772 strcpy (info->driver, driver_name);
1773 strcpy (info->version, "$Rev: 1247 $");
1774 /* FIXME XXX provide sane businfo */
1775 strcpy (info->bus_info, "ieee1394");
1776}
1777
1778static struct ethtool_ops ethtool_ops = {
1779 .get_drvinfo = ether1394_get_drvinfo
1780};
1781
1782static int __init ether1394_init_module (void)
1783{
1784 packet_task_cache = kmem_cache_create("packet_task", sizeof(struct packet_task),
1785 0, 0, NULL, NULL);
1786
1787 /* Register ourselves as a highlevel driver */
1788 hpsb_register_highlevel(&eth1394_highlevel);
1789
1790 return hpsb_register_protocol(&eth1394_proto_driver);
1791}
1792
1793static void __exit ether1394_exit_module (void)
1794{
1795 hpsb_unregister_protocol(&eth1394_proto_driver);
1796 hpsb_unregister_highlevel(&eth1394_highlevel);
1797 kmem_cache_destroy(packet_task_cache);
1798}
1799
1800module_init(ether1394_init_module);
1801module_exit(ether1394_exit_module);
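The two quadlets that ether1394_prep_gasp_packet() prepends to the skb are the GASP header used for IPv4 over 1394 (RFC 2734): the sender's node ID together with the upper bits of the IANA specifier ID, then the low specifier-ID byte and the version. A minimal stand-alone sketch of that packing; the 0xffc0 node ID is invented for illustration and the ETHER1394_GASP_* values are restated from eth1394.h below.

#include <stdint.h>
#include <stdio.h>

#define ETHER1394_GASP_SPECIFIER_ID     0x00005E
#define ETHER1394_GASP_SPECIFIER_ID_HI  ((ETHER1394_GASP_SPECIFIER_ID >> 8) & 0xffff)
#define ETHER1394_GASP_SPECIFIER_ID_LO  (ETHER1394_GASP_SPECIFIER_ID & 0xff)
#define ETHER1394_GASP_VERSION          1

int main(void)
{
        uint32_t node_id = 0xffc0;      /* invented local node ID */
        uint32_t q0, q1;

        /* Same packing as ether1394_prep_gasp_packet(), minus cpu_to_be32() */
        q0 = (node_id << 16) | ETHER1394_GASP_SPECIFIER_ID_HI;
        q1 = ((uint32_t)ETHER1394_GASP_SPECIFIER_ID_LO << 24) | ETHER1394_GASP_VERSION;

        printf("GASP header: %08x %08x\n", q0, q1);     /* ffc00000 5e000001 */
        return 0;
}

The driver then byte-swaps both words with cpu_to_be32() before handing the packet to hpsb_send_packet().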
diff --git a/drivers/ieee1394/eth1394.h b/drivers/ieee1394/eth1394.h
new file mode 100644
index 000000000000..ed8f1c4b7fd8
--- /dev/null
+++ b/drivers/ieee1394/eth1394.h
@@ -0,0 +1,236 @@
1/*
2 * eth1394.h -- Ethernet driver for Linux IEEE-1394 Subsystem
3 *
4 * Copyright (C) 2000 Bonin Franck <boninf@free.fr>
5 * (C) 2001 Ben Collins <bcollins@debian.org>
6 *
7 * Mainly based on work by Emanuel Pirker and Andreas E. Bombe
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software Foundation,
21 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#ifndef __ETH1394_H
25#define __ETH1394_H
26
27#include <linux/netdevice.h>
28
29#include "ieee1394.h"
30
31/* Register for incoming packets. This is 4096 bytes, which supports up to
32 * S3200 (per Table 16-3 of IEEE 1394b-2002). */
33#define ETHER1394_REGION_ADDR_LEN 4096
34
35#define ETHER1394_INVALID_ADDR ~0ULL
36
37/* GASP identifier numbers for IPv4 over IEEE 1394 */
38#define ETHER1394_GASP_SPECIFIER_ID 0x00005E
39#define ETHER1394_GASP_SPECIFIER_ID_HI ((ETHER1394_GASP_SPECIFIER_ID >> 8) & 0xffff)
40#define ETHER1394_GASP_SPECIFIER_ID_LO (ETHER1394_GASP_SPECIFIER_ID & 0xff)
41#define ETHER1394_GASP_VERSION 1
42
43#define ETHER1394_GASP_OVERHEAD (2 * sizeof(quadlet_t)) /* GASP header overhead */
44
45#define ETHER1394_GASP_BUFFERS 16
46
47/* Node set == 64 */
48#define NODE_SET (ALL_NODES + 1)
49
50enum eth1394_bc_states { ETHER1394_BC_ERROR,
51 ETHER1394_BC_RUNNING,
52 ETHER1394_BC_STOPPED };
53
54
55/* Private structure for our ethernet driver */
56struct eth1394_priv {
57 struct net_device_stats stats; /* Device stats */
58 struct hpsb_host *host; /* The card for this dev */
59 u16 bc_maxpayload; /* Max broadcast payload */
60 u8 bc_sspd; /* Max broadcast speed */
61 u64 local_fifo; /* Local FIFO Address */
62 spinlock_t lock; /* Private lock */
63 int broadcast_channel; /* Async stream Broadcast Channel */
64 enum eth1394_bc_states bc_state; /* broadcast channel state */
65 struct hpsb_iso *iso; /* Async stream recv handle */
66 int bc_dgl; /* Outgoing broadcast datagram label */
67 struct list_head ip_node_list; /* List of IP capable nodes */
68 struct unit_directory *ud_list[ALL_NODES]; /* Cached unit dir list */
69};
70
71
72/* Define a fake hardware header format for the networking core. Note that
73 * header size cannot exceed 16 bytes as that is the size of the header cache.
74 * Also, we do not need the source address in the header so we omit it and
75 * keep the header to under 16 bytes */
76#define ETH1394_ALEN (8)
77#define ETH1394_HLEN (10)
78
79struct eth1394hdr {
80 unsigned char h_dest[ETH1394_ALEN]; /* destination eth1394 addr */
81 unsigned short h_proto; /* packet type ID field */
82} __attribute__((packed));
83
84#ifdef __KERNEL__
85#include <linux/skbuff.h>
86
87static inline struct eth1394hdr *eth1394_hdr(const struct sk_buff *skb)
88{
89 return (struct eth1394hdr *)skb->mac.raw;
90}
91#endif
92
93typedef enum {ETH1394_GASP, ETH1394_WRREQ} eth1394_tx_type;
94
95/* IP1394 headers */
96#include <asm/byteorder.h>
97
98/* Unfragmented */
99#if defined __BIG_ENDIAN_BITFIELD
100struct eth1394_uf_hdr {
101 u16 lf:2;
102 u16 res:14;
103 u16 ether_type; /* Ethernet packet type */
104} __attribute__((packed));
105#elif defined __LITTLE_ENDIAN_BITFIELD
106struct eth1394_uf_hdr {
107 u16 res:14;
108 u16 lf:2;
109 u16 ether_type;
110} __attribute__((packed));
111#else
112#error Unknown bit field type
113#endif
114
115/* First fragment */
116#if defined __BIG_ENDIAN_BITFIELD
117struct eth1394_ff_hdr {
118 u16 lf:2;
119 u16 res1:2;
120 u16 dg_size:12; /* Datagram size */
121 u16 ether_type; /* Ethernet packet type */
122 u16 dgl; /* Datagram label */
123 u16 res2;
124} __attribute__((packed));
125#elif defined __LITTLE_ENDIAN_BITFIELD
126struct eth1394_ff_hdr {
127 u16 dg_size:12;
128 u16 res1:2;
129 u16 lf:2;
130 u16 ether_type;
131 u16 dgl;
132 u16 res2;
133} __attribute__((packed));
134#else
135#error Unknown bit field type
136#endif
137
138/* XXX: Subsequent fragments, including last */
139#if defined __BIG_ENDIAN_BITFIELD
140struct eth1394_sf_hdr {
141 u16 lf:2;
142 u16 res1:2;
143 u16 dg_size:12; /* Datagram size */
144 u16 res2:4;
145 u16 fg_off:12; /* Fragment offset */
146 u16 dgl; /* Datagram label */
147 u16 res3;
148} __attribute__((packed));
149#elif defined __LITTLE_ENDIAN_BITFIELD
150struct eth1394_sf_hdr {
151 u16 dg_size:12;
152 u16 res1:2;
153 u16 lf:2;
154 u16 fg_off:12;
155 u16 res2:4;
156 u16 dgl;
157 u16 res3;
158} __attribute__((packed));
159#else
160#error Unknown bit field type
161#endif
162
163#if defined __BIG_ENDIAN_BITFIELD
164struct eth1394_common_hdr {
165 u16 lf:2;
166 u16 pad1:14;
167} __attribute__((packed));
168#elif defined __LITTLE_ENDIAN_BITFIELD
169struct eth1394_common_hdr {
170 u16 pad1:14;
171 u16 lf:2;
172} __attribute__((packed));
173#else
174#error Unknown bit field type
175#endif
176
177struct eth1394_hdr_words {
178 u16 word1;
179 u16 word2;
180 u16 word3;
181 u16 word4;
182};
183
184union eth1394_hdr {
185 struct eth1394_common_hdr common;
186 struct eth1394_uf_hdr uf;
187 struct eth1394_ff_hdr ff;
188 struct eth1394_sf_hdr sf;
189 struct eth1394_hdr_words words;
190};
191
192/* End of IP1394 headers */
193
194/* Fragment types */
195#define ETH1394_HDR_LF_UF 0 /* unfragmented */
196#define ETH1394_HDR_LF_FF 1 /* first fragment */
197#define ETH1394_HDR_LF_LF 2 /* last fragment */
198#define ETH1394_HDR_LF_IF 3 /* interior fragment */
199
200#define IP1394_HW_ADDR_LEN 16 /* As per RFC 2734 */
201
202/* Our arp packet (ARPHRD_IEEE1394) */
203struct eth1394_arp {
204 u16 hw_type; /* 0x0018 */
205 u16 proto_type; /* 0x0806 */
206 u8 hw_addr_len; /* 16 */
207 u8 ip_addr_len; /* 4 */
208 u16 opcode; /* ARP Opcode */
209 /* Above is exactly the same format as struct arphdr */
210
211 u64 s_uniq_id; /* Sender's 64bit EUI */
212 u8 max_rec; /* Sender's max packet size */
213 u8 sspd; /* Sender's max speed */
214 u16 fifo_hi; /* hi 16bits of sender's FIFO addr */
215 u32 fifo_lo; /* lo 32bits of sender's FIFO addr */
216 u32 sip; /* Sender's IP Address */
217 u32 tip; /* IP Address of requested hw addr */
218};
219
220/* Network timeout */
221#define ETHER1394_TIMEOUT 100000
222
223/* This is our task struct. It's used for the packet complete callback. */
224struct packet_task {
225 struct sk_buff *skb;
226 int outstanding_pkts;
227 eth1394_tx_type tx_type;
228 int max_payload;
229 struct hpsb_packet *packet;
230 struct eth1394_priv *priv;
231 union eth1394_hdr hdr;
232 u64 addr;
233 u16 dest_node;
234};
235
236#endif /* __ETH1394_H */
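The bit layout of the encapsulation headers above is easiest to see as plain quadlets: the lf code sits in the top two bits, followed (for fragments) by dg_size and then the EtherType. A small user-space sketch of that packing; the helper names, the 0x0800 EtherType and the 1500-byte datagram size are example values only, and any size-encoding convention applied by ether1394_encapsulate_prep() is outside this sketch.

#include <stdint.h>
#include <stdio.h>

#define ETH1394_HDR_LF_UF 0     /* unfragmented */
#define ETH1394_HDR_LF_FF 1     /* first fragment */

/* struct eth1394_uf_hdr: lf:2, res:14, ether_type:16  ->  one quadlet */
static uint32_t uf_word1(uint16_t ether_type)
{
        return ((uint32_t)ETH1394_HDR_LF_UF << 30) | ether_type;
}

/* struct eth1394_ff_hdr, first quadlet: lf:2, res1:2, dg_size:12, ether_type:16 */
static uint32_t ff_word1(uint16_t dg_size, uint16_t ether_type)
{
        return ((uint32_t)ETH1394_HDR_LF_FF << 30)
               | ((uint32_t)(dg_size & 0xfff) << 16) | ether_type;
}

int main(void)
{
        printf("uf: %08x\n", uf_word1(0x0800));          /* 00000800 */
        printf("ff: %08x\n", ff_word1(1500, 0x0800));    /* 45dc0800 */
        return 0;
}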
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
new file mode 100644
index 000000000000..997e1bf6297f
--- /dev/null
+++ b/drivers/ieee1394/highlevel.c
@@ -0,0 +1,704 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * Copyright (C) 1999 Andreas E. Bombe
5 *
6 * This code is licensed under the GPL. See the file COPYING in the root
7 * directory of the kernel sources for details.
8 *
9 *
10 * Contributions:
11 *
12 * Christian Toegel <christian.toegel@gmx.at>
13 * unregister address space
14 *
15 * Manfred Weihs <weihs@ict.tuwien.ac.at>
16 * unregister address space
17 *
18 */
19
20#include <linux/config.h>
21#include <linux/slab.h>
22#include <linux/list.h>
23#include <linux/bitops.h>
24
25#include "ieee1394.h"
26#include "ieee1394_types.h"
27#include "hosts.h"
28#include "ieee1394_core.h"
29#include "highlevel.h"
30#include "nodemgr.h"
31
32
33struct hl_host_info {
34 struct list_head list;
35 struct hpsb_host *host;
36 size_t size;
37 unsigned long key;
38 void *data;
39};
40
41
42static LIST_HEAD(hl_drivers);
43static DECLARE_RWSEM(hl_drivers_sem);
44
45static LIST_HEAD(hl_irqs);
46static DEFINE_RWLOCK(hl_irqs_lock);
47
48static DEFINE_RWLOCK(addr_space_lock);
49
50/* addr_space list will have zero and max already included as bounds */
51static struct hpsb_address_ops dummy_ops = { NULL, NULL, NULL, NULL };
52static struct hpsb_address_serve dummy_zero_addr, dummy_max_addr;
53
54
55static struct hl_host_info *hl_get_hostinfo(struct hpsb_highlevel *hl,
56 struct hpsb_host *host)
57{
58 struct hl_host_info *hi = NULL;
59
60 if (!hl || !host)
61 return NULL;
62
63 read_lock(&hl->host_info_lock);
64 list_for_each_entry(hi, &hl->host_info_list, list) {
65 if (hi->host == host) {
66 read_unlock(&hl->host_info_lock);
67 return hi;
68 }
69 }
70 read_unlock(&hl->host_info_lock);
71
72 return NULL;
73}
74
75
76/* Returns a per host/driver data structure that was previously stored by
77 * hpsb_create_hostinfo. */
78void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
79{
80 struct hl_host_info *hi = hl_get_hostinfo(hl, host);
81
82 if (hi)
83 return hi->data;
84
85 return NULL;
86}
87
88
89/* If size is zero, then the return here is only valid for error checking */
90void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
91 size_t data_size)
92{
93 struct hl_host_info *hi;
94 void *data;
95 unsigned long flags;
96
97 hi = hl_get_hostinfo(hl, host);
98 if (hi) {
99 HPSB_ERR("%s called hpsb_create_hostinfo when hostinfo already exists",
100 hl->name);
101 return NULL;
102 }
103
104 hi = kmalloc(sizeof(*hi) + data_size, GFP_ATOMIC);
105 if (!hi)
106 return NULL;
107
108 memset(hi, 0, sizeof(*hi) + data_size);
109
110 if (data_size) {
111 data = hi->data = hi + 1;
112 hi->size = data_size;
113 } else
114 data = hi;
115
116 hi->host = host;
117
118 write_lock_irqsave(&hl->host_info_lock, flags);
119 list_add_tail(&hi->list, &hl->host_info_list);
120 write_unlock_irqrestore(&hl->host_info_lock, flags);
121
122 return data;
123}
124
125
126int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
127 void *data)
128{
129 struct hl_host_info *hi;
130
131 hi = hl_get_hostinfo(hl, host);
132 if (hi) {
133 if (!hi->size && !hi->data) {
134 hi->data = data;
135 return 0;
136 } else
137 HPSB_ERR("%s called hpsb_set_hostinfo when hostinfo already has data",
138 hl->name);
139 } else
140 HPSB_ERR("%s called hpsb_set_hostinfo when no hostinfo exists",
141 hl->name);
142
143 return -EINVAL;
144}
145
146
147void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
148{
149 struct hl_host_info *hi;
150
151 hi = hl_get_hostinfo(hl, host);
152 if (hi) {
153 unsigned long flags;
154 write_lock_irqsave(&hl->host_info_lock, flags);
155 list_del(&hi->list);
156 write_unlock_irqrestore(&hl->host_info_lock, flags);
157 kfree(hi);
158 }
159
160 return;
161}
162
163
164void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host, unsigned long key)
165{
166 struct hl_host_info *hi;
167
168 hi = hl_get_hostinfo(hl, host);
169 if (hi)
170 hi->key = key;
171
172 return;
173}
174
175
176void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key)
177{
178 struct hl_host_info *hi;
179 void *data = NULL;
180
181 if (!hl)
182 return NULL;
183
184 read_lock(&hl->host_info_lock);
185 list_for_each_entry(hi, &hl->host_info_list, list) {
186 if (hi->key == key) {
187 data = hi->data;
188 break;
189 }
190 }
191 read_unlock(&hl->host_info_lock);
192
193 return data;
194}
195
196
197static int highlevel_for_each_host_reg(struct hpsb_host *host, void *__data)
198{
199 struct hpsb_highlevel *hl = __data;
200
201 hl->add_host(host);
202
203 if (host->update_config_rom) {
204 if (hpsb_update_config_rom_image(host) < 0) {
205 HPSB_ERR("Failed to generate Configuration ROM image for host "
206 "%s-%d", hl->name, host->id);
207 }
208 }
209
210 return 0;
211}
212
213void hpsb_register_highlevel(struct hpsb_highlevel *hl)
214{
215 INIT_LIST_HEAD(&hl->addr_list);
216 INIT_LIST_HEAD(&hl->host_info_list);
217
218 rwlock_init(&hl->host_info_lock);
219
220 down_write(&hl_drivers_sem);
221 list_add_tail(&hl->hl_list, &hl_drivers);
222 up_write(&hl_drivers_sem);
223
224 write_lock(&hl_irqs_lock);
225 list_add_tail(&hl->irq_list, &hl_irqs);
226 write_unlock(&hl_irqs_lock);
227
228 if (hl->add_host)
229 nodemgr_for_each_host(hl, highlevel_for_each_host_reg);
230
231 return;
232}
233
234static void __delete_addr(struct hpsb_address_serve *as)
235{
236 list_del(&as->host_list);
237 list_del(&as->hl_list);
238 kfree(as);
239}
240
241static void __unregister_host(struct hpsb_highlevel *hl, struct hpsb_host *host, int update_cr)
242{
243 unsigned long flags;
244 struct list_head *lh, *next;
245 struct hpsb_address_serve *as;
246
247 /* First, let the highlevel driver unreg */
248 if (hl->remove_host)
249 hl->remove_host(host);
250
251 /* Remove any addresses that are matched for this highlevel driver
252 * and this particular host. */
253 write_lock_irqsave(&addr_space_lock, flags);
254 list_for_each_safe (lh, next, &hl->addr_list) {
255 as = list_entry(lh, struct hpsb_address_serve, hl_list);
256
257 if (as->host == host)
258 __delete_addr(as);
259 }
260 write_unlock_irqrestore(&addr_space_lock, flags);
261
262 /* Now update the config-rom to reflect anything removed by the
263 * highlevel driver. */
264 if (update_cr && host->update_config_rom) {
265 if (hpsb_update_config_rom_image(host) < 0) {
266 HPSB_ERR("Failed to generate Configuration ROM image for host "
267 "%s-%d", hl->name, host->id);
268 }
269 }
270
271 /* And finally, remove all the host info associated between these
272 * two. */
273 hpsb_destroy_hostinfo(hl, host);
274}
275
276static int highlevel_for_each_host_unreg(struct hpsb_host *host, void *__data)
277{
278 struct hpsb_highlevel *hl = __data;
279
280 __unregister_host(hl, host, 1);
281
282 return 0;
283}
284
285void hpsb_unregister_highlevel(struct hpsb_highlevel *hl)
286{
287 write_lock(&hl_irqs_lock);
288 list_del(&hl->irq_list);
289 write_unlock(&hl_irqs_lock);
290
291 down_write(&hl_drivers_sem);
292 list_del(&hl->hl_list);
293 up_write(&hl_drivers_sem);
294
295 nodemgr_for_each_host(hl, highlevel_for_each_host_unreg);
296}
297
298u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
299 struct hpsb_host *host,
300 struct hpsb_address_ops *ops,
301 u64 size, u64 alignment,
302 u64 start, u64 end)
303{
304 struct hpsb_address_serve *as, *a1, *a2;
305 struct list_head *entry;
306 u64 retval = ~0ULL;
307 unsigned long flags;
308 u64 align_mask = ~(alignment - 1);
309
310 if ((alignment & 3) || (alignment > 0x800000000000ULL) ||
311 ((hweight32(alignment >> 32) +
312 hweight32(alignment & 0xffffffff) != 1))) {
313 HPSB_ERR("%s called with invalid alignment: 0x%016llx",
314 __FUNCTION__, (unsigned long long)alignment);
315 return retval;
316 }
317
318 if (start == ~0ULL && end == ~0ULL) {
319 start = CSR1212_ALL_SPACE_BASE + 0xffff00000000ULL; /* ohci1394.c limit */
320 end = CSR1212_ALL_SPACE_END;
321 }
322
323 if (((start|end) & ~align_mask) || (start >= end) || (end > 0x1000000000000ULL)) {
324 HPSB_ERR("%s called with invalid addresses (start = %012Lx end = %012Lx)",
325 __FUNCTION__, (unsigned long long)start, (unsigned long long)end);
326 return retval;
327 }
328
329 as = (struct hpsb_address_serve *)
330 kmalloc(sizeof(struct hpsb_address_serve), GFP_KERNEL);
331 if (as == NULL) {
332 return retval;
333 }
334
335 INIT_LIST_HEAD(&as->host_list);
336 INIT_LIST_HEAD(&as->hl_list);
337 as->op = ops;
338 as->host = host;
339
340 write_lock_irqsave(&addr_space_lock, flags);
341
342 list_for_each(entry, &host->addr_space) {
343 u64 a1sa, a1ea;
344 u64 a2sa, a2ea;
345
346 a1 = list_entry(entry, struct hpsb_address_serve, host_list);
347 a2 = list_entry(entry->next, struct hpsb_address_serve, host_list);
348
349 a1sa = a1->start & align_mask;
350 a1ea = (a1->end + alignment -1) & align_mask;
351 a2sa = a2->start & align_mask;
352 a2ea = (a2->end + alignment -1) & align_mask;
353
354 if ((a2sa - a1ea >= size) && (a2sa - start >= size) && (a2sa > start)) {
355 as->start = max(start, a1ea);
356 as->end = as->start + size;
357 list_add(&as->host_list, entry);
358 list_add_tail(&as->hl_list, &hl->addr_list);
359 retval = as->start;
360 break;
361 }
362 }
363
364 write_unlock_irqrestore(&addr_space_lock, flags);
365
366 if (retval == ~0ULL) {
367 kfree(as);
368 }
369
370 return retval;
371}
372
373int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
374 struct hpsb_address_ops *ops, u64 start, u64 end)
375{
376 struct hpsb_address_serve *as;
377 struct list_head *lh;
378 int retval = 0;
379 unsigned long flags;
380
381 if (((start|end) & 3) || (start >= end) || (end > 0x1000000000000ULL)) {
382 HPSB_ERR("%s called with invalid addresses", __FUNCTION__);
383 return 0;
384 }
385
386 as = (struct hpsb_address_serve *)
387 kmalloc(sizeof(struct hpsb_address_serve), GFP_ATOMIC);
388 if (as == NULL) {
389 return 0;
390 }
391
392 INIT_LIST_HEAD(&as->host_list);
393 INIT_LIST_HEAD(&as->hl_list);
394 as->op = ops;
395 as->start = start;
396 as->end = end;
397 as->host = host;
398
399 write_lock_irqsave(&addr_space_lock, flags);
400
401 list_for_each(lh, &host->addr_space) {
402 struct hpsb_address_serve *as_this =
403 list_entry(lh, struct hpsb_address_serve, host_list);
404 struct hpsb_address_serve *as_next =
405 list_entry(lh->next, struct hpsb_address_serve, host_list);
406
407 if (as_this->end > as->start)
408 break;
409
410 if (as_next->start >= as->end) {
411 list_add(&as->host_list, lh);
412 list_add_tail(&as->hl_list, &hl->addr_list);
413 retval = 1;
414 break;
415 }
416 }
417 write_unlock_irqrestore(&addr_space_lock, flags);
418
419 if (retval == 0)
420 kfree(as);
421
422 return retval;
423}
424
425int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
426 u64 start)
427{
428 int retval = 0;
429 struct hpsb_address_serve *as;
430 struct list_head *lh, *next;
431 unsigned long flags;
432
433 write_lock_irqsave(&addr_space_lock, flags);
434
435 list_for_each_safe (lh, next, &hl->addr_list) {
436 as = list_entry(lh, struct hpsb_address_serve, hl_list);
437 if (as->start == start && as->host == host) {
438 __delete_addr(as);
439 retval = 1;
440 break;
441 }
442 }
443
444 write_unlock_irqrestore(&addr_space_lock, flags);
445
446 return retval;
447}
448
449int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
450 unsigned int channel)
451{
452 if (channel > 63) {
453 HPSB_ERR("%s called with invalid channel", __FUNCTION__);
454 return -EINVAL;
455 }
456
457 if (host->iso_listen_count[channel]++ == 0) {
458 return host->driver->devctl(host, ISO_LISTEN_CHANNEL, channel);
459 }
460
461 return 0;
462}
463
464void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
465 unsigned int channel)
466{
467 if (channel > 63) {
468 HPSB_ERR("%s called with invalid channel", __FUNCTION__);
469 return;
470 }
471
472 if (--host->iso_listen_count[channel] == 0) {
473 host->driver->devctl(host, ISO_UNLISTEN_CHANNEL, channel);
474 }
475}
476
477static void init_hpsb_highlevel(struct hpsb_host *host)
478{
479 INIT_LIST_HEAD(&dummy_zero_addr.host_list);
480 INIT_LIST_HEAD(&dummy_zero_addr.hl_list);
481 INIT_LIST_HEAD(&dummy_max_addr.host_list);
482 INIT_LIST_HEAD(&dummy_max_addr.hl_list);
483
484 dummy_zero_addr.op = dummy_max_addr.op = &dummy_ops;
485
486 dummy_zero_addr.start = dummy_zero_addr.end = 0;
487 dummy_max_addr.start = dummy_max_addr.end = ((u64) 1) << 48;
488
489 list_add_tail(&dummy_zero_addr.host_list, &host->addr_space);
490 list_add_tail(&dummy_max_addr.host_list, &host->addr_space);
491}
492
493void highlevel_add_host(struct hpsb_host *host)
494{
495 struct hpsb_highlevel *hl;
496
497 init_hpsb_highlevel(host);
498
499 down_read(&hl_drivers_sem);
500 list_for_each_entry(hl, &hl_drivers, hl_list) {
501 if (hl->add_host)
502 hl->add_host(host);
503 }
504 up_read(&hl_drivers_sem);
505 if (host->update_config_rom) {
506 if (hpsb_update_config_rom_image(host) < 0)
507 HPSB_ERR("Failed to generate Configuration ROM image for "
508 "host %s-%d", hl->name, host->id);
509 }
510}
511
512void highlevel_remove_host(struct hpsb_host *host)
513{
514 struct hpsb_highlevel *hl;
515
516 down_read(&hl_drivers_sem);
517 list_for_each_entry(hl, &hl_drivers, hl_list)
518 __unregister_host(hl, host, 0);
519 up_read(&hl_drivers_sem);
520}
521
522void highlevel_host_reset(struct hpsb_host *host)
523{
524 struct hpsb_highlevel *hl;
525
526 read_lock(&hl_irqs_lock);
527 list_for_each_entry(hl, &hl_irqs, irq_list) {
528 if (hl->host_reset)
529 hl->host_reset(host);
530 }
531 read_unlock(&hl_irqs_lock);
532}
533
534void highlevel_iso_receive(struct hpsb_host *host, void *data, size_t length)
535{
536 struct hpsb_highlevel *hl;
537 int channel = (((quadlet_t *)data)[0] >> 8) & 0x3f;
538
539 read_lock(&hl_irqs_lock);
540 list_for_each_entry(hl, &hl_irqs, irq_list) {
541 if (hl->iso_receive)
542 hl->iso_receive(host, channel, data, length);
543 }
544 read_unlock(&hl_irqs_lock);
545}
546
547void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
548 void *data, size_t length)
549{
550 struct hpsb_highlevel *hl;
551 int cts = ((quadlet_t *)data)[0] >> 4;
552
553 read_lock(&hl_irqs_lock);
554 list_for_each_entry(hl, &hl_irqs, irq_list) {
555 if (hl->fcp_request)
556 hl->fcp_request(host, nodeid, direction, cts, data,
557 length);
558 }
559 read_unlock(&hl_irqs_lock);
560}
561
562int highlevel_read(struct hpsb_host *host, int nodeid, void *data,
563 u64 addr, unsigned int length, u16 flags)
564{
565 struct hpsb_address_serve *as;
566 unsigned int partlength;
567 int rcode = RCODE_ADDRESS_ERROR;
568
569 read_lock(&addr_space_lock);
570
571 list_for_each_entry(as, &host->addr_space, host_list) {
572 if (as->start > addr)
573 break;
574
575 if (as->end > addr) {
576 partlength = min(as->end - addr, (u64) length);
577
578 if (as->op->read) {
579 rcode = as->op->read(host, nodeid, data,
580 addr, partlength, flags);
581 } else {
582 rcode = RCODE_TYPE_ERROR;
583 }
584
585 data += partlength;
586 length -= partlength;
587 addr += partlength;
588
589 if ((rcode != RCODE_COMPLETE) || !length) {
590 break;
591 }
592 }
593 }
594
595 read_unlock(&addr_space_lock);
596
597 if (length && (rcode == RCODE_COMPLETE)) {
598 rcode = RCODE_ADDRESS_ERROR;
599 }
600
601 return rcode;
602}
603
604int highlevel_write(struct hpsb_host *host, int nodeid, int destid,
605 void *data, u64 addr, unsigned int length, u16 flags)
606{
607 struct hpsb_address_serve *as;
608 unsigned int partlength;
609 int rcode = RCODE_ADDRESS_ERROR;
610
611 read_lock(&addr_space_lock);
612
613 list_for_each_entry(as, &host->addr_space, host_list) {
614 if (as->start > addr)
615 break;
616
617 if (as->end > addr) {
618 partlength = min(as->end - addr, (u64) length);
619
620 if (as->op->write) {
621 rcode = as->op->write(host, nodeid, destid,
622 data, addr, partlength, flags);
623 } else {
624 rcode = RCODE_TYPE_ERROR;
625 }
626
627 data += partlength;
628 length -= partlength;
629 addr += partlength;
630
631 if ((rcode != RCODE_COMPLETE) || !length) {
632 break;
633 }
634 }
635 }
636
637 read_unlock(&addr_space_lock);
638
639 if (length && (rcode == RCODE_COMPLETE)) {
640 rcode = RCODE_ADDRESS_ERROR;
641 }
642
643 return rcode;
644}
645
646
647int highlevel_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
648 u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode, u16 flags)
649{
650 struct hpsb_address_serve *as;
651 int rcode = RCODE_ADDRESS_ERROR;
652
653 read_lock(&addr_space_lock);
654
655 list_for_each_entry(as, &host->addr_space, host_list) {
656 if (as->start > addr)
657 break;
658
659 if (as->end > addr) {
660 if (as->op->lock) {
661 rcode = as->op->lock(host, nodeid, store, addr,
662 data, arg, ext_tcode, flags);
663 } else {
664 rcode = RCODE_TYPE_ERROR;
665 }
666
667 break;
668 }
669 }
670
671 read_unlock(&addr_space_lock);
672
673 return rcode;
674}
675
676int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store,
677 u64 addr, octlet_t data, octlet_t arg, int ext_tcode, u16 flags)
678{
679 struct hpsb_address_serve *as;
680 int rcode = RCODE_ADDRESS_ERROR;
681
682 read_lock(&addr_space_lock);
683
684 list_for_each_entry(as, &host->addr_space, host_list) {
685 if (as->start > addr)
686 break;
687
688 if (as->end > addr) {
689 if (as->op->lock64) {
690 rcode = as->op->lock64(host, nodeid, store,
691 addr, data, arg,
692 ext_tcode, flags);
693 } else {
694 rcode = RCODE_TYPE_ERROR;
695 }
696
697 break;
698 }
699 }
700
701 read_unlock(&addr_space_lock);
702
703 return rcode;
704}
diff --git a/drivers/ieee1394/highlevel.h b/drivers/ieee1394/highlevel.h
new file mode 100644
index 000000000000..e119fb87e5b5
--- /dev/null
+++ b/drivers/ieee1394/highlevel.h
@@ -0,0 +1,190 @@
1
2#ifndef IEEE1394_HIGHLEVEL_H
3#define IEEE1394_HIGHLEVEL_H
4
5
6struct hpsb_address_serve {
7 struct list_head host_list; /* per host list */
8
9 struct list_head hl_list; /* hpsb_highlevel list */
10
11 struct hpsb_address_ops *op;
12
13 struct hpsb_host *host;
14
15 /* first address handled and first address behind, quadlet aligned */
16 u64 start, end;
17};
18
19
20/*
21 * The above structs are internal to highlevel driver handling. Only the
22 * following structures are of interest to actual highlevel drivers.
23 */
24
25struct hpsb_highlevel {
26 struct module *owner;
27 const char *name;
28
29 /* Any of the following pointers can legally be NULL, except for
30 * iso_receive which can only be NULL when you don't request
31 * channels. */
32
33 /* New host initialized. Will also be called during
34 * hpsb_register_highlevel for all hosts already installed. */
35 void (*add_host) (struct hpsb_host *host);
36
37 /* Host about to be removed. Will also be called during
38 * hpsb_unregister_highlevel once for each host. */
39 void (*remove_host) (struct hpsb_host *host);
40
41 /* Host experienced bus reset with possible configuration changes.
42 * Note that this one may occur during interrupt/bottom half handling.
43 * You cannot expect to be able to do stock hpsb_reads. */
44 void (*host_reset) (struct hpsb_host *host);
45
46 /* An isochronous packet was received. Channel contains the channel
47 * number for your convenience; it is also contained in the included
48 * packet header (first quadlet, CRCs are missing). You may get called
49 * for channel/host combinations you did not request. */
50 void (*iso_receive) (struct hpsb_host *host, int channel,
51 quadlet_t *data, size_t length);
52
53 /* A write request was received on either the FCP_COMMAND (direction =
54 * 0) or the FCP_RESPONSE (direction = 1) register. The cts arg
55 * contains the cts field (first byte of data). */
56 void (*fcp_request) (struct hpsb_host *host, int nodeid, int direction,
57 int cts, u8 *data, size_t length);
58
59 /* These are initialized by the subsystem when the
60 * hpsb_highlevel is registered. */
61 struct list_head hl_list;
62 struct list_head irq_list;
63 struct list_head addr_list;
64
65 struct list_head host_info_list;
66 rwlock_t host_info_lock;
67};
68
69struct hpsb_address_ops {
70 /*
71 * Null function pointers will make the respective operation complete
72 * with RCODE_TYPE_ERROR. This makes read-only registers easy to
73 * implement (just leave everything but read NULL).
74 *
75 * All functions shall return appropriate IEEE 1394 rcodes.
76 */
77
78 /* These functions have to implement block reads for themselves. */
79 /* These functions either return a response code or a negative number.
80 In the first case a response will be generated; in the latter case, no
81 response will be sent and the driver that handled the request will send
82 the response itself.
83 */
84 int (*read) (struct hpsb_host *host, int nodeid, quadlet_t *buffer,
85 u64 addr, size_t length, u16 flags);
86 int (*write) (struct hpsb_host *host, int nodeid, int destid,
87 quadlet_t *data, u64 addr, size_t length, u16 flags);
88
89 /* Lock transactions: write results of ext_tcode operation into
90 * *store. */
91 int (*lock) (struct hpsb_host *host, int nodeid, quadlet_t *store,
92 u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode, u16 flags);
93 int (*lock64) (struct hpsb_host *host, int nodeid, octlet_t *store,
94 u64 addr, octlet_t data, octlet_t arg, int ext_tcode, u16 flags);
95};
96
97
98void highlevel_add_host(struct hpsb_host *host);
99void highlevel_remove_host(struct hpsb_host *host);
100void highlevel_host_reset(struct hpsb_host *host);
101
102
103/* These functions are called to handle transactions; they are called when
104 a packet arrives. The flags argument contains the second word of the first
105 header quadlet of the incoming packet (containing transaction label, retry
106 code, transaction code and priority). These functions either return a
107 response code or a negative number. In the first case a response will be
108 generated; in the latter case, no response will be sent and the driver
109 that handled the request will send the response itself.
110*/
111int highlevel_read(struct hpsb_host *host, int nodeid, void *data,
112 u64 addr, unsigned int length, u16 flags);
113int highlevel_write(struct hpsb_host *host, int nodeid, int destid,
114 void *data, u64 addr, unsigned int length, u16 flags);
115int highlevel_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
116 u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode, u16 flags);
117int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store,
118 u64 addr, octlet_t data, octlet_t arg, int ext_tcode, u16 flags);
119
120void highlevel_iso_receive(struct hpsb_host *host, void *data,
121 size_t length);
122void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
123 void *data, size_t length);
124
125
126/*
127 * Register highlevel driver. The name pointer has to stay valid at all times
128 * because the string is not copied.
129 */
130void hpsb_register_highlevel(struct hpsb_highlevel *hl);
131void hpsb_unregister_highlevel(struct hpsb_highlevel *hl);
132
133/*
134 * Register handlers for host address spaces. Start and end are 48 bit pointers
135 * and have to be quadlet aligned (end points to the first address behind the
136 * handled addresses. This function can be called multiple times for a single
137 * hpsb_highlevel to implement sparse register sets. The requested region must
138 * not overlap any previously allocated region, otherwise registering will fail.
139 *
140 * It returns true for successful allocation. There is no unregister function;
141 * all address spaces are deallocated together with the hpsb_highlevel.
142 */
143u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
144 struct hpsb_host *host,
145 struct hpsb_address_ops *ops,
146 u64 size, u64 alignment,
147 u64 start, u64 end);
148int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
149 struct hpsb_address_ops *ops, u64 start, u64 end);
150
151int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
152 u64 start);
153
154/*
155 * Enable or disable receiving a certain isochronous channel through the
156 * iso_receive op.
157 */
158int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
159 unsigned int channel);
160void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
161 unsigned int channel);
162
163
164/* Retrieve a hostinfo pointer bound to this driver/host */
165void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);
166
167/* Allocate a hostinfo pointer of data_size bound to this driver/host */
168void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
169 size_t data_size);
170
171/* Free and remove the hostinfo pointer bound to this driver/host */
172void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);
173
174/* Set an alternate lookup key for the hostinfo bound to this driver/host */
175void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host, unsigned long key);
176
177/* Retrieve the alternate lookup key for the hostinfo bound to this driver/host */
178unsigned long hpsb_get_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host);
179
180/* Retrieve a hostinfo pointer bound to this driver using its alternate key */
181void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key);
182
183/* Set the hostinfo pointer to something useful. Usually follows a call to
184 * hpsb_create_hostinfo, where the size is 0. */
185int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host, void *data);
186
187/* Retrieve hpsb_host using a highlevel handle and a key */
188struct hpsb_host *hpsb_get_host_bykey(struct hpsb_highlevel *hl, unsigned long key);
189
190#endif /* IEEE1394_HIGHLEVEL_H */
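Putting this header's pieces together, a minimal read-only highlevel driver registers an hpsb_highlevel, claims an address range per host from its add_host() hook, and lets hpsb_unregister_highlevel() tear everything down (including the registered address spaces). The sketch below is illustrative only: the module name, the address range and the payload pattern are invented, and error handling is reduced to a printk.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>

#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"
#include "highlevel.h"

/* Arbitrary quadlet-aligned 48-bit range, chosen only for this example. */
#define EXAMPLE_ADDR    0x0000fffff0600000ULL
#define EXAMPLE_LEN     0x100

static struct hpsb_highlevel example_hl;

static int example_read(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
                        u64 addr, size_t length, u16 flags)
{
        memset(buffer, 0xaa, length);   /* answer every read with a pattern */
        return RCODE_COMPLETE;
}

/* write/lock/lock64 left NULL: those requests complete with RCODE_TYPE_ERROR */
static struct hpsb_address_ops example_ops = {
        .read = example_read,
};

static void example_add_host(struct hpsb_host *host)
{
        if (!hpsb_register_addrspace(&example_hl, host, &example_ops,
                                     EXAMPLE_ADDR, EXAMPLE_ADDR + EXAMPLE_LEN))
                printk(KERN_ERR "example1394: could not claim address range\n");
}

static struct hpsb_highlevel example_hl = {
        .name     = "example1394",
        .add_host = example_add_host,   /* also run for hosts already present */
};

static int __init example_init(void)
{
        hpsb_register_highlevel(&example_hl);
        return 0;
}

static void __exit example_exit(void)
{
        /* drops the registered address spaces along with the highlevel */
        hpsb_unregister_highlevel(&example_hl);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");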
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
new file mode 100644
index 000000000000..c502c6e9c440
--- /dev/null
+++ b/drivers/ieee1394/hosts.c
@@ -0,0 +1,233 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * Low level (host adapter) management.
5 *
6 * Copyright (C) 1999 Andreas E. Bombe
7 * Copyright (C) 1999 Emanuel Pirker
8 *
9 * This code is licensed under the GPL. See the file COPYING in the root
10 * directory of the kernel sources for details.
11 */
12
13#include <linux/config.h>
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/list.h>
17#include <linux/init.h>
18#include <linux/slab.h>
19#include <linux/pci.h>
20#include <linux/timer.h>
21
22#include "csr1212.h"
23#include "ieee1394.h"
24#include "ieee1394_types.h"
25#include "hosts.h"
26#include "ieee1394_core.h"
27#include "highlevel.h"
28#include "nodemgr.h"
29#include "csr.h"
30#include "config_roms.h"
31
32
33static void delayed_reset_bus(void * __reset_info)
34{
35 struct hpsb_host *host = (struct hpsb_host*)__reset_info;
36 int generation = host->csr.generation + 1;
37
38 /* The generation field rolls over to 2 rather than 0 per IEEE
39 * 1394a-2000. */
40 if (generation > 0xf || generation < 2)
41 generation = 2;
42
43 CSR_SET_BUS_INFO_GENERATION(host->csr.rom, generation);
44 if (csr1212_generate_csr_image(host->csr.rom) != CSR1212_SUCCESS) {
45 /* CSR image creation failed, reset generation field and do not
46 * issue a bus reset. */
47 CSR_SET_BUS_INFO_GENERATION(host->csr.rom, host->csr.generation);
48 return;
49 }
50
51 host->csr.generation = generation;
52
53 host->update_config_rom = 0;
54 if (host->driver->set_hw_config_rom)
55 host->driver->set_hw_config_rom(host, host->csr.rom->bus_info_data);
56
57 host->csr.gen_timestamp[host->csr.generation] = jiffies;
58 hpsb_reset_bus(host, SHORT_RESET);
59}
60
61static int dummy_transmit_packet(struct hpsb_host *h, struct hpsb_packet *p)
62{
63 return 0;
64}
65
66static int dummy_devctl(struct hpsb_host *h, enum devctl_cmd c, int arg)
67{
68 return -1;
69}
70
71static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg)
72{
73 return -1;
74}
75
76static struct hpsb_host_driver dummy_driver = {
77 .transmit_packet = dummy_transmit_packet,
78 .devctl = dummy_devctl,
79 .isoctl = dummy_isoctl
80};
81
82static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
83{
84 int *hostnum = __data;
85
86 if (host->id == *hostnum)
87 return 1;
88
89 return 0;
90}
91
92/**
93 * hpsb_alloc_host - allocate a new host controller.
94 * @drv: the driver that will manage the host controller
95 * @extra: number of extra bytes to allocate for the driver
96 *
97 * Allocate a &hpsb_host and initialize the general subsystem specific
98 * fields. If the driver needs to store per host data, as drivers
99 * usually do, the amount of memory required can be specified by the
100 * @extra parameter. Once allocated, the driver should initialize the
101 * driver specific parts, enable the controller and make it available
102 * to the general subsystem using hpsb_add_host().
103 *
104 * Return Value: a pointer to the &hpsb_host if successful, %NULL if
105 * no memory was available.
106 */
107static DECLARE_MUTEX(host_num_alloc);
108
109struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
110 struct device *dev)
111{
112 struct hpsb_host *h;
113 int i;
114 int hostnum = 0;
115
116 h = kmalloc(sizeof(struct hpsb_host) + extra, SLAB_KERNEL);
117 if (!h) return NULL;
118 memset(h, 0, sizeof(struct hpsb_host) + extra);
119
120 h->csr.rom = csr1212_create_csr(&csr_bus_ops, CSR_BUS_INFO_SIZE, h);
121 if (!h->csr.rom) {
122 kfree(h);
123 return NULL;
124 }
125
126 h->hostdata = h + 1;
127 h->driver = drv;
128
129 skb_queue_head_init(&h->pending_packet_queue);
130 INIT_LIST_HEAD(&h->addr_space);
131
132 for (i = 2; i < 16; i++)
133 h->csr.gen_timestamp[i] = jiffies - 60 * HZ;
134
135 for (i = 0; i < ARRAY_SIZE(h->tpool); i++)
136 HPSB_TPOOL_INIT(&h->tpool[i]);
137
138 atomic_set(&h->generation, 0);
139
140 INIT_WORK(&h->delayed_reset, delayed_reset_bus, h);
141
142 init_timer(&h->timeout);
143 h->timeout.data = (unsigned long) h;
144 h->timeout.function = abort_timedouts;
145 h->timeout_interval = HZ / 20; // 50ms by default
146
147 h->topology_map = h->csr.topology_map + 3;
148 h->speed_map = (u8 *)(h->csr.speed_map + 2);
149
150 down(&host_num_alloc);
151
152 while (nodemgr_for_each_host(&hostnum, alloc_hostnum_cb))
153 hostnum++;
154
155 h->id = hostnum;
156
157 memcpy(&h->device, &nodemgr_dev_template_host, sizeof(h->device));
158 h->device.parent = dev;
159 snprintf(h->device.bus_id, BUS_ID_SIZE, "fw-host%d", h->id);
160
161 h->class_dev.dev = &h->device;
162 h->class_dev.class = &hpsb_host_class;
163 snprintf(h->class_dev.class_id, BUS_ID_SIZE, "fw-host%d", h->id);
164
165 device_register(&h->device);
166 class_device_register(&h->class_dev);
167 get_device(&h->device);
168
169 up(&host_num_alloc);
170
171 return h;
172}
173
174int hpsb_add_host(struct hpsb_host *host)
175{
176 if (hpsb_default_host_entry(host))
177 return -ENOMEM;
178
179 hpsb_add_extra_config_roms(host);
180
181 highlevel_add_host(host);
182
183 return 0;
184}
185
186void hpsb_remove_host(struct hpsb_host *host)
187{
188 host->is_shutdown = 1;
189
190 cancel_delayed_work(&host->delayed_reset);
191 flush_scheduled_work();
192
193 host->driver = &dummy_driver;
194
195 highlevel_remove_host(host);
196
197 hpsb_remove_extra_config_roms(host);
198
199 class_device_unregister(&host->class_dev);
200 device_unregister(&host->device);
201}
202
203int hpsb_update_config_rom_image(struct hpsb_host *host)
204{
205 unsigned long reset_delay;
206 int next_gen = host->csr.generation + 1;
207
208 if (!host->update_config_rom)
209 return -EINVAL;
210
211 if (next_gen > 0xf)
212 next_gen = 2;
213
214 /* Stop the pending delayed bus reset; we're about to change the config
215 * ROM and it would be a waste to do a bus reset twice. */
216 cancel_delayed_work(&host->delayed_reset);
217
218 /* IEEE 1394a-2000 prohibits using the same generation number
219 * twice in a 60 second period. */
220 if (jiffies - host->csr.gen_timestamp[next_gen] < 60 * HZ)
221 /* Wait 60 seconds from the last time this generation number was
222 * used. */
223 reset_delay = (60 * HZ) + host->csr.gen_timestamp[next_gen] - jiffies;
224 else
225 /* Wait 1 second in case some other code wants to change the
226 * Config ROM in the near future. */
227 reset_delay = HZ;
228
229 PREPARE_WORK(&host->delayed_reset, delayed_reset_bus, host);
230 schedule_delayed_work(&host->delayed_reset, reset_delay);
231
232 return 0;
233}
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
new file mode 100644
index 000000000000..739e76840d51
--- /dev/null
+++ b/drivers/ieee1394/hosts.h
@@ -0,0 +1,215 @@
1#ifndef _IEEE1394_HOSTS_H
2#define _IEEE1394_HOSTS_H
3
4#include <linux/device.h>
5#include <linux/wait.h>
6#include <linux/list.h>
7#include <linux/timer.h>
8#include <linux/skbuff.h>
9
10#include <asm/semaphore.h>
11
12#include "ieee1394_types.h"
13#include "csr.h"
14
15
16struct hpsb_packet;
17struct hpsb_iso;
18
19struct hpsb_host {
20 struct list_head host_list;
21
22 void *hostdata;
23
24 atomic_t generation;
25
26 struct sk_buff_head pending_packet_queue;
27
28 struct timer_list timeout;
29 unsigned long timeout_interval;
30
31 unsigned char iso_listen_count[64];
32
33 int node_count; /* number of identified nodes on this bus */
34 int selfid_count; /* total number of SelfIDs received */
35 int nodes_active; /* number of nodes that are actually active */
36
37 nodeid_t node_id; /* node ID of this host */
38 nodeid_t irm_id; /* ID of this bus' isochronous resource manager */
39 nodeid_t busmgr_id; /* ID of this bus' bus manager */
40
41 /* this node's state */
42 unsigned in_bus_reset:1;
43 unsigned is_shutdown:1;
44
45 /* this node's duties on the bus */
46 unsigned is_root:1;
47 unsigned is_cycmst:1;
48 unsigned is_irm:1;
49 unsigned is_busmgr:1;
50
51 int reset_retries;
52 quadlet_t *topology_map;
53 u8 *speed_map;
54 struct csr_control csr;
55
56 /* Per node tlabel pool allocation */
57 struct hpsb_tlabel_pool tpool[64];
58
59 struct hpsb_host_driver *driver;
60
61 struct pci_dev *pdev;
62
63 int id;
64
65 struct device device;
66 struct class_device class_dev;
67
68 int update_config_rom;
69 struct work_struct delayed_reset;
70
71 unsigned int config_roms;
72
73 struct list_head addr_space;
74};
75
76
77
78enum devctl_cmd {
79 /* Host is requested to reset its bus and cancel all outstanding async
80 * requests. If arg == 1, it shall also attempt to become root on the
81 * bus. Return void. */
82 RESET_BUS,
83
84 /* Arg is void, return value is the hardware cycle counter value. */
85 GET_CYCLE_COUNTER,
86
87 /* Set the hardware cycle counter to the value in arg, return void.
88 * FIXME - setting is probably not required. */
89 SET_CYCLE_COUNTER,
90
91 /* Configure hardware for new bus ID in arg, return void. */
92 SET_BUS_ID,
93
94 /* If arg true, start sending cycle start packets, stop if arg == 0.
95 * Return void. */
96 ACT_CYCLE_MASTER,
97
98 /* Cancel all outstanding async requests without resetting the bus.
99 * Return void. */
100 CANCEL_REQUESTS,
101
102 /* Start or stop receiving the isochronous channel given in arg. Return
103 * void. This acts as an optimization hint; hosts are not obliged to
104 * filter out channels that were not requested. */
105 ISO_LISTEN_CHANNEL,
106 ISO_UNLISTEN_CHANNEL
107};
108
109enum isoctl_cmd {
110 /* rawiso API - see iso.h for the meanings of these commands
111 (they correspond exactly to the hpsb_iso_* API functions)
112 * INIT = allocate resources
113 * START = begin transmission/reception
114 * STOP = halt transmission/reception
115 * QUEUE/RELEASE = produce/consume packets
116 * SHUTDOWN = deallocate resources
117 */
118
119 XMIT_INIT,
120 XMIT_START,
121 XMIT_STOP,
122 XMIT_QUEUE,
123 XMIT_SHUTDOWN,
124
125 RECV_INIT,
126 RECV_LISTEN_CHANNEL, /* multi-channel only */
127 RECV_UNLISTEN_CHANNEL, /* multi-channel only */
128 RECV_SET_CHANNEL_MASK, /* multi-channel only; arg is a *u64 */
129 RECV_START,
130 RECV_STOP,
131 RECV_RELEASE,
132 RECV_SHUTDOWN,
133 RECV_FLUSH
134};
135
136enum reset_types {
137 /* 166 microsecond reset -- only type of reset available on
138 non-1394a capable IEEE 1394 controllers */
139 LONG_RESET,
140
141 /* Short (arbitrated) reset -- only available on 1394a-capable
142 IEEE 1394 controllers */
143 SHORT_RESET,
144
145 /* Variants that set force_root before issuing the bus reset */
146 LONG_RESET_FORCE_ROOT, SHORT_RESET_FORCE_ROOT,
147
148 /* Variants that clear force_root before issuing the bus reset */
149 LONG_RESET_NO_FORCE_ROOT, SHORT_RESET_NO_FORCE_ROOT
150};
151
152struct hpsb_host_driver {
153 struct module *owner;
154 const char *name;
155
156 /* The hardware driver may optionally support a function that is used
157 * to set the hardware ConfigROM if the hardware supports handling
158 * reads to the ConfigROM on its own. */
159 void (*set_hw_config_rom) (struct hpsb_host *host, quadlet_t *config_rom);
160
161 /* This function shall implement packet transmission based on
162 * packet->type. It shall CRC both parts of the packet (unless
163 * packet->type == raw) and do byte-swapping as necessary or instruct
164 * the hardware to do so. It can return immediately after the packet
165 * was queued for sending. After sending, hpsb_sent_packet() has to be
166 * called. Return 0 on success, negative errno on failure.
167 * NOTE: The function must be callable in interrupt context.
168 */
169 int (*transmit_packet) (struct hpsb_host *host,
170 struct hpsb_packet *packet);
171
172 /* This function requests miscellaneous services from the driver; see
173 * above for command codes and expected actions. Return -1 for unknown
174 * command, though that should never happen.
175 */
176 int (*devctl) (struct hpsb_host *host, enum devctl_cmd command, int arg);
177
178 /* ISO transmission/reception functions. Return 0 on success, -1
179 * (or -EXXX errno code) on failure. If the low-level driver does not
180 * support the new ISO API, set isoctl to NULL.
181 */
182 int (*isoctl) (struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg);
183
184 /* This function is mainly to redirect local CSR reads/locks to the iso
185 * management registers (bus manager id, bandwidth available, channels
186 * available) to the hardware registers in OHCI. reg is 0,1,2,3 for bus
187 * mgr, bwdth avail, ch avail hi, ch avail lo respectively (the same ids
188 * as OHCI uses). data and compare are the new data and expected data
189 * respectively, return value is the old value.
190 */
191 quadlet_t (*hw_csr_reg) (struct hpsb_host *host, int reg,
192 quadlet_t data, quadlet_t compare);
193};
194
195
196struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
197 struct device *dev);
198int hpsb_add_host(struct hpsb_host *host);
199void hpsb_remove_host(struct hpsb_host *h);
200
201/* The following 2 functions are deprecated and will be removed when the
202 * raw1394/libraw1394 update is complete. */
203int hpsb_update_config_rom(struct hpsb_host *host,
204 const quadlet_t *new_rom, size_t size, unsigned char rom_version);
205int hpsb_get_config_rom(struct hpsb_host *host, quadlet_t *buffer,
206 size_t buffersize, size_t *rom_size, unsigned char *rom_version);
207
208/* Updates the configuration rom image of a host. rom_version must be the
209 * current version, otherwise it will fail with return value -1. If this
210 * host does not support config-rom-update, it will return -EINVAL.
211 * Return value 0 indicates success.
212 */
213int hpsb_update_config_rom_image(struct hpsb_host *host);
214
215#endif /* _IEEE1394_HOSTS_H */
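From the adapter side, the expected flow is: fill in a struct hpsb_host_driver, allocate the host with hpsb_alloc_host() (the extra argument sizes a private area reachable through host->hostdata), bring the hardware up, then publish the host with hpsb_add_host(). A compressed, purely illustrative sketch with all hardware access stubbed out and every name invented; the cleanup path is simplified.

#include <linux/module.h>
#include <linux/device.h>

#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"

struct mycard_priv {            /* lives in the extra space of the hpsb_host */
        unsigned long reg_base; /* hypothetical MMIO base */
};

static int mycard_transmit_packet(struct hpsb_host *host,
                                  struct hpsb_packet *packet)
{
        /* Hand the packet to hardware here; must be safe in IRQ context. */
        return 0;
}

static int mycard_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
        switch (cmd) {
        case RESET_BUS:
                /* tell the PHY to issue a (short or long) bus reset */
                return 0;
        default:
                return -1;      /* unknown command */
        }
}

static struct hpsb_host_driver mycard_driver = {
        .owner           = THIS_MODULE,
        .name            = "mycard",
        .transmit_packet = mycard_transmit_packet,
        .devctl          = mycard_devctl,
        /* .isoctl left NULL: no rawiso support in this sketch */
};

/* Would be called from the bus-specific probe routine. */
static struct hpsb_host *mycard_probe(struct device *dev)
{
        struct hpsb_host *host;
        struct mycard_priv *priv;

        host = hpsb_alloc_host(&mycard_driver, sizeof(*priv), dev);
        if (!host)
                return NULL;
        priv = host->hostdata;
        priv->reg_base = 0;     /* would come from the bus probe */

        /* ... map registers, request the IRQ, enable the link layer ... */

        if (hpsb_add_host(host)) {      /* nonzero means out of memory */
                hpsb_remove_host(host); /* cleanup simplified for the sketch */
                return NULL;
        }
        return host;
}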
diff --git a/drivers/ieee1394/ieee1394-ioctl.h b/drivers/ieee1394/ieee1394-ioctl.h
new file mode 100644
index 000000000000..f92b566363d5
--- /dev/null
+++ b/drivers/ieee1394/ieee1394-ioctl.h
@@ -0,0 +1,111 @@
1/* Base file for all ieee1394 ioctls. Linux-1394 has allocated base '#'
2 * with a range of 0x00-0x3f. */
3
4#ifndef __IEEE1394_IOCTL_H
5#define __IEEE1394_IOCTL_H
6
7#include <linux/ioctl.h>
8#include <linux/types.h>
9
10
11/* AMDTP Gets 6 */
12#define AMDTP_IOC_CHANNEL _IOW('#', 0x00, struct amdtp_ioctl)
13#define AMDTP_IOC_PLUG _IOW('#', 0x01, struct amdtp_ioctl)
14#define AMDTP_IOC_PING _IOW('#', 0x02, struct amdtp_ioctl)
15#define AMDTP_IOC_ZAP _IO ('#', 0x03)
16
17
18/* DV1394 Gets 10 */
19
20/* Get the driver ready to transmit video. pass a struct dv1394_init* as
21 * the parameter (see below), or NULL to get default parameters */
22#define DV1394_IOC_INIT _IOW('#', 0x06, struct dv1394_init)
23
24/* Stop transmitting video and free the ringbuffer */
25#define DV1394_IOC_SHUTDOWN _IO ('#', 0x07)
26
27/* Submit N new frames to be transmitted, where the index of the first new
28 * frame is first_clear_buffer, and the index of the last new frame is
29 * (first_clear_buffer + N) % n_frames */
30#define DV1394_IOC_SUBMIT_FRAMES _IO ('#', 0x08)
31
32/* Block until N buffers are clear (pass N as the parameter) Because we
33 * re-transmit the last frame on underrun, there will at most be n_frames
34 * - 1 clear frames at any time */
35#define DV1394_IOC_WAIT_FRAMES _IO ('#', 0x09)
36
37/* Capture new frames that have been received, where the index of the
38 * first new frame is first_clear_buffer, and the index of the last new
39 * frame is (first_clear_buffer + N) % n_frames */
40#define DV1394_IOC_RECEIVE_FRAMES _IO ('#', 0x0a)
41
42/* Tell card to start receiving DMA */
43#define DV1394_IOC_START_RECEIVE _IO ('#', 0x0b)
44
45/* Pass a struct dv1394_status* as the parameter */
46#define DV1394_IOC_GET_STATUS _IOR('#', 0x0c, struct dv1394_status)
47
48
49/* Video1394 Gets 10 */
50
51#define VIDEO1394_IOC_LISTEN_CHANNEL \
52 _IOWR('#', 0x10, struct video1394_mmap)
53#define VIDEO1394_IOC_UNLISTEN_CHANNEL \
54 _IOW ('#', 0x11, int)
55#define VIDEO1394_IOC_LISTEN_QUEUE_BUFFER \
56 _IOW ('#', 0x12, struct video1394_wait)
57#define VIDEO1394_IOC_LISTEN_WAIT_BUFFER \
58 _IOWR('#', 0x13, struct video1394_wait)
59#define VIDEO1394_IOC_TALK_CHANNEL \
60 _IOWR('#', 0x14, struct video1394_mmap)
61#define VIDEO1394_IOC_UNTALK_CHANNEL \
62 _IOW ('#', 0x15, int)
63/*
64 * This one is broken: it really wanted
65 * "sizeof (struct video1394_wait) + sizeof (struct video1394_queue_variable)"
66 * but got just a "size_t"
67 */
68#define VIDEO1394_IOC_TALK_QUEUE_BUFFER \
69 _IOW ('#', 0x16, size_t)
70#define VIDEO1394_IOC_TALK_WAIT_BUFFER \
71 _IOW ('#', 0x17, struct video1394_wait)
72#define VIDEO1394_IOC_LISTEN_POLL_BUFFER \
73 _IOWR('#', 0x18, struct video1394_wait)
74
75
76/* Raw1394's ISO interface */
77#define RAW1394_IOC_ISO_XMIT_INIT \
78 _IOW ('#', 0x1a, struct raw1394_iso_status)
79#define RAW1394_IOC_ISO_RECV_INIT \
80 _IOWR('#', 0x1b, struct raw1394_iso_status)
81#define RAW1394_IOC_ISO_RECV_START \
82 _IOC (_IOC_WRITE, '#', 0x1c, sizeof(int) * 3)
83#define RAW1394_IOC_ISO_XMIT_START \
84 _IOC (_IOC_WRITE, '#', 0x1d, sizeof(int) * 2)
85#define RAW1394_IOC_ISO_XMIT_RECV_STOP \
86 _IO ('#', 0x1e)
87#define RAW1394_IOC_ISO_GET_STATUS \
88 _IOR ('#', 0x1f, struct raw1394_iso_status)
89#define RAW1394_IOC_ISO_SHUTDOWN \
90 _IO ('#', 0x20)
91#define RAW1394_IOC_ISO_QUEUE_ACTIVITY \
92 _IO ('#', 0x21)
93#define RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL \
94 _IOW ('#', 0x22, unsigned char)
95#define RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL \
96 _IOW ('#', 0x23, unsigned char)
97#define RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK \
98 _IOW ('#', 0x24, __u64)
99#define RAW1394_IOC_ISO_RECV_PACKETS \
100 _IOW ('#', 0x25, struct raw1394_iso_packets)
101#define RAW1394_IOC_ISO_RECV_RELEASE_PACKETS \
102 _IOW ('#', 0x26, unsigned int)
103#define RAW1394_IOC_ISO_XMIT_PACKETS \
104 _IOW ('#', 0x27, struct raw1394_iso_packets)
105#define RAW1394_IOC_ISO_XMIT_SYNC \
106 _IO ('#', 0x28)
107#define RAW1394_IOC_ISO_RECV_FLUSH \
108 _IO ('#', 0x29)
109
110
111#endif /* __IEEE1394_IOCTL_H */
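Because these are ordinary ioctl numbers, user space only needs the same _IO()/_IOW() encoding to talk to the character devices. A tiny sketch that waits for one clear dv1394 transmit frame; the /dev/dv1394/0 path is an assumption (it depends on the local devfs/udev setup), and a real client would have issued DV1394_IOC_INIT first.

#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Same encoding as ieee1394-ioctl.h: base '#', number 0x09 */
#define DV1394_IOC_WAIT_FRAMES  _IO('#', 0x09)

int main(void)
{
        int fd = open("/dev/dv1394/0", O_RDWR);  /* device path is an assumption */
        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Block until at least one transmit frame is clear (N passed as arg). */
        if (ioctl(fd, DV1394_IOC_WAIT_FRAMES, 1) < 0)
                perror("DV1394_IOC_WAIT_FRAMES");
        close(fd);
        return 0;
}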
diff --git a/drivers/ieee1394/ieee1394.h b/drivers/ieee1394/ieee1394.h
new file mode 100644
index 000000000000..b634a9bb365c
--- /dev/null
+++ b/drivers/ieee1394/ieee1394.h
@@ -0,0 +1,202 @@
1/*
2 * Generic IEEE 1394 definitions
3 */
4
5#ifndef _IEEE1394_IEEE1394_H
6#define _IEEE1394_IEEE1394_H
7
8#define TCODE_WRITEQ 0x0
9#define TCODE_WRITEB 0x1
10#define TCODE_WRITE_RESPONSE 0x2
11#define TCODE_READQ 0x4
12#define TCODE_READB 0x5
13#define TCODE_READQ_RESPONSE 0x6
14#define TCODE_READB_RESPONSE 0x7
15#define TCODE_CYCLE_START 0x8
16#define TCODE_LOCK_REQUEST 0x9
17#define TCODE_ISO_DATA 0xa
18#define TCODE_STREAM_DATA 0xa
19#define TCODE_LOCK_RESPONSE 0xb
20
21#define RCODE_COMPLETE 0x0
22#define RCODE_CONFLICT_ERROR 0x4
23#define RCODE_DATA_ERROR 0x5
24#define RCODE_TYPE_ERROR 0x6
25#define RCODE_ADDRESS_ERROR 0x7
26
27#define EXTCODE_MASK_SWAP 0x1
28#define EXTCODE_COMPARE_SWAP 0x2
29#define EXTCODE_FETCH_ADD 0x3
30#define EXTCODE_LITTLE_ADD 0x4
31#define EXTCODE_BOUNDED_ADD 0x5
32#define EXTCODE_WRAP_ADD 0x6
33
34#define ACK_COMPLETE 0x1
35#define ACK_PENDING 0x2
36#define ACK_BUSY_X 0x4
37#define ACK_BUSY_A 0x5
38#define ACK_BUSY_B 0x6
39#define ACK_TARDY 0xb
40#define ACK_CONFLICT_ERROR 0xc
41#define ACK_DATA_ERROR 0xd
42#define ACK_TYPE_ERROR 0xe
43#define ACK_ADDRESS_ERROR 0xf
44
45/* Non-standard "ACK codes" for internal use */
46#define ACKX_NONE (-1)
47#define ACKX_SEND_ERROR (-2)
48#define ACKX_ABORTED (-3)
49#define ACKX_TIMEOUT (-4)
50
51
52#define IEEE1394_SPEED_100 0x00
53#define IEEE1394_SPEED_200 0x01
54#define IEEE1394_SPEED_400 0x02
55#define IEEE1394_SPEED_800 0x03
56#define IEEE1394_SPEED_1600 0x04
57#define IEEE1394_SPEED_3200 0x05
58/* The current highest tested speed supported by the subsystem */
59#define IEEE1394_SPEED_MAX IEEE1394_SPEED_800
60
61/* Maps speed values above to a string representation */
62extern const char *hpsb_speedto_str[];
63
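/*
 * Illustrative only, not part of the original header: the speed codes above
 * index hpsb_speedto_str[], and the core records one code per node pair in
 * host->speed_map after each bus reset.  This sketch assumes hosts.h and
 * ieee1394_types.h (for struct hpsb_host, nodeid_t and NODEID_TO_NODE) are
 * in scope; the helper name is hypothetical.
 */
static inline const char *example_speed_between(struct hpsb_host *host,
						nodeid_t node)
{
	u8 code = host->speed_map[64 * NODEID_TO_NODE(host->node_id)
				  + NODEID_TO_NODE(node)];

	return hpsb_speedto_str[code];	/* e.g. "S400" for IEEE1394_SPEED_400 */
}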
64
65#define SELFID_PWRCL_NO_POWER 0x0
66#define SELFID_PWRCL_PROVIDE_15W 0x1
67#define SELFID_PWRCL_PROVIDE_30W 0x2
68#define SELFID_PWRCL_PROVIDE_45W 0x3
69#define SELFID_PWRCL_USE_1W 0x4
70#define SELFID_PWRCL_USE_3W 0x5
71#define SELFID_PWRCL_USE_6W 0x6
72#define SELFID_PWRCL_USE_10W 0x7
73
74#define SELFID_PORT_CHILD 0x3
75#define SELFID_PORT_PARENT 0x2
76#define SELFID_PORT_NCONN 0x1
77#define SELFID_PORT_NONE 0x0
78
79
80/* 1394a PHY bitmasks */
81#define PHY_00_PHYSICAL_ID 0xFC
82#define PHY_00_R 0x02 /* Root */
83#define PHY_00_PS 0x01 /* Power Status*/
84#define PHY_01_RHB 0x80 /* Root Hold-Off */
85#define PHY_01_IBR 0x80 /* Initiate Bus Reset */
86#define PHY_01_GAP_COUNT 0x3F
87#define PHY_02_EXTENDED 0xE0 /* 0x7 for 1394a-compliant PHY */
88#define PHY_02_TOTAL_PORTS 0x1F
89#define PHY_03_MAX_SPEED 0xE0
90#define PHY_03_DELAY 0x0F
91#define PHY_04_LCTRL 0x80 /* Link Active Report Control */
92#define PHY_04_CONTENDER 0x40
93#define PHY_04_JITTER 0x38
94#define PHY_04_PWR_CLASS 0x07 /* Power Class */
95#define PHY_05_WATCHDOG 0x80
96#define PHY_05_ISBR 0x40 /* Initiate Short Bus Reset */
97#define PHY_05_LOOP 0x20 /* Loop Detect */
98#define PHY_05_PWR_FAIL 0x10 /* Cable Power Failure Detect */
99#define PHY_05_TIMEOUT 0x08 /* Arbitration State Machine Timeout */
100#define PHY_05_PORT_EVENT 0x04 /* Port Event Detect */
101#define PHY_05_ENAB_ACCEL 0x02 /* Enable Arbitration Acceleration */
102#define PHY_05_ENAB_MULTI 0x01 /* Ena. Multispeed Packet Concatenation */
103
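/*
 * Illustrative only, not part of the original header: typical use of the 1394a
 * PHY bitmasks above on the raw 8-bit register values a host driver reads back
 * from its PHY.  The helper names are hypothetical.
 */
static inline int example_phy_total_ports(u8 reg2)
{
	return reg2 & PHY_02_TOTAL_PORTS;	/* low five bits of register 2 */
}

static inline int example_phy_gap_count(u8 reg1)
{
	return reg1 & PHY_01_GAP_COUNT;		/* low six bits of register 1 */
}

static inline int example_phy_is_contender(u8 reg4)
{
	return (reg4 & PHY_04_CONTENDER) != 0;	/* contender bit in register 4 */
}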
104#include <asm/byteorder.h>
105
106#ifdef __BIG_ENDIAN_BITFIELD
107
108struct selfid {
109 u32 packet_identifier:2; /* always binary 10 */
110 u32 phy_id:6;
111 /* byte */
112 u32 extended:1; /* if true is struct ext_selfid */
113 u32 link_active:1;
114 u32 gap_count:6;
115 /* byte */
116 u32 speed:2;
117 u32 phy_delay:2;
118 u32 contender:1;
119 u32 power_class:3;
120 /* byte */
121 u32 port0:2;
122 u32 port1:2;
123 u32 port2:2;
124 u32 initiated_reset:1;
125 u32 more_packets:1;
126} __attribute__((packed));
127
128struct ext_selfid {
129 u32 packet_identifier:2; /* always binary 10 */
130 u32 phy_id:6;
131 /* byte */
132 u32 extended:1; /* if false is struct selfid */
133 u32 seq_nr:3;
134 u32 reserved:2;
135 u32 porta:2;
136 /* byte */
137 u32 portb:2;
138 u32 portc:2;
139 u32 portd:2;
140 u32 porte:2;
141 /* byte */
142 u32 portf:2;
143 u32 portg:2;
144 u32 porth:2;
145 u32 reserved2:1;
146 u32 more_packets:1;
147} __attribute__((packed));
148
149#elif defined __LITTLE_ENDIAN_BITFIELD /* __BIG_ENDIAN_BITFIELD */
150
151/*
152 * Note: these are meant to be the bit fields of a big-endian SelfID as seen
153 * on a little-endian machine, without swapping.
154 */
155
156struct selfid {
157 u32 phy_id:6;
158 u32 packet_identifier:2; /* always binary 10 */
159 /* byte */
160 u32 gap_count:6;
161 u32 link_active:1;
162 u32 extended:1; /* if true is struct ext_selfid */
163 /* byte */
164 u32 power_class:3;
165 u32 contender:1;
166 u32 phy_delay:2;
167 u32 speed:2;
168 /* byte */
169 u32 more_packets:1;
170 u32 initiated_reset:1;
171 u32 port2:2;
172 u32 port1:2;
173 u32 port0:2;
174} __attribute__((packed));
175
176struct ext_selfid {
177 u32 phy_id:6;
178 u32 packet_identifier:2; /* always binary 10 */
179 /* byte */
180 u32 porta:2;
181 u32 reserved:2;
182 u32 seq_nr:3;
183 u32 extended:1; /* if false is struct selfid */
184 /* byte */
185 u32 porte:2;
186 u32 portd:2;
187 u32 portc:2;
188 u32 portb:2;
189 /* byte */
190 u32 more_packets:1;
191 u32 reserved2:1;
192 u32 porth:2;
193 u32 portg:2;
194 u32 portf:2;
195} __attribute__((packed));
196
197#else
198#error What? PDP endian?
199#endif /* __BIG_ENDIAN_BITFIELD */
200
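/*
 * Illustrative only, not part of the original header: the core overlays the
 * bitfield structs above directly on the raw self-ID quadlets gathered during
 * bus reset (see check_selfids() in ieee1394_core.c).  The helper name is
 * hypothetical; u32 is assumed to be in scope, as it is for the structs above.
 */
static inline int example_selfid_is_irm_candidate(u32 raw)
{
	struct selfid *sid = (struct selfid *)&raw;

	/* A non-extended self-ID with an active link and the contender bit
	 * set marks the node as an IRM candidate. */
	return !sid->extended && sid->link_active && sid->contender;
}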
201
202#endif /* _IEEE1394_IEEE1394_H */
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
new file mode 100644
index 000000000000..1c5845f7e4ab
--- /dev/null
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -0,0 +1,1330 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * Core support: hpsb_packet management, packet handling and forwarding to
5 * highlevel or lowlevel code
6 *
7 * Copyright (C) 1999, 2000 Andreas E. Bombe
8 * 2002 Manfred Weihs <weihs@ict.tuwien.ac.at>
9 *
10 * This code is licensed under the GPL. See the file COPYING in the root
11 * directory of the kernel sources for details.
12 *
13 *
14 * Contributions:
15 *
16 * Manfred Weihs <weihs@ict.tuwien.ac.at>
17 * loopback functionality in hpsb_send_packet
18 * allow highlevel drivers to disable automatic response generation
19 * and to generate responses themselves (deferred)
20 *
21 */
22
23#include <linux/config.h>
24#include <linux/kernel.h>
25#include <linux/list.h>
26#include <linux/string.h>
27#include <linux/init.h>
28#include <linux/slab.h>
29#include <linux/interrupt.h>
30#include <linux/module.h>
31#include <linux/moduleparam.h>
32#include <linux/bitops.h>
33#include <linux/kdev_t.h>
34#include <linux/skbuff.h>
35#include <linux/suspend.h>
36
37#include <asm/byteorder.h>
38#include <asm/semaphore.h>
39
40#include "ieee1394_types.h"
41#include "ieee1394.h"
42#include "hosts.h"
43#include "ieee1394_core.h"
44#include "highlevel.h"
45#include "ieee1394_transactions.h"
46#include "csr.h"
47#include "nodemgr.h"
48#include "dma.h"
49#include "iso.h"
50#include "config_roms.h"
51
52/*
53 * Disable the nodemgr detection and config rom reading functionality.
54 */
55static int disable_nodemgr = 0;
56module_param(disable_nodemgr, int, 0444);
57MODULE_PARM_DESC(disable_nodemgr, "Disable nodemgr functionality.");
58
59/* Disable Isochronous Resource Manager functionality */
60int hpsb_disable_irm = 0;
61module_param_named(disable_irm, hpsb_disable_irm, bool, 0);
62MODULE_PARM_DESC(disable_irm,
63 "Disable Isochronous Resource Manager functionality.");
64
65/* We are GPL, so treat us special */
66MODULE_LICENSE("GPL");
67
68/* Some globals used */
69const char *hpsb_speedto_str[] = { "S100", "S200", "S400", "S800", "S1600", "S3200" };
70struct class_simple *hpsb_protocol_class;
71
72#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
73static void dump_packet(const char *text, quadlet_t *data, int size)
74{
75 int i;
76
77 size /= 4;
78 size = (size > 4 ? 4 : size);
79
80 printk(KERN_DEBUG "ieee1394: %s", text);
81 for (i = 0; i < size; i++)
82 printk(" %08x", data[i]);
83 printk("\n");
84}
85#else
86#define dump_packet(x,y,z)
87#endif
88
89static void abort_requests(struct hpsb_host *host);
90static void queue_packet_complete(struct hpsb_packet *packet);
91
92
93/**
94 * hpsb_set_packet_complete_task - set the task that runs when a packet
95 * completes. You cannot call this more than once on a single packet
96 * before it is sent.
97 *
98 * @packet: the packet whose completion we want the task added to
99 * @routine: function to call
100 * @data: data (if any) to pass to the above function
101 */
102void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
103 void (*routine)(void *), void *data)
104{
105 WARN_ON(packet->complete_routine != NULL);
106 packet->complete_routine = routine;
107 packet->complete_data = data;
108 return;
109}
110
111/**
112 * hpsb_alloc_packet - allocate new packet structure
113 * @data_size: size of the data block to be allocated
114 *
115 * This function allocates, initializes and returns a new &struct hpsb_packet.
116 * It can be used in interrupt context. A header block is always included, its
117 * size is big enough to contain all possible 1394 headers. The data block is
118 * only allocated when @data_size is not zero.
119 *
120 * For packets for which responses will be received, the @data_size has to be big
121 * enough to contain the response's data block since no further allocation
122 * occurs at response matching time.
123 *
124 * The packet's generation value will be set to the current generation number
125 * for ease of use. Remember to overwrite it with your own recorded generation
126 * number if you can not be sure that your code will not race with a bus reset.
127 *
128 * Return value: A pointer to a &struct hpsb_packet or NULL on allocation
129 * failure.
130 */
131struct hpsb_packet *hpsb_alloc_packet(size_t data_size)
132{
133 struct hpsb_packet *packet = NULL;
134 struct sk_buff *skb;
135
136 data_size = ((data_size + 3) & ~3);
137
138 skb = alloc_skb(data_size + sizeof(*packet), GFP_ATOMIC);
139 if (skb == NULL)
140 return NULL;
141
142 memset(skb->data, 0, data_size + sizeof(*packet));
143
144 packet = (struct hpsb_packet *)skb->data;
145 packet->skb = skb;
146
147 packet->header = packet->embedded_header;
148 packet->state = hpsb_unused;
149 packet->generation = -1;
150 INIT_LIST_HEAD(&packet->driver_list);
151 atomic_set(&packet->refcnt, 1);
152
153 if (data_size) {
154 packet->data = (quadlet_t *)(skb->data + sizeof(*packet));
155 packet->data_size = data_size;
156 }
157
158 return packet;
159}
160
161
162/**
163 * hpsb_free_packet - free packet and data associated with it
164 * @packet: packet to free (is NULL safe)
165 *
166 * This function will free packet->data and finally the packet itself.
167 */
168void hpsb_free_packet(struct hpsb_packet *packet)
169{
170 if (packet && atomic_dec_and_test(&packet->refcnt)) {
171 BUG_ON(!list_empty(&packet->driver_list));
172 kfree_skb(packet->skb);
173 }
174}
175
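/*
 * Illustrative only, not part of the original file: the basic allocation /
 * completion / release life cycle of an hpsb_packet using the helpers above.
 * The callback and function names are hypothetical, and error handling is
 * trimmed to the essentials.
 */
static void example_packet_done(void *data)
{
	struct hpsb_packet *p = data;

	HPSB_INFO("packet completed with ack code %d", p->ack_code);
	hpsb_free_packet(p);	/* release the reference taken at allocation */
}

static int example_prepare_packet(struct hpsb_host *host)
{
	struct hpsb_packet *p = hpsb_alloc_packet(8);	/* 8 byte data block */

	if (!p)
		return -ENOMEM;

	p->host = host;
	p->generation = get_hpsb_generation(host);
	hpsb_set_packet_complete_task(p, example_packet_done, p);

	/* the caller would now fill in the header and call hpsb_send_packet() */
	return 0;
}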
176
177int hpsb_reset_bus(struct hpsb_host *host, int type)
178{
179 if (!host->in_bus_reset) {
180 host->driver->devctl(host, RESET_BUS, type);
181 return 0;
182 } else {
183 return 1;
184 }
185}
186
187
188int hpsb_bus_reset(struct hpsb_host *host)
189{
190 if (host->in_bus_reset) {
191 HPSB_NOTICE("%s called while bus reset already in progress",
192 __FUNCTION__);
193 return 1;
194 }
195
196 abort_requests(host);
197 host->in_bus_reset = 1;
198 host->irm_id = -1;
199 host->is_irm = 0;
200 host->busmgr_id = -1;
201 host->is_busmgr = 0;
202 host->is_cycmst = 0;
203 host->node_count = 0;
204 host->selfid_count = 0;
205
206 return 0;
207}
208
209
210/*
211 * Verify the received SelfIDs, storing the resulting node count in the host.
212 * Returns non-zero on success, zero in case verification failed.
213 */
214static int check_selfids(struct hpsb_host *host)
215{
216 int nodeid = -1;
217 int rest_of_selfids = host->selfid_count;
218 struct selfid *sid = (struct selfid *)host->topology_map;
219 struct ext_selfid *esid;
220 int esid_seq = 23;
221
222 host->nodes_active = 0;
223
224 while (rest_of_selfids--) {
225 if (!sid->extended) {
226 nodeid++;
227 esid_seq = 0;
228
229 if (sid->phy_id != nodeid) {
230 HPSB_INFO("SelfIDs failed monotony check with "
231 "%d", sid->phy_id);
232 return 0;
233 }
234
235 if (sid->link_active) {
236 host->nodes_active++;
237 if (sid->contender)
238 host->irm_id = LOCAL_BUS | sid->phy_id;
239 }
240 } else {
241 esid = (struct ext_selfid *)sid;
242
243 if ((esid->phy_id != nodeid)
244 || (esid->seq_nr != esid_seq)) {
245 HPSB_INFO("SelfIDs failed monotony check with "
246 "%d/%d", esid->phy_id, esid->seq_nr);
247 return 0;
248 }
249 esid_seq++;
250 }
251 sid++;
252 }
253
254 esid = (struct ext_selfid *)(sid - 1);
255 while (esid->extended) {
256 if ((esid->porta == 0x2) || (esid->portb == 0x2)
257 || (esid->portc == 0x2) || (esid->portd == 0x2)
258 || (esid->porte == 0x2) || (esid->portf == 0x2)
259 || (esid->portg == 0x2) || (esid->porth == 0x2)) {
260 HPSB_INFO("SelfIDs failed root check on "
261 "extended SelfID");
262 return 0;
263 }
264 esid--;
265 }
266
267 sid = (struct selfid *)esid;
268 if ((sid->port0 == 0x2) || (sid->port1 == 0x2) || (sid->port2 == 0x2)) {
269 HPSB_INFO("SelfIDs failed root check");
270 return 0;
271 }
272
273 host->node_count = nodeid + 1;
274 return 1;
275}
276
277static void build_speed_map(struct hpsb_host *host, int nodecount)
278{
279 u8 speedcap[nodecount];
280 u8 cldcnt[nodecount];
281 u8 *map = host->speed_map;
282 struct selfid *sid;
283 struct ext_selfid *esid;
284 int i, j, n;
285
286 for (i = 0; i < (nodecount * 64); i += 64) {
287 for (j = 0; j < nodecount; j++) {
288 map[i+j] = IEEE1394_SPEED_MAX;
289 }
290 }
291
292 for (i = 0; i < nodecount; i++) {
293 cldcnt[i] = 0;
294 }
295
296 /* find direct children count and speed */
297 for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1],
298 n = nodecount - 1;
299 (void *)sid >= (void *)host->topology_map; sid--) {
300 if (sid->extended) {
301 esid = (struct ext_selfid *)sid;
302
303 if (esid->porta == 0x3) cldcnt[n]++;
304 if (esid->portb == 0x3) cldcnt[n]++;
305 if (esid->portc == 0x3) cldcnt[n]++;
306 if (esid->portd == 0x3) cldcnt[n]++;
307 if (esid->porte == 0x3) cldcnt[n]++;
308 if (esid->portf == 0x3) cldcnt[n]++;
309 if (esid->portg == 0x3) cldcnt[n]++;
310 if (esid->porth == 0x3) cldcnt[n]++;
311 } else {
312 if (sid->port0 == 0x3) cldcnt[n]++;
313 if (sid->port1 == 0x3) cldcnt[n]++;
314 if (sid->port2 == 0x3) cldcnt[n]++;
315
316 speedcap[n] = sid->speed;
317 n--;
318 }
319 }
320
321 /* set self mapping */
322 for (i = 0; i < nodecount; i++) {
323 map[64*i + i] = speedcap[i];
324 }
325
326 /* fix up direct children count to total children count;
327 * also fix up speedcaps for sibling and parent communication */
328 for (i = 1; i < nodecount; i++) {
329 for (j = cldcnt[i], n = i - 1; j > 0; j--) {
330 cldcnt[i] += cldcnt[n];
331 speedcap[n] = min(speedcap[n], speedcap[i]);
332 n -= cldcnt[n] + 1;
333 }
334 }
335
336 for (n = 0; n < nodecount; n++) {
337 for (i = n - cldcnt[n]; i <= n; i++) {
338 for (j = 0; j < (n - cldcnt[n]); j++) {
339 map[j*64 + i] = map[i*64 + j] =
340 min(map[i*64 + j], speedcap[n]);
341 }
342 for (j = n + 1; j < nodecount; j++) {
343 map[j*64 + i] = map[i*64 + j] =
344 min(map[i*64 + j], speedcap[n]);
345 }
346 }
347 }
348}
349
350
351void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
352{
353 if (host->in_bus_reset) {
354 HPSB_VERBOSE("Including SelfID 0x%x", sid);
355 host->topology_map[host->selfid_count++] = sid;
356 } else {
357 HPSB_NOTICE("Spurious SelfID packet (0x%08x) received from bus %d",
358 sid, NODEID_TO_BUS(host->node_id));
359 }
360}
361
362void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
363{
364 if (!host->in_bus_reset)
365 HPSB_NOTICE("SelfID completion called outside of bus reset!");
366
367 host->node_id = LOCAL_BUS | phyid;
368 host->is_root = isroot;
369
370 if (!check_selfids(host)) {
371 if (host->reset_retries++ < 20) {
372 /* selfid stage did not complete without error */
373 HPSB_NOTICE("Error in SelfID stage, resetting");
374 host->in_bus_reset = 0;
375 /* this should work from ohci1394 now... */
376 hpsb_reset_bus(host, LONG_RESET);
377 return;
378 } else {
379 HPSB_NOTICE("Stopping out-of-control reset loop");
380 HPSB_NOTICE("Warning - topology map and speed map will not be valid");
381 host->reset_retries = 0;
382 }
383 } else {
384 host->reset_retries = 0;
385 build_speed_map(host, host->node_count);
386 }
387
388 HPSB_VERBOSE("selfid_complete called with successful SelfID stage "
389 "... irm_id: 0x%X node_id: 0x%X",host->irm_id,host->node_id);
390
391 /* irm_id is kept up to date by check_selfids() */
392 if (host->irm_id == host->node_id) {
393 host->is_irm = 1;
394 } else {
395 host->is_busmgr = 0;
396 host->is_irm = 0;
397 }
398
399 if (isroot) {
400 host->driver->devctl(host, ACT_CYCLE_MASTER, 1);
401 host->is_cycmst = 1;
402 }
403 atomic_inc(&host->generation);
404 host->in_bus_reset = 0;
405 highlevel_host_reset(host);
406}
407
408
409void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
410 int ackcode)
411{
412 unsigned long flags;
413
414 spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
415
416 packet->ack_code = ackcode;
417
418 if (packet->no_waiter || packet->state == hpsb_complete) {
419 /* if packet->no_waiter, must not have a tlabel allocated */
420 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
421 hpsb_free_packet(packet);
422 return;
423 }
424
425 atomic_dec(&packet->refcnt); /* drop HC's reference */
426 /* here the packet must be on the host->pending_packet_queue */
427
428 if (ackcode != ACK_PENDING || !packet->expect_response) {
429 packet->state = hpsb_complete;
430 __skb_unlink(packet->skb, &host->pending_packet_queue);
431 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
432 queue_packet_complete(packet);
433 return;
434 }
435
436 packet->state = hpsb_pending;
437 packet->sendtime = jiffies;
438
439 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
440
441 mod_timer(&host->timeout, jiffies + host->timeout_interval);
442}
443
444/**
445 * hpsb_send_phy_config - transmit a PHY configuration packet on the bus
446 * @host: host that PHY config packet gets sent through
447 * @rootid: root whose force_root bit should get set (-1 = don't set force_root)
448 * @gapcnt: gap count value to set (-1 = don't set gap count)
449 *
450 * This function sends a PHY config packet on the bus through the specified host.
451 *
452 * Return value: 0 for success or error number otherwise.
453 */
454int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt)
455{
456 struct hpsb_packet *packet;
457 int retval = 0;
458
459 if (rootid >= ALL_NODES || rootid < -1 || gapcnt > 0x3f || gapcnt < -1 ||
460 (rootid == -1 && gapcnt == -1)) {
461 HPSB_DEBUG("Invalid Parameter: rootid = %d gapcnt = %d",
462 rootid, gapcnt);
463 return -EINVAL;
464 }
465
466 packet = hpsb_alloc_packet(0);
467 if (!packet)
468 return -ENOMEM;
469
470 packet->host = host;
471 packet->header_size = 8;
472 packet->data_size = 0;
473 packet->expect_response = 0;
474 packet->no_waiter = 0;
475 packet->type = hpsb_raw;
476 packet->header[0] = 0;
477 if (rootid != -1)
478 packet->header[0] |= rootid << 24 | 1 << 23;
479 if (gapcnt != -1)
480 packet->header[0] |= gapcnt << 16 | 1 << 22;
481
482 packet->header[1] = ~packet->header[0];
483
484 packet->generation = get_hpsb_generation(host);
485
486 retval = hpsb_send_packet_and_wait(packet);
487 hpsb_free_packet(packet);
488
489 return retval;
490}
491
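/*
 * Illustrative only, not part of the original file: sending a PHY config
 * packet that sets only the gap count and leaves force_root untouched.  The
 * gap count value is just an example; the function name is hypothetical.
 */
static int example_set_gap_count(struct hpsb_host *host)
{
	/* rootid == -1 means "don't touch force_root"; 0x3f is the largest
	 * gap count accepted by hpsb_send_phy_config() */
	return hpsb_send_phy_config(host, -1, 0x3f);
}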
492/**
493 * hpsb_send_packet - transmit a packet on the bus
494 * @packet: packet to send
495 *
496 * The packet is sent through the host specified in the packet->host field.
497 * Before sending, the packet's transmit speed is automatically determined
498 * using the local speed map when it is an async, non-broadcast packet.
499 *
500 * Possibilities for failure are that the host is not initialized or is in
501 * bus reset, that the packet's generation number doesn't match the current
502 * generation number, or that the host reports a transmit error.
503 *
504 * Return value: 0 on success, negative errno on failure.
505 */
506int hpsb_send_packet(struct hpsb_packet *packet)
507{
508 struct hpsb_host *host = packet->host;
509
510 if (host->is_shutdown)
511 return -EINVAL;
512 if (host->in_bus_reset ||
513 (packet->generation != get_hpsb_generation(host)))
514 return -EAGAIN;
515
516 packet->state = hpsb_queued;
517
518 /* This just seems silly to me */
519 WARN_ON(packet->no_waiter && packet->expect_response);
520
521 if (!packet->no_waiter || packet->expect_response) {
522 atomic_inc(&packet->refcnt);
523 packet->sendtime = jiffies;
524 skb_queue_tail(&host->pending_packet_queue, packet->skb);
525 }
526
527 if (packet->node_id == host->node_id) {
528 /* it is a local request, so handle it locally */
529
530 quadlet_t *data;
531 size_t size = packet->data_size + packet->header_size;
532
533 data = kmalloc(size, GFP_ATOMIC);
534 if (!data) {
535 HPSB_ERR("unable to allocate memory for concatenating header and data");
536 return -ENOMEM;
537 }
538
539 memcpy(data, packet->header, packet->header_size);
540
541 if (packet->data_size)
542 memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size);
543
544 dump_packet("send packet local:", packet->header,
545 packet->header_size);
546
547 hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE);
548 hpsb_packet_received(host, data, size, 0);
549
550 kfree(data);
551
552 return 0;
553 }
554
555 if (packet->type == hpsb_async && packet->node_id != ALL_NODES) {
556 packet->speed_code =
557 host->speed_map[NODEID_TO_NODE(host->node_id) * 64
558 + NODEID_TO_NODE(packet->node_id)];
559 }
560
561#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
562 switch (packet->speed_code) {
563 case 2:
564 dump_packet("send packet 400:", packet->header,
565 packet->header_size);
566 break;
567 case 1:
568 dump_packet("send packet 200:", packet->header,
569 packet->header_size);
570 break;
571 default:
572 dump_packet("send packet 100:", packet->header,
573 packet->header_size);
574 }
575#endif
576
577 return host->driver->transmit_packet(host, packet);
578}
579
580/* We could just use complete() directly as the packet complete
581 * callback, but this is more typesafe, in the sense that we get a
582 * compiler error if the prototype for complete() changes. */
583
584static void complete_packet(void *data)
585{
586 complete((struct completion *) data);
587}
588
589int hpsb_send_packet_and_wait(struct hpsb_packet *packet)
590{
591 struct completion done;
592 int retval;
593
594 init_completion(&done);
595 hpsb_set_packet_complete_task(packet, complete_packet, &done);
596 retval = hpsb_send_packet(packet);
597 if (retval == 0)
598 wait_for_completion(&done);
599
600 return retval;
601}
602
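/*
 * Illustrative only, not part of the original file: a blocking quadlet read
 * assembled from the exported helpers, roughly what hpsb_read() in
 * ieee1394_transactions.c does for 4-byte reads.  Error handling is trimmed
 * and the function name is hypothetical.
 */
static int example_read_quadlet(struct hpsb_host *host, nodeid_t node,
				u64 addr, quadlet_t *value)
{
	struct hpsb_packet *packet;
	int ret;

	packet = hpsb_make_readpacket(host, node, addr, 4);
	if (!packet)
		return -ENOMEM;

	packet->generation = get_hpsb_generation(host);
	ret = hpsb_send_packet_and_wait(packet);
	if (ret == 0)
		ret = hpsb_packet_success(packet);
	if (ret == 0)
		*value = packet->header[3];	/* read quadlet response payload */

	hpsb_free_tlabel(packet);
	hpsb_free_packet(packet);
	return ret;
}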
603static void send_packet_nocare(struct hpsb_packet *packet)
604{
605 if (hpsb_send_packet(packet) < 0) {
606 hpsb_free_packet(packet);
607 }
608}
609
610
611static void handle_packet_response(struct hpsb_host *host, int tcode,
612 quadlet_t *data, size_t size)
613{
614 struct hpsb_packet *packet = NULL;
615 struct sk_buff *skb;
616 int tcode_match = 0;
617 int tlabel;
618 unsigned long flags;
619
620 tlabel = (data[0] >> 10) & 0x3f;
621
622 spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
623
624 skb_queue_walk(&host->pending_packet_queue, skb) {
625 packet = (struct hpsb_packet *)skb->data;
626 if ((packet->tlabel == tlabel)
627 && (packet->node_id == (data[1] >> 16))){
628 break;
629 }
630
631 packet = NULL;
632 }
633
634 if (packet == NULL) {
635 HPSB_DEBUG("unsolicited response packet received - no tlabel match");
636 dump_packet("contents:", data, 16);
637 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
638 return;
639 }
640
641 switch (packet->tcode) {
642 case TCODE_WRITEQ:
643 case TCODE_WRITEB:
644 if (tcode != TCODE_WRITE_RESPONSE)
645 break;
646 tcode_match = 1;
647 memcpy(packet->header, data, 12);
648 break;
649 case TCODE_READQ:
650 if (tcode != TCODE_READQ_RESPONSE)
651 break;
652 tcode_match = 1;
653 memcpy(packet->header, data, 16);
654 break;
655 case TCODE_READB:
656 if (tcode != TCODE_READB_RESPONSE)
657 break;
658 tcode_match = 1;
659 BUG_ON(packet->skb->len - sizeof(*packet) < size - 16);
660 memcpy(packet->header, data, 16);
661 memcpy(packet->data, data + 4, size - 16);
662 break;
663 case TCODE_LOCK_REQUEST:
664 if (tcode != TCODE_LOCK_RESPONSE)
665 break;
666 tcode_match = 1;
667 size = min((size - 16), (size_t)8);
668 BUG_ON(packet->skb->len - sizeof(*packet) < size);
669 memcpy(packet->header, data, 16);
670 memcpy(packet->data, data + 4, size);
671 break;
672 }
673
674 if (!tcode_match) {
675 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
676 HPSB_INFO("unsolicited response packet received - tcode mismatch");
677 dump_packet("contents:", data, 16);
678 return;
679 }
680
681 __skb_unlink(skb, skb->list);
682
683 if (packet->state == hpsb_queued) {
684 packet->sendtime = jiffies;
685 packet->ack_code = ACK_PENDING;
686 }
687
688 packet->state = hpsb_complete;
689 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
690
691 queue_packet_complete(packet);
692}
693
694
695static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
696 quadlet_t *data, size_t dsize)
697{
698 struct hpsb_packet *p;
699
700 p = hpsb_alloc_packet(dsize);
701 if (unlikely(p == NULL)) {
702 /* FIXME - send data_error response */
703 return NULL;
704 }
705
706 p->type = hpsb_async;
707 p->state = hpsb_unused;
708 p->host = host;
709 p->node_id = data[1] >> 16;
710 p->tlabel = (data[0] >> 10) & 0x3f;
711 p->no_waiter = 1;
712
713 p->generation = get_hpsb_generation(host);
714
715 if (dsize % 4)
716 p->data[dsize / 4] = 0;
717
718 return p;
719}
720
721#define PREP_ASYNC_HEAD_RCODE(tc) \
722 packet->tcode = tc; \
723 packet->header[0] = (packet->node_id << 16) | (packet->tlabel << 10) \
724 | (1 << 8) | (tc << 4); \
725 packet->header[1] = (packet->host->node_id << 16) | (rcode << 12); \
726 packet->header[2] = 0
727
728static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode,
729 quadlet_t data)
730{
731 PREP_ASYNC_HEAD_RCODE(TCODE_READQ_RESPONSE);
732 packet->header[3] = data;
733 packet->header_size = 16;
734 packet->data_size = 0;
735}
736
737static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
738 int length)
739{
740 if (rcode != RCODE_COMPLETE)
741 length = 0;
742
743 PREP_ASYNC_HEAD_RCODE(TCODE_READB_RESPONSE);
744 packet->header[3] = length << 16;
745 packet->header_size = 16;
746 packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
747}
748
749static void fill_async_write_resp(struct hpsb_packet *packet, int rcode)
750{
751 PREP_ASYNC_HEAD_RCODE(TCODE_WRITE_RESPONSE);
752 packet->header[2] = 0;
753 packet->header_size = 12;
754 packet->data_size = 0;
755}
756
757static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extcode,
758 int length)
759{
760 if (rcode != RCODE_COMPLETE)
761 length = 0;
762
763 PREP_ASYNC_HEAD_RCODE(TCODE_LOCK_RESPONSE);
764 packet->header[3] = (length << 16) | extcode;
765 packet->header_size = 16;
766 packet->data_size = length;
767}
768
769#define PREP_REPLY_PACKET(length) \
770 packet = create_reply_packet(host, data, length); \
771 if (packet == NULL) break
772
773static void handle_incoming_packet(struct hpsb_host *host, int tcode,
774 quadlet_t *data, size_t size, int write_acked)
775{
776 struct hpsb_packet *packet;
777 int length, rcode, extcode;
778 quadlet_t buffer;
779 nodeid_t source = data[1] >> 16;
780 nodeid_t dest = data[0] >> 16;
781 u16 flags = (u16) data[0];
782 u64 addr;
783
784 /* big FIXME - no error checking is done for an out of bounds length */
785
786 switch (tcode) {
787 case TCODE_WRITEQ:
788 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
789 rcode = highlevel_write(host, source, dest, data+3,
790 addr, 4, flags);
791
792 if (!write_acked
793 && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
794 && (rcode >= 0)) {
795 /* not a broadcast write, reply */
796 PREP_REPLY_PACKET(0);
797 fill_async_write_resp(packet, rcode);
798 send_packet_nocare(packet);
799 }
800 break;
801
802 case TCODE_WRITEB:
803 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
804 rcode = highlevel_write(host, source, dest, data+4,
805 addr, data[3]>>16, flags);
806
807 if (!write_acked
808 && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
809 && (rcode >= 0)) {
810 /* not a broadcast write, reply */
811 PREP_REPLY_PACKET(0);
812 fill_async_write_resp(packet, rcode);
813 send_packet_nocare(packet);
814 }
815 break;
816
817 case TCODE_READQ:
818 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
819 rcode = highlevel_read(host, source, &buffer, addr, 4, flags);
820
821 if (rcode >= 0) {
822 PREP_REPLY_PACKET(0);
823 fill_async_readquad_resp(packet, rcode, buffer);
824 send_packet_nocare(packet);
825 }
826 break;
827
828 case TCODE_READB:
829 length = data[3] >> 16;
830 PREP_REPLY_PACKET(length);
831
832 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
833 rcode = highlevel_read(host, source, packet->data, addr,
834 length, flags);
835
836 if (rcode >= 0) {
837 fill_async_readblock_resp(packet, rcode, length);
838 send_packet_nocare(packet);
839 } else {
840 hpsb_free_packet(packet);
841 }
842 break;
843
844 case TCODE_LOCK_REQUEST:
845 length = data[3] >> 16;
846 extcode = data[3] & 0xffff;
847 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
848
849 PREP_REPLY_PACKET(8);
850
851 if ((extcode == 0) || (extcode >= 7)) {
852 /* let switch default handle error */
853 length = 0;
854 }
855
856 switch (length) {
857 case 4:
858 rcode = highlevel_lock(host, source, packet->data, addr,
859 data[4], 0, extcode,flags);
860 fill_async_lock_resp(packet, rcode, extcode, 4);
861 break;
862 case 8:
863 if ((extcode != EXTCODE_FETCH_ADD)
864 && (extcode != EXTCODE_LITTLE_ADD)) {
865 rcode = highlevel_lock(host, source,
866 packet->data, addr,
867 data[5], data[4],
868 extcode, flags);
869 fill_async_lock_resp(packet, rcode, extcode, 4);
870 } else {
871 rcode = highlevel_lock64(host, source,
872 (octlet_t *)packet->data, addr,
873 *(octlet_t *)(data + 4), 0ULL,
874 extcode, flags);
875 fill_async_lock_resp(packet, rcode, extcode, 8);
876 }
877 break;
878 case 16:
879 rcode = highlevel_lock64(host, source,
880 (octlet_t *)packet->data, addr,
881 *(octlet_t *)(data + 6),
882 *(octlet_t *)(data + 4),
883 extcode, flags);
884 fill_async_lock_resp(packet, rcode, extcode, 8);
885 break;
886 default:
887 rcode = RCODE_TYPE_ERROR;
888 fill_async_lock_resp(packet, rcode,
889 extcode, 0);
890 }
891
892 if (rcode >= 0) {
893 send_packet_nocare(packet);
894 } else {
895 hpsb_free_packet(packet);
896 }
897 break;
898 }
899
900}
901#undef PREP_REPLY_PACKET
902
903
904void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
905 int write_acked)
906{
907 int tcode;
908
909 if (host->in_bus_reset) {
910 HPSB_INFO("received packet during reset; ignoring");
911 return;
912 }
913
914 dump_packet("received packet:", data, size);
915
916 tcode = (data[0] >> 4) & 0xf;
917
918 switch (tcode) {
919 case TCODE_WRITE_RESPONSE:
920 case TCODE_READQ_RESPONSE:
921 case TCODE_READB_RESPONSE:
922 case TCODE_LOCK_RESPONSE:
923 handle_packet_response(host, tcode, data, size);
924 break;
925
926 case TCODE_WRITEQ:
927 case TCODE_WRITEB:
928 case TCODE_READQ:
929 case TCODE_READB:
930 case TCODE_LOCK_REQUEST:
931 handle_incoming_packet(host, tcode, data, size, write_acked);
932 break;
933
934
935 case TCODE_ISO_DATA:
936 highlevel_iso_receive(host, data, size);
937 break;
938
939 case TCODE_CYCLE_START:
940 /* simply ignore this packet if it is passed on */
941 break;
942
943 default:
944 HPSB_NOTICE("received packet with bogus transaction code %d",
945 tcode);
946 break;
947 }
948}
949
950
951static void abort_requests(struct hpsb_host *host)
952{
953 struct hpsb_packet *packet;
954 struct sk_buff *skb;
955
956 host->driver->devctl(host, CANCEL_REQUESTS, 0);
957
958 while ((skb = skb_dequeue(&host->pending_packet_queue)) != NULL) {
959 packet = (struct hpsb_packet *)skb->data;
960
961 packet->state = hpsb_complete;
962 packet->ack_code = ACKX_ABORTED;
963 queue_packet_complete(packet);
964 }
965}
966
967void abort_timedouts(unsigned long __opaque)
968{
969 struct hpsb_host *host = (struct hpsb_host *)__opaque;
970 unsigned long flags;
971 struct hpsb_packet *packet;
972 struct sk_buff *skb;
973 unsigned long expire;
974
975 spin_lock_irqsave(&host->csr.lock, flags);
976 expire = host->csr.expire;
977 spin_unlock_irqrestore(&host->csr.lock, flags);
978
979 /* Hold the lock around this, since we aren't dequeuing all
980 * packets, just ones we need. */
981 spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
982
983 while (!skb_queue_empty(&host->pending_packet_queue)) {
984 skb = skb_peek(&host->pending_packet_queue);
985
986 packet = (struct hpsb_packet *)skb->data;
987
988 if (time_before(packet->sendtime + expire, jiffies)) {
989 __skb_unlink(skb, skb->list);
990 packet->state = hpsb_complete;
991 packet->ack_code = ACKX_TIMEOUT;
992 queue_packet_complete(packet);
993 } else {
994 /* Since packets are added to the tail, the oldest
995 * ones are first, always. When we get to one that
996 * isn't timed out, the rest aren't either. */
997 break;
998 }
999 }
1000
1001 if (!skb_queue_empty(&host->pending_packet_queue))
1002 mod_timer(&host->timeout, jiffies + host->timeout_interval);
1003
1004 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
1005}
1006
1007
1008/* Kernel thread and vars, which handles packets that are completed. Only
1009 * packets that have a "complete" function are sent here. This way, the
1010 * completion is run out of kernel context, and doesn't block the rest of
1011 * the stack. */
1012static int khpsbpkt_pid = -1, khpsbpkt_kill;
1013static DECLARE_COMPLETION(khpsbpkt_complete);
1014static struct sk_buff_head hpsbpkt_queue;
1015static DECLARE_MUTEX_LOCKED(khpsbpkt_sig);
1016
1017
1018static void queue_packet_complete(struct hpsb_packet *packet)
1019{
1020 if (packet->no_waiter) {
1021 hpsb_free_packet(packet);
1022 return;
1023 }
1024 if (packet->complete_routine != NULL) {
1025 skb_queue_tail(&hpsbpkt_queue, packet->skb);
1026
1027 /* Signal the kernel thread to handle this */
1028 up(&khpsbpkt_sig);
1029 }
1030 return;
1031}
1032
1033static int hpsbpkt_thread(void *__hi)
1034{
1035 struct sk_buff *skb;
1036 struct hpsb_packet *packet;
1037 void (*complete_routine)(void*);
1038 void *complete_data;
1039
1040 daemonize("khpsbpkt");
1041
1042 while (1) {
1043 if (down_interruptible(&khpsbpkt_sig)) {
1044 if (current->flags & PF_FREEZE) {
1045 refrigerator(0);
1046 continue;
1047 }
1048 printk("khpsbpkt: received unexpected signal?!\n" );
1049 break;
1050 }
1051
1052 if (khpsbpkt_kill)
1053 break;
1054
1055 while ((skb = skb_dequeue(&hpsbpkt_queue)) != NULL) {
1056 packet = (struct hpsb_packet *)skb->data;
1057
1058 complete_routine = packet->complete_routine;
1059 complete_data = packet->complete_data;
1060
1061 packet->complete_routine = packet->complete_data = NULL;
1062
1063 complete_routine(complete_data);
1064 }
1065 }
1066
1067 complete_and_exit(&khpsbpkt_complete, 0);
1068}
1069
1070static int __init ieee1394_init(void)
1071{
1072 int i, ret;
1073
1074 skb_queue_head_init(&hpsbpkt_queue);
1075
1076 /* non-fatal error */
1077 if (hpsb_init_config_roms()) {
1078 HPSB_ERR("Failed to initialize some config rom entries.\n");
1079 HPSB_ERR("Some features may not be available\n");
1080 }
1081
1082 khpsbpkt_pid = kernel_thread(hpsbpkt_thread, NULL, CLONE_KERNEL);
1083 if (khpsbpkt_pid < 0) {
1084 HPSB_ERR("Failed to start hpsbpkt thread!\n");
1085 ret = -ENOMEM;
1086 goto exit_cleanup_config_roms;
1087 }
1088
1089 if (register_chrdev_region(IEEE1394_CORE_DEV, 256, "ieee1394")) {
1090 HPSB_ERR("unable to register character device major %d!\n", IEEE1394_MAJOR);
1091 ret = -ENODEV;
1092 goto exit_release_kernel_thread;
1093 }
1094
1095 /* actually this is a non-fatal error */
1096 ret = devfs_mk_dir("ieee1394");
1097 if (ret < 0) {
1098 HPSB_ERR("unable to make devfs dir for device major %d!\n", IEEE1394_MAJOR);
1099 goto release_chrdev;
1100 }
1101
1102 ret = bus_register(&ieee1394_bus_type);
1103 if (ret < 0) {
1104 HPSB_INFO("bus register failed");
1105 goto release_devfs;
1106 }
1107
1108 for (i = 0; fw_bus_attrs[i]; i++) {
1109 ret = bus_create_file(&ieee1394_bus_type, fw_bus_attrs[i]);
1110 if (ret < 0) {
1111 while (i >= 0) {
1112 bus_remove_file(&ieee1394_bus_type,
1113 fw_bus_attrs[i--]);
1114 }
1115 bus_unregister(&ieee1394_bus_type);
1116 goto release_devfs;
1117 }
1118 }
1119
1120 ret = class_register(&hpsb_host_class);
1121 if (ret < 0)
1122 goto release_all_bus;
1123
1124 hpsb_protocol_class = class_simple_create(THIS_MODULE, "ieee1394_protocol");
1125 if (IS_ERR(hpsb_protocol_class)) {
1126 ret = PTR_ERR(hpsb_protocol_class);
1127 goto release_class_host;
1128 }
1129
1130 ret = init_csr();
1131 if (ret) {
1132 HPSB_INFO("init csr failed");
1133 ret = -ENOMEM;
1134 goto release_class_protocol;
1135 }
1136
1137 if (disable_nodemgr) {
1138 HPSB_INFO("nodemgr and IRM functionality disabled");
1139 /* We shouldn't contend for IRM with nodemgr disabled, since
1140 nodemgr implements functionality required of ieee1394a-2000
1141 IRMs */
1142 hpsb_disable_irm = 1;
1143
1144 return 0;
1145 }
1146
1147 if (hpsb_disable_irm) {
1148 HPSB_INFO("IRM functionality disabled");
1149 }
1150
1151 ret = init_ieee1394_nodemgr();
1152 if (ret < 0) {
1153 HPSB_INFO("init nodemgr failed");
1154 goto cleanup_csr;
1155 }
1156
1157 return 0;
1158
1159cleanup_csr:
1160 cleanup_csr();
1161release_class_protocol:
1162 class_simple_destroy(hpsb_protocol_class);
1163release_class_host:
1164 class_unregister(&hpsb_host_class);
1165release_all_bus:
1166 for (i = 0; fw_bus_attrs[i]; i++)
1167 bus_remove_file(&ieee1394_bus_type, fw_bus_attrs[i]);
1168 bus_unregister(&ieee1394_bus_type);
1169release_devfs:
1170 devfs_remove("ieee1394");
1171release_chrdev:
1172 unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
1173exit_release_kernel_thread:
1174 if (khpsbpkt_pid >= 0) {
1175 kill_proc(khpsbpkt_pid, SIGTERM, 1);
1176 wait_for_completion(&khpsbpkt_complete);
1177 }
1178exit_cleanup_config_roms:
1179 hpsb_cleanup_config_roms();
1180 return ret;
1181}
1182
1183static void __exit ieee1394_cleanup(void)
1184{
1185 int i;
1186
1187 if (!disable_nodemgr)
1188 cleanup_ieee1394_nodemgr();
1189
1190 cleanup_csr();
1191
1192 class_simple_destroy(hpsb_protocol_class);
1193 class_unregister(&hpsb_host_class);
1194 for (i = 0; fw_bus_attrs[i]; i++)
1195 bus_remove_file(&ieee1394_bus_type, fw_bus_attrs[i]);
1196 bus_unregister(&ieee1394_bus_type);
1197
1198 if (khpsbpkt_pid >= 0) {
1199 khpsbpkt_kill = 1;
1200 mb();
1201 up(&khpsbpkt_sig);
1202 wait_for_completion(&khpsbpkt_complete);
1203 }
1204
1205 hpsb_cleanup_config_roms();
1206
1207 unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
1208 devfs_remove("ieee1394");
1209}
1210
1211module_init(ieee1394_init);
1212module_exit(ieee1394_cleanup);
1213
1214/* Exported symbols */
1215
1216/** hosts.c **/
1217EXPORT_SYMBOL(hpsb_alloc_host);
1218EXPORT_SYMBOL(hpsb_add_host);
1219EXPORT_SYMBOL(hpsb_remove_host);
1220EXPORT_SYMBOL(hpsb_update_config_rom_image);
1221
1222/** ieee1394_core.c **/
1223EXPORT_SYMBOL(hpsb_speedto_str);
1224EXPORT_SYMBOL(hpsb_protocol_class);
1225EXPORT_SYMBOL(hpsb_set_packet_complete_task);
1226EXPORT_SYMBOL(hpsb_alloc_packet);
1227EXPORT_SYMBOL(hpsb_free_packet);
1228EXPORT_SYMBOL(hpsb_send_phy_config);
1229EXPORT_SYMBOL(hpsb_send_packet);
1230EXPORT_SYMBOL(hpsb_send_packet_and_wait);
1231EXPORT_SYMBOL(hpsb_reset_bus);
1232EXPORT_SYMBOL(hpsb_bus_reset);
1233EXPORT_SYMBOL(hpsb_selfid_received);
1234EXPORT_SYMBOL(hpsb_selfid_complete);
1235EXPORT_SYMBOL(hpsb_packet_sent);
1236EXPORT_SYMBOL(hpsb_packet_received);
1237EXPORT_SYMBOL_GPL(hpsb_disable_irm);
1238
1239/** ieee1394_transactions.c **/
1240EXPORT_SYMBOL(hpsb_get_tlabel);
1241EXPORT_SYMBOL(hpsb_free_tlabel);
1242EXPORT_SYMBOL(hpsb_make_readpacket);
1243EXPORT_SYMBOL(hpsb_make_writepacket);
1244EXPORT_SYMBOL(hpsb_make_streampacket);
1245EXPORT_SYMBOL(hpsb_make_lockpacket);
1246EXPORT_SYMBOL(hpsb_make_lock64packet);
1247EXPORT_SYMBOL(hpsb_make_phypacket);
1248EXPORT_SYMBOL(hpsb_make_isopacket);
1249EXPORT_SYMBOL(hpsb_read);
1250EXPORT_SYMBOL(hpsb_write);
1251EXPORT_SYMBOL(hpsb_lock);
1252EXPORT_SYMBOL(hpsb_packet_success);
1253
1254/** highlevel.c **/
1255EXPORT_SYMBOL(hpsb_register_highlevel);
1256EXPORT_SYMBOL(hpsb_unregister_highlevel);
1257EXPORT_SYMBOL(hpsb_register_addrspace);
1258EXPORT_SYMBOL(hpsb_unregister_addrspace);
1259EXPORT_SYMBOL(hpsb_allocate_and_register_addrspace);
1260EXPORT_SYMBOL(hpsb_listen_channel);
1261EXPORT_SYMBOL(hpsb_unlisten_channel);
1262EXPORT_SYMBOL(hpsb_get_hostinfo);
1263EXPORT_SYMBOL(hpsb_create_hostinfo);
1264EXPORT_SYMBOL(hpsb_destroy_hostinfo);
1265EXPORT_SYMBOL(hpsb_set_hostinfo_key);
1266EXPORT_SYMBOL(hpsb_get_hostinfo_bykey);
1267EXPORT_SYMBOL(hpsb_set_hostinfo);
1268EXPORT_SYMBOL(highlevel_add_host);
1269EXPORT_SYMBOL(highlevel_remove_host);
1270EXPORT_SYMBOL(highlevel_host_reset);
1271
1272/** nodemgr.c **/
1273EXPORT_SYMBOL(hpsb_node_fill_packet);
1274EXPORT_SYMBOL(hpsb_node_write);
1275EXPORT_SYMBOL(hpsb_register_protocol);
1276EXPORT_SYMBOL(hpsb_unregister_protocol);
1277EXPORT_SYMBOL(ieee1394_bus_type);
1278EXPORT_SYMBOL(nodemgr_for_each_host);
1279
1280/** csr.c **/
1281EXPORT_SYMBOL(hpsb_update_config_rom);
1282
1283/** dma.c **/
1284EXPORT_SYMBOL(dma_prog_region_init);
1285EXPORT_SYMBOL(dma_prog_region_alloc);
1286EXPORT_SYMBOL(dma_prog_region_free);
1287EXPORT_SYMBOL(dma_region_init);
1288EXPORT_SYMBOL(dma_region_alloc);
1289EXPORT_SYMBOL(dma_region_free);
1290EXPORT_SYMBOL(dma_region_sync_for_cpu);
1291EXPORT_SYMBOL(dma_region_sync_for_device);
1292EXPORT_SYMBOL(dma_region_mmap);
1293EXPORT_SYMBOL(dma_region_offset_to_bus);
1294
1295/** iso.c **/
1296EXPORT_SYMBOL(hpsb_iso_xmit_init);
1297EXPORT_SYMBOL(hpsb_iso_recv_init);
1298EXPORT_SYMBOL(hpsb_iso_xmit_start);
1299EXPORT_SYMBOL(hpsb_iso_recv_start);
1300EXPORT_SYMBOL(hpsb_iso_recv_listen_channel);
1301EXPORT_SYMBOL(hpsb_iso_recv_unlisten_channel);
1302EXPORT_SYMBOL(hpsb_iso_recv_set_channel_mask);
1303EXPORT_SYMBOL(hpsb_iso_stop);
1304EXPORT_SYMBOL(hpsb_iso_shutdown);
1305EXPORT_SYMBOL(hpsb_iso_xmit_queue_packet);
1306EXPORT_SYMBOL(hpsb_iso_xmit_sync);
1307EXPORT_SYMBOL(hpsb_iso_recv_release_packets);
1308EXPORT_SYMBOL(hpsb_iso_n_ready);
1309EXPORT_SYMBOL(hpsb_iso_packet_sent);
1310EXPORT_SYMBOL(hpsb_iso_packet_received);
1311EXPORT_SYMBOL(hpsb_iso_wake);
1312EXPORT_SYMBOL(hpsb_iso_recv_flush);
1313
1314/** csr1212.c **/
1315EXPORT_SYMBOL(csr1212_create_csr);
1316EXPORT_SYMBOL(csr1212_init_local_csr);
1317EXPORT_SYMBOL(csr1212_new_immediate);
1318EXPORT_SYMBOL(csr1212_new_directory);
1319EXPORT_SYMBOL(csr1212_associate_keyval);
1320EXPORT_SYMBOL(csr1212_attach_keyval_to_directory);
1321EXPORT_SYMBOL(csr1212_new_string_descriptor_leaf);
1322EXPORT_SYMBOL(csr1212_detach_keyval_from_directory);
1323EXPORT_SYMBOL(csr1212_release_keyval);
1324EXPORT_SYMBOL(csr1212_destroy_csr);
1325EXPORT_SYMBOL(csr1212_read);
1326EXPORT_SYMBOL(csr1212_generate_csr_image);
1327EXPORT_SYMBOL(csr1212_parse_keyval);
1328EXPORT_SYMBOL(csr1212_parse_csr);
1329EXPORT_SYMBOL(_csr1212_read_keyval);
1330EXPORT_SYMBOL(_csr1212_destroy_keyval);
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h
new file mode 100644
index 000000000000..c4b4408e2e05
--- /dev/null
+++ b/drivers/ieee1394/ieee1394_core.h
@@ -0,0 +1,228 @@
1
2#ifndef _IEEE1394_CORE_H
3#define _IEEE1394_CORE_H
4
5#include <linux/slab.h>
6#include <linux/devfs_fs_kernel.h>
7#include <asm/atomic.h>
8#include <asm/semaphore.h>
9#include "hosts.h"
10
11
12struct hpsb_packet {
13 /* This struct is basically read-only for hosts with the exception of
14 * the data buffer contents and xnext - see below. */
15
16 /* This can be used for host driver internal linking.
17 *
18 * NOTE: This must be left in init state when the driver is done
19 * with it (e.g. by using list_del_init()), since the core does
20 * some sanity checks to make sure the packet is not on a
21 * driver_list when free'ing it. */
22 struct list_head driver_list;
23
24 nodeid_t node_id;
25
26 /* Async and Iso types should be clear, raw means send-as-is, do not
27 * CRC! Byte swapping shall still be done in this case. */
28 enum { hpsb_async, hpsb_iso, hpsb_raw } __attribute__((packed)) type;
29
30 /* Okay, this is core internal and of no concern to hosts.
31 * queued = queued for sending
32 * pending = sent, waiting for response
33 * complete = processing completed, successful or not
34 */
35 enum {
36 hpsb_unused, hpsb_queued, hpsb_pending, hpsb_complete
37 } __attribute__((packed)) state;
38
39 /* These are core internal. */
40 signed char tlabel;
41 char ack_code;
42 char tcode;
43
44 unsigned expect_response:1;
45 unsigned no_waiter:1;
46
47 /* Speed to transmit with: 0 = 100Mbps, 1 = 200Mbps, 2 = 400Mbps */
48 unsigned speed_code:2;
49
50 /*
51 * *header and *data are guaranteed to be 32-bit DMAable and may be
52 * overwritten to allow in-place byte swapping. Neither of these is
53 * CRCed (the sizes also don't include CRC), but contain space for at
54 * least one additional quadlet to allow in-place CRCing. The memory is
55 * also guaranteed to be DMA mappable.
56 */
57 quadlet_t *header;
58 quadlet_t *data;
59 size_t header_size;
60 size_t data_size;
61
62
63 struct hpsb_host *host;
64 unsigned int generation;
65
66 atomic_t refcnt;
67
68 /* Function (and possible data to pass to it) to call when this
69 * packet is completed. */
70 void (*complete_routine)(void *);
71 void *complete_data;
72
73 /* XXX This is just a hack at the moment */
74 struct sk_buff *skb;
75
76 /* Store jiffies for implementing bus timeouts. */
77 unsigned long sendtime;
78
79 quadlet_t embedded_header[5];
80};
81
82/* Set a task for when a packet completes */
83void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
84 void (*routine)(void *), void *data);
85
86static inline struct hpsb_packet *driver_packet(struct list_head *l)
87{
88 return list_entry(l, struct hpsb_packet, driver_list);
89}
90
91void abort_timedouts(unsigned long __opaque);
92
93struct hpsb_packet *hpsb_alloc_packet(size_t data_size);
94void hpsb_free_packet(struct hpsb_packet *packet);
95
96
97/*
98 * Generation counter for the complete 1394 subsystem. Generation gets
99 * incremented on every change in the subsystem (e.g. bus reset).
100 *
101 * Use the functions, not the variable.
102 */
103static inline unsigned int get_hpsb_generation(struct hpsb_host *host)
104{
105 return atomic_read(&host->generation);
106}
107
108/*
109 * Send a PHY configuration packet, return 0 on success, negative
110 * errno on failure.
111 */
112int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt);
113
114/*
115 * Queue packet for transmitting, return 0 on success, negative errno
116 * on failure.
117 */
118int hpsb_send_packet(struct hpsb_packet *packet);
119
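/*
 * Illustrative only, not part of the original header: the usual way the
 * generation counter and hpsb_send_packet() interact. Stamp the packet with
 * the generation it was built under, and treat -EAGAIN as "bus reset hit,
 * rebuild and retry".  The wrapper name is hypothetical.
 */
static inline int example_send_current_generation(struct hpsb_packet *packet)
{
	packet->generation = get_hpsb_generation(packet->host);
	return hpsb_send_packet(packet);	/* -EAGAIN if a reset intervened */
}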
120/*
121 * Queue packet for transmitting, and block until the transaction
122 * completes. Return 0 on success, negative errno on failure.
123 */
124int hpsb_send_packet_and_wait(struct hpsb_packet *packet);
125
126/* Initiate bus reset on the given host. Returns 1 if bus reset already in
127 * progress, 0 otherwise. */
128int hpsb_reset_bus(struct hpsb_host *host, int type);
129
130/*
131 * The following functions are exported for host driver module usage. All of
132 * them are safe to use in interrupt contexts, although some are quite
133 * complicated, so you may want to run them in bottom halves instead of calling
134 * them directly.
135 */
136
137/* Notify a bus reset to the core. Returns 1 if bus reset already in progress,
138 * 0 otherwise. */
139int hpsb_bus_reset(struct hpsb_host *host);
140
141/*
142 * Hand over received selfid packet to the core. Complement check (second
143 * quadlet is complement of first) is expected to be done and successful.
144 */
145void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid);
146
147/*
148 * Notify completion of SelfID stage to the core and report new physical ID
149 * and whether host is root now.
150 */
151void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot);
152
153/*
154 * Notify core of sending a packet. Ackcode is the ack code returned for async
155 * transmits or ACKX_SEND_ERROR if the transmission failed completely; ACKX_NONE
156 * for other cases (internal errors that don't justify a panic). Safe to call
157 * from within a transmit packet routine.
158 */
159void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
160 int ackcode);
161
162/*
163 * Hand over received packet to the core. The contents of data are expected to
164 * be the full packet but with the CRCs left out (data block follows header
165 * immediately), with the header (i.e. the first four quadlets) in machine byte
166 * order and the data block in big endian. *data can be safely overwritten
167 * after this call.
168 *
169 * If the packet is a write request, write_acked is to be set to true if it was
170 * ack_complete'd already, false otherwise. This arg is ignored for any other
171 * packet type.
172 */
173void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
174 int write_acked);
175
176
177/*
178 * CHARACTER DEVICE DISPATCHING
179 *
180 * All ieee1394 character device drivers share the same major number
181 * (major 171). The 256 minor numbers are allocated to the various
182 * task-specific interfaces (raw1394, video1394, dv1394, etc) in
183 * blocks of 16.
184 *
185 * The core ieee1394.o module allocates the device number region
186 * 171:0-255, the various drivers must then cdev_add() their cdev
187 * objects to handle their respective sub-regions.
188 *
189 * Minor device number block allocations:
190 *
191 * Block 0 ( 0- 15) raw1394
192 * Block 1 ( 16- 31) video1394
193 * Block 2 ( 32- 47) dv1394
194 *
195 * Blocks 3-14 free for future allocation
196 *
197 * Block 15 (240-255) reserved for drivers under development, etc.
198 */
199
200#define IEEE1394_MAJOR 171
201
202#define IEEE1394_MINOR_BLOCK_RAW1394 0
203#define IEEE1394_MINOR_BLOCK_VIDEO1394 1
204#define IEEE1394_MINOR_BLOCK_DV1394 2
205#define IEEE1394_MINOR_BLOCK_AMDTP 3
206#define IEEE1394_MINOR_BLOCK_EXPERIMENTAL 15
207
208#define IEEE1394_CORE_DEV MKDEV(IEEE1394_MAJOR, 0)
209#define IEEE1394_RAW1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16)
210#define IEEE1394_VIDEO1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16)
211#define IEEE1394_DV1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16)
212#define IEEE1394_AMDTP_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_AMDTP * 16)
213#define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16)
214
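/*
 * Illustrative only, not part of the original header: how a protocol driver
 * derives a concrete device number from the block scheme above; raw1394, for
 * instance, owns the 16 minors starting at IEEE1394_RAW1394_DEV.  The helper
 * name is hypothetical.
 */
static inline dev_t example_raw1394_devnum(unsigned int instance)
{
	/* instance must be < 16 to stay inside the raw1394 minor block */
	return MKDEV(IEEE1394_MAJOR,
		     IEEE1394_MINOR_BLOCK_RAW1394 * 16 + instance);
}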
215/* return the index (within a minor number block) of a file */
216static inline unsigned char ieee1394_file_to_instance(struct file *file)
217{
218 return file->f_dentry->d_inode->i_cindex;
219}
220
221extern int hpsb_disable_irm;
222
223/* Our sysfs bus entry */
224extern struct bus_type ieee1394_bus_type;
225extern struct class hpsb_host_class;
226extern struct class_simple *hpsb_protocol_class;
227
228#endif /* _IEEE1394_CORE_H */
diff --git a/drivers/ieee1394/ieee1394_hotplug.h b/drivers/ieee1394/ieee1394_hotplug.h
new file mode 100644
index 000000000000..5be70d31b007
--- /dev/null
+++ b/drivers/ieee1394/ieee1394_hotplug.h
@@ -0,0 +1,33 @@
1#ifndef _IEEE1394_HOTPLUG_H
2#define _IEEE1394_HOTPLUG_H
3
4#include <linux/kernel.h>
5#include <linux/types.h>
6#include <linux/mod_devicetable.h>
7
8/* Unit spec id and sw version entry for some protocols */
9#define AVC_UNIT_SPEC_ID_ENTRY 0x0000A02D
10#define AVC_SW_VERSION_ENTRY 0x00010001
11#define CAMERA_UNIT_SPEC_ID_ENTRY 0x0000A02D
12#define CAMERA_SW_VERSION_ENTRY 0x00000100
13
14/* Check to make sure this all isn't already defined */
15#ifndef IEEE1394_MATCH_VENDOR_ID
16
17#define IEEE1394_MATCH_VENDOR_ID 0x0001
18#define IEEE1394_MATCH_MODEL_ID 0x0002
19#define IEEE1394_MATCH_SPECIFIER_ID 0x0004
20#define IEEE1394_MATCH_VERSION 0x0008
21
22struct ieee1394_device_id {
23 u32 match_flags;
24 u32 vendor_id;
25 u32 model_id;
26 u32 specifier_id;
27 u32 version;
28 void *driver_data;
29};
30
31#endif
32
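/*
 * Illustrative only, not part of the original header: the shape of a match
 * table a protocol driver builds from the constants above (dv1394 and
 * video1394 use entries of this form).  The table name is hypothetical.
 */
static struct ieee1394_device_id example_avc_id_table[] = {
	{
		.match_flags	= IEEE1394_MATCH_SPECIFIER_ID |
				  IEEE1394_MATCH_VERSION,
		.specifier_id	= AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
		.version	= AVC_SW_VERSION_ENTRY & 0xffffff,
	},
	{ }	/* terminating all-zero entry */
};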
33#endif /* _IEEE1394_HOTPLUG_H */
diff --git a/drivers/ieee1394/ieee1394_transactions.c b/drivers/ieee1394/ieee1394_transactions.c
new file mode 100644
index 000000000000..09908b9564d8
--- /dev/null
+++ b/drivers/ieee1394/ieee1394_transactions.c
@@ -0,0 +1,601 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * Transaction support.
5 *
6 * Copyright (C) 1999 Andreas E. Bombe
7 *
8 * This code is licensed under the GPL. See the file COPYING in the root
9 * directory of the kernel sources for details.
10 */
11
12#include <linux/sched.h>
13#include <linux/bitops.h>
14#include <linux/smp_lock.h>
15#include <linux/interrupt.h>
16
17#include <asm/errno.h>
18
19#include "ieee1394.h"
20#include "ieee1394_types.h"
21#include "hosts.h"
22#include "ieee1394_core.h"
23#include "highlevel.h"
24#include "nodemgr.h"
25
26
27#define PREP_ASYNC_HEAD_ADDRESS(tc) \
28 packet->tcode = tc; \
29 packet->header[0] = (packet->node_id << 16) | (packet->tlabel << 10) \
30 | (1 << 8) | (tc << 4); \
31 packet->header[1] = (packet->host->node_id << 16) | (addr >> 32); \
32 packet->header[2] = addr & 0xffffffff
33
34
35static void fill_async_readquad(struct hpsb_packet *packet, u64 addr)
36{
37 PREP_ASYNC_HEAD_ADDRESS(TCODE_READQ);
38 packet->header_size = 12;
39 packet->data_size = 0;
40 packet->expect_response = 1;
41}
42
43static void fill_async_readblock(struct hpsb_packet *packet, u64 addr, int length)
44{
45 PREP_ASYNC_HEAD_ADDRESS(TCODE_READB);
46 packet->header[3] = length << 16;
47 packet->header_size = 16;
48 packet->data_size = 0;
49 packet->expect_response = 1;
50}
51
52static void fill_async_writequad(struct hpsb_packet *packet, u64 addr, quadlet_t data)
53{
54 PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEQ);
55 packet->header[3] = data;
56 packet->header_size = 16;
57 packet->data_size = 0;
58 packet->expect_response = 1;
59}
60
61static void fill_async_writeblock(struct hpsb_packet *packet, u64 addr, int length)
62{
63 PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEB);
64 packet->header[3] = length << 16;
65 packet->header_size = 16;
66 packet->expect_response = 1;
67 packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
68}
69
70static void fill_async_lock(struct hpsb_packet *packet, u64 addr, int extcode,
71 int length)
72{
73 PREP_ASYNC_HEAD_ADDRESS(TCODE_LOCK_REQUEST);
74 packet->header[3] = (length << 16) | extcode;
75 packet->header_size = 16;
76 packet->data_size = length;
77 packet->expect_response = 1;
78}
79
80static void fill_iso_packet(struct hpsb_packet *packet, int length, int channel,
81 int tag, int sync)
82{
83 packet->header[0] = (length << 16) | (tag << 14) | (channel << 8)
84 | (TCODE_ISO_DATA << 4) | sync;
85
86 packet->header_size = 4;
87 packet->data_size = length;
88 packet->type = hpsb_iso;
89 packet->tcode = TCODE_ISO_DATA;
90}
91
92static void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data)
93{
94 packet->header[0] = data;
95 packet->header[1] = ~data;
96 packet->header_size = 8;
97 packet->data_size = 0;
98 packet->expect_response = 0;
99 packet->type = hpsb_raw; /* No CRC added */
100 packet->speed_code = IEEE1394_SPEED_100; /* Force speed to be 100Mbps */
101}
102
103static void fill_async_stream_packet(struct hpsb_packet *packet, int length,
104 int channel, int tag, int sync)
105{
106 packet->header[0] = (length << 16) | (tag << 14) | (channel << 8)
107 | (TCODE_STREAM_DATA << 4) | sync;
108
109 packet->header_size = 4;
110 packet->data_size = length;
111 packet->type = hpsb_async;
112 packet->tcode = TCODE_ISO_DATA;
113}
114
115/**
116 * hpsb_get_tlabel - allocate a transaction label
117 * @packet: the packet whose tlabel/tpool we set
118 *
119 * Every asynchronous transaction on the 1394 bus needs a transaction
120 * label to match the response to the request. This label has to be
121 * different from any other transaction label in an outstanding request to
122 * the same node to make matching possible without ambiguity.
123 *
124 * There are 64 different tlabels, so an allocated tlabel has to be freed
125 * with hpsb_free_tlabel() after the transaction is complete (unless it's
126 * reused again for the same target node).
127 *
128 * Return value: Zero on success, otherwise non-zero. A non-zero return
129 * generally means there are no available tlabels. If this is called out
130 * of interrupt or atomic context, then it will sleep until can return a
131 * tlabel.
132 */
133int hpsb_get_tlabel(struct hpsb_packet *packet)
134{
135 unsigned long flags;
136 struct hpsb_tlabel_pool *tp;
137
138 tp = &packet->host->tpool[packet->node_id & NODE_MASK];
139
140 if (irqs_disabled() || in_atomic()) {
141 if (down_trylock(&tp->count))
142 return 1;
143 } else {
144 down(&tp->count);
145 }
146
147 spin_lock_irqsave(&tp->lock, flags);
148
149 packet->tlabel = find_next_zero_bit(tp->pool, 64, tp->next);
150 if (packet->tlabel > 63)
151 packet->tlabel = find_first_zero_bit(tp->pool, 64);
152 tp->next = (packet->tlabel + 1) % 64;
153 /* Should _never_ happen */
154 BUG_ON(test_and_set_bit(packet->tlabel, tp->pool));
155 tp->allocations++;
156 spin_unlock_irqrestore(&tp->lock, flags);
157
158 return 0;
159}
160
161/**
162 * hpsb_free_tlabel - free an allocated transaction label
163 * @packet: packet whose tlabel/tpool needs to be cleared
164 *
165 * Frees the transaction label allocated with hpsb_get_tlabel(). The
166 * tlabel has to be freed after the transaction is complete (i.e. response
167 * was received for a split transaction or packet was sent for a unified
168 * transaction).
169 *
170 * A tlabel must not be freed twice.
171 */
172void hpsb_free_tlabel(struct hpsb_packet *packet)
173{
174 unsigned long flags;
175 struct hpsb_tlabel_pool *tp;
176
177 tp = &packet->host->tpool[packet->node_id & NODE_MASK];
178
179 BUG_ON(packet->tlabel > 63 || packet->tlabel < 0);
180
181 spin_lock_irqsave(&tp->lock, flags);
182 BUG_ON(!test_and_clear_bit(packet->tlabel, tp->pool));
183 spin_unlock_irqrestore(&tp->lock, flags);
184
185 up(&tp->count);
186}
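/*
 * Illustrative sketch (not part of the original file): callers pair
 * hpsb_get_tlabel() with hpsb_free_tlabel() around a complete transaction,
 * just as the hpsb_make_*() helpers and hpsb_read()/hpsb_write() below do.
 * Error handling is condensed and the send/wait step is only indicated.
 *
 *	struct hpsb_packet *p = hpsb_alloc_packet(4);
 *
 *	if (!p)
 *		return -ENOMEM;
 *	p->host = host;
 *	p->node_id = node;
 *	if (hpsb_get_tlabel(p)) {
 *		hpsb_free_packet(p);
 *		return -EAGAIN;
 *	}
 *	fill_async_readquad(p, addr);
 *	... send the packet and wait for the response ...
 *	hpsb_free_tlabel(p);
 *	hpsb_free_packet(p);
 */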
187
188
189
190int hpsb_packet_success(struct hpsb_packet *packet)
191{
192 switch (packet->ack_code) {
193 case ACK_PENDING:
194 switch ((packet->header[1] >> 12) & 0xf) {
195 case RCODE_COMPLETE:
196 return 0;
197 case RCODE_CONFLICT_ERROR:
198 return -EAGAIN;
199 case RCODE_DATA_ERROR:
200 return -EREMOTEIO;
201 case RCODE_TYPE_ERROR:
202 return -EACCES;
203 case RCODE_ADDRESS_ERROR:
204 return -EINVAL;
205 default:
206 HPSB_ERR("received reserved rcode %d from node %d",
207 (packet->header[1] >> 12) & 0xf,
208 packet->node_id);
209 return -EAGAIN;
210 }
211 HPSB_PANIC("reached unreachable code 1 in %s", __FUNCTION__);
212
213 case ACK_BUSY_X:
214 case ACK_BUSY_A:
215 case ACK_BUSY_B:
216 return -EBUSY;
217
218 case ACK_TYPE_ERROR:
219 return -EACCES;
220
221 case ACK_COMPLETE:
222 if (packet->tcode == TCODE_WRITEQ
223 || packet->tcode == TCODE_WRITEB) {
224 return 0;
225 } else {
226 HPSB_ERR("impossible ack_complete from node %d "
227 "(tcode %d)", packet->node_id, packet->tcode);
228 return -EAGAIN;
229 }
230
231
232 case ACK_DATA_ERROR:
233 if (packet->tcode == TCODE_WRITEB
234 || packet->tcode == TCODE_LOCK_REQUEST) {
235 return -EAGAIN;
236 } else {
237 HPSB_ERR("impossible ack_data_error from node %d "
238 "(tcode %d)", packet->node_id, packet->tcode);
239 return -EAGAIN;
240 }
241
242 case ACK_ADDRESS_ERROR:
243 return -EINVAL;
244
245 case ACK_TARDY:
246 case ACK_CONFLICT_ERROR:
247 case ACKX_NONE:
248 case ACKX_SEND_ERROR:
249 case ACKX_ABORTED:
250 case ACKX_TIMEOUT:
251 /* error while sending */
252 return -EAGAIN;
253
254 default:
255 HPSB_ERR("got invalid ack %d from node %d (tcode %d)",
256 packet->ack_code, packet->node_id, packet->tcode);
257 return -EAGAIN;
258 }
259
260 HPSB_PANIC("reached unreachable code 2 in %s", __FUNCTION__);
261}
262
263struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
264 u64 addr, size_t length)
265{
266 struct hpsb_packet *packet;
267
268 if (length == 0)
269 return NULL;
270
271 packet = hpsb_alloc_packet(length);
272 if (!packet)
273 return NULL;
274
275 packet->host = host;
276 packet->node_id = node;
277
278 if (hpsb_get_tlabel(packet)) {
279 hpsb_free_packet(packet);
280 return NULL;
281 }
282
283 if (length == 4)
284 fill_async_readquad(packet, addr);
285 else
286 fill_async_readblock(packet, addr, length);
287
288 return packet;
289}
290
291struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node,
292 u64 addr, quadlet_t *buffer, size_t length)
293{
294 struct hpsb_packet *packet;
295
296 if (length == 0)
297 return NULL;
298
299 packet = hpsb_alloc_packet(length);
300 if (!packet)
301 return NULL;
302
303 if (length % 4) { /* zero padding bytes */
304 packet->data[length >> 2] = 0;
305 }
306 packet->host = host;
307 packet->node_id = node;
308
309 if (hpsb_get_tlabel(packet)) {
310 hpsb_free_packet(packet);
311 return NULL;
312 }
313
314 if (length == 4) {
315 fill_async_writequad(packet, addr, buffer ? *buffer : 0);
316 } else {
317 fill_async_writeblock(packet, addr, length);
318 if (buffer)
319 memcpy(packet->data, buffer, length);
320 }
321
322 return packet;
323}
324
325struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, int length,
326 int channel, int tag, int sync)
327{
328 struct hpsb_packet *packet;
329
330 if (length == 0)
331 return NULL;
332
333 packet = hpsb_alloc_packet(length);
334 if (!packet)
335 return NULL;
336
337 if (length % 4) { /* zero padding bytes */
338 packet->data[length >> 2] = 0;
339 }
340 packet->host = host;
341
342 if (hpsb_get_tlabel(packet)) {
343 hpsb_free_packet(packet);
344 return NULL;
345 }
346
347 fill_async_stream_packet(packet, length, channel, tag, sync);
348 if (buffer)
349 memcpy(packet->data, buffer, length);
350
351 return packet;
352}
353
354struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
355 u64 addr, int extcode, quadlet_t *data,
356 quadlet_t arg)
357{
358 struct hpsb_packet *p;
359 u32 length;
360
361 p = hpsb_alloc_packet(8);
362 if (!p) return NULL;
363
364 p->host = host;
365 p->node_id = node;
366 if (hpsb_get_tlabel(p)) {
367 hpsb_free_packet(p);
368 return NULL;
369 }
370
371 switch (extcode) {
372 case EXTCODE_FETCH_ADD:
373 case EXTCODE_LITTLE_ADD:
374 length = 4;
375 if (data)
376 p->data[0] = *data;
377 break;
378 default:
379 length = 8;
380 if (data) {
381 p->data[0] = arg;
382 p->data[1] = *data;
383 }
384 break;
385 }
386 fill_async_lock(p, addr, extcode, length);
387
388 return p;
389}
390
391struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node,
392 u64 addr, int extcode, octlet_t *data,
393 octlet_t arg)
394{
395 struct hpsb_packet *p;
396 u32 length;
397
398 p = hpsb_alloc_packet(16);
399 if (!p) return NULL;
400
401 p->host = host;
402 p->node_id = node;
403 if (hpsb_get_tlabel(p)) {
404 hpsb_free_packet(p);
405 return NULL;
406 }
407
408 switch (extcode) {
409 case EXTCODE_FETCH_ADD:
410 case EXTCODE_LITTLE_ADD:
411 length = 8;
412 if (data) {
413 p->data[0] = *data >> 32;
414 p->data[1] = *data & 0xffffffff;
415 }
416 break;
417 default:
418 length = 16;
419 if (data) {
420 p->data[0] = arg >> 32;
421 p->data[1] = arg & 0xffffffff;
422 p->data[2] = *data >> 32;
423 p->data[3] = *data & 0xffffffff;
424 }
425 break;
426 }
427 fill_async_lock(p, addr, extcode, length);
428
429 return p;
430}
431
432struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host,
433 quadlet_t data)
434{
435 struct hpsb_packet *p;
436
437 p = hpsb_alloc_packet(0);
438 if (!p) return NULL;
439
440 p->host = host;
441 fill_phy_packet(p, data);
442
443 return p;
444}
445
446struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
447 int length, int channel,
448 int tag, int sync)
449{
450 struct hpsb_packet *p;
451
452 p = hpsb_alloc_packet(length);
453 if (!p) return NULL;
454
455 p->host = host;
456 fill_iso_packet(p, length, channel, tag, sync);
457
458 p->generation = get_hpsb_generation(host);
459
460 return p;
461}
462
463/*
464 * FIXME - these functions should probably read from / write to user space to
465 * avoid in-kernel buffers for user space callers
466 */
467
468int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
469 u64 addr, quadlet_t *buffer, size_t length)
470{
471 struct hpsb_packet *packet;
472 int retval = 0;
473
474 if (length == 0)
475 return -EINVAL;
476
477 BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
478
479 packet = hpsb_make_readpacket(host, node, addr, length);
480
481 if (!packet) {
482 return -ENOMEM;
483 }
484
485 packet->generation = generation;
486 retval = hpsb_send_packet_and_wait(packet);
487 if (retval < 0)
488 goto hpsb_read_fail;
489
490 retval = hpsb_packet_success(packet);
491
492 if (retval == 0) {
493 if (length == 4) {
494 *buffer = packet->header[3];
495 } else {
496 memcpy(buffer, packet->data, length);
497 }
498 }
499
500hpsb_read_fail:
501 hpsb_free_tlabel(packet);
502 hpsb_free_packet(packet);
503
504 return retval;
505}
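/*
 * Usage sketch (illustrative only): reading a single quadlet from a remote
 * node.  The generation is sampled first so that the request fails cleanly
 * instead of reaching the wrong node after a bus reset; the CSR address
 * constants are assumed to come from csr.h.
 *
 *	quadlet_t quad;
 *	unsigned int gen = get_hpsb_generation(host);
 *
 *	if (hpsb_read(host, nodeid, gen,
 *		      CSR_REGISTER_BASE + CSR_CONFIG_ROM, &quad, 4) == 0)
 *		... quad now holds the first config ROM quadlet ...
 */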
506
507
508int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
509 u64 addr, quadlet_t *buffer, size_t length)
510{
511 struct hpsb_packet *packet;
512 int retval;
513
514 if (length == 0)
515 return -EINVAL;
516
517 BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
518
519 packet = hpsb_make_writepacket (host, node, addr, buffer, length);
520
521 if (!packet)
522 return -ENOMEM;
523
524 packet->generation = generation;
525 retval = hpsb_send_packet_and_wait(packet);
526 if (retval < 0)
527 goto hpsb_write_fail;
528
529 retval = hpsb_packet_success(packet);
530
531hpsb_write_fail:
532 hpsb_free_tlabel(packet);
533 hpsb_free_packet(packet);
534
535 return retval;
536}
537
538
539int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
540 u64 addr, int extcode, quadlet_t *data, quadlet_t arg)
541{
542 struct hpsb_packet *packet;
543 int retval = 0;
544
545 BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
546
547 packet = hpsb_make_lockpacket(host, node, addr, extcode, data, arg);
548 if (!packet)
549 return -ENOMEM;
550
551 packet->generation = generation;
552 retval = hpsb_send_packet_and_wait(packet);
553 if (retval < 0)
554 goto hpsb_lock_fail;
555
556 retval = hpsb_packet_success(packet);
557
558 if (retval == 0) {
559 *data = packet->data[0];
560 }
561
562hpsb_lock_fail:
563 hpsb_free_tlabel(packet);
564 hpsb_free_packet(packet);
565
566 return retval;
567}
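/*
 * Usage sketch (illustrative only): a quadlet compare-and-swap through
 * hpsb_lock().  For the non-ADD extended tcodes the request carries both
 * 'arg' (the expected old value) and '*data' (the new value); on success
 * '*data' is overwritten with the value returned by the target, i.e. the
 * old contents of the location.  EXTCODE_COMPARE_SWAP is assumed to come
 * from ieee1394.h; payload byte-order conversion is left out here.
 *
 *	quadlet_t data = new_value;
 *	int err = hpsb_lock(host, nodeid, get_hpsb_generation(host),
 *			    addr, EXTCODE_COMPARE_SWAP, &data, expected);
 *	if (!err && data == expected)
 *		... the swap took place ...
 */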
568
569
570int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation,
571 quadlet_t *buffer, size_t length, u32 specifier_id,
572 unsigned int version)
573{
574 struct hpsb_packet *packet;
575 int retval = 0;
576 u16 specifier_id_hi = (specifier_id & 0x00ffff00) >> 8;
577 u8 specifier_id_lo = specifier_id & 0xff;
578
579 HPSB_VERBOSE("Send GASP: channel = %d, length = %Zd", channel, length);
580
581 length += 8;
582
583 packet = hpsb_make_streampacket(host, NULL, length, channel, 3, 0);
584 if (!packet)
585 return -ENOMEM;
586
587 packet->data[0] = cpu_to_be32((host->node_id << 16) | specifier_id_hi);
588 packet->data[1] = cpu_to_be32((specifier_id_lo << 24) | (version & 0x00ffffff));
589
590 memcpy(&(packet->data[2]), buffer, length - 8);
591
592 packet->generation = generation;
593
594 packet->no_waiter = 1;
595
596 retval = hpsb_send_packet(packet);
597 if (retval < 0)
598 hpsb_free_packet(packet);
599
600 return retval;
601}
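/*
 * Worked example (illustrative): the two GASP header quadlets built above,
 * for specifier_id 0x00005e and version 1 (the values used for IP over
 * 1394 by eth1394) sent from node_id 0xffc0:
 *
 *	specifier_id_hi = (0x00005e & 0x00ffff00) >> 8 = 0x0000
 *	specifier_id_lo =  0x00005e & 0xff             = 0x5e
 *	data[0] = (0xffc0 << 16) | 0x0000 = 0xffc00000
 *	data[1] = (0x5e << 24) | 0x000001 = 0x5e000001
 *
 * The caller's payload then follows at data[2].
 */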
diff --git a/drivers/ieee1394/ieee1394_transactions.h b/drivers/ieee1394/ieee1394_transactions.h
new file mode 100644
index 000000000000..526a43ceb496
--- /dev/null
+++ b/drivers/ieee1394/ieee1394_transactions.h
@@ -0,0 +1,64 @@
1#ifndef _IEEE1394_TRANSACTIONS_H
2#define _IEEE1394_TRANSACTIONS_H
3
4#include "ieee1394_core.h"
5
6
7/*
8 * Get and free transaction labels.
9 */
10int hpsb_get_tlabel(struct hpsb_packet *packet);
11void hpsb_free_tlabel(struct hpsb_packet *packet);
12
13struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
14 u64 addr, size_t length);
15struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
16 u64 addr, int extcode, quadlet_t *data,
17 quadlet_t arg);
18struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node,
19 u64 addr, int extcode, octlet_t *data,
20 octlet_t arg);
21struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host,
22					quadlet_t data);
23struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
24 int length, int channel,
25 int tag, int sync);
26struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node,
27 u64 addr, quadlet_t *buffer, size_t length);
28struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer,
29 int length, int channel, int tag, int sync);
30
31/*
32 * hpsb_packet_success - Make sense of the ack and reply codes and
33 * return more convenient error codes:
34 * 0 success
35 * -EBUSY node is busy, try again
36 * -EAGAIN	error which can probably be resolved by a retry
37 * -EREMOTEIO node suffers from an internal error
38 * -EACCES this transaction is not allowed on requested address
39 * -EINVAL invalid address at node
40 */
41int hpsb_packet_success(struct hpsb_packet *packet);
42
43
44/*
45 * The generic read, write and lock functions. All recognize the local node ID
46 * and act accordingly. Read and write automatically use quadlet commands if
47 * length == 4 and block commands otherwise (however, they do not yet
48 * support lengths that are not a multiple of 4). You must explicitly specify
49 * the generation for which the node ID is valid, to avoid sending packets to
50 * the wrong nodes when we race with a bus reset.
51 */
52int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
53 u64 addr, quadlet_t *buffer, size_t length);
54int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
55 u64 addr, quadlet_t *buffer, size_t length);
56int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
57 u64 addr, int extcode, quadlet_t *data, quadlet_t arg);
58int hpsb_lock64(struct hpsb_host *host, nodeid_t node, unsigned int generation,
59 u64 addr, int extcode, octlet_t *data, octlet_t arg);
60int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation,
61 quadlet_t *buffer, size_t length, u32 specifier_id,
62 unsigned int version);
63
64#endif /* _IEEE1394_TRANSACTIONS_H */
diff --git a/drivers/ieee1394/ieee1394_types.h b/drivers/ieee1394/ieee1394_types.h
new file mode 100644
index 000000000000..3165609ec1ec
--- /dev/null
+++ b/drivers/ieee1394/ieee1394_types.h
@@ -0,0 +1,101 @@
1
2#ifndef _IEEE1394_TYPES_H
3#define _IEEE1394_TYPES_H
4
5#include <linux/kernel.h>
6#include <linux/types.h>
7#include <linux/list.h>
8#include <linux/init.h>
9#include <linux/spinlock.h>
10#include <linux/string.h>
11
12#include <asm/semaphore.h>
13#include <asm/byteorder.h>
14
15
16/* Transaction Label handling */
17struct hpsb_tlabel_pool {
18 DECLARE_BITMAP(pool, 64);
19 spinlock_t lock;
20 u8 next;
21 u32 allocations;
22 struct semaphore count;
23};
24
25#define HPSB_TPOOL_INIT(_tp) \
26do { \
27 bitmap_zero((_tp)->pool, 64); \
28 spin_lock_init(&(_tp)->lock); \
29 (_tp)->next = 0; \
30 (_tp)->allocations = 0; \
31 sema_init(&(_tp)->count, 63); \
32} while (0)
33
34
35typedef u32 quadlet_t;
36typedef u64 octlet_t;
37typedef u16 nodeid_t;
38
39typedef u8 byte_t;
40typedef u64 nodeaddr_t;
41typedef u16 arm_length_t;
42
43#define BUS_MASK 0xffc0
44#define BUS_SHIFT 6
45#define NODE_MASK 0x003f
46#define LOCAL_BUS 0xffc0
47#define ALL_NODES 0x003f
48
49#define NODEID_TO_BUS(nodeid) ((nodeid & BUS_MASK) >> BUS_SHIFT)
50#define NODEID_TO_NODE(nodeid) (nodeid & NODE_MASK)
51
52/* Can be used to consistently print a node/bus ID. */
53#define NODE_BUS_FMT "%d-%02d:%04d"
54#define NODE_BUS_ARGS(__host, __nodeid) \
55 __host->id, NODEID_TO_NODE(__nodeid), NODEID_TO_BUS(__nodeid)
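/* Example: nodeid 0xffc1 on host 0 decodes to bus 1023, node 1, and the
 * NODE_BUS_FMT/NODE_BUS_ARGS pair above prints it as "0-01:1023". */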
56
57#define HPSB_PRINT(level, fmt, args...) printk(level "ieee1394: " fmt "\n" , ## args)
58
59#define HPSB_DEBUG(fmt, args...) HPSB_PRINT(KERN_DEBUG, fmt , ## args)
60#define HPSB_INFO(fmt, args...) HPSB_PRINT(KERN_INFO, fmt , ## args)
61#define HPSB_NOTICE(fmt, args...) HPSB_PRINT(KERN_NOTICE, fmt , ## args)
62#define HPSB_WARN(fmt, args...) HPSB_PRINT(KERN_WARNING, fmt , ## args)
63#define HPSB_ERR(fmt, args...) HPSB_PRINT(KERN_ERR, fmt , ## args)
64
65#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
66#define HPSB_VERBOSE(fmt, args...) HPSB_PRINT(KERN_DEBUG, fmt , ## args)
67#else
68#define HPSB_VERBOSE(fmt, args...)
69#endif
70
71#define HPSB_PANIC(fmt, args...) panic("ieee1394: " fmt "\n" , ## args)
72
73#define HPSB_TRACE() HPSB_PRINT(KERN_INFO, "TRACE - %s, %s(), line %d", __FILE__, __FUNCTION__, __LINE__)
74
75
76#ifdef __BIG_ENDIAN
77
78static __inline__ void *memcpy_le32(u32 *dest, const u32 *__src, size_t count)
79{
80 void *tmp = dest;
81 u32 *src = (u32 *)__src;
82
83 count /= 4;
84
85 while (count--) {
86 *dest++ = swab32p(src++);
87 }
88
89 return tmp;
90}
91
92#else
93
94static __inline__ void *memcpy_le32(u32 *dest, const u32 *src, size_t count)
95{
96 return memcpy(dest, src, count);
97}
98
99#endif /* __BIG_ENDIAN */
100
101#endif /* _IEEE1394_TYPES_H */
diff --git a/drivers/ieee1394/iso.c b/drivers/ieee1394/iso.c
new file mode 100644
index 000000000000..f05759107f7e
--- /dev/null
+++ b/drivers/ieee1394/iso.c
@@ -0,0 +1,451 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * kernel ISO transmission/reception
5 *
6 * Copyright (C) 2002 Maas Digital LLC
7 *
8 * This code is licensed under the GPL. See the file COPYING in the root
9 * directory of the kernel sources for details.
10 */
11
12#include <linux/slab.h>
13#include <linux/sched.h>
14#include "iso.h"
15
16void hpsb_iso_stop(struct hpsb_iso *iso)
17{
18 if (!(iso->flags & HPSB_ISO_DRIVER_STARTED))
19 return;
20
21 iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ?
22 XMIT_STOP : RECV_STOP, 0);
23 iso->flags &= ~HPSB_ISO_DRIVER_STARTED;
24}
25
26void hpsb_iso_shutdown(struct hpsb_iso *iso)
27{
28 if (iso->flags & HPSB_ISO_DRIVER_INIT) {
29 hpsb_iso_stop(iso);
30 iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ?
31 XMIT_SHUTDOWN : RECV_SHUTDOWN, 0);
32 iso->flags &= ~HPSB_ISO_DRIVER_INIT;
33 }
34
35 dma_region_free(&iso->data_buf);
36 kfree(iso);
37}
38
39static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_iso_type type,
40 unsigned int data_buf_size,
41 unsigned int buf_packets,
42 int channel,
43 int dma_mode,
44 int irq_interval,
45 void (*callback)(struct hpsb_iso*))
46{
47 struct hpsb_iso *iso;
48 int dma_direction;
49
50 /* make sure driver supports the ISO API */
51 if (!host->driver->isoctl) {
52 printk(KERN_INFO "ieee1394: host driver '%s' does not support the rawiso API\n",
53 host->driver->name);
54 return NULL;
55 }
56
57 /* sanitize parameters */
58
59 if (buf_packets < 2)
60 buf_packets = 2;
61
62 if ((dma_mode < HPSB_ISO_DMA_DEFAULT) || (dma_mode > HPSB_ISO_DMA_PACKET_PER_BUFFER))
63 dma_mode=HPSB_ISO_DMA_DEFAULT;
64
65	if (irq_interval == 0) /* 0 means an interrupt for each packet */
66 irq_interval = 1;
67 else if ((irq_interval < 0) || (irq_interval > buf_packets / 4))
68 irq_interval = buf_packets / 4;
69
70 if (channel < -1 || channel >= 64)
71 return NULL;
72
73 /* channel = -1 is OK for multi-channel recv but not for xmit */
74 if (type == HPSB_ISO_XMIT && channel < 0)
75 return NULL;
76
77 /* allocate and write the struct hpsb_iso */
78
79 iso = kmalloc(sizeof(*iso) + buf_packets * sizeof(struct hpsb_iso_packet_info), GFP_KERNEL);
80 if (!iso)
81 return NULL;
82
83 iso->infos = (struct hpsb_iso_packet_info *)(iso + 1);
84
85 iso->type = type;
86 iso->host = host;
87 iso->hostdata = NULL;
88 iso->callback = callback;
89 init_waitqueue_head(&iso->waitq);
90 iso->channel = channel;
91 iso->irq_interval = irq_interval;
92 iso->dma_mode = dma_mode;
93 dma_region_init(&iso->data_buf);
94 iso->buf_size = PAGE_ALIGN(data_buf_size);
95 iso->buf_packets = buf_packets;
96 iso->pkt_dma = 0;
97 iso->first_packet = 0;
98 spin_lock_init(&iso->lock);
99
100 if (iso->type == HPSB_ISO_XMIT) {
101 iso->n_ready_packets = iso->buf_packets;
102 dma_direction = PCI_DMA_TODEVICE;
103 } else {
104 iso->n_ready_packets = 0;
105 dma_direction = PCI_DMA_FROMDEVICE;
106 }
107
108 atomic_set(&iso->overflows, 0);
109 iso->flags = 0;
110 iso->prebuffer = 0;
111
112 /* allocate the packet buffer */
113 if (dma_region_alloc(&iso->data_buf, iso->buf_size, host->pdev, dma_direction))
114 goto err;
115
116 return iso;
117
118err:
119 hpsb_iso_shutdown(iso);
120 return NULL;
121}
122
123int hpsb_iso_n_ready(struct hpsb_iso* iso)
124{
125 unsigned long flags;
126 int val;
127
128 spin_lock_irqsave(&iso->lock, flags);
129 val = iso->n_ready_packets;
130 spin_unlock_irqrestore(&iso->lock, flags);
131
132 return val;
133}
134
135
136struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
137 unsigned int data_buf_size,
138 unsigned int buf_packets,
139 int channel,
140 int speed,
141 int irq_interval,
142 void (*callback)(struct hpsb_iso*))
143{
144 struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_XMIT,
145 data_buf_size, buf_packets,
146 channel, HPSB_ISO_DMA_DEFAULT, irq_interval, callback);
147 if (!iso)
148 return NULL;
149
150 iso->speed = speed;
151
152 /* tell the driver to start working */
153 if (host->driver->isoctl(iso, XMIT_INIT, 0))
154 goto err;
155
156 iso->flags |= HPSB_ISO_DRIVER_INIT;
157 return iso;
158
159err:
160 hpsb_iso_shutdown(iso);
161 return NULL;
162}
163
164struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
165 unsigned int data_buf_size,
166 unsigned int buf_packets,
167 int channel,
168 int dma_mode,
169 int irq_interval,
170 void (*callback)(struct hpsb_iso*))
171{
172 struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_RECV,
173 data_buf_size, buf_packets,
174 channel, dma_mode, irq_interval, callback);
175 if (!iso)
176 return NULL;
177
178 /* tell the driver to start working */
179 if (host->driver->isoctl(iso, RECV_INIT, 0))
180 goto err;
181
182 iso->flags |= HPSB_ISO_DRIVER_INIT;
183 return iso;
184
185err:
186 hpsb_iso_shutdown(iso);
187 return NULL;
188}
189
190int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel)
191{
192 if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
193 return -EINVAL;
194 return iso->host->driver->isoctl(iso, RECV_LISTEN_CHANNEL, channel);
195}
196
197int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel)
198{
199 if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
200 return -EINVAL;
201 return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel);
202}
203
204int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
205{
206 if (iso->type != HPSB_ISO_RECV || iso->channel != -1)
207 return -EINVAL;
208 return iso->host->driver->isoctl(iso, RECV_SET_CHANNEL_MASK, (unsigned long) &mask);
209}
210
211int hpsb_iso_recv_flush(struct hpsb_iso *iso)
212{
213 if (iso->type != HPSB_ISO_RECV)
214 return -EINVAL;
215 return iso->host->driver->isoctl(iso, RECV_FLUSH, 0);
216}
217
218static int do_iso_xmit_start(struct hpsb_iso *iso, int cycle)
219{
220 int retval = iso->host->driver->isoctl(iso, XMIT_START, cycle);
221 if (retval)
222 return retval;
223
224 iso->flags |= HPSB_ISO_DRIVER_STARTED;
225 return retval;
226}
227
228int hpsb_iso_xmit_start(struct hpsb_iso *iso, int cycle, int prebuffer)
229{
230 if (iso->type != HPSB_ISO_XMIT)
231 return -1;
232
233 if (iso->flags & HPSB_ISO_DRIVER_STARTED)
234 return 0;
235
236 if (cycle < -1)
237 cycle = -1;
238 else if (cycle >= 8000)
239 cycle %= 8000;
240
241 iso->xmit_cycle = cycle;
242
243 if (prebuffer < 0)
244 prebuffer = iso->buf_packets;
245 else if (prebuffer == 0)
246 prebuffer = 1;
247
248 if (prebuffer > iso->buf_packets)
249 prebuffer = iso->buf_packets;
250
251 iso->prebuffer = prebuffer;
252
253	/* remember the starting cycle; DMA will commence from
254	   hpsb_iso_xmit_queue_packet() once enough packets have been buffered */
255 iso->start_cycle = cycle;
256
257 return 0;
258}
259
260int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
261{
262 int retval = 0;
263 int isoctl_args[3];
264
265 if (iso->type != HPSB_ISO_RECV)
266 return -1;
267
268 if (iso->flags & HPSB_ISO_DRIVER_STARTED)
269 return 0;
270
271 if (cycle < -1)
272 cycle = -1;
273 else if (cycle >= 8000)
274 cycle %= 8000;
275
276 isoctl_args[0] = cycle;
277
278 if (tag_mask < 0)
279 /* match all tags */
280 tag_mask = 0xF;
281 isoctl_args[1] = tag_mask;
282
283 isoctl_args[2] = sync;
284
285 retval = iso->host->driver->isoctl(iso, RECV_START, (unsigned long) &isoctl_args[0]);
286 if (retval)
287 return retval;
288
289 iso->flags |= HPSB_ISO_DRIVER_STARTED;
290 return retval;
291}
292
293/* check to make sure the user has not supplied bogus values of offset/len
294 that would cause the kernel to access memory outside the buffer */
295
296static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
297 unsigned int offset, unsigned short len,
298 unsigned int *out_offset, unsigned short *out_len)
299{
300 if (offset >= iso->buf_size)
301 return -EFAULT;
302
303 /* make sure the packet does not go beyond the end of the buffer */
304 if (offset + len > iso->buf_size)
305 return -EFAULT;
306
307 /* check for wrap-around */
308 if (offset + len < offset)
309 return -EFAULT;
310
311 /* now we can trust 'offset' and 'length' */
312 *out_offset = offset;
313 *out_len = len;
314
315 return 0;
316}
317
318
319int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag, u8 sy)
320{
321 struct hpsb_iso_packet_info *info;
322 unsigned long flags;
323 int rv;
324
325 if (iso->type != HPSB_ISO_XMIT)
326 return -EINVAL;
327
328 /* is there space in the buffer? */
329 if (iso->n_ready_packets <= 0) {
330 return -EBUSY;
331 }
332
333 info = &iso->infos[iso->first_packet];
334
335 /* check for bogus offset/length */
336 if (hpsb_iso_check_offset_len(iso, offset, len, &info->offset, &info->len))
337 return -EFAULT;
338
339 info->tag = tag;
340 info->sy = sy;
341
342 spin_lock_irqsave(&iso->lock, flags);
343
344 rv = iso->host->driver->isoctl(iso, XMIT_QUEUE, (unsigned long) info);
345 if (rv)
346 goto out;
347
348 /* increment cursors */
349 iso->first_packet = (iso->first_packet+1) % iso->buf_packets;
350 iso->xmit_cycle = (iso->xmit_cycle+1) % 8000;
351 iso->n_ready_packets--;
352
353 if (iso->prebuffer != 0) {
354 iso->prebuffer--;
355 if (iso->prebuffer <= 0) {
356 iso->prebuffer = 0;
357 rv = do_iso_xmit_start(iso, iso->start_cycle);
358 }
359 }
360
361out:
362 spin_unlock_irqrestore(&iso->lock, flags);
363 return rv;
364}
365
366int hpsb_iso_xmit_sync(struct hpsb_iso *iso)
367{
368 if (iso->type != HPSB_ISO_XMIT)
369 return -EINVAL;
370
371 return wait_event_interruptible(iso->waitq, hpsb_iso_n_ready(iso) == iso->buf_packets);
372}
373
374void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
375{
376 unsigned long flags;
377 spin_lock_irqsave(&iso->lock, flags);
378
379 /* predict the cycle of the next packet to be queued */
380
381 /* jump ahead by the number of packets that are already buffered */
382 cycle += iso->buf_packets - iso->n_ready_packets;
383 cycle %= 8000;
384
385 iso->xmit_cycle = cycle;
386 iso->n_ready_packets++;
387 iso->pkt_dma = (iso->pkt_dma + 1) % iso->buf_packets;
388
389 if (iso->n_ready_packets == iso->buf_packets || error != 0) {
390		/* the buffer has run empty (underflow), or the packet had an error */
391 atomic_inc(&iso->overflows);
392 }
393
394 spin_unlock_irqrestore(&iso->lock, flags);
395}
396
397void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
398 u16 cycle, u8 channel, u8 tag, u8 sy)
399{
400 unsigned long flags;
401 spin_lock_irqsave(&iso->lock, flags);
402
403 if (iso->n_ready_packets == iso->buf_packets) {
404 /* overflow! */
405 atomic_inc(&iso->overflows);
406 } else {
407 struct hpsb_iso_packet_info *info = &iso->infos[iso->pkt_dma];
408 info->offset = offset;
409 info->len = len;
410 info->cycle = cycle;
411 info->channel = channel;
412 info->tag = tag;
413 info->sy = sy;
414
415 iso->pkt_dma = (iso->pkt_dma+1) % iso->buf_packets;
416 iso->n_ready_packets++;
417 }
418
419 spin_unlock_irqrestore(&iso->lock, flags);
420}
421
422int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets)
423{
424 unsigned long flags;
425 unsigned int i;
426 int rv = 0;
427
428 if (iso->type != HPSB_ISO_RECV)
429 return -1;
430
431 spin_lock_irqsave(&iso->lock, flags);
432 for (i = 0; i < n_packets; i++) {
433 rv = iso->host->driver->isoctl(iso, RECV_RELEASE,
434 (unsigned long) &iso->infos[iso->first_packet]);
435 if (rv)
436 break;
437
438 iso->first_packet = (iso->first_packet+1) % iso->buf_packets;
439 iso->n_ready_packets--;
440 }
441 spin_unlock_irqrestore(&iso->lock, flags);
442 return rv;
443}
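/*
 * Usage sketch (illustrative only): a receive callback draining the ring.
 * Packets ready for the consumer start at iso->first_packet; each
 * hpsb_iso_packet_info gives the payload offset and length inside the DMA
 * buffer.  Releasing the packets hands the buffer space back for reuse.
 * "my_recv_callback" is a hypothetical name, passed to hpsb_iso_recv_init().
 *
 *	static void my_recv_callback(struct hpsb_iso *iso)
 *	{
 *		int i, n = hpsb_iso_n_ready(iso);
 *
 *		for (i = 0; i < n; i++) {
 *			struct hpsb_iso_packet_info *info =
 *				&iso->infos[(iso->first_packet + i) % iso->buf_packets];
 *			... consume info->len bytes at offset info->offset ...
 *		}
 *		hpsb_iso_recv_release_packets(iso, n);
 *	}
 */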
444
445void hpsb_iso_wake(struct hpsb_iso *iso)
446{
447 wake_up_interruptible(&iso->waitq);
448
449 if (iso->callback)
450 iso->callback(iso);
451}
diff --git a/drivers/ieee1394/iso.h b/drivers/ieee1394/iso.h
new file mode 100644
index 000000000000..fb654d9639a7
--- /dev/null
+++ b/drivers/ieee1394/iso.h
@@ -0,0 +1,201 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * kernel ISO transmission/reception
5 *
6 * Copyright (C) 2002 Maas Digital LLC
7 *
8 * This code is licensed under the GPL. See the file COPYING in the root
9 * directory of the kernel sources for details.
10 */
11
12#ifndef IEEE1394_ISO_H
13#define IEEE1394_ISO_H
14
15#include "hosts.h"
16#include "dma.h"
17
18/* high-level ISO interface */
19
20/* This API sends and receives isochronous packets on a large,
21 virtually-contiguous kernel memory buffer. The buffer may be mapped
22 into a user-space process for zero-copy transmission and reception.
23
24 There are no explicit boundaries between packets in the buffer. A
25 packet may be transmitted or received at any location. However,
26 low-level drivers may impose certain restrictions on alignment or
27 size of packets. (e.g. in OHCI no packet may cross a page boundary,
28 and packets should be quadlet-aligned)
29*/
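
/* Usage sketch (illustrative only, not part of the original header): a
   minimal transmit client of this API.  The buffer size, packet count,
   channel, tag/sy values and my_xmit_callback are placeholders.  A
   prebuffer of 0 makes DMA start as soon as the first packet is queued,
   and hpsb_iso_xmit_queue_packet() returns -EBUSY while the ring is full.

	iso = hpsb_iso_xmit_init(host, 16 * PAGE_SIZE, 64, channel,
				 IEEE1394_SPEED_400, -1, my_xmit_callback);
	if (!iso)
		return -ENOMEM;
	hpsb_iso_xmit_start(iso, -1, 0);
	for each packet:
		place the payload at a free offset inside the mapped buffer,
		then call hpsb_iso_xmit_queue_packet(iso, offset, len, tag, sy)
	hpsb_iso_xmit_sync(iso);
	hpsb_iso_stop(iso);
	hpsb_iso_shutdown(iso);
*/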
30
31/* Packet descriptor - the API maintains a ring buffer of these packet
32 descriptors in kernel memory (hpsb_iso.infos[]). */
33
34struct hpsb_iso_packet_info {
35 /* offset of data payload relative to the first byte of the buffer */
36 __u32 offset;
37
38 /* length of the data payload, in bytes (not including the isochronous header) */
39 __u16 len;
40
41 /* (recv only) the cycle number (mod 8000) on which the packet was received */
42 __u16 cycle;
43
44 /* (recv only) channel on which the packet was received */
45 __u8 channel;
46
47 /* 2-bit 'tag' and 4-bit 'sy' fields of the isochronous header */
48 __u8 tag;
49 __u8 sy;
50};
51
52enum hpsb_iso_type { HPSB_ISO_RECV = 0, HPSB_ISO_XMIT = 1 };
53
54/* The DMA mode used when receiving iso data. Must be supported by the chip. */
55enum raw1394_iso_dma_recv_mode {
56 HPSB_ISO_DMA_DEFAULT = -1,
57 HPSB_ISO_DMA_OLD_ABI = 0,
58 HPSB_ISO_DMA_BUFFERFILL = 1,
59 HPSB_ISO_DMA_PACKET_PER_BUFFER = 2
60};
61
62struct hpsb_iso {
63 enum hpsb_iso_type type;
64
65 /* pointer to low-level driver and its private data */
66 struct hpsb_host *host;
67 void *hostdata;
68
69 /* a function to be called (from interrupt context) after
70 outgoing packets have been sent, or incoming packets have
71 arrived */
72 void (*callback)(struct hpsb_iso*);
73
74 /* wait for buffer space */
75 wait_queue_head_t waitq;
76
77 int speed; /* IEEE1394_SPEED_100, 200, or 400 */
78 int channel; /* -1 if multichannel */
79 int dma_mode; /* dma receive mode */
80
81
82 /* greatest # of packets between interrupts - controls
83 the maximum latency of the buffer */
84 int irq_interval;
85
86 /* the buffer for packet data payloads */
87 struct dma_region data_buf;
88
89 /* size of data_buf, in bytes (always a multiple of PAGE_SIZE) */
90 unsigned int buf_size;
91
92 /* # of packets in the ringbuffer */
93 unsigned int buf_packets;
94
95 /* protects packet cursors */
96 spinlock_t lock;
97
98 /* the index of the next packet that will be produced
99 or consumed by the user */
100 int first_packet;
101
102 /* the index of the next packet that will be transmitted
103 or received by the 1394 hardware */
104 int pkt_dma;
105
106 /* how many packets, starting at first_packet:
107 (transmit) are ready to be filled with data
108 (receive) contain received data */
109 int n_ready_packets;
110
111 /* how many times the buffer has overflowed or underflowed */
112 atomic_t overflows;
113
114 /* private flags to track initialization progress */
115#define HPSB_ISO_DRIVER_INIT (1<<0)
116#define HPSB_ISO_DRIVER_STARTED (1<<1)
117 unsigned int flags;
118
119 /* # of packets left to prebuffer (xmit only) */
120 int prebuffer;
121
122 /* starting cycle for DMA (xmit only) */
123 int start_cycle;
124
125 /* cycle at which next packet will be transmitted,
126 -1 if not known */
127 int xmit_cycle;
128
129 /* ringbuffer of packet descriptors in regular kernel memory
130 * XXX Keep this last, since we use over-allocated memory from
131 * this entry to fill this field. */
132 struct hpsb_iso_packet_info *infos;
133};
134
135/* functions available to high-level drivers (e.g. raw1394) */
136
137/* allocate the buffer and DMA context */
138
139struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
140 unsigned int data_buf_size,
141 unsigned int buf_packets,
142 int channel,
143 int speed,
144 int irq_interval,
145 void (*callback)(struct hpsb_iso*));
146
147/* note: if channel = -1, multi-channel receive is enabled */
148struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
149 unsigned int data_buf_size,
150 unsigned int buf_packets,
151 int channel,
152 int dma_mode,
153 int irq_interval,
154 void (*callback)(struct hpsb_iso*));
155
156/* multi-channel only */
157int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel);
158int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel);
159int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask);
160
161/* start/stop DMA */
162int hpsb_iso_xmit_start(struct hpsb_iso *iso, int start_on_cycle, int prebuffer);
163int hpsb_iso_recv_start(struct hpsb_iso *iso, int start_on_cycle, int tag_mask, int sync);
164void hpsb_iso_stop(struct hpsb_iso *iso);
165
166/* deallocate buffer and DMA context */
167void hpsb_iso_shutdown(struct hpsb_iso *iso);
168
169/* queue a packet for transmission. 'offset' is relative to the beginning of the
170 DMA buffer, where the packet's data payload should already have been placed */
171int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag, u8 sy);
172
173/* wait until all queued packets have been transmitted to the bus */
174int hpsb_iso_xmit_sync(struct hpsb_iso *iso);
175
176/* N packets have been read out of the buffer, re-use the buffer space */
177int hpsb_iso_recv_release_packets(struct hpsb_iso *recv, unsigned int n_packets);
178
179/* check for arrival of new packets immediately (even if irq_interval
180 has not yet been reached) */
181int hpsb_iso_recv_flush(struct hpsb_iso *iso);
182
183/* returns # of packets ready to send or receive */
184int hpsb_iso_n_ready(struct hpsb_iso *iso);
185
186/* the following are callbacks available to low-level drivers */
187
188/* call after a packet has been transmitted to the bus (interrupt context is OK)
189 'cycle' is the _exact_ cycle the packet was sent on
190 'error' should be non-zero if some sort of error occurred when sending the packet
191*/
192void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error);
193
194/* call after a packet has been received (interrupt context OK) */
195void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
196 u16 cycle, u8 channel, u8 tag, u8 sy);
197
198/* call to wake waiting processes after buffer space has opened up. */
199void hpsb_iso_wake(struct hpsb_iso *iso);
200
201#endif /* IEEE1394_ISO_H */
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
new file mode 100644
index 000000000000..a1e30a66297b
--- /dev/null
+++ b/drivers/ieee1394/nodemgr.c
@@ -0,0 +1,1732 @@
1/*
2 * Node information (ConfigROM) collection and management.
3 *
4 * Copyright (C) 2000 Andreas E. Bombe
5 * 2001-2003 Ben Collins <bcollins@debian.net>
6 *
7 * This code is licensed under the GPL. See the file COPYING in the root
8 * directory of the kernel sources for details.
9 */
10
11#include <linux/kernel.h>
12#include <linux/config.h>
13#include <linux/list.h>
14#include <linux/slab.h>
15#include <linux/smp_lock.h>
16#include <linux/interrupt.h>
17#include <linux/kmod.h>
18#include <linux/completion.h>
19#include <linux/delay.h>
20#include <linux/pci.h>
21#include <linux/moduleparam.h>
22#include <asm/atomic.h>
23
24#include "ieee1394_types.h"
25#include "ieee1394.h"
26#include "ieee1394_core.h"
27#include "hosts.h"
28#include "ieee1394_transactions.h"
29#include "highlevel.h"
30#include "csr.h"
31#include "nodemgr.h"
32
33static int ignore_drivers = 0;
34module_param(ignore_drivers, int, 0444);
35MODULE_PARM_DESC(ignore_drivers, "Disable automatic probing for drivers.");
36
37struct nodemgr_csr_info {
38 struct hpsb_host *host;
39 nodeid_t nodeid;
40 unsigned int generation;
41};
42
43
44static char *nodemgr_find_oui_name(int oui)
45{
46#ifdef CONFIG_IEEE1394_OUI_DB
47 extern struct oui_list_struct {
48 int oui;
49 char *name;
50 } oui_list[];
51 int i;
52
53 for (i = 0; oui_list[i].name; i++)
54 if (oui_list[i].oui == oui)
55 return oui_list[i].name;
56#endif
57 return NULL;
58}
59
60
61static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
62 void *buffer, void *__ci)
63{
64 struct nodemgr_csr_info *ci = (struct nodemgr_csr_info*)__ci;
65 int i, ret = 0;
66
67 for (i = 0; i < 3; i++) {
68 ret = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
69 buffer, length);
70 if (!ret)
71 break;
72
73 if (msleep_interruptible(334))
74 return -EINTR;
75 }
76
77 return ret;
78}
79
80static int nodemgr_get_max_rom(quadlet_t *bus_info_data, void *__ci)
81{
82 return (CSR1212_BE32_TO_CPU(bus_info_data[2]) >> 8) & 0x3;
83}
84
85static struct csr1212_bus_ops nodemgr_csr_ops = {
86 .bus_read = nodemgr_bus_read,
87 .get_max_rom = nodemgr_get_max_rom
88};
89
90
91/*
92 * Basically what we do here is start off retrieving the bus_info block.
93 * From there we fill in some info about the node, verify it is of IEEE
94 * 1394 type, and that the CRC checks out OK. After that we start off with
95 * the root directory, and subdirectories. To do this, we retrieve the
96 * quadlet header for a directory, find out the length, and retrieve the
97 * complete directory entry (be it a leaf or a directory). We then process
98 * it and add the info to our structure for that particular node.
99 *
100 * We verify CRCs along the way for each directory/block/leaf. The entire
101 * node structure is generic, and simply stores the information in a way
102 * that is easy for the protocol interface to parse.
103 */
104
105/*
106 * The nodemgr relies heavily on the Driver Model for device callbacks and
107 * driver/device mappings. The old nodemgr used to handle all this itself,
108 * but now we are much simpler because of the LDM.
109 */
110
111static DECLARE_MUTEX(nodemgr_serialize);
112
113struct host_info {
114 struct hpsb_host *host;
115 struct list_head list;
116 struct completion exited;
117 struct semaphore reset_sem;
118 int pid;
119 char daemon_name[15];
120 int kill_me;
121};
122
123static int nodemgr_bus_match(struct device * dev, struct device_driver * drv);
124static int nodemgr_hotplug(struct class_device *cdev, char **envp, int num_envp,
125 char *buffer, int buffer_size);
126static void nodemgr_resume_ne(struct node_entry *ne);
127static void nodemgr_remove_ne(struct node_entry *ne);
128static struct node_entry *find_entry_by_guid(u64 guid);
129
130struct bus_type ieee1394_bus_type = {
131 .name = "ieee1394",
132 .match = nodemgr_bus_match,
133};
134
135static void host_cls_release(struct class_device *class_dev)
136{
137 put_device(&container_of((class_dev), struct hpsb_host, class_dev)->device);
138}
139
140struct class hpsb_host_class = {
141 .name = "ieee1394_host",
142 .release = host_cls_release,
143};
144
145static void ne_cls_release(struct class_device *class_dev)
146{
147 put_device(&container_of((class_dev), struct node_entry, class_dev)->device);
148}
149
150static struct class nodemgr_ne_class = {
151 .name = "ieee1394_node",
152 .release = ne_cls_release,
153};
154
155static void ud_cls_release(struct class_device *class_dev)
156{
157 put_device(&container_of((class_dev), struct unit_directory, class_dev)->device);
158}
159
160/* The name here is only so that unit directory hotplug works with old
161 * style hotplug, which only ever did unit directories anyway. */
162static struct class nodemgr_ud_class = {
163 .name = "ieee1394",
164 .release = ud_cls_release,
165 .hotplug = nodemgr_hotplug,
166};
167
168static struct hpsb_highlevel nodemgr_highlevel;
169
170
171static void nodemgr_release_ud(struct device *dev)
172{
173 struct unit_directory *ud = container_of(dev, struct unit_directory, device);
174
175 if (ud->vendor_name_kv)
176 csr1212_release_keyval(ud->vendor_name_kv);
177 if (ud->model_name_kv)
178 csr1212_release_keyval(ud->model_name_kv);
179
180 kfree(ud);
181}
182
183static void nodemgr_release_ne(struct device *dev)
184{
185 struct node_entry *ne = container_of(dev, struct node_entry, device);
186
187 if (ne->vendor_name_kv)
188 csr1212_release_keyval(ne->vendor_name_kv);
189
190 kfree(ne);
191}
192
193
194static void nodemgr_release_host(struct device *dev)
195{
196 struct hpsb_host *host = container_of(dev, struct hpsb_host, device);
197
198 csr1212_destroy_csr(host->csr.rom);
199
200 kfree(host);
201}
202
203static int nodemgr_ud_platform_data;
204
205static struct device nodemgr_dev_template_ud = {
206 .bus = &ieee1394_bus_type,
207 .release = nodemgr_release_ud,
208 .platform_data = &nodemgr_ud_platform_data,
209};
210
211static struct device nodemgr_dev_template_ne = {
212 .bus = &ieee1394_bus_type,
213 .release = nodemgr_release_ne,
214};
215
216struct device nodemgr_dev_template_host = {
217 .bus = &ieee1394_bus_type,
218 .release = nodemgr_release_host,
219};
220
221
222#define fw_attr(class, class_type, field, type, format_string) \
223static ssize_t fw_show_##class##_##field (struct device *dev, char *buf)\
224{ \
225 class_type *class; \
226 class = container_of(dev, class_type, device); \
227 return sprintf(buf, format_string, (type)class->field); \
228} \
229static struct device_attribute dev_attr_##class##_##field = { \
230 .attr = {.name = __stringify(field), .mode = S_IRUGO }, \
231 .show = fw_show_##class##_##field, \
232};
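/*
 * For instance, the later invocation
 * fw_attr(ne, struct node_entry, nodeid, unsigned int, "0x%04x\n")
 * expands to a show routine fw_show_ne_nodeid() plus dev_attr_ne_nodeid,
 * which nodemgr_create_ne_dev_files() registers as the read-only sysfs
 * file "nodeid" under the node entry's device directory.
 */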
233
234#define fw_attr_td(class, class_type, td_kv) \
235static ssize_t fw_show_##class##_##td_kv (struct device *dev, char *buf)\
236{ \
237 int len; \
238 class_type *class = container_of(dev, class_type, device); \
239 len = (class->td_kv->value.leaf.len - 2) * sizeof(quadlet_t); \
240 memcpy(buf, \
241 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(class->td_kv), \
242 len); \
243	while (buf[len - 1] == '\0')				\
244 len--; \
245 buf[len++] = '\n'; \
246 buf[len] = '\0'; \
247 return len; \
248} \
249static struct device_attribute dev_attr_##class##_##td_kv = { \
250 .attr = {.name = __stringify(td_kv), .mode = S_IRUGO }, \
251 .show = fw_show_##class##_##td_kv, \
252};
253
254
255#define fw_drv_attr(field, type, format_string) \
256static ssize_t fw_drv_show_##field (struct device_driver *drv, char *buf) \
257{ \
258 struct hpsb_protocol_driver *driver; \
259 driver = container_of(drv, struct hpsb_protocol_driver, driver); \
260 return sprintf(buf, format_string, (type)driver->field);\
261} \
262static struct driver_attribute driver_attr_drv_##field = { \
263 .attr = {.name = __stringify(field), .mode = S_IRUGO }, \
264 .show = fw_drv_show_##field, \
265};
266
267
268static ssize_t fw_show_ne_bus_options(struct device *dev, char *buf)
269{
270 struct node_entry *ne = container_of(dev, struct node_entry, device);
271
272 return sprintf(buf, "IRMC(%d) CMC(%d) ISC(%d) BMC(%d) PMC(%d) GEN(%d) "
273 "LSPD(%d) MAX_REC(%d) MAX_ROM(%d) CYC_CLK_ACC(%d)\n",
274 ne->busopt.irmc,
275 ne->busopt.cmc, ne->busopt.isc, ne->busopt.bmc,
276 ne->busopt.pmc, ne->busopt.generation, ne->busopt.lnkspd,
277 ne->busopt.max_rec,
278 ne->busopt.max_rom,
279 ne->busopt.cyc_clk_acc);
280}
281static DEVICE_ATTR(bus_options,S_IRUGO,fw_show_ne_bus_options,NULL);
282
283
284static ssize_t fw_show_ne_tlabels_free(struct device *dev, char *buf)
285{
286 struct node_entry *ne = container_of(dev, struct node_entry, device);
287 return sprintf(buf, "%d\n", atomic_read(&ne->tpool->count.count) + 1);
288}
289static DEVICE_ATTR(tlabels_free,S_IRUGO,fw_show_ne_tlabels_free,NULL);
290
291
292static ssize_t fw_show_ne_tlabels_allocations(struct device *dev, char *buf)
293{
294 struct node_entry *ne = container_of(dev, struct node_entry, device);
295 return sprintf(buf, "%u\n", ne->tpool->allocations);
296}
297static DEVICE_ATTR(tlabels_allocations,S_IRUGO,fw_show_ne_tlabels_allocations,NULL);
298
299
300static ssize_t fw_show_ne_tlabels_mask(struct device *dev, char *buf)
301{
302 struct node_entry *ne = container_of(dev, struct node_entry, device);
303#if (BITS_PER_LONG <= 32)
304 return sprintf(buf, "0x%08lx%08lx\n", ne->tpool->pool[0], ne->tpool->pool[1]);
305#else
306 return sprintf(buf, "0x%016lx\n", ne->tpool->pool[0]);
307#endif
308}
309static DEVICE_ATTR(tlabels_mask, S_IRUGO, fw_show_ne_tlabels_mask, NULL);
310
311
312static ssize_t fw_set_ignore_driver(struct device *dev, const char *buf, size_t count)
313{
314 struct unit_directory *ud = container_of(dev, struct unit_directory, device);
315 int state = simple_strtoul(buf, NULL, 10);
316
317 if (state == 1) {
318 down_write(&dev->bus->subsys.rwsem);
319 device_release_driver(dev);
320 ud->ignore_driver = 1;
321 up_write(&dev->bus->subsys.rwsem);
322 } else if (!state)
323 ud->ignore_driver = 0;
324
325 return count;
326}
327static ssize_t fw_get_ignore_driver(struct device *dev, char *buf)
328{
329 struct unit_directory *ud = container_of(dev, struct unit_directory, device);
330
331 return sprintf(buf, "%d\n", ud->ignore_driver);
332}
333static DEVICE_ATTR(ignore_driver, S_IWUSR | S_IRUGO, fw_get_ignore_driver, fw_set_ignore_driver);
334
335
336static ssize_t fw_set_destroy_node(struct bus_type *bus, const char *buf, size_t count)
337{
338 struct node_entry *ne;
339 u64 guid = (u64)simple_strtoull(buf, NULL, 16);
340
341 ne = find_entry_by_guid(guid);
342
343 if (ne == NULL || !ne->in_limbo)
344 return -EINVAL;
345
346 nodemgr_remove_ne(ne);
347
348 return count;
349}
350static ssize_t fw_get_destroy_node(struct bus_type *bus, char *buf)
351{
352 return sprintf(buf, "You can destroy in_limbo nodes by writing their GUID to this file\n");
353}
354static BUS_ATTR(destroy_node, S_IWUSR | S_IRUGO, fw_get_destroy_node, fw_set_destroy_node);
355
356static int nodemgr_rescan_bus_thread(void *__unused)
357{
358 /* No userlevel access needed */
359 daemonize("kfwrescan");
360
361 bus_rescan_devices(&ieee1394_bus_type);
362
363 return 0;
364}
365
366static ssize_t fw_set_rescan(struct bus_type *bus, const char *buf, size_t count)
367{
368 int state = simple_strtoul(buf, NULL, 10);
369
370 /* Don't wait for this, or care about errors. Root could do
371 * something stupid and spawn this a lot of times, but that's
372 * root's fault. */
373 if (state == 1)
374 kernel_thread(nodemgr_rescan_bus_thread, NULL, CLONE_KERNEL);
375
376 return count;
377}
378static ssize_t fw_get_rescan(struct bus_type *bus, char *buf)
379{
380 return sprintf(buf, "You can force a rescan of the bus for "
381 "drivers by writing a 1 to this file\n");
382}
383static BUS_ATTR(rescan, S_IWUSR | S_IRUGO, fw_get_rescan, fw_set_rescan);
384
385
386static ssize_t fw_set_ignore_drivers(struct bus_type *bus, const char *buf, size_t count)
387{
388 int state = simple_strtoul(buf, NULL, 10);
389
390 if (state == 1)
391 ignore_drivers = 1;
392 else if (!state)
393 ignore_drivers = 0;
394
395 return count;
396}
397static ssize_t fw_get_ignore_drivers(struct bus_type *bus, char *buf)
398{
399 return sprintf(buf, "%d\n", ignore_drivers);
400}
401static BUS_ATTR(ignore_drivers, S_IWUSR | S_IRUGO, fw_get_ignore_drivers, fw_set_ignore_drivers);
402
403
404struct bus_attribute *const fw_bus_attrs[] = {
405 &bus_attr_destroy_node,
406 &bus_attr_rescan,
407 &bus_attr_ignore_drivers,
408 NULL
409};
410
411
412fw_attr(ne, struct node_entry, capabilities, unsigned int, "0x%06x\n")
413fw_attr(ne, struct node_entry, nodeid, unsigned int, "0x%04x\n")
414
415fw_attr(ne, struct node_entry, vendor_id, unsigned int, "0x%06x\n")
416fw_attr_td(ne, struct node_entry, vendor_name_kv)
417fw_attr(ne, struct node_entry, vendor_oui, const char *, "%s\n")
418
419fw_attr(ne, struct node_entry, guid, unsigned long long, "0x%016Lx\n")
420fw_attr(ne, struct node_entry, guid_vendor_id, unsigned int, "0x%06x\n")
421fw_attr(ne, struct node_entry, guid_vendor_oui, const char *, "%s\n")
422fw_attr(ne, struct node_entry, in_limbo, int, "%d\n");
423
424static struct device_attribute *const fw_ne_attrs[] = {
425 &dev_attr_ne_guid,
426 &dev_attr_ne_guid_vendor_id,
427 &dev_attr_ne_capabilities,
428 &dev_attr_ne_vendor_id,
429 &dev_attr_ne_nodeid,
430 &dev_attr_bus_options,
431 &dev_attr_tlabels_free,
432 &dev_attr_tlabels_allocations,
433 &dev_attr_tlabels_mask,
434};
435
436
437
438fw_attr(ud, struct unit_directory, address, unsigned long long, "0x%016Lx\n")
439fw_attr(ud, struct unit_directory, length, int, "%d\n")
440/* These are all dependent on the value being provided */
441fw_attr(ud, struct unit_directory, vendor_id, unsigned int, "0x%06x\n")
442fw_attr(ud, struct unit_directory, model_id, unsigned int, "0x%06x\n")
443fw_attr(ud, struct unit_directory, specifier_id, unsigned int, "0x%06x\n")
444fw_attr(ud, struct unit_directory, version, unsigned int, "0x%06x\n")
445fw_attr_td(ud, struct unit_directory, vendor_name_kv)
446fw_attr(ud, struct unit_directory, vendor_oui, const char *, "%s\n")
447fw_attr_td(ud, struct unit_directory, model_name_kv)
448
449static struct device_attribute *const fw_ud_attrs[] = {
450 &dev_attr_ud_address,
451 &dev_attr_ud_length,
452 &dev_attr_ignore_driver,
453};
454
455
456fw_attr(host, struct hpsb_host, node_count, int, "%d\n")
457fw_attr(host, struct hpsb_host, selfid_count, int, "%d\n")
458fw_attr(host, struct hpsb_host, nodes_active, int, "%d\n")
459fw_attr(host, struct hpsb_host, in_bus_reset, int, "%d\n")
460fw_attr(host, struct hpsb_host, is_root, int, "%d\n")
461fw_attr(host, struct hpsb_host, is_cycmst, int, "%d\n")
462fw_attr(host, struct hpsb_host, is_irm, int, "%d\n")
463fw_attr(host, struct hpsb_host, is_busmgr, int, "%d\n")
464
465static struct device_attribute *const fw_host_attrs[] = {
466 &dev_attr_host_node_count,
467 &dev_attr_host_selfid_count,
468 &dev_attr_host_nodes_active,
469 &dev_attr_host_in_bus_reset,
470 &dev_attr_host_is_root,
471 &dev_attr_host_is_cycmst,
472 &dev_attr_host_is_irm,
473 &dev_attr_host_is_busmgr,
474};
475
476
477static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf)
478{
479 struct hpsb_protocol_driver *driver;
480 struct ieee1394_device_id *id;
481 int length = 0;
482 char *scratch = buf;
483
484 driver = container_of(drv, struct hpsb_protocol_driver, driver);
485
486 for (id = driver->id_table; id->match_flags != 0; id++) {
487 int need_coma = 0;
488
489 if (id->match_flags & IEEE1394_MATCH_VENDOR_ID) {
490 length += sprintf(scratch, "vendor_id=0x%06x", id->vendor_id);
491 scratch = buf + length;
492 need_coma++;
493 }
494
495 if (id->match_flags & IEEE1394_MATCH_MODEL_ID) {
496 length += sprintf(scratch, "%smodel_id=0x%06x",
497 need_coma++ ? "," : "",
498 id->model_id);
499 scratch = buf + length;
500 }
501
502 if (id->match_flags & IEEE1394_MATCH_SPECIFIER_ID) {
503 length += sprintf(scratch, "%sspecifier_id=0x%06x",
504 need_coma++ ? "," : "",
505 id->specifier_id);
506 scratch = buf + length;
507 }
508
509 if (id->match_flags & IEEE1394_MATCH_VERSION) {
510 length += sprintf(scratch, "%sversion=0x%06x",
511 need_coma++ ? "," : "",
512 id->version);
513 scratch = buf + length;
514 }
515
516 if (need_coma) {
517 *scratch++ = '\n';
518 length++;
519 }
520 }
521
522 return length;
523}
524static DRIVER_ATTR(device_ids,S_IRUGO,fw_show_drv_device_ids,NULL);
525
526
527fw_drv_attr(name, const char *, "%s\n")
528
529static struct driver_attribute *const fw_drv_attrs[] = {
530 &driver_attr_drv_name,
531 &driver_attr_device_ids,
532};
533
534
535static void nodemgr_create_drv_files(struct hpsb_protocol_driver *driver)
536{
537 struct device_driver *drv = &driver->driver;
538 int i;
539
540 for (i = 0; i < ARRAY_SIZE(fw_drv_attrs); i++)
541 driver_create_file(drv, fw_drv_attrs[i]);
542}
543
544
545static void nodemgr_remove_drv_files(struct hpsb_protocol_driver *driver)
546{
547 struct device_driver *drv = &driver->driver;
548 int i;
549
550 for (i = 0; i < ARRAY_SIZE(fw_drv_attrs); i++)
551 driver_remove_file(drv, fw_drv_attrs[i]);
552}
553
554
555static void nodemgr_create_ne_dev_files(struct node_entry *ne)
556{
557 struct device *dev = &ne->device;
558 int i;
559
560 for (i = 0; i < ARRAY_SIZE(fw_ne_attrs); i++)
561 device_create_file(dev, fw_ne_attrs[i]);
562}
563
564
565static void nodemgr_create_host_dev_files(struct hpsb_host *host)
566{
567 struct device *dev = &host->device;
568 int i;
569
570 for (i = 0; i < ARRAY_SIZE(fw_host_attrs); i++)
571 device_create_file(dev, fw_host_attrs[i]);
572}
573
574
575static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host, nodeid_t nodeid);
576
577static void nodemgr_update_host_dev_links(struct hpsb_host *host)
578{
579 struct device *dev = &host->device;
580 struct node_entry *ne;
581
582 sysfs_remove_link(&dev->kobj, "irm_id");
583 sysfs_remove_link(&dev->kobj, "busmgr_id");
584 sysfs_remove_link(&dev->kobj, "host_id");
585
586 if ((ne = find_entry_by_nodeid(host, host->irm_id)))
587 sysfs_create_link(&dev->kobj, &ne->device.kobj, "irm_id");
588 if ((ne = find_entry_by_nodeid(host, host->busmgr_id)))
589 sysfs_create_link(&dev->kobj, &ne->device.kobj, "busmgr_id");
590 if ((ne = find_entry_by_nodeid(host, host->node_id)))
591 sysfs_create_link(&dev->kobj, &ne->device.kobj, "host_id");
592}
593
594static void nodemgr_create_ud_dev_files(struct unit_directory *ud)
595{
596 struct device *dev = &ud->device;
597 int i;
598
599 for (i = 0; i < ARRAY_SIZE(fw_ud_attrs); i++)
600 device_create_file(dev, fw_ud_attrs[i]);
601
602 if (ud->flags & UNIT_DIRECTORY_SPECIFIER_ID)
603 device_create_file(dev, &dev_attr_ud_specifier_id);
604
605 if (ud->flags & UNIT_DIRECTORY_VERSION)
606 device_create_file(dev, &dev_attr_ud_version);
607
608 if (ud->flags & UNIT_DIRECTORY_VENDOR_ID) {
609 device_create_file(dev, &dev_attr_ud_vendor_id);
610 if (ud->vendor_name_kv)
611 device_create_file(dev, &dev_attr_ud_vendor_name_kv);
612 }
613
614 if (ud->flags & UNIT_DIRECTORY_MODEL_ID) {
615 device_create_file(dev, &dev_attr_ud_model_id);
616 if (ud->model_name_kv)
617 device_create_file(dev, &dev_attr_ud_model_name_kv);
618 }
619}
620
621
622static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
623{
624 struct hpsb_protocol_driver *driver;
625 struct unit_directory *ud;
626 struct ieee1394_device_id *id;
627
628 /* We only match unit directories */
629 if (dev->platform_data != &nodemgr_ud_platform_data)
630 return 0;
631
632 ud = container_of(dev, struct unit_directory, device);
633 driver = container_of(drv, struct hpsb_protocol_driver, driver);
634
635 if (ud->ne->in_limbo || ud->ignore_driver)
636 return 0;
637
638 for (id = driver->id_table; id->match_flags != 0; id++) {
639 if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
640 id->vendor_id != ud->vendor_id)
641 continue;
642
643 if ((id->match_flags & IEEE1394_MATCH_MODEL_ID) &&
644 id->model_id != ud->model_id)
645 continue;
646
647 if ((id->match_flags & IEEE1394_MATCH_SPECIFIER_ID) &&
648 id->specifier_id != ud->specifier_id)
649 continue;
650
651 if ((id->match_flags & IEEE1394_MATCH_VERSION) &&
652 id->version != ud->version)
653 continue;
654
655 return 1;
656 }
657
658 return 0;
659}
660
661
662static void nodemgr_remove_uds(struct node_entry *ne)
663{
664 struct class_device *cdev, *next;
665 struct unit_directory *ud;
666
667 list_for_each_entry_safe(cdev, next, &nodemgr_ud_class.children, node) {
668 ud = container_of(cdev, struct unit_directory, class_dev);
669
670 if (ud->ne != ne)
671 continue;
672
673 class_device_unregister(&ud->class_dev);
674 device_unregister(&ud->device);
675 }
676}
677
678
679static void nodemgr_remove_ne(struct node_entry *ne)
680{
681 struct device *dev = &ne->device;
682
683 dev = get_device(&ne->device);
684 if (!dev)
685 return;
686
687 HPSB_DEBUG("Node removed: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
688 NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
689
690 nodemgr_remove_uds(ne);
691
692 class_device_unregister(&ne->class_dev);
693 device_unregister(dev);
694
695 put_device(dev);
696}
697
698
699static void nodemgr_remove_host_dev(struct device *dev)
700{
701 struct device *ne_dev, *next;
702
703 list_for_each_entry_safe(ne_dev, next, &dev->children, node)
704 nodemgr_remove_ne(container_of(ne_dev, struct node_entry, device));
705
706 sysfs_remove_link(&dev->kobj, "irm_id");
707 sysfs_remove_link(&dev->kobj, "busmgr_id");
708 sysfs_remove_link(&dev->kobj, "host_id");
709}
710
711
712static void nodemgr_update_bus_options(struct node_entry *ne)
713{
714#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
715 static const u16 mr[] = { 4, 64, 1024, 0};
716#endif
717 quadlet_t busoptions = be32_to_cpu(ne->csr->bus_info_data[2]);
718
719 ne->busopt.irmc = (busoptions >> 31) & 1;
720 ne->busopt.cmc = (busoptions >> 30) & 1;
721 ne->busopt.isc = (busoptions >> 29) & 1;
722 ne->busopt.bmc = (busoptions >> 28) & 1;
723 ne->busopt.pmc = (busoptions >> 27) & 1;
724 ne->busopt.cyc_clk_acc = (busoptions >> 16) & 0xff;
725 ne->busopt.max_rec = 1 << (((busoptions >> 12) & 0xf) + 1);
726 ne->busopt.max_rom = (busoptions >> 8) & 0x3;
727 ne->busopt.generation = (busoptions >> 4) & 0xf;
728 ne->busopt.lnkspd = busoptions & 0x7;
729
730 HPSB_VERBOSE("NodeMgr: raw=0x%08x irmc=%d cmc=%d isc=%d bmc=%d pmc=%d "
731 "cyc_clk_acc=%d max_rec=%d max_rom=%d gen=%d lspd=%d",
732 busoptions, ne->busopt.irmc, ne->busopt.cmc,
733 ne->busopt.isc, ne->busopt.bmc, ne->busopt.pmc,
734 ne->busopt.cyc_clk_acc, ne->busopt.max_rec,
735 mr[ne->busopt.max_rom],
736 ne->busopt.generation, ne->busopt.lnkspd);
737}
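/* Worked example (illustrative value, not from a real device): a BusOptions
 * quadlet of 0xf064a002 decodes to irmc=1 cmc=1 isc=1 bmc=1 pmc=0,
 * cyc_clk_acc=0x64, max_rec=1<<(0xa+1)=2048 bytes, max_rom=0, generation=0
 * and lnkspd=2 (S400). */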
738
739
740static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr *csr,
741 struct host_info *hi, nodeid_t nodeid,
742 unsigned int generation)
743{
744 struct hpsb_host *host = hi->host;
745 struct node_entry *ne;
746
747 ne = kmalloc(sizeof(struct node_entry), GFP_KERNEL);
748 if (!ne) return NULL;
749
750 memset(ne, 0, sizeof(struct node_entry));
751
752 ne->tpool = &host->tpool[nodeid & NODE_MASK];
753
754 ne->host = host;
755 ne->nodeid = nodeid;
756 ne->generation = generation;
757 ne->needs_probe = 1;
758
759 ne->guid = guid;
760 ne->guid_vendor_id = (guid >> 40) & 0xffffff;
761 ne->guid_vendor_oui = nodemgr_find_oui_name(ne->guid_vendor_id);
762 ne->csr = csr;
763
764 memcpy(&ne->device, &nodemgr_dev_template_ne,
765 sizeof(ne->device));
766 ne->device.parent = &host->device;
767 snprintf(ne->device.bus_id, BUS_ID_SIZE, "%016Lx",
768 (unsigned long long)(ne->guid));
769
770 ne->class_dev.dev = &ne->device;
771 ne->class_dev.class = &nodemgr_ne_class;
772 snprintf(ne->class_dev.class_id, BUS_ID_SIZE, "%016Lx",
773 (unsigned long long)(ne->guid));
774
775 device_register(&ne->device);
776 class_device_register(&ne->class_dev);
777 get_device(&ne->device);
778
779 if (ne->guid_vendor_oui)
780 device_create_file(&ne->device, &dev_attr_ne_guid_vendor_oui);
781 nodemgr_create_ne_dev_files(ne);
782
783 nodemgr_update_bus_options(ne);
784
785 HPSB_DEBUG("%s added: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
786 (host->node_id == nodeid) ? "Host" : "Node",
787 NODE_BUS_ARGS(host, nodeid), (unsigned long long)guid);
788
789 return ne;
790}
791
792
793static struct node_entry *find_entry_by_guid(u64 guid)
794{
795 struct class *class = &nodemgr_ne_class;
796 struct class_device *cdev;
797 struct node_entry *ne, *ret_ne = NULL;
798
799 down_read(&class->subsys.rwsem);
800 list_for_each_entry(cdev, &class->children, node) {
801 ne = container_of(cdev, struct node_entry, class_dev);
802
803 if (ne->guid == guid) {
804 ret_ne = ne;
805 break;
806 }
807 }
808 up_read(&class->subsys.rwsem);
809
810 return ret_ne;
811}
812
813
814static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host, nodeid_t nodeid)
815{
816 struct class *class = &nodemgr_ne_class;
817 struct class_device *cdev;
818 struct node_entry *ne, *ret_ne = NULL;
819
820 down_read(&class->subsys.rwsem);
821 list_for_each_entry(cdev, &class->children, node) {
822 ne = container_of(cdev, struct node_entry, class_dev);
823
824 if (ne->host == host && ne->nodeid == nodeid) {
825 ret_ne = ne;
826 break;
827 }
828 }
829 up_read(&class->subsys.rwsem);
830
831 return ret_ne;
832}
833
834
835static void nodemgr_register_device(struct node_entry *ne,
836 struct unit_directory *ud, struct device *parent)
837{
838 memcpy(&ud->device, &nodemgr_dev_template_ud,
839 sizeof(ud->device));
840
841 ud->device.parent = parent;
842
843 snprintf(ud->device.bus_id, BUS_ID_SIZE, "%s-%u",
844 ne->device.bus_id, ud->id);
845
846 ud->class_dev.dev = &ud->device;
847 ud->class_dev.class = &nodemgr_ud_class;
848 snprintf(ud->class_dev.class_id, BUS_ID_SIZE, "%s-%u",
849 ne->device.bus_id, ud->id);
850
851 device_register(&ud->device);
852 class_device_register(&ud->class_dev);
853 get_device(&ud->device);
854
855 if (ud->vendor_oui)
856 device_create_file(&ud->device, &dev_attr_ud_vendor_oui);
857 nodemgr_create_ud_dev_files(ud);
858}
859
860
861/* This implementation currently only scans the config rom and its
 862 * immediate unit directories, looking for specifier_id and
 863 * version entries, in order to get driver autoloading working. */
864static struct unit_directory *nodemgr_process_unit_directory
865 (struct host_info *hi, struct node_entry *ne, struct csr1212_keyval *ud_kv,
866 unsigned int *id, struct unit_directory *parent)
867{
868 struct unit_directory *ud;
869 struct unit_directory *ud_child = NULL;
870 struct csr1212_dentry *dentry;
871 struct csr1212_keyval *kv;
872 u8 last_key_id = 0;
873
874 ud = kmalloc(sizeof(struct unit_directory), GFP_KERNEL);
875 if (!ud)
876 goto unit_directory_error;
877
878 memset (ud, 0, sizeof(struct unit_directory));
879
880 ud->ne = ne;
881 ud->ignore_driver = ignore_drivers;
882 ud->address = ud_kv->offset + CSR1212_CONFIG_ROM_SPACE_BASE;
883 ud->ud_kv = ud_kv;
884 ud->id = (*id)++;
885
886 csr1212_for_each_dir_entry(ne->csr, kv, ud_kv, dentry) {
887 switch (kv->key.id) {
888 case CSR1212_KV_ID_VENDOR:
889 if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
890 ud->vendor_id = kv->value.immediate;
891 ud->flags |= UNIT_DIRECTORY_VENDOR_ID;
892
893 if (ud->vendor_id)
894 ud->vendor_oui = nodemgr_find_oui_name(ud->vendor_id);
895 }
896 break;
897
898 case CSR1212_KV_ID_MODEL:
899 ud->model_id = kv->value.immediate;
900 ud->flags |= UNIT_DIRECTORY_MODEL_ID;
901 break;
902
903 case CSR1212_KV_ID_SPECIFIER_ID:
904 ud->specifier_id = kv->value.immediate;
905 ud->flags |= UNIT_DIRECTORY_SPECIFIER_ID;
906 break;
907
908 case CSR1212_KV_ID_VERSION:
909 ud->version = kv->value.immediate;
910 ud->flags |= UNIT_DIRECTORY_VERSION;
911 break;
912
913 case CSR1212_KV_ID_DESCRIPTOR:
914 if (kv->key.type == CSR1212_KV_TYPE_LEAF &&
915 CSR1212_DESCRIPTOR_LEAF_TYPE(kv) == 0 &&
916 CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) == 0 &&
917 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) == 0 &&
918 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) == 0 &&
919 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) {
920 switch (last_key_id) {
921 case CSR1212_KV_ID_VENDOR:
922 ud->vendor_name_kv = kv;
923 csr1212_keep_keyval(kv);
924 break;
925
926 case CSR1212_KV_ID_MODEL:
927 ud->model_name_kv = kv;
928 csr1212_keep_keyval(kv);
929 break;
930
931 }
932 } /* else if (kv->key.type == CSR1212_KV_TYPE_DIRECTORY) ... */
933 break;
934
935 case CSR1212_KV_ID_DEPENDENT_INFO:
936 /* Logical Unit Number */
937 if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
938 if (ud->flags & UNIT_DIRECTORY_HAS_LUN) {
939 ud_child = kmalloc(sizeof(struct unit_directory), GFP_KERNEL);
940 if (!ud_child)
941 goto unit_directory_error;
942 memcpy(ud_child, ud, sizeof(struct unit_directory));
943 nodemgr_register_device(ne, ud_child, &ne->device);
944 ud_child = NULL;
945
946 ud->id = (*id)++;
947 }
948 ud->lun = kv->value.immediate;
949 ud->flags |= UNIT_DIRECTORY_HAS_LUN;
950
951 /* Logical Unit Directory */
952 } else if (kv->key.type == CSR1212_KV_TYPE_DIRECTORY) {
953 /* This should really be done in SBP2 as this is
 954 * doing SBP2-specific parsing.
955 */
956
957 /* first register the parent unit */
958 ud->flags |= UNIT_DIRECTORY_HAS_LUN_DIRECTORY;
959 if (ud->device.bus != &ieee1394_bus_type)
960 nodemgr_register_device(ne, ud, &ne->device);
961
962 /* process the child unit */
963 ud_child = nodemgr_process_unit_directory(hi, ne, kv, id, ud);
964
965 if (ud_child == NULL)
966 break;
967
968 /* inherit unspecified values so hotplug picks it up */
969 if ((ud->flags & UNIT_DIRECTORY_MODEL_ID) &&
970 !(ud_child->flags & UNIT_DIRECTORY_MODEL_ID))
971 {
972 ud_child->flags |= UNIT_DIRECTORY_MODEL_ID;
973 ud_child->model_id = ud->model_id;
974 }
975 if ((ud->flags & UNIT_DIRECTORY_SPECIFIER_ID) &&
976 !(ud_child->flags & UNIT_DIRECTORY_SPECIFIER_ID))
977 {
978 ud_child->flags |= UNIT_DIRECTORY_SPECIFIER_ID;
979 ud_child->specifier_id = ud->specifier_id;
980 }
981 if ((ud->flags & UNIT_DIRECTORY_VERSION) &&
982 !(ud_child->flags & UNIT_DIRECTORY_VERSION))
983 {
984 ud_child->flags |= UNIT_DIRECTORY_VERSION;
985 ud_child->version = ud->version;
986 }
987
988 /* register the child unit */
989 ud_child->flags |= UNIT_DIRECTORY_LUN_DIRECTORY;
990 nodemgr_register_device(ne, ud_child, &ud->device);
991 }
992
993 break;
994
995 default:
996 break;
997 }
998 last_key_id = kv->key.id;
999 }
1000
1001 /* Don't register child units here; register this top-level unit only if
      * it wasn't already registered above. */
1002 if (!parent && ud->device.bus != &ieee1394_bus_type)
1003 nodemgr_register_device(ne, ud, &ne->device);
1004
1005 return ud;
1006
1007unit_directory_error:
1008 if (ud != NULL)
1009 kfree(ud);
1010 return NULL;
1011}
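/* For example (illustrative, values depend on the device): an SBP-2 style
 * unit directory usually carries Specifier_ID, Version and a logical unit
 * number, so the loop above would leave UNIT_DIRECTORY_SPECIFIER_ID,
 * UNIT_DIRECTORY_VERSION and UNIT_DIRECTORY_HAS_LUN set in ud->flags. */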
1012
1013
1014static void nodemgr_process_root_directory(struct host_info *hi, struct node_entry *ne)
1015{
1016 unsigned int ud_id = 0;
1017 struct csr1212_dentry *dentry;
1018 struct csr1212_keyval *kv;
1019 u8 last_key_id = 0;
1020
1021 ne->needs_probe = 0;
1022
1023 csr1212_for_each_dir_entry(ne->csr, kv, ne->csr->root_kv, dentry) {
1024 switch (kv->key.id) {
1025 case CSR1212_KV_ID_VENDOR:
1026 ne->vendor_id = kv->value.immediate;
1027
1028 if (ne->vendor_id)
1029 ne->vendor_oui = nodemgr_find_oui_name(ne->vendor_id);
1030 break;
1031
1032 case CSR1212_KV_ID_NODE_CAPABILITIES:
1033 ne->capabilities = kv->value.immediate;
1034 break;
1035
1036 case CSR1212_KV_ID_UNIT:
1037 nodemgr_process_unit_directory(hi, ne, kv, &ud_id, NULL);
1038 break;
1039
1040 case CSR1212_KV_ID_DESCRIPTOR:
1041 if (last_key_id == CSR1212_KV_ID_VENDOR) {
1042 if (kv->key.type == CSR1212_KV_TYPE_LEAF &&
1043 CSR1212_DESCRIPTOR_LEAF_TYPE(kv) == 0 &&
1044 CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) == 0 &&
1045 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) == 0 &&
1046 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) == 0 &&
1047 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) {
1048 ne->vendor_name_kv = kv;
1049 csr1212_keep_keyval(kv);
1050 }
1051 }
1052 break;
1053 }
1054 last_key_id = kv->key.id;
1055 }
1056
1057 if (ne->vendor_oui)
1058 device_create_file(&ne->device, &dev_attr_ne_vendor_oui);
1059 if (ne->vendor_name_kv)
1060 device_create_file(&ne->device, &dev_attr_ne_vendor_name_kv);
1061}
1062
1063#ifdef CONFIG_HOTPLUG
1064
1065static int nodemgr_hotplug(struct class_device *cdev, char **envp, int num_envp,
1066 char *buffer, int buffer_size)
1067{
1068 struct unit_directory *ud;
1069 int i = 0;
1070 int length = 0;
1071
1072 if (!cdev)
1073 return -ENODEV;
1074
1075 ud = container_of(cdev, struct unit_directory, class_dev);
1076
1077 if (ud->ne->in_limbo || ud->ignore_driver)
1078 return -ENODEV;
1079
1080#define PUT_ENVP(fmt,val) \
1081do { \
1082 int printed; \
1083 envp[i++] = buffer; \
1084 printed = snprintf(buffer, buffer_size - length, \
1085 fmt, val); \
1086 if ((buffer_size - (length+printed) <= 0) || (i >= num_envp)) \
1087 return -ENOMEM; \
1088 length += printed+1; \
1089 buffer += printed+1; \
1090} while (0)
1091
1092 PUT_ENVP("VENDOR_ID=%06x", ud->vendor_id);
1093 PUT_ENVP("MODEL_ID=%06x", ud->model_id);
1094 PUT_ENVP("GUID=%016Lx", (unsigned long long)ud->ne->guid);
1095 PUT_ENVP("SPECIFIER_ID=%06x", ud->specifier_id);
1096 PUT_ENVP("VERSION=%06x", ud->version);
1097
1098#undef PUT_ENVP
1099
1100 envp[i] = NULL;
1101
1102 return 0;
1103}
1104
1105#else
1106
1107static int nodemgr_hotplug(struct class_device *cdev, char **envp, int num_envp,
1108 char *buffer, int buffer_size)
1109{
1110 return -ENODEV;
1111}
1112
1113#endif /* CONFIG_HOTPLUG */
1114
1115
1116int hpsb_register_protocol(struct hpsb_protocol_driver *driver)
1117{
1118 int ret;
1119
1120 /* This will cause a probe for devices */
1121 ret = driver_register(&driver->driver);
1122 if (!ret)
1123 nodemgr_create_drv_files(driver);
1124
1125 return ret;
1126}
1127
1128void hpsb_unregister_protocol(struct hpsb_protocol_driver *driver)
1129{
1130 nodemgr_remove_drv_files(driver);
1131 /* This will subsequently disconnect all devices that our driver
1132 * is attached to. */
1133 driver_unregister(&driver->driver);
1134}
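/* Minimal sketch (not part of this file) of how a protocol driver might
 * describe its units and register with the node manager.  The names and
 * the specifier_id/version values below are placeholders; real drivers
 * (e.g. sbp2, eth1394) use their protocol's actual values and usually set
 * probe/remove/update callbacks as well. */
static struct ieee1394_device_id example_id_table[] = {
	{
		.match_flags	= IEEE1394_MATCH_SPECIFIER_ID |
				  IEEE1394_MATCH_VERSION,
		.specifier_id	= 0x012345,	/* placeholder */
		.version	= 0x000001,	/* placeholder */
	},
	{ }				/* terminator: match_flags == 0 */
};

static struct hpsb_protocol_driver example_driver = {
	.name		= "example protocol",
	.id_table	= example_id_table,
	.driver		= {
		.name	= "example",
		.bus	= &ieee1394_bus_type,
	},
};

/* A protocol module would then call hpsb_register_protocol(&example_driver)
 * from its init routine and hpsb_unregister_protocol(&example_driver) from
 * its exit routine. */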
1135
1136
1137/*
1138 * This function updates nodes that were present on the bus before the
1139 * reset and still are after the reset. The nodeid and the config rom
1140 * may have changed, and the drivers managing this device must be
1141 * informed that this device just went through a bus reset, to allow
1142 * them to take whatever actions are required.
1143 */
1144static void nodemgr_update_node(struct node_entry *ne, struct csr1212_csr *csr,
1145 struct host_info *hi, nodeid_t nodeid,
1146 unsigned int generation)
1147{
1148 if (ne->nodeid != nodeid) {
1149 HPSB_DEBUG("Node changed: " NODE_BUS_FMT " -> " NODE_BUS_FMT,
1150 NODE_BUS_ARGS(ne->host, ne->nodeid),
1151 NODE_BUS_ARGS(ne->host, nodeid));
1152 ne->nodeid = nodeid;
1153 }
1154
1155 if (ne->busopt.generation != ((be32_to_cpu(csr->bus_info_data[2]) >> 4) & 0xf)) {
1156 kfree(ne->csr->private);
1157 csr1212_destroy_csr(ne->csr);
1158 ne->csr = csr;
1159
1160 /* If the node's configrom generation has changed, we
1161 * unregister all the unit directories. */
1162 nodemgr_remove_uds(ne);
1163
1164 nodemgr_update_bus_options(ne);
1165
1166 /* Mark the node as new, so it gets re-probed */
1167 ne->needs_probe = 1;
1168 } else {
1169 /* old cache is valid, so update its generation */
1170 struct nodemgr_csr_info *ci = ne->csr->private;
1171 ci->generation = generation;
1172 /* free the partially filled now unneeded new cache */
1173 kfree(csr->private);
1174 csr1212_destroy_csr(csr);
1175 }
1176
1177 if (ne->in_limbo)
1178 nodemgr_resume_ne(ne);
1179
1180 /* Mark the node current */
1181 ne->generation = generation;
1182}
1183
1184
1185
1186static void nodemgr_node_scan_one(struct host_info *hi,
1187 nodeid_t nodeid, int generation)
1188{
1189 struct hpsb_host *host = hi->host;
1190 struct node_entry *ne;
1191 octlet_t guid;
1192 struct csr1212_csr *csr;
1193 struct nodemgr_csr_info *ci;
1194
1195 ci = kmalloc(sizeof(struct nodemgr_csr_info), GFP_KERNEL);
1196 if (!ci)
1197 return;
1198
1199 ci->host = host;
1200 ci->nodeid = nodeid;
1201 ci->generation = generation;
1202
1203 /* We need to detect when the ConfigROM's generation has changed,
1204 * so we only update the node's info when it needs to be. */
1205
1206 csr = csr1212_create_csr(&nodemgr_csr_ops, 5 * sizeof(quadlet_t), ci);
1207 if (!csr || csr1212_parse_csr(csr) != CSR1212_SUCCESS) {
1208 HPSB_ERR("Error parsing configrom for node " NODE_BUS_FMT,
1209 NODE_BUS_ARGS(host, nodeid));
1210 if (csr)
1211 csr1212_destroy_csr(csr);
1212 kfree(ci);
1213 return;
1214 }
1215
1216 if (csr->bus_info_data[1] != IEEE1394_BUSID_MAGIC) {
1217 /* This isn't a 1394 device, but we let it slide. There
1218 * was a report of a device with broken firmware which
1219 * reported '2394' instead of '1394', which is obviously a
1220 * mistake. One would hope that a non-1394 device never
1221 * gets connected to a FireWire bus. If someone does, we
1222 * shouldn't be held responsible, so we'll allow it with a
1223 * warning. */
1224 HPSB_WARN("Node " NODE_BUS_FMT " has invalid busID magic [0x%08x]",
1225 NODE_BUS_ARGS(host, nodeid), csr->bus_info_data[1]);
1226 }
1227
1228 guid = ((u64)be32_to_cpu(csr->bus_info_data[3]) << 32) | be32_to_cpu(csr->bus_info_data[4]);
1229 ne = find_entry_by_guid(guid);
1230
1231 if (ne && ne->host != host && ne->in_limbo) {
1232 /* Must have moved this device from one host to another */
1233 nodemgr_remove_ne(ne);
1234 ne = NULL;
1235 }
1236
1237 if (!ne)
1238 nodemgr_create_node(guid, csr, hi, nodeid, generation);
1239 else
1240 nodemgr_update_node(ne, csr, hi, nodeid, generation);
1241
1242 return;
1243}
1244
1245
1246static void nodemgr_node_scan(struct host_info *hi, int generation)
1247{
1248 int count;
1249 struct hpsb_host *host = hi->host;
1250 struct selfid *sid = (struct selfid *)host->topology_map;
1251 nodeid_t nodeid = LOCAL_BUS;
1252
1253 /* Scan each node on the bus */
1254 for (count = host->selfid_count; count; count--, sid++) {
1255 if (sid->extended)
1256 continue;
1257
1258 if (!sid->link_active) {
1259 nodeid++;
1260 continue;
1261 }
1262 nodemgr_node_scan_one(hi, nodeid++, generation);
1263 }
1264}
1265
1266
1267static void nodemgr_suspend_ne(struct node_entry *ne)
1268{
1269 struct class_device *cdev;
1270 struct unit_directory *ud;
1271
1272 HPSB_DEBUG("Node suspended: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
1273 NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
1274
1275 ne->in_limbo = 1;
1276 device_create_file(&ne->device, &dev_attr_ne_in_limbo);
1277
1278 down_write(&ne->device.bus->subsys.rwsem);
1279 list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
1280 ud = container_of(cdev, struct unit_directory, class_dev);
1281
1282 if (ud->ne != ne)
1283 continue;
1284
1285 if (ud->device.driver &&
1286 (!ud->device.driver->suspend ||
1287 ud->device.driver->suspend(&ud->device, PMSG_SUSPEND, 0)))
1288 device_release_driver(&ud->device);
1289 }
1290 up_write(&ne->device.bus->subsys.rwsem);
1291}
1292
1293
1294static void nodemgr_resume_ne(struct node_entry *ne)
1295{
1296 struct class_device *cdev;
1297 struct unit_directory *ud;
1298
1299 ne->in_limbo = 0;
1300 device_remove_file(&ne->device, &dev_attr_ne_in_limbo);
1301
1302 down_read(&ne->device.bus->subsys.rwsem);
1303 list_for_each_entry(cdev, &nodemgr_ud_class.children, node) {
1304 ud = container_of(cdev, struct unit_directory, class_dev);
1305
1306 if (ud->ne != ne)
1307 continue;
1308
1309 if (ud->device.driver && ud->device.driver->resume)
1310 ud->device.driver->resume(&ud->device, 0);
1311 }
1312 up_read(&ne->device.bus->subsys.rwsem);
1313
1314 HPSB_DEBUG("Node resumed: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
1315 NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
1316}
1317
1318
1319static void nodemgr_update_pdrv(struct node_entry *ne)
1320{
1321 struct unit_directory *ud;
1322 struct hpsb_protocol_driver *pdrv;
1323 struct class *class = &nodemgr_ud_class;
1324 struct class_device *cdev;
1325
1326 down_read(&class->subsys.rwsem);
1327 list_for_each_entry(cdev, &class->children, node) {
1328 ud = container_of(cdev, struct unit_directory, class_dev);
1329 if (ud->ne != ne || !ud->device.driver)
1330 continue;
1331
1332 pdrv = container_of(ud->device.driver, struct hpsb_protocol_driver, driver);
1333
1334 if (pdrv->update && pdrv->update(ud)) {
1335 down_write(&ud->device.bus->subsys.rwsem);
1336 device_release_driver(&ud->device);
1337 up_write(&ud->device.bus->subsys.rwsem);
1338 }
1339 }
1340 up_read(&class->subsys.rwsem);
1341}
1342
1343
1344static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int generation)
1345{
1346 struct device *dev;
1347
1348 if (ne->host != hi->host || ne->in_limbo)
1349 return;
1350
1351 dev = get_device(&ne->device);
1352 if (!dev)
1353 return;
1354
1355 /* If "needs_probe", then this is either a new or changed node we
1356 * rescan totally. If the generation matches for an existing node
1357 * (one that existed prior to the bus reset) we send update calls
1358 * down to the drivers. Otherwise, this is a dead node and we
1359 * suspend it. */
1360 if (ne->needs_probe)
1361 nodemgr_process_root_directory(hi, ne);
1362 else if (ne->generation == generation)
1363 nodemgr_update_pdrv(ne);
1364 else
1365 nodemgr_suspend_ne(ne);
1366
1367 put_device(dev);
1368}
1369
1370
1371static void nodemgr_node_probe(struct host_info *hi, int generation)
1372{
1373 struct hpsb_host *host = hi->host;
1374 struct class *class = &nodemgr_ne_class;
1375 struct class_device *cdev;
1376
1377 /* Do some processing of the nodes we've probed. This pulls them
1378 * into the sysfs layer if needed, and can result in processing of
1379 * unit-directories, or just updating the node and its
1380 * unit-directories. */
1381 down_read(&class->subsys.rwsem);
1382 list_for_each_entry(cdev, &class->children, node)
1383 nodemgr_probe_ne(hi, container_of(cdev, struct node_entry, class_dev), generation);
1384 up_read(&class->subsys.rwsem);
1385
1386
1387 /* If we had a bus reset while we were scanning the bus, it is
1388 * possible that we did not probe all nodes. In that case, we
1389 * skip the clean up for now, since we could remove nodes that
1390 * were still on the bus. The bus reset increased hi->reset_sem,
1391 * so there's a bus scan pending which will do the clean up
1392 * eventually.
1393 *
1394 * Now let's tell the bus to rescan our devices. This may seem
1395 * like overhead, but the driver-model core will only scan a
1396 * device for a driver when either the device is added, or when a
1397 * new driver is added. A bus reset is a good reason to rescan
1398 * devices that were there before. For example, an sbp2 device
1399 * may become available for login, if the host that held it was
1400 * just removed. */
1401
1402 if (generation == get_hpsb_generation(host))
1403 bus_rescan_devices(&ieee1394_bus_type);
1404
1405 return;
1406}
1407
1408/* Because we are a 1394a-2000 compliant IRM, we need to inform all the other
1409 * nodes of the broadcast channel. (Really we're only setting the validity
1410 * bit). Other IRM responsibilities go in here as well. */
1411static int nodemgr_do_irm_duties(struct hpsb_host *host, int cycles)
1412{
1413 quadlet_t bc;
1414
1415 /* if irm_id == -1 then there is no IRM on this bus */
1416 if (!host->is_irm || host->irm_id == (nodeid_t)-1)
1417 return 1;
1418
1419 host->csr.broadcast_channel |= 0x40000000; /* set validity bit */
1420
1421 bc = cpu_to_be32(host->csr.broadcast_channel);
1422
1423 hpsb_write(host, LOCAL_BUS | ALL_NODES, get_hpsb_generation(host),
1424 (CSR_REGISTER_BASE | CSR_BROADCAST_CHANNEL),
1425 &bc, sizeof(quadlet_t));
1426
1427 /* If there is no bus manager then we should set the root node's
1428 * force_root bit to promote bus stability per the 1394
1429 * spec. (8.4.2.6) */
1430 if (host->busmgr_id == 0xffff && host->node_count > 1)
1431 {
1432 u16 root_node = host->node_count - 1;
1433 struct node_entry *ne = find_entry_by_nodeid(host, root_node | LOCAL_BUS);
1434
1435 if (ne && ne->busopt.cmc)
1436 hpsb_send_phy_config(host, root_node, -1);
1437 else {
1438 HPSB_DEBUG("The root node is not cycle master capable; "
1439 "selecting a new root node and resetting...");
1440
1441 if (cycles >= 5) {
1442 /* Oh screw it! Just leave the bus as it is */
1443 HPSB_DEBUG("Stopping reset loop for IRM sanity");
1444 return 1;
1445 }
1446
1447 hpsb_send_phy_config(host, NODEID_TO_NODE(host->node_id), -1);
1448 hpsb_reset_bus(host, LONG_RESET_FORCE_ROOT);
1449
1450 return 0;
1451 }
1452 }
1453
1454 return 1;
1455}
1456
1457/* We need to ensure that if we are not the IRM, that the IRM node is capable of
1458 * everything we can do, otherwise issue a bus reset and try to become the IRM
1459 * ourselves. */
1460static int nodemgr_check_irm_capability(struct hpsb_host *host, int cycles)
1461{
1462 quadlet_t bc;
1463 int status;
1464
1465 if (hpsb_disable_irm || host->is_irm)
1466 return 1;
1467
1468 status = hpsb_read(host, LOCAL_BUS | (host->irm_id),
1469 get_hpsb_generation(host),
1470 (CSR_REGISTER_BASE | CSR_BROADCAST_CHANNEL),
1471 &bc, sizeof(quadlet_t));
1472
1473 if (status < 0 || !(be32_to_cpu(bc) & 0x80000000)) {
1474 /* The current irm node does not have a valid BROADCAST_CHANNEL
1475 * register and we do, so reset the bus with force_root set */
1476 HPSB_DEBUG("Current remote IRM is not 1394a-2000 compliant, resetting...");
1477
1478 if (cycles >= 5) {
1479 /* Oh screw it! Just leave the bus as it is */
1480 HPSB_DEBUG("Stopping reset loop for IRM sanity");
1481 return 1;
1482 }
1483
1484 hpsb_send_phy_config(host, NODEID_TO_NODE(host->node_id), -1);
1485 hpsb_reset_bus(host, LONG_RESET_FORCE_ROOT);
1486
1487 return 0;
1488 }
1489
1490 return 1;
1491}
1492
1493static int nodemgr_host_thread(void *__hi)
1494{
1495 struct host_info *hi = (struct host_info *)__hi;
1496 struct hpsb_host *host = hi->host;
1497 int reset_cycles = 0;
1498
1499 /* No userlevel access needed */
1500 daemonize(hi->daemon_name);
1501
1502 /* Setup our device-model entries */
1503 nodemgr_create_host_dev_files(host);
1504
1505 /* Sit and wait for a signal to probe the nodes on the bus. This
1506 * happens when we get a bus reset. */
1507 while (1) {
1508 unsigned int generation = 0;
1509 int i;
1510
1511 if (down_interruptible(&hi->reset_sem) ||
1512 down_interruptible(&nodemgr_serialize)) {
1513 if (try_to_freeze(PF_FREEZE))
1514 continue;
1515 printk("NodeMgr: received unexpected signal?!\n" );
1516 break;
1517 }
1518
1519 if (hi->kill_me) {
1520 up(&nodemgr_serialize);
1521 break;
1522 }
1523
1524 /* Pause for 1/4 second in 1/16 second intervals,
1525 * to make sure things settle down. */
1526 for (i = 0; i < 4 ; i++) {
1527 set_current_state(TASK_INTERRUPTIBLE);
1528 if (msleep_interruptible(63)) {
1529 up(&nodemgr_serialize);
1530 goto caught_signal;
1531 }
1532
1533 /* Now get the generation in which the node ID's we collect
1534 * are valid. During the bus scan we will use this generation
1535 * for the read transactions, so that if another reset occurs
1536 * during the scan the transactions will fail instead of
1537 * returning bogus data. */
1538 generation = get_hpsb_generation(host);
1539
1540 /* If we get a reset before we are done waiting, then
1541 * start the waiting over again */
1542 while (!down_trylock(&hi->reset_sem))
1543 i = 0;
1544
1545 /* Check the kill_me again */
1546 if (hi->kill_me) {
1547 up(&nodemgr_serialize);
1548 goto caught_signal;
1549 }
1550 }
1551
1552 if (!nodemgr_check_irm_capability(host, reset_cycles)) {
1553 reset_cycles++;
1554 up(&nodemgr_serialize);
1555 continue;
1556 }
1557
1558 /* Scan our nodes to get the bus options and create node
1559 * entries. This does not do the sysfs stuff, since that
1560 * would trigger hotplug callbacks and such, which is a
1561 * bad idea at this point. */
1562 nodemgr_node_scan(hi, generation);
1563 if (!nodemgr_do_irm_duties(host, reset_cycles)) {
1564 reset_cycles++;
1565 up(&nodemgr_serialize);
1566 continue;
1567 }
1568
1569 reset_cycles = 0;
1570
1571 /* This actually does the full probe, with sysfs
1572 * registration. */
1573 nodemgr_node_probe(hi, generation);
1574
1575 /* Update some of our sysfs symlinks */
1576 nodemgr_update_host_dev_links(host);
1577
1578 up(&nodemgr_serialize);
1579 }
1580
1581caught_signal:
1582 HPSB_VERBOSE("NodeMgr: Exiting thread");
1583
1584 complete_and_exit(&hi->exited, 0);
1585}
1586
1587int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
1588{
1589 struct class *class = &hpsb_host_class;
1590 struct class_device *cdev;
1591 struct hpsb_host *host;
1592 int error = 0;
1593
1594 down_read(&class->subsys.rwsem);
1595 list_for_each_entry(cdev, &class->children, node) {
1596 host = container_of(cdev, struct hpsb_host, class_dev);
1597
1598 if ((error = cb(host, __data)))
1599 break;
1600 }
1601 up_read(&class->subsys.rwsem);
1602
1603 return error;
1604}
1605
1606/* The following four convenience functions use a struct node_entry
1607 * for addressing a node on the bus. They are intended for use by any
1608 * process context, not just the nodemgr thread, so we need to be a
1609 * little careful when reading out the node ID and generation. The
1610 * thing that can go wrong is that we get the node ID, then a bus
1611 * reset occurs, and then we read the generation. The node ID is
1612 * possibly invalid, but the generation is current, and we end up
1613 * sending a packet to the wrong node.
1614 *
1615 * The solution is to make sure we read the generation first, so that
1616 * if a reset occurs in the process, we end up with a stale generation
1617 * and the transactions will fail instead of silently using wrong node
1618 * ID's.
1619 */
1620
1621void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt)
1622{
1623 pkt->host = ne->host;
1624 pkt->generation = ne->generation;
1625 barrier();
1626 pkt->node_id = ne->nodeid;
1627}
1628
1629int hpsb_node_write(struct node_entry *ne, u64 addr,
1630 quadlet_t *buffer, size_t length)
1631{
1632 unsigned int generation = ne->generation;
1633
1634 barrier();
1635 return hpsb_write(ne->host, ne->nodeid, generation,
1636 addr, buffer, length);
1637}
1638
1639static void nodemgr_add_host(struct hpsb_host *host)
1640{
1641 struct host_info *hi;
1642
1643 hi = hpsb_create_hostinfo(&nodemgr_highlevel, host, sizeof(*hi));
1644
1645 if (!hi) {
1646 HPSB_ERR ("NodeMgr: out of memory in add host");
1647 return;
1648 }
1649
1650 hi->host = host;
1651 init_completion(&hi->exited);
1652 sema_init(&hi->reset_sem, 0);
1653
1654 sprintf(hi->daemon_name, "knodemgrd_%d", host->id);
1655
1656 hi->pid = kernel_thread(nodemgr_host_thread, hi, CLONE_KERNEL);
1657
1658 if (hi->pid < 0) {
1659 HPSB_ERR ("NodeMgr: failed to start %s thread for %s",
1660 hi->daemon_name, host->driver->name);
1661 hpsb_destroy_hostinfo(&nodemgr_highlevel, host);
1662 return;
1663 }
1664
1665 return;
1666}
1667
1668static void nodemgr_host_reset(struct hpsb_host *host)
1669{
1670 struct host_info *hi = hpsb_get_hostinfo(&nodemgr_highlevel, host);
1671
1672 if (hi != NULL) {
1673 HPSB_VERBOSE("NodeMgr: Processing host reset for %s", hi->daemon_name);
1674 up(&hi->reset_sem);
1675 } else
1676 HPSB_ERR ("NodeMgr: could not process reset of unused host");
1677
1678 return;
1679}
1680
1681static void nodemgr_remove_host(struct hpsb_host *host)
1682{
1683 struct host_info *hi = hpsb_get_hostinfo(&nodemgr_highlevel, host);
1684
1685 if (hi) {
1686 if (hi->pid >= 0) {
1687 hi->kill_me = 1;
1688 mb();
1689 up(&hi->reset_sem);
1690 wait_for_completion(&hi->exited);
1691 nodemgr_remove_host_dev(&host->device);
1692 }
1693 } else
1694 HPSB_ERR("NodeMgr: host %s does not exist, cannot remove",
1695 host->driver->name);
1696
1697 return;
1698}
1699
1700static struct hpsb_highlevel nodemgr_highlevel = {
1701 .name = "Node manager",
1702 .add_host = nodemgr_add_host,
1703 .host_reset = nodemgr_host_reset,
1704 .remove_host = nodemgr_remove_host,
1705};
1706
1707int init_ieee1394_nodemgr(void)
1708{
1709 int ret;
1710
1711 ret = class_register(&nodemgr_ne_class);
1712 if (ret < 0)
1713 return ret;
1714
1715 ret = class_register(&nodemgr_ud_class);
1716 if (ret < 0) {
1717 class_unregister(&nodemgr_ne_class);
1718 return ret;
1719 }
1720
1721 hpsb_register_highlevel(&nodemgr_highlevel);
1722
1723 return 0;
1724}
1725
1726void cleanup_ieee1394_nodemgr(void)
1727{
1728 hpsb_unregister_highlevel(&nodemgr_highlevel);
1729
1730 class_unregister(&nodemgr_ud_class);
1731 class_unregister(&nodemgr_ne_class);
1732}
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
new file mode 100644
index 000000000000..3a2f0c02fd08
--- /dev/null
+++ b/drivers/ieee1394/nodemgr.h
@@ -0,0 +1,207 @@
1/*
2 * Copyright (C) 2000 Andreas E. Bombe
3 * 2001 Ben Collins <bcollins@debian.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software Foundation,
17 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#ifndef _IEEE1394_NODEMGR_H
21#define _IEEE1394_NODEMGR_H
22
23#include <linux/device.h>
24#include "csr1212.h"
25#include "ieee1394_core.h"
26#include "ieee1394_hotplug.h"
27
28/* '1' '3' '9' '4' in ASCII */
29#define IEEE1394_BUSID_MAGIC __constant_cpu_to_be32(0x31333934)
30
31/* This is the start of a Node entry structure. It should be a stable API
32 * for gathering info from the Node Manager about devices attached
33 * to the bus. */
34struct bus_options {
35 u8 irmc; /* Iso Resource Manager Capable */
36 u8 cmc; /* Cycle Master Capable */
37 u8 isc; /* Iso Capable */
38 u8 bmc; /* Bus Master Capable */
39 u8 pmc; /* Power Manager Capable (PNP spec) */
40 u8 cyc_clk_acc; /* Cycle clock accuracy */
41 u8 max_rom; /* Maximum block read supported in the CSR */
42 u8 generation; /* Incremented when configrom changes */
43 u8 lnkspd; /* Link speed */
44 u16 max_rec; /* Maximum packet size node can receive */
45};
46
47
48#define UNIT_DIRECTORY_VENDOR_ID 0x01
49#define UNIT_DIRECTORY_MODEL_ID 0x02
50#define UNIT_DIRECTORY_SPECIFIER_ID 0x04
51#define UNIT_DIRECTORY_VERSION 0x08
52#define UNIT_DIRECTORY_HAS_LUN_DIRECTORY 0x10
53#define UNIT_DIRECTORY_LUN_DIRECTORY 0x20
54#define UNIT_DIRECTORY_HAS_LUN 0x40
55
56/*
57 * A unit directory corresponds to a protocol supported by the
58 * node. If a node supports e.g. IP/1394 and AV/C, its config rom has a
59 * unit directory for each of these protocols.
60 */
61struct unit_directory {
62 struct node_entry *ne; /* The node which this directory belongs to */
63 octlet_t address; /* Address of the unit directory on the node */
64 u8 flags; /* Indicates which entries were read */
65
66 quadlet_t vendor_id;
67 struct csr1212_keyval *vendor_name_kv;
68 const char *vendor_oui;
69
70 quadlet_t model_id;
71 struct csr1212_keyval *model_name_kv;
72 quadlet_t specifier_id;
73 quadlet_t version;
74
75 unsigned int id;
76
77 int ignore_driver;
78
79 int length; /* Number of quadlets */
80
81 struct device device;
82
83 struct class_device class_dev;
84
85 struct csr1212_keyval *ud_kv;
86 u32 lun; /* logical unit number immediate value */
87};
88
89struct node_entry {
90 u64 guid; /* GUID of this node */
91 u32 guid_vendor_id; /* Top 24bits of guid */
92 const char *guid_vendor_oui; /* OUI name of guid vendor id */
93
94 struct hpsb_host *host; /* Host this node is attached to */
95 nodeid_t nodeid; /* NodeID */
96 struct bus_options busopt; /* Bus Options */
97 int needs_probe;
98 unsigned int generation; /* Synced with hpsb generation */
99
100 /* The following is read from the config rom */
101 u32 vendor_id;
102 struct csr1212_keyval *vendor_name_kv;
103 const char *vendor_oui;
104
105 u32 capabilities;
106 struct hpsb_tlabel_pool *tpool;
107
108 struct device device;
109
110 struct class_device class_dev;
111
112 /* Means this node is not attached anymore */
113 int in_limbo;
114
115 struct csr1212_csr *csr;
116};
117
118struct hpsb_protocol_driver {
119 /* The name of the driver, e.g. SBP2 or IP1394 */
120 const char *name;
121
122 /*
123 * The device id table describing the protocols and/or devices
124 * supported by this driver. This is used by the nodemgr to
125 * decide if a driver could support a given node, but the
126 * probe function below can implement further protocol
127 * dependent or vendor dependent checking.
128 */
129 struct ieee1394_device_id *id_table;
130
131 /*
132 * The update function is called when the node has just
133 * survived a bus reset, i.e. it is still present on the bus.
134 * However, it may be necessary to reestablish the connection
135 * or log into the node again, depending on the protocol. If the
136 * probe fails (returns non-zero), we unbind the driver from this
137 * device.
138 */
139 int (*update)(struct unit_directory *ud);
140
141 /* Our LDM structure */
142 struct device_driver driver;
143};
144
145int hpsb_register_protocol(struct hpsb_protocol_driver *driver);
146void hpsb_unregister_protocol(struct hpsb_protocol_driver *driver);
147
148static inline int hpsb_node_entry_valid(struct node_entry *ne)
149{
150 return ne->generation == get_hpsb_generation(ne->host);
151}
152
153/*
154 * Returns a node entry (which has its reference count incremented) or NULL if
155 * the GUID in question is not known. Getting a valid entry does not mean that
156 * the node with this GUID is currently accessible (might be powered down).
157 */
158struct node_entry *hpsb_guid_get_entry(u64 guid);
159
160/* Same as above, but use the nodeid to get a node entry. This is not
161 * fool-proof by itself, since the nodeid can change. */
162struct node_entry *hpsb_nodeid_get_entry(struct hpsb_host *host, nodeid_t nodeid);
163
164/*
165 * If the entry refers to a local host, this function will return the pointer
166 * to the hpsb_host structure. It will return NULL otherwise. Once you have
167 * established it is a local host, you can use that knowledge from then on (the
168 * GUID won't wander to an external node). */
169struct hpsb_host *hpsb_get_host_by_ne(struct node_entry *ne);
170
171/*
172 * This will fill in the given, pre-initialised hpsb_packet with the current
173 * information from the node entry (host, node ID, generation number). It will
174 * return false if the node owning the GUID is not accessible (and will not modify the
175 * hpsb_packet) and return true otherwise.
176 *
177 * Note that packet sending may still fail in hpsb_send_packet if a bus reset
178 * happens while you are trying to set up the packet (due to obsolete generation
179 * number). It will at least reliably fail so that you don't accidentally and
180 * unknowingly send your packet to the wrong node.
181 */
182void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt);
183
184int hpsb_node_read(struct node_entry *ne, u64 addr,
185 quadlet_t *buffer, size_t length);
186int hpsb_node_write(struct node_entry *ne, u64 addr,
187 quadlet_t *buffer, size_t length);
188int hpsb_node_lock(struct node_entry *ne, u64 addr,
189 int extcode, quadlet_t *data, quadlet_t arg);
190
191
192/* Iterate the hosts, calling a given function with supplied data for each
193 * host. */
194int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *));
195
196
197int init_ieee1394_nodemgr(void);
198void cleanup_ieee1394_nodemgr(void);
199
200
201/* The template for a host device */
202extern struct device nodemgr_dev_template_host;
203
204/* Bus attributes we export */
205extern struct bus_attribute *const fw_bus_attrs[];
206
207#endif /* _IEEE1394_NODEMGR_H */
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
new file mode 100644
index 000000000000..97ff364c0434
--- /dev/null
+++ b/drivers/ieee1394/ohci1394.c
@@ -0,0 +1,3705 @@
1/*
2 * ohci1394.c - driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Gord Peters <GordPeters@smarttech.com>
5 * 2001 Ben Collins <bcollins@debian.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22/*
23 * Things known to be working:
24 * . Async Request Transmit
25 * . Async Response Receive
26 * . Async Request Receive
27 * . Async Response Transmit
28 * . Iso Receive
29 * . DMA mmap for iso receive
30 * . Config ROM generation
31 *
32 * Things implemented, but still in test phase:
33 * . Iso Transmit
34 * . Async Stream Packets Transmit (Receive done via Iso interface)
35 *
36 * Things not implemented:
37 * . DMA error recovery
38 *
39 * Known bugs:
40 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41 * added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
42 */
43
44/*
45 * Acknowledgments:
46 *
47 * Adam J Richter <adam@yggdrasil.com>
48 * . Use of pci_class to find device
49 *
50 * Emilie Chung <emilie.chung@axis.com>
51 * . Tip on Async Request Filter
52 *
53 * Pascal Drolet <pascal.drolet@informission.ca>
54 * . Various tips for optimization and functionalities
55 *
56 * Robert Ficklin <rficklin@westengineering.com>
57 * . Loop in irq_handler
58 *
59 * James Goodwin <jamesg@Filanet.com>
60 * . Various tips on initialization, self-id reception, etc.
61 *
62 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63 * . Apple PowerBook detection
64 *
65 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66 * . Reset the board properly before leaving + misc cleanups
67 *
68 * Leon van Stuivenberg <leonvs@iae.nl>
69 * . Bug fixes
70 *
71 * Ben Collins <bcollins@debian.org>
72 * . Working big-endian support
73 * . Updated to 2.4.x module scheme (PCI as well)
74 * . Config ROM generation
75 *
76 * Manfred Weihs <weihs@ict.tuwien.ac.at>
77 * . Reworked code for initiating bus resets
78 * (long, short, with or without hold-off)
79 *
80 * Nandu Santhi <contactnandu@users.sourceforge.net>
81 * . Added support for nVidia nForce2 onboard Firewire chipset
82 *
83 */
84
85#include <linux/config.h>
86#include <linux/kernel.h>
87#include <linux/list.h>
88#include <linux/slab.h>
89#include <linux/interrupt.h>
90#include <linux/wait.h>
91#include <linux/errno.h>
92#include <linux/module.h>
93#include <linux/moduleparam.h>
94#include <linux/pci.h>
95#include <linux/fs.h>
96#include <linux/poll.h>
97#include <asm/byteorder.h>
98#include <asm/atomic.h>
99#include <asm/uaccess.h>
100#include <linux/delay.h>
101#include <linux/spinlock.h>
102
103#include <asm/pgtable.h>
104#include <asm/page.h>
105#include <asm/irq.h>
106#include <linux/sched.h>
107#include <linux/types.h>
108#include <linux/vmalloc.h>
109#include <linux/init.h>
110
111#ifdef CONFIG_PPC_PMAC
112#include <asm/machdep.h>
113#include <asm/pmac_feature.h>
114#include <asm/prom.h>
115#include <asm/pci-bridge.h>
116#endif
117
118#include "csr1212.h"
119#include "ieee1394.h"
120#include "ieee1394_types.h"
121#include "hosts.h"
122#include "dma.h"
123#include "iso.h"
124#include "ieee1394_core.h"
125#include "highlevel.h"
126#include "ohci1394.h"
127
128#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
129#define OHCI1394_DEBUG
130#endif
131
132#ifdef DBGMSG
133#undef DBGMSG
134#endif
135
136#ifdef OHCI1394_DEBUG
137#define DBGMSG(fmt, args...) \
138printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
139#else
140#define DBGMSG(fmt, args...)
141#endif
142
143#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
144#define OHCI_DMA_ALLOC(fmt, args...) \
145 HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
146 ++global_outstanding_dmas, ## args)
147#define OHCI_DMA_FREE(fmt, args...) \
148 HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
149 --global_outstanding_dmas, ## args)
150static int global_outstanding_dmas = 0;
151#else
152#define OHCI_DMA_ALLOC(fmt, args...)
153#define OHCI_DMA_FREE(fmt, args...)
154#endif
155
156/* print general (card independent) information */
157#define PRINT_G(level, fmt, args...) \
158printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
159
160/* print card specific information */
161#define PRINT(level, fmt, args...) \
162printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
163
164static char version[] __devinitdata =
165 "$Rev: 1250 $ Ben Collins <bcollins@debian.org>";
166
167/* Module Parameters */
168static int phys_dma = 1;
169module_param(phys_dma, int, 0644);
170MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
171
172static void dma_trm_tasklet(unsigned long data);
173static void dma_trm_reset(struct dma_trm_ctx *d);
174
175static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
176 enum context_type type, int ctx, int num_desc,
177 int buf_size, int split_buf_size, int context_base);
178static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
179static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
180
181static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
182 enum context_type type, int ctx, int num_desc,
183 int context_base);
184
185static void ohci1394_pci_remove(struct pci_dev *pdev);
186
187#ifndef __LITTLE_ENDIAN
188static unsigned hdr_sizes[] =
189{
190 3, /* TCODE_WRITEQ */
191 4, /* TCODE_WRITEB */
192 3, /* TCODE_WRITE_RESPONSE */
193 0, /* ??? */
194 3, /* TCODE_READQ */
195 4, /* TCODE_READB */
196 3, /* TCODE_READQ_RESPONSE */
197 4, /* TCODE_READB_RESPONSE */
198 1, /* TCODE_CYCLE_START (???) */
199 4, /* TCODE_LOCK_REQUEST */
200 2, /* TCODE_ISO_DATA */
201 4, /* TCODE_LOCK_RESPONSE */
202};
203
204/* Swap headers */
205static inline void packet_swab(quadlet_t *data, int tcode)
206{
207 size_t size = hdr_sizes[tcode];
208
209 if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
210 return;
211
212 while (size--)
213 data[size] = swab32(data[size]);
214}
215#else
216/* Don't waste cycles on byte swaps when the host byte order already matches */
217#define packet_swab(w,x)
218#endif /* !LITTLE_ENDIAN */
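/* Example (taken from the table above): a quadlet write request
 * (TCODE_WRITEQ) has a three-quadlet header, so packet_swab() swaps
 * data[0..2] on big-endian hosts, while an iso/stream packet
 * (TCODE_ISO_DATA) has only two header quadlets swapped. */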
219
220/***********************************
221 * IEEE-1394 functionality section *
222 ***********************************/
223
224static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
225{
226 int i;
227 unsigned long flags;
228 quadlet_t r;
229
230 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
231
232 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
233
234 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
235 if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
236 break;
237
238 mdelay(1);
239 }
240
241 r = reg_read(ohci, OHCI1394_PhyControl);
242
243 if (i >= OHCI_LOOP_COUNT)
244 PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
245 r, r & 0x80000000, i);
246
247 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
248
249 return (r & 0x00ff0000) >> 16;
250}
251
252static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
253{
254 int i;
255 unsigned long flags;
256 u32 r = 0;
257
258 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
259
260 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
261
262 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
263 r = reg_read(ohci, OHCI1394_PhyControl);
264 if (!(r & 0x00004000))
265 break;
266
267 mdelay(1);
268 }
269
270 if (i == OHCI_LOOP_COUNT)
271 PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
272 r, r & 0x00004000, i);
273
274 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
275
276 return;
277}
278
279/* Or's our value into the current value */
280static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
281{
282 u8 old;
283
284 old = get_phy_reg (ohci, addr);
285 old |= data;
286 set_phy_reg (ohci, addr, old);
287
288 return;
289}
290
291static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
292 int phyid, int isroot)
293{
294 quadlet_t *q = ohci->selfid_buf_cpu;
295 quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
296 size_t size;
297 quadlet_t q0, q1;
298
299 /* Check status of self-id reception */
300
301 if (ohci->selfid_swap)
302 q0 = le32_to_cpu(q[0]);
303 else
304 q0 = q[0];
305
306 if ((self_id_count & 0x80000000) ||
307 ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
308 PRINT(KERN_ERR,
309 "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
310 self_id_count, q0, ohci->self_id_errors);
311
312 /* Tip by James Goodwin <jamesg@Filanet.com>:
313 * We had an error; generate another bus reset in response. */
314 if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
315 set_phy_reg_mask (ohci, 1, 0x40);
316 ohci->self_id_errors++;
317 } else {
318 PRINT(KERN_ERR,
319 "Too many errors on SelfID reception, giving up!");
320 }
321 return;
322 }
323
324 /* SelfID Ok, reset error counter. */
325 ohci->self_id_errors = 0;
326
327 size = ((self_id_count & 0x00001FFC) >> 2) - 1;
328 q++;
329
330 while (size > 0) {
331 if (ohci->selfid_swap) {
332 q0 = le32_to_cpu(q[0]);
333 q1 = le32_to_cpu(q[1]);
334 } else {
335 q0 = q[0];
336 q1 = q[1];
337 }
338
339 if (q0 == ~q1) {
340 DBGMSG ("SelfID packet 0x%x received", q0);
341 hpsb_selfid_received(host, cpu_to_be32(q0));
342 if (((q0 & 0x3f000000) >> 24) == phyid)
343 DBGMSG ("SelfID for this node is 0x%08x", q0);
344 } else {
345 PRINT(KERN_ERR,
346 "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
347 }
348 q += 2;
349 size -= 2;
350 }
351
352 DBGMSG("SelfID complete");
353
354 return;
355}
356
357static void ohci_soft_reset(struct ti_ohci *ohci) {
358 int i;
359
360 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
361
362 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
363 if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
364 break;
365 mdelay(1);
366 }
367 DBGMSG ("Soft reset finished");
368}
369
370
371/* Generate the dma receive prgs and start the context */
372static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
373{
374 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
375 int i;
376
377 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
378
379 for (i=0; i<d->num_desc; i++) {
380 u32 c;
381
382 c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
383 if (generate_irq)
384 c |= DMA_CTL_IRQ;
385
386 d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);
387
388 /* End of descriptor list? */
389 if (i + 1 < d->num_desc) {
390 d->prg_cpu[i]->branchAddress =
391 cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
392 } else {
393 d->prg_cpu[i]->branchAddress =
394 cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
395 }
396
397 d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
398 d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
399 }
400
401 d->buf_ind = 0;
402 d->buf_offset = 0;
403
404 if (d->type == DMA_CTX_ISO) {
405 /* Clear contextControl */
406 reg_write(ohci, d->ctrlClear, 0xffffffff);
407
408 /* Set bufferFill, isochHeader, multichannel for IR context */
409 reg_write(ohci, d->ctrlSet, 0xd0000000);
410
411 /* Set the context match register to match on all tags */
412 reg_write(ohci, d->ctxtMatch, 0xf0000000);
413
414 /* Clear the multi channel mask high and low registers */
415 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
416 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);
417
418 /* Set up isoRecvIntMask to generate interrupts */
419 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
420 }
421
422 /* Tell the controller where the first AR program is */
423 reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);
424
425 /* Run context */
426 reg_write(ohci, d->ctrlSet, 0x00008000);
427
428 DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
429}
430
431/* Initialize the dma transmit context */
432static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
433{
434 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
435
436 /* Stop the context */
437 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
438
439 d->prg_ind = 0;
440 d->sent_ind = 0;
441 d->free_prgs = d->num_desc;
442 d->branchAddrPtr = NULL;
443 INIT_LIST_HEAD(&d->fifo_list);
444 INIT_LIST_HEAD(&d->pending_list);
445
446 if (d->type == DMA_CTX_ISO) {
447 /* enable interrupts */
448 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
449 }
450
451 DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
452}
453
454/* Count the number of available iso contexts */
455static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
456{
457 int i,ctx=0;
458 u32 tmp;
459
460 reg_write(ohci, reg, 0xffffffff);
461 tmp = reg_read(ohci, reg);
462
463 DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
464
465 /* Count the number of contexts */
466 for (i=0; i<32; i++) {
467 if (tmp & 1) ctx++;
468 tmp >>= 1;
469 }
470 return ctx;
471}
472
473/* Global initialization */
474static void ohci_initialize(struct ti_ohci *ohci)
475{
476 char irq_buf[16];
477 quadlet_t buf;
478 int num_ports, i;
479
480 spin_lock_init(&ohci->phy_reg_lock);
481 spin_lock_init(&ohci->event_lock);
482
483 /* Set some defaults for these undefined bus options */
484 buf = reg_read(ohci, OHCI1394_BusOptions);
485 buf |= 0x60000000; /* Enable CMC and ISC */
486 if (!hpsb_disable_irm)
487 buf |= 0x80000000; /* Enable IRMC */
488 buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
489 buf &= ~0x18000000; /* Disable PMC and BMC */
490 reg_write(ohci, OHCI1394_BusOptions, buf);
491
492 /* Set the bus number */
493 reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);
494
495 /* Enable posted writes */
496 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);
497
498 /* Clear link control register */
499 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
500
501 /* Enable cycle timer and cycle master and set the IRM
502 * contender bit in our self ID packets if appropriate. */
503 reg_write(ohci, OHCI1394_LinkControlSet,
504 OHCI1394_LinkControl_CycleTimerEnable |
505 OHCI1394_LinkControl_CycleMaster);
506 set_phy_reg_mask(ohci, 4, PHY_04_LCTRL |
507 (hpsb_disable_irm ? 0 : PHY_04_CONTENDER));
508
509 /* Set up self-id dma buffer */
510 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);
511
512 /* enable self-id and phys */
513 reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
514 OHCI1394_LinkControl_RcvPhyPkt);
515
516 /* Set the Config ROM mapping register */
517 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);
518
519 /* Now get our max packet size */
520 ohci->max_packet_size =
521 1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
522
523 /* Don't accept phy packets into AR request context */
524 reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
525
526 /* Clear the interrupt mask */
527 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
528 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
529
530 /* Clear the interrupt mask */
531 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
532 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
533
534 /* Initialize AR dma */
535 initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
536 initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);
537
538 /* Initialize AT dma */
539 initialize_dma_trm_ctx(&ohci->at_req_context);
540 initialize_dma_trm_ctx(&ohci->at_resp_context);
541
542 /* Initialize IR Legacy DMA */
543 ohci->ir_legacy_channels = 0;
544 initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
545 DBGMSG("ISO receive legacy context activated");
546
547 /*
548 * Accept AT requests from all nodes. This probably
549 * will have to be controlled from the subsystem
550 * on a per node basis.
551 */
552 reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);
553
554 /* Specify AT retries */
555 reg_write(ohci, OHCI1394_ATRetries,
556 OHCI1394_MAX_AT_REQ_RETRIES |
557 (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
558 (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
559
560 /* We don't want hardware swapping */
561 reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);
562
563 /* Enable interrupts */
564 reg_write(ohci, OHCI1394_IntMaskSet,
565 OHCI1394_unrecoverableError |
566 OHCI1394_masterIntEnable |
567 OHCI1394_busReset |
568 OHCI1394_selfIDComplete |
569 OHCI1394_RSPkt |
570 OHCI1394_RQPkt |
571 OHCI1394_respTxComplete |
572 OHCI1394_reqTxComplete |
573 OHCI1394_isochRx |
574 OHCI1394_isochTx |
575 OHCI1394_cycleInconsistent);
576
577 /* Enable link */
578 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
579
580 buf = reg_read(ohci, OHCI1394_Version);
581#ifndef __sparc__
582 sprintf (irq_buf, "%d", ohci->dev->irq);
583#else
584 sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
585#endif
586 PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
587 "MMIO=[%lx-%lx] Max Packet=[%d]",
588 ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
589 ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
590 pci_resource_start(ohci->dev, 0),
591 pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
592 ohci->max_packet_size);
593
594 /* Check all of our ports to make sure that if anything is
595 * connected, we enable that port. */
596 num_ports = get_phy_reg(ohci, 2) & 0xf;
597 for (i = 0; i < num_ports; i++) {
598 unsigned int status;
599
600 set_phy_reg(ohci, 7, i);
601 status = get_phy_reg(ohci, 8);
602
603 if (status & 0x20)
604 set_phy_reg(ohci, 8, status & ~1);
605 }
606
607 /* Serial EEPROM Sanity check. */
608 if ((ohci->max_packet_size < 512) ||
609 (ohci->max_packet_size > 4096)) {
610 /* Serial EEPROM contents are suspect, set a sane max packet
611 * size and print the raw contents for bug reports if verbose
612 * debug is enabled. */
613#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
614 int i;
615#endif
616
617 PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
618 "attempting to setting max_packet_size to 512 bytes");
619 reg_write(ohci, OHCI1394_BusOptions,
620 (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
621 ohci->max_packet_size = 512;
622#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
623 PRINT(KERN_DEBUG, " EEPROM Present: %d",
624 (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
625 reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);
626
627 for (i = 0;
628 ((i < 1000) &&
629 (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
630 udelay(10);
631
632 for (i = 0; i < 0x20; i++) {
633 reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
634 PRINT(KERN_DEBUG, " EEPROM %02x: %02x", i,
635 (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
636 }
637#endif
638 }
639}
640
641/*
642 * Insert a packet into the DMA FIFO and generate the DMA program.
643 * FIXME: rewrite the program so that it accepts packets crossing
644 *        page boundaries.
645 *        Also check that a single DMA descriptor doesn't cross a
646 *        page boundary.
647 */
648static void insert_packet(struct ti_ohci *ohci,
649 struct dma_trm_ctx *d, struct hpsb_packet *packet)
650{
651 u32 cycleTimer;
652 int idx = d->prg_ind;
653
654 DBGMSG("Inserting packet for node " NODE_BUS_FMT
655 ", tlabel=%d, tcode=0x%x, speed=%d",
656 NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
657 packet->tcode, packet->speed_code);
658
659 d->prg_cpu[idx]->begin.address = 0;
660 d->prg_cpu[idx]->begin.branchAddress = 0;
661
662 if (d->type == DMA_CTX_ASYNC_RESP) {
663 /*
664 * For response packets, we need to put a timeout value in
665 * the 16 lower bits of the status... let's try 1 sec timeout
666 */
667 cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
668 d->prg_cpu[idx]->begin.status = cpu_to_le32(
669 (((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
670 ((cycleTimer&0x01fff000)>>12));
671
672 DBGMSG("cycleTimer: %08x timeStamp: %08x",
673 cycleTimer, d->prg_cpu[idx]->begin.status);
674 } else
675 d->prg_cpu[idx]->begin.status = 0;
676
677 if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {
678
679 if (packet->type == hpsb_raw) {
680 d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
681 d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
682 d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
683 } else {
684 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
685 (packet->header[0] & 0xFFFF);
686
687 if (packet->tcode == TCODE_ISO_DATA) {
688 /* Sending an async stream packet */
689 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
690 } else {
691 /* Sending a normal async request or response */
692 d->prg_cpu[idx]->data[1] =
693 (packet->header[1] & 0xFFFF) |
694 (packet->header[0] & 0xFFFF0000);
695 d->prg_cpu[idx]->data[2] = packet->header[2];
696 d->prg_cpu[idx]->data[3] = packet->header[3];
697 }
698 packet_swab(d->prg_cpu[idx]->data, packet->tcode);
699 }
700
701 if (packet->data_size) { /* block transmit */
702 if (packet->tcode == TCODE_STREAM_DATA){
703 d->prg_cpu[idx]->begin.control =
704 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
705 DMA_CTL_IMMEDIATE | 0x8);
706 } else {
707 d->prg_cpu[idx]->begin.control =
708 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
709 DMA_CTL_IMMEDIATE | 0x10);
710 }
711 d->prg_cpu[idx]->end.control =
712 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
713 DMA_CTL_IRQ |
714 DMA_CTL_BRANCH |
715 packet->data_size);
716 /*
717 * Check that the packet data buffer
718 * does not cross a page boundary.
719 *
720 * XXX Fix this some day. eth1394 seems to trigger
721 * it, but ignoring it doesn't seem to cause a
722 * problem.
723 */
724#if 0
725 if (cross_bound((unsigned long)packet->data,
726 packet->data_size)>0) {
727 /* FIXME: do something about it */
728 PRINT(KERN_ERR,
729 "%s: packet data addr: %p size %Zd bytes "
730 "cross page boundary", __FUNCTION__,
731 packet->data, packet->data_size);
732 }
733#endif
734 d->prg_cpu[idx]->end.address = cpu_to_le32(
735 pci_map_single(ohci->dev, packet->data,
736 packet->data_size,
737 PCI_DMA_TODEVICE));
738 OHCI_DMA_ALLOC("single, block transmit packet");
739
740 d->prg_cpu[idx]->end.branchAddress = 0;
741 d->prg_cpu[idx]->end.status = 0;
742 if (d->branchAddrPtr)
743 *(d->branchAddrPtr) =
744 cpu_to_le32(d->prg_bus[idx] | 0x3);
745 d->branchAddrPtr =
746 &(d->prg_cpu[idx]->end.branchAddress);
747 } else { /* quadlet transmit */
748 if (packet->type == hpsb_raw)
749 d->prg_cpu[idx]->begin.control =
750 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
751 DMA_CTL_IMMEDIATE |
752 DMA_CTL_IRQ |
753 DMA_CTL_BRANCH |
754 (packet->header_size + 4));
755 else
756 d->prg_cpu[idx]->begin.control =
757 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
758 DMA_CTL_IMMEDIATE |
759 DMA_CTL_IRQ |
760 DMA_CTL_BRANCH |
761 packet->header_size);
762
763 if (d->branchAddrPtr)
764 *(d->branchAddrPtr) =
765 cpu_to_le32(d->prg_bus[idx] | 0x2);
766 d->branchAddrPtr =
767 &(d->prg_cpu[idx]->begin.branchAddress);
768 }
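		/* In both cases the low nibble written into the previous
		 * branchAddress is Z, the descriptor count of this block:
		 * 3 when the immediate descriptor is followed by an
		 * OUTPUT_LAST for the payload data, 2 when the immediate
		 * OUTPUT_LAST alone carries the packet. */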
769
770 } else { /* iso packet */
771 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
772 (packet->header[0] & 0xFFFF);
773 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
774 packet_swab(d->prg_cpu[idx]->data, packet->tcode);
775
776 d->prg_cpu[idx]->begin.control =
777 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
778 DMA_CTL_IMMEDIATE | 0x8);
779 d->prg_cpu[idx]->end.control =
780 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
781 DMA_CTL_UPDATE |
782 DMA_CTL_IRQ |
783 DMA_CTL_BRANCH |
784 packet->data_size);
785 d->prg_cpu[idx]->end.address = cpu_to_le32(
786 pci_map_single(ohci->dev, packet->data,
787 packet->data_size, PCI_DMA_TODEVICE));
788 OHCI_DMA_ALLOC("single, iso transmit packet");
789
790 d->prg_cpu[idx]->end.branchAddress = 0;
791 d->prg_cpu[idx]->end.status = 0;
792 DBGMSG("Iso xmit context info: header[%08x %08x]\n"
793 " begin=%08x %08x %08x %08x\n"
794 " %08x %08x %08x %08x\n"
795 " end =%08x %08x %08x %08x",
796 d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
797 d->prg_cpu[idx]->begin.control,
798 d->prg_cpu[idx]->begin.address,
799 d->prg_cpu[idx]->begin.branchAddress,
800 d->prg_cpu[idx]->begin.status,
801 d->prg_cpu[idx]->data[0],
802 d->prg_cpu[idx]->data[1],
803 d->prg_cpu[idx]->data[2],
804 d->prg_cpu[idx]->data[3],
805 d->prg_cpu[idx]->end.control,
806 d->prg_cpu[idx]->end.address,
807 d->prg_cpu[idx]->end.branchAddress,
808 d->prg_cpu[idx]->end.status);
809 if (d->branchAddrPtr)
810 *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
811 d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
812 }
813 d->free_prgs--;
814
815 /* queue the packet in the appropriate context queue */
816 list_add_tail(&packet->driver_list, &d->fifo_list);
817 d->prg_ind = (d->prg_ind + 1) % d->num_desc;
818}
819
820/*
821 * This function fills the FIFO with any pending packets
822 * and runs or wakes up the DMA program if necessary.
823 *
824 * The function MUST be called with the d->lock held.
825 */
826static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
827{
828 struct hpsb_packet *packet, *ptmp;
829 int idx = d->prg_ind;
830 int z = 0;
831
832 /* insert the packets into the dma fifo */
833 list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
834 if (!d->free_prgs)
835 break;
836
837 /* For the first packet only */
838 if (!z)
839 z = (packet->data_size) ? 3 : 2;
840
841 /* Insert the packet */
842 list_del_init(&packet->driver_list);
843 insert_packet(ohci, d, packet);
844 }
845
846	/* Nothing was queued: either no free prgs or no pending packets */
847 if (z == 0)
848 return;
849
850	/* Is the context running? (It should be, unless this is
851	   the first packet to be sent in this context.) */
852 if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
853 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
854
855 DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
856 reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);
857
858 /* Check that the node id is valid, and not 63 */
859 if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
860			PRINT(KERN_ERR, "Running DMA failed because the Node ID is not valid");
861 else
862 reg_write(ohci, d->ctrlSet, 0x8000);
863 } else {
864 /* Wake up the dma context if necessary */
865 if (!(reg_read(ohci, d->ctrlSet) & 0x400))
866 DBGMSG("Waking transmit DMA ctx=%d",d->ctx);
867
868 /* do this always, to avoid race condition */
869 reg_write(ohci, d->ctrlSet, 0x1000);
870 }
871
872 return;
873}
874
875/* Transmission of an async or iso packet */
876static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
877{
878 struct ti_ohci *ohci = host->hostdata;
879 struct dma_trm_ctx *d;
880 unsigned long flags;
881
882 if (packet->data_size > ohci->max_packet_size) {
883 PRINT(KERN_ERR,
884 "Transmit packet size %Zd is too big",
885 packet->data_size);
886 return -EOVERFLOW;
887 }
888
889 /* Decide whether we have an iso, a request, or a response packet */
890 if (packet->type == hpsb_raw)
891 d = &ohci->at_req_context;
892 else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
893 /* The legacy IT DMA context is initialized on first
894 * use. However, the alloc cannot be run from
895 * interrupt context, so we bail out if that is the
896 * case. I don't see anyone sending ISO packets from
897 * interrupt context anyway... */
898
899 if (ohci->it_legacy_context.ohci == NULL) {
900 if (in_interrupt()) {
901 PRINT(KERN_ERR,
902 "legacy IT context cannot be initialized during interrupt");
903 return -EINVAL;
904 }
905
906 if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
907 DMA_CTX_ISO, 0, IT_NUM_DESC,
908 OHCI1394_IsoXmitContextBase) < 0) {
909 PRINT(KERN_ERR,
910 "error initializing legacy IT context");
911 return -ENOMEM;
912 }
913
914 initialize_dma_trm_ctx(&ohci->it_legacy_context);
915 }
916
917 d = &ohci->it_legacy_context;
918 } else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
919 d = &ohci->at_resp_context;
920 else
921 d = &ohci->at_req_context;
922
923 spin_lock_irqsave(&d->lock,flags);
924
925 list_add_tail(&packet->driver_list, &d->pending_list);
926
927 dma_trm_flush(ohci, d);
928
929 spin_unlock_irqrestore(&d->lock,flags);
930
931 return 0;
932}
933
934static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
935{
936 struct ti_ohci *ohci = host->hostdata;
937 int retval = 0;
938 unsigned long flags;
939 int phy_reg;
940
941 switch (cmd) {
942 case RESET_BUS:
943 switch (arg) {
944 case SHORT_RESET:
945 phy_reg = get_phy_reg(ohci, 5);
946 phy_reg |= 0x40;
947 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
948 break;
949 case LONG_RESET:
950 phy_reg = get_phy_reg(ohci, 1);
951 phy_reg |= 0x40;
952 set_phy_reg(ohci, 1, phy_reg); /* set IBR */
953 break;
954 case SHORT_RESET_NO_FORCE_ROOT:
955 phy_reg = get_phy_reg(ohci, 1);
956 if (phy_reg & 0x80) {
957 phy_reg &= ~0x80;
958 set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
959 }
960
961 phy_reg = get_phy_reg(ohci, 5);
962 phy_reg |= 0x40;
963 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
964 break;
965 case LONG_RESET_NO_FORCE_ROOT:
966 phy_reg = get_phy_reg(ohci, 1);
967 phy_reg &= ~0x80;
968 phy_reg |= 0x40;
969 set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
970 break;
971 case SHORT_RESET_FORCE_ROOT:
972 phy_reg = get_phy_reg(ohci, 1);
973 if (!(phy_reg & 0x80)) {
974 phy_reg |= 0x80;
975 set_phy_reg(ohci, 1, phy_reg); /* set RHB */
976 }
977
978 phy_reg = get_phy_reg(ohci, 5);
979 phy_reg |= 0x40;
980 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
981 break;
982 case LONG_RESET_FORCE_ROOT:
983 phy_reg = get_phy_reg(ohci, 1);
984 phy_reg |= 0xc0;
985 set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
986 break;
987 default:
988 retval = -1;
989 }
990 break;
991
992 case GET_CYCLE_COUNTER:
993 retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
994 break;
995
996 case SET_CYCLE_COUNTER:
997 reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
998 break;
999
1000 case SET_BUS_ID:
1001 PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
1002 break;
1003
1004 case ACT_CYCLE_MASTER:
1005 if (arg) {
1006 /* check if we are root and other nodes are present */
1007 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
1008 if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
1009 /*
1010 * enable cycleTimer, cycleMaster
1011 */
1012 DBGMSG("Cycle master enabled");
1013 reg_write(ohci, OHCI1394_LinkControlSet,
1014 OHCI1394_LinkControl_CycleTimerEnable |
1015 OHCI1394_LinkControl_CycleMaster);
1016 }
1017 } else {
1018 /* disable cycleTimer, cycleMaster, cycleSource */
1019 reg_write(ohci, OHCI1394_LinkControlClear,
1020 OHCI1394_LinkControl_CycleTimerEnable |
1021 OHCI1394_LinkControl_CycleMaster |
1022 OHCI1394_LinkControl_CycleSource);
1023 }
1024 break;
1025
1026 case CANCEL_REQUESTS:
1027 DBGMSG("Cancel request received");
1028 dma_trm_reset(&ohci->at_req_context);
1029 dma_trm_reset(&ohci->at_resp_context);
1030 break;
1031
1032 case ISO_LISTEN_CHANNEL:
1033 {
1034 u64 mask;
1035
1036 if (arg<0 || arg>63) {
1037 PRINT(KERN_ERR,
1038 "%s: IS0 listen channel %d is out of range",
1039 __FUNCTION__, arg);
1040 return -EFAULT;
1041 }
1042
1043 mask = (u64)0x1<<arg;
1044
1045 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1046
1047 if (ohci->ISO_channel_usage & mask) {
1048 PRINT(KERN_ERR,
1049 "%s: IS0 listen channel %d is already used",
1050 __FUNCTION__, arg);
1051 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1052 return -EFAULT;
1053 }
1054
1055 ohci->ISO_channel_usage |= mask;
1056 ohci->ir_legacy_channels |= mask;
1057
1058 if (arg>31)
1059 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1060 1<<(arg-32));
1061 else
1062 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1063 1<<arg);
1064
1065 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1066 DBGMSG("Listening enabled on channel %d", arg);
1067 break;
1068 }
1069 case ISO_UNLISTEN_CHANNEL:
1070 {
1071 u64 mask;
1072
1073 if (arg<0 || arg>63) {
1074 PRINT(KERN_ERR,
1075 "%s: IS0 unlisten channel %d is out of range",
1076 __FUNCTION__, arg);
1077 return -EFAULT;
1078 }
1079
1080 mask = (u64)0x1<<arg;
1081
1082 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1083
1084 if (!(ohci->ISO_channel_usage & mask)) {
1085 PRINT(KERN_ERR,
1086 "%s: IS0 unlisten channel %d is not used",
1087 __FUNCTION__, arg);
1088 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1089 return -EFAULT;
1090 }
1091
1092 ohci->ISO_channel_usage &= ~mask;
1093 ohci->ir_legacy_channels &= ~mask;
1094
1095 if (arg>31)
1096 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1097 1<<(arg-32));
1098 else
1099 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1100 1<<arg);
1101
1102 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1103 DBGMSG("Listening disabled on channel %d", arg);
1104 break;
1105 }
1106 default:
1107 PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
1108 cmd);
1109 break;
1110 }
1111 return retval;
1112}
1113
1114/***********************************
1115 * rawiso ISO reception *
1116 ***********************************/
1117
1118/*
1119 We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1120 buffer is split into "blocks" (regions described by one DMA
1121 descriptor). Each block must be one page or less in size, and
1122 must not cross a page boundary.
1123
1124 There is one little wrinkle with buffer-fill mode: a packet that
1125 starts in the final block may wrap around into the first block. But
1126 the user API expects all packets to be contiguous. Our solution is
1127 to keep the very last page of the DMA buffer in reserve - if a
1128 packet spans the gap, we copy its tail into this page.
1129*/
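/*
   With N = recv->nblocks and a one-page buf_stride, the DMA buffer used
   below looks like this:

     | block 0 | block 1 | ... | block N-1 | guard page |
     |<---------------- iso->buf_size ---------------->|

   Only copies of wrapped packet tails ever land in the guard page.
*/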
1130
1131struct ohci_iso_recv {
1132 struct ti_ohci *ohci;
1133
1134 struct ohci1394_iso_tasklet task;
1135 int task_active;
1136
1137 enum { BUFFER_FILL_MODE = 0,
1138 PACKET_PER_BUFFER_MODE = 1 } dma_mode;
1139
1140 /* memory and PCI mapping for the DMA descriptors */
1141 struct dma_prog_region prog;
1142 struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */
1143
1144 /* how many DMA blocks fit in the buffer */
1145 unsigned int nblocks;
1146
1147 /* stride of DMA blocks */
1148 unsigned int buf_stride;
1149
1150 /* number of blocks to batch between interrupts */
1151 int block_irq_interval;
1152
1153 /* block that DMA will finish next */
1154 int block_dma;
1155
1156 /* (buffer-fill only) block that the reader will release next */
1157 int block_reader;
1158
1159 /* (buffer-fill only) bytes of buffer the reader has released,
1160 less than one block */
1161 int released_bytes;
1162
1163 /* (buffer-fill only) buffer offset at which the next packet will appear */
1164 int dma_offset;
1165
1166 /* OHCI DMA context control registers */
1167 u32 ContextControlSet;
1168 u32 ContextControlClear;
1169 u32 CommandPtr;
1170 u32 ContextMatch;
1171};
1172
1173static void ohci_iso_recv_task(unsigned long data);
1174static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1175static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1176static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1177static void ohci_iso_recv_program(struct hpsb_iso *iso);
1178
1179static int ohci_iso_recv_init(struct hpsb_iso *iso)
1180{
1181 struct ti_ohci *ohci = iso->host->hostdata;
1182 struct ohci_iso_recv *recv;
1183 int ctx;
1184 int ret = -ENOMEM;
1185
1186 recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
1187 if (!recv)
1188 return -ENOMEM;
1189
1190 iso->hostdata = recv;
1191 recv->ohci = ohci;
1192 recv->task_active = 0;
1193 dma_prog_region_init(&recv->prog);
1194 recv->block = NULL;
1195
1196	/* use buffer-fill mode, unless packet-per-buffer was requested (or
1197	   irq_interval is 1 under the old ABI); multichannel requires buffer-fill */
1198
1199 if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
1200 iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
1201 recv->dma_mode = PACKET_PER_BUFFER_MODE;
1202 } else {
1203 recv->dma_mode = BUFFER_FILL_MODE;
1204 }
1205
1206 /* set nblocks, buf_stride, block_irq_interval */
1207
1208 if (recv->dma_mode == BUFFER_FILL_MODE) {
1209 recv->buf_stride = PAGE_SIZE;
1210
1211 /* one block per page of data in the DMA buffer, minus the final guard page */
1212 recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
1213 if (recv->nblocks < 3) {
1214 DBGMSG("ohci_iso_recv_init: DMA buffer too small");
1215 goto err;
1216 }
1217
1218 /* iso->irq_interval is in packets - translate that to blocks */
1219 if (iso->irq_interval == 1)
1220 recv->block_irq_interval = 1;
1221 else
1222 recv->block_irq_interval = iso->irq_interval *
1223 ((recv->nblocks+1)/iso->buf_packets);
1224 if (recv->block_irq_interval*4 > recv->nblocks)
1225 recv->block_irq_interval = recv->nblocks/4;
1226 if (recv->block_irq_interval < 1)
1227 recv->block_irq_interval = 1;
1228
1229 } else {
1230 int max_packet_size;
1231
1232 recv->nblocks = iso->buf_packets;
1233 recv->block_irq_interval = iso->irq_interval;
1234 if (recv->block_irq_interval * 4 > iso->buf_packets)
1235 recv->block_irq_interval = iso->buf_packets / 4;
1236 if (recv->block_irq_interval < 1)
1237 recv->block_irq_interval = 1;
1238
1239 /* choose a buffer stride */
1240 /* must be a power of 2, and <= PAGE_SIZE */
1241
1242 max_packet_size = iso->buf_size / iso->buf_packets;
1243
1244 for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
1245 recv->buf_stride *= 2);
1246
1247 if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
1248 recv->buf_stride > PAGE_SIZE) {
1249 /* this shouldn't happen, but anyway... */
1250 DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
1251 goto err;
1252 }
1253 }
1254
1255 recv->block_reader = 0;
1256 recv->released_bytes = 0;
1257 recv->block_dma = 0;
1258 recv->dma_offset = 0;
1259
1260 /* size of DMA program = one descriptor per block */
1261 if (dma_prog_region_alloc(&recv->prog,
1262 sizeof(struct dma_cmd) * recv->nblocks,
1263 recv->ohci->dev))
1264 goto err;
1265
1266 recv->block = (struct dma_cmd*) recv->prog.kvirt;
1267
1268 ohci1394_init_iso_tasklet(&recv->task,
1269 iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
1270 OHCI_ISO_RECEIVE,
1271 ohci_iso_recv_task, (unsigned long) iso);
1272
1273 if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0)
1274 goto err;
1275
1276 recv->task_active = 1;
1277
1278 /* recv context registers are spaced 32 bytes apart */
1279 ctx = recv->task.context;
1280 recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
1281 recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
1282 recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
1283 recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;
1284
1285 if (iso->channel == -1) {
1286 /* clear multi-channel selection mask */
1287 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
1288 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
1289 }
1290
1291 /* write the DMA program */
1292 ohci_iso_recv_program(iso);
1293
1294 DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
1295 " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
1296 recv->dma_mode == BUFFER_FILL_MODE ?
1297 "buffer-fill" : "packet-per-buffer",
1298 iso->buf_size/PAGE_SIZE, iso->buf_size,
1299 recv->nblocks, recv->buf_stride, recv->block_irq_interval);
1300
1301 return 0;
1302
1303err:
1304 ohci_iso_recv_shutdown(iso);
1305 return ret;
1306}
1307
1308static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1309{
1310 struct ohci_iso_recv *recv = iso->hostdata;
1311
1312 /* disable interrupts */
1313 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
1314
1315 /* halt DMA */
1316 ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
1317}
1318
1319static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
1320{
1321 struct ohci_iso_recv *recv = iso->hostdata;
1322
1323 if (recv->task_active) {
1324 ohci_iso_recv_stop(iso);
1325 ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
1326 recv->task_active = 0;
1327 }
1328
1329 dma_prog_region_free(&recv->prog);
1330 kfree(recv);
1331 iso->hostdata = NULL;
1332}
1333
1334/* set up a "gapped" ring buffer DMA program */
1335static void ohci_iso_recv_program(struct hpsb_iso *iso)
1336{
1337 struct ohci_iso_recv *recv = iso->hostdata;
1338 int blk;
1339
1340 /* address of 'branch' field in previous DMA descriptor */
1341 u32 *prev_branch = NULL;
1342
1343 for (blk = 0; blk < recv->nblocks; blk++) {
1344 u32 control;
1345
1346 /* the DMA descriptor */
1347 struct dma_cmd *cmd = &recv->block[blk];
1348
1349 /* offset of the DMA descriptor relative to the DMA prog buffer */
1350 unsigned long prog_offset = blk * sizeof(struct dma_cmd);
1351
1352 /* offset of this packet's data within the DMA buffer */
1353 unsigned long buf_offset = blk * recv->buf_stride;
1354
1355 if (recv->dma_mode == BUFFER_FILL_MODE) {
1356 control = 2 << 28; /* INPUT_MORE */
1357 } else {
1358 control = 3 << 28; /* INPUT_LAST */
1359 }
1360
1361 control |= 8 << 24; /* s = 1, update xferStatus and resCount */
1362
1363 /* interrupt on last block, and at intervals */
1364 if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
1365 control |= 3 << 20; /* want interrupt */
1366 }
1367
1368 control |= 3 << 18; /* enable branch to address */
1369 control |= recv->buf_stride;
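		/* The control word assembled above, field by field:
		 *   bits 31:28  command (2 = INPUT_MORE, 3 = INPUT_LAST)
		 *   bits 27:24  8 = write back xferStatus and resCount
		 *   bits 21:20  3 = interrupt (only on the blocks chosen above)
		 *   bits 19:18  3 = branch to branchAddress when done
		 *   bits 15:0   reqCount, i.e. buf_stride bytes per block
		 */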
1370
1371 cmd->control = cpu_to_le32(control);
1372 cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
1373 cmd->branchAddress = 0; /* filled in on next loop */
1374 cmd->status = cpu_to_le32(recv->buf_stride);
1375
1376 /* link the previous descriptor to this one */
1377 if (prev_branch) {
1378 *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
1379 }
1380
1381 prev_branch = &cmd->branchAddress;
1382 }
1383
1384 /* the final descriptor's branch address and Z should be left at 0 */
1385}
1386
1387/* listen or unlisten to a specific channel (multi-channel mode only) */
1388static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1389{
1390 struct ohci_iso_recv *recv = iso->hostdata;
1391 int reg, i;
1392
1393 if (channel < 32) {
1394 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1395 i = channel;
1396 } else {
1397 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1398 i = channel - 32;
1399 }
1400
1401 reg_write(recv->ohci, reg, (1 << i));
1402
1403 /* issue a dummy read to force all PCI writes to be posted immediately */
1404 mb();
1405 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1406}
1407
1408static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1409{
1410 struct ohci_iso_recv *recv = iso->hostdata;
1411 int i;
1412
1413 for (i = 0; i < 64; i++) {
1414 if (mask & (1ULL << i)) {
1415 if (i < 32)
1416 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1417 else
1418 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1419 } else {
1420 if (i < 32)
1421 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1422 else
1423 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1424 }
1425 }
1426
1427 /* issue a dummy read to force all PCI writes to be posted immediately */
1428 mb();
1429 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1430}
1431
1432static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
1433{
1434 struct ohci_iso_recv *recv = iso->hostdata;
1435 struct ti_ohci *ohci = recv->ohci;
1436 u32 command, contextMatch;
1437
1438 reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
1439 wmb();
1440
1441 /* always keep ISO headers */
1442 command = (1 << 30);
1443
1444 if (recv->dma_mode == BUFFER_FILL_MODE)
1445 command |= (1 << 31);
1446
1447 reg_write(recv->ohci, recv->ContextControlSet, command);
1448
1449 /* match on specified tags */
1450 contextMatch = tag_mask << 28;
1451
1452 if (iso->channel == -1) {
1453 /* enable multichannel reception */
1454 reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
1455 } else {
1456 /* listen on channel */
1457 contextMatch |= iso->channel;
1458 }
1459
1460 if (cycle != -1) {
1461 u32 seconds;
1462
1463 /* enable cycleMatch */
1464 reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));
1465
1466 /* set starting cycle */
1467 cycle &= 0x1FFF;
1468
1469 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
1470 just snarf them from the current time */
1471 seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
1472
1473 /* advance one second to give some extra time for DMA to start */
1474 seconds += 1;
1475
1476 cycle |= (seconds & 3) << 13;
1477
1478 contextMatch |= cycle << 12;
1479 }
1480
1481 if (sync != -1) {
1482 /* set sync flag on first DMA descriptor */
1483 struct dma_cmd *cmd = &recv->block[recv->block_dma];
1484 cmd->control |= cpu_to_le32(DMA_CTL_WAIT);
1485
1486 /* match sync field */
1487 contextMatch |= (sync&0xf)<<8;
1488 }
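	/* ContextMatch as assembled above: bits 31:28 select the accepted
	 * tags, bits 26:12 the optional cycleMatch value (two 'seconds'
	 * bits plus the 13-bit cycle), bits 11:8 the sync field and
	 * bits 5:0 the channel (single-channel mode only). */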
1489
1490 reg_write(recv->ohci, recv->ContextMatch, contextMatch);
1491
1492 /* address of first descriptor block */
1493 command = dma_prog_region_offset_to_bus(&recv->prog,
1494 recv->block_dma * sizeof(struct dma_cmd));
1495 command |= 1; /* Z=1 */
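	/* The low nibble of CommandPtr is Z, the number of 16-byte
	 * descriptors in the first block; each receive block is a single
	 * INPUT_MORE/INPUT_LAST descriptor here, hence Z = 1. */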
1496
1497 reg_write(recv->ohci, recv->CommandPtr, command);
1498
1499 /* enable interrupts */
1500 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);
1501
1502 wmb();
1503
1504 /* run */
1505 reg_write(recv->ohci, recv->ContextControlSet, 0x8000);
1506
1507 /* issue a dummy read of the cycle timer register to force
1508 all PCI writes to be posted immediately */
1509 mb();
1510 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1511
1512 /* check RUN */
1513 if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
1514 PRINT(KERN_ERR,
1515 "Error starting IR DMA (ContextControl 0x%08x)\n",
1516 reg_read(recv->ohci, recv->ContextControlSet));
1517 return -1;
1518 }
1519
1520 return 0;
1521}
1522
1523static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1524{
1525	/* re-use the DMA descriptor for the block by linking
1526	   the previous descriptor to it */
1527
1528 int next_i = block;
1529 int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1530
1531 struct dma_cmd *next = &recv->block[next_i];
1532 struct dma_cmd *prev = &recv->block[prev_i];
1533
1534 /* 'next' becomes the new end of the DMA chain,
1535 so disable branch and enable interrupt */
1536 next->branchAddress = 0;
1537 next->control |= cpu_to_le32(3 << 20);
1538 next->status = cpu_to_le32(recv->buf_stride);
1539
1540 /* link prev to next */
1541 prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1542 sizeof(struct dma_cmd) * next_i)
1543 | 1); /* Z=1 */
1544
1545 /* disable interrupt on previous DMA descriptor, except at intervals */
1546 if ((prev_i % recv->block_irq_interval) == 0) {
1547 prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1548 } else {
1549 prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1550 }
1551 wmb();
1552
1553 /* wake up DMA in case it fell asleep */
1554 reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1555}
1556
1557static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1558 struct hpsb_iso_packet_info *info)
1559{
1560 int len;
1561
1562 /* release the memory where the packet was */
1563 len = info->len;
1564
1565	/* account for the space wasted by padding to a 4-byte boundary */
1566 if (len % 4)
1567 len += 4 - (len % 4);
1568
1569 /* add 8 bytes for the OHCI DMA data format overhead */
1570 len += 8;
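	/* Example: a 13-byte payload is padded to 16 bytes and, together
	 * with the 4-byte header and 4-byte trailer quadlets, consumes
	 * 24 bytes of the DMA buffer. */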
1571
1572 recv->released_bytes += len;
1573
1574 /* have we released enough memory for one block? */
1575 while (recv->released_bytes > recv->buf_stride) {
1576 ohci_iso_recv_release_block(recv, recv->block_reader);
1577 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1578 recv->released_bytes -= recv->buf_stride;
1579 }
1580}
1581
1582static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1583{
1584 struct ohci_iso_recv *recv = iso->hostdata;
1585 if (recv->dma_mode == BUFFER_FILL_MODE) {
1586 ohci_iso_recv_bufferfill_release(recv, info);
1587 } else {
1588 ohci_iso_recv_release_block(recv, info - iso->infos);
1589 }
1590}
1591
1592/* parse all packets from blocks that have been fully received */
1593static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1594{
1595 int wake = 0;
1596 int runaway = 0;
1597 struct ti_ohci *ohci = recv->ohci;
1598
1599 while (1) {
1600 /* we expect the next parsable packet to begin at recv->dma_offset */
1601 /* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */
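		/* In this buffer-fill layout each packet consists of a
		 * 4-byte header quadlet (sy in byte 0, tag/channel in byte 1,
		 * data length in bytes 2-3), the payload padded to a multiple
		 * of four bytes, and a 4-byte trailer whose first two bytes
		 * carry the 13-bit cycle timestamp. */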
1602
1603 unsigned int offset;
1604 unsigned short len, cycle;
1605 unsigned char channel, tag, sy;
1606
1607 unsigned char *p = iso->data_buf.kvirt;
1608
1609 unsigned int this_block = recv->dma_offset/recv->buf_stride;
1610
1611 /* don't loop indefinitely */
1612 if (runaway++ > 100000) {
1613 atomic_inc(&iso->overflows);
1614 PRINT(KERN_ERR,
1615 "IR DMA error - Runaway during buffer parsing!\n");
1616 break;
1617 }
1618
1619 /* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
1620 if (this_block == recv->block_dma)
1621 break;
1622
1623 wake = 1;
1624
1625 /* parse data length, tag, channel, and sy */
1626
1627 /* note: we keep our own local copies of 'len' and 'offset'
1628 so the user can't mess with them by poking in the mmap area */
1629
1630 len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);
1631
1632 if (len > 4096) {
1633 PRINT(KERN_ERR,
1634 "IR DMA error - bogus 'len' value %u\n", len);
1635 }
1636
1637 channel = p[recv->dma_offset+1] & 0x3F;
1638 tag = p[recv->dma_offset+1] >> 6;
1639 sy = p[recv->dma_offset+0] & 0xF;
1640
1641 /* advance to data payload */
1642 recv->dma_offset += 4;
1643
1644 /* check for wrap-around */
1645 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1646 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1647 }
1648
1649 /* dma_offset now points to the first byte of the data payload */
1650 offset = recv->dma_offset;
1651
1652 /* advance to xferStatus/timeStamp */
1653 recv->dma_offset += len;
1654
1655 /* payload is padded to 4 bytes */
1656 if (len % 4) {
1657 recv->dma_offset += 4 - (len%4);
1658 }
1659
1660 /* check for wrap-around */
1661 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1662 /* uh oh, the packet data wraps from the last
1663 to the first DMA block - make the packet
1664 contiguous by copying its "tail" into the
1665 guard page */
1666
1667 int guard_off = recv->buf_stride*recv->nblocks;
1668 int tail_len = len - (guard_off - offset);
1669
1670 if (tail_len > 0 && tail_len < recv->buf_stride) {
1671 memcpy(iso->data_buf.kvirt + guard_off,
1672 iso->data_buf.kvirt,
1673 tail_len);
1674 }
1675
1676 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1677 }
1678
1679 /* parse timestamp */
1680 cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
1681 cycle &= 0x1FFF;
1682
1683 /* advance to next packet */
1684 recv->dma_offset += 4;
1685
1686 /* check for wrap-around */
1687 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1688 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1689 }
1690
1691 hpsb_iso_packet_received(iso, offset, len, cycle, channel, tag, sy);
1692 }
1693
1694 if (wake)
1695 hpsb_iso_wake(iso);
1696}
1697
1698static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1699{
1700 int loop;
1701 struct ti_ohci *ohci = recv->ohci;
1702
1703 /* loop over all blocks */
1704 for (loop = 0; loop < recv->nblocks; loop++) {
1705
1706 /* check block_dma to see if it's done */
1707 struct dma_cmd *im = &recv->block[recv->block_dma];
1708
1709 /* check the DMA descriptor for new writes to xferStatus */
1710 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1711
1712 /* rescount is the number of bytes *remaining to be written* in the block */
1713 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1714
1715 unsigned char event = xferstatus & 0x1F;
1716
1717 if (!event) {
1718 /* nothing has happened to this block yet */
1719 break;
1720 }
1721
1722 if (event != 0x11) {
1723 atomic_inc(&iso->overflows);
1724 PRINT(KERN_ERR,
1725 "IR DMA error - OHCI error code 0x%02x\n", event);
1726 }
1727
1728 if (rescount != 0) {
1729 /* the card is still writing to this block;
1730 we can't touch it until it's done */
1731 break;
1732 }
1733
1734 /* OK, the block is finished... */
1735
1736 /* sync our view of the block */
1737 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1738
1739 /* reset the DMA descriptor */
1740 im->status = recv->buf_stride;
1741
1742 /* advance block_dma */
1743 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1744
1745 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1746 atomic_inc(&iso->overflows);
1747 DBGMSG("ISO reception overflow - "
1748 "ran out of DMA blocks");
1749 }
1750 }
1751
1752 /* parse any packets that have arrived */
1753 ohci_iso_recv_bufferfill_parse(iso, recv);
1754}
1755
1756static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1757{
1758 int count;
1759 int wake = 0;
1760 struct ti_ohci *ohci = recv->ohci;
1761
1762 /* loop over the entire buffer */
1763 for (count = 0; count < recv->nblocks; count++) {
1764 u32 packet_len = 0;
1765
1766 /* pointer to the DMA descriptor */
1767 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1768
1769 /* check the DMA descriptor for new writes to xferStatus */
1770 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1771 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1772
1773 unsigned char event = xferstatus & 0x1F;
1774
1775 if (!event) {
1776 /* this packet hasn't come in yet; we are done for now */
1777 goto out;
1778 }
1779
1780 if (event == 0x11) {
1781 /* packet received successfully! */
1782
1783 /* rescount is the number of bytes *remaining* in the packet buffer,
1784 after the packet was written */
1785 packet_len = recv->buf_stride - rescount;
1786
1787 } else if (event == 0x02) {
1788 PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1789 } else if (event) {
1790 PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1791 }
1792
1793 /* sync our view of the buffer */
1794 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1795
1796 /* record the per-packet info */
1797 {
1798 /* iso header is 8 bytes ahead of the data payload */
1799 unsigned char *hdr;
1800
1801 unsigned int offset;
1802 unsigned short cycle;
1803 unsigned char channel, tag, sy;
1804
1805 offset = iso->pkt_dma * recv->buf_stride;
1806 hdr = iso->data_buf.kvirt + offset;
1807
1808 /* skip iso header */
1809 offset += 8;
1810 packet_len -= 8;
1811
1812 cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1813 channel = hdr[5] & 0x3F;
1814 tag = hdr[5] >> 6;
1815 sy = hdr[4] & 0xF;
1816
1817 hpsb_iso_packet_received(iso, offset, packet_len, cycle, channel, tag, sy);
1818 }
1819
1820 /* reset the DMA descriptor */
1821 il->status = recv->buf_stride;
1822
1823 wake = 1;
1824 recv->block_dma = iso->pkt_dma;
1825 }
1826
1827out:
1828 if (wake)
1829 hpsb_iso_wake(iso);
1830}
1831
1832static void ohci_iso_recv_task(unsigned long data)
1833{
1834 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1835 struct ohci_iso_recv *recv = iso->hostdata;
1836
1837 if (recv->dma_mode == BUFFER_FILL_MODE)
1838 ohci_iso_recv_bufferfill_task(iso, recv);
1839 else
1840 ohci_iso_recv_packetperbuf_task(iso, recv);
1841}
1842
1843/***********************************
1844 * rawiso ISO transmission *
1845 ***********************************/
1846
1847struct ohci_iso_xmit {
1848 struct ti_ohci *ohci;
1849 struct dma_prog_region prog;
1850 struct ohci1394_iso_tasklet task;
1851 int task_active;
1852
1853 u32 ContextControlSet;
1854 u32 ContextControlClear;
1855 u32 CommandPtr;
1856};
1857
1858/* transmission DMA program:
1859 one OUTPUT_MORE_IMMEDIATE for the IT header
1860 one OUTPUT_LAST for the buffer data */
1861
1862struct iso_xmit_cmd {
1863 struct dma_cmd output_more_immediate;
1864 u8 iso_hdr[8];
1865 u32 unused[2];
1866 struct dma_cmd output_last;
1867};
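/* Each iso_xmit_cmd is one 48-byte descriptor block: the
 * OUTPUT_MORE_IMMEDIATE plus its 16 bytes of immediate data (the 8-byte
 * IT header and the unused quadlets) occupy two 16-byte descriptor
 * slots, and output_last is the third - hence the Z = 3 used when
 * chaining and starting this context below. */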
1868
1869static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1870static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1871static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1872static void ohci_iso_xmit_task(unsigned long data);
1873
1874static int ohci_iso_xmit_init(struct hpsb_iso *iso)
1875{
1876 struct ohci_iso_xmit *xmit;
1877 unsigned int prog_size;
1878 int ctx;
1879 int ret = -ENOMEM;
1880
1881 xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
1882 if (!xmit)
1883 return -ENOMEM;
1884
1885 iso->hostdata = xmit;
1886 xmit->ohci = iso->host->hostdata;
1887 xmit->task_active = 0;
1888
1889 dma_prog_region_init(&xmit->prog);
1890
1891 prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
1892
1893 if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
1894 goto err;
1895
1896 ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
1897 ohci_iso_xmit_task, (unsigned long) iso);
1898
1899 if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0)
1900 goto err;
1901
1902 xmit->task_active = 1;
1903
1904 /* xmit context registers are spaced 16 bytes apart */
1905 ctx = xmit->task.context;
1906 xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
1907 xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
1908 xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
1909
1910 return 0;
1911
1912err:
1913 ohci_iso_xmit_shutdown(iso);
1914 return ret;
1915}
1916
1917static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1918{
1919 struct ohci_iso_xmit *xmit = iso->hostdata;
1920 struct ti_ohci *ohci = xmit->ohci;
1921
1922 /* disable interrupts */
1923 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1924
1925 /* halt DMA */
1926 if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1927 /* XXX the DMA context will lock up if you try to send too much data! */
1928 PRINT(KERN_ERR,
1929 "you probably exceeded the OHCI card's bandwidth limit - "
1930 "reload the module and reduce xmit bandwidth");
1931 }
1932}
1933
1934static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1935{
1936 struct ohci_iso_xmit *xmit = iso->hostdata;
1937
1938 if (xmit->task_active) {
1939 ohci_iso_xmit_stop(iso);
1940 ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1941 xmit->task_active = 0;
1942 }
1943
1944 dma_prog_region_free(&xmit->prog);
1945 kfree(xmit);
1946 iso->hostdata = NULL;
1947}
1948
1949static void ohci_iso_xmit_task(unsigned long data)
1950{
1951 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1952 struct ohci_iso_xmit *xmit = iso->hostdata;
1953 struct ti_ohci *ohci = xmit->ohci;
1954 int wake = 0;
1955 int count;
1956
1957 /* check the whole buffer if necessary, starting at pkt_dma */
1958 for (count = 0; count < iso->buf_packets; count++) {
1959 int cycle;
1960
1961 /* DMA descriptor */
1962 struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
1963
1964 /* check for new writes to xferStatus */
1965 u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
1966 u8 event = xferstatus & 0x1F;
1967
1968 if (!event) {
1969 /* packet hasn't been sent yet; we are done for now */
1970 break;
1971 }
1972
1973 if (event != 0x11)
1974 PRINT(KERN_ERR,
1975 "IT DMA error - OHCI error code 0x%02x\n", event);
1976
1977 /* at least one packet went out, so wake up the writer */
1978 wake = 1;
1979
1980 /* parse cycle */
1981 cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
1982
1983 /* tell the subsystem the packet has gone out */
1984 hpsb_iso_packet_sent(iso, cycle, event != 0x11);
1985
1986 /* reset the DMA descriptor for next time */
1987 cmd->output_last.status = 0;
1988 }
1989
1990 if (wake)
1991 hpsb_iso_wake(iso);
1992}
1993
1994static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1995{
1996 struct ohci_iso_xmit *xmit = iso->hostdata;
1997 struct ti_ohci *ohci = xmit->ohci;
1998
1999 int next_i, prev_i;
2000 struct iso_xmit_cmd *next, *prev;
2001
2002 unsigned int offset;
2003 unsigned short len;
2004 unsigned char tag, sy;
2005
2006 /* check that the packet doesn't cross a page boundary
2007 (we could allow this if we added OUTPUT_MORE descriptor support) */
2008 if (cross_bound(info->offset, info->len)) {
2009 PRINT(KERN_ERR,
2010 "rawiso xmit: packet %u crosses a page boundary",
2011 iso->first_packet);
2012 return -EINVAL;
2013 }
2014
2015 offset = info->offset;
2016 len = info->len;
2017 tag = info->tag;
2018 sy = info->sy;
2019
2020 /* sync up the card's view of the buffer */
2021 dma_region_sync_for_device(&iso->data_buf, offset, len);
2022
2023 /* append first_packet to the DMA chain */
2024 /* by linking the previous descriptor to it */
2025 /* (next will become the new end of the DMA chain) */
2026
2027 next_i = iso->first_packet;
2028 prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
2029
2030 next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
2031 prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
2032
2033 /* set up the OUTPUT_MORE_IMMEDIATE descriptor */
2034 memset(next, 0, sizeof(struct iso_xmit_cmd));
2035 next->output_more_immediate.control = cpu_to_le32(0x02000008);
2036
2037 /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
2038
2039 /* tcode = 0xA, and sy */
2040 next->iso_hdr[0] = 0xA0 | (sy & 0xF);
2041
2042 /* tag and channel number */
2043 next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
2044
2045 /* transmission speed */
2046 next->iso_hdr[2] = iso->speed & 0x7;
2047
2048 /* payload size */
2049 next->iso_hdr[6] = len & 0xFF;
2050 next->iso_hdr[7] = len >> 8;
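	/* These eight immediate bytes form the IT packet header: byte 0
	 * carries tcode (0xA) and sy, byte 1 tag and channel, byte 2 the
	 * speed code, and bytes 6-7 the data length (little-endian); the
	 * rest stay zero from the memset above. */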
2051
2052 /* set up the OUTPUT_LAST */
2053 next->output_last.control = cpu_to_le32(1 << 28);
2054 next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
2055 next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
2056 next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
2057 next->output_last.control |= cpu_to_le32(len);
2058
2059 /* payload bus address */
2060 next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
2061
2062 /* leave branchAddress at zero for now */
2063
2064 /* re-write the previous DMA descriptor to chain to this one */
2065
2066 /* set prev branch address to point to next (Z=3) */
2067 prev->output_last.branchAddress = cpu_to_le32(
2068 dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
2069
2070 /* disable interrupt, unless required by the IRQ interval */
2071 if (prev_i % iso->irq_interval) {
2072 prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
2073 } else {
2074 prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
2075 }
2076
2077 wmb();
2078
2079 /* wake DMA in case it is sleeping */
2080 reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
2081
2082 /* issue a dummy read of the cycle timer to force all PCI
2083 writes to be posted immediately */
2084 mb();
2085 reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
2086
2087 return 0;
2088}
2089
2090static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
2091{
2092 struct ohci_iso_xmit *xmit = iso->hostdata;
2093 struct ti_ohci *ohci = xmit->ohci;
2094
2095 /* clear out the control register */
2096 reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
2097 wmb();
2098
2099 /* address and length of first descriptor block (Z=3) */
2100 reg_write(xmit->ohci, xmit->CommandPtr,
2101 dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
2102
2103 /* cycle match */
2104 if (cycle != -1) {
2105 u32 start = cycle & 0x1FFF;
2106
2107 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
2108 just snarf them from the current time */
2109 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
2110
2111 /* advance one second to give some extra time for DMA to start */
2112 seconds += 1;
2113
2114 start |= (seconds & 3) << 13;
2115
2116 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
2117 }
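	/* Bit 31 of ContextControl enables cycle matching; bits 30:16 hold
	 * the 15-bit start cycle (two 'seconds' bits plus the 13-bit cycle
	 * number) assembled above. */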
2118
2119 /* enable interrupts */
2120 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
2121
2122 /* run */
2123 reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2124 mb();
2125
2126 /* wait 100 usec to give the card time to go active */
2127 udelay(100);
2128
2129 /* check the RUN bit */
2130 if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2131 PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
2132 reg_read(xmit->ohci, xmit->ContextControlSet));
2133 return -1;
2134 }
2135
2136 return 0;
2137}
2138
2139static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2140{
2141
2142 switch(cmd) {
2143 case XMIT_INIT:
2144 return ohci_iso_xmit_init(iso);
2145 case XMIT_START:
2146 return ohci_iso_xmit_start(iso, arg);
2147 case XMIT_STOP:
2148 ohci_iso_xmit_stop(iso);
2149 return 0;
2150 case XMIT_QUEUE:
2151 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2152 case XMIT_SHUTDOWN:
2153 ohci_iso_xmit_shutdown(iso);
2154 return 0;
2155
2156 case RECV_INIT:
2157 return ohci_iso_recv_init(iso);
2158 case RECV_START: {
2159 int *args = (int*) arg;
2160 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2161 }
2162 case RECV_STOP:
2163 ohci_iso_recv_stop(iso);
2164 return 0;
2165 case RECV_RELEASE:
2166 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2167 return 0;
2168 case RECV_FLUSH:
2169 ohci_iso_recv_task((unsigned long) iso);
2170 return 0;
2171 case RECV_SHUTDOWN:
2172 ohci_iso_recv_shutdown(iso);
2173 return 0;
2174 case RECV_LISTEN_CHANNEL:
2175 ohci_iso_recv_change_channel(iso, arg, 1);
2176 return 0;
2177 case RECV_UNLISTEN_CHANNEL:
2178 ohci_iso_recv_change_channel(iso, arg, 0);
2179 return 0;
2180 case RECV_SET_CHANNEL_MASK:
2181 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2182 return 0;
2183
2184 default:
2185 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2186 cmd);
2187 break;
2188 }
2189 return -EINVAL;
2190}
2191
2192/***************************************
2193 * IEEE-1394 functionality section END *
2194 ***************************************/
2195
2196
2197/********************************************************
2198 * Global stuff (interrupt handler, init/shutdown code) *
2199 ********************************************************/
2200
2201static void dma_trm_reset(struct dma_trm_ctx *d)
2202{
2203 unsigned long flags;
2204 LIST_HEAD(packet_list);
2205 struct ti_ohci *ohci = d->ohci;
2206 struct hpsb_packet *packet, *ptmp;
2207
2208 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
2209
2210 /* Lock the context, reset it and release it. Move the packets
2211 * that were pending in the context to packet_list and free
2212 * them after releasing the lock. */
2213
2214 spin_lock_irqsave(&d->lock, flags);
2215
2216 list_splice(&d->fifo_list, &packet_list);
2217 list_splice(&d->pending_list, &packet_list);
2218 INIT_LIST_HEAD(&d->fifo_list);
2219 INIT_LIST_HEAD(&d->pending_list);
2220
2221 d->branchAddrPtr = NULL;
2222 d->sent_ind = d->prg_ind;
2223 d->free_prgs = d->num_desc;
2224
2225 spin_unlock_irqrestore(&d->lock, flags);
2226
2227 if (list_empty(&packet_list))
2228 return;
2229
2230 PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
2231
2232 /* Now process subsystem callbacks for the packets from this
2233 * context. */
2234 list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
2235 list_del_init(&packet->driver_list);
2236 hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
2237 }
2238}
2239
2240static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2241 quadlet_t rx_event,
2242 quadlet_t tx_event)
2243{
2244 struct ohci1394_iso_tasklet *t;
2245 unsigned long mask;
2246
2247 spin_lock(&ohci->iso_tasklet_list_lock);
2248
2249 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2250 mask = 1 << t->context;
2251
2252 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2253 tasklet_schedule(&t->tasklet);
2254 else if (rx_event & mask)
2255 tasklet_schedule(&t->tasklet);
2256 }
2257
2258 spin_unlock(&ohci->iso_tasklet_list_lock);
2259
2260}
2261
2262static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
2263 struct pt_regs *regs_are_unused)
2264{
2265 quadlet_t event, node_id;
2266 struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2267 struct hpsb_host *host = ohci->host;
2268 int phyid = -1, isroot = 0;
2269 unsigned long flags;
2270
2271 /* Read and clear the interrupt event register. Don't clear
2272 * the busReset event, though. This is done when we get the
2273 * selfIDComplete interrupt. */
2274 spin_lock_irqsave(&ohci->event_lock, flags);
2275 event = reg_read(ohci, OHCI1394_IntEventClear);
2276 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2277 spin_unlock_irqrestore(&ohci->event_lock, flags);
2278
2279 if (!event)
2280 return IRQ_NONE;
2281
2282	/* If event is ~(u32)0, the CardBus card was ejected. In this case
2283 * we just return, and clean up in the ohci1394_pci_remove
2284 * function. */
2285 if (event == ~(u32) 0) {
2286 DBGMSG("Device removed.");
2287 return IRQ_NONE;
2288 }
2289
2290 DBGMSG("IntEvent: %08x", event);
2291
2292 if (event & OHCI1394_unrecoverableError) {
2293 int ctx;
2294 PRINT(KERN_ERR, "Unrecoverable error!");
2295
2296 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2297 PRINT(KERN_ERR, "Async Req Tx Context died: "
2298 "ctrl[%08x] cmdptr[%08x]",
2299 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2300 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2301
2302 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2303 PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2304 "ctrl[%08x] cmdptr[%08x]",
2305 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2306 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2307
2308 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2309 PRINT(KERN_ERR, "Async Req Rcv Context died: "
2310 "ctrl[%08x] cmdptr[%08x]",
2311 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2312 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2313
2314 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2315 PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2316 "ctrl[%08x] cmdptr[%08x]",
2317 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2318 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2319
2320 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2321 if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2322 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2323 "ctrl[%08x] cmdptr[%08x]", ctx,
2324 reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2325 reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2326 }
2327
2328 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2329 if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2330 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2331 "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2332 reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2333 reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2334 reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2335 }
2336
2337 event &= ~OHCI1394_unrecoverableError;
2338 }
2339
2340 if (event & OHCI1394_cycleInconsistent) {
2341 /* We subscribe to the cycleInconsistent event only to
2342 * clear the corresponding event bit... otherwise,
2343 * isochronous cycleMatch DMA won't work. */
2344 DBGMSG("OHCI1394_cycleInconsistent");
2345 event &= ~OHCI1394_cycleInconsistent;
2346 }
2347
2348 if (event & OHCI1394_busReset) {
2349 /* The busReset event bit can't be cleared during the
2350 * selfID phase, so we disable busReset interrupts, to
2351 * avoid burying the cpu in interrupt requests. */
2352 spin_lock_irqsave(&ohci->event_lock, flags);
2353 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2354
2355 if (ohci->check_busreset) {
2356 int loop_count = 0;
2357
2358 udelay(10);
2359
2360 while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2361 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2362
2363 spin_unlock_irqrestore(&ohci->event_lock, flags);
2364 udelay(10);
2365 spin_lock_irqsave(&ohci->event_lock, flags);
2366
2367				/* The loop counter check is to prevent the driver
2368				 * from remaining in this state forever. During the
2369				 * initial bus reset the loop would otherwise run forever
2370				 * and the system would hang until some device is manually
2371				 * plugged into or unplugged from a port! The forced reset
2372				 * seems to solve this problem. This mainly affects nForce2. */
2373 if (loop_count > 10000) {
2374 ohci_devctl(host, RESET_BUS, LONG_RESET);
2375 DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2376 loop_count = 0;
2377 }
2378
2379 loop_count++;
2380 }
2381 }
2382 spin_unlock_irqrestore(&ohci->event_lock, flags);
2383 if (!host->in_bus_reset) {
2384 DBGMSG("irq_handler: Bus reset requested");
2385
2386 /* Subsystem call */
2387 hpsb_bus_reset(ohci->host);
2388 }
2389 event &= ~OHCI1394_busReset;
2390 }
2391
2392 if (event & OHCI1394_reqTxComplete) {
2393 struct dma_trm_ctx *d = &ohci->at_req_context;
2394 DBGMSG("Got reqTxComplete interrupt "
2395 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2396 if (reg_read(ohci, d->ctrlSet) & 0x800)
2397 ohci1394_stop_context(ohci, d->ctrlClear,
2398 "reqTxComplete");
2399 else
2400 dma_trm_tasklet((unsigned long)d);
2401 //tasklet_schedule(&d->task);
2402 event &= ~OHCI1394_reqTxComplete;
2403 }
2404 if (event & OHCI1394_respTxComplete) {
2405 struct dma_trm_ctx *d = &ohci->at_resp_context;
2406 DBGMSG("Got respTxComplete interrupt "
2407 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2408 if (reg_read(ohci, d->ctrlSet) & 0x800)
2409 ohci1394_stop_context(ohci, d->ctrlClear,
2410 "respTxComplete");
2411 else
2412 tasklet_schedule(&d->task);
2413 event &= ~OHCI1394_respTxComplete;
2414 }
2415 if (event & OHCI1394_RQPkt) {
2416 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2417 DBGMSG("Got RQPkt interrupt status=0x%08X",
2418 reg_read(ohci, d->ctrlSet));
2419 if (reg_read(ohci, d->ctrlSet) & 0x800)
2420 ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2421 else
2422 tasklet_schedule(&d->task);
2423 event &= ~OHCI1394_RQPkt;
2424 }
2425 if (event & OHCI1394_RSPkt) {
2426 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2427 DBGMSG("Got RSPkt interrupt status=0x%08X",
2428 reg_read(ohci, d->ctrlSet));
2429 if (reg_read(ohci, d->ctrlSet) & 0x800)
2430 ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2431 else
2432 tasklet_schedule(&d->task);
2433 event &= ~OHCI1394_RSPkt;
2434 }
2435 if (event & OHCI1394_isochRx) {
2436 quadlet_t rx_event;
2437
2438 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2439 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2440 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2441 event &= ~OHCI1394_isochRx;
2442 }
2443 if (event & OHCI1394_isochTx) {
2444 quadlet_t tx_event;
2445
2446 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2447 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2448 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2449 event &= ~OHCI1394_isochTx;
2450 }
2451 if (event & OHCI1394_selfIDComplete) {
2452 if (host->in_bus_reset) {
2453 node_id = reg_read(ohci, OHCI1394_NodeID);
2454
2455 if (!(node_id & 0x80000000)) {
2456 PRINT(KERN_ERR,
2457 "SelfID received, but NodeID invalid "
2458 "(probably new bus reset occurred): %08X",
2459 node_id);
2460 goto selfid_not_valid;
2461 }
2462
2463 phyid = node_id & 0x0000003f;
2464 isroot = (node_id & 0x40000000) != 0;
2465
2466 DBGMSG("SelfID interrupt received "
2467 "(phyid %d, %s)", phyid,
2468 (isroot ? "root" : "not root"));
2469
2470 handle_selfid(ohci, host, phyid, isroot);
2471
2472 /* Clear the bus reset event and re-enable the
2473 * busReset interrupt. */
2474 spin_lock_irqsave(&ohci->event_lock, flags);
2475 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2476 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2477 spin_unlock_irqrestore(&ohci->event_lock, flags);
2478
2479 /* Accept Physical requests from all nodes. */
2480 reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
2481 reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);
2482
2483 /* Turn on phys dma reception.
2484 *
2485 * TODO: Enable some sort of filtering management.
2486 */
2487 if (phys_dma) {
2488 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0xffffffff);
2489 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0xffffffff);
2490 reg_write(ohci,OHCI1394_PhyUpperBound, 0xffff0000);
2491 } else {
2492 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0x00000000);
2493 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0x00000000);
2494 }
2495
2496 DBGMSG("PhyReqFilter=%08x%08x",
2497 reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
2498 reg_read(ohci,OHCI1394_PhyReqFilterLoSet));
2499
2500 hpsb_selfid_complete(host, phyid, isroot);
2501 } else
2502 PRINT(KERN_ERR,
2503 "SelfID received outside of bus reset sequence");
2504
2505selfid_not_valid:
2506 event &= ~OHCI1394_selfIDComplete;
2507 }
2508
2509 /* Make sure we handle everything, just in case we accidentally
2510 * enabled an interrupt that we didn't write a handler for. */
2511 if (event)
2512 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2513 event);
2514
2515 return IRQ_HANDLED;
2516}
2517
2518/* Put the buffer back into the dma context */
2519static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2520{
2521 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2522 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2523
2524 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2525 d->prg_cpu[idx]->branchAddress &= cpu_to_le32(0xfffffff0);
2526 idx = (idx + d->num_desc - 1 ) % d->num_desc;
2527 d->prg_cpu[idx]->branchAddress |= cpu_to_le32(0x00000001);
2528
2529 /* To avoid a race, ensure 1394 interface hardware sees the inserted
2530 * context program descriptors before it sees the wakeup bit set. */
2531 wmb();
2532
2533 /* wake up the dma context if necessary */
2534 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2535 PRINT(KERN_INFO,
2536 "Waking dma ctx=%d ... processing is probably too slow",
2537 d->ctx);
2538 }
2539
2540 /* do this always, to avoid race condition */
2541 reg_write(ohci, d->ctrlSet, 0x1000);
2542}
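The (idx + d->num_desc - 1) % d->num_desc step above walks back to the previous descriptor in the ring so its branch word can be patched to point at the re-inserted buffer. A minimal standalone sketch of that ring-index arithmetic, with illustrative names (ordinary userspace C, not part of the driver):

#include <assert.h>

/* Previous/next indices in a descriptor ring of num_desc entries,
 * mirroring the modular arithmetic used by insert_dma_buffer(). */
static unsigned int ring_next(unsigned int idx, unsigned int num_desc)
{
	return (idx + 1) % num_desc;
}

static unsigned int ring_prev(unsigned int idx, unsigned int num_desc)
{
	/* Adding num_desc before subtracting keeps the value non-negative
	 * in unsigned arithmetic, even when idx is 0. */
	return (idx + num_desc - 1) % num_desc;
}

int main(void)
{
	assert(ring_next(3, 4) == 0);	/* wraps to the start of the ring */
	assert(ring_prev(0, 4) == 3);	/* wraps to the end of the ring */
	return 0;
}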
2543
2544#define cond_le32_to_cpu(data, noswap) \
2545 (noswap ? data : le32_to_cpu(data))
2546
2547static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2548 -1, 0, -1, 0, -1, -1, 16, -1};
2549
2550/*
2551 * Determine the length of a packet in the buffer
2552 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2553 */
2554static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
2555 int offset, unsigned char tcode, int noswap)
2556{
2557 int length = -1;
2558
2559 if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2560 length = TCODE_SIZE[tcode];
2561 if (length == 0) {
2562 if (offset + 12 >= d->buf_size) {
2563 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2564 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2565 } else {
2566 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2567 }
2568 length += 20;
2569 }
2570 } else if (d->type == DMA_CTX_ISO) {
2571 /* Assumption: buffer fill mode with header/trailer */
2572 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2573 }
2574
2575 if (length > 0 && length % 4)
2576 length += 4 - (length % 4);
2577
2578 return length;
2579}
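packet_length() reports every packet rounded up to a quadlet (4-byte) boundary. A small userspace sketch of just that final round-up step, as a sanity check (the standalone form and names are illustrative only):

#include <assert.h>

/* Round a byte count up to the next multiple of 4, exactly like the
 * "length += 4 - (length % 4)" step at the end of packet_length(). */
static int pad_to_quadlet(int length)
{
	if (length > 0 && length % 4)
		length += 4 - (length % 4);
	return length;
}

int main(void)
{
	assert(pad_to_quadlet(20) == 20);	/* already quadlet aligned */
	assert(pad_to_quadlet(21) == 24);	/* 1 data byte -> 3 pad bytes */
	assert(pad_to_quadlet(-1) == -1);	/* error lengths pass through */
	return 0;
}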
2580
2581/* Tasklet that processes dma receive buffers */
2582static void dma_rcv_tasklet (unsigned long data)
2583{
2584 struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2585 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2586 unsigned int split_left, idx, offset, rescount;
2587 unsigned char tcode;
2588 int length, bytes_left, ack;
2589 unsigned long flags;
2590 quadlet_t *buf_ptr;
2591 char *split_ptr;
2592 char msg[256];
2593
2594 spin_lock_irqsave(&d->lock, flags);
2595
2596 idx = d->buf_ind;
2597 offset = d->buf_offset;
2598 buf_ptr = d->buf_cpu[idx] + offset/4;
2599
2600 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2601 bytes_left = d->buf_size - rescount - offset;
2602
2603 while (bytes_left > 0) {
2604 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2605
2606 /* packet_length() will return < 4 for an error */
2607 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2608
2609 if (length < 4) { /* something is wrong */
2610 sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2611 tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2612 d->ctx, length);
2613 ohci1394_stop_context(ohci, d->ctrlClear, msg);
2614 spin_unlock_irqrestore(&d->lock, flags);
2615 return;
2616 }
2617
2618 /* The first case is where we have a packet that crosses
2619 * over more than one descriptor. The next case is where
2620 * it's all in the first descriptor. */
2621 if ((offset + length) > d->buf_size) {
2622 DBGMSG("Split packet rcv'd");
2623 if (length > d->split_buf_size) {
2624 ohci1394_stop_context(ohci, d->ctrlClear,
2625 "Split packet size exceeded");
2626 d->buf_ind = idx;
2627 d->buf_offset = offset;
2628 spin_unlock_irqrestore(&d->lock, flags);
2629 return;
2630 }
2631
2632 if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2633 == d->buf_size) {
2634 /* The other part of the packet has not been
2635 * written yet. This should never happen; if it
2636 * does, we will pick it up on the next call. */
2637 PRINT(KERN_INFO,
2638 "Got only half a packet!");
2639 d->buf_ind = idx;
2640 d->buf_offset = offset;
2641 spin_unlock_irqrestore(&d->lock, flags);
2642 return;
2643 }
2644
2645 split_left = length;
2646 split_ptr = (char *)d->spb;
2647 memcpy(split_ptr,buf_ptr,d->buf_size-offset);
2648 split_left -= d->buf_size-offset;
2649 split_ptr += d->buf_size-offset;
2650 insert_dma_buffer(d, idx);
2651 idx = (idx+1) % d->num_desc;
2652 buf_ptr = d->buf_cpu[idx];
2653 offset=0;
2654
2655 while (split_left >= d->buf_size) {
2656 memcpy(split_ptr,buf_ptr,d->buf_size);
2657 split_ptr += d->buf_size;
2658 split_left -= d->buf_size;
2659 insert_dma_buffer(d, idx);
2660 idx = (idx+1) % d->num_desc;
2661 buf_ptr = d->buf_cpu[idx];
2662 }
2663
2664 if (split_left > 0) {
2665 memcpy(split_ptr, buf_ptr, split_left);
2666 offset = split_left;
2667 buf_ptr += offset/4;
2668 }
2669 } else {
2670 DBGMSG("Single packet rcv'd");
2671 memcpy(d->spb, buf_ptr, length);
2672 offset += length;
2673 buf_ptr += length/4;
2674 if (offset==d->buf_size) {
2675 insert_dma_buffer(d, idx);
2676 idx = (idx+1) % d->num_desc;
2677 buf_ptr = d->buf_cpu[idx];
2678 offset=0;
2679 }
2680 }
2681
2682 /* We get one phy packet in the async receive context for
2683 * each bus reset. We always ignore it. */
2684 if (tcode != OHCI1394_TCODE_PHY) {
2685 if (!ohci->no_swap_incoming)
2686 packet_swab(d->spb, tcode);
2687 DBGMSG("Packet received from node"
2688 " %d ack=0x%02X spd=%d tcode=0x%X"
2689 " length=%d ctx=%d tlabel=%d",
2690 (d->spb[1]>>16)&0x3f,
2691 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2692 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2693 tcode, length, d->ctx,
2694 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>10)&0x3f);
2695
2696 ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2697 == 0x11) ? 1 : 0;
2698
2699 hpsb_packet_received(ohci->host, d->spb,
2700 length-4, ack);
2701 }
2702#ifdef OHCI1394_DEBUG
2703 else
2704 PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
2705 d->ctx);
2706#endif
2707
2708 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2709
2710 bytes_left = d->buf_size - rescount - offset;
2711
2712 }
2713
2714 d->buf_ind = idx;
2715 d->buf_offset = offset;
2716
2717 spin_unlock_irqrestore(&d->lock, flags);
2718}
2719
2720/* Bottom half that processes sent packets */
2721static void dma_trm_tasklet (unsigned long data)
2722{
2723 struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2724 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2725 struct hpsb_packet *packet, *ptmp;
2726 unsigned long flags;
2727 u32 status, ack;
2728 size_t datasize;
2729
2730 spin_lock_irqsave(&d->lock, flags);
2731
2732 list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2733 datasize = packet->data_size;
2734 if (datasize && packet->type != hpsb_raw)
2735 status = le32_to_cpu(
2736 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2737 else
2738 status = le32_to_cpu(
2739 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2740
2741 if (status == 0)
2742 /* this packet hasn't been sent yet*/
2743 break;
2744
2745#ifdef OHCI1394_DEBUG
2746 if (datasize)
2747 if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2748 DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2749 "ack=0x%X spd=%d dataLength=%d ctx=%d",
2750 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2751 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2752 status&0x1f, (status>>5)&0x3,
2753 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2754 d->ctx);
2755 else
2756 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2757 "0x%02X ack=0x%X spd=%d dataLength=%d ctx=%d",
2758 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2759 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2760 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2761 status&0x1f, (status>>5)&0x3,
2762 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2763 d->ctx);
2764 else
2765 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2766 "0x%02X ack=0x%X spd=%d data=0x%08X ctx=%d",
2767 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2768 >>16)&0x3f,
2769 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2770 >>4)&0xf,
2771 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2772 >>10)&0x3f,
2773 status&0x1f, (status>>5)&0x3,
2774 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2775 d->ctx);
2776#endif
2777
2778 if (status & 0x10) {
2779 ack = status & 0xf;
2780 } else {
2781 switch (status & 0x1f) {
2782 case EVT_NO_STATUS: /* that should never happen */
2783 case EVT_RESERVED_A: /* that should never happen */
2784 case EVT_LONG_PACKET: /* that should never happen */
2785 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2786 ack = ACKX_SEND_ERROR;
2787 break;
2788 case EVT_MISSING_ACK:
2789 ack = ACKX_TIMEOUT;
2790 break;
2791 case EVT_UNDERRUN:
2792 ack = ACKX_SEND_ERROR;
2793 break;
2794 case EVT_OVERRUN: /* that should never happen */
2795 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2796 ack = ACKX_SEND_ERROR;
2797 break;
2798 case EVT_DESCRIPTOR_READ:
2799 case EVT_DATA_READ:
2800 case EVT_DATA_WRITE:
2801 ack = ACKX_SEND_ERROR;
2802 break;
2803 case EVT_BUS_RESET: /* that should never happen */
2804 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2805 ack = ACKX_SEND_ERROR;
2806 break;
2807 case EVT_TIMEOUT:
2808 ack = ACKX_TIMEOUT;
2809 break;
2810 case EVT_TCODE_ERR:
2811 ack = ACKX_SEND_ERROR;
2812 break;
2813 case EVT_RESERVED_B: /* that should never happen */
2814 case EVT_RESERVED_C: /* that should never happen */
2815 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2816 ack = ACKX_SEND_ERROR;
2817 break;
2818 case EVT_UNKNOWN:
2819 case EVT_FLUSHED:
2820 ack = ACKX_SEND_ERROR;
2821 break;
2822 default:
2823 PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2824 ack = ACKX_SEND_ERROR;
2825 BUG();
2826 }
2827 }
2828
2829 list_del_init(&packet->driver_list);
2830 hpsb_packet_sent(ohci->host, packet, ack);
2831
2832 if (datasize) {
2833 pci_unmap_single(ohci->dev,
2834 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
2835 datasize, PCI_DMA_TODEVICE);
2836 OHCI_DMA_FREE("single Xmit data packet");
2837 }
2838
2839 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2840 d->free_prgs++;
2841 }
2842
2843 dma_trm_flush(ohci, d);
2844
2845 spin_unlock_irqrestore(&d->lock, flags);
2846}
2847
2848static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2849{
2850 if (d->ctrlClear) {
2851 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2852
2853 if (d->type == DMA_CTX_ISO) {
2854 /* disable interrupts */
2855 reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2856 ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2857 } else {
2858 tasklet_kill(&d->task);
2859 }
2860 }
2861}
2862
2863
2864static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2865{
2866 int i;
2867 struct ti_ohci *ohci = d->ohci;
2868
2869 if (ohci == NULL)
2870 return;
2871
2872 DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2873
2874 if (d->buf_cpu) {
2875 for (i=0; i<d->num_desc; i++)
2876 if (d->buf_cpu[i] && d->buf_bus[i]) {
2877 pci_free_consistent(
2878 ohci->dev, d->buf_size,
2879 d->buf_cpu[i], d->buf_bus[i]);
2880 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2881 }
2882 kfree(d->buf_cpu);
2883 kfree(d->buf_bus);
2884 }
2885 if (d->prg_cpu) {
2886 for (i=0; i<d->num_desc; i++)
2887 if (d->prg_cpu[i] && d->prg_bus[i]) {
2888 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2889 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2890 }
2891 pci_pool_destroy(d->prg_pool);
2892 OHCI_DMA_FREE("dma_rcv prg pool");
2893 kfree(d->prg_cpu);
2894 kfree(d->prg_bus);
2895 }
2896 kfree(d->spb);
2897
2898 /* Mark this context as freed. */
2899 d->ohci = NULL;
2900}
2901
2902static int
2903alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2904 enum context_type type, int ctx, int num_desc,
2905 int buf_size, int split_buf_size, int context_base)
2906{
2907 int i, len;
2908 static int num_allocs;
2909 static char pool_name[20];
2910
2911 d->ohci = ohci;
2912 d->type = type;
2913 d->ctx = ctx;
2914
2915 d->num_desc = num_desc;
2916 d->buf_size = buf_size;
2917 d->split_buf_size = split_buf_size;
2918
2919 d->ctrlSet = 0;
2920 d->ctrlClear = 0;
2921 d->cmdPtr = 0;
2922
2923 d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_ATOMIC);
2924 d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
2925
2926 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2927 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2928 free_dma_rcv_ctx(d);
2929 return -ENOMEM;
2930 }
2931 memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
2932 memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
2933
2934 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*),
2935 GFP_ATOMIC);
2936 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
2937
2938 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2939 PRINT(KERN_ERR, "Failed to allocate dma prg");
2940 free_dma_rcv_ctx(d);
2941 return -ENOMEM;
2942 }
2943 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
2944 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
2945
2946 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2947
2948 if (d->spb == NULL) {
2949 PRINT(KERN_ERR, "Failed to allocate split buffer");
2950 free_dma_rcv_ctx(d);
2951 return -ENOMEM;
2952 }
2953
2954 len = sprintf(pool_name, "ohci1394_rcv_prg");
2955 sprintf(pool_name+len, "%d", num_allocs);
2956 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2957 sizeof(struct dma_cmd), 4, 0);
2958 if (d->prg_pool == NULL)
2959 {
2960 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2961 free_dma_rcv_ctx(d);
2962 return -ENOMEM;
2963 }
2964 num_allocs++;
2965
2966 OHCI_DMA_ALLOC("dma_rcv prg pool");
2967
2968 for (i=0; i<d->num_desc; i++) {
2969 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
2970 d->buf_size,
2971 d->buf_bus+i);
2972 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
2973
2974 if (d->buf_cpu[i] != NULL) {
2975 memset(d->buf_cpu[i], 0, d->buf_size);
2976 } else {
2977 PRINT(KERN_ERR,
2978 "Failed to allocate dma buffer");
2979 free_dma_rcv_ctx(d);
2980 return -ENOMEM;
2981 }
2982
2983 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
2984 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
2985
2986 if (d->prg_cpu[i] != NULL) {
2987 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
2988 } else {
2989 PRINT(KERN_ERR,
2990 "Failed to allocate dma prg");
2991 free_dma_rcv_ctx(d);
2992 return -ENOMEM;
2993 }
2994 }
2995
2996 spin_lock_init(&d->lock);
2997
2998 if (type == DMA_CTX_ISO) {
2999 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3000 OHCI_ISO_MULTICHANNEL_RECEIVE,
3001 dma_rcv_tasklet, (unsigned long) d);
3002 if (ohci1394_register_iso_tasklet(ohci,
3003 &ohci->ir_legacy_tasklet) < 0) {
3004 PRINT(KERN_ERR, "No IR DMA context available");
3005 free_dma_rcv_ctx(d);
3006 return -EBUSY;
3007 }
3008
3009 /* ohci1394_register_iso_tasklet may have assigned any
3010 * free IR DMA context, so pick up the number it chose */
3011 d->ctx = ohci->ir_legacy_tasklet.context;
3012 d->ctrlSet = OHCI1394_IsoRcvContextControlSet + 32*d->ctx;
3013 d->ctrlClear = OHCI1394_IsoRcvContextControlClear + 32*d->ctx;
3014 d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
3015 d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
3016 } else {
3017 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3018 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3019 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3020
3021 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3022 }
3023
3024 return 0;
3025}
3026
3027static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3028{
3029 int i;
3030 struct ti_ohci *ohci = d->ohci;
3031
3032 if (ohci == NULL)
3033 return;
3034
3035 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3036
3037 if (d->prg_cpu) {
3038 for (i=0; i<d->num_desc; i++)
3039 if (d->prg_cpu[i] && d->prg_bus[i]) {
3040 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3041 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3042 }
3043 pci_pool_destroy(d->prg_pool);
3044 OHCI_DMA_FREE("dma_trm prg pool");
3045 kfree(d->prg_cpu);
3046 kfree(d->prg_bus);
3047 }
3048
3049 /* Mark this context as freed. */
3050 d->ohci = NULL;
3051}
3052
3053static int
3054alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3055 enum context_type type, int ctx, int num_desc,
3056 int context_base)
3057{
3058 int i, len;
3059 static char pool_name[20];
3060 static int num_allocs=0;
3061
3062 d->ohci = ohci;
3063 d->type = type;
3064 d->ctx = ctx;
3065 d->num_desc = num_desc;
3066 d->ctrlSet = 0;
3067 d->ctrlClear = 0;
3068 d->cmdPtr = 0;
3069
3070 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
3071 GFP_KERNEL);
3072 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
3073
3074 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3075 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3076 free_dma_trm_ctx(d);
3077 return -ENOMEM;
3078 }
3079 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
3080 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
3081
3082 len = sprintf(pool_name, "ohci1394_trm_prg");
3083 sprintf(pool_name+len, "%d", num_allocs);
3084 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3085 sizeof(struct at_dma_prg), 4, 0);
3086 if (d->prg_pool == NULL) {
3087 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3088 free_dma_trm_ctx(d);
3089 return -ENOMEM;
3090 }
3091 num_allocs++;
3092
3093 OHCI_DMA_ALLOC("dma_trm prg pool");
3094
3095 for (i = 0; i < d->num_desc; i++) {
3096 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3097 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3098
3099 if (d->prg_cpu[i] != NULL) {
3100 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3101 } else {
3102 PRINT(KERN_ERR,
3103 "Failed to allocate at dma prg");
3104 free_dma_trm_ctx(d);
3105 return -ENOMEM;
3106 }
3107 }
3108
3109 spin_lock_init(&d->lock);
3110
3111 /* initialize tasklet */
3112 if (type == DMA_CTX_ISO) {
3113 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3114 dma_trm_tasklet, (unsigned long) d);
3115 if (ohci1394_register_iso_tasklet(ohci,
3116 &ohci->it_legacy_tasklet) < 0) {
3117 PRINT(KERN_ERR, "No IT DMA context available");
3118 free_dma_trm_ctx(d);
3119 return -EBUSY;
3120 }
3121
3122 /* register_iso_tasklet may have assigned any free IT context */
3123 d->ctx = ohci->it_legacy_tasklet.context;
3124 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3125 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3126 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3127 } else {
3128 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3129 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3130 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3131 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3132 }
3133
3134 return 0;
3135}
3136
3137static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
3138{
3139 struct ti_ohci *ohci = host->hostdata;
3140
3141 reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
3142 reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
3143
3144 memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
3145}
3146
3147
3148static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3149 quadlet_t data, quadlet_t compare)
3150{
3151 struct ti_ohci *ohci = host->hostdata;
3152 int i;
3153
3154 reg_write(ohci, OHCI1394_CSRData, data);
3155 reg_write(ohci, OHCI1394_CSRCompareData, compare);
3156 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3157
3158 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3159 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3160 break;
3161
3162 mdelay(1);
3163 }
3164
3165 return reg_read(ohci, OHCI1394_CSRData);
3166}
3167
3168static struct hpsb_host_driver ohci1394_driver = {
3169 .owner = THIS_MODULE,
3170 .name = OHCI1394_DRIVER_NAME,
3171 .set_hw_config_rom = ohci_set_hw_config_rom,
3172 .transmit_packet = ohci_transmit,
3173 .devctl = ohci_devctl,
3174 .isoctl = ohci_isoctl,
3175 .hw_csr_reg = ohci_hw_csr_reg,
3176};
3177
3178
3179
3180/***********************************
3181 * PCI Driver Interface functions *
3182 ***********************************/
3183
3184#define FAIL(err, fmt, args...) \
3185do { \
3186 PRINT_G(KERN_ERR, fmt , ## args); \
3187 ohci1394_pci_remove(dev); \
3188 return err; \
3189} while (0)
3190
3191static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3192 const struct pci_device_id *ent)
3193{
3194 static int version_printed = 0;
3195
3196 struct hpsb_host *host;
3197 struct ti_ohci *ohci; /* shortcut to currently handled device */
3198 unsigned long ohci_base;
3199
3200 if (version_printed++ == 0)
3201 PRINT_G(KERN_INFO, "%s", version);
3202
3203 if (pci_enable_device(dev))
3204 FAIL(-ENXIO, "Failed to enable OHCI hardware");
3205 pci_set_master(dev);
3206
3207 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3208 if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
3209
3210 ohci = host->hostdata;
3211 ohci->dev = dev;
3212 ohci->host = host;
3213 ohci->init_state = OHCI_INIT_ALLOC_HOST;
3214 host->pdev = dev;
3215 pci_set_drvdata(dev, ohci);
3216
3217 /* We don't want hardware swapping */
3218 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3219
3220 /* Some oddball Apple controllers do not order the selfid
3221 * properly, so we make up for it here. */
3222#ifndef __LITTLE_ENDIAN
3223 /* XXX: Need a better way to check this. I'm wondering if we can
3224 * read the values of the OHCI1394_PCI_HCI_Control and the
3225 * noByteSwapData registers to see if they were not cleared to
3226 * zero. Should this work? Obviously it's not defined what these
3227 * registers will read when they aren't supported. Bleh! */
3228 if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3229 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3230 ohci->no_swap_incoming = 1;
3231 ohci->selfid_swap = 0;
3232 } else
3233 ohci->selfid_swap = 1;
3234#endif
3235
3236
3237#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3238#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3239#endif
3240
3241 /* These chipsets require a bit of extra care when checking after
3242 * a busreset. */
3243 if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3244 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3245 (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
3246 dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3247 ohci->check_busreset = 1;
3248
3249 /* We hardwire the MMIO length, since some CardBus adapters
3250 * fail to report the right length. Anyway, the OHCI spec
3251 * clearly says it's 2 KB, so this shouldn't be a problem. */
3252 ohci_base = pci_resource_start(dev, 0);
3253 if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
3254 PRINT(KERN_WARNING, "Unexpected PCI resource length of %lx!",
3255 pci_resource_len(dev, 0));
3256
3257 /* Seems PCMCIA handles this internally. Not sure why. Seems
3258 * pretty bogus to force a driver to special case this. */
3259#ifndef PCMCIA
3260 if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
3261 FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
3262 ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
3263#endif
3264 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3265
3266 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3267 if (ohci->registers == NULL)
3268 FAIL(-ENXIO, "Failed to remap registers - card not accessible");
3269 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3270 DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3271
3272 /* csr_config rom allocation */
3273 ohci->csr_config_rom_cpu =
3274 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3275 &ohci->csr_config_rom_bus);
3276 OHCI_DMA_ALLOC("consistent csr_config_rom");
3277 if (ohci->csr_config_rom_cpu == NULL)
3278 FAIL(-ENOMEM, "Failed to allocate buffer config rom");
3279 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3280
3281 /* self-id dma buffer allocation */
3282 ohci->selfid_buf_cpu =
3283 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3284 &ohci->selfid_buf_bus);
3285 OHCI_DMA_ALLOC("consistent selfid_buf");
3286
3287 if (ohci->selfid_buf_cpu == NULL)
3288 FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
3289 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3290
3291 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3292 PRINT(KERN_INFO, "SelfID buffer %p is not aligned on an "
3293 "8 KB boundary... may cause problems with some CXD3222 chips",
3294 ohci->selfid_buf_cpu);
3295
3296 /* No self-id errors at startup */
3297 ohci->self_id_errors = 0;
3298
3299 ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3300 /* AR DMA request context allocation */
3301 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3302 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3303 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3304 OHCI1394_AsReqRcvContextBase) < 0)
3305 FAIL(-ENOMEM, "Failed to allocate AR Req context");
3306
3307 /* AR DMA response context allocation */
3308 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3309 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3310 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3311 OHCI1394_AsRspRcvContextBase) < 0)
3312 FAIL(-ENOMEM, "Failed to allocate AR Resp context");
3313
3314 /* AT DMA request context */
3315 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3316 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3317 OHCI1394_AsReqTrContextBase) < 0)
3318 FAIL(-ENOMEM, "Failed to allocate AT Req context");
3319
3320 /* AT DMA response context */
3321 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3322 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3323 OHCI1394_AsRspTrContextBase) < 0)
3324 FAIL(-ENOMEM, "Failed to allocate AT Resp context");
3325
3326 /* Start off with a soft reset, to clear everything to a sane
3327 * state. */
3328 ohci_soft_reset(ohci);
3329
3330 /* Now enable LPS, which we need in order to start accessing
3331 * most of the registers. In fact, on some cards (ALI M5251),
3332 * accessing registers in the SClk domain without LPS enabled
3333 * will lock up the machine. Wait 50 msec to make sure the
3334 * link is fully enabled. */
3335 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3336
3337 /* Disable and clear interrupts */
3338 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3339 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3340
3341 mdelay(50);
3342
3343 /* Determine the number of available IR and IT contexts. */
3344 ohci->nb_iso_rcv_ctx =
3345 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3346 DBGMSG("%d iso receive contexts available",
3347 ohci->nb_iso_rcv_ctx);
3348
3349 ohci->nb_iso_xmit_ctx =
3350 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3351 DBGMSG("%d iso transmit contexts available",
3352 ohci->nb_iso_xmit_ctx);
3353
3354 /* Set the usage bits for non-existent contexts so they can't
3355 * be allocated */
3356 ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3357 ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
3358
3359 INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3360 spin_lock_init(&ohci->iso_tasklet_list_lock);
3361 ohci->ISO_channel_usage = 0;
3362 spin_lock_init(&ohci->IR_channel_lock);
3363
3364 /* Allocate the IR DMA context right here so we don't have
3365 * to do it in the interrupt path - this doesn't waste
3366 * much memory and avoids the juggling that would be needed
3367 * to allocate it in IRQ context. */
3368 if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
3369 DMA_CTX_ISO, 0, IR_NUM_DESC,
3370 IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
3371 OHCI1394_IsoRcvContextBase) < 0) {
3372 FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
3373 }
3374
3375 /* We hopefully don't have to pre-allocate IT DMA like we did
3376 * for IR DMA above. Allocate it on-demand and mark inactive. */
3377 ohci->it_legacy_context.ohci = NULL;
3378
3379 if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
3380 OHCI1394_DRIVER_NAME, ohci))
3381 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
3382
3383 ohci->init_state = OHCI_INIT_HAVE_IRQ;
3384 ohci_initialize(ohci);
3385
3386 /* Set certain csr values */
3387 host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3388 host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3389 host->csr.cyc_clk_acc = 100; /* how do we determine clk accuracy? */
3390 host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3391 host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3392
3393 /* Tell the highlevel this host is ready */
3394 if (hpsb_add_host(host))
3395 FAIL(-ENOMEM, "Failed to register host with highlevel");
3396
3397 ohci->init_state = OHCI_INIT_DONE;
3398
3399 return 0;
3400#undef FAIL
3401}
3402
3403static void ohci1394_pci_remove(struct pci_dev *pdev)
3404{
3405 struct ti_ohci *ohci;
3406 struct device *dev;
3407
3408 ohci = pci_get_drvdata(pdev);
3409 if (!ohci)
3410 return;
3411
3412 dev = get_device(&ohci->host->device);
3413
3414 switch (ohci->init_state) {
3415 case OHCI_INIT_DONE:
3416 stop_dma_rcv_ctx(&ohci->ir_legacy_context);
3417 hpsb_remove_host(ohci->host);
3418
3419 /* Clear out BUS Options */
3420 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3421 reg_write(ohci, OHCI1394_BusOptions,
3422 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3423 0x00ff0000);
3424 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3425
3426 case OHCI_INIT_HAVE_IRQ:
3427 /* Clear interrupt registers */
3428 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3429 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3430 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3431 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3432 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3433 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3434
3435 /* Disable IRM Contender */
3436 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3437
3438 /* Clear link control register */
3439 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3440
3441 /* Let all other nodes know to ignore us */
3442 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3443
3444 /* Soft reset before we start - this disables
3445 * interrupts and clears linkEnable and LPS. */
3446 ohci_soft_reset(ohci);
3447 free_irq(ohci->dev->irq, ohci);
3448
3449 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3450 /* ohci_soft_reset() has already stopped all DMA contexts,
3451 * so we don't need to stop them explicitly here. */
3452 /* Free AR dma */
3453 free_dma_rcv_ctx(&ohci->ar_req_context);
3454 free_dma_rcv_ctx(&ohci->ar_resp_context);
3455
3456 /* Free AT dma */
3457 free_dma_trm_ctx(&ohci->at_req_context);
3458 free_dma_trm_ctx(&ohci->at_resp_context);
3459
3460 /* Free IR legacy dma */
3461 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3462
3463 /* Free IT legacy dma */
3464 free_dma_trm_ctx(&ohci->it_legacy_context);
3468
3469
3470 case OHCI_INIT_HAVE_SELFID_BUFFER:
3471 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3472 ohci->selfid_buf_cpu,
3473 ohci->selfid_buf_bus);
3474 OHCI_DMA_FREE("consistent selfid_buf");
3475
3476 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3477 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3478 ohci->csr_config_rom_cpu,
3479 ohci->csr_config_rom_bus);
3480 OHCI_DMA_FREE("consistent csr_config_rom");
3481
3482 case OHCI_INIT_HAVE_IOMAPPING:
3483 iounmap(ohci->registers);
3484
3485 case OHCI_INIT_HAVE_MEM_REGION:
3486#ifndef PCMCIA
3487 release_mem_region(pci_resource_start(ohci->dev, 0),
3488 OHCI1394_REGISTER_SIZE);
3489#endif
3490
3491#ifdef CONFIG_PPC_PMAC
3492 /* On UniNorth, power down the cable and turn off the chip
3493 * clock when the module is removed to save power on
3494 * laptops. Turning it back ON is done by the arch code when
3495 * pci_enable_device() is called */
3496 {
3497 struct device_node* of_node;
3498
3499 of_node = pci_device_to_OF_node(ohci->dev);
3500 if (of_node) {
3501 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3502 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
3503 }
3504 }
3505#endif /* CONFIG_PPC_PMAC */
3506
3507 case OHCI_INIT_ALLOC_HOST:
3508 pci_set_drvdata(ohci->dev, NULL);
3509 }
3510
3511 if (dev)
3512 put_device(dev);
3513}
3514
3515
3516static int ohci1394_pci_resume (struct pci_dev *pdev)
3517{
3518#ifdef CONFIG_PMAC_PBOOK
3519 {
3520 struct device_node *of_node;
3521
3522 /* Re-enable 1394 */
3523 of_node = pci_device_to_OF_node (pdev);
3524 if (of_node)
3525 pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
3526 }
3527#endif
3528
3529 pci_enable_device(pdev);
3530
3531 return 0;
3532}
3533
3534
3535static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
3536{
3537#ifdef CONFIG_PMAC_PBOOK
3538 {
3539 struct device_node *of_node;
3540
3541 /* Disable 1394 */
3542 of_node = pci_device_to_OF_node (pdev);
3543 if (of_node)
3544 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3545 }
3546#endif
3547
3548 return 0;
3549}
3550
3551
3552#define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
3553
3554static struct pci_device_id ohci1394_pci_tbl[] = {
3555 {
3556 .class = PCI_CLASS_FIREWIRE_OHCI,
3557 .class_mask = PCI_ANY_ID,
3558 .vendor = PCI_ANY_ID,
3559 .device = PCI_ANY_ID,
3560 .subvendor = PCI_ANY_ID,
3561 .subdevice = PCI_ANY_ID,
3562 },
3563 { 0, },
3564};
3565
3566MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3567
3568static struct pci_driver ohci1394_pci_driver = {
3569 .name = OHCI1394_DRIVER_NAME,
3570 .id_table = ohci1394_pci_tbl,
3571 .probe = ohci1394_pci_probe,
3572 .remove = ohci1394_pci_remove,
3573 .resume = ohci1394_pci_resume,
3574 .suspend = ohci1394_pci_suspend,
3575};
3576
3577
3578
3579/***********************************
3580 * OHCI1394 Video Interface *
3581 ***********************************/
3582
3583 /* Essentially the only purpose of this code is to allow other
3584 modules to hook into the ohci interrupt handler */
3585
3586int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3587{
3588 int i=0;
3589
3590 /* stop the channel program if it's still running */
3591 reg_write(ohci, reg, 0x8000);
3592
3593 /* Wait until it effectively stops */
3594 while (reg_read(ohci, reg) & 0x400) {
3595 i++;
3596 if (i>5000) {
3597 PRINT(KERN_ERR,
3598 "Runaway loop while stopping context: %s...", msg ? msg : "");
3599 return 1;
3600 }
3601
3602 mb();
3603 udelay(10);
3604 }
3605 if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3606 return 0;
3607}
3608
3609void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3610 void (*func)(unsigned long), unsigned long data)
3611{
3612 tasklet_init(&tasklet->tasklet, func, data);
3613 tasklet->type = type;
3614 /* We init the tasklet->link field, so we can list_del() it
3615 * without worrying whether it was added to the list or not. */
3616 INIT_LIST_HEAD(&tasklet->link);
3617}
3618
3619int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3620 struct ohci1394_iso_tasklet *tasklet)
3621{
3622 unsigned long flags, *usage;
3623 int n, i, r = -EBUSY;
3624
3625 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3626 n = ohci->nb_iso_xmit_ctx;
3627 usage = &ohci->it_ctx_usage;
3628 }
3629 else {
3630 n = ohci->nb_iso_rcv_ctx;
3631 usage = &ohci->ir_ctx_usage;
3632
3633 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3634 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3635 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3636 return r;
3637 }
3638 }
3639 }
3640
3641 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3642
3643 for (i = 0; i < n; i++)
3644 if (!test_and_set_bit(i, usage)) {
3645 tasklet->context = i;
3646 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3647 r = 0;
3648 break;
3649 }
3650
3651 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3652
3653 return r;
3654}
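Registering an iso tasklet is a first-free-bit search over ir_ctx_usage or it_ctx_usage, made safe by test_and_set_bit() and the iso_tasklet_list_lock. A non-atomic userspace sketch of the same allocation idea, assuming a plain unsigned long bitmap (the kernel helpers supply the atomicity; this is only an illustration):

#include <stdio.h>

/* Claim the first free context out of n in a usage bitmap.
 * Returns the context number, or -1 if every context is busy. */
static int claim_context(unsigned long *usage, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!(*usage & (1UL << i))) {
			*usage |= 1UL << i;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	/* 4 usable contexts, like "it_ctx_usage = ~0 << nb_iso_xmit_ctx". */
	unsigned long usage = ~0UL << 4;

	printf("first:  %d\n", claim_context(&usage, 4));	/* prints 0 */
	printf("second: %d\n", claim_context(&usage, 4));	/* prints 1 */
	return 0;
}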
3655
3656void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3657 struct ohci1394_iso_tasklet *tasklet)
3658{
3659 unsigned long flags;
3660
3661 tasklet_kill(&tasklet->tasklet);
3662
3663 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3664
3665 if (tasklet->type == OHCI_ISO_TRANSMIT)
3666 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3667 else {
3668 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3669
3670 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3671 clear_bit(0, &ohci->ir_multichannel_used);
3672 }
3673 }
3674
3675 list_del(&tasklet->link);
3676
3677 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3678}
3679
3680EXPORT_SYMBOL(ohci1394_stop_context);
3681EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3682EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3683EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3684
3685
3686/***********************************
3687 * General module initialization *
3688 ***********************************/
3689
3690MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3691MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3692MODULE_LICENSE("GPL");
3693
3694static void __exit ohci1394_cleanup (void)
3695{
3696 pci_unregister_driver(&ohci1394_pci_driver);
3697}
3698
3699static int __init ohci1394_init(void)
3700{
3701 return pci_register_driver(&ohci1394_pci_driver);
3702}
3703
3704module_init(ohci1394_init);
3705module_exit(ohci1394_cleanup);
diff --git a/drivers/ieee1394/ohci1394.h b/drivers/ieee1394/ohci1394.h
new file mode 100644
index 000000000000..d1758d409610
--- /dev/null
+++ b/drivers/ieee1394/ohci1394.h
@@ -0,0 +1,456 @@
1/*
2 * ohci1394.h - driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Gord Peters <GordPeters@smarttech.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#ifndef _OHCI1394_H
22#define _OHCI1394_H
23
24#include "ieee1394_types.h"
25#include <asm/io.h>
26
27#define OHCI1394_DRIVER_NAME "ohci1394"
28
29#define OHCI1394_MAX_AT_REQ_RETRIES 0x2
30#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
31#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
32#define OHCI1394_MAX_SELF_ID_ERRORS 16
33
34#define AR_REQ_NUM_DESC 4 /* number of AR req descriptors */
35#define AR_REQ_BUF_SIZE PAGE_SIZE /* size of AR req buffers */
36#define AR_REQ_SPLIT_BUF_SIZE PAGE_SIZE /* split packet buffer */
37
38#define AR_RESP_NUM_DESC 4 /* number of AR resp descriptors */
39#define AR_RESP_BUF_SIZE PAGE_SIZE /* size of AR resp buffers */
40#define AR_RESP_SPLIT_BUF_SIZE PAGE_SIZE /* split packet buffer */
41
42#define IR_NUM_DESC 16 /* number of IR descriptors */
43#define IR_BUF_SIZE PAGE_SIZE /* 4096 bytes/buffer */
44#define IR_SPLIT_BUF_SIZE PAGE_SIZE /* split packet buffer */
45
46#define IT_NUM_DESC 16 /* number of IT descriptors */
47
48#define AT_REQ_NUM_DESC 32 /* number of AT req descriptors */
49#define AT_RESP_NUM_DESC 32 /* number of AT resp descriptors */
50
51#define OHCI_LOOP_COUNT 100 /* Number of loops for reg read waits */
52
53#define OHCI_CONFIG_ROM_LEN 1024 /* Length of the mapped configrom space */
54
55#define OHCI1394_SI_DMA_BUF_SIZE 8192 /* length of the selfid buffer */
56
57/* PCI configuration space addresses */
58#define OHCI1394_PCI_HCI_Control 0x40
59
60struct dma_cmd {
61 u32 control;
62 u32 address;
63 u32 branchAddress;
64 u32 status;
65};
66
67/*
68 * FIXME:
69 * It is important that a single at_dma_prg does not cross a page boundary.
70 * The proper way to ensure this would be to check dynamically as the
71 * programs are inserted into the AT FIFO.
72 */
73struct at_dma_prg {
74 struct dma_cmd begin;
75 quadlet_t data[4];
76 struct dma_cmd end;
77 quadlet_t pad[4]; /* FIXME: quick hack for memory alignment */
78};
79
80/* identify whether a DMA context is asynchronous or isochronous */
81enum context_type { DMA_CTX_ASYNC_REQ, DMA_CTX_ASYNC_RESP, DMA_CTX_ISO };
82
83/* DMA receive context */
84struct dma_rcv_ctx {
85 struct ti_ohci *ohci;
86 enum context_type type;
87 int ctx;
88 unsigned int num_desc;
89
90 unsigned int buf_size;
91 unsigned int split_buf_size;
92
93 /* dma block descriptors */
94 struct dma_cmd **prg_cpu;
95 dma_addr_t *prg_bus;
96 struct pci_pool *prg_pool;
97
98 /* dma buffers */
99 quadlet_t **buf_cpu;
100 dma_addr_t *buf_bus;
101
102 unsigned int buf_ind;
103 unsigned int buf_offset;
104 quadlet_t *spb;
105 spinlock_t lock;
106 struct tasklet_struct task;
107 int ctrlClear;
108 int ctrlSet;
109 int cmdPtr;
110 int ctxtMatch;
111};
112
113/* DMA transmit context */
114struct dma_trm_ctx {
115 struct ti_ohci *ohci;
116 enum context_type type;
117 int ctx;
118 unsigned int num_desc;
119
120 /* dma block descriptors */
121 struct at_dma_prg **prg_cpu;
122 dma_addr_t *prg_bus;
123 struct pci_pool *prg_pool;
124
125 unsigned int prg_ind;
126 unsigned int sent_ind;
127 int free_prgs;
128 quadlet_t *branchAddrPtr;
129
130 /* list of packets inserted in the AT FIFO */
131 struct list_head fifo_list;
132
133 /* list of pending packets to be inserted in the AT FIFO */
134 struct list_head pending_list;
135
136 spinlock_t lock;
137 struct tasklet_struct task;
138 int ctrlClear;
139 int ctrlSet;
140 int cmdPtr;
141};
142
143struct ohci1394_iso_tasklet {
144 struct tasklet_struct tasklet;
145 struct list_head link;
146 int context;
147 enum { OHCI_ISO_TRANSMIT, OHCI_ISO_RECEIVE,
148 OHCI_ISO_MULTICHANNEL_RECEIVE } type;
149};
150
151struct ti_ohci {
152 struct pci_dev *dev;
153
154 enum {
155 OHCI_INIT_ALLOC_HOST,
156 OHCI_INIT_HAVE_MEM_REGION,
157 OHCI_INIT_HAVE_IOMAPPING,
158 OHCI_INIT_HAVE_CONFIG_ROM_BUFFER,
159 OHCI_INIT_HAVE_SELFID_BUFFER,
160 OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE,
161 OHCI_INIT_HAVE_IRQ,
162 OHCI_INIT_DONE,
163 } init_state;
164
165 /* remapped memory spaces */
166 void __iomem *registers;
167
168 /* dma buffer for self-id packets */
169 quadlet_t *selfid_buf_cpu;
170 dma_addr_t selfid_buf_bus;
171
172 /* buffer for csr config rom */
173 quadlet_t *csr_config_rom_cpu;
174 dma_addr_t csr_config_rom_bus;
175 int csr_config_rom_length;
176
177 unsigned int max_packet_size;
178
179 /* async receive */
180 struct dma_rcv_ctx ar_resp_context;
181 struct dma_rcv_ctx ar_req_context;
182
183 /* async transmit */
184 struct dma_trm_ctx at_resp_context;
185 struct dma_trm_ctx at_req_context;
186
187 /* iso receive */
188 int nb_iso_rcv_ctx;
189 unsigned long ir_ctx_usage; /* use test_and_set_bit() for atomicity */
190 unsigned long ir_multichannel_used; /* ditto */
191 spinlock_t IR_channel_lock;
192
193 /* iso receive (legacy API) */
194 u64 ir_legacy_channels; /* note: this differs from ISO_channel_usage;
195 it only accounts for channels listened to
196 by the legacy API, so that we can know when
197 it is safe to free the legacy API context */
198
199 struct dma_rcv_ctx ir_legacy_context;
200 struct ohci1394_iso_tasklet ir_legacy_tasklet;
201
202 /* iso transmit */
203 int nb_iso_xmit_ctx;
204 unsigned long it_ctx_usage; /* use test_and_set_bit() for atomicity */
205
206 /* iso transmit (legacy API) */
207 struct dma_trm_ctx it_legacy_context;
208 struct ohci1394_iso_tasklet it_legacy_tasklet;
209
210 u64 ISO_channel_usage;
211
212 /* IEEE-1394 part follows */
213 struct hpsb_host *host;
214
215 int phyid, isroot;
216
217 spinlock_t phy_reg_lock;
218 spinlock_t event_lock;
219
220 int self_id_errors;
221
222 /* Tasklets for iso receive and transmit, used by video1394,
223 * amdtp and dv1394 */
224
225 struct list_head iso_tasklet_list;
226 spinlock_t iso_tasklet_list_lock;
227
228 /* Swap the selfid buffer? */
229 unsigned int selfid_swap:1;
230 /* Some Apple chipsets seem to swap incoming headers for us */
231 unsigned int no_swap_incoming:1;
232
233 /* Force extra paranoia checking on bus-reset handling */
234 unsigned int check_busreset:1;
235};
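The init_state field is what lets ohci1394_pci_remove() unwind exactly as far as the probe routine got: its switch statement starts at the current state and deliberately falls through every earlier case. A compact sketch of that staged-teardown pattern, with made-up stage names rather than the driver's own:

#include <stdio.h>

enum init_stage { ST_ALLOC, ST_MAPPED, ST_IRQ, ST_DONE };

/* Tear down in reverse order of initialization; each case falls
 * through so a partially initialized device is unwound correctly. */
static void teardown(enum init_stage stage)
{
	switch (stage) {
	case ST_DONE:
		printf("unregister from the upper layer\n");
		/* fall through */
	case ST_IRQ:
		printf("free the interrupt\n");
		/* fall through */
	case ST_MAPPED:
		printf("unmap the registers\n");
		/* fall through */
	case ST_ALLOC:
		printf("free the host structure\n");
	}
}

int main(void)
{
	teardown(ST_IRQ);	/* e.g. probe failed right after request_irq() */
	return 0;
}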
236
237static inline int cross_bound(unsigned long addr, unsigned int size)
238{
239 if (size > PAGE_SIZE)
240 return 1;
241
242 if (addr >> PAGE_SHIFT != (addr + size - 1) >> PAGE_SHIFT)
243 return 1;
244
245 return 0;
246}
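cross_bound() answers whether a buffer of the given size would straddle a page, which matters because an at_dma_prg must stay inside one page (see the FIXME near struct at_dma_prg above). A quick userspace check of the same shift-and-compare trick, assuming 4 KB pages purely for the example:

#include <assert.h>

#define EX_PAGE_SHIFT 12			/* assume 4 KB pages here */
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)

static int cross_bound_model(unsigned long addr, unsigned int size)
{
	if (size > EX_PAGE_SIZE)
		return 1;
	/* Compare the page numbers of the first and last byte. */
	return (addr >> EX_PAGE_SHIFT) != ((addr + size - 1) >> EX_PAGE_SHIFT);
}

int main(void)
{
	assert(cross_bound_model(0x1000, 16) == 0);	/* fits inside one page */
	assert(cross_bound_model(0x1ff8, 16) == 1);	/* straddles 0x2000 */
	return 0;
}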
247
248/*
249 * Register read and write helper functions.
250 */
251static inline void reg_write(const struct ti_ohci *ohci, int offset, u32 data)
252{
253 writel(data, ohci->registers + offset);
254}
255
256static inline u32 reg_read(const struct ti_ohci *ohci, int offset)
257{
258 return readl(ohci->registers + offset);
259}
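Most OHCI control registers come as Set/Clear pairs (HCControlSet/Clear, IntMaskSet/Clear, the per-context ContextControlSet/Clear): writing 1s to the Set offset sets those bits, writing 1s to the Clear offset clears them, and the driver reads the current value back through the Set offset (e.g. reg_read(ohci, d->ctrlSet)). A tiny software model of that convention, with invented names:

#include <assert.h>
#include <stdint.h>

/* Software model of one Set/Clear register pair. */
struct setclr_reg {
	uint32_t value;
};

static void write_set(struct setclr_reg *r, uint32_t bits)   { r->value |= bits; }
static void write_clear(struct setclr_reg *r, uint32_t bits) { r->value &= ~bits; }
static uint32_t read_reg(const struct setclr_reg *r)         { return r->value; }

int main(void)
{
	struct setclr_reg intmask = { 0 };

	write_set(&intmask, 0x00020000);	/* e.g. unmask busReset */
	write_clear(&intmask, 0x00020000);	/* e.g. mask it again during selfID */
	assert(read_reg(&intmask) == 0);
	return 0;
}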
260
261
262/* 2 KB of register space */
263#define OHCI1394_REGISTER_SIZE 0x800
264
265/* Offsets relative to context bases defined below */
266
267#define OHCI1394_ContextControlSet 0x000
268#define OHCI1394_ContextControlClear 0x004
269#define OHCI1394_ContextCommandPtr 0x00C
270
271/* register map */
272#define OHCI1394_Version 0x000
273#define OHCI1394_GUID_ROM 0x004
274#define OHCI1394_ATRetries 0x008
275#define OHCI1394_CSRData 0x00C
276#define OHCI1394_CSRCompareData 0x010
277#define OHCI1394_CSRControl 0x014
278#define OHCI1394_ConfigROMhdr 0x018
279#define OHCI1394_BusID 0x01C
280#define OHCI1394_BusOptions 0x020
281#define OHCI1394_GUIDHi 0x024
282#define OHCI1394_GUIDLo 0x028
283#define OHCI1394_ConfigROMmap 0x034
284#define OHCI1394_PostedWriteAddressLo 0x038
285#define OHCI1394_PostedWriteAddressHi 0x03C
286#define OHCI1394_VendorID 0x040
287#define OHCI1394_HCControlSet 0x050
288#define OHCI1394_HCControlClear 0x054
289#define OHCI1394_HCControl_noByteSwap 0x40000000
290#define OHCI1394_HCControl_programPhyEnable 0x00800000
291#define OHCI1394_HCControl_aPhyEnhanceEnable 0x00400000
292#define OHCI1394_HCControl_LPS 0x00080000
293#define OHCI1394_HCControl_postedWriteEnable 0x00040000
294#define OHCI1394_HCControl_linkEnable 0x00020000
295#define OHCI1394_HCControl_softReset 0x00010000
296#define OHCI1394_SelfIDBuffer 0x064
297#define OHCI1394_SelfIDCount 0x068
298#define OHCI1394_IRMultiChanMaskHiSet 0x070
299#define OHCI1394_IRMultiChanMaskHiClear 0x074
300#define OHCI1394_IRMultiChanMaskLoSet 0x078
301#define OHCI1394_IRMultiChanMaskLoClear 0x07C
302#define OHCI1394_IntEventSet 0x080
303#define OHCI1394_IntEventClear 0x084
304#define OHCI1394_IntMaskSet 0x088
305#define OHCI1394_IntMaskClear 0x08C
306#define OHCI1394_IsoXmitIntEventSet 0x090
307#define OHCI1394_IsoXmitIntEventClear 0x094
308#define OHCI1394_IsoXmitIntMaskSet 0x098
309#define OHCI1394_IsoXmitIntMaskClear 0x09C
310#define OHCI1394_IsoRecvIntEventSet 0x0A0
311#define OHCI1394_IsoRecvIntEventClear 0x0A4
312#define OHCI1394_IsoRecvIntMaskSet 0x0A8
313#define OHCI1394_IsoRecvIntMaskClear 0x0AC
314#define OHCI1394_InitialBandwidthAvailable 0x0B0
315#define OHCI1394_InitialChannelsAvailableHi 0x0B4
316#define OHCI1394_InitialChannelsAvailableLo 0x0B8
317#define OHCI1394_FairnessControl 0x0DC
318#define OHCI1394_LinkControlSet 0x0E0
319#define OHCI1394_LinkControlClear 0x0E4
320#define OHCI1394_LinkControl_RcvSelfID 0x00000200
321#define OHCI1394_LinkControl_RcvPhyPkt 0x00000400
322#define OHCI1394_LinkControl_CycleTimerEnable 0x00100000
323#define OHCI1394_LinkControl_CycleMaster 0x00200000
324#define OHCI1394_LinkControl_CycleSource 0x00400000
325#define OHCI1394_NodeID 0x0E8
326#define OHCI1394_PhyControl 0x0EC
327#define OHCI1394_IsochronousCycleTimer 0x0F0
328#define OHCI1394_AsReqFilterHiSet 0x100
329#define OHCI1394_AsReqFilterHiClear 0x104
330#define OHCI1394_AsReqFilterLoSet 0x108
331#define OHCI1394_AsReqFilterLoClear 0x10C
332#define OHCI1394_PhyReqFilterHiSet 0x110
333#define OHCI1394_PhyReqFilterHiClear 0x114
334#define OHCI1394_PhyReqFilterLoSet 0x118
335#define OHCI1394_PhyReqFilterLoClear 0x11C
336#define OHCI1394_PhyUpperBound 0x120
337
338#define OHCI1394_AsReqTrContextBase 0x180
339#define OHCI1394_AsReqTrContextControlSet 0x180
340#define OHCI1394_AsReqTrContextControlClear 0x184
341#define OHCI1394_AsReqTrCommandPtr 0x18C
342
343#define OHCI1394_AsRspTrContextBase 0x1A0
344#define OHCI1394_AsRspTrContextControlSet 0x1A0
345#define OHCI1394_AsRspTrContextControlClear 0x1A4
346#define OHCI1394_AsRspTrCommandPtr 0x1AC
347
348#define OHCI1394_AsReqRcvContextBase 0x1C0
349#define OHCI1394_AsReqRcvContextControlSet 0x1C0
350#define OHCI1394_AsReqRcvContextControlClear 0x1C4
351#define OHCI1394_AsReqRcvCommandPtr 0x1CC
352
353#define OHCI1394_AsRspRcvContextBase 0x1E0
354#define OHCI1394_AsRspRcvContextControlSet 0x1E0
355#define OHCI1394_AsRspRcvContextControlClear 0x1E4
356#define OHCI1394_AsRspRcvCommandPtr 0x1EC
357
358/* Isochronous transmit registers */
359/* Add (16 * n) for context n */
360#define OHCI1394_IsoXmitContextBase 0x200
361#define OHCI1394_IsoXmitContextControlSet 0x200
362#define OHCI1394_IsoXmitContextControlClear 0x204
363#define OHCI1394_IsoXmitCommandPtr 0x20C
364
365/* Isochronous receive registers */
366/* Add (32 * n) for context n */
367#define OHCI1394_IsoRcvContextBase 0x400
368#define OHCI1394_IsoRcvContextControlSet 0x400
369#define OHCI1394_IsoRcvContextControlClear 0x404
370#define OHCI1394_IsoRcvCommandPtr 0x40C
371#define OHCI1394_IsoRcvContextMatch 0x410
372
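These isochronous context registers are laid out as arrays: each transmit context has a 16-byte stride and each receive context a 32-byte stride, which is why the driver computes e.g. OHCI1394_IsoRcvContextControlSet + 32 * ctx and OHCI1394_IsoXmitContextControlSet + 16 * d->ctx. A small check of those offset calculations against the defines above:

#include <assert.h>

static int iso_xmit_ctrl_set(int ctx) { return 0x200 + 16 * ctx; }	/* IsoXmitContextControlSet */
static int iso_rcv_ctrl_set(int ctx)  { return 0x400 + 32 * ctx; }	/* IsoRcvContextControlSet */
static int iso_rcv_match(int ctx)     { return 0x410 + 32 * ctx; }	/* IsoRcvContextMatch */

int main(void)
{
	assert(iso_xmit_ctrl_set(0) == 0x200);	/* first IT context */
	assert(iso_xmit_ctrl_set(2) == 0x220);	/* third IT context */
	assert(iso_rcv_ctrl_set(1) == 0x420);	/* second IR context */
	assert(iso_rcv_match(1) == 0x430);
	return 0;
}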
373/* Interrupts Mask/Events */
374
375#define OHCI1394_reqTxComplete 0x00000001
376#define OHCI1394_respTxComplete 0x00000002
377#define OHCI1394_ARRQ 0x00000004
378#define OHCI1394_ARRS 0x00000008
379#define OHCI1394_RQPkt 0x00000010
380#define OHCI1394_RSPkt 0x00000020
381#define OHCI1394_isochTx 0x00000040
382#define OHCI1394_isochRx 0x00000080
383#define OHCI1394_postedWriteErr 0x00000100
384#define OHCI1394_lockRespErr 0x00000200
385#define OHCI1394_selfIDComplete 0x00010000
386#define OHCI1394_busReset 0x00020000
387#define OHCI1394_phy 0x00080000
388#define OHCI1394_cycleSynch 0x00100000
389#define OHCI1394_cycle64Seconds 0x00200000
390#define OHCI1394_cycleLost 0x00400000
391#define OHCI1394_cycleInconsistent 0x00800000
392#define OHCI1394_unrecoverableError 0x01000000
393#define OHCI1394_cycleTooLong 0x02000000
394#define OHCI1394_phyRegRcvd 0x04000000
395#define OHCI1394_masterIntEnable 0x80000000
396
397/* DMA Control flags */
398#define DMA_CTL_OUTPUT_MORE 0x00000000
399#define DMA_CTL_OUTPUT_LAST 0x10000000
400#define DMA_CTL_INPUT_MORE 0x20000000
401#define DMA_CTL_INPUT_LAST 0x30000000
402#define DMA_CTL_UPDATE 0x08000000
403#define DMA_CTL_IMMEDIATE 0x02000000
404#define DMA_CTL_IRQ 0x00300000
405#define DMA_CTL_BRANCH 0x000c0000
406#define DMA_CTL_WAIT 0x00030000
407
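The DMA_CTL_* values occupy the command, key, interrupt, branch and wait fields of the first quadlet of a DMA descriptor, leaving the low 16 bits for the request count. The combination below is only a hypothetical illustration of how such a control word is composed, not a line taken from the driver:

#include <assert.h>
#include <stdint.h>

#define EX_DMA_CTL_INPUT_LAST 0x30000000
#define EX_DMA_CTL_UPDATE     0x08000000
#define EX_DMA_CTL_IRQ        0x00300000
#define EX_DMA_CTL_BRANCH     0x000c0000

int main(void)
{
	uint32_t req_count = 4096;	/* e.g. one page-sized receive buffer */
	uint32_t control = EX_DMA_CTL_INPUT_LAST | EX_DMA_CTL_UPDATE |
			   EX_DMA_CTL_IRQ | EX_DMA_CTL_BRANCH | req_count;

	assert((control & 0xffff) == 4096);	/* request count sits in the low half */
	assert((control >> 28) == 0x3);		/* the INPUT_LAST command bits */
	return 0;
}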
408/* OHCI evt_* error types, table 3-2 of the OHCI 1.1 spec. */
409#define EVT_NO_STATUS 0x0 /* No event status */
410#define EVT_RESERVED_A 0x1 /* Reserved, not used !!! */
411#define EVT_LONG_PACKET 0x2 /* The recv data was longer than the buf */
412#define EVT_MISSING_ACK 0x3 /* A subaction gap was detected before an ack
413 arrived, or recv'd ack had a parity error */
414#define EVT_UNDERRUN 0x4 /* Underrun on corresponding FIFO, packet
415 truncated */
416#define EVT_OVERRUN 0x5 /* A recv FIFO overflowed on reception of ISO
417 packet */
418#define EVT_DESCRIPTOR_READ 0x6 /* An unrecoverable error occurred while host was
419 reading a descriptor block */
420#define EVT_DATA_READ 0x7 /* An error occurred while host controller was
421 attempting to read from host memory in the data
422 stage of descriptor processing */
423#define EVT_DATA_WRITE 0x8 /* An error occurred while host controller was
424 attempting to write either during the data stage
425 of descriptor processing, or when processing a single
426 16-bit host memory write */
427#define EVT_BUS_RESET 0x9 /* Identifies a PHY packet in the recv buffer as
428 being a synthesized bus reset packet */
429#define EVT_TIMEOUT 0xa /* Indicates that the asynchronous transmit response
430 packet expired and was not transmitted, or that an
431 IT DMA context experienced a skip processing overflow */
432#define EVT_TCODE_ERR 0xb /* A bad tCode is associated with this packet.
433 The packet was flushed */
434#define EVT_RESERVED_B 0xc /* Reserved, not used !!! */
435#define EVT_RESERVED_C 0xd /* Reserved, not used !!! */
436#define EVT_UNKNOWN 0xe /* An error condition has occurred that cannot be
437 represented by any other event codes defined herein. */
438#define EVT_FLUSHED 0xf /* Sent by the link side of the output FIFO when asynchronous
439 packets are being flushed due to a bus reset. */
440
441#define OHCI1394_TCODE_PHY 0xE
442
443void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet,
444 int type,
445 void (*func)(unsigned long),
446 unsigned long data);
447int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
448 struct ohci1394_iso_tasklet *tasklet);
449void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
450 struct ohci1394_iso_tasklet *tasklet);
451
452/* returns zero if successful, one if DMA context is locked up */
453int ohci1394_stop_context (struct ti_ohci *ohci, int reg, char *msg);
454struct ti_ohci *ohci1394_get_struct(int card_num);
455
456#endif
diff --git a/drivers/ieee1394/oui.db b/drivers/ieee1394/oui.db
new file mode 100644
index 000000000000..592c8a60d01e
--- /dev/null
+++ b/drivers/ieee1394/oui.db
@@ -0,0 +1,7048 @@
1000000 XEROX CORPORATION
2000001 XEROX CORPORATION
3000002 XEROX CORPORATION
4000003 XEROX CORPORATION
5000004 XEROX CORPORATION
6000005 XEROX CORPORATION
7000006 XEROX CORPORATION
8000007 XEROX CORPORATION
9000008 XEROX CORPORATION
10000009 XEROX CORPORATION
1100000A OMRON TATEISI ELECTRONICS CO.
1200000B MATRIX CORPORATION
1300000C CISCO SYSTEMS, INC.
1400000D FIBRONICS LTD.
1500000E FUJITSU LIMITED
1600000F NEXT, INC.
17000010 SYTEK INC.
18000011 NORMEREL SYSTEMES
19000012 INFORMATION TECHNOLOGY LIMITED
20000013 CAMEX
21000014 NETRONIX
22000015 DATAPOINT CORPORATION
23000016 DU PONT PIXEL SYSTEMS.
24000017 TEKELEC
25000018 WEBSTER COMPUTER CORPORATION
26000019 APPLIED DYNAMICS INTERNATIONAL
2700001A ADVANCED MICRO DEVICES
2800001B NOVELL INC.
2900001C BELL TECHNOLOGIES
3000001D CABLETRON SYSTEMS, INC.
3100001E TELSIST INDUSTRIA ELECTRONICA
3200001F Telco Systems, Inc.
33000020 DATAINDUSTRIER DIAB AB
34000021 SUREMAN COMP. & COMMUN. CORP.
35000022 VISUAL TECHNOLOGY INC.
36000023 ABB INDUSTRIAL SYSTEMS AB
37000024 CONNECT AS
38000025 RAMTEK CORP.
39000026 SHA-KEN CO., LTD.
40000027 JAPAN RADIO COMPANY
41000028 PRODIGY SYSTEMS CORPORATION
42000029 IMC NETWORKS CORP.
4300002A TRW - SEDD/INP
4400002B CRISP AUTOMATION, INC
4500002C AUTOTOTE LIMITED
4600002D CHROMATICS INC
4700002E SOCIETE EVIRA
4800002F TIMEPLEX INC.
49000030 VG LABORATORY SYSTEMS LTD
50000031 QPSX COMMUNICATIONS PTY LTD
51000032 Marconi plc
52000033 EGAN MACHINERY COMPANY
53000034 NETWORK RESOURCES CORPORATION
54000035 SPECTRAGRAPHICS CORPORATION
55000036 ATARI CORPORATION
56000037 OXFORD METRICS LIMITED
57000038 CSS LABS
58000039 TOSHIBA CORPORATION
5900003A CHYRON CORPORATION
6000003B i Controls, Inc.
6100003C AUSPEX SYSTEMS INC.
6200003D UNISYS
6300003E SIMPACT
6400003F SYNTREX, INC.
65000040 APPLICON, INC.
66000041 ICE CORPORATION
67000042 METIER MANAGEMENT SYSTEMS LTD.
68000043 MICRO TECHNOLOGY
69000044 CASTELLE CORPORATION
70000045 FORD AEROSPACE & COMM. CORP.
71000046 OLIVETTI NORTH AMERICA
72000047 NICOLET INSTRUMENTS CORP.
73000048 SEIKO EPSON CORPORATION
74000049 APRICOT COMPUTERS, LTD
7500004A ADC CODENOLL TECHNOLOGY CORP.
7600004B ICL DATA OY
7700004C NEC CORPORATION
7800004D DCI CORPORATION
7900004E AMPEX CORPORATION
8000004F LOGICRAFT, INC.
81000050 RADISYS CORPORATION
82000051 HOB ELECTRONIC GMBH & CO. KG
83000052 Intrusion.com, Inc.
84000053 COMPUCORP
85000054 MODICON, INC.
86000055 COMMISSARIAT A L`ENERGIE ATOM.
87000056 DR. B. STRUCK
88000057 SCITEX CORPORATION LTD.
89000058 RACORE COMPUTER PRODUCTS INC.
90000059 HELLIGE GMBH
9100005A SysKonnect GmbH
9200005B ELTEC ELEKTRONIK AG
9300005C TELEMATICS INTERNATIONAL INC.
9400005D CS TELECOM
9500005E USC INFORMATION SCIENCES INST
9600005F SUMITOMO ELECTRIC IND., LTD.
97000060 KONTRON ELEKTRONIK GMBH
98000061 GATEWAY COMMUNICATIONS
99000062 BULL HN INFORMATION SYSTEMS
100000063 DR.ING.SEUFERT GMBH
101000064 YOKOGAWA DIGITAL COMPUTER CORP
102000065 NETWORK ASSOCIATES, INC.
103000066 TALARIS SYSTEMS, INC.
104000067 SOFT * RITE, INC.
105000068 ROSEMOUNT CONTROLS
106000069 CONCORD COMMUNICATIONS INC
10700006A COMPUTER CONSOLES INC.
10800006B SILICON GRAPHICS INC./MIPS
10900006D CRAY COMMUNICATIONS, LTD.
11000006E ARTISOFT, INC.
11100006F Madge Ltd.
112000070 HCL LIMITED
113000071 ADRA SYSTEMS INC.
114000072 MINIWARE TECHNOLOGY
115000073 SIECOR CORPORATION
116000074 RICOH COMPANY LTD.
117000075 Nortel Networks
118000076 ABEKAS VIDEO SYSTEM
119000077 INTERPHASE CORPORATION
120000078 LABTAM LIMITED
121000079 NETWORTH INCORPORATED
12200007A DANA COMPUTER INC.
12300007B RESEARCH MACHINES
12400007C AMPERE INCORPORATED
12500007D SUN MICROSYSTEMS, INC.
12600007E CLUSTRIX CORPORATION
12700007F LINOTYPE-HELL AG
128000080 CRAY COMMUNICATIONS A/S
129000081 BAY NETWORKS
130000082 LECTRA SYSTEMES SA
131000083 TADPOLE TECHNOLOGY PLC
132000084 SUPERNET
133000085 CANON INC.
134000086 MEGAHERTZ CORPORATION
135000087 HITACHI, LTD.
136000088 COMPUTER NETWORK TECH. CORP.
137000089 CAYMAN SYSTEMS INC.
13800008A DATAHOUSE INFORMATION SYSTEMS
13900008B INFOTRON
14000008C Alloy Computer Products (Australia) Pty Ltd
14100008D VERDIX CORPORATION
14200008E SOLBOURNE COMPUTER, INC.
14300008F RAYTHEON COMPANY
144000090 MICROCOM
145000091 ANRITSU CORPORATION
146000092 COGENT DATA TECHNOLOGIES
147000093 PROTEON INC.
148000094 ASANTE TECHNOLOGIES
149000095 SONY TEKTRONIX CORP.
150000096 MARCONI ELECTRONICS LTD.
151000097 EPOCH SYSTEMS
152000098 CROSSCOMM CORPORATION
153000099 MTX, INC.
15400009A RC COMPUTER A/S
15500009B INFORMATION INTERNATIONAL, INC
15600009C ROLM MIL-SPEC COMPUTERS
15700009D LOCUS COMPUTING CORPORATION
15800009E MARLI S.A.
15900009F AMERISTAR TECHNOLOGIES INC.
1600000A0 TOKYO SANYO ELECTRIC CO. LTD.
1610000A1 MARQUETTE ELECTRIC CO.
1620000A2 BAY NETWORKS
1630000A3 NETWORK APPLICATION TECHNOLOGY
1640000A4 ACORN COMPUTERS LIMITED
1650000A5 COMPATIBLE SYSTEMS CORP.
1660000A6 NETWORK GENERAL CORPORATION
1670000A7 NETWORK COMPUTING DEVICES INC.
1680000A8 STRATUS COMPUTER INC.
1690000A9 NETWORK SYSTEMS CORP.
1700000AA XEROX CORPORATION
1710000AB LOGIC MODELING CORPORATION
1720000AC CONWARE COMPUTER CONSULTING
1730000AD BRUKER INSTRUMENTS INC.
1740000AE DASSAULT ELECTRONIQUE
1750000AF NUCLEAR DATA INSTRUMENTATION
1760000B0 RND-RAD NETWORK DEVICES
1770000B1 ALPHA MICROSYSTEMS INC.
1780000B2 TELEVIDEO SYSTEMS, INC.
1790000B3 CIMLINC INCORPORATED
1800000B4 EDIMAX COMPUTER COMPANY
1810000B5 DATABILITY SOFTWARE SYS. INC.
1820000B6 MICRO-MATIC RESEARCH
1830000B7 DOVE COMPUTER CORPORATION
1840000B8 SEIKOSHA CO., LTD.
1850000B9 MCDONNELL DOUGLAS COMPUTER SYS
1860000BA SIIG, INC.
1870000BB TRI-DATA
1880000BC ALLEN-BRADLEY CO. INC.
1890000BD MITSUBISHI CABLE COMPANY
1900000BE THE NTI GROUP
1910000BF SYMMETRIC COMPUTER SYSTEMS
1920000C0 WESTERN DIGITAL CORPORATION
1930000C1 Madge Ltd.
1940000C2 INFORMATION PRESENTATION TECH.
1950000C3 HARRIS CORP COMPUTER SYS DIV
1960000C4 WATERS DIV. OF MILLIPORE
1970000C5 FARALLON COMPUTING/NETOPIA
1980000C6 EON SYSTEMS
1990000C7 ARIX CORPORATION
2000000C8 ALTOS COMPUTER SYSTEMS
2010000C9 EMULEX CORPORATION
2020000CA APPLITEK
2030000CB COMPU-SHACK ELECTRONIC GMBH
2040000CC DENSAN CO., LTD.
2050000CD Allied Telesyn Research Ltd.
2060000CE MEGADATA CORP.
2070000CF HAYES MICROCOMPUTER PRODUCTS
2080000D0 DEVELCON ELECTRONICS LTD.
2090000D1 ADAPTEC INCORPORATED
2100000D2 SBE, INC.
2110000D3 WANG LABORATORIES INC.
2120000D4 PURE DATA LTD.
2130000D5 MICROGNOSIS INTERNATIONAL
2140000D6 PUNCH LINE HOLDING
2150000D7 DARTMOUTH COLLEGE
2160000D8 NOVELL, INC.
2170000D9 NIPPON TELEGRAPH & TELEPHONE
2180000DA ATEX
2190000DB BRITISH TELECOMMUNICATIONS PLC
2200000DC HAYES MICROCOMPUTER PRODUCTS
2210000DD TCL INCORPORATED
2220000DE CETIA
2230000DF BELL & HOWELL PUB SYS DIV
2240000E0 QUADRAM CORP.
2250000E1 GRID SYSTEMS
2260000E2 ACER TECHNOLOGIES CORP.
2270000E3 INTEGRATED MICRO PRODUCTS LTD
2280000E4 IN2 GROUPE INTERTECHNIQUE
2290000E5 SIGMEX LTD.
2300000E6 APTOR PRODUITS DE COMM INDUST
2310000E7 STAR GATE TECHNOLOGIES
2320000E8 ACCTON TECHNOLOGY CORP.
2330000E9 ISICAD, INC.
2340000EA UPNOD AB
2350000EB MATSUSHITA COMM. IND. CO. LTD.
2360000EC MICROPROCESS
2370000ED APRIL
2380000EE NETWORK DESIGNERS, LTD.
2390000EF KTI
2400000F0 SAMSUNG ELECTRONICS CO., LTD.
2410000F1 MAGNA COMPUTER CORPORATION
2420000F2 SPIDER COMMUNICATIONS
2430000F3 GANDALF DATA LIMITED
2440000F4 ALLIED TELESYN INTERNATIONAL
2450000F5 DIAMOND SALES LIMITED
2460000F6 APPLIED MICROSYSTEMS CORP.
2470000F7 YOUTH KEEP ENTERPRISE CO LTD
2480000F8 DIGITAL EQUIPMENT CORPORATION
2490000F9 QUOTRON SYSTEMS INC.
2500000FA MICROSAGE COMPUTER SYSTEMS INC
2510000FB RECHNER ZUR KOMMUNIKATION
2520000FC MEIKO
2530000FD HIGH LEVEL HARDWARE
2540000FE ANNAPOLIS MICRO SYSTEMS
2550000FF CAMTEC ELECTRONICS LTD.
256000100 EQUIP'TRANS
257000102 3COM CORPORATION
258000103 3COM CORPORATION
259000104 DVICO Co., Ltd.
260000105 BECKHOFF GmbH
261000106 Tews Datentechnik GmbH
262000107 Leiser GmbH
263000108 AVLAB Technology, Inc.
264000109 Nagano Japan Radio Co., Ltd.
26500010A CIS TECHNOLOGY INC.
26600010B Space CyberLink, Inc.
26700010C System Talks Inc.
26800010D CORECO, INC.
26900010E Bri-Link Technologies Co., Ltd
27000010F Nishan Systems, Inc.
271000110 Gotham Networks
272000111 iDigm Inc.
273000112 Shark Multimedia Inc.
274000113 OLYMPUS CORPORATION
275000114 KANDA TSUSHIN KOGYO CO., LTD.
276000115 EXTRATECH CORPORATION
277000116 Netspect Technologies, Inc.
278000117 CANAL +
279000118 EZ Digital Co., Ltd.
280000119 Action Controls Pty. Ltd.
28100011A EEH DataLink GmbH
28200011B Unizone Technologies, Inc.
28300011C Universal Talkware Corporation
28400011D Centillium Communications
28500011E Precidia Technologies, Inc.
28600011F RC Networks, Inc.
287000120 OSCILLOQUARTZ S.A.
288000121 RapidStream Inc.
289000122 Trend Communications, Ltd.
290000123 DIGITAL ELECTRONICS CORP.
291000124 Acer Incorporated
292000125 YAESU MUSEN CO., LTD.
293000126 PAC Labs
294000127 The OPEN Group Limited
295000128 EnjoyWeb, Inc.
296000129 DFI Inc.
29700012A Telematica Sistems Inteligente
29800012B TELENET Co., Ltd.
29900012C Aravox Technologies, Inc.
30000012D Komodo Technology
30100012E PC Partner Ltd.
30200012F Twinhead International Corp
303000130 Extreme Networks
304000131 Detection Systems, Inc.
305000132 Dranetz - BMI
306000133 KYOWA Electronic Instruments C
307000134 SIG Positec Systems AG
308000135 KDC Corp.
309000136 CyberTAN Technology, Inc.
310000137 IT Farm Corporation
311000138 XAVi Technologies Corp.
312000139 Point Multimedia Systems
31300013A SHELCAD COMMUNICATIONS, LTD.
31400013B BNA SYSTEMS
31500013C TIW SYSTEMS
31600013D RiscStation Ltd.
31700013E Ascom Tateco AB
31800013F Neighbor World Co., Ltd.
319000140 Sendtek Corporation
320000141 CABLE PRINT
321000142 Cisco Systems, Inc.
322000143 Cisco Systems, Inc.
323000144 Cereva Networks, Inc.
324000145 WINSYSTEMS, INC.
325000146 Tesco Controls, Inc.
326000147 Zhone Technologies
327000148 X-traWeb Inc.
328000149 T.D.T. Transfer Data Test GmbH
32900014A SONY COMPUTER SCIENCE LABS., I
33000014B Ennovate Networks, Inc.
33100014C Berkeley Process Control
33200014D Shin Kin Enterprises Co., Ltd
33300014E WIN Enterprises, Inc.
33400014F LUMINOUS Networks, Inc.
335000150 GILAT COMMUNICATIONS, LTD.
336000151 Ensemble Communications
337000152 CHROMATEK INC.
338000153 ARCHTEK TELECOM CORPORATION
339000154 G3M Corporation
340000155 Promise Technology, Inc.
341000156 FIREWIREDIRECT.COM, INC.
342000157 SYSWAVE CO., LTD
343000158 Electro Industries/Gauge Tech
344000159 S1 Corporation
34500015A Digital Video Broadcasting
34600015B ITALTEL S.p.A/RF-UP-I
34700015C CADANT INC.
34800015D Sun Microsystems, Inc
34900015E BEST TECHNOLOGY CO., LTD.
35000015F DIGITAL DESIGN GmbH
351000160 ELMEX Co., LTD.
352000161 Meta Machine Technology
353000162 Cygnet Technologies, Inc.
354000163 Cisco Systems, Inc.
355000164 Cisco Systems, Inc.
356000165 AirSwitch Corporation
357000166 TC GROUP A/S
358000167 HIOKI E.E. CORPORATION
359000168 VITANA CORPORATION
360000169 Celestix Networks Pte Ltd.
36100016A ALITEC
36200016B LightChip, Inc.
36300016C FOXCONN
36400016D CarrierComm Inc.
36500016E Conklin Corporation
36600016F HAITAI ELECTRONICS CO., LTD.
367000170 ESE Embedded System Engineer'g
368000171 Allied Data Technologies
369000172 TechnoLand Co., LTD.
370000173 JNI Corporation
371000174 CyberOptics Corporation
372000175 Radiant Communications Corp.
373000176 Orient Silver Enterprises
374000177 EDSL
375000178 MARGI Systems, Inc.
376000179 WIRELESS TECHNOLOGY, INC.
37700017A Chengdu Maipu Electric Industrial Co., Ltd.
37800017B Heidelberger Druckmaschinen AG
37900017C AG-E GmbH
38000017D ThermoQuest
38100017E ADTEK System Science Co., Ltd.
38200017F Experience Music Project
383000180 AOpen, Inc.
384000181 Nortel Networks
385000182 DICA TECHNOLOGIES AG
386000183 ANITE TELECOMS
387000184 SIEB & MEYER AG
388000185 Aloka Co., Ltd.
389000186 DISCH GmbH
390000187 i2SE GmbH
391000188 LXCO Technologies ag
392000189 Refraction Technology, Inc.
39300018A ROI COMPUTER AG
39400018B NetLinks Co., Ltd.
39500018C Mega Vision
39600018D AudeSi Technologies
39700018E Logitec Corporation
39800018F Kenetec, Inc.
399000190 SMK-M
400000191 SYRED Data Systems
401000192 Texas Digital Systems
402000193 Hanbyul Telecom Co., Ltd.
403000194 Capital Equipment Corporation
404000195 Sena Technologies, Inc.
405000196 Cisco Systems, Inc.
406000197 Cisco Systems, Inc.
407000198 Darim Vision
408000199 HeiSei Electronics
40900019A LEUNIG GmbH
41000019B Kyoto Microcomputer Co., Ltd.
41100019C JDS Uniphase Inc.
41200019D E-Control Systems, Inc.
41300019E ESS Technology, Inc.
41400019F Phonex Broadband
4150001A0 Infinilink Corporation
4160001A1 Mag-Tek, Inc.
4170001A2 Logical Co., Ltd.
4180001A3 GENESYS LOGIC, INC.
4190001A4 Microlink Corporation
4200001A5 Nextcomm, Inc.
4210001A6 Scientific-Atlanta Arcodan A/S
4220001A7 UNEX TECHNOLOGY CORPORATION
4230001A8 Welltech Computer Co., Ltd.
4240001A9 BMW AG
4250001AA Airspan Communications, Ltd.
4260001AB Main Street Networks
4270001AC Sitara Networks, Inc.
4280001AD Coach Master International d.b.a. CMI Worldwide, Inc.
4290001AE Trex Enterprises
4300001AF Motorola Computer Group
4310001B0 Fulltek Technology Co., Ltd.
4320001B1 General Bandwidth
4330001B2 Digital Processing Systems, Inc.
4340001B3 Precision Electronic Manufacturing
4350001B4 Wayport, Inc.
4360001B5 Turin Networks, Inc.
4370001B6 SAEJIN T&M Co., Ltd.
4380001B7 Centos, Inc.
4390001B8 Netsensity, Inc.
4400001B9 SKF Condition Monitoring
4410001BA IC-Net, Inc.
4420001BB Frequentis
4430001BC Brains Corporation
4440001BD Peterson Electro-Musical Products, Inc.
4450001BE Gigalink Co., Ltd.
4460001BF Teleforce Co., Ltd.
4470001C0 CompuLab, Ltd.
4480001C1 Vitesse Semiconductor Corporation
4490001C2 ARK Research Corp.
4500001C3 Acromag, Inc.
4510001C4 NeoWave, Inc.
4520001C5 Simpler Networks
4530001C6 Quarry Technologies
4540001C7 Cisco Systems, Inc.
4550001C8 THOMAS CONRAD CORP.
4560001C8 CONRAD CORP.
4570001C9 Cisco Systems, Inc.
4580001CA Geocast Network Systems, Inc.
4590001CB NetGame, Ltd.
4600001CC Japan Total Design Communication Co., Ltd.
4610001CD ARtem
4620001CE Custom Micro Products, Ltd.
4630001CF Alpha Data Parallel Systems, Ltd.
4640001D0 VitalPoint, Inc.
4650001D1 CoNet Communications, Inc.
4660001D2 MacPower Peripherals, Ltd.
4670001D3 PAXCOMM, Inc.
4680001D4 Leisure Time, Inc.
4690001D5 HAEDONG INFO & COMM CO., LTD
4700001D6 MAN Roland Druckmaschinen AG
4710001D7 F5 Networks, Inc.
4720001D8 Teltronics, Inc.
4730001D9 Sigma, Inc.
4740001DA WINCOMM Corporation
4750001DB Freecom Technologies GmbH
4760001DC Activetelco
4770001DD Avail Networks
4780001DE Trango Systems, Inc.
4790001DF ISDN Communications, Ltd.
4800001E0 Fast Systems, Inc.
4810001E1 Kinpo Electronics, Inc.
4820001E2 Ando Electric Corporation
4830001E3 Siemens AG
4840001E4 Sitera, Inc.
4850001E5 Supernet, Inc.
4860001E6 Hewlett-Packard Company
4870001E7 Hewlett-Packard Company
4880001E8 Force10 Networks, Inc.
4890001E9 Litton Marine Systems B.V.
4900001EA Cirilium Corp.
4910001EB C-COM Corporation
4920001EC Ericsson Group
4930001ED SETA Corp.
4940001EE Comtrol Europe, Ltd.
4950001EF Camtel Technology Corp.
4960001F0 Tridium, Inc.
4970001F1 Innovative Concepts, Inc.
4980001F2 Mark of the Unicorn, Inc.
4990001F3 QPS, Inc.
5000001F4 Enterasys Networks
5010001F5 ERIM S.A.
5020001F6 Association of Musical Electronics Industry
5030001F7 Image Display Systems, Inc.
5040001F8 Adherent Systems, Ltd.
5050001F9 TeraGlobal Communications Corp.
5060001FA HOROSCAS
5070001FB DoTop Technology, Inc.
5080001FC Keyence Corporation
5090001FD Digital Voice Systems, Inc.
5100001FE DIGITAL EQUIPMENT CORPORATION
5110001FF Data Direct Networks, Inc.
512000200 Net & Sys Co., Ltd.
513000201 IFM Electronic gmbh
514000202 Amino Communications, Ltd.
515000203 Woonsang Telecom, Inc.
516000204 Bodmann Industries Elektronik GmbH
517000205 Hitachi Denshi, Ltd.
518000206 Telital R&D Denmark A/S
519000207 VisionGlobal Network Corp.
520000208 Unify Networks, Inc.
521000209 Shenzhen SED Information Technology Co., Ltd.
52200020A Gefran Spa
52300020B Native Networks, Inc.
52400020C Metro-Optix
52500020D Micronpc.com
52600020E Laurel Networks, Inc.
52700020F AATR
528000210 Fenecom
529000211 Nature Worldwide Technology Corp.
530000212 SierraCom
531000213 S.D.E.L.
532000214 DTVRO
533000215 Cotas Computer Technology A/B
534000216 Cisco Systems, Inc.
535000217 Cisco Systems, Inc.
536000218 Advanced Scientific Corp
537000219 Paralon Technologies
53800021A Zuma Networks
53900021B Kollmorgen-Servotronix
54000021C Network Elements, Inc.
54100021D Data General Communication Ltd.
54200021E SIMTEL S.R.L.
54300021F Aculab PLC
544000220 Canon Aptex, Inc.
545000221 DSP Application, Ltd.
546000222 Chromisys, Inc.
547000223 ClickTV
548000224 Lantern Communications, Inc.
549000225 Certus Technology, Inc.
550000226 XESystems, Inc.
551000227 ESD GmbH
552000228 Necsom, Ltd.
553000229 Adtec Corporation
55400022A Asound Electronic
55500022B Tamura Electric Works, Ltd.
55600022C ABB Bomem, Inc.
55700022D Agere Systems
55800022E TEAC Corp. R&D
55900022F P-Cube, Ltd.
560000230 Intersoft Electronics
561000231 Ingersoll-Rand
562000232 Avision, Inc.
563000233 Mantra Communications, Inc.
564000234 Imperial Technology, Inc.
565000235 Paragon Networks International
566000236 INIT GmbH
567000237 Cosmo Research Corp.
568000238 Serome Technology, Inc.
569000239 Visicom
57000023A ZSK Stickmaschinen GmbH
57100023B Redback Networks
57200023C Creative Technology, Ltd.
57300023D NuSpeed, Inc.
57400023E Selta Telematica S.p.a
57500023F Compal Electronics, Inc.
576000240 Seedek Co., Ltd.
577000241 Amer.com
578000242 Videoframe Systems
579000243 Raysis Co., Ltd.
580000244 SURECOM Technology Co.
581000245 Lampus Co, Ltd.
582000246 All-Win Tech Co., Ltd.
583000247 Great Dragon Information Technology (Group) Co., Ltd.
584000248 Pilz GmbH & Co.
585000249 Aviv Infocom Co, Ltd.
58600024A Cisco Systems, Inc.
58700024B Cisco Systems, Inc.
58800024C SiByte, Inc.
58900024D Mannesman Dematic Colby Pty. Ltd.
59000024E Datacard Group
59100024F IPM Datacom S.R.L.
592000250 Geyser Networks, Inc.
593000251 Soma Networks
594000252 Carrier Corporation
595000253 Televideo, Inc.
596000254 WorldGate
597000255 IBM Corporation
598000256 Alpha Processor, Inc.
599000257 Microcom Corp.
600000258 Flying Packets Communications
601000259 Tsann Kuen China (Shanghai)Enterprise Co., Ltd. IT Group
60200025A Catena Networks
60300025B Cambridge Silicon Radio
60400025C SCI Systems (Kunshan) Co., Ltd.
60500025D Calix Networks
60600025E High Technology Ltd
60700025F Nortel Networks
608000260 Accordion Networks, Inc.
609000261 i3 Micro Technology AB
610000262 Soyo Group Soyo Com Tech Co., Ltd
611000263 UPS Manufacturing SRL
612000264 AudioRamp.com
613000265 Virditech Co. Ltd.
614000266 Thermalogic Corporation
615000267 NODE RUNNER, INC.
616000268 Harris Government Communications
617000269 Nadatel Co., Ltd
61800026A Cocess Telecom Co., Ltd.
61900026B BCM Computers Co., Ltd.
62000026C Philips CFT
62100026D Adept Telecom
62200026E NeGeN Access, Inc.
62300026F Senao International Co., Ltd.
624000270 Crewave Co., Ltd.
625000271 Vpacket Communications
626000272 CC&C Technologies, Inc.
627000273 Coriolis Networks
628000274 Tommy Technologies Corp.
629000275 SMART Technologies, Inc.
630000276 Primax Electronics Ltd.
631000277 Cash Systemes Industrie
632000278 Samsung Electro-Mechanics Co., Ltd.
633000279 Control Applications, Ltd.
63400027A IOI Technology Corporation
63500027B Amplify Net, Inc.
63600027C Trilithic, Inc.
63700027D Cisco Systems, Inc.
63800027E Cisco Systems, Inc.
63900027F ask-technologies.com
640000280 Mu Net, Inc.
641000281 Madge Ltd.
642000282 ViaClix, Inc.
643000283 Spectrum Controls, Inc.
644000284 Alstom T&D P&C
645000285 Riverstone Networks
646000286 Occam Networks
647000287 Adapcom
648000288 GLOBAL VILLAGE COMMUNICATION
649000289 DNE Technologies
65000028A Ambit Microsystems Corporation
65100028B VDSL Systems OY
65200028C Micrel-Synergy Semiconductor
65300028D Movita Technologies, Inc.
65400028E Rapid 5 Networks, Inc.
65500028F Globetek, Inc.
656000290 Woorigisool, Inc.
657000291 Open Network Co., Ltd.
658000292 Logic Innovations, Inc.
659000293 Solid Data Systems
660000294 Tokyo Sokushin Co., Ltd.
661000295 IP.Access Limited
662000296 Lectron Co,. Ltd.
663000297 C-COR.net
664000298 Broadframe Corporation
665000299 Apex, Inc.
66600029A Storage Apps
66700029B Kreatel Communications AB
66800029C 3COM
66900029D Merix Corp.
67000029E Information Equipment Co., Ltd.
67100029F L-3 Communication Aviation Recorders
6720002A0 Flatstack Ltd.
6730002A1 World Wide Packets
6740002A2 Hilscher GmbH
6750002A3 ABB Power Automation
6760002A4 AddPac Technology Co., Ltd.
6770002A5 Compaq Computer Corporation
6780002A6 Effinet Systems Co., Ltd.
6790002A7 Vivace Networks
6800002A8 Air Link Technology
6810002A9 RACOM, s.r.o.
6820002AA PLcom Co., Ltd.
6830002AB CTC Union Technologies Co., Ltd.
6840002AC 3PAR data
6850002AD Pentax Corporation
6860002AE Scannex Electronics Ltd.
6870002AF TeleCruz Technology, Inc.
6880002B0 Hokubu Communication & Industrial Co., Ltd.
6890002B1 Anritsu, Ltd.
6900002B2 Cablevision
6910002B3 Intel Corporation
6920002B4 DAPHNE
6930002B5 Avnet, Inc.
6940002B6 Acrosser Technology Co., Ltd.
6950002B7 Watanabe Electric Industry Co., Ltd.
6960002B8 WHI KONSULT AB
6970002B9 Cisco Systems, Inc.
6980002BA Cisco Systems, Inc.
6990002BB Continuous Computing
7000002BC LVL 7 Systems, Inc.
7010002BD Bionet Co., Ltd.
7020002BE Totsu Engineering, Inc.
7030002BF dotRocket, Inc.
7040002C0 Bencent Tzeng Industry Co., Ltd.
7050002C1 Innovative Electronic Designs, Inc.
7060002C2 Net Vision Telecom
7070002C3 Arelnet Ltd.
7080002C4 Vector International BUBA
7090002C5 Evertz Microsystems Ltd.
7100002C6 Data Track Technology PLC
7110002C7 ALPS ELECTRIC Co., Ltd.
7120002C8 Technocom Communications Technology (pte) Ltd
7130002C9 Mellanox Technologies
7140002CA EndPoints, Inc.
7150002CB TriState Ltd.
7160002CC M.C.C.I
7170002CD TeleDream, Inc.
7180002CE FoxJet, Inc.
7190002CF ZyGate Communications, Inc.
7200002D0 Comdial Corporation
7210002D1 Vivotek, Inc.
7220002D2 Workstation AG
7230002D3 NetBotz, Inc.
7240002D4 PDA Peripherals, Inc.
7250002D5 ACR
7260002D6 NICE Systems
7270002D7 EMPEG Ltd
7280002D8 BRECIS Communications Corporation
7290002D9 Reliable Controls
7300002DA ExiO Communications, Inc.
7310002DB NETSEC
7320002DC Fujitsu General Limited
7330002DD Bromax Communications, Ltd.
7340002DE Astrodesign, Inc.
7350002DF Net Com Systems, Inc.
7360002E0 ETAS GmbH
7370002E1 Integrated Network Corporation
7380002E2 NDC Infrared Engineering
7390002E3 LITE-ON Communications, Inc.
7400002E4 JC HYUN Systems, Inc.
7410002E5 Timeware Ltd.
7420002E6 Gould Instrument Systems, Inc.
7430002E7 CAB GmbH & Co KG
7440002E8 E.D.&A.
7450002E9 CS Systemes De Securite - C3S
7460002EA Videonics, Inc.
7470002EB Pico Communications
7480002EC Maschoff Design Engineering
7490002ED DXO Telecom Co., Ltd.
7500002EE Nokia Danmark A/S
7510002EF CCC Network Systems Group Ltd.
7520002F0 AME Optimedia Technology Co., Ltd.
7530002F1 Pinetron Co., Ltd.
7540002F2 eDevice, Inc.
7550002F3 Media Serve Co., Ltd.
7560002F4 PCTEL, Inc.
7570002F5 VIVE Synergies, Inc.
7580002F6 Equipe Communications
7590002F7 ARM
7600002F8 SEAKR Engineering, Inc.
7610002F9 Mimos Semiconductor SDN BHD
7620002FA DX Antenna Co., Ltd.
7630002FB Baumuller Aulugen-Systemtechnik GmbH
7640002FC Cisco Systems, Inc.
7650002FD Cisco Systems, Inc.
7660002FE Viditec, Inc.
7670002FF Handan BroadInfoCom
768000300 NetContinuum, Inc.
769000301 Avantas Networks Corporation
770000302 Oasys Telecom, Inc.
771000303 JAMA Electronics Co., Ltd.
772000304 Pacific Broadband Communications
773000305 Smart Network Devices GmbH
774000306 Fusion In Tech Co., Ltd.
775000307 Secure Works, Inc.
776000308 AM Communications, Inc.
777000309 Texcel Technology PLC
77800030A Argus Technologies
77900030B Hunter Technology, Inc.
78000030C Telesoft Technologies Ltd.
78100030D Uniwill Computer Corp.
78200030E Core Communications Co., Ltd.
78300030F Digital China (Shanghai) Networks Ltd.
784000310 Link Evolution Corp.
785000311 Micro Technology Co., Ltd.
786000312 TR-Systemtechnik GmbH
787000313 Access Media SPA
788000314 Teleware Network Systems
789000315 Cidco Incorporated
790000316 Nobell Communications, Inc.
791000317 Merlin Systems, Inc.
792000318 Cyras Systems, Inc.
793000319 Infineon AG
79400031A Beijing Broad Telecom Ltd., China
79500031B Cellvision Systems, Inc.
79600031C Svenska Hardvarufabriken AB
79700031D Taiwan Commate Computer, Inc.
79800031E Optranet, Inc.
79900031F Condev Ltd.
800000320 Xpeed, Inc.
801000321 Reco Research Co., Ltd.
802000322 IDIS Co., Ltd.
803000323 Cornet Technology, Inc.
804000324 SANYO Multimedia Tottori Co., Ltd.
805000325 Arima Computer Corp.
806000326 Iwasaki Information Systems Co., Ltd.
807000327 ACT'L
808000328 Mace Group, Inc.
809000329 F3, Inc.
81000032A UniData Communication Systems, Inc.
81100032B GAI Datenfunksysteme GmbH
81200032C ABB Industrie AG
81300032D IBASE Technology, Inc.
81400032E Scope Information Management, Ltd.
81500032F Global Sun Technology, Inc.
816000330 Imagenics, Co., Ltd.
817000331 Cisco Systems, Inc.
818000332 Cisco Systems, Inc.
819000333 Digitel Co., Ltd.
820000334 Newport Electronics
821000335 Mirae Technology
822000336 Zetes Technologies
823000337 Vaone, Inc.
824000338 Oak Technology
825000339 Eurologic Systems, Ltd.
82600033A Silicon Wave, Inc.
82700033B TAMI Tech Co., Ltd.
82800033C Daiden Co., Ltd.
82900033D ILSHin Lab
83000033E Tateyama System Laboratory Co., Ltd.
83100033F BigBand Networks, Ltd.
832000340 Floware Wireless Systems, Ltd.
833000341 Axon Digital Design
834000342 Nortel Networks
835000343 Martin Professional A/S
836000344 Tietech.Co., Ltd.
837000345 Routrek Networks Corporation
838000346 Hitachi Kokusai Electric, Inc.
839000347 Intel Corporation
840000348 Norscan Instruments, Ltd.
841000349 Vidicode Datacommunicatie B.V.
84200034A RIAS Corporation
84300034B Nortel Networks
84400034C Shanghai DigiVision Technology Co., Ltd.
84500034D Chiaro Networks, Ltd.
84600034E Pos Data Company, Ltd.
84700034F Sur-Gard Security
848000350 BTICINO SPA
849000351 Diebold, Inc.
850000352 Colubris Networks
851000353 Mitac, Inc.
852000354 Fiber Logic Communications
853000355 TeraBeam Internet Systems
854000356 Wincor Nixdorf GmbH & Co KG
855000357 Intervoice-Brite, Inc.
856000358 iCable System Co., Ltd.
857000359 DigitalSis
85800035A Photron Limited
85900035B BridgeWave Communications
86000035C Saint Song Corp.
86100035D Bosung Hi-Net Co., Ltd.
86200035E Metropolitan Area Networks, Inc.
86300035F Prueftechnik Condition Monitoring GmbH & Co. KG
864000360 PAC Interactive Technology, Inc.
865000361 Widcomm, Inc.
866000362 Vodtel Communications, Inc.
867000363 Miraesys Co., Ltd.
868000364 Scenix Semiconductor, Inc.
869000365 Kira Information & Communications, Ltd.
870000366 ASM Pacific Technology
871000367 Jasmine Networks, Inc.
872000368 Embedone Co., Ltd.
873000369 Nippon Antenna Co., Ltd.
87400036A Mainnet, Ltd.
87500036B Cisco Systems, Inc.
87600036C Cisco Systems, Inc.
87700036D Runtop, Inc.
87800036E Nicon Systems (Pty) Limited
87900036F Telsey SPA
880000370 NXTV, Inc.
881000371 Acomz Networks Corp.
882000372 ULAN
883000373 Aselsan A.S
884000374 Hunter Watertech
885000375 NetMedia, Inc.
886000376 Graphtec Technology, Inc.
887000377 Gigabit Wireless
888000378 HUMAX Co., Ltd.
889000379 Proscend Communications, Inc.
89000037A Taiyo Yuden Co., Ltd.
89100037B IDEC IZUMI Corporation
89200037C Coax Media
89300037D Stellcom
89400037E PORTech Communications, Inc.
89500037F Atheros Communications, Inc.
896000380 SSH Communications Security Corp.
897000381 Ingenico International
898000382 A-One Co., Ltd.
899000383 Metera Networks, Inc.
900000384 AETA
901000385 Actelis Networks, Inc.
902000386 Ho Net, Inc.
903000387 Blaze Network Products
904000388 Fastfame Technology Co., Ltd.
905000389 Plantronics
90600038A America Online, Inc.
90700038B PLUS-ONE I&T, Inc.
90800038C Total Impact
90900038D PCS Revenue Control Systems, Inc.
91000038E Atoga Systems, Inc.
91100038F Weinschel Corporation
912000390 Digital Video Communications, Inc.
913000392 Hyundai Teletek Co., Ltd.
914000393 Apple Computer, Inc.
915000394 Connect One
916000395 California Amplifier
917000396 EZ Cast Co., Ltd.
918000397 Watchfront Electronics
919000398 WISI
920000399 Dongju Informations & Communications Co., Ltd.
92100039A nSine, Ltd.
92200039B NetChip Technology, Inc.
92300039C OptiMight Communications, Inc.
92400039D BENQ CORPORATION
92500039E Tera System Co., Ltd.
92600039F Cisco Systems, Inc.
9270003A0 Cisco Systems, Inc.
9280003A1 HIPER Information & Communication, Inc.
9290003A2 Catapult Communications
9300003A3 MAVIX, Ltd.
9310003A4 Data Storage and Information Management
9320003A5 Medea Corporation
9330003A7 Unixtar Technology, Inc.
9340003A8 IDOT Computers, Inc.
9350003A9 AXCENT Media AG
9360003AA Watlow
9370003AB Bridge Information Systems
9380003AC Fronius Schweissmaschinen
9390003AD Emerson Energy Systems AB
9400003AE Allied Advanced Manufacturing Pte, Ltd.
9410003AF Paragea Communications
9420003B0 Xsense Technology Corp.
9430003B1 Abbott Laboratories HPD
9440003B2 Radware
9450003B3 IA Link Systems Co., Ltd.
9460003B4 Macrotek International Corp.
9470003B5 Entra Technology Co.
9480003B6 QSI Corporation
9490003B7 ZACCESS Systems
9500003B8 NetKit Solutions, LLC
9510003B9 Hualong Telecom Co., Ltd.
9520003BA Sun Microsystems
9530003BB Signal Communications Limited
9540003BC COT GmbH
9550003BD OmniCluster Technologies, Inc.
9560003BE Netility
9570003BF Centerpoint Broadband Technologies, Inc.
9580003C0 RFTNC Co., Ltd.
9590003C1 Packet Dynamics Ltd
9600003C2 Solphone K.K.
9610003C3 Micronik Multimedia
9620003C4 Tomra Systems ASA
9630003C5 Mobotix AG
9640003C6 ICUE Systems, Inc.
9650003C7 hopf Elektronik GmbH
9660003C8 CML Emergency Services
9670003C9 TECOM Co., Ltd.
9680003CA MTS Systems Corp.
9690003CB Nippon Systems Development Co., Ltd.
9700003CC Momentum Computer, Inc.
9710003CD Clovertech, Inc.
9720003CE ETEN Technologies, Inc.
9730003CF Muxcom, Inc.
9740003D0 KOANKEISO Co., Ltd.
9750003D1 Takaya Corporation
9760003D2 Crossbeam Systems, Inc.
9770003D3 Internet Energy Systems, Inc.
9780003D4 Alloptic, Inc.
9790003D5 Advanced Communications Co., Ltd.
9800003D6 RADVision, Ltd.
9810003D7 NextNet Wireless, Inc.
9820003D8 iMPath Networks, Inc.
9830003D9 Secheron SA
9840003DA Takamisawa Cybernetics Co., Ltd.
9850003DB Apogee Electronics Corp.
9860003DC Lexar Media, Inc.
9870003DD Comark Corp.
9880003DE OTC Wireless
9890003DF Desana Systems
9900003E0 RadioFrame Networks, Inc.
9910003E1 Winmate Communication, Inc.
9920003E2 Comspace Corporation
9930003E3 Cisco Systems, Inc.
9940003E4 Cisco Systems, Inc.
9950003E5 Hermstedt SG
9960003E6 Entone Technologies, Inc.
9970003E7 Logostek Co. Ltd.
9980003E8 Wavelength Digital Limited
9990003E9 Akara Canada, Inc.
10000003EA Mega System Technologies, Inc.
10010003EB Atrica
10020003EC ICG Research, Inc.
10030003ED Shinkawa Electric Co., Ltd.
10040003EE MKNet Corporation
10050003EF Oneline AG
10060003F0 Redfern Broadband Networks
10070003F1 Cicada Semiconductor, Inc.
10080003F2 Seneca Networks
10090003F3 Dazzle Multimedia, Inc.
10100003F4 NetBurner
10110003F5 Chip2Chip
10120003F6 Allegro Networks, Inc.
10130003F7 Plast-Control GmbH
10140003F8 SanCastle Technologies, Inc.
10150003F9 Pleiades Communications, Inc.
10160003FA TiMetra Networks
10170003FB Toko Seiki Company, Ltd.
10180003FC Intertex Data AB
10190003FD Cisco Systems, Inc.
10200003FE Cisco Systems, Inc.
10210003FF Connectix
1022000400 LEXMARK INTERNATIONAL, INC.
1023000401 Osaki Electric Co., Ltd.
1024000402 Nexsan Technologies, Ltd.
1025000403 Nexsi Corporation
1026000404 Makino Milling Machine Co., Ltd.
1027000405 ACN Technologies
1028000406 Fa. Metabox AG
1029000407 Topcon Positioning Systems, Inc.
1030000408 Sanko Electronics Co., Ltd.
1031000409 Cratos Networks
103200040A Sage Systems
103300040B 3com Europe Ltd.
103400040C KANNO Work's Ltd.
103500040D Avaya, Inc.
103600040E AVM GmbH
103700040F Asus Network Technologies, Inc.
1038000410 Spinnaker Networks, Inc.
1039000411 Inkra Networks, Inc.
1040000412 WaveSmith Networks, Inc.
1041000413 SNOM Technology AG
1042000414 Umezawa Musen Denki Co., Ltd.
1043000415 Rasteme Systems Co., Ltd.
1044000416 Parks S/A Comunicacoes Digitais
1045000417 ELAU AG
1046000418 Teltronic S.A.U.
1047000419 Fibercycle Networks, Inc.
104800041A ines GmbH
104900041B Digital Interfaces Ltd.
105000041C ipDialog, Inc.
105100041D Corega of America
105200041E Shikoku Instrumentation Co., Ltd.
105300041F Sony Computer Entertainment, Inc.
1054000420 Slim Devices, Inc.
1055000421 Ocular Networks
1056000422 Gordon Kapes, Inc.
1057000423 Intel Corporation
1058000424 TMC s.r.l.
1059000425 Atmel Corporation
1060000426 Autosys
1061000427 Cisco Systems, Inc.
1062000428 Cisco Systems, Inc.
1063000429 Pixord Corporation
106400042A Wireless Networks, Inc.
106500042B IT Access Co., Ltd.
106600042C Minet, Inc.
106700042D Sarian Systems, Ltd.
106800042E Netous Technologies, Ltd.
106900042F International Communications Products, Inc.
1070000430 Netgem
1071000431 GlobalStreams, Inc.
1072000432 Voyetra Turtle Beach, Inc.
1073000433 Cyberboard A/S
1074000434 Accelent Systems, Inc.
1075000435 Comptek International, Inc.
1076000436 ELANsat Technologies, Inc.
1077000437 Powin Information Technology, Inc.
1078000438 Nortel Networks
1079000439 Rosco Entertainment Technology, Inc.
108000043A Intelligent Telecommunications, Inc.
108100043B Lava Computer Mfg., Inc.
108200043C SONOS Co., Ltd.
108300043D INDEL AG
108400043E Telencomm
108500043F Electronic Systems Technology, Inc.
1086000440 cyberPIXIE, Inc.
1087000441 Half Dome Systems, Inc.
1088000442 NACT
1089000443 Agilent Technologies, Inc.
1090000444 Western Multiplex Corporation
1091000445 LMS Skalar Instruments GmbH
1092000446 CYZENTECH Co., Ltd.
1093000447 Acrowave Systems Co., Ltd.
1094000448 Polaroid Professional Imaging
1095000449 Mapletree Networks
109600044A iPolicy Networks, Inc.
109700044B NVIDIA
109800044C JENOPTIK
109900044D Cisco Systems, Inc.
110000044E Cisco Systems, Inc.
110100044F Leukhardt Systemelektronik GmbH
1102000450 DMD Computers SRL
1103000451 Medrad, Inc.
1104000452 RocketLogix, Inc.
1105000453 YottaYotta, Inc.
1106000454 Quadriga UK
1107000455 ANTARA.net
1108000456 PipingHot Networks
1109000457 Universal Access Technology, Inc.
1110000458 Fusion X Co., Ltd.
1111000459 Veristar Corporation
111200045A The Linksys Group, Inc.
111300045B Techsan Electronics Co., Ltd.
111400045C Mobiwave Pte Ltd
111500045D BEKA Elektronik
111600045E PolyTrax Information Technology AG
111700045F Evalue Technology, Inc.
1118000460 Knilink Technology, Inc.
1119000461 EPOX Computer Co., Ltd.
1120000462 DAKOS Data & Communication Co., Ltd.
1121000463 Bosch Security Systems
1122000464 Fantasma Networks, Inc.
1123000465 i.s.t isdn-support technik GmbH
1124000466 ARMITEL Co.
1125000467 Wuhan Research Institute of MII
1126000468 Vivity, Inc.
1127000469 Innocom, Inc.
112800046A Navini Networks
112900046B Palm Wireless, Inc.
113000046C Cyber Technology Co., Ltd.
113100046D Cisco Systems, Inc.
113200046E Cisco Systems, Inc.
113300046F Digitel S/A Industria Eletronica
1134000470 ipUnplugged AB
1135000471 IPrad
1136000472 Telelynx, Inc.
1137000473 Photonex Corporation
1138000474 LEGRAND
1139000475 3 Com Corporation
1140000476 3 Com Corporation
1141000477 Scalant Systems, Inc.
1142000478 G. Star Technology Corporation
1143000479 Radius Co., Ltd.
114400047A AXXESSIT ASA
114500047B Schlumberger
114600047C Skidata AG
114700047D Pelco
114800047E NKF Electronics
114900047F Chr. Mayr GmbH & Co. KG
1150000480 Foundry Networks, Inc.
1151000481 Econolite Control Products, Inc.
1152000482 Medialogic Corp.
1153000483 Deltron Technology, Inc.
1154000484 Amann GmbH
1155000485 PicoLight
1156000486 ITTC, University of Kansas
1157000487 Cogency Semiconductor, Inc.
1158000488 Eurotherm Action Incorporated.
1159000489 YAFO Networks, Inc.
116000048A Temia Vertriebs GmbH
116100048B Poscon Corporation
116200048C Nayna Networks, Inc.
116300048D Tone Commander Systems, Inc.
116400048E Ohm Tech Labs, Inc.
116500048F TD Systems Corp.
1166000490 Optical Access
1167000491 Technovision, Inc.
1168000492 Hive Internet, Ltd.
1169000493 Tsinghua Unisplendour Co., Ltd.
1170000494 Breezecom, Ltd.
1171000495 Tejas Networks
1172000496 Extreme Networks
1173000497 MacroSystem Digital Video AG
1174000499 Chino Corporation
117500049A Cisco Systems, Inc.
117600049B Cisco Systems, Inc.
117700049C Surgient Networks, Inc.
117800049D Ipanema Technologies
117900049E Wirelink Co., Ltd.
118000049F Metrowerks
11810004A0 Verity Instruments, Inc.
11820004A1 Pathway Connectivity
11830004A2 L.S.I. Japan Co., Ltd.
11840004A3 Microchip Technology, Inc.
11850004A4 NetEnabled, Inc.
11860004A5 Barco Projection Systems NV
11870004A6 SAF Tehnika Ltd.
11880004A7 FabiaTech Corporation
11890004A8 Broadmax Technologies, Inc.
11900004A9 SandStream Technologies, Inc.
11910004AA Jetstream Communications
11920004AB Comverse Network Systems, Inc.
11930004AC IBM CORP.
11940004AD Malibu Networks
11950004AE Liquid Metronics
11960004AF Digital Fountain, Inc.
11970004B0 ELESIGN Co., Ltd.
11980004B1 Signal Technology, Inc.
11990004B2 ESSEGI SRL
12000004B3 Videotek, Inc.
12010004B4 CIAC
12020004B5 Equitrac Corporation
12030004B6 Stratex Networks, Inc.
12040004B7 AMB i.t. Holding
12050004B8 Kumahira Co., Ltd.
12060004B9 S.I. Soubou, Inc.
12070004BA KDD Media Will Corporation
12080004BB Bardac Corporation
12090004BC Giantec, Inc.
12100004BD Motorola BCS
12110004BE OptXCon, Inc.
12120004BF VersaLogic Corp.
12130004C0 Cisco Systems, Inc.
12140004C1 Cisco Systems, Inc.
12150004C2 Magnipix, Inc.
12160004C3 CASTOR Informatique
12170004C4 Allen & Heath Limited
12180004C5 ASE Technologies, USA
12190004C6 Yamaha Motor Co., Ltd.
12200004C7 NetMount
12210004C8 LIBA Maschinenfabrik GmbH
12220004C9 Micro Electron Co., Ltd.
12230004CA FreeMs Corp.
12240004CB Tdsoft Communication, Ltd.
12250004CC Peek Traffic B.V.
12260004CD Informedia Research Group
12270004CE Patria Ailon
12280004CF Seagate Technology
12290004D0 Softlink s.r.o.
12300004D1 Drew Technologies, Inc.
12310004D2 Adcon Telemetry AG
12320004D3 Toyokeiki Co., Ltd.
12330004D4 Proview Electronics Co., Ltd.
12340004D5 Hitachi Communication Systems, Inc.
12350004D6 Takagi Industrial Co., Ltd.
12360004D7 Omitec Instrumentation Ltd.
12370004D8 IPWireless, Inc.
12380004D9 Titan Electronics, Inc.
12390004DA Relax Technology, Inc.
12400004DB Tellus Group Corp.
12410004DC Nortel Networks
12420004DD Cisco Systems, Inc.
12430004DE Cisco Systems, Inc.
12440004DF Teracom Telematica Ltda.
12450004E0 Procket Networks
12460004E1 Infinior Microsystems
12470004E2 SMC Networks, Inc.
12480004E3 Accton Technology Corp.
12490004E4 Daeryung Ind., Inc.
12500004E5 Glonet Systems, Inc.
12510004E6 Banyan Network Private Limited
12520004E7 Lightpointe Communications, Inc
12530004E8 IER, Inc.
12540004E9 Infiniswitch Corporation
12550004EA Hewlett-Packard Company
12560004EB Paxonet Communications, Inc.
12570004EC Memobox SA
12580004ED Billion Electric Co., Ltd.
12590004EE Lincoln Electric Company
12600004EF Polestar Corp.
12610004F0 International Computers, Ltd
12620004F1 WhereNet
12630004F2 Circa Communications, Ltd.
12640004F3 FS FORTH-SYSTEME GmbH
12650004F4 Infinite Electronics Inc.
12660004F5 SnowShore Networks, Inc.
12670004F6 Amphus
12680004F7 Omega Band, Inc.
12690004F8 QUALICABLE TV Industria E Com., Ltda
12700004F9 Xtera Communications, Inc.
12710004FA MIST Inc.
12720004FB Commtech, Inc.
12730004FC Stratus Computer (DE), Inc.
12740004FD Japan Control Engineering Co., Ltd.
12750004FE Pelago Networks
12760004FF Acronet Co., Ltd.
1277000500 Cisco Systems, Inc.
1278000501 Cisco Systems, Inc.
1279000502 APPLE COMPUTER
1280000503 ICONAG
1281000504 Naray Information & Communication Enterprise
1282000505 Systems Integration Solutions, Inc.
1283000506 Reddo Networks AB
1284000507 Fine Appliance Corp.
1285000508 Inetcam, Inc.
1286000509 AVOC Nishimura Ltd.
128700050A ICS Spa
128800050B SICOM Systems, Inc.
128900050C Network Photonics, Inc.
129000050D Midstream Technologies, Inc.
129100050E 3ware, Inc.
129200050F Tanaka S/S Ltd.
1293000510 Infinite Shanghai Communication Terminals Ltd.
1294000511 Complementary Technologies Ltd
1295000512 MeshNetworks, Inc.
1296000513 VTLinx Multimedia Systems, Inc.
1297000514 KDT Systems Co., Ltd.
1298000515 Nuark Co., Ltd.
1299000516 SMART Modular Technologies
1300000517 Shellcomm, Inc.
1301000518 Jupiters Technology
1302000519 Siemens Building Technologies AG,
130300051A 3Com Europe Ltd.
130400051B Magic Control Technology Corporation
130500051C Xnet Technology Corp.
130600051D Airocon, Inc.
130700051E Brocade Communications Systems, Inc.
130800051F Taijin Media Co., Ltd.
1309000520 Smartronix, Inc.
1310000521 Control Microsystems
1311000522 LEA*D Corporation, Inc.
1312000523 AVL List GmbH
1313000524 BTL System (HK) Limited
1314000525 Puretek Industrial Co., Ltd.
1315000526 IPAS GmbH
1316000527 SJ Tek Co. Ltd
1317000528 New Focus, Inc.
1318000529 Shanghai Broadan Communication Technology Co., Ltd
131900052A Ikegami Tsushinki Co., Ltd.
132000052B HORIBA, Ltd.
132100052C Supreme Magic Corporation
132200052D Zoltrix International Limited
132300052E Cinta Networks
132400052F Leviton Voice and Data
1325000530 Andiamo Systems, Inc.
1326000531 Cisco Systems, Inc.
1327000532 Cisco Systems, Inc.
1328000533 Sanera Systems, Inc.
1329000534 Northstar Engineering Ltd.
1330000535 Chip PC Ltd.
1331000536 Danam Communications, Inc.
1332000537 Nets Technology Co., Ltd.
1333000538 Merilus, Inc.
1334000539 A Brand New World in Sweden AB
133500053A Willowglen Services Pte Ltd
133600053B Harbour Networks Ltd., Co. Beijing
133700053C Xircom
133800053D Agere Systems
133900053E KID Systeme GmbH
134000053F VisionTek, Inc.
1341000540 FAST Corporation
1342000541 Advanced Systems Co., Ltd.
1343000542 Otari, Inc.
1344000543 IQ Wireless GmbH
1345000544 Valley Technologies, Inc.
1346000545 Internet Photonics
1347000546 K-Solutions Inc.
1348000547 Starent Networks
1349000548 Disco Corporation
1350000549 Salira Optical Network Systems
135100054A Ario Data Networks, Inc.
135200054B Micro Innovation AG
135300054C RF Innovations Pty Ltd
135400054D Brans Technologies, Inc.
135500054E Philips Components
1356000550 Digi-Tech Communications Limited
1357000551 F & S Elektronik Systeme GmbH
1358000552 Xycotec Computer GmbH
1359000553 DVC Company, Inc.
1360000554 Rangestar Wireless
1361000555 Japan Cash Machine Co., Ltd.
1362000556 360 Systems
1363000557 Agile TV Corporation
1364000558 Synchronous, Inc.
1365000559 Intracom S.A.
136600055A Power Dsine Ltd.
136700055B Charles Industries, Ltd.
136800055C Kowa Company, Ltd.
136900055D D-Link Systems, Inc.
137000055E Cisco Systems, Inc.
137100055F Cisco Systems, Inc.
1372000560 LEADER COMM.CO., LTD
1373000561 nac Image Technology, Inc.
1374000562 Digital View Limited
1375000563 J-Works, Inc.
1376000564 Tsinghua Bitway Co., Ltd.
1377000565 Tailyn Communication Company Ltd.
1378000566 Secui.com Corporation
1379000567 Etymonic Design, Inc.
1380000568 Piltofish Networks AB
1381000569 VMWARE, Inc.
138200056A Heuft Systemtechnik GmbH
138300056B C.P. Technology Co., Ltd.
138400056C Hung Chang Co., Ltd.
138500056D Pacific Corporation
138600056E National Enhance Technology, Inc.
138700056F Innomedia Technologies Pvt. Ltd.
1388000570 Baydel Ltd.
1389000571 Seiwa Electronics Co.
1390000572 Deonet Co., Ltd.
1391000573 Cisco Systems, Inc.
1392000574 Cisco Systems, Inc.
1393000575 CDS-Electronics BV
1394000576 NSM Technology Ltd.
1395000577 SM Information & Communication
1396000579 Universal Control Solution Corp.
139700057A Hatteras Networks
139800057B Chung Nam Electronic Co., Ltd.
139900057C RCO Security AB
140000057D Sun Communications, Inc.
140100057E Eckelmann Steuerungstechnik GmbH
140200057F Acqis Technology
1403000580 Fibrolan Ltd.
1404000581 Snell & Wilcox Ltd.
1405000582 ClearCube Technology
1406000583 ImageCom Limited
1407000584 AbsoluteValue Systems, Inc.
1408000585 Juniper Networks, Inc.
1409000586 Lucent Technologies
1410000587 Locus, Incorporated
1411000588 Sensoria Corp.
1412000589 National Datacomputer
141300058A Netcom Co., Ltd.
141400058B IPmental, Inc.
141500058C Opentech Inc.
141600058D Lynx Photonic Networks, Inc.
141700058E Flextronics International GmbH & Co. Nfg. KG
141800058F CLCsoft co.
1419000590 Swissvoice Ltd.
1420000591 Active Silicon Ltd.
1421000592 Pultek Corp.
1422000593 Grammar Engine Inc.
1423000594 IXXAT Automation GmbH
1424000595 Alesis Corporation
1425000596 Genotech Co., Ltd.
1426000597 Eagle Traffic Control Systems
1427000598 CRONOS S.r.l.
1428000599 DRS Test and Energy Management or DRS-TEM
142900059A Cisco Systems, Inc.
143000059B Cisco Systems, Inc.
143100059C Kleinknecht GmbH, Ing. Buero
143200059D Daniel Computing Systems, Inc.
143300059E Zinwell Corporation
143400059F Yotta Networks, Inc.
14350005A0 MOBILINE Kft.
14360005A1 Zenocom
14370005A2 CELOX Networks
14380005A3 QEI, Inc.
14390005A4 Lucid Voice Ltd.
14400005A5 KOTT
14410005A6 Extron Electronics
14420005A7 Hyperchip, Inc.
14430005A8 WYLE ELECTRONICS
14440005A9 Princeton Networks, Inc.
14450005AA Moore Industries International Inc.
14460005AB Cyber Fone, Inc.
14470005AC Northern Digital, Inc.
14480005AD Topspin Communications, Inc.
14490005AE Mediaport USA
14500005AF InnoScan Computing A/S
14510005B0 Korea Computer Technology Co., Ltd.
14520005B1 ASB Technology BV
14530005B2 Medison Co., Ltd.
14540005B3 Asahi-Engineering Co., Ltd.
14550005B4 Aceex Corporation
14560005B5 Broadcom Technologies
14570005B6 INSYS Microelectronics GmbH
14580005B7 Arbor Technology Corp.
14590005B8 Electronic Design Associates, Inc.
14600005B9 Airvana, Inc.
14610005BA Area Networks, Inc.
14620005BB Myspace AB
14630005BC Resorsys Ltd.
14640005BD ROAX BV
14650005BE Kongsberg Seatex AS
14660005BF JustEzy Technology, Inc.
14670005C0 Digital Network Alacarte Co., Ltd.
14680005C1 A-Kyung Motion, Inc.
14690005C2 Soronti, Inc.
14700005C3 Pacific Instruments, Inc.
14710005C4 Telect, Inc.
14720005C5 Flaga HF
14730005C6 Triz Communications
14740005C7 I/F-COM A/S
14750005C8 VERYTECH
14760005C9 LG Innotek
14770005CA Hitron Technology, Inc.
14780005CB ROIS Technologies, Inc.
14790005CC Sumtel Communications, Inc.
14800005CD Denon, Ltd.
14810005CE Prolink Microsystems Corporation
14820005CF Thunder River Technologies, Inc.
14830005D0 Solinet Systems
14840005D1 Metavector Technologies
14850005D2 DAP Technologies
14860005D3 eProduction Solutions, Inc.
14870005D4 FutureSmart Networks, Inc.
14880005D5 Speedcom Wireless
14890005D6 Titan Wireless
14900005D7 Vista Imaging, Inc.
14910005D8 Arescom, Inc.
14920005D9 Techno Valley, Inc.
14930005DA Apex Automationstechnik
14940005DB Nentec GmbH
14950005DC Cisco Systems, Inc.
14960005DD Cisco Systems, Inc.
14970005DE Gi Fone Korea, Inc.
14980005DF Electronic Innovation, Inc.
14990005E0 Empirix Corp.
15000005E1 Trellis Photonics, Ltd.
15010005E2 Creativ Network Technologies
15020005E3 LightSand Communications, Inc.
15030005E4 Red Lion Controls L.P.
15040005E5 Renishaw PLC
15050005E6 Egenera, Inc.
15060005E7 Netrake Corp.
15070005E8 TurboWave, Inc.
15080005E9 Unicess Network, Inc.
15090005EA Rednix
15100005EB Blue Ridge Networks, Inc.
15110005EC Mosaic Systems Inc.
15120005ED Technikum Joanneum GmbH
15130005EE BEWATOR Group
15140005EF ADOIR Digital Technology
15150005F0 SATEC
15160005F1 Vrcom, Inc.
15170005F2 Power R, Inc.
15180005F3 Weboyn
15190005F4 System Base Co., Ltd.
15200005F5 OYO Geospace Corp.
15210005F6 Young Chang Co. Ltd.
15220005F7 Analog Devices, Inc.
15230005F8 Real Time Access, Inc.
15240005F9 TOA Corporation
15250005FA IPOptical, Inc.
15260005FB ShareGate, Inc.
15270005FC Schenck Pegasus Corp.
15280005FD PacketLight Networks Ltd.
15290005FE Traficon N.V.
15300005FF SNS Solutions, Inc.
1531000600 Tokyo Electronic Industry Co., Ltd.
1532000601 Otanikeiki Co., Ltd.
1533000602 Cirkitech Electronics Co.
1534000603 Baker Hughes Inc.
1535000604 @Track Communications, Inc.
1536000605 Inncom International, Inc.
1537000606 RapidWAN, Inc.
1538000607 Omni Directional Control Technology Inc.
1539000608 At-Sky SAS
1540000609 Crossport Systems
154100060A Blue2space
154200060B Paceline Systems Corporation
154300060C Melco Industries, Inc.
154400060D Wave7 Optics
154500060E IGYS Systems, Inc.
154600060F Narad Networks Inc
1547000610 Abeona Networks Inc
1548000611 Zeus Wireless, Inc.
1549000612 Accusys, Inc.
1550000613 Kawasaki Microelectronics Incorporated
1551000614 Prism Holdings
1552000615 Kimoto Electric Co., Ltd.
1553000616 Tel Net Co., Ltd.
1554000617 Redswitch Inc.
1555000618 DigiPower Manufacturing Inc.
1556000619 Connection Technology Systems
155700061A Zetari Inc.
155800061B Portable Systems, IBM Japan Co, Ltd
155900061C Hoshino Metal Industries, Ltd.
156000061D MIP Telecom, Inc.
156100061E Maxan Systems
156200061F Vision Components GmbH
1563000620 Serial System Ltd.
1564000621 Hinox, Co., Ltd.
1565000622 Chung Fu Chen Yeh Enterprise Corp.
1566000623 MGE UPS Systems France
1567000624 Gentner Communications Corp.
1568000625 The Linksys Group, Inc.
1569000626 MWE GmbH
1570000627 Uniwide Technologies, Inc.
1571000628 Cisco Systems, Inc.
1572000629 IBM CORPORATION
157300062A Cisco Systems, Inc.
157400062B INTRASERVER TECHNOLOGY
157500062C Network Robots, Inc.
157600062D TouchStar Technologies, L.L.C.
157700062E Aristos Logic Corp.
157800062F Pivotech Systems Inc.
1579000630 Adtranz Sweden
1580000631 Optical Solutions, Inc.
1581000632 Mesco Engineering GmbH
1582000633 Heimann Biometric Systems GmbH
1583000634 GTE Airfone Inc.
1584000635 PacketAir Networks, Inc.
1585000636 Jedai Broadband Networks
1586000637 Toptrend-Meta Information (ShenZhen) Inc.
1587000638 Sungjin C&C Co., Ltd.
1588000639 Newtec
158900063A Dura Micro, Inc.
159000063B Arcturus Networks, Inc.
159100063C NMI Electronics Ltd
159200063D Microwave Data Systems Inc.
159300063E Opthos Inc.
159400063F Everex Communications Inc.
1595000640 White Rock Networks
1596000641 ITCN
1597000642 Genetel Systems Inc.
1598000643 SONO Computer Co., Ltd.
1599000644 NEIX Inc.
1600000645 Meisei Electric Co. Ltd.
1601000646 ShenZhen XunBao Network Technology Co Ltd
1602000647 Etrali S.A.
1603000648 Seedsware, Inc.
1604000649 Quante
160500064A Honeywell Co., Ltd. (KOREA)
160600064B Alexon Co., Ltd.
160700064C Invicta Networks, Inc.
160800064D Sencore
160900064E Broad Net Technology Inc.
161000064F PRO-NETS Technology Corporation
1611000650 Tiburon Networks, Inc.
1612000651 Aspen Networks Inc.
1613000652 Cisco Systems, Inc.
1614000653 Cisco Systems, Inc.
1615000654 Maxxio Technologies
1616000655 Yipee, Inc.
1617000656 Tactel AB
1618000657 Market Central, Inc.
1619000658 Helmut Fischer GmbH & Co. KG
1620000659 EAL (Apeldoorn) B.V.
162100065A Strix Systems
162200065B Dell Computer Corp.
162300065C Malachite Technologies, Inc.
162400065D Heidelberg Web Systems
162500065E Photuris, Inc.
162600065F ECI Telecom - NGTS Ltd.
1627000660 NADEX Co., Ltd.
1628000661 NIA Home Technologies Corp.
1629000662 MBM Technology Ltd.
1630000663 Human Technology Co., Ltd.
1631000664 Fostex Corporation
1632000665 Sunny Giken, Inc.
1633000666 Roving Networks
1634000667 Tripp Lite
1635000668 Vicon Industries Inc.
1636000669 Datasound Laboratories Ltd
163700066A InfiniCon Systems, Inc.
163800066B Sysmex Corporation
163900066C Robinson Corporation
164000066D Compuprint S.P.A.
164100066E Delta Electronics, Inc.
164200066F Korea Data Systems
1643000670 Upponetti Oy
1644000671 Softing AG
1645000672 Netezza
1646000673 Optelecom, Inc.
1647000674 Spectrum Control, Inc.
1648000675 Banderacom, Inc.
1649000676 Novra Technologies Inc.
1650000677 SICK AG
1651000678 Marantz Japan, Inc.
1652000679 Konami Corporation
165300067A JMP Systems
165400067B Toplink C&C Corporation
165500067C CISCO SYSTEMS, INC.
165600067D Takasago Ltd.
165700067E WinCom Systems, Inc.
165800067F Rearden Steel Technologies
1659000680 Card Access, Inc.
1660000681 Goepel Electronic GmbH
1661000682 Convedia
1662000683 Bravara Communications, Inc.
1663000684 Biacore AB
1664000685 NetNearU Corporation
1665000686 ZARDCOM Co., Ltd.
1666000687 Omnitron Systems Technology, Inc.
1667000688 Telways Communication Co., Ltd.
1668000689 yLez Technologies Pte Ltd
166900068A NeuronNet Co. Ltd. R&D Center
167000068B AirRunner Technologies, Inc.
167100068C 3Com Corporation
167200068D SANgate Systems
167300068E HID Corporation
167400068F Telemonitor, Inc.
1675000690 Euracom Communication GmbH
1676000691 PT Inovacao
1677000692 Intruvert Networks, Inc.
1678000693 Flexus Computer Technology, Inc.
1679000694 Mobillian Corporation
1680000695 Ensure Technologies, Inc.
1681000696 Advent Networks
1682000697 R & D Center
1683000698 egnite Software GmbH
1684000699 Vida Design Co.
168500069A e & Tel
168600069B AVT Audio Video Technologies GmbH
168700069C Transmode Systems AB
168800069D Petards Mobile Intelligence
168900069E UNIQA, Inc.
169000069F Kuokoa Networks
16910006A0 Mx Imaging
16920006A1 Celsian Technologies, Inc.
16930006A2 Microtune, Inc.
16940006A3 Bitran Corporation
16950006A4 INNOWELL Corp.
16960006A5 PINON Corp.
16970006A6 Artistic Licence (UK) Ltd
16980006A7 Primarion
16990006A8 KC Technology, Inc.
17000006A9 Universal Instruments Corp.
17010006AA Miltope Corporation
17020006AB W-Link Systems, Inc.
17030006AC Intersoft Co.
17040006AD KB Electronics Ltd.
17050006AE Himachal Futuristic Communications Ltd
17060006B0 Comtech EF Data Corp.
17070006B1 Sonicwall
17080006B2 Linxtek Co.
17090006B3 Diagraph Corporation
17100006B4 Vorne Industries, Inc.
17110006B5 Luminent, Inc.
17120006B6 Nir-Or Israel Ltd.
17130006B7 TELEM GmbH
17140006B8 Bandspeed Pty Ltd
17150006B9 A5TEK Corp.
17160006BA Westwave Communications
17170006BB ATI Technologies Inc.
17180006BC Macrolink, Inc.
17190006BD BNTECHNOLOGY Co., Ltd.
17200006BE Baumer Optronic GmbH
17210006BF Accella Technologies Co., Ltd.
17220006C0 United Internetworks, Inc.
17230006C1 CISCO SYSTEMS, INC.
17240006C2 Smartmatic Corporation
17250006C3 Schindler Elevators Ltd.
17260006C4 Piolink Inc.
17270006C5 INNOVI Technologies Limited
17280006C6 lesswire AG
17290006C7 RFNET Technologies Pte Ltd (S)
17300006C8 Sumitomo Metal Micro Devices, Inc.
17310006C9 Technical Marketing Research, Inc.
17320006CA American Computer & Digital Components, Inc. (ACDC)
17330006CB Jotron Electronics A/S
17340006CC JMI Electronics Co., Ltd.
17350006CD CreoScitex Corporation Ltd.
17360006CE DATENO
17370006CF Thales Avionics In-Flight Systems, LLC
17380006D0 Elgar Electronics Corp.
17390006D1 Tahoe Networks, Inc.
17400006D2 Tundra Semiconductor Corp.
17410006D3 Alpha Telecom, Inc. U.S.A.
17420006D4 Interactive Objects, Inc.
17430006D5 Diamond Systems Corp.
17440006D6 Cisco Systems, Inc.
17450006D7 Cisco Systems, Inc.
17460006D8 Maple Optical Systems
17470006D9 IPM-Net S.p.A.
17480006DA ITRAN Communications Ltd.
17490006DB ICHIPS Co., Ltd.
17500006DC Syabas Technology (Amquest)
17510006DD AT & T Laboratories - Cambridge Ltd
17520006DE Flash Technology
17530006DF AIDONIC Corporation
17540006E0 MAT Co., Ltd.
17550006E1 Techno Trade s.a
17560006E2 Ceemax Technology Co., Ltd.
17570006E3 Quantitative Imaging Corporation
17580006E4 Citel Technologies Ltd.
17590006E5 Fujian Newland Computer Ltd. Co.
17600006E6 DongYang Telecom Co., Ltd.
17610006E7 Bit Blitz Communications Inc.
17620006E8 Optical Network Testing, Inc.
17630006E9 Intime Corp.
17640006EA ELZET80 Mikrocomputer GmbH&Co. KG
17650006EB Global Data
17660006EC M/A COM Private Radio System Inc.
17670006ED Inara Networks
17680006EE Shenyang Neu-era Information & Technology Stock Co., Ltd
17690006EF Maxxan Systems, Inc.
17700006F0 Digeo, Inc.
17710006F1 Optillion
17720006F2 Platys Communications
17730006F3 AcceLight Networks
17740006F4 Prime Electronics & Satellitics Inc.
17750006F9 Mitsui Zosen Systems Research Inc.
17760006FA IP SQUARE Co, Ltd.
17770006FB Hitachi Printing Solutions, Ltd.
17780006FC Fnet Co., Ltd.
17790006FD Comjet Information Systems Corp.
17800006FE Celion Networks, Inc.
17810006FF Sheba Systems Co., Ltd.
1782000700 Zettamedia Korea
1783000701 RACAL-DATACOM
1784000702 Varian Medical Systems
1785000703 CSEE Transport
1786000705 Endress & Hauser GmbH & Co
1787000706 Sanritz Corporation
1788000707 Interalia Inc.
1789000708 Bitrage Inc.
1790000709 Westerstrand Urfabrik AB
179100070A Unicom Automation Co., Ltd.
179200070B Octal, SA
179300070C SVA-Intrusion.com Co. Ltd.
179400070D Cisco Systems Inc.
179500070E Cisco Systems Inc.
179600070F Fujant, Inc.
1797000710 Adax, Inc.
1798000711 Acterna
1799000712 JAL Information Technology
1800000713 IP One, Inc.
1801000714 Brightcom
1802000715 General Research of Electronics, Inc.
1803000716 J & S Marine Ltd.
1804000717 Wieland Electric GmbH
1805000718 iCanTek Co., Ltd.
1806000719 Mobiis Co., Ltd.
180700071A Finedigital Inc.
180800071B Position Technology Inc.
180900071C AT&T Fixed Wireless Services
181000071D Satelsa Sistemas Y Aplicaciones De Telecomunicaciones, S.A.
181100071E Tri-M Engineering / Nupak Dev. Corp.
181200071F European Systems Integration
1813000720 Trutzschler GmbH & Co. KG
1814000721 Formac Elektronik GmbH
1815000722 Nielsen Media Research
1816000723 ELCON Systemtechnik GmbH
1817000724 Telemax Co., Ltd.
1818000725 Bematech International Corp.
1819000727 Zi Corporation (HK) Ltd.
1820000728 Neo Telecom
1821000729 Kistler Instrumente AG
182200072A Innovance Networks
182300072B Jung Myung Telecom Co., Ltd.
182400072C Fabricom
182500072D CNSystems
182600072E North Node AB
182700072F Instransa, Inc.
1828000730 Hutchison OPTEL Telecom Technology Co., Ltd.
1829000731 Spiricon, Inc.
1830000732 AAEON Technology Inc.
1831000733 DANCONTROL Engineering
1832000734 ONStor, Inc.
1833000735 Flarion Technologies, Inc.
1834000736 Data Video Technologies Co., Ltd.
1835000737 Soriya Co. Ltd.
1836000738 Young Technology Co., Ltd.
1837000739 Motion Media Technology Ltd.
183800073A Inventel Systemes
183900073B Tenovis GmbH & Co KG
184000073C Telecom Design
184100073D Nanjing Postel Telecommunications Co., Ltd.
184200073E China Great-Wall Computer Shenzhen Co., Ltd.
184300073F Woojyun Systec Co., Ltd.
1844000740 Melco Inc.
1845000741 Sierra Automated Systems
1846000742 Current Technologies
1847000743 Chelsio Communications
1848000744 Unico, Inc.
1849000745 Radlan Computer Communications Ltd.
1850000746 Interlink BT, LLC
1851000747 Mecalc
1852000748 The Imaging Source Europe
1853000749 CENiX Inc.
185400074A Carl Valentin GmbH
185500074B Daihen Corporation
185600074C Beicom Inc.
185700074D Zebra Technologies Corp.
185800074E Naughty boy co., Ltd.
185900074F Cisco Systems, Inc.
1860000750 Cisco Systems, Inc.
1861000751 m.u.t. - GmbH
1862000752 Rhythm Watch Co., Ltd.
1863000753 Beijing Qxcomm Technology Co., Ltd.
1864000754 Xyterra Computing, Inc.
1865000755 Lafon SA
1866000756 Juyoung Telecom
1867000757 Topcall International AG
1868000758 Dragonwave
1869000759 Boris Manufacturing Corp.
187000075A Air Products and Chemicals, Inc.
187100075B Gibson Guitars
187200075C ENCAD, Inc.
187300075D Celleritas Inc.
187400075E Pulsar Technologies, Inc.
187500075F VCS Video Communication Systems AG
1876000760 TOMIS Information & Telecom Corp.
1877000761 Logitech SA
1878000762 Group Sense Limited
1879000763 Sunniwell Cyber Tech. Co., Ltd.
1880000764 YoungWoo Telecom Co. Ltd.
1881000765 Jade Quantum Technologies, Inc.
1882000766 Chou Chin Industrial Co., Ltd.
1883000767 Yuxing Electronics Company Limited
1884000768 Danfoss A/S
1885000769 Italiana Macchi SpA
188600076A NEXTEYE Co., Ltd.
188700076B Stralfors AB
188800076C Daehanet, Inc.
188900076D Flexlight Networks
189000076E Sinetica Corporation Ltd.
189100076F Synoptics Limited
1892000770 Locusnetworks Corporation
1893000771 Embedded System Corporation
1894000772 Alcatel Shanghai Bell Co., Ltd.
1895000773 Ascom Powerline Communications Ltd.
1896000774 GuangZhou Thinker Technology Co. Ltd.
1897000775 Valence Semiconductor, Inc.
1898000776 Federal APD
1899000777 Motah Ltd.
1900000778 GERSTEL GmbH & Co. KG
1901000779 Sungil Telecom Co., Ltd.
190200077A Infoware System Co., Ltd.
190300077B Millimetrix Broadband Networks
190400077C OnTime Networks
190500077E Elrest GmbH
190600077F J Communications Co., Ltd.
1907000780 Bluegiga Technologies OY
1908000781 Itron Inc.
1909000782 Nauticus Networks, Inc.
1910000783 SynCom Network, Inc.
1911000784 Cisco Systems Inc.
1912000785 Cisco Systems Inc.
1913000786 Wireless Networks Inc.
1914000787 Idea System Co., Ltd.
1915000788 Clipcomm, Inc.
1916000789 Eastel Systems Corporation
191700078A Mentor Data System Inc.
191800078B Wegener Communications, Inc.
191900078C Elektronikspecialisten i Borlange AB
192000078D NetEngines Ltd.
192100078E Garz & Friche GmbH
192200078F Emkay Innovative Products
1923000790 Tri-M Technologies (s) Limited
1924000791 International Data Communications, Inc.
1925000792 Suetron Electronic GmbH
1926000794 Simple Devices, Inc.
1927000795 Elitegroup Computer System Co. (ECS)
1928000796 LSI Systems, Inc.
1929000797 Netpower Co., Ltd.
1930000798 Selea SRL
1931000799 Tipping Point Technologies, Inc.
193200079A SmartSight Networks Inc.
193300079B Aurora Networks
193400079C Golden Electronics Technology Co., Ltd.
193500079D Musashi Co., Ltd.
193600079E Ilinx Co., Ltd.
193700079F Action Digital Inc.
19380007A0 e-Watch Inc.
19390007A1 VIASYS Healthcare GmbH
19400007A2 Opteon Corporation
19410007A3 Ositis Software, Inc.
19420007A4 GN Netcom Ltd.
19430007A5 Y.D.K Co. Ltd.
19440007A6 Home Automation, Inc.
19450007A7 A-Z Inc.
19460007A8 Haier Group Technologies Ltd.
19470007A9 Novasonics
19480007AA Quantum Data Inc.
19490007AC Eolring
19500007AD Pentacon GmbH Foto-und Feinwerktechnik
19510007AE Layer N Networks
19520007AF N-Tron Corp.
19530007B0 Office Details, Inc.
19540007B1 Equator Technologies
19550007B2 Transaccess S.A.
19560007B3 Cisco Systems Inc.
19570007B4 Cisco Systems Inc.
19580007B5 Any One Wireless Ltd.
19590007B6 Telecom Technology Ltd.
19600007B7 Samurai Ind. Prods Eletronicos Ltda
19610007B8 American Predator Corp.
19620007B9 Ginganet Corporation
19630007BA Xebeo Communications, Inc.
19640007BB Candera Inc.
19650007BC Identix Inc.
19660007BD Radionet Ltd.
19670007BE DataLogic SpA
19680007BF Armillaire Technologies, Inc.
19690007C0 NetZerver Inc.
19700007C1 Overture Networks, Inc.
19710007C2 Netsys Telecom
19720007C3 Cirpack
19730007C4 JEAN Co. Ltd.
19740007C5 Gcom, Inc.
19750007C6 VDS Vosskuhler GmbH
19760007C7 Synectics Systems Limited
19770007C8 Brain21, Inc.
19780007C9 Technol Seven Co., Ltd.
19790007CA Creatix Polymedia Ges Fur Kommunikaitonssysteme
19800007CB Freebox SA
19810007CC Kaba Benzing GmbH
19820007CD NMTEL Co., Ltd.
19830007CE Cabletime Limited
19840007CF Anoto AB
19850007D0 Automat Engenharia de Automaoa Ltda.
19860007D1 Spectrum Signal Processing Inc.
19870007D2 Logopak Systeme
19880007D3 Stork Digital Imaging B.V.
19890007D4 Zhejiang Yutong Network Communication Co Ltd.
19900007D5 3e Technologies Int;., Inc.
19910007D6 Commil Ltd.
19920007D7 Caporis Networks AG
19930007D8 Hitron Systems Inc.
19940007D9 Splicecom
19950007DA Neuro Telecom Co., Ltd.
19960007DB Kirana Networks, Inc.
19970007DC Atek Co, Ltd.
19980007DD Cradle Technologies
19990007DE eCopilt AB
20000007DF Vbrick Systems Inc.
20010007E0 Palm Inc.
20020007E1 WIS Communications Co. Ltd.
20030007E2 Bitworks, Inc.
20040007E3 Navcom Technology, Inc.
20050007E4 SoftRadio Co., Ltd.
20060007E5 Coup Corporation
20070007E6 edgeflow Canada Inc.
20080007E7 FreeWave Technologies
20090007E8 St. Bernard Software
20100007E9 Intel Corporation
20110007EA Massana, Inc.
20120007EB Cisco Systems Inc.
20130007EC Cisco Systems Inc.
20140007ED Altera Corporation
20150007EE telco Informationssysteme GmbH
20160007EF Lockheed Martin Tactical Systems
20170007F0 LogiSync Corporation
20180007F1 TeraBurst Networks Inc.
20190007F2 IOA Corporation
20200007F3 Think Engine Networks
20210007F4 Eletex Co., Ltd.
20220007F5 Bridgeco Co AG
20230007F6 Qqest Software Systems
20240007F7 Galtronics
20250007F8 ITDevices, Inc.
20260007F9 Phonetics, Inc.
20270007FA ITT Co., Ltd.
20280007FB Giga Stream UMTS Technologies GmbH
20290007FC Adept Systems Inc.
20300007FD LANergy Ltd.
20310007FE Rigaku Corporation
20320007FF Gluon Networks
2033000800 MULTITECH SYSTEMS, INC.
2034000801 HighSpeed Surfing Inc.
2035000802 Compaq Computer Corporation
2036000803 Cos Tron
2037000804 ICA Inc.
2038000805 Techno-Holon Corporation
2039000806 Raonet Systems, Inc.
2040000807 Access Devices Limited
2041000808 PPT Vision, Inc.
2042000809 Systemonic AG
204300080A Espera-Werke GmbH
204400080B Birka BPA Informationssystem AB
204500080C VDA elettronica SrL
204600080D Toshiba
204700080E Motorola, BCS
204800080F Proximion Fiber Optics AB
2049000810 Key Technology, Inc.
2050000811 VOIX Corporation
2051000812 GM-2 Corporation
2052000813 Diskbank, Inc.
2053000814 TIL Technologies
2054000815 CATS Co., Ltd.
2055000816 Bluetags A/S
2056000817 EmergeCore Networks LLC
2057000818 Pixelworks, Inc.
2058000819 Banksys
205900081A Sanrad Intelligence Storage Communications (2000) Ltd.
206000081B Windigo Systems
206100081C @pos.com
206200081D Ipsil, Incorporated
206300081E Repeatit AB
206400081F Pou Yuen Tech Corp. Ltd.
2065000820 Cisco Systems Inc.
2066000821 Cisco Systems Inc.
2067000822 InPro Comm
2068000823 Texa Corp.
2069000824 Promatek Industries Ltd.
2070000825 Acme Packet
2071000826 Colorado Med Tech
2072000827 Pirelli Cables & Systems
2073000828 Koei Engineering Ltd.
2074000829 Aval Nagasaki Corporation
207500082A Powerwallz Network Security
207600082B Wooksung Electronics, Inc.
207700082C Homag AG
207800082D Indus Teqsite Private Limited
207900082E Multitone Electronics PLC
208000084E DivergeNet, Inc.
208100084F Qualstar Corporation
2082000850 Arizona Instrument Corp.
2083000851 Canadian Bank Note Company, Ltd.
2084000852 Davolink Co. Inc.
2085000853 Schleicher GmbH & Co. Relaiswerke KG
2086000854 Netronix, Inc.
2087000855 NASA-Goddard Space Flight Center
2088000856 Gamatronic Electronic Industries Ltd.
2089000857 Polaris Networks, Inc.
2090000858 Novatechnology Inc.
2091000859 ShenZhen Unitone Electronics Co., Ltd.
209200085A IntiGate Inc.
209300085B Hanbit Electronics Co., Ltd.
209400085C Shanghai Dare Technologies Co. Ltd.
209500085D Aastra
209600085E PCO AG
209700085F Picanol N.V.
2098000860 LodgeNet Entertainment Corp.
2099000861 SoftEnergy Co., Ltd.
2100000862 NEC Eluminant Technologies, Inc.
2101000863 Entrisphere Inc.
2102000864 Fasy S.p.A.
2103000865 JASCOM CO., LTD
2104000866 DSX Access Systems, Inc.
2105000867 Uptime Devices
2106000868 PurOptix
2107000869 Command-e Technology Co.,Ltd.
210800086A Industrie Technik IPS GmbH
210900086B MIPSYS
211000086C Plasmon LMS
211100086D Missouri FreeNet
211200086E Hyglo AB
211300086F Resources Computer Network Ltd.
2114000870 Rasvia Systems, Inc.
2115000871 NORTHDATA Co., Ltd.
2116000872 Sorenson Technologies, Inc.
2117000873 DAP Design B.V.
2118000874 Dell Computer Corp.
2119000875 Acorp Electronics Corp.
2120000876 SDSystem
2121000877 Liebert HIROSS S.p.A.
2122000878 Benchmark Storage Innovations
2123000879 CEM Corporation
212400087A Wipotec GmbH
212500087B RTX Telecom A/S
212600087C Cisco Systems, Inc.
212700087D Cisco Systems Inc.
212800087E Bon Electro-Telecom Inc.
212900087F SPAUN electronic GmbH & Co. KG
2130000880 BroadTel Canada Communications inc.
2131000881 DIGITAL HANDS CO.,LTD.
2132000882 SIGMA CORPORATION
2133000883 Hewlett-Packard Company
2134000884 Index Braille AB
2135000885 EMS Dr. Thomas Wuensche
2136000886 Hansung Teliann, Inc.
2137000887 Maschinenfabrik Reinhausen GmbH
2138000888 OULLIM Information Technology Inc,.
2139000889 Echostar Technologies Corp
214000088A Minds@Work
214100088B Tropic Networks Inc.
214200088C Quanta Network Systems Inc.
214300088D Sigma-Links Inc.
214400088E Nihon Computer Co., Ltd.
214500088F ADVANCED DIGITAL TECHNOLOGY
2146000890 AVILINKS SA
2147000891 Lyan Inc.
2148000892 EM Solutions
2149000894 InnoVISION Multimedia Ltd.
2150000895 DIRC Technologie GmbH & Co.KG
2151000896 Printronix, Inc.
2152000897 Quake Technologies
2153000898 Gigabit Optics Corporation
2154000899 Netbind, Inc.
215500089A Alcatel Microelectronics
215600089B ICP Electronics Inc.
215700089C Elecs Industry Co., Ltd.
215800089D UHD-Elektronik
215900089E Beijing Enter-Net co.LTD
216000089F EFM Networks
21610008A0 Stotz Feinmesstechnik GmbH
21620008A1 CNet Technology Inc.
21630008A2 ADI Engineering, Inc.
21640008A3 Cisco Systems
21650008A4 Cisco Systems
21660008A5 Peninsula Systems Inc.
21670008A6 Multiware & Image Co., Ltd.
21680008A7 iLogic Inc.
21690008A8 Systec Co., Ltd.
21700008A9 SangSang Technology, Inc.
21710008AA KARAM
21720008AB EnerLinx.com, Inc.
21730008AD Toyo-Linx Co., Ltd.
21740008AE Packetfront
21750008AF Novatec Corporation
21760008B0 BKtel communications GmbH
21770008B1 ProQuent Systems
21780008B2 SHENZHEN COMPASS TECHNOLOGY DEVELOPMENT CO.,LTD
21790008B3 Fastwel
21800008B4 SYSPOL
21810008B5 TAI GUEN ENTERPRISE CO., LTD
21820008B6 RouteFree, Inc.
21830008B7 HIT Incorporated
21840008B8 E.F. Johnson
21850008B9 KAON MEDIA Co., Ltd.
21860008BA Erskine Systems Ltd
21870008BB NetExcell
21880008BC Ilevo AB
21890008BD TEPG-US
21900008BE XENPAK MSA Group
21910008BF Aptus Elektronik AB
21920008C0 ASA SYSTEMS
21930008C1 Avistar Communications Corporation
21940008C2 Cisco Systems
21950008C3 Contex A/S
21960008C4 Hikari Co.,Ltd.
21970008C5 Liontech Co., Ltd.
21980008C6 Philips Consumer Communications
21990008C7 COMPAQ COMPUTER CORPORATION
22000008C8 Soneticom, Inc.
22010008C9 TechniSat Digital GmbH
22020008CA TwinHan Technology Co.,Ltd
22030008CB Zeta Broadband Inc.
22040008CC Remotec, Inc.
22050008CD With-Net Inc
22060008CF Nippon Koei Power Systems Co., Ltd.
22070008D0 Musashi Engineering Co., LTD.
22080008D1 KAREL INC.
22090008D2 ZOOM Networks Inc.
22100008D3 Hercules Technologies S.A.
22110008D4 IneoQuest Technologies, Inc
22120008D5 Vanguard Managed Solutions
22130008D6 HASSNET Inc.
22140008D7 HOW CORPORATION
22150008D8 Dowkey Microwave
22160008D9 Mitadenshi Co.,LTD
22170008DA SofaWare Technologies Ltd.
22180008DB Corrigent Systems
22190008DC Wiznet
22200008DD Telena Communications, Inc.
22210008DE 3UP Systems
22220008DF Alistel Inc.
22230008E0 ATO Technology Ltd.
22240008E1 Barix AG
22250008E2 Cisco Systems
22260008E3 Cisco Systems
22270008E4 Envenergy Inc
22280008E5 IDK Corporation
22290008E6 Littlefeet
22300008E7 SHI ControlSystems,Ltd.
22310008E8 Excel Master Ltd.
22320008E9 NextGig
22330008EA Motion Control Engineering, Inc
22340008EB ROMWin Co.,Ltd.
22350008EC Zonu, Inc.
22360008ED ST&T Instrument Corp.
22370008EE Logic Product Development
22380008EF DIBAL,S.A.
22390008F0 Next Generation Systems, Inc.
22400008F1 Voltaire
22410008F2 C&S Technology
22420008F3 WANY
22430008F4 Bluetake Technology Co., Ltd.
22440008F5 YESTECHNOLOGY Co.,Ltd.
22450008F6 SUMITOMO ELECTRIC HIGHTECHS.co.,ltd.
22460008F7 Hitachi Ltd, Semiconductor & Integrated Circuits Gr
22470008F8 Guardall Ltd
22480008F9 Padcom, Inc.
22490008FA Karl E.Brinkmann GmbH
22500008FB SonoSite, Inc.
22510008FC Gigaphoton Inc.
22520008FD BlueKorea Co., Ltd.
22530008FE UNIK C&C Co.,Ltd.
22540008FF Trilogy Broadcast (Holdings) Ltd
2255000900 TMT
2256000901 Shenzhen Shixuntong Information & Technoligy Co
2257000902 Redline Communications Inc.
2258000903 Panasas, Inc
2259000904 MONDIAL electronic
2260000905 iTEC Technologies Ltd.
2261000906 Esteem Networks
2262000907 Chrysalis Development
2263000908 VTech Technology Corp.
2264000909 Telenor Connect A/S
226500090A SnedFar Technology Co., Ltd.
226600090B MTL Instruments PLC
226700090C Mayekawa Mfg. Co. Ltd.
226800090D LEADER ELECTRONICS CORP.
226900090E Helix Technology Inc.
227000090F Fortinet Inc.
2271000910 Simple Access Inc.
2272000911 Cisco Systems
2273000912 Cisco Systems
2274000914 COMPUTROLS INC.
2275000915 CAS Corp.
2276000916 Listman Home Technologies, Inc.
2277000917 WEM Technology Inc
2278000918 SAMSUNG TECHWIN CO.,LTD
2279000919 MDS Gateways
228000091A Macat Optics & Electronics Co., Ltd.
228100091B Digital Generation Inc.
228200091C CacheVision, Inc
228300091D Proteam Computer Corporation
228400091E Firstech Technology Corp.
228500091F A&D Co., Ltd.
2286000920 EpoX COMPUTER CO.,LTD.
2287000921 Planmeca Oy
2288000922 Touchless Sensor Technology AG
2289000923 Heaman System Co., Ltd
2290000924 Telebau GmbH
2291000925 VSN Systemen BV
2292000926 YODA COMMUNICATIONS, INC.
2293000927 TOYOKEIKI CO.,LTD.
2294000928 Telecore Inc
2295000929 Sanyo Industries (UK) Limited
229600092A MYTECS Co.,Ltd.
229700092B iQstor Networks, Inc.
229800092C Hitpoint Inc.
229900092D High Tech Computer, Corp.
230000092E B&Tech System Inc.
230100092F Akom Technology Corporation
2302000930 AeroConcierge Inc.
2303000931 Future Internet, Inc.
2304000932 Omnilux
2305000933 OPTOVALLEY Co. Ltd.
2306000934 Dream-Multimedia-Tv GmbH
2307000935 Sandvine Incorporated
2308000936 Ipetronik GmbH & Co.KG
2309000937 Inventec Appliance Corp
2310000938 Allot Communications
2311000939 ShibaSoku Co.,Ltd.
231200093A Molex Fiber Optics
231300093B HYUNDAI NETWORKS INC.
231400093C Jacques Technologies P/L
231500093D Newisys,Inc.
231600093E C&I Technologies
231700093F Double-Win Enterpirse CO., LTD
2318000940 AGFEO GmbH & Co. KG
2319000941 Allied Telesis K.K.
2320000942 CRESCO, LTD.
2321000943 Cisco Systems
2322000944 Cisco Systems
2323000945 Palmmicro Communications Inc
2324000946 Cluster Labs GmbH
2325000947 Aztek, Inc.
2326000948 Vista Control Systems, Corp.
2327000949 Glyph Technologies Inc.
232800094A Homenet Communications
232900094B FillFactory NV
233000094C Communication Weaver Co.,Ltd.
233100094D Braintree Communications Pty Ltd
233200094E BARTECH SYSTEMS INTERNATIONAL, INC
233300094F elmegt GmbH & Co. KG
2334000950 Independent Storage Corporation
2335000951 Apogee Instruments, Inc
2336000952 Auerswald GmbH & Co. KG
2337000953 Linkage System Integration Co.Ltd.
2338000954 AMiT spol. s. r. o.
2339000955 Young Generation International Corp.
2340000956 Network Systems Group, Ltd. (NSG)
2341000957 Supercaller, Inc.
2342000958 INTELNET S.A.
2343000959 Sitecsoft
234400095A RACEWOOD TECHNOLOGY
234500095B Netgear, Inc.
234600095C Philips Medical Systems - Cardiac and Monitoring Systems (CM
234700095D Dialogue Technology Corp.
234800095E Masstech Group Inc.
234900095F Telebyte, Inc.
2350000960 YOZAN Inc.
2351000961 Switchgear and Instrumentation Ltd
2352000962 Filetrac AS
2353000963 Dominion Lasercom Inc.
2354000964 Hi-Techniques
2355000966 Thales Navigation
2356000967 Tachyon, Inc
2357000968 TECHNOVENTURE, INC.
2358000969 Meret Optical Communications
235900096A Cloverleaf Communications Inc.
236000096B IBM Corporation
236100096C Imedia Semiconductor Corp.
236200096D Powernet Technologies Corp.
236300096E GIANT ELECTRONICS LTD.
236400096F Beijing Zhongqing Elegant Tech. Corp.,Limited
2365000970 Vibration Research Corporation
2366000971 Time Management, Inc.
2367000972 Securebase,Inc
2368000973 Lenten Technology Co., Ltd.
2369000974 Innopia Technologies, Inc.
2370000975 fSONA Communications Corporation
2371000976 Datasoft ISDN Systems GmbH
2372000977 Brunner Elektronik AG
2373000978 AIJI System Co., Ltd.
2374000979 Advanced Television Systems Committee, Inc.
237500097A Louis Design Labs.
237600097B Cisco Systems
237700097C Cisco Systems
237800097D SecWell Networks Oy
237900097E IMI TECHNOLOGY CO., LTD
238000097F Vsecure 2000 LTD.
2381000980 Power Zenith Inc.
2382000981 Newport Networks
2383000982 Loewe Opta GmbH
2384000983 Gvision Incorporated
2385000984 MyCasa Network Inc.
2386000985 Auto Telecom Company
2387000986 Metalink LTD.
2388000987 NISHI NIPPON ELECTRIC WIRE & CABLE CO.,LTD.
2389000988 Nudian Electron Co., Ltd.
2390000989 VividLogic Inc.
239100098A EqualLogic Inc
239200098B Entropic Communications, Inc.
239300098C Possio AB
239400098D DCT Ltd (Digital Communication Technologies Ltd)
239500098E ipcas GmbH
239600098F Cetacean Networks
2397000990 ACKSYS Communications & systems
2398000991 GE Fanuc Automation Manufacturing, Inc.
2399000992 InterEpoch Technology,INC.
2400000993 Visteon Corporation
2401000994 Cronyx Engineering
2402000995 Castle Technology Ltd
2403000996 RDI
2404000997 Nortel Networks
2405000998 Capinfo Company Limited
2406000999 CP GEORGES RENAULT
240700099A ELMO COMPANY, LIMITED
240800099B Western Telematic Inc.
240900099C Naval Research Laboratory
241000099D Haliplex Communications
241100099E Testech, Inc.
241200099F VIDEX INC.
24130009A0 Microtechno Corporation
24140009A1 Telewise Communications, Inc.
24150009A2 Interface Co., Ltd.
24160009A3 Leadfly Techologies Corp. Ltd.
24170009A4 HARTEC Corporation
24180009A5 HANSUNG ELETRONIC INDUSTRIES DEVELOPMENT CO., LTD
24190009A6 Ignis Optics, Inc.
24200009A7 Bang & Olufsen A/S
24210009A8 Eastmode Pte Ltd
24220009A9 Ikanos Communications
24230009AA Data Comm for Business, Inc.
24240009AB Netcontrol Oy
24250009AC LANVOICE
24260009AD HYUNDAI SYSCOMM, INC.
24270009AE OKANO ELECTRIC CO.,LTD
24280009AF e-generis
24290009B0 Onkyo Corporation
24300009B1 Kanematsu Electronics, Ltd.
24310009B2 L&F Inc.
24320009B3 MCM Systems Ltd
24330009B4 KISAN TELECOM CO., LTD.
24340009B5 3J Tech. Co., Ltd.
24350009B6 Cisco Systems
24360009B7 Cisco Systems
24370009B8 Entise Systems
24380009B9 Action Imaging Solutions
24390009BA MAKU Informationstechik GmbH
24400009BB MathStar, Inc.
24410009BC Digital Safety Technologies Inc.
24420009BD Epygi Technologies, Ltd.
24430009BE Mamiya-OP Co.,Ltd.
24440009BF Nintendo Co.,Ltd.
24450009C0 6WIND
24460009C1 PROCES-DATA A/S
24470009C3 NETAS
24480009C4 Medicore Co., Ltd
24490009C5 KINGENE Technology Corporation
24500009C6 Visionics Corporation
24510009C7 Movistec
24520009C8 SINAGAWA TSUSHIN KEISOU SERVICE
24530009C9 BlueWINC Co., Ltd.
24540009CA iMaxNetworks(Shenzhen)Limited.
24550009CB HBrain
24560009CC Moog GmbH
24570009CD HUDSON SOFT CO.,LTD.
24580009CE SpaceBridge Semiconductor Corp.
24590009CF iAd GmbH
24600009D0 Versatel Networks
24610009D1 SERANOA NETWORKS INC
24620009D2 Mai Logic Inc.
24630009D3 Western DataCom Co., Inc.
24640009D4 Transtech Networks
24650009D5 Signal Communication, Inc.
24660009D6 KNC One GmbH
24670009D7 DC Security Products
24680009D9 Neoscale Systems, Inc
24690009DA Control Module Inc.
24700009DB eSpace
24710009DC Galaxis Technology AG
24720009DD Mavin Technology Inc.
24730009DE Samjin Information & Communications Co., Ltd.
24740009DF Vestel Komunikasyon Sanayi ve Ticaret A.S.
24750009E0 XEMICS S.A.
24760009E1 Gemtek Technology Co., Ltd.
24770009E2 Sinbon Electronics Co., Ltd.
24780009E3 Angel Iglesias S.A.
24790009E4 K Tech Infosystem Inc.
24800009E5 Hottinger Baldwin Messtechnik GmbH
24810009E6 Cyber Switching Inc.
24820009E7 ADC Techonology
24830009E8 Cisco Systems
24840009E9 Cisco Systems
24850009EA YEM Inc.
24860009EB HuMANDATA LTD.
24870009EC Daktronics, Inc.
24880009ED CipherOptics
24890009EE MEIKYO ELECTRIC CO.,LTD
24900009EF Vocera Communications
24910009F0 Shimizu Technology Inc.
24920009F1 Yamaki Electric Corporation
24930009F2 Cohu, Inc., Electronics Division
24940009F3 WELL Communication Corp.
24950009F4 Alcon Laboratories, Inc.
24960009F5 Emerson Network Power Co.,Ltd
24970009F6 Shenzhen Eastern Digital Tech Ltd.
24980009F7 SED, a division of Calian
24990009F8 UNIMO TECHNOLOGY CO., LTD.
25000009F9 ART JAPAN CO., LTD.
25010009FB Philips Medizinsysteme Boeblingen GmbH
25020009FC IPFLEX Inc.
25030009FD Ubinetics Limited
25040009FE Daisy Technologies, Inc.
25050009FF X.net 2000 GmbH
2506000A00 Mediatek Corp.
2507000A01 SOHOware, Inc.
2508000A02 ANNSO CO., LTD.
2509000A03 ENDESA SERVICIOS, S.L.
2510000A04 3Com Europe Ltd
2511000A05 Widax Corp.
2512000A06 Teledex LLC
2513000A07 WebWayOne Ltd
2514000A08 ALPINE ELECTRONICS, INC.
2515000A09 TaraCom Integrated Products, Inc.
2516000A0A SUNIX Co., Ltd.
2517000A0B Sealevel Systems, Inc.
2518000A0C Scientific Research Corporation
2519000A0D MergeOptics GmbH
2520000A0E Invivo Research Inc.
2521000A0F Ilryung Telesys, Inc
2522000A10 FAST media integrations AG
2523000A11 ExPet Technologies, Inc
2524000A12 Azylex Technology, Inc
2525000A13 Silent Witness
2526000A14 TECO a.s.
2527000A15 Silicon Data, Inc
2528000A16 Lassen Research
2529000A17 NESTAR COMMUNICATIONS, INC
2530000A18 Vichel Inc.
2531000A19 Valere Power, Inc.
2532000A1A Imerge Ltd
2533000A1B Stream Labs
2534000A1C Bridge Information Co., Ltd.
2535000A1D Optical Communications Products Inc.
2536000A1E Red-M (Communications) Limited
2537000A1F ART WARE Telecommunication Co., Ltd.
2538000A20 SVA Networks, Inc.
2539000A21 Integra Telecom Co. Ltd
2540000A22 Amperion Inc
2541000A23 Parama Networks Inc
2542000A24 Octave Communications
2543000A25 CERAGON NETWORKS
2544000A26 CEIA S.p.A.
2545000A27 Apple Computer, Inc.
2546000A28 Motorola
2547000A29 Pan Dacom Networking AG
2548000A2A QSI Systems Inc.
2549000A2B Etherstuff
2550000A2C Active Tchnology Corporation
2551000A2E MAPLE NETWORKS CO., LTD
2552000A2F Artnix Inc.
2553000A30 Johnson Controls-ASG
2554000A31 HCV Wireless
2555000A32 Xsido Corporation
2556000A33 Sierra Logic, Inc.
2557000A34 Identicard Systems Incorporated
2558000A35 Xilinx
2559000A36 Synelec Telecom Multimedia
2560000A37 Procera Networks, Inc.
2561000A38 Netlock Technologies, Inc.
2562000A39 LoPA Information Technology
2563000A3A J-THREE INTERNATIONAL Holding Co., Ltd.
2564000A3B GCT Semiconductor, Inc
2565000A3C Enerpoint Ltd.
2566000A3D Elo Sistemas Eletronicos S.A.
2567000A3E EADS Telecom
2568000A3F Data East Corporation
2569000A40 Crown Audio
2570000A41 Cisco Systems
2571000A42 Cisco Systems
2572000A43 Chunghwa Telecom Co., Ltd.
2573000A44 Avery Dennison Deutschland GmbH
2574000A45 Audio-Technica Corp.
2575000A46 ARO Controls SAS
2576000A47 Allied Vision Technologies
2577000A48 Albatron Technology
2578000A49 Acopia Networks
2579000A4A Targa Systems Ltd.
2580000A4B DataPower Technology, Inc.
2581000A4C Molecular Devices Corporation
2582000A4D Noritz Corporation
2583000A4E UNITEK Electronics INC.
2584000A4F Brain Boxes Limited
2585000A50 REMOTEK CORPORATION
2586000A51 GyroSignal Technology Co., Ltd.
2587000A52 Venitek Co. Ltd.
2588000A53 Intronics, Incorporated
2589000A54 Laguna Hills, Inc.
2590000A55 MARKEM Corporation
2591000A56 HITACHI Maxell Ltd.
2592000A57 Hewlett-Packard Company - Standards
2593000A58 Ingenieur-Buero Freyer & Siegel
2594000A59 HW server
2595000A5A GreenNET Technologies Co.,Ltd.
2596000A5B Power-One as
2597000A5C Carel s.p.a.
2598000A5D PUC Founder (MSC) Berhad
2599000A5E 3COM Corporation
2600000A5F almedio inc.
2601000A60 Autostar Technology Pte Ltd
2602000A61 Cellinx Systems Inc.
2603000A62 Crinis Networks, Inc.
2604000A63 DHD GmbH
2605000A64 Eracom Technologies
2606000A65 GentechMedia.co.,ltd.
2607000A66 MITSUBISHI ELECTRIC SYSTEM & SERVICE CO.,LTD.
2608000A67 OngCorp
2609000A68 SolarFlare Communications, Inc.
2610000A69 SUNNY bell Technology Co., Ltd.
2611000A6A SVM Microwaves s.r.o.
2612000A6B Tadiran Telecom Business Systems LTD
2613000A6C Walchem Corporation
2614000A6D EKS Elektronikservice GmbH
2615000A6E Broadcast Technology Limited
2616000A6F ZyTera Technologies Inc.
2617000A70 MPLS Forum
2618000A71 Avrio Technologies, Inc
2619000A72 SimpleTech, Inc.
2620000A73 Scientific Atlanta
2621000A74 Manticom Networks Inc.
2622000A75 Cat Electronics
2623000A76 Beida Jade Bird Huaguang Technology Co.,Ltd
2624000A77 Bluewire Technologies LLC
2625000A78 OLITEC
2626000A79 corega K.K.
2627000A7A Kyoritsu Electric Co., Ltd.
2628000A7B Cornelius Consult
2629000A7C Tecton Ltd
2630000A7D Valo, Inc.
2631000A7E The Advantage Group
2632000A7F Teradon Industries, Inc
2633000A80 Telkonet Inc.
2634000A81 TEIMA Audiotex S.L.
2635000A82 TATSUTA SYSTEM ELECTRONICS CO.,LTD.
2636000A83 SALTO SYSTEMS S.L.
2637000A84 Rainsun Enterprise Co., Ltd.
2638000A85 PLAT'C2,Inc
2639000A86 Lenze
2640000A87 Integrated Micromachines Inc.
2641000A88 InCypher S.A.
2642000A89 Creval Systems, Inc.
2643000A8A Cisco Systems
2644000A8B Cisco Systems
2645000A8C Guardware Systems Ltd.
2646000A8D EUROTHERM LIMITED
2647000A8E Invacom Ltd
2648000A8F Aska International Inc.
2649000A90 Bayside Interactive, Inc.
2650000A91 HemoCue AB
2651000A92 Presonus Corporation
2652000A93 W2 Networks, Inc.
2653000A94 ShangHai cellink CO., LTD
2654000A95 Apple Computer, Inc.
2655000A96 MEWTEL TECHNOLOGY INC.
2656000A97 SONICblue, Inc.
2657000A98 M+F Gwinner GmbH & Co
2658000A99 Dataradio Inc.
2659000A9A Aiptek International Inc
2660000A9B Towa Meccs Corporation
2661000A9C Server Technology, Inc.
2662000A9D King Young Technology Co. Ltd.
2663000A9E BroadWeb Corportation
2664000A9F Pannaway Technologies, Inc.
2665000AA0 Cedar Point Communications
2666000AA1 V V S Limited
2667000AA2 SYSTEK INC.
2668000AA3 SHIMAFUJI ELECTRIC CO.,LTD.
2669000AA4 SHANGHAI SURVEILLANCE TECHNOLOGY CO,LTD
2670000AA5 MAXLINK INDUSTRIES LIMITED
2671000AA6 Hochiki Corporation
2672000AA7 FEI Company
2673000AA8 ePipe Pty. Ltd.
2674000AA9 Brooks Automation GmbH
2675000AAA AltiGen Communications Inc.
2676000AAB TOYOTA MACS, INC.
2677000AAC TerraTec Electronic GmbH
2678000AAD Stargames Corporation
2679000AAE Rosemount Process Analytical
2680000AAF Pipal Systems
2681000AB0 LOYTEC electronics GmbH
2682000AB1 GENETEC Corporation
2683000AB2 Fresnel Wireless Systems
2684000AB3 Fa. GIRA
2685000AB4 ETIC Telecommunications
2686000AB5 Digital Electronic Network
2687000AB6 COMPUNETIX, INC
2688000AB7 Cisco Systems
2689000AB8 Cisco Systems
2690000AB9 Astera Technologies Corp.
2691000ABA Arcon Technology Limited
2692000ABB Taiwan Secom Co,. Ltd
2693000ABC Seabridge Ltd.
2694000ABD Rupprecht & Patashnick Co.
2695000ABE OPNET Technologies CO., LTD.
2696000ABF HIROTA SS
2697000AC0 Fuyoh Video Industry CO., LTD.
2698000AC1 Futuretel
2699000AC2 FiberHome Telecommunication Technologies CO.,LTD
2700000AC3 eM Technics Co., Ltd.
2701000AC4 Daewoo Teletech Co., Ltd
2702000AC5 Color Kinetics
2703000AC7 Unication Group
2704000AC8 ZPSYS CO.,LTD. (Planning&Management)
2705000AC9 Zambeel Inc
2706000ACA YOKOYAMA SHOKAI CO.,Ltd.
2707000ACB XPAK MSA Group
2708000ACC Winnow Networks, Inc.
2709000ACD Sunrich Technology Limited
2710000ACE RADIANTECH, INC.
2711000ACF PROVIDEO Multimedia Co. Ltd.
2712000AD0 Niigata Develoment Center, F.I.T. Co., Ltd.
2713000AD1 MWS
2714000AD2 JEPICO Corporation
2715000AD3 INITECH Co., Ltd
2716000AD4 CoreBell Systems Inc.
2717000AD5 Brainchild Electronic Co., Ltd.
2718000AD6 BeamReach Networks
2719000AD8 IPCserv Technology Corp.
2720000AD9 Sony Ericsson Mobile Communications AB
2721000ADB SkyPilot Network, Inc
2722000ADC RuggedCom Inc.
2723000ADD InSciTek Microsystems, Inc.
2724000ADE Happy Communication Co., Ltd.
2725000ADF Gennum Corporation
2726000AE0 Fujitsu Softek
2727000AE1 EG Technology
2728000AE2 Binatone Electronics International, Ltd
2729000AE3 YANG MEI TECHNOLOGY CO., LTD
2730000AE4 Wistron Corp.
2731000AE5 ScottCare Corporation
2732000AE6 Elitegroup Computer System Co. (ECS)
2733000AE7 ELIOP S.A.
2734000AE8 Cathay Roxus Information Technology Co. LTD
2735000AE9 AirVast Technology Inc.
2736000AEA ADAM ELEKTRONIK LTD.STI.
2737000AEB Shenzhen Tp-link Technology Co; Ltd.
2738000AEC Koatsu Gas Kogyo Co., Ltd.
2739000AED HARTING Vending G.m.b.H. & CO KG
2740000AEE GCD Hard- & Software GmbH
2741000AEF OTRUM ASA
2742000AF0 SHIN-OH ELECTRONICS CO., LTD. R&D
2743000AF1 Clarity Design, Inc.
2744000AF2 NeoAxiom Corp.
2745000AF3 Cisco Systems
2746000AF4 Cisco Systems
2747000AF5 Airgo Networks, Inc.
2748000AF6 Computer Process Controls
2749000AF7 Broadcom Corp.
2750000AF8 American Telecare Inc.
2751000AFA Traverse Technologies Australia
2752000AFB Ambri Limited
2753000AFC Core Tec Communications, LLC
2754000AFD Viking Electronic Services
2755000AFE NovaPal Ltd
2756000AFF Kilchherr Elektronik AG
2757000B00 FUJIAN START COMPUTER EQUIPMENT CO.,LTD
2758000B01 DAIICHI ELECTRONICS CO., LTD.
2759000B02 Dallmeier electronic
2760000B03 Taekwang Industrial Co., Ltd
2761000B04 Volktek Corporation
2762000B05 Pacific Broadband Networks
2763000B06 Motorola BCS
2764000B07 Voxpath Networks
2765000B08 Pillar Data Systems
2766000B09 Ifoundry Systems Singapore
2767000B0A dBm Optics
2768000B0B Corrent Corporation
2769000B0C Agile Systems Inc.
2770000B0D Air2U, Inc.
2771000B0E Trapeze Networks
2772000B0F Nyquist Industrial Control BV
2773000B10 11wave Technonlogy Co.,Ltd
2774000B11 HIMEJI ABC TRADING CO.,LTD.
2775000B13 ZETRON INC
2776000B14 ViewSonic Corporation
2777000B15 Platypus Technology
2778000B16 Communication Machinery Corporation
2779000B17 MKS Instruments
2780000B19 Vernier Networks, Inc.
2781000B1A Teltone Corporation
2782000B1B Systronix, Inc.
2783000B1D LayerZero Power Systems, Inc.
2784000B1E KAPPA opto-electronics GmbH
2785000B1F I CON Computer Co.
2786000B20 Hirata corporation
2787000B21 G-Star Communications Inc.
2788000B22 Environmental Systems and Services
2789000B23 Efficient Networks, Inc.
2790000B24 AirLogic
2791000B25 Aeluros
2792000B26 Wetek Corporation
2793000B27 Scion Corporation
2794000B28 Quatech Inc.
2795000B29 LG Industrial Systems Co.,Ltd.
2796000B2A HOWTEL Co., Ltd.
2797000B2B HOSTNET CORPORATION
2798000B2C Eiki Industrial Co. Ltd.
2799000B2D Danfoss Inc.
2800000B2E Cal-Comp Electronics (Thailand) Public Company Limited Taipe
2801000B2F bplan GmbH
2802000B30 Beijing Gongye Science & Technology Co.,Ltd
2803000B31 Yantai ZhiYang Scientific and technology industry CO., LTD
2804000B32 VORMETRIC, INC.
2805000B33 Vivato
2806000B34 ShangHai Broadband Technologies CO.LTD
2807000B35 Quad Bit System co., Ltd.
2808000B36 Productivity Systems, Inc.
2809000B37 MANUFACTURE DES MONTRES ROLEX SA
2810000B38 Knuerr AG
2811000B39 Keisoku Giken Co.,Ltd.
2812000B3A Fortel DTV, Inc.
2813000B3B devolo AG
2814000B3C Cygnal Integrated Products, Inc.
2815000B3D CONTAL OK Ltd.
2816000B3E BittWare, Inc
2817000B3F Anthology Solutions Inc.
2818000B40 OpNext Inc.
2819000B41 Ing. Buero Dr. Beutlhauser
2820000B42 commax Co., Ltd.
2821000B43 Microscan Systems, Inc.
2822000B44 Concord IDea Corp.
2823000B45 Cisco
2824000B46 Cisco
2825000B47 Advanced Energy
2826000B48 sofrel
2827000B49 RF-Link System Inc.
2828000B4A Visimetrics (UK) Ltd
2829000B4B VISIOWAVE SA
2830000B4C Clarion (M) Sdn Bhd
2831000B4D Emuzed
2832000B4E VertexRSI Antenna Products Division
2833000B4F Verifone, INC.
2834000B50 Oxygnet
2835000B51 Micetek International Inc.
2836000B52 JOYMAX ELECTRONICS CORP.
2837000B53 INITIUM Co., Ltd.
2838000B54 BiTMICRO Networks, Inc.
2839000B55 ADInstruments
2840000B56 Cybernetics
2841000B57 Silicon Laboratories
2842000B58 Astronautics C.A LTD
2843000B59 ScriptPro, LLC
2844000B5A HyperEdge
2845000B5B Rincon Research Corporation
2846000B5C Newtech Co.,Ltd
2847000B5D FUJITSU LIMITED
2848000B5E ATMAVA Ltd
2849000B5F Cisco Systems
2850000B60 Cisco Systems
2851000B61 Friedrich Lütze GmbH &Co.
2852000B62 Ingenieurbüro Ingo Mohnen
2853000B64 Kieback & Peter GmbH & Co KG
2854000B65 Sy.A.C. srl
2855000B66 Teralink Communications
2856000B67 Topview Technology Corporation
2857000B68 Addvalue Communications Pte Ltd
2858000B69 Franke Finland Oy
2859000B6A Asiarock Incorporation
2860000B6B Wistron Neweb Corp.
2861000B6C Sychip Inc.
2862000B6D SOLECTRON JAPAN NAKANIIDA
2863000B6E Neff Instrument Corp.
2864000B6F Media Streaming Networks Inc
2865000B70 Load Technology, Inc.
2866000B71 Litchfield Communications Inc.
2867000B72 Lawo AG
2868000B73 Kodeos Communications
2869000B74 Kingwave Technology Co., Ltd.
2870000B75 Iosoft Ltd.
2871000B76 ET&T Co. Ltd.
2872000B77 Cogent Systems, Inc.
2873000B78 TAIFATECH INC.
2874000B79 X-COM, Inc.
2875000B7B Test-Um Inc.
2876000B7C Telex Communications
2877000B7D SOLOMON EXTREME INTERNATIONAL LTD.
2878000B7E SAGINOMIYA Seisakusho Inc.
2879000B7F OmniWerks
2880000B81 Kaparel Corporation
2881000B82 Grandstream Networks, Inc.
2882000B83 DATAWATT B.V.
2883000B84 BODET
2884000B85 Airespace, Inc.
2885000B86 Aruba Networks
2886000B87 American Reliance Inc.
2887000B88 Vidisco ltd.
2888000B89 Top Global Technology, Ltd.
2889000B8A MITEQ Inc.
2890000B8B KERAJET, S.A.
2891000B8C flextronics israel
2892000B8D Avvio Networks
2893000B8E Ascent Corporation
2894000B8F AKITA ELECTRONICS SYSTEMS CO.,LTD.
2895000B90 Covaro Networks, Inc.
2896000B91 Aglaia Gesellschaft für Bildverarbeitung und Kommunikation m
2897000B92 Ascom Danmark A/S
2898000B93 Barmag Electronic
2899000B94 Digital Monitoring Products, Inc.
2900000B95 eBet Gaming Systems Pty Ltd
2901000B96 Innotrac Diagnostics Oy
2902000B97 Matsushita Electric Industrial Co.,Ltd.
2903000B98 NiceTechVision
2904000B99 SensAble Technologies, Inc.
2905000B9A Shanghai Ulink Telecom Equipment Co. Ltd.
2906000B9B Sirius System Co, Ltd.
2907000B9C TriBeam Technologies, Inc.
2908000B9D TwinMOS Technologies Inc.
2909000B9E Yasing Technology Corp.
2910000B9F Neue ELSA GmbH
2911000BA0 T&L Information Inc.
2912000BA1 SYSCOM Ltd.
2913000BA2 Sumitomo Electric Networks, Inc
2914000BA3 Siemens AG, I&S
2915000BA4 Shiron Satellite Communications Ltd. (1996)
2916000BA5 Quasar Cipta Mandiri, PT
2917000BA6 Miyakawa Electric Works Ltd.
2918000BA7 Maranti Networks
2919000BA8 HANBACK ELECTRONICS CO., LTD.
2920000BAA Aiphone co.,Ltd
2921000BAB Advantech Technology (CHINA) Co., Ltd.
2922000BAC 3Com Europe Ltd.
2923000BAD PC-PoS Inc.
2924000BAE Vitals System Inc.
2925000BB0 Sysnet Telematica srl
2926000BB1 Super Star Technology Co., Ltd.
2927000BB2 SMALLBIG TECHNOLOGY
2928000BB3 RiT technologies Ltd.
2929000BB4 RDC Semiconductor Inc.,
2930000BB5 nStor Technologies, Inc.
2931000BB6 Mototech Inc.
2932000BB7 Micro Systems Co.,Ltd.
2933000BB8 Kihoku Electronic Co.
2934000BB9 Imsys AB
2935000BBA Harmonic Broadband Access Networks
2936000BBB Etin Systems Co., Ltd
2937000BBC En Garde Systems, Inc.
2938000BBD Connexionz Limited
2939000BBE Cisco Systems
2940000BBF Cisco Systems
2941000BC0 China IWNComm Co., Ltd.
2942000BC1 Bay Microsystems, Inc.
2943000BC2 Corinex Communication Corp.
2944000BC3 Multiplex, Inc.
2945000BC4 BIOTRONIK GmbH & Co
2946000BC5 SMC Networks, Inc.
2947000BC6 ISAC, Inc.
2948000BC7 ICET S.p.A.
2949000BC8 AirFlow Networks
2950000BC9 Electroline Equipment
2951000BCA DATAVAN International Corporation
2952000BCB Fagor Automation , S. Coop
2953000BCC JUSAN, S.A.
2954000BCD Compaq (HP)
2955000BCE Free2move AB
2956000BCF AGFA NDT INC.
2957000BD0 XiMeta Technology Americas Inc.
2958000BD1 Aeronix, Inc.
2959000BD2 Remopro Technology Inc.
2960000BD3 cd3o
2961000BD4 Beijing Wise Technology & Science Development Co.Ltd
2962000BD5 Nvergence, Inc.
2963000BD6 Paxton Access Ltd
2964000BD7 MBB Gelma GmbH
2965000BD8 Industrial Scientific Corp.
2966000BD9 General Hydrogen
2967000BDA EyeCross Co.,Inc.
2968000BDB Dell ESG PCBA Test
2969000BDC AKCP
2970000BDD TOHOKU RICOH Co., LTD.
2971000BDF Shenzhen RouterD Networks Limited
2972000BE0 SercoNet Ltd.
2973000BE2 Lumenera Corporation
2974000BE3 Key Stream Co., Ltd.
2975000BE4 Hosiden Corporation
2976000BE5 HIMS Korea Co., Ltd.
2977000BE6 Datel Electronics
2978000BE7 COMFLUX TECHNOLOGY INC.
2979000BE8 AOIP
2980000BEA Zultys Technologies
2981000BEB Systegra AG
2982000BEC NIPPON ELECTRIC INSTRUMENT, INC.
2983000BED ELM Inc.
2984000BEE inc.jet, Incorporated
2985000BEF Code Corporation
2986000BF0 MoTEX Products Co., Ltd.
2987000BF1 LAP Laser Applikations
2988000BF2 Chih-Kan Technology Co., Ltd.
2989000BF3 BAE SYSTEMS
2990000BF5 Shanghai Sibo Telecom Technology Co.,Ltd
2991000BF6 Nitgen Co., Ltd
2992000BF7 NIDEK CO.,LTD
2993000BF8 Infinera
2994000BF9 Gemstone communications, Inc.
2995000BFB D-NET International Corporation
2996000BFC Cisco Systems
2997000BFD Cisco Systems
2998000BFE CASTEL Broadband Limited
2999000BFF Berkeley Camera Engineering
3000000C00 BEB Industrie-Elektronik AG
3001000C01 Abatron AG
3002000C02 ABB Oy
3003000C03 HDMI Licensing, LLC
3004000C04 Tecnova
3005000C05 RPA Reserch Co., Ltd.
3006000C06 Nixvue Systems Pte Ltd
3007000C07 Iftest AG
3008000C08 HUMEX Technologies Corp.
3009000C09 Hitachi IE Systems Co., Ltd
3010000C0A Guangdong Province Electronic Technology Research Institute
3011000C0B Broadbus Technologies
3012000C0C APPRO TECHNOLOGY INC.
3013000C0D Communications & Power Industries / Satcom Division
3014000C0E XtremeSpectrum, Inc.
3015000C0F Techno-One Co., Ltd
3016000C10 PNI Corporation
3017000C11 NIPPON DEMPA CO.,LTD.
3018000C12 Micro-Optronic-Messtechnik GmbH
3019000C13 MediaQ
3020000C14 Diagnostic Instruments, Inc.
3021000C15 CyberPower Systems, Inc.
3022000C16 Concorde Microsystems Inc.
3023000C17 AJA Video Systems Inc
3024000C18 Zenisu Keisoku Inc.
3025000C19 Telio Communications GmbH
3026000C1A Quest Technical Solutions Inc.
3027000C1B ORACOM Co, Ltd.
3028000C1C MicroWeb Co., Ltd.
3029000C1D Mettler & Fuchs AG
3030000C1E Global Cache
3031000C1F Glimmerglass Networks
3032000C20 Fi WIn, Inc.
3033000C21 Faculty of Science and Technology, Keio University
3034000C22 Double D Electronics Ltd
3035000C23 Beijing Lanchuan Tech. Co., Ltd.
3036000C25 Allied Telesyn Networks
3037000C26 Weintek Labs. Inc.
3038000C27 Sammy Corporation
3039000C28 RIFATRON
3040000C29 VMware, Inc.
3041000C2A OCTTEL Communication Co., Ltd.
3042000C2B ELIAS Technology, Inc.
3043000C2C Enwiser Inc.
3044000C2D FullWave Technology Co., Ltd.
3045000C2E Openet information technology(shenzhen) Co., Ltd.
3046000C2F SeorimTechnology Co.,Ltd.
3047000C30 Cisco
3048000C31 Cisco
3049000C32 Avionic Design Development GmbH
3050000C33 Compucase Enterprise Co. Ltd.
3051000C34 Vixen Co., Ltd.
3052000C35 KaVo Dental GmbH & Co. KG
3053000C36 SHARP TAKAYA ELECTRONICS INDUSTRY CO.,LTD.
3054000C37 Geomation, Inc.
3055000C38 TelcoBridges Inc.
3056000C39 Sentinel Wireless Inc.
3057000C3A Oxance
3058000C3B Orion Electric Co., Ltd.
3059000C3C MediaChorus, Inc.
3060000C3D Glsystech Co., Ltd.
3061000C3E Crest Audio
3062000C3F Cogent Defence & Security Networks,
3063000C40 Altech Controls
3064000C41 The Linksys Group, Inc.
3065000C42 Routerboard.com
3066000C43 Ralink Technology, Corp.
3067000C44 Automated Interfaces, Inc.
3068000C45 Animation Technologies Inc.
3069000C46 Allied Telesyn Inc.
3070000C47 SK Teletech(R&D Planning Team)
3071000C48 QoStek Corporation
3072000C49 Dangaard Telecom RTC Division A/S
3073000C4A Cygnus Microsystems Private Limited
3074000C4B Cheops Elektronik
3075000C4C Arcor AG&Co.
3076000C4D ACRA CONTROL
3077000C4E Winbest Technology CO,LT
3078000C4F UDTech Japan Corporation
3079000C50 Seagate Technology
3080000C51 Scientific Technologies Inc.
3081000C52 Roll Systems Inc.
3082000C54 Pedestal Networks, Inc
3083000C55 Microlink Communications Inc.
3084000C56 Megatel Computer (1986) Corp.
3085000C57 MACKIE Engineering Services Belgium BVBA
3086000C58 M&S Systems
3087000C59 Indyme Electronics, Inc.
3088000C5A IBSmm Industrieelektronik Multimedia
3089000C5B HANWANG TECHNOLOGY CO.,LTD
3090000C5C GTN Systems B.V.
3091000C5D CHIC TECHNOLOGY (CHINA) CORP.
3092000C5F Avtec, Inc.
3093000C60 ACM Systems
3094000C61 AC Tech corporation DBA Advanced Digital
3095000C62 ABB Automation Technology Products AB, Control
3096000C63 Zenith Electronics Corporation
3097000C64 X2 MSA Group
3098000C65 Sunin Telecom
3099000C66 Pronto Networks Inc
3100000C67 OYO ELECTRIC CO.,LTD
3101000C68 Oasis Semiconductor, Inc.
3102000C69 National Radio Astronomy Observatory
3103000C6A MBARI
3104000C6B Kurz Industrie-Elektronik GmbH
3105000C6C Elgato Systems LLC
3106000C6D BOC Edwards
3107000C6E ASUSTEK COMPUTER INC.
3108000C6F Amtek system co.,LTD.
3109000C70 ACC GmbH
3110000C71 Wybron, Inc
3111000C72 Tempearl Industrial Co., Ltd.
3112000C73 TELSON ELECTRONICS CO., LTD
3113000C74 RIVERTEC CORPORATION
3114000C75 Oriental integrated electronics. LTD
3115000C76 MICRO-STAR INTERNATIONAL CO., LTD.
3116000C77 Life Racing Ltd
3117000C78 In-Tech Electronics Limited
3118000C79 Extel Communications P/L
3119000C7A DaTARIUS Technologies GmbH
3120000C7B ALPHA PROJECT Co.,Ltd.
3121000C7C Internet Information Image Inc.
3122000C7D TEIKOKU ELECTRIC MFG. CO., LTD
3123000C7E Tellium Incorporated
3124000C7F synertronixx GmbH
3125000C80 Opelcomm Inc.
3126000C81 Nulec Industries Pty Ltd
3127000C82 NETWORK TECHNOLOGIES INC
3128000C83 Logical Solutions
3129000C84 Eazix, Inc.
3130000C85 Cisco Systems
3131000C86 Cisco Systems
3132000C87 ATI
3133000C88 Apache Micro Peripherals, Inc.
3134000C89 AC Electric Vehicles, Ltd.
3135000C8A Bose Corporation
3136000C8B Connect Tech Inc
3137000C8C KODICOM CO.,LTD.
3138000C8D MATRIX VISION GmbH
3139000C8E Mentor Engineering Inc
3140000C8F Nergal s.r.l.
3141000C90 Octasic Inc.
3142000C91 Riverhead Networks Inc.
3143000C92 WolfVision Gmbh
3144000C93 Xeline Co., Ltd.
3145000C94 United Electronic Industries, Inc.
3146000C95 PrimeNet
3147000C96 OQO, Inc.
3148000C97 NV ADB TTV Technologies SA
3149000C98 LETEK Communications Inc.
3150000C99 HITEL LINK Co.,Ltd
3151000C9A Hitech Electronics Corp.
3152000C9B EE Solutions, Inc
3153000C9C Chongho information & communications
3154000C9D AirWalk Communications, Inc.
3155000C9E MemoryLink Corp.
3156000C9F NKE Corporation
3157000CA0 StorCase Technology, Inc.
3158000CA1 SIGMACOM Co., LTD.
3159000CA2 Scopus Network Technologies Ltd
3160000CA3 Rancho Technology, Inc.
3161000CA4 Prompttec Product Management GmbH
3162000CA6 Mintera Corporation
3163000CA7 Metro (Suzhou) Technologies Co., Ltd.
3164000CA8 Garuda Networks Corporation
3165000CA9 Ebtron Inc.
3166000CAA Cubic Transportation Systems Inc
3167000CAB COMMEND International
3168000CAC Citizen Watch Co., Ltd.
3169000CAD BTU International
3170000CAE Ailocom Oy
3171000CAF TRI TERM CO.,LTD.
3172000CB0 Star Semiconductor Corporation
3173000CB1 Salland Engineering (Europe) BV
3174000CB2 safei Co., Ltd.
3175000CB3 ROUND Co.,Ltd.
3176000CB4 Propagate Networks, Inc
3177000CB5 Premier Technolgies, Inc
3178000CB6 NANJING SEU MOBILE & INTERNET TECHNOLOGY CO.,LTD
3179000CB7 Nanjing Huazhuo Electronics Co., Ltd.
3180000CB8 MEDION AG
3181000CB9 LEA
3182000CBA Jamex
3183000CBB ISKRAEMECO
3184000CBC Iscutum
3185000CBD Interface Masters, Inc
3186000CBF Holy Stone Ent. Co., Ltd.
3187000CC0 Genera Oy
3188000CC1 Cooper Industries Inc.
3189000CC3 BeWAN systems
3190000CC4 Tiptel AG
3191000CC5 Nextlink Co., Ltd.
3192000CC6 Ka-Ro electronics GmbH
3193000CC7 Intelligent Computer Solutions Inc.
3194000CC8 Integrated Digital Systems, Inc.
3195000CC9 ILWOO DATA & TECHNOLOGY CO.,LTD
3196000CCA Hitachi Global Storage Technologies
3197000CCB Design Combus Ltd
3198000CCC Bluesoft Ltd.
3199000CCD IEC - TC57
3200000CCE Cisco Systems
3201000CCF Cisco Systems
3202000CD0 Symetrix
3203000CD1 SFOM Technology Corp.
3204000CD2 Schaffner EMV AG
3205000CD3 Prettl Elektronik Radeberg GmbH
3206000CD4 Positron Public Safety Systems inc.
3207000CD5 Passave Inc.
3208000CD6 PARTNER TECH
3209000CD7 Nallatech Ltd
3210000CD8 M. K. Juchheim GmbH & Co
3211000CD9 Itcare Co., Ltd
3212000CDA FreeHand Systems, Inc.
3213000CDB Foundry Networks
3214000CDC BECS Technology, Inc
3215000CDD AOS Technologies AG
3216000CDE ABB STOTZ-KONTAKT GmbH
3217000CDF PULNiX America, Inc
3218000CE0 Trek Diagnostics Inc.
3219000CE1 The Open Group
3220000CE2 Rolls-Royce
3221000CE3 Option International N.V.
3222000CE4 NeuroCom International, Inc.
3223000CE5 Motorola BCS
3224000CE6 Meru Networks Inc
3225000CE7 MediaTek Inc.
3226000CE8 GuangZhou AnJuBao Co., Ltd
3227000CE9 BLOOMBERG L.P.
3228000CEA aphona Kommunikationssysteme
3229000CEB CNMP Networks, Inc.
3230000CEC Spectracom Corp.
3231000CED Real Digital Media
3232000CEE Q-Networks
3233000CEF Open Networks Engineering Ltd
3234000CF0 M & N GmbH
3235000CF1 Intel Corporation
3236000CF2 GAMESA EÓLICA
3237000CF3 CALL IMAGE SA
3238000CF4 AKATSUKI ELECTRIC MFG.CO.,LTD.
3239000CF5 InfoExpress
3240000CF6 Sitecom Europe BV
3241000CF7 Nortel Networks
3242000CF8 Nortel Networks
3243000CF9 ITT Flygt AB
3244000CFA Digital Systems Corp
3245000CFB Korea Network Systems
3246000CFC S2io Technologies Corp
3247000CFE Grand Electronic Co., Ltd
3248000CFF MRO-TEK LIMITED
3249000D00 Seaway Networks Inc.
3250000D01 P&E Microcomputer Systems, Inc.
3251000D02 NEC Access Technica,Ltd
3252000D03 Matrics, Inc.
3253000D04 Foxboro Eckardt Development GmbH
3254000D05 cybernet manufacturing inc.
3255000D06 Compulogic Limited
3256000D07 Calrec Audio Ltd
3257000D08 AboveCable, Inc.
3258000D09 Yuehua(Zhuhai) Electronic CO. LTD
3259000D0A Projectiondesign as
3260000D0B Melco Inc.
3261000D0C MDI Security Systems
3262000D0D ITSupported, LLC
3263000D0E Inqnet Systems, Inc.
3264000D0F Finlux Ltd
3265000D10 Embedtronics Oy
3266000D11 DENTSPLY - Gendex
3267000D12 AXELL Corporation
3268000D13 Wilhelm Rutenbeck GmbH&Co.
3269000D14 Vtech Innovation LP dba Advanced American Telephones
3270000D15 Voipac s.r.o.
3271000D16 UHS Systems Pty Ltd
3272000D17 Turbo Networks Co.Ltd
3273000D18 Sunitec Enterprise Co., Ltd.
3274000D19 ROBE Show lighting
3275000D1A Mustek System Inc.
3276000D1B Kyoto Electronics Manufacturing Co., Ltd.
3277000D1C I2E TELECOM
3278000D1D HIGH-TEK HARNESS ENT. CO., LTD.
3279000D1E Control Techniques
3280000D1F AV Digital
3281000D20 ASAHIKASEI TECHNOSYSTEM CO.,LTD.
3282000D21 WISCORE Inc.
3283000D22 Unitronics
3284000D23 Smart Solution, Inc
3285000D24 SENTEC E&E CO., LTD.
3286000D25 SANDEN CORPORATION
3287000D26 Primagraphics Limited
3288000D27 MICROPLEX Printware AG
3289000D28 Cisco
3290000D29 Cisco
3291000D2A Scanmatic AS
3292000D2B Racal Instruments
3293000D2C Patapsco Designs Ltd
3294000D2D NCT Deutschland GmbH
3295000D2E Matsushita Avionics Systems Corporation
3296000D2F AIN Comm.Tech.Co., LTD
3297000D30 IceFyre Semiconductor
3298000D31 Compellent Technologies, Inc.
3299000D32 DispenseSource, Inc.
3300000D33 Prediwave Corp.
3301000D34 Shell International Exploration and Production, Inc.
3302000D35 PAC International Ltd
3303000D36 Wu Han Routon Electronic Co., Ltd
3304000D37 WIPLUG
3305000D38 NISSIN INC.
3306000D39 Network Electronics
3307000D3A Microsoft Corp.
3308000D3B Microelectronics Technology Inc.
3309000D3C i.Tech Dynamic Ltd
3310000D3E APLUX Communications Ltd.
3311000D3F VXI Technology
3312000D40 Verint Loronix Video Solutions
3313000D41 Siemens AG ICM MP UC RD IT KLF1
3314000D42 Newbest Development Limited
3315000D43 DRS Tactical Systems Inc.
3316000D45 Tottori SANYO Electric Co., Ltd.
3317000D46 Eurotherm Drives, Ltd.
3318000D47 Collex
3319000D48 AEWIN Technologies Co., Ltd.
3320000D49 Triton Systems of Delaware, Inc.
3321000D4A Steag ETA-Optik
3322000D4B Roku, LLC
3323000D4C Outline Electronics Ltd.
3324000D4D Ninelanes
3325000D4E NDR Co.,LTD.
3326000D4F Kenwood Corporation
3327000D50 Galazar Networks
3328000D51 DIVR Systems, Inc.
3329000D52 Comart system
3330000D53 Beijing 5w Communication Corp.
3331000D54 3Com Europe Ltd
3332000D55 SANYCOM Technology Co.,Ltd
3333000D56 Dell PCBA Test
3334000D57 Fujitsu I-Network Systems Limited.
3335000D59 Amity Systems, Inc.
3336000D5A Tiesse SpA
3337000D5B Smart Empire Investments Limited
3338000D5C Robert Bosch GmbH, VT-ATMO
3339000D5D Raritan Computer, Inc
3340000D5E NEC CustomTechnica, Ltd.
3341000D5F Minds Inc
3342000D60 IBM Corporation
3343000D61 Giga-Byte Technology Co., Ltd.
3344000D62 Funkwerk Dabendorf GmbH
3345000D63 DENT Instruments, Inc.
3346000D64 COMAG Handels AG
3347000D65 Cisco Systems
3348000D66 Cisco Systems
3349000D67 BelAir Networks Inc.
3350000D68 Vinci Systems, Inc.
3351000D69 TMT&D Corporation
3352000D6A Redwood Technologies LTD
3353000D6B Mita-Teknik A/S
3354000D6C M-Audio
3355000D6D K-Tech Devices Corp.
3356000D6E K-Patents Oy
3357000D6F Ember Corporation
3358000D70 Datamax Corporation
3359000D71 boca systems
3360000D72 2Wire, Inc
3361000D73 Technical Support, Inc.
3362000D74 Sand Network Systems, Inc.
3363000D75 Kobian Pte Ltd - Taiwan Branch
3364000D76 Hokuto Denshi Co,. Ltd.
3365000D77 FalconStor Software
3366000D78 Engineering & Security
3367000D79 Dynamic Solutions Co,.Ltd.
3368000D7A DiGATTO Asia Pacific Pte Ltd
3369000D7B Consensys Computers Inc.
3370000D7C Codian Ltd
3371000D7D Afco Systems
3372000D7E Axiowave Networks, Inc.
3373000D7F MIDAS COMMUNICATION TECHNOLOGIES PTE LTD ( Foreign Branch)
3374000D80 Online Development Inc
3375000D81 Pepperl+Fuchs GmbH
3376000D82 PHS srl
3377000D83 Sanmina-SCI Hungary Ltd.
3378000D84 Seodu Inchip, Inc.
3379000D85 Tapwave, Inc.
3380000D86 Huber + Suhner AG
3381000D87 Elitegroup Computer System Co. (ECS)
3382000D88 D-Link Corporation
3383000D89 Bils Technology Inc
3384000D8A Winners Electronics Co., Ltd.
3385000D8B T&D Corporation
3386000D8C Shanghai Wedone Digital Ltd. CO.
3387000D8D ProLinx Communication Gateways, Inc.
3388000D8E Koden Electronics Co., Ltd.
3389000D8F King Tsushin Kogyo Co., LTD.
3390000D90 Factum Electronics AB
3391000D91 Eclipse (HQ Espana) S.L.
3392000D92 Arima Communication Corporation
3393000D93 Apple Computer
3394000D94 AFAR Communications,Inc
3395000D96 Vtera Technology Inc.
3396000D97 Tropos Networks, Inc.
3397000D98 S.W.A.C. Schmitt-Walter Automation Consult GmbH
3398000D99 Orbital Sciences Corp.; Launch Systems Group
3399000D9A INFOTEC LTD
3400000D9C Elan GmbH & Co KG
3401000D9D Hewlett Packard
3402000D9E TOKUDEN OHIZUMI SEISAKUSYO Co.,Ltd.
3403000D9F RF Micro Devices
3404000DA0 NEDAP N.V.
3405000DA1 MIRAE ITS Co.,LTD.
3406000DA2 Infrant Technologies, Inc.
3407000DA3 Emerging Technologies Limited
3408000DA4 DOSCH & AMAND SYSTEMS AG
3409000DA5 Fabric7 Systems, Inc
3410000DA6 Universal Switching Corporation
3411000DA8 Teletronics Technology Corporation
3412000DA9 T.E.A.M. S.L.
3413000DAA S.A.Tehnology co.,Ltd.
3414000DAB Parker Hannifin GmbH Electromechanical Division Europe
3415000DAC Japan CBM Corporation
3416000DAD Dataprobe Inc
3417000DAE SAMSUNG HEAVY INDUSTRIES CO., LTD.
3418000DAF Plexus Corp (UK) Ltd
3419000DB0 Olym-tech Co.,Ltd.
3420000DB1 Japan Network Service Co., Ltd.
3421000DB2 Ammasso, Inc.
3422000DB3 SDO Communication Corperation
3423000DB4 NETASQ
3424000DB5 GLOBALSAT TECHNOLOGY CORPORATION
3425000DB6 Teknovus, Inc.
3426000DB7 SANKO ELECTRIC CO,.LTD
3427000DB8 SCHILLER AG
3428000DB9 PC Engines GmbH
3429000DBA Océ Document Technologies GmbH
3430000DBB Nippon Dentsu Co.,Ltd.
3431000DBC Cisco Systems
3432000DBD Cisco Systems
3433000DBE Bel Fuse Europe Ltd.,UK
3434000DBF TekTone Sound & Signal Mfg., Inc.
3435000DC0 Spagat AS
3436000DC1 SafeWeb Inc
3437000DC3 First Communication, Inc.
3438000DC4 Emcore Corporation
3439000DC5 EchoStar International Corporation
3440000DC6 DigiRose Technology Co., Ltd.
3441000DC7 COSMIC ENGINEERING INC.
3442000DC8 AirMagnet, Inc
3443000DC9 THALES Elektronik Systeme GmbH
3444000DCA Tait Electronics
3445000DCB Petcomkorea Co., Ltd.
3446000DCC NEOSMART Corp.
3447000DCD GROUPE TXCOM
3448000DCE Dynavac Technology Pte Ltd
3449000DCF Cidra Corp.
3450000DD0 TetraTec Instruments GmbH
3451000DD1 Stryker Corporation
3452000DD2 Simrad Optronics ASA
3453000DD3 SAMWOO Telecommunication Co.,Ltd.
3454000DD4 Revivio Inc.
3455000DD5 O'RITE TECHNOLOGY CO.,LTD
3456000DD7 Bright
3457000DD8 BBN
3458000DD9 Anton Paar GmbH
3459000DDA ALLIED TELESIS K.K.
3460000DDB AIRWAVE TECHNOLOGIES INC.
3461000DDC VAC
3462000DDD PROFİLO TELRA ELEKTRONİK SANAYİ VE TİCARET A.Ş.
3463000DDE Joyteck Co., Ltd.
3464000DDF Japan Image & Network Inc.
3465000DE0 ICPDAS Co.,LTD
3466000DE1 Control Products, Inc.
3467000DE2 CMZ Sistemi Elettronici
3468000DE3 AT Sweden AB
3469000DE4 DIGINICS, Inc.
3470000DE5 Samsung Thales
3471000DE6 YOUNGBO ENGINEERING CO.,LTD
3472000DE7 Snap-on OEM Group
3473000DE8 Nasaco Electronics Pte. Ltd
3474000DE9 Napatech Aps
3475000DEA Kingtel Telecommunication Corp.
3476000DEB CompXs Limited
3477000DEC Cisco Systems
3478000DED Cisco Systems
3479000DEF Soc. Coop. Bilanciai
3480000DF0 QCOM TECHNOLOGY INC.
3481000DF1 IONIX INC.
3482000DF3 Asmax Solutions
3483000DF4 Watertek Co.
3484000DF5 Teletronics International Inc.
3485000DF6 Technology Thesaurus Corp.
3486000DF7 Space Dynamics Lab
3487000DF8 ORGA Kartensysteme GmbH
3488000DF9 NDS Limited
3489000DFA Micro Control Systems Ltd.
3490000DFB Komax AG
3491000DFC ITFOR Inc. resarch and development
3492000DFD Huges Hi-Tech Inc.,
3493000DFE Hauppauge Computer Works, Inc.
3494000DFF CHENMING MOLD INDUSTRY CORP.
3495000E01 ASIP Technologies Inc.
3496000E02 Advantech AMT Inc.
3497000E03 Aarohi Communications, Inc.
3498000E05 WIRELESS MATRIX CORP.
3499000E06 Team Simoco Ltd
3500000E07 Sony Ericsson Mobile Communications AB
3501000E08 Sipura Technology, Inc.
3502000E09 Shenzhen Coship Software Co.,LTD.
3503000E0B Netac Technology Co., Ltd.
3504000E0C Intel Corporation
3505000E0D HESCH Schröder GmbH
3506000E0E ESA elettronica S.P.A.
3507000E0F ERMME
3508000E11 BDT Büro- und Datentechnik GmbH & Co. KG
3509000E12 Adaptive Micro Systems Inc.
3510000E13 Accu-Sort Systems inc.
3511000E14 Visionary Solutions, Inc.
3512000E15 Tadlys LTD
3513000E16 SouthWing
3514000E18 MyA Technology
3515000E19 LogicaCMG Pty Ltd
3516000E1B IAV GmbH
3517000E1C Hach Company
3518000E1F TCL Networks Equipment Co., Ltd.
3519000E20 PalmSource, Inc.
3520000E21 MTU Friedrichshafen GmbH
3521000E23 Incipient, Inc.
3522000E25 Hannae Technology Co., Ltd
3523000E26 Gincom Technology Corp.
3524000E27 Crere Networks, Inc.
3525000E28 Dynamic Ratings P/L
3526000E29 Shester Communications Inc
3527000E2B Safari Technologies
3528000E2C Netcodec co.
3529000E2D Hyundai Digital Technology Co.,Ltd.
3530000E2E Edimax Technology Co., Ltd.
3531000E2F Disetronic Medical Systems AG
3532000E30 AERAS Networks, Inc.
3533000E31 Olympus BioSystems GmbH
3534000E32 Kontron Medical
3535000E33 Shuko Electronics Co.,Ltd
3536000E34 NexxGenCity
3537000E35 Intel Corp
3538000E36 HEINESYS, Inc.
3539000E37 Harms & Wende GmbH & Co.KG
3540000E38 Cisco Systems
3541000E39 Cisco Systems
3542000E3A Cirrus Logic
3543000E3B Hawking Technologies, Inc.
3544000E3C TransAct Technoloiges Inc.
3545000E3D Televic N.V.
3546000E3E Sun Optronics Inc
3547000E3F Soronti, Inc.
3548000E40 Nortel Networks
3549000E41 NIHON MECHATRONICS CO.,LTD.
3550000E42 Motic Incoporation Ltd.
3551000E43 G-Tek Electronics Sdn. Bhd.
3552000E44 Digital 5, Inc.
3553000E45 Beijing Newtry Electronic Technology Ltd
3554000E46 Niigata Seimitsu Co.,Ltd.
3555000E47 NCI System Co.,Ltd.
3556000E48 Lipman TransAction Solutions
3557000E49 Forsway Scandinavia AB
3558000E4A Changchun Huayu WEBPAD Co.,LTD
3559000E4B atrium c and i
3560000E4C Bermai Inc.
3561000E4D Numesa Inc.
3562000E4E Waveplus Technology Co., Ltd.
3563000E4F Trajet GmbH
3564000E50 Thomson Multi Media
3565000E51 tecna elettronica srl
3566000E52 Optium Corporation
3567000E53 AV TECH CORPORATION
3568000E54 AlphaCell Wireless Ltd.
3569000E55 AUVITRAN
3570000E56 4G Systems GmbH
3571000E57 Iworld Networking, Inc.
3572000E58 Rincon Networks
3573000E5A TELEFIELD inc.
3574000E5B ParkerVision - Direct2Data
3575000E5C Motorola BCS
3576000E5D Com-X Networks
3577000E5E Beijing Raisecom Science & Technology Development Co.,Ltd
3578000E5F activ-net GmbH & Co. KG
3579000E60 360SUN Digital Broadband Corporation
3580000E61 MICROTROL LIMITED
3581000E62 Nortel Networks
3582000E63 Lemke Diagnostics GmbH
3583000E64 Elphel, Inc
3584000E65 TransCore
3585000E66 Hitachi Advanced Digital, Inc.
3586000E67 Eltis Microelectronics Ltd.
3587000E68 E-TOP Network Technology Inc.
3588000E69 China Electric Power Research Institute
3589000E6A 3COM EUROPE LTD
3590000E6B Janitza electronics GmbH
3591000E6C Device Drivers Limited
3592000E6D Murata Manufacturing Co., Ltd.
3593000E6E MICRELEC ELECTRONICS S.A
3594000E6F IRIS Corporation Berhad
3595000E70 in2 Networks
3596000E71 Gemstar Technology Development Ltd.
3597000E72 CTS electronics
3598000E73 Tpack A/S
3599000E74 Solar Telecom. Tech
3600000E75 New York Air Brake Corp.
3601000E76 GEMSOC INNOVISION INC.
3602000E77 Decru, Inc.
3603000E78 Amtelco
3604000E79 Ample Communications Inc.
3605000E7B Toshiba
3606000E7D Electronics Line 3000 Ltd.
3607000E7E Comprog Oy
3608000E7F Hewlett Packard
3609000E81 Instant802 Networks Inc.
3610000E82 Commtech Wireless
3611000E83 Cisco Systems
3612000E84 Cisco Systems
3613000E85 Catalyst Enterprises, Inc.
3614000E86 Alcatel North America
3615000E87 adp Gauselmann GmbH
3616000E88 VIDEOTRON CORP.
3617000E89 CLEMATIC
3618000E8A Avara Technologies Pty. Ltd.
3619000E8B Astarte Technology Co, Ltd.
3620000E8C Siemens AG A&D ET
3621000E8D Systems in Progress Holding GmbH
3622000E8E SparkLAN Communications, Inc.
3623000E8F Sercomm Corp.
3624000E90 PONICO CORP.
3625000E92 Millinet Co., Ltd.
3626000E93 Milénio 3 Sistemas Electrónicos, Lda.
3627000E94 Maas International BV
3628000E95 Fujiya Denki Seisakusho Co.,Ltd.
3629000E96 Cubic Defense Applications, Inc.
3630000E97 Ultracker Technology CO., Inc
3631000E98 Vitec CC, INC.
3632000E99 Spectrum Digital, Inc
3633000E9A BOE TECHNOLOGY GROUP CO.,LTD
3634000E9C Pemstar
3635000E9D Video Networks Ltd
3636000E9E Topfield Co., Ltd
3637000E9F TEMIC SDS GmbH
3638000EA0 NetKlass Technology Inc.
3639000EA1 Formosa Teletek Corporation
3640000EA2 CyberGuard Corporation
3641000EA3 CNCR-IT CO.,LTD,HangZhou P.R.CHINA
3642000EA4 Certance Inc.
3643000EA5 BLIP Systems
3644000EA6 ASUSTEK COMPUTER INC.
3645000EA7 Endace Inc Ltd.
3646000EA8 United Technologists Europe Limited
3647000EA9 Shanghai Xun Shi Communications Equipment Ltd. Co.
3648000EAC MINTRON ENTERPRISE CO., LTD.
3649000EAD Metanoia Technologies, Inc.
3650000EAE GAWELL TECHNOLOGIES CORP.
3651000EAF CASTEL
3652000EB0 Solutions Radio BV
3653000EB1 Newcotech,Ltd
3654000EB2 Micro-Research Finland Oy
3655000EB3 LeftHand Networks
3656000EB4 GUANGZHOU GAOKE COMMUNICATIONS TECHNOLOGY CO.LTD.
3657000EB5 Ecastle Electronics Co., Ltd.
3658000EB6 Riverbed Technology, Inc.
3659000EB7 Knovative, Inc.
3660000EB8 Iiga co.,Ltd
3661000EB9 HASHIMOTO Electronics Industry Co.,Ltd.
3662000EBA HANMI SEMICONDUCTOR CO., LTD.
3663000EBB Everbee Networks
3664000EBC Cullmann GmbH
3665000EBD Burdick, a Quinton Company
3666000EBE B&B Electronics Manufacturing Co.
3667000EC0 Nortel Networks
3668000EC1 MYNAH Technologies
3669000EC2 Lowrance Electronics, Inc.
3670000EC3 Logic Controls, Inc.
3671000EC4 Iskra Transmission d.d.
3672000EC6 ASIX ELECTRONICS CORP.
3673000EC7 Appeal Telecom Co.,Ltd.
3674000EC8 Zoran Corporation
3675000EC9 YOKO Technology Corp.
3676000ECB VineSys Technology
3677000ECC Tableau
3678000ECD SKOV A/S
3679000ECE S.I.T.T.I. S.p.A.
3680000ECF PROFIBUS Nutzerorganisation e.V.
3681000ED0 Privaris, Inc.
3682000ED1 Osaka Micro Computer.
3683000ED2 Filtronic plc
3684000ED3 Epicenter, Inc.
3685000ED4 CRESITT INDUSTRIE
3686000ED5 COPAN Systems Inc.
3687000ED6 Cisco Systems
3688000ED7 Cisco Systems
3689000ED8 Aktino, Inc.
3690000ED9 Aksys, Ltd.
3691000EDA C-TECH UNITED CORP.
3692000EDB XiNCOM Corp.
3693000EDC Tellion INC.
3694000EDD SHURE INCORPORATED
3695000EDE REMEC, Inc.
3696000EDF PLX Technology
3697000EE0 Mcharge
3698000EE1 ExtremeSpeed Inc.
3699000EE2 Custom Engineering S.p.A.
3700000EE3 Chiyu Technology Co.,Ltd
3701000EE5 bitWallet, Inc.
3702000EE6 Adimos Systems LTD
3703000EE7 AAC ELECTRONICS CORP.
3704000EE8 zioncom
3705000EE9 WayTech Development, Inc.
3706000EEA Shandong Luneng Jicheng Electronics,Co.,Ltd
3707000EEB Sandmartin(zhong shan)Electronics Co.,Ltd
3708000EEC Orban
3709000EED Nokia Danmark A/S
3710000EEE Muco Industrie BV
3711000EF0 Festo AG & Co. KG
3712000EF1 EZQUEST INC.
3713000EF3 Smarthome
3714000EF4 Shenzhen Kasda Digital Technology Co.,Ltd
3715000EF5 iPAC Technology Co., Ltd.
3716000EF6 E-TEN Information Systems Co., Ltd.
3717000EF7 Vulcan Portals Inc
3718000EF8 SBC ASI
3719000EF9 REA Elektronik GmbH
3720000EFA Optoway Technology Incorporation
3721000EFB Macey Enterprises
3722000EFC JTAG Technologies B.V.
3723000EFD FUJI PHOTO OPTICAL CO., LTD.
3724000EFE EndRun Technologies LLC
3725000EFF Megasolution,Inc.
3726000F00 Legra Systems, Inc.
3727000F01 DIGITALKS INC
3728000F02 Digicube Technology Co., Ltd
3729000F03 COM&C CO., LTD
3730000F04 cim-usa inc
3731000F05 3B SYSTEM INC.
3732000F06 Nortel Networks
3733000F07 Mangrove Systems, Inc.
3734000F08 Indagon Oy
3735000F0B Kentima Technologies AB
3736000F0C SYNCHRONIC ENGINEERING
3737000F0D Hunt Electronic Co., Ltd.
3738000F0E WaveSplitter Technologies, Inc.
3739000F0F Real ID Technology Co., Ltd.
3740000F10 RDM Corporation
3741000F11 Prodrive B.V.
3742000F12 Panasonic AVC Networks Germany GmbH
3743000F13 Nisca corporation
3744000F14 Mindray Co., Ltd.
3745000F15 Kjaerulff1 A/S
3746000F16 JAY HOW TECHNOLOGY CO.,
3747000F17 Insta Elektro GmbH
3748000F18 Industrial Control Systems
3749000F19 Guidant Corporation
3750000F1A Gaming Support B.V.
3751000F1B Ego Systems Inc.
3752000F1C DigitAll World Co., Ltd
3753000F1D Cosmo Techs Co., Ltd.
3754000F1E Chengdu KT Electric Co.of High & New Technology
3755000F1F WW PCBA Test
3756000F20 WW Ops
3757000F21 Scientific Atlanta, Inc
3758000F22 Helius, Inc.
3759000F23 Cisco Systems
3760000F24 Cisco Systems
3761000F25 AimValley B.V.
3762000F26 WorldAccxx LLC
3763000F27 TEAL Electronics, Inc.
3764000F28 Itronix Corporation
3765000F29 Augmentix Corporation
3766000F2A Cableware Electronics
3767000F2B GREENBELL SYSTEMS
3768000F2C Uplogix, Inc.
3769001000 CABLE TELEVISION LABORATORIES, INC.
3770001001 MCK COMMUNICATIONS
3771001002 ACTIA
3772001003 IMATRON, INC.
3773001004 THE BRANTLEY COILE COMPANY,INC
3774001005 UEC COMMERCIAL
3775001006 Thales Contact Solutions Ltd.
3776001007 CISCO SYSTEMS, INC.
3777001008 VIENNA SYSTEMS CORPORATION
3778001009 HORO QUARTZ
377900100A WILLIAMS COMMUNICATIONS GROUP
378000100B CISCO SYSTEMS, INC.
378100100C ITO CO., LTD.
378200100D CISCO SYSTEMS, INC.
378300100E MICRO LINEAR CORPORATION
378400100F INDUSTRIAL CPU SYSTEMS
3785001010 INITIO CORPORATION
3786001011 CISCO SYSTEMS, INC.
3787001012 PROCESSOR SYSTEMS (I) PVT LTD
3788001013 INDUSTRIAL COMPUTER SOURCE
3789001014 CISCO SYSTEMS, INC.
3790001015 OOmon Inc.
3791001016 T.SQWARE
3792001017 MICOS GmbH
3793001018 BROADCOM CORPORATION
3794001019 SIRONA DENTAL SYSTEMS GmbH & Co. KG
379500101A PictureTel Corp.
379600101B CORNET TECHNOLOGY, INC.
379700101C OHM TECHNOLOGIES INTL, LLC
379800101D WINBOND ELECTRONICS CORP.
379900101E MATSUSHITA ELECTRONIC INSTRUMENTS CORP.
380000101F CISCO SYSTEMS, INC.
3801001020 WELCH ALLYN, DATA COLLECTION
3802001021 ENCANTO NETWORKS, INC.
3803001022 SatCom Media Corporation
3804001023 FLOWWISE NETWORKS, INC.
3805001024 NAGOYA ELECTRIC WORKS CO., LTD
3806001025 GRAYHILL INC.
3807001026 ACCELERATED NETWORKS, INC.
3808001027 L-3 COMMUNICATIONS EAST
3809001028 COMPUTER TECHNICA, INC.
3810001029 CISCO SYSTEMS, INC.
381100102A ZF MICROSYSTEMS, INC.
381200102B UMAX DATA SYSTEMS, INC.
381300102C Lasat Networks A/S
381400102D HITACHI SOFTWARE ENGINEERING
381500102E NETWORK SYSTEMS & TECHNOLOGIES PVT. LTD.
381600102F CISCO SYSTEMS, INC.
3817001030 Wi-LAN, Inc.
3818001031 OBJECTIVE COMMUNICATIONS, INC.
3819001032 ALTA TECHNOLOGY
3820001033 ACCESSLAN COMMUNICATIONS, INC.
3821001034 GNP Computers
3822001035 ELITEGROUP COMPUTER SYSTEMS CO., LTD
3823001036 INTER-TEL INTEGRATED SYSTEMS
3824001037 CYQ've Technology Co., Ltd.
3825001038 MICRO RESEARCH INSTITUTE, INC.
3826001039 Vectron Systems AG
382700103A DIAMOND NETWORK TECH
382800103B HIPPI NETWORKING FORUM
382900103C IC ENSEMBLE, INC.
383000103D PHASECOM, LTD.
383100103E NETSCHOOLS CORPORATION
383200103F TOLLGRADE COMMUNICATIONS, INC.
3833001040 INTERMEC CORPORATION
3834001041 BRISTOL BABCOCK, INC.
3835001042 AlacriTech
3836001043 A2 CORPORATION
3837001044 InnoLabs Corporation
3838001045 Nortel Networks
3839001046 ALCORN MCBRIDE INC.
3840001047 ECHO ELECTRIC CO. LTD.
3841001048 HTRC AUTOMATION, INC.
3842001049 SHORELINE TELEWORKS, INC.
384300104A THE PARVUC CORPORATION
384400104B 3COM CORPORATION
384500104C COMPUTER ACCESS TECHNOLOGY
384600104D SURTEC INDUSTRIES, INC.
384700104E CEOLOGIC
384800104F STORAGE TECHNOLOGY CORPORATION
3849001050 RION CO., LTD.
3850001051 CMICRO CORPORATION
3851001052 METTLER-TOLEDO (ALBSTADT) GMBH
3852001053 COMPUTER TECHNOLOGY CORP.
3853001054 CISCO SYSTEMS, INC.
3854001055 FUJITSU MICROELECTRONICS, INC.
3855001056 SODICK CO., LTD.
3856001057 Rebel.com, Inc.
3857001058 ArrowPoint Communications
3858001059 DIABLO RESEARCH CO. LLC
385900105A 3COM CORPORATION
386000105B NET INSIGHT AB
386100105C QUANTUM DESIGNS (H.K.) LTD.
386200105D Draeger Medical
386300105E HEKIMIAN LABORATORIES, INC.
386400105F IN-SNEC
3865001060 BILLIONTON SYSTEMS, INC.
3866001061 HOSTLINK CORP.
3867001062 NX SERVER, INC.
3868001063 STARGUIDE DIGITAL NETWORKS
3869001064 DIGITAL EQUIPMENT CORP.
3870001065 RADYNE CORPORATION
3871001066 ADVANCED CONTROL SYSTEMS, INC.
3872001067 REDBACK NETWORKS, INC.
3873001068 COMOS TELECOM
3874001069 HELIOSS COMMUNICATIONS, INC.
387500106A DIGITAL MICROWAVE CORPORATION
387600106B SONUS NETWORKS, INC.
387700106C INFRATEC PLUS GmbH
387800106D INTEGRITY COMMUNICATIONS, INC.
387900106E TADIRAN COM. LTD.
388000106F TRENTON TECHNOLOGY INC.
3881001070 CARADON TREND LTD.
3882001071 ADVANET INC.
3883001072 GVN TECHNOLOGIES, INC.
3884001073 TECHNOBOX, INC.
3885001074 ATEN INTERNATIONAL CO., LTD.
3886001075 Maxtor Corporation
3887001076 EUREM GmbH
3888001077 SAF DRIVE SYSTEMS, LTD.
3889001078 NUERA COMMUNICATIONS, INC.
3890001079 CISCO SYSTEMS, INC.
389100107A AmbiCom, Inc.
389200107B CISCO SYSTEMS, INC.
389300107C P-COM, INC.
389400107D AURORA COMMUNICATIONS, LTD.
389500107E BACHMANN ELECTRONIC GmbH
389600107F CRESTRON ELECTRONICS, INC.
3897001080 METAWAVE COMMUNICATIONS
3898001081 DPS, INC.
3899001082 JNA TELECOMMUNICATIONS LIMITED
3900001083 HEWLETT-PACKARD COMPANY
3901001084 K-BOT COMMUNICATIONS
3902001085 POLARIS COMMUNICATIONS, INC.
3903001086 ATTO TECHNOLOGY, INC.
3904001087 Xstreamis PLC
3905001088 AMERICAN NETWORKS INC.
3906001089 WebSonic
390700108A TeraLogic, Inc.
390800108B LASERANIMATION SOLLINGER GmbH
390900108C FUJITSU TELECOMMUNICATIONS EUROPE, LTD.
391000108D JOHNSON CONTROLS, INC.
391100108E HUGH SYMONS CONCEPT Technologies Ltd.
391200108F RAPTOR SYSTEMS
3913001090 CIMETRICS, INC.
3914001091 NO WIRES NEEDED BV
3915001092 NETCORE INC.
3916001093 CMS COMPUTERS, LTD.
3917001094 Performance Analysis Broadband, Spirent plc
3918001095 Thomson Multimedia, Inc.
3919001096 TRACEWELL SYSTEMS, INC.
3920001097 WinNet Metropolitan Communications Systems, Inc.
3921001098 STARNET TECHNOLOGIES, INC.
3922001099 InnoMedia, Inc.
392300109A NETLINE
392400109B VIXEL CORPORATION
392500109C M-SYSTEM CO., LTD.
392600109D CLARINET SYSTEMS, INC.
392700109E AWARE, INC.
392800109F PAVO, INC.
39290010A0 INNOVEX TECHNOLOGIES, INC.
39300010A1 KENDIN SEMICONDUCTOR, INC.
39310010A2 TNS
39320010A3 OMNITRONIX, INC.
39330010A4 XIRCOM
39340010A5 OXFORD INSTRUMENTS
39350010A6 CISCO SYSTEMS, INC.
39360010A7 UNEX TECHNOLOGY CORPORATION
39370010A8 RELIANCE COMPUTER CORP.
39380010A9 ADHOC TECHNOLOGIES
39390010AA MEDIA4, INC.
39400010AB KOITO INDUSTRIES, LTD.
39410010AC IMCI TECHNOLOGIES
39420010AD SOFTRONICS USB, INC.
39430010AE SHINKO ELECTRIC INDUSTRIES CO.
39440010AF TAC SYSTEMS, INC.
39450010B0 MERIDIAN TECHNOLOGY CORP.
39460010B1 FOR-A CO., LTD.
39470010B2 COACTIVE AESTHETICS
39480010B3 NOKIA MULTIMEDIA TERMINALS
39490010B4 ATMOSPHERE NETWORKS
39500010B5 ACCTON TECHNOLOGY CORPORATION
39510010B6 ENTRATA COMMUNICATIONS CORP.
39520010B7 COYOTE TECHNOLOGIES, LLC
39530010B8 ISHIGAKI COMPUTER SYSTEM CO.
39540010B9 MAXTOR CORP.
39550010BA MARTINHO-DAVIS SYSTEMS, INC.
39560010BB DATA & INFORMATION TECHNOLOGY
39570010BC Aastra Telecom
39580010BD THE TELECOMMUNICATION TECHNOLOGY COMMITTEE
39590010BE TELEXIS CORP.
39600010BF InterAir Wireless
39610010C0 ARMA, INC.
39620010C1 OI ELECTRIC CO., LTD.
39630010C2 WILLNET, INC.
39640010C3 CSI-CONTROL SYSTEMS
39650010C4 MEDIA LINKS CO., LTD.
39660010C5 PROTOCOL TECHNOLOGIES, INC.
39670010C6 USI
39680010C7 DATA TRANSMISSION NETWORK
39690010C8 COMMUNICATIONS ELECTRONICS SECURITY GROUP
39700010C9 MITSUBISHI ELECTRONICS LOGISTIC SUPPORT CO.
39710010CA INTEGRAL ACCESS
39720010CB FACIT K.K.
39730010CC CLP COMPUTER LOGISTIK PLANUNG GmbH
39740010CD INTERFACE CONCEPT
39750010CE VOLAMP, LTD.
39760010CF FIBERLANE COMMUNICATIONS
39770010D0 WITCOM, LTD.
39780010D1 Top Layer Networks, Inc.
39790010D2 NITTO TSUSHINKI CO., LTD
39800010D3 GRIPS ELECTRONIC GMBH
39810010D4 STORAGE COMPUTER CORPORATION
39820010D5 IMASDE CANARIAS, S.A.
39830010D6 ITT - A/CD
39840010D7 ARGOSY RESEARCH INC.
39850010D8 CALISTA
39860010D9 IBM JAPAN, FUJISAWA MT+D
39870010DA MOTION ENGINEERING, INC.
39880010DB NetScreen Technologies, Inc.
39890010DC MICRO-STAR INTERNATIONAL CO., LTD.
39900010DD ENABLE SEMICONDUCTOR, INC.
39910010DE INTERNATIONAL DATACASTING CORPORATION
39920010DF RISE COMPUTER INC.
39930010E0 COBALT MICROSERVER, INC.
39940010E1 S.I. TECH, INC.
39950010E2 ArrayComm, Inc.
39960010E3 COMPAQ COMPUTER CORPORATION
39970010E4 NSI CORPORATION
39980010E5 SOLECTRON TEXAS
39990010E6 APPLIED INTELLIGENT SYSTEMS, INC.
40000010E7 BreezeCom
40010010E8 TELOCITY, INCORPORATED
40020010E9 RAIDTEC LTD.
40030010EA ADEPT TECHNOLOGY
40040010EB SELSIUS SYSTEMS, INC.
40050010EC RPCG, LLC
40060010ED SUNDANCE TECHNOLOGY, INC.
40070010EE CTI PRODUCTS, INC.
40080010EF DBTEL INCORPORATED
40090010F1 I-O CORPORATION
40100010F2 ANTEC
40110010F3 Nexcom International Co., Ltd.
40120010F4 VERTICAL NETWORKS, INC.
40130010F5 AMHERST SYSTEMS, INC.
40140010F6 CISCO SYSTEMS, INC.
40150010F7 IRIICHI TECHNOLOGIES Inc.
40160010F8 KENWOOD TMI CORPORATION
40170010F9 UNIQUE SYSTEMS, INC.
40180010FA ZAYANTE, INC.
40190010FB ZIDA TECHNOLOGIES LIMITED
40200010FC BROADBAND NETWORKS, INC.
40210010FD COCOM A/S
40220010FE DIGITAL EQUIPMENT CORPORATION
40230010FF CISCO SYSTEMS, INC.
4024001C7C PERQ SYSTEMS CORPORATION
4025002000 LEXMARK INTERNATIONAL, INC.
4026002001 DSP SOLUTIONS, INC.
4027002002 SERITECH ENTERPRISE CO., LTD.
4028002003 PIXEL POWER LTD.
4029002004 YAMATAKE-HONEYWELL CO., LTD.
4030002005 SIMPLE TECHNOLOGY
4031002006 GARRETT COMMUNICATIONS, INC.
4032002007 SFA, INC.
4033002008 CABLE & COMPUTER TECHNOLOGY
4034002009 PACKARD BELL ELEC., INC.
403500200A SOURCE-COMM CORP.
403600200B OCTAGON SYSTEMS CORP.
403700200C ADASTRA SYSTEMS CORP.
403800200D CARL ZEISS
403900200E SATELLITE TECHNOLOGY MGMT, INC
404000200F TANBAC CO., LTD.
4041002010 JEOL SYSTEM TECHNOLOGY CO. LTD
4042002011 CANOPUS CO., LTD.
4043002012 CAMTRONICS MEDICAL SYSTEMS
4044002013 DIVERSIFIED TECHNOLOGY, INC.
4045002014 GLOBAL VIEW CO., LTD.
4046002015 ACTIS COMPUTER SA
4047002016 SHOWA ELECTRIC WIRE & CABLE CO
4048002017 ORBOTECH
4049002018 CIS TECHNOLOGY INC.
4050002019 OHLER GmbH
405100201A N-BASE SWITCH COMMUNICATIONS
405200201B NORTHERN TELECOM/NETWORK
405300201C EXCEL, INC.
405400201D KATANA PRODUCTS
405500201E NETQUEST CORPORATION
405600201F BEST POWER TECHNOLOGY, INC.
4057002020 MEGATRON COMPUTER INDUSTRIES PTY, LTD.
4058002021 ALGORITHMS SOFTWARE PVT. LTD.
4059002022 TEKNIQUE, INC.
4060002023 T.C. TECHNOLOGIES PTY. LTD
4061002024 PACIFIC COMMUNICATION SCIENCES
4062002025 CONTROL TECHNOLOGY, INC.
4063002026 AMKLY SYSTEMS, INC.
4064002027 MING FORTUNE INDUSTRY CO., LTD
4065002028 WEST EGG SYSTEMS, INC.
4066002029 TELEPROCESSING PRODUCTS, INC.
406700202A N.V. DZINE
406800202B ADVANCED TELECOMMUNICATIONS MODULES, LTD.
406900202C WELLTRONIX CO., LTD.
407000202D TAIYO CORPORATION
407100202E DAYSTAR DIGITAL
407200202F ZETA COMMUNICATIONS, LTD.
4073002030 ANALOG & DIGITAL SYSTEMS
4074002031 ERTEC GmbH
4075002032 ALCATEL TAISEL
4076002033 SYNAPSE TECHNOLOGIES, INC.
4077002034 ROTEC INDUSTRIEAUTOMATION GMBH
4078002035 IBM CORPORATION
4079002036 BMC SOFTWARE
4080002037 SEAGATE TECHNOLOGY
4081002038 VME MICROSYSTEMS INTERNATIONAL CORPORATION
4082002039 SCINETS
408300203A DIGITAL BIOMETRICS INC.
408400203B WISDM LTD.
408500203C EUROTIME AB
408600203D NOVAR ELECTRONICS CORPORATION
408700203E LogiCan Technologies, Inc.
408800203F JUKI CORPORATION
4089002040 Motorola Broadband Communications Sector
4090002041 DATA NET
4091002042 DATAMETRICS CORP.
4092002043 NEURON COMPANY LIMITED
4093002044 GENITECH PTY LTD
4094002045 ION Networks, Inc.
4095002046 CIPRICO, INC.
4096002047 STEINBRECHER CORP.
4097002048 Marconi Communications
4098002049 COMTRON, INC.
409900204A PRONET GMBH
410000204B AUTOCOMPUTER CO., LTD.
410100204C MITRON COMPUTER PTE LTD.
410200204D INOVIS GMBH
410300204E NETWORK SECURITY SYSTEMS, INC.
410400204F DEUTSCHE AEROSPACE AG
4105002050 KOREA COMPUTER INC.
4106002051 Verilink Corporation
4107002052 RAGULA SYSTEMS
4108002053 HUNTSVILLE MICROSYSTEMS, INC.
4109002054 EASTERN RESEARCH, INC.
4110002055 ALTECH CO., LTD.
4111002056 NEOPRODUCTS
4112002057 TITZE DATENTECHNIK GmbH
4113002058 ALLIED SIGNAL INC.
4114002059 MIRO COMPUTER PRODUCTS AG
411500205A COMPUTER IDENTICS
411600205B SKYLINE TECHNOLOGY
411700205C InterNet Systems of Florida, Inc.
411800205D NANOMATIC OY
411900205E CASTLE ROCK, INC.
412000205F GAMMADATA COMPUTER GMBH
4121002060 ALCATEL ITALIA S.p.A.
4122002061 DYNATECH COMMUNICATIONS, INC.
4123002062 SCORPION LOGIC, LTD.
4124002063 WIPRO INFOTECH LTD.
4125002064 PROTEC MICROSYSTEMS, INC.
4126002065 SUPERNET NETWORKING INC.
4127002066 GENERAL MAGIC, INC.
4128002068 ISDYNE
4129002069 ISDN SYSTEMS CORPORATION
413000206A OSAKA COMPUTER CORP.
413100206B KONICA MINOLTA HOLDINGS, INC.
413200206C EVERGREEN TECHNOLOGY CORP.
413300206D DATA RACE, INC.
413400206E XACT, INC.
413500206F FLOWPOINT CORPORATION
4136002070 HYNET, LTD.
4137002071 IBR GMBH
4138002072 WORKLINK INNOVATIONS
4139002073 FUSION SYSTEMS CORPORATION
4140002074 SUNGWOON SYSTEMS
4141002075 MOTOROLA COMMUNICATION ISRAEL
4142002076 REUDO CORPORATION
4143002077 KARDIOS SYSTEMS CORP.
4144002078 RUNTOP, INC.
4145002079 MIKRON GMBH
414600207A WiSE Communications, Inc.
414700207B Intel Corporation
414800207C AUTEC GmbH
414900207D ADVANCED COMPUTER APPLICATIONS
415000207E FINECOM Co., Ltd.
415100207F KYOEI SANGYO CO., LTD.
4152002080 SYNERGY (UK) LTD.
4153002081 TITAN ELECTRONICS
4154002082 ONEAC CORPORATION
4155002083 PRESTICOM INCORPORATED
4156002084 OCE PRINTING SYSTEMS, GMBH
4157002085 EXIDE ELECTRONICS
4158002086 MICROTECH ELECTRONICS LIMITED
4159002087 MEMOTEC COMMUNICATIONS CORP.
4160002088 GLOBAL VILLAGE COMMUNICATION
4161002089 T3PLUS NETWORKING, INC.
416200208A SONIX COMMUNICATIONS, LTD.
416300208B LAPIS TECHNOLOGIES, INC.
416400208C GALAXY NETWORKS, INC.
416500208D CMD TECHNOLOGY
416600208E CHEVIN SOFTWARE ENG. LTD.
416700208F ECI TELECOM LTD.
4168002090 ADVANCED COMPRESSION TECHNOLOGY, INC.
4169002091 J125, NATIONAL SECURITY AGENCY
4170002092 CHESS ENGINEERING B.V.
4171002093 LANDINGS TECHNOLOGY CORP.
4172002094 CUBIX CORPORATION
4173002095 RIVA ELECTRONICS
4174002096 Invensys
4175002097 APPLIED SIGNAL TECHNOLOGY
4176002098 HECTRONIC AB
4177002099 BON ELECTRIC CO., LTD.
417800209A THE 3DO COMPANY
417900209B ERSAT ELECTRONIC GMBH
418000209C PRIMARY ACCESS CORP.
418100209D LIPPERT AUTOMATIONSTECHNIK
418200209E BROWN'S OPERATING SYSTEM SERVICES, LTD.
418300209F MERCURY COMPUTER SYSTEMS, INC.
41840020A0 OA LABORATORY CO., LTD.
41850020A1 DOVATRON
41860020A2 GALCOM NETWORKING LTD.
41870020A3 DIVICOM INC.
41880020A4 MULTIPOINT NETWORKS
41890020A5 API ENGINEERING
41900020A6 PROXIM, INC.
41910020A7 PAIRGAIN TECHNOLOGIES, INC.
41920020A8 SAST TECHNOLOGY CORP.
41930020A9 WHITE HORSE INDUSTRIAL
41940020AA DIGIMEDIA VISION LTD.
41950020AB MICRO INDUSTRIES CORP.
41960020AC INTERFLEX DATENSYSTEME GMBH
41970020AD LINQ SYSTEMS
41980020AE ORNET DATA COMMUNICATION TECH.
41990020AF 3COM CORPORATION
42000020B0 GATEWAY DEVICES, INC.
42010020B1 COMTECH RESEARCH INC.
42020020B2 GKD Gesellschaft Fur Kommunikation Und Datentechnik
42030020B3 SCLTEC COMMUNICATIONS SYSTEMS
42040020B4 TERMA ELEKTRONIK AS
42050020B5 YASKAWA ELECTRIC CORPORATION
42060020B6 AGILE NETWORKS, INC.
42070020B7 NAMAQUA COMPUTERWARE
42080020B8 PRIME OPTION, INC.
42090020B9 METRICOM, INC.
42100020BA CENTER FOR HIGH PERFORMANCE
42110020BB ZAX CORPORATION
42120020BC JTEC PTY LTD.
42130020BD NIOBRARA R & D CORPORATION
42140020BE LAN ACCESS CORP.
42150020BF AEHR TEST SYSTEMS
42160020C0 PULSE ELECTRONICS, INC.
42170020C1 TAIKO ELECTRIC WORKS, LTD.
42180020C2 TEXAS MEMORY SYSTEMS, INC.
42190020C3 COUNTER SOLUTIONS LTD.
42200020C4 INET,INC.
42210020C5 EAGLE TECHNOLOGY
42220020C6 NECTEC
42230020C7 AKAI Professional M.I. Corp.
42240020C8 LARSCOM INCORPORATED
42250020C9 VICTRON BV
42260020CA DIGITAL OCEAN
42270020CB PRETEC ELECTRONICS CORP.
42280020CC DIGITAL SERVICES, LTD.
42290020CD HYBRID NETWORKS, INC.
42300020CE LOGICAL DESIGN GROUP, INC.
42310020CF TEST & MEASUREMENT SYSTEMS INC
42320020D0 VERSALYNX CORPORATION
42330020D1 MICROCOMPUTER SYSTEMS (M) SDN.
42340020D2 RAD DATA COMMUNICATIONS, LTD.
42350020D3 OST (OUEST STANDARD TELEMATIQU
42360020D4 CABLETRON - ZEITTNET INC.
42370020D5 VIPA GMBH
42380020D6 BREEZECOM
42390020D7 JAPAN MINICOMPUTER SYSTEMS CO., Ltd.
42400020D8 Nortel Networks
42410020D9 PANASONIC TECHNOLOGIES, INC./MIECO-US
42420020DA XYLAN CORPORATION
42430020DB XNET TECHNOLOGY, INC.
42440020DC DENSITRON TAIWAN LTD.
42450020DD Cybertec Pty Ltd
42460020DE JAPAN DIGITAL LABORAT'Y CO.LTD
42470020DF KYOSAN ELECTRIC MFG. CO., LTD.
42480020E0 PREMAX ELECTRONICS, INC.
42490020E1 ALAMAR ELECTRONICS
42500020E2 INFORMATION RESOURCE ENGINEERING
42510020E3 MCD KENCOM CORPORATION
42520020E4 HSING TECH ENTERPRISE CO., LTD
42530020E5 APEX DATA, INC.
42540020E6 LIDKOPING MACHINE TOOLS AB
42550020E7 B&W NUCLEAR SERVICE COMPANY
42560020E8 DATATREK CORPORATION
42570020E9 DANTEL
42580020EA EFFICIENT NETWORKS, INC.
42590020EB CINCINNATI MICROWAVE, INC.
42600020EC TECHWARE SYSTEMS CORP.
42610020ED GIGA-BYTE TECHNOLOGY CO., LTD.
42620020EE GTECH CORPORATION
42630020EF USC CORPORATION
42640020F0 UNIVERSAL MICROELECTRONICS CO.
42650020F1 ALTOS INDIA LIMITED
42660020F2 SUN MICROSYSTEMS, INC.
42670020F3 RAYNET CORPORATION
42680020F4 SPECTRIX CORPORATION
42690020F5 PANDATEL AG
42700020F6 NET TEK AND KARLNET, INC.
42710020F7 CYBERDATA
42720020F8 CARRERA COMPUTERS, INC.
42730020F9 PARALINK NETWORKS, INC.
42740020FA GDE SYSTEMS, INC.
42750020FB OCTEL COMMUNICATIONS CORP.
42760020FC MATROX
42770020FD ITV TECHNOLOGIES, INC.
42780020FE TOPWARE INC. / GRAND COMPUTER
42790020FF SYMMETRICAL TECHNOLOGIES
4280003000 ALLWELL TECHNOLOGY CORP.
4281003001 SMP
4282003002 Expand Networks
4283003003 Phasys Ltd.
4284003004 LEADTEK RESEARCH INC.
4285003005 Fujitsu Siemens Computers
4286003006 SUPERPOWER COMPUTER
4287003007 OPTI, INC.
4288003008 AVIO DIGITAL, INC.
4289003009 Tachion Networks, Inc.
429000300A AZTECH SYSTEMS LTD.
429100300B mPHASE Technologies, Inc.
429200300C CONGRUENCY, LTD.
429300300D MMC Technology, Inc.
429400300E Klotz Digital AG
429500300F IMT - Information Management T
4296003010 VISIONETICS INTERNATIONAL
4297003011 HMS FIELDBUS SYSTEMS AB
4298003012 DIGITAL ENGINEERING LTD.
4299003013 NEC Corporation
4300003014 DIVIO, INC.
4301003015 CP CLARE CORP.
4302003016 ISHIDA CO., LTD.
4303003017 TERASTACK LTD.
4304003018 Jetway Information Co., Ltd.
4305003019 CISCO SYSTEMS, INC.
430600301A SMARTBRIDGES PTE. LTD.
430700301B SHUTTLE, INC.
430800301C ALTVATER AIRDATA SYSTEMS
430900301D SKYSTREAM, INC.
431000301E 3COM Europe Ltd.
431100301F OPTICAL NETWORKS, INC.
4312003020 TSI, Inc.
4313003021 HSING TECH. ENTERPRISE CO.,LTD
4314003022 Fong Kai Industrial Co., Ltd.
4315003023 COGENT COMPUTER SYSTEMS, INC.
4316003024 CISCO SYSTEMS, INC.
4317003025 CHECKOUT COMPUTER SYSTEMS, LTD
4318003026 HEITEL
4319003027 KERBANGO, INC.
4320003028 FASE Saldatura srl
4321003029 OPICOM
432200302A SOUTHERN INFORMATION
432300302B INALP NETWORKS, INC.
432400302C SYLANTRO SYSTEMS CORPORATION
432500302D QUANTUM BRIDGE COMMUNICATIONS
432600302E Hoft & Wessel AG
432700302F Smiths Industries
4328003030 HARMONIX CORPORATION
4329003031 LIGHTWAVE COMMUNICATIONS, INC.
4330003032 MagicRam, Inc.
4331003033 ORIENT TELECOM CO., LTD.
4332003036 RMP ELEKTRONIKSYSTEME GMBH
4333003037 Packard Bell Nec Services
4334003038 XCP, INC.
4335003039 SOFTBOOK PRESS
433600303A MAATEL
433700303B PowerCom Technology
433800303C ONNTO CORP.
433900303D IVA CORPORATION
434000303E Radcom Ltd.
434100303F TurboComm Tech Inc.
4342003040 CISCO SYSTEMS, INC.
4343003041 SAEJIN T & M CO., LTD.
4344003042 DeTeWe-Deutsche Telephonwerke
4345003043 IDREAM TECHNOLOGIES, PTE. LTD.
4346003044 Portsmith LLC
4347003045 Village Networks, Inc. (VNI)
4348003046 Controlled Electronic Manageme
4349003047 NISSEI ELECTRIC CO., LTD.
4350003048 Supermicro Computer, Inc.
4351003049 BRYANT TECHNOLOGY, LTD.
435200304A FRAUNHOFER INSTITUTE IMS
435300304B ORBACOM SYSTEMS, INC.
435400304C APPIAN COMMUNICATIONS, INC.
435500304D ESI
435600304E BUSTEC PRODUCTION LTD.
435700304F PLANET Technology Corporation
4358003050 Versa Technology
4359003051 ORBIT AVIONIC & COMMUNICATION
4360003052 ELASTIC NETWORKS
4361003053 Basler AG
4362003054 CASTLENET TECHNOLOGY, INC.
4363003055 Hitachi Semiconductor America,
4364003056 Beck IPC GmbH
4365003057 E-Tel Corporation
4366003058 API MOTION
4367003059 DIGITAL-LOGIC AG
436800305A TELGEN CORPORATION
436900305B MODULE DEPARTMENT
437000305C SMAR Laboratories Corp.
437100305D DIGITRA SYSTEMS, INC.
437200305E Abelko Innovation
437300305F IMACON APS
4374003060 STARMATIX, INC.
4375003061 MobyTEL
4376003062 PATH 1 NETWORK TECHNOL'S INC.
4377003063 SANTERA SYSTEMS, INC.
4378003064 ADLINK TECHNOLOGY, INC.
4379003065 APPLE COMPUTER, INC.
4380003066 DIGITAL WIRELESS CORPORATION
4381003067 BIOSTAR MICROTECH INT'L CORP.
4382003068 CYBERNETICS TECH. CO., LTD.
4383003069 IMPACCT TECHNOLOGY CORP.
438400306A PENTA MEDIA CO., LTD.
438500306B CMOS SYSTEMS, INC.
438600306C Hitex Holding GmbH
438700306D LUCENT TECHNOLOGIES
438800306E HEWLETT PACKARD
438900306F SEYEON TECH. CO., LTD.
4390003070 1Net Corporation
4391003071 Cisco Systems, Inc.
4392003072 INTELLIBYTE INC.
4393003073 International Microsystems, In
4394003074 EQUIINET LTD.
4395003075 ADTECH
4396003076 Akamba Corporation
4397003077 ONPREM NETWORKS
4398003078 Cisco Systems, Inc.
4399003079 CQOS, INC.
440000307A Advanced Technology & Systems
440100307B Cisco Systems, Inc.
440200307C ADID SA
440300307D GRE AMERICA, INC.
440400307E Redflex Communication Systems
440500307F IRLAN LTD.
4406003080 CISCO SYSTEMS, INC.
4407003081 ALTOS C&C
4408003082 TAIHAN ELECTRIC WIRE CO., LTD.
4409003083 Ivron Systems
4410003084 ALLIED TELESYN INTERNATIONAL
4411003085 CISCO SYSTEMS, INC.
4412003086 Transistor Devices, Inc.
4413003087 VEGA GRIESHABER KG
4414003088 Siara Systems, Inc.
4415003089 Spectrapoint Wireless, LLC
441600308A NICOTRA SISTEMI S.P.A
441700308B Brix Networks
441800308C ADVANCED DIGITAL INFORMATION
441900308D PINNACLE SYSTEMS, INC.
442000308E CROSS MATCH TECHNOLOGIES, INC.
442100308F MICRILOR, Inc.
4422003090 CYRA TECHNOLOGIES, INC.
4423003091 TAIWAN FIRST LINE ELEC. CORP.
4424003092 ModuNORM GmbH
4425003093 SONNET TECHNOLOGIES, INC.
4426003094 Cisco Systems, Inc.
4427003095 Procomp Informatics, Ltd.
4428003096 CISCO SYSTEMS, INC.
4429003097 EXOMATIC AB
4430003098 Global Converging Technologies
4431003099 BOENIG UND KALLENBACH OHG
443200309A ASTRO TERRA CORP.
443300309B Smartware
443400309C Timing Applications, Inc.
443500309D Nimble Microsystems, Inc.
443600309E WORKBIT CORPORATION.
443700309F AMBER NETWORKS
44380030A0 TYCO SUBMARINE SYSTEMS, LTD.
44390030A1 WEBGATE Inc.
44400030A2 Lightner Engineering
44410030A3 CISCO SYSTEMS, INC.
44420030A4 Woodwind Communications System
44430030A5 ACTIVE POWER
44440030A6 VIANET TECHNOLOGIES, LTD.
44450030A7 SCHWEITZER ENGINEERING
44460030A8 OL'E COMMUNICATIONS, INC.
44470030A9 Netiverse, Inc.
44480030AA AXUS MICROSYSTEMS, INC.
44490030AB DELTA NETWORKS, INC.
44500030AC Systeme Lauer GmbH & Co., Ltd.
44510030AD SHANGHAI COMMUNICATION
44520030AE Times N System, Inc.
44530030AF Honeywell GmbH
44540030B0 Convergenet Technologies
44550030B1 GOC GESELLSCHAFT FUR OPTISCHE
44560030B2 WESCAM - HEALDSBURG
44570030B3 San Valley Systems, Inc.
44580030B4 INTERSIL CORP.
44590030B5 Tadiran Microwave Networks
44600030B6 CISCO SYSTEMS, INC.
44610030B7 Teletrol Systems, Inc.
44620030B8 RiverDelta Networks
44630030B9 ECTEL
44640030BA AC&T SYSTEM CO., LTD.
44650030BB CacheFlow, Inc.
44660030BC Optronic AG
44670030BD BELKIN COMPONENTS
44680030BE City-Net Technology, Inc.
44690030BF MULTIDATA GMBH
44700030C0 Lara Technology, Inc.
44710030C1 HEWLETT-PACKARD
44720030C2 COMONE
44730030C3 FLUECKIGER ELEKTRONIK AG
44740030C4 Niigata Canotec Co., Inc.
44750030C5 CADENCE DESIGN SYSTEMS
44760030C6 CONTROL SOLUTIONS, INC.
44770030C7 MACROMATE CORP.
44780030C8 GAD LINE, LTD.
44790030C9 LuxN, N
44800030CA Discovery Com
44810030CB OMNI FLOW COMPUTERS, INC.
44820030CC Tenor Networks, Inc.
44830030CD CONEXANT SYSTEMS, INC.
44840030CE Zaffire
44850030CF TWO TECHNOLOGIES, INC.
44860030D1 INOVA CORPORATION
44870030D2 WIN TECHNOLOGIES, CO., LTD.
44880030D3 Agilent Technologies
44890030D4 COMTIER
44900030D5 DResearch GmbH
44910030D6 MSC VERTRIEBS GMBH
44920030D7 Innovative Systems, L.L.C.
44930030D8 SITEK
44940030D9 DATACORE SOFTWARE CORP.
44950030DA COMTREND CO.
44960030DB Mindready Solutions, Inc.
44970030DC RIGHTECH CORPORATION
44980030DD INDIGITA CORPORATION
44990030DE WAGO Kontakttechnik GmbH
45000030DF KB/TEL TELECOMUNICACIONES
45010030E0 OXFORD SEMICONDUCTOR LTD.
45020030E1 ACROTRON SYSTEMS, INC.
45030030E2 GARNET SYSTEMS CO., LTD.
45040030E3 SEDONA NETWORKS CORP.
45050030E4 CHIYODA SYSTEM RIKEN
45060030E5 Amper Datos S.A.
45070030E6 SIEMENS MEDICAL SYSTEMS
45080030E7 CNF MOBILE SOLUTIONS, INC.
45090030E8 ENSIM CORP.
45100030E9 GMA COMMUNICATION MANUFACT'G
45110030EA TeraForce Technology Corporation
45120030EB TURBONET COMMUNICATIONS, INC.
45130030EC BORGARDT
45140030ED Expert Magnetics Corp.
45150030EE DSG Technology, Inc.
45160030EF NEON TECHNOLOGY, INC.
45170030F0 Uniform Industrial Corp.
45180030F1 Accton Technology Corp.
45190030F2 CISCO SYSTEMS, INC.
45200030F3 At Work Computers
45210030F4 STARDOT TECHNOLOGIES
45220030F5 Wild Lab. Ltd.
45230030F6 SECURELOGIX CORPORATION
45240030F7 RAMIX INC.
45250030F8 Dynapro Systems, Inc.
45260030F9 Sollae Systems Co., Ltd.
45270030FA TELICA, INC.
45280030FB AZS Technology AG
45290030FC Terawave Communications, Inc.
45300030FD INTEGRATED SYSTEMS DESIGN
45310030FE DSA GmbH
45320030FF DATAFAB SYSTEMS, INC.
4533004000 PCI COMPONENTES DA AMAZONIA LTD
4534004001 ZYXEL COMMUNICATIONS, INC.
4535004002 PERLE SYSTEMS LIMITED
4536004003 WESTINGHOUSE PROCESS CONTROL
4537004004 ICM CO. LTD.
4538004005 ANI COMMUNICATIONS INC.
4539004006 SAMPO TECHNOLOGY CORPORATION
4540004007 TELMAT INFORMATIQUE
4541004008 A PLUS INFO CORPORATION
4542004009 TACHIBANA TECTRON CO., LTD.
454300400A PIVOTAL TECHNOLOGIES, INC.
454400400B CISCO SYSTEMS, INC.
454500400C GENERAL MICRO SYSTEMS, INC.
454600400D LANNET DATA COMMUNICATIONS,LTD
454700400E MEMOTEC COMMUNICATIONS, INC.
454800400F DATACOM TECHNOLOGIES
4549004010 SONIC SYSTEMS, INC.
4550004011 ANDOVER CONTROLS CORPORATION
4551004012 WINDATA, INC.
4552004013 NTT DATA COMM. SYSTEMS CORP.
4553004014 COMSOFT GMBH
4554004015 ASCOM INFRASYS AG
4555004016 HADAX ELECTRONICS, INC.
4556004017 XCD INC.
4557004018 ADOBE SYSTEMS, INC.
4558004019 AEON SYSTEMS, INC.
455900401A FUJI ELECTRIC CO., LTD.
456000401B PRINTER SYSTEMS CORP.
456100401C AST RESEARCH, INC.
456200401D INVISIBLE SOFTWARE, INC.
456300401E ICC
456400401F COLORGRAPH LTD
4565004020 PINACL COMMUNICATION
4566004021 RASTER GRAPHICS
4567004022 KLEVER COMPUTERS, INC.
4568004023 LOGIC CORPORATION
4569004024 COMPAC INC.
4570004025 MOLECULAR DYNAMICS
4571004026 MELCO, INC.
4572004027 SMC MASSACHUSETTS, INC.
4573004028 NETCOMM LIMITED
4574004029 COMPEX
457500402A CANOGA-PERKINS
457600402B TRIGEM COMPUTER, INC.
457700402C ISIS DISTRIBUTED SYSTEMS, INC.
457800402D HARRIS ADACOM CORPORATION
457900402E PRECISION SOFTWARE, INC.
458000402F XLNT DESIGNS INC.
4581004030 GK COMPUTER
4582004031 KOKUSAI ELECTRIC CO., LTD
4583004032 DIGITAL COMMUNICATIONS
4584004033 ADDTRON TECHNOLOGY CO., LTD.
4585004034 BUSTEK CORPORATION
4586004035 OPCOM
4587004036 TRIBE COMPUTER WORKS, INC.
4588004037 SEA-ILAN, INC.
4589004038 TALENT ELECTRIC INCORPORATED
4590004039 OPTEC DAIICHI DENKO CO., LTD.
459100403A IMPACT TECHNOLOGIES
459200403B SYNERJET INTERNATIONAL CORP.
459300403C FORKS, INC.
459400403D TERADATA
459500403E RASTER OPS CORPORATION
459600403F SSANGYONG COMPUTER SYSTEMS
4597004040 RING ACCESS, INC.
4598004041 FUJIKURA LTD.
4599004042 N.A.T. GMBH
4600004043 NOKIA TELECOMMUNICATIONS
4601004044 QNIX COMPUTER CO., LTD.
4602004045 TWINHEAD CORPORATION
4603004046 UDC RESEARCH LIMITED
4604004047 WIND RIVER SYSTEMS
4605004048 SMD INFORMATICA S.A.
4606004049 TEGIMENTA AG
460700404A WEST AUSTRALIAN DEPARTMENT
460800404B MAPLE COMPUTER SYSTEMS
460900404C HYPERTEC PTY LTD.
461000404D TELECOMMUNICATIONS TECHNIQUES
461100404E FLUENT, INC.
461200404F SPACE & NAVAL WARFARE SYSTEMS
4613004050 IRONICS, INCORPORATED
4614004051 GRACILIS, INC.
4615004052 STAR TECHNOLOGIES, INC.
4616004053 AMPRO COMPUTERS
4617004054 CONNECTION MACHINES SERVICES
4618004055 METRONIX GMBH
4619004056 MCM JAPAN LTD.
4620004057 LOCKHEED - SANDERS
4621004058 KRONOS, INC.
4622004059 YOSHIDA KOGYO K. K.
462300405A GOLDSTAR INFORMATION & COMM.
462400405B FUNASSET LIMITED
462500405C FUTURE SYSTEMS, INC.
462600405D STAR-TEK, INC.
462700405E NORTH HILLS ISRAEL
462800405F AFE COMPUTERS LTD.
4629004060 COMENDEC LTD
4630004061 DATATECH ENTERPRISES CO., LTD.
4631004062 E-SYSTEMS, INC./GARLAND DIV.
4632004063 VIA TECHNOLOGIES, INC.
4633004064 KLA INSTRUMENTS CORPORATION
4634004065 GTE SPACENET
4635004066 HITACHI CABLE, LTD.
4636004067 OMNIBYTE CORPORATION
4637004068 EXTENDED SYSTEMS
4638004069 LEMCOM SYSTEMS, INC.
463900406A KENTEK INFORMATION SYSTEMS,INC
464000406B SYSGEN
464100406C COPERNIQUE
464200406D LANCO, INC.
464300406E COROLLARY, INC.
464400406F SYNC RESEARCH INC.
4645004070 INTERWARE CO., LTD.
4646004071 ATM COMPUTER GMBH
4647004072 Applied Innovation Inc.
4648004073 BASS ASSOCIATES
4649004074 CABLE AND WIRELESS
4650004075 M-TRADE (UK) LTD
4651004076 Sun Conversion Technologies
4652004077 MAXTON TECHNOLOGY CORPORATION
4653004078 WEARNES AUTOMATION PTE LTD
4654004079 JUKO MANUFACTURE COMPANY, LTD.
465500407A SOCIETE D'EXPLOITATION DU CNIT
465600407B SCIENTIFIC ATLANTA
465700407C QUME CORPORATION
465800407D EXTENSION TECHNOLOGY CORP.
465900407E EVERGREEN SYSTEMS, INC.
466000407F FLIR Systems
4661004080 ATHENIX CORPORATION
4662004081 MANNESMANN SCANGRAPHIC GMBH
4663004082 LABORATORY EQUIPMENT CORP.
4664004083 TDA INDUSTRIA DE PRODUTOS
4665004084 HONEYWELL INC.
4666004085 SAAB INSTRUMENTS AB
4667004086 MICHELS & KLEBERHOFF COMPUTER
4668004087 UBITREX CORPORATION
4669004088 MOBIUS TECHNOLOGIES, INC.
4670004089 MEIDENSHA CORPORATION
467100408A TPS TELEPROCESSING SYS. GMBH
467200408B RAYLAN CORPORATION
467300408C AXIS COMMUNICATIONS AB
467400408D THE GOODYEAR TIRE & RUBBER CO.
467500408E DIGILOG, INC.
467600408F WM-DATA MINFO AB
4677004090 ANSEL COMMUNICATIONS
4678004091 PROCOMP INDUSTRIA ELETRONICA
4679004092 ASP COMPUTER PRODUCTS, INC.
4680004093 PAXDATA NETWORKS LTD.
4681004094 SHOGRAPHICS, INC.
4682004095 R.P.T. INTERGROUPS INT'L LTD.
4683004096 Aironet Wireless Communication
4684004097 DATEX DIVISION OF
4685004098 DRESSLER GMBH & CO.
4686004099 NEWGEN SYSTEMS CORP.
468700409A NETWORK EXPRESS, INC.
468800409B HAL COMPUTER SYSTEMS INC.
468900409C TRANSWARE
469000409D DIGIBOARD, INC.
469100409E CONCURRENT TECHNOLOGIES LTD.
469200409F LANCAST/CASAT TECHNOLOGY, INC.
46930040A0 GOLDSTAR CO., LTD.
46940040A1 ERGO COMPUTING
46950040A2 KINGSTAR TECHNOLOGY INC.
46960040A3 MICROUNITY SYSTEMS ENGINEERING
46970040A4 ROSE ELECTRONICS
46980040A5 CLINICOMP INTL.
46990040A6 Cray, Inc.
47000040A7 ITAUTEC PHILCO S.A.
47010040A8 IMF INTERNATIONAL LTD.
47020040A9 DATACOM INC.
47030040AA VALMET AUTOMATION INC.
47040040AB ROLAND DG CORPORATION
47050040AC SUPER WORKSTATION, INC.
47060040AD SMA REGELSYSTEME GMBH
47070040AE DELTA CONTROLS, INC.
47080040AF DIGITAL PRODUCTS, INC.
47090040B0 BYTEX CORPORATION, ENGINEERING
47100040B1 CODONICS INC.
47110040B2 SYSTEMFORSCHUNG
47120040B3 PAR MICROSYSTEMS CORPORATION
47130040B4 NEXTCOM K.K.
47140040B5 VIDEO TECHNOLOGY COMPUTERS LTD
47150040B6 COMPUTERM CORPORATION
47160040B7 STEALTH COMPUTER SYSTEMS
47170040B8 IDEA ASSOCIATES
47180040B9 MACQ ELECTRONIQUE SA
47190040BA ALLIANT COMPUTER SYSTEMS CORP.
47200040BB GOLDSTAR CABLE CO., LTD.
47210040BC ALGORITHMICS LTD.
47220040BD STARLIGHT NETWORKS, INC.
47230040BE BOEING DEFENSE & SPACE
47240040BF CHANNEL SYSTEMS INTERN'L INC.
47250040C0 VISTA CONTROLS CORPORATION
47260040C1 BIZERBA-WERKE WILHELM KRAUT
47270040C2 APPLIED COMPUTING DEVICES
47280040C3 FISCHER AND PORTER CO.
47290040C4 KINKEI SYSTEM CORPORATION
47300040C5 MICOM COMMUNICATIONS INC.
47310040C6 FIBERNET RESEARCH, INC.
47320040C7 RUBY TECH CORPORATION
47330040C8 MILAN TECHNOLOGY CORPORATION
47340040C9 NCUBE
47350040CA FIRST INTERNAT'L COMPUTER, INC
47360040CB LANWAN TECHNOLOGIES
47370040CC SILCOM MANUF'G TECHNOLOGY INC.
47380040CD TERA MICROSYSTEMS, INC.
47390040CE NET-SOURCE, INC.
47400040CF STRAWBERRY TREE, INC.
47410040D0 MITAC INTERNATIONAL CORP.
47420040D1 FUKUDA DENSHI CO., LTD.
47430040D2 PAGINE CORPORATION
47440040D3 KIMPSION INTERNATIONAL CORP.
47450040D4 GAGE TALKER CORP.
47460040D5 SARTORIUS AG
47470040D6 LOCAMATION B.V.
47480040D7 STUDIO GEN INC.
47490040D8 OCEAN OFFICE AUTOMATION LTD.
47500040D9 AMERICAN MEGATRENDS INC.
47510040DA TELSPEC LTD
47520040DB ADVANCED TECHNICAL SOLUTIONS
47530040DC TRITEC ELECTRONIC GMBH
47540040DD HONG TECHNOLOGIES
47550040DE ELETTRONICA SAN GIORGIO
47560040DF DIGALOG SYSTEMS, INC.
47570040E0 ATOMWIDE LTD.
47580040E1 MARNER INTERNATIONAL, INC.
47590040E2 MESA RIDGE TECHNOLOGIES, INC.
47600040E3 QUIN SYSTEMS LTD
47610040E4 E-M TECHNOLOGY, INC.
47620040E5 SYBUS CORPORATION
47630040E6 C.A.E.N.
47640040E7 ARNOS INSTRUMENTS & COMPUTER
47650040E8 CHARLES RIVER DATA SYSTEMS,INC
47660040E9 ACCORD SYSTEMS, INC.
47670040EA PLAIN TREE SYSTEMS INC
47680040EB MARTIN MARIETTA CORPORATION
47690040EC MIKASA SYSTEM ENGINEERING
47700040ED NETWORK CONTROLS INT'NATL INC.
47710040EE OPTIMEM
47720040EF HYPERCOM, INC.
47730040F0 MICRO SYSTEMS, INC.
47740040F1 CHUO ELECTRONICS CO., LTD.
47750040F2 JANICH & KLASS COMPUTERTECHNIK
47760040F3 NETCOR
47770040F4 CAMEO COMMUNICATIONS, INC.
47780040F5 OEM ENGINES
47790040F6 KATRON COMPUTERS INC.
47800040F7 POLAROID MEDICAL IMAGING SYS.
47810040F8 SYSTEMHAUS DISCOM
47820040F9 COMBINET
47830040FA MICROBOARDS, INC.
47840040FB CASCADE COMMUNICATIONS CORP.
47850040FC IBR COMPUTER TECHNIK GMBH
47860040FD LXE
47870040FE SYMPLEX COMMUNICATIONS
47880040FF TELEBIT CORPORATION
4789004252 RLX Technologies
4790005000 NEXO COMMUNICATIONS, INC.
4791005001 YAMASHITA SYSTEMS CORP.
4792005002 OMNISEC AG
4793005003 GRETAG MACBETH AG
4794005004 3COM CORPORATION
4795005006 TAC AB
4796005007 SIEMENS TELECOMMUNICATION SYSTEMS LIMITED
4797005008 TIVA MICROCOMPUTER CORP. (TMC)
4798005009 PHILIPS BROADBAND NETWORKS
479900500A IRIS TECHNOLOGIES, INC.
480000500B CISCO SYSTEMS, INC.
480100500C e-Tek Labs, Inc.
480200500D SATORI ELECTRIC CO., LTD.
480300500E CHROMATIS NETWORKS, INC.
480400500F CISCO SYSTEMS, INC.
4805005010 NovaNET Learning, Inc.
4806005012 CBL - GMBH
4807005013 Chaparral Network Storage
4808005014 CISCO SYSTEMS, INC.
4809005015 BRIGHT STAR ENGINEERING
4810005016 SST/WOODHEAD INDUSTRIES
4811005017 RSR S.R.L.
4812005018 ADVANCED MULTIMEDIA INTERNET TECHNOLOGY INC.
4813005019 SPRING TIDE NETWORKS, INC.
481400501A UISIQN
481500501B ABL CANADA, INC.
481600501C JATOM SYSTEMS, INC.
481700501E Miranda Technologies, Inc.
481800501F MRG SYSTEMS, LTD.
4819005020 MEDIASTAR CO., LTD.
4820005021 EIS INTERNATIONAL, INC.
4821005022 ZONET TECHNOLOGY, INC.
4822005023 PG DESIGN ELECTRONICS, INC.
4823005024 NAVIC SYSTEMS, INC.
4824005026 COSYSTEMS, INC.
4825005027 GENICOM CORPORATION
4826005028 AVAL COMMUNICATIONS
4827005029 1394 PRINTER WORKING GROUP
482800502A CISCO SYSTEMS, INC.
482900502B GENRAD LTD.
483000502C SOYO COMPUTER, INC.
483100502D ACCEL, INC.
483200502E CAMBEX CORPORATION
483300502F TollBridge Technologies, Inc.
4834005030 FUTURE PLUS SYSTEMS
4835005031 AEROFLEX LABORATORIES, INC.
4836005032 PICAZO COMMUNICATIONS, INC.
4837005033 MAYAN NETWORKS
4838005036 NETCAM, LTD.
4839005037 KOGA ELECTRONICS CO.
4840005038 DAIN TELECOM CO., LTD.
4841005039 MARINER NETWORKS
484200503A DATONG ELECTRONICS LTD.
484300503B MEDIAFIRE CORPORATION
484400503C TSINGHUA NOVEL ELECTRONICS
484500503E CISCO SYSTEMS, INC.
484600503F ANCHOR GAMES
4847005040 EMWARE, INC.
4848005041 CTX OPTO ELECTRONIC CORP.
4849005042 SCI MANUFACTURING SINGAPORE PTE, LTD.
4850005043 MARVELL SEMICONDUCTOR, INC.
4851005044 ASACA CORPORATION
4852005045 RIOWORKS SOLUTIONS, INC.
4853005046 MENICX INTERNATIONAL CO., LTD.
4854005048 INFOLIBRIA
4855005049 ELLACOYA NETWORKS, INC.
485600504A ELTECO A.S.
485700504B BARCONET N.V.
485800504C GALIL MOTION CONTROL, INC.
485900504D TOKYO ELECTRON DEVICE LTD.
486000504E SIERRA MONITOR CORP.
486100504F OLENCOM ELECTRONICS
4862005050 CISCO SYSTEMS, INC.
4863005051 IWATSU ELECTRIC CO., LTD.
4864005052 TIARA NETWORKS, INC.
4865005053 CISCO SYSTEMS, INC.
4866005054 CISCO SYSTEMS, INC.
4867005055 DOMS A/S
4868005056 VMWare, Inc.
4869005057 BROADBAND ACCESS SYSTEMS
4870005058 VEGASTREAM LIMITED
4871005059 SUITE TECHNOLOGY SYSTEMS NETWORK
487200505A NETWORK ALCHEMY, INC.
487300505B KAWASAKI LSI U.S.A., INC.
487400505C TUNDO CORPORATION
487500505E DIGITEK MICROLOGIC S.A.
487600505F BRAND INNOVATORS
4877005060 TANDBERG TELECOM AS
4878005062 KOUWELL ELECTRONICS CORP. **
4879005063 OY COMSEL SYSTEM AB
4880005064 CAE ELECTRONICS
4881005065 DENSEI-LAMBDA Co., Ltd.
4882005066 AtecoM GmbH advanced telecommunication modules
4883005067 AEROCOMM, INC.
4884005068 ELECTRONIC INDUSTRIES ASSOCIATION
4885005069 PixStream Incorporated
488600506A EDEVA, INC.
488700506B SPX-ATEG
488800506C G & L BEIJER ELECTRONICS AB
488900506D VIDEOJET SYSTEMS
489000506E CORDER ENGINEERING CORPORATION
489100506F G-CONNECT
4892005070 CHAINTECH COMPUTER CO., LTD.
4893005071 AIWA CO., LTD.
4894005072 CORVIS CORPORATION
4895005073 CISCO SYSTEMS, INC.
4896005074 ADVANCED HI-TECH CORP.
4897005075 KESTREL SOLUTIONS
4898005076 IBM
4899005077 PROLIFIC TECHNOLOGY, INC.
4900005078 MEGATON HOUSE, LTD.
490100507A XPEED, INC.
490200507B MERLOT COMMUNICATIONS
490300507C VIDEOCON AG
490400507D IFP
490500507E NEWER TECHNOLOGY
490600507F DrayTek Corp.
4907005080 CISCO SYSTEMS, INC.
4908005081 MURATA MACHINERY, LTD.
4909005082 FORESSON CORPORATION
4910005083 GILBARCO, INC.
4911005084 ATL PRODUCTS
4912005086 TELKOM SA, LTD.
4913005087 TERASAKI ELECTRIC CO., LTD.
4914005088 AMANO CORPORATION
4915005089 SAFETY MANAGEMENT SYSTEMS
491600508B COMPAQ COMPUTER CORPORATION
491700508C RSI SYSTEMS
491800508D ABIT COMPUTER CORPORATION
491900508E OPTIMATION, INC.
492000508F ASITA TECHNOLOGIES INT'L LTD.
4921005090 DCTRI
4922005091 NETACCESS, INC.
4923005092 RIGAKU INDUSTRIAL CORPORATION
4924005093 BOEING
4925005094 PACE MICRO TECHNOLOGY PLC
4926005095 PERACOM NETWORKS
4927005096 SALIX TECHNOLOGIES, INC.
4928005097 MMC-EMBEDDED COMPUTERTECHNIK GmbH
4929005098 GLOBALOOP, LTD.
4930005099 3COM EUROPE, LTD.
493100509A TAG ELECTRONIC SYSTEMS
493200509B SWITCHCORE AB
493300509C BETA RESEARCH
493400509D THE INDUSTREE B.V.
493500509E Les Technologies SoftAcoustik Inc.
493600509F HORIZON COMPUTER
49370050A0 DELTA COMPUTER SYSTEMS, INC.
49380050A1 CARLO GAVAZZI, INC.
49390050A2 CISCO SYSTEMS, INC.
49400050A3 TransMedia Communications, Inc.
49410050A4 IO TECH, INC.
49420050A5 CAPITOL BUSINESS SYSTEMS, LTD.
49430050A6 OPTRONICS
49440050A7 CISCO SYSTEMS, INC.
49450050A8 OpenCon Systems, Inc.
49460050A9 MOLDAT WIRELESS TECHNOLOGIES
49470050AA KONICA MINOLTA HOLDINGS, INC.
49480050AB NALTEC, INC.
49490050AC MAPLE COMPUTER CORPORATION
49500050AD CommUnique Wireless Corp.
49510050AE IWAKI ELECTRONICS CO., LTD.
49520050AF INTERGON, INC.
49530050B0 TECHNOLOGY ATLANTA CORPORATION
49540050B1 GIDDINGS & LEWIS
49550050B2 BRODEL AUTOMATION
49560050B3 VOICEBOARD CORPORATION
49570050B4 SATCHWELL CONTROL SYSTEMS, LTD
49580050B5 FICHET-BAUCHE
49590050B6 GOOD WAY IND. CO., LTD.
49600050B7 BOSER TECHNOLOGY CO., LTD.
49610050B8 INOVA COMPUTERS GMBH & CO. KG
49620050B9 XITRON TECHNOLOGIES, INC.
49630050BA D-LINK
49640050BB CMS TECHNOLOGIES
49650050BC HAMMER STORAGE SOLUTIONS
49660050BD CISCO SYSTEMS, INC.
49670050BE FAST MULTIMEDIA AG
49680050BF MOTOTECH INC.
49690050C0 GATAN, INC.
49700050C1 GEMFLEX NETWORKS, LTD.
49710050C2 IEEE REGISTRATION AUTHORITY
49720050C4 IMD
49730050C5 ADS TECHNOLOGIES, INC.
49740050C6 LOOP TELECOMMUNICATION INTERNATIONAL, INC.
49750050C8 ADDONICS COMMUNICATIONS, INC.
49760050C9 MASPRO DENKOH CORP.
49770050CA NET TO NET TECHNOLOGIES
49780050CB JETTER
49790050CC XYRATEX
49800050CD DIGIANSWER A/S
49810050CE LG INTERNATIONAL CORP.
49820050CF VANLINK COMMUNICATION TECHNOLOGY RESEARCH INSTITUTE
49830050D0 MINERVA SYSTEMS
49840050D1 CISCO SYSTEMS, INC.
49850050D2 BAE Systems Canada, Inc.
49860050D3 DIGITAL AUDIO PROCESSING PTY. LTD.
49870050D4 JOOHONG INFORMATION &
49880050D5 AD SYSTEMS CORP.
49890050D6 ATLAS COPCO TOOLS AB
49900050D7 TELSTRAT
49910050D8 UNICORN COMPUTER CORP.
49920050D9 ENGETRON-ENGENHARIA ELETRONICA IND. e COM. LTDA
49930050DA 3COM CORPORATION
49940050DB CONTEMPORARY CONTROL
49950050DC TAS TELEFONBAU A. SCHWABE GMBH & CO. KG
49960050DD SERRA SOLDADURA, S.A.
49970050DE SIGNUM SYSTEMS CORP.
49980050DF AirFiber, Inc.
49990050E1 NS TECH ELECTRONICS SDN BHD
50000050E2 CISCO SYSTEMS, INC.
50010050E3 Terayon Communications Systems
50020050E4 APPLE COMPUTER, INC.
50030050E6 HAKUSAN CORPORATION
50040050E7 PARADISE INNOVATIONS (ASIA)
50050050E8 NOMADIX INC.
50060050EA XEL COMMUNICATIONS, INC.
50070050EB ALPHA-TOP CORPORATION
50080050EC OLICOM A/S
50090050ED ANDA NETWORKS
50100050EE TEK DIGITEL CORPORATION
50110050EF SPE Systemhaus GmbH
50120050F0 CISCO SYSTEMS, INC.
50130050F1 LIBIT SIGNAL PROCESSING, LTD.
50140050F2 MICROSOFT CORP.
50150050F3 GLOBAL NET INFORMATION CO., Ltd.
50160050F4 SIGMATEK GMBH & CO. KG
50170050F6 PAN-INTERNATIONAL INDUSTRIAL CORP.
50180050F7 VENTURE MANUFACTURING (SINGAPORE) LTD.
50190050F8 ENTREGA TECHNOLOGIES, INC.
50200050FA OXTEL, LTD.
50210050FB VSK ELECTRONICS
50220050FC EDIMAX TECHNOLOGY CO., LTD.
50230050FD VISIONCOMM CO., LTD.
50240050FE PCTVnet ASA
50250050FF HAKKO ELECTRONICS CO., LTD.
5026006000 XYCOM INC.
5027006001 InnoSys, Inc.
5028006002 SCREEN SUBTITLING SYSTEMS, LTD
5029006003 TERAOKA WEIGH SYSTEM PTE, LTD.
5030006004 COMPUTADORES MODULARES SA
5031006005 FEEDBACK DATA LTD.
5032006006 SOTEC CO., LTD
5033006007 ACRES GAMING, INC.
5034006008 3COM CORPORATION
5035006009 CISCO SYSTEMS, INC.
503600600A SORD COMPUTER CORPORATION
503700600B LOGWARE GmbH
503800600C APPLIED DATA SYSTEMS, INC.
503900600D Digital Logic GmbH
504000600E WAVENET INTERNATIONAL, INC.
504100600F WESTELL, INC.
5042006010 NETWORK MACHINES, INC.
5043006011 CRYSTAL SEMICONDUCTOR CORP.
5044006012 POWER COMPUTING CORPORATION
5045006013 NETSTAL MASCHINEN AG
5046006014 EDEC CO., LTD.
5047006015 NET2NET CORPORATION
5048006016 CLARIION
5049006017 TOKIMEC INC.
5050006018 STELLAR ONE CORPORATION
5051006019 Roche Diagnostics
505200601A KEITHLEY INSTRUMENTS
505300601B MESA ELECTRONICS
505400601C TELXON CORPORATION
505500601D LUCENT TECHNOLOGIES
505600601E SOFTLAB, INC.
505700601F STALLION TECHNOLOGIES
5058006020 PIVOTAL NETWORKING, INC.
5059006021 DSC CORPORATION
5060006022 VICOM SYSTEMS, INC.
5061006023 PERICOM SEMICONDUCTOR CORP.
5062006024 GRADIENT TECHNOLOGIES, INC.
5063006025 ACTIVE IMAGING PLC
5064006026 VIKING COMPONENTS, INC.
5065006027 Superior Modular Products
5066006028 MACROVISION CORPORATION
5067006029 CARY PERIPHERALS INC.
506800602A SYMICRON COMPUTER COMMUNICATIONS, LTD.
506900602B PEAK AUDIO
507000602C LINX Data Terminals, Inc.
507100602D ALERTON TECHNOLOGIES, INC.
507200602E CYCLADES CORPORATION
507300602F CISCO SYSTEMS, INC.
5074006030 VILLAGE TRONIC ENTWICKLUNG
5075006031 HRK SYSTEMS
5076006032 I-CUBE, INC.
5077006033 ACUITY IMAGING, INC.
5078006034 ROBERT BOSCH GmbH
5079006035 DALLAS SEMICONDUCTOR, INC.
5080006036 AUSTRIAN RESEARCH CENTER SEIBERSDORF
5081006037 PHILIPS SEMICONDUCTORS
5082006038 Nortel Networks
5083006039 SanCom Technology, Inc.
508400603A QUICK CONTROLS LTD.
508500603B AMTEC spa
508600603C HAGIWARA SYS-COM CO., LTD.
508700603D 3CX
508800603E CISCO SYSTEMS, INC.
508900603F PATAPSCO DESIGNS
5090006040 NETRO CORP.
5091006041 Yokogawa Electric Corporation
5092006042 TKS (USA), INC.
5093006043 ComSoft Systems, Inc.
5094006044 LITTON/POLY-SCIENTIFIC
5095006045 PATHLIGHT TECHNOLOGIES
5096006046 VMETRO, INC.
5097006047 CISCO SYSTEMS, INC.
5098006048 EMC CORPORATION
5099006049 VINA TECHNOLOGIES
510000604A SAIC IDEAS GROUP
510100604B BIODATA GmbH
510200604C SAT
510300604D MMC NETWORKS, INC.
510400604E CYCLE COMPUTER CORPORATION, INC.
510500604F SUZUKI MFG. CO., LTD.
5106006050 INTERNIX INC.
5107006051 QUALITY SEMICONDUCTOR
5108006052 PERIPHERALS ENTERPRISE CO., Ltd.
5109006053 TOYODA MACHINE WORKS, LTD.
5110006054 CONTROLWARE GMBH
5111006055 CORNELL UNIVERSITY
5112006056 NETWORK TOOLS, INC.
5113006057 MURATA MANUFACTURING CO., LTD.
5114006058 COPPER MOUNTAIN COMMUNICATIONS, INC.
5115006059 TECHNICAL COMMUNICATIONS CORP.
511600605A CELCORE, INC.
511700605B IntraServer Technology, Inc.
511800605C CISCO SYSTEMS, INC.
511900605D SCANIVALVE CORP.
512000605E LIBERTY TECHNOLOGY NETWORKING
512100605F NIPPON UNISOFT CORPORATION
5122006060 DAWNING TECHNOLOGIES, INC.
5123006061 WHISTLE COMMUNICATIONS CORP.
5124006062 TELESYNC, INC.
5125006063 PSION DACOM PLC.
5126006064 NETCOMM LIMITED
5127006065 BERNECKER & RAINER INDUSTRIE-ELEKTRONIC GmbH
5128006066 LACROIX TECHNOLOGIE
5129006067 ACER NETXUS INC.
5130006068 EICON TECHNOLOGY CORPORATION
5131006069 BROCADE COMMUNICATIONS SYSTEMS, Inc.
513200606A MITSUBISHI WIRELESS COMMUNICATIONS. INC.
513300606B Synclayer Inc.
513400606C ARESCOM
513500606D DIGITAL EQUIPMENT CORP.
513600606E DAVICOM SEMICONDUCTOR, INC.
513700606F CLARION CORPORATION OF AMERICA
5138006070 CISCO SYSTEMS, INC.
5139006071 MIDAS LAB, INC.
5140006072 VXL INSTRUMENTS, LIMITED
5141006073 REDCREEK COMMUNICATIONS, INC.
5142006074 QSC AUDIO PRODUCTS
5143006075 PENTEK, INC.
5144006076 SCHLUMBERGER TECHNOLOGIES RETAIL PETROLEUM SYSTEMS
5145006077 PRISA NETWORKS
5146006078 POWER MEASUREMENT LTD.
5147006079 Mainstream Data, Inc.
514800607A DVS GmbH
514900607B FORE SYSTEMS, INC.
515000607C WaveAccess, Ltd.
515100607D SENTIENT NETWORKS INC.
515200607E GIGALABS, INC.
515300607F AURORA TECHNOLOGIES, INC.
5154006080 MICROTRONIX DATACOM LTD.
5155006081 TV/COM INTERNATIONAL
5156006082 NOVALINK TECHNOLOGIES, INC.
5157006083 CISCO SYSTEMS, INC.
5158006084 DIGITAL VIDEO
5159006085 Storage Concepts
5160006086 LOGIC REPLACEMENT TECH. LTD.
5161006087 KANSAI ELECTRIC CO., LTD.
5162006088 WHITE MOUNTAIN DSP, INC.
5163006089 XATA
516400608A CITADEL COMPUTER
516500608B ConferTech International
516600608C 3COM CORPORATION
516700608D UNIPULSE CORP.
516800608E HE ELECTRONICS, TECHNOLOGIE & SYSTEMTECHNIK GmbH
516900608F TEKRAM TECHNOLOGY CO., LTD.
5170006090 ABLE COMMUNICATIONS, INC.
5171006091 FIRST PACIFIC NETWORKS, INC.
5172006092 MICRO/SYS, INC.
5173006093 VARIAN
5174006094 IBM CORP.
5175006095 ACCU-TIME SYSTEMS, INC.
5176006096 T.S. MICROTECH INC.
5177006097 3COM CORPORATION
5178006098 HT COMMUNICATIONS
5179006099 LAN MEDIA CORPORATION
518000609A NJK TECHNO CO.
518100609B ASTRO-MED, INC.
518200609C Perkin-Elmer Incorporated
518300609D PMI FOOD EQUIPMENT GROUP
518400609E ASC X3 - INFORMATION TECHNOLOGY STANDARDS SECRETARIATS
518500609F PHAST CORPORATION
51860060A0 SWITCHED NETWORK TECHNOLOGIES, INC.
51870060A1 VPNet, Inc.
51880060A2 NIHON UNISYS LIMITED CO.
51890060A3 CONTINUUM TECHNOLOGY CORP.
51900060A4 GRINAKER SYSTEM TECHNOLOGIES
51910060A5 PERFORMANCE TELECOM CORP.
51920060A6 PARTICLE MEASURING SYSTEMS
51930060A7 MICROSENS GmbH & CO. KG
51940060A8 TIDOMAT AB
51950060A9 GESYTEC MbH
51960060AA INTELLIGENT DEVICES INC. (IDI)
51970060AB LARSCOM INCORPORATED
51980060AC RESILIENCE CORPORATION
51990060AD MegaChips Corporation
52000060AE TRIO INFORMATION SYSTEMS AB
52010060AF PACIFIC MICRO DATA, INC.
52020060B0 HEWLETT-PACKARD CO.
52030060B1 INPUT/OUTPUT, INC.
52040060B2 PROCESS CONTROL CORP.
52050060B3 Z-COM, INC.
52060060B4 GLENAYRE R&D INC.
52070060B5 KEBA GmbH
52080060B6 LAND COMPUTER CO., LTD.
52090060B7 CHANNELMATIC, INC.
52100060B8 CORELIS INC.
52110060B9 NITSUKO CORPORATION
52120060BA SAHARA NETWORKS, INC.
52130060BB CABLETRON - NETLINK, INC.
52140060BC KeunYoung Electronics & Communication Co., Ltd.
52150060BD HUBBELL-PULSECOM
52160060BE WEBTRONICS
52170060BF MACRAIGOR SYSTEMS, INC.
52180060C0 NERA AS
52190060C1 WaveSpan Corporation
52200060C2 MPL AG
52210060C3 NETVISION CORPORATION
52220060C4 SOLITON SYSTEMS K.K.
52230060C5 ANCOT CORP.
52240060C6 DCS AG
52250060C7 AMATI COMMUNICATIONS CORP.
52260060C8 KUKA WELDING SYSTEMS & ROBOTS
52270060C9 ControlNet, Inc.
52280060CA HARMONIC SYSTEMS INCORPORATED
52290060CB HITACHI ZOSEN CORPORATION
52300060CC EMTRAK, INCORPORATED
52310060CD VideoServer, Inc.
52320060CE ACCLAIM COMMUNICATIONS
52330060CF ALTEON NETWORKS, INC.
52340060D0 SNMP RESEARCH INCORPORATED
52350060D1 CASCADE COMMUNICATIONS
52360060D2 LUCENT TECHNOLOGIES TAIWAN TELECOMMUNICATIONS CO., LTD.
52370060D3 AT&T
52380060D4 ELDAT COMMUNICATION LTD.
52390060D5 MIYACHI TECHNOS CORP.
52400060D6 NovAtel Wireless Technologies Ltd.
52410060D7 ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE (EPFL)
52420060D8 ELMIC SYSTEMS, INC.
52430060D9 TRANSYS NETWORKS INC.
52440060DA JBM ELECTRONICS CO.
52450060DB NTP ELEKTRONIK A/S
52460060DC TOYO COMMUNICATION EQUIPMENT Co., Ltd.
52470060DD MYRICOM, INC.
52480060DE KAYSER-THREDE GmbH
52490060DF CNT Corporation
52500060E0 AXIOM TECHNOLOGY CO., LTD.
52510060E1 ORCKIT COMMUNICATIONS LTD.
52520060E2 QUEST ENGINEERING & DEVELOPMENT
52530060E3 ARBIN INSTRUMENTS
52540060E4 COMPUSERVE, INC.
52550060E5 FUJI AUTOMATION CO., LTD.
52560060E6 SHOMITI SYSTEMS INCORPORATED
52570060E7 RANDATA
52580060E8 HITACHI COMPUTER PRODUCTS (AMERICA), INC.
52590060E9 ATOP TECHNOLOGIES, INC.
52600060EA StreamLogic
52610060EB FOURTHTRACK SYSTEMS
52620060EC HERMARY OPTO ELECTRONICS INC.
52630060ED RICARDO TEST AUTOMATION LTD.
52640060EE APOLLO
52650060EF FLYTECH TECHNOLOGY CO., LTD.
52660060F0 JOHNSON & JOHNSON MEDICAL, INC
52670060F1 EXP COMPUTER, INC.
52680060F2 LASERGRAPHICS, INC.
52690060F3 Performance Analysis Broadband, Spirent plc
52700060F4 ADVANCED COMPUTER SOLUTIONS, Inc.
52710060F5 ICON WEST, INC.
52720060F6 NEXTEST COMMUNICATIONS PRODUCTS, INC.
52730060F7 DATAFUSION SYSTEMS
52740060F8 Loran International Technologies Inc.
52750060F9 DIAMOND LANE COMMUNICATIONS
52760060FA EDUCATIONAL TECHNOLOGY RESOURCES, INC.
52770060FB PACKETEER, INC.
52780060FC CONSERVATION THROUGH INNOVATION LTD.
52790060FD NetICs, Inc.
52800060FE LYNX SYSTEM DEVELOPERS, INC.
52810060FF QuVis, Inc.
52820070B0 M/A-COM INC. COMPANIES
52830070B3 DATA RECALL LTD.
5284008000 MULTITECH SYSTEMS, INC.
5285008001 PERIPHONICS CORPORATION
5286008002 SATELCOM (UK) LTD
5287008003 HYTEC ELECTRONICS LTD.
5288008004 ANTLOW COMMUNICATIONS, LTD.
5289008005 CACTUS COMPUTER INC.
5290008006 COMPUADD CORPORATION
5291008007 DLOG NC-SYSTEME
5292008008 DYNATECH COMPUTER SYSTEMS
5293008009 JUPITER SYSTEMS, INC.
529400800A JAPAN COMPUTER CORP.
529500800B CSK CORPORATION
529600800C VIDECOM LIMITED
529700800D VOSSWINKEL F.U.
529800800E ATLANTIX CORPORATION
529900800F STANDARD MICROSYSTEMS
5300008010 COMMODORE INTERNATIONAL
5301008011 DIGITAL SYSTEMS INT'L. INC.
5302008012 INTEGRATED MEASUREMENT SYSTEMS
5303008013 THOMAS-CONRAD CORPORATION
5304008014 ESPRIT SYSTEMS
5305008015 SEIKO SYSTEMS, INC.
5306008016 WANDEL AND GOLTERMANN
5307008017 PFU LIMITED
5308008018 KOBE STEEL, LTD.
5309008019 DAYNA COMMUNICATIONS, INC.
531000801A BELL ATLANTIC
531100801B KODIAK TECHNOLOGY
531200801C NEWPORT SYSTEMS SOLUTIONS
531300801D INTEGRATED INFERENCE MACHINES
531400801E XINETRON, INC.
531500801F KRUPP ATLAS ELECTRONIK GMBH
5316008020 NETWORK PRODUCTS
5317008021 Alcatel Canada Inc.
5318008022 SCAN-OPTICS
5319008023 INTEGRATED BUSINESS NETWORKS
5320008024 KALPANA, INC.
5321008025 STOLLMANN GMBH
5322008026 NETWORK PRODUCTS CORPORATION
5323008027 ADAPTIVE SYSTEMS, INC.
5324008028 TRADPOST (HK) LTD
5325008029 EAGLE TECHNOLOGY, INC.
532600802A TEST SYSTEMS & SIMULATIONS INC
532700802B INTEGRATED MARKETING CO
532800802C THE SAGE GROUP PLC
532900802D XYLOGICS INC
533000802E CASTLE ROCK COMPUTING
533100802F NATIONAL INSTRUMENTS CORP.
5332008030 NEXUS ELECTRONICS
5333008031 BASYS, CORP.
5334008032 ACCESS CO., LTD.
5335008033 FORMATION, INC.
5336008034 SMT GOUPIL
5337008035 TECHNOLOGY WORKS, INC.
5338008036 REFLEX MANUFACTURING SYSTEMS
5339008037 Ericsson Group
5340008038 DATA RESEARCH & APPLICATIONS
5341008039 ALCATEL STC AUSTRALIA
534200803A VARITYPER, INC.
534300803B APT COMMUNICATIONS, INC.
534400803C TVS ELECTRONICS LTD
534500803D SURIGIKEN CO., LTD.
534600803E SYNERNETICS
534700803F TATUNG COMPANY
5348008040 JOHN FLUKE MANUFACTURING CO.
5349008041 VEB KOMBINAT ROBOTRON
5350008042 FORCE COMPUTERS
5351008043 NETWORLD, INC.
5352008044 SYSTECH COMPUTER CORP.
5353008045 MATSUSHITA ELECTRIC IND. CO
5354008046 UNIVERSITY OF TORONTO
5355008047 IN-NET CORP.
5356008048 COMPEX INCORPORATED
5357008049 NISSIN ELECTRIC CO., LTD.
535800804A PRO-LOG
535900804B EAGLE TECHNOLOGIES PTY.LTD.
536000804C CONTEC CO., LTD.
536100804D CYCLONE MICROSYSTEMS, INC.
536200804E APEX COMPUTER COMPANY
536300804F DAIKIN INDUSTRIES, LTD.
5364008050 ZIATECH CORPORATION
5365008051 FIBERMUX
5366008052 TECHNICALLY ELITE CONCEPTS
5367008053 INTELLICOM, INC.
5368008054 FRONTIER TECHNOLOGIES CORP.
5369008055 FERMILAB
5370008056 SPHINX ELEKTRONIK GMBH
5371008057 ADSOFT, LTD.
5372008058 PRINTER SYSTEMS CORPORATION
5373008059 STANLEY ELECTRIC CO., LTD
537400805A TULIP COMPUTERS INTERNAT'L B.V
537500805B CONDOR SYSTEMS, INC.
537600805C AGILIS CORPORATION
537700805D CANSTAR
537800805E LSI LOGIC CORPORATION
537900805F COMPAQ COMPUTER CORPORATION
5380008060 NETWORK INTERFACE CORPORATION
5381008061 LITTON SYSTEMS, INC.
5382008062 INTERFACE CO.
5383008063 RICHARD HIRSCHMANN GMBH & CO.
5384008064 WYSE TECHNOLOGY
5385008065 CYBERGRAPHIC SYSTEMS PTY LTD.
5386008066 ARCOM CONTROL SYSTEMS, LTD.
5387008067 SQUARE D COMPANY
5388008068 YAMATECH SCIENTIFIC LTD.
5389008069 COMPUTONE SYSTEMS
539000806A ERI (EMPAC RESEARCH INC.)
539100806B SCHMID TELECOMMUNICATION
539200806C CEGELEC PROJECTS LTD
539300806D CENTURY SYSTEMS CORP.
539400806E NIPPON STEEL CORPORATION
539500806F ONELAN LTD.
5396008070 COMPUTADORAS MICRON
5397008071 SAI TECHNOLOGY
5398008072 MICROPLEX SYSTEMS LTD.
5399008073 DWB ASSOCIATES
5400008074 FISHER CONTROLS
5401008075 PARSYTEC GMBH
5402008076 MCNC
5403008077 BROTHER INDUSTRIES, LTD.
5404008078 PRACTICAL PERIPHERALS, INC.
5405008079 MICROBUS DESIGNS LTD.
540600807A AITECH SYSTEMS LTD.
540700807B ARTEL COMMUNICATIONS CORP.
540800807C FIBERCOM, INC.
540900807D EQUINOX SYSTEMS INC.
541000807E SOUTHERN PACIFIC LTD.
541100807F DY-4 INCORPORATED
5412008080 DATAMEDIA CORPORATION
5413008081 KENDALL SQUARE RESEARCH CORP.
5414008082 PEP MODULAR COMPUTERS GMBH
5415008083 AMDAHL
5416008084 THE CLOUD INC.
5417008085 H-THREE SYSTEMS CORPORATION
5418008086 COMPUTER GENERATION INC.
5419008087 OKI ELECTRIC INDUSTRY CO., LTD
5420008088 VICTOR COMPANY OF JAPAN, LTD.
5421008089 TECNETICS (PTY) LTD.
542200808A SUMMIT MICROSYSTEMS CORP.
542300808B DACOLL LIMITED
542400808C NetScout Systems, Inc.
542500808D WESTCOAST TECHNOLOGY B.V.
542600808E RADSTONE TECHNOLOGY
542700808F C. ITOH ELECTRONICS, INC.
5428008090 MICROTEK INTERNATIONAL, INC.
5429008091 TOKYO ELECTRIC CO.,LTD
5430008092 JAPAN COMPUTER INDUSTRY, INC.
5431008093 XYRON CORPORATION
5432008094 ALFA LAVAL AUTOMATION AB
5433008095 BASIC MERTON HANDELSGES.M.B.H.
5434008096 HUMAN DESIGNED SYSTEMS, INC.
5435008097 CENTRALP AUTOMATISMES
5436008098 TDK CORPORATION
5437008099 KLOCKNER MOELLER IPC
543800809A NOVUS NETWORKS LTD
543900809B JUSTSYSTEM CORPORATION
544000809C LUXCOM, INC.
544100809D Commscraft Ltd.
544200809E DATUS GMBH
544300809F ALCATEL BUSINESS SYSTEMS
54440080A0 EDISA HEWLETT PACKARD S/A
54450080A1 MICROTEST, INC.
54460080A2 CREATIVE ELECTRONIC SYSTEMS
54470080A3 LANTRONIX
54480080A4 LIBERTY ELECTRONICS
54490080A5 SPEED INTERNATIONAL
54500080A6 REPUBLIC TECHNOLOGY, INC.
54510080A7 MEASUREX CORP.
54520080A8 VITACOM CORPORATION
54530080A9 CLEARPOINT RESEARCH
54540080AA MAXPEED
54550080AB DUKANE NETWORK INTEGRATION
54560080AC IMLOGIX, DIVISION OF GENESYS
54570080AD CNET TECHNOLOGY, INC.
54580080AE HUGHES NETWORK SYSTEMS
54590080AF ALLUMER CO., LTD.
54600080B0 ADVANCED INFORMATION
54610080B1 SOFTCOM A/S
54620080B2 NETWORK EQUIPMENT TECHNOLOGIES
54630080B3 AVAL DATA CORPORATION
54640080B4 SOPHIA SYSTEMS
54650080B5 UNITED NETWORKS INC.
54660080B6 THEMIS COMPUTER
54670080B7 STELLAR COMPUTER
54680080B8 BUG, INCORPORATED
54690080B9 ARCHE TECHNOLIGIES INC.
54700080BA SPECIALIX (ASIA) PTE, LTD
54710080BB HUGHES LAN SYSTEMS
54720080BC HITACHI ENGINEERING CO., LTD
54730080BD THE FURUKAWA ELECTRIC CO., LTD
54740080BE ARIES RESEARCH
54750080BF TAKAOKA ELECTRIC MFG. CO. LTD.
54760080C0 PENRIL DATACOMM
54770080C1 LANEX CORPORATION
54780080C2 IEEE 802.1 COMMITTEE
54790080C3 BICC INFORMATION SYSTEMS & SVC
54800080C4 DOCUMENT TECHNOLOGIES, INC.
54810080C5 NOVELLCO DE MEXICO
54820080C6 NATIONAL DATACOMM CORPORATION
54830080C7 XIRCOM
54840080C8 D-LINK SYSTEMS, INC.
54850080C9 ALBERTA MICROELECTRONIC CENTRE
54860080CA NETCOM RESEARCH INCORPORATED
54870080CB FALCO DATA PRODUCTS
54880080CC MICROWAVE BYPASS SYSTEMS
54890080CD MICRONICS COMPUTER, INC.
54900080CE BROADCAST TELEVISION SYSTEMS
54910080CF EMBEDDED PERFORMANCE INC.
54920080D0 COMPUTER PERIPHERALS, INC.
54930080D1 KIMTRON CORPORATION
54940080D2 SHINNIHONDENKO CO., LTD.
54950080D3 SHIVA CORP.
54960080D4 CHASE RESEARCH LTD.
54970080D5 CADRE TECHNOLOGIES
54980080D6 NUVOTECH, INC.
54990080D7 Fantum Engineering
55000080D8 NETWORK PERIPHERALS INC.
55010080D9 EMK ELEKTRONIK
55020080DA BRUEL & KJAER
55030080DB GRAPHON CORPORATION
55040080DC PICKER INTERNATIONAL
55050080DD GMX INC/GIMIX
55060080DE GIPSI S.A.
55070080DF ADC CODENOLL TECHNOLOGY CORP.
55080080E0 XTP SYSTEMS, INC.
55090080E1 STMICROELECTRONICS
55100080E2 T.D.I. CO., LTD.
55110080E3 CORAL NETWORK CORPORATION
55120080E4 NORTHWEST DIGITAL SYSTEMS, INC
55130080E5 MYLEX CORPORATION
55140080E6 PEER NETWORKS, INC.
55150080E7 LYNWOOD SCIENTIFIC DEV. LTD.
55160080E8 CUMULUS CORPORATIION
55170080E9 Madge Ltd.
55180080EA ADVA Optical Networking Ltd.
55190080EB COMPCONTROL B.V.
55200080EC SUPERCOMPUTING SOLUTIONS, INC.
55210080ED IQ TECHNOLOGIES, INC.
55220080EE THOMSON CSF
55230080EF RATIONAL
55240080F0 Panasonic Communications Co., Ltd.
55250080F1 OPUS SYSTEMS
55260080F2 RAYCOM SYSTEMS INC
55270080F3 SUN ELECTRONICS CORP.
55280080F4 TELEMECANIQUE ELECTRIQUE
55290080F5 QUANTEL LTD
55300080F6 SYNERGY MICROSYSTEMS
55310080F7 ZENITH ELECTRONICS
55320080F8 MIZAR, INC.
55330080F9 HEURIKON CORPORATION
55340080FA RWT GMBH
55350080FB BVM LIMITED
55360080FC AVATAR CORPORATION
55370080FD EXSCEED CORPRATION
55380080FE AZURE TECHNOLOGIES, INC.
55390080FF SOC. DE TELEINFORMATIQUE RTC
5540009000 DIAMOND MULTIMEDIA
5541009001 NISHIMU ELECTRONICS INDUSTRIES CO., LTD.
5542009002 ALLGON AB
5543009003 APLIO
5544009004 3COM EUROPE LTD.
5545009005 PROTECH SYSTEMS CO., LTD.
5546009006 HAMAMATSU PHOTONICS K.K.
5547009007 DOMEX TECHNOLOGY CORP.
5548009008 HanA Systems Inc.
5549009009 i Controls, Inc.
555000900A PROTON ELECTRONIC INDUSTRIAL CO., LTD.
555100900B LANNER ELECTRONICS, INC.
555200900C CISCO SYSTEMS, INC.
555300900D OVERLAND DATA INC.
555400900E HANDLINK TECHNOLOGIES, INC.
555500900F KAWASAKI HEAVY INDUSTRIES, LTD
5556009010 SIMULATION LABORATORIES, INC.
5557009011 WAVTrace, Inc.
5558009012 GLOBESPAN SEMICONDUCTOR, INC.
5559009013 SAMSAN CORP.
5560009014 ROTORK INSTRUMENTS, LTD.
5561009015 CENTIGRAM COMMUNICATIONS CORP.
5562009016 ZAC
5563009017 ZYPCOM, INC.
5564009018 ITO ELECTRIC INDUSTRY CO, LTD.
5565009019 HERMES ELECTRONICS CO., LTD.
556600901A UNISPHERE SOLUTIONS
556700901B DIGITAL CONTROLS
556800901C mps Software Gmbh
556900901D PEC (NZ) LTD.
557000901E SELESTA INGEGNE RIA S.P.A.
557100901F ADTEC PRODUCTIONS, INC.
5572009020 PHILIPS ANALYTICAL X-RAY B.V.
5573009021 CISCO SYSTEMS, INC.
5574009022 IVEX
5575009023 ZILOG INC.
5576009024 PIPELINKS, INC.
5577009025 VISION SYSTEMS LTD. PTY
5578009026 ADVANCED SWITCHING COMMUNICATIONS, INC.
5579009027 INTEL CORPORATION
5580009028 NIPPON SIGNAL CO., LTD.
5581009029 CRYPTO AG
558200902A COMMUNICATION DEVICES, INC.
558300902B CISCO SYSTEMS, INC.
558400902C DATA & CONTROL EQUIPMENT LTD.
558500902D DATA ELECTRONICS (AUST.) PTY, LTD.
558600902E NAMCO LIMITED
558700902F NETCORE SYSTEMS, INC.
5588009030 HONEYWELL-DATING
5589009031 MYSTICOM, LTD.
5590009032 PELCOMBE GROUP LTD.
5591009033 INNOVAPHONE GmbH
5592009034 IMAGIC, INC.
5593009035 ALPHA TELECOM, INC.
5594009036 ens, inc.
5595009037 ACUCOMM, INC.
5596009038 FOUNTAIN TECHNOLOGIES, INC.
5597009039 SHASTA NETWORKS
559800903A NIHON MEDIA TOOL INC.
559900903B TriEMS Research Lab, Inc.
560000903C ATLANTIC NETWORK SYSTEMS
560100903D BIOPAC SYSTEMS, INC.
560200903E N.V. PHILIPS INDUSTRIAL ACTIVITIES
560300903F AZTEC RADIOMEDIA
5604009040 Siemens Network Convergence LLC
5605009041 APPLIED DIGITAL ACCESS
5606009042 ECCS, Inc.
5607009043 NICHIBEI DENSHI CO., LTD.
5608009044 ASSURED DIGITAL, INC.
5609009045 Marconi Communications
5610009046 DEXDYNE, LTD.
5611009047 GIGA FAST E. LTD.
5612009048 ZEAL CORPORATION
5613009049 ENTRIDIA CORPORATION
561400904A CONCUR SYSTEM TECHNOLOGIES
561500904B GemTek Technology Co., Ltd.
561600904C EPIGRAM, INC.
561700904D SPEC S.A.
561800904E DELEM BV
561900904F ABB POWER T&D COMPANY, INC.
5620009050 TELESTE OY
5621009051 ULTIMATE TECHNOLOGY CORP.
5622009052 SELCOM ELETTRONICA S.R.L.
5623009053 DAEWOO ELECTRONICS CO., LTD.
5624009054 INNOVATIVE SEMICONDUCTORS, INC
5625009055 PARKER HANNIFIN CORPORATION COMPUMOTOR DIVISION
5626009056 TELESTREAM, INC.
5627009057 AANetcom, Inc.
5628009058 Ultra Electronics Ltd., Command and Control Systems
5629009059 TELECOM DEVICE K.K.
563000905A DEARBORN GROUP, INC.
563100905B RAYMOND AND LAE ENGINEERING
563200905C EDMI
563300905D NETCOM SICHERHEITSTECHNIK GmbH
563400905E RAULAND-BORG CORPORATION
563500905F CISCO SYSTEMS, INC.
5636009060 SYSTEM CREATE CORP.
5637009061 PACIFIC RESEARCH & ENGINEERING CORPORATION
5638009062 ICP VORTEX COMPUTERSYSTEME GmbH
5639009063 COHERENT COMMUNICATIONS SYSTEMS CORPORATION
5640009064 THOMSON BROADCAST SYSTEMS
5641009065 FINISAR CORPORATION
5642009066 Troika Networks, Inc.
5643009067 WalkAbout Computers, Inc.
5644009068 DVT CORP.
5645009069 JUNIPER NETWORKS, INC.
564600906A TURNSTONE SYSTEMS, INC.
564700906B APPLIED RESOURCES, INC.
564800906C GWT GLOBAL WEIGHING TECHNOLOGIES GmbH
564900906D CISCO SYSTEMS, INC.
565000906E PRAXON, INC.
565100906F CISCO SYSTEMS, INC.
5652009070 NEO NETWORKS, INC.
5653009071 Applied Innovation Inc.
5654009072 SIMRAD AS
5655009073 GAIO TECHNOLOGY
5656009074 ARGON NETWORKS, INC.
5657009075 NEC DO BRASIL S.A.
5658009076 FMT AIRCRAFT GATE SUPPORT SYSTEMS AB
5659009077 ADVANCED FIBRE COMMUNICATIONS
5660009078 MER TELEMANAGEMENT SOLUTIONS, LTD.
5661009079 ClearOne, Inc.
566200907A SPECTRALINK CORP.
566300907B E-TECH, INC.
566400907C DIGITALCAST, INC.
566500907D Lake Communications
566600907E VETRONIX CORP.
566700907F WatchGuard Technologies, Inc.
5668009080 NOT LIMITED, INC.
5669009081 ALOHA NETWORKS, INC.
5670009082 FORCE INSTITUTE
5671009083 TURBO COMMUNICATION, INC.
5672009084 ATECH SYSTEM
5673009085 GOLDEN ENTERPRISES, INC.
5674009086 CISCO SYSTEMS, INC.
5675009087 ITIS
5676009088 BAXALL SECURITY LTD.
5677009089 SOFTCOM MICROSYSTEMS, INC.
567800908A BAYLY COMMUNICATIONS, INC.
567900908B CELL COMPUTING, INC.
568000908C ETREND ELECTRONICS, INC.
568100908D VICKERS ELECTRONICS SYSTEMS
568200908E Nortel Networks Broadband Access
568300908F AUDIO CODES LTD.
5684009090 I-BUS
5685009091 DigitalScape, Inc.
5686009092 CISCO SYSTEMS, INC.
5687009093 NANAO CORPORATION
5688009094 OSPREY TECHNOLOGIES, INC.
5689009095 UNIVERSAL AVIONICS
5690009096 ASKEY COMPUTER CORP.
5691009097 SYCAMORE NETWORKS
5692009098 SBC DESIGNS, INC.
5693009099 ALLIED TELESIS, K.K.
569400909A ONE WORLD SYSTEMS, INC.
569500909B MARKPOINT AB
569600909C Terayon Communications Systems
569700909D GSE SYSTEMS, INC.
569800909E Critical IO, LLC
569900909F DIGI-DATA CORPORATION
57000090A0 8X8 INC.
57010090A1 FLYING PIG SYSTEMS, LTD.
57020090A2 CYBERTAN TECHNOLOGY, INC.
57030090A3 Corecess Inc.
57040090A4 ALTIGA NETWORKS
57050090A5 SPECTRA LOGIC
57060090A6 CISCO SYSTEMS, INC.
57070090A7 CLIENTEC CORPORATION
57080090A8 NineTiles Networks, Ltd.
57090090A9 WESTERN DIGITAL
57100090AA INDIGO ACTIVE VISION SYSTEMS LIMITED
57110090AB CISCO SYSTEMS, INC.
57120090AC OPTIVISION, INC.
57130090AD ASPECT ELECTRONICS, INC.
57140090AE ITALTEL S.p.A.
57150090AF J. MORITA MFG. CORP.
57160090B0 VADEM
57170090B1 CISCO SYSTEMS, INC.
57180090B2 AVICI SYSTEMS INC.
57190090B3 AGRANAT SYSTEMS
57200090B4 WILLOWBROOK TECHNOLOGIES
57210090B5 NIKON CORPORATION
57220090B6 FIBEX SYSTEMS
57230090B7 DIGITAL LIGHTWAVE, INC.
57240090B8 ROHDE & SCHWARZ GMBH & CO. KG
57250090B9 BERAN INSTRUMENTS LTD.
57260090BA VALID NETWORKS, INC.
57270090BB TAINET COMMUNICATION SYSTEM Corp.
57280090BC TELEMANN CO., LTD.
57290090BD OMNIA COMMUNICATIONS, INC.
57300090BE IBC/INTEGRATED BUSINESS COMPUTERS
57310090BF CISCO SYSTEMS, INC.
57320090C0 K.J. LAW ENGINEERS, INC.
57330090C1 Peco II, Inc.
57340090C2 JK microsystems, Inc.
57350090C3 TOPIC SEMICONDUCTOR CORP.
57360090C4 JAVELIN SYSTEMS, INC.
57370090C5 INTERNET MAGIC, INC.
57380090C6 OPTIM SYSTEMS, INC.
57390090C7 ICOM INC.
57400090C8 WAVERIDER COMMUNICATIONS (CANADA) INC.
57410090C9 DPAC Technologies
57420090CA ACCORD VIDEO TELECOMMUNICATIONS, LTD.
57430090CB Wireless OnLine, Inc.
57440090CC PLANET COMMUNICATIONS, INC.
57450090CD ENT-EMPRESA NACIONAL DE TELECOMMUNICACOES, S.A.
57460090CE TETRA GmbH
57470090CF NORTEL
57480090D0 Thomson Belgium
57490090D1 LEICHU ENTERPRISE CO., LTD.
57500090D2 ARTEL VIDEO SYSTEMS
57510090D3 GIESECKE & DEVRIENT GmbH
57520090D4 BindView Development Corp.
57530090D5 EUPHONIX, INC.
57540090D6 CRYSTAL GROUP
57550090D7 NetBoost Corp.
57560090D8 WHITECROSS SYSTEMS
57570090D9 CISCO SYSTEMS, INC.
57580090DA DYNARC, INC.
57590090DB NEXT LEVEL COMMUNICATIONS
57600090DC TECO INFORMATION SYSTEMS
57610090DD THE MIHARU COMMUNICATIONS CO., LTD.
57620090DE CARDKEY SYSTEMS, INC.
57630090DF MITSUBISHI CHEMICAL AMERICA, INC.
57640090E0 SYSTRAN CORP.
57650090E1 TELENA S.P.A.
57660090E2 DISTRIBUTED PROCESSING TECHNOLOGY
57670090E3 AVEX ELECTRONICS INC.
57680090E4 NEC AMERICA, INC.
57690090E5 TEKNEMA, INC.
57700090E6 ACER LABORATORIES, INC.
57710090E7 HORSCH ELEKTRONIK AG
57720090E8 MOXA TECHNOLOGIES CORP., LTD.
57730090E9 JANZ COMPUTER AG
57740090EA ALPHA TECHNOLOGIES, INC.
57750090EB SENTRY TELECOM SYSTEMS
57760090EC PYRESCOM
57770090ED CENTRAL SYSTEM RESEARCH CO., LTD.
57780090EE PERSONAL COMMUNICATIONS TECHNOLOGIES
57790090EF INTEGRIX, INC.
57800090F0 HARMONIC LIGHTWAVES, LTD.
57810090F1 DOT HILL SYSTEMS CORPORATION
57820090F2 CISCO SYSTEMS, INC.
57830090F3 ASPECT COMMUNICATIONS
57840090F4 LIGHTNING INSTRUMENTATION
57850090F5 CLEVO CO.
57860090F6 ESCALATE NETWORKS, INC.
57870090F7 NBASE COMMUNICATIONS LTD.
57880090F8 MEDIATRIX TELECOM
57890090F9 LEITCH
57900090FA GigaNet, Inc.
57910090FB PORTWELL, INC.
57920090FC NETWORK COMPUTING DEVICES
57930090FD CopperCom, Inc.
57940090FE ELECOM CO., LTD. (LANEED DIV.)
57950090FF TELLUS TECHNOLOGY INC.
57960091D6 Crystal Group, Inc.
5797009D8E CARDIAC RECORDERS, INC.
579800A000 CENTILLION NETWORKS, INC.
579900A001 WATKINS-JOHNSON COMPANY
580000A002 LEEDS & NORTHRUP AUSTRALIA PTY LTD
580100A003 STAEFA CONTROL SYSTEM
580200A004 NETPOWER, INC.
580300A005 DANIEL INSTRUMENTS, LTD.
580400A006 IMAGE DATA PROCESSING SYSTEM GROUP
580500A007 APEXX TECHNOLOGY, INC.
580600A008 NETCORP
580700A009 WHITETREE NETWORK
580800A00A R.D.C. COMMUNICATION
580900A00B COMPUTEX CO., LTD.
581000A00C KINGMAX TECHNOLOGY, INC.
581100A00D THE PANDA PROJECT
581200A00E VISUAL NETWORKS, INC.
581300A00F Broadband Technologies
581400A010 SYSLOGIC DATENTECHNIK AG
581500A011 MUTOH INDUSTRIES LTD.
581600A012 B.A.T.M. ADVANCED TECHNOLOGIES
581700A013 TELTREND LTD.
581800A014 CSIR
581900A015 WYLE
582000A016 MICROPOLIS CORP.
582100A017 J B M CORPORATION
582200A018 CREATIVE CONTROLLERS, INC.
582300A019 NEBULA CONSULTANTS, INC.
582400A01A BINAR ELEKTRONIK AB
582500A01B PREMISYS COMMUNICATIONS, INC.
582600A01C NASCENT NETWORKS CORPORATION
582700A01D SIXNET
582800A01E EST CORPORATION
582900A01F TRICORD SYSTEMS, INC.
583000A020 CITICORP/TTI
583100A021 GENERAL DYNAMICS-
583200A022 CENTRE FOR DEVELOPMENT OF ADVANCED COMPUTING
583300A023 APPLIED CREATIVE TECHNOLOGY, INC.
583400A024 3COM CORPORATION
583500A025 REDCOM LABS INC.
583600A026 TELDAT, S.A.
583700A027 FIREPOWER SYSTEMS, INC.
583800A028 CONNER PERIPHERALS
583900A029 COULTER CORPORATION
584000A02A TRANCELL SYSTEMS
584100A02B TRANSITIONS RESEARCH CORP.
584200A02C interWAVE Communications
584300A02D 1394 Trade Association
584400A02E BRAND COMMUNICATIONS, LTD.
584500A02F PIRELLI CAVI
584600A030 CAPTOR NV/SA
584700A031 HAZELTINE CORPORATION, MS 1-17
584800A032 GES SINGAPORE PTE. LTD.
584900A033 imc MeBsysteme GmbH
585000A034 AXEL
585100A035 CYLINK CORPORATION
585200A036 APPLIED NETWORK TECHNOLOGY
585300A037 DATASCOPE CORPORATION
585400A038 EMAIL ELECTRONICS
585500A039 ROSS TECHNOLOGY, INC.
585600A03A KUBOTEK CORPORATION
585700A03B TOSHIN ELECTRIC CO., LTD.
585800A03C EG&G NUCLEAR INSTRUMENTS
585900A03D OPTO-22
586000A03E ATM FORUM
586100A03F COMPUTER SOCIETY MICROPROCESSOR & MICROPROCESSOR STANDARDS C
586200A040 APPLE COMPUTER
586300A041 LEYBOLD-INFICON
586400A042 SPUR PRODUCTS CORP.
586500A043 AMERICAN TECHNOLOGY LABS, INC.
586600A044 NTT IT CO., LTD.
586700A045 PHOENIX CONTACT GMBH & CO.
586800A046 SCITEX CORP. LTD.
586900A047 INTEGRATED FITNESS CORP.
587000A048 QUESTECH, LTD.
587100A049 DIGITECH INDUSTRIES, INC.
587200A04A NISSHIN ELECTRIC CO., LTD.
587300A04B TFL LAN INC.
587400A04C INNOVATIVE SYSTEMS & TECHNOLOGIES, INC.
587500A04D EDA INSTRUMENTS, INC.
587600A04E VOELKER TECHNOLOGIES, INC.
587700A04F AMERITEC CORP.
587800A050 CYPRESS SEMICONDUCTOR
587900A051 ANGIA COMMUNICATIONS. INC.
588000A052 STANILITE ELECTRONICS PTY. LTD
588100A053 COMPACT DEVICES, INC.
588200A055 Data Device Corporation
588300A056 MICROPROSS
588400A057 LANCOM Systems GmbH
588500A058 GLORY, LTD.
588600A059 HAMILTON HALLMARK
588700A05A KOFAX IMAGE PRODUCTS
588800A05B MARQUIP, INC.
588900A05C INVENTORY CONVERSION, INC./
589000A05D CS COMPUTER SYSTEME GmbH
589100A05E MYRIAD LOGIC INC.
589200A05F BTG ENGINEERING BV
589300A060 ACER PERIPHERALS, INC.
589400A061 PURITAN BENNETT
589500A062 AES PRODATA
589600A063 JRL SYSTEMS, INC.
589700A064 KVB/ANALECT
589800A065 NEXLAND, INC.
589900A066 ISA CO., LTD.
590000A067 NETWORK SERVICES GROUP
590100A068 BHP LIMITED
590200A069 Symmetricom, Inc.
590300A06A Verilink Corporation
590400A06B DMS DORSCH MIKROSYSTEM GMBH
590500A06C SHINDENGEN ELECTRIC MFG. CO., LTD.
590600A06D MANNESMANN TALLY CORPORATION
590700A06E AUSTRON, INC.
590800A06F THE APPCON GROUP, INC.
590900A070 COASTCOM
591000A071 VIDEO LOTTERY TECHNOLOGIES,INC
591100A072 OVATION SYSTEMS LTD.
591200A073 COM21, INC.
591300A074 PERCEPTION TECHNOLOGY
591400A075 MICRON TECHNOLOGY, INC.
591500A076 CARDWARE LAB, INC.
591600A077 FUJITSU NEXION, INC.
591700A078 Marconi Communications
591800A079 ALPS ELECTRIC (USA), INC.
591900A07A ADVANCED PERIPHERALS TECHNOLOGIES, INC.
592000A07B DAWN COMPUTER INCORPORATION
592100A07C TONYANG NYLON CO., LTD.
592200A07D SEEQ TECHNOLOGY, INC.
592300A07E AVID TECHNOLOGY, INC.
592400A07F GSM-SYNTEL, LTD.
592500A080 ANTARES MICROSYSTEMS
592600A081 ALCATEL DATA NETWORKS
592700A082 NKT ELEKTRONIK A/S
592800A083 ASIMMPHONY TURKEY
592900A084 DATAPLEX PTY. LTD.
593000A086 AMBER WAVE SYSTEMS, INC.
593100A087 Zarlink Semiconductor Ltd.
593200A088 ESSENTIAL COMMUNICATIONS
593300A089 XPOINT TECHNOLOGIES, INC.
593400A08A BROOKTROUT TECHNOLOGY, INC.
593500A08B ASTON ELECTRONIC DESIGNS LTD.
593600A08C MultiMedia LANs, Inc.
593700A08D JACOMO CORPORATION
593800A08E Nokia Internet Communications
593900A08F DESKNET SYSTEMS, INC.
594000A090 TimeStep Corporation
594100A091 APPLICOM INTERNATIONAL
594200A092 H. BOLLMANN MANUFACTURERS, LTD
594300A093 B/E AEROSPACE, Inc.
594400A094 COMSAT CORPORATION
594500A095 ACACIA NETWORKS, INC.
594600A096 MITUMI ELECTRIC CO., LTD.
594700A097 JC INFORMATION SYSTEMS
594800A098 NETWORK APPLIANCE CORP.
594900A099 K-NET LTD.
595000A09A NIHON KOHDEN AMERICA
595100A09B QPSX COMMUNICATIONS, LTD.
595200A09C Xyplex, Inc.
595300A09D JOHNATHON FREEMAN TECHNOLOGIES
595400A09E ICTV
595500A09F COMMVISION CORP.
595600A0A0 COMPACT DATA, LTD.
595700A0A1 EPIC DATA INC.
595800A0A2 DIGICOM S.P.A.
595900A0A3 RELIABLE POWER METERS
596000A0A4 MICROS SYSTEMS, INC.
596100A0A5 TEKNOR MICROSYSTEME, INC.
596200A0A6 M.I. SYSTEMS, K.K.
596300A0A7 VORAX CORPORATION
596400A0A8 RENEX CORPORATION
596500A0A9 GN NETTEST (CANADA) NAVTEL DIVISION
596600A0AA SPACELABS MEDICAL
596700A0AB NETCS INFORMATIONSTECHNIK GMBH
596800A0AC GILAT SATELLITE NETWORKS, LTD.
596900A0AD MARCONI SPA
597000A0AE NUCOM SYSTEMS, INC.
597100A0AF WMS INDUSTRIES
597200A0B0 I-O DATA DEVICE, INC.
597300A0B1 FIRST VIRTUAL CORPORATION
597400A0B2 SHIMA SEIKI
597500A0B3 ZYKRONIX
597600A0B4 TEXAS MICROSYSTEMS, INC.
597700A0B5 3H TECHNOLOGY
597800A0B6 SANRITZ AUTOMATION CO., LTD.
597900A0B7 CORDANT, INC.
598000A0B8 SYMBIOS LOGIC INC.
598100A0B9 EAGLE TECHNOLOGY, INC.
598200A0BA PATTON ELECTRONICS CO.
598300A0BB HILAN GMBH
598400A0BC VIASAT, INCORPORATED
598500A0BD I-TECH CORP.
598600A0BE INTEGRATED CIRCUIT SYSTEMS, INC. COMMUNICATIONS GROUP
598700A0BF WIRELESS DATA GROUP MOTOROLA
598800A0C0 DIGITAL LINK CORP.
598900A0C1 ORTIVUS MEDICAL AB
599000A0C2 R.A. SYSTEMS CO., LTD.
599100A0C3 UNICOMPUTER GMBH
599200A0C4 CRISTIE ELECTRONICS LTD.
599300A0C5 ZYXEL COMMUNICATION
599400A0C6 QUALCOMM INCORPORATED
599500A0C7 TADIRAN TELECOMMUNICATIONS
599600A0C8 ADTRAN INC.
599700A0C9 INTEL CORPORATION - HF1-06
599800A0CA FUJITSU DENSO LTD.
599900A0CB ARK TELECOMMUNICATIONS, INC.
600000A0CC LITE-ON COMMUNICATIONS, INC.
600100A0CD DR. JOHANNES HEIDENHAIN GmbH
600200A0CE ASTROCOM CORPORATION
600300A0CF SOTAS, INC.
600400A0D0 TEN X TECHNOLOGY, INC.
600500A0D1 INVENTEC CORPORATION
600600A0D2 ALLIED TELESIS INTERNATIONAL CORPORATION
600700A0D3 INSTEM COMPUTER SYSTEMS, LTD.
600800A0D4 RADIOLAN, INC.
600900A0D5 SIERRA WIRELESS INC.
601000A0D6 SBE, INC.
601100A0D7 KASTEN CHASE APPLIED RESEARCH
601200A0D8 SPECTRA - TEK
601300A0D9 CONVEX COMPUTER CORPORATION
601400A0DA INTEGRATED SYSTEMS Technology, Inc.
601500A0DB FISHER & PAYKEL PRODUCTION
601600A0DC O.N. ELECTRONIC CO., LTD.
601700A0DD AZONIX CORPORATION
601800A0DE YAMAHA CORPORATION
601900A0DF STS TECHNOLOGIES, INC.
602000A0E0 TENNYSON TECHNOLOGIES PTY LTD
602100A0E1 WESTPORT RESEARCH ASSOCIATES, INC.
602200A0E2 KEISOKU GIKEN CORP.
602300A0E3 XKL SYSTEMS CORP.
602400A0E4 OPTIQUEST
602500A0E5 NHC COMMUNICATIONS
602600A0E6 DIALOGIC CORPORATION
602700A0E7 CENTRAL DATA CORPORATION
602800A0E8 REUTERS HOLDINGS PLC
602900A0E9 ELECTRONIC RETAILING SYSTEMS INTERNATIONAL
603000A0EA ETHERCOM CORP.
603100A0EB Encore Networks
603200A0EC TRANSMITTON LTD.
603300A0ED PRI AUTOMATION
603400A0EE NASHOBA NETWORKS
603500A0EF LUCIDATA LTD.
603600A0F0 TORONTO MICROELECTRONICS INC.
603700A0F1 MTI
603800A0F2 INFOTEK COMMUNICATIONS, INC.
603900A0F3 STAUBLI
604000A0F4 GE
604100A0F5 RADGUARD LTD.
604200A0F6 AutoGas Systems Inc.
604300A0F7 V.I COMPUTER CORP.
604400A0F8 SYMBOL TECHNOLOGIES, INC.
604500A0F9 BINTEC COMMUNICATIONS GMBH
604600A0FA Marconi Communication GmbH
604700A0FB TORAY ENGINEERING CO., LTD.
604800A0FC IMAGE SCIENCES, INC.
604900A0FD SCITEX DIGITAL PRINTING, INC.
605000A0FE BOSTON TECHNOLOGY, INC.
605100A0FF TELLABS OPERATIONS, INC.
605200AA00 INTEL CORPORATION
605300AA01 INTEL CORPORATION
605400AA02 INTEL CORPORATION
605500AA3C OLIVETTI TELECOM SPA (OLTECO)
605600B009 Grass Valley Group
605700B017 InfoGear Technology Corp.
605800B019 Casi-Rusco
605900B01C Westport Technologies
606000B01E Rantic Labs, Inc.
606100B02A ORSYS GmbH
606200B02D ViaGate Technologies, Inc.
606300B03B HiQ Networks
606400B048 Marconi Communications Inc.
606500B04A Cisco Systems, Inc.
606600B052 Intellon Corporation
606700B064 Cisco Systems, Inc.
606800B069 Honewell Oy
606900B06D Jones Futurex Inc.
607000B080 Mannesmann Ipulsys B.V.
607100B086 LocSoft Limited
607200B08E Cisco Systems, Inc.
607300B091 Transmeta Corp.
607400B094 Alaris, Inc.
607500B09A Morrow Technologies Corp.
607600B09D Point Grey Research Inc.
607700B0AC SIAE-Microelettronica S.p.A.
607800B0AE Symmetricom
607900B0B3 Xstreamis PLC
608000B0C2 Cisco Systems, Inc.
608100B0C7 Tellabs Operations, Inc.
608200B0CE TECHNOLOGY RESCUE
608300B0D0 Dell Computer Corp.
608400B0DB Nextcell, Inc.
608500B0DF Reliable Data Technology, Inc.
608600B0E7 British Federal Ltd.
608700B0EC EACEM
608800B0EE Ajile Systems, Inc.
608900B0F0 CALY NETWORKS
609000B0F5 NetWorth Technologies, Inc.
609100BB01 OCTOTHORPE CORP.
609200BBF0 UNGERMANN-BASS INC.
609300C000 LANOPTICS, LTD.
609400C001 DIATEK PATIENT MANAGMENT
609500C002 SERCOMM CORPORATION
609600C003 GLOBALNET COMMUNICATIONS
609700C004 JAPAN BUSINESS COMPUTER CO.LTD
609800C005 LIVINGSTON ENTERPRISES, INC.
609900C006 NIPPON AVIONICS CO., LTD.
610000C007 PINNACLE DATA SYSTEMS, INC.
610100C008 SECO SRL
610200C009 KT TECHNOLOGY (S) PTE LTD
610300C00A MICRO CRAFT
610400C00B NORCONTROL A.S.
610500C00C RELIA TECHNOLGIES
610600C00D ADVANCED LOGIC RESEARCH, INC.
610700C00E PSITECH, INC.
610800C00F QUANTUM SOFTWARE SYSTEMS LTD.
610900C010 HIRAKAWA HEWTECH CORP.
611000C011 INTERACTIVE COMPUTING DEVICES
611100C012 NETSPAN CORPORATION
611200C013 NETRIX
611300C014 TELEMATICS CALABASAS INT'L,INC
611400C015 NEW MEDIA CORPORATION
611500C016 ELECTRONIC THEATRE CONTROLS
611600C017 FORTE NETWORKS
611700C018 LANART CORPORATION
611800C019 LEAP TECHNOLOGY, INC.
611900C01A COROMETRICS MEDICAL SYSTEMS
612000C01B SOCKET COMMUNICATIONS, INC.
612100C01C INTERLINK COMMUNICATIONS LTD.
612200C01D GRAND JUNCTION NETWORKS, INC.
612300C01E LA FRANCAISE DES JEUX
612400C01F S.E.R.C.E.L.
612500C020 ARCO ELECTRONIC, CONTROL LTD.
612600C021 NETEXPRESS
612700C022 LASERMASTER TECHNOLOGIES, INC.
612800C023 TUTANKHAMON ELECTRONICS
612900C024 EDEN SISTEMAS DE COMPUTACAO SA
613000C025 DATAPRODUCTS CORPORATION
613100C026 LANS TECHNOLOGY CO., LTD.
613200C027 CIPHER SYSTEMS, INC.
613300C028 JASCO CORPORATION
613400C029 Nexans Deutschland AG - ANS
613500C02A OHKURA ELECTRIC CO., LTD.
613600C02B GERLOFF GESELLSCHAFT FUR
613700C02C CENTRUM COMMUNICATIONS, INC.
613800C02D FUJI PHOTO FILM CO., LTD.
613900C02E NETWIZ
614000C02F OKUMA CORPORATION
614100C030 INTEGRATED ENGINEERING B. V.
614200C031 DESIGN RESEARCH SYSTEMS, INC.
614300C032 I-CUBED LIMITED
614400C033 TELEBIT COMMUNICATIONS APS
614500C034 TRANSACTION NETWORK
614600C035 QUINTAR COMPANY
614700C036 RAYTECH ELECTRONIC CORP.
614800C037 DYNATEM
614900C038 RASTER IMAGE PROCESSING SYSTEM
615000C039 TDK SEMICONDUCTOR CORPORATION
615100C03A MEN-MIKRO ELEKTRONIK GMBH
615200C03B MULTIACCESS COMPUTING CORP.
615300C03C TOWER TECH S.R.L.
615400C03D WIESEMANN & THEIS GMBH
615500C03E FA. GEBR. HELLER GMBH
615600C03F STORES AUTOMATED SYSTEMS, INC.
615700C040 ECCI
615800C041 DIGITAL TRANSMISSION SYSTEMS
615900C042 DATALUX CORP.
616000C043 STRATACOM
616100C044 EMCOM CORPORATION
616200C045 ISOLATION SYSTEMS, LTD.
616300C046 KEMITRON LTD.
616400C047 UNIMICRO SYSTEMS, INC.
616500C048 BAY TECHNICAL ASSOCIATES
616600C049 U.S. ROBOTICS, INC.
616700C04A GROUP 2000 AG
616800C04B CREATIVE MICROSYSTEMS
616900C04C DEPARTMENT OF FOREIGN AFFAIRS
617000C04D MITEC, INC.
617100C04E COMTROL CORPORATION
617200C04F DELL COMPUTER CORPORATION
617300C050 TOYO DENKI SEIZO K.K.
617400C051 ADVANCED INTEGRATION RESEARCH
617500C052 BURR-BROWN
617600C053 DAVOX CORPORATION
617700C054 NETWORK PERIPHERALS, LTD.
617800C055 MODULAR COMPUTING TECHNOLOGIES
617900C056 SOMELEC
618000C057 MYCO ELECTRONICS
618100C058 DATAEXPERT CORP.
618200C059 NIPPON DENSO CO., LTD.
618300C05A SEMAPHORE COMMUNICATIONS CORP.
618400C05B NETWORKS NORTHWEST, INC.
618500C05C ELONEX PLC
618600C05D L&N TECHNOLOGIES
618700C05E VARI-LITE, INC.
618800C05F FINE-PAL COMPANY LIMITED
618900C060 ID SCANDINAVIA AS
619000C061 SOLECTEK CORPORATION
619100C062 IMPULSE TECHNOLOGY
619200C063 MORNING STAR TECHNOLOGIES, INC
619300C064 GENERAL DATACOMM IND. INC.
619400C065 SCOPE COMMUNICATIONS, INC.
619500C066 DOCUPOINT, INC.
619600C067 UNITED BARCODE INDUSTRIES
619700C068 PHILIP DRAKE ELECTRONICS LTD.
619800C069 Axxcelera Broadband Wireless
619900C06A ZAHNER-ELEKTRIK GMBH & CO. KG
620000C06B OSI PLUS CORPORATION
620100C06C SVEC COMPUTER CORP.
620200C06D BOCA RESEARCH, INC.
620300C06E HAFT TECHNOLOGY, INC.
620400C06F KOMATSU LTD.
620500C070 SECTRA SECURE-TRANSMISSION AB
620600C071 AREANEX COMMUNICATIONS, INC.
620700C072 KNX LTD.
620800C073 XEDIA CORPORATION
620900C074 TOYODA AUTOMATIC LOOM
621000C075 XANTE CORPORATION
621100C076 I-DATA INTERNATIONAL A-S
621200C077 DAEWOO TELECOM LTD.
621300C078 COMPUTER SYSTEMS ENGINEERING
621400C079 FONSYS CO.,LTD.
621500C07A PRIVA B.V.
621600C07B ASCEND COMMUNICATIONS, INC.
621700C07C HIGHTECH INFORMATION
621800C07D RISC DEVELOPMENTS LTD.
621900C07E KUBOTA CORPORATION ELECTRONIC
622000C07F NUPON COMPUTING CORP.
622100C080 NETSTAR, INC.
622200C081 METRODATA LTD.
622300C082 MOORE PRODUCTS CO.
622400C083 TRACE MOUNTAIN PRODUCTS, INC.
622500C084 DATA LINK CORP. LTD.
622600C085 ELECTRONICS FOR IMAGING, INC.
622700C086 THE LYNK CORPORATION
622800C087 UUNET TECHNOLOGIES, INC.
622900C088 EKF ELEKTRONIK GMBH
623000C089 TELINDUS DISTRIBUTION
623100C08A LAUTERBACH DATENTECHNIK GMBH
623200C08B RISQ MODULAR SYSTEMS, INC.
623300C08C PERFORMANCE TECHNOLOGIES, INC.
623400C08D TRONIX PRODUCT DEVELOPMENT
623500C08E NETWORK INFORMATION TECHNOLOGY
623600C08F MATSUSHITA ELECTRIC WORKS, LTD
623700C090 PRAIM S.R.L.
623800C091 JABIL CIRCUIT, INC.
623900C092 MENNEN MEDICAL INC.
624000C093 ALTA RESEARCH CORP.
624100C094 VMX INC.
624200C095 ZNYX
624300C096 TAMURA CORPORATION
624400C097 ARCHIPEL SA
624500C098 CHUNTEX ELECTRONIC CO., LTD.
624600C099 YOSHIKI INDUSTRIAL CO.,LTD.
624700C09A PHOTONICS CORPORATION
624800C09B RELIANCE COMM/TEC, R-TEC
624900C09C TOA ELECTRONIC LTD.
625000C09D DISTRIBUTED SYSTEMS INT'L, INC
625100C09E CACHE COMPUTERS, INC.
625200C09F QUANTA COMPUTER, INC.
625300C0A0 ADVANCE MICRO RESEARCH, INC.
625400C0A1 TOKYO DENSHI SEKEI CO.
625500C0A2 INTERMEDIUM A/S
625600C0A3 DUAL ENTERPRISES CORPORATION
625700C0A4 UNIGRAF OY
625800C0A5 DICKENS DATA SYSTEMS
625900C0A6 EXICOM AUSTRALIA PTY. LTD
626000C0A7 SEEL LTD.
626100C0A8 GVC CORPORATION
626200C0A9 BARRON MCCANN LTD.
626300C0AA SILICON VALLEY COMPUTER
626400C0AB Telco Systems, Inc.
626500C0AC GAMBIT COMPUTER COMMUNICATIONS
626600C0AD MARBEN COMMUNICATION SYSTEMS
626700C0AE TOWERCOM CO. INC. DBA PC HOUSE
626800C0AF TEKLOGIX INC.
626900C0B0 GCC TECHNOLOGIES,INC.
627000C0B1 GENIUS NET CO.
627100C0B2 NORAND CORPORATION
627200C0B3 COMSTAT DATACOMM CORPORATION
627300C0B4 MYSON TECHNOLOGY, INC.
627400C0B5 CORPORATE NETWORK SYSTEMS,INC.
627500C0B6 Snap Appliance, Inc.
627600C0B7 AMERICAN POWER CONVERSION CORP
627700C0B8 FRASER'S HILL LTD.
627800C0B9 FUNK SOFTWARE, INC.
627900C0BA NETVANTAGE
628000C0BB FORVAL CREATIVE, INC.
628100C0BC TELECOM AUSTRALIA/CSSC
628200C0BD INEX TECHNOLOGIES, INC.
628300C0BE ALCATEL - SEL
628400C0BF TECHNOLOGY CONCEPTS, LTD.
628500C0C0 SHORE MICROSYSTEMS, INC.
628600C0C1 QUAD/GRAPHICS, INC.
628700C0C2 INFINITE NETWORKS LTD.
628800C0C3 ACUSON COMPUTED SONOGRAPHY
628900C0C4 COMPUTER OPERATIONAL
629000C0C5 SID INFORMATICA
629100C0C6 PERSONAL MEDIA CORP.
629200C0C7 SPARKTRUM MICROSYSTEMS, INC.
629300C0C8 MICRO BYTE PTY. LTD.
629400C0C9 ELSAG BAILEY PROCESS
629500C0CA ALFA, INC.
629600C0CB CONTROL TECHNOLOGY CORPORATION
629700C0CC TELESCIENCES CO SYSTEMS, INC.
629800C0CD COMELTA, S.A.
629900C0CE CEI SYSTEMS & ENGINEERING PTE
630000C0CF IMATRAN VOIMA OY
630100C0D0 RATOC SYSTEM INC.
630200C0D1 COMTREE TECHNOLOGY CORPORATION
630300C0D2 SYNTELLECT, INC.
630400C0D3 OLYMPUS IMAGE SYSTEMS, INC.
630500C0D4 AXON NETWORKS, INC.
630600C0D5 QUANCOM ELECTRONIC GMBH
630700C0D6 J1 SYSTEMS, INC.
630800C0D7 TAIWAN TRADING CENTER DBA
630900C0D8 UNIVERSAL DATA SYSTEMS
631000C0D9 QUINTE NETWORK CONFIDENTIALITY
631100C0DA NICE SYSTEMS LTD.
631200C0DB IPC CORPORATION (PTE) LTD.
631300C0DC EOS TECHNOLOGIES, INC.
631400C0DD QLogic Corporation
631500C0DE ZCOMM, INC.
631600C0DF KYE Systems Corp.
631700C0E0 DSC COMMUNICATION CORP.
631800C0E1 SONIC SOLUTIONS
631900C0E2 CALCOMP, INC.
632000C0E3 OSITECH COMMUNICATIONS, INC.
632100C0E4 SIEMENS BUILDING
632200C0E5 GESPAC, S.A.
632300C0E6 Verilink Corporation
632400C0E7 FIBERDATA AB
632500C0E8 PLEXCOM, INC.
632600C0E9 OAK SOLUTIONS, LTD.
632700C0EA ARRAY TECHNOLOGY LTD.
632800C0EB SEH COMPUTERTECHNIK GMBH
632900C0EC DAUPHIN TECHNOLOGY
633000C0ED US ARMY ELECTRONIC
633100C0EE KYOCERA CORPORATION
633200C0EF ABIT CORPORATION
633300C0F0 KINGSTON TECHNOLOGY CORP.
633400C0F1 SHINKO ELECTRIC CO., LTD.
633500C0F2 TRANSITION NETWORKS
633600C0F3 NETWORK COMMUNICATIONS CORP.
633700C0F4 INTERLINK SYSTEM CO., LTD.
633800C0F5 METACOMP, INC.
633900C0F6 CELAN TECHNOLOGY INC.
634000C0F7 ENGAGE COMMUNICATION, INC.
634100C0F8 ABOUT COMPUTING INC.
634200C0F9 HARRIS AND JEFFRIES, INC.
634300C0FA CANARY COMMUNICATIONS, INC.
634400C0FB ADVANCED TECHNOLOGY LABS
634500C0FC ELASTIC REALITY, INC.
634600C0FD PROSUM
634700C0FE APTEC COMPUTER SYSTEMS, INC.
634800C0FF DOT HILL SYSTEMS CORPORATION
634900CBBD Cambridge Broadband Ltd.
635000CF1C COMMUNICATION MACHINERY CORP.
635100D000 FERRAN SCIENTIFIC, INC.
635200D001 VST TECHNOLOGIES, INC.
635300D002 DITECH CORPORATION
635400D003 COMDA ENTERPRISES CORP.
635500D004 PENTACOM LTD.
635600D005 ZHS ZEITMANAGEMENTSYSTEME
635700D006 CISCO SYSTEMS, INC.
635800D007 MIC ASSOCIATES, INC.
635900D008 MACTELL CORPORATION
636000D009 HSING TECH. ENTERPRISE CO. LTD
636100D00A LANACCESS TELECOM S.A.
636200D00B RHK TECHNOLOGY, INC.
636300D00C SNIJDER MICRO SYSTEMS
636400D00D MICROMERITICS INSTRUMENT
636500D00E PLURIS, INC.
636600D00F SPEECH DESIGN GMBH
636700D010 CONVERGENT NETWORKS, INC.
636800D011 PRISM VIDEO, INC.
636900D012 GATEWORKS CORP.
637000D013 PRIMEX AEROSPACE COMPANY
637100D014 ROOT, INC.
637200D015 UNIVEX MICROTECHNOLOGY CORP.
637300D016 SCM MICROSYSTEMS, INC.
637400D017 SYNTECH INFORMATION CO., LTD.
637500D018 QWES. COM, INC.
637600D019 DAINIPPON SCREEN CORPORATE
637700D01A URMET SUD S.P.A.
637800D01B MIMAKI ENGINEERING CO., LTD.
637900D01C SBS TECHNOLOGIES,
638000D01D FURUNO ELECTRIC CO., LTD.
638100D01E PINGTEL CORP.
638200D01F CTAM PTY. LTD.
638300D020 AIM SYSTEM, INC.
638400D021 REGENT ELECTRONICS CORP.
638500D022 INCREDIBLE TECHNOLOGIES, INC.
638600D023 INFORTREND TECHNOLOGY, INC.
638700D024 Cognex Corporation
638800D025 XROSSTECH, INC.
638900D026 HIRSCHMANN AUSTRIA GMBH
639000D027 APPLIED AUTOMATION, INC.
639100D028 OMNEON VIDEO NETWORKS
639200D029 WAKEFERN FOOD CORPORATION
639300D02A Voxent Systems Ltd.
639400D02B JETCELL, INC.
639500D02C CAMPBELL SCIENTIFIC, INC.
639600D02D ADEMCO
639700D02E COMMUNICATION AUTOMATION CORP.
639800D02F VLSI TECHNOLOGY INC.
639900D030 SAFETRAN SYSTEMS CORP.
640000D031 INDUSTRIAL LOGIC CORPORATION
640100D032 YANO ELECTRIC CO., LTD.
640200D033 DALIAN DAXIAN NETWORK
640300D034 ORMEC SYSTEMS CORP.
640400D035 BEHAVIOR TECH. COMPUTER CORP.
640500D036 TECHNOLOGY ATLANTA CORP.
640600D037 PHILIPS-DVS-LO BDR
640700D038 FIVEMERE, LTD.
640800D039 UTILICOM, INC.
640900D03A ZONEWORX, INC.
641000D03B VISION PRODUCTS PTY. LTD.
641100D03C Vieo, Inc.
641200D03D GALILEO TECHNOLOGY, LTD.
641300D03E ROCKETCHIPS, INC.
641400D03F AMERICAN COMMUNICATION
641500D040 SYSMATE CO., LTD.
641600D041 AMIGO TECHNOLOGY CO., LTD.
641700D042 MAHLO GMBH & CO. UG
641800D043 ZONAL RETAIL DATA SYSTEMS
641900D044 ALIDIAN NETWORKS, INC.
642000D045 KVASER AB
642100D046 DOLBY LABORATORIES, INC.
642200D047 XN TECHNOLOGIES
642300D048 ECTON, INC.
642400D049 IMPRESSTEK CO., LTD.
642500D04A PRESENCE TECHNOLOGY GMBH
642600D04B LA CIE GROUP S.A.
642700D04C EUROTEL TELECOM LTD.
642800D04D DIV OF RESEARCH & STATISTICS
642900D04E LOGIBAG
643000D04F BITRONICS, INC.
643100D050 ISKRATEL
643200D051 O2 MICRO, INC.
643300D052 ASCEND COMMUNICATIONS, INC.
643400D053 CONNECTED SYSTEMS
643500D054 SAS INSTITUTE INC.
643600D055 KATHREIN-WERKE KG
643700D056 SOMAT CORPORATION
643800D057 ULTRAK, INC.
643900D058 CISCO SYSTEMS, INC.
644000D059 AMBIT MICROSYSTEMS CORP.
644100D05A SYMBIONICS, LTD.
644200D05B ACROLOOP MOTION CONTROL
644300D05C TECHNOTREND SYSTEMTECHNIK GMBH
644400D05D INTELLIWORXX, INC.
644500D05E STRATABEAM TECHNOLOGY, INC.
644600D05F VALCOM, INC.
644700D060 PANASONIC EUROPEAN
644800D061 TREMON ENTERPRISES CO., LTD.
644900D062 DIGIGRAM
645000D063 CISCO SYSTEMS, INC.
645100D064 MULTITEL
645200D065 TOKO ELECTRIC
645300D066 WINTRISS ENGINEERING CORP.
645400D067 CAMPIO COMMUNICATIONS
645500D068 IWILL CORPORATION
645600D069 TECHNOLOGIC SYSTEMS
645700D06A LINKUP SYSTEMS CORPORATION
645800D06B SR TELECOM INC.
645900D06C SHAREWAVE, INC.
646000D06D ACRISON, INC.
646100D06E TRENDVIEW RECORDERS LTD.
646200D06F KMC CONTROLS
646300D070 LONG WELL ELECTRONICS CORP.
646400D071 ECHELON CORP.
646500D072 BROADLOGIC
646600D073 ACN ADVANCED COMMUNICATIONS
646700D074 TAQUA SYSTEMS, INC.
646800D075 ALARIS MEDICAL SYSTEMS, INC.
646900D076 MERRILL LYNCH & CO., INC.
647000D077 LUCENT TECHNOLOGIES
647100D078 ELTEX OF SWEDEN AB
647200D079 CISCO SYSTEMS, INC.
647300D07A AMAQUEST COMPUTER CORP.
647400D07B COMCAM INTERNATIONAL LTD.
647500D07C KOYO ELECTRONICS INC. CO.,LTD.
647600D07D COSINE COMMUNICATIONS
647700D07E KEYCORP LTD.
647800D07F STRATEGY & TECHNOLOGY, LIMITED
647900D080 EXABYTE CORPORATION
648000D081 REAL TIME DEVICES USA, INC.
648100D082 IOWAVE INC.
648200D083 INVERTEX, INC.
648300D084 NEXCOMM SYSTEMS, INC.
648400D085 OTIS ELEVATOR COMPANY
648500D086 FOVEON, INC.
648600D087 MICROFIRST INC.
648700D088 Terayon Communications Systems
648800D089 DYNACOLOR, INC.
648900D08A PHOTRON USA
649000D08B ADVA Limited
649100D08C GENOA TECHNOLOGY, INC.
649200D08D PHOENIX GROUP, INC.
649300D08E NVISION INC.
649400D08F ARDENT TECHNOLOGIES, INC.
649500D090 CISCO SYSTEMS, INC.
649600D091 SMARTSAN SYSTEMS, INC.
649700D092 GLENAYRE WESTERN MULTIPLEX
649800D093 TQ - COMPONENTS GMBH
649900D094 TIMELINE VISTA, INC.
650000D095 XYLAN CORPORATION
650100D096 3COM EUROPE LTD.
650200D097 CISCO SYSTEMS, INC.
650300D098 Photon Dynamics Canada Inc.
650400D099 ELCARD OY
650500D09A FILANET CORPORATION
650600D09B SPECTEL LTD.
650700D09C KAPADIA COMMUNICATIONS
650800D09D VERIS INDUSTRIES
650900D09E 2WIRE, INC.
651000D09F NOVTEK TEST SYSTEMS
651100D0A0 MIPS DENMARK
651200D0A1 OSKAR VIERLING GMBH + CO. KG
651300D0A2 INTEGRATED DEVICE
651400D0A3 VOCAL DATA, INC.
651500D0A4 ALANTRO COMMUNICATIONS
651600D0A5 AMERICAN ARIUM
651700D0A6 LANBIRD TECHNOLOGY CO., LTD.
651800D0A7 TOKYO SOKKI KENKYUJO CO., LTD.
651900D0A8 NETWORK ENGINES, INC.
652000D0A9 SHINANO KENSHI CO., LTD.
652100D0AA CHASE COMMUNICATIONS
652200D0AB DELTAKABEL TELECOM CV
652300D0AC GRAYSON WIRELESS
652400D0AD TL INDUSTRIES
652500D0AE ORESIS COMMUNICATIONS, INC.
652600D0AF CUTLER-HAMMER, INC.
652700D0B0 BITSWITCH LTD.
652800D0B1 OMEGA ELECTRONICS SA
652900D0B2 XIOTECH CORPORATION
653000D0B3 DRS FLIGHT SAFETY AND
653100D0B4 KATSUJIMA CO., LTD.
653200D0B5 IPricot formerly DotCom
653300D0B6 CRESCENT NETWORKS, INC.
653400D0B7 INTEL CORPORATION
653500D0B8 IOMEGA CORP.
653600D0B9 MICROTEK INTERNATIONAL, INC.
653700D0BA CISCO SYSTEMS, INC.
653800D0BB CISCO SYSTEMS, INC.
653900D0BC CISCO SYSTEMS, INC.
654000D0BD SICAN GMBH
654100D0BE EMUTEC INC.
654200D0BF PIVOTAL TECHNOLOGIES
654300D0C0 CISCO SYSTEMS, INC.
654400D0C1 HARMONIC DATA SYSTEMS, LTD.
654500D0C2 BALTHAZAR TECHNOLOGY AB
654600D0C3 VIVID TECHNOLOGY PTE, LTD.
654700D0C4 TERATECH CORPORATION
654800D0C5 COMPUTATIONAL SYSTEMS, INC.
654900D0C6 THOMAS & BETTS CORP.
655000D0C7 PATHWAY, INC.
655100D0C8 I/O CONSULTING A/S
655200D0C9 ADVANTECH CO., LTD.
655300D0CA INTRINSYC SOFTWARE INC.
655400D0CB DASAN CO., LTD.
655500D0CC TECHNOLOGIES LYRE INC.
655600D0CD ATAN TECHNOLOGY INC.
655700D0CE ASYST ELECTRONIC
655800D0CF MORETON BAY
655900D0D0 ZHONGXING TELECOM LTD.
656000D0D1 SIROCCO SYSTEMS, INC.
656100D0D2 EPILOG CORPORATION
656200D0D3 CISCO SYSTEMS, INC.
656300D0D4 V-BITS, INC.
656400D0D5 GRUNDIG AG
656500D0D6 AETHRA TELECOMUNICAZIONI
656600D0D7 B2C2, INC.
656700D0D8 3Com Corporation
656800D0D9 DEDICATED MICROCOMPUTERS
656900D0DA TAICOM DATA SYSTEMS CO., LTD.
657000D0DB MCQUAY INTERNATIONAL
657100D0DC MODULAR MINING SYSTEMS, INC.
657200D0DD SUNRISE TELECOM, INC.
657300D0DE PHILIPS MULTIMEDIA NETWORK
657400D0DF KUZUMI ELECTRONICS, INC.
657500D0E0 DOOIN ELECTRONICS CO.
657600D0E1 AVIONITEK ISRAEL INC.
657700D0E2 MRT MICRO, INC.
657800D0E3 ELE-CHEM ENGINEERING CO., LTD.
657900D0E4 CISCO SYSTEMS, INC.
658000D0E5 SOLIDUM SYSTEMS CORP.
658100D0E6 IBOND INC.
658200D0E7 VCON TELECOMMUNICATION LTD.
658300D0E8 MAC SYSTEM CO., LTD.
658400D0E9 ADVANTAGE CENTURY
658500D0EA NEXTONE COMMUNICATIONS, INC.
658600D0EB LIGHTERA NETWORKS, INC.
658700D0EC NAKAYO TELECOMMUNICATIONS, INC
658800D0ED XIOX
658900D0EE DICTAPHONE CORPORATION
659000D0EF IGT
659100D0F0 CONVISION TECHNOLOGY GMBH
659200D0F1 SEGA ENTERPRISES, LTD.
659300D0F2 MONTEREY NETWORKS
659400D0F3 SOLARI DI UDINE SPA
659500D0F4 CARINTHIAN TECH INSTITUTE
659600D0F5 ORANGE MICRO, INC.
659700D0F6 Alcatel Canada
659800D0F7 NEXT NETS CORPORATION
659900D0F8 FUJIAN STAR TERMINAL
660000D0F9 ACUTE COMMUNICATIONS CORP.
660100D0FA RACAL GUARDATA
660200D0FB TEK MICROSYSTEMS, INCORPORATED
660300D0FC GRANITE MICROSYSTEMS
660400D0FD OPTIMA TELE.COM, INC.
660500D0FE ASTRAL POINT
660600D0FF CISCO SYSTEMS, INC.
660700DD00 UNGERMANN-BASS INC.
660800DD01 UNGERMANN-BASS INC.
660900DD02 UNGERMANN-BASS INC.
661000DD03 UNGERMANN-BASS INC.
661100DD04 UNGERMANN-BASS INC.
661200DD05 UNGERMANN-BASS INC.
661300DD06 UNGERMANN-BASS INC.
661400DD07 UNGERMANN-BASS INC.
661500DD08 UNGERMANN-BASS INC.
661600DD09 UNGERMANN-BASS INC.
661700DD0A UNGERMANN-BASS INC.
661800DD0B UNGERMANN-BASS INC.
661900DD0C UNGERMANN-BASS INC.
662000DD0D UNGERMANN-BASS INC.
662100DD0E UNGERMANN-BASS INC.
662200DD0F UNGERMANN-BASS INC.
662300E000 FUJITSU, LTD
662400E001 STRAND LIGHTING LIMITED
662500E002 CROSSROADS SYSTEMS, INC.
662600E003 NOKIA WIRELESS BUSINESS COMMUN
662700E004 PMC-SIERRA, INC.
662800E005 TECHNICAL CORP.
662900E006 SILICON INTEGRATED SYS. CORP.
663000E007 NETWORK ALCHEMY LTD.
663100E008 AMAZING CONTROLS! INC.
663200E009 MARATHON TECHNOLOGIES CORP.
663300E00A DIBA, INC.
663400E00B ROOFTOP COMMUNICATIONS CORP.
663500E00C MOTOROLA
663600E00D RADIANT SYSTEMS
663700E00E AVALON IMAGING SYSTEMS, INC.
663800E00F SHANGHAI BAUD DATA
663900E010 HESS SB-AUTOMATENBAU GmbH
664000E011 UNIDEN SAN DIEGO R&D CENTER, INC.
664100E012 PLUTO TECHNOLOGIES INTERNATIONAL INC.
664200E013 EASTERN ELECTRONIC CO., LTD.
664300E014 CISCO SYSTEMS, INC.
664400E015 HEIWA CORPORATION
664500E016 RAPID CITY COMMUNICATIONS
664600E017 EXXACT GmbH
664700E018 ASUSTEK COMPUTER INC.
664800E019 ING. GIORDANO ELETTRONICA
664900E01A COMTEC SYSTEMS. CO., LTD.
665000E01B SPHERE COMMUNICATIONS, INC.
665100E01C MOBILITY ELECTRONICSY
665200E01D WebTV NETWORKS, INC.
665300E01E CISCO SYSTEMS, INC.
665400E01F AVIDIA Systems, Inc.
665500E020 TECNOMEN OY
665600E021 FREEGATE CORP.
665700E022 MediaLight, Inc.
665800E023 TELRAD
665900E024 GADZOOX NETWORKS
666000E025 dit CO., LTD.
666100E026 EASTMAN KODAK CO.
666200E027 DUX, INC.
666300E028 APTIX CORPORATION
666400E029 STANDARD MICROSYSTEMS CORP.
666500E02A TANDBERG TELEVISION AS
666600E02B EXTREME NETWORKS
666700E02C AST COMPUTER
666800E02D InnoMediaLogic, Inc.
666900E02E SPC ELECTRONICS CORPORATION
667000E02F MCNS HOLDINGS, L.P.
667100E030 MELITA INTERNATIONAL CORP.
667200E031 HAGIWARA ELECTRIC CO., LTD.
667300E032 MISYS FINANCIAL SYSTEMS, LTD.
667400E033 E.E.P.D. GmbH
667500E034 CISCO SYSTEMS, INC.
667600E035 LOUGHBOROUGH SOUND IMAGES, PLC
667700E036 PIONEER CORPORATION
667800E037 CENTURY CORPORATION
667900E038 PROXIMA CORPORATION
668000E039 PARADYNE CORP.
668100E03A CABLETRON SYSTEMS, INC.
668200E03B PROMINET CORPORATION
668300E03C AdvanSys
668400E03D FOCON ELECTRONIC SYSTEMS A/S
668500E03E ALFATECH, INC.
668600E03F JATON CORPORATION
668700E040 DeskStation Technology, Inc.
668800E041 CSPI
668900E042 Pacom Systems Ltd.
669000E043 VitalCom
669100E044 LSICS CORPORATION
669200E045 TOUCHWAVE, INC.
669300E046 BENTLY NEVADA CORP.
669400E047 INFOCUS SYSTEMS
669500E048 SDL COMMUNICATIONS, INC.
669600E049 MICROWI ELECTRONIC GmbH
669700E04A ENHANCED MESSAGING SYSTEMS, INC
669800E04B JUMP INDUSTRIELLE COMPUTERTECHNIK GmbH
669900E04C REALTEK SEMICONDUCTOR CORP.
670000E04D INTERNET INITIATIVE JAPAN, INC
670100E04E SANYO DENKI CO., LTD.
670200E04F CISCO SYSTEMS, INC.
670300E050 EXECUTONE INFORMATION SYSTEMS, INC.
670400E051 TALX CORPORATION
670500E052 FOUNDRY NETWORKS, INC.
670600E053 CELLPORT LABS, INC.
670700E054 KODAI HITEC CO., LTD.
670800E055 INGENIERIA ELECTRONICA COMERCIAL INELCOM S.A.
670900E056 HOLONTECH CORPORATION
671000E057 HAN MICROTELECOM. CO., LTD.
671100E058 PHASE ONE DENMARK A/S
671200E059 CONTROLLED ENVIRONMENTS, LTD.
671300E05A GALEA NETWORK SECURITY
671400E05B WEST END SYSTEMS CORP.
671500E05C MATSUSHITA KOTOBUKI ELECTRONICS INDUSTRIES, LTD.
671600E05D UNITEC CO., LTD.
671700E05E JAPAN AVIATION ELECTRONICS INDUSTRY, LTD.
671800E05F e-Net, Inc.
671900E060 SHERWOOD
672000E061 EdgePoint Networks, Inc.
672100E062 HOST ENGINEERING
672200E063 CABLETRON - YAGO SYSTEMS, INC.
672300E064 SAMSUNG ELECTRONICS
672400E065 OPTICAL ACCESS INTERNATIONAL
672500E066 ProMax Systems, Inc.
672600E067 eac AUTOMATION-CONSULTING GmbH
672700E068 MERRIMAC SYSTEMS INC.
672800E069 JAYCOR
672900E06A KAPSCH AG
673000E06B W&G SPECIAL PRODUCTS
673100E06C AEP Systems International Ltd
673200E06D COMPUWARE CORPORATION
673300E06E FAR SYSTEMS S.p.A.
673400E06F Terayon Communications Systems
673500E070 DH TECHNOLOGY
673600E071 EPIS MICROCOMPUTER
673700E072 LYNK
673800E073 NATIONAL AMUSEMENT NETWORK, INC.
673900E074 TIERNAN COMMUNICATIONS, INC.
674000E075 Verilink Corporation
674100E076 DEVELOPMENT CONCEPTS, INC.
674200E077 WEBGEAR, INC.
674300E078 BERKELEY NETWORKS
674400E079 A.T.N.R.
674500E07A MIKRODIDAKT AB
674600E07B BAY NETWORKS
674700E07C METTLER-TOLEDO, INC.
674800E07D NETRONIX, INC.
674900E07E WALT DISNEY IMAGINEERING
675000E07F LOGISTISTEM s.r.l.
675100E080 CONTROL RESOURCES CORPORATION
675200E081 TYAN COMPUTER CORP.
675300E082 ANERMA
675400E083 JATO TECHNOLOGIES, INC.
675500E084 COMPULITE R&D
675600E085 GLOBAL MAINTECH, INC.
675700E086 CYBEX COMPUTER PRODUCTS
675800E087 LeCroy - Networking Productions Division
675900E088 LTX CORPORATION
676000E089 ION Networks, Inc.
676100E08A GEC AVERY, LTD.
676200E08B QLogic Corp.
676300E08C NEOPARADIGM LABS, INC.
676400E08D PRESSURE SYSTEMS, INC.
676500E08E UTSTARCOM
676600E08F CISCO SYSTEMS, INC.
676700E090 BECKMAN LAB. AUTOMATION DIV.
676800E091 LG ELECTRONICS, INC.
676900E092 ADMTEK INCORPORATED
677000E093 ACKFIN NETWORKS
677100E094 OSAI SRL
677200E095 ADVANCED-VISION TECHNOLGIES CORP.
677300E096 SHIMADZU CORPORATION
677400E097 CARRIER ACCESS CORPORATION
677500E098 AboCom Systems, Inc.
677600E099 SAMSON AG
677700E09A POSITRON INDUSTRIES, INC.
677800E09B ENGAGE NETWORKS, INC.
677900E09C MII
678000E09D SARNOFF CORPORATION
678100E09E QUANTUM CORPORATION
678200E09F PIXEL VISION
678300E0A0 WILTRON CO.
678400E0A1 HIMA PAUL HILDEBRANDT GmbH Co. KG
678500E0A2 MICROSLATE INC.
678600E0A3 CISCO SYSTEMS, INC.
678700E0A4 ESAOTE S.p.A.
678800E0A5 ComCore Semiconductor, Inc.
678900E0A6 TELOGY NETWORKS, INC.
679000E0A7 IPC INFORMATION SYSTEMS, INC.
679100E0A8 SAT GmbH & Co.
679200E0A9 FUNAI ELECTRIC CO., LTD.
679300E0AA ELECTROSONIC LTD.
679400E0AB DIMAT S.A.
679500E0AC MIDSCO, INC.
679600E0AD EES TECHNOLOGY, LTD.
679700E0AE XAQTI CORPORATION
679800E0AF GENERAL DYNAMICS INFORMATION SYSTEMS
679900E0B0 CISCO SYSTEMS, INC.
680000E0B1 PACKET ENGINES, INC.
680100E0B2 TELMAX COMMUNICATIONS CORP.
680200E0B3 EtherWAN Systems, Inc.
680300E0B4 TECHNO SCOPE CO., LTD.
680400E0B5 ARDENT COMMUNICATIONS CORP.
680500E0B6 Entrada Networks
680600E0B7 PI GROUP, LTD.
680700E0B8 GATEWAY 2000
680800E0B9 BYAS SYSTEMS
680900E0BA BERGHOF AUTOMATIONSTECHNIK GmbH
681000E0BB NBX CORPORATION
681100E0BC SYMON COMMUNICATIONS, INC.
681200E0BD INTERFACE SYSTEMS, INC.
681300E0BE GENROCO INTERNATIONAL, INC.
681400E0BF TORRENT NETWORKING TECHNOLOGIES CORP.
681500E0C0 SEIWA ELECTRIC MFG. CO., LTD.
681600E0C1 MEMOREX TELEX JAPAN, LTD.
681700E0C2 NECSY S.p.A.
681800E0C3 SAKAI SYSTEM DEVELOPMENT CORP.
681900E0C4 HORNER ELECTRIC, INC.
682000E0C5 BCOM ELECTRONICS INC.
682100E0C6 LINK2IT, L.L.C.
682200E0C7 EUROTECH SRL
682300E0C8 VIRTUAL ACCESS, LTD.
682400E0C9 AutomatedLogic Corporation
682500E0CA BEST DATA PRODUCTS
682600E0CB RESON, INC.
682700E0CC HERO SYSTEMS, LTD.
682800E0CD SENSIS CORPORATION
682900E0CE ARN
683000E0CF INTEGRATED DEVICE TECHNOLOGY, INC.
683100E0D0 NETSPEED, INC.
683200E0D1 TELSIS LIMITED
683300E0D2 VERSANET COMMUNICATIONS, INC.
683400E0D3 DATENTECHNIK GmbH
683500E0D4 EXCELLENT COMPUTER
683600E0D5 ARCXEL TECHNOLOGIES, INC.
683700E0D6 COMPUTER & COMMUNICATION RESEARCH LAB.
683800E0D7 SUNSHINE ELECTRONICS, INC.
683900E0D8 LANBit Computer, Inc.
684000E0D9 TAZMO CO., LTD.
684100E0DA ASSURED ACCESS TECHNOLOGY, INC.
684200E0DB ViaVideo Communications, Inc.
684300E0DC NEXWARE CORP.
684400E0DD ZENITH ELECTRONICS CORPORATION
684500E0DE DATAX NV
684600E0DF KE KOMMUNIKATIONS-ELECTRONIK
684700E0E0 SI ELECTRONICS, LTD.
684800E0E1 G2 NETWORKS, INC.
684900E0E2 INNOVA CORP.
685000E0E3 SK-ELEKTRONIK GmbH
685100E0E4 FANUC ROBOTICS NORTH AMERICA, Inc.
685200E0E5 CINCO NETWORKS, INC.
685300E0E6 INCAA DATACOM B.V.
685400E0E7 RAYTHEON E-SYSTEMS, INC.
685500E0E8 GRETACODER Data Systems AG
685600E0E9 DATA LABS, INC.
685700E0EA INNOVAT COMMUNICATIONS, INC.
685800E0EB DIGICOM SYSTEMS, INCORPORATED
685900E0EC CELESTICA INC.
686000E0ED SILICOM, LTD.
686100E0EE MAREL HF
686200E0EF DIONEX
686300E0F0 ABLER TECHNOLOGY, INC.
686400E0F1 THAT CORPORATION
686500E0F2 ARLOTTO COMNET, INC.
686600E0F3 WebSprint Communications, Inc.
686700E0F4 INSIDE Technology A/S
686800E0F5 TELES AG
686900E0F6 DECISION EUROPE
687000E0F7 CISCO SYSTEMS, INC.
687100E0F8 DICNA CONTROL AB
687200E0F9 CISCO SYSTEMS, INC.
687300E0FA TRL TECHNOLOGY, LTD.
687400E0FB LEIGHTRONIX, INC.
687500E0FC HUAWEI TECHNOLOGIES CO., LTD.
687600E0FD A-TREND TECHNOLOGY CO., LTD.
687700E0FE CISCO SYSTEMS, INC.
687800E0FF SECURITY DYNAMICS TECHNOLOGIES, Inc.
687900E6D3 NIXDORF COMPUTER CORP.
6880020701 RACAL-DATACOM
6881021C7C PERQ SYSTEMS CORPORATION
6882026086 LOGIC REPLACEMENT TECH. LTD.
688302608C 3COM CORPORATION
6884027001 RACAL-DATACOM
68850270B0 M/A-COM INC. COMPANIES
68860270B3 DATA RECALL LTD
6887029D8E CARDIAC RECORDERS INC.
688802AA3C OLIVETTI TELECOMM SPA (OLTECO)
688902BB01 OCTOTHORPE CORP.
689002C08C 3COM CORPORATION
689102CF1C COMMUNICATION MACHINERY CORP.
689202E6D3 NIXDORF COMPUTER CORPORATION
6893040AE0 XMIT AG COMPUTER NETWORKS
689404E0C4 TRIUMPH-ADLER AG
6895080001 COMPUTERVISION CORPORATION
6896080002 BRIDGE COMMUNICATIONS INC.
6897080003 ADVANCED COMPUTER COMM.
6898080004 CROMEMCO INCORPORATED
6899080005 SYMBOLICS INC.
6900080006 SIEMENS AG
6901080007 APPLE COMPUTER INC.
6902080008 BOLT BERANEK AND NEWMAN INC.
6903080009 HEWLETT PACKARD
690408000A NESTAR SYSTEMS INCORPORATED
690508000B UNISYS CORPORATION
690608000C MIKLYN DEVELOPMENT CO.
690708000D INTERNATIONAL COMPUTERS LTD.
690808000E NCR CORPORATION
690908000F MITEL CORPORATION
6910080011 TEKTRONIX INC.
6911080012 BELL ATLANTIC INTEGRATED SYST.
6912080013 EXXON
6913080014 EXCELAN
6914080015 STC BUSINESS SYSTEMS
6915080016 BARRISTER INFO SYS CORP
6916080017 NATIONAL SEMICONDUCTOR
6917080018 PIRELLI FOCOM NETWORKS
6918080019 GENERAL ELECTRIC CORPORATION
691908001A TIARA/ 10NET
692008001B DATA GENERAL
692108001C KDD-KOKUSAI DEBNSIN DENWA CO.
692208001D ABLE COMMUNICATIONS INC.
692308001E APOLLO COMPUTER INC.
692408001F SHARP CORPORATION
6925080020 SUN MICROSYSTEMS INC.
6926080021 3M COMPANY
6927080022 NBI INC.
6928080023 Panasonic Communications Co., Ltd.
6929080024 10NET COMMUNICATIONS/DCA
6930080025 CONTROL DATA
6931080026 NORSK DATA A.S.
6932080027 CADMUS COMPUTER SYSTEMS
6933080028 Texas Instruments
6934080029 MEGATEK CORPORATION
693508002A MOSAIC TECHNOLOGIES INC.
693608002B DIGITAL EQUIPMENT CORPORATION
693708002C BRITTON LEE INC.
693808002D LAN-TEC INC.
693908002E METAPHOR COMPUTER SYSTEMS
694008002F PRIME COMPUTER INC.
6941080030 NETWORK RESEARCH CORPORATION
6942080030 CERN
6943080030 ROYAL MELBOURNE INST OF TECH
6944080031 LITTLE MACHINES INC.
6945080032 TIGAN INCORPORATED
6946080033 BAUSCH & LOMB
6947080034 FILENET CORPORATION
6948080035 MICROFIVE CORPORATION
6949080036 INTERGRAPH CORPORATION
6950080037 FUJI-XEROX CO. LTD.
6951080038 CII HONEYWELL BULL
6952080039 SPIDER SYSTEMS LIMITED
695308003A ORCATECH INC.
695408003B TORUS SYSTEMS LIMITED
695508003C SCHLUMBERGER WELL SERVICES
695608003D CADNETIX CORPORATIONS
695708003E CODEX CORPORATION
695808003F FRED KOSCHARA ENTERPRISES
6959080040 FERRANTI COMPUTER SYS. LIMITED
6960080041 RACAL-MILGO INFORMATION SYS..
6961080042 JAPAN MACNICS CORP.
6962080043 PIXEL COMPUTER INC.
6963080044 DAVID SYSTEMS INC.
6964080045 CONCURRENT COMPUTER CORP.
6965080046 SONY CORPORATION LTD.
6966080047 SEQUENT COMPUTER SYSTEMS INC.
6967080048 EUROTHERM GAUGING SYSTEMS
6968080049 UNIVATION
696908004A BANYAN SYSTEMS INC.
697008004B PLANNING RESEARCH CORP.
697108004C HYDRA COMPUTER SYSTEMS INC.
697208004D CORVUS SYSTEMS INC.
697308004E 3COM EUROPE LTD.
697408004F CYGNET SYSTEMS
6975080050 DAISY SYSTEMS CORP.
6976080051 EXPERDATA
6977080052 INSYSTEC
6978080053 MIDDLE EAST TECH. UNIVERSITY
6979080055 STANFORD TELECOMM. INC.
6980080056 STANFORD LINEAR ACCEL. CENTER
6981080057 EVANS & SUTHERLAND
6982080058 SYSTEMS CONCEPTS
6983080059 A/S MYCRON
698408005A IBM CORPORATION
698508005B VTA TECHNOLOGIES INC.
698608005C FOUR PHASE SYSTEMS
698708005D GOULD INC.
698808005E COUNTERPOINT COMPUTER INC.
698908005F SABER TECHNOLOGY CORP.
6990080060 INDUSTRIAL NETWORKING INC.
6991080061 JAROGATE LTD.
6992080062 GENERAL DYNAMICS
6993080063 PLESSEY
6994080064 AUTOPHON AG
6995080065 GENRAD INC.
6996080066 AGFA CORPORATION
6997080067 COMDESIGN
6998080068 RIDGE COMPUTERS
6999080069 SILICON GRAPHICS INC.
700008006A ATT BELL LABORATORIES
700108006B ACCEL TECHNOLOGIES INC.
700208006C SUNTEK TECHNOLOGY INT'L
700308006D WHITECHAPEL COMPUTER WORKS
700408006E MASSCOMP
700508006F PHILIPS APELDOORN B.V.
7006080070 MITSUBISHI ELECTRIC CORP.
7007080071 MATRA (DSIE)
7008080072 XEROX CORP UNIV GRANT PROGRAM
7009080073 TECMAR INC.
7010080074 CASIO COMPUTER CO. LTD.
7011080075 DANSK DATA ELECTRONIK
7012080076 PC LAN TECHNOLOGIES
7013080077 TSL COMMUNICATIONS LTD.
7014080078 ACCELL CORPORATION
7015080079 THE DROID WORKS
701608007A INDATA
701708007B SANYO ELECTRIC CO. LTD.
701808007C VITALINK COMMUNICATIONS CORP.
701908007E AMALGAMATED WIRELESS(AUS) LTD
702008007F CARNEGIE-MELLON UNIVERSITY
7021080080 AES DATA INC.
7022080081 ,ASTECH INC.
7023080082 VERITAS SOFTWARE
7024080083 Seiko Instruments Inc.
7025080084 TOMEN ELECTRONICS CORP.
7026080085 ELXSI
7027080086 KONICA MINOLTA HOLDINGS, INC.
7028080087 XYPLEX
7029080088 MCDATA CORPORATION
7030080089 KINETICS
703108008A PERFORMANCE TECHNOLOGY
703208008B PYRAMID TECHNOLOGY CORP.
703308008C NETWORK RESEARCH CORPORATION
703408008D XYVISION INC.
703508008E TANDEM COMPUTERS
703608008F CHIPCOM CORPORATION
7037080090 SONOMA SYSTEMS
7038081443 UNIBRAIN S.A.
703908BBCC AK-NORD EDV VERTRIEBSGES. mbH
704010005A IBM CORPORATION
70411000E8 NATIONAL SEMICONDUCTOR
7042800010 ATT BELL LABORATORIES
7043A06A00 Verilink Corporation
7044AA0000 DIGITAL EQUIPMENT CORPORATION
7045AA0001 DIGITAL EQUIPMENT CORPORATION
7046AA0002 DIGITAL EQUIPMENT CORPORATION
7047AA0003 DIGITAL EQUIPMENT CORPORATION
7048AA0004 DIGITAL EQUIPMENT CORPORATION
diff --git a/drivers/ieee1394/oui2c.sh b/drivers/ieee1394/oui2c.sh
new file mode 100644
index 000000000000..d50dc7a2d087
--- /dev/null
+++ b/drivers/ieee1394/oui2c.sh
@@ -0,0 +1,23 @@
1#!/bin/sh
2
3cat <<EOF
4/* Generated file for OUI database */
5
6#include <linux/config.h>
7
8#ifdef CONFIG_IEEE1394_OUI_DB
9struct oui_list_struct {
10 int oui;
11 char *name;
12} oui_list[] = {
13EOF
14
15while read oui name; do
16 echo " { 0x$oui, \"$name\" },"
17done
18
19cat <<EOF
20};
21
22#endif /* CONFIG_IEEE1394_OUI_DB */
23EOF
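The script above reads oui.db on stdin and emits one { 0x<oui>, "<name>" } initializer per entry, so a full run produces the oui_list[] table that is compiled in when CONFIG_IEEE1394_OUI_DB is set. As a minimal sketch of how such a table can be consumed — the helper name below is hypothetical and the stack's actual lookup code is not part of this hunk — a linear scan over the generated array, compiled in the same translation unit so sizeof() can determine the table length, could look like:

/* Hypothetical helper (not part of this patch): map a 24-bit OUI to its
 * vendor string using the table emitted by oui2c.sh above.  Assumes it
 * lives in the same translation unit as the generated oui_list[] array.
 * Returns NULL when the OUI is not listed. */
static const char *oui_to_name(int oui)
{
	unsigned int i;

	for (i = 0; i < sizeof(oui_list) / sizeof(oui_list[0]); i++)
		if (oui_list[i].oui == oui)
			return oui_list[i].name;
	return NULL;
}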
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
new file mode 100644
index 000000000000..a261d2b0e5ac
--- /dev/null
+++ b/drivers/ieee1394/pcilynx.c
@@ -0,0 +1,1982 @@
1/*
2 * pcilynx.c - Texas Instruments PCILynx driver
3 * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
4 * Stephan Linz <linz@mazet.de>
5 * Manfred Weihs <weihs@ict.tuwien.ac.at>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22/*
23 * Contributions:
24 *
25 * Manfred Weihs <weihs@ict.tuwien.ac.at>
26 * reading bus info block (containing GUID) from serial
27 * eeprom via i2c and storing it in config ROM
28 * Reworked code for initiating bus resets
29 * (long, short, with or without hold-off)
30 * Enhancements in async and iso send code
31 */
32
33#include <linux/config.h>
34#include <linux/kernel.h>
35#include <linux/slab.h>
36#include <linux/interrupt.h>
37#include <linux/wait.h>
38#include <linux/errno.h>
39#include <linux/module.h>
40#include <linux/moduleparam.h>
41#include <linux/init.h>
42#include <linux/pci.h>
43#include <linux/fs.h>
44#include <linux/poll.h>
45#include <linux/kdev_t.h>
46#include <asm/byteorder.h>
47#include <asm/atomic.h>
48#include <asm/io.h>
49#include <asm/uaccess.h>
50#include <asm/irq.h>
51
52#include "csr1212.h"
53#include "ieee1394.h"
54#include "ieee1394_types.h"
55#include "hosts.h"
56#include "ieee1394_core.h"
57#include "highlevel.h"
58#include "pcilynx.h"
59
60#include <linux/i2c.h>
61#include <linux/i2c-algo-bit.h>
62
63/* print general (card independent) information */
64#define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
65/* print card specific information */
66#define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
67
68#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
69#define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
70#define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
71#else
72#define PRINT_GD(level, fmt, args...) do {} while (0)
73#define PRINTD(level, card, fmt, args...) do {} while (0)
74#endif
75
76
77/* Module Parameters */
78static int skip_eeprom = 0;
79module_param(skip_eeprom, int, 0444);
80MODULE_PARM_DESC(skip_eeprom, "Use generic bus info block instead of serial eeprom (default = 0).");
81
82
83static struct hpsb_host_driver lynx_driver;
84static unsigned int card_id;
85
86
87
88/*
89 * I2C stuff
90 */
91
92/* the i2c stuff was inspired by i2c-philips-par.c */
93
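/*
 * In the SERIAL_EEPROM_CONTROL register, bit 0x00000040 drives SCL and bit
 * 0x00000010 drives SDA (as used by the bit-banging helpers below);
 * i2c_driven_state caches the value last written so single bits can be
 * toggled without re-reading the register.
 */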
94static void bit_setscl(void *data, int state)
95{
96 if (state) {
97 ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000040;
98 } else {
99 ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000040;
100 }
101 reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
102}
103
104static void bit_setsda(void *data, int state)
105{
106 if (state) {
107 ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000010;
108 } else {
109 ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000010;
110 }
111 reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
112}
113
114static int bit_getscl(void *data)
115{
116 return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000040;
117}
118
119static int bit_getsda(void *data)
120{
121 return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
122}
123
124static int bit_reg(struct i2c_client *client)
125{
126 return 0;
127}
128
129static int bit_unreg(struct i2c_client *client)
130{
131 return 0;
132}
133
134static struct i2c_algo_bit_data bit_data = {
135 .setsda = bit_setsda,
136 .setscl = bit_setscl,
137 .getsda = bit_getsda,
138 .getscl = bit_getscl,
139 .udelay = 5,
140 .mdelay = 5,
141 .timeout = 100,
142};
143
144static struct i2c_adapter bit_ops = {
145 .id = 0xAA, //FIXME: probably we should get an id in i2c-id.h
146 .client_register = bit_reg,
147 .client_unregister = bit_unreg,
148 .name = "PCILynx I2C",
149};
150
151
152
153/*
154 * PCL handling functions.
155 */
156
157static pcl_t alloc_pcl(struct ti_lynx *lynx)
158{
159 u8 m;
160 int i, j;
161
162 spin_lock(&lynx->lock);
163 /* FIXME - use ffz() to make this readable */
164 for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
165 m = lynx->pcl_bmap[i];
166 for (j = 0; j < 8; j++) {
167 if (m & 1<<j) {
168 continue;
169 }
170 m |= 1<<j;
171 lynx->pcl_bmap[i] = m;
172 spin_unlock(&lynx->lock);
173 return 8 * i + j;
174 }
175 }
176 spin_unlock(&lynx->lock);
177
178 return -1;
179}
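/*
 * A rough, untested sketch of the ffz()-based variant the FIXME in
 * alloc_pcl refers to (assumes the same one-byte-per-eight-PCLs bitmap
 * layout as the loop above):
 *
 *	j = ffz(m);
 *	if (j < 8) {
 *		lynx->pcl_bmap[i] = m | (1 << j);
 *		spin_unlock(&lynx->lock);
 *		return 8 * i + j;
 *	}
 */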
180
181
182#if 0
183static void free_pcl(struct ti_lynx *lynx, pcl_t pclid)
184{
185 int off, bit;
186
187 off = pclid / 8;
188 bit = pclid % 8;
189
190 if (pclid < 0) {
191 return;
192 }
193
194 spin_lock(&lynx->lock);
195 if (lynx->pcl_bmap[off] & 1<<bit) {
196 lynx->pcl_bmap[off] &= ~(1<<bit);
197 } else {
198 PRINT(KERN_ERR, lynx->id,
199 "attempted to free unallocated PCL %d", pclid);
200 }
201 spin_unlock(&lynx->lock);
202}
203
204/* functions useful for debugging */
205static void pretty_print_pcl(const struct ti_pcl *pcl)
206{
207 int i;
208
209 printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
210 pcl->next, pcl->user_data, pcl->pcl_status,
211 pcl->remaining_transfer_count, pcl->next_data_buffer);
212
213 printk("PCL");
214 for (i=0; i<13; i++) {
215 printk(" c%x:%08x d%x:%08x",
216 i, pcl->buffer[i].control, i, pcl->buffer[i].pointer);
217 if (!(i & 0x3) && (i != 12)) printk("\nPCL");
218 }
219 printk("\n");
220}
221
222static void print_pcl(const struct ti_lynx *lynx, pcl_t pclid)
223{
224 struct ti_pcl pcl;
225
226 get_pcl(lynx, pclid, &pcl);
227 pretty_print_pcl(&pcl);
228}
229#endif
230
231
232
233/***********************************
234 * IEEE-1394 functionality section *
235 ***********************************/
236
237
238static int get_phy_reg(struct ti_lynx *lynx, int addr)
239{
240 int retval;
241 int i = 0;
242
243 unsigned long flags;
244
245 if (addr > 15) {
246 PRINT(KERN_ERR, lynx->id,
247 "%s: PHY register address %d out of range",
248 __FUNCTION__, addr);
249 return -1;
250 }
251
252 spin_lock_irqsave(&lynx->phy_reg_lock, flags);
253
254 reg_write(lynx, LINK_PHY, LINK_PHY_READ | LINK_PHY_ADDR(addr));
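	/* poll LINK_PHY until the returned register address matches the one
	 * we asked for, i.e. until the PHY read has completed */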
255 do {
256 retval = reg_read(lynx, LINK_PHY);
257
258 if (i > 10000) {
259 PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
260 __FUNCTION__);
261 retval = -1;
262 break;
263 }
264 i++;
265 } while ((retval & 0xf00) != LINK_PHY_RADDR(addr));
266
267 reg_write(lynx, LINK_INT_STATUS, LINK_INT_PHY_REG_RCVD);
268 spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
269
270 if (retval != -1) {
271 return retval & 0xff;
272 } else {
273 return -1;
274 }
275}
276
277static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
278{
279 unsigned long flags;
280
281 if (addr > 15) {
282 PRINT(KERN_ERR, lynx->id,
283 "%s: PHY register address %d out of range", __FUNCTION__, addr);
284 return -1;
285 }
286
287 if (val > 0xff) {
288 PRINT(KERN_ERR, lynx->id,
289 "%s: PHY register value %d out of range", __FUNCTION__, val);
290 return -1;
291 }
292
293 spin_lock_irqsave(&lynx->phy_reg_lock, flags);
294
295 reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr)
296 | LINK_PHY_WDATA(val));
297
298 spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
299
300 return 0;
301}
302
303static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
304{
305 int reg;
306
307 if (page > 7) {
308 PRINT(KERN_ERR, lynx->id,
309 "%s: PHY page %d out of range", __FUNCTION__, page);
310 return -1;
311 }
312
313 reg = get_phy_reg(lynx, 7);
314 if (reg != -1) {
315 reg &= 0x1f;
316 reg |= (page << 5);
317 set_phy_reg(lynx, 7, reg);
318 return 0;
319 } else {
320 return -1;
321 }
322}
323
324#if 0 /* not needed at this time */
325static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
326{
327 int reg;
328
329 if (port > 15) {
330 PRINT(KERN_ERR, lynx->id,
331 "%s: PHY port %d out of range", __FUNCTION__, port);
332 return -1;
333 }
334
335 reg = get_phy_reg(lynx, 7);
336 if (reg != -1) {
337 reg &= 0xf0;
338 reg |= port;
339 set_phy_reg(lynx, 7, reg);
340 return 0;
341 } else {
342 return -1;
343 }
344}
345#endif
346
347static u32 get_phy_vendorid(struct ti_lynx *lynx)
348{
349 u32 pvid = 0;
350 sel_phy_reg_page(lynx, 1);
351 pvid |= (get_phy_reg(lynx, 10) << 16);
352 pvid |= (get_phy_reg(lynx, 11) << 8);
353 pvid |= get_phy_reg(lynx, 12);
354 PRINT(KERN_INFO, lynx->id, "PHY vendor id 0x%06x", pvid);
355 return pvid;
356}
357
358static u32 get_phy_productid(struct ti_lynx *lynx)
359{
360 u32 id = 0;
361 sel_phy_reg_page(lynx, 1);
362 id |= (get_phy_reg(lynx, 13) << 16);
363 id |= (get_phy_reg(lynx, 14) << 8);
364 id |= get_phy_reg(lynx, 15);
365 PRINT(KERN_INFO, lynx->id, "PHY product id 0x%06x", id);
366 return id;
367}
368
369static quadlet_t generate_own_selfid(struct ti_lynx *lynx,
370 struct hpsb_host *host)
371{
372 quadlet_t lsid;
373 char phyreg[7];
374 int i;
375
376 phyreg[0] = lynx->phy_reg0;
377 for (i = 1; i < 7; i++) {
378 phyreg[i] = get_phy_reg(lynx, i);
379 }
380
381 /* FIXME? We assume a TSB21LV03A phy here. This code doesn't support
382 more than 3 ports on the PHY anyway. */
383
384 lsid = 0x80400000 | ((phyreg[0] & 0xfc) << 22);
385 lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
386 lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
387 if (!hpsb_disable_irm)
388 lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dependent) */
389 /* lsid |= 1 << 11; *//* set contender (hack) */
390 lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */
391
392 for (i = 0; i < (phyreg[2] & 0xf); i++) { /* ports */
393 if (phyreg[3 + i] & 0x4) {
394 lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
395 << (6 - i*2);
396 } else {
397 lsid |= 1 << (6 - i*2);
398 }
399 }
400
401 cpu_to_be32s(&lsid);
402 PRINT(KERN_DEBUG, lynx->id, "generated own selfid 0x%x", lsid);
403 return lsid;
404}
405
406static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
407{
408 quadlet_t *q = lynx->rcv_page;
409 int phyid, isroot, size;
410 quadlet_t lsid = 0;
411 int i;
412
413 if (lynx->phy_reg0 == -1 || lynx->selfid_size == -1) return;
414
415 size = lynx->selfid_size;
416 phyid = lynx->phy_reg0;
417
418 i = (size > 16 ? 16 : size) / 4 - 1;
419 while (i >= 0) {
420 cpu_to_be32s(&q[i]);
421 i--;
422 }
423
424 if (!lynx->phyic.reg_1394a) {
425 lsid = generate_own_selfid(lynx, host);
426 }
427
428 isroot = (phyid & 2) != 0;
429 phyid >>= 2;
430 PRINT(KERN_INFO, lynx->id, "SelfID process finished (phyid %d, %s)",
431 phyid, (isroot ? "root" : "not root"));
432 reg_write(lynx, LINK_ID, (0xffc0 | phyid) << 16);
433
434 if (!lynx->phyic.reg_1394a && !size) {
435 hpsb_selfid_received(host, lsid);
436 }
437
438 while (size > 0) {
439 struct selfid *sid = (struct selfid *)q;
440
441 if (!lynx->phyic.reg_1394a && !sid->extended
442 && (sid->phy_id == (phyid + 1))) {
443 hpsb_selfid_received(host, lsid);
444 }
445
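		/* a valid self-ID packet is followed by the bitwise inverse
		 * of its first quadlet, which is what we check here */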
446 if (q[0] == ~q[1]) {
447 PRINT(KERN_DEBUG, lynx->id, "SelfID packet 0x%x rcvd",
448 q[0]);
449 hpsb_selfid_received(host, q[0]);
450 } else {
451 PRINT(KERN_INFO, lynx->id,
452 "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
453 }
454 q += 2;
455 size -= 8;
456 }
457
458 if (!lynx->phyic.reg_1394a && isroot && phyid != 0) {
459 hpsb_selfid_received(host, lsid);
460 }
461
462 hpsb_selfid_complete(host, phyid, isroot);
463
464 if (host->in_bus_reset) return; /* in bus reset again */
465
466	if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER); //FIXME: I do not think we need this here
467 reg_set_bits(lynx, LINK_CONTROL,
468 LINK_CONTROL_RCV_CMP_VALID | LINK_CONTROL_TX_ASYNC_EN
469 | LINK_CONTROL_RX_ASYNC_EN | LINK_CONTROL_CYCTIMEREN);
470}
471
472
473
474/* This must be called with the respective queue_lock held. */
475static void send_next(struct ti_lynx *lynx, int what)
476{
477 struct ti_pcl pcl;
478 struct lynx_send_data *d;
479 struct hpsb_packet *packet;
480
481 d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async);
482 if (!list_empty(&d->pcl_queue)) {
483 PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo");
484 BUG();
485 }
486
487 packet = driver_packet(d->queue.next);
488 list_move_tail(&packet->driver_list, &d->pcl_queue);
489
490 d->header_dma = pci_map_single(lynx->dev, packet->header,
491 packet->header_size, PCI_DMA_TODEVICE);
492 if (packet->data_size) {
493 d->data_dma = pci_map_single(lynx->dev, packet->data,
494 packet->data_size,
495 PCI_DMA_TODEVICE);
496 } else {
497 d->data_dma = 0;
498 }
499
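	/* PCL buffer 0 transmits the packet header, buffer 1 the (optional)
	 * payload; both are handed to the link layer in one PCL run */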
500 pcl.next = PCL_NEXT_INVALID;
501 pcl.async_error_next = PCL_NEXT_INVALID;
502 pcl.pcl_status = 0;
503 pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
504#ifndef __BIG_ENDIAN
505 pcl.buffer[0].control |= PCL_BIGENDIAN;
506#endif
507 pcl.buffer[0].pointer = d->header_dma;
508 pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
509 pcl.buffer[1].pointer = d->data_dma;
510
511 switch (packet->type) {
512 case hpsb_async:
513 pcl.buffer[0].control |= PCL_CMD_XMT;
514 break;
515 case hpsb_iso:
516 pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
517 break;
518 case hpsb_raw:
519 pcl.buffer[0].control |= PCL_CMD_UNFXMT;
520 break;
521 }
522
523 put_pcl(lynx, d->pcl, &pcl);
524 run_pcl(lynx, d->pcl_start, d->channel);
525}
526
527
528/* called from subsystem core */
529static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
530{
531 struct ti_lynx *lynx = host->hostdata;
532 struct lynx_send_data *d;
533 unsigned long flags;
534
535 if (packet->data_size >= 4096) {
536 PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%Zd)",
537 packet->data_size);
538 return -EOVERFLOW;
539 }
540
541 switch (packet->type) {
542 case hpsb_async:
543 case hpsb_raw:
544 d = &lynx->async;
545 break;
546 case hpsb_iso:
547 d = &lynx->iso_send;
548 break;
549 default:
550 PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
551 packet->type);
552 return -EINVAL;
553 }
554
555 if (packet->tcode == TCODE_WRITEQ
556 || packet->tcode == TCODE_READQ_RESPONSE) {
557 cpu_to_be32s(&packet->header[3]);
558 }
559
560 spin_lock_irqsave(&d->queue_lock, flags);
561
562 list_add_tail(&packet->driver_list, &d->queue);
563 if (list_empty(&d->pcl_queue))
564 send_next(lynx, packet->type);
565
566 spin_unlock_irqrestore(&d->queue_lock, flags);
567
568 return 0;
569}
570
571
572/* called from subsystem core */
573static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
574{
575 struct ti_lynx *lynx = host->hostdata;
576 int retval = 0;
577 struct hpsb_packet *packet;
578 LIST_HEAD(packet_list);
579 unsigned long flags;
580 int phy_reg;
581
582 switch (cmd) {
583 case RESET_BUS:
584 if (reg_read(lynx, LINK_INT_STATUS) & LINK_INT_PHY_BUSRESET) {
585 retval = 0;
586 break;
587 }
588
589 switch (arg) {
590 case SHORT_RESET:
591 if (lynx->phyic.reg_1394a) {
592 phy_reg = get_phy_reg(lynx, 5);
593 if (phy_reg == -1) {
594 PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
595 retval = -1;
596 break;
597 }
598 phy_reg |= 0x40;
599
600 PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset) on request");
601
602 lynx->selfid_size = -1;
603 lynx->phy_reg0 = -1;
604 set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
605 break;
606 } else {
607 PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
608 /* fall through to long bus reset */
609 }
610 case LONG_RESET:
611 phy_reg = get_phy_reg(lynx, 1);
612 if (phy_reg == -1) {
613 PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
614 retval = -1;
615 break;
616 }
617 phy_reg |= 0x40;
618
619 PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset) on request");
620
621 lynx->selfid_size = -1;
622 lynx->phy_reg0 = -1;
623 set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
624 break;
625 case SHORT_RESET_NO_FORCE_ROOT:
626 if (lynx->phyic.reg_1394a) {
627 phy_reg = get_phy_reg(lynx, 1);
628 if (phy_reg == -1) {
629 PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
630 retval = -1;
631 break;
632 }
633 if (phy_reg & 0x80) {
634 phy_reg &= ~0x80;
635 set_phy_reg(lynx, 1, phy_reg); /* clear RHB */
636 }
637
638 phy_reg = get_phy_reg(lynx, 5);
639 if (phy_reg == -1) {
640 PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
641 retval = -1;
642 break;
643 }
644 phy_reg |= 0x40;
645
646 PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, no force_root) on request");
647
648 lynx->selfid_size = -1;
649 lynx->phy_reg0 = -1;
650 set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
651 break;
652 } else {
653 PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
654 /* fall through to long bus reset */
655 }
656 case LONG_RESET_NO_FORCE_ROOT:
657 phy_reg = get_phy_reg(lynx, 1);
658 if (phy_reg == -1) {
659 PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
660 retval = -1;
661 break;
662 }
663 phy_reg &= ~0x80;
664 phy_reg |= 0x40;
665
666 PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, no force_root) on request");
667
668 lynx->selfid_size = -1;
669 lynx->phy_reg0 = -1;
670 set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
671 break;
672 case SHORT_RESET_FORCE_ROOT:
673 if (lynx->phyic.reg_1394a) {
674 phy_reg = get_phy_reg(lynx, 1);
675 if (phy_reg == -1) {
676 PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
677 retval = -1;
678 break;
679 }
680 if (!(phy_reg & 0x80)) {
681 phy_reg |= 0x80;
682 set_phy_reg(lynx, 1, phy_reg); /* set RHB */
683 }
684
685 phy_reg = get_phy_reg(lynx, 5);
686 if (phy_reg == -1) {
687 PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
688 retval = -1;
689 break;
690 }
691 phy_reg |= 0x40;
692
693 PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, force_root set) on request");
694
695 lynx->selfid_size = -1;
696 lynx->phy_reg0 = -1;
697 set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
698 break;
699 } else {
700 PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
701 /* fall through to long bus reset */
702 }
703 case LONG_RESET_FORCE_ROOT:
704 phy_reg = get_phy_reg(lynx, 1);
705 if (phy_reg == -1) {
706 PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
707 retval = -1;
708 break;
709 }
710 phy_reg |= 0xc0;
711
712 PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, force_root set) on request");
713
714 lynx->selfid_size = -1;
715 lynx->phy_reg0 = -1;
716 set_phy_reg(lynx, 1, phy_reg); /* set IBR and RHB */
717 break;
718 default:
719 PRINT(KERN_ERR, lynx->id, "unknown argument for reset_bus command %d", arg);
720 retval = -1;
721 }
722
723 break;
724
725 case GET_CYCLE_COUNTER:
726 retval = reg_read(lynx, CYCLE_TIMER);
727 break;
728
729 case SET_CYCLE_COUNTER:
730 reg_write(lynx, CYCLE_TIMER, arg);
731 break;
732
733 case SET_BUS_ID:
734 reg_write(lynx, LINK_ID,
735 (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000));
736 break;
737
738 case ACT_CYCLE_MASTER:
739 if (arg) {
740 reg_set_bits(lynx, LINK_CONTROL,
741 LINK_CONTROL_CYCMASTER);
742 } else {
743 reg_clear_bits(lynx, LINK_CONTROL,
744 LINK_CONTROL_CYCMASTER);
745 }
746 break;
747
748 case CANCEL_REQUESTS:
749 spin_lock_irqsave(&lynx->async.queue_lock, flags);
750
751 reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
752 list_splice(&lynx->async.queue, &packet_list);
753 INIT_LIST_HEAD(&lynx->async.queue);
754
755 if (list_empty(&lynx->async.pcl_queue)) {
756 spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
757 PRINTD(KERN_DEBUG, lynx->id, "no async packet in PCL to cancel");
758 } else {
759 struct ti_pcl pcl;
760 u32 ack;
761 struct hpsb_packet *packet;
762
763			PRINT(KERN_INFO, lynx->id, "cancelling async packet that was already in PCL");
764
765 get_pcl(lynx, lynx->async.pcl, &pcl);
766
767 packet = driver_packet(lynx->async.pcl_queue.next);
768 list_del_init(&packet->driver_list);
769
770 pci_unmap_single(lynx->dev, lynx->async.header_dma,
771 packet->header_size, PCI_DMA_TODEVICE);
772 if (packet->data_size) {
773 pci_unmap_single(lynx->dev, lynx->async.data_dma,
774 packet->data_size, PCI_DMA_TODEVICE);
775 }
776
777 spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
778
779 if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
780 if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
781 ack = (pcl.pcl_status >> 15) & 0xf;
782 PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
783 ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
784 } else {
785 ack = (pcl.pcl_status >> 15) & 0xf;
786 }
787 } else {
788 PRINT(KERN_INFO, lynx->id, "async packet was not completed");
789 ack = ACKX_ABORTED;
790 }
791 hpsb_packet_sent(host, packet, ack);
792 }
793
794 while (!list_empty(&packet_list)) {
795 packet = driver_packet(packet_list.next);
796 list_del_init(&packet->driver_list);
797 hpsb_packet_sent(host, packet, ACKX_ABORTED);
798 }
799
800 break;
801
802 case ISO_LISTEN_CHANNEL:
803 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
804
805 if (lynx->iso_rcv.chan_count++ == 0) {
806 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
807 DMA_WORD1_CMP_ENABLE_MASTER);
808 }
809
810 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
811 break;
812
813 case ISO_UNLISTEN_CHANNEL:
814 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
815
816 if (--lynx->iso_rcv.chan_count == 0) {
817 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
818 0);
819 }
820
821 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
822 break;
823
824 default:
825 PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
826 retval = -1;
827 }
828
829 return retval;
830}
831
832
833/***************************************
834 * IEEE-1394 functionality section END *
835 ***************************************/
836
837#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
838/* VFS functions for local bus / aux device access. Access to these
839 * is implemented as character devices instead of block devices
840 * because no buffering is wanted for this. Therefore llseek (from
841 * VFS) can be used for these char devices with obvious effects.
842 */
843static int mem_open(struct inode*, struct file*);
844static int mem_release(struct inode*, struct file*);
845static unsigned int aux_poll(struct file*, struct poll_table_struct*);
846static loff_t mem_llseek(struct file*, loff_t, int);
847static ssize_t mem_read (struct file*, char*, size_t, loff_t*);
848static ssize_t mem_write(struct file*, const char*, size_t, loff_t*);
849
850
851static struct file_operations aux_ops = {
852 .owner = THIS_MODULE,
853 .read = mem_read,
854 .write = mem_write,
855 .poll = aux_poll,
856 .llseek = mem_llseek,
857 .open = mem_open,
858 .release = mem_release,
859};
860
861
862static void aux_setup_pcls(struct ti_lynx *lynx)
863{
864 struct ti_pcl pcl;
865
866 pcl.next = PCL_NEXT_INVALID;
867 pcl.user_data = pcl_bus(lynx, lynx->dmem_pcl);
868 put_pcl(lynx, lynx->dmem_pcl, &pcl);
869}
870
871static int mem_open(struct inode *inode, struct file *file)
872{
873 int cid = iminor(inode);
874 enum { t_rom, t_aux, t_ram } type;
875 struct memdata *md;
876
877 if (cid < PCILYNX_MINOR_AUX_START) {
878 /* just for completeness */
879 return -ENXIO;
880 } else if (cid < PCILYNX_MINOR_ROM_START) {
881 cid -= PCILYNX_MINOR_AUX_START;
882 if (cid >= num_of_cards || !cards[cid].aux_port)
883 return -ENXIO;
884 type = t_aux;
885 } else if (cid < PCILYNX_MINOR_RAM_START) {
886 cid -= PCILYNX_MINOR_ROM_START;
887 if (cid >= num_of_cards || !cards[cid].local_rom)
888 return -ENXIO;
889 type = t_rom;
890 } else {
891 /* WARNING: Know what you are doing when opening RAM.
892 * It is currently used inside the driver! */
893 cid -= PCILYNX_MINOR_RAM_START;
894 if (cid >= num_of_cards || !cards[cid].local_ram)
895 return -ENXIO;
896 type = t_ram;
897 }
898
899 md = (struct memdata *)kmalloc(sizeof(struct memdata), SLAB_KERNEL);
900 if (md == NULL)
901 return -ENOMEM;
902
903 md->lynx = &cards[cid];
904 md->cid = cid;
905
906 switch (type) {
907 case t_rom:
908 md->type = rom;
909 break;
910 case t_ram:
911 md->type = ram;
912 break;
913 case t_aux:
914 atomic_set(&md->aux_intr_last_seen,
915 atomic_read(&cards[cid].aux_intr_seen));
916 md->type = aux;
917 break;
918 }
919
920 file->private_data = md;
921
922 return 0;
923}
924
925static int mem_release(struct inode *inode, struct file *file)
926{
927 kfree(file->private_data);
928 return 0;
929}
930
931static unsigned int aux_poll(struct file *file, poll_table *pt)
932{
933 struct memdata *md = (struct memdata *)file->private_data;
934 int cid = md->cid;
935 unsigned int mask;
936
937 /* reading and writing is always allowed */
938 mask = POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
939
940 if (md->type == aux) {
941 poll_wait(file, &cards[cid].aux_intr_wait, pt);
942
943 if (atomic_read(&md->aux_intr_last_seen)
944 != atomic_read(&cards[cid].aux_intr_seen)) {
945 mask |= POLLPRI;
946 atomic_inc(&md->aux_intr_last_seen);
947 }
948 }
949
950 return mask;
951}
952
953loff_t mem_llseek(struct file *file, loff_t offs, int orig)
954{
955 loff_t newoffs;
956
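	/* orig: 0 = SEEK_SET, 1 = SEEK_CUR, 2 = SEEK_END (end of the
	 * PCILYNX_MAX_MEMORY sized address space) */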
957 switch (orig) {
958 case 0:
959 newoffs = offs;
960 break;
961 case 1:
962 newoffs = offs + file->f_pos;
963 break;
964 case 2:
965 newoffs = PCILYNX_MAX_MEMORY + 1 + offs;
966 break;
967 default:
968 return -EINVAL;
969 }
970
971 if (newoffs < 0 || newoffs > PCILYNX_MAX_MEMORY + 1) return -EINVAL;
972
973 file->f_pos = newoffs;
974 return newoffs;
975}
976
977/*
978 * do not DMA if count is too small because this will have a serious impact
979 * on performance - the value 2400 was found by experiment and may not work
980 * as well everywhere as it does here - use the mem_mindma module parameter to change it
981 */
982static short mem_mindma = 2400;
983module_param(mem_mindma, short, 0444);
984MODULE_PARM_DESC(mem_mindma, "Minimum amount of data required to use DMA");
985
986static ssize_t mem_dmaread(struct memdata *md, u32 physbuf, ssize_t count,
987 int offset)
988{
989 pcltmp_t pcltmp;
990 struct ti_pcl *pcl;
991 size_t retval;
992 int i;
993 DECLARE_WAITQUEUE(wait, current);
994
995 count &= ~3;
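	/* a single PCL has 13 data buffer entries of at most 4092 bytes each,
	 * hence the 53196 byte ceiling per DMA run */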
996 count = min(count, 53196);
997 retval = count;
998
999 if (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
1000 & DMA_CHAN_CTRL_BUSY) {
1001 PRINT(KERN_WARNING, md->lynx->id, "DMA ALREADY ACTIVE!");
1002 }
1003
1004 reg_write(md->lynx, LBUS_ADDR, md->type | offset);
1005
1006 pcl = edit_pcl(md->lynx, md->lynx->dmem_pcl, &pcltmp);
1007 pcl->buffer[0].control = PCL_CMD_LBUS_TO_PCI | min(count, 4092);
1008 pcl->buffer[0].pointer = physbuf;
1009 count -= 4092;
1010
1011 i = 0;
1012 while (count > 0) {
1013 i++;
1014 pcl->buffer[i].control = min(count, 4092);
1015 pcl->buffer[i].pointer = physbuf + i * 4092;
1016 count -= 4092;
1017 }
1018 pcl->buffer[i].control |= PCL_LAST_BUFF;
1019 commit_pcl(md->lynx, md->lynx->dmem_pcl, &pcltmp);
1020
1021 set_current_state(TASK_INTERRUPTIBLE);
1022 add_wait_queue(&md->lynx->mem_dma_intr_wait, &wait);
1023 run_sub_pcl(md->lynx, md->lynx->dmem_pcl, 2, CHANNEL_LOCALBUS);
1024
1025 schedule();
1026 while (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
1027 & DMA_CHAN_CTRL_BUSY) {
1028 if (signal_pending(current)) {
1029 retval = -EINTR;
1030 break;
1031 }
1032 schedule();
1033 }
1034
1035 reg_write(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS), 0);
1036 remove_wait_queue(&md->lynx->mem_dma_intr_wait, &wait);
1037
1038 if (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
1039 & DMA_CHAN_CTRL_BUSY) {
1040 PRINT(KERN_ERR, md->lynx->id, "DMA STILL ACTIVE!");
1041 }
1042
1043 return retval;
1044}
1045
1046static ssize_t mem_read(struct file *file, char *buffer, size_t count,
1047 loff_t *offset)
1048{
1049 struct memdata *md = (struct memdata *)file->private_data;
1050 ssize_t bcount;
1051 size_t alignfix;
1052 loff_t off = *offset; /* avoid useless 64bit-arithmetic */
1053 ssize_t retval;
1054 void *membase;
1055
1056 if ((off + count) > PCILYNX_MAX_MEMORY+1) {
1057 count = PCILYNX_MAX_MEMORY+1 - off;
1058 }
1059 if (count == 0 || off > PCILYNX_MAX_MEMORY) {
1060 return -ENOSPC;
1061 }
1062
1063 switch (md->type) {
1064 case rom:
1065 membase = md->lynx->local_rom;
1066 break;
1067 case ram:
1068 membase = md->lynx->local_ram;
1069 break;
1070 case aux:
1071 membase = md->lynx->aux_port;
1072 break;
1073 default:
1074 panic("pcilynx%d: unsupported md->type %d in %s",
1075 md->lynx->id, md->type, __FUNCTION__);
1076 }
1077
1078 down(&md->lynx->mem_dma_mutex);
1079
1080 if (count < mem_mindma) {
1081 memcpy_fromio(md->lynx->mem_dma_buffer, membase+off, count);
1082 goto out;
1083 }
1084
1085 bcount = count;
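	/* copy up to three leading bytes by PIO so that the DMA reads below
	 * start on a four byte boundary */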
1086 alignfix = 4 - (off % 4);
1087 if (alignfix != 4) {
1088 if (bcount < alignfix) {
1089 alignfix = bcount;
1090 }
1091 memcpy_fromio(md->lynx->mem_dma_buffer, membase+off,
1092 alignfix);
1093 if (bcount == alignfix) {
1094 goto out;
1095 }
1096 bcount -= alignfix;
1097 off += alignfix;
1098 }
1099
1100 while (bcount >= 4) {
1101 retval = mem_dmaread(md, md->lynx->mem_dma_buffer_dma
1102 + count - bcount, bcount, off);
1103 if (retval < 0) return retval;
1104
1105 bcount -= retval;
1106 off += retval;
1107 }
1108
1109 if (bcount) {
1110 memcpy_fromio(md->lynx->mem_dma_buffer + count - bcount,
1111 membase+off, bcount);
1112 }
1113
1114 out:
1115 retval = copy_to_user(buffer, md->lynx->mem_dma_buffer, count);
1116 up(&md->lynx->mem_dma_mutex);
1117
1118 if (retval) return -EFAULT;
1119 *offset += count;
1120 return count;
1121}
1122
1123
1124static ssize_t mem_write(struct file *file, const char *buffer, size_t count,
1125 loff_t *offset)
1126{
1127 struct memdata *md = (struct memdata *)file->private_data;
1128
1129 if (((*offset) + count) > PCILYNX_MAX_MEMORY+1) {
1130 count = PCILYNX_MAX_MEMORY+1 - *offset;
1131 }
1132 if (count == 0 || *offset > PCILYNX_MAX_MEMORY) {
1133 return -ENOSPC;
1134 }
1135
1136 /* FIXME: dereferencing pointers to PCI mem doesn't work everywhere */
1137 switch (md->type) {
1138 case aux:
1139 if (copy_from_user(md->lynx->aux_port+(*offset), buffer, count))
1140 return -EFAULT;
1141 break;
1142 case ram:
1143 if (copy_from_user(md->lynx->local_ram+(*offset), buffer, count))
1144 return -EFAULT;
1145 break;
1146 case rom:
1147 /* the ROM may be writeable */
1148 if (copy_from_user(md->lynx->local_rom+(*offset), buffer, count))
1149 return -EFAULT;
1150 break;
1151 }
1152
1153 file->f_pos += count;
1154 return count;
1155}
1156#endif /* CONFIG_IEEE1394_PCILYNX_PORTS */
1157
1158
1159/********************************************************
1160 * Global stuff (interrupt handler, init/shutdown code) *
1161 ********************************************************/
1162
1163
1164static irqreturn_t lynx_irq_handler(int irq, void *dev_id,
1165 struct pt_regs *regs_are_unused)
1166{
1167 struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
1168 struct hpsb_host *host = lynx->host;
1169 u32 intmask;
1170 u32 linkint;
1171
1172 linkint = reg_read(lynx, LINK_INT_STATUS);
1173 intmask = reg_read(lynx, PCI_INT_STATUS);
1174
1175 if (!(intmask & PCI_INT_INT_PEND))
1176 return IRQ_NONE;
1177
1178 PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
1179 linkint);
1180
1181 reg_write(lynx, LINK_INT_STATUS, linkint);
1182 reg_write(lynx, PCI_INT_STATUS, intmask);
1183
1184#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1185 if (intmask & PCI_INT_AUX_INT) {
1186 atomic_inc(&lynx->aux_intr_seen);
1187 wake_up_interruptible(&lynx->aux_intr_wait);
1188 }
1189
1190 if (intmask & PCI_INT_DMA_HLT(CHANNEL_LOCALBUS)) {
1191 wake_up_interruptible(&lynx->mem_dma_intr_wait);
1192 }
1193#endif
1194
1195
1196 if (intmask & PCI_INT_1394) {
1197 if (linkint & LINK_INT_PHY_TIMEOUT) {
1198 PRINT(KERN_INFO, lynx->id, "PHY timeout occurred");
1199 }
1200 if (linkint & LINK_INT_PHY_BUSRESET) {
1201 PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
1202 lynx->selfid_size = -1;
1203 lynx->phy_reg0 = -1;
1204 if (!host->in_bus_reset)
1205 hpsb_bus_reset(host);
1206 }
1207 if (linkint & LINK_INT_PHY_REG_RCVD) {
1208 u32 reg;
1209
1210 spin_lock(&lynx->phy_reg_lock);
1211 reg = reg_read(lynx, LINK_PHY);
1212 spin_unlock(&lynx->phy_reg_lock);
1213
1214 if (!host->in_bus_reset) {
1215 PRINT(KERN_INFO, lynx->id,
1216 "phy reg received without reset");
1217 } else if (reg & 0xf00) {
1218 PRINT(KERN_INFO, lynx->id,
1219 "unsolicited phy reg %d received",
1220 (reg >> 8) & 0xf);
1221 } else {
1222 lynx->phy_reg0 = reg & 0xff;
1223 handle_selfid(lynx, host);
1224 }
1225 }
1226 if (linkint & LINK_INT_ISO_STUCK) {
1227 PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
1228 }
1229 if (linkint & LINK_INT_ASYNC_STUCK) {
1230 PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
1231 }
1232 if (linkint & LINK_INT_SENT_REJECT) {
1233 PRINT(KERN_INFO, lynx->id, "sent reject");
1234 }
1235 if (linkint & LINK_INT_TX_INVALID_TC) {
1236 PRINT(KERN_INFO, lynx->id, "invalid transaction code");
1237 }
1238 if (linkint & LINK_INT_GRF_OVERFLOW) {
1239 /* flush FIFO if overflow happens during reset */
1240 if (host->in_bus_reset)
1241 reg_write(lynx, FIFO_CONTROL,
1242 FIFO_CONTROL_GRF_FLUSH);
1243 PRINT(KERN_INFO, lynx->id, "GRF overflow");
1244 }
1245 if (linkint & LINK_INT_ITF_UNDERFLOW) {
1246 PRINT(KERN_INFO, lynx->id, "ITF underflow");
1247 }
1248 if (linkint & LINK_INT_ATF_UNDERFLOW) {
1249 PRINT(KERN_INFO, lynx->id, "ATF underflow");
1250 }
1251 }
1252
1253 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
1254 PRINTD(KERN_DEBUG, lynx->id, "iso receive");
1255
1256 spin_lock(&lynx->iso_rcv.lock);
1257
1258 lynx->iso_rcv.stat[lynx->iso_rcv.next] =
1259 reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));
1260
1261 lynx->iso_rcv.used++;
1262 lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;
1263
1264 if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
1265 || !lynx->iso_rcv.chan_count) {
1266 PRINTD(KERN_DEBUG, lynx->id, "stopped");
1267 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
1268 }
1269
1270 run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
1271 CHANNEL_ISO_RCV);
1272
1273 spin_unlock(&lynx->iso_rcv.lock);
1274
1275 tasklet_schedule(&lynx->iso_rcv.tq);
1276 }
1277
1278 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
1279 PRINTD(KERN_DEBUG, lynx->id, "async sent");
1280 spin_lock(&lynx->async.queue_lock);
1281
1282 if (list_empty(&lynx->async.pcl_queue)) {
1283 spin_unlock(&lynx->async.queue_lock);
1284 PRINT(KERN_WARNING, lynx->id, "async dma halted, but no queued packet (maybe it was cancelled)");
1285 } else {
1286 struct ti_pcl pcl;
1287 u32 ack;
1288 struct hpsb_packet *packet;
1289
1290 get_pcl(lynx, lynx->async.pcl, &pcl);
1291
1292 packet = driver_packet(lynx->async.pcl_queue.next);
1293 list_del_init(&packet->driver_list);
1294
1295 pci_unmap_single(lynx->dev, lynx->async.header_dma,
1296 packet->header_size, PCI_DMA_TODEVICE);
1297 if (packet->data_size) {
1298 pci_unmap_single(lynx->dev, lynx->async.data_dma,
1299 packet->data_size, PCI_DMA_TODEVICE);
1300 }
1301
1302 if (!list_empty(&lynx->async.queue)) {
1303 send_next(lynx, hpsb_async);
1304 }
1305
1306 spin_unlock(&lynx->async.queue_lock);
1307
1308 if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
1309 if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
1310 ack = (pcl.pcl_status >> 15) & 0xf;
1311 PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
1312 ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
1313 } else {
1314 ack = (pcl.pcl_status >> 15) & 0xf;
1315 }
1316 } else {
1317 PRINT(KERN_INFO, lynx->id, "async packet was not completed");
1318 ack = ACKX_SEND_ERROR;
1319 }
1320 hpsb_packet_sent(host, packet, ack);
1321 }
1322 }
1323
1324 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
1325 PRINTD(KERN_DEBUG, lynx->id, "iso sent");
1326 spin_lock(&lynx->iso_send.queue_lock);
1327
1328 if (list_empty(&lynx->iso_send.pcl_queue)) {
1329 spin_unlock(&lynx->iso_send.queue_lock);
1330 PRINT(KERN_ERR, lynx->id, "iso send dma halted, but no queued packet");
1331 } else {
1332 struct ti_pcl pcl;
1333 u32 ack;
1334 struct hpsb_packet *packet;
1335
1336 get_pcl(lynx, lynx->iso_send.pcl, &pcl);
1337
1338 packet = driver_packet(lynx->iso_send.pcl_queue.next);
1339 list_del_init(&packet->driver_list);
1340
1341 pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
1342 packet->header_size, PCI_DMA_TODEVICE);
1343 if (packet->data_size) {
1344 pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
1345 packet->data_size, PCI_DMA_TODEVICE);
1346 }
1347
1348 if (!list_empty(&lynx->iso_send.queue)) {
1349 send_next(lynx, hpsb_iso);
1350 }
1351
1352 spin_unlock(&lynx->iso_send.queue_lock);
1353
1354 if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
1355 if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
1356 ack = (pcl.pcl_status >> 15) & 0xf;
1357 PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
1358 ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
1359 } else {
1360 ack = (pcl.pcl_status >> 15) & 0xf;
1361 }
1362 } else {
1363 PRINT(KERN_INFO, lynx->id, "iso send packet was not completed");
1364 ack = ACKX_SEND_ERROR;
1365 }
1366
1367 hpsb_packet_sent(host, packet, ack); //FIXME: maybe we should just use ACK_COMPLETE and ACKX_SEND_ERROR
1368 }
1369 }
1370
1371 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
1372 /* general receive DMA completed */
1373 int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));
1374
1375 PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
1376 stat & 0x1fff);
1377
1378 if (stat & DMA_CHAN_STAT_SELFID) {
1379 lynx->selfid_size = stat & 0x1fff;
1380 handle_selfid(lynx, host);
1381 } else {
1382 quadlet_t *q_data = lynx->rcv_page;
1383 if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
1384 || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
1385 cpu_to_be32s(q_data + 3);
1386 }
1387 hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
1388 }
1389
1390 run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
1391 }
1392
1393 return IRQ_HANDLED;
1394}
1395
1396
1397static void iso_rcv_bh(struct ti_lynx *lynx)
1398{
1399 unsigned int idx;
1400 quadlet_t *data;
1401 unsigned long flags;
1402
1403 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
1404
1405 while (lynx->iso_rcv.used) {
1406 idx = lynx->iso_rcv.last;
1407 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
1408
1409 data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
1410 + (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;
1411
1412 if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
1413 PRINT(KERN_ERR, lynx->id,
1414 "iso length mismatch 0x%08x/0x%08x", *data,
1415 lynx->iso_rcv.stat[idx]);
1416 }
1417
1418 if (lynx->iso_rcv.stat[idx]
1419 & (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
1420 PRINT(KERN_INFO, lynx->id,
1421 "iso receive error on %d to 0x%p", idx, data);
1422 } else {
1423 hpsb_packet_received(lynx->host, data,
1424 lynx->iso_rcv.stat[idx] & 0x1fff,
1425 0);
1426 }
1427
1428 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
1429 lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
1430 lynx->iso_rcv.used--;
1431 }
1432
1433 if (lynx->iso_rcv.chan_count) {
1434 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
1435 DMA_WORD1_CMP_ENABLE_MASTER);
1436 }
1437 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
1438}
1439
1440
1441static void remove_card(struct pci_dev *dev)
1442{
1443 struct ti_lynx *lynx;
1444 struct device *lynx_dev;
1445 int i;
1446
1447 lynx = pci_get_drvdata(dev);
1448 if (!lynx) return;
1449 pci_set_drvdata(dev, NULL);
1450
1451 lynx_dev = get_device(&lynx->host->device);
1452
1453 switch (lynx->state) {
1454 case is_host:
1455 reg_write(lynx, PCI_INT_ENABLE, 0);
1456 hpsb_remove_host(lynx->host);
1457 case have_intr:
1458 reg_write(lynx, PCI_INT_ENABLE, 0);
1459 free_irq(lynx->dev->irq, lynx);
1460
1461 /* Disable IRM Contender and LCtrl */
1462 if (lynx->phyic.reg_1394a)
1463 set_phy_reg(lynx, 4, ~0xc0 & get_phy_reg(lynx, 4));
1464
1465 /* Let all other nodes know to ignore us */
1466 lynx_devctl(lynx->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
1467
1468 case have_iomappings:
1469 reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
1470 /* Fix buggy cards with autoboot pin not tied low: */
1471 reg_write(lynx, DMA0_CHAN_CTRL, 0);
1472 iounmap(lynx->registers);
1473 iounmap(lynx->local_rom);
1474 iounmap(lynx->local_ram);
1475 iounmap(lynx->aux_port);
1476 case have_1394_buffers:
1477 for (i = 0; i < ISORCV_PAGES; i++) {
1478 if (lynx->iso_rcv.page[i]) {
1479 pci_free_consistent(lynx->dev, PAGE_SIZE,
1480 lynx->iso_rcv.page[i],
1481 lynx->iso_rcv.page_dma[i]);
1482 }
1483 }
1484 pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
1485 lynx->rcv_page_dma);
1486 case have_aux_buf:
1487#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1488 pci_free_consistent(lynx->dev, 65536, lynx->mem_dma_buffer,
1489 lynx->mem_dma_buffer_dma);
1490#endif
1491 case have_pcl_mem:
1492#ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
1493 pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
1494 lynx->pcl_mem_dma);
1495#endif
1496 case clear:
1497 /* do nothing - already freed */
1498 ;
1499 }
1500
1501 tasklet_kill(&lynx->iso_rcv.tq);
1502
1503 if (lynx_dev)
1504 put_device(lynx_dev);
1505}
1506
1507
1508static int __devinit add_card(struct pci_dev *dev,
1509 const struct pci_device_id *devid_is_unused)
1510{
1511#define FAIL(fmt, args...) do { \
1512 PRINT_G(KERN_ERR, fmt , ## args); \
1513 remove_card(dev); \
1514 return error; \
1515 } while (0)
1516
1517 char irq_buf[16];
1518 struct hpsb_host *host;
1519 struct ti_lynx *lynx; /* shortcut to currently handled device */
1520 struct ti_pcl pcl;
1521 u32 *pcli;
1522 int i;
1523 int error;
1524
1525 error = -ENXIO;
1526
1527 if (pci_set_dma_mask(dev, 0xffffffff))
1528 FAIL("DMA address limits not supported for PCILynx hardware");
1529 if (pci_enable_device(dev))
1530 FAIL("failed to enable PCILynx hardware");
1531 pci_set_master(dev);
1532
1533 error = -ENOMEM;
1534
1535 host = hpsb_alloc_host(&lynx_driver, sizeof(struct ti_lynx), &dev->dev);
1536 if (!host) FAIL("failed to allocate control structure memory");
1537
1538 lynx = host->hostdata;
1539 lynx->id = card_id++;
1540 lynx->dev = dev;
1541 lynx->state = clear;
1542 lynx->host = host;
1543 host->pdev = dev;
1544 pci_set_drvdata(dev, lynx);
1545
1546 spin_lock_init(&lynx->lock);
1547 spin_lock_init(&lynx->phy_reg_lock);
1548
1549#ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
1550 lynx->pcl_mem = pci_alloc_consistent(dev, LOCALRAM_SIZE,
1551 &lynx->pcl_mem_dma);
1552
1553 if (lynx->pcl_mem != NULL) {
1554 lynx->state = have_pcl_mem;
1555 PRINT(KERN_INFO, lynx->id,
1556 "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
1557 lynx->pcl_mem);
1558 } else {
1559 FAIL("failed to allocate PCL memory area");
1560 }
1561#endif
1562
1563#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1564 lynx->mem_dma_buffer = pci_alloc_consistent(dev, 65536,
1565 &lynx->mem_dma_buffer_dma);
1566 if (lynx->mem_dma_buffer == NULL) {
1567 FAIL("failed to allocate DMA buffer for aux");
1568 }
1569 lynx->state = have_aux_buf;
1570#endif
1571
1572 lynx->rcv_page = pci_alloc_consistent(dev, PAGE_SIZE,
1573 &lynx->rcv_page_dma);
1574 if (lynx->rcv_page == NULL) {
1575 FAIL("failed to allocate receive buffer");
1576 }
1577 lynx->state = have_1394_buffers;
1578
1579 for (i = 0; i < ISORCV_PAGES; i++) {
1580 lynx->iso_rcv.page[i] =
1581 pci_alloc_consistent(dev, PAGE_SIZE,
1582 &lynx->iso_rcv.page_dma[i]);
1583 if (lynx->iso_rcv.page[i] == NULL) {
1584 FAIL("failed to allocate iso receive buffers");
1585 }
1586 }
1587
1588 lynx->registers = ioremap_nocache(pci_resource_start(dev,0),
1589 PCILYNX_MAX_REGISTER);
1590 lynx->local_ram = ioremap(pci_resource_start(dev,1), PCILYNX_MAX_MEMORY);
1591 lynx->aux_port = ioremap(pci_resource_start(dev,2), PCILYNX_MAX_MEMORY);
1592 lynx->local_rom = ioremap(pci_resource_start(dev,PCI_ROM_RESOURCE),
1593 PCILYNX_MAX_MEMORY);
1594 lynx->state = have_iomappings;
1595
1596 if (lynx->registers == NULL) {
1597 FAIL("failed to remap registers - card not accessible");
1598 }
1599
1600#ifdef CONFIG_IEEE1394_PCILYNX_LOCALRAM
1601 if (lynx->local_ram == NULL) {
1602 FAIL("failed to remap local RAM which is required for "
1603 "operation");
1604 }
1605#endif
1606
1607 reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
1608 /* Fix buggy cards with autoboot pin not tied low: */
1609 reg_write(lynx, DMA0_CHAN_CTRL, 0);
1610
1611#ifndef __sparc__
1612 sprintf (irq_buf, "%d", dev->irq);
1613#else
1614 sprintf (irq_buf, "%s", __irq_itoa(dev->irq));
1615#endif
1616
1617 if (!request_irq(dev->irq, lynx_irq_handler, SA_SHIRQ,
1618 PCILYNX_DRIVER_NAME, lynx)) {
1619 PRINT(KERN_INFO, lynx->id, "allocated interrupt %s", irq_buf);
1620 lynx->state = have_intr;
1621 } else {
1622 FAIL("failed to allocate shared interrupt %s", irq_buf);
1623 }
1624
1625	/* alloc_pcl return values are not checked; it is expected that the
1626	 * provided PCL space is sufficient for the initial allocations */
1627#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1628 if (lynx->aux_port != NULL) {
1629 lynx->dmem_pcl = alloc_pcl(lynx);
1630 aux_setup_pcls(lynx);
1631 sema_init(&lynx->mem_dma_mutex, 1);
1632 }
1633#endif
1634 lynx->rcv_pcl = alloc_pcl(lynx);
1635 lynx->rcv_pcl_start = alloc_pcl(lynx);
1636 lynx->async.pcl = alloc_pcl(lynx);
1637 lynx->async.pcl_start = alloc_pcl(lynx);
1638 lynx->iso_send.pcl = alloc_pcl(lynx);
1639 lynx->iso_send.pcl_start = alloc_pcl(lynx);
1640
1641 for (i = 0; i < NUM_ISORCV_PCL; i++) {
1642 lynx->iso_rcv.pcl[i] = alloc_pcl(lynx);
1643 }
1644 lynx->iso_rcv.pcl_start = alloc_pcl(lynx);
1645
1646 /* all allocations successful - simple init stuff follows */
1647
1648 reg_write(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
1649
1650#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1651 reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_AUX_INT);
1652 init_waitqueue_head(&lynx->mem_dma_intr_wait);
1653 init_waitqueue_head(&lynx->aux_intr_wait);
1654#endif
1655
1656 tasklet_init(&lynx->iso_rcv.tq, (void (*)(unsigned long))iso_rcv_bh,
1657 (unsigned long)lynx);
1658
1659 spin_lock_init(&lynx->iso_rcv.lock);
1660
1661 spin_lock_init(&lynx->async.queue_lock);
1662 lynx->async.channel = CHANNEL_ASYNC_SEND;
1663 spin_lock_init(&lynx->iso_send.queue_lock);
1664 lynx->iso_send.channel = CHANNEL_ISO_SEND;
1665
1666 PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
1667 "ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
1668 lynx->local_ram, lynx->aux_port);
1669
1670 /* now, looking for PHY register set */
1671 if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
1672 lynx->phyic.reg_1394a = 1;
1673 PRINT(KERN_INFO, lynx->id,
1674 "found 1394a conform PHY (using extended register set)");
1675 lynx->phyic.vendor = get_phy_vendorid(lynx);
1676 lynx->phyic.product = get_phy_productid(lynx);
1677 } else {
1678 lynx->phyic.reg_1394a = 0;
1679 PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
1680 }
1681
1682 lynx->selfid_size = -1;
1683 lynx->phy_reg0 = -1;
1684
1685 INIT_LIST_HEAD(&lynx->async.queue);
1686 INIT_LIST_HEAD(&lynx->async.pcl_queue);
1687 INIT_LIST_HEAD(&lynx->iso_send.queue);
1688 INIT_LIST_HEAD(&lynx->iso_send.pcl_queue);
1689
1690 pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
1691 put_pcl(lynx, lynx->rcv_pcl_start, &pcl);
1692
1693 pcl.next = PCL_NEXT_INVALID;
1694 pcl.async_error_next = PCL_NEXT_INVALID;
1695
1696 pcl.buffer[0].control = PCL_CMD_RCV | 16;
1697#ifndef __BIG_ENDIAN
1698 pcl.buffer[0].control |= PCL_BIGENDIAN;
1699#endif
1700 pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
1701
1702 pcl.buffer[0].pointer = lynx->rcv_page_dma;
1703 pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
1704 put_pcl(lynx, lynx->rcv_pcl, &pcl);
1705
1706 pcl.next = pcl_bus(lynx, lynx->async.pcl);
1707 pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
1708 put_pcl(lynx, lynx->async.pcl_start, &pcl);
1709
1710 pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
1711 pcl.async_error_next = PCL_NEXT_INVALID;
1712 put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);
1713
1714 pcl.next = PCL_NEXT_INVALID;
1715 pcl.async_error_next = PCL_NEXT_INVALID;
1716 pcl.buffer[0].control = PCL_CMD_RCV | 4;
1717#ifndef __BIG_ENDIAN
1718 pcl.buffer[0].control |= PCL_BIGENDIAN;
1719#endif
1720 pcl.buffer[1].control = PCL_LAST_BUFF | 2044;
1721
1722 for (i = 0; i < NUM_ISORCV_PCL; i++) {
1723 int page = i / ISORCV_PER_PAGE;
1724 int sec = i % ISORCV_PER_PAGE;
1725
1726 pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page]
1727 + sec * MAX_ISORCV_SIZE;
1728 pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
1729 put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
1730 }
1731
1732 pcli = (u32 *)&pcl;
1733 for (i = 0; i < NUM_ISORCV_PCL; i++) {
1734 pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
1735 }
1736 put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);
1737
1738 /* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
1739 reg_write(lynx, FIFO_SIZES, 0x003030a0);
1740 /* 20 byte threshold before triggering PCI transfer */
1741 reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
1742 /* threshold on both send FIFOs before transmitting:
1743 FIFO size - cache line size - 1 */
1744 i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
1745 i = 0x30 - i - 1;
1746 reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);
1747
1748 reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);
1749
1750 reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
1751 | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET
1752 | LINK_INT_ISO_STUCK | LINK_INT_ASYNC_STUCK
1753 | LINK_INT_SENT_REJECT | LINK_INT_TX_INVALID_TC
1754 | LINK_INT_GRF_OVERFLOW | LINK_INT_ITF_UNDERFLOW
1755 | LINK_INT_ATF_UNDERFLOW);
1756
1757 reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
1758 reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
1759 reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
1760 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
1761 DMA_WORD1_CMP_MATCH_LOCAL_NODE | DMA_WORD1_CMP_MATCH_BROADCAST
1762 | DMA_WORD1_CMP_MATCH_EXACT | DMA_WORD1_CMP_MATCH_BUS_BCAST
1763 | DMA_WORD1_CMP_ENABLE_SELF_ID | DMA_WORD1_CMP_ENABLE_MASTER);
1764
1765 run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
1766
1767 reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
1768 reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
1769 reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
1770 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
1771
1772 run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);
1773
1774 reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
1775 | LINK_CONTROL_TX_ISO_EN | LINK_CONTROL_RX_ISO_EN
1776 | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
1777 | LINK_CONTROL_RESET_TX | LINK_CONTROL_RESET_RX);
1778
1779 if (!lynx->phyic.reg_1394a) {
1780 if (!hpsb_disable_irm) {
1781 /* attempt to enable contender bit -FIXME- would this
1782 * work elsewhere? */
1783 reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
1784 reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1);
1785 }
1786 } else {
1787 /* set the contender (if appropriate) and LCtrl bit in the
1788 * extended PHY register set. (Should check that PHY_02_EXTENDED
1789 * is set in register 2?)
1790 */
1791 i = get_phy_reg(lynx, 4);
1792 i |= PHY_04_LCTRL;
1793 if (hpsb_disable_irm)
1794			i &= ~PHY_04_CONTENDER;
1795 else
1796 i |= PHY_04_CONTENDER;
1797 if (i != -1) set_phy_reg(lynx, 4, i);
1798 }
1799
1800 if (!skip_eeprom)
1801 {
1802 /* needed for i2c communication with serial eeprom */
1803 struct i2c_adapter *i2c_ad;
1804 struct i2c_algo_bit_data i2c_adapter_data;
1805
1806 error = -ENOMEM;
1807 i2c_ad = kmalloc(sizeof(struct i2c_adapter), SLAB_KERNEL);
1808 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
1809
1810 memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
1811 i2c_adapter_data = bit_data;
1812 i2c_ad->algo_data = &i2c_adapter_data;
1813 i2c_adapter_data.data = lynx;
1814
1815 PRINTD(KERN_DEBUG, lynx->id,"original eeprom control: %d",
1816 reg_read(lynx, SERIAL_EEPROM_CONTROL));
1817
1818 /* reset hardware to sane state */
1819 lynx->i2c_driven_state = 0x00000070;
1820 reg_write(lynx, SERIAL_EEPROM_CONTROL, lynx->i2c_driven_state);
1821
1822 if (i2c_bit_add_bus(i2c_ad) < 0)
1823 {
1824 kfree(i2c_ad);
1825 error = -ENXIO;
1826 FAIL("unable to register i2c");
1827 }
1828 else
1829 {
1830 /* do i2c stuff */
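			/* two i2c messages: write the one-byte eeprom offset
			 * (0x10, presumably where the bus info block starts)
			 * to device 0x50, then read back 20 bytes - the five
			 * bus_info_block quadlets */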
1831 unsigned char i2c_cmd = 0x10;
1832 struct i2c_msg msg[2] = { { 0x50, 0, 1, &i2c_cmd },
1833 { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
1834 };
1835
1836
1837#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
1838 union i2c_smbus_data data;
1839
1840 if (i2c_smbus_xfer(i2c_ad, 80, 0, I2C_SMBUS_WRITE, 0, I2C_SMBUS_BYTE,NULL))
1841 PRINT(KERN_ERR, lynx->id,"eeprom read start has failed");
1842 else
1843 {
1844 u16 addr;
1845 for (addr=0x00; addr < 0x100; addr++) {
1846 if (i2c_smbus_xfer(i2c_ad, 80, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE,& data)) {
1847 PRINT(KERN_ERR, lynx->id, "unable to read i2c %x", addr);
1848 break;
1849 }
1850 else
1851 PRINT(KERN_DEBUG, lynx->id,"got serial eeprom data at %x: %x",addr, data.byte);
1852 }
1853 }
1854#endif
1855
1856			/* we use i2c_transfer because i2c_smbus_read_block_data does not work properly,
1857			   and we do it more efficiently in one transaction rather than using several reads */
1858 if (i2c_transfer(i2c_ad, msg, 2) < 0) {
1859 PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
1860 } else {
1861 int i;
1862
1863 PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
1864				/* FIXME: probably we should rewrite the max_rec, max_ROM(1394a),
1865				 * generation(1394a) and link_spd(1394a) fields and recalculate
1866				 * the CRC */
1867
1868 for (i = 0; i < 5 ; i++)
1869 PRINTD(KERN_DEBUG, lynx->id, "Businfo block quadlet %i: %08x",
1870 i, be32_to_cpu(lynx->bus_info_block[i]));
1871
1872				/* check info_length, crc_length and the 1394 magic number to see whether it really is a bus info block */
1873 if (((be32_to_cpu(lynx->bus_info_block[0]) & 0xffff0000) == 0x04040000) &&
1874 (lynx->bus_info_block[1] == __constant_cpu_to_be32(0x31333934)))
1875 {
1876					PRINT(KERN_DEBUG, lynx->id, "read a valid bus info block from serial eeprom");
1877 } else {
1878 kfree(i2c_ad);
1879 error = -ENXIO;
1880 FAIL("read something from serial eeprom, but it does not seem to be a valid bus info block");
1881 }
1882
1883 }
1884
1885 i2c_bit_del_bus(i2c_ad);
1886 kfree(i2c_ad);
1887 }
1888 }
1889
1890 host->csr.guid_hi = be32_to_cpu(lynx->bus_info_block[3]);
1891 host->csr.guid_lo = be32_to_cpu(lynx->bus_info_block[4]);
1892 host->csr.cyc_clk_acc = (be32_to_cpu(lynx->bus_info_block[2]) >> 16) & 0xff;
1893 host->csr.max_rec = (be32_to_cpu(lynx->bus_info_block[2]) >> 12) & 0xf;
1894 if (!lynx->phyic.reg_1394a)
1895 host->csr.lnk_spd = (get_phy_reg(lynx, 2) & 0xc0) >> 6;
1896 else
1897 host->csr.lnk_spd = be32_to_cpu(lynx->bus_info_block[2]) & 0x7;
1898
1899 if (hpsb_add_host(host)) {
1900 error = -ENOMEM;
1901 FAIL("Failed to register host with highlevel");
1902 }
1903
1904 lynx->state = is_host;
1905
1906 return 0;
1907#undef FAIL
1908}
1909
1910
1911static struct pci_device_id pci_table[] = {
1912 {
1913 .vendor = PCI_VENDOR_ID_TI,
1914 .device = PCI_DEVICE_ID_TI_PCILYNX,
1915 .subvendor = PCI_ANY_ID,
1916 .subdevice = PCI_ANY_ID,
1917 },
1918 { } /* Terminating entry */
1919};
1920
1921static struct pci_driver lynx_pci_driver = {
1922 .name = PCILYNX_DRIVER_NAME,
1923 .id_table = pci_table,
1924 .probe = add_card,
1925 .remove = remove_card,
1926};
1927
1928static struct hpsb_host_driver lynx_driver = {
1929 .owner = THIS_MODULE,
1930 .name = PCILYNX_DRIVER_NAME,
1931 .set_hw_config_rom = NULL,
1932 .transmit_packet = lynx_transmit,
1933 .devctl = lynx_devctl,
1934 .isoctl = NULL,
1935};
1936
1937MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
1938MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
1939MODULE_LICENSE("GPL");
1940MODULE_SUPPORTED_DEVICE("pcilynx");
1941MODULE_DEVICE_TABLE(pci, pci_table);
1942
1943static int __init pcilynx_init(void)
1944{
1945 int ret;
1946
1947#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1948 if (register_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME, &aux_ops)) {
1949 PRINT_G(KERN_ERR, "allocation of char major number %d failed",
1950 PCILYNX_MAJOR);
1951 return -EBUSY;
1952 }
1953#endif
1954
1955 ret = pci_register_driver(&lynx_pci_driver);
1956 if (ret < 0) {
1957 PRINT_G(KERN_ERR, "PCI module init failed");
1958 goto free_char_dev;
1959 }
1960
1961 return 0;
1962
1963 free_char_dev:
1964#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1965 unregister_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME);
1966#endif
1967
1968 return ret;
1969}
1970
1971static void __exit pcilynx_cleanup(void)
1972{
1973 pci_unregister_driver(&lynx_pci_driver);
1974
1975#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1976 unregister_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME);
1977#endif
1978}
1979
1980
1981module_init(pcilynx_init);
1982module_exit(pcilynx_cleanup);
diff --git a/drivers/ieee1394/pcilynx.h b/drivers/ieee1394/pcilynx.h
new file mode 100644
index 000000000000..644ec55d3d46
--- /dev/null
+++ b/drivers/ieee1394/pcilynx.h
@@ -0,0 +1,516 @@
1#ifndef __PCILYNX_H__
2#define __PCILYNX_H__
3
4#include <linux/config.h>
5
6#define PCILYNX_DRIVER_NAME "pcilynx"
7#define PCILYNX_MAJOR 177
8
9#define PCILYNX_MINOR_AUX_START 0
10#define PCILYNX_MINOR_ROM_START 16
11#define PCILYNX_MINOR_RAM_START 32
12
13#define PCILYNX_MAX_REGISTER 0xfff
14#define PCILYNX_MAX_MEMORY 0xffff
15
16#define PCI_DEVICE_ID_TI_PCILYNX 0x8000
17#define MAX_PCILYNX_CARDS 4
18#define LOCALRAM_SIZE 4096
19
20#define NUM_ISORCV_PCL 4
21#define MAX_ISORCV_SIZE 2048
22#define ISORCV_PER_PAGE (PAGE_SIZE / MAX_ISORCV_SIZE)
23#define ISORCV_PAGES (NUM_ISORCV_PCL / ISORCV_PER_PAGE)
24
25#define CHANNEL_LOCALBUS 0
26#define CHANNEL_ASYNC_RCV 1
27#define CHANNEL_ISO_RCV 2
28#define CHANNEL_ASYNC_SEND 3
29#define CHANNEL_ISO_SEND 4
30
31#define PCILYNX_CONFIG_ROM_LENGTH 1024
32
33typedef int pcl_t;
34
35struct ti_lynx {
36 int id; /* sequential card number */
37
38 spinlock_t lock;
39
40 struct pci_dev *dev;
41
42 struct {
43 unsigned reg_1394a:1;
44 u32 vendor;
45 u32 product;
46 } phyic;
47
48 enum { clear, have_intr, have_aux_buf, have_pcl_mem,
49 have_1394_buffers, have_iomappings, is_host } state;
50
51 /* remapped memory spaces */
52 void __iomem *registers;
53 void __iomem *local_rom;
54 void __iomem *local_ram;
55 void __iomem *aux_port;
56 quadlet_t bus_info_block[5];
57
58#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
59 atomic_t aux_intr_seen;
60 wait_queue_head_t aux_intr_wait;
61
62 void *mem_dma_buffer;
63 dma_addr_t mem_dma_buffer_dma;
64 struct semaphore mem_dma_mutex;
65 wait_queue_head_t mem_dma_intr_wait;
66#endif
67
68 /*
69 * use local RAM of LOCALRAM_SIZE bytes for PCLs, which allows for
70	 * LOCALRAM_SIZE / 128 PCLs (each sized 128 bytes); the following is
71	 * an allocation bitmap (a short allocation sketch follows this struct)
72 */
73 u8 pcl_bmap[LOCALRAM_SIZE / 1024];
74
75#ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
76 /* point to PCLs memory area if needed */
77 void *pcl_mem;
78 dma_addr_t pcl_mem_dma;
79#endif
80
81 /* PCLs for local mem / aux transfers */
82 pcl_t dmem_pcl;
83
84 /* IEEE-1394 part follows */
85 struct hpsb_host *host;
86
87 int phyid, isroot;
88 int selfid_size;
89 int phy_reg0;
90
91 spinlock_t phy_reg_lock;
92
93 pcl_t rcv_pcl_start, rcv_pcl;
94 void *rcv_page;
95 dma_addr_t rcv_page_dma;
96 int rcv_active;
97
98 struct lynx_send_data {
99 pcl_t pcl_start, pcl;
100 struct list_head queue;
101 struct list_head pcl_queue; /* this queue contains at most one packet */
102 spinlock_t queue_lock;
103 dma_addr_t header_dma, data_dma;
104 int channel;
105 } async, iso_send;
106
107 struct {
108 pcl_t pcl[NUM_ISORCV_PCL];
109 u32 stat[NUM_ISORCV_PCL];
110 void *page[ISORCV_PAGES];
111 dma_addr_t page_dma[ISORCV_PAGES];
112 pcl_t pcl_start;
113 int chan_count;
114 int next, last, used, running;
115 struct tasklet_struct tq;
116 spinlock_t lock;
117 } iso_rcv;
118
119	u32 i2c_driven_state; /* the state we currently drive onto the Serial EEPROM Control register */
120};
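
/*
 * Editor's illustrative sketch (not from the original file): how one PCL
 * slot could be reserved from the pcl_bmap bitmap described above, one
 * bit per 128-byte PCL, LOCALRAM_SIZE / 128 slots in total.  The helper
 * name alloc_pcl_slot is hypothetical.
 */
static inline pcl_t alloc_pcl_slot(struct ti_lynx *lynx)
{
	int i, j;

	for (i = 0; i < LOCALRAM_SIZE / 1024; i++) {
		for (j = 0; j < 8; j++) {
			if (!(lynx->pcl_bmap[i] & (1 << j))) {
				lynx->pcl_bmap[i] |= 1 << j;	/* mark slot used */
				return i * 8 + j;
			}
		}
	}
	return -1;	/* bitmap full */
}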
121
122/* the per-file data structure for mem space access */
123struct memdata {
124 struct ti_lynx *lynx;
125 int cid;
126 atomic_t aux_intr_last_seen;
127 /* enum values are the same as LBUS_ADDR_SEL_* values below */
128 enum { rom = 0x10000, aux = 0x20000, ram = 0 } type;
129};
130
131
132
133/*
134 * Register read and write helper functions.
135 */
136static inline void reg_write(const struct ti_lynx *lynx, int offset, u32 data)
137{
138 writel(data, lynx->registers + offset);
139}
140
141static inline u32 reg_read(const struct ti_lynx *lynx, int offset)
142{
143 return readl(lynx->registers + offset);
144}
145
146static inline void reg_set_bits(const struct ti_lynx *lynx, int offset,
147 u32 mask)
148{
149 reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
150}
151
152static inline void reg_clear_bits(const struct ti_lynx *lynx, int offset,
153 u32 mask)
154{
155 reg_write(lynx, offset, (reg_read(lynx, offset) & ~mask));
156}
157
158
159
160/* chip register definitions follow */
161
162#define PCI_LATENCY_CACHELINE 0x0c
163
164#define MISC_CONTROL 0x40
165#define MISC_CONTROL_SWRESET (1<<0)
166
167#define SERIAL_EEPROM_CONTROL 0x44
168
169#define PCI_INT_STATUS 0x48
170#define PCI_INT_ENABLE 0x4c
171/* status and enable have identical bit numbers */
172#define PCI_INT_INT_PEND (1<<31)
173#define PCI_INT_FORCED_INT (1<<30)
174#define PCI_INT_SLV_ADR_PERR (1<<28)
175#define PCI_INT_SLV_DAT_PERR (1<<27)
176#define PCI_INT_MST_DAT_PERR (1<<26)
177#define PCI_INT_MST_DEV_TIMEOUT (1<<25)
178#define PCI_INT_INTERNAL_SLV_TIMEOUT (1<<23)
179#define PCI_INT_AUX_TIMEOUT (1<<18)
180#define PCI_INT_AUX_INT (1<<17)
181#define PCI_INT_1394 (1<<16)
182#define PCI_INT_DMA4_PCL (1<<9)
183#define PCI_INT_DMA4_HLT (1<<8)
184#define PCI_INT_DMA3_PCL (1<<7)
185#define PCI_INT_DMA3_HLT (1<<6)
186#define PCI_INT_DMA2_PCL (1<<5)
187#define PCI_INT_DMA2_HLT (1<<4)
188#define PCI_INT_DMA1_PCL (1<<3)
189#define PCI_INT_DMA1_HLT (1<<2)
190#define PCI_INT_DMA0_PCL (1<<1)
191#define PCI_INT_DMA0_HLT (1<<0)
192/* all DMA interrupts combined: */
193#define PCI_INT_DMA_ALL 0x3ff
194
195#define PCI_INT_DMA_HLT(chan) (1 << (chan * 2))
196#define PCI_INT_DMA_PCL(chan) (1 << (chan * 2 + 1))
197
198#define LBUS_ADDR 0xb4
199#define LBUS_ADDR_SEL_RAM (0x0<<16)
200#define LBUS_ADDR_SEL_ROM (0x1<<16)
201#define LBUS_ADDR_SEL_AUX (0x2<<16)
202#define LBUS_ADDR_SEL_ZV (0x3<<16)
203
204#define GPIO_CTRL_A 0xb8
205#define GPIO_CTRL_B 0xbc
206#define GPIO_DATA_BASE 0xc0
207
208#define DMA_BREG(base, chan) (base + chan * 0x20)
209#define DMA_SREG(base, chan) (base + chan * 0x10)
210
211#define DMA0_PREV_PCL 0x100
212#define DMA1_PREV_PCL 0x120
213#define DMA2_PREV_PCL 0x140
214#define DMA3_PREV_PCL 0x160
215#define DMA4_PREV_PCL 0x180
216#define DMA_PREV_PCL(chan) (DMA_BREG(DMA0_PREV_PCL, chan))
217
218#define DMA0_CURRENT_PCL 0x104
219#define DMA1_CURRENT_PCL 0x124
220#define DMA2_CURRENT_PCL 0x144
221#define DMA3_CURRENT_PCL 0x164
222#define DMA4_CURRENT_PCL 0x184
223#define DMA_CURRENT_PCL(chan) (DMA_BREG(DMA0_CURRENT_PCL, chan))
224
225#define DMA0_CHAN_STAT 0x10c
226#define DMA1_CHAN_STAT 0x12c
227#define DMA2_CHAN_STAT 0x14c
228#define DMA3_CHAN_STAT 0x16c
229#define DMA4_CHAN_STAT 0x18c
230#define DMA_CHAN_STAT(chan) (DMA_BREG(DMA0_CHAN_STAT, chan))
231/* CHAN_STATUS registers share bits */
232#define DMA_CHAN_STAT_SELFID (1<<31)
233#define DMA_CHAN_STAT_ISOPKT (1<<30)
234#define DMA_CHAN_STAT_PCIERR (1<<29)
235#define DMA_CHAN_STAT_PKTERR (1<<28)
236#define DMA_CHAN_STAT_PKTCMPL (1<<27)
237#define DMA_CHAN_STAT_SPECIALACK (1<<14)
238
239
240#define DMA0_CHAN_CTRL 0x110
241#define DMA1_CHAN_CTRL 0x130
242#define DMA2_CHAN_CTRL 0x150
243#define DMA3_CHAN_CTRL 0x170
244#define DMA4_CHAN_CTRL 0x190
245#define DMA_CHAN_CTRL(chan) (DMA_BREG(DMA0_CHAN_CTRL, chan))
246/* CHAN_CTRL registers share bits */
247#define DMA_CHAN_CTRL_ENABLE (1<<31)
248#define DMA_CHAN_CTRL_BUSY (1<<30)
249#define DMA_CHAN_CTRL_LINK (1<<29)
250
251#define DMA0_READY 0x114
252#define DMA1_READY 0x134
253#define DMA2_READY 0x154
254#define DMA3_READY 0x174
255#define DMA4_READY 0x194
256#define DMA_READY(chan) (DMA_BREG(DMA0_READY, chan))
257
258#define DMA_GLOBAL_REGISTER 0x908
259
260#define FIFO_SIZES 0xa00
261
262#define FIFO_CONTROL 0xa10
263#define FIFO_CONTROL_GRF_FLUSH (1<<4)
264#define FIFO_CONTROL_ITF_FLUSH (1<<3)
265#define FIFO_CONTROL_ATF_FLUSH (1<<2)
266
267#define FIFO_XMIT_THRESHOLD 0xa14
268
269#define DMA0_WORD0_CMP_VALUE 0xb00
270#define DMA1_WORD0_CMP_VALUE 0xb10
271#define DMA2_WORD0_CMP_VALUE 0xb20
272#define DMA3_WORD0_CMP_VALUE 0xb30
273#define DMA4_WORD0_CMP_VALUE 0xb40
274#define DMA_WORD0_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD0_CMP_VALUE, chan))
275
276#define DMA0_WORD0_CMP_ENABLE 0xb04
277#define DMA1_WORD0_CMP_ENABLE 0xb14
278#define DMA2_WORD0_CMP_ENABLE 0xb24
279#define DMA3_WORD0_CMP_ENABLE 0xb34
280#define DMA4_WORD0_CMP_ENABLE 0xb44
281#define DMA_WORD0_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD0_CMP_ENABLE,chan))
282
283#define DMA0_WORD1_CMP_VALUE 0xb08
284#define DMA1_WORD1_CMP_VALUE 0xb18
285#define DMA2_WORD1_CMP_VALUE 0xb28
286#define DMA3_WORD1_CMP_VALUE 0xb38
287#define DMA4_WORD1_CMP_VALUE 0xb48
288#define DMA_WORD1_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD1_CMP_VALUE, chan))
289
290#define DMA0_WORD1_CMP_ENABLE 0xb0c
291#define DMA1_WORD1_CMP_ENABLE 0xb1c
292#define DMA2_WORD1_CMP_ENABLE 0xb2c
293#define DMA3_WORD1_CMP_ENABLE 0xb3c
294#define DMA4_WORD1_CMP_ENABLE 0xb4c
295#define DMA_WORD1_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD1_CMP_ENABLE,chan))
296/* word 1 compare enable flags */
297#define DMA_WORD1_CMP_MATCH_OTHERBUS (1<<15)
298#define DMA_WORD1_CMP_MATCH_BROADCAST (1<<14)
299#define DMA_WORD1_CMP_MATCH_BUS_BCAST (1<<13)
300#define DMA_WORD1_CMP_MATCH_LOCAL_NODE (1<<12)
301#define DMA_WORD1_CMP_MATCH_EXACT (1<<11)
302#define DMA_WORD1_CMP_ENABLE_SELF_ID (1<<10)
303#define DMA_WORD1_CMP_ENABLE_MASTER (1<<8)
304
305#define LINK_ID 0xf00
306#define LINK_ID_BUS(id) (id<<22)
307#define LINK_ID_NODE(id) (id<<16)
308
309#define LINK_CONTROL 0xf04
310#define LINK_CONTROL_BUSY (1<<29)
311#define LINK_CONTROL_TX_ISO_EN (1<<26)
312#define LINK_CONTROL_RX_ISO_EN (1<<25)
313#define LINK_CONTROL_TX_ASYNC_EN (1<<24)
314#define LINK_CONTROL_RX_ASYNC_EN (1<<23)
315#define LINK_CONTROL_RESET_TX (1<<21)
316#define LINK_CONTROL_RESET_RX (1<<20)
317#define LINK_CONTROL_CYCMASTER (1<<11)
318#define LINK_CONTROL_CYCSOURCE (1<<10)
319#define LINK_CONTROL_CYCTIMEREN (1<<9)
320#define LINK_CONTROL_RCV_CMP_VALID (1<<7)
321#define LINK_CONTROL_SNOOP_ENABLE (1<<6)
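
/*
 * Editor's illustrative sketch (not from the original file): how the
 * reg_read/reg_write/reg_set_bits/reg_clear_bits helpers defined earlier
 * combine with the register and bit definitions above.  The function is
 * hypothetical and only shows the access pattern.
 */
static inline void example_flush_fifos_and_enable_irq(const struct ti_lynx *lynx)
{
	/* flush the general receive and async transmit FIFOs */
	reg_set_bits(lynx, FIFO_CONTROL,
		     FIFO_CONTROL_GRF_FLUSH | FIFO_CONTROL_ATF_FLUSH);
	/* report 1394 link interrupts through the PCI interrupt line */
	reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);
	/* make sure snoop mode is off */
	reg_clear_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE);
}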
322
323#define CYCLE_TIMER 0xf08
324
325#define LINK_PHY 0xf0c
326#define LINK_PHY_READ (1<<31)
327#define LINK_PHY_WRITE (1<<30)
328#define LINK_PHY_ADDR(addr) (addr<<24)
329#define LINK_PHY_WDATA(data) (data<<16)
330#define LINK_PHY_RADDR(addr) (addr<<8)
331
332
333#define LINK_INT_STATUS 0xf14
334#define LINK_INT_ENABLE 0xf18
335/* status and enable have identical bit numbers */
336#define LINK_INT_LINK_INT (1<<31)
337#define LINK_INT_PHY_TIMEOUT (1<<30)
338#define LINK_INT_PHY_REG_RCVD (1<<29)
339#define LINK_INT_PHY_BUSRESET (1<<28)
340#define LINK_INT_TX_RDY (1<<26)
341#define LINK_INT_RX_DATA_RDY (1<<25)
342#define LINK_INT_ISO_STUCK (1<<20)
343#define LINK_INT_ASYNC_STUCK (1<<19)
344#define LINK_INT_SENT_REJECT (1<<17)
345#define LINK_INT_HDR_ERR (1<<16)
346#define LINK_INT_TX_INVALID_TC (1<<15)
347#define LINK_INT_CYC_SECOND (1<<11)
348#define LINK_INT_CYC_START (1<<10)
349#define LINK_INT_CYC_DONE (1<<9)
350#define LINK_INT_CYC_PENDING (1<<8)
351#define LINK_INT_CYC_LOST (1<<7)
352#define LINK_INT_CYC_ARB_FAILED (1<<6)
353#define LINK_INT_GRF_OVERFLOW (1<<5)
354#define LINK_INT_ITF_UNDERFLOW (1<<4)
355#define LINK_INT_ATF_UNDERFLOW (1<<3)
356#define LINK_INT_ISOARB_FAILED (1<<0)
357
358/* PHY specifics */
359#define PHY_VENDORID_TI 0x800028
360#define PHY_PRODUCTID_TSB41LV03 0x000000
361
362
363/* this is the physical layout of a PCL, its size is 128 bytes */
364struct ti_pcl {
365 u32 next;
366 u32 async_error_next;
367 u32 user_data;
368 u32 pcl_status;
369 u32 remaining_transfer_count;
370 u32 next_data_buffer;
371 struct {
372 u32 control;
373 u32 pointer;
374 } buffer[13] __attribute__ ((packed));
375} __attribute__ ((packed));
376
377#include <linux/stddef.h>
378#define pcloffs(MEMBER) (offsetof(struct ti_pcl, MEMBER))
379
380
381#ifdef CONFIG_IEEE1394_PCILYNX_LOCALRAM
382
383static inline void put_pcl(const struct ti_lynx *lynx, pcl_t pclid,
384 const struct ti_pcl *pcl)
385{
386 int i;
387 u32 *in = (u32 *)pcl;
388 u32 *out = (u32 *)(lynx->local_ram + pclid * sizeof(struct ti_pcl));
389
390 for (i = 0; i < 32; i++, out++, in++) {
391 writel(*in, out);
392 }
393}
394
395static inline void get_pcl(const struct ti_lynx *lynx, pcl_t pclid,
396 struct ti_pcl *pcl)
397{
398 int i;
399 u32 *out = (u32 *)pcl;
400 u32 *in = (u32 *)(lynx->local_ram + pclid * sizeof(struct ti_pcl));
401
402 for (i = 0; i < 32; i++, out++, in++) {
403 *out = readl(in);
404 }
405}
406
407static inline u32 pcl_bus(const struct ti_lynx *lynx, pcl_t pclid)
408{
409 return pci_resource_start(lynx->dev, 1) + pclid * sizeof(struct ti_pcl);
410}
411
412#else /* CONFIG_IEEE1394_PCILYNX_LOCALRAM */
413
414static inline void put_pcl(const struct ti_lynx *lynx, pcl_t pclid,
415 const struct ti_pcl *pcl)
416{
417 memcpy_le32((u32 *)(lynx->pcl_mem + pclid * sizeof(struct ti_pcl)),
418 (u32 *)pcl, sizeof(struct ti_pcl));
419}
420
421static inline void get_pcl(const struct ti_lynx *lynx, pcl_t pclid,
422 struct ti_pcl *pcl)
423{
424 memcpy_le32((u32 *)pcl,
425 (u32 *)(lynx->pcl_mem + pclid * sizeof(struct ti_pcl)),
426 sizeof(struct ti_pcl));
427}
428
429static inline u32 pcl_bus(const struct ti_lynx *lynx, pcl_t pclid)
430{
431 return lynx->pcl_mem_dma + pclid * sizeof(struct ti_pcl);
432}
433
434#endif /* CONFIG_IEEE1394_PCILYNX_LOCALRAM */
435
436
437#if defined (CONFIG_IEEE1394_PCILYNX_LOCALRAM) || defined (__BIG_ENDIAN)
438typedef struct ti_pcl pcltmp_t;
439
440static inline struct ti_pcl *edit_pcl(const struct ti_lynx *lynx, pcl_t pclid,
441 pcltmp_t *tmp)
442{
443 get_pcl(lynx, pclid, tmp);
444 return tmp;
445}
446
447static inline void commit_pcl(const struct ti_lynx *lynx, pcl_t pclid,
448 pcltmp_t *tmp)
449{
450 put_pcl(lynx, pclid, tmp);
451}
452
453#else
454typedef int pcltmp_t; /* just a dummy */
455
456static inline struct ti_pcl *edit_pcl(const struct ti_lynx *lynx, pcl_t pclid,
457 pcltmp_t *tmp)
458{
459 return lynx->pcl_mem + pclid * sizeof(struct ti_pcl);
460}
461
462static inline void commit_pcl(const struct ti_lynx *lynx, pcl_t pclid,
463 pcltmp_t *tmp)
464{
465}
466#endif
467
468
469static inline void run_sub_pcl(const struct ti_lynx *lynx, pcl_t pclid, int idx,
470 int dmachan)
471{
472 reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20,
473 pcl_bus(lynx, pclid) + idx * 4);
474 reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
475 DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
476}
477
478static inline void run_pcl(const struct ti_lynx *lynx, pcl_t pclid, int dmachan)
479{
480 run_sub_pcl(lynx, pclid, 0, dmachan);
481}
482
483#define PCL_NEXT_INVALID (1<<0)
484
485/* transfer commands */
486#define PCL_CMD_RCV (0x1<<24)
487#define PCL_CMD_RCV_AND_UPDATE (0xa<<24)
488#define PCL_CMD_XMT (0x2<<24)
489#define PCL_CMD_UNFXMT (0xc<<24)
490#define PCL_CMD_PCI_TO_LBUS (0x8<<24)
491#define PCL_CMD_LBUS_TO_PCI (0x9<<24)
492
493/* aux commands */
494#define PCL_CMD_NOP (0x0<<24)
495#define PCL_CMD_LOAD (0x3<<24)
496#define PCL_CMD_STOREQ (0x4<<24)
497#define PCL_CMD_STORED (0xb<<24)
498#define PCL_CMD_STORE0 (0x5<<24)
499#define PCL_CMD_STORE1 (0x6<<24)
500#define PCL_CMD_COMPARE (0xe<<24)
501#define PCL_CMD_SWAP_COMPARE (0xf<<24)
502#define PCL_CMD_ADD (0xd<<24)
503#define PCL_CMD_BRANCH (0x7<<24)
504
505/* BRANCH condition codes */
506#define PCL_COND_DMARDY_SET (0x1<<20)
507#define PCL_COND_DMARDY_CLEAR (0x2<<20)
508
509#define PCL_GEN_INTR (1<<19)
510#define PCL_LAST_BUFF (1<<18)
511#define PCL_LAST_CMD (PCL_LAST_BUFF)
512#define PCL_WAITSTAT (1<<17)
513#define PCL_BIGENDIAN (1<<16)
514#define PCL_ISOMODE (1<<12)
515
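
/*
 * Editor's illustrative sketch (not from the original file): a minimal
 * example of programming and starting a one-buffer receive PCL with the
 * edit_pcl()/commit_pcl()/run_pcl() helpers and the command bits above.
 * The DMA address data_dma and the 2048-byte buffer size are assumptions
 * made only for illustration.
 */
static inline void example_start_rcv_pcl(const struct ti_lynx *lynx,
					 pcl_t pclid, dma_addr_t data_dma,
					 int dmachan)
{
	pcltmp_t pcl_tmp;
	struct ti_pcl *pcl = edit_pcl(lynx, pclid, &pcl_tmp);

	pcl->next = PCL_NEXT_INVALID;		/* no chained PCL */
	pcl->buffer[0].control = PCL_CMD_RCV | PCL_LAST_BUFF | 2048;
	pcl->buffer[0].pointer = data_dma;
	commit_pcl(lynx, pclid, &pcl_tmp);	/* write back to PCL memory */

	run_pcl(lynx, pclid, dmachan);		/* sets ENABLE | LINK */
}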
516#endif
diff --git a/drivers/ieee1394/raw1394-private.h b/drivers/ieee1394/raw1394-private.h
new file mode 100644
index 000000000000..c93587be9cab
--- /dev/null
+++ b/drivers/ieee1394/raw1394-private.h
@@ -0,0 +1,86 @@
1#ifndef IEEE1394_RAW1394_PRIVATE_H
2#define IEEE1394_RAW1394_PRIVATE_H
3
4/* header for definitions that are private to the raw1394 driver
5 and not visible to user-space */
6
7#define RAW1394_DEVICE_MAJOR 171
8#define RAW1394_DEVICE_NAME "raw1394"
9
10#define RAW1394_MAX_USER_CSR_DIRS 16
11
12struct iso_block_store {
13 atomic_t refcount;
14 size_t data_size;
15 quadlet_t data[0];
16};
17
18enum raw1394_iso_state { RAW1394_ISO_INACTIVE = 0,
19 RAW1394_ISO_RECV = 1,
20 RAW1394_ISO_XMIT = 2 };
21
22struct file_info {
23 struct list_head list;
24
25 enum { opened, initialized, connected } state;
26 unsigned int protocol_version;
27
28 struct hpsb_host *host;
29
30 struct list_head req_pending;
31 struct list_head req_complete;
32 struct semaphore complete_sem;
33 spinlock_t reqlists_lock;
34 wait_queue_head_t poll_wait_complete;
35
36 struct list_head addr_list;
37
38 u8 __user *fcp_buffer;
39
40 /* old ISO API */
41 u64 listen_channels;
42 quadlet_t __user *iso_buffer;
43 size_t iso_buffer_length;
44
45 u8 notification; /* (busreset-notification) RAW1394_NOTIFY_OFF/ON */
46
47 /* new rawiso API */
48 enum raw1394_iso_state iso_state;
49 struct hpsb_iso *iso_handle;
50
51 /* User space's CSR1212 dynamic ConfigROM directories */
52 struct csr1212_keyval *csr1212_dirs[RAW1394_MAX_USER_CSR_DIRS];
53
54 /* Legacy ConfigROM update flag */
55 u8 cfgrom_upd;
56};
57
58struct arm_addr {
59 struct list_head addr_list; /* file_info list */
60 u64 start, end;
61 u64 arm_tag;
62 u8 access_rights;
63 u8 notification_options;
64 u8 client_transactions;
65 u64 recvb;
66 u16 rec_length;
67 u8 *addr_space_buffer; /* accessed by read/write/lock */
68};
69
70struct pending_request {
71 struct list_head list;
72 struct file_info *file_info;
73 struct hpsb_packet *packet;
74 struct iso_block_store *ibs;
75 quadlet_t *data;
76 int free_data;
77 struct raw1394_request req;
78};
79
80struct host_info {
81 struct list_head list;
82 struct hpsb_host *host;
83 struct list_head file_info_list;
84};
85
86#endif /* IEEE1394_RAW1394_PRIVATE_H */
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
new file mode 100644
index 000000000000..6a08a8982ea8
--- /dev/null
+++ b/drivers/ieee1394/raw1394.c
@@ -0,0 +1,2958 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * Raw interface to the bus
5 *
6 * Copyright (C) 1999, 2000 Andreas E. Bombe
7 * 2001, 2002 Manfred Weihs <weihs@ict.tuwien.ac.at>
8 * 2002 Christian Toegel <christian.toegel@gmx.at>
9 *
10 * This code is licensed under the GPL. See the file COPYING in the root
11 * directory of the kernel sources for details.
12 *
13 *
14 * Contributions:
15 *
16 * Manfred Weihs <weihs@ict.tuwien.ac.at>
17 * configuration ROM manipulation
18 * address range mapping
19 * adaptation for new (transparent) loopback mechanism
20 * sending of arbitrary async packets
21 * Christian Toegel <christian.toegel@gmx.at>
22 * address range mapping
23 * lock64 request
24 * transmit physical packet
25 * busreset notification control (switch on/off)
26 * busreset with selection of type (short/long)
27 * request_reply
28 */
29
30#include <linux/kernel.h>
31#include <linux/list.h>
32#include <linux/string.h>
33#include <linux/slab.h>
34#include <linux/fs.h>
35#include <linux/poll.h>
36#include <linux/module.h>
37#include <linux/init.h>
38#include <linux/smp_lock.h>
39#include <linux/interrupt.h>
40#include <linux/vmalloc.h>
41#include <linux/cdev.h>
42#include <asm/uaccess.h>
43#include <asm/atomic.h>
44#include <linux/devfs_fs_kernel.h>
45
46#include "csr1212.h"
47#include "ieee1394.h"
48#include "ieee1394_types.h"
49#include "ieee1394_core.h"
50#include "nodemgr.h"
51#include "hosts.h"
52#include "highlevel.h"
53#include "iso.h"
54#include "ieee1394_transactions.h"
55#include "raw1394.h"
56#include "raw1394-private.h"
57
58#define int2ptr(x) ((void __user *)(unsigned long)x)
59#define ptr2int(x) ((u64)(unsigned long)(void __user *)x)
60
61#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
62#define RAW1394_DEBUG
63#endif
64
65#ifdef RAW1394_DEBUG
66#define DBGMSG(fmt, args...) \
67printk(KERN_INFO "raw1394:" fmt "\n" , ## args)
68#else
69#define DBGMSG(fmt, args...)
70#endif
71
72static LIST_HEAD(host_info_list);
73static int host_count;
74static DEFINE_SPINLOCK(host_info_lock);
75static atomic_t internal_generation = ATOMIC_INIT(0);
76
77static atomic_t iso_buffer_size;
78static const int iso_buffer_max = 4 * 1024 * 1024; /* 4 MB */
79
80static struct hpsb_highlevel raw1394_highlevel;
81
82static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
83 u64 addr, size_t length, u16 flags);
84static int arm_write(struct hpsb_host *host, int nodeid, int destid,
85 quadlet_t * data, u64 addr, size_t length, u16 flags);
86static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
87 u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
88 u16 flags);
89static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
90 u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
91 u16 flags);
92static struct hpsb_address_ops arm_ops = {
93 .read = arm_read,
94 .write = arm_write,
95 .lock = arm_lock,
96 .lock64 = arm_lock64,
97};
98
99static void queue_complete_cb(struct pending_request *req);
100
101static struct pending_request *__alloc_pending_request(int flags)
102{
103 struct pending_request *req;
104
105 req = (struct pending_request *)kmalloc(sizeof(struct pending_request),
106 flags);
107 if (req != NULL) {
108 memset(req, 0, sizeof(struct pending_request));
109 INIT_LIST_HEAD(&req->list);
110 }
111
112 return req;
113}
114
115static inline struct pending_request *alloc_pending_request(void)
116{
117 return __alloc_pending_request(SLAB_KERNEL);
118}
119
120static void free_pending_request(struct pending_request *req)
121{
122 if (req->ibs) {
123 if (atomic_dec_and_test(&req->ibs->refcount)) {
124 atomic_sub(req->ibs->data_size, &iso_buffer_size);
125 kfree(req->ibs);
126 }
127 } else if (req->free_data) {
128 kfree(req->data);
129 }
130 hpsb_free_packet(req->packet);
131 kfree(req);
132}
133
134/* fi->reqlists_lock must be taken */
135static void __queue_complete_req(struct pending_request *req)
136{
137 struct file_info *fi = req->file_info;
138 list_del(&req->list);
139 list_add_tail(&req->list, &fi->req_complete);
140
141 up(&fi->complete_sem);
142 wake_up_interruptible(&fi->poll_wait_complete);
143}
144
145static void queue_complete_req(struct pending_request *req)
146{
147 unsigned long flags;
148 struct file_info *fi = req->file_info;
149
150 spin_lock_irqsave(&fi->reqlists_lock, flags);
151 __queue_complete_req(req);
152 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
153}
154
155static void queue_complete_cb(struct pending_request *req)
156{
157 struct hpsb_packet *packet = req->packet;
158 int rcode = (packet->header[1] >> 12) & 0xf;
159
160 switch (packet->ack_code) {
161 case ACKX_NONE:
162 case ACKX_SEND_ERROR:
163 req->req.error = RAW1394_ERROR_SEND_ERROR;
164 break;
165 case ACKX_ABORTED:
166 req->req.error = RAW1394_ERROR_ABORTED;
167 break;
168 case ACKX_TIMEOUT:
169 req->req.error = RAW1394_ERROR_TIMEOUT;
170 break;
171 default:
172 req->req.error = (packet->ack_code << 16) | rcode;
173 break;
174 }
175
176 if (!((packet->ack_code == ACK_PENDING) && (rcode == RCODE_COMPLETE))) {
177 req->req.length = 0;
178 }
179
180 if ((req->req.type == RAW1394_REQ_ASYNC_READ) ||
181 (req->req.type == RAW1394_REQ_ASYNC_WRITE) ||
182 (req->req.type == RAW1394_REQ_ASYNC_STREAM) ||
183 (req->req.type == RAW1394_REQ_LOCK) ||
184 (req->req.type == RAW1394_REQ_LOCK64))
185 hpsb_free_tlabel(packet);
186
187 queue_complete_req(req);
188}
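
/*
 * Editor's illustrative sketch (not from the original file): for the
 * non-ACKX cases above, req.error packs the ack code into the upper 16
 * bits and the response code into the lower bits, so a consumer of the
 * completed request could unpack it like this.  The helper is hypothetical.
 */
static inline void example_decode_raw1394_error(u32 error, int *ack, int *rcode)
{
	*ack = (error >> 16) & 0xffff;
	*rcode = error & 0xffff;
}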
189
190static void add_host(struct hpsb_host *host)
191{
192 struct host_info *hi;
193 unsigned long flags;
194
195 hi = (struct host_info *)kmalloc(sizeof(struct host_info), GFP_KERNEL);
196
197 if (hi != NULL) {
198 INIT_LIST_HEAD(&hi->list);
199 hi->host = host;
200 INIT_LIST_HEAD(&hi->file_info_list);
201
202 spin_lock_irqsave(&host_info_lock, flags);
203 list_add_tail(&hi->list, &host_info_list);
204 host_count++;
205 spin_unlock_irqrestore(&host_info_lock, flags);
206 }
207
208 atomic_inc(&internal_generation);
209}
210
211static struct host_info *find_host_info(struct hpsb_host *host)
212{
213 struct host_info *hi;
214
215 list_for_each_entry(hi, &host_info_list, list)
216 if (hi->host == host)
217 return hi;
218
219 return NULL;
220}
221
222static void remove_host(struct hpsb_host *host)
223{
224 struct host_info *hi;
225 unsigned long flags;
226
227 spin_lock_irqsave(&host_info_lock, flags);
228 hi = find_host_info(host);
229
230 if (hi != NULL) {
231 list_del(&hi->list);
232 host_count--;
233 /*
234 FIXME: address ranges should be removed
235 and fileinfo states should be initialized
236 (including setting generation to
237 internal-generation ...)
238 */
239 }
240 spin_unlock_irqrestore(&host_info_lock, flags);
241
242 if (hi == NULL) {
243 printk(KERN_ERR "raw1394: attempt to remove unknown host "
244 "0x%p\n", host);
245 return;
246 }
247
248 kfree(hi);
249
250 atomic_inc(&internal_generation);
251}
252
253static void host_reset(struct hpsb_host *host)
254{
255 unsigned long flags;
256 struct host_info *hi;
257 struct file_info *fi;
258 struct pending_request *req;
259
260 spin_lock_irqsave(&host_info_lock, flags);
261 hi = find_host_info(host);
262
263 if (hi != NULL) {
264 list_for_each_entry(fi, &hi->file_info_list, list) {
265 if (fi->notification == RAW1394_NOTIFY_ON) {
266 req = __alloc_pending_request(SLAB_ATOMIC);
267
268 if (req != NULL) {
269 req->file_info = fi;
270 req->req.type = RAW1394_REQ_BUS_RESET;
271 req->req.generation =
272 get_hpsb_generation(host);
273 req->req.misc = (host->node_id << 16)
274 | host->node_count;
275 if (fi->protocol_version > 3) {
276 req->req.misc |=
277 (NODEID_TO_NODE
278 (host->irm_id)
279 << 8);
280 }
281
282 queue_complete_req(req);
283 }
284 }
285 }
286 }
287 spin_unlock_irqrestore(&host_info_lock, flags);
288}
289
290static void iso_receive(struct hpsb_host *host, int channel, quadlet_t * data,
291 size_t length)
292{
293 unsigned long flags;
294 struct host_info *hi;
295 struct file_info *fi;
296 struct pending_request *req, *req_next;
297 struct iso_block_store *ibs = NULL;
298 LIST_HEAD(reqs);
299
300 if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
301 HPSB_INFO("dropped iso packet");
302 return;
303 }
304
305 spin_lock_irqsave(&host_info_lock, flags);
306 hi = find_host_info(host);
307
308 if (hi != NULL) {
309 list_for_each_entry(fi, &hi->file_info_list, list) {
310 if (!(fi->listen_channels & (1ULL << channel)))
311 continue;
312
313 req = __alloc_pending_request(SLAB_ATOMIC);
314 if (!req)
315 break;
316
317 if (!ibs) {
318 ibs = kmalloc(sizeof(struct iso_block_store)
319 + length, SLAB_ATOMIC);
320 if (!ibs) {
321 kfree(req);
322 break;
323 }
324
325 atomic_add(length, &iso_buffer_size);
326 atomic_set(&ibs->refcount, 0);
327 ibs->data_size = length;
328 memcpy(ibs->data, data, length);
329 }
330
331 atomic_inc(&ibs->refcount);
332
333 req->file_info = fi;
334 req->ibs = ibs;
335 req->data = ibs->data;
336 req->req.type = RAW1394_REQ_ISO_RECEIVE;
337 req->req.generation = get_hpsb_generation(host);
338 req->req.misc = 0;
339 req->req.recvb = ptr2int(fi->iso_buffer);
340 req->req.length = min(length, fi->iso_buffer_length);
341
342 list_add_tail(&req->list, &reqs);
343 }
344 }
345 spin_unlock_irqrestore(&host_info_lock, flags);
346
347 list_for_each_entry_safe(req, req_next, &reqs, list)
348 queue_complete_req(req);
349}
350
351static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
352 int cts, u8 * data, size_t length)
353{
354 unsigned long flags;
355 struct host_info *hi;
356 struct file_info *fi;
357 struct pending_request *req, *req_next;
358 struct iso_block_store *ibs = NULL;
359 LIST_HEAD(reqs);
360
361 if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
362 HPSB_INFO("dropped fcp request");
363 return;
364 }
365
366 spin_lock_irqsave(&host_info_lock, flags);
367 hi = find_host_info(host);
368
369 if (hi != NULL) {
370 list_for_each_entry(fi, &hi->file_info_list, list) {
371 if (!fi->fcp_buffer)
372 continue;
373
374 req = __alloc_pending_request(SLAB_ATOMIC);
375 if (!req)
376 break;
377
378 if (!ibs) {
379 ibs = kmalloc(sizeof(struct iso_block_store)
380 + length, SLAB_ATOMIC);
381 if (!ibs) {
382 kfree(req);
383 break;
384 }
385
386 atomic_add(length, &iso_buffer_size);
387 atomic_set(&ibs->refcount, 0);
388 ibs->data_size = length;
389 memcpy(ibs->data, data, length);
390 }
391
392 atomic_inc(&ibs->refcount);
393
394 req->file_info = fi;
395 req->ibs = ibs;
396 req->data = ibs->data;
397 req->req.type = RAW1394_REQ_FCP_REQUEST;
398 req->req.generation = get_hpsb_generation(host);
399 req->req.misc = nodeid | (direction << 16);
400 req->req.recvb = ptr2int(fi->fcp_buffer);
401 req->req.length = length;
402
403 list_add_tail(&req->list, &reqs);
404 }
405 }
406 spin_unlock_irqrestore(&host_info_lock, flags);
407
408 list_for_each_entry_safe(req, req_next, &reqs, list)
409 queue_complete_req(req);
410}
411
412static ssize_t raw1394_read(struct file *file, char __user * buffer,
413 size_t count, loff_t * offset_is_ignored)
414{
415 struct file_info *fi = (struct file_info *)file->private_data;
416 struct list_head *lh;
417 struct pending_request *req;
418 ssize_t ret;
419
420 if (count != sizeof(struct raw1394_request)) {
421 return -EINVAL;
422 }
423
424 if (!access_ok(VERIFY_WRITE, buffer, count)) {
425 return -EFAULT;
426 }
427
428 if (file->f_flags & O_NONBLOCK) {
429 if (down_trylock(&fi->complete_sem)) {
430 return -EAGAIN;
431 }
432 } else {
433 if (down_interruptible(&fi->complete_sem)) {
434 return -ERESTARTSYS;
435 }
436 }
437
438 spin_lock_irq(&fi->reqlists_lock);
439 lh = fi->req_complete.next;
440 list_del(lh);
441 spin_unlock_irq(&fi->reqlists_lock);
442
443 req = list_entry(lh, struct pending_request, list);
444
445 if (req->req.length) {
446 if (copy_to_user(int2ptr(req->req.recvb), req->data,
447 req->req.length)) {
448 req->req.error = RAW1394_ERROR_MEMFAULT;
449 }
450 }
451 if (copy_to_user(buffer, &req->req, sizeof(req->req))) {
452 ret = -EFAULT;
453 goto out;
454 }
455
456 ret = (ssize_t) sizeof(struct raw1394_request);
457 out:
458 free_pending_request(req);
459 return ret;
460}
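
/*
 * Editor's illustrative sketch (not from the original file): from user
 * space, each read() on the raw1394 character device must request exactly
 * sizeof(struct raw1394_request) and delivers exactly one completed
 * request, e.g. (assuming the usual /dev/raw1394 node; handle() is a
 * placeholder):
 *
 *	struct raw1394_request r;
 *	int fd = open("/dev/raw1394", O_RDWR);
 *
 *	if (fd >= 0 && read(fd, &r, sizeof(r)) == sizeof(r))
 *		handle(r.type, r.error, r.length);
 *
 * With O_NONBLOCK the call fails with EAGAIN while no completed request
 * is queued, as implemented by the down_trylock() path above.
 */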
461
462static int state_opened(struct file_info *fi, struct pending_request *req)
463{
464 if (req->req.type == RAW1394_REQ_INITIALIZE) {
465 switch (req->req.misc) {
466 case RAW1394_KERNELAPI_VERSION:
467 case 3:
468 fi->state = initialized;
469 fi->protocol_version = req->req.misc;
470 req->req.error = RAW1394_ERROR_NONE;
471 req->req.generation = atomic_read(&internal_generation);
472 break;
473
474 default:
475 req->req.error = RAW1394_ERROR_COMPAT;
476 req->req.misc = RAW1394_KERNELAPI_VERSION;
477 }
478 } else {
479 req->req.error = RAW1394_ERROR_STATE_ORDER;
480 }
481
482 req->req.length = 0;
483 queue_complete_req(req);
484 return sizeof(struct raw1394_request);
485}
486
487static int state_initialized(struct file_info *fi, struct pending_request *req)
488{
489 struct host_info *hi;
490 struct raw1394_khost_list *khl;
491
492 if (req->req.generation != atomic_read(&internal_generation)) {
493 req->req.error = RAW1394_ERROR_GENERATION;
494 req->req.generation = atomic_read(&internal_generation);
495 req->req.length = 0;
496 queue_complete_req(req);
497 return sizeof(struct raw1394_request);
498 }
499
500 switch (req->req.type) {
501 case RAW1394_REQ_LIST_CARDS:
502 spin_lock_irq(&host_info_lock);
503 khl = kmalloc(sizeof(struct raw1394_khost_list) * host_count,
504 SLAB_ATOMIC);
505
506 if (khl != NULL) {
507 req->req.misc = host_count;
508 req->data = (quadlet_t *) khl;
509
510 list_for_each_entry(hi, &host_info_list, list) {
511 khl->nodes = hi->host->node_count;
512 strcpy(khl->name, hi->host->driver->name);
513 khl++;
514 }
515 }
516 spin_unlock_irq(&host_info_lock);
517
518 if (khl != NULL) {
519 req->req.error = RAW1394_ERROR_NONE;
520 req->req.length = min(req->req.length,
521 (u32) (sizeof
522 (struct raw1394_khost_list)
523 * req->req.misc));
524 req->free_data = 1;
525 } else {
526 return -ENOMEM;
527 }
528 break;
529
530 case RAW1394_REQ_SET_CARD:
531 spin_lock_irq(&host_info_lock);
532 if (req->req.misc < host_count) {
533 list_for_each_entry(hi, &host_info_list, list) {
534 if (!req->req.misc--)
535 break;
536 }
537 get_device(&hi->host->device); // XXX Need to handle failure case
538 list_add_tail(&fi->list, &hi->file_info_list);
539 fi->host = hi->host;
540 fi->state = connected;
541
542 req->req.error = RAW1394_ERROR_NONE;
543 req->req.generation = get_hpsb_generation(fi->host);
544 req->req.misc = (fi->host->node_id << 16)
545 | fi->host->node_count;
546 if (fi->protocol_version > 3) {
547 req->req.misc |=
548 NODEID_TO_NODE(fi->host->irm_id) << 8;
549 }
550 } else {
551 req->req.error = RAW1394_ERROR_INVALID_ARG;
552 }
553 spin_unlock_irq(&host_info_lock);
554
555 req->req.length = 0;
556 break;
557
558 default:
559 req->req.error = RAW1394_ERROR_STATE_ORDER;
560 req->req.length = 0;
561 break;
562 }
563
564 queue_complete_req(req);
565 return sizeof(struct raw1394_request);
566}
567
568static void handle_iso_listen(struct file_info *fi, struct pending_request *req)
569{
570 int channel = req->req.misc;
571
572 spin_lock_irq(&host_info_lock);
573 if ((channel > 63) || (channel < -64)) {
574 req->req.error = RAW1394_ERROR_INVALID_ARG;
575 } else if (channel >= 0) {
576 /* allocate channel req.misc */
577 if (fi->listen_channels & (1ULL << channel)) {
578 req->req.error = RAW1394_ERROR_ALREADY;
579 } else {
580 if (hpsb_listen_channel
581 (&raw1394_highlevel, fi->host, channel)) {
582 req->req.error = RAW1394_ERROR_ALREADY;
583 } else {
584 fi->listen_channels |= 1ULL << channel;
585 fi->iso_buffer = int2ptr(req->req.recvb);
586 fi->iso_buffer_length = req->req.length;
587 }
588 }
589 } else {
590 /* deallocate channel (one's complement neg) req.misc */
591 channel = ~channel;
592
593 if (fi->listen_channels & (1ULL << channel)) {
594 hpsb_unlisten_channel(&raw1394_highlevel, fi->host,
595 channel);
596 fi->listen_channels &= ~(1ULL << channel);
597 } else {
598 req->req.error = RAW1394_ERROR_INVALID_ARG;
599 }
600 }
601
602 req->req.length = 0;
603 queue_complete_req(req);
604 spin_unlock_irq(&host_info_lock);
605}
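
/*
 * Editor's illustrative sketch (not from the original file): the
 * "deallocate" branch above relies on the client encoding the channel as
 * its one's complement, e.g. listening on channel 5 uses misc == 5 and
 * stopping uses misc == ~5 (that is, -6); the handler recovers the
 * channel number with channel = ~channel.
 */
static inline int example_unlisten_misc(int channel)
{
	return ~channel;	/* e.g. 5 -> -6; applying ~ again yields 5 */
}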
606
607static void handle_fcp_listen(struct file_info *fi, struct pending_request *req)
608{
609 if (req->req.misc) {
610 if (fi->fcp_buffer) {
611 req->req.error = RAW1394_ERROR_ALREADY;
612 } else {
613 fi->fcp_buffer = int2ptr(req->req.recvb);
614 }
615 } else {
616 if (!fi->fcp_buffer) {
617 req->req.error = RAW1394_ERROR_ALREADY;
618 } else {
619 fi->fcp_buffer = NULL;
620 }
621 }
622
623 req->req.length = 0;
624 queue_complete_req(req);
625}
626
627static int handle_async_request(struct file_info *fi,
628 struct pending_request *req, int node)
629{
630 struct hpsb_packet *packet = NULL;
631 u64 addr = req->req.address & 0xffffffffffffULL;
632
633 switch (req->req.type) {
634 case RAW1394_REQ_ASYNC_READ:
635 DBGMSG("read_request called");
636 packet =
637 hpsb_make_readpacket(fi->host, node, addr, req->req.length);
638
639 if (!packet)
640 return -ENOMEM;
641
642 if (req->req.length == 4)
643 req->data = &packet->header[3];
644 else
645 req->data = packet->data;
646
647 break;
648
649 case RAW1394_REQ_ASYNC_WRITE:
650 DBGMSG("write_request called");
651
652 packet = hpsb_make_writepacket(fi->host, node, addr, NULL,
653 req->req.length);
654 if (!packet)
655 return -ENOMEM;
656
657 if (req->req.length == 4) {
658 if (copy_from_user
659 (&packet->header[3], int2ptr(req->req.sendb),
660 req->req.length))
661 req->req.error = RAW1394_ERROR_MEMFAULT;
662 } else {
663 if (copy_from_user
664 (packet->data, int2ptr(req->req.sendb),
665 req->req.length))
666 req->req.error = RAW1394_ERROR_MEMFAULT;
667 }
668
669 req->req.length = 0;
670 break;
671
672 case RAW1394_REQ_ASYNC_STREAM:
673 DBGMSG("stream_request called");
674
675 packet =
676 hpsb_make_streampacket(fi->host, NULL, req->req.length,
677 node & 0x3f /*channel */ ,
678 (req->req.misc >> 16) & 0x3,
679 req->req.misc & 0xf);
680 if (!packet)
681 return -ENOMEM;
682
683 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
684 req->req.length))
685 req->req.error = RAW1394_ERROR_MEMFAULT;
686
687 req->req.length = 0;
688 break;
689
690 case RAW1394_REQ_LOCK:
691 DBGMSG("lock_request called");
692 if ((req->req.misc == EXTCODE_FETCH_ADD)
693 || (req->req.misc == EXTCODE_LITTLE_ADD)) {
694 if (req->req.length != 4) {
695 req->req.error = RAW1394_ERROR_INVALID_ARG;
696 break;
697 }
698 } else {
699 if (req->req.length != 8) {
700 req->req.error = RAW1394_ERROR_INVALID_ARG;
701 break;
702 }
703 }
704
705 packet = hpsb_make_lockpacket(fi->host, node, addr,
706 req->req.misc, NULL, 0);
707 if (!packet)
708 return -ENOMEM;
709
710 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
711 req->req.length)) {
712 req->req.error = RAW1394_ERROR_MEMFAULT;
713 break;
714 }
715
716 req->data = packet->data;
717 req->req.length = 4;
718 break;
719
720 case RAW1394_REQ_LOCK64:
721 DBGMSG("lock64_request called");
722 if ((req->req.misc == EXTCODE_FETCH_ADD)
723 || (req->req.misc == EXTCODE_LITTLE_ADD)) {
724 if (req->req.length != 8) {
725 req->req.error = RAW1394_ERROR_INVALID_ARG;
726 break;
727 }
728 } else {
729 if (req->req.length != 16) {
730 req->req.error = RAW1394_ERROR_INVALID_ARG;
731 break;
732 }
733 }
734 packet = hpsb_make_lock64packet(fi->host, node, addr,
735 req->req.misc, NULL, 0);
736 if (!packet)
737 return -ENOMEM;
738
739 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
740 req->req.length)) {
741 req->req.error = RAW1394_ERROR_MEMFAULT;
742 break;
743 }
744
745 req->data = packet->data;
746 req->req.length = 8;
747 break;
748
749 default:
750 req->req.error = RAW1394_ERROR_STATE_ORDER;
751 }
752
753 req->packet = packet;
754
755 if (req->req.error) {
756 req->req.length = 0;
757 queue_complete_req(req);
758 return sizeof(struct raw1394_request);
759 }
760
761 hpsb_set_packet_complete_task(packet,
762 (void (*)(void *))queue_complete_cb, req);
763
764 spin_lock_irq(&fi->reqlists_lock);
765 list_add_tail(&req->list, &fi->req_pending);
766 spin_unlock_irq(&fi->reqlists_lock);
767
768 packet->generation = req->req.generation;
769
770 if (hpsb_send_packet(packet) < 0) {
771 req->req.error = RAW1394_ERROR_SEND_ERROR;
772 req->req.length = 0;
773 hpsb_free_tlabel(packet);
774 queue_complete_req(req);
775 }
776 return sizeof(struct raw1394_request);
777}
778
779static int handle_iso_send(struct file_info *fi, struct pending_request *req,
780 int channel)
781{
782 struct hpsb_packet *packet;
783
784 packet = hpsb_make_isopacket(fi->host, req->req.length, channel & 0x3f,
785 (req->req.misc >> 16) & 0x3,
786 req->req.misc & 0xf);
787 if (!packet)
788 return -ENOMEM;
789
790 packet->speed_code = req->req.address & 0x3;
791
792 req->packet = packet;
793
794 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
795 req->req.length)) {
796 req->req.error = RAW1394_ERROR_MEMFAULT;
797 req->req.length = 0;
798 queue_complete_req(req);
799 return sizeof(struct raw1394_request);
800 }
801
802 req->req.length = 0;
803 hpsb_set_packet_complete_task(packet,
804 (void (*)(void *))queue_complete_req,
805 req);
806
807 spin_lock_irq(&fi->reqlists_lock);
808 list_add_tail(&req->list, &fi->req_pending);
809 spin_unlock_irq(&fi->reqlists_lock);
810
811 /* Update the generation of the packet just before sending. */
812 packet->generation = req->req.generation;
813
814 if (hpsb_send_packet(packet) < 0) {
815 req->req.error = RAW1394_ERROR_SEND_ERROR;
816 queue_complete_req(req);
817 }
818
819 return sizeof(struct raw1394_request);
820}
821
822static int handle_async_send(struct file_info *fi, struct pending_request *req)
823{
824 struct hpsb_packet *packet;
825 int header_length = req->req.misc & 0xffff;
826 int expect_response = req->req.misc >> 16;
827
828 if ((header_length > req->req.length) || (header_length < 12)) {
829 req->req.error = RAW1394_ERROR_INVALID_ARG;
830 req->req.length = 0;
831 queue_complete_req(req);
832 return sizeof(struct raw1394_request);
833 }
834
835 packet = hpsb_alloc_packet(req->req.length - header_length);
836 req->packet = packet;
837 if (!packet)
838 return -ENOMEM;
839
840 if (copy_from_user(packet->header, int2ptr(req->req.sendb),
841 header_length)) {
842 req->req.error = RAW1394_ERROR_MEMFAULT;
843 req->req.length = 0;
844 queue_complete_req(req);
845 return sizeof(struct raw1394_request);
846 }
847
848 if (copy_from_user
849 (packet->data, int2ptr(req->req.sendb) + header_length,
850 packet->data_size)) {
851 req->req.error = RAW1394_ERROR_MEMFAULT;
852 req->req.length = 0;
853 queue_complete_req(req);
854 return sizeof(struct raw1394_request);
855 }
856
857 packet->type = hpsb_async;
858 packet->node_id = packet->header[0] >> 16;
859 packet->tcode = (packet->header[0] >> 4) & 0xf;
860 packet->tlabel = (packet->header[0] >> 10) & 0x3f;
861 packet->host = fi->host;
862 packet->expect_response = expect_response;
863 packet->header_size = header_length;
864 packet->data_size = req->req.length - header_length;
865
866 req->req.length = 0;
867 hpsb_set_packet_complete_task(packet,
868 (void (*)(void *))queue_complete_cb, req);
869
870 spin_lock_irq(&fi->reqlists_lock);
871 list_add_tail(&req->list, &fi->req_pending);
872 spin_unlock_irq(&fi->reqlists_lock);
873
874 /* Update the generation of the packet just before sending. */
875 packet->generation = req->req.generation;
876
877 if (hpsb_send_packet(packet) < 0) {
878 req->req.error = RAW1394_ERROR_SEND_ERROR;
879 queue_complete_req(req);
880 }
881
882 return sizeof(struct raw1394_request);
883}
884
885static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
886 u64 addr, size_t length, u16 flags)
887{
888 struct pending_request *req;
889 struct host_info *hi;
890 struct file_info *fi = NULL;
891 struct list_head *entry;
892 struct arm_addr *arm_addr = NULL;
893 struct arm_request *arm_req = NULL;
894 struct arm_response *arm_resp = NULL;
895 int found = 0, size = 0, rcode = -1;
896 struct arm_request_response *arm_req_resp = NULL;
897
898 DBGMSG("arm_read called by node: %X"
899 "addr: %4.4x %8.8x length: %Zu", nodeid,
900 (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
901 length);
902 spin_lock(&host_info_lock);
903 hi = find_host_info(host); /* search address-entry */
904 if (hi != NULL) {
905 list_for_each_entry(fi, &hi->file_info_list, list) {
906 entry = fi->addr_list.next;
907 while (entry != &(fi->addr_list)) {
908 arm_addr =
909 list_entry(entry, struct arm_addr,
910 addr_list);
911 if (((arm_addr->start) <= (addr))
912 && ((arm_addr->end) >= (addr + length))) {
913 found = 1;
914 break;
915 }
916 entry = entry->next;
917 }
918 if (found) {
919 break;
920 }
921 }
922 }
923 rcode = -1;
924 if (!found) {
925 printk(KERN_ERR "raw1394: arm_read FAILED addr_entry not found"
926 " -> rcode_address_error\n");
927 spin_unlock(&host_info_lock);
928 return (RCODE_ADDRESS_ERROR);
929 } else {
930 DBGMSG("arm_read addr_entry FOUND");
931 }
932 if (arm_addr->rec_length < length) {
933 DBGMSG("arm_read blocklength too big -> rcode_data_error");
934 rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */
935 }
936 if (rcode == -1) {
937 if (arm_addr->access_rights & ARM_READ) {
938 if (!(arm_addr->client_transactions & ARM_READ)) {
939 memcpy(buffer,
940 (arm_addr->addr_space_buffer) + (addr -
941 (arm_addr->
942 start)),
943 length);
944 DBGMSG("arm_read -> (rcode_complete)");
945 rcode = RCODE_COMPLETE;
946 }
947 } else {
948 rcode = RCODE_TYPE_ERROR; /* function not allowed */
949 DBGMSG("arm_read -> rcode_type_error (access denied)");
950 }
951 }
952 if (arm_addr->notification_options & ARM_READ) {
953 DBGMSG("arm_read -> entering notification-section");
954 req = __alloc_pending_request(SLAB_ATOMIC);
955 if (!req) {
956 DBGMSG("arm_read -> rcode_conflict_error");
957 spin_unlock(&host_info_lock);
958 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
959 The request may be retried */
960 }
961 if (rcode == RCODE_COMPLETE) {
962 size =
963 sizeof(struct arm_request) +
964 sizeof(struct arm_response) +
965 length * sizeof(byte_t) +
966 sizeof(struct arm_request_response);
967 } else {
968 size =
969 sizeof(struct arm_request) +
970 sizeof(struct arm_response) +
971 sizeof(struct arm_request_response);
972 }
973 req->data = kmalloc(size, SLAB_ATOMIC);
974 if (!(req->data)) {
975 free_pending_request(req);
976 DBGMSG("arm_read -> rcode_conflict_error");
977 spin_unlock(&host_info_lock);
978 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
979 The request may be retried */
980 }
981 req->free_data = 1;
982 req->file_info = fi;
983 req->req.type = RAW1394_REQ_ARM;
984 req->req.generation = get_hpsb_generation(host);
985 req->req.misc =
986 (((length << 16) & (0xFFFF0000)) | (ARM_READ & 0xFF));
987 req->req.tag = arm_addr->arm_tag;
988 req->req.recvb = arm_addr->recvb;
989 req->req.length = size;
990 arm_req_resp = (struct arm_request_response *)(req->data);
991 arm_req = (struct arm_request *)((byte_t *) (req->data) +
992 (sizeof
993 (struct
994 arm_request_response)));
995 arm_resp =
996 (struct arm_response *)((byte_t *) (arm_req) +
997 (sizeof(struct arm_request)));
998 arm_req->buffer = NULL;
999 arm_resp->buffer = NULL;
1000 if (rcode == RCODE_COMPLETE) {
1001 byte_t *buf =
1002 (byte_t *) arm_resp + sizeof(struct arm_response);
1003 memcpy(buf,
1004 (arm_addr->addr_space_buffer) + (addr -
1005 (arm_addr->
1006 start)),
1007 length);
1008 arm_resp->buffer =
1009 int2ptr((arm_addr->recvb) +
1010 sizeof(struct arm_request_response) +
1011 sizeof(struct arm_request) +
1012 sizeof(struct arm_response));
1013 }
1014 arm_resp->buffer_length =
1015 (rcode == RCODE_COMPLETE) ? length : 0;
1016 arm_resp->response_code = rcode;
1017 arm_req->buffer_length = 0;
1018 arm_req->generation = req->req.generation;
1019 arm_req->extended_transaction_code = 0;
1020 arm_req->destination_offset = addr;
1021 arm_req->source_nodeid = nodeid;
1022 arm_req->destination_nodeid = host->node_id;
1023 arm_req->tlabel = (flags >> 10) & 0x3f;
1024 arm_req->tcode = (flags >> 4) & 0x0f;
1025 arm_req_resp->request = int2ptr((arm_addr->recvb) +
1026 sizeof(struct
1027 arm_request_response));
1028 arm_req_resp->response =
1029 int2ptr((arm_addr->recvb) +
1030 sizeof(struct arm_request_response) +
1031 sizeof(struct arm_request));
1032 queue_complete_req(req);
1033 }
1034 spin_unlock(&host_info_lock);
1035 return (rcode);
1036}
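
/*
 * Editor's illustrative sketch (not from the original file): the
 * notification block that arm_read() (and arm_write()/arm_lock() below)
 * describe to the client is laid out in the client's recvb buffer as
 *
 *	[struct arm_request_response][struct arm_request][struct arm_response][payload]
 *
 * so the user-space pointers stored above are simply recvb plus the
 * running sum of those sizes, as in this hypothetical helper.
 */
static inline u64 example_arm_payload_offset(u64 recvb)
{
	return recvb + sizeof(struct arm_request_response) +
	       sizeof(struct arm_request) + sizeof(struct arm_response);
}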
1037
1038static int arm_write(struct hpsb_host *host, int nodeid, int destid,
1039 quadlet_t * data, u64 addr, size_t length, u16 flags)
1040{
1041 struct pending_request *req;
1042 struct host_info *hi;
1043 struct file_info *fi = NULL;
1044 struct list_head *entry;
1045 struct arm_addr *arm_addr = NULL;
1046 struct arm_request *arm_req = NULL;
1047 struct arm_response *arm_resp = NULL;
1048 int found = 0, size = 0, rcode = -1, length_conflict = 0;
1049 struct arm_request_response *arm_req_resp = NULL;
1050
1051 DBGMSG("arm_write called by node: %X"
1052 "addr: %4.4x %8.8x length: %Zu", nodeid,
1053 (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
1054 length);
1055 spin_lock(&host_info_lock);
1056 hi = find_host_info(host); /* search address-entry */
1057 if (hi != NULL) {
1058 list_for_each_entry(fi, &hi->file_info_list, list) {
1059 entry = fi->addr_list.next;
1060 while (entry != &(fi->addr_list)) {
1061 arm_addr =
1062 list_entry(entry, struct arm_addr,
1063 addr_list);
1064 if (((arm_addr->start) <= (addr))
1065 && ((arm_addr->end) >= (addr + length))) {
1066 found = 1;
1067 break;
1068 }
1069 entry = entry->next;
1070 }
1071 if (found) {
1072 break;
1073 }
1074 }
1075 }
1076 rcode = -1;
1077 if (!found) {
1078 printk(KERN_ERR "raw1394: arm_write FAILED addr_entry not found"
1079 " -> rcode_address_error\n");
1080 spin_unlock(&host_info_lock);
1081 return (RCODE_ADDRESS_ERROR);
1082 } else {
1083 DBGMSG("arm_write addr_entry FOUND");
1084 }
1085 if (arm_addr->rec_length < length) {
1086 DBGMSG("arm_write blocklength too big -> rcode_data_error");
1087 length_conflict = 1;
1088 rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */
1089 }
1090 if (rcode == -1) {
1091 if (arm_addr->access_rights & ARM_WRITE) {
1092 if (!(arm_addr->client_transactions & ARM_WRITE)) {
1093 memcpy((arm_addr->addr_space_buffer) +
1094 (addr - (arm_addr->start)), data,
1095 length);
1096 DBGMSG("arm_write -> (rcode_complete)");
1097 rcode = RCODE_COMPLETE;
1098 }
1099 } else {
1100 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1101 DBGMSG("arm_write -> rcode_type_error (access denied)");
1102 }
1103 }
1104 if (arm_addr->notification_options & ARM_WRITE) {
1105 DBGMSG("arm_write -> entering notification-section");
1106 req = __alloc_pending_request(SLAB_ATOMIC);
1107 if (!req) {
1108 DBGMSG("arm_write -> rcode_conflict_error");
1109 spin_unlock(&host_info_lock);
1110 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1111	The request may be retried */
1112 }
1113 size =
1114 sizeof(struct arm_request) + sizeof(struct arm_response) +
1115 (length) * sizeof(byte_t) +
1116 sizeof(struct arm_request_response);
1117 req->data = kmalloc(size, SLAB_ATOMIC);
1118 if (!(req->data)) {
1119 free_pending_request(req);
1120 DBGMSG("arm_write -> rcode_conflict_error");
1121 spin_unlock(&host_info_lock);
1122 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1123 The request may be retried */
1124 }
1125 req->free_data = 1;
1126 req->file_info = fi;
1127 req->req.type = RAW1394_REQ_ARM;
1128 req->req.generation = get_hpsb_generation(host);
1129 req->req.misc =
1130 (((length << 16) & (0xFFFF0000)) | (ARM_WRITE & 0xFF));
1131 req->req.tag = arm_addr->arm_tag;
1132 req->req.recvb = arm_addr->recvb;
1133 req->req.length = size;
1134 arm_req_resp = (struct arm_request_response *)(req->data);
1135 arm_req = (struct arm_request *)((byte_t *) (req->data) +
1136 (sizeof
1137 (struct
1138 arm_request_response)));
1139 arm_resp =
1140 (struct arm_response *)((byte_t *) (arm_req) +
1141 (sizeof(struct arm_request)));
1142 arm_resp->buffer = NULL;
1143 memcpy((byte_t *) arm_resp + sizeof(struct arm_response),
1144 data, length);
1145 arm_req->buffer = int2ptr((arm_addr->recvb) +
1146 sizeof(struct arm_request_response) +
1147 sizeof(struct arm_request) +
1148 sizeof(struct arm_response));
1149 arm_req->buffer_length = length;
1150 arm_req->generation = req->req.generation;
1151 arm_req->extended_transaction_code = 0;
1152 arm_req->destination_offset = addr;
1153 arm_req->source_nodeid = nodeid;
1154 arm_req->destination_nodeid = destid;
1155 arm_req->tlabel = (flags >> 10) & 0x3f;
1156 arm_req->tcode = (flags >> 4) & 0x0f;
1157 arm_resp->buffer_length = 0;
1158 arm_resp->response_code = rcode;
1159 arm_req_resp->request = int2ptr((arm_addr->recvb) +
1160 sizeof(struct
1161 arm_request_response));
1162 arm_req_resp->response =
1163 int2ptr((arm_addr->recvb) +
1164 sizeof(struct arm_request_response) +
1165 sizeof(struct arm_request));
1166 queue_complete_req(req);
1167 }
1168 spin_unlock(&host_info_lock);
1169 return (rcode);
1170}
1171
1172static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
1173 u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
1174 u16 flags)
1175{
1176 struct pending_request *req;
1177 struct host_info *hi;
1178 struct file_info *fi = NULL;
1179 struct list_head *entry;
1180 struct arm_addr *arm_addr = NULL;
1181 struct arm_request *arm_req = NULL;
1182 struct arm_response *arm_resp = NULL;
1183 int found = 0, size = 0, rcode = -1;
1184 quadlet_t old, new;
1185 struct arm_request_response *arm_req_resp = NULL;
1186
1187 if (((ext_tcode & 0xFF) == EXTCODE_FETCH_ADD) ||
1188 ((ext_tcode & 0xFF) == EXTCODE_LITTLE_ADD)) {
1189 DBGMSG("arm_lock called by node: %X "
1190 "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X",
1191 nodeid, (u16) ((addr >> 32) & 0xFFFF),
1192 (u32) (addr & 0xFFFFFFFF), ext_tcode & 0xFF,
1193 be32_to_cpu(data));
1194 } else {
1195 DBGMSG("arm_lock called by node: %X "
1196 "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X arg: %8.8X",
1197 nodeid, (u16) ((addr >> 32) & 0xFFFF),
1198 (u32) (addr & 0xFFFFFFFF), ext_tcode & 0xFF,
1199 be32_to_cpu(data), be32_to_cpu(arg));
1200 }
1201 spin_lock(&host_info_lock);
1202 hi = find_host_info(host); /* search address-entry */
1203 if (hi != NULL) {
1204 list_for_each_entry(fi, &hi->file_info_list, list) {
1205 entry = fi->addr_list.next;
1206 while (entry != &(fi->addr_list)) {
1207 arm_addr =
1208 list_entry(entry, struct arm_addr,
1209 addr_list);
1210 if (((arm_addr->start) <= (addr))
1211 && ((arm_addr->end) >=
1212 (addr + sizeof(*store)))) {
1213 found = 1;
1214 break;
1215 }
1216 entry = entry->next;
1217 }
1218 if (found) {
1219 break;
1220 }
1221 }
1222 }
1223 rcode = -1;
1224 if (!found) {
1225 printk(KERN_ERR "raw1394: arm_lock FAILED addr_entry not found"
1226 " -> rcode_address_error\n");
1227 spin_unlock(&host_info_lock);
1228 return (RCODE_ADDRESS_ERROR);
1229 } else {
1230 DBGMSG("arm_lock addr_entry FOUND");
1231 }
1232 if (rcode == -1) {
1233 if (arm_addr->access_rights & ARM_LOCK) {
1234 if (!(arm_addr->client_transactions & ARM_LOCK)) {
1235 memcpy(&old,
1236 (arm_addr->addr_space_buffer) + (addr -
1237 (arm_addr->
1238 start)),
1239 sizeof(old));
1240 switch (ext_tcode) {
1241 case (EXTCODE_MASK_SWAP):
1242 new = data | (old & ~arg);
1243 break;
1244 case (EXTCODE_COMPARE_SWAP):
1245 if (old == arg) {
1246 new = data;
1247 } else {
1248 new = old;
1249 }
1250 break;
1251 case (EXTCODE_FETCH_ADD):
1252 new =
1253 cpu_to_be32(be32_to_cpu(data) +
1254 be32_to_cpu(old));
1255 break;
1256 case (EXTCODE_LITTLE_ADD):
1257 new =
1258 cpu_to_le32(le32_to_cpu(data) +
1259 le32_to_cpu(old));
1260 break;
1261 case (EXTCODE_BOUNDED_ADD):
1262 if (old != arg) {
1263 new =
1264 cpu_to_be32(be32_to_cpu
1265 (data) +
1266 be32_to_cpu
1267 (old));
1268 } else {
1269 new = old;
1270 }
1271 break;
1272 case (EXTCODE_WRAP_ADD):
1273 if (old != arg) {
1274 new =
1275 cpu_to_be32(be32_to_cpu
1276 (data) +
1277 be32_to_cpu
1278 (old));
1279 } else {
1280 new = data;
1281 }
1282 break;
1283 default:
1284 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1285 printk(KERN_ERR
1286 "raw1394: arm_lock FAILED "
1287 "ext_tcode not allowed -> rcode_type_error\n");
1288 break;
1289 } /*switch */
1290 if (rcode == -1) {
1291 DBGMSG("arm_lock -> (rcode_complete)");
1292 rcode = RCODE_COMPLETE;
1293 memcpy(store, &old, sizeof(*store));
1294 memcpy((arm_addr->addr_space_buffer) +
1295 (addr - (arm_addr->start)),
1296 &new, sizeof(*store));
1297 }
1298 }
1299 } else {
1300 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1301 DBGMSG("arm_lock -> rcode_type_error (access denied)");
1302 }
1303 }
1304 if (arm_addr->notification_options & ARM_LOCK) {
1305 byte_t *buf1, *buf2;
1306 DBGMSG("arm_lock -> entering notification-section");
1307 req = __alloc_pending_request(SLAB_ATOMIC);
1308 if (!req) {
1309 DBGMSG("arm_lock -> rcode_conflict_error");
1310 spin_unlock(&host_info_lock);
1311 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1312 The request may be retried */
1313 }
1314 size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response); /* maximum */
1315 req->data = kmalloc(size, SLAB_ATOMIC);
1316 if (!(req->data)) {
1317 free_pending_request(req);
1318 DBGMSG("arm_lock -> rcode_conflict_error");
1319 spin_unlock(&host_info_lock);
1320 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1321 The request may be retried */
1322 }
1323 req->free_data = 1;
1324 arm_req_resp = (struct arm_request_response *)(req->data);
1325 arm_req = (struct arm_request *)((byte_t *) (req->data) +
1326 (sizeof
1327 (struct
1328 arm_request_response)));
1329 arm_resp =
1330 (struct arm_response *)((byte_t *) (arm_req) +
1331 (sizeof(struct arm_request)));
1332 buf1 = (byte_t *) arm_resp + sizeof(struct arm_response);
1333 buf2 = buf1 + 2 * sizeof(*store);
1334 if ((ext_tcode == EXTCODE_FETCH_ADD) ||
1335 (ext_tcode == EXTCODE_LITTLE_ADD)) {
1336 arm_req->buffer_length = sizeof(*store);
1337 memcpy(buf1, &data, sizeof(*store));
1338
1339 } else {
1340 arm_req->buffer_length = 2 * sizeof(*store);
1341 memcpy(buf1, &arg, sizeof(*store));
1342 memcpy(buf1 + sizeof(*store), &data, sizeof(*store));
1343 }
1344 if (rcode == RCODE_COMPLETE) {
1345 arm_resp->buffer_length = sizeof(*store);
1346 memcpy(buf2, &old, sizeof(*store));
1347 } else {
1348 arm_resp->buffer_length = 0;
1349 }
1350 req->file_info = fi;
1351 req->req.type = RAW1394_REQ_ARM;
1352 req->req.generation = get_hpsb_generation(host);
1353 req->req.misc = ((((sizeof(*store)) << 16) & (0xFFFF0000)) |
1354 (ARM_LOCK & 0xFF));
1355 req->req.tag = arm_addr->arm_tag;
1356 req->req.recvb = arm_addr->recvb;
1357 req->req.length = size;
1358 arm_req->generation = req->req.generation;
1359 arm_req->extended_transaction_code = ext_tcode;
1360 arm_req->destination_offset = addr;
1361 arm_req->source_nodeid = nodeid;
1362 arm_req->destination_nodeid = host->node_id;
1363 arm_req->tlabel = (flags >> 10) & 0x3f;
1364 arm_req->tcode = (flags >> 4) & 0x0f;
1365 arm_resp->response_code = rcode;
1366 arm_req_resp->request = int2ptr((arm_addr->recvb) +
1367 sizeof(struct
1368 arm_request_response));
1369 arm_req_resp->response =
1370 int2ptr((arm_addr->recvb) +
1371 sizeof(struct arm_request_response) +
1372 sizeof(struct arm_request));
1373 arm_req->buffer =
1374 int2ptr((arm_addr->recvb) +
1375 sizeof(struct arm_request_response) +
1376 sizeof(struct arm_request) +
1377 sizeof(struct arm_response));
1378 arm_resp->buffer =
1379 int2ptr((arm_addr->recvb) +
1380 sizeof(struct arm_request_response) +
1381 sizeof(struct arm_request) +
1382 sizeof(struct arm_response) + 2 * sizeof(*store));
1383 queue_complete_req(req);
1384 }
1385 spin_unlock(&host_info_lock);
1386 return (rcode);
1387}
1388
1389static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
1390 u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
1391 u16 flags)
1392{
1393 struct pending_request *req;
1394 struct host_info *hi;
1395 struct file_info *fi = NULL;
1396 struct list_head *entry;
1397 struct arm_addr *arm_addr = NULL;
1398 struct arm_request *arm_req = NULL;
1399 struct arm_response *arm_resp = NULL;
1400 int found = 0, size = 0, rcode = -1;
1401 octlet_t old, new;
1402 struct arm_request_response *arm_req_resp = NULL;
1403
1404 if (((ext_tcode & 0xFF) == EXTCODE_FETCH_ADD) ||
1405 ((ext_tcode & 0xFF) == EXTCODE_LITTLE_ADD)) {
1406 DBGMSG("arm_lock64 called by node: %X "
1407 "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X %8.8X ",
1408 nodeid, (u16) ((addr >> 32) & 0xFFFF),
1409 (u32) (addr & 0xFFFFFFFF),
1410 ext_tcode & 0xFF,
1411 (u32) ((be64_to_cpu(data) >> 32) & 0xFFFFFFFF),
1412 (u32) (be64_to_cpu(data) & 0xFFFFFFFF));
1413 } else {
1414 DBGMSG("arm_lock64 called by node: %X "
1415 "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X %8.8X arg: "
1416 "%8.8X %8.8X ",
1417 nodeid, (u16) ((addr >> 32) & 0xFFFF),
1418 (u32) (addr & 0xFFFFFFFF),
1419 ext_tcode & 0xFF,
1420 (u32) ((be64_to_cpu(data) >> 32) & 0xFFFFFFFF),
1421 (u32) (be64_to_cpu(data) & 0xFFFFFFFF),
1422 (u32) ((be64_to_cpu(arg) >> 32) & 0xFFFFFFFF),
1423 (u32) (be64_to_cpu(arg) & 0xFFFFFFFF));
1424 }
1425 spin_lock(&host_info_lock);
1426 hi = find_host_info(host); /* search the address entries of this host's file_infos */
1427 if (hi != NULL) {
1428 list_for_each_entry(fi, &hi->file_info_list, list) {
1429 entry = fi->addr_list.next;
1430 while (entry != &(fi->addr_list)) {
1431 arm_addr =
1432 list_entry(entry, struct arm_addr,
1433 addr_list);
1434 if (((arm_addr->start) <= (addr))
1435 && ((arm_addr->end) >=
1436 (addr + sizeof(*store)))) {
1437 found = 1;
1438 break;
1439 }
1440 entry = entry->next;
1441 }
1442 if (found) {
1443 break;
1444 }
1445 }
1446 }
1447 rcode = -1;
1448 if (!found) {
1449 printk(KERN_ERR
1450 "raw1394: arm_lock64 FAILED addr_entry not found"
1451 " -> rcode_address_error\n");
1452 spin_unlock(&host_info_lock);
1453 return (RCODE_ADDRESS_ERROR);
1454 } else {
1455 DBGMSG("arm_lock64 addr_entry FOUND");
1456 }
1457 if (rcode == -1) {
1458 if (arm_addr->access_rights & ARM_LOCK) {
1459 if (!(arm_addr->client_transactions & ARM_LOCK)) {
1460 memcpy(&old,
1461 (arm_addr->addr_space_buffer) + (addr -
1462 (arm_addr->
1463 start)),
1464 sizeof(old));
1465 switch (ext_tcode) {
1466 case (EXTCODE_MASK_SWAP):
1467 new = data | (old & ~arg);
1468 break;
1469 case (EXTCODE_COMPARE_SWAP):
1470 if (old == arg) {
1471 new = data;
1472 } else {
1473 new = old;
1474 }
1475 break;
1476 case (EXTCODE_FETCH_ADD):
1477 new =
1478 cpu_to_be64(be64_to_cpu(data) +
1479 be64_to_cpu(old));
1480 break;
1481 case (EXTCODE_LITTLE_ADD):
1482 new =
1483 cpu_to_le64(le64_to_cpu(data) +
1484 le64_to_cpu(old));
1485 break;
1486 case (EXTCODE_BOUNDED_ADD):
1487 if (old != arg) {
1488 new =
1489 cpu_to_be64(be64_to_cpu
1490 (data) +
1491 be64_to_cpu
1492 (old));
1493 } else {
1494 new = old;
1495 }
1496 break;
1497 case (EXTCODE_WRAP_ADD):
1498 if (old != arg) {
1499 new =
1500 cpu_to_be64(be64_to_cpu
1501 (data) +
1502 be64_to_cpu
1503 (old));
1504 } else {
1505 new = data;
1506 }
1507 break;
1508 default:
1509 printk(KERN_ERR
1510 "raw1394: arm_lock64 FAILED "
1511 "ext_tcode not allowed -> rcode_type_error\n");
1512 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1513 break;
1514 } /*switch */
1515 if (rcode == -1) {
1516 DBGMSG
1517 ("arm_lock64 -> (rcode_complete)");
1518 rcode = RCODE_COMPLETE;
1519 memcpy(store, &old, sizeof(*store));
1520 memcpy((arm_addr->addr_space_buffer) +
1521 (addr - (arm_addr->start)),
1522 &new, sizeof(*store));
1523 }
1524 }
1525 } else {
1526 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1527 DBGMSG
1528 ("arm_lock64 -> rcode_type_error (access denied)");
1529 }
1530 }
1531 if (arm_addr->notification_options & ARM_LOCK) {
1532 byte_t *buf1, *buf2;
1533 DBGMSG("arm_lock64 -> entering notification-section");
1534 req = __alloc_pending_request(SLAB_ATOMIC);
1535 if (!req) {
1536 spin_unlock(&host_info_lock);
1537 DBGMSG("arm_lock64 -> rcode_conflict_error");
1538 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1539 The request may be retried */
1540 }
1541 size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response); /* maximum */
1542 req->data = kmalloc(size, SLAB_ATOMIC);
1543 if (!(req->data)) {
1544 free_pending_request(req);
1545 spin_unlock(&host_info_lock);
1546 DBGMSG("arm_lock64 -> rcode_conflict_error");
1547 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1548 The request may be retried */
1549 }
1550 req->free_data = 1;
1551 arm_req_resp = (struct arm_request_response *)(req->data);
1552 arm_req = (struct arm_request *)((byte_t *) (req->data) +
1553 (sizeof
1554 (struct
1555 arm_request_response)));
1556 arm_resp =
1557 (struct arm_response *)((byte_t *) (arm_req) +
1558 (sizeof(struct arm_request)));
1559 buf1 = (byte_t *) arm_resp + sizeof(struct arm_response);
1560 buf2 = buf1 + 2 * sizeof(*store);
1561 if ((ext_tcode == EXTCODE_FETCH_ADD) ||
1562 (ext_tcode == EXTCODE_LITTLE_ADD)) {
1563 arm_req->buffer_length = sizeof(*store);
1564 memcpy(buf1, &data, sizeof(*store));
1565
1566 } else {
1567 arm_req->buffer_length = 2 * sizeof(*store);
1568 memcpy(buf1, &arg, sizeof(*store));
1569 memcpy(buf1 + sizeof(*store), &data, sizeof(*store));
1570 }
1571 if (rcode == RCODE_COMPLETE) {
1572 arm_resp->buffer_length = sizeof(*store);
1573 memcpy(buf2, &old, sizeof(*store));
1574 } else {
1575 arm_resp->buffer_length = 0;
1576 }
1577 req->file_info = fi;
1578 req->req.type = RAW1394_REQ_ARM;
1579 req->req.generation = get_hpsb_generation(host);
1580 req->req.misc = ((((sizeof(*store)) << 16) & (0xFFFF0000)) |
1581 (ARM_LOCK & 0xFF));
1582 req->req.tag = arm_addr->arm_tag;
1583 req->req.recvb = arm_addr->recvb;
1584 req->req.length = size;
1585 arm_req->generation = req->req.generation;
1586 arm_req->extended_transaction_code = ext_tcode;
1587 arm_req->destination_offset = addr;
1588 arm_req->source_nodeid = nodeid;
1589 arm_req->destination_nodeid = host->node_id;
1590 arm_req->tlabel = (flags >> 10) & 0x3f;
1591 arm_req->tcode = (flags >> 4) & 0x0f;
1592 arm_resp->response_code = rcode;
1593 arm_req_resp->request = int2ptr((arm_addr->recvb) +
1594 sizeof(struct
1595 arm_request_response));
1596 arm_req_resp->response =
1597 int2ptr((arm_addr->recvb) +
1598 sizeof(struct arm_request_response) +
1599 sizeof(struct arm_request));
1600 arm_req->buffer =
1601 int2ptr((arm_addr->recvb) +
1602 sizeof(struct arm_request_response) +
1603 sizeof(struct arm_request) +
1604 sizeof(struct arm_response));
1605 arm_resp->buffer =
1606 int2ptr((arm_addr->recvb) +
1607 sizeof(struct arm_request_response) +
1608 sizeof(struct arm_request) +
1609 sizeof(struct arm_response) + 2 * sizeof(*store));
1610 queue_complete_req(req);
1611 }
1612 spin_unlock(&host_info_lock);
1613 return (rcode);
1614}
1615
1616static int arm_register(struct file_info *fi, struct pending_request *req)
1617{
1618 int retval;
1619 struct arm_addr *addr;
1620 struct host_info *hi;
1621 struct file_info *fi_hlp = NULL;
1622 struct list_head *entry;
1623 struct arm_addr *arm_addr = NULL;
1624 int same_host, another_host;
1625 unsigned long flags;
1626
1627 DBGMSG("arm_register called "
1628 "addr(Offset): %8.8x %8.8x length: %u "
1629 "rights: %2.2X notify: %2.2X "
1630 "max_blk_len: %4.4X",
1631 (u32) ((req->req.address >> 32) & 0xFFFF),
1632 (u32) (req->req.address & 0xFFFFFFFF),
1633 req->req.length, ((req->req.misc >> 8) & 0xFF),
1634 (req->req.misc & 0xFF), ((req->req.misc >> 16) & 0xFFFF));
1635 /* check that the address range stays within the 48-bit offset space */
1636 if ((((req->req.address) & ~(0xFFFFFFFFFFFFULL)) != 0) ||
1637 (((req->req.address + req->req.length) & ~(0xFFFFFFFFFFFFULL)) !=
1638 0)) {
1639 req->req.length = 0;
1640 return (-EINVAL);
1641 }
1642 /* allocate an address-list entry for this file_info */
1643 addr = (struct arm_addr *)kmalloc(sizeof(struct arm_addr), SLAB_KERNEL);
1644 if (!addr) {
1645 req->req.length = 0;
1646 return (-ENOMEM);
1647 }
1648 /* allocation of addr_space_buffer */
1649 addr->addr_space_buffer = (u8 *) vmalloc(req->req.length);
1650 if (!(addr->addr_space_buffer)) {
1651 kfree(addr);
1652 req->req.length = 0;
1653 return (-ENOMEM);
1654 }
1655 /* initialization of addr_space_buffer */
1656 if ((req->req.sendb) == (unsigned long)NULL) {
1657 /* init: set 0 */
1658 memset(addr->addr_space_buffer, 0, req->req.length);
1659 } else {
1660 /* init: user -> kernel */
1661 if (copy_from_user
1662 (addr->addr_space_buffer, int2ptr(req->req.sendb),
1663 req->req.length)) {
1664 vfree(addr->addr_space_buffer);
1665 kfree(addr);
1666 return (-EFAULT);
1667 }
1668 }
1669 INIT_LIST_HEAD(&addr->addr_list);
1670 addr->arm_tag = req->req.tag;
1671 addr->start = req->req.address;
1672 addr->end = req->req.address + req->req.length;
1673 addr->access_rights = (u8) (req->req.misc & 0x0F);
1674 addr->notification_options = (u8) ((req->req.misc >> 4) & 0x0F);
1675 addr->client_transactions = (u8) ((req->req.misc >> 8) & 0x0F);
1676 addr->access_rights |= addr->client_transactions;
1677 addr->notification_options |= addr->client_transactions;
1678 addr->recvb = req->req.recvb;
1679 addr->rec_length = (u16) ((req->req.misc >> 16) & 0xFFFF);
1680 spin_lock_irqsave(&host_info_lock, flags);
1681 hi = find_host_info(fi->host);
1682 same_host = 0;
1683 another_host = 0;
1684 /* does the same host already own an entry covering the same address range? */
1685 list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
1686 entry = fi_hlp->addr_list.next;
1687 while (entry != &(fi_hlp->addr_list)) {
1688 arm_addr =
1689 list_entry(entry, struct arm_addr, addr_list);
1690 if ((arm_addr->start == addr->start)
1691 && (arm_addr->end == addr->end)) {
1692 DBGMSG("same host ownes same "
1693 "addressrange -> EALREADY");
1694 same_host = 1;
1695 break;
1696 }
1697 entry = entry->next;
1698 }
1699 if (same_host) {
1700 break;
1701 }
1702 }
1703 if (same_host) {
1704 /* address range already occupied by the same host */
1705 vfree(addr->addr_space_buffer);
1706 kfree(addr);
1707 spin_unlock_irqrestore(&host_info_lock, flags);
1708 return (-EALREADY);
1709 }
1710 /* does another host hold a valid entry covering the same address range? */
1711 list_for_each_entry(hi, &host_info_list, list) {
1712 if (hi->host != fi->host) {
1713 list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
1714 entry = fi_hlp->addr_list.next;
1715 while (entry != &(fi_hlp->addr_list)) {
1716 arm_addr =
1717 list_entry(entry, struct arm_addr,
1718 addr_list);
1719 if ((arm_addr->start == addr->start)
1720 && (arm_addr->end == addr->end)) {
1721 DBGMSG
1722 ("another host ownes same "
1723 "addressrange");
1724 another_host = 1;
1725 break;
1726 }
1727 entry = entry->next;
1728 }
1729 if (another_host) {
1730 break;
1731 }
1732 }
1733 }
1734 }
1735 if (another_host) {
1736 DBGMSG("another hosts entry is valid -> SUCCESS");
1737 if (copy_to_user(int2ptr(req->req.recvb),
1738 &addr->start, sizeof(u64))) {
1739 printk(KERN_ERR "raw1394: arm_register failed, "
1740 "address-range entry is invalid -> EFAULT\n");
1741 vfree(addr->addr_space_buffer);
1742 kfree(addr);
1743 spin_unlock_irqrestore(&host_info_lock, flags);
1744 return (-EFAULT);
1745 }
1746 free_pending_request(req); /* immediate success or fail */
1747 /* INSERT ENTRY */
1748 list_add_tail(&addr->addr_list, &fi->addr_list);
1749 spin_unlock_irqrestore(&host_info_lock, flags);
1750 return sizeof(struct raw1394_request);
1751 }
1752 retval =
1753 hpsb_register_addrspace(&raw1394_highlevel, fi->host, &arm_ops,
1754 req->req.address,
1755 req->req.address + req->req.length);
1756 if (retval) {
1757 /* INSERT ENTRY */
1758 list_add_tail(&addr->addr_list, &fi->addr_list);
1759 } else {
1760 DBGMSG("arm_register failed errno: %d \n", retval);
1761 vfree(addr->addr_space_buffer);
1762 kfree(addr);
1763 spin_unlock_irqrestore(&host_info_lock, flags);
1764 return (-EALREADY);
1765 }
1766 spin_unlock_irqrestore(&host_info_lock, flags);
1767 free_pending_request(req); /* immediate success or fail */
1768 return sizeof(struct raw1394_request);
1769}
1770
1771static int arm_unregister(struct file_info *fi, struct pending_request *req)
1772{
1773 int found = 0;
1774 int retval = 0;
1775 struct list_head *entry;
1776 struct arm_addr *addr = NULL;
1777 struct host_info *hi;
1778 struct file_info *fi_hlp = NULL;
1779 struct arm_addr *arm_addr = NULL;
1780 int another_host;
1781 unsigned long flags;
1782
1783 DBGMSG("arm_Unregister called addr(Offset): "
1784 "%8.8x %8.8x",
1785 (u32) ((req->req.address >> 32) & 0xFFFF),
1786 (u32) (req->req.address & 0xFFFFFFFF));
1787 spin_lock_irqsave(&host_info_lock, flags);
1788 /* get addr */
1789 entry = fi->addr_list.next;
1790 while (entry != &(fi->addr_list)) {
1791 addr = list_entry(entry, struct arm_addr, addr_list);
1792 if (addr->start == req->req.address) {
1793 found = 1;
1794 break;
1795 }
1796 entry = entry->next;
1797 }
1798 if (!found) {
1799 DBGMSG("arm_Unregister addr not found");
1800 spin_unlock_irqrestore(&host_info_lock, flags);
1801 return (-EINVAL);
1802 }
1803 DBGMSG("arm_Unregister addr found");
1804 another_host = 0;
1805 /* does another host hold a valid entry covering
1806 the same address range? */
1807 list_for_each_entry(hi, &host_info_list, list) {
1808 if (hi->host != fi->host) {
1809 list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
1810 entry = fi_hlp->addr_list.next;
1811 while (entry != &(fi_hlp->addr_list)) {
1812 arm_addr = list_entry(entry,
1813 struct arm_addr,
1814 addr_list);
1815 if (arm_addr->start == addr->start) {
1816 DBGMSG("another host ownes "
1817 "same addressrange");
1818 another_host = 1;
1819 break;
1820 }
1821 entry = entry->next;
1822 }
1823 if (another_host) {
1824 break;
1825 }
1826 }
1827 }
1828 }
1829 if (another_host) {
1830 DBGMSG("delete entry from list -> success");
1831 list_del(&addr->addr_list);
1832 vfree(addr->addr_space_buffer);
1833 kfree(addr);
1834 free_pending_request(req); /* immediate success or fail */
1835 spin_unlock_irqrestore(&host_info_lock, flags);
1836 return sizeof(struct raw1394_request);
1837 }
1838 retval =
1839 hpsb_unregister_addrspace(&raw1394_highlevel, fi->host,
1840 addr->start);
1841 if (!retval) {
1842 printk(KERN_ERR "raw1394: arm_Unregister failed -> EINVAL\n");
1843 spin_unlock_irqrestore(&host_info_lock, flags);
1844 return (-EINVAL);
1845 }
1846 DBGMSG("delete entry from list -> success");
1847 list_del(&addr->addr_list);
1848 spin_unlock_irqrestore(&host_info_lock, flags);
1849 vfree(addr->addr_space_buffer);
1850 kfree(addr);
1851 free_pending_request(req); /* immediate success or fail */
1852 return sizeof(struct raw1394_request);
1853}
1854
1855/* Copy data from ARM buffer(s) to user buffer. */
1856static int arm_get_buf(struct file_info *fi, struct pending_request *req)
1857{
1858 struct arm_addr *arm_addr = NULL;
1859 unsigned long flags;
1860 unsigned long offset;
1861
1862 struct list_head *entry;
1863
1864 DBGMSG("arm_get_buf "
1865 "addr(Offset): %04X %08X length: %u",
1866 (u32) ((req->req.address >> 32) & 0xFFFF),
1867 (u32) (req->req.address & 0xFFFFFFFF), (u32) req->req.length);
1868
1869 spin_lock_irqsave(&host_info_lock, flags);
1870 entry = fi->addr_list.next;
1871 while (entry != &(fi->addr_list)) {
1872 arm_addr = list_entry(entry, struct arm_addr, addr_list);
1873 if ((arm_addr->start <= req->req.address) &&
1874 (arm_addr->end > req->req.address)) {
1875 if (req->req.address + req->req.length <= arm_addr->end) {
1876 offset = req->req.address - arm_addr->start;
1877
1878 DBGMSG
1879 ("arm_get_buf copy_to_user( %08X, %p, %u )",
1880 (u32) req->req.recvb,
1881 arm_addr->addr_space_buffer + offset,
1882 (u32) req->req.length);
1883
1884 if (copy_to_user
1885 (int2ptr(req->req.recvb),
1886 arm_addr->addr_space_buffer + offset,
1887 req->req.length)) {
1888 spin_unlock_irqrestore(&host_info_lock,
1889 flags);
1890 return (-EFAULT);
1891 }
1892
1893 spin_unlock_irqrestore(&host_info_lock, flags);
1894 /* We have to free the request, because we
1895 * queue no response, and therefore nobody
1896 * will free it. */
1897 free_pending_request(req);
1898 return sizeof(struct raw1394_request);
1899 } else {
1900 DBGMSG("arm_get_buf request exceeded mapping");
1901 spin_unlock_irqrestore(&host_info_lock, flags);
1902 return (-EINVAL);
1903 }
1904 }
1905 entry = entry->next;
1906 }
1907 spin_unlock_irqrestore(&host_info_lock, flags);
1908 return (-EINVAL);
1909}
1910
1911/* Copy data from user buffer to ARM buffer(s). */
1912static int arm_set_buf(struct file_info *fi, struct pending_request *req)
1913{
1914 struct arm_addr *arm_addr = NULL;
1915 unsigned long flags;
1916 unsigned long offset;
1917
1918 struct list_head *entry;
1919
1920 DBGMSG("arm_set_buf "
1921 "addr(Offset): %04X %08X length: %u",
1922 (u32) ((req->req.address >> 32) & 0xFFFF),
1923 (u32) (req->req.address & 0xFFFFFFFF), (u32) req->req.length);
1924
1925 spin_lock_irqsave(&host_info_lock, flags);
1926 entry = fi->addr_list.next;
1927 while (entry != &(fi->addr_list)) {
1928 arm_addr = list_entry(entry, struct arm_addr, addr_list);
1929 if ((arm_addr->start <= req->req.address) &&
1930 (arm_addr->end > req->req.address)) {
1931 if (req->req.address + req->req.length <= arm_addr->end) {
1932 offset = req->req.address - arm_addr->start;
1933
1934 DBGMSG
1935 ("arm_set_buf copy_from_user( %p, %08X, %u )",
1936 arm_addr->addr_space_buffer + offset,
1937 (u32) req->req.sendb,
1938 (u32) req->req.length);
1939
1940 if (copy_from_user
1941 (arm_addr->addr_space_buffer + offset,
1942 int2ptr(req->req.sendb),
1943 req->req.length)) {
1944 spin_unlock_irqrestore(&host_info_lock,
1945 flags);
1946 return (-EFAULT);
1947 }
1948
1949 spin_unlock_irqrestore(&host_info_lock, flags);
1950 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
1951 return sizeof(struct raw1394_request);
1952 } else {
1953 DBGMSG("arm_set_buf request exceeded mapping");
1954 spin_unlock_irqrestore(&host_info_lock, flags);
1955 return (-EINVAL);
1956 }
1957 }
1958 entry = entry->next;
1959 }
1960 spin_unlock_irqrestore(&host_info_lock, flags);
1961 return (-EINVAL);
1962}
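/*
 * Illustrative user-space sketch (not part of the driver), showing how the
 * two buffer calls above are reached.  Everything here is an assumption for
 * the sake of the example: arm_read_back() is a hypothetical helper, fd is an
 * already opened and connected raw1394 file descriptor, and the ARM region
 * must have been set up beforehand with RAW1394_REQ_ARM_REGISTER.  The
 * request is submitted as a whole struct raw1394_request, exactly as
 * raw1394_write() below requires, and completes synchronously (no response is
 * queued).  RAW1394_REQ_ARM_SET_BUF mirrors this with sendb as the source.
 */
#include <string.h>
#include <unistd.h>
#include "raw1394.h"	/* struct raw1394_request, RAW1394_REQ_ARM_GET_BUF */

static int arm_read_back(int fd, __u64 region_start, void *out, __u32 len)
{
	struct raw1394_request r;

	memset(&r, 0, sizeof(r));
	r.type    = RAW1394_REQ_ARM_GET_BUF;
	r.address = region_start;		/* must lie inside a registered region */
	r.length  = len;			/* must not run past the region end */
	r.recvb   = (unsigned long)out;		/* the kernel copies the bytes here */

	/* raw1394_write() accepts exactly sizeof(struct raw1394_request), nothing else */
	return write(fd, &r, sizeof(r)) == (ssize_t)sizeof(r) ? 0 : -1;
}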
1963
1964static int reset_notification(struct file_info *fi, struct pending_request *req)
1965{
1966 DBGMSG("reset_notification called - switch %s ",
1967 (req->req.misc == RAW1394_NOTIFY_OFF) ? "OFF" : "ON");
1968 if ((req->req.misc == RAW1394_NOTIFY_OFF) ||
1969 (req->req.misc == RAW1394_NOTIFY_ON)) {
1970 fi->notification = (u8) req->req.misc;
1971 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
1972 return sizeof(struct raw1394_request);
1973 }
1974 /* error EINVAL (22) invalid argument */
1975 return (-EINVAL);
1976}
1977
1978static int write_phypacket(struct file_info *fi, struct pending_request *req)
1979{
1980 struct hpsb_packet *packet = NULL;
1981 int retval = 0;
1982 quadlet_t data;
1983
1984 data = be32_to_cpu((u32) req->req.sendb);
1985 DBGMSG("write_phypacket called - quadlet 0x%8.8x ", data);
1986 packet = hpsb_make_phypacket(fi->host, data);
1987 if (!packet)
1988 return -ENOMEM;
1989 req->req.length = 0;
1990 req->packet = packet;
1991 hpsb_set_packet_complete_task(packet,
1992 (void (*)(void *))queue_complete_cb, req);
1993 spin_lock_irq(&fi->reqlists_lock);
1994 list_add_tail(&req->list, &fi->req_pending);
1995 spin_unlock_irq(&fi->reqlists_lock);
1996 packet->generation = req->req.generation;
1997 retval = hpsb_send_packet(packet);
1998 DBGMSG("write_phypacket send_packet called => retval: %d ", retval);
1999 if (retval < 0) {
2000 req->req.error = RAW1394_ERROR_SEND_ERROR;
2001 req->req.length = 0;
2002 queue_complete_req(req);
2003 }
2004 return sizeof(struct raw1394_request);
2005}
2006
2007static int get_config_rom(struct file_info *fi, struct pending_request *req)
2008{
2009 int ret = sizeof(struct raw1394_request);
2010 quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
2011 int status;
2012
2013 if (!data)
2014 return -ENOMEM;
2015
2016 status =
2017 csr1212_read(fi->host->csr.rom, CSR1212_CONFIG_ROM_SPACE_OFFSET,
2018 data, req->req.length);
2019 if (copy_to_user(int2ptr(req->req.recvb), data, req->req.length))
2020 ret = -EFAULT;
2021 if (copy_to_user
2022 (int2ptr(req->req.tag), &fi->host->csr.rom->cache_head->len,
2023 sizeof(fi->host->csr.rom->cache_head->len)))
2024 ret = -EFAULT;
2025 if (copy_to_user(int2ptr(req->req.address), &fi->host->csr.generation,
2026 sizeof(fi->host->csr.generation)))
2027 ret = -EFAULT;
2028 if (copy_to_user(int2ptr(req->req.sendb), &status, sizeof(status)))
2029 ret = -EFAULT;
2030 kfree(data);
2031 if (ret >= 0) {
2032 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2033 }
2034 return ret;
2035}
2036
2037static int update_config_rom(struct file_info *fi, struct pending_request *req)
2038{
2039 int ret = sizeof(struct raw1394_request);
2040 quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
2041 if (!data)
2042 return -ENOMEM;
2043 if (copy_from_user(data, int2ptr(req->req.sendb), req->req.length)) {
2044 ret = -EFAULT;
2045 } else {
2046 int status = hpsb_update_config_rom(fi->host,
2047 data, req->req.length,
2048 (unsigned char)req->req.
2049 misc);
2050 if (copy_to_user
2051 (int2ptr(req->req.recvb), &status, sizeof(status)))
2052 ret = -ENOMEM;
2053 }
2054 kfree(data);
2055 if (ret >= 0) {
2056 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2057 fi->cfgrom_upd = 1;
2058 }
2059 return ret;
2060}
2061
2062static int modify_config_rom(struct file_info *fi, struct pending_request *req)
2063{
2064 struct csr1212_keyval *kv;
2065 struct csr1212_csr_rom_cache *cache;
2066 struct csr1212_dentry *dentry;
2067 u32 dr;
2068 int ret = 0;
2069
2070 if (req->req.misc == ~0) {
2071 if (req->req.length == 0)
2072 return -EINVAL;
2073
2074 /* Find an unused slot */
2075 for (dr = 0;
2076 dr < RAW1394_MAX_USER_CSR_DIRS && fi->csr1212_dirs[dr];
2077 dr++) ;
2078
2079 if (dr == RAW1394_MAX_USER_CSR_DIRS)
2080 return -ENOMEM;
2081
2082 fi->csr1212_dirs[dr] =
2083 csr1212_new_directory(CSR1212_KV_ID_VENDOR);
2084 if (!fi->csr1212_dirs[dr])
2085 return -ENOMEM;
2086 } else {
2087 dr = req->req.misc;
2088 if (!fi->csr1212_dirs[dr])
2089 return -EINVAL;
2090
2091 /* Delete old stuff */
2092 for (dentry =
2093 fi->csr1212_dirs[dr]->value.directory.dentries_head;
2094 dentry; dentry = dentry->next) {
2095 csr1212_detach_keyval_from_directory(fi->host->csr.rom->
2096 root_kv,
2097 dentry->kv);
2098 }
2099
2100 if (req->req.length == 0) {
2101 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2102 fi->csr1212_dirs[dr] = NULL;
2103
2104 hpsb_update_config_rom_image(fi->host);
2105 free_pending_request(req);
2106 return sizeof(struct raw1394_request);
2107 }
2108 }
2109
2110 cache = csr1212_rom_cache_malloc(0, req->req.length);
2111 if (!cache) {
2112 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2113 fi->csr1212_dirs[dr] = NULL;
2114 return -ENOMEM;
2115 }
2116
2117 cache->filled_head =
2118 kmalloc(sizeof(struct csr1212_cache_region), GFP_KERNEL);
2119 if (!cache->filled_head) {
2120 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2121 fi->csr1212_dirs[dr] = NULL;
2122 CSR1212_FREE(cache);
2123 return -ENOMEM;
2124 }
2125 cache->filled_tail = cache->filled_head;
2126
2127 if (copy_from_user(cache->data, int2ptr(req->req.sendb),
2128 req->req.length)) {
2129 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2130 fi->csr1212_dirs[dr] = NULL;
2131 CSR1212_FREE(cache);
2132 ret = -EFAULT;
2133 } else {
2134 cache->len = req->req.length;
2135 cache->filled_head->offset_start = 0;
2136 cache->filled_head->offset_end = cache->size - 1;
2137
2138 cache->layout_head = cache->layout_tail = fi->csr1212_dirs[dr];
2139
2140 ret = CSR1212_SUCCESS;
2141 /* parse all the items */
2142 for (kv = cache->layout_head; ret == CSR1212_SUCCESS && kv;
2143 kv = kv->next) {
2144 ret = csr1212_parse_keyval(kv, cache);
2145 }
2146
2147 /* attach top level items to the root directory */
2148 for (dentry =
2149 fi->csr1212_dirs[dr]->value.directory.dentries_head;
2150 ret == CSR1212_SUCCESS && dentry; dentry = dentry->next) {
2151 ret =
2152 csr1212_attach_keyval_to_directory(fi->host->csr.
2153 rom->root_kv,
2154 dentry->kv);
2155 }
2156
2157 if (ret == CSR1212_SUCCESS) {
2158 ret = hpsb_update_config_rom_image(fi->host);
2159
2160 if (ret >= 0 && copy_to_user(int2ptr(req->req.recvb),
2161 &dr, sizeof(dr))) {
2162 ret = -ENOMEM;
2163 }
2164 }
2165 }
2166 kfree(cache->filled_head);
2167 kfree(cache);
2168
2169 if (ret >= 0) {
2170 /* we have to free the request, because we queue no response,
2171 * and therefore nobody will free it */
2172 free_pending_request(req);
2173 return sizeof(struct raw1394_request);
2174 } else {
2175 for (dentry =
2176 fi->csr1212_dirs[dr]->value.directory.dentries_head;
2177 dentry; dentry = dentry->next) {
2178 csr1212_detach_keyval_from_directory(fi->host->csr.rom->
2179 root_kv,
2180 dentry->kv);
2181 }
2182 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2183 fi->csr1212_dirs[dr] = NULL;
2184 return ret;
2185 }
2186}
2187
2188static int state_connected(struct file_info *fi, struct pending_request *req)
2189{
2190 int node = req->req.address >> 48;
2191
2192 req->req.error = RAW1394_ERROR_NONE;
2193
2194 switch (req->req.type) {
2195
2196 case RAW1394_REQ_ECHO:
2197 queue_complete_req(req);
2198 return sizeof(struct raw1394_request);
2199
2200 case RAW1394_REQ_ISO_SEND:
2201 return handle_iso_send(fi, req, node);
2202
2203 case RAW1394_REQ_ARM_REGISTER:
2204 return arm_register(fi, req);
2205
2206 case RAW1394_REQ_ARM_UNREGISTER:
2207 return arm_unregister(fi, req);
2208
2209 case RAW1394_REQ_ARM_SET_BUF:
2210 return arm_set_buf(fi, req);
2211
2212 case RAW1394_REQ_ARM_GET_BUF:
2213 return arm_get_buf(fi, req);
2214
2215 case RAW1394_REQ_RESET_NOTIFY:
2216 return reset_notification(fi, req);
2217
2218 case RAW1394_REQ_ISO_LISTEN:
2219 handle_iso_listen(fi, req);
2220 return sizeof(struct raw1394_request);
2221
2222 case RAW1394_REQ_FCP_LISTEN:
2223 handle_fcp_listen(fi, req);
2224 return sizeof(struct raw1394_request);
2225
2226 case RAW1394_REQ_RESET_BUS:
2227 if (req->req.misc == RAW1394_LONG_RESET) {
2228 DBGMSG("busreset called (type: LONG)");
2229 hpsb_reset_bus(fi->host, LONG_RESET);
2230 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2231 return sizeof(struct raw1394_request);
2232 }
2233 if (req->req.misc == RAW1394_SHORT_RESET) {
2234 DBGMSG("busreset called (type: SHORT)");
2235 hpsb_reset_bus(fi->host, SHORT_RESET);
2236 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2237 return sizeof(struct raw1394_request);
2238 }
2239 /* error EINVAL (22) invalid argument */
2240 return (-EINVAL);
2241 case RAW1394_REQ_GET_ROM:
2242 return get_config_rom(fi, req);
2243
2244 case RAW1394_REQ_UPDATE_ROM:
2245 return update_config_rom(fi, req);
2246
2247 case RAW1394_REQ_MODIFY_ROM:
2248 return modify_config_rom(fi, req);
2249 }
2250
2251 if (req->req.generation != get_hpsb_generation(fi->host)) {
2252 req->req.error = RAW1394_ERROR_GENERATION;
2253 req->req.generation = get_hpsb_generation(fi->host);
2254 req->req.length = 0;
2255 queue_complete_req(req);
2256 return sizeof(struct raw1394_request);
2257 }
2258
2259 switch (req->req.type) {
2260 case RAW1394_REQ_PHYPACKET:
2261 return write_phypacket(fi, req);
2262 case RAW1394_REQ_ASYNC_SEND:
2263 return handle_async_send(fi, req);
2264 }
2265
2266 if (req->req.length == 0) {
2267 req->req.error = RAW1394_ERROR_INVALID_ARG;
2268 queue_complete_req(req);
2269 return sizeof(struct raw1394_request);
2270 }
2271
2272 return handle_async_request(fi, req, node);
2273}
2274
2275static ssize_t raw1394_write(struct file *file, const char __user * buffer,
2276 size_t count, loff_t * offset_is_ignored)
2277{
2278 struct file_info *fi = (struct file_info *)file->private_data;
2279 struct pending_request *req;
2280 ssize_t retval = 0;
2281
2282 if (count != sizeof(struct raw1394_request)) {
2283 return -EINVAL;
2284 }
2285
2286 req = alloc_pending_request();
2287 if (req == NULL) {
2288 return -ENOMEM;
2289 }
2290 req->file_info = fi;
2291
2292 if (copy_from_user(&req->req, buffer, sizeof(struct raw1394_request))) {
2293 free_pending_request(req);
2294 return -EFAULT;
2295 }
2296
2297 switch (fi->state) {
2298 case opened:
2299 retval = state_opened(fi, req);
2300 break;
2301
2302 case initialized:
2303 retval = state_initialized(fi, req);
2304 break;
2305
2306 case connected:
2307 retval = state_connected(fi, req);
2308 break;
2309 }
2310
2311 if (retval < 0) {
2312 free_pending_request(req);
2313 }
2314
2315 return retval;
2316}
2317
2318/* rawiso operations */
2319
2320/* check if any RAW1394_REQ_RAWISO_ACTIVITY event is already in the
2321 * completion queue (reqlists_lock must be taken) */
2322static inline int __rawiso_event_in_queue(struct file_info *fi)
2323{
2324 struct pending_request *req;
2325
2326 list_for_each_entry(req, &fi->req_complete, list)
2327 if (req->req.type == RAW1394_REQ_RAWISO_ACTIVITY)
2328 return 1;
2329
2330 return 0;
2331}
2332
2333/* put a RAWISO_ACTIVITY event in the queue, if one isn't there already */
2334static void queue_rawiso_event(struct file_info *fi)
2335{
2336 unsigned long flags;
2337
2338 spin_lock_irqsave(&fi->reqlists_lock, flags);
2339
2340 /* only one ISO activity event may be in the queue */
2341 if (!__rawiso_event_in_queue(fi)) {
2342 struct pending_request *req =
2343 __alloc_pending_request(SLAB_ATOMIC);
2344
2345 if (req) {
2346 req->file_info = fi;
2347 req->req.type = RAW1394_REQ_RAWISO_ACTIVITY;
2348 req->req.generation = get_hpsb_generation(fi->host);
2349 __queue_complete_req(req);
2350 } else {
2351 /* on allocation failure, signal an overflow */
2352 if (fi->iso_handle) {
2353 atomic_inc(&fi->iso_handle->overflows);
2354 }
2355 }
2356 }
2357 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
2358}
2359
2360static void rawiso_activity_cb(struct hpsb_iso *iso)
2361{
2362 unsigned long flags;
2363 struct host_info *hi;
2364 struct file_info *fi;
2365
2366 spin_lock_irqsave(&host_info_lock, flags);
2367 hi = find_host_info(iso->host);
2368
2369 if (hi != NULL) {
2370 list_for_each_entry(fi, &hi->file_info_list, list) {
2371 if (fi->iso_handle == iso)
2372 queue_rawiso_event(fi);
2373 }
2374 }
2375
2376 spin_unlock_irqrestore(&host_info_lock, flags);
2377}
2378
2379/* helper function - gather all the kernel iso status bits for returning to user-space */
2380static void raw1394_iso_fill_status(struct hpsb_iso *iso,
2381 struct raw1394_iso_status *stat)
2382{
2383 stat->config.data_buf_size = iso->buf_size;
2384 stat->config.buf_packets = iso->buf_packets;
2385 stat->config.channel = iso->channel;
2386 stat->config.speed = iso->speed;
2387 stat->config.irq_interval = iso->irq_interval;
2388 stat->n_packets = hpsb_iso_n_ready(iso);
2389 stat->overflows = atomic_read(&iso->overflows);
2390 stat->xmit_cycle = iso->xmit_cycle;
2391}
2392
2393static int raw1394_iso_xmit_init(struct file_info *fi, void __user * uaddr)
2394{
2395 struct raw1394_iso_status stat;
2396
2397 if (!fi->host)
2398 return -EINVAL;
2399
2400 if (copy_from_user(&stat, uaddr, sizeof(stat)))
2401 return -EFAULT;
2402
2403 fi->iso_handle = hpsb_iso_xmit_init(fi->host,
2404 stat.config.data_buf_size,
2405 stat.config.buf_packets,
2406 stat.config.channel,
2407 stat.config.speed,
2408 stat.config.irq_interval,
2409 rawiso_activity_cb);
2410 if (!fi->iso_handle)
2411 return -ENOMEM;
2412
2413 fi->iso_state = RAW1394_ISO_XMIT;
2414
2415 raw1394_iso_fill_status(fi->iso_handle, &stat);
2416 if (copy_to_user(uaddr, &stat, sizeof(stat)))
2417 return -EFAULT;
2418
2419 /* queue an event to get things started */
2420 rawiso_activity_cb(fi->iso_handle);
2421
2422 return 0;
2423}
2424
2425static int raw1394_iso_recv_init(struct file_info *fi, void __user * uaddr)
2426{
2427 struct raw1394_iso_status stat;
2428
2429 if (!fi->host)
2430 return -EINVAL;
2431
2432 if (copy_from_user(&stat, uaddr, sizeof(stat)))
2433 return -EFAULT;
2434
2435 fi->iso_handle = hpsb_iso_recv_init(fi->host,
2436 stat.config.data_buf_size,
2437 stat.config.buf_packets,
2438 stat.config.channel,
2439 stat.config.dma_mode,
2440 stat.config.irq_interval,
2441 rawiso_activity_cb);
2442 if (!fi->iso_handle)
2443 return -ENOMEM;
2444
2445 fi->iso_state = RAW1394_ISO_RECV;
2446
2447 raw1394_iso_fill_status(fi->iso_handle, &stat);
2448 if (copy_to_user(uaddr, &stat, sizeof(stat)))
2449 return -EFAULT;
2450 return 0;
2451}
2452
2453static int raw1394_iso_get_status(struct file_info *fi, void __user * uaddr)
2454{
2455 struct raw1394_iso_status stat;
2456 struct hpsb_iso *iso = fi->iso_handle;
2457
2458 raw1394_iso_fill_status(fi->iso_handle, &stat);
2459 if (copy_to_user(uaddr, &stat, sizeof(stat)))
2460 return -EFAULT;
2461
2462 /* reset overflow counter */
2463 atomic_set(&iso->overflows, 0);
2464
2465 return 0;
2466}
2467
2468/* copy N packet_infos out of the ringbuffer into user-supplied array */
2469static int raw1394_iso_recv_packets(struct file_info *fi, void __user * uaddr)
2470{
2471 struct raw1394_iso_packets upackets;
2472 unsigned int packet = fi->iso_handle->first_packet;
2473 int i;
2474
2475 if (copy_from_user(&upackets, uaddr, sizeof(upackets)))
2476 return -EFAULT;
2477
2478 if (upackets.n_packets > hpsb_iso_n_ready(fi->iso_handle))
2479 return -EINVAL;
2480
2481 /* ensure user-supplied buffer is accessible and big enough */
2482 if (!access_ok(VERIFY_WRITE, upackets.infos,
2483 upackets.n_packets *
2484 sizeof(struct raw1394_iso_packet_info)))
2485 return -EFAULT;
2486
2487 /* copy the packet_infos out */
2488 for (i = 0; i < upackets.n_packets; i++) {
2489 if (__copy_to_user(&upackets.infos[i],
2490 &fi->iso_handle->infos[packet],
2491 sizeof(struct raw1394_iso_packet_info)))
2492 return -EFAULT;
2493
2494 packet = (packet + 1) % fi->iso_handle->buf_packets;
2495 }
2496
2497 return 0;
2498}
2499
2500/* copy N packet_infos from user to ringbuffer, and queue them for transmission */
2501static int raw1394_iso_send_packets(struct file_info *fi, void __user * uaddr)
2502{
2503 struct raw1394_iso_packets upackets;
2504 int i, rv;
2505
2506 if (copy_from_user(&upackets, uaddr, sizeof(upackets)))
2507 return -EFAULT;
2508
2509 if (upackets.n_packets > hpsb_iso_n_ready(fi->iso_handle))
2510 return -EINVAL;
2511
2512 /* ensure user-supplied buffer is accessible and big enough */
2513 if (!access_ok(VERIFY_READ, upackets.infos,
2514 upackets.n_packets *
2515 sizeof(struct raw1394_iso_packet_info)))
2516 return -EFAULT;
2517
2518 /* copy the infos structs in and queue the packets */
2519 for (i = 0; i < upackets.n_packets; i++) {
2520 struct raw1394_iso_packet_info info;
2521
2522 if (__copy_from_user(&info, &upackets.infos[i],
2523 sizeof(struct raw1394_iso_packet_info)))
2524 return -EFAULT;
2525
2526 rv = hpsb_iso_xmit_queue_packet(fi->iso_handle, info.offset,
2527 info.len, info.tag, info.sy);
2528 if (rv)
2529 return rv;
2530 }
2531
2532 return 0;
2533}
2534
2535static void raw1394_iso_shutdown(struct file_info *fi)
2536{
2537 if (fi->iso_handle)
2538 hpsb_iso_shutdown(fi->iso_handle);
2539
2540 fi->iso_handle = NULL;
2541 fi->iso_state = RAW1394_ISO_INACTIVE;
2542}
2543
2544/* mmap the rawiso xmit/recv buffer */
2545static int raw1394_mmap(struct file *file, struct vm_area_struct *vma)
2546{
2547 struct file_info *fi = file->private_data;
2548
2549 if (fi->iso_state == RAW1394_ISO_INACTIVE)
2550 return -EINVAL;
2551
2552 return dma_region_mmap(&fi->iso_handle->data_buf, file, vma);
2553}
2554
2555/* ioctl is only used for rawiso operations */
2556static int raw1394_ioctl(struct inode *inode, struct file *file,
2557 unsigned int cmd, unsigned long arg)
2558{
2559 struct file_info *fi = file->private_data;
2560 void __user *argp = (void __user *)arg;
2561
2562 switch (fi->iso_state) {
2563 case RAW1394_ISO_INACTIVE:
2564 switch (cmd) {
2565 case RAW1394_IOC_ISO_XMIT_INIT:
2566 return raw1394_iso_xmit_init(fi, argp);
2567 case RAW1394_IOC_ISO_RECV_INIT:
2568 return raw1394_iso_recv_init(fi, argp);
2569 default:
2570 break;
2571 }
2572 break;
2573 case RAW1394_ISO_RECV:
2574 switch (cmd) {
2575 case RAW1394_IOC_ISO_RECV_START:{
2576 /* copy args from user-space */
2577 int args[3];
2578 if (copy_from_user
2579 (&args[0], argp, sizeof(args)))
2580 return -EFAULT;
2581 return hpsb_iso_recv_start(fi->iso_handle,
2582 args[0], args[1],
2583 args[2]);
2584 }
2585 case RAW1394_IOC_ISO_XMIT_RECV_STOP:
2586 hpsb_iso_stop(fi->iso_handle);
2587 return 0;
2588 case RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL:
2589 return hpsb_iso_recv_listen_channel(fi->iso_handle,
2590 arg);
2591 case RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL:
2592 return hpsb_iso_recv_unlisten_channel(fi->iso_handle,
2593 arg);
2594 case RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK:{
2595 /* copy the u64 from user-space */
2596 u64 mask;
2597 if (copy_from_user(&mask, argp, sizeof(mask)))
2598 return -EFAULT;
2599 return hpsb_iso_recv_set_channel_mask(fi->
2600 iso_handle,
2601 mask);
2602 }
2603 case RAW1394_IOC_ISO_GET_STATUS:
2604 return raw1394_iso_get_status(fi, argp);
2605 case RAW1394_IOC_ISO_RECV_PACKETS:
2606 return raw1394_iso_recv_packets(fi, argp);
2607 case RAW1394_IOC_ISO_RECV_RELEASE_PACKETS:
2608 return hpsb_iso_recv_release_packets(fi->iso_handle,
2609 arg);
2610 case RAW1394_IOC_ISO_RECV_FLUSH:
2611 return hpsb_iso_recv_flush(fi->iso_handle);
2612 case RAW1394_IOC_ISO_SHUTDOWN:
2613 raw1394_iso_shutdown(fi);
2614 return 0;
2615 case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
2616 queue_rawiso_event(fi);
2617 return 0;
2618 }
2619 break;
2620 case RAW1394_ISO_XMIT:
2621 switch (cmd) {
2622 case RAW1394_IOC_ISO_XMIT_START:{
2623 /* copy two ints from user-space */
2624 int args[2];
2625 if (copy_from_user
2626 (&args[0], argp, sizeof(args)))
2627 return -EFAULT;
2628 return hpsb_iso_xmit_start(fi->iso_handle,
2629 args[0], args[1]);
2630 }
2631 case RAW1394_IOC_ISO_XMIT_SYNC:
2632 return hpsb_iso_xmit_sync(fi->iso_handle);
2633 case RAW1394_IOC_ISO_XMIT_RECV_STOP:
2634 hpsb_iso_stop(fi->iso_handle);
2635 return 0;
2636 case RAW1394_IOC_ISO_GET_STATUS:
2637 return raw1394_iso_get_status(fi, argp);
2638 case RAW1394_IOC_ISO_XMIT_PACKETS:
2639 return raw1394_iso_send_packets(fi, argp);
2640 case RAW1394_IOC_ISO_SHUTDOWN:
2641 raw1394_iso_shutdown(fi);
2642 return 0;
2643 case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
2644 queue_rawiso_event(fi);
2645 return 0;
2646 }
2647 break;
2648 default:
2649 break;
2650 }
2651
2652 return -EINVAL;
2653}
2654
2655static unsigned int raw1394_poll(struct file *file, poll_table * pt)
2656{
2657 struct file_info *fi = file->private_data;
2658 unsigned int mask = POLLOUT | POLLWRNORM;
2659
2660 poll_wait(file, &fi->poll_wait_complete, pt);
2661
2662 spin_lock_irq(&fi->reqlists_lock);
2663 if (!list_empty(&fi->req_complete)) {
2664 mask |= POLLIN | POLLRDNORM;
2665 }
2666 spin_unlock_irq(&fi->reqlists_lock);
2667
2668 return mask;
2669}
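/*
 * Illustrative user-space counterpart of the poll handler above, sketched
 * under assumptions: wait_for_event() is a hypothetical helper and fd is an
 * already opened raw1394 file descriptor.  POLLIN means at least one
 * completed request is queued; the read handler (raw1394_read, wired up via
 * raw1394_fops below) then hands back one struct raw1394_request per call.
 */
#include <poll.h>
#include <unistd.h>
#include "raw1394.h"

static int wait_for_event(int fd, struct raw1394_request *req)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) <= 0)
		return -1;
	if (!(pfd.revents & POLLIN))
		return -1;
	/* completed requests come back one struct at a time */
	return read(fd, req, sizeof(*req)) == (ssize_t)sizeof(*req) ? 0 : -1;
}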
2670
2671static int raw1394_open(struct inode *inode, struct file *file)
2672{
2673 struct file_info *fi;
2674
2675 fi = kmalloc(sizeof(struct file_info), SLAB_KERNEL);
2676 if (fi == NULL)
2677 return -ENOMEM;
2678
2679 memset(fi, 0, sizeof(struct file_info));
2680 fi->notification = (u8) RAW1394_NOTIFY_ON; /* busreset notification */
2681
2682 INIT_LIST_HEAD(&fi->list);
2683 fi->state = opened;
2684 INIT_LIST_HEAD(&fi->req_pending);
2685 INIT_LIST_HEAD(&fi->req_complete);
2686 sema_init(&fi->complete_sem, 0);
2687 spin_lock_init(&fi->reqlists_lock);
2688 init_waitqueue_head(&fi->poll_wait_complete);
2689 INIT_LIST_HEAD(&fi->addr_list);
2690
2691 file->private_data = fi;
2692
2693 return 0;
2694}
2695
2696static int raw1394_release(struct inode *inode, struct file *file)
2697{
2698 struct file_info *fi = file->private_data;
2699 struct list_head *lh;
2700 struct pending_request *req;
2701 int done = 0, i, fail = 0;
2702 int retval = 0;
2703 struct list_head *entry;
2704 struct arm_addr *addr = NULL;
2705 struct host_info *hi;
2706 struct file_info *fi_hlp = NULL;
2707 struct arm_addr *arm_addr = NULL;
2708 int another_host;
2709 int csr_mod = 0;
2710
2711 if (fi->iso_state != RAW1394_ISO_INACTIVE)
2712 raw1394_iso_shutdown(fi);
2713
2714 for (i = 0; i < 64; i++) {
2715 if (fi->listen_channels & (1ULL << i)) {
2716 hpsb_unlisten_channel(&raw1394_highlevel, fi->host, i);
2717 }
2718 }
2719
2720 spin_lock_irq(&host_info_lock);
2721 fi->listen_channels = 0;
2722 spin_unlock_irq(&host_info_lock);
2723
2724 fail = 0;
2725 /* set address-entries invalid */
2726 spin_lock_irq(&host_info_lock);
2727
2728 while (!list_empty(&fi->addr_list)) {
2729 another_host = 0;
2730 lh = fi->addr_list.next;
2731 addr = list_entry(lh, struct arm_addr, addr_list);
2732 /* does another host hold a valid entry covering
2733 the same address range? */
2734 list_for_each_entry(hi, &host_info_list, list) {
2735 if (hi->host != fi->host) {
2736 list_for_each_entry(fi_hlp, &hi->file_info_list,
2737 list) {
2738 entry = fi_hlp->addr_list.next;
2739 while (entry != &(fi_hlp->addr_list)) {
2740 arm_addr = list_entry(entry,
2741 struct
2742 arm_addr,
2743 addr_list);
2744 if (arm_addr->start ==
2745 addr->start) {
2746 DBGMSG
2747 ("raw1394_release: "
2748 "another host ownes "
2749 "same addressrange");
2750 another_host = 1;
2751 break;
2752 }
2753 entry = entry->next;
2754 }
2755 if (another_host) {
2756 break;
2757 }
2758 }
2759 }
2760 }
2761 if (!another_host) {
2762 DBGMSG("raw1394_release: call hpsb_arm_unregister");
2763 retval =
2764 hpsb_unregister_addrspace(&raw1394_highlevel,
2765 fi->host, addr->start);
2766 if (!retval) {
2767 ++fail;
2768 printk(KERN_ERR
2769 "raw1394_release arm_Unregister failed\n");
2770 }
2771 }
2772 DBGMSG("raw1394_release: delete addr_entry from list");
2773 list_del(&addr->addr_list);
2774 vfree(addr->addr_space_buffer);
2775 kfree(addr);
2776 } /* while */
2777 spin_unlock_irq(&host_info_lock);
2778 if (fail > 0) {
2779 printk(KERN_ERR "raw1394: error(s) occurred "
2780 "during addr_list release\n");
2781 }
2782
2783 while (!done) {
2784 spin_lock_irq(&fi->reqlists_lock);
2785
2786 while (!list_empty(&fi->req_complete)) {
2787 lh = fi->req_complete.next;
2788 list_del(lh);
2789
2790 req = list_entry(lh, struct pending_request, list);
2791
2792 free_pending_request(req);
2793 }
2794
2795 if (list_empty(&fi->req_pending))
2796 done = 1;
2797
2798 spin_unlock_irq(&fi->reqlists_lock);
2799
2800 if (!done)
2801 down_interruptible(&fi->complete_sem);
2802 }
2803
2804 /* Remove any sub-trees left by user space programs */
2805 for (i = 0; i < RAW1394_MAX_USER_CSR_DIRS; i++) {
2806 struct csr1212_dentry *dentry;
2807 if (!fi->csr1212_dirs[i])
2808 continue;
2809 for (dentry =
2810 fi->csr1212_dirs[i]->value.directory.dentries_head; dentry;
2811 dentry = dentry->next) {
2812 csr1212_detach_keyval_from_directory(fi->host->csr.rom->
2813 root_kv,
2814 dentry->kv);
2815 }
2816 csr1212_release_keyval(fi->csr1212_dirs[i]);
2817 fi->csr1212_dirs[i] = NULL;
2818 csr_mod = 1;
2819 }
2820
2821 if ((csr_mod || fi->cfgrom_upd)
2822 && hpsb_update_config_rom_image(fi->host) < 0)
2823 HPSB_ERR
2824 ("Failed to generate Configuration ROM image for host %d",
2825 fi->host->id);
2826
2827 if (fi->state == connected) {
2828 spin_lock_irq(&host_info_lock);
2829 list_del(&fi->list);
2830 spin_unlock_irq(&host_info_lock);
2831
2832 put_device(&fi->host->device);
2833 }
2834
2835 kfree(fi);
2836
2837 return 0;
2838}
2839
2840/*** HOTPLUG STUFF **********************************************************/
2841/*
2842 * Export information about protocols/devices supported by this driver.
2843 */
2844static struct ieee1394_device_id raw1394_id_table[] = {
2845 {
2846 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2847 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
2848 .version = AVC_SW_VERSION_ENTRY & 0xffffff},
2849 {
2850 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2851 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
2852 .version = CAMERA_SW_VERSION_ENTRY & 0xffffff},
2853 {
2854 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2855 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
2856 .version = (CAMERA_SW_VERSION_ENTRY + 1) & 0xffffff},
2857 {
2858 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2859 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
2860 .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff},
2861 {}
2862};
2863
2864MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table);
2865
2866static struct hpsb_protocol_driver raw1394_driver = {
2867 .name = "raw1394 Driver",
2868 .id_table = raw1394_id_table,
2869 .driver = {
2870 .name = "raw1394",
2871 .bus = &ieee1394_bus_type,
2872 },
2873};
2874
2875/******************************************************************************/
2876
2877static struct hpsb_highlevel raw1394_highlevel = {
2878 .name = RAW1394_DEVICE_NAME,
2879 .add_host = add_host,
2880 .remove_host = remove_host,
2881 .host_reset = host_reset,
2882 .iso_receive = iso_receive,
2883 .fcp_request = fcp_request,
2884};
2885
2886static struct cdev raw1394_cdev;
2887static struct file_operations raw1394_fops = {
2888 .owner = THIS_MODULE,
2889 .read = raw1394_read,
2890 .write = raw1394_write,
2891 .mmap = raw1394_mmap,
2892 .ioctl = raw1394_ioctl,
2893 .poll = raw1394_poll,
2894 .open = raw1394_open,
2895 .release = raw1394_release,
2896};
2897
2898static int __init init_raw1394(void)
2899{
2900 int ret = 0;
2901
2902 hpsb_register_highlevel(&raw1394_highlevel);
2903
2904 if (IS_ERR(class_simple_device_add(hpsb_protocol_class, MKDEV(
2905 IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16),
2906 NULL, RAW1394_DEVICE_NAME))) {
2907 ret = -EFAULT;
2908 goto out_unreg;
2909 }
2910
2911 devfs_mk_cdev(MKDEV(
2912 IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16),
2913 S_IFCHR | S_IRUSR | S_IWUSR, RAW1394_DEVICE_NAME);
2914
2915 cdev_init(&raw1394_cdev, &raw1394_fops);
2916 raw1394_cdev.owner = THIS_MODULE;
2917 kobject_set_name(&raw1394_cdev.kobj, RAW1394_DEVICE_NAME);
2918 ret = cdev_add(&raw1394_cdev, IEEE1394_RAW1394_DEV, 1);
2919 if (ret) {
2920 HPSB_ERR("raw1394 failed to register minor device block");
2921 goto out_dev;
2922 }
2923
2924 HPSB_INFO("raw1394: /dev/%s device initialized", RAW1394_DEVICE_NAME);
2925
2926 ret = hpsb_register_protocol(&raw1394_driver);
2927 if (ret) {
2928 HPSB_ERR("raw1394: failed to register protocol");
2929 cdev_del(&raw1394_cdev);
2930 goto out_dev;
2931 }
2932
2933 goto out;
2934
2935out_dev:
2936 devfs_remove(RAW1394_DEVICE_NAME);
2937 class_simple_device_remove(MKDEV(
2938 IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16));
2939out_unreg:
2940 hpsb_unregister_highlevel(&raw1394_highlevel);
2941out:
2942 return ret;
2943}
2944
2945static void __exit cleanup_raw1394(void)
2946{
2947 class_simple_device_remove(MKDEV(
2948 IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16));
2949 cdev_del(&raw1394_cdev);
2950 devfs_remove(RAW1394_DEVICE_NAME);
2951 hpsb_unregister_highlevel(&raw1394_highlevel);
2952 hpsb_unregister_protocol(&raw1394_driver);
2953}
2954
2955module_init(init_raw1394);
2956module_exit(cleanup_raw1394);
2957MODULE_LICENSE("GPL");
2958MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16);
diff --git a/drivers/ieee1394/raw1394.h b/drivers/ieee1394/raw1394.h
new file mode 100644
index 000000000000..35bfc38f013c
--- /dev/null
+++ b/drivers/ieee1394/raw1394.h
@@ -0,0 +1,181 @@
1#ifndef IEEE1394_RAW1394_H
2#define IEEE1394_RAW1394_H
3
4/* header for the raw1394 API that is exported to user-space */
5
6#define RAW1394_KERNELAPI_VERSION 4
7
8/* state: opened */
9#define RAW1394_REQ_INITIALIZE 1
10
11/* state: initialized */
12#define RAW1394_REQ_LIST_CARDS 2
13#define RAW1394_REQ_SET_CARD 3
14
15/* state: connected */
16#define RAW1394_REQ_ASYNC_READ 100
17#define RAW1394_REQ_ASYNC_WRITE 101
18#define RAW1394_REQ_LOCK 102
19#define RAW1394_REQ_LOCK64 103
20#define RAW1394_REQ_ISO_SEND 104
21#define RAW1394_REQ_ASYNC_SEND 105
22#define RAW1394_REQ_ASYNC_STREAM 106
23
24#define RAW1394_REQ_ISO_LISTEN 200
25#define RAW1394_REQ_FCP_LISTEN 201
26#define RAW1394_REQ_RESET_BUS 202
27#define RAW1394_REQ_GET_ROM 203
28#define RAW1394_REQ_UPDATE_ROM 204
29#define RAW1394_REQ_ECHO 205
30#define RAW1394_REQ_MODIFY_ROM 206
31
32#define RAW1394_REQ_ARM_REGISTER 300
33#define RAW1394_REQ_ARM_UNREGISTER 301
34#define RAW1394_REQ_ARM_SET_BUF 302
35#define RAW1394_REQ_ARM_GET_BUF 303
36
37#define RAW1394_REQ_RESET_NOTIFY 400
38
39#define RAW1394_REQ_PHYPACKET 500
40
41/* kernel to user */
42#define RAW1394_REQ_BUS_RESET 10000
43#define RAW1394_REQ_ISO_RECEIVE 10001
44#define RAW1394_REQ_FCP_REQUEST 10002
45#define RAW1394_REQ_ARM 10003
46#define RAW1394_REQ_RAWISO_ACTIVITY 10004
47
48/* error codes */
49#define RAW1394_ERROR_NONE 0
50#define RAW1394_ERROR_COMPAT (-1001)
51#define RAW1394_ERROR_STATE_ORDER (-1002)
52#define RAW1394_ERROR_GENERATION (-1003)
53#define RAW1394_ERROR_INVALID_ARG (-1004)
54#define RAW1394_ERROR_MEMFAULT (-1005)
55#define RAW1394_ERROR_ALREADY (-1006)
56
57#define RAW1394_ERROR_EXCESSIVE (-1020)
58#define RAW1394_ERROR_UNTIDY_LEN (-1021)
59
60#define RAW1394_ERROR_SEND_ERROR (-1100)
61#define RAW1394_ERROR_ABORTED (-1101)
62#define RAW1394_ERROR_TIMEOUT (-1102)
63
64/* arm_codes */
65#define ARM_READ 1
66#define ARM_WRITE 2
67#define ARM_LOCK 4
68
69#define RAW1394_LONG_RESET 0
70#define RAW1394_SHORT_RESET 1
71
72/* busresetnotify ... */
73#define RAW1394_NOTIFY_OFF 0
74#define RAW1394_NOTIFY_ON 1
75
76#include <asm/types.h>
77
78struct raw1394_request {
79 __u32 type;
80 __s32 error;
81 __u32 misc;
82
83 __u32 generation;
84 __u32 length;
85
86 __u64 address;
87
88 __u64 tag;
89
90 __u64 sendb;
91 __u64 recvb;
92};
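/*
 * Every command is submitted by write()ing one of these structures to the
 * character device.  A hedged, illustrative sketch of an asynchronous quadlet
 * read follows; async_read_quadlet() is a hypothetical helper, fd is assumed
 * to be open and already taken through the RAW1394_REQ_INITIALIZE /
 * RAW1394_REQ_SET_CARD handshake, and the completion (with the data copied to
 * recvb) is later picked up via poll()/read(), carrying the same tag.
 */
#include <string.h>
#include <unistd.h>

static int async_read_quadlet(int fd, __u32 generation, __u16 node,
			      __u64 offset, __u32 *result)
{
	struct raw1394_request r;

	memset(&r, 0, sizeof(r));
	r.type       = RAW1394_REQ_ASYNC_READ;
	r.generation = generation;	/* must match the current bus generation */
	r.tag        = 1;		/* echoed back so the reply can be matched */
	r.address    = ((__u64)node << 48) | offset;	/* node ID in the top 16 bits */
	r.length     = 4;
	r.recvb      = (unsigned long)result;	/* destination for the read data */

	return write(fd, &r, sizeof(r)) == (ssize_t)sizeof(r) ? 0 : -1;
}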
93
94struct raw1394_khost_list {
95 __u32 nodes;
96 __u8 name[32];
97};
98
99typedef struct arm_request {
100 __u16 destination_nodeid;
101 __u16 source_nodeid;
102 __u64 destination_offset;
103 __u8 tlabel;
104 __u8 tcode;
105 __u8 extended_transaction_code;
106 __u32 generation;
107 __u16 buffer_length;
108 __u8 __user *buffer;
109} *arm_request_t;
110
111typedef struct arm_response {
112 __s32 response_code;
113 __u16 buffer_length;
114 __u8 __user *buffer;
115} *arm_response_t;
116
117typedef struct arm_request_response {
118 struct arm_request __user *request;
119 struct arm_response __user *response;
120} *arm_request_response_t;
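/*
 * When an ARM access triggers a notification, arm_lock() and arm_lock64() in
 * raw1394.c lay the event out contiguously in the client's receive buffer:
 * the arm_request_response pointer pair first, then the arm_request, the
 * arm_response, and finally the data buffers.  A sketch of walking that
 * layout on the user side follows; split_arm_event() is a hypothetical
 * helper, recvb is the buffer handed in at RAW1394_REQ_ARM_REGISTER time, and
 * user-space copies of these structures are assumed to drop the kernel-only
 * __user annotation.
 */
#include <stdint.h>

static void split_arm_event(void *recvb, struct arm_request **req,
			    struct arm_response **resp)
{
	uint8_t *p = recvb;

	/* mirrors the offsets computed when the kernel fills in recvb */
	*req  = (struct arm_request *)(p + sizeof(struct arm_request_response));
	*resp = (struct arm_response *)((uint8_t *)*req +
					sizeof(struct arm_request));
	/*
	 * (*req)->buffer and (*resp)->buffer point further into the same
	 * region, starting directly behind the arm_response structure.
	 */
}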
121
122/* rawiso API */
123#include "ieee1394-ioctl.h"
124
125/* per-packet metadata embedded in the ringbuffer */
126/* must be identical to hpsb_iso_packet_info in iso.h! */
127struct raw1394_iso_packet_info {
128 __u32 offset;
129 __u16 len;
130 __u16 cycle; /* recv only */
131 __u8 channel; /* recv only */
132 __u8 tag;
133 __u8 sy;
134};
135
136/* argument for RAW1394_ISO_RECV/XMIT_PACKETS ioctls */
137struct raw1394_iso_packets {
138 __u32 n_packets;
139 struct raw1394_iso_packet_info __user *infos;
140};
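/*
 * A hedged, illustrative sketch of draining received packets with this
 * structure, matching raw1394_iso_recv_packets() and the ioctl dispatch in
 * raw1394.c.  drain_recv_packets() is a hypothetical helper; fd must already
 * be in the RAW1394_ISO_RECV state, and 'max' must not exceed the number of
 * ready packets reported by RAW1394_IOC_ISO_GET_STATUS.
 */
#include <sys/ioctl.h>

static int drain_recv_packets(int fd, struct raw1394_iso_packet_info *infos,
			      __u32 max)
{
	struct raw1394_iso_packets p = { .n_packets = max, .infos = infos };

	if (ioctl(fd, RAW1394_IOC_ISO_RECV_PACKETS, &p) < 0)
		return -1;
	/* hand the ringbuffer slots back once the payload has been consumed */
	return ioctl(fd, RAW1394_IOC_ISO_RECV_RELEASE_PACKETS, max);
}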
141
142struct raw1394_iso_config {
143 /* size of packet data buffer, in bytes (will be rounded up to PAGE_SIZE) */
144 __u32 data_buf_size;
145
146 /* # of packets to buffer */
147 __u32 buf_packets;
148
149 /* iso channel (set to -1 for multi-channel recv) */
150 __s32 channel;
151
152 /* xmit only - iso transmission speed */
153 __u8 speed;
154
156 /* DMA mode used when receiving iso data; must be supported by the chip */
156 __u8 dma_mode;
157
158 /* max. latency of buffer, in packets (-1 if you don't care) */
159 __s32 irq_interval;
160};
161
162/* argument to RAW1394_ISO_XMIT/RECV_INIT and RAW1394_ISO_GET_STATUS */
163struct raw1394_iso_status {
164 /* current settings */
165 struct raw1394_iso_config config;
166
167 /* number of packets waiting to be filled with data (ISO transmission)
168 or containing data received (ISO reception) */
169 __u32 n_packets;
170
171 /* approximate number of packets dropped due to overflow or
172 underflow of the packet buffer (a value of zero guarantees
173 that no packets have been dropped) */
174 __u32 overflows;
175
176 /* cycle number at which next packet will be transmitted;
177 -1 if not known */
178 __s16 xmit_cycle;
179};
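/*
 * The config block above is always passed embedded in this status structure
 * to the INIT ioctls handled in raw1394.c.  A hedged, illustrative sketch of
 * setting up reception follows; iso_recv_setup() is a hypothetical helper,
 * the buffer sizes are only examples, dma_mode is left at 0 purely as a
 * placeholder (it must be a mode the chip supports), and the three ints given
 * to RAW1394_IOC_ISO_RECV_START are assumed to mean start cycle, tag mask and
 * sync, with -1, -1, 0 meaning "start now, accept any tag".
 */
#include <string.h>
#include <sys/ioctl.h>

static int iso_recv_setup(int fd, int channel)
{
	struct raw1394_iso_status st;
	int start_args[3] = { -1, -1, 0 };

	memset(&st, 0, sizeof(st));
	st.config.data_buf_size = 65536;	/* rounded up to PAGE_SIZE by the kernel */
	st.config.buf_packets   = 256;
	st.config.channel       = channel;	/* -1 selects multi-channel reception */
	st.config.irq_interval  = -1;		/* let the driver pick a latency */

	/* the kernel writes back the settings it actually applied */
	if (ioctl(fd, RAW1394_IOC_ISO_RECV_INIT, &st) < 0)
		return -1;

	return ioctl(fd, RAW1394_IOC_ISO_RECV_START, start_args);
}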
180
181#endif /* IEEE1394_RAW1394_H */
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
new file mode 100644
index 000000000000..00c7b958361a
--- /dev/null
+++ b/drivers/ieee1394/sbp2.c
@@ -0,0 +1,2864 @@
1/*
2 * sbp2.c - SBP-2 protocol driver for IEEE-1394
3 *
4 * Copyright (C) 2000 James Goodwin, Filanet Corporation (www.filanet.com)
5 * jamesg@filanet.com (JSG)
6 *
7 * Copyright (C) 2003 Ben Collins <bcollins@debian.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software Foundation,
21 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24/*
25 * Brief Description:
26 *
27 * This driver implements the Serial Bus Protocol 2 (SBP-2) over IEEE-1394
28 * under Linux. The SBP-2 driver is implemented as an IEEE-1394 high-level
29 * driver. It also registers as a SCSI lower-level driver in order to accept
30 * SCSI commands for transport using SBP-2.
31 *
32 * You may access any attached SBP-2 storage devices as if they were SCSI
33 * devices (e.g. mount /dev/sda1, fdisk, mkfs, etc.).
34 *
35 * Current Issues:
36 *
37 * - Error Handling: SCSI aborts and bus reset requests are handled somewhat
38 * but the code needs additional debugging.
39 */
40
41#include <linux/config.h>
42#include <linux/kernel.h>
43#include <linux/list.h>
44#include <linux/string.h>
45#include <linux/slab.h>
46#include <linux/interrupt.h>
47#include <linux/fs.h>
48#include <linux/poll.h>
49#include <linux/module.h>
50#include <linux/moduleparam.h>
51#include <linux/types.h>
52#include <linux/delay.h>
53#include <linux/sched.h>
54#include <linux/blkdev.h>
55#include <linux/smp_lock.h>
56#include <linux/init.h>
57#include <linux/pci.h>
58
59#include <asm/current.h>
60#include <asm/uaccess.h>
61#include <asm/io.h>
62#include <asm/byteorder.h>
63#include <asm/atomic.h>
64#include <asm/system.h>
65#include <asm/scatterlist.h>
66
67#include <scsi/scsi.h>
68#include <scsi/scsi_cmnd.h>
69#include <scsi/scsi_dbg.h>
70#include <scsi/scsi_device.h>
71#include <scsi/scsi_host.h>
72
73#include "csr1212.h"
74#include "ieee1394.h"
75#include "ieee1394_types.h"
76#include "ieee1394_core.h"
77#include "nodemgr.h"
78#include "hosts.h"
79#include "highlevel.h"
80#include "ieee1394_transactions.h"
81#include "sbp2.h"
82
83static char version[] __devinitdata =
84 "$Rev: 1219 $ Ben Collins <bcollins@debian.org>";
85
86/*
87 * Module load parameter definitions
88 */
89
90/*
91 * Change max_speed on module load if you have a bad IEEE-1394
92 * controller that has trouble running 2KB packets at 400mb.
93 *
94 * NOTE: On certain OHCI parts I have seen short packets on async transmit
95 * (probably due to PCI latency/throughput issues with the part). You can
96 * bump down the speed if you are running into problems.
97 */
98static int max_speed = IEEE1394_SPEED_MAX;
99module_param(max_speed, int, 0644);
100MODULE_PARM_DESC(max_speed, "Force max speed (3 = 800mb, 2 = 400mb default, 1 = 200mb, 0 = 100mb)");
101
102/*
103 * Set serialize_io to 1 if you'd like only one scsi command sent
104 * down to us at a time (debugging). This might be necessary for very
105 * badly behaved sbp2 devices.
106 */
107static int serialize_io = 0;
108module_param(serialize_io, int, 0444);
109MODULE_PARM_DESC(serialize_io, "Serialize all I/O coming down from the scsi drivers (default = 0)");
110
111/*
112 * Bump up max_sectors if you'd like to support very large transfers.
113 * Please note that some older sbp2 bridge chips are broken for
114 * transfers greater than or equal to 128KB. The default is 255
115 * sectors, or just under 128KB (at a 512 byte sector size). Note that
116 * the Oxsemi sbp2 chipsets have no problems supporting very large
117 * transfer sizes.
118 */
119static int max_sectors = SBP2_MAX_SECTORS;
120module_param(max_sectors, int, 0444);
121MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = 255)");
122
123/*
124 * Exclusive login to sbp2 device? In most cases, the sbp2 driver should
125 * do an exclusive login, as it's generally unsafe to have two hosts
126 * talking to a single sbp2 device at the same time (filesystem coherency,
127 * etc.). If you're running an sbp2 device that supports multiple logins,
128 * and you're either running read-only filesystems or some sort of special
129 * filesystem supporting multiple hosts (one such filesystem is OpenGFS,
130 * see opengfs.sourceforge.net for more info), then set exclusive_login
131 * to zero. Note: The Oxsemi OXFW911 sbp2 chipset supports up to four
132 * concurrent logins.
133 */
134static int exclusive_login = 1;
135module_param(exclusive_login, int, 0644);
136MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)");
137
138/*
139 * SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on
140 * if your sbp2 device is not properly handling the SCSI inquiry command.
141 * This hack makes the inquiry look more like a typical MS Windows
142 * inquiry.
143 *
144 * If force_inquiry_hack=1 is required for your device to work,
145 * please submit the logged sbp2_firmware_revision value of this device to
146 * the linux1394-devel mailing list.
147 */
148static int force_inquiry_hack = 0;
149module_param(force_inquiry_hack, int, 0444);
150MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");
151
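/*
 * For example (illustrative only), the parameters above can be set at
 * module load time:
 *
 *	modprobe sbp2 max_speed=1 serialize_io=1 exclusive_login=0
 */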
152
153/*
154 * Export information about protocols/devices supported by this driver.
155 */
156static struct ieee1394_device_id sbp2_id_table[] = {
157 {
158 .match_flags = IEEE1394_MATCH_SPECIFIER_ID |
159 IEEE1394_MATCH_VERSION,
160 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
161 .version = SBP2_SW_VERSION_ENTRY & 0xffffff
162 },
163 { }
164};
165
166MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
167
168/*
169 * Debug levels, configured via kernel config, or enabled here.
170 */
171
172/* #define CONFIG_IEEE1394_SBP2_DEBUG_ORBS */
173/* #define CONFIG_IEEE1394_SBP2_DEBUG_DMA */
174/* #define CONFIG_IEEE1394_SBP2_DEBUG 1 */
175/* #define CONFIG_IEEE1394_SBP2_DEBUG 2 */
176/* #define CONFIG_IEEE1394_SBP2_PACKET_DUMP */
177
178#ifdef CONFIG_IEEE1394_SBP2_DEBUG_ORBS
179#define SBP2_ORB_DEBUG(fmt, args...) HPSB_ERR("sbp2(%s): "fmt, __FUNCTION__, ## args)
180static u32 global_outstanding_command_orbs = 0;
181#define outstanding_orb_incr global_outstanding_command_orbs++
182#define outstanding_orb_decr global_outstanding_command_orbs--
183#else
184#define SBP2_ORB_DEBUG(fmt, args...)
185#define outstanding_orb_incr
186#define outstanding_orb_decr
187#endif
188
189#ifdef CONFIG_IEEE1394_SBP2_DEBUG_DMA
190#define SBP2_DMA_ALLOC(fmt, args...) \
191 HPSB_ERR("sbp2(%s)alloc(%d): "fmt, __FUNCTION__, \
192 ++global_outstanding_dmas, ## args)
193#define SBP2_DMA_FREE(fmt, args...) \
194 HPSB_ERR("sbp2(%s)free(%d): "fmt, __FUNCTION__, \
195 --global_outstanding_dmas, ## args)
196static u32 global_outstanding_dmas = 0;
197#else
198#define SBP2_DMA_ALLOC(fmt, args...)
199#define SBP2_DMA_FREE(fmt, args...)
200#endif
201
202#if CONFIG_IEEE1394_SBP2_DEBUG >= 2
203#define SBP2_DEBUG(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
204#define SBP2_INFO(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
205#define SBP2_NOTICE(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
206#define SBP2_WARN(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
207#elif CONFIG_IEEE1394_SBP2_DEBUG == 1
208#define SBP2_DEBUG(fmt, args...) HPSB_DEBUG("sbp2: "fmt, ## args)
209#define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args)
210#define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args)
211#define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args)
212#else
213#define SBP2_DEBUG(fmt, args...)
214#define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args)
215#define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args)
216#define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args)
217#endif
218
219#define SBP2_ERR(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
220
221
222/*
223 * Globals
224 */
225
226static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id,
227 u32 status);
228
229static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
230 u32 scsi_status, struct scsi_cmnd *SCpnt,
231 void (*done)(struct scsi_cmnd *));
232
233static struct scsi_host_template scsi_driver_template;
234
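/* Maps an IEEE-1394 speed code (S100 = 0, S200 = 1, S400 = 2, ...) to the
 * SBP-2 max_payload value; the payload limit works out to
 * 2^(max_payload + 2) bytes, e.g. 2048 bytes at S400 (see
 * sbp2_max_speed_and_size() below). */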
235static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC };
236
237static void sbp2_host_reset(struct hpsb_host *host);
238
239static int sbp2_probe(struct device *dev);
240static int sbp2_remove(struct device *dev);
241static int sbp2_update(struct unit_directory *ud);
242
243static struct hpsb_highlevel sbp2_highlevel = {
244 .name = SBP2_DEVICE_NAME,
245 .host_reset = sbp2_host_reset,
246};
247
248static struct hpsb_address_ops sbp2_ops = {
249 .write = sbp2_handle_status_write
250};
251
252#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
253static struct hpsb_address_ops sbp2_physdma_ops = {
254 .read = sbp2_handle_physdma_read,
255 .write = sbp2_handle_physdma_write,
256};
257#endif
258
259static struct hpsb_protocol_driver sbp2_driver = {
260 .name = "SBP2 Driver",
261 .id_table = sbp2_id_table,
262 .update = sbp2_update,
263 .driver = {
264 .name = SBP2_DEVICE_NAME,
265 .bus = &ieee1394_bus_type,
266 .probe = sbp2_probe,
267 .remove = sbp2_remove,
268 },
269};
270
271
272/* List of device firmware revisions that require a forced 36 byte inquiry. */
273static u32 sbp2_broken_inquiry_list[] = {
274 0x00002800, /* Stefan Richter <richtest@bauwesen.tu-cottbus.de> */
275 /* DViCO Momobay CX-1 */
276 0x00000200 /* Andreas Plesch <plesch@fas.harvard.edu> */
277 /* QPS Fire DVDBurner */
278};
279
280#define NUM_BROKEN_INQUIRY_DEVS \
281 (sizeof(sbp2_broken_inquiry_list)/sizeof(*sbp2_broken_inquiry_list))
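/* The firmware revisions above are matched against
 * (firmware_revision & 0xffff00) in sbp2_parse_unit_directory() below. */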
282
283/**************************************
284 * General utility functions
285 **************************************/
286
287
288#ifndef __BIG_ENDIAN
289/*
290 * Converts a buffer from be32 to cpu byte ordering. Length is in bytes.
291 */
292static __inline__ void sbp2util_be32_to_cpu_buffer(void *buffer, int length)
293{
294 u32 *temp = buffer;
295
296 for (length = (length >> 2); length--; )
297 temp[length] = be32_to_cpu(temp[length]);
298
299 return;
300}
301
302/*
303 * Converts a buffer from cpu to be32 byte ordering. Length is in bytes.
304 */
305static __inline__ void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
306{
307 u32 *temp = buffer;
308
309 for (length = (length >> 2); length--; )
310 temp[length] = cpu_to_be32(temp[length]);
311
312 return;
313}
314#else /* BIG_ENDIAN */
315/* Why waste the cpu cycles? */
316#define sbp2util_be32_to_cpu_buffer(x,y)
317#define sbp2util_cpu_to_be32_buffer(x,y)
318#endif
319
320#ifdef CONFIG_IEEE1394_SBP2_PACKET_DUMP
321/*
322 * Debug packet dump routine. Length is in bytes.
323 */
324static void sbp2util_packet_dump(void *buffer, int length, char *dump_name, u32 dump_phys_addr)
325{
326 int i;
327 unsigned char *dump = buffer;
328
329 if (!dump || !length || !dump_name)
330 return;
331
332 if (dump_phys_addr)
333 printk("[%s, 0x%x]", dump_name, dump_phys_addr);
334 else
335 printk("[%s]", dump_name);
336 for (i = 0; i < length; i++) {
337 if (i > 0x3f) {
338 printk("\n ...");
339 break;
340 }
341 if ((i & 0x3) == 0)
342 printk(" ");
343 if ((i & 0xf) == 0)
344 printk("\n ");
345 printk("%02x ", (int) dump[i]);
346 }
347 printk("\n");
348
349 return;
350}
351#else
352#define sbp2util_packet_dump(w,x,y,z)
353#endif
354
355/*
356 * Goofy routine that basically does a down_timeout: polls 'done' every 100ms until it is set or 'timeout' jiffies expire.
357 */
358static int sbp2util_down_timeout(atomic_t *done, int timeout)
359{
360 int i;
361
362 for (i = timeout; (i > 0 && atomic_read(done) == 0); i -= HZ/10) {
363 if (msleep_interruptible(100)) /* 100ms */
364 return(1);
365 }
366 return ((i > 0) ? 0:1);
367}
368
369/* Frees an allocated packet */
370static void sbp2_free_packet(struct hpsb_packet *packet)
371{
372 hpsb_free_tlabel(packet);
373 hpsb_free_packet(packet);
374}
375
376/* This is much like hpsb_node_write(), except it ignores the response
377 * subaction and returns immediately. Can be used from interrupts.
378 */
379static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr,
380 quadlet_t *buffer, size_t length)
381{
382 struct hpsb_packet *packet;
383
384 packet = hpsb_make_writepacket(ne->host, ne->nodeid,
385 addr, buffer, length);
386 if (!packet)
387 return -ENOMEM;
388
389 hpsb_set_packet_complete_task(packet, (void (*)(void*))sbp2_free_packet,
390 packet);
391
392 hpsb_node_fill_packet(ne, packet);
393
394 if (hpsb_send_packet(packet) < 0) {
395 sbp2_free_packet(packet);
396 return -EIO;
397 }
398
399 return 0;
400}
401
402/*
403 * This function is called to create a pool of command orbs used for
404 * command processing. It is called when a new sbp2 device is detected.
405 */
406static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_id)
407{
408 struct sbp2scsi_host_info *hi = scsi_id->hi;
409 int i;
410 unsigned long flags, orbs;
411 struct sbp2_command_info *command;
412
413 orbs = serialize_io ? 2 : SBP2_MAX_CMDS;
414
415 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
416 for (i = 0; i < orbs; i++) {
417 command = (struct sbp2_command_info *)
418 kmalloc(sizeof(struct sbp2_command_info), GFP_ATOMIC);
419 if (!command) {
420 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
421 return(-ENOMEM);
422 }
423 memset(command, '\0', sizeof(struct sbp2_command_info));
424 command->command_orb_dma =
425 pci_map_single (hi->host->pdev, &command->command_orb,
426 sizeof(struct sbp2_command_orb),
427 PCI_DMA_BIDIRECTIONAL);
428 SBP2_DMA_ALLOC("single command orb DMA");
429 command->sge_dma =
430 pci_map_single (hi->host->pdev, &command->scatter_gather_element,
431 sizeof(command->scatter_gather_element),
432 PCI_DMA_BIDIRECTIONAL);
433 SBP2_DMA_ALLOC("scatter_gather_element");
434 INIT_LIST_HEAD(&command->list);
435 list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
436 }
437 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
438 return 0;
439}
440
441/*
442 * This function is called to delete a pool of command orbs.
443 */
444static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_id)
445{
446 struct hpsb_host *host = scsi_id->hi->host;
447 struct list_head *lh, *next;
448 struct sbp2_command_info *command;
449 unsigned long flags;
450
451 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
452 if (!list_empty(&scsi_id->sbp2_command_orb_completed)) {
453 list_for_each_safe(lh, next, &scsi_id->sbp2_command_orb_completed) {
454 command = list_entry(lh, struct sbp2_command_info, list);
455
456 /* Release our generic DMA mappings */
457 pci_unmap_single(host->pdev, command->command_orb_dma,
458 sizeof(struct sbp2_command_orb),
459 PCI_DMA_BIDIRECTIONAL);
460 SBP2_DMA_FREE("single command orb DMA");
461 pci_unmap_single(host->pdev, command->sge_dma,
462 sizeof(command->scatter_gather_element),
463 PCI_DMA_BIDIRECTIONAL);
464 SBP2_DMA_FREE("scatter_gather_element");
465
466 kfree(command);
467 }
468 }
469 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
470 return;
471}
472
473/*
474 * This function finds the sbp2_command for a given outstanding command
475 * orb. Only looks at the inuse list.
476 */
477static struct sbp2_command_info *sbp2util_find_command_for_orb(
478 struct scsi_id_instance_data *scsi_id, dma_addr_t orb)
479{
480 struct sbp2_command_info *command;
481 unsigned long flags;
482
483 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
484 if (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
485 list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) {
486 if (command->command_orb_dma == orb) {
487 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
488 return (command);
489 }
490 }
491 }
492 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
493
494 SBP2_ORB_DEBUG("could not match command orb %x", (unsigned int)orb);
495
496 return(NULL);
497}
498
499/*
500 * This function finds the sbp2_command for a given outstanding SCpnt.
501 * Only looks at the inuse list.
502 */
503static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_instance_data *scsi_id, void *SCpnt)
504{
505 struct sbp2_command_info *command;
506 unsigned long flags;
507
508 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
509 if (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
510 list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) {
511 if (command->Current_SCpnt == SCpnt) {
512 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
513 return (command);
514 }
515 }
516 }
517 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
518 return(NULL);
519}
520
521/*
522 * This function allocates a command orb used to send a scsi command.
523 */
524static struct sbp2_command_info *sbp2util_allocate_command_orb(
525 struct scsi_id_instance_data *scsi_id,
526 struct scsi_cmnd *Current_SCpnt,
527 void (*Current_done)(struct scsi_cmnd *))
528{
529 struct list_head *lh;
530 struct sbp2_command_info *command = NULL;
531 unsigned long flags;
532
533 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
534 if (!list_empty(&scsi_id->sbp2_command_orb_completed)) {
535 lh = scsi_id->sbp2_command_orb_completed.next;
536 list_del(lh);
537 command = list_entry(lh, struct sbp2_command_info, list);
538 command->Current_done = Current_done;
539 command->Current_SCpnt = Current_SCpnt;
540 list_add_tail(&command->list, &scsi_id->sbp2_command_orb_inuse);
541 } else {
542 SBP2_ERR("sbp2util_allocate_command_orb - No orbs available!");
543 }
544 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
545 return (command);
546}
547
548/* Free our DMA mappings */
549static void sbp2util_free_command_dma(struct sbp2_command_info *command)
550{
551 struct scsi_id_instance_data *scsi_id =
552 (struct scsi_id_instance_data *)command->Current_SCpnt->device->host->hostdata[0];
553 struct hpsb_host *host;
554
555 if (!scsi_id) {
556 printk(KERN_ERR "%s: scsi_id == NULL\n", __FUNCTION__);
557 return;
558 }
559
560 host = scsi_id->ud->ne->host;
561
562 if (command->cmd_dma) {
563 if (command->dma_type == CMD_DMA_SINGLE) {
564 pci_unmap_single(host->pdev, command->cmd_dma,
565 command->dma_size, command->dma_dir);
566 SBP2_DMA_FREE("single bulk");
567 } else if (command->dma_type == CMD_DMA_PAGE) {
568 pci_unmap_page(host->pdev, command->cmd_dma,
569 command->dma_size, command->dma_dir);
570 SBP2_DMA_FREE("single page");
571 } /* XXX: Check for CMD_DMA_NONE bug */
572 command->dma_type = CMD_DMA_NONE;
573 command->cmd_dma = 0;
574 }
575
576 if (command->sge_buffer) {
577 pci_unmap_sg(host->pdev, command->sge_buffer,
578 command->dma_size, command->dma_dir);
579 SBP2_DMA_FREE("scatter list");
580 command->sge_buffer = NULL;
581 }
582}
583
584/*
585 * This function moves a command to the completed orb list.
586 */
587static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id, struct sbp2_command_info *command)
588{
589 unsigned long flags;
590
591 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
592 list_del(&command->list);
593 sbp2util_free_command_dma(command);
594 list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
595 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
596}
597
598
599
600/*********************************************
601 * IEEE-1394 core driver stack related section
602 *********************************************/
603static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud);
604
605static int sbp2_probe(struct device *dev)
606{
607 struct unit_directory *ud;
608 struct scsi_id_instance_data *scsi_id;
609
610 SBP2_DEBUG("sbp2_probe");
611
612 ud = container_of(dev, struct unit_directory, device);
613
614 /* Don't probe UDs that have the LUN flag. We'll probe the LUN(s)
615 * instead. */
616 if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY)
617 return -ENODEV;
618
619 scsi_id = sbp2_alloc_device(ud);
620
621 if (!scsi_id)
622 return -ENOMEM;
623
624 sbp2_parse_unit_directory(scsi_id, ud);
625
626 return sbp2_start_device(scsi_id);
627}
628
629static int sbp2_remove(struct device *dev)
630{
631 struct unit_directory *ud;
632 struct scsi_id_instance_data *scsi_id;
633
634 SBP2_DEBUG("sbp2_remove");
635
636 ud = container_of(dev, struct unit_directory, device);
637 scsi_id = ud->device.driver_data;
638
639 sbp2_logout_device(scsi_id);
640 sbp2_remove_device(scsi_id);
641
642 return 0;
643}
644
645static int sbp2_update(struct unit_directory *ud)
646{
647 struct scsi_id_instance_data *scsi_id = ud->device.driver_data;
648
649 SBP2_DEBUG("sbp2_update");
650
651 if (sbp2_reconnect_device(scsi_id)) {
652
653 /*
654 * Ok, reconnect has failed. Perhaps we didn't
655 * reconnect fast enough. Try doing a regular login, but
656 * first do a logout just in case of any weirdness.
657 */
658 sbp2_logout_device(scsi_id);
659
660 if (sbp2_login_device(scsi_id)) {
661 /* Login failed too, just fail, and the backend
662 * will call our sbp2_remove for us */
663 SBP2_ERR("Failed to reconnect to sbp2 device!");
664 return -EBUSY;
665 }
666 }
667
668 /* Set max retries to something large on the device. */
669 sbp2_set_busy_timeout(scsi_id);
670
671 /* Do a SBP-2 fetch agent reset. */
672 sbp2_agent_reset(scsi_id, 1);
673
674 /* Get the max speed and packet size that we can use. */
675 sbp2_max_speed_and_size(scsi_id);
676
677 /* Complete any pending commands with busy (so they get
678 * retried) and remove them from our queue
679 */
680 sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
681
682 /* Make sure we unblock requests (since this is likely after a bus
683 * reset). */
684 scsi_unblock_requests(scsi_id->scsi_host);
685
686 return 0;
687}
688
689/* This function is called by sbp2_probe for each new device. We now
690 * allocate one scsi host for each scsi_id (unit directory). */
691static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud)
692{
693 struct sbp2scsi_host_info *hi;
694 struct Scsi_Host *scsi_host = NULL;
695 struct scsi_id_instance_data *scsi_id = NULL;
696
697 SBP2_DEBUG("sbp2_alloc_device");
698
699 scsi_id = kmalloc(sizeof(*scsi_id), GFP_KERNEL);
700 if (!scsi_id) {
701 SBP2_ERR("failed to create scsi_id");
702 goto failed_alloc;
703 }
704 memset(scsi_id, 0, sizeof(*scsi_id));
705
706 scsi_id->ne = ud->ne;
707 scsi_id->ud = ud;
708 scsi_id->speed_code = IEEE1394_SPEED_100;
709 scsi_id->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100];
710 atomic_set(&scsi_id->sbp2_login_complete, 0);
711 INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_inuse);
712 INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed);
713 INIT_LIST_HEAD(&scsi_id->scsi_list);
714 spin_lock_init(&scsi_id->sbp2_command_orb_lock);
715 scsi_id->sbp2_device_type_and_lun = SBP2_DEVICE_TYPE_LUN_UNINITIALIZED;
716
717 ud->device.driver_data = scsi_id;
718
719 hi = hpsb_get_hostinfo(&sbp2_highlevel, ud->ne->host);
720 if (!hi) {
721 hi = hpsb_create_hostinfo(&sbp2_highlevel, ud->ne->host, sizeof(*hi));
722 if (!hi) {
723 SBP2_ERR("failed to allocate hostinfo");
724 goto failed_alloc;
725 }
726 SBP2_DEBUG("sbp2_alloc_device: allocated hostinfo");
727 hi->host = ud->ne->host;
728 INIT_LIST_HEAD(&hi->scsi_ids);
729
730 /* Register our sbp2 status address space... */
731 hpsb_register_addrspace(&sbp2_highlevel, ud->ne->host, &sbp2_ops,
732 SBP2_STATUS_FIFO_ADDRESS,
733 SBP2_STATUS_FIFO_ADDRESS +
734 SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(SBP2_MAX_UDS_PER_NODE+1));
735#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
736 /* Handle data movement if physical dma is not
737 * enabled/supported on the host controller */
738 hpsb_register_addrspace(&sbp2_highlevel, ud->ne->host, &sbp2_physdma_ops,
739 0x0ULL, 0xfffffffcULL);
740#endif
741 }
742
743 scsi_id->hi = hi;
744
745 list_add_tail(&scsi_id->scsi_list, &hi->scsi_ids);
746
747 /* Register our host with the SCSI stack. */
748 scsi_host = scsi_host_alloc(&scsi_driver_template, 0);
749 if (!scsi_host) {
750 SBP2_ERR("failed to register scsi host");
751 goto failed_alloc;
752 }
753
754 scsi_host->hostdata[0] = (unsigned long)scsi_id;
755
756 if (!scsi_add_host(scsi_host, &ud->device)) {
757 scsi_id->scsi_host = scsi_host;
758 return scsi_id;
759 }
760
761 SBP2_ERR("failed to add scsi host");
762 scsi_host_put(scsi_host);
763
764failed_alloc:
765 sbp2_remove_device(scsi_id);
766 return NULL;
767}
768
769
770static void sbp2_host_reset(struct hpsb_host *host)
771{
772 struct sbp2scsi_host_info *hi;
773 struct scsi_id_instance_data *scsi_id;
774
775 hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
776
777 if (hi) {
778 list_for_each_entry(scsi_id, &hi->scsi_ids, scsi_list)
779 scsi_block_requests(scsi_id->scsi_host);
780 }
781}
782
783
784/*
785 * This function is where we first pull the node unique ids, and then
786 * allocate memory and register a SBP-2 device.
787 */
788static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
789{
790 struct sbp2scsi_host_info *hi = scsi_id->hi;
791 struct scsi_device *sdev;
792
793 SBP2_DEBUG("sbp2_start_device");
794
795 /* Login FIFO DMA */
796 scsi_id->login_response =
797 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_login_response),
798 &scsi_id->login_response_dma);
799 if (!scsi_id->login_response)
800 goto alloc_fail;
801 SBP2_DMA_ALLOC("consistent DMA region for login FIFO");
802
803 /* Query logins ORB DMA */
804 scsi_id->query_logins_orb =
805 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_query_logins_orb),
806 &scsi_id->query_logins_orb_dma);
807 if (!scsi_id->query_logins_orb)
808 goto alloc_fail;
809 SBP2_DMA_ALLOC("consistent DMA region for query logins ORB");
810
811 /* Query logins response DMA */
812 scsi_id->query_logins_response =
813 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_query_logins_response),
814 &scsi_id->query_logins_response_dma);
815 if (!scsi_id->query_logins_response)
816 goto alloc_fail;
817 SBP2_DMA_ALLOC("consistent DMA region for query logins response");
818
819 /* Reconnect ORB DMA */
820 scsi_id->reconnect_orb =
821 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_reconnect_orb),
822 &scsi_id->reconnect_orb_dma);
823 if (!scsi_id->reconnect_orb)
824 goto alloc_fail;
825 SBP2_DMA_ALLOC("consistent DMA region for reconnect ORB");
826
827 /* Logout ORB DMA */
828 scsi_id->logout_orb =
829 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_logout_orb),
830 &scsi_id->logout_orb_dma);
831 if (!scsi_id->logout_orb)
832 goto alloc_fail;
833 SBP2_DMA_ALLOC("consistent DMA region for logout ORB");
834
835 /* Login ORB DMA */
836 scsi_id->login_orb =
837 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_login_orb),
838 &scsi_id->login_orb_dma);
839 if (!scsi_id->login_orb) {
840alloc_fail:
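		/* Jump target for the allocation failures above: release
		 * whatever consistent DMA regions were allocated so far. */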
841 if (scsi_id->query_logins_response) {
842 pci_free_consistent(hi->host->pdev,
843 sizeof(struct sbp2_query_logins_response),
844 scsi_id->query_logins_response,
845 scsi_id->query_logins_response_dma);
846 SBP2_DMA_FREE("query logins response DMA");
847 }
848
849 if (scsi_id->query_logins_orb) {
850 pci_free_consistent(hi->host->pdev,
851 sizeof(struct sbp2_query_logins_orb),
852 scsi_id->query_logins_orb,
853 scsi_id->query_logins_orb_dma);
854 SBP2_DMA_FREE("query logins ORB DMA");
855 }
856
857 if (scsi_id->logout_orb) {
858 pci_free_consistent(hi->host->pdev,
859 sizeof(struct sbp2_logout_orb),
860 scsi_id->logout_orb,
861 scsi_id->logout_orb_dma);
862 SBP2_DMA_FREE("logout ORB DMA");
863 }
864
865 if (scsi_id->reconnect_orb) {
866 pci_free_consistent(hi->host->pdev,
867 sizeof(struct sbp2_reconnect_orb),
868 scsi_id->reconnect_orb,
869 scsi_id->reconnect_orb_dma);
870 SBP2_DMA_FREE("reconnect ORB DMA");
871 }
872
873 if (scsi_id->login_response) {
874 pci_free_consistent(hi->host->pdev,
875 sizeof(struct sbp2_login_response),
876 scsi_id->login_response,
877 scsi_id->login_response_dma);
878 SBP2_DMA_FREE("login FIFO DMA");
879 }
880
881 list_del(&scsi_id->scsi_list);
882
883 kfree(scsi_id);
884
885 SBP2_ERR ("Could not allocate memory for scsi_id");
886
887 return -ENOMEM;
888 }
889 SBP2_DMA_ALLOC("consistent DMA region for login ORB");
890
891 SBP2_DEBUG("New SBP-2 device inserted, SCSI ID = %x", scsi_id->ud->id);
892
893 /*
894 * Create our command orb pool
895 */
896 if (sbp2util_create_command_orb_pool(scsi_id)) {
897 SBP2_ERR("sbp2util_create_command_orb_pool failed!");
898 sbp2_remove_device(scsi_id);
899 return -ENOMEM;
900 }
901
902 /* Wait a second before logging in. The reason is that we may be so
903 * close to a bus reset that the device is not yet available for logins.
904 * This can happen when the bus reset is caused by the host
905 * connected to the sbp2 device being removed. That host would
906 * have a certain amount of time to relogin before the sbp2 device
907 * allows someone else to log in instead. One second makes sense. */
908 msleep_interruptible(1000);
909 if (signal_pending(current)) {
910 SBP2_WARN("aborting sbp2_start_device due to event");
911 sbp2_remove_device(scsi_id);
912 return -EINTR;
913 }
914
915 /*
916 * Login to the sbp-2 device
917 */
918 if (sbp2_login_device(scsi_id)) {
919 /* Login failed, just remove the device. */
920 sbp2_remove_device(scsi_id);
921 return -EBUSY;
922 }
923
924 /*
925 * Set max retries to something large on the device
926 */
927 sbp2_set_busy_timeout(scsi_id);
928
929 /*
930 * Do a SBP-2 fetch agent reset
931 */
932 sbp2_agent_reset(scsi_id, 1);
933
934 /*
935 * Get the max speed and packet size that we can use
936 */
937 sbp2_max_speed_and_size(scsi_id);
938
939 /* Add this device to the scsi layer now */
940 sdev = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0);
941 if (IS_ERR(sdev)) {
942 SBP2_ERR("scsi_add_device failed");
943 return PTR_ERR(sdev);
944 }
945
946 return 0;
947}
948
949/*
950 * This function removes an sbp2 device from the sbp2scsi_host_info struct.
951 */
952static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id)
953{
954 struct sbp2scsi_host_info *hi;
955
956 SBP2_DEBUG("sbp2_remove_device");
957
958 if (!scsi_id)
959 return;
960
961 hi = scsi_id->hi;
962
963 /* This will remove our scsi device as well */
964 if (scsi_id->scsi_host) {
965 scsi_remove_host(scsi_id->scsi_host);
966 scsi_host_put(scsi_id->scsi_host);
967 }
968
969 sbp2util_remove_command_orb_pool(scsi_id);
970
971 list_del(&scsi_id->scsi_list);
972
973 if (scsi_id->login_response) {
974 pci_free_consistent(hi->host->pdev,
975 sizeof(struct sbp2_login_response),
976 scsi_id->login_response,
977 scsi_id->login_response_dma);
978 SBP2_DMA_FREE("single login FIFO");
979 }
980
981 if (scsi_id->login_orb) {
982 pci_free_consistent(hi->host->pdev,
983 sizeof(struct sbp2_login_orb),
984 scsi_id->login_orb,
985 scsi_id->login_orb_dma);
986 SBP2_DMA_FREE("single login ORB");
987 }
988
989 if (scsi_id->reconnect_orb) {
990 pci_free_consistent(hi->host->pdev,
991 sizeof(struct sbp2_reconnect_orb),
992 scsi_id->reconnect_orb,
993 scsi_id->reconnect_orb_dma);
994 SBP2_DMA_FREE("single reconnect orb");
995 }
996
997 if (scsi_id->logout_orb) {
998 pci_free_consistent(hi->host->pdev,
999 sizeof(struct sbp2_logout_orb),
1000 scsi_id->logout_orb,
1001 scsi_id->logout_orb_dma);
1002 SBP2_DMA_FREE("single logout orb");
1003 }
1004
1005 if (scsi_id->query_logins_orb) {
1006 pci_free_consistent(hi->host->pdev,
1007 sizeof(struct sbp2_query_logins_orb),
1008 scsi_id->query_logins_orb,
1009 scsi_id->query_logins_orb_dma);
1010 SBP2_DMA_FREE("single query logins orb");
1011 }
1012
1013 if (scsi_id->query_logins_response) {
1014 pci_free_consistent(hi->host->pdev,
1015 sizeof(struct sbp2_query_logins_response),
1016 scsi_id->query_logins_response,
1017 scsi_id->query_logins_response_dma);
1018 SBP2_DMA_FREE("single query logins data");
1019 }
1020
1021 scsi_id->ud->device.driver_data = NULL;
1022
1023 SBP2_DEBUG("SBP-2 device removed, SCSI ID = %d", scsi_id->ud->id);
1024
1025 kfree(scsi_id);
1026}
1027
1028#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
1029/*
1030 * This function deals with physical dma write requests (for adapters that do not support
1031 * physical dma in hardware). Mostly just here for debugging...
1032 */
1033static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, int destid, quadlet_t *data,
1034 u64 addr, size_t length, u16 flags)
1035{
1036
1037 /*
1038 * Manually put the data in the right place.
1039 */
1040 memcpy(bus_to_virt((u32)addr), data, length);
1041 sbp2util_packet_dump(data, length, "sbp2 phys dma write by device", (u32)addr);
1042 return(RCODE_COMPLETE);
1043}
1044
1045/*
1046 * This function deals with physical dma read requests (for adapters that do not support
1047 * physical dma in hardware). Mostly just here for debugging...
1048 */
1049static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_t *data,
1050 u64 addr, size_t length, u16 flags)
1051{
1052
1053 /*
1054 * Grab data from memory and send a read response.
1055 */
1056 memcpy(data, bus_to_virt((u32)addr), length);
1057 sbp2util_packet_dump(data, length, "sbp2 phys dma read by device", (u32)addr);
1058 return(RCODE_COMPLETE);
1059}
1060#endif
1061
1062
1063/**************************************
1064 * SBP-2 protocol related section
1065 **************************************/
1066
1067/*
1068 * This function determines if we should convert scsi commands for a particular sbp2 device type
1069 */
1070static __inline__ int sbp2_command_conversion_device_type(u8 device_type)
1071{
1072 return (((device_type == TYPE_DISK) ||
1073 (device_type == TYPE_SDAD) ||
1074 (device_type == TYPE_ROM)) ? 1:0);
1075}
1076
1077/*
1078 * This function queries the device for the maximum concurrent logins it
1079 * supports.
1080 */
1081static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
1082{
1083 struct sbp2scsi_host_info *hi = scsi_id->hi;
1084 quadlet_t data[2];
1085 int max_logins;
1086 int active_logins;
1087
1088 SBP2_DEBUG("sbp2_query_logins");
1089
1090 scsi_id->query_logins_orb->reserved1 = 0x0;
1091 scsi_id->query_logins_orb->reserved2 = 0x0;
1092
1093 scsi_id->query_logins_orb->query_response_lo = scsi_id->query_logins_response_dma;
1094 scsi_id->query_logins_orb->query_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
1095 SBP2_DEBUG("sbp2_query_logins: query_response_hi/lo initialized");
1096
1097 scsi_id->query_logins_orb->lun_misc = ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST);
1098 scsi_id->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1);
1099 if (scsi_id->sbp2_device_type_and_lun != SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
1100 scsi_id->query_logins_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
1101 SBP2_DEBUG("sbp2_query_logins: set lun to %d",
1102 ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun));
1103 }
1104 SBP2_DEBUG("sbp2_query_logins: lun_misc initialized");
1105
1106 scsi_id->query_logins_orb->reserved_resp_length =
1107 ORB_SET_QUERY_LOGINS_RESP_LENGTH(sizeof(struct sbp2_query_logins_response));
1108 SBP2_DEBUG("sbp2_query_logins: reserved_resp_length initialized");
1109
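	/* Status for this ORB is written by the target to this unit's slot
	 * in the status FIFO address range registered in sbp2_alloc_device()
	 * and handled by sbp2_handle_status_write(). */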
1110 scsi_id->query_logins_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
1111 SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
1112 scsi_id->query_logins_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
1113 SBP2_STATUS_FIFO_ADDRESS_HI);
1114 SBP2_DEBUG("sbp2_query_logins: status FIFO initialized");
1115
1116 sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb));
1117
1118 SBP2_DEBUG("sbp2_query_logins: orb byte-swapped");
1119
1120 sbp2util_packet_dump(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb),
1121 "sbp2 query logins orb", scsi_id->query_logins_orb_dma);
1122
1123 memset(scsi_id->query_logins_response, 0, sizeof(struct sbp2_query_logins_response));
1124 memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
1125
1126 SBP2_DEBUG("sbp2_query_logins: query_logins_response/status FIFO memset");
1127
1128 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1129 data[1] = scsi_id->query_logins_orb_dma;
1130 sbp2util_cpu_to_be32_buffer(data, 8);
1131
1132 atomic_set(&scsi_id->sbp2_login_complete, 0);
1133
1134 SBP2_DEBUG("sbp2_query_logins: prepared to write");
1135 hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
1136 SBP2_DEBUG("sbp2_query_logins: written");
1137
1138 if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 2*HZ)) {
1139 SBP2_INFO("Error querying logins to SBP-2 device - timed out");
1140 return(-EIO);
1141 }
1142
1143 if (scsi_id->status_block.ORB_offset_lo != scsi_id->query_logins_orb_dma) {
1144 SBP2_INFO("Error querying logins to SBP-2 device - response ORB mismatch");
1145 return(-EIO);
1146 }
1147
1148 if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) ||
1149 STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc) ||
1150 STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
1151
1152 SBP2_INFO("Error querying logins to SBP-2 device - failed");
1153 return(-EIO);
1154 }
1155
1156 sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_response, sizeof(struct sbp2_query_logins_response));
1157
1158 SBP2_DEBUG("length_max_logins = %x",
1159 (unsigned int)scsi_id->query_logins_response->length_max_logins);
1160
1161 SBP2_DEBUG("Query logins to SBP-2 device successful");
1162
1163 max_logins = RESPONSE_GET_MAX_LOGINS(scsi_id->query_logins_response->length_max_logins);
1164 SBP2_DEBUG("Maximum concurrent logins supported: %d", max_logins);
1165
1166 active_logins = RESPONSE_GET_ACTIVE_LOGINS(scsi_id->query_logins_response->length_max_logins);
1167 SBP2_DEBUG("Number of active logins: %d", active_logins);
1168
1169 if (active_logins >= max_logins) {
1170 return(-EIO);
1171 }
1172
1173 return 0;
1174}
1175
1176/*
1177 * This function is called in order to login to a particular SBP-2 device,
1178 * after a bus reset.
1179 */
1180static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
1181{
1182 struct sbp2scsi_host_info *hi = scsi_id->hi;
1183 quadlet_t data[2];
1184
1185 SBP2_DEBUG("sbp2_login_device");
1186
1187 if (!scsi_id->login_orb) {
1188 SBP2_DEBUG("sbp2_login_device: login_orb not alloc'd!");
1189 return(-EIO);
1190 }
1191
1192 if (!exclusive_login) {
1193 if (sbp2_query_logins(scsi_id)) {
1194 SBP2_INFO("Device does not support any more concurrent logins");
1195 return(-EIO);
1196 }
1197 }
1198
1199 /* Set-up login ORB, assume no password */
1200 scsi_id->login_orb->password_hi = 0;
1201 scsi_id->login_orb->password_lo = 0;
1202 SBP2_DEBUG("sbp2_login_device: password_hi/lo initialized");
1203
1204 scsi_id->login_orb->login_response_lo = scsi_id->login_response_dma;
1205 scsi_id->login_orb->login_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
1206 SBP2_DEBUG("sbp2_login_device: login_response_hi/lo initialized");
1207
1208 scsi_id->login_orb->lun_misc = ORB_SET_FUNCTION(SBP2_LOGIN_REQUEST);
1209 scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0); /* One second reconnect time */
1210 scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(exclusive_login); /* Exclusive access to device */
1211 scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1); /* Notify us of login complete */
1212 /* Set the lun if we were able to pull it from the device's unit directory */
1213 if (scsi_id->sbp2_device_type_and_lun != SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
1214 scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
1215 SBP2_DEBUG("sbp2_query_logins: set lun to %d",
1216 ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun));
1217 }
1218 SBP2_DEBUG("sbp2_login_device: lun_misc initialized");
1219
1220 scsi_id->login_orb->passwd_resp_lengths =
1221 ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response));
1222 SBP2_DEBUG("sbp2_login_device: passwd_resp_lengths initialized");
1223
1224 scsi_id->login_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
1225 SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
1226 scsi_id->login_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
1227 SBP2_STATUS_FIFO_ADDRESS_HI);
1228 SBP2_DEBUG("sbp2_login_device: status FIFO initialized");
1229
1230 /*
1231 * Byte swap ORB if necessary
1232 */
1233 sbp2util_cpu_to_be32_buffer(scsi_id->login_orb, sizeof(struct sbp2_login_orb));
1234
1235 SBP2_DEBUG("sbp2_login_device: orb byte-swapped");
1236
1237 sbp2util_packet_dump(scsi_id->login_orb, sizeof(struct sbp2_login_orb),
1238 "sbp2 login orb", scsi_id->login_orb_dma);
1239
1240 /*
1241 * Initialize login response and status fifo
1242 */
1243 memset(scsi_id->login_response, 0, sizeof(struct sbp2_login_response));
1244 memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
1245
1246 SBP2_DEBUG("sbp2_login_device: login_response/status FIFO memset");
1247
1248 /*
1249 * Ok, let's write to the target's management agent register
1250 */
1251 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1252 data[1] = scsi_id->login_orb_dma;
1253 sbp2util_cpu_to_be32_buffer(data, 8);
1254
1255 atomic_set(&scsi_id->sbp2_login_complete, 0);
1256
1257 SBP2_DEBUG("sbp2_login_device: prepared to write to %08x",
1258 (unsigned int)scsi_id->sbp2_management_agent_addr);
1259 hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
1260 SBP2_DEBUG("sbp2_login_device: written");
1261
1262 /*
1263 * Wait for login status (up to 20 seconds)...
1264 */
1265 if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 20*HZ)) {
1266 SBP2_ERR("Error logging into SBP-2 device - login timed-out");
1267 return(-EIO);
1268 }
1269
1270 /*
1271 * Sanity. Make sure status returned matches login orb.
1272 */
1273 if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) {
1274 SBP2_ERR("Error logging into SBP-2 device - login response ORB mismatch");
1275 return(-EIO);
1276 }
1277
1278 /*
1279 * Check status
1280 */
1281 if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) ||
1282 STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc) ||
1283 STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
1284
1285 SBP2_ERR("Error logging into SBP-2 device - login failed");
1286 return(-EIO);
1287 }
1288
1289 /*
1290 * Byte swap the login response, for use when reconnecting or
1291 * logging out.
1292 */
1293 sbp2util_cpu_to_be32_buffer(scsi_id->login_response, sizeof(struct sbp2_login_response));
1294
1295 /*
1296 * Grab our command block agent address from the login response.
1297 */
1298 SBP2_DEBUG("command_block_agent_hi = %x",
1299 (unsigned int)scsi_id->login_response->command_block_agent_hi);
1300 SBP2_DEBUG("command_block_agent_lo = %x",
1301 (unsigned int)scsi_id->login_response->command_block_agent_lo);
1302
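	/* Keep only the low 48 bits (the CSR offset); the node ID occupies
	 * the upper bits and is supplied separately via hpsb_node_write(). */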
1303 scsi_id->sbp2_command_block_agent_addr =
1304 ((u64)scsi_id->login_response->command_block_agent_hi) << 32;
1305 scsi_id->sbp2_command_block_agent_addr |= ((u64)scsi_id->login_response->command_block_agent_lo);
1306 scsi_id->sbp2_command_block_agent_addr &= 0x0000ffffffffffffULL;
1307
1308 SBP2_INFO("Logged into SBP-2 device");
1309
1310 return(0);
1311
1312}
1313
1314/*
1315 * This function is called in order to logout from a particular SBP-2
1316 * device, usually called during driver unload.
1317 */
1318static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
1319{
1320 struct sbp2scsi_host_info *hi = scsi_id->hi;
1321 quadlet_t data[2];
1322 int error;
1323
1324 SBP2_DEBUG("sbp2_logout_device");
1325
1326 /*
1327 * Set-up logout ORB
1328 */
1329 scsi_id->logout_orb->reserved1 = 0x0;
1330 scsi_id->logout_orb->reserved2 = 0x0;
1331 scsi_id->logout_orb->reserved3 = 0x0;
1332 scsi_id->logout_orb->reserved4 = 0x0;
1333
1334 scsi_id->logout_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_LOGOUT_REQUEST);
1335 scsi_id->logout_orb->login_ID_misc |= ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID);
1336
1337 /* Notify us when complete */
1338 scsi_id->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
1339
1340 scsi_id->logout_orb->reserved5 = 0x0;
1341 scsi_id->logout_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
1342 SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
1343 scsi_id->logout_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
1344 SBP2_STATUS_FIFO_ADDRESS_HI);
1345
1346 /*
1347 * Byte swap ORB if necessary
1348 */
1349 sbp2util_cpu_to_be32_buffer(scsi_id->logout_orb, sizeof(struct sbp2_logout_orb));
1350
1351 sbp2util_packet_dump(scsi_id->logout_orb, sizeof(struct sbp2_logout_orb),
1352 "sbp2 logout orb", scsi_id->logout_orb_dma);
1353
1354 /*
1355 * Ok, let's write to the target's management agent register
1356 */
1357 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1358 data[1] = scsi_id->logout_orb_dma;
1359 sbp2util_cpu_to_be32_buffer(data, 8);
1360
1361 atomic_set(&scsi_id->sbp2_login_complete, 0);
1362
1363 error = hpsb_node_write(scsi_id->ne,
1364 scsi_id->sbp2_management_agent_addr,
1365 data, 8);
1366 if (error)
1367 return error;
1368
1369 /* Wait for device to logout...1 second. */
1370 if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ))
1371 return -EIO;
1372
1373 SBP2_INFO("Logged out of SBP-2 device");
1374
1375 return(0);
1376
1377}
1378
1379/*
1380 * This function is called in order to reconnect to a particular SBP-2
1381 * device, after a bus reset.
1382 */
1383static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
1384{
1385 struct sbp2scsi_host_info *hi = scsi_id->hi;
1386 quadlet_t data[2];
1387 int error;
1388
1389 SBP2_DEBUG("sbp2_reconnect_device");
1390
1391 /*
1392 * Set-up reconnect ORB
1393 */
1394 scsi_id->reconnect_orb->reserved1 = 0x0;
1395 scsi_id->reconnect_orb->reserved2 = 0x0;
1396 scsi_id->reconnect_orb->reserved3 = 0x0;
1397 scsi_id->reconnect_orb->reserved4 = 0x0;
1398
1399 scsi_id->reconnect_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_RECONNECT_REQUEST);
1400 scsi_id->reconnect_orb->login_ID_misc |=
1401 ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID);
1402
1403 /* Notify us when complete */
1404 scsi_id->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
1405
1406 scsi_id->reconnect_orb->reserved5 = 0x0;
1407 scsi_id->reconnect_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
1408 SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
1409 scsi_id->reconnect_orb->status_FIFO_hi =
1410 (ORB_SET_NODE_ID(hi->host->node_id) | SBP2_STATUS_FIFO_ADDRESS_HI);
1411
1412 /*
1413 * Byte swap ORB if necessary
1414 */
1415 sbp2util_cpu_to_be32_buffer(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb));
1416
1417 sbp2util_packet_dump(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb),
1418 "sbp2 reconnect orb", scsi_id->reconnect_orb_dma);
1419
1420 /*
1421 * Initialize status fifo
1422 */
1423 memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
1424
1425 /*
1426 * Ok, let's write to the target's management agent register
1427 */
1428 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1429 data[1] = scsi_id->reconnect_orb_dma;
1430 sbp2util_cpu_to_be32_buffer(data, 8);
1431
1432 atomic_set(&scsi_id->sbp2_login_complete, 0);
1433
1434 error = hpsb_node_write(scsi_id->ne,
1435 scsi_id->sbp2_management_agent_addr,
1436 data, 8);
1437 if (error)
1438 return error;
1439
1440 /*
1441 * Wait for reconnect status (up to 1 second)...
1442 */
1443 if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ)) {
1444 SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out");
1445 return(-EIO);
1446 }
1447
1448 /*
1449 * Sanity. Make sure status returned matches reconnect orb.
1450 */
1451 if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) {
1452 SBP2_ERR("Error reconnecting to SBP-2 device - reconnect response ORB mismatch");
1453 return(-EIO);
1454 }
1455
1456 /*
1457 * Check status
1458 */
1459 if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) ||
1460 STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc) ||
1461 STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
1462
1463 SBP2_ERR("Error reconnecting to SBP-2 device - reconnect failed");
1464 return(-EIO);
1465 }
1466
1467 HPSB_DEBUG("Reconnected to SBP-2 device");
1468
1469 return(0);
1470
1471}
1472
1473/*
1474 * This function is called in order to set the busy timeout (number of
1475 * retries to attempt) on the sbp2 device.
1476 */
1477static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id)
1478{
1479 quadlet_t data;
1480
1481 SBP2_DEBUG("sbp2_set_busy_timeout");
1482
1483 /*
1484 * Ok, let's write to the target's busy timeout register
1485 */
1486 data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE);
1487
1488 if (hpsb_node_write(scsi_id->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4)) {
1489 SBP2_ERR("sbp2_set_busy_timeout error");
1490 }
1491
1492 return(0);
1493}
1494
1495
1496/*
1497 * This function is called to parse sbp2 device's config rom unit
1498 * directory. Used to determine things like sbp2 management agent offset,
1499 * and command set used (SCSI or RBC).
1500 */
1501static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1502 struct unit_directory *ud)
1503{
1504 struct csr1212_keyval *kv;
1505 struct csr1212_dentry *dentry;
1506 u64 management_agent_addr;
1507 u32 command_set_spec_id, command_set, unit_characteristics,
1508 firmware_revision, workarounds;
1509 int i;
1510
1511 SBP2_DEBUG("sbp2_parse_unit_directory");
1512
1513 management_agent_addr = 0x0;
1514 command_set_spec_id = 0x0;
1515 command_set = 0x0;
1516 unit_characteristics = 0x0;
1517 firmware_revision = 0x0;
1518
1519 /* Handle different fields in the unit directory, based on keys */
1520 csr1212_for_each_dir_entry(ud->ne->csr, kv, ud->ud_kv, dentry) {
1521 switch (kv->key.id) {
1522 case CSR1212_KV_ID_DEPENDENT_INFO:
1523 if (kv->key.type == CSR1212_KV_TYPE_CSR_OFFSET) {
1524 /* Save off the management agent address */
1525 management_agent_addr =
1526 CSR1212_REGISTER_SPACE_BASE +
1527 (kv->value.csr_offset << 2);
1528
1529 SBP2_DEBUG("sbp2_management_agent_addr = %x",
1530 (unsigned int) management_agent_addr);
1531 } else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
1532 scsi_id->sbp2_device_type_and_lun = kv->value.immediate;
1533 }
1534 break;
1535
1536 case SBP2_COMMAND_SET_SPEC_ID_KEY:
1537 /* Command spec organization */
1538 command_set_spec_id = kv->value.immediate;
1539 SBP2_DEBUG("sbp2_command_set_spec_id = %x",
1540 (unsigned int) command_set_spec_id);
1541 break;
1542
1543 case SBP2_COMMAND_SET_KEY:
1544 /* Command set used by sbp2 device */
1545 command_set = kv->value.immediate;
1546 SBP2_DEBUG("sbp2_command_set = %x",
1547 (unsigned int) command_set);
1548 break;
1549
1550 case SBP2_UNIT_CHARACTERISTICS_KEY:
1551 /*
1552 * Unit characteristics (orb related stuff
1553 * that I'm not yet paying attention to)
1554 */
1555 unit_characteristics = kv->value.immediate;
1556 SBP2_DEBUG("sbp2_unit_characteristics = %x",
1557 (unsigned int) unit_characteristics);
1558 break;
1559
1560 case SBP2_FIRMWARE_REVISION_KEY:
1561 /* Firmware revision */
1562 firmware_revision = kv->value.immediate;
1563 if (force_inquiry_hack)
1564 SBP2_INFO("sbp2_firmware_revision = %x",
1565 (unsigned int) firmware_revision);
1566 else SBP2_DEBUG("sbp2_firmware_revision = %x",
1567 (unsigned int) firmware_revision);
1568 break;
1569
1570 default:
1571 break;
1572 }
1573 }
1574
1575 /* This is the start of our broken device checking. We try to hack
1576 * around oddities and known defects. */
1577 workarounds = 0x0;
1578
1579 /* If the vendor id is 0xa0b8 (Symbios vendor id), then we have a
1580 * bridge with 128KB max transfer size limitation. For sanity, we
1581 * only voice this when the current max_sectors setting
1582 * exceeds the 128k limit. By default, that is not the case.
1583 *
1584 * It would be really nice if we could detect this before the scsi
1585 * host gets initialized. That way we can down-force the
1586 * max_sectors to account for it. That is not currently
1587 * possible. */
1588 if ((firmware_revision & 0xffff00) ==
1589 SBP2_128KB_BROKEN_FIRMWARE &&
1590 (max_sectors * 512) > (128*1024)) {
1591 SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB max transfer size.",
1592 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
1593 SBP2_WARN("WARNING: Current max_sectors setting is larger than 128KB (%d sectors)!",
1594 max_sectors);
1595 workarounds |= SBP2_BREAKAGE_128K_MAX_TRANSFER;
1596 }
1597
1598 /* Check for a blacklisted set of devices that require us to force
1599 * a 36 byte host inquiry. This can be overridden as a module param
1600 * (to force all hosts). */
1601 for (i = 0; i < NUM_BROKEN_INQUIRY_DEVS; i++) {
1602 if ((firmware_revision & 0xffff00) ==
1603 sbp2_broken_inquiry_list[i]) {
1604 SBP2_WARN("Node " NODE_BUS_FMT ": Using 36-byte inquiry workaround",
1605 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
1606 workarounds |= SBP2_BREAKAGE_INQUIRY_HACK;
1607 break; /* No need to continue. */
1608 }
1609 }
1610
1611 /* If this is a logical unit directory entry, process the parent
1612 * to get the values. */
1613 if (ud->flags & UNIT_DIRECTORY_LUN_DIRECTORY) {
1614 struct unit_directory *parent_ud =
1615 container_of(ud->device.parent, struct unit_directory, device);
1616 sbp2_parse_unit_directory(scsi_id, parent_ud);
1617 } else {
1618 scsi_id->sbp2_management_agent_addr = management_agent_addr;
1619 scsi_id->sbp2_command_set_spec_id = command_set_spec_id;
1620 scsi_id->sbp2_command_set = command_set;
1621 scsi_id->sbp2_unit_characteristics = unit_characteristics;
1622 scsi_id->sbp2_firmware_revision = firmware_revision;
1623 scsi_id->workarounds = workarounds;
1624 if (ud->flags & UNIT_DIRECTORY_HAS_LUN)
1625 scsi_id->sbp2_device_type_and_lun = ud->lun;
1626 }
1627}
1628
1629/*
1630 * This function is called in order to determine the max speed and packet
1631 * size we can use in our ORBs. Note that we (the driver and host) only
1632 * initiate the transaction. The SBP-2 device actually transfers the data
1633 * (by reading from the DMA area we tell it). This means that the SBP-2
1634 * device decides the actual maximum data it can transfer. We just tell it
1635 * the speed that it needs to use, and the max_rec the host supports, and
1636 * it takes care of the rest.
1637 */
1638static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id)
1639{
1640 struct sbp2scsi_host_info *hi = scsi_id->hi;
1641
1642 SBP2_DEBUG("sbp2_max_speed_and_size");
1643
1644 /* Initial setting comes from the host's speed map */
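	/* (the speed map is indexed as [local node * 64 + remote node]) */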
1645 scsi_id->speed_code = hi->host->speed_map[NODEID_TO_NODE(hi->host->node_id) * 64
1646 + NODEID_TO_NODE(scsi_id->ne->nodeid)];
1647
1648 /* Bump down our speed if the user requested it */
1649 if (scsi_id->speed_code > max_speed) {
1650 scsi_id->speed_code = max_speed;
1651 SBP2_ERR("Forcing SBP-2 max speed down to %s",
1652 hpsb_speedto_str[scsi_id->speed_code]);
1653 }
1654
1655 /* Payload size is the lesser of what our speed supports and what
1656 * our host supports. */
1657 scsi_id->max_payload_size = min(sbp2_speedto_max_payload[scsi_id->speed_code],
1658 (u8)(hi->host->csr.max_rec - 1));
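	/* Worked example: at S400 (speed_code 2), sbp2_speedto_max_payload
	 * gives 0x9, i.e. 2^(9 + 2) = 2048 byte packets, unless the host's
	 * max_rec caps it lower. */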
1659
1660 HPSB_DEBUG("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]",
1661 NODE_BUS_ARGS(hi->host, scsi_id->ne->nodeid),
1662 hpsb_speedto_str[scsi_id->speed_code],
1663 1 << ((u32)scsi_id->max_payload_size + 2));
1664
1665 return(0);
1666}
1667
1668/*
1669 * This function is called in order to perform a SBP-2 agent reset.
1670 */
1671static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait)
1672{
1673 quadlet_t data;
1674 u64 addr;
1675 int retval;
1676
1677 SBP2_DEBUG("sbp2_agent_reset");
1678
1679 /*
1680 * Ok, let's write to the target's management agent register
1681 */
1682 data = ntohl(SBP2_AGENT_RESET_DATA);
1683 addr = scsi_id->sbp2_command_block_agent_addr + SBP2_AGENT_RESET_OFFSET;
1684
1685 if (wait)
1686 retval = hpsb_node_write(scsi_id->ne, addr, &data, 4);
1687 else
1688 retval = sbp2util_node_write_no_wait(scsi_id->ne, addr, &data, 4);
1689
1690 if (retval < 0) {
1691 SBP2_ERR("hpsb_node_write failed");
1692 return -EIO;
1693 }
1694
1695 /*
1696 * Need to make sure orb pointer is written on next command
1697 */
1698 scsi_id->last_orb = NULL;
1699
1700 return(0);
1701}
1702
1703/*
1704 * This function is called to create the actual command orb and s/g list
1705 * out of the scsi command itself.
1706 */
1707static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
1708 struct sbp2_command_info *command,
1709 unchar *scsi_cmd,
1710 unsigned int scsi_use_sg,
1711 unsigned int scsi_request_bufflen,
1712 void *scsi_request_buffer,
1713 enum dma_data_direction dma_dir)
1714
1715{
1716 struct sbp2scsi_host_info *hi = scsi_id->hi;
1717 struct scatterlist *sgpnt = (struct scatterlist *) scsi_request_buffer;
1718 struct sbp2_command_orb *command_orb = &command->command_orb;
1719 struct sbp2_unrestricted_page_table *scatter_gather_element =
1720 &command->scatter_gather_element[0];
1721 u32 sg_count, sg_len, orb_direction;
1722 dma_addr_t sg_addr;
1723 int i;
1724
1725 /*
1726 * Set-up our command ORB..
1727 *
1728 * NOTE: We're doing unrestricted page tables (s/g), as this is
1729 * best performance (at least with the devices I have). This means
1730 * that data_size becomes the number of s/g elements, and
1731 * page_size should be zero (for unrestricted).
1732 */
1733 command_orb->next_ORB_hi = ORB_SET_NULL_PTR(1);
1734 command_orb->next_ORB_lo = 0x0;
1735 command_orb->misc = ORB_SET_MAX_PAYLOAD(scsi_id->max_payload_size);
1736 command_orb->misc |= ORB_SET_SPEED(scsi_id->speed_code);
1737 command_orb->misc |= ORB_SET_NOTIFY(1); /* Notify us when complete */
1738
1739 /*
1740 * Get the direction of the transfer. If the direction is unknown, then use our
1741 * goofy table as a back-up.
1742 */
1743 switch (dma_dir) {
1744 case DMA_NONE:
1745 orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
1746 break;
1747 case DMA_TO_DEVICE:
1748 orb_direction = ORB_DIRECTION_WRITE_TO_MEDIA;
1749 break;
1750 case DMA_FROM_DEVICE:
1751 orb_direction = ORB_DIRECTION_READ_FROM_MEDIA;
1752 break;
1753 case DMA_BIDIRECTIONAL:
1754 default:
1755 SBP2_ERR("SCSI data transfer direction not specified. "
1756 "Update the SBP2 direction table in sbp2.h if "
1757 "necessary for your application");
1758 __scsi_print_command(scsi_cmd);
1759 orb_direction = sbp2scsi_direction_table[*scsi_cmd];
1760 break;
1761 }
1762
1763 /*
1764 * Set-up our pagetable stuff... unfortunately, this has become
1765 * messier than I'd like. Need to clean this up a bit. ;-)
1766 */
1767 if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) {
1768
1769 SBP2_DEBUG("No data transfer");
1770
1771 /*
1772 * Handle no data transfer
1773 */
1774 command_orb->data_descriptor_hi = 0x0;
1775 command_orb->data_descriptor_lo = 0x0;
1776 command_orb->misc |= ORB_SET_DIRECTION(1);
1777
1778 } else if (scsi_use_sg) {
1779
1780 SBP2_DEBUG("Use scatter/gather");
1781
1782 /*
1783 * Special case if only one element (and less than 64KB in size)
1784 */
1785 if ((scsi_use_sg == 1) && (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
1786
1787 SBP2_DEBUG("Only one s/g element");
1788 command->dma_dir = dma_dir;
1789 command->dma_size = sgpnt[0].length;
1790 command->dma_type = CMD_DMA_PAGE;
1791 command->cmd_dma = pci_map_page(hi->host->pdev,
1792 sgpnt[0].page,
1793 sgpnt[0].offset,
1794 command->dma_size,
1795 command->dma_dir);
1796 SBP2_DMA_ALLOC("single page scatter element");
1797
1798 command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1799 command_orb->data_descriptor_lo = command->cmd_dma;
1800 command_orb->misc |= ORB_SET_DATA_SIZE(command->dma_size);
1801 command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
1802
1803 } else {
1804 int count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg, dma_dir);
1805 SBP2_DMA_ALLOC("scatter list");
1806
1807 command->dma_size = scsi_use_sg;
1808 command->dma_dir = dma_dir;
1809 command->sge_buffer = sgpnt;
1810
1811 /* use page tables (s/g) */
1812 command_orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1813 command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
1814 command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1815 command_orb->data_descriptor_lo = command->sge_dma;
1816
1817 /*
1818 * Loop through and fill out our sbp-2 page tables
1819 * (and split up anything too large)
1820 */
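			/* Each page table entry carries a base address and a
			 * length; segments longer than SBP2_MAX_SG_ELEMENT_LENGTH
			 * are split across consecutive entries. */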
1821 for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) {
1822 sg_len = sg_dma_len(sgpnt);
1823 sg_addr = sg_dma_address(sgpnt);
1824 while (sg_len) {
1825 scatter_gather_element[sg_count].segment_base_lo = sg_addr;
1826 if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
1827 scatter_gather_element[sg_count].length_segment_base_hi =
1828 PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
1829 sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
1830 sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
1831 } else {
1832 scatter_gather_element[sg_count].length_segment_base_hi =
1833 PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
1834 sg_len = 0;
1835 }
1836 sg_count++;
1837 }
1838 }
1839
1840 /* Number of page table (s/g) elements */
1841 command_orb->misc |= ORB_SET_DATA_SIZE(sg_count);
1842
1843 sbp2util_packet_dump(scatter_gather_element,
1844 (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
1845 "sbp2 s/g list", command->sge_dma);
1846
1847 /*
1848 * Byte swap page tables if necessary
1849 */
1850 sbp2util_cpu_to_be32_buffer(scatter_gather_element,
1851 (sizeof(struct sbp2_unrestricted_page_table)) *
1852 sg_count);
1853
1854 }
1855
1856 } else {
1857
1858 SBP2_DEBUG("No scatter/gather");
1859
1860 command->dma_dir = dma_dir;
1861 command->dma_size = scsi_request_bufflen;
1862 command->dma_type = CMD_DMA_SINGLE;
1863 command->cmd_dma = pci_map_single (hi->host->pdev, scsi_request_buffer,
1864 command->dma_size,
1865 command->dma_dir);
1866 SBP2_DMA_ALLOC("single bulk");
1867
1868 /*
1869		 * Handle the case where we get a command without s/g enabled (but
1870		 * check for transfers larger than SBP2_MAX_SG_ELEMENT_LENGTH)
1871 */
1872 if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {
1873
1874 command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1875 command_orb->data_descriptor_lo = command->cmd_dma;
1876 command_orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);
1877 command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
1878
1879 /*
1880 * Sanity, in case our direction table is not
1881 * up-to-date
1882 */
1883 if (!scsi_request_bufflen) {
1884 command_orb->data_descriptor_hi = 0x0;
1885 command_orb->data_descriptor_lo = 0x0;
1886 command_orb->misc |= ORB_SET_DIRECTION(1);
1887 }
1888
1889 } else {
1890 /*
1891 * Need to turn this into page tables, since the
1892 * buffer is too large.
1893 */
1894 command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1895 command_orb->data_descriptor_lo = command->sge_dma;
1896
1897 /* Use page tables (s/g) */
1898 command_orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1899 command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
1900
1901 /*
1902 * fill out our sbp-2 page tables (and split up
1903 * the large buffer)
1904 */
1905 sg_count = 0;
1906 sg_len = scsi_request_bufflen;
1907 sg_addr = command->cmd_dma;
1908 while (sg_len) {
1909 scatter_gather_element[sg_count].segment_base_lo = sg_addr;
1910 if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
1911 scatter_gather_element[sg_count].length_segment_base_hi =
1912 PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
1913 sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
1914 sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
1915 } else {
1916 scatter_gather_element[sg_count].length_segment_base_hi =
1917 PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
1918 sg_len = 0;
1919 }
1920 sg_count++;
1921 }
1922
1923 /* Number of page table (s/g) elements */
1924 command_orb->misc |= ORB_SET_DATA_SIZE(sg_count);
1925
1926 sbp2util_packet_dump(scatter_gather_element,
1927 (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
1928 "sbp2 s/g list", command->sge_dma);
1929
1930 /*
1931 * Byte swap page tables if necessary
1932 */
1933 sbp2util_cpu_to_be32_buffer(scatter_gather_element,
1934 (sizeof(struct sbp2_unrestricted_page_table)) *
1935 sg_count);
1936
1937 }
1938
1939 }
1940
1941 /*
1942 * Byte swap command ORB if necessary
1943 */
1944 sbp2util_cpu_to_be32_buffer(command_orb, sizeof(struct sbp2_command_orb));
1945
1946 /*
1947 * Put our scsi command in the command ORB
1948 */
1949 memset(command_orb->cdb, 0, 12);
1950 memcpy(command_orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd));
1951
1952 return(0);
1953}
1954
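/*
 * Illustrative sketch, not part of the driver: the page table loop above
 * splits each DMA run into segments of at most SBP2_MAX_SG_ELEMENT_LENGTH
 * bytes.  The hypothetical helper below shows the same arithmetic in
 * isolation.
 */
static inline unsigned int sbp2_example_segment_count(unsigned long sg_len)
{
	/* One page table entry per full segment, plus one for any remainder */
	return (sg_len + SBP2_MAX_SG_ELEMENT_LENGTH - 1) /
	       SBP2_MAX_SG_ELEMENT_LENGTH;
}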
1955/*
1956 * This function links a command ORB into the target's command block agent and rings the doorbell if needed.
1957 */
1958static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
1959 struct sbp2_command_info *command)
1960{
1961 struct sbp2scsi_host_info *hi = scsi_id->hi;
1962 struct sbp2_command_orb *command_orb = &command->command_orb;
1963 struct node_entry *ne = scsi_id->ne;
1964 u64 addr;
1965
1966 outstanding_orb_incr;
1967 SBP2_ORB_DEBUG("sending command orb %p, total orbs = %x",
1968 command_orb, global_outstanding_command_orbs);
1969
1970 pci_dma_sync_single_for_device(hi->host->pdev, command->command_orb_dma,
1971 sizeof(struct sbp2_command_orb),
1972 PCI_DMA_BIDIRECTIONAL);
1973 pci_dma_sync_single_for_device(hi->host->pdev, command->sge_dma,
1974 sizeof(command->scatter_gather_element),
1975 PCI_DMA_BIDIRECTIONAL);
1976 /*
1977 * Check to see if there are any previous orbs to use
1978 */
1979 if (scsi_id->last_orb == NULL) {
1980 quadlet_t data[2];
1981
1982 /*
1983 * Ok, let's write to the target's management agent register
1984 */
1985 addr = scsi_id->sbp2_command_block_agent_addr + SBP2_ORB_POINTER_OFFSET;
1986 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1987 data[1] = command->command_orb_dma;
1988 sbp2util_cpu_to_be32_buffer(data, 8);
1989
1990 SBP2_ORB_DEBUG("write command agent, command orb %p", command_orb);
1991
1992 if (sbp2util_node_write_no_wait(ne, addr, data, 8) < 0) {
1993			SBP2_ERR("sbp2util_node_write_no_wait failed");
1994 return -EIO;
1995 }
1996
1997 SBP2_ORB_DEBUG("write command agent complete");
1998
1999 scsi_id->last_orb = command_orb;
2000 scsi_id->last_orb_dma = command->command_orb_dma;
2001
2002 } else {
2003 quadlet_t data;
2004
2005 /*
2006 * We have an orb already sent (maybe or maybe not
2007 * processed) that we can append this orb to. So do so,
2008 * and ring the doorbell. Have to be very careful
2009 * modifying these next orb pointers, as they are accessed
2010 * both by the sbp2 device and us.
2011 */
2012 scsi_id->last_orb->next_ORB_lo =
2013 cpu_to_be32(command->command_orb_dma);
2014 /* Tells hardware that this pointer is valid */
2015 scsi_id->last_orb->next_ORB_hi = 0x0;
2016 pci_dma_sync_single_for_device(hi->host->pdev, scsi_id->last_orb_dma,
2017 sizeof(struct sbp2_command_orb),
2018 PCI_DMA_BIDIRECTIONAL);
2019
2020 /*
2021 * Ring the doorbell
2022 */
2023 data = cpu_to_be32(command->command_orb_dma);
2024 addr = scsi_id->sbp2_command_block_agent_addr + SBP2_DOORBELL_OFFSET;
2025
2026 SBP2_ORB_DEBUG("ring doorbell, command orb %p", command_orb);
2027
2028 if (sbp2util_node_write_no_wait(ne, addr, &data, 4) < 0) {
2029 SBP2_ERR("sbp2util_node_write_no_wait failed");
2030 return(-EIO);
2031 }
2032
2033 scsi_id->last_orb = command_orb;
2034 scsi_id->last_orb_dma = command->command_orb_dma;
2035
2036 }
2037 return(0);
2038}
2039
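/*
 * Illustrative sketch, not part of the driver: when no previous ORB is
 * outstanding, sbp2_link_orb_command() writes an eight-byte ORB pointer
 * to the fetch agent.  The hypothetical struct below just names the two
 * quadlets built in the data[] array above.
 */
struct sbp2_example_orb_pointer {
	u32 node_id_and_addr_hi;	/* ORB_SET_NODE_ID(node_id); upper address bits are zero here */
	u32 addr_lo;			/* 32-bit DMA address of the command ORB */
};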
2040/*
2041 * This function is called in order to begin a regular SBP-2 command.
2042 */
2043static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
2044 struct scsi_cmnd *SCpnt,
2045 void (*done)(struct scsi_cmnd *))
2046{
2047 unchar *cmd = (unchar *) SCpnt->cmnd;
2048 unsigned int request_bufflen = SCpnt->request_bufflen;
2049 struct sbp2_command_info *command;
2050
2051 SBP2_DEBUG("sbp2_send_command");
2052#if (CONFIG_IEEE1394_SBP2_DEBUG >= 2) || defined(CONFIG_IEEE1394_SBP2_PACKET_DUMP)
2053 printk("[scsi command]\n ");
2054 scsi_print_command(SCpnt);
2055#endif
2056 SBP2_DEBUG("SCSI transfer size = %x", request_bufflen);
2057 SBP2_DEBUG("SCSI s/g elements = %x", (unsigned int)SCpnt->use_sg);
2058
2059 /*
2060 * Allocate a command orb and s/g structure
2061 */
2062 command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done);
2063 if (!command) {
2064 return(-EIO);
2065 }
2066
2067 /*
2068 * The scsi stack sends down a request_bufflen which does not match the
2069 * length field in the scsi cdb. This causes some sbp2 devices to
2070 * reject this inquiry command. Fix the request_bufflen.
2071 */
2072 if (*cmd == INQUIRY) {
2073 if (force_inquiry_hack || scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK)
2074 request_bufflen = cmd[4] = 0x24;
2075 else
2076 request_bufflen = cmd[4];
2077 }
2078
2079 /*
2080	 * Now actually fill in the command orb and sbp2 s/g list
2081 */
2082 sbp2_create_command_orb(scsi_id, command, cmd, SCpnt->use_sg,
2083 request_bufflen, SCpnt->request_buffer,
2084 SCpnt->sc_data_direction);
2085 /*
2086 * Update our cdb if necessary (to handle sbp2 RBC command set
2087 * differences). This is where the command set hacks go! =)
2088 */
2089 sbp2_check_sbp2_command(scsi_id, command->command_orb.cdb);
2090
2091 sbp2util_packet_dump(&command->command_orb, sizeof(struct sbp2_command_orb),
2092 "sbp2 command orb", command->command_orb_dma);
2093
2094 /*
2095 * Initialize status fifo
2096 */
2097 memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
2098
2099 /*
2100 * Link up the orb, and ring the doorbell if needed
2101 */
2102 sbp2_link_orb_command(scsi_id, command);
2103
2104 return(0);
2105}
2106
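/*
 * Note on the INQUIRY workaround above, for illustration only: byte 4 of a
 * 6-byte INQUIRY CDB is the allocation length, e.g. a CDB of
 *	12 00 00 00 24 00
 * requests 0x24 = 36 bytes, the size of a standard INQUIRY response.  With
 * the inquiry hack enabled, both that byte and request_bufflen are forced
 * to 0x24 so the ORB data size matches what the device actually returns.
 */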
2107
2108/*
2109 * This function deals with command set differences between Linux scsi
2110 * command set and sbp2 RBC command set.
2111 */
2112static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, unchar *cmd)
2113{
2114 unchar new_cmd[16];
2115 u8 device_type = SBP2_DEVICE_TYPE (scsi_id->sbp2_device_type_and_lun);
2116
2117 SBP2_DEBUG("sbp2_check_sbp2_command");
2118
2119 switch (*cmd) {
2120
2121 case READ_6:
2122
2123 if (sbp2_command_conversion_device_type(device_type)) {
2124
2125 SBP2_DEBUG("Convert READ_6 to READ_10");
2126
2127 /*
2128 * Need to turn read_6 into read_10
2129 */
2130 new_cmd[0] = 0x28;
2131 new_cmd[1] = (cmd[1] & 0xe0);
2132 new_cmd[2] = 0x0;
2133 new_cmd[3] = (cmd[1] & 0x1f);
2134 new_cmd[4] = cmd[2];
2135 new_cmd[5] = cmd[3];
2136 new_cmd[6] = 0x0;
2137 new_cmd[7] = 0x0;
2138 new_cmd[8] = cmd[4];
2139 new_cmd[9] = cmd[5];
2140
2141 memcpy(cmd, new_cmd, 10);
2142
2143 }
2144
2145 break;
2146
2147 case WRITE_6:
2148
2149 if (sbp2_command_conversion_device_type(device_type)) {
2150
2151 SBP2_DEBUG("Convert WRITE_6 to WRITE_10");
2152
2153 /*
2154 * Need to turn write_6 into write_10
2155 */
2156 new_cmd[0] = 0x2a;
2157 new_cmd[1] = (cmd[1] & 0xe0);
2158 new_cmd[2] = 0x0;
2159 new_cmd[3] = (cmd[1] & 0x1f);
2160 new_cmd[4] = cmd[2];
2161 new_cmd[5] = cmd[3];
2162 new_cmd[6] = 0x0;
2163 new_cmd[7] = 0x0;
2164 new_cmd[8] = cmd[4];
2165 new_cmd[9] = cmd[5];
2166
2167 memcpy(cmd, new_cmd, 10);
2168
2169 }
2170
2171 break;
2172
2173 case MODE_SENSE:
2174
2175 if (sbp2_command_conversion_device_type(device_type)) {
2176
2177 SBP2_DEBUG("Convert MODE_SENSE_6 to MODE_SENSE_10");
2178
2179 /*
2180 * Need to turn mode_sense_6 into mode_sense_10
2181 */
2182 new_cmd[0] = 0x5a;
2183 new_cmd[1] = cmd[1];
2184 new_cmd[2] = cmd[2];
2185 new_cmd[3] = 0x0;
2186 new_cmd[4] = 0x0;
2187 new_cmd[5] = 0x0;
2188 new_cmd[6] = 0x0;
2189 new_cmd[7] = 0x0;
2190 new_cmd[8] = cmd[4];
2191 new_cmd[9] = cmd[5];
2192
2193 memcpy(cmd, new_cmd, 10);
2194
2195 }
2196
2197 break;
2198
2199 case MODE_SELECT:
2200
2201 /*
2202 * TODO. Probably need to change mode select to 10 byte version
2203 */
2204
2205 default:
2206 break;
2207 }
2208
2209 return;
2210}
2211
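/*
 * Worked example of the READ_6 -> READ_10 conversion above (illustration
 * only): a READ_6 CDB of
 *	08 1f ff ff 08 00	(LBA 0x1fffff, 8 blocks)
 * is rewritten to the READ_10 CDB
 *	28 00 00 1f ff ff 00 00 08 00
 * i.e. the 21-bit LBA widens to 32 bits, the 8-bit transfer length widens
 * to 16 bits, and the old LUN bits in cmd[1] (0xe0 mask) are preserved.
 */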
2212/*
2213 * Translates SBP-2 status into SCSI sense data for check conditions
2214 */
2215static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data)
2216{
2217 SBP2_DEBUG("sbp2_status_to_sense_data");
2218
2219 /*
2220	 * Build fixed-format sense data from the SBP-2 command-set-dependent status bytes
2221 */
2222 sense_data[0] = 0x70;
2223 sense_data[1] = 0x0;
2224 sense_data[2] = sbp2_status[9];
2225 sense_data[3] = sbp2_status[12];
2226 sense_data[4] = sbp2_status[13];
2227 sense_data[5] = sbp2_status[14];
2228 sense_data[6] = sbp2_status[15];
2229 sense_data[7] = 10;
2230 sense_data[8] = sbp2_status[16];
2231 sense_data[9] = sbp2_status[17];
2232 sense_data[10] = sbp2_status[18];
2233 sense_data[11] = sbp2_status[19];
2234 sense_data[12] = sbp2_status[10];
2235 sense_data[13] = sbp2_status[11];
2236 sense_data[14] = sbp2_status[20];
2237 sense_data[15] = sbp2_status[21];
2238
2239 return(sbp2_status[8] & 0x3f); /* return scsi status */
2240}
2241
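/*
 * For illustration only, the layout produced above is standard fixed-format
 * sense data:
 *	sense[0]      = 0x70  (current error, fixed format)
 *	sense[2]      = sense key
 *	sense[3..6]   = information field
 *	sense[7]      = additional sense length (10)
 *	sense[12..13] = ASC / ASCQ
 * The return value is the SCSI status byte taken from sbp2_status[8].
 */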
2242/*
2243 * This function is called after a command is completed, in order to do any necessary SBP-2
2244 * response data translations for the SCSI stack
2245 */
2246static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
2247 struct scsi_cmnd *SCpnt)
2248{
2249 u8 *scsi_buf = SCpnt->request_buffer;
2250 u8 device_type = SBP2_DEVICE_TYPE (scsi_id->sbp2_device_type_and_lun);
2251
2252 SBP2_DEBUG("sbp2_check_sbp2_response");
2253
2254 switch (SCpnt->cmnd[0]) {
2255
2256 case INQUIRY:
2257
2258 /*
2259 * If scsi_id->sbp2_device_type_and_lun is uninitialized, then fill
2260 * this information in from the inquiry response data. Lun is set to zero.
2261 */
2262 if (scsi_id->sbp2_device_type_and_lun == SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
2263 SBP2_DEBUG("Creating sbp2_device_type_and_lun from scsi inquiry data");
2264 scsi_id->sbp2_device_type_and_lun = (scsi_buf[0] & 0x1f) << 16;
2265 }
2266
2267 /*
2268 * Make sure data length is ok. Minimum length is 36 bytes
2269 */
2270 if (scsi_buf[4] == 0) {
2271 scsi_buf[4] = 36 - 5;
2272 }
2273
2274 /*
2275 * Check for Simple Direct Access Device and change it to TYPE_DISK
2276 */
2277 if ((scsi_buf[0] & 0x1f) == TYPE_SDAD) {
2278 SBP2_DEBUG("Changing TYPE_SDAD to TYPE_DISK");
2279 scsi_buf[0] &= 0xe0;
2280 }
2281
2282 /*
2283 * Fix ansi revision and response data format
2284 */
2285 scsi_buf[2] |= 2;
2286 scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2;
2287
2288 break;
2289
2290 case MODE_SENSE:
2291
2292 if (sbp2_command_conversion_device_type(device_type)) {
2293
2294 SBP2_DEBUG("Modify mode sense response (10 byte version)");
2295
2296 scsi_buf[0] = scsi_buf[1]; /* Mode data length */
2297 scsi_buf[1] = scsi_buf[2]; /* Medium type */
2298 scsi_buf[2] = scsi_buf[3]; /* Device specific parameter */
2299 scsi_buf[3] = scsi_buf[7]; /* Block descriptor length */
2300 memcpy(scsi_buf + 4, scsi_buf + 8, scsi_buf[0]);
2301 }
2302
2303 break;
2304
2305 case MODE_SELECT:
2306
2307 /*
2308 * TODO. Probably need to change mode select to 10 byte version
2309 */
2310
2311 default:
2312 break;
2313 }
2314 return;
2315}
2316
2317/*
2318 * This function deals with status writes from the SBP-2 device
2319 */
2320static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid,
2321 quadlet_t *data, u64 addr, size_t length, u16 fl)
2322{
2323 struct sbp2scsi_host_info *hi;
2324 struct scsi_id_instance_data *scsi_id = NULL, *scsi_id_tmp;
2325 u32 id;
2326 struct scsi_cmnd *SCpnt = NULL;
2327 u32 scsi_status = SBP2_SCSI_STATUS_GOOD;
2328 struct sbp2_command_info *command;
2329
2330 SBP2_DEBUG("sbp2_handle_status_write");
2331
2332 sbp2util_packet_dump(data, length, "sbp2 status write by device", (u32)addr);
2333
2334 if (!host) {
2335 SBP2_ERR("host is NULL - this is bad!");
2336 return(RCODE_ADDRESS_ERROR);
2337 }
2338
2339 hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
2340
2341 if (!hi) {
2342 SBP2_ERR("host info is NULL - this is bad!");
2343 return(RCODE_ADDRESS_ERROR);
2344 }
2345
2346 /*
2347 * Find our scsi_id structure by looking at the status fifo address written to by
2348 * the sbp2 device.
2349 */
2350 id = SBP2_STATUS_FIFO_OFFSET_TO_ENTRY((u32)(addr - SBP2_STATUS_FIFO_ADDRESS));
2351 list_for_each_entry(scsi_id_tmp, &hi->scsi_ids, scsi_list) {
2352 if (scsi_id_tmp->ne->nodeid == nodeid && scsi_id_tmp->ud->id == id) {
2353 scsi_id = scsi_id_tmp;
2354 break;
2355 }
2356 }
2357
2358 if (!scsi_id) {
2359 SBP2_ERR("scsi_id is NULL - device is gone?");
2360 return(RCODE_ADDRESS_ERROR);
2361 }
2362
2363 /*
2364 * Put response into scsi_id status fifo...
2365 */
2366 memcpy(&scsi_id->status_block, data, length);
2367
2368 /*
2369 * Byte swap first two quadlets (8 bytes) of status for processing
2370 */
2371 sbp2util_be32_to_cpu_buffer(&scsi_id->status_block, 8);
2372
2373 /*
2374 * Handle command ORB status here if necessary. First, need to match status with command.
2375 */
2376 command = sbp2util_find_command_for_orb(scsi_id, scsi_id->status_block.ORB_offset_lo);
2377 if (command) {
2378
2379 SBP2_DEBUG("Found status for command ORB");
2380 pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma,
2381 sizeof(struct sbp2_command_orb),
2382 PCI_DMA_BIDIRECTIONAL);
2383 pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma,
2384 sizeof(command->scatter_gather_element),
2385 PCI_DMA_BIDIRECTIONAL);
2386
2387 SBP2_ORB_DEBUG("matched command orb %p", &command->command_orb);
2388 outstanding_orb_decr;
2389
2390 /*
2391 * Matched status with command, now grab scsi command pointers and check status
2392 */
2393 SCpnt = command->Current_SCpnt;
2394 sbp2util_mark_command_completed(scsi_id, command);
2395
2396 if (SCpnt) {
2397
2398 /*
2399 * See if the target stored any scsi status information
2400 */
2401 if (STATUS_GET_LENGTH(scsi_id->status_block.ORB_offset_hi_misc) > 1) {
2402 /*
2403 * Translate SBP-2 status to SCSI sense data
2404 */
2405 SBP2_DEBUG("CHECK CONDITION");
2406 scsi_status = sbp2_status_to_sense_data((unchar *)&scsi_id->status_block, SCpnt->sense_buffer);
2407 }
2408
2409 /*
2410 * Check to see if the dead bit is set. If so, we'll have to initiate
2411 * a fetch agent reset.
2412 */
2413 if (STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc)) {
2414
2415 /*
2416 * Initiate a fetch agent reset.
2417 */
2418 SBP2_DEBUG("Dead bit set - initiating fetch agent reset");
2419 sbp2_agent_reset(scsi_id, 0);
2420 }
2421
2422 SBP2_ORB_DEBUG("completing command orb %p", &command->command_orb);
2423 }
2424
2425 /*
2426 * Check here to see if there are no commands in-use. If there are none, we can
2427 * null out last orb so that next time around we write directly to the orb pointer...
2428 * Quick start saves one 1394 bus transaction.
2429 */
2430 if (list_empty(&scsi_id->sbp2_command_orb_inuse)) {
2431 scsi_id->last_orb = NULL;
2432 }
2433
2434 } else {
2435
2436 /*
2437 * It's probably a login/logout/reconnect status.
2438 */
2439 if ((scsi_id->login_orb_dma == scsi_id->status_block.ORB_offset_lo) ||
2440 (scsi_id->query_logins_orb_dma == scsi_id->status_block.ORB_offset_lo) ||
2441 (scsi_id->reconnect_orb_dma == scsi_id->status_block.ORB_offset_lo) ||
2442 (scsi_id->logout_orb_dma == scsi_id->status_block.ORB_offset_lo)) {
2443 atomic_set(&scsi_id->sbp2_login_complete, 1);
2444 }
2445 }
2446
2447 if (SCpnt) {
2448
2449 /* Complete the SCSI command. */
2450 SBP2_DEBUG("Completing SCSI command");
2451 sbp2scsi_complete_command(scsi_id, scsi_status, SCpnt,
2452 command->Current_done);
2453 SBP2_ORB_DEBUG("command orb completed");
2454 }
2455
2456 return(RCODE_COMPLETE);
2457}
2458
2459
2460/**************************************
2461 * SCSI interface related section
2462 **************************************/
2463
2464/*
2465 * This routine is the main request entry routine for doing I/O. It is
2466 * called from the scsi stack directly.
2467 */
2468static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
2469 void (*done)(struct scsi_cmnd *))
2470{
2471 struct scsi_id_instance_data *scsi_id =
2472 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
2473 struct sbp2scsi_host_info *hi;
2474
2475 SBP2_DEBUG("sbp2scsi_queuecommand");
2476
2477 /*
2478 * If scsi_id is null, it means there is no device in this slot,
2479 * so we should return selection timeout.
2480 */
2481 if (!scsi_id) {
2482 SCpnt->result = DID_NO_CONNECT << 16;
2483 done (SCpnt);
2484 return 0;
2485 }
2486
2487 hi = scsi_id->hi;
2488
2489 if (!hi) {
2490 SBP2_ERR("sbp2scsi_host_info is NULL - this is bad!");
2491 SCpnt->result = DID_NO_CONNECT << 16;
2492 done (SCpnt);
2493 return(0);
2494 }
2495
2496 /*
2497 * Until we handle multiple luns, just return selection time-out
2498 * to any IO directed at non-zero LUNs
2499 */
2500 if (SCpnt->device->lun) {
2501 SCpnt->result = DID_NO_CONNECT << 16;
2502 done (SCpnt);
2503 return(0);
2504 }
2505
2506 /*
2507 * Check for request sense command, and handle it here
2508 * (autorequest sense)
2509 */
2510 if (SCpnt->cmnd[0] == REQUEST_SENSE) {
2511 SBP2_DEBUG("REQUEST_SENSE");
2512 memcpy(SCpnt->request_buffer, SCpnt->sense_buffer, SCpnt->request_bufflen);
2513 memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
2514 sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_GOOD, SCpnt, done);
2515 return(0);
2516 }
2517
2518 /*
2519 * Check to see if we are in the middle of a bus reset.
2520 */
2521 if (!hpsb_node_entry_valid(scsi_id->ne)) {
2522 SBP2_ERR("Bus reset in progress - rejecting command");
2523 SCpnt->result = DID_BUS_BUSY << 16;
2524 done (SCpnt);
2525 return(0);
2526 }
2527
2528 /*
2529 * Try and send our SCSI command
2530 */
2531 if (sbp2_send_command(scsi_id, SCpnt, done)) {
2532 SBP2_ERR("Error sending SCSI command");
2533 sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_SELECTION_TIMEOUT,
2534 SCpnt, done);
2535 }
2536
2537 return(0);
2538}
2539
2540/*
2541 * This function is called in order to complete all outstanding SBP-2
2542 * commands (in case of resets, etc.).
2543 */
2544static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id,
2545 u32 status)
2546{
2547 struct sbp2scsi_host_info *hi = scsi_id->hi;
2548 struct list_head *lh;
2549 struct sbp2_command_info *command;
2550
2551 SBP2_DEBUG("sbp2scsi_complete_all_commands");
2552
2553 while (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
2554 SBP2_DEBUG("Found pending command to complete");
2555 lh = scsi_id->sbp2_command_orb_inuse.next;
2556 command = list_entry(lh, struct sbp2_command_info, list);
2557 pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma,
2558 sizeof(struct sbp2_command_orb),
2559 PCI_DMA_BIDIRECTIONAL);
2560 pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma,
2561 sizeof(command->scatter_gather_element),
2562 PCI_DMA_BIDIRECTIONAL);
2563 sbp2util_mark_command_completed(scsi_id, command);
2564 if (command->Current_SCpnt) {
2565 command->Current_SCpnt->result = status << 16;
2566 command->Current_done(command->Current_SCpnt);
2567 }
2568 }
2569
2570 return;
2571}
2572
2573/*
2574 * This function is called in order to complete a regular SBP-2 command.
2575 *
2576 * This can be called in interrupt context.
2577 */
2578static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
2579 u32 scsi_status, struct scsi_cmnd *SCpnt,
2580 void (*done)(struct scsi_cmnd *))
2581{
2582 unsigned long flags;
2583
2584 SBP2_DEBUG("sbp2scsi_complete_command");
2585
2586 /*
2587 * Sanity
2588 */
2589 if (!SCpnt) {
2590 SBP2_ERR("SCpnt is NULL");
2591 return;
2592 }
2593
2594 /*
2595 * If a bus reset is in progress and there was an error, don't
2596 * complete the command, just let it get retried at the end of the
2597 * bus reset.
2598 */
2599 if (!hpsb_node_entry_valid(scsi_id->ne) && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
2600 SBP2_ERR("Bus reset in progress - retry command later");
2601 return;
2602 }
2603
2604 /*
2605 * Switch on scsi status
2606 */
2607 switch (scsi_status) {
2608 case SBP2_SCSI_STATUS_GOOD:
2609 SCpnt->result = DID_OK;
2610 break;
2611
2612 case SBP2_SCSI_STATUS_BUSY:
2613 SBP2_ERR("SBP2_SCSI_STATUS_BUSY");
2614 SCpnt->result = DID_BUS_BUSY << 16;
2615 break;
2616
2617 case SBP2_SCSI_STATUS_CHECK_CONDITION:
2618 SBP2_DEBUG("SBP2_SCSI_STATUS_CHECK_CONDITION");
2619 SCpnt->result = CHECK_CONDITION << 1;
2620
2621 /*
2622 * Debug stuff
2623 */
2624#if CONFIG_IEEE1394_SBP2_DEBUG >= 1
2625 scsi_print_command(SCpnt);
2626 scsi_print_sense("bh", SCpnt);
2627#endif
2628
2629 break;
2630
2631 case SBP2_SCSI_STATUS_SELECTION_TIMEOUT:
2632 SBP2_ERR("SBP2_SCSI_STATUS_SELECTION_TIMEOUT");
2633 SCpnt->result = DID_NO_CONNECT << 16;
2634 scsi_print_command(SCpnt);
2635 break;
2636
2637 case SBP2_SCSI_STATUS_CONDITION_MET:
2638 case SBP2_SCSI_STATUS_RESERVATION_CONFLICT:
2639 case SBP2_SCSI_STATUS_COMMAND_TERMINATED:
2640 SBP2_ERR("Bad SCSI status = %x", scsi_status);
2641 SCpnt->result = DID_ERROR << 16;
2642 scsi_print_command(SCpnt);
2643 break;
2644
2645 default:
2646 SBP2_ERR("Unsupported SCSI status = %x", scsi_status);
2647 SCpnt->result = DID_ERROR << 16;
2648 }
2649
2650 /*
2651 * Take care of any sbp2 response data mucking here (RBC stuff, etc.)
2652 */
2653 if (SCpnt->result == DID_OK) {
2654 sbp2_check_sbp2_response(scsi_id, SCpnt);
2655 }
2656
2657 /*
2658 * If a bus reset is in progress and there was an error, complete
2659 * the command as busy so that it will get retried.
2660 */
2661 if (!hpsb_node_entry_valid(scsi_id->ne) && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
2662 SBP2_ERR("Completing command with busy (bus reset)");
2663 SCpnt->result = DID_BUS_BUSY << 16;
2664 }
2665
2666 /*
2667 * If a unit attention occurs, return busy status so it gets
2668 * retried... it could have happened because of a 1394 bus reset
2669 * or hot-plug...
2670 */
2671#if 0
2672 if ((scsi_status == SBP2_SCSI_STATUS_CHECK_CONDITION) &&
2673 (SCpnt->sense_buffer[2] == UNIT_ATTENTION)) {
2674 SBP2_DEBUG("UNIT ATTENTION - return busy");
2675 SCpnt->result = DID_BUS_BUSY << 16;
2676 }
2677#endif
2678
2679 /*
2680 * Tell scsi stack that we're done with this command
2681 */
2682 spin_lock_irqsave(scsi_id->scsi_host->host_lock,flags);
2683 done (SCpnt);
2684 spin_unlock_irqrestore(scsi_id->scsi_host->host_lock,flags);
2685
2686 return;
2687}
2688
2689
2690static int sbp2scsi_slave_configure (struct scsi_device *sdev)
2691{
2692 blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
2693
2694 return 0;
2695}
2696
2697
2698/*
2699 * Called by scsi stack when something has really gone wrong. Usually
2700 * called when a command has timed-out for some reason.
2701 */
2702static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2703{
2704 struct scsi_id_instance_data *scsi_id =
2705 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
2706 struct sbp2scsi_host_info *hi = scsi_id->hi;
2707 struct sbp2_command_info *command;
2708
2709 SBP2_ERR("aborting sbp2 command");
2710 scsi_print_command(SCpnt);
2711
2712 if (scsi_id) {
2713
2714 /*
2715 * Right now, just return any matching command structures
2716 * to the free pool.
2717 */
2718 command = sbp2util_find_command_for_SCpnt(scsi_id, SCpnt);
2719 if (command) {
2720 SBP2_DEBUG("Found command to abort");
2721 pci_dma_sync_single_for_cpu(hi->host->pdev,
2722 command->command_orb_dma,
2723 sizeof(struct sbp2_command_orb),
2724 PCI_DMA_BIDIRECTIONAL);
2725 pci_dma_sync_single_for_cpu(hi->host->pdev,
2726 command->sge_dma,
2727 sizeof(command->scatter_gather_element),
2728 PCI_DMA_BIDIRECTIONAL);
2729 sbp2util_mark_command_completed(scsi_id, command);
2730 if (command->Current_SCpnt) {
2731 command->Current_SCpnt->result = DID_ABORT << 16;
2732 command->Current_done(command->Current_SCpnt);
2733 }
2734 }
2735
2736 /*
2737 * Initiate a fetch agent reset.
2738 */
2739 sbp2_agent_reset(scsi_id, 0);
2740 sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
2741 }
2742
2743 return(SUCCESS);
2744}
2745
2746/*
2747 * Called by scsi stack when something has really gone wrong.
2748 */
2749static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
2750{
2751 struct scsi_id_instance_data *scsi_id =
2752 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
2753
2754 SBP2_ERR("reset requested");
2755
2756 if (scsi_id) {
2757 SBP2_ERR("Generating sbp2 fetch agent reset");
2758 sbp2_agent_reset(scsi_id, 0);
2759 }
2760
2761 return(SUCCESS);
2762}
2763
2764static const char *sbp2scsi_info (struct Scsi_Host *host)
2765{
2766 return "SCSI emulation for IEEE-1394 SBP-2 Devices";
2767}
2768
2769static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, char *buf)
2770{
2771 struct scsi_device *sdev;
2772 struct scsi_id_instance_data *scsi_id;
2773 int lun;
2774
2775 if (!(sdev = to_scsi_device(dev)))
2776 return 0;
2777
2778 if (!(scsi_id = (struct scsi_id_instance_data *)sdev->host->hostdata[0]))
2779 return 0;
2780
2781 if (scsi_id->sbp2_device_type_and_lun == SBP2_DEVICE_TYPE_LUN_UNINITIALIZED)
2782 lun = 0;
2783 else
2784 lun = ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
2785
2786 return sprintf(buf, "%016Lx:%d:%d\n", (unsigned long long)scsi_id->ne->guid,
2787 scsi_id->ud->id, lun);
2788}
2789static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
2790
2791static struct device_attribute *sbp2_sysfs_sdev_attrs[] = {
2792 &dev_attr_ieee1394_id,
2793 NULL
2794};
2795
2796MODULE_AUTHOR("Ben Collins <bcollins@debian.org>");
2797MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
2798MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
2799MODULE_LICENSE("GPL");
2800
2801/* SCSI host template */
2802static struct scsi_host_template scsi_driver_template = {
2803 .module = THIS_MODULE,
2804 .name = "SBP-2 IEEE-1394",
2805 .proc_name = SBP2_DEVICE_NAME,
2806 .info = sbp2scsi_info,
2807 .queuecommand = sbp2scsi_queuecommand,
2808 .eh_abort_handler = sbp2scsi_abort,
2809 .eh_device_reset_handler = sbp2scsi_reset,
2810 .eh_bus_reset_handler = sbp2scsi_reset,
2811 .eh_host_reset_handler = sbp2scsi_reset,
2812 .slave_configure = sbp2scsi_slave_configure,
2813 .this_id = -1,
2814 .sg_tablesize = SG_ALL,
2815 .use_clustering = ENABLE_CLUSTERING,
2816 .cmd_per_lun = SBP2_MAX_CMDS,
2817 .can_queue = SBP2_MAX_CMDS,
2818 .emulated = 1,
2819 .sdev_attrs = sbp2_sysfs_sdev_attrs,
2820};
2821
2822static int sbp2_module_init(void)
2823{
2824 int ret;
2825
2826 SBP2_DEBUG("sbp2_module_init");
2827
2828 printk(KERN_INFO "sbp2: %s\n", version);
2829
2830 /* Module load debug option to force one command at a time (serializing I/O) */
2831 if (serialize_io) {
2832 SBP2_ERR("Driver forced to serialize I/O (serialize_io = 1)");
2833 scsi_driver_template.can_queue = 1;
2834 scsi_driver_template.cmd_per_lun = 1;
2835 }
2836
2837 /* Set max sectors (module load option). Default is 255 sectors. */
2838 scsi_driver_template.max_sectors = max_sectors;
2839
2840
2841 /* Register our high level driver with 1394 stack */
2842 hpsb_register_highlevel(&sbp2_highlevel);
2843
2844 ret = hpsb_register_protocol(&sbp2_driver);
2845 if (ret) {
2846 SBP2_ERR("Failed to register protocol");
2847 hpsb_unregister_highlevel(&sbp2_highlevel);
2848 return ret;
2849 }
2850
2851 return 0;
2852}
2853
2854static void __exit sbp2_module_exit(void)
2855{
2856 SBP2_DEBUG("sbp2_module_exit");
2857
2858 hpsb_unregister_protocol(&sbp2_driver);
2859
2860 hpsb_unregister_highlevel(&sbp2_highlevel);
2861}
2862
2863module_init(sbp2_module_init);
2864module_exit(sbp2_module_exit);
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
new file mode 100644
index 000000000000..a84b039a05b9
--- /dev/null
+++ b/drivers/ieee1394/sbp2.h
@@ -0,0 +1,484 @@
1/*
2 * sbp2.h - Defines and prototypes for sbp2.c
3 *
4 * Copyright (C) 2000 James Goodwin, Filanet Corporation (www.filanet.com)
5 * jamesg@filanet.com
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#ifndef SBP2_H
23#define SBP2_H
24
25#define SBP2_DEVICE_NAME "sbp2"
26
27/*
28 * SBP2 specific structures and defines
29 */
30
31#define ORB_DIRECTION_WRITE_TO_MEDIA 0x0
32#define ORB_DIRECTION_READ_FROM_MEDIA 0x1
33#define ORB_DIRECTION_NO_DATA_TRANSFER 0x2
34
35#define ORB_SET_NULL_PTR(value) ((value & 0x1) << 31)
36#define ORB_SET_NOTIFY(value) ((value & 0x1) << 31)
37#define ORB_SET_RQ_FMT(value) ((value & 0x3) << 29) /* unused ? */
38#define ORB_SET_NODE_ID(value) ((value & 0xffff) << 16)
39#define ORB_SET_DATA_SIZE(value) (value & 0xffff)
40#define ORB_SET_PAGE_SIZE(value) ((value & 0x7) << 16)
41#define ORB_SET_PAGE_TABLE_PRESENT(value) ((value & 0x1) << 19)
42#define ORB_SET_MAX_PAYLOAD(value) ((value & 0xf) << 20)
43#define ORB_SET_SPEED(value) ((value & 0x7) << 24)
44#define ORB_SET_DIRECTION(value) ((value & 0x1) << 27)
45
46struct sbp2_command_orb {
47 volatile u32 next_ORB_hi;
48 volatile u32 next_ORB_lo;
49 u32 data_descriptor_hi;
50 u32 data_descriptor_lo;
51 u32 misc;
52 u8 cdb[12];
53};
54
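/*
 * Illustrative sketch (hypothetical helper, not part of this header): how
 * the ORB_SET_* accessors above are typically combined into the misc word
 * of a command ORB.
 */
static inline u32 sbp2_example_command_orb_misc(u32 max_payload, u32 speed,
						u32 direction, u32 data_size)
{
	return ORB_SET_MAX_PAYLOAD(max_payload) |
	       ORB_SET_SPEED(speed) |
	       ORB_SET_NOTIFY(1) |		/* request a status write on completion */
	       ORB_SET_DIRECTION(direction) |	/* 0 = write to media, 1 = read from media */
	       ORB_SET_DATA_SIZE(data_size);
}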
55#define SBP2_LOGIN_REQUEST 0x0
56#define SBP2_QUERY_LOGINS_REQUEST 0x1
57#define SBP2_RECONNECT_REQUEST 0x3
58#define SBP2_SET_PASSWORD_REQUEST 0x4
59#define SBP2_LOGOUT_REQUEST 0x7
60#define SBP2_ABORT_TASK_REQUEST 0xb
61#define SBP2_ABORT_TASK_SET 0xc
62#define SBP2_LOGICAL_UNIT_RESET 0xe
63#define SBP2_TARGET_RESET_REQUEST 0xf
64
65#define ORB_SET_LUN(value) (value & 0xffff)
66#define ORB_SET_FUNCTION(value) ((value & 0xf) << 16)
67#define ORB_SET_RECONNECT(value) ((value & 0xf) << 20)
68#define ORB_SET_EXCLUSIVE(value) ((value & 0x1) << 28)
69#define ORB_SET_LOGIN_RESP_LENGTH(value) (value & 0xffff)
70#define ORB_SET_PASSWD_LENGTH(value) ((value & 0xffff) << 16)
71
72struct sbp2_login_orb {
73 u32 password_hi;
74 u32 password_lo;
75 u32 login_response_hi;
76 u32 login_response_lo;
77 u32 lun_misc;
78 u32 passwd_resp_lengths;
79 u32 status_FIFO_hi;
80 u32 status_FIFO_lo;
81};
82
83#define RESPONSE_GET_LOGIN_ID(value) (value & 0xffff)
84#define RESPONSE_GET_LENGTH(value) ((value >> 16) & 0xffff)
85#define RESPONSE_GET_RECONNECT_HOLD(value) (value & 0xffff)
86
87struct sbp2_login_response {
88 u32 length_login_ID;
89 u32 command_block_agent_hi;
90 u32 command_block_agent_lo;
91 u32 reconnect_hold;
92};
93
94#define ORB_SET_LOGIN_ID(value) (value & 0xffff)
95
96#define ORB_SET_QUERY_LOGINS_RESP_LENGTH(value) (value & 0xffff)
97
98struct sbp2_query_logins_orb {
99 u32 reserved1;
100 u32 reserved2;
101 u32 query_response_hi;
102 u32 query_response_lo;
103 u32 lun_misc;
104 u32 reserved_resp_length;
105 u32 status_FIFO_hi;
106 u32 status_FIFO_lo;
107};
108
109#define RESPONSE_GET_MAX_LOGINS(value) (value & 0xffff)
110#define RESPONSE_GET_ACTIVE_LOGINS(value) ((RESPONSE_GET_LENGTH(value) - 4) / 12)
111
112struct sbp2_query_logins_response {
113 u32 length_max_logins;
114 u32 misc_IDs;
115 u32 initiator_misc_hi;
116 u32 initiator_misc_lo;
117};
118
119struct sbp2_reconnect_orb {
120 u32 reserved1;
121 u32 reserved2;
122 u32 reserved3;
123 u32 reserved4;
124 u32 login_ID_misc;
125 u32 reserved5;
126 u32 status_FIFO_hi;
127 u32 status_FIFO_lo;
128};
129
130struct sbp2_logout_orb {
131 u32 reserved1;
132 u32 reserved2;
133 u32 reserved3;
134 u32 reserved4;
135 u32 login_ID_misc;
136 u32 reserved5;
137 u32 status_FIFO_hi;
138 u32 status_FIFO_lo;
139};
140
141#define PAGE_TABLE_SET_SEGMENT_BASE_HI(value) (value & 0xffff)
142#define PAGE_TABLE_SET_SEGMENT_LENGTH(value) ((value & 0xffff) << 16)
143
144struct sbp2_unrestricted_page_table {
145 u32 length_segment_base_hi;
146 u32 segment_base_lo;
147};
148
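/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * filling one unrestricted page table entry for a segment that starts at
 * 32-bit bus address "addr" and is "len" bytes long (len <= 0xffff).
 */
static inline void sbp2_example_fill_page_table_entry(
	struct sbp2_unrestricted_page_table *pte, u32 addr, u32 len)
{
	pte->length_segment_base_hi = PAGE_TABLE_SET_SEGMENT_LENGTH(len);
	pte->segment_base_lo = addr;	/* upper 16 address bits are zero here */
}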
149#define RESP_STATUS_REQUEST_COMPLETE 0x0
150#define RESP_STATUS_TRANSPORT_FAILURE 0x1
151#define RESP_STATUS_ILLEGAL_REQUEST 0x2
152#define RESP_STATUS_VENDOR_DEPENDENT 0x3
153
154#define SBP2_STATUS_NO_ADDITIONAL_INFO 0x0
155#define SBP2_STATUS_REQ_TYPE_NOT_SUPPORTED 0x1
156#define SBP2_STATUS_SPEED_NOT_SUPPORTED 0x2
157#define SBP2_STATUS_PAGE_SIZE_NOT_SUPPORTED 0x3
158#define SBP2_STATUS_ACCESS_DENIED 0x4
159#define SBP2_STATUS_LU_NOT_SUPPORTED 0x5
160#define SBP2_STATUS_MAX_PAYLOAD_TOO_SMALL 0x6
161#define SBP2_STATUS_RESOURCES_UNAVAILABLE 0x8
162#define SBP2_STATUS_FUNCTION_REJECTED 0x9
163#define SBP2_STATUS_LOGIN_ID_NOT_RECOGNIZED 0xa
164#define SBP2_STATUS_DUMMY_ORB_COMPLETED 0xb
165#define SBP2_STATUS_REQUEST_ABORTED 0xc
166#define SBP2_STATUS_UNSPECIFIED_ERROR 0xff
167
168#define SFMT_CURRENT_ERROR 0x0
169#define SFMT_DEFERRED_ERROR 0x1
170#define SFMT_VENDOR_DEPENDENT_STATUS 0x3
171
172#define SBP2_SCSI_STATUS_GOOD 0x0
173#define SBP2_SCSI_STATUS_CHECK_CONDITION 0x2
174#define SBP2_SCSI_STATUS_CONDITION_MET 0x4
175#define SBP2_SCSI_STATUS_BUSY 0x8
176#define SBP2_SCSI_STATUS_RESERVATION_CONFLICT 0x18
177#define SBP2_SCSI_STATUS_COMMAND_TERMINATED 0x22
178
179#define SBP2_SCSI_STATUS_SELECTION_TIMEOUT 0xff
180
181#define STATUS_GET_ORB_OFFSET_HI(value) (value & 0xffff)
182#define STATUS_GET_SBP_STATUS(value) ((value >> 16) & 0xff)
183#define STATUS_GET_LENGTH(value) ((value >> 24) & 0x7)
184#define STATUS_GET_DEAD_BIT(value) ((value >> 27) & 0x1)
185#define STATUS_GET_RESP(value) ((value >> 28) & 0x3)
186#define STATUS_GET_SRC(value) ((value >> 30) & 0x3)
187
188struct sbp2_status_block {
189 u32 ORB_offset_hi_misc;
190 u32 ORB_offset_lo;
191 u8 command_set_dependent[24];
192};
193
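/*
 * Illustrative sketch (hypothetical helper, not part of this header): once
 * the first two quadlets of the status block have been swapped to CPU
 * order, the misc word is decoded with the STATUS_GET_* accessors above,
 * e.g. to check whether the fetch agent died and needs a reset.
 */
static inline int sbp2_example_agent_dead(struct sbp2_status_block *status)
{
	return STATUS_GET_DEAD_BIT(status->ORB_offset_hi_misc);
}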
194/*
195 * Miscellaneous SBP2 related config rom defines
196 */
197
198/* The status fifo address definition below is used as a base for each
199 * node, with a chunk separately assigned to each unit directory in the
200 * node. For example, 0xfffe00000000ULL is used for the first sbp2 device
201 * detected on node 0, 0xfffe00000020ULL for the next sbp2 device on node
202 * 0, and so on.
203 *
204 * Note: We could use a single status fifo address for all sbp2 devices,
205 * and figure out which sbp2 device the status belongs to by looking at
206 * the source node id of the status write... but, using separate addresses
207 * for each sbp2 unit directory allows for better code and the ability to
208 * support multiple luns within a single 1394 node.
209 *
210 * Also note that we choose the address range below as it is a region
211 * specified for write posting, where the ohci controller will
212 * automatically send an ack_complete when the status is written by the
213 * sbp2 device... saving a split transaction. =)
214 */
215#define SBP2_STATUS_FIFO_ADDRESS 0xfffe00000000ULL
216#define SBP2_STATUS_FIFO_ADDRESS_HI 0xfffe
217#define SBP2_STATUS_FIFO_ADDRESS_LO 0x0
218
219#define SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(entry) ((entry) << 5)
220#define SBP2_STATUS_FIFO_OFFSET_TO_ENTRY(offset) ((offset) >> 5)
221
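/* Worked example (illustration only): the second SBP-2 unit directory on a
 * node (entry 1) is assigned the status FIFO address
 *	SBP2_STATUS_FIFO_ADDRESS + SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(1)
 *	= 0xfffe00000000ULL + 0x20 = 0xfffe00000020ULL
 * and a status write to that address is mapped back to entry 1 with
 *	SBP2_STATUS_FIFO_OFFSET_TO_ENTRY(addr - SBP2_STATUS_FIFO_ADDRESS).
 */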
222#define SBP2_UNIT_DIRECTORY_OFFSET_KEY 0xd1
223#define SBP2_CSR_OFFSET_KEY 0x54
224#define SBP2_UNIT_SPEC_ID_KEY 0x12
225#define SBP2_UNIT_SW_VERSION_KEY 0x13
226#define SBP2_COMMAND_SET_SPEC_ID_KEY 0x38
227#define SBP2_COMMAND_SET_KEY 0x39
228#define SBP2_UNIT_CHARACTERISTICS_KEY 0x3a
229#define SBP2_DEVICE_TYPE_AND_LUN_KEY 0x14
230#define SBP2_FIRMWARE_REVISION_KEY 0x3c
231
232#define SBP2_DEVICE_TYPE(q) (((q) >> 16) & 0x1f)
233#define SBP2_DEVICE_LUN(q) ((q) & 0xffff)
234
235#define SBP2_AGENT_STATE_OFFSET 0x00ULL
236#define SBP2_AGENT_RESET_OFFSET 0x04ULL
237#define SBP2_ORB_POINTER_OFFSET 0x08ULL
238#define SBP2_DOORBELL_OFFSET 0x10ULL
239#define SBP2_UNSOLICITED_STATUS_ENABLE_OFFSET 0x14ULL
240#define SBP2_UNSOLICITED_STATUS_VALUE 0xf
241
242#define SBP2_BUSY_TIMEOUT_ADDRESS 0xfffff0000210ULL
243#define SBP2_BUSY_TIMEOUT_VALUE 0xf
244
245#define SBP2_AGENT_RESET_DATA 0xf
246
247/*
248 * Unit spec id and sw version entry for SBP-2 devices
249 */
250
251#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
252#define SBP2_SW_VERSION_ENTRY 0x00010483
253
254/*
255 * Other misc defines
256 */
257#define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800
258
259#define SBP2_DEVICE_TYPE_LUN_UNINITIALIZED 0xffffffff
260
261/*
262 * SCSI specific stuff
263 */
264
265#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
266#define SBP2_MAX_UDS_PER_NODE 16 /* Maximum scsi devices per node */
267#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
268
269#ifndef TYPE_SDAD
270#define TYPE_SDAD 0x0e /* simplified direct access device */
271#endif
272
273/*
274 * SCSI direction table...
275 * (now used as a back-up in case the direction passed down from above is "unknown")
276 *
277 * DIN = IN data direction
278 * DOU = OUT data direction
279 * DNO = No data transfer
280 * DUN = Unknown data direction
281 *
282 * Opcode 0xec (Teac specific "opc execute") possibly should be DNO,
283 * but we'll change it when somebody reports a problem with this.
284 */
285#define DIN ORB_DIRECTION_READ_FROM_MEDIA
286#define DOU ORB_DIRECTION_WRITE_TO_MEDIA
287#define DNO ORB_DIRECTION_NO_DATA_TRANSFER
288#define DUN DIN
289
290static unchar sbp2scsi_direction_table[0x100] = {
291 DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
292 DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
293 DIN,DUN,DIN,DIN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
294 DOU,DOU,DOU,DNO,DIN,DNO,DNO,DIN,DOU,DOU,DOU,DOU,DIN,DOU,DIN,DOU,
295 DOU,DOU,DIN,DIN,DIN,DNO,DIN,DNO,DNO,DNO,DUN,DNO,DOU,DIN,DNO,DUN,
296 DUN,DIN,DIN,DNO,DNO,DOU,DUN,DUN,DNO,DIN,DIN,DNO,DIN,DOU,DUN,DUN,
297 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
298 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
299 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
300 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
301 DUN,DNO,DOU,DOU,DIN,DNO,DNO,DNO,DIN,DNO,DOU,DUN,DNO,DIN,DOU,DOU,
302 DOU,DOU,DOU,DNO,DUN,DIN,DOU,DIN,DIN,DIN,DNO,DNO,DNO,DIN,DIN,DUN,
303 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
304 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
305 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
306 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
307};
308
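/* For illustration only: the table is indexed by CDB opcode when the
 * direction from the SCSI layer is unknown, e.g.
 *	sbp2scsi_direction_table[0x28]	READ_10  -> DIN (read from media)
 *	sbp2scsi_direction_table[0x2a]	WRITE_10 -> DOU (write to media)
 */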
309/* This should be safe */
310#define SBP2_MAX_CMDS 8
311
312/* This is the two dma types we use for cmd_dma below */
313enum cmd_dma_types {
314 CMD_DMA_NONE,
315 CMD_DMA_PAGE,
316 CMD_DMA_SINGLE
317};
318
319/*
320 * Encapsulates all the info necessary for an outstanding command.
321 */
322struct sbp2_command_info {
323
324 struct list_head list;
325 struct sbp2_command_orb command_orb ____cacheline_aligned;
326 dma_addr_t command_orb_dma ____cacheline_aligned;
327 struct scsi_cmnd *Current_SCpnt;
328 void (*Current_done)(struct scsi_cmnd *);
329
330 /* Also need s/g structure for each sbp2 command */
331 struct sbp2_unrestricted_page_table scatter_gather_element[SG_ALL] ____cacheline_aligned;
332 dma_addr_t sge_dma ____cacheline_aligned;
333 void *sge_buffer;
334 dma_addr_t cmd_dma;
335 enum cmd_dma_types dma_type;
336 unsigned long dma_size;
337 int dma_dir;
338
339};
340
341/* A list of flags for detected oddities and brokenness. */
342#define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1
343#define SBP2_BREAKAGE_INQUIRY_HACK 0x2
344
345
346struct sbp2scsi_host_info;
347
348
349/*
350 * Information needed on a per scsi id basis (one for each sbp2 device)
351 */
352struct scsi_id_instance_data {
353 /*
354 * Various sbp2 specific structures
355 */
356 struct sbp2_command_orb *last_orb;
357 dma_addr_t last_orb_dma;
358 struct sbp2_login_orb *login_orb;
359 dma_addr_t login_orb_dma;
360 struct sbp2_login_response *login_response;
361 dma_addr_t login_response_dma;
362 struct sbp2_query_logins_orb *query_logins_orb;
363 dma_addr_t query_logins_orb_dma;
364 struct sbp2_query_logins_response *query_logins_response;
365 dma_addr_t query_logins_response_dma;
366 struct sbp2_reconnect_orb *reconnect_orb;
367 dma_addr_t reconnect_orb_dma;
368 struct sbp2_logout_orb *logout_orb;
369 dma_addr_t logout_orb_dma;
370 struct sbp2_status_block status_block;
371
372 /*
373 * Stuff we need to know about the sbp2 device itself
374 */
375 u64 sbp2_management_agent_addr;
376 u64 sbp2_command_block_agent_addr;
377 u32 speed_code;
378 u32 max_payload_size;
379
380 /*
381 * Values pulled from the device's unit directory
382 */
383 u32 sbp2_command_set_spec_id;
384 u32 sbp2_command_set;
385 u32 sbp2_unit_characteristics;
386 u32 sbp2_device_type_and_lun;
387 u32 sbp2_firmware_revision;
388
389 /*
390 * Variable used for logins, reconnects, logouts, query logins
391 */
392 atomic_t sbp2_login_complete;
393
394 /*
395	 * Pool of command orbs, so we can have more than one outstanding command per id
396 */
397 spinlock_t sbp2_command_orb_lock;
398 struct list_head sbp2_command_orb_inuse;
399 struct list_head sbp2_command_orb_completed;
400
401 struct list_head scsi_list;
402
403 /* Node entry, as retrieved from NodeMgr entries */
404 struct node_entry *ne;
405 struct unit_directory *ud;
406
407 /* A backlink to our host_info */
408 struct sbp2scsi_host_info *hi;
409
410 /* SCSI related pointers */
411 struct scsi_device *sdev;
412 struct Scsi_Host *scsi_host;
413
414	/* Device-specific workarounds/brokenness */
415 u32 workarounds;
416};
417
418
419/* Sbp2 host data structure (one per IEEE1394 host) */
420struct sbp2scsi_host_info {
421 struct hpsb_host *host; /* IEEE1394 host */
422 struct list_head scsi_ids; /* List of scsi ids on this host */
423};
424
425/*
426 * Function prototypes
427 */
428
429/*
430 * Various utility prototypes
431 */
432static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_id);
433static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_id);
434static struct sbp2_command_info *sbp2util_find_command_for_orb(struct scsi_id_instance_data *scsi_id, dma_addr_t orb);
435static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_instance_data *scsi_id, void *SCpnt);
436static struct sbp2_command_info *sbp2util_allocate_command_orb(struct scsi_id_instance_data *scsi_id,
437 struct scsi_cmnd *Current_SCpnt,
438 void (*Current_done)(struct scsi_cmnd *));
439static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id,
440 struct sbp2_command_info *command);
441
442
443static int sbp2_start_device(struct scsi_id_instance_data *scsi_id);
444static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id);
445
446#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
447static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, int destid, quadlet_t *data,
448 u64 addr, size_t length, u16 flags);
449static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_t *data,
450 u64 addr, size_t length, u16 flags);
451#endif
452
453/*
454 * SBP-2 protocol related prototypes
455 */
456static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id);
457static int sbp2_login_device(struct scsi_id_instance_data *scsi_id);
458static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id);
459static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id);
460static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid,
461 quadlet_t *data, u64 addr, size_t length, u16 flags);
462static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait);
463static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
464 struct sbp2_command_info *command,
465 unchar *scsi_cmd,
466 unsigned int scsi_use_sg,
467 unsigned int scsi_request_bufflen,
468 void *scsi_request_buffer,
469 enum dma_data_direction dma_dir);
470static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
471 struct sbp2_command_info *command);
472static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
473 struct scsi_cmnd *SCpnt,
474 void (*done)(struct scsi_cmnd *));
475static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data);
476static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, unchar *cmd);
477static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
478 struct scsi_cmnd *SCpnt);
479static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
480 struct unit_directory *ud);
481static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id);
482static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id);
483
484#endif /* SBP2_H */
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
new file mode 100644
index 000000000000..4bedf7113f40
--- /dev/null
+++ b/drivers/ieee1394/video1394.c
@@ -0,0 +1,1527 @@
1/*
2 * video1394.c - video driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 *
20 * NOTES:
21 *
22 * jds -- add private data to file to keep track of iso contexts associated
23 * with each open -- so release won't kill all iso transfers.
24 *
25 * Damien Douxchamps: Fix failure when the number of DMA pages per frame is
26 * one.
27 *
28 * ioctl return codes:
29 * EFAULT is only for invalid address for the argp
30 * EINVAL for out of range values
31 * EBUSY when trying to use an already used resource
32 * ESRCH when trying to free/stop a not used resource
33 * EAGAIN for resource allocation failure that could perhaps succeed later
34 * ENOTTY for unsupported ioctl request
35 *
36 */
37
38#include <linux/config.h>
39#include <linux/kernel.h>
40#include <linux/list.h>
41#include <linux/slab.h>
42#include <linux/interrupt.h>
43#include <linux/wait.h>
44#include <linux/errno.h>
45#include <linux/module.h>
46#include <linux/init.h>
47#include <linux/pci.h>
48#include <linux/fs.h>
49#include <linux/poll.h>
50#include <linux/smp_lock.h>
51#include <linux/delay.h>
52#include <linux/devfs_fs_kernel.h>
53#include <linux/bitops.h>
54#include <linux/types.h>
55#include <linux/vmalloc.h>
56#include <linux/timex.h>
57#include <linux/mm.h>
58#include <linux/ioctl32.h>
59#include <linux/compat.h>
60#include <linux/cdev.h>
61
62#include "ieee1394.h"
63#include "ieee1394_types.h"
64#include "hosts.h"
65#include "ieee1394_core.h"
66#include "highlevel.h"
67#include "video1394.h"
68#include "nodemgr.h"
69#include "dma.h"
70
71#include "ohci1394.h"
72
73#define ISO_CHANNELS 64
74
75#ifndef virt_to_page
76#define virt_to_page(x) MAP_NR(x)
77#endif
78
79#ifndef vmalloc_32
80#define vmalloc_32(x) vmalloc(x)
81#endif
82
83struct it_dma_prg {
84 struct dma_cmd begin;
85 quadlet_t data[4];
86 struct dma_cmd end;
87 quadlet_t pad[4]; /* FIXME: quick hack for memory alignment */
88};
89
90struct dma_iso_ctx {
91 struct ti_ohci *ohci;
92 int type; /* OHCI_ISO_TRANSMIT or OHCI_ISO_RECEIVE */
93 struct ohci1394_iso_tasklet iso_tasklet;
94 int channel;
95 int ctx;
96 int last_buffer;
97 int * next_buffer; /* For ISO Transmit of video packets
98 to write the correct SYT field
99 into the next block */
100 unsigned int num_desc;
101 unsigned int buf_size;
102 unsigned int frame_size;
103 unsigned int packet_size;
104 unsigned int left_size;
105 unsigned int nb_cmd;
106
107 struct dma_region dma;
108
109 struct dma_prog_region *prg_reg;
110
111 struct dma_cmd **ir_prg;
112 struct it_dma_prg **it_prg;
113
114 unsigned int *buffer_status;
115 struct timeval *buffer_time; /* time when the buffer was received */
116 unsigned int *last_used_cmd; /* For ISO Transmit with
117 variable sized packets only ! */
118 int ctrlClear;
119 int ctrlSet;
120 int cmdPtr;
121 int ctxMatch;
122 wait_queue_head_t waitq;
123 spinlock_t lock;
124 unsigned int syt_offset;
125 int flags;
126
127 struct list_head link;
128};
129
130
131struct file_ctx {
132 struct ti_ohci *ohci;
133 struct list_head context_list;
134 struct dma_iso_ctx *current_ctx;
135};
136
137#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
138#define VIDEO1394_DEBUG
139#endif
140
141#ifdef DBGMSG
142#undef DBGMSG
143#endif
144
145#ifdef VIDEO1394_DEBUG
146#define DBGMSG(card, fmt, args...) \
147printk(KERN_INFO "video1394_%d: " fmt "\n" , card , ## args)
148#else
149#define DBGMSG(card, fmt, args...)
150#endif
151
152/* print general (card independent) information */
153#define PRINT_G(level, fmt, args...) \
154printk(level "video1394: " fmt "\n" , ## args)
155
156/* print card specific information */
157#define PRINT(level, card, fmt, args...) \
158printk(level "video1394_%d: " fmt "\n" , card , ## args)
159
160static void wakeup_dma_ir_ctx(unsigned long l);
161static void wakeup_dma_it_ctx(unsigned long l);
162
163static struct hpsb_highlevel video1394_highlevel;
164
165static int free_dma_iso_ctx(struct dma_iso_ctx *d)
166{
167 int i;
168
169 DBGMSG(d->ohci->host->id, "Freeing dma_iso_ctx %d", d->ctx);
170
171 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
172 if (d->iso_tasklet.link.next != NULL)
173 ohci1394_unregister_iso_tasklet(d->ohci, &d->iso_tasklet);
174
175 dma_region_free(&d->dma);
176
177 if (d->prg_reg) {
178 for (i = 0; i < d->num_desc; i++)
179 dma_prog_region_free(&d->prg_reg[i]);
180 kfree(d->prg_reg);
181 }
182
183 if (d->ir_prg)
184 kfree(d->ir_prg);
185
186 if (d->it_prg)
187 kfree(d->it_prg);
188
189 if (d->buffer_status)
190 kfree(d->buffer_status);
191 if (d->buffer_time)
192 kfree(d->buffer_time);
193 if (d->last_used_cmd)
194 kfree(d->last_used_cmd);
195 if (d->next_buffer)
196 kfree(d->next_buffer);
197
198 list_del(&d->link);
199
200 kfree(d);
201
202 return 0;
203}
204
205static struct dma_iso_ctx *
206alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
207 int buf_size, int channel, unsigned int packet_size)
208{
209 struct dma_iso_ctx *d;
210 int i;
211
212 d = kmalloc(sizeof(struct dma_iso_ctx), GFP_KERNEL);
213 if (d == NULL) {
214 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma_iso_ctx");
215 return NULL;
216 }
217
218 memset(d, 0, sizeof *d);
219
220 d->ohci = ohci;
221 d->type = type;
222 d->channel = channel;
223 d->num_desc = num_desc;
224 d->frame_size = buf_size;
225 d->buf_size = PAGE_ALIGN(buf_size);
226 d->last_buffer = -1;
227 INIT_LIST_HEAD(&d->link);
228 init_waitqueue_head(&d->waitq);
229
230 /* Init the regions for easy cleanup */
231 dma_region_init(&d->dma);
232
233 if (dma_region_alloc(&d->dma, d->num_desc * d->buf_size, ohci->dev,
234 PCI_DMA_BIDIRECTIONAL)) {
235 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma buffer");
236 free_dma_iso_ctx(d);
237 return NULL;
238 }
239
240 if (type == OHCI_ISO_RECEIVE)
241 ohci1394_init_iso_tasklet(&d->iso_tasklet, type,
242 wakeup_dma_ir_ctx,
243 (unsigned long) d);
244 else
245 ohci1394_init_iso_tasklet(&d->iso_tasklet, type,
246 wakeup_dma_it_ctx,
247 (unsigned long) d);
248
249 if (ohci1394_register_iso_tasklet(ohci, &d->iso_tasklet) < 0) {
250 PRINT(KERN_ERR, ohci->host->id, "no free iso %s contexts",
251 type == OHCI_ISO_RECEIVE ? "receive" : "transmit");
252 free_dma_iso_ctx(d);
253 return NULL;
254 }
255 d->ctx = d->iso_tasklet.context;
256
257 d->prg_reg = kmalloc(d->num_desc * sizeof(struct dma_prog_region),
258 GFP_KERNEL);
259 if (d->prg_reg == NULL) {
260 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate ir prg regs");
261 free_dma_iso_ctx(d);
262 return NULL;
263 }
264 /* Makes for easier cleanup */
265 for (i = 0; i < d->num_desc; i++)
266 dma_prog_region_init(&d->prg_reg[i]);
267
268 if (type == OHCI_ISO_RECEIVE) {
269 d->ctrlSet = OHCI1394_IsoRcvContextControlSet+32*d->ctx;
270 d->ctrlClear = OHCI1394_IsoRcvContextControlClear+32*d->ctx;
271 d->cmdPtr = OHCI1394_IsoRcvCommandPtr+32*d->ctx;
272 d->ctxMatch = OHCI1394_IsoRcvContextMatch+32*d->ctx;
273
274 d->ir_prg = kmalloc(d->num_desc * sizeof(struct dma_cmd *),
275 GFP_KERNEL);
276
277 if (d->ir_prg == NULL) {
278 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma ir prg");
279 free_dma_iso_ctx(d);
280 return NULL;
281 }
282 memset(d->ir_prg, 0, d->num_desc * sizeof(struct dma_cmd *));
283
284 d->nb_cmd = d->buf_size / PAGE_SIZE + 1;
285 d->left_size = (d->frame_size % PAGE_SIZE) ?
286 d->frame_size % PAGE_SIZE : PAGE_SIZE;
287
288 for (i = 0;i < d->num_desc; i++) {
289 if (dma_prog_region_alloc(&d->prg_reg[i], d->nb_cmd *
290 sizeof(struct dma_cmd), ohci->dev)) {
291 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma ir prg");
292 free_dma_iso_ctx(d);
293 return NULL;
294 }
295 d->ir_prg[i] = (struct dma_cmd *)d->prg_reg[i].kvirt;
296 }
297
298 } else { /* OHCI_ISO_TRANSMIT */
299 d->ctrlSet = OHCI1394_IsoXmitContextControlSet+16*d->ctx;
300 d->ctrlClear = OHCI1394_IsoXmitContextControlClear+16*d->ctx;
301 d->cmdPtr = OHCI1394_IsoXmitCommandPtr+16*d->ctx;
302
303 d->it_prg = kmalloc(d->num_desc * sizeof(struct it_dma_prg *),
304 GFP_KERNEL);
305
306 if (d->it_prg == NULL) {
307 PRINT(KERN_ERR, ohci->host->id,
308 "Failed to allocate dma it prg");
309 free_dma_iso_ctx(d);
310 return NULL;
311 }
312 memset(d->it_prg, 0, d->num_desc*sizeof(struct it_dma_prg *));
313
314 d->packet_size = packet_size;
315
316 if (PAGE_SIZE % packet_size || packet_size>4096) {
317 PRINT(KERN_ERR, ohci->host->id,
318 "Packet size %d (page_size: %ld) "
319 "not yet supported\n",
320 packet_size, PAGE_SIZE);
321 free_dma_iso_ctx(d);
322 return NULL;
323 }
324
325 d->nb_cmd = d->frame_size / d->packet_size;
326 if (d->frame_size % d->packet_size) {
327 d->nb_cmd++;
328 d->left_size = d->frame_size % d->packet_size;
329 } else
330 d->left_size = d->packet_size;
331
332 for (i = 0; i < d->num_desc; i++) {
333 if (dma_prog_region_alloc(&d->prg_reg[i], d->nb_cmd *
334 sizeof(struct it_dma_prg), ohci->dev)) {
335 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma it prg");
336 free_dma_iso_ctx(d);
337 return NULL;
338 }
339 d->it_prg[i] = (struct it_dma_prg *)d->prg_reg[i].kvirt;
340 }
341 }
342
343 d->buffer_status = kmalloc(d->num_desc * sizeof(unsigned int),
344 GFP_KERNEL);
345 d->buffer_time = kmalloc(d->num_desc * sizeof(struct timeval),
346 GFP_KERNEL);
347 d->last_used_cmd = kmalloc(d->num_desc * sizeof(unsigned int),
348 GFP_KERNEL);
349 d->next_buffer = kmalloc(d->num_desc * sizeof(int),
350 GFP_KERNEL);
351
352 if (d->buffer_status == NULL) {
353 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate buffer_status");
354 free_dma_iso_ctx(d);
355 return NULL;
356 }
357 if (d->buffer_time == NULL) {
358 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate buffer_time");
359 free_dma_iso_ctx(d);
360 return NULL;
361 }
362 if (d->last_used_cmd == NULL) {
363 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate last_used_cmd");
364 free_dma_iso_ctx(d);
365 return NULL;
366 }
367 if (d->next_buffer == NULL) {
368 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate next_buffer");
369 free_dma_iso_ctx(d);
370 return NULL;
371 }
372 memset(d->buffer_status, 0, d->num_desc * sizeof(unsigned int));
373 memset(d->buffer_time, 0, d->num_desc * sizeof(struct timeval));
374 memset(d->last_used_cmd, 0, d->num_desc * sizeof(unsigned int));
375 memset(d->next_buffer, -1, d->num_desc * sizeof(int));
376
377 spin_lock_init(&d->lock);
378
379 PRINT(KERN_INFO, ohci->host->id, "Iso %s DMA: %d buffers "
380 "of size %d allocated for a frame size %d, each with %d prgs",
381 (type == OHCI_ISO_RECEIVE) ? "receive" : "transmit",
382 d->num_desc, d->buf_size, d->frame_size, d->nb_cmd);
383
384 return d;
385}
386
387static void reset_ir_status(struct dma_iso_ctx *d, int n)
388{
389 int i;
390 d->ir_prg[n][0].status = cpu_to_le32(4);
391 d->ir_prg[n][1].status = cpu_to_le32(PAGE_SIZE-4);
392 for (i = 2; i < d->nb_cmd - 1; i++)
393 d->ir_prg[n][i].status = cpu_to_le32(PAGE_SIZE);
394 d->ir_prg[n][i].status = cpu_to_le32(d->left_size);
395}
396
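/*
 * Build the receive DMA program for buffer n: the first INPUT_MORE
 * descriptor reads only the leading 4-byte quadlet (and can be made to
 * wait for a matching sync field with VIDEO1394_SYNC_FRAMES), the
 * following descriptors read whole pages, and the last one reads the
 * remaining left_size bytes and raises an interrupt.  The low four bits
 * of each branchAddress carry the OHCI Z value, 1 here since every
 * dma_cmd is a single 16-byte descriptor block.
 */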
397static void initialize_dma_ir_prg(struct dma_iso_ctx *d, int n, int flags)
398{
399 struct dma_cmd *ir_prg = d->ir_prg[n];
400 struct dma_prog_region *ir_reg = &d->prg_reg[n];
401 unsigned long buf = (unsigned long)d->dma.kvirt + n * d->buf_size;
402 int i;
403
404 /* the first descriptor will read only 4 bytes */
405 ir_prg[0].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
406 DMA_CTL_BRANCH | 4);
407
408 /* set the sync flag */
409 if (flags & VIDEO1394_SYNC_FRAMES)
410 ir_prg[0].control |= cpu_to_le32(DMA_CTL_WAIT);
411
412 ir_prg[0].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma, buf -
413 (unsigned long)d->dma.kvirt));
414 ir_prg[0].branchAddress = cpu_to_le32((dma_prog_region_offset_to_bus(ir_reg,
415 1 * sizeof(struct dma_cmd)) & 0xfffffff0) | 0x1);
416
417	/* The frame spans more than one DMA page (a single-page frame would have d->nb_cmd == 2) */
418 if (d->nb_cmd > 2) {
419 /* The second descriptor will read PAGE_SIZE-4 bytes */
420 ir_prg[1].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
421 DMA_CTL_BRANCH | (PAGE_SIZE-4));
422 ir_prg[1].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma, (buf + 4) -
423 (unsigned long)d->dma.kvirt));
424 ir_prg[1].branchAddress = cpu_to_le32((dma_prog_region_offset_to_bus(ir_reg,
425 2 * sizeof(struct dma_cmd)) & 0xfffffff0) | 0x1);
426
427 for (i = 2; i < d->nb_cmd - 1; i++) {
428 ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
429 DMA_CTL_BRANCH | PAGE_SIZE);
430 ir_prg[i].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
431 (buf+(i-1)*PAGE_SIZE) -
432 (unsigned long)d->dma.kvirt));
433
434 ir_prg[i].branchAddress =
435 cpu_to_le32((dma_prog_region_offset_to_bus(ir_reg,
436 (i + 1) * sizeof(struct dma_cmd)) & 0xfffffff0) | 0x1);
437 }
438
439 /* The last descriptor will generate an interrupt */
440 ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
441 DMA_CTL_IRQ | DMA_CTL_BRANCH | d->left_size);
442 ir_prg[i].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
443 (buf+(i-1)*PAGE_SIZE) -
444 (unsigned long)d->dma.kvirt));
445 } else {
446		/* Only one DMA page is used: read the remaining left_size-4 bytes */
447		/* and generate an interrupt, as this is also the last page. */
448 ir_prg[1].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
449 DMA_CTL_IRQ | DMA_CTL_BRANCH | (d->left_size-4));
450 ir_prg[1].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
451 (buf + 4) - (unsigned long)d->dma.kvirt));
452 }
453}
454
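/*
 * Program the receive context: ContextControl bit 31 selects buffer-fill
 * mode (received packets are concatenated back to back in the buffer),
 * bit 30 makes the controller store the 4-byte isochronous packet header
 * in front of each packet's payload, and ContextMatch is set to accept
 * all four tag values, the requested sync value and this context's
 * channel number.
 */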
455static void initialize_dma_ir_ctx(struct dma_iso_ctx *d, int tag, int flags)
456{
457 struct ti_ohci *ohci = (struct ti_ohci *)d->ohci;
458 int i;
459
460 d->flags = flags;
461
462 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
463
464 for (i=0;i<d->num_desc;i++) {
465 initialize_dma_ir_prg(d, i, flags);
466 reset_ir_status(d, i);
467 }
468
469 /* reset the ctrl register */
470 reg_write(ohci, d->ctrlClear, 0xf0000000);
471
472 /* Set bufferFill */
473 reg_write(ohci, d->ctrlSet, 0x80000000);
474
475 /* Set isoch header */
476 if (flags & VIDEO1394_INCLUDE_ISO_HEADERS)
477 reg_write(ohci, d->ctrlSet, 0x40000000);
478
479 /* Set the context match register to match on all tags,
480 sync for sync tag, and listen to d->channel */
481 reg_write(ohci, d->ctxMatch, 0xf0000000|((tag&0xf)<<8)|d->channel);
482
483 /* Set up isoRecvIntMask to generate interrupts */
484 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1<<d->ctx);
485}
486
487/* find which context is listening to this channel */
488static struct dma_iso_ctx *
489find_ctx(struct list_head *list, int type, int channel)
490{
491 struct dma_iso_ctx *ctx;
492
493 list_for_each_entry(ctx, list, link) {
494 if (ctx->type == type && ctx->channel == channel)
495 return ctx;
496 }
497
498 return NULL;
499}
500
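/*
 * Receive tasklet: a buffer is complete once the controller has written
 * a non-zero xferStatus into the upper 16 bits of its last descriptor's
 * status word.  Completed buffers are re-armed, marked
 * VIDEO1394_BUFFER_READY, timestamped, and any sleeper in the wait/poll
 * ioctls is woken up.
 */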
501static void wakeup_dma_ir_ctx(unsigned long l)
502{
503 struct dma_iso_ctx *d = (struct dma_iso_ctx *) l;
504 int i;
505
506 spin_lock(&d->lock);
507
508 for (i = 0; i < d->num_desc; i++) {
509 if (d->ir_prg[i][d->nb_cmd-1].status & cpu_to_le32(0xFFFF0000)) {
510 reset_ir_status(d, i);
511 d->buffer_status[i] = VIDEO1394_BUFFER_READY;
512 do_gettimeofday(&d->buffer_time[i]);
513 }
514 }
515
516 spin_unlock(&d->lock);
517
518 if (waitqueue_active(&d->waitq))
519 wake_up_interruptible(&d->waitq);
520}
521
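/*
 * Patch an AMDTP-style SYT timestamp into the first packet of buffer n
 * (bytes 6 and 7, i.e. the low 16 bits of the second CIP quadlet).
 * The cycle timer's low 12 bits are the 0..3071 offset within the
 * current 125 us cycle and bits 15..12 are the low bits of the cycle
 * count; adding syt_offset (~450 us of presentation delay by default)
 * and folding the carry back in keeps that 4.12 format.  The next
 * queued buffer is stamped as well to cover interrupt latency.
 */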
522static inline void put_timestamp(struct ti_ohci *ohci, struct dma_iso_ctx * d,
523 int n)
524{
525 unsigned char* buf = d->dma.kvirt + n * d->buf_size;
526 u32 cycleTimer;
527 u32 timeStamp;
528
529 if (n == -1) {
530 return;
531 }
532
533 cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
534
535	timeStamp = ((cycleTimer & 0x0fff) + d->syt_offset); /* 11059 ticks = 450 us at 24.576 MHz */
536 timeStamp = (timeStamp % 3072 + ((timeStamp / 3072) << 12)
537 + (cycleTimer & 0xf000)) & 0xffff;
538
539 buf[6] = timeStamp >> 8;
540 buf[7] = timeStamp & 0xff;
541
542 /* if first packet is empty packet, then put timestamp into the next full one too */
543 if ( (le32_to_cpu(d->it_prg[n][0].data[1]) >>16) == 0x008) {
544 buf += d->packet_size;
545 buf[6] = timeStamp >> 8;
546 buf[7] = timeStamp & 0xff;
547 }
548
549 /* do the next buffer frame too in case of irq latency */
550 n = d->next_buffer[n];
551 if (n == -1) {
552 return;
553 }
554 buf = d->dma.kvirt + n * d->buf_size;
555
556 timeStamp += (d->last_used_cmd[n] << 12) & 0xffff;
557
558 buf[6] = timeStamp >> 8;
559 buf[7] = timeStamp & 0xff;
560
561 /* if first packet is empty packet, then put timestamp into the next full one too */
562 if ( (le32_to_cpu(d->it_prg[n][0].data[1]) >>16) == 0x008) {
563 buf += d->packet_size;
564 buf[6] = timeStamp >> 8;
565 buf[7] = timeStamp & 0xff;
566 }
567
568#if 0
569 printk("curr: %d, next: %d, cycleTimer: %08x timeStamp: %08x\n",
570 curr, n, cycleTimer, timeStamp);
571#endif
572}
573
574static void wakeup_dma_it_ctx(unsigned long l)
575{
576 struct dma_iso_ctx *d = (struct dma_iso_ctx *) l;
577 struct ti_ohci *ohci = d->ohci;
578 int i;
579
580 spin_lock(&d->lock);
581
582 for (i = 0; i < d->num_desc; i++) {
583 if (d->it_prg[i][d->last_used_cmd[i]].end.status &
584 cpu_to_le32(0xFFFF0000)) {
585 int next = d->next_buffer[i];
586 put_timestamp(ohci, d, next);
587 d->it_prg[i][d->last_used_cmd[i]].end.status = 0;
588 d->buffer_status[i] = VIDEO1394_BUFFER_READY;
589 }
590 }
591
592 spin_unlock(&d->lock);
593
594 if (waitqueue_active(&d->waitq))
595 wake_up_interruptible(&d->waitq);
596}
597
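/*
 * Build the transmit DMA program for buffer n.  Each packet uses an
 * OUTPUT_MORE-Immediate descriptor carrying the two-quadlet isochronous
 * packet header (speed, tag, channel, tcode and data length; the sync
 * value is OR'd into the first packet only), followed by an OUTPUT_LAST
 * descriptor pointing at the payload.  The pair occupies three 16-byte
 * descriptor slots, hence the Z value of 3 in the branch addresses.
 * The last packet raises an interrupt and does not branch until the
 * next buffer is queued.
 */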
598static void initialize_dma_it_prg(struct dma_iso_ctx *d, int n, int sync_tag)
599{
600 struct it_dma_prg *it_prg = d->it_prg[n];
601 struct dma_prog_region *it_reg = &d->prg_reg[n];
602 unsigned long buf = (unsigned long)d->dma.kvirt + n * d->buf_size;
603 int i;
604 d->last_used_cmd[n] = d->nb_cmd - 1;
605 for (i=0;i<d->nb_cmd;i++) {
606
607 it_prg[i].begin.control = cpu_to_le32(DMA_CTL_OUTPUT_MORE |
608 DMA_CTL_IMMEDIATE | 8) ;
609 it_prg[i].begin.address = 0;
610
611 it_prg[i].begin.status = 0;
612
613 it_prg[i].data[0] = cpu_to_le32(
614 (IEEE1394_SPEED_100 << 16)
615 | (/* tag */ 1 << 14)
616 | (d->channel << 8)
617 | (TCODE_ISO_DATA << 4));
618 if (i==0) it_prg[i].data[0] |= cpu_to_le32(sync_tag);
619 it_prg[i].data[1] = cpu_to_le32(d->packet_size << 16);
620 it_prg[i].data[2] = 0;
621 it_prg[i].data[3] = 0;
622
623 it_prg[i].end.control = cpu_to_le32(DMA_CTL_OUTPUT_LAST |
624 DMA_CTL_BRANCH);
625 it_prg[i].end.address =
626 cpu_to_le32(dma_region_offset_to_bus(&d->dma, (buf+i*d->packet_size) -
627 (unsigned long)d->dma.kvirt));
628
629 if (i<d->nb_cmd-1) {
630 it_prg[i].end.control |= cpu_to_le32(d->packet_size);
631 it_prg[i].begin.branchAddress =
632 cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
633 sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
634 it_prg[i].end.branchAddress =
635 cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
636 sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
637 } else {
638 /* the last prg generates an interrupt */
639 it_prg[i].end.control |= cpu_to_le32(DMA_CTL_UPDATE |
640 DMA_CTL_IRQ | d->left_size);
641 /* the last prg doesn't branch */
642 it_prg[i].begin.branchAddress = 0;
643 it_prg[i].end.branchAddress = 0;
644 }
645 it_prg[i].end.status = 0;
646 }
647}
648
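/*
 * For VIDEO1394_VARIABLE_PACKET_SIZE, rewrite the per-packet lengths of
 * an already initialized transmit program.  Sizes are clamped to the
 * maximum packet_size, and the program is terminated (interrupt, no
 * branch) either at the last entry or just before the first zero-sized
 * packet, whichever comes first.
 */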
649static void initialize_dma_it_prg_var_packet_queue(
650 struct dma_iso_ctx *d, int n, unsigned int * packet_sizes,
651 struct ti_ohci *ohci)
652{
653 struct it_dma_prg *it_prg = d->it_prg[n];
654 struct dma_prog_region *it_reg = &d->prg_reg[n];
655 int i;
656
657#if 0
658 if (n != -1) {
659 put_timestamp(ohci, d, n);
660 }
661#endif
662 d->last_used_cmd[n] = d->nb_cmd - 1;
663
664 for (i = 0; i < d->nb_cmd; i++) {
665 unsigned int size;
666 if (packet_sizes[i] > d->packet_size) {
667 size = d->packet_size;
668 } else {
669 size = packet_sizes[i];
670 }
671 it_prg[i].data[1] = cpu_to_le32(size << 16);
672 it_prg[i].end.control = cpu_to_le32(DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH);
673
674 if (i < d->nb_cmd-1 && packet_sizes[i+1] != 0) {
675 it_prg[i].end.control |= cpu_to_le32(size);
676 it_prg[i].begin.branchAddress =
677 cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
678 sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
679 it_prg[i].end.branchAddress =
680 cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
681 sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
682 } else {
683 /* the last prg generates an interrupt */
684 it_prg[i].end.control |= cpu_to_le32(DMA_CTL_UPDATE |
685 DMA_CTL_IRQ | size);
686 /* the last prg doesn't branch */
687 it_prg[i].begin.branchAddress = 0;
688 it_prg[i].end.branchAddress = 0;
689 d->last_used_cmd[n] = i;
690 break;
691 }
692 }
693}
694
695static void initialize_dma_it_ctx(struct dma_iso_ctx *d, int sync_tag,
696 unsigned int syt_offset, int flags)
697{
698 struct ti_ohci *ohci = (struct ti_ohci *)d->ohci;
699 int i;
700
701 d->flags = flags;
702 d->syt_offset = (syt_offset == 0 ? 11000 : syt_offset);
703
704 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
705
706 for (i=0;i<d->num_desc;i++)
707 initialize_dma_it_prg(d, i, sync_tag);
708
709	/* Set up isoXmitIntMask to generate interrupts */
710 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1<<d->ctx);
711}
712
713static inline unsigned video1394_buffer_state(struct dma_iso_ctx *d,
714 unsigned int buffer)
715{
716 unsigned long flags;
717 unsigned int ret;
718 spin_lock_irqsave(&d->lock, flags);
719 ret = d->buffer_status[buffer];
720 spin_unlock_irqrestore(&d->lock, flags);
721 return ret;
722}
723
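/*
 * ioctl interface: LISTEN/TALK_CHANNEL claims an isochronous channel and
 * allocates a receive or transmit context (the page-aligned buffer size
 * is written back to user space for the subsequent mmap),
 * *_QUEUE_BUFFER chains a buffer onto the DMA program and (re)starts the
 * context, *_WAIT_BUFFER / LISTEN_POLL_BUFFER report completion, and
 * UNLISTEN/UNTALK_CHANNEL tears the context down again.
 */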
724static int __video1394_ioctl(struct file *file,
725 unsigned int cmd, unsigned long arg)
726{
727 struct file_ctx *ctx = (struct file_ctx *)file->private_data;
728 struct ti_ohci *ohci = ctx->ohci;
729 unsigned long flags;
730 void __user *argp = (void __user *)arg;
731
732 switch(cmd)
733 {
734 case VIDEO1394_IOC_LISTEN_CHANNEL:
735 case VIDEO1394_IOC_TALK_CHANNEL:
736 {
737 struct video1394_mmap v;
738 u64 mask;
739 struct dma_iso_ctx *d;
740 int i;
741
742 if (copy_from_user(&v, argp, sizeof(v)))
743 return -EFAULT;
744
745 /* if channel < 0, find lowest available one */
746 if (v.channel < 0) {
747 mask = (u64)0x1;
748 for (i=0; ; i++) {
749 if (i == ISO_CHANNELS) {
750 PRINT(KERN_ERR, ohci->host->id,
751 "No free channel found");
752					return -EAGAIN;
753 }
754 if (!(ohci->ISO_channel_usage & mask)) {
755 v.channel = i;
756 PRINT(KERN_INFO, ohci->host->id, "Found free channel %d", i);
757 break;
758 }
759 mask = mask << 1;
760 }
761 } else if (v.channel >= ISO_CHANNELS) {
762 PRINT(KERN_ERR, ohci->host->id,
763 "Iso channel %d out of bounds", v.channel);
764 return -EINVAL;
765 } else {
766 mask = (u64)0x1<<v.channel;
767 }
768 PRINT(KERN_INFO, ohci->host->id, "mask: %08X%08X usage: %08X%08X\n",
769 (u32)(mask>>32),(u32)(mask&0xffffffff),
770 (u32)(ohci->ISO_channel_usage>>32),
771 (u32)(ohci->ISO_channel_usage&0xffffffff));
772 if (ohci->ISO_channel_usage & mask) {
773 PRINT(KERN_ERR, ohci->host->id,
774 "Channel %d is already taken", v.channel);
775 return -EBUSY;
776 }
777
778 if (v.buf_size == 0 || v.buf_size > VIDEO1394_MAX_SIZE) {
779 PRINT(KERN_ERR, ohci->host->id,
780			      "Invalid buffer size %d requested", v.buf_size);
781 return -EINVAL;
782 }
783
784 if (v.nb_buffers == 0 || v.nb_buffers > VIDEO1394_MAX_SIZE) {
785 PRINT(KERN_ERR, ohci->host->id,
786			      "Invalid number of buffers (%d) requested", v.nb_buffers);
787 return -EINVAL;
788 }
789
790 if (v.nb_buffers * v.buf_size > VIDEO1394_MAX_SIZE) {
791 PRINT(KERN_ERR, ohci->host->id,
792 "%d buffers of size %d bytes is too big",
793 v.nb_buffers, v.buf_size);
794 return -EINVAL;
795 }
796
797 if (cmd == VIDEO1394_IOC_LISTEN_CHANNEL) {
798 d = alloc_dma_iso_ctx(ohci, OHCI_ISO_RECEIVE,
799 v.nb_buffers, v.buf_size,
800 v.channel, 0);
801
802 if (d == NULL) {
803 PRINT(KERN_ERR, ohci->host->id,
804 "Couldn't allocate ir context");
805 return -EAGAIN;
806 }
807 initialize_dma_ir_ctx(d, v.sync_tag, v.flags);
808
809 ctx->current_ctx = d;
810
811 v.buf_size = d->buf_size;
812 list_add_tail(&d->link, &ctx->context_list);
813
814 PRINT(KERN_INFO, ohci->host->id,
815 "iso context %d listen on channel %d",
816 d->ctx, v.channel);
817 }
818 else {
819 d = alloc_dma_iso_ctx(ohci, OHCI_ISO_TRANSMIT,
820 v.nb_buffers, v.buf_size,
821 v.channel, v.packet_size);
822
823 if (d == NULL) {
824 PRINT(KERN_ERR, ohci->host->id,
825 "Couldn't allocate it context");
826 return -EAGAIN;
827 }
828 initialize_dma_it_ctx(d, v.sync_tag,
829 v.syt_offset, v.flags);
830
831 ctx->current_ctx = d;
832
833 v.buf_size = d->buf_size;
834
835 list_add_tail(&d->link, &ctx->context_list);
836
837 PRINT(KERN_INFO, ohci->host->id,
838 "Iso context %d talk on channel %d", d->ctx,
839 v.channel);
840 }
841
842		if (copy_to_user(argp, &v, sizeof(v))) {
843 /* FIXME : free allocated dma resources */
844 return -EFAULT;
845 }
846
847 ohci->ISO_channel_usage |= mask;
848
849 return 0;
850 }
851 case VIDEO1394_IOC_UNLISTEN_CHANNEL:
852 case VIDEO1394_IOC_UNTALK_CHANNEL:
853 {
854 int channel;
855 u64 mask;
856 struct dma_iso_ctx *d;
857
858 if (copy_from_user(&channel, argp, sizeof(int)))
859 return -EFAULT;
860
861 if (channel < 0 || channel >= ISO_CHANNELS) {
862 PRINT(KERN_ERR, ohci->host->id,
863			      "Iso channel %d out of bounds", channel);
864 return -EINVAL;
865 }
866 mask = (u64)0x1<<channel;
867 if (!(ohci->ISO_channel_usage & mask)) {
868 PRINT(KERN_ERR, ohci->host->id,
869 "Channel %d is not being used", channel);
870 return -ESRCH;
871 }
872
873 /* Mark this channel as unused */
874 ohci->ISO_channel_usage &= ~mask;
875
876 if (cmd == VIDEO1394_IOC_UNLISTEN_CHANNEL)
877 d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, channel);
878 else
879 d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, channel);
880
881 if (d == NULL) return -ESRCH;
882		PRINT(KERN_INFO, ohci->host->id, "Iso context %d stops %s on channel %d",
883		      d->ctx, cmd == VIDEO1394_IOC_UNLISTEN_CHANNEL ? "listening" : "talking", channel);
884 free_dma_iso_ctx(d);
885
886 return 0;
887 }
888 case VIDEO1394_IOC_LISTEN_QUEUE_BUFFER:
889 {
890 struct video1394_wait v;
891 struct dma_iso_ctx *d;
892
893 if (copy_from_user(&v, argp, sizeof(v)))
894 return -EFAULT;
895
896 d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel);
897 if (d == NULL) return -EFAULT;
898
899		if (v.buffer >= d->num_desc) {
900 PRINT(KERN_ERR, ohci->host->id,
901 "Buffer %d out of range",v.buffer);
902 return -EINVAL;
903 }
904
905 spin_lock_irqsave(&d->lock,flags);
906
907 if (d->buffer_status[v.buffer]==VIDEO1394_BUFFER_QUEUED) {
908 PRINT(KERN_ERR, ohci->host->id,
909 "Buffer %d is already used",v.buffer);
910 spin_unlock_irqrestore(&d->lock,flags);
911 return -EBUSY;
912 }
913
914 d->buffer_status[v.buffer]=VIDEO1394_BUFFER_QUEUED;
915
916 if (d->last_buffer>=0)
917 d->ir_prg[d->last_buffer][d->nb_cmd-1].branchAddress =
918 cpu_to_le32((dma_prog_region_offset_to_bus(&d->prg_reg[v.buffer], 0)
919 & 0xfffffff0) | 0x1);
920
921 d->last_buffer = v.buffer;
922
923 d->ir_prg[d->last_buffer][d->nb_cmd-1].branchAddress = 0;
924
925 spin_unlock_irqrestore(&d->lock,flags);
926
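		/*
		 * ContextControl bit 15 (0x8000) is "run", bit 12 (0x1000)
		 * is "wake" and bit 10 (0x400) is "active".  If the context
		 * is not running yet, point CommandPtr at this buffer's
		 * program (low bits = Z value) and start it; otherwise just
		 * wake it so it re-fetches the branch patched in above.
		 */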
927 if (!(reg_read(ohci, d->ctrlSet) & 0x8000))
928 {
929 DBGMSG(ohci->host->id, "Starting iso DMA ctx=%d",d->ctx);
930
931 /* Tell the controller where the first program is */
932 reg_write(ohci, d->cmdPtr,
933 dma_prog_region_offset_to_bus(&d->prg_reg[v.buffer], 0) | 0x1);
934
935 /* Run IR context */
936 reg_write(ohci, d->ctrlSet, 0x8000);
937 }
938 else {
939 /* Wake up dma context if necessary */
940 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
941 PRINT(KERN_INFO, ohci->host->id,
942 "Waking up iso dma ctx=%d", d->ctx);
943 reg_write(ohci, d->ctrlSet, 0x1000);
944 }
945 }
946 return 0;
947
948 }
949 case VIDEO1394_IOC_LISTEN_WAIT_BUFFER:
950 case VIDEO1394_IOC_LISTEN_POLL_BUFFER:
951 {
952 struct video1394_wait v;
953 struct dma_iso_ctx *d;
954 int i;
955
956 if (copy_from_user(&v, argp, sizeof(v)))
957 return -EFAULT;
958
959 d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel);
960 if (d == NULL) return -EFAULT;
961
962		if (v.buffer >= d->num_desc) {
963 PRINT(KERN_ERR, ohci->host->id,
964 "Buffer %d out of range",v.buffer);
965 return -EINVAL;
966 }
967
968		/*
969		 * Return the requested buffer and, via the look-ahead below,
970		 * how many newer buffers have already been received.
971		 */
972 spin_lock_irqsave(&d->lock, flags);
973 switch(d->buffer_status[v.buffer]) {
974 case VIDEO1394_BUFFER_READY:
975 d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
976 break;
977 case VIDEO1394_BUFFER_QUEUED:
978 if (cmd == VIDEO1394_IOC_LISTEN_POLL_BUFFER) {
979 /* for polling, return error code EINTR */
980 spin_unlock_irqrestore(&d->lock, flags);
981 return -EINTR;
982 }
983
984 spin_unlock_irqrestore(&d->lock, flags);
985 wait_event_interruptible(d->waitq,
986 video1394_buffer_state(d, v.buffer) ==
987 VIDEO1394_BUFFER_READY);
988 if (signal_pending(current))
989 return -EINTR;
990 spin_lock_irqsave(&d->lock, flags);
991 d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
992 break;
993 default:
994 PRINT(KERN_ERR, ohci->host->id,
995 "Buffer %d is not queued",v.buffer);
996 spin_unlock_irqrestore(&d->lock, flags);
997 return -ESRCH;
998 }
999
1000 /* set time of buffer */
1001 v.filltime = d->buffer_time[v.buffer];
1002// printk("Buffer %d time %d\n", v.buffer, (d->buffer_time[v.buffer]).tv_usec);
1003
1004 /*
1005 * Look ahead to see how many more buffers have been received
1006 */
1007 i=0;
1008 while (d->buffer_status[(v.buffer+1)%d->num_desc]==
1009 VIDEO1394_BUFFER_READY) {
1010 v.buffer=(v.buffer+1)%d->num_desc;
1011 i++;
1012 }
1013 spin_unlock_irqrestore(&d->lock, flags);
1014
1015 v.buffer=i;
1016 if (copy_to_user(argp, &v, sizeof(v)))
1017 return -EFAULT;
1018
1019 return 0;
1020 }
1021 case VIDEO1394_IOC_TALK_QUEUE_BUFFER:
1022 {
1023 struct video1394_wait v;
1024 unsigned int *psizes = NULL;
1025 struct dma_iso_ctx *d;
1026
1027 if (copy_from_user(&v, argp, sizeof(v)))
1028 return -EFAULT;
1029
1030 d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel);
1031 if (d == NULL) return -EFAULT;
1032
1033		if (v.buffer >= d->num_desc) {
1034 PRINT(KERN_ERR, ohci->host->id,
1035 "Buffer %d out of range",v.buffer);
1036 return -EINVAL;
1037 }
1038
1039 if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
1040 int buf_size = d->nb_cmd * sizeof(unsigned int);
1041 struct video1394_queue_variable __user *p = argp;
1042 unsigned int __user *qv;
1043
1044 if (get_user(qv, &p->packet_sizes))
1045 return -EFAULT;
1046
1047 psizes = kmalloc(buf_size, GFP_KERNEL);
1048 if (!psizes)
1049 return -ENOMEM;
1050
1051 if (copy_from_user(psizes, qv, buf_size)) {
1052 kfree(psizes);
1053 return -EFAULT;
1054 }
1055 }
1056
1057 spin_lock_irqsave(&d->lock,flags);
1058
1059 if (d->buffer_status[v.buffer]!=VIDEO1394_BUFFER_FREE) {
1060 PRINT(KERN_ERR, ohci->host->id,
1061 "Buffer %d is already used",v.buffer);
1062 spin_unlock_irqrestore(&d->lock,flags);
1063 if (psizes)
1064 kfree(psizes);
1065 return -EBUSY;
1066 }
1067
1068 if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
1069 initialize_dma_it_prg_var_packet_queue(
1070 d, v.buffer, psizes,
1071 ohci);
1072 }
1073
1074 d->buffer_status[v.buffer]=VIDEO1394_BUFFER_QUEUED;
1075
1076 if (d->last_buffer >= 0) {
1077 d->it_prg[d->last_buffer]
1078 [ d->last_used_cmd[d->last_buffer] ].end.branchAddress =
1079 cpu_to_le32((dma_prog_region_offset_to_bus(&d->prg_reg[v.buffer],
1080 0) & 0xfffffff0) | 0x3);
1081
1082 d->it_prg[d->last_buffer]
1083 [ d->last_used_cmd[d->last_buffer] ].begin.branchAddress =
1084 cpu_to_le32((dma_prog_region_offset_to_bus(&d->prg_reg[v.buffer],
1085 0) & 0xfffffff0) | 0x3);
1086 d->next_buffer[d->last_buffer] = v.buffer;
1087 }
1088 d->last_buffer = v.buffer;
1089 d->next_buffer[d->last_buffer] = -1;
1090
1091 d->it_prg[d->last_buffer][d->last_used_cmd[d->last_buffer]].end.branchAddress = 0;
1092
1093 spin_unlock_irqrestore(&d->lock,flags);
1094
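		/*
		 * Same run/wake logic as on the receive side, but with a Z
		 * value of 3 for the transmit descriptor blocks; the SYT
		 * timestamp of the queued buffer is patched in just before
		 * the context is started or woken.
		 */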
1095 if (!(reg_read(ohci, d->ctrlSet) & 0x8000))
1096 {
1097 DBGMSG(ohci->host->id, "Starting iso transmit DMA ctx=%d",
1098 d->ctx);
1099 put_timestamp(ohci, d, d->last_buffer);
1100
1101 /* Tell the controller where the first program is */
1102 reg_write(ohci, d->cmdPtr,
1103 dma_prog_region_offset_to_bus(&d->prg_reg[v.buffer], 0) | 0x3);
1104
1105 /* Run IT context */
1106 reg_write(ohci, d->ctrlSet, 0x8000);
1107 }
1108 else {
1109 /* Wake up dma context if necessary */
1110 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
1111 PRINT(KERN_INFO, ohci->host->id,
1112 "Waking up iso transmit dma ctx=%d",
1113 d->ctx);
1114 put_timestamp(ohci, d, d->last_buffer);
1115 reg_write(ohci, d->ctrlSet, 0x1000);
1116 }
1117 }
1118
1119 if (psizes)
1120 kfree(psizes);
1121
1122 return 0;
1123
1124 }
1125 case VIDEO1394_IOC_TALK_WAIT_BUFFER:
1126 {
1127 struct video1394_wait v;
1128 struct dma_iso_ctx *d;
1129
1130 if (copy_from_user(&v, argp, sizeof(v)))
1131 return -EFAULT;
1132
1133 d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel);
1134 if (d == NULL) return -EFAULT;
1135
1136		if (v.buffer >= d->num_desc) {
1137 PRINT(KERN_ERR, ohci->host->id,
1138 "Buffer %d out of range",v.buffer);
1139 return -EINVAL;
1140 }
1141
1142 switch(d->buffer_status[v.buffer]) {
1143 case VIDEO1394_BUFFER_READY:
1144 d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
1145 return 0;
1146 case VIDEO1394_BUFFER_QUEUED:
1147 wait_event_interruptible(d->waitq,
1148 (d->buffer_status[v.buffer] == VIDEO1394_BUFFER_READY));
1149 if (signal_pending(current))
1150 return -EINTR;
1151 d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
1152 return 0;
1153 default:
1154 PRINT(KERN_ERR, ohci->host->id,
1155 "Buffer %d is not queued",v.buffer);
1156 return -ESRCH;
1157 }
1158 }
1159 default:
1160 return -ENOTTY;
1161 }
1162}
1163
1164static long video1394_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1165{
1166 int err;
1167 lock_kernel();
1168 err = __video1394_ioctl(file, cmd, arg);
1169 unlock_kernel();
1170 return err;
1171}
1172
1173/*
1174 * This maps the vmalloced and reserved buffer to user space.
1175 *
1176 * FIXME:
1177 * - PAGE_READONLY should suffice!?
1178 * - remap_pfn_range is kind of inefficient for page by page remapping.
1179 * But e.g. pte_alloc() does not work in modules ... :-(
1180 */
1181
1182static int video1394_mmap(struct file *file, struct vm_area_struct *vma)
1183{
1184 struct file_ctx *ctx = (struct file_ctx *)file->private_data;
1185 int res = -EINVAL;
1186
1187 lock_kernel();
1188 if (ctx->current_ctx == NULL) {
1189 PRINT(KERN_ERR, ctx->ohci->host->id, "Current iso context not set");
1190 } else
1191 res = dma_region_mmap(&ctx->current_ctx->dma, file, vma);
1192 unlock_kernel();
1193
1194 return res;
1195}
1196
1197static int video1394_open(struct inode *inode, struct file *file)
1198{
1199 int i = ieee1394_file_to_instance(file);
1200 struct ti_ohci *ohci;
1201 struct file_ctx *ctx;
1202
1203 ohci = hpsb_get_hostinfo_bykey(&video1394_highlevel, i);
1204 if (ohci == NULL)
1205 return -EIO;
1206
1207 ctx = kmalloc(sizeof(struct file_ctx), GFP_KERNEL);
1208 if (ctx == NULL) {
1209 PRINT(KERN_ERR, ohci->host->id, "Cannot malloc file_ctx");
1210 return -ENOMEM;
1211 }
1212
1213 memset(ctx, 0, sizeof(struct file_ctx));
1214 ctx->ohci = ohci;
1215 INIT_LIST_HEAD(&ctx->context_list);
1216 ctx->current_ctx = NULL;
1217 file->private_data = ctx;
1218
1219 return 0;
1220}
1221
1222static int video1394_release(struct inode *inode, struct file *file)
1223{
1224 struct file_ctx *ctx = (struct file_ctx *)file->private_data;
1225 struct ti_ohci *ohci = ctx->ohci;
1226 struct list_head *lh, *next;
1227 u64 mask;
1228
1229 lock_kernel();
1230 list_for_each_safe(lh, next, &ctx->context_list) {
1231 struct dma_iso_ctx *d;
1232 d = list_entry(lh, struct dma_iso_ctx, link);
1233 mask = (u64) 1 << d->channel;
1234
1235 if (!(ohci->ISO_channel_usage & mask))
1236 PRINT(KERN_ERR, ohci->host->id, "On release: Channel %d "
1237 "is not being used", d->channel);
1238 else
1239 ohci->ISO_channel_usage &= ~mask;
1240 PRINT(KERN_INFO, ohci->host->id, "On release: Iso %s context "
1241 "%d stop listening on channel %d",
1242 d->type == OHCI_ISO_RECEIVE ? "receive" : "transmit",
1243 d->ctx, d->channel);
1244 free_dma_iso_ctx(d);
1245 }
1246
1247 kfree(ctx);
1248 file->private_data = NULL;
1249
1250 unlock_kernel();
1251 return 0;
1252}
1253
1254#ifdef CONFIG_COMPAT
1255static long video1394_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
1256#endif
1257
1258static struct cdev video1394_cdev;
1259static struct file_operations video1394_fops=
1260{
1261 .owner = THIS_MODULE,
1262 .unlocked_ioctl = video1394_ioctl,
1263#ifdef CONFIG_COMPAT
1264 .compat_ioctl = video1394_compat_ioctl,
1265#endif
1266 .mmap = video1394_mmap,
1267 .open = video1394_open,
1268 .release = video1394_release
1269};
1270
1271/*** HOTPLUG STUFF **********************************************************/
1272/*
1273 * Export information about protocols/devices supported by this driver.
1274 */
1275static struct ieee1394_device_id video1394_id_table[] = {
1276 {
1277 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
1278 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
1279 .version = CAMERA_SW_VERSION_ENTRY & 0xffffff
1280 },
1281 {
1282 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
1283 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
1284 .version = (CAMERA_SW_VERSION_ENTRY + 1) & 0xffffff
1285 },
1286 {
1287 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
1288 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
1289 .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff
1290 },
1291 { }
1292};
1293
1294MODULE_DEVICE_TABLE(ieee1394, video1394_id_table);
1295
1296static struct hpsb_protocol_driver video1394_driver = {
1297 .name = "1394 Digital Camera Driver",
1298 .id_table = video1394_id_table,
1299 .driver = {
1300 .name = VIDEO1394_DRIVER_NAME,
1301 .bus = &ieee1394_bus_type,
1302 },
1303};
1304
1305
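/*
 * Each OHCI-1394 host gets one video1394 character device, minor number
 * IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + host id, exposed both through
 * the ieee1394 class and as a devfs node named video1394/<id>.
 */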
1306static void video1394_add_host (struct hpsb_host *host)
1307{
1308 struct ti_ohci *ohci;
1309 int minor;
1310
1311 /* We only work with the OHCI-1394 driver */
1312 if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
1313 return;
1314
1315 ohci = (struct ti_ohci *)host->hostdata;
1316
1317 if (!hpsb_create_hostinfo(&video1394_highlevel, host, 0)) {
1318 PRINT(KERN_ERR, ohci->host->id, "Cannot allocate hostinfo");
1319 return;
1320 }
1321
1322 hpsb_set_hostinfo(&video1394_highlevel, host, ohci);
1323 hpsb_set_hostinfo_key(&video1394_highlevel, host, ohci->host->id);
1324
1325 minor = IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id;
1326 class_simple_device_add(hpsb_protocol_class, MKDEV(
1327 IEEE1394_MAJOR, minor),
1328 NULL, "%s-%d", VIDEO1394_DRIVER_NAME, ohci->host->id);
1329 devfs_mk_cdev(MKDEV(IEEE1394_MAJOR, minor),
1330 S_IFCHR | S_IRUSR | S_IWUSR,
1331 "%s/%d", VIDEO1394_DRIVER_NAME, ohci->host->id);
1332}
1333
1334
1335static void video1394_remove_host (struct hpsb_host *host)
1336{
1337 struct ti_ohci *ohci = hpsb_get_hostinfo(&video1394_highlevel, host);
1338
1339 if (ohci) {
1340 class_simple_device_remove(MKDEV(IEEE1394_MAJOR,
1341 IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id));
1342 devfs_remove("%s/%d", VIDEO1394_DRIVER_NAME, ohci->host->id);
1343 }
1344
1345 return;
1346}
1347
1348
1349static struct hpsb_highlevel video1394_highlevel = {
1350 .name = VIDEO1394_DRIVER_NAME,
1351 .add_host = video1394_add_host,
1352 .remove_host = video1394_remove_host,
1353};
1354
1355MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
1356MODULE_DESCRIPTION("driver for digital video on OHCI board");
1357MODULE_SUPPORTED_DEVICE(VIDEO1394_DRIVER_NAME);
1358MODULE_LICENSE("GPL");
1359
1360#ifdef CONFIG_COMPAT
1361
1362#define VIDEO1394_IOC32_LISTEN_QUEUE_BUFFER \
1363 _IOW ('#', 0x12, struct video1394_wait32)
1364#define VIDEO1394_IOC32_LISTEN_WAIT_BUFFER \
1365 _IOWR('#', 0x13, struct video1394_wait32)
1366#define VIDEO1394_IOC32_TALK_WAIT_BUFFER \
1367 _IOW ('#', 0x17, struct video1394_wait32)
1368#define VIDEO1394_IOC32_LISTEN_POLL_BUFFER \
1369 _IOWR('#', 0x18, struct video1394_wait32)
1370
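/*
 * 32-bit compatibility: struct video1394_wait contains a struct timeval,
 * whose layout differs between 32-bit and 64-bit user space, so the
 * ioctls that pass it get distinct 32-bit command numbers and are
 * translated here before being handed on to video1394_ioctl().
 */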
1371struct video1394_wait32 {
1372 u32 channel;
1373 u32 buffer;
1374 struct compat_timeval filltime;
1375};
1376
1377static int video1394_wr_wait32(struct file *file, unsigned int cmd, unsigned long arg)
1378{
1379 struct video1394_wait32 __user *argp = (void __user *)arg;
1380 struct video1394_wait32 wait32;
1381 struct video1394_wait wait;
1382 mm_segment_t old_fs;
1383 int ret;
1384
1385 if (copy_from_user(&wait32, argp, sizeof(wait32)))
1386 return -EFAULT;
1387
1388 wait.channel = wait32.channel;
1389 wait.buffer = wait32.buffer;
1390 wait.filltime.tv_sec = (time_t)wait32.filltime.tv_sec;
1391 wait.filltime.tv_usec = (suseconds_t)wait32.filltime.tv_usec;
1392
1393 old_fs = get_fs();
1394 set_fs(KERNEL_DS);
1395 if (cmd == VIDEO1394_IOC32_LISTEN_WAIT_BUFFER)
1396 ret = video1394_ioctl(file,
1397 VIDEO1394_IOC_LISTEN_WAIT_BUFFER,
1398 (unsigned long) &wait);
1399 else
1400 ret = video1394_ioctl(file,
1401 VIDEO1394_IOC_LISTEN_POLL_BUFFER,
1402 (unsigned long) &wait);
1403 set_fs(old_fs);
1404
1405 if (!ret) {
1406 wait32.channel = wait.channel;
1407 wait32.buffer = wait.buffer;
1408 wait32.filltime.tv_sec = (int)wait.filltime.tv_sec;
1409 wait32.filltime.tv_usec = (int)wait.filltime.tv_usec;
1410
1411 if (copy_to_user(argp, &wait32, sizeof(wait32)))
1412 ret = -EFAULT;
1413 }
1414
1415 return ret;
1416}
1417
1418static int video1394_w_wait32(struct file *file, unsigned int cmd, unsigned long arg)
1419{
1420 struct video1394_wait32 wait32;
1421 struct video1394_wait wait;
1422 mm_segment_t old_fs;
1423 int ret;
1424
1425 if (copy_from_user(&wait32, (void __user *)arg, sizeof(wait32)))
1426 return -EFAULT;
1427
1428 wait.channel = wait32.channel;
1429 wait.buffer = wait32.buffer;
1430 wait.filltime.tv_sec = (time_t)wait32.filltime.tv_sec;
1431 wait.filltime.tv_usec = (suseconds_t)wait32.filltime.tv_usec;
1432
1433 old_fs = get_fs();
1434 set_fs(KERNEL_DS);
1435 if (cmd == VIDEO1394_IOC32_LISTEN_QUEUE_BUFFER)
1436 ret = video1394_ioctl(file,
1437 VIDEO1394_IOC_LISTEN_QUEUE_BUFFER,
1438 (unsigned long) &wait);
1439 else
1440 ret = video1394_ioctl(file,
1441 VIDEO1394_IOC_TALK_WAIT_BUFFER,
1442 (unsigned long) &wait);
1443 set_fs(old_fs);
1444
1445 return ret;
1446}
1447
1448static int video1394_queue_buf32(struct file *file, unsigned int cmd, unsigned long arg)
1449{
1450	return -EFAULT;	/* no 32-bit translation for TALK_QUEUE_BUFFER yet; the call below is unreachable */
1451
1452 return video1394_ioctl(file,
1453 VIDEO1394_IOC_TALK_QUEUE_BUFFER, arg);
1454}
1455
1456static long video1394_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
1457{
1458 switch (cmd) {
1459 case VIDEO1394_IOC_LISTEN_CHANNEL:
1460 case VIDEO1394_IOC_UNLISTEN_CHANNEL:
1461 case VIDEO1394_IOC_TALK_CHANNEL:
1462 case VIDEO1394_IOC_UNTALK_CHANNEL:
1463 return video1394_ioctl(f, cmd, arg);
1464
1465 case VIDEO1394_IOC32_LISTEN_QUEUE_BUFFER:
1466 return video1394_w_wait32(f, cmd, arg);
1467 case VIDEO1394_IOC32_LISTEN_WAIT_BUFFER:
1468 return video1394_wr_wait32(f, cmd, arg);
1469 case VIDEO1394_IOC_TALK_QUEUE_BUFFER:
1470 return video1394_queue_buf32(f, cmd, arg);
1471 case VIDEO1394_IOC32_TALK_WAIT_BUFFER:
1472 return video1394_w_wait32(f, cmd, arg);
1473 case VIDEO1394_IOC32_LISTEN_POLL_BUFFER:
1474 return video1394_wr_wait32(f, cmd, arg);
1475 default:
1476 return -ENOIOCTLCMD;
1477 }
1478}
1479
1480#endif /* CONFIG_COMPAT */
1481
1482static void __exit video1394_exit_module (void)
1483{
1484 hpsb_unregister_protocol(&video1394_driver);
1485
1486 hpsb_unregister_highlevel(&video1394_highlevel);
1487
1488 devfs_remove(VIDEO1394_DRIVER_NAME);
1489 cdev_del(&video1394_cdev);
1490
1491 PRINT_G(KERN_INFO, "Removed " VIDEO1394_DRIVER_NAME " module");
1492}
1493
1494static int __init video1394_init_module (void)
1495{
1496 int ret;
1497
1498 cdev_init(&video1394_cdev, &video1394_fops);
1499 video1394_cdev.owner = THIS_MODULE;
1500 kobject_set_name(&video1394_cdev.kobj, VIDEO1394_DRIVER_NAME);
1501 ret = cdev_add(&video1394_cdev, IEEE1394_VIDEO1394_DEV, 16);
1502 if (ret) {
1503 PRINT_G(KERN_ERR, "video1394: unable to get minor device block");
1504 return ret;
1505 }
1506
1507 devfs_mk_dir(VIDEO1394_DRIVER_NAME);
1508
1509 hpsb_register_highlevel(&video1394_highlevel);
1510
1511 ret = hpsb_register_protocol(&video1394_driver);
1512 if (ret) {
1513 PRINT_G(KERN_ERR, "video1394: failed to register protocol");
1514 hpsb_unregister_highlevel(&video1394_highlevel);
1515 devfs_remove(VIDEO1394_DRIVER_NAME);
1516 cdev_del(&video1394_cdev);
1517 return ret;
1518 }
1519
1520 PRINT_G(KERN_INFO, "Installed " VIDEO1394_DRIVER_NAME " module");
1521 return 0;
1522}
1523
1524
1525module_init(video1394_init_module);
1526module_exit(video1394_exit_module);
1527MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16);
diff --git a/drivers/ieee1394/video1394.h b/drivers/ieee1394/video1394.h
new file mode 100644
index 000000000000..9a89d9cc3c85
--- /dev/null
+++ b/drivers/ieee1394/video1394.h
@@ -0,0 +1,67 @@
1/*
2 * video1394.h - driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#ifndef _VIDEO_1394_H
22#define _VIDEO_1394_H
23
24#include "ieee1394-ioctl.h"
25
26#define VIDEO1394_DRIVER_NAME "video1394"
27
28#define VIDEO1394_MAX_SIZE 0x4000000
29
30enum {
31 VIDEO1394_BUFFER_FREE = 0,
32 VIDEO1394_BUFFER_QUEUED,
33 VIDEO1394_BUFFER_READY
34};
35
36#define VIDEO1394_SYNC_FRAMES 0x00000001
37#define VIDEO1394_INCLUDE_ISO_HEADERS 0x00000002
38#define VIDEO1394_VARIABLE_PACKET_SIZE 0x00000004
39
40struct video1394_mmap {
41 int channel; /* -1 to find an open channel in LISTEN/TALK */
42 unsigned int sync_tag;
43 unsigned int nb_buffers;
44 unsigned int buf_size;
45 unsigned int packet_size; /* For VARIABLE_PACKET_SIZE:
46 Maximum packet size */
47 unsigned int fps;
48 unsigned int syt_offset;
49 unsigned int flags;
50};
51
52/* For TALK_QUEUE_BUFFER with VIDEO1394_VARIABLE_PACKET_SIZE use */
53struct video1394_queue_variable {
54 unsigned int channel;
55 unsigned int buffer;
56 unsigned int __user * packet_sizes; /* Buffer of size:
57 buf_size / packet_size */
58};
59
60struct video1394_wait {
61 unsigned int channel;
62 unsigned int buffer;
63 struct timeval filltime; /* time of buffer full */
64};
65
66
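/*
 * Illustrative user-space capture sequence (a minimal sketch only, not
 * part of the driver; assumes the device node created by the driver,
 * e.g. /dev/video1394/0):
 *
 *	struct video1394_mmap m = {
 *		.channel    = -1,		// let the driver pick a free channel
 *		.sync_tag   = 1,
 *		.nb_buffers = 4,
 *		.buf_size   = 320 * 1024,	// rounded up to a page multiple
 *	};
 *	int fd = open("/dev/video1394/0", O_RDWR);
 *	ioctl(fd, VIDEO1394_IOC_LISTEN_CHANNEL, &m);
 *	unsigned char *frames = mmap(NULL, m.nb_buffers * m.buf_size,
 *				     PROT_READ, MAP_SHARED, fd, 0);
 *
 *	struct video1394_wait w = { .channel = m.channel, .buffer = 0 };
 *	ioctl(fd, VIDEO1394_IOC_LISTEN_QUEUE_BUFFER, &w);
 *	ioctl(fd, VIDEO1394_IOC_LISTEN_WAIT_BUFFER, &w);
 *	// buffer 0 is now filled at frames + 0 * m.buf_size;
 *	// re-queue it and move on to the next buffer index.
 *
 *	ioctl(fd, VIDEO1394_IOC_UNLISTEN_CHANNEL, &m.channel);
 */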
67#endif