-rw-r--r--  drivers/Makefile                     |    1
-rw-r--r--  drivers/firewire/Kconfig             |   61
-rw-r--r--  drivers/firewire/Makefile            |   10
-rw-r--r--  drivers/firewire/fw-card.c           |  560
-rw-r--r--  drivers/firewire/fw-cdev.c           |  961
-rw-r--r--  drivers/firewire/fw-device.c         |  813
-rw-r--r--  drivers/firewire/fw-device.h         |  146
-rw-r--r--  drivers/firewire/fw-iso.c            |  163
-rw-r--r--  drivers/firewire/fw-ohci.c           | 1943
-rw-r--r--  drivers/firewire/fw-ohci.h           |  153
-rw-r--r--  drivers/firewire/fw-sbp2.c           | 1147
-rw-r--r--  drivers/firewire/fw-topology.c       |  537
-rw-r--r--  drivers/firewire/fw-topology.h       |   92
-rw-r--r--  drivers/firewire/fw-transaction.c    |  910
-rw-r--r--  drivers/firewire/fw-transaction.h    |  458
-rw-r--r--  drivers/ieee1394/Kconfig             |    2
-rw-r--r--  include/linux/crc-itu-t.h            |   28
-rw-r--r--  include/linux/firewire-cdev.h        |  229
-rw-r--r--  include/linux/firewire-constants.h   |   67
-rw-r--r--  lib/Kconfig                          |    8
-rw-r--r--  lib/Makefile                         |    1
-rw-r--r--  lib/crc-itu-t.c                      |   69
22 files changed, 8359 insertions(+), 0 deletions(-)
diff --git a/drivers/Makefile b/drivers/Makefile
index 26ca9031ea49..adad2f3d438a 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_FC4) += fc4/
 obj-$(CONFIG_SCSI) += scsi/
 obj-$(CONFIG_ATA) += ata/
 obj-$(CONFIG_FUSION) += message/
+obj-$(CONFIG_FIREWIRE) += firewire/
 obj-$(CONFIG_IEEE1394) += ieee1394/
 obj-y += cdrom/
 obj-y += auxdisplay/
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
new file mode 100644
index 000000000000..5932c72f9e42
--- /dev/null
+++ b/drivers/firewire/Kconfig
@@ -0,0 +1,61 @@
+# -*- shell-script -*-
+
+comment "An alternative FireWire stack is available with EXPERIMENTAL=y"
+	depends on EXPERIMENTAL=n
+
+config FIREWIRE
+	tristate "IEEE 1394 (FireWire) support (JUJU alternative stack, experimental)"
+	depends on EXPERIMENTAL
+	select CRC_ITU_T
+	help
+	  IEEE 1394 describes a high performance serial bus, which is also
+	  known as FireWire(tm) or i.Link(tm) and is used for connecting all
+	  sorts of devices (most notably digital video cameras) to your
+	  computer.
+
+	  If you have FireWire hardware and want to use it, say Y here.  This
+	  is the core support only; you will also need to select a driver for
+	  your IEEE 1394 adapter.
+
+	  To compile this driver as a module, say M here: the module will be
+	  called fw-core.
+
+	  This is the "JUJU" FireWire stack, an alternative implementation
+	  designed for robustness and simplicity.  You can build either this
+	  stack, or the classic stack (the ieee1394 driver, ohci1394 etc.),
+	  or both.
+
+config FIREWIRE_OHCI
+	tristate "Support for OHCI FireWire host controllers"
+	depends on PCI && FIREWIRE
+	help
+	  Enable this driver if you have a FireWire controller based
+	  on the OHCI specification.  For all practical purposes, this
+	  is the only chipset in use, so say Y here.
+
+	  To compile this driver as a module, say M here: The module will be
+	  called fw-ohci.
+
+	  If you also build ohci1394 of the classic IEEE 1394 driver stack,
+	  blacklist either ohci1394 or fw-ohci to let hotplug load the desired
+	  driver.
+
+config FIREWIRE_SBP2
+	tristate "Support for storage devices (SBP-2 protocol driver)"
+	depends on FIREWIRE && SCSI
+	help
+	  This option enables you to use SBP-2 devices connected to a
+	  FireWire bus.  SBP-2 devices include storage devices like
+	  hard disks and DVD drives, but also some other FireWire devices
+	  such as scanners.
+
+	  To compile this driver as a module, say M here: The module will be
+	  called fw-sbp2.
+
+	  You should also enable support for disks, CD-ROMs, etc. in the SCSI
+	  configuration section.
+
+	  If you also build sbp2 of the classic IEEE 1394 driver stack,
+	  blacklist either sbp2 or fw-sbp2 to let hotplug load the desired
+	  driver.
+
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
new file mode 100644
index 000000000000..fc7d59d4bce0
--- /dev/null
+++ b/drivers/firewire/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Linux IEEE 1394 implementation
+#
+
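+# fw-core.ko is built from the core objects collected in fw-core-y below;
+# the OHCI controller driver and the SBP-2 protocol driver are separate
+# modules layered on top of it.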
+fw-core-y += fw-card.o fw-topology.o fw-transaction.o fw-iso.o \
+	     fw-device.o fw-cdev.o
+
+obj-$(CONFIG_FIREWIRE) += fw-core.o
+obj-$(CONFIG_FIREWIRE_OHCI) += fw-ohci.o
+obj-$(CONFIG_FIREWIRE_SBP2) += fw-sbp2.o
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
new file mode 100644
index 000000000000..636151a64add
--- /dev/null
+++ b/drivers/firewire/fw-card.c
@@ -0,0 +1,560 @@
+/*
+ * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/crc-itu-t.h>
+#include "fw-transaction.h"
+#include "fw-topology.h"
+#include "fw-device.h"
+
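+/*
+ * Compute the CRC-16 (ITU-T) of one config ROM block.  The header
+ * quadlet at block[0] carries the number of body quadlets in bits
+ * 16-23; the CRC is computed over the body in big-endian byte order
+ * and OR'ed into the low 16 bits of the header, which the caller is
+ * expected to have left zeroed.  Returns the body length in quadlets.
+ */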
+int fw_compute_block_crc(u32 *block)
+{
+	__be32 be32_block[256];
+	int i, length;
+
+	length = (*block >> 16) & 0xff;
+	for (i = 0; i < length; i++)
+		be32_block[i] = cpu_to_be32(block[i + 1]);
+	*block |= crc_itu_t(0, (u8 *) be32_block, length * 4);
+
+	return length;
+}
+
+static DEFINE_MUTEX(card_mutex);
+static LIST_HEAD(card_list);
+
+static LIST_HEAD(descriptor_list);
+static int descriptor_count;
+
+#define BIB_CRC(v)		((v) <<  0)
+#define BIB_CRC_LENGTH(v)	((v) << 16)
+#define BIB_INFO_LENGTH(v)	((v) << 24)
+
+#define BIB_LINK_SPEED(v)	((v) <<  0)
+#define BIB_GENERATION(v)	((v) <<  4)
+#define BIB_MAX_ROM(v)		((v) <<  8)
+#define BIB_MAX_RECEIVE(v)	((v) << 12)
+#define BIB_CYC_CLK_ACC(v)	((v) << 16)
+#define BIB_PMC			((1) << 27)
+#define BIB_BMC			((1) << 28)
+#define BIB_ISC			((1) << 29)
+#define BIB_CMC			((1) << 30)
+#define BIB_IMC			((1) << 31)
+
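+/*
+ * Build the local node's config ROM: quadlets 0-4 form the bus info
+ * block (header, the bus name "1394" as 0x31333934, bus options, and
+ * the two GUID quadlets), followed by the root directory and then the
+ * blocks registered through fw_core_add_descriptor().
+ */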
+static u32 *
+generate_config_rom(struct fw_card *card, size_t *config_rom_length)
+{
+	struct fw_descriptor *desc;
+	static u32 config_rom[256];
+	int i, j, length;
+
+	/*
+	 * Initialize contents of config rom buffer.  On the OHCI
+	 * controller, block reads of the config rom access host
+	 * memory, but quadlet reads access the hardware bus info block
+	 * registers.  That's just crack, but it means we should make
+	 * sure the contents of the bus info block in host memory match
+	 * the version stored in the OHCI registers.
+	 */
+
+	memset(config_rom, 0, sizeof(config_rom));
+	config_rom[0] = BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0);
+	config_rom[1] = 0x31333934;
+
+	config_rom[2] =
+		BIB_LINK_SPEED(card->link_speed) |
+		BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
+		BIB_MAX_ROM(2) |
+		BIB_MAX_RECEIVE(card->max_receive) |
+		BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC;
+	config_rom[3] = card->guid >> 32;
+	config_rom[4] = card->guid;
+
+	/* Generate root directory. */
+	i = 5;
+	config_rom[i++] = 0;
+	config_rom[i++] = 0x0c0083c0; /* node capabilities */
+	j = i + descriptor_count;
+
+	/* Generate root directory entries for descriptors. */
+	list_for_each_entry (desc, &descriptor_list, link) {
+		if (desc->immediate > 0)
+			config_rom[i++] = desc->immediate;
+		config_rom[i] = desc->key | (j - i);
+		i++;
+		j += desc->length;
+	}
+
+	/* Update root directory length. */
+	config_rom[5] = (i - 5 - 1) << 16;
+
+	/* End of root directory, now copy in descriptors. */
+	list_for_each_entry (desc, &descriptor_list, link) {
+		memcpy(&config_rom[i], desc->data, desc->length * 4);
+		i += desc->length;
+	}
+
+	/*
+	 * Calculate CRCs for all blocks in the config rom.  This
+	 * assumes that CRC length and info length are identical for
+	 * the bus info block, which is always the case for this
+	 * implementation.
+	 */
+	for (i = 0; i < j; i += length + 1)
+		length = fw_compute_block_crc(config_rom + i);
+
+	*config_rom_length = j;
+
+	return config_rom;
+}
+
+static void
+update_config_roms(void)
+{
+	struct fw_card *card;
+	u32 *config_rom;
+	size_t length;
+
+	list_for_each_entry (card, &card_list, link) {
+		config_rom = generate_config_rom(card, &length);
+		card->driver->set_config_rom(card, config_rom, length);
+	}
+}
+
+int
+fw_core_add_descriptor(struct fw_descriptor *desc)
+{
+	size_t i;
+
+	/*
+	 * Check that the descriptor is valid; the lengths of all blocks
+	 * in the descriptor have to add up to exactly the length of
+	 * the descriptor data.
+	 */
+	i = 0;
+	while (i < desc->length)
+		i += (desc->data[i] >> 16) + 1;
+
+	if (i != desc->length)
+		return -EINVAL;
+
+	mutex_lock(&card_mutex);
+
+	list_add_tail(&desc->link, &descriptor_list);
+	descriptor_count++;
+	if (desc->immediate > 0)
+		descriptor_count++;
+	update_config_roms();
+
+	mutex_unlock(&card_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(fw_core_add_descriptor);
+
+void
+fw_core_remove_descriptor(struct fw_descriptor *desc)
+{
+	mutex_lock(&card_mutex);
+
+	list_del(&desc->link);
+	descriptor_count--;
+	if (desc->immediate > 0)
+		descriptor_count--;
+	update_config_roms();
+
+	mutex_unlock(&card_mutex);
+}
+EXPORT_SYMBOL(fw_core_remove_descriptor);
+
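+/*
+ * Gap counts indexed by the maximum hop count between any two nodes,
+ * following the gap count optimization table of IEEE 1394a; entry 0
+ * holds the default of 63, also used when the table does not apply.
+ */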
+static const char gap_count_table[] = {
+	63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
+};
+
+struct bm_data {
+	struct fw_transaction t;
+	struct {
+		__be32 arg;
+		__be32 data;
+	} lock;
+	u32 old;
+	int rcode;
+	struct completion done;
+};
+
+static void
+complete_bm_lock(struct fw_card *card, int rcode,
+		 void *payload, size_t length, void *data)
+{
+	struct bm_data *bmd = data;
+
+	if (rcode == RCODE_COMPLETE)
+		bmd->old = be32_to_cpu(*(__be32 *) payload);
+	bmd->rcode = rcode;
+	complete(&bmd->done);
+}
+
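+/*
+ * Scheduled after each bus reset: try to become bus manager by doing
+ * a compare-swap lock on the IRM's BUS_MANAGER_ID register, then make
+ * sure the bus has a cycle-master-capable root node and an optimized
+ * gap count, initiating another (short) bus reset if necessary.
+ */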
+static void
+fw_card_bm_work(struct work_struct *work)
+{
+	struct fw_card *card = container_of(work, struct fw_card, work.work);
+	struct fw_device *root;
+	struct bm_data bmd;
+	unsigned long flags;
+	int root_id, new_root_id, irm_id, gap_count, generation, grace;
+	int do_reset = 0;
+
+	spin_lock_irqsave(&card->lock, flags);
+
+	generation = card->generation;
+	root = card->root_node->data;
+	root_id = card->root_node->node_id;
+	grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
+
+	if (card->bm_generation + 1 == generation ||
+	    (card->bm_generation != generation && grace)) {
+		/*
+		 * The first step is to figure out who is IRM and
+		 * then try to become bus manager.  If the IRM is not
+		 * well defined (e.g. does not have an active link
+		 * layer or does not respond to our lock request), we
+		 * will have to do a little vigilante bus management.
+		 * In that case, we do a goto into the gap count logic
+		 * so that when we do the reset, we still optimize the
+		 * gap count.  That could well save a reset in the
+		 * next generation.
+		 */
+
+		irm_id = card->irm_node->node_id;
+		if (!card->irm_node->link_on) {
+			new_root_id = card->local_node->node_id;
+			fw_notify("IRM has link off, making local node (%02x) root.\n",
+				  new_root_id);
+			goto pick_me;
+		}
+
+		bmd.lock.arg = cpu_to_be32(0x3f);
+		bmd.lock.data = cpu_to_be32(card->local_node->node_id);
+
+		spin_unlock_irqrestore(&card->lock, flags);
+
+		init_completion(&bmd.done);
+		fw_send_request(card, &bmd.t, TCODE_LOCK_COMPARE_SWAP,
+				irm_id, generation,
+				SCODE_100, CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
+				&bmd.lock, sizeof(bmd.lock),
+				complete_bm_lock, &bmd);
+		wait_for_completion(&bmd.done);
+
+		if (bmd.rcode == RCODE_GENERATION) {
+			/*
+			 * Another bus reset happened.  Just return,
+			 * the BM work has been rescheduled.
+			 */
+			return;
+		}
+
+		if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f)
+			/* Somebody else is BM, let them do the work. */
+			return;
+
+		spin_lock_irqsave(&card->lock, flags);
+		if (bmd.rcode != RCODE_COMPLETE) {
+			/*
+			 * The lock request failed, maybe the IRM
+			 * isn't really IRM capable after all.  Let's
+			 * do a bus reset and pick the local node as
+			 * root, and thus, IRM.
+			 */
+			new_root_id = card->local_node->node_id;
+			fw_notify("BM lock failed, making local node (%02x) root.\n",
+				  new_root_id);
+			goto pick_me;
+		}
+	} else if (card->bm_generation != generation) {
+		/*
+		 * OK, we weren't BM in the last generation, and it's
+		 * less than 100ms since the last bus reset.  Reschedule
+		 * this task 100ms from now.
+		 */
+		spin_unlock_irqrestore(&card->lock, flags);
+		schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10));
+		return;
+	}
+
+	/*
+	 * We're bus manager for this generation, so the next step is
+	 * to make sure we have an active cycle master and do gap count
+	 * optimization.
+	 */
+	card->bm_generation = generation;
+
+	if (root == NULL) {
+		/*
+		 * Either link_on is false, or we failed to read the
+		 * config rom.  In either case, pick another root.
+		 */
+		new_root_id = card->local_node->node_id;
+	} else if (atomic_read(&root->state) != FW_DEVICE_RUNNING) {
+		/*
+		 * If we haven't probed this device yet, bail out now
+		 * and let's try again once that's done.
+		 */
+		spin_unlock_irqrestore(&card->lock, flags);
+		return;
+	} else if (root->config_rom[2] & BIB_CMC) {
+		/*
+		 * FIXME: I suppose we should set the cmstr bit in the
+		 * STATE_CLEAR register of this node, as described in
+		 * 1394-1995, 8.4.2.6.  Also, send out a force root
+		 * packet for this node.
+		 */
+		new_root_id = root_id;
+	} else {
+		/*
+		 * The current root has an active link layer and we
+		 * successfully read the config rom, but it's not
+		 * cycle master capable.
+		 */
+		new_root_id = card->local_node->node_id;
+	}
+
+ pick_me:
+	/* Now figure out what gap count to set. */
+	if (card->topology_type == FW_TOPOLOGY_A &&
+	    card->root_node->max_hops < ARRAY_SIZE(gap_count_table))
+		gap_count = gap_count_table[card->root_node->max_hops];
+	else
+		gap_count = 63;
+
+	/*
+	 * Finally, figure out if we should do a reset or not.  If we
+	 * have done fewer than 5 resets with the same physical topology
+	 * and we have either a new root or a new gap count setting,
+	 * let's do it.
+	 */
+
+	if (card->bm_retries++ < 5 &&
+	    (card->gap_count != gap_count || new_root_id != root_id))
+		do_reset = 1;
+
+	spin_unlock_irqrestore(&card->lock, flags);
+
+	if (do_reset) {
+		fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
+			  card->index, new_root_id, gap_count);
+		fw_send_phy_config(card, new_root_id, generation, gap_count);
+		fw_core_initiate_bus_reset(card, 1);
+	}
+}
+
+static void
+flush_timer_callback(unsigned long data)
+{
+	struct fw_card *card = (struct fw_card *)data;
+
+	fw_flush_transactions(card);
+}
+
+void
+fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
+		   struct device *device)
+{
+	static atomic_t index = ATOMIC_INIT(-1);
+
+	kref_init(&card->kref);
+	card->index = atomic_inc_return(&index);
+	card->driver = driver;
+	card->device = device;
+	card->current_tlabel = 0;
+	card->tlabel_mask = 0;
+	card->color = 0;
+
+	INIT_LIST_HEAD(&card->transaction_list);
+	spin_lock_init(&card->lock);
+	setup_timer(&card->flush_timer,
+		    flush_timer_callback, (unsigned long)card);
+
+	card->local_node = NULL;
+
+	INIT_DELAYED_WORK(&card->work, fw_card_bm_work);
+}
+EXPORT_SYMBOL(fw_card_initialize);
+
+int
+fw_card_add(struct fw_card *card,
+	    u32 max_receive, u32 link_speed, u64 guid)
+{
+	u32 *config_rom;
+	size_t length;
+
+	card->max_receive = max_receive;
+	card->link_speed = link_speed;
+	card->guid = guid;
+
+	/* Activate link_on bit and contender bit in our self ID packets. */
+	if (card->driver->update_phy_reg(card, 4, 0,
+					 PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
+		return -EIO;
+
+	/*
+	 * The subsystem grabs a reference when the card is added and
+	 * drops it when the driver calls fw_core_remove_card.
+	 */
+	fw_card_get(card);
+
+	mutex_lock(&card_mutex);
+	config_rom = generate_config_rom(card, &length);
+	list_add_tail(&card->link, &card_list);
+	mutex_unlock(&card_mutex);
+
+	return card->driver->enable(card, config_rom, length);
+}
+EXPORT_SYMBOL(fw_card_add);
+
+
+/*
+ * The next few functions implement a dummy driver that is used once a
+ * card driver shuts down an fw_card.  This allows the driver to
+ * cleanly unload, as all IO to the card will be handled by the dummy
+ * driver instead of calling into the (possibly) unloaded module.  The
+ * dummy driver just fails all IO.
+ */
+
+static int
+dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
+{
+	BUG();
+	return -1;
+}
+
+static int
+dummy_update_phy_reg(struct fw_card *card, int address,
+		     int clear_bits, int set_bits)
+{
+	return -ENODEV;
+}
+
+static int
+dummy_set_config_rom(struct fw_card *card,
+		     u32 *config_rom, size_t length)
+{
+	/*
+	 * We take the card out of card_list before setting the dummy
+	 * driver, so this should never get called.
+	 */
+	BUG();
+	return -1;
+}
+
+static void
+dummy_send_request(struct fw_card *card, struct fw_packet *packet)
+{
+	packet->callback(packet, card, -ENODEV);
+}
+
+static void
+dummy_send_response(struct fw_card *card, struct fw_packet *packet)
+{
+	packet->callback(packet, card, -ENODEV);
+}
+
+static int
+dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
+{
+	return -ENOENT;
+}
+
+static int
+dummy_enable_phys_dma(struct fw_card *card,
+		      int node_id, int generation)
+{
+	return -ENODEV;
+}
+
+static struct fw_card_driver dummy_driver = {
+	.name            = "dummy",
+	.enable          = dummy_enable,
+	.update_phy_reg  = dummy_update_phy_reg,
+	.set_config_rom  = dummy_set_config_rom,
+	.send_request    = dummy_send_request,
+	.cancel_packet   = dummy_cancel_packet,
+	.send_response   = dummy_send_response,
+	.enable_phys_dma = dummy_enable_phys_dma,
+};
+
+void
+fw_core_remove_card(struct fw_card *card)
+{
+	card->driver->update_phy_reg(card, 4,
+				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
+	fw_core_initiate_bus_reset(card, 1);
+
+	mutex_lock(&card_mutex);
+	list_del(&card->link);
+	mutex_unlock(&card_mutex);
+
+	/* Set up the dummy driver. */
+	card->driver = &dummy_driver;
+
+	fw_flush_transactions(card);
+
+	fw_destroy_nodes(card);
+
+	fw_card_put(card);
+}
+EXPORT_SYMBOL(fw_core_remove_card);
+
+struct fw_card *
+fw_card_get(struct fw_card *card)
+{
+	kref_get(&card->kref);
+
+	return card;
+}
+EXPORT_SYMBOL(fw_card_get);
+
+static void
+release_card(struct kref *kref)
+{
+	struct fw_card *card = container_of(kref, struct fw_card, kref);
+
+	kfree(card);
+}
+
+/*
+ * An assumption for fw_card_put() is that the card driver allocates
+ * the fw_card struct with kmalloc and that it has been shut down
+ * before the last ref is dropped.
+ */
+void
+fw_card_put(struct fw_card *card)
+{
+	kref_put(&card->kref, release_card);
+}
+EXPORT_SYMBOL(fw_card_put);
+
+int
+fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
+{
+	int reg = short_reset ? 5 : 1;
+	int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
+
+	return card->driver->update_phy_reg(card, reg, 0, bit);
+}
+EXPORT_SYMBOL(fw_core_initiate_bus_reset);
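
For orientation, here is a minimal sketch of how a controller driver sits on top of the fw_card API above, loosely modeled on what fw-ohci.c (later in this patch) does. Everything prefixed example_ is hypothetical; only the fw_card_* calls and the fw_card_driver hook names come from the code above, and a real driver does actual hardware work in each hook.

/* Sketch only: assumes the usual kernel headers plus "fw-transaction.h". */

static int example_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
	/* A real driver programs the config ROM and starts the link here. */
	return 0;
}

static int example_update_phy_reg(struct fw_card *card, int address,
				  int clear_bits, int set_bits)
{
	return 0;	/* read-modify-write of a PHY register */
}

static int example_set_config_rom(struct fw_card *card,
				  u32 *config_rom, size_t length)
{
	return 0;	/* swap in the new ROM on the next bus reset */
}

static void example_send_request(struct fw_card *card, struct fw_packet *p) { }
static void example_send_response(struct fw_card *card, struct fw_packet *p) { }

static int example_cancel_packet(struct fw_card *card, struct fw_packet *p)
{
	return -ENOENT;
}

static int example_enable_phys_dma(struct fw_card *card,
				   int node_id, int generation)
{
	return 0;
}

static const struct fw_card_driver example_driver = {
	.name            = "example",
	.enable          = example_enable,
	.update_phy_reg  = example_update_phy_reg,
	.set_config_rom  = example_set_config_rom,
	.send_request    = example_send_request,
	.send_response   = example_send_response,
	.cancel_packet   = example_cancel_packet,
	.enable_phys_dma = example_enable_phys_dma,
};

struct example_card {
	struct fw_card card;	/* a real driver adds hardware state here */
};

static int example_probe(struct device *dev, u64 guid)
{
	/* kmalloc'ed, as fw_card_put()'s release function assumes. */
	struct example_card *ec = kzalloc(sizeof(*ec), GFP_KERNEL);

	if (ec == NULL)
		return -ENOMEM;
	fw_card_initialize(&ec->card, &example_driver, dev);
	/*
	 * fw_card_add() stores the bus parameters, generates the initial
	 * config ROM under card_mutex and hands it to our ->enable() hook.
	 */
	return fw_card_add(&ec->card, 0x2 /* max_receive */, SCODE_100, guid);
}

static void example_remove(struct example_card *ec)
{
	/* Swaps in the dummy driver above and drops the core's reference. */
	fw_core_remove_card(&ec->card);
	fw_card_put(&ec->card);	/* drops the driver's own reference */
}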
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
new file mode 100644
index 000000000000..0fa5bd54c6a1
--- /dev/null
+++ b/drivers/firewire/fw-cdev.c
@@ -0,0 +1,961 @@
+/*
+ * Char device for device raw access
+ *
+ * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/idr.h>
+#include <linux/compat.h>
+#include <linux/firewire-cdev.h>
+#include <asm/uaccess.h>
+#include "fw-transaction.h"
+#include "fw-topology.h"
+#include "fw-device.h"
+
+struct client;
+struct client_resource {
+	struct list_head link;
+	void (*release)(struct client *client, struct client_resource *r);
+	u32 handle;
+};
+
+/*
+ * dequeue_event() just kfree()'s the event, so the event has to be
+ * the first field in the struct.
+ */
+
+struct event {
+	struct { void *data; size_t size; } v[2];
+	struct list_head link;
+};
+
+struct bus_reset {
+	struct event event;
+	struct fw_cdev_event_bus_reset reset;
+};
+
+struct response {
+	struct event event;
+	struct fw_transaction transaction;
+	struct client *client;
+	struct client_resource resource;
+	struct fw_cdev_event_response response;
+};
+
+struct iso_interrupt {
+	struct event event;
+	struct fw_cdev_event_iso_interrupt interrupt;
+};
+
+struct client {
+	u32 version;
+	struct fw_device *device;
+	spinlock_t lock;
+	u32 resource_handle;
+	struct list_head resource_list;
+	struct list_head event_list;
+	wait_queue_head_t wait;
+	u64 bus_reset_closure;
+
+	struct fw_iso_context *iso_context;
+	u64 iso_closure;
+	struct fw_iso_buffer buffer;
+	unsigned long vm_start;
+
+	struct list_head link;
+};
+
+static inline void __user *
+u64_to_uptr(__u64 value)
+{
+	return (void __user *)(unsigned long)value;
+}
+
+static inline __u64
+uptr_to_u64(void __user *ptr)
+{
+	return (__u64)(unsigned long)ptr;
+}
+
+static int fw_device_op_open(struct inode *inode, struct file *file)
+{
+	struct fw_device *device;
+	struct client *client;
+	unsigned long flags;
+
+	device = fw_device_from_devt(inode->i_rdev);
+	if (device == NULL)
+		return -ENODEV;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (client == NULL)
+		return -ENOMEM;
+
+	client->device = fw_device_get(device);
+	INIT_LIST_HEAD(&client->event_list);
+	INIT_LIST_HEAD(&client->resource_list);
+	spin_lock_init(&client->lock);
+	init_waitqueue_head(&client->wait);
+
+	file->private_data = client;
+
+	spin_lock_irqsave(&device->card->lock, flags);
+	list_add_tail(&client->link, &device->client_list);
+	spin_unlock_irqrestore(&device->card->lock, flags);
+
+	return 0;
+}
+
+static void queue_event(struct client *client, struct event *event,
+			void *data0, size_t size0, void *data1, size_t size1)
+{
+	unsigned long flags;
+
+	event->v[0].data = data0;
+	event->v[0].size = size0;
+	event->v[1].data = data1;
+	event->v[1].size = size1;
+
+	spin_lock_irqsave(&client->lock, flags);
+
+	list_add_tail(&event->link, &client->event_list);
+	wake_up_interruptible(&client->wait);
+
+	spin_unlock_irqrestore(&client->lock, flags);
+}
+
+static int
+dequeue_event(struct client *client, char __user *buffer, size_t count)
+{
+	unsigned long flags;
+	struct event *event;
+	size_t size, total;
+	int i, retval;
+
+	retval = wait_event_interruptible(client->wait,
+					  !list_empty(&client->event_list) ||
+					  fw_device_is_shutdown(client->device));
+	if (retval < 0)
+		return retval;
+
+	if (list_empty(&client->event_list) &&
+	    fw_device_is_shutdown(client->device))
+		return -ENODEV;
+
+	spin_lock_irqsave(&client->lock, flags);
+	event = container_of(client->event_list.next, struct event, link);
+	list_del(&event->link);
+	spin_unlock_irqrestore(&client->lock, flags);
+
+	total = 0;
+	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
+		size = min(event->v[i].size, count - total);
+		if (copy_to_user(buffer + total, event->v[i].data, size)) {
+			retval = -EFAULT;
+			goto out;
+		}
+		total += size;
+	}
+	retval = total;
+
+ out:
+	kfree(event);
+
+	return retval;
+}
+
+static ssize_t
+fw_device_op_read(struct file *file,
+		  char __user *buffer, size_t count, loff_t *offset)
+{
+	struct client *client = file->private_data;
+
+	return dequeue_event(client, buffer, count);
+}
+
+static void
+fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
+		     struct client *client)
+{
+	struct fw_card *card = client->device->card;
+
+	event->closure = client->bus_reset_closure;
+	event->type = FW_CDEV_EVENT_BUS_RESET;
+	event->node_id = client->device->node_id;
+	event->local_node_id = card->local_node->node_id;
+	event->bm_node_id = 0; /* FIXME: We don't track the BM. */
+	event->irm_node_id = card->irm_node->node_id;
+	event->root_node_id = card->root_node->node_id;
+	event->generation = card->generation;
+}
+
+static void
+for_each_client(struct fw_device *device,
+		void (*callback)(struct client *client))
+{
+	struct fw_card *card = device->card;
+	struct client *c;
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->lock, flags);
+
+	list_for_each_entry(c, &device->client_list, link)
+		callback(c);
+
+	spin_unlock_irqrestore(&card->lock, flags);
+}
+
+static void
+queue_bus_reset_event(struct client *client)
+{
+	struct bus_reset *bus_reset;
+
+	bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
+	if (bus_reset == NULL) {
+		fw_notify("Out of memory when allocating bus reset event\n");
+		return;
+	}
+
+	fill_bus_reset_event(&bus_reset->reset, client);
+
+	queue_event(client, &bus_reset->event,
+		    &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
+}
+
+void fw_device_cdev_update(struct fw_device *device)
+{
+	for_each_client(device, queue_bus_reset_event);
+}
+
+static void wake_up_client(struct client *client)
+{
+	wake_up_interruptible(&client->wait);
+}
+
+void fw_device_cdev_remove(struct fw_device *device)
+{
+	for_each_client(device, wake_up_client);
+}
+
+static int ioctl_get_info(struct client *client, void *buffer)
+{
+	struct fw_cdev_get_info *get_info = buffer;
+	struct fw_cdev_event_bus_reset bus_reset;
+
+	client->version = get_info->version;
+	get_info->version = FW_CDEV_VERSION;
+
+	if (get_info->rom != 0) {
+		void __user *uptr = u64_to_uptr(get_info->rom);
+		size_t want = get_info->rom_length;
+		size_t have = client->device->config_rom_length * 4;
+
+		if (copy_to_user(uptr, client->device->config_rom,
+				 min(want, have)))
+			return -EFAULT;
+	}
+	get_info->rom_length = client->device->config_rom_length * 4;
+
+	client->bus_reset_closure = get_info->bus_reset_closure;
+	if (get_info->bus_reset != 0) {
+		void __user *uptr = u64_to_uptr(get_info->bus_reset);
+
+		fill_bus_reset_event(&bus_reset, client);
+		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
+			return -EFAULT;
+	}
+
+	get_info->card = client->device->card->index;
+
+	return 0;
+}
+
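+/*
+ * Every kernel-side object handed out to user space -- a pending
+ * transaction, an allocated address range, a config ROM descriptor --
+ * is tracked as a client_resource.  User space refers to it by the
+ * handle assigned here, and whatever is still on resource_list when
+ * the file is closed gets released in fw_device_op_release().
+ */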
+static void
+add_client_resource(struct client *client, struct client_resource *resource)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&client->lock, flags);
+	list_add_tail(&resource->link, &client->resource_list);
+	resource->handle = client->resource_handle++;
+	spin_unlock_irqrestore(&client->lock, flags);
+}
+
+static int
+release_client_resource(struct client *client, u32 handle,
+			struct client_resource **resource)
+{
+	struct client_resource *r;
+	unsigned long flags;
+
+	spin_lock_irqsave(&client->lock, flags);
+	list_for_each_entry(r, &client->resource_list, link) {
+		if (r->handle == handle) {
+			list_del(&r->link);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&client->lock, flags);
+
+	if (&r->link == &client->resource_list)
+		return -EINVAL;
+
+	if (resource)
+		*resource = r;
+	else
+		r->release(client, r);
+
+	return 0;
+}
+
+static void
+release_transaction(struct client *client, struct client_resource *resource)
+{
+	struct response *response =
+		container_of(resource, struct response, resource);
+
+	fw_cancel_transaction(client->device->card, &response->transaction);
+}
+
+static void
+complete_transaction(struct fw_card *card, int rcode,
+		     void *payload, size_t length, void *data)
+{
+	struct response *response = data;
+	struct client *client = response->client;
+	unsigned long flags;
+
+	if (length < response->response.length)
+		response->response.length = length;
+	if (rcode == RCODE_COMPLETE)
+		memcpy(response->response.data, payload,
+		       response->response.length);
+
+	spin_lock_irqsave(&client->lock, flags);
+	list_del(&response->resource.link);
+	spin_unlock_irqrestore(&client->lock, flags);
+
+	response->response.type = FW_CDEV_EVENT_RESPONSE;
+	response->response.rcode = rcode;
+	queue_event(client, &response->event,
+		    &response->response, sizeof(response->response),
+		    response->response.data, response->response.length);
+}
+
+static ssize_t ioctl_send_request(struct client *client, void *buffer)
+{
+	struct fw_device *device = client->device;
+	struct fw_cdev_send_request *request = buffer;
+	struct response *response;
+
+	/* What is the biggest size we'll accept, really? */
+	if (request->length > 4096)
+		return -EINVAL;
+
+	response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
+	if (response == NULL)
+		return -ENOMEM;
+
+	response->client = client;
+	response->response.length = request->length;
+	response->response.closure = request->closure;
+
+	if (request->data &&
+	    copy_from_user(response->response.data,
+			   u64_to_uptr(request->data), request->length)) {
+		kfree(response);
+		return -EFAULT;
+	}
+
+	response->resource.release = release_transaction;
+	add_client_resource(client, &response->resource);
+
+	fw_send_request(device->card, &response->transaction,
+			request->tcode & 0x1f,
+			device->node->node_id,
+			request->generation,
+			device->node->max_speed,
+			request->offset,
+			response->response.data, request->length,
+			complete_transaction, response);
+
+	if (request->data)
+		return sizeof(*request) + request->length;
+	else
+		return sizeof(*request);
+}
+
+struct address_handler {
+	struct fw_address_handler handler;
+	__u64 closure;
+	struct client *client;
+	struct client_resource resource;
+};
+
+struct request {
+	struct fw_request *request;
+	void *data;
+	size_t length;
+	struct client_resource resource;
+};
+
+struct request_event {
+	struct event event;
+	struct fw_cdev_event_request request;
+};
+
+static void
+release_request(struct client *client, struct client_resource *resource)
+{
+	struct request *request =
+		container_of(resource, struct request, resource);
+
+	fw_send_response(client->device->card, request->request,
+			 RCODE_CONFLICT_ERROR);
+	kfree(request);
+}
+
+static void
+handle_request(struct fw_card *card, struct fw_request *r,
+	       int tcode, int destination, int source,
+	       int generation, int speed,
+	       unsigned long long offset,
+	       void *payload, size_t length, void *callback_data)
+{
+	struct address_handler *handler = callback_data;
+	struct request *request;
+	struct request_event *e;
+	struct client *client = handler->client;
+
+	request = kmalloc(sizeof(*request), GFP_ATOMIC);
+	e = kmalloc(sizeof(*e), GFP_ATOMIC);
+	if (request == NULL || e == NULL) {
+		kfree(request);
+		kfree(e);
+		fw_send_response(card, r, RCODE_CONFLICT_ERROR);
+		return;
+	}
+
+	request->request = r;
+	request->data = payload;
+	request->length = length;
+
+	request->resource.release = release_request;
+	add_client_resource(client, &request->resource);
+
+	e->request.type = FW_CDEV_EVENT_REQUEST;
+	e->request.tcode = tcode;
+	e->request.offset = offset;
+	e->request.length = length;
+	e->request.handle = request->resource.handle;
+	e->request.closure = handler->closure;
+
+	queue_event(client, &e->event,
+		    &e->request, sizeof(e->request), payload, length);
+}
+
+static void
+release_address_handler(struct client *client,
+			struct client_resource *resource)
+{
+	struct address_handler *handler =
+		container_of(resource, struct address_handler, resource);
+
+	fw_core_remove_address_handler(&handler->handler);
+	kfree(handler);
+}
+
+static int ioctl_allocate(struct client *client, void *buffer)
+{
+	struct fw_cdev_allocate *request = buffer;
+	struct address_handler *handler;
+	struct fw_address_region region;
+
+	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
+	if (handler == NULL)
+		return -ENOMEM;
+
+	region.start = request->offset;
+	region.end = request->offset + request->length;
+	handler->handler.length = request->length;
+	handler->handler.address_callback = handle_request;
+	handler->handler.callback_data = handler;
+	handler->closure = request->closure;
+	handler->client = client;
+
+	if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
+		kfree(handler);
+		return -EBUSY;
+	}
+
+	handler->resource.release = release_address_handler;
+	add_client_resource(client, &handler->resource);
+	request->handle = handler->resource.handle;
+
+	return 0;
+}
+
+static int ioctl_deallocate(struct client *client, void *buffer)
+{
+	struct fw_cdev_deallocate *request = buffer;
+
+	return release_client_resource(client, request->handle, NULL);
+}
+
+static int ioctl_send_response(struct client *client, void *buffer)
+{
+	struct fw_cdev_send_response *request = buffer;
+	struct client_resource *resource;
+	struct request *r;
+
+	if (release_client_resource(client, request->handle, &resource) < 0)
+		return -EINVAL;
+	r = container_of(resource, struct request, resource);
+	if (request->length < r->length)
+		r->length = request->length;
+	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
+		return -EFAULT;
+
+	fw_send_response(client->device->card, r->request, request->rcode);
+	kfree(r);
+
+	return 0;
+}
+
+static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
+{
+	struct fw_cdev_initiate_bus_reset *request = buffer;
+	int short_reset;
+
+	short_reset = (request->type == FW_CDEV_SHORT_RESET);
+
+	return fw_core_initiate_bus_reset(client->device->card, short_reset);
+}
+
+struct descriptor {
+	struct fw_descriptor d;
+	struct client_resource resource;
+	u32 data[0];
+};
+
+static void release_descriptor(struct client *client,
+			       struct client_resource *resource)
+{
+	struct descriptor *descriptor =
+		container_of(resource, struct descriptor, resource);
+
+	fw_core_remove_descriptor(&descriptor->d);
+	kfree(descriptor);
+}
+
+static int ioctl_add_descriptor(struct client *client, void *buffer)
+{
+	struct fw_cdev_add_descriptor *request = buffer;
+	struct descriptor *descriptor;
+	int retval;
+
+	if (request->length > 256)
+		return -EINVAL;
+
+	descriptor =
+		kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
+	if (descriptor == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(descriptor->data,
+			   u64_to_uptr(request->data), request->length * 4)) {
+		kfree(descriptor);
+		return -EFAULT;
+	}
+
+	descriptor->d.length = request->length;
+	descriptor->d.immediate = request->immediate;
+	descriptor->d.key = request->key;
+	descriptor->d.data = descriptor->data;
+
+	retval = fw_core_add_descriptor(&descriptor->d);
+	if (retval < 0) {
+		kfree(descriptor);
+		return retval;
+	}
+
+	descriptor->resource.release = release_descriptor;
+	add_client_resource(client, &descriptor->resource);
+	request->handle = descriptor->resource.handle;
+
+	return 0;
+}
+
+static int ioctl_remove_descriptor(struct client *client, void *buffer)
+{
+	struct fw_cdev_remove_descriptor *request = buffer;
+
+	return release_client_resource(client, request->handle, NULL);
+}
+
+static void
+iso_callback(struct fw_iso_context *context, u32 cycle,
+	     size_t header_length, void *header, void *data)
+{
+	struct client *client = data;
+	struct iso_interrupt *interrupt;
+
+	interrupt = kzalloc(sizeof(*interrupt) + header_length, GFP_ATOMIC);
+	if (interrupt == NULL)
+		return;
+
+	interrupt->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
+	interrupt->interrupt.closure = client->iso_closure;
+	interrupt->interrupt.cycle = cycle;
+	interrupt->interrupt.header_length = header_length;
+	memcpy(interrupt->interrupt.header, header, header_length);
+	queue_event(client, &interrupt->event,
+		    &interrupt->interrupt,
+		    sizeof(interrupt->interrupt) + header_length, NULL, 0);
+}
+
+static int ioctl_create_iso_context(struct client *client, void *buffer)
+{
+	struct fw_cdev_create_iso_context *request = buffer;
+
+	if (request->channel > 63)
+		return -EINVAL;
+
+	switch (request->type) {
+	case FW_ISO_CONTEXT_RECEIVE:
+		if (request->header_size < 4 || (request->header_size & 3))
+			return -EINVAL;
+		break;
+
+	case FW_ISO_CONTEXT_TRANSMIT:
+		if (request->speed > SCODE_3200)
+			return -EINVAL;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	client->iso_closure = request->closure;
+	client->iso_context = fw_iso_context_create(client->device->card,
+						    request->type,
+						    request->channel,
+						    request->speed,
+						    request->header_size,
+						    iso_callback, client);
+	if (IS_ERR(client->iso_context))
+		return PTR_ERR(client->iso_context);
+
+	/* We only support one context at this time. */
+	request->handle = 0;
+
+	return 0;
+}
+
+static int ioctl_queue_iso(struct client *client, void *buffer)
+{
+	struct fw_cdev_queue_iso *request = buffer;
+	struct fw_cdev_iso_packet __user *p, *end, *next;
+	struct fw_iso_context *ctx = client->iso_context;
+	unsigned long payload, buffer_end, header_length;
+	int count;
+	struct {
+		struct fw_iso_packet packet;
+		u8 header[256];
+	} u;
+
+	if (ctx == NULL || request->handle != 0)
+		return -EINVAL;
+
+	/*
+	 * If the user passes a non-NULL data pointer, has mmap()'ed
+	 * the iso buffer, and the pointer points inside the buffer,
+	 * we set up the payload pointers accordingly.  Otherwise we
+	 * set them both to 0, which will still let packets with
+	 * payload_length == 0 through.  In other words, if no packets
+	 * use the indirect payload, the iso buffer need not be mapped
+	 * and the request->data pointer is ignored.
+	 */
+
+	payload = (unsigned long)request->data - client->vm_start;
+	buffer_end = client->buffer.page_count << PAGE_SHIFT;
+	if (request->data == 0 || client->buffer.pages == NULL ||
+	    payload >= buffer_end) {
+		payload = 0;
+		buffer_end = 0;
+	}
+
+	if (!access_ok(VERIFY_READ, request->packets, request->size))
+		return -EFAULT;
+
+	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
+	end = (void __user *)p + request->size;
+	count = 0;
+	while (p < end) {
+		if (__copy_from_user(&u.packet, p, sizeof(*p)))
+			return -EFAULT;
+
+		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
+			header_length = u.packet.header_length;
+		} else {
+			/*
+			 * We require that header_length is a multiple of
+			 * the fixed header size, ctx->header_size.
+			 */
+			if (ctx->header_size == 0) {
+				if (u.packet.header_length > 0)
+					return -EINVAL;
+			} else if (u.packet.header_length % ctx->header_size != 0) {
+				return -EINVAL;
+			}
+			header_length = 0;
+		}
+
+		next = (struct fw_cdev_iso_packet __user *)
+			&p->header[header_length / 4];
+		if (next > end)
+			return -EINVAL;
+		if (__copy_from_user
+		    (u.packet.header, p->header, header_length))
+			return -EFAULT;
+		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
+		    u.packet.header_length + u.packet.payload_length > 0)
+			return -EINVAL;
+		if (payload + u.packet.payload_length > buffer_end)
+			return -EINVAL;
+
+		if (fw_iso_context_queue(ctx, &u.packet,
+					 &client->buffer, payload))
+			break;
+
+		p = next;
+		payload += u.packet.payload_length;
+		count++;
+	}
+
+	request->size -= uptr_to_u64(p) - request->packets;
+	request->packets = uptr_to_u64(p);
+	request->data = client->vm_start + payload;
+
+	return count;
+}
+
+static int ioctl_start_iso(struct client *client, void *buffer)
+{
+	struct fw_cdev_start_iso *request = buffer;
+
+	if (request->handle != 0)
+		return -EINVAL;
+	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
+		if (request->tags == 0 || request->tags > 15)
+			return -EINVAL;
+
+		if (request->sync > 15)
+			return -EINVAL;
+	}
+
+	return fw_iso_context_start(client->iso_context, request->cycle,
+				    request->sync, request->tags);
+}
+
+static int ioctl_stop_iso(struct client *client, void *buffer)
+{
+	struct fw_cdev_stop_iso *request = buffer;
+
+	if (request->handle != 0)
+		return -EINVAL;
+
+	return fw_iso_context_stop(client->iso_context);
+}
+
+static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
+	ioctl_get_info,
+	ioctl_send_request,
+	ioctl_allocate,
+	ioctl_deallocate,
+	ioctl_send_response,
+	ioctl_initiate_bus_reset,
+	ioctl_add_descriptor,
+	ioctl_remove_descriptor,
+	ioctl_create_iso_context,
+	ioctl_queue_iso,
+	ioctl_start_iso,
+	ioctl_stop_iso,
+};
+
+static int
+dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
+{
+	char buffer[256];
+	int retval;
+
+	if (_IOC_TYPE(cmd) != '#' ||
+	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
+		return -EINVAL;
+
+	if (_IOC_DIR(cmd) & _IOC_WRITE) {
+		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
+		    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+	}
+
+	retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
+	if (retval < 0)
+		return retval;
+
+	if (_IOC_DIR(cmd) & _IOC_READ) {
+		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
+		    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static long
+fw_device_op_ioctl(struct file *file,
+		   unsigned int cmd, unsigned long arg)
+{
+	struct client *client = file->private_data;
+
+	return dispatch_ioctl(client, cmd, (void __user *) arg);
+}
+
+#ifdef CONFIG_COMPAT
+static long
+fw_device_op_compat_ioctl(struct file *file,
+			  unsigned int cmd, unsigned long arg)
+{
+	struct client *client = file->private_data;
+
+	return dispatch_ioctl(client, cmd, compat_ptr(arg));
+}
+#endif
+
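+/*
+ * mmap() of the device file provides the DMA buffer for isochronous
+ * I/O; the payload offsets queued via ioctl_queue_iso() point into
+ * it.  The DMA direction follows the mapping: writable means transmit
+ * (DMA_TO_DEVICE), read-only means receive (DMA_FROM_DEVICE).
+ */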
+static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct client *client = file->private_data;
+	enum dma_data_direction direction;
+	unsigned long size;
+	int page_count, retval;
+
+	/* FIXME: We could support multiple buffers, but we don't. */
+	if (client->buffer.pages != NULL)
+		return -EBUSY;
+
+	if (!(vma->vm_flags & VM_SHARED))
+		return -EINVAL;
+
+	if (vma->vm_start & ~PAGE_MASK)
+		return -EINVAL;
+
+	client->vm_start = vma->vm_start;
+	size = vma->vm_end - vma->vm_start;
+	page_count = size >> PAGE_SHIFT;
+	if (size & ~PAGE_MASK)
+		return -EINVAL;
+
+	if (vma->vm_flags & VM_WRITE)
+		direction = DMA_TO_DEVICE;
+	else
+		direction = DMA_FROM_DEVICE;
+
+	retval = fw_iso_buffer_init(&client->buffer, client->device->card,
+				    page_count, direction);
+	if (retval < 0)
+		return retval;
+
+	retval = fw_iso_buffer_map(&client->buffer, vma);
+	if (retval < 0)
+		fw_iso_buffer_destroy(&client->buffer, client->device->card);
+
+	return retval;
+}
+
+static int fw_device_op_release(struct inode *inode, struct file *file)
+{
+	struct client *client = file->private_data;
+	struct event *e, *next_e;
+	struct client_resource *r, *next_r;
+	unsigned long flags;
+
+	if (client->buffer.pages)
+		fw_iso_buffer_destroy(&client->buffer, client->device->card);
+
+	if (client->iso_context)
+		fw_iso_context_destroy(client->iso_context);
+
+	list_for_each_entry_safe(r, next_r, &client->resource_list, link)
+		r->release(client, r);
+
+	/*
+	 * FIXME: We should wait for the async tasklets to stop
+	 * running before freeing the memory.
+	 */
+
+	list_for_each_entry_safe(e, next_e, &client->event_list, link)
+		kfree(e);
+
+	spin_lock_irqsave(&client->device->card->lock, flags);
+	list_del(&client->link);
+	spin_unlock_irqrestore(&client->device->card->lock, flags);
+
+	fw_device_put(client->device);
+	kfree(client);
+
+	return 0;
+}
+
+static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
+{
+	struct client *client = file->private_data;
+	unsigned int mask = 0;
+
+	poll_wait(file, &client->wait, pt);
+
+	if (fw_device_is_shutdown(client->device))
+		mask |= POLLHUP | POLLERR;
+	if (!list_empty(&client->event_list))
+		mask |= POLLIN | POLLRDNORM;
+
+	return mask;
+}
+
+const struct file_operations fw_device_ops = {
+	.owner		= THIS_MODULE,
+	.open		= fw_device_op_open,
+	.read		= fw_device_op_read,
+	.unlocked_ioctl	= fw_device_op_ioctl,
+	.poll		= fw_device_op_poll,
+	.release	= fw_device_op_release,
+	.mmap		= fw_device_op_mmap,
+
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= fw_device_op_compat_ioctl,
+#endif
+};
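
That completes the char device: read() delivers the events queued above, mmap() provides the iso payload buffer, and the ioctls dispatch through ioctl_handlers by _IOC_NR with type '#'. As a quick illustration, a minimal user-space sketch, assuming only the FW_CDEV_IOC_GET_INFO macro and struct fw_cdev_get_info from the new <linux/firewire-cdev.h> header (error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

int main(void)
{
	struct fw_cdev_get_info info;
	int fd = open("/dev/fw0", O_RDWR);	/* one char device per node */

	if (fd < 0)
		return 1;

	memset(&info, 0, sizeof(info));
	info.version = FW_CDEV_VERSION;		/* ABI version we speak */
	if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) < 0)
		return 1;

	printf("kernel ABI version %u, card %u, config ROM %u bytes\n",
	       info.version, info.card, info.rom_length);
	return 0;
}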
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
new file mode 100644
index 000000000000..c1ce465d9710
--- /dev/null
+++ b/drivers/firewire/fw-device.c
@@ -0,0 +1,813 @@
+/*
+ * Device probing and sysfs code.
+ *
+ * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/kthread.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/rwsem.h>
+#include <asm/semaphore.h>
+#include <linux/ctype.h>
+#include "fw-transaction.h"
+#include "fw-topology.h"
+#include "fw-device.h"
+
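+/*
+ * A CSR directory starts with a header quadlet whose upper 16 bits
+ * give the number of entries; each following entry packs an 8-bit key
+ * into the top byte and a 24-bit value into the rest.  These helpers
+ * iterate over the entries of such a directory.
+ */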
35void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
36{
37 ci->p = p + 1;
38 ci->end = ci->p + (p[0] >> 16);
39}
40EXPORT_SYMBOL(fw_csr_iterator_init);
41
42int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
43{
44 *key = *ci->p >> 24;
45 *value = *ci->p & 0xffffff;
46
47 return ci->p++ < ci->end;
48}
49EXPORT_SYMBOL(fw_csr_iterator_next);
50
51static int is_fw_unit(struct device *dev);
52
53static int match_unit_directory(u32 * directory, const struct fw_device_id *id)
54{
55 struct fw_csr_iterator ci;
56 int key, value, match;
57
58 match = 0;
59 fw_csr_iterator_init(&ci, directory);
60 while (fw_csr_iterator_next(&ci, &key, &value)) {
61 if (key == CSR_VENDOR && value == id->vendor)
62 match |= FW_MATCH_VENDOR;
63 if (key == CSR_MODEL && value == id->model)
64 match |= FW_MATCH_MODEL;
65 if (key == CSR_SPECIFIER_ID && value == id->specifier_id)
66 match |= FW_MATCH_SPECIFIER_ID;
67 if (key == CSR_VERSION && value == id->version)
68 match |= FW_MATCH_VERSION;
69 }
70
71 return (match & id->match_flags) == id->match_flags;
72}
73
74static int fw_unit_match(struct device *dev, struct device_driver *drv)
75{
76 struct fw_unit *unit = fw_unit(dev);
77 struct fw_driver *driver = fw_driver(drv);
78 int i;
79
80 /* We only allow binding to fw_units. */
81 if (!is_fw_unit(dev))
82 return 0;
83
84 for (i = 0; driver->id_table[i].match_flags != 0; i++) {
85 if (match_unit_directory(unit->directory, &driver->id_table[i]))
86 return 1;
87 }
88
89 return 0;
90}
91
92static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
93{
94 struct fw_device *device = fw_device(unit->device.parent);
95 struct fw_csr_iterator ci;
96
97 int key, value;
98 int vendor = 0;
99 int model = 0;
100 int specifier_id = 0;
101 int version = 0;
102
103 fw_csr_iterator_init(&ci, &device->config_rom[5]);
104 while (fw_csr_iterator_next(&ci, &key, &value)) {
105 switch (key) {
106 case CSR_VENDOR:
107 vendor = value;
108 break;
109 case CSR_MODEL:
110 model = value;
111 break;
112 }
113 }
114
115 fw_csr_iterator_init(&ci, unit->directory);
116 while (fw_csr_iterator_next(&ci, &key, &value)) {
117 switch (key) {
118 case CSR_SPECIFIER_ID:
119 specifier_id = value;
120 break;
121 case CSR_VERSION:
122 version = value;
123 break;
124 }
125 }
126
127 return snprintf(buffer, buffer_size,
128 "ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
129 vendor, model, specifier_id, version);
130}
131
132static int
133fw_unit_uevent(struct device *dev, char **envp, int num_envp,
134 char *buffer, int buffer_size)
135{
136 struct fw_unit *unit = fw_unit(dev);
137 char modalias[64];
138 int length = 0;
139 int i = 0;
140
141 get_modalias(unit, modalias, sizeof(modalias));
142
143 if (add_uevent_var(envp, num_envp, &i,
144 buffer, buffer_size, &length,
145 "MODALIAS=%s", modalias))
146 return -ENOMEM;
147
148 envp[i] = NULL;
149
150 return 0;
151}
152
153struct bus_type fw_bus_type = {
154 .name = "firewire",
155 .match = fw_unit_match,
156};
157EXPORT_SYMBOL(fw_bus_type);
158
159struct fw_device *fw_device_get(struct fw_device *device)
160{
161 get_device(&device->device);
162
163 return device;
164}
165
166void fw_device_put(struct fw_device *device)
167{
168 put_device(&device->device);
169}
170
171static void fw_device_release(struct device *dev)
172{
173 struct fw_device *device = fw_device(dev);
174 unsigned long flags;
175
176 /*
177 * Take the card lock so we don't set this to NULL while a
178 * FW_NODE_UPDATED callback is being handled.
179 */
180 spin_lock_irqsave(&device->card->lock, flags);
181 device->node->data = NULL;
182 spin_unlock_irqrestore(&device->card->lock, flags);
183
184 fw_node_put(device->node);
185 fw_card_put(device->card);
186 kfree(device->config_rom);
187 kfree(device);
188}
189
190int fw_device_enable_phys_dma(struct fw_device *device)
191{
192 return device->card->driver->enable_phys_dma(device->card,
193 device->node_id,
194 device->generation);
195}
196EXPORT_SYMBOL(fw_device_enable_phys_dma);
197
198struct config_rom_attribute {
199 struct device_attribute attr;
200 u32 key;
201};
202
203static ssize_t
204show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
205{
206 struct config_rom_attribute *attr =
207 container_of(dattr, struct config_rom_attribute, attr);
208 struct fw_csr_iterator ci;
209 u32 *dir;
210 int key, value;
211
212 if (is_fw_unit(dev))
213 dir = fw_unit(dev)->directory;
214 else
215 dir = fw_device(dev)->config_rom + 5;
216
217 fw_csr_iterator_init(&ci, dir);
218 while (fw_csr_iterator_next(&ci, &key, &value))
219 if (attr->key == key)
220 return snprintf(buf, buf ? PAGE_SIZE : 0,
221 "0x%06x\n", value);
222
223 return -ENOENT;
224}
225
226#define IMMEDIATE_ATTR(name, key) \
227 { __ATTR(name, S_IRUGO, show_immediate, NULL), key }
228
229static ssize_t
230show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
231{
232 struct config_rom_attribute *attr =
233 container_of(dattr, struct config_rom_attribute, attr);
234 struct fw_csr_iterator ci;
235 u32 *dir, *block = NULL, *p, *end;
236 int length, key, value, last_key = 0;
237 char *b;
238
239 if (is_fw_unit(dev))
240 dir = fw_unit(dev)->directory;
241 else
242 dir = fw_device(dev)->config_rom + 5;
243
244 fw_csr_iterator_init(&ci, dir);
245 while (fw_csr_iterator_next(&ci, &key, &value)) {
246 if (attr->key == last_key &&
247 key == (CSR_DESCRIPTOR | CSR_LEAF))
248 block = ci.p - 1 + value;
249 last_key = key;
250 }
251
252 if (block == NULL)
253 return -ENOENT;
254
255 length = min(block[0] >> 16, 256U);
256 if (length < 3)
257 return -ENOENT;
258
259 if (block[1] != 0 || block[2] != 0)
260 /* Unknown encoding. */
261 return -ENOENT;
262
263 if (buf == NULL)
264 return length * 4;
265
266 b = buf;
267 end = &block[length + 1];
268 for (p = &block[3]; p < end; p++, b += 4)
269 * (u32 *) b = (__force u32) __cpu_to_be32(*p);
270
271 /* Strip trailing whitespace and add newline. */
272 while (b--, (isspace(*b) || *b == '\0') && b > buf);
273 strcpy(b + 1, "\n");
274
275 return b + 2 - buf;
276}
277
278#define TEXT_LEAF_ATTR(name, key) \
279 { __ATTR(name, S_IRUGO, show_text_leaf, NULL), key }
280
281static struct config_rom_attribute config_rom_attributes[] = {
282 IMMEDIATE_ATTR(vendor, CSR_VENDOR),
283 IMMEDIATE_ATTR(hardware_version, CSR_HARDWARE_VERSION),
284 IMMEDIATE_ATTR(specifier_id, CSR_SPECIFIER_ID),
285 IMMEDIATE_ATTR(version, CSR_VERSION),
286 IMMEDIATE_ATTR(model, CSR_MODEL),
287 TEXT_LEAF_ATTR(vendor_name, CSR_VENDOR),
288 TEXT_LEAF_ATTR(model_name, CSR_MODEL),
289 TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
290};
291
292static void
293init_fw_attribute_group(struct device *dev,
294 struct device_attribute *attrs,
295 struct fw_attribute_group *group)
296{
297 struct device_attribute *attr;
298 int i, j;
299
300 for (j = 0; attrs[j].attr.name != NULL; j++)
301 group->attrs[j] = &attrs[j].attr;
302
303 for (i = 0; i < ARRAY_SIZE(config_rom_attributes); i++) {
304 attr = &config_rom_attributes[i].attr;
305 if (attr->show(dev, attr, NULL) < 0)
306 continue;
307 group->attrs[j++] = &attr->attr;
308 }
309
310 BUG_ON(j >= ARRAY_SIZE(group->attrs));
311 group->attrs[j++] = NULL;
312 group->groups[0] = &group->group;
313 group->groups[1] = NULL;
314 group->group.attrs = group->attrs;
315 dev->groups = group->groups;
316}
317
318static ssize_t
319modalias_show(struct device *dev,
320 struct device_attribute *attr, char *buf)
321{
322 struct fw_unit *unit = fw_unit(dev);
323 int length;
324
325 length = get_modalias(unit, buf, PAGE_SIZE);
326 strcpy(buf + length, "\n");
327
328 return length + 1;
329}
330
331static ssize_t
332rom_index_show(struct device *dev,
333 struct device_attribute *attr, char *buf)
334{
335 struct fw_device *device = fw_device(dev->parent);
336 struct fw_unit *unit = fw_unit(dev);
337
338 return snprintf(buf, PAGE_SIZE, "%d\n",
339 (int)(unit->directory - device->config_rom));
340}
341
342static struct device_attribute fw_unit_attributes[] = {
343 __ATTR_RO(modalias),
344 __ATTR_RO(rom_index),
345 __ATTR_NULL,
346};
347
348static ssize_t
349config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
350{
351 struct fw_device *device = fw_device(dev);
352
353 memcpy(buf, device->config_rom, device->config_rom_length * 4);
354
355 return device->config_rom_length * 4;
356}
357
358static ssize_t
359guid_show(struct device *dev, struct device_attribute *attr, char *buf)
360{
361 struct fw_device *device = fw_device(dev);
362 u64 guid;
363
364 guid = ((u64)device->config_rom[3] << 32) | device->config_rom[4];
365
366 return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
367 (unsigned long long)guid);
368}
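The GUID computation above deserves a worked example: the EUI-64 is simply bus info block quadlets 3 and 4 concatenated. A standalone sketch (not part of the patch; values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical bus info block quadlets 3 and 4. */
	uint32_t rom3 = 0x00112233;
	uint32_t rom4 = 0x44556677;
	uint64_t guid = ((uint64_t)rom3 << 32) | rom4;

	/* Prints 0x0011223344556677, as guid_show() would format it. */
	printf("0x%016llx\n", (unsigned long long)guid);
	return 0;
}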
369
370static struct device_attribute fw_device_attributes[] = {
371 __ATTR_RO(config_rom),
372 __ATTR_RO(guid),
373 __ATTR_NULL,
374};
375
376struct read_quadlet_callback_data {
377 struct completion done;
378 int rcode;
379 u32 data;
380};
381
382static void
383complete_transaction(struct fw_card *card, int rcode,
384 void *payload, size_t length, void *data)
385{
386 struct read_quadlet_callback_data *callback_data = data;
387
388 if (rcode == RCODE_COMPLETE)
389 callback_data->data = be32_to_cpu(*(__be32 *)payload);
390 callback_data->rcode = rcode;
391 complete(&callback_data->done);
392}
393
394static int read_rom(struct fw_device *device, int index, u32 *data)
395{
396 struct read_quadlet_callback_data callback_data;
397 struct fw_transaction t;
398 u64 offset;
399
400 init_completion(&callback_data.done);
401
402 offset = 0xfffff0000400ULL + index * 4;
403 fw_send_request(device->card, &t, TCODE_READ_QUADLET_REQUEST,
404 device->node_id,
405 device->generation, SCODE_100,
406 offset, NULL, 4, complete_transaction, &callback_data);
407
408 wait_for_completion(&callback_data.done);
409
410 *data = callback_data.data;
411
412 return callback_data.rcode;
413}
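read_rom() addresses the config rom through IEEE 1212 CSR space: 0xfffff0000400 is the register base (0xfffff0000000) plus the config rom offset (0x400), and index counts quadlets. A small standalone check of that arithmetic (the index below is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base = 0xfffff0000000ULL;	/* CSR register base */
	uint64_t rom  = 0x400;			/* config rom offset */
	int index = 5;				/* hypothetical quadlet index */

	/* Matches the offset read_rom() builds: 0xfffff0000414 here. */
	printf("0x%012llx\n", (unsigned long long)(base + rom + index * 4));
	return 0;
}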
414
415static int read_bus_info_block(struct fw_device *device)
416{
417 static u32 rom[256];
418 u32 stack[16], sp, key;
419 int i, end, length;
420
421 /* First read the bus info block. */
422 for (i = 0; i < 5; i++) {
423 if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
424 return -1;
425 /*
426 * As per IEEE1212 7.2, during power-up, devices can
427 * reply with a 0 for the first quadlet of the config
428 * rom to indicate that they are booting (for example,
429	 * if the firmware is on the disk of an external
430 * harddisk). In that case we just fail, and the
431 * retry mechanism will try again later.
432 */
433 if (i == 0 && rom[i] == 0)
434 return -1;
435 }
436
437 /*
438 * Now parse the config rom. The config rom is a recursive
439 * directory structure so we parse it using a stack of
440 * references to the blocks that make up the structure. We
441 * push a reference to the root directory on the stack to
442 * start things off.
443 */
444 length = i;
445 sp = 0;
446 stack[sp++] = 0xc0000005;
447 while (sp > 0) {
448 /*
449		 * Pop the next block reference off the stack. The
450		 * lower 24 bits are the offset into the config rom,
451		 * the upper 8 bits are the type of the reference to
452		 * the block.
453 */
454 key = stack[--sp];
455 i = key & 0xffffff;
456 if (i >= ARRAY_SIZE(rom))
457 /*
458 * The reference points outside the standard
459 * config rom area, something's fishy.
460 */
461 return -1;
462
463 /* Read header quadlet for the block to get the length. */
464 if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
465 return -1;
466 end = i + (rom[i] >> 16) + 1;
467 i++;
468 if (end > ARRAY_SIZE(rom))
469 /*
470 * This block extends outside standard config
471 * area (and the array we're reading it
472 * into). That's broken, so ignore this
473 * device.
474 */
475 return -1;
476
477 /*
478 * Now read in the block. If this is a directory
479		 * block, check each entry as we read it to see if
480		 * it references another block, and push it in that case.
481 */
482 while (i < end) {
483 if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
484 return -1;
485 if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
486 sp < ARRAY_SIZE(stack))
487 stack[sp++] = i + rom[i];
488 i++;
489 }
490 if (length < i)
491 length = i;
492 }
493
494 device->config_rom = kmalloc(length * 4, GFP_KERNEL);
495 if (device->config_rom == NULL)
496 return -1;
497 memcpy(device->config_rom, rom, length * 4);
498 device->config_rom_length = length;
499
500 return 0;
501}
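Each directory entry parsed above is one quadlet: the key in the top 8 bits (whose top two bits give the entry type; 3 means directory, 2 means leaf) and the value or offset in the low 24 bits. A standalone decode of a made-up unit directory entry, for illustration only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t entry = 0xd1000004;	/* hypothetical: CSR_DIRECTORY | CSR_UNIT, offset 4 */
	unsigned key   = entry >> 24;		/* 0xd1 */
	unsigned type  = entry >> 30;		/* 3: a directory reference */
	unsigned value = entry & 0xffffff;	/* 4 quadlets from this entry */

	printf("key 0x%02x type %u value %u\n", key, type, value);
	return 0;
}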
502
503static void fw_unit_release(struct device *dev)
504{
505 struct fw_unit *unit = fw_unit(dev);
506
507 kfree(unit);
508}
509
510static struct device_type fw_unit_type = {
511 .uevent = fw_unit_uevent,
512 .release = fw_unit_release,
513};
514
515static int is_fw_unit(struct device *dev)
516{
517 return dev->type == &fw_unit_type;
518}
519
520static void create_units(struct fw_device *device)
521{
522 struct fw_csr_iterator ci;
523 struct fw_unit *unit;
524 int key, value, i;
525
526 i = 0;
527 fw_csr_iterator_init(&ci, &device->config_rom[5]);
528 while (fw_csr_iterator_next(&ci, &key, &value)) {
529 if (key != (CSR_UNIT | CSR_DIRECTORY))
530 continue;
531
532 /*
533 * Get the address of the unit directory and try to
534		 * match the drivers' id_tables against it.
535 */
536 unit = kzalloc(sizeof(*unit), GFP_KERNEL);
537 if (unit == NULL) {
538 fw_error("failed to allocate memory for unit\n");
539 continue;
540 }
541
542 unit->directory = ci.p + value - 1;
543 unit->device.bus = &fw_bus_type;
544 unit->device.type = &fw_unit_type;
545 unit->device.parent = &device->device;
546 snprintf(unit->device.bus_id, sizeof(unit->device.bus_id),
547 "%s.%d", device->device.bus_id, i++);
548
549 init_fw_attribute_group(&unit->device,
550 fw_unit_attributes,
551 &unit->attribute_group);
552 if (device_register(&unit->device) < 0)
553 goto skip_unit;
554
555 continue;
556
557 skip_unit:
558 kfree(unit);
559 }
560}
561
562static int shutdown_unit(struct device *device, void *data)
563{
564 device_unregister(device);
565
566 return 0;
567}
568
569static DECLARE_RWSEM(idr_rwsem);
570static DEFINE_IDR(fw_device_idr);
571int fw_cdev_major;
572
573struct fw_device *fw_device_from_devt(dev_t devt)
574{
575 struct fw_device *device;
576
577 down_read(&idr_rwsem);
578 device = idr_find(&fw_device_idr, MINOR(devt));
579 up_read(&idr_rwsem);
580
581 return device;
582}
583
584static void fw_device_shutdown(struct work_struct *work)
585{
586 struct fw_device *device =
587 container_of(work, struct fw_device, work.work);
588 int minor = MINOR(device->device.devt);
589
590 down_write(&idr_rwsem);
591 idr_remove(&fw_device_idr, minor);
592 up_write(&idr_rwsem);
593
594 fw_device_cdev_remove(device);
595 device_for_each_child(&device->device, NULL, shutdown_unit);
596 device_unregister(&device->device);
597}
598
599static struct device_type fw_device_type = {
600 .release = fw_device_release,
601};
602
603/*
604 * These defines control the retry behavior for reading the config
605 * rom. It shouldn't be necessary to tweak these; if the device
606 * doesn't respond to a config rom read within 10 seconds, it's not
607 * going to respond at all. As for the initial delay, a lot of
608 * devices will be able to respond within half a second after bus
609 * reset. On the other hand, it's not really worth being more
610 * aggressive than that, since it scales pretty well; if 10 devices
611 * are plugged in, they're all getting read within one second.
612 */
613
614#define MAX_RETRIES 10
615#define RETRY_DELAY (3 * HZ)
616#define INITIAL_DELAY (HZ / 2)
617
618static void fw_device_init(struct work_struct *work)
619{
620 struct fw_device *device =
621 container_of(work, struct fw_device, work.work);
622 int minor, err;
623
624 /*
625 * All failure paths here set node->data to NULL, so that we
626 * don't try to do device_for_each_child() on a kfree()'d
627 * device.
628 */
629
630 if (read_bus_info_block(device) < 0) {
631 if (device->config_rom_retries < MAX_RETRIES) {
632 device->config_rom_retries++;
633 schedule_delayed_work(&device->work, RETRY_DELAY);
634 } else {
635 fw_notify("giving up on config rom for node id %x\n",
636 device->node_id);
637 if (device->node == device->card->root_node)
638 schedule_delayed_work(&device->card->work, 0);
639 fw_device_release(&device->device);
640 }
641 return;
642 }
643
644 err = -ENOMEM;
645 down_write(&idr_rwsem);
646 if (idr_pre_get(&fw_device_idr, GFP_KERNEL))
647 err = idr_get_new(&fw_device_idr, device, &minor);
648 up_write(&idr_rwsem);
649 if (err < 0)
650 goto error;
651
652 device->device.bus = &fw_bus_type;
653 device->device.type = &fw_device_type;
654 device->device.parent = device->card->device;
655 device->device.devt = MKDEV(fw_cdev_major, minor);
656 snprintf(device->device.bus_id, sizeof(device->device.bus_id),
657 "fw%d", minor);
658
659 init_fw_attribute_group(&device->device,
660 fw_device_attributes,
661 &device->attribute_group);
662 if (device_add(&device->device)) {
663 fw_error("Failed to add device.\n");
664 goto error_with_cdev;
665 }
666
667 create_units(device);
668
669 /*
670 * Transition the device to running state. If it got pulled
671	 * out from under us while we did the initialization work, we
672 * have to shut down the device again here. Normally, though,
673 * fw_node_event will be responsible for shutting it down when
674 * necessary. We have to use the atomic cmpxchg here to avoid
675 * racing with the FW_NODE_DESTROYED case in
676 * fw_node_event().
677 */
678 if (atomic_cmpxchg(&device->state,
679 FW_DEVICE_INITIALIZING,
680 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
681 fw_device_shutdown(&device->work.work);
682 else
683 fw_notify("created new fw device %s (%d config rom retries)\n",
684 device->device.bus_id, device->config_rom_retries);
685
686 /*
687 * Reschedule the IRM work if we just finished reading the
688 * root node config rom. If this races with a bus reset we
689 * just end up running the IRM work a couple of extra times -
690 * pretty harmless.
691 */
692 if (device->node == device->card->root_node)
693 schedule_delayed_work(&device->card->work, 0);
694
695 return;
696
697 error_with_cdev:
698 down_write(&idr_rwsem);
699 idr_remove(&fw_device_idr, minor);
700 up_write(&idr_rwsem);
701 error:
702 put_device(&device->device);
703}
704
705static int update_unit(struct device *dev, void *data)
706{
707 struct fw_unit *unit = fw_unit(dev);
708 struct fw_driver *driver = (struct fw_driver *)dev->driver;
709
710 if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
711 down(&dev->sem);
712 driver->update(unit);
713 up(&dev->sem);
714 }
715
716 return 0;
717}
718
719static void fw_device_update(struct work_struct *work)
720{
721 struct fw_device *device =
722 container_of(work, struct fw_device, work.work);
723
724 fw_device_cdev_update(device);
725 device_for_each_child(&device->device, NULL, update_unit);
726}
727
728void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
729{
730 struct fw_device *device;
731
732 switch (event) {
733 case FW_NODE_CREATED:
734 case FW_NODE_LINK_ON:
735 if (!node->link_on)
736 break;
737
738 device = kzalloc(sizeof(*device), GFP_ATOMIC);
739 if (device == NULL)
740 break;
741
742 /*
743		 * Do minimal initialization of the device here, the
744 * rest will happen in fw_device_init(). We need the
745 * card and node so we can read the config rom and we
746 * need to do device_initialize() now so
747		 * device_for_each_child() in FW_NODE_UPDATED
748		 * doesn't freak out.
749 */
750 device_initialize(&device->device);
751 atomic_set(&device->state, FW_DEVICE_INITIALIZING);
752 device->card = fw_card_get(card);
753 device->node = fw_node_get(node);
754 device->node_id = node->node_id;
755 device->generation = card->generation;
756 INIT_LIST_HEAD(&device->client_list);
757
758 /*
759 * Set the node data to point back to this device so
760 * FW_NODE_UPDATED callbacks can update the node_id
761 * and generation for the device.
762 */
763 node->data = device;
764
765 /*
766 * Many devices are slow to respond after bus resets,
767 * especially if they are bus powered and go through
768 * power-up after getting plugged in. We schedule the
769 * first config rom scan half a second after bus reset.
770 */
771 INIT_DELAYED_WORK(&device->work, fw_device_init);
772 schedule_delayed_work(&device->work, INITIAL_DELAY);
773 break;
774
775 case FW_NODE_UPDATED:
776 if (!node->link_on || node->data == NULL)
777 break;
778
779 device = node->data;
780 device->node_id = node->node_id;
781 device->generation = card->generation;
782 if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
783 PREPARE_DELAYED_WORK(&device->work, fw_device_update);
784 schedule_delayed_work(&device->work, 0);
785 }
786 break;
787
788 case FW_NODE_DESTROYED:
789 case FW_NODE_LINK_OFF:
790 if (!node->data)
791 break;
792
793 /*
794 * Destroy the device associated with the node. There
795 * are two cases here: either the device is fully
796 * initialized (FW_DEVICE_RUNNING) or we're in the
797 * process of reading its config rom
798 * (FW_DEVICE_INITIALIZING). If it is fully
799 * initialized we can reuse device->work to schedule a
800 * full fw_device_shutdown(). If not, there's work
801		 * scheduled to read its config rom, and we just put
802 * the device in shutdown state to have that code fail
803 * to create the device.
804 */
805 device = node->data;
806 if (atomic_xchg(&device->state,
807 FW_DEVICE_SHUTDOWN) == FW_DEVICE_RUNNING) {
808 PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
809 schedule_delayed_work(&device->work, 0);
810 }
811 break;
812 }
813}
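Taken together, fw_device_init() and fw_node_event() implement a small state machine around device->state; a summary sketch (ours, not from the original source):

/*
 *   FW_NODE_CREATED / FW_NODE_LINK_ON
 *       -> INITIALIZING, config rom read scheduled after INITIAL_DELAY
 *   fw_device_init(): cmpxchg INITIALIZING -> RUNNING; if it finds
 *       SHUTDOWN instead, the node vanished meanwhile and it calls
 *       fw_device_shutdown() itself
 *   FW_NODE_DESTROYED / FW_NODE_LINK_OFF
 *       -> xchg to SHUTDOWN; fw_device_shutdown() is scheduled only if
 *       the old state was RUNNING
 */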
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
new file mode 100644
index 000000000000..0ba9d64ccf4c
--- /dev/null
+++ b/drivers/firewire/fw-device.h
@@ -0,0 +1,146 @@
1/*
2 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#ifndef __fw_device_h
20#define __fw_device_h
21
22#include <linux/fs.h>
23#include <linux/cdev.h>
24#include <asm/atomic.h>
25
26enum fw_device_state {
27 FW_DEVICE_INITIALIZING,
28 FW_DEVICE_RUNNING,
29 FW_DEVICE_SHUTDOWN,
30};
31
32struct fw_attribute_group {
33 struct attribute_group *groups[2];
34 struct attribute_group group;
35 struct attribute *attrs[11];
36};
37
38struct fw_device {
39 atomic_t state;
40 struct fw_node *node;
41 int node_id;
42 int generation;
43 struct fw_card *card;
44 struct device device;
45 struct list_head link;
46 struct list_head client_list;
47 u32 *config_rom;
48 size_t config_rom_length;
49 int config_rom_retries;
50 struct delayed_work work;
51 struct fw_attribute_group attribute_group;
52};
53
54static inline struct fw_device *
55fw_device(struct device *dev)
56{
57 return container_of(dev, struct fw_device, device);
58}
59
60static inline int
61fw_device_is_shutdown(struct fw_device *device)
62{
63 return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
64}
65
66struct fw_device *fw_device_get(struct fw_device *device);
67void fw_device_put(struct fw_device *device);
68int fw_device_enable_phys_dma(struct fw_device *device);
69
70void fw_device_cdev_update(struct fw_device *device);
71void fw_device_cdev_remove(struct fw_device *device);
72
73struct fw_device *fw_device_from_devt(dev_t devt);
74extern int fw_cdev_major;
75
76struct fw_unit {
77 struct device device;
78 u32 *directory;
79 struct fw_attribute_group attribute_group;
80};
81
82static inline struct fw_unit *
83fw_unit(struct device *dev)
84{
85 return container_of(dev, struct fw_unit, device);
86}
87
88#define CSR_OFFSET 0x40
89#define CSR_LEAF 0x80
90#define CSR_DIRECTORY 0xc0
91
92#define CSR_DESCRIPTOR 0x01
93#define CSR_VENDOR 0x03
94#define CSR_HARDWARE_VERSION 0x04
95#define CSR_NODE_CAPABILITIES 0x0c
96#define CSR_UNIT 0x11
97#define CSR_SPECIFIER_ID 0x12
98#define CSR_VERSION 0x13
99#define CSR_DEPENDENT_INFO 0x14
100#define CSR_MODEL 0x17
101#define CSR_INSTANCE 0x18
102
103#define SBP2_COMMAND_SET_SPECIFIER 0x38
104#define SBP2_COMMAND_SET 0x39
105#define SBP2_COMMAND_SET_REVISION 0x3b
106#define SBP2_FIRMWARE_REVISION 0x3c
107
108struct fw_csr_iterator {
109 u32 *p;
110 u32 *end;
111};
112
113void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
114int fw_csr_iterator_next(struct fw_csr_iterator *ci,
115 int *key, int *value);
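The iterator declared above walks one directory's entries, handing back each (key, value) pair. A hedged usage sketch (the helper name is ours, modeled on create_units() in fw-device.c):

static inline int csr_find_immediate(u32 *dir, int search_key, int *value)
{
	struct fw_csr_iterator ci;
	int key;

	fw_csr_iterator_init(&ci, dir);
	while (fw_csr_iterator_next(&ci, &key, value))
		if (key == search_key)
			return 1;	/* *value holds the entry's low 24 bits */

	return 0;
}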
116
117#define FW_MATCH_VENDOR 0x0001
118#define FW_MATCH_MODEL 0x0002
119#define FW_MATCH_SPECIFIER_ID 0x0004
120#define FW_MATCH_VERSION 0x0008
121
122struct fw_device_id {
123 u32 match_flags;
124 u32 vendor;
125 u32 model;
126 u32 specifier_id;
127 u32 version;
128 void *driver_data;
129};
130
131struct fw_driver {
132 struct device_driver driver;
133 /* Called when the parent device sits through a bus reset. */
134 void (*update) (struct fw_unit *unit);
135 const struct fw_device_id *id_table;
136};
137
138static inline struct fw_driver *
139fw_driver(struct device_driver *drv)
140{
141 return container_of(drv, struct fw_driver, driver);
142}
143
144extern const struct file_operations fw_device_ops;
145
146#endif /* __fw_device_h */
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
new file mode 100644
index 000000000000..2b640e9be6de
--- /dev/null
+++ b/drivers/firewire/fw-iso.c
@@ -0,0 +1,163 @@
1/*
2 * Isochronous IO functionality
3 *
4 * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/dma-mapping.h>
24#include <linux/vmalloc.h>
25#include <linux/mm.h>
26
27#include "fw-transaction.h"
28#include "fw-topology.h"
29#include "fw-device.h"
30
31int
32fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
33 int page_count, enum dma_data_direction direction)
34{
35 int i, j, retval = -ENOMEM;
36 dma_addr_t address;
37
38 buffer->page_count = page_count;
39 buffer->direction = direction;
40
41 buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
42 GFP_KERNEL);
43 if (buffer->pages == NULL)
44 goto out;
45
46 for (i = 0; i < buffer->page_count; i++) {
47 buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
48 if (buffer->pages[i] == NULL)
49 goto out_pages;
50
51 address = dma_map_page(card->device, buffer->pages[i],
52 0, PAGE_SIZE, direction);
53 if (dma_mapping_error(address)) {
54 __free_page(buffer->pages[i]);
55 goto out_pages;
56 }
57 set_page_private(buffer->pages[i], address);
58 }
59
60 return 0;
61
62 out_pages:
63 for (j = 0; j < i; j++) {
64 address = page_private(buffer->pages[j]);
65 dma_unmap_page(card->device, address,
66		dma_unmap_page(card->device, address,
67			       PAGE_SIZE, direction);
67 __free_page(buffer->pages[j]);
68 }
69 kfree(buffer->pages);
70 out:
71 buffer->pages = NULL;
72 return retval;
73}
74
75int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
76{
77 unsigned long uaddr;
78 int i, retval;
79
80 uaddr = vma->vm_start;
81 for (i = 0; i < buffer->page_count; i++) {
82 retval = vm_insert_page(vma, uaddr, buffer->pages[i]);
83 if (retval)
84 return retval;
85 uaddr += PAGE_SIZE;
86 }
87
88 return 0;
89}
90
91void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
92 struct fw_card *card)
93{
94 int i;
95 dma_addr_t address;
96
97 for (i = 0; i < buffer->page_count; i++) {
98 address = page_private(buffer->pages[i]);
99 dma_unmap_page(card->device, address,
100			       PAGE_SIZE, buffer->direction);
101 __free_page(buffer->pages[i]);
102 }
103
104 kfree(buffer->pages);
105 buffer->pages = NULL;
106}
107
108struct fw_iso_context *
109fw_iso_context_create(struct fw_card *card, int type,
110 int channel, int speed, size_t header_size,
111 fw_iso_callback_t callback, void *callback_data)
112{
113 struct fw_iso_context *ctx;
114
115 ctx = card->driver->allocate_iso_context(card, type, header_size);
116 if (IS_ERR(ctx))
117 return ctx;
118
119 ctx->card = card;
120 ctx->type = type;
121 ctx->channel = channel;
122 ctx->speed = speed;
123 ctx->header_size = header_size;
124 ctx->callback = callback;
125 ctx->callback_data = callback_data;
126
127 return ctx;
128}
129EXPORT_SYMBOL(fw_iso_context_create);
130
131void fw_iso_context_destroy(struct fw_iso_context *ctx)
132{
133 struct fw_card *card = ctx->card;
134
135 card->driver->free_iso_context(ctx);
136}
137EXPORT_SYMBOL(fw_iso_context_destroy);
138
139int
140fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags)
141{
142 return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
143}
144EXPORT_SYMBOL(fw_iso_context_start);
145
146int
147fw_iso_context_queue(struct fw_iso_context *ctx,
148 struct fw_iso_packet *packet,
149 struct fw_iso_buffer *buffer,
150 unsigned long payload)
151{
152 struct fw_card *card = ctx->card;
153
154 return card->driver->queue_iso(ctx, packet, buffer, payload);
155}
156EXPORT_SYMBOL(fw_iso_context_queue);
157
158int
159fw_iso_context_stop(struct fw_iso_context *ctx)
160{
161 return ctx->card->driver->stop_iso(ctx);
162}
163EXPORT_SYMBOL(fw_iso_context_stop);
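The five exported entry points above are meant to be used together; a hedged sketch of the expected calling sequence (argument names are ours; the packet and callback types live in fw-transaction.h, not shown here):

/*
 *	ctx = fw_iso_context_create(card, type, channel, speed,
 *				    header_size, callback, callback_data);
 *	fw_iso_buffer_init(&buffer, card, page_count, direction);
 *	fw_iso_context_queue(ctx, &packet, &buffer, payload_offset);
 *	fw_iso_context_start(ctx, cycle, sync, tags);
 *	...
 *	fw_iso_context_stop(ctx);
 *	fw_iso_buffer_destroy(&buffer, card);
 *	fw_iso_context_destroy(ctx);
 */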
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
new file mode 100644
index 000000000000..1f5c70461b8b
--- /dev/null
+++ b/drivers/firewire/fw-ohci.c
@@ -0,0 +1,1943 @@
1/*
2 * Driver for OHCI 1394 controllers
3 *
4 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/pci.h>
26#include <linux/delay.h>
27#include <linux/poll.h>
28#include <linux/dma-mapping.h>
29
30#include <asm/uaccess.h>
31#include <asm/semaphore.h>
32
33#include "fw-transaction.h"
34#include "fw-ohci.h"
35
36#define DESCRIPTOR_OUTPUT_MORE 0
37#define DESCRIPTOR_OUTPUT_LAST (1 << 12)
38#define DESCRIPTOR_INPUT_MORE (2 << 12)
39#define DESCRIPTOR_INPUT_LAST (3 << 12)
40#define DESCRIPTOR_STATUS (1 << 11)
41#define DESCRIPTOR_KEY_IMMEDIATE (2 << 8)
42#define DESCRIPTOR_PING (1 << 7)
43#define DESCRIPTOR_YY (1 << 6)
44#define DESCRIPTOR_NO_IRQ (0 << 4)
45#define DESCRIPTOR_IRQ_ERROR (1 << 4)
46#define DESCRIPTOR_IRQ_ALWAYS (3 << 4)
47#define DESCRIPTOR_BRANCH_ALWAYS (3 << 2)
48#define DESCRIPTOR_WAIT (3 << 0)
49
50struct descriptor {
51 __le16 req_count;
52 __le16 control;
53 __le32 data_address;
54 __le32 branch_address;
55 __le16 res_count;
56 __le16 transfer_status;
57} __attribute__((aligned(16)));
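OHCI requires descriptors to be 16-byte blocks on 16-byte boundaries, which the aligned attribute above encodes. A quick standalone check (struct duplicated with fixed-width types purely for illustration):

#include <stdint.h>
#include <stdio.h>

struct descriptor {
	uint16_t req_count;
	uint16_t control;
	uint32_t data_address;
	uint32_t branch_address;
	uint16_t res_count;
	uint16_t transfer_status;
} __attribute__((aligned(16)));

int main(void)
{
	/* Both print 16: four quadlets, on a 16-byte boundary. */
	printf("size %zu align %zu\n", sizeof(struct descriptor),
	       __alignof__(struct descriptor));
	return 0;
}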
58
59struct db_descriptor {
60 __le16 first_size;
61 __le16 control;
62 __le16 second_req_count;
63 __le16 first_req_count;
64 __le32 branch_address;
65 __le16 second_res_count;
66 __le16 first_res_count;
67 __le32 reserved0;
68 __le32 first_buffer;
69 __le32 second_buffer;
70 __le32 reserved1;
71} __attribute__((aligned(16)));
72
73#define CONTROL_SET(regs) (regs)
74#define CONTROL_CLEAR(regs) ((regs) + 4)
75#define COMMAND_PTR(regs) ((regs) + 12)
76#define CONTEXT_MATCH(regs) ((regs) + 16)
77
78struct ar_buffer {
79 struct descriptor descriptor;
80 struct ar_buffer *next;
81 __le32 data[0];
82};
83
84struct ar_context {
85 struct fw_ohci *ohci;
86 struct ar_buffer *current_buffer;
87 struct ar_buffer *last_buffer;
88 void *pointer;
89 u32 regs;
90 struct tasklet_struct tasklet;
91};
92
93struct context;
94
95typedef int (*descriptor_callback_t)(struct context *ctx,
96 struct descriptor *d,
97 struct descriptor *last);
98struct context {
99 struct fw_ohci *ohci;
100 u32 regs;
101
102 struct descriptor *buffer;
103 dma_addr_t buffer_bus;
104 size_t buffer_size;
105 struct descriptor *head_descriptor;
106 struct descriptor *tail_descriptor;
107 struct descriptor *tail_descriptor_last;
108 struct descriptor *prev_descriptor;
109
110 descriptor_callback_t callback;
111
112 struct tasklet_struct tasklet;
113};
114
115#define IT_HEADER_SY(v) ((v) << 0)
116#define IT_HEADER_TCODE(v) ((v) << 4)
117#define IT_HEADER_CHANNEL(v) ((v) << 8)
118#define IT_HEADER_TAG(v) ((v) << 14)
119#define IT_HEADER_SPEED(v) ((v) << 16)
120#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
121
122struct iso_context {
123 struct fw_iso_context base;
124 struct context context;
125 void *header;
126 size_t header_length;
127};
128
129#define CONFIG_ROM_SIZE 1024
130
131struct fw_ohci {
132 struct fw_card card;
133
134 u32 version;
135	char __iomem *registers;
136 dma_addr_t self_id_bus;
137 __le32 *self_id_cpu;
138 struct tasklet_struct bus_reset_tasklet;
139 int node_id;
140 int generation;
141 int request_generation;
142 u32 bus_seconds;
143
144 /*
145 * Spinlock for accessing fw_ohci data. Never call out of
146 * this driver with this lock held.
147 */
148 spinlock_t lock;
149 u32 self_id_buffer[512];
150
151 /* Config rom buffers */
152 __be32 *config_rom;
153 dma_addr_t config_rom_bus;
154 __be32 *next_config_rom;
155 dma_addr_t next_config_rom_bus;
156 u32 next_header;
157
158 struct ar_context ar_request_ctx;
159 struct ar_context ar_response_ctx;
160 struct context at_request_ctx;
161 struct context at_response_ctx;
162
163 u32 it_context_mask;
164 struct iso_context *it_context_list;
165 u32 ir_context_mask;
166 struct iso_context *ir_context_list;
167};
168
169static inline struct fw_ohci *fw_ohci(struct fw_card *card)
170{
171 return container_of(card, struct fw_ohci, card);
172}
173
174#define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000
175#define IR_CONTEXT_BUFFER_FILL 0x80000000
176#define IR_CONTEXT_ISOCH_HEADER 0x40000000
177#define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000
178#define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000
179#define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000
180
181#define CONTEXT_RUN 0x8000
182#define CONTEXT_WAKE 0x1000
183#define CONTEXT_DEAD 0x0800
184#define CONTEXT_ACTIVE 0x0400
185
186#define OHCI1394_MAX_AT_REQ_RETRIES 0x2
187#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
188#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
189
190#define FW_OHCI_MAJOR 240
191#define OHCI1394_REGISTER_SIZE 0x800
192#define OHCI_LOOP_COUNT 500
193#define OHCI1394_PCI_HCI_Control 0x40
194#define SELF_ID_BUF_SIZE 0x800
195#define OHCI_TCODE_PHY_PACKET 0x0e
196#define OHCI_VERSION_1_1 0x010010
197#define ISO_BUFFER_SIZE (64 * 1024)
198#define AT_BUFFER_SIZE 4096
199
200static char ohci_driver_name[] = KBUILD_MODNAME;
201
202static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
203{
204 writel(data, ohci->registers + offset);
205}
206
207static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
208{
209 return readl(ohci->registers + offset);
210}
211
212static inline void flush_writes(const struct fw_ohci *ohci)
213{
214 /* Do a dummy read to flush writes. */
215 reg_read(ohci, OHCI1394_Version);
216}
217
218static int
219ohci_update_phy_reg(struct fw_card *card, int addr,
220 int clear_bits, int set_bits)
221{
222 struct fw_ohci *ohci = fw_ohci(card);
223 u32 val, old;
224
225 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
226 msleep(2);
227 val = reg_read(ohci, OHCI1394_PhyControl);
228 if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
229 fw_error("failed to set phy reg bits.\n");
230 return -EBUSY;
231 }
232
233 old = OHCI1394_PhyControl_ReadData(val);
234 old = (old & ~clear_bits) | set_bits;
235 reg_write(ohci, OHCI1394_PhyControl,
236 OHCI1394_PhyControl_Write(addr, old));
237
238 return 0;
239}
240
241static int ar_context_add_page(struct ar_context *ctx)
242{
243 struct device *dev = ctx->ohci->card.device;
244 struct ar_buffer *ab;
245 dma_addr_t ab_bus;
246 size_t offset;
247
248 ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
249 if (ab == NULL)
250 return -ENOMEM;
251
252 ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
253 if (dma_mapping_error(ab_bus)) {
254 free_page((unsigned long) ab);
255 return -ENOMEM;
256 }
257
258 memset(&ab->descriptor, 0, sizeof(ab->descriptor));
259 ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
260 DESCRIPTOR_STATUS |
261 DESCRIPTOR_BRANCH_ALWAYS);
262 offset = offsetof(struct ar_buffer, data);
263 ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
264 ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
265 ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
266 ab->descriptor.branch_address = 0;
267
268 dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
269
270 ctx->last_buffer->descriptor.branch_address = ab_bus | 1;
271 ctx->last_buffer->next = ab;
272 ctx->last_buffer = ab;
273
274 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
275 flush_writes(ctx->ohci);
276
277 return 0;
278}
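The "ab_bus | 1" above packs two things into branch_address: the 16-byte-aligned bus address of the next descriptor block and, in the low four bits, its Z value (the block size in descriptors; 1 here, a single INPUT_MORE descriptor). A standalone decode with a made-up address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t next_bus = 0x0badc0d0;		/* hypothetical, 16-byte aligned */
	uint32_t branch   = next_bus | 1;	/* Z = 1 */

	printf("addr 0x%08x z %u\n", branch & ~0xfu, branch & 0xfu);
	return 0;
}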
279
280static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
281{
282 struct fw_ohci *ohci = ctx->ohci;
283 struct fw_packet p;
284 u32 status, length, tcode;
285
286 p.header[0] = le32_to_cpu(buffer[0]);
287 p.header[1] = le32_to_cpu(buffer[1]);
288 p.header[2] = le32_to_cpu(buffer[2]);
289
290 tcode = (p.header[0] >> 4) & 0x0f;
291 switch (tcode) {
292 case TCODE_WRITE_QUADLET_REQUEST:
293 case TCODE_READ_QUADLET_RESPONSE:
294 p.header[3] = (__force __u32) buffer[3];
295 p.header_length = 16;
296 p.payload_length = 0;
297 break;
298
299	case TCODE_READ_BLOCK_REQUEST:
300 p.header[3] = le32_to_cpu(buffer[3]);
301 p.header_length = 16;
302 p.payload_length = 0;
303 break;
304
305 case TCODE_WRITE_BLOCK_REQUEST:
306 case TCODE_READ_BLOCK_RESPONSE:
307 case TCODE_LOCK_REQUEST:
308 case TCODE_LOCK_RESPONSE:
309 p.header[3] = le32_to_cpu(buffer[3]);
310 p.header_length = 16;
311 p.payload_length = p.header[3] >> 16;
312 break;
313
314 case TCODE_WRITE_RESPONSE:
315 case TCODE_READ_QUADLET_REQUEST:
316 case OHCI_TCODE_PHY_PACKET:
317 p.header_length = 12;
318 p.payload_length = 0;
319 break;
320 }
321
322 p.payload = (void *) buffer + p.header_length;
323
324 /* FIXME: What to do about evt_* errors? */
325 length = (p.header_length + p.payload_length + 3) / 4;
326 status = le32_to_cpu(buffer[length]);
327
328 p.ack = ((status >> 16) & 0x1f) - 16;
329 p.speed = (status >> 21) & 0x7;
330 p.timestamp = status & 0xffff;
331 p.generation = ohci->request_generation;
332
333 /*
334 * The OHCI bus reset handler synthesizes a phy packet with
335 * the new generation number when a bus reset happens (see
336 * section 8.4.2.3). This helps us determine when a request
337 * was received and make sure we send the response in the same
338 * generation. We only need this for requests; for responses
339 * we use the unique tlabel for finding the matching
340 * request.
341 */
342
343 if (p.ack + 16 == 0x09)
344 ohci->request_generation = (buffer[2] >> 16) & 0xff;
345 else if (ctx == &ohci->ar_request_ctx)
346 fw_core_handle_request(&ohci->card, &p);
347 else
348 fw_core_handle_response(&ohci->card, &p);
349
350 return buffer + length + 1;
351}
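handle_ar_packet() recovers the ack, speed and timestamp from the trailing status quadlet; a standalone decode of a made-up status word:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t status = 0x00311234;	/* hypothetical trailer */
	int ack            = ((status >> 16) & 0x1f) - 16;	/* 1 (ack_complete) */
	int speed          = (status >> 21) & 0x7;		/* 1 (S200) */
	unsigned timestamp = status & 0xffff;			/* 0x1234 */

	printf("ack %d speed %d timestamp 0x%04x\n", ack, speed, timestamp);
	return 0;
}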
352
353static void ar_context_tasklet(unsigned long data)
354{
355 struct ar_context *ctx = (struct ar_context *)data;
356 struct fw_ohci *ohci = ctx->ohci;
357 struct ar_buffer *ab;
358 struct descriptor *d;
359 void *buffer, *end;
360
361 ab = ctx->current_buffer;
362 d = &ab->descriptor;
363
364 if (d->res_count == 0) {
365 size_t size, rest, offset;
366
367 /*
368 * This descriptor is finished and we may have a
369 * packet split across this and the next buffer. We
370 * reuse the page for reassembling the split packet.
371 */
372
373 offset = offsetof(struct ar_buffer, data);
374 dma_unmap_single(ohci->card.device,
375 ab->descriptor.data_address - offset,
376 PAGE_SIZE, DMA_BIDIRECTIONAL);
377
378 buffer = ab;
379 ab = ab->next;
380 d = &ab->descriptor;
381 size = buffer + PAGE_SIZE - ctx->pointer;
382 rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
383 memmove(buffer, ctx->pointer, size);
384 memcpy(buffer + size, ab->data, rest);
385 ctx->current_buffer = ab;
386 ctx->pointer = (void *) ab->data + rest;
387 end = buffer + size + rest;
388
389 while (buffer < end)
390 buffer = handle_ar_packet(ctx, buffer);
391
392 free_page((unsigned long)buffer);
393 ar_context_add_page(ctx);
394 } else {
395 buffer = ctx->pointer;
396 ctx->pointer = end =
397 (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
398
399 while (buffer < end)
400 buffer = handle_ar_packet(ctx, buffer);
401 }
402}
403
404static int
405ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
406{
407 struct ar_buffer ab;
408
409 ctx->regs = regs;
410 ctx->ohci = ohci;
411 ctx->last_buffer = &ab;
412 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
413
414 ar_context_add_page(ctx);
415 ar_context_add_page(ctx);
416 ctx->current_buffer = ab.next;
417 ctx->pointer = ctx->current_buffer->data;
418
419 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab.descriptor.branch_address);
420 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
421 flush_writes(ctx->ohci);
422
423 return 0;
424}
425
426static void context_tasklet(unsigned long data)
427{
428 struct context *ctx = (struct context *) data;
429 struct fw_ohci *ohci = ctx->ohci;
430 struct descriptor *d, *last;
431 u32 address;
432 int z;
433
434 dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
435 ctx->buffer_size, DMA_TO_DEVICE);
436
437 d = ctx->tail_descriptor;
438 last = ctx->tail_descriptor_last;
439
440 while (last->branch_address != 0) {
441 address = le32_to_cpu(last->branch_address);
442 z = address & 0xf;
443 d = ctx->buffer + (address - ctx->buffer_bus) / sizeof(*d);
444 last = (z == 2) ? d : d + z - 1;
445
446 if (!ctx->callback(ctx, d, last))
447 break;
448
449 ctx->tail_descriptor = d;
450 ctx->tail_descriptor_last = last;
451 }
452}
453
454static int
455context_init(struct context *ctx, struct fw_ohci *ohci,
456 size_t buffer_size, u32 regs,
457 descriptor_callback_t callback)
458{
459 ctx->ohci = ohci;
460 ctx->regs = regs;
461 ctx->buffer_size = buffer_size;
462 ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
463 if (ctx->buffer == NULL)
464 return -ENOMEM;
465
466 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
467 ctx->callback = callback;
468
469 ctx->buffer_bus =
470 dma_map_single(ohci->card.device, ctx->buffer,
471 buffer_size, DMA_TO_DEVICE);
472 if (dma_mapping_error(ctx->buffer_bus)) {
473 kfree(ctx->buffer);
474 return -ENOMEM;
475 }
476
477 ctx->head_descriptor = ctx->buffer;
478 ctx->prev_descriptor = ctx->buffer;
479 ctx->tail_descriptor = ctx->buffer;
480 ctx->tail_descriptor_last = ctx->buffer;
481
482 /*
483 * We put a dummy descriptor in the buffer that has a NULL
484 * branch address and looks like it's been sent. That way we
485 * have a descriptor to append DMA programs to. Also, the
486 * ring buffer invariant is that it always has at least one
487 * element so that head == tail means buffer full.
488 */
489
490 memset(ctx->head_descriptor, 0, sizeof(*ctx->head_descriptor));
491 ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
492 ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
493 ctx->head_descriptor++;
494
495 return 0;
496}
497
498static void
499context_release(struct context *ctx)
500{
501 struct fw_card *card = &ctx->ohci->card;
502
503 dma_unmap_single(card->device, ctx->buffer_bus,
504 ctx->buffer_size, DMA_TO_DEVICE);
505 kfree(ctx->buffer);
506}
507
508static struct descriptor *
509context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
510{
511 struct descriptor *d, *tail, *end;
512
513 d = ctx->head_descriptor;
514 tail = ctx->tail_descriptor;
515 end = ctx->buffer + ctx->buffer_size / sizeof(*d);
516
517 if (d + z <= tail) {
518 goto has_space;
519 } else if (d > tail && d + z <= end) {
520 goto has_space;
521 } else if (d > tail && ctx->buffer + z <= tail) {
522 d = ctx->buffer;
523 goto has_space;
524 }
525
526 return NULL;
527
528 has_space:
529 memset(d, 0, z * sizeof(*d));
530 *d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
531
532 return d;
533}
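The three gotos above cover every placement of the head relative to the tail in the descriptor ring; an illustrative diagram (H = head, T = tail, "=" in flight, "." free, z descriptors requested):

/*
 *   |====H....T====|   head before tail: fits if H + z <= T
 *   |....T====H....|   head after tail: fits at the end if H + z <= end,
 *                      otherwise wraps to the start if start + z <= T
 *
 * Any other request would overwrite descriptors the controller still
 * owns, so NULL is returned.
 */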
534
535static void context_run(struct context *ctx, u32 extra)
536{
537 struct fw_ohci *ohci = ctx->ohci;
538
539 reg_write(ohci, COMMAND_PTR(ctx->regs),
540 le32_to_cpu(ctx->tail_descriptor_last->branch_address));
541 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
542 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
543 flush_writes(ohci);
544}
545
546static void context_append(struct context *ctx,
547 struct descriptor *d, int z, int extra)
548{
549 dma_addr_t d_bus;
550
551 d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
552
553 ctx->head_descriptor = d + z + extra;
554 ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
555 ctx->prev_descriptor = z == 2 ? d : d + z - 1;
556
557 dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
558 ctx->buffer_size, DMA_TO_DEVICE);
559
560 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
561 flush_writes(ctx->ohci);
562}
563
564static void context_stop(struct context *ctx)
565{
566 u32 reg;
567 int i;
568
569 reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
570 flush_writes(ctx->ohci);
571
572 for (i = 0; i < 10; i++) {
573 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
574 if ((reg & CONTEXT_ACTIVE) == 0)
575 break;
576
577 fw_notify("context_stop: still active (0x%08x)\n", reg);
578 msleep(1);
579 }
580}
581
582struct driver_data {
583 struct fw_packet *packet;
584};
585
586/*
587 * This function appends a packet to the DMA queue for transmission.
588 * Must always be called with the ohci->lock held to ensure proper
589 * generation handling and locking around packet queue manipulation.
590 */
591static int
592at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
593{
594 struct fw_ohci *ohci = ctx->ohci;
595 dma_addr_t d_bus, payload_bus;
596 struct driver_data *driver_data;
597 struct descriptor *d, *last;
598 __le32 *header;
599 int z, tcode;
600 u32 reg;
601
602 d = context_get_descriptors(ctx, 4, &d_bus);
603 if (d == NULL) {
604 packet->ack = RCODE_SEND_ERROR;
605 return -1;
606 }
607
608 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
609 d[0].res_count = cpu_to_le16(packet->timestamp);
610
611 /*
612	 * The DMA format for asynchronous link packets is different
613 * from the IEEE1394 layout, so shift the fields around
614 * accordingly. If header_length is 8, it's a PHY packet, to
615 * which we need to prepend an extra quadlet.
616 */
617
618 header = (__le32 *) &d[1];
619 if (packet->header_length > 8) {
620 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
621 (packet->speed << 16));
622 header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
623 (packet->header[0] & 0xffff0000));
624 header[2] = cpu_to_le32(packet->header[2]);
625
626 tcode = (packet->header[0] >> 4) & 0x0f;
627 if (TCODE_IS_BLOCK_PACKET(tcode))
628 header[3] = cpu_to_le32(packet->header[3]);
629 else
630 header[3] = (__force __le32) packet->header[3];
631
632 d[0].req_count = cpu_to_le16(packet->header_length);
633 } else {
634 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
635 (packet->speed << 16));
636 header[1] = cpu_to_le32(packet->header[0]);
637 header[2] = cpu_to_le32(packet->header[1]);
638 d[0].req_count = cpu_to_le16(12);
639 }
640
641 driver_data = (struct driver_data *) &d[3];
642 driver_data->packet = packet;
643 packet->driver_data = driver_data;
644
645 if (packet->payload_length > 0) {
646 payload_bus =
647 dma_map_single(ohci->card.device, packet->payload,
648 packet->payload_length, DMA_TO_DEVICE);
649 if (dma_mapping_error(payload_bus)) {
650 packet->ack = RCODE_SEND_ERROR;
651 return -1;
652 }
653
654 d[2].req_count = cpu_to_le16(packet->payload_length);
655 d[2].data_address = cpu_to_le32(payload_bus);
656 last = &d[2];
657 z = 3;
658 } else {
659 last = &d[0];
660 z = 2;
661 }
662
663 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
664 DESCRIPTOR_IRQ_ALWAYS |
665 DESCRIPTOR_BRANCH_ALWAYS);
666
667 /* FIXME: Document how the locking works. */
668 if (ohci->generation != packet->generation) {
669 packet->ack = RCODE_GENERATION;
670 return -1;
671 }
672
673 context_append(ctx, d, z, 4 - z);
674
675 /* If the context isn't already running, start it up. */
676 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
677 if ((reg & CONTEXT_RUN) == 0)
678 context_run(ctx, 0);
679
680 return 0;
681}
682
683static int handle_at_packet(struct context *context,
684 struct descriptor *d,
685 struct descriptor *last)
686{
687 struct driver_data *driver_data;
688 struct fw_packet *packet;
689 struct fw_ohci *ohci = context->ohci;
690 dma_addr_t payload_bus;
691 int evt;
692
693 if (last->transfer_status == 0)
694 /* This descriptor isn't done yet, stop iteration. */
695 return 0;
696
697 driver_data = (struct driver_data *) &d[3];
698 packet = driver_data->packet;
699 if (packet == NULL)
700 /* This packet was cancelled, just continue. */
701 return 1;
702
703 payload_bus = le32_to_cpu(last->data_address);
704 if (payload_bus != 0)
705 dma_unmap_single(ohci->card.device, payload_bus,
706 packet->payload_length, DMA_TO_DEVICE);
707
708 evt = le16_to_cpu(last->transfer_status) & 0x1f;
709 packet->timestamp = le16_to_cpu(last->res_count);
710
711 switch (evt) {
712 case OHCI1394_evt_timeout:
713 /* Async response transmit timed out. */
714 packet->ack = RCODE_CANCELLED;
715 break;
716
717 case OHCI1394_evt_flushed:
718 /*
719		 * A flushed packet should give the same error as
720		 * when we try to use a stale generation count.
721 */
722 packet->ack = RCODE_GENERATION;
723 break;
724
725 case OHCI1394_evt_missing_ack:
726 /*
727 * Using a valid (current) generation count, but the
728 * node is not on the bus or not sending acks.
729 */
730 packet->ack = RCODE_NO_ACK;
731 break;
732
733 case ACK_COMPLETE + 0x10:
734 case ACK_PENDING + 0x10:
735 case ACK_BUSY_X + 0x10:
736 case ACK_BUSY_A + 0x10:
737 case ACK_BUSY_B + 0x10:
738 case ACK_DATA_ERROR + 0x10:
739 case ACK_TYPE_ERROR + 0x10:
740 packet->ack = evt - 0x10;
741 break;
742
743 default:
744 packet->ack = RCODE_SEND_ERROR;
745 break;
746 }
747
748 packet->callback(packet, &ohci->card, packet->ack);
749
750 return 1;
751}
752
753#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
754#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
755#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
756#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
757#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
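A quick standalone exercise of the accessor macros above, on made-up header quadlets shaped like a block write request:

#include <stdint.h>
#include <stdio.h>

#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)

int main(void)
{
	uint32_t h0 = 0xffc00110;	/* destination 0xffc0, tcode 1 */
	uint32_t h3 = 0x00080000;	/* data_length 8 */

	printf("dest 0x%04x tcode %u len %u\n",
	       (unsigned)HEADER_GET_DESTINATION(h0),
	       (unsigned)HEADER_GET_TCODE(h0),
	       (unsigned)HEADER_GET_DATA_LENGTH(h3));
	return 0;
}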
758
759static void
760handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
761{
762 struct fw_packet response;
763 int tcode, length, i;
764
765 tcode = HEADER_GET_TCODE(packet->header[0]);
766 if (TCODE_IS_BLOCK_PACKET(tcode))
767 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
768 else
769 length = 4;
770
771 i = csr - CSR_CONFIG_ROM;
772 if (i + length > CONFIG_ROM_SIZE) {
773 fw_fill_response(&response, packet->header,
774 RCODE_ADDRESS_ERROR, NULL, 0);
775 } else if (!TCODE_IS_READ_REQUEST(tcode)) {
776 fw_fill_response(&response, packet->header,
777 RCODE_TYPE_ERROR, NULL, 0);
778 } else {
779 fw_fill_response(&response, packet->header, RCODE_COMPLETE,
780 (void *) ohci->config_rom + i, length);
781 }
782
783 fw_core_handle_response(&ohci->card, &response);
784}
785
786static void
787handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
788{
789 struct fw_packet response;
790 int tcode, length, ext_tcode, sel;
791	__be32 *payload, lock_old = 0;
792 u32 lock_arg, lock_data;
793
794 tcode = HEADER_GET_TCODE(packet->header[0]);
795 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
796 payload = packet->payload;
797 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
798
799 if (tcode == TCODE_LOCK_REQUEST &&
800 ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
801 lock_arg = be32_to_cpu(payload[0]);
802 lock_data = be32_to_cpu(payload[1]);
803 } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
804 lock_arg = 0;
805 lock_data = 0;
806 } else {
807 fw_fill_response(&response, packet->header,
808 RCODE_TYPE_ERROR, NULL, 0);
809 goto out;
810 }
811
812 sel = (csr - CSR_BUS_MANAGER_ID) / 4;
813 reg_write(ohci, OHCI1394_CSRData, lock_data);
814 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
815 reg_write(ohci, OHCI1394_CSRControl, sel);
816
817 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
818 lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
819 else
820 fw_notify("swap not done yet\n");
821
822 fw_fill_response(&response, packet->header,
823 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
824 out:
825 fw_core_handle_response(&ohci->card, &response);
826}
827
828static void
829handle_local_request(struct context *ctx, struct fw_packet *packet)
830{
831 u64 offset;
832 u32 csr;
833
834 if (ctx == &ctx->ohci->at_request_ctx) {
835 packet->ack = ACK_PENDING;
836 packet->callback(packet, &ctx->ohci->card, packet->ack);
837 }
838
839 offset =
840 ((unsigned long long)
841 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
842 packet->header[2];
843 csr = offset - CSR_REGISTER_BASE;
844
845 /* Handle config rom reads. */
846 if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
847 handle_local_rom(ctx->ohci, packet, csr);
848 else switch (csr) {
849 case CSR_BUS_MANAGER_ID:
850 case CSR_BANDWIDTH_AVAILABLE:
851 case CSR_CHANNELS_AVAILABLE_HI:
852 case CSR_CHANNELS_AVAILABLE_LO:
853 handle_local_lock(ctx->ohci, packet, csr);
854 break;
855 default:
856 if (ctx == &ctx->ohci->at_request_ctx)
857 fw_core_handle_request(&ctx->ohci->card, packet);
858 else
859 fw_core_handle_response(&ctx->ohci->card, packet);
860 break;
861 }
862
863 if (ctx == &ctx->ohci->at_response_ctx) {
864 packet->ack = ACK_COMPLETE;
865 packet->callback(packet, &ctx->ohci->card, packet->ack);
866 }
867}
868
869static void
870at_context_transmit(struct context *ctx, struct fw_packet *packet)
871{
872 unsigned long flags;
873 int retval;
874
875 spin_lock_irqsave(&ctx->ohci->lock, flags);
876
877 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
878 ctx->ohci->generation == packet->generation) {
879 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
880 handle_local_request(ctx, packet);
881 return;
882 }
883
884 retval = at_context_queue_packet(ctx, packet);
885 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
886
887 if (retval < 0)
888 packet->callback(packet, &ctx->ohci->card, packet->ack);
889
890}
891
892static void bus_reset_tasklet(unsigned long data)
893{
894 struct fw_ohci *ohci = (struct fw_ohci *)data;
895 int self_id_count, i, j, reg;
896 int generation, new_generation;
897 unsigned long flags;
898
899 reg = reg_read(ohci, OHCI1394_NodeID);
900 if (!(reg & OHCI1394_NodeID_idValid)) {
901 fw_error("node ID not valid, new bus reset in progress\n");
902 return;
903 }
904 ohci->node_id = reg & 0xffff;
905
906 /*
907 * The count in the SelfIDCount register is the number of
908 * bytes in the self ID receive buffer. Since we also receive
909 * the inverted quadlets and a header quadlet, we shift one
910 * bit extra to get the actual number of self IDs.
911 */
912
913 self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
914 generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
915
916 for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
917 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
918 fw_error("inconsistent self IDs\n");
919 ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
920 }
921
922 /*
923 * Check the consistency of the self IDs we just read. The
924 * problem we face is that a new bus reset can start while we
925 * read out the self IDs from the DMA buffer. If this happens,
926 * the DMA buffer will be overwritten with new self IDs and we
927 * will read out inconsistent data. The OHCI specification
928 * (section 11.2) recommends a technique similar to
929 * linux/seqlock.h, where we remember the generation of the
930 * self IDs in the buffer before reading them out and compare
931 * it to the current generation after reading them out. If
932 * the two generations match we know we have a consistent set
933 * of self IDs.
934 */
935
936 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
937 if (new_generation != generation) {
938 fw_notify("recursive bus reset detected, "
939 "discarding self ids\n");
940 return;
941 }
942
943 /* FIXME: Document how the locking works. */
944 spin_lock_irqsave(&ohci->lock, flags);
945
946 ohci->generation = generation;
947 context_stop(&ohci->at_request_ctx);
948 context_stop(&ohci->at_response_ctx);
949 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
950
951 /*
952 * This next bit is unrelated to the AT context stuff but we
953 * have to do it under the spinlock also. If a new config rom
954 * was set up before this reset, the old one is now no longer
955 * in use and we can free it. Update the config rom pointers
956 * to point to the current config rom and clear the
957	 * next_config_rom pointer so a new update can take place.
958 */
959
960 if (ohci->next_config_rom != NULL) {
961 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
962 ohci->config_rom, ohci->config_rom_bus);
963 ohci->config_rom = ohci->next_config_rom;
964 ohci->config_rom_bus = ohci->next_config_rom_bus;
965 ohci->next_config_rom = NULL;
966
967 /*
968 * Restore config_rom image and manually update
969 * config_rom registers. Writing the header quadlet
970 * will indicate that the config rom is ready, so we
971 * do that last.
972 */
973 reg_write(ohci, OHCI1394_BusOptions,
974 be32_to_cpu(ohci->config_rom[2]));
975 ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
976 reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
977 }
978
979 spin_unlock_irqrestore(&ohci->lock, flags);
980
981 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
982 self_id_count, ohci->self_id_buffer);
983}
984
985static irqreturn_t irq_handler(int irq, void *data)
986{
987 struct fw_ohci *ohci = data;
988 u32 event, iso_event, cycle_time;
989 int i;
990
991 event = reg_read(ohci, OHCI1394_IntEventClear);
992
993 if (!event)
994 return IRQ_NONE;
995
996 reg_write(ohci, OHCI1394_IntEventClear, event);
997
998 if (event & OHCI1394_selfIDComplete)
999 tasklet_schedule(&ohci->bus_reset_tasklet);
1000
1001 if (event & OHCI1394_RQPkt)
1002 tasklet_schedule(&ohci->ar_request_ctx.tasklet);
1003
1004 if (event & OHCI1394_RSPkt)
1005 tasklet_schedule(&ohci->ar_response_ctx.tasklet);
1006
1007 if (event & OHCI1394_reqTxComplete)
1008 tasklet_schedule(&ohci->at_request_ctx.tasklet);
1009
1010 if (event & OHCI1394_respTxComplete)
1011 tasklet_schedule(&ohci->at_response_ctx.tasklet);
1012
1013 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
1014 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
1015
1016 while (iso_event) {
1017 i = ffs(iso_event) - 1;
1018 tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
1019 iso_event &= ~(1 << i);
1020 }
1021
1022 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
1023 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
1024
1025 while (iso_event) {
1026 i = ffs(iso_event) - 1;
1027 tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
1028 iso_event &= ~(1 << i);
1029 }
1030
1031 if (event & OHCI1394_cycle64Seconds) {
1032 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1033 if ((cycle_time & 0x80000000) == 0)
1034 ohci->bus_seconds++;
1035 }
1036
1037 return IRQ_HANDLED;
1038}
1039
1040static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
1041{
1042 struct fw_ohci *ohci = fw_ohci(card);
1043 struct pci_dev *dev = to_pci_dev(card->device);
1044
1045 /*
1046 * When the link is not yet enabled, the atomic config rom
1047 * update mechanism described below in ohci_set_config_rom()
1048 * is not active. We have to update ConfigRomHeader and
1049 * BusOptions manually, and the write to ConfigROMmap takes
1050 * effect immediately. We tie this to the enabling of the
1051 * link, so we have a valid config rom before enabling - the
1052 * OHCI requires that ConfigROMhdr and BusOptions have valid
1053 * values before enabling.
1054 *
1055 * However, when the ConfigROMmap is written, some controllers
1056 * always read back quadlets 0 and 2 from the config rom to
1057 * the ConfigRomHeader and BusOptions registers on bus reset.
1058 * They shouldn't do that in this initial case where the link
1059 * isn't enabled. This means we have to use the same
1060	 * workaround here, setting the bus header to 0 and then writing
1061	 * the right values in the bus reset tasklet.
1062 */
1063
1064 ohci->next_config_rom =
1065 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1066 &ohci->next_config_rom_bus, GFP_KERNEL);
1067 if (ohci->next_config_rom == NULL)
1068 return -ENOMEM;
1069
1070 memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
1071 fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);
1072
1073 ohci->next_header = config_rom[0];
1074 ohci->next_config_rom[0] = 0;
1075 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
1076 reg_write(ohci, OHCI1394_BusOptions, config_rom[2]);
1077 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
1078
1079 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
1080
1081 if (request_irq(dev->irq, irq_handler,
1082 IRQF_SHARED, ohci_driver_name, ohci)) {
1083 fw_error("Failed to allocate shared interrupt %d.\n",
1084 dev->irq);
1085 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1086 ohci->config_rom, ohci->config_rom_bus);
1087 return -EIO;
1088 }
1089
1090 reg_write(ohci, OHCI1394_HCControlSet,
1091 OHCI1394_HCControl_linkEnable |
1092 OHCI1394_HCControl_BIBimageValid);
1093 flush_writes(ohci);
1094
1095 /*
1096 * We are ready to go, initiate bus reset to finish the
1097 * initialization.
1098 */
1099
1100 fw_core_initiate_bus_reset(&ohci->card, 1);
1101
1102 return 0;
1103}
1104
1105static int
1106ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
1107{
1108 struct fw_ohci *ohci;
1109 unsigned long flags;
1110 int retval = 0;
1111 __be32 *next_config_rom;
1112 dma_addr_t next_config_rom_bus;
1113
1114 ohci = fw_ohci(card);
1115
1116 /*
1117 * When the OHCI controller is enabled, the config rom update
1118 * mechanism is a bit tricky, but easy enough to use. See
1119 * section 5.5.6 in the OHCI specification.
1120 *
1121 * The OHCI controller caches the new config rom address in a
1122 * shadow register (ConfigROMmapNext) and needs a bus reset
1123 * for the changes to take place. When the bus reset is
1124 * detected, the controller loads the new values for the
1125 * ConfigRomHeader and BusOptions registers from the specified
1126 * config rom and loads ConfigROMmap from the ConfigROMmapNext
1127 * shadow register. All automatically and atomically.
1128 *
1129 * Now, there's a twist to this story. The automatic load of
1130 * ConfigRomHeader and BusOptions doesn't honor the
1131 * noByteSwapData bit, so with a be32 config rom, the
1132 * controller will load be32 values in to these registers
1133	 * during the atomic update, even on little endian
1134 * architectures. The workaround we use is to put a 0 in the
1135 * header quadlet; 0 is endian agnostic and means that the
1136 * config rom isn't ready yet. In the bus reset tasklet we
1137 * then set up the real values for the two registers.
1138 *
1139 * We use ohci->lock to avoid racing with the code that sets
1140 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
1141 */
1142
1143 next_config_rom =
1144 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1145 &next_config_rom_bus, GFP_KERNEL);
1146 if (next_config_rom == NULL)
1147 return -ENOMEM;
1148
1149 spin_lock_irqsave(&ohci->lock, flags);
1150
1151 if (ohci->next_config_rom == NULL) {
1152 ohci->next_config_rom = next_config_rom;
1153 ohci->next_config_rom_bus = next_config_rom_bus;
1154
1155 memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
1156 fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
1157 length * 4);
1158
1159 ohci->next_header = config_rom[0];
1160 ohci->next_config_rom[0] = 0;
1161
1162 reg_write(ohci, OHCI1394_ConfigROMmap,
1163 ohci->next_config_rom_bus);
1164 } else {
1165 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1166 next_config_rom, next_config_rom_bus);
1167 retval = -EBUSY;
1168 }
1169
1170 spin_unlock_irqrestore(&ohci->lock, flags);
1171
1172 /*
1173 * Now initiate a bus reset to have the changes take
1174 * effect. We clean up the old config rom memory and DMA
1175 * mappings in the bus reset tasklet, since the OHCI
1176 * controller could need to access it before the bus reset
1177 * takes effect.
1178 */
1179 if (retval == 0)
1180 fw_core_initiate_bus_reset(&ohci->card, 1);
1181
1182 return retval;
1183}
1184
1185static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
1186{
1187 struct fw_ohci *ohci = fw_ohci(card);
1188
1189 at_context_transmit(&ohci->at_request_ctx, packet);
1190}
1191
1192static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
1193{
1194 struct fw_ohci *ohci = fw_ohci(card);
1195
1196 at_context_transmit(&ohci->at_response_ctx, packet);
1197}
1198
1199static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1200{
1201 struct fw_ohci *ohci = fw_ohci(card);
1202 struct context *ctx = &ohci->at_request_ctx;
1203 struct driver_data *driver_data = packet->driver_data;
1204 int retval = -ENOENT;
1205
1206 tasklet_disable(&ctx->tasklet);
1207
1208 if (packet->ack != 0)
1209 goto out;
1210
1211 driver_data->packet = NULL;
1212 packet->ack = RCODE_CANCELLED;
1213 packet->callback(packet, &ohci->card, packet->ack);
1214 retval = 0;
1215
1216 out:
1217 tasklet_enable(&ctx->tasklet);
1218
1219 return retval;
1220}
1221
1222static int
1223ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
1224{
1225 struct fw_ohci *ohci = fw_ohci(card);
1226 unsigned long flags;
1227 int n, retval = 0;
1228
1229 /*
1230 * FIXME: Make sure this bitmask is cleared when we clear the busReset
1231 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
1232 */
1233
1234 spin_lock_irqsave(&ohci->lock, flags);
1235
1236 if (ohci->generation != generation) {
1237 retval = -ESTALE;
1238 goto out;
1239 }
1240
1241 /*
1242 * Note, if the node ID contains a non-local bus ID, physical DMA is
1243 * enabled for _all_ nodes on remote buses.
1244 */
1245
1246 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
1247 if (n < 32)
1248 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
1249 else
1250 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
1251
1252 flush_writes(ohci);
1253 out:
1254 spin_unlock_irqrestore(&ohci->lock, flags);
1255 return retval;
1256}
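The filter-bit arithmetic above is worth a worked example: LOCAL_BUS is the all-ones 10-bit bus ID in the upper bits of the node ID, i.e. 0xffc0. A standalone sketch of the same computation (hypothetical helper name):

	/* Node 5 on the local bus has node_id 0xffc5, so this returns 5 and
	 * ohci_enable_phys_dma() sets bit 5 of PhyReqFilterLo.  Any node on
	 * a remote bus maps to bit 63 (physReqResourceAllBuses) in
	 * PhyReqFilterHi, enabling physical DMA for all remote nodes. */
	static unsigned int phys_dma_filter_bit(int node_id)
	{
		return (node_id & 0xffc0) == 0xffc0 ? node_id & 0x3f : 63;
	}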
1257
1258static u64
1259ohci_get_bus_time(struct fw_card *card)
1260{
1261 struct fw_ohci *ohci = fw_ohci(card);
1262 u32 cycle_time;
1263 u64 bus_time;
1264
1265 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1266 bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;
1267
1268 return bus_time;
1269}
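ohci_get_bus_time() splices the driver-maintained seconds counter (bumped by the cycle64Seconds interrupt) into the upper 32 bits above the raw cycle timer. The cycle timer itself packs three fields; a minimal decode sketch (helper name is ours, layout per the OHCI isochronousCycleTimer register):

	/* [31:25] cycleSeconds, [24:12] cycleCount (0..7999),
	 * [11:0] cycleOffset in 24.576 MHz ticks (0..3071). */
	static inline void cycle_timer_decode(u32 ct, unsigned int *seconds,
					      unsigned int *count,
					      unsigned int *offset)
	{
		*seconds = ct >> 25;
		*count   = (ct >> 12) & 0x1fff;
		*offset  = ct & 0xfff;
	}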
1270
1271static int handle_ir_dualbuffer_packet(struct context *context,
1272 struct descriptor *d,
1273 struct descriptor *last)
1274{
1275 struct iso_context *ctx =
1276 container_of(context, struct iso_context, context);
1277 struct db_descriptor *db = (struct db_descriptor *) d;
1278 __le32 *ir_header;
1279 size_t header_length;
1280 void *p, *end;
1281 int i;
1282
1283 if (db->first_res_count > 0 && db->second_res_count > 0)
1284 /* This descriptor isn't done yet, stop iteration. */
1285 return 0;
1286
1287 header_length = le16_to_cpu(db->first_req_count) -
1288 le16_to_cpu(db->first_res_count);
1289
1290 i = ctx->header_length;
1291 p = db + 1;
1292 end = p + header_length;
1293 while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
1294 /*
1295 * The iso header is byteswapped to little endian by
1296 * the controller, but the remaining header quadlets
1297 * are big endian. We want to present all the headers
1298 * as big endian, so we have to swap the first
1299 * quadlet.
1300 */
1301 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1302 memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
1303 i += ctx->base.header_size;
1304 p += ctx->base.header_size + 4;
1305 }
1306
1307 ctx->header_length = i;
1308
1309 if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
1310 ir_header = (__le32 *) (db + 1);
1311 ctx->base.callback(&ctx->base,
1312 le32_to_cpu(ir_header[0]) & 0xffff,
1313 ctx->header_length, ctx->header,
1314 ctx->base.callback_data);
1315 ctx->header_length = 0;
1316 }
1317
1318 return 1;
1319}
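Seen from the client side, each entry accumulated above is ctx->base.header_size bytes: the re-swapped iso header quadlet followed by the remaining big-endian header quadlets. A sketch of a consumer callback under that layout (function name and message are hypothetical):

	static void my_iso_rx_callback(struct fw_iso_context *context, u32 cycle,
				       size_t header_length, void *header,
				       void *data)
	{
		__be32 *h = header;
		size_t i;

		/* dataLength sits in the upper 16 bits of each packet's
		 * first header quadlet. */
		for (i = 0; i < header_length / 4; i += context->header_size / 4)
			printk(KERN_DEBUG "rx packet, %u bytes\n",
			       be32_to_cpu(h[i]) >> 16);
	}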
1320
1321static int handle_it_packet(struct context *context,
1322 struct descriptor *d,
1323 struct descriptor *last)
1324{
1325 struct iso_context *ctx =
1326 container_of(context, struct iso_context, context);
1327
1328 if (last->transfer_status == 0)
1329 /* This descriptor isn't done yet, stop iteration. */
1330 return 0;
1331
1332 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
1333 ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
1334 0, NULL, ctx->base.callback_data);
1335
1336 return 1;
1337}
1338
1339static struct fw_iso_context *
1340ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
1341{
1342 struct fw_ohci *ohci = fw_ohci(card);
1343 struct iso_context *ctx, *list;
1344 descriptor_callback_t callback;
1345 u32 *mask, regs;
1346 unsigned long flags;
1347 int index, retval = -ENOMEM;
1348
1349 if (type == FW_ISO_CONTEXT_TRANSMIT) {
1350 mask = &ohci->it_context_mask;
1351 list = ohci->it_context_list;
1352 callback = handle_it_packet;
1353 } else {
1354 mask = &ohci->ir_context_mask;
1355 list = ohci->ir_context_list;
1356 callback = handle_ir_dualbuffer_packet;
1357 }
1358
1359 /* FIXME: We need a fallback for pre 1.1 OHCI. */
1360 if (callback == handle_ir_dualbuffer_packet &&
1361 ohci->version < OHCI_VERSION_1_1)
1362 return ERR_PTR(-EINVAL);
1363
1364 spin_lock_irqsave(&ohci->lock, flags);
1365 index = ffs(*mask) - 1;
1366 if (index >= 0)
1367 *mask &= ~(1 << index);
1368 spin_unlock_irqrestore(&ohci->lock, flags);
1369
1370 if (index < 0)
1371 return ERR_PTR(-EBUSY);
1372
1373 if (type == FW_ISO_CONTEXT_TRANSMIT)
1374 regs = OHCI1394_IsoXmitContextBase(index);
1375 else
1376 regs = OHCI1394_IsoRcvContextBase(index);
1377
1378 ctx = &list[index];
1379 memset(ctx, 0, sizeof(*ctx));
1380 ctx->header_length = 0;
1381 ctx->header = (void *) __get_free_page(GFP_KERNEL);
1382 if (ctx->header == NULL)
1383 goto out;
1384
1385 retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
1386 regs, callback);
1387 if (retval < 0)
1388 goto out_with_header;
1389
1390 return &ctx->base;
1391
1392 out_with_header:
1393 free_page((unsigned long)ctx->header);
1394 out:
1395 spin_lock_irqsave(&ohci->lock, flags);
1396 *mask |= 1 << index;
1397 spin_unlock_irqrestore(&ohci->lock, flags);
1398
1399 return ERR_PTR(retval);
1400}
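To make the mask-based allocation concrete: each set bit in it_context_mask/ir_context_mask marks a free hardware context, and ffs() is 1-based, so for example:

	u32 mask = 0x30;              /* contexts 4 and 5 free */
	int index = ffs(mask) - 1;    /* ffs(0x30) == 5, so index == 4 */
	mask &= ~(1 << index);        /* claim context 4; mask is now 0x20 */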
1401
1402static int ohci_start_iso(struct fw_iso_context *base,
1403 s32 cycle, u32 sync, u32 tags)
1404{
1405 struct iso_context *ctx = container_of(base, struct iso_context, base);
1406 struct fw_ohci *ohci = ctx->context.ohci;
1407 u32 control, match;
1408 int index;
1409
1410 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
1411 index = ctx - ohci->it_context_list;
1412 match = 0;
1413 if (cycle >= 0)
1414 match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
1415 (cycle & 0x7fff) << 16;
1416
1417 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
1418 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
1419 context_run(&ctx->context, match);
1420 } else {
1421 index = ctx - ohci->ir_context_list;
1422 control = IR_CONTEXT_DUAL_BUFFER_MODE | IR_CONTEXT_ISOCH_HEADER;
1423 match = (tags << 28) | (sync << 8) | ctx->base.channel;
1424 if (cycle >= 0) {
1425 match |= (cycle & 0x07fff) << 12;
1426 control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
1427 }
1428
1429 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
1430 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
1431 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
1432 context_run(&ctx->context, control);
1433 }
1434
1435 return 0;
1436}
1437
1438static int ohci_stop_iso(struct fw_iso_context *base)
1439{
1440 struct fw_ohci *ohci = fw_ohci(base->card);
1441 struct iso_context *ctx = container_of(base, struct iso_context, base);
1442 int index;
1443
1444 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
1445 index = ctx - ohci->it_context_list;
1446 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
1447 } else {
1448 index = ctx - ohci->ir_context_list;
1449 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
1450 }
1451 flush_writes(ohci);
1452 context_stop(&ctx->context);
1453
1454 return 0;
1455}
1456
1457static void ohci_free_iso_context(struct fw_iso_context *base)
1458{
1459 struct fw_ohci *ohci = fw_ohci(base->card);
1460 struct iso_context *ctx = container_of(base, struct iso_context, base);
1461 unsigned long flags;
1462 int index;
1463
1464 ohci_stop_iso(base);
1465 context_release(&ctx->context);
1466 free_page((unsigned long)ctx->header);
1467
1468 spin_lock_irqsave(&ohci->lock, flags);
1469
1470 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
1471 index = ctx - ohci->it_context_list;
1472 ohci->it_context_mask |= 1 << index;
1473 } else {
1474 index = ctx - ohci->ir_context_list;
1475 ohci->ir_context_mask |= 1 << index;
1476 }
1477
1478 spin_unlock_irqrestore(&ohci->lock, flags);
1479}
1480
1481static int
1482ohci_queue_iso_transmit(struct fw_iso_context *base,
1483 struct fw_iso_packet *packet,
1484 struct fw_iso_buffer *buffer,
1485 unsigned long payload)
1486{
1487 struct iso_context *ctx = container_of(base, struct iso_context, base);
1488 struct descriptor *d, *last, *pd;
1489 struct fw_iso_packet *p;
1490 __le32 *header;
1491 dma_addr_t d_bus, page_bus;
1492 u32 z, header_z, payload_z, irq;
1493 u32 payload_index, payload_end_index, next_page_index;
1494 int page, end_page, i, length, offset;
1495
1496 /*
1497 * FIXME: Cycle lost behavior should be configurable: lose
1498	 * packet, retransmit or terminate.
1499 */
1500
1501 p = packet;
1502 payload_index = payload;
1503
1504 if (p->skip)
1505 z = 1;
1506 else
1507 z = 2;
1508 if (p->header_length > 0)
1509 z++;
1510
1511 /* Determine the first page the payload isn't contained in. */
1512 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
1513 if (p->payload_length > 0)
1514 payload_z = end_page - (payload_index >> PAGE_SHIFT);
1515 else
1516 payload_z = 0;
1517
1518 z += payload_z;
1519
1520 /* Get header size in number of descriptors. */
1521 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
1522
1523 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
1524 if (d == NULL)
1525 return -ENOMEM;
1526
1527 if (!p->skip) {
1528 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
1529 d[0].req_count = cpu_to_le16(8);
1530
1531 header = (__le32 *) &d[1];
1532 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
1533 IT_HEADER_TAG(p->tag) |
1534 IT_HEADER_TCODE(TCODE_STREAM_DATA) |
1535 IT_HEADER_CHANNEL(ctx->base.channel) |
1536 IT_HEADER_SPEED(ctx->base.speed));
1537 header[1] =
1538 cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
1539 p->payload_length));
1540 }
1541
1542 if (p->header_length > 0) {
1543 d[2].req_count = cpu_to_le16(p->header_length);
1544 d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
1545 memcpy(&d[z], p->header, p->header_length);
1546 }
1547
1548 pd = d + z - payload_z;
1549 payload_end_index = payload_index + p->payload_length;
1550 for (i = 0; i < payload_z; i++) {
1551 page = payload_index >> PAGE_SHIFT;
1552 offset = payload_index & ~PAGE_MASK;
1553 next_page_index = (page + 1) << PAGE_SHIFT;
1554 length =
1555 min(next_page_index, payload_end_index) - payload_index;
1556 pd[i].req_count = cpu_to_le16(length);
1557
1558 page_bus = page_private(buffer->pages[page]);
1559 pd[i].data_address = cpu_to_le32(page_bus + offset);
1560
1561 payload_index += length;
1562 }
1563
1564 if (p->interrupt)
1565 irq = DESCRIPTOR_IRQ_ALWAYS;
1566 else
1567 irq = DESCRIPTOR_NO_IRQ;
1568
1569 last = z == 2 ? d : d + z - 1;
1570 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
1571 DESCRIPTOR_STATUS |
1572 DESCRIPTOR_BRANCH_ALWAYS |
1573 irq);
1574
1575 context_append(&ctx->context, d, z, header_z);
1576
1577 return 0;
1578}
1579
1580static int
1581ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
1582 struct fw_iso_packet *packet,
1583 struct fw_iso_buffer *buffer,
1584 unsigned long payload)
1585{
1586 struct iso_context *ctx = container_of(base, struct iso_context, base);
1587 struct db_descriptor *db = NULL;
1588 struct descriptor *d;
1589 struct fw_iso_packet *p;
1590 dma_addr_t d_bus, page_bus;
1591 u32 z, header_z, length, rest;
1592 int page, offset, packet_count, header_size;
1593
1594 /*
1595 * FIXME: Cycle lost behavior should be configurable: lose
1596	 * packet, retransmit or terminate.
1597 */
1598
1599 if (packet->skip) {
1600 d = context_get_descriptors(&ctx->context, 2, &d_bus);
1601 if (d == NULL)
1602 return -ENOMEM;
1603
1604 db = (struct db_descriptor *) d;
1605 db->control = cpu_to_le16(DESCRIPTOR_STATUS |
1606 DESCRIPTOR_BRANCH_ALWAYS |
1607 DESCRIPTOR_WAIT);
1608 db->first_size = cpu_to_le16(ctx->base.header_size + 4);
1609 context_append(&ctx->context, d, 2, 0);
1610 }
1611
1612 p = packet;
1613 z = 2;
1614
1615 /*
1616 * The OHCI controller puts the status word in the header
1617 * buffer too, so we need 4 extra bytes per packet.
1618 */
1619 packet_count = p->header_length / ctx->base.header_size;
1620 header_size = packet_count * (ctx->base.header_size + 4);
1621
1622 /* Get header size in number of descriptors. */
1623 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
1624 page = payload >> PAGE_SHIFT;
1625 offset = payload & ~PAGE_MASK;
1626 rest = p->payload_length;
1627
1628 /* FIXME: OHCI 1.0 doesn't support dual buffer receive */
1629 /* FIXME: make packet-per-buffer/dual-buffer a context option */
1630 while (rest > 0) {
1631 d = context_get_descriptors(&ctx->context,
1632 z + header_z, &d_bus);
1633 if (d == NULL)
1634 return -ENOMEM;
1635
1636 db = (struct db_descriptor *) d;
1637 db->control = cpu_to_le16(DESCRIPTOR_STATUS |
1638 DESCRIPTOR_BRANCH_ALWAYS);
1639 db->first_size = cpu_to_le16(ctx->base.header_size + 4);
1640 db->first_req_count = cpu_to_le16(header_size);
1641 db->first_res_count = db->first_req_count;
1642 db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
1643
1644 if (offset + rest < PAGE_SIZE)
1645 length = rest;
1646 else
1647 length = PAGE_SIZE - offset;
1648
1649 db->second_req_count = cpu_to_le16(length);
1650 db->second_res_count = db->second_req_count;
1651 page_bus = page_private(buffer->pages[page]);
1652 db->second_buffer = cpu_to_le32(page_bus + offset);
1653
1654 if (p->interrupt && length == rest)
1655 db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
1656
1657 context_append(&ctx->context, d, z, header_z);
1658 offset = (offset + length) & ~PAGE_MASK;
1659 rest -= length;
1660 page++;
1661 }
1662
1663 return 0;
1664}
1665
1666static int
1667ohci_queue_iso(struct fw_iso_context *base,
1668 struct fw_iso_packet *packet,
1669 struct fw_iso_buffer *buffer,
1670 unsigned long payload)
1671{
1672 struct iso_context *ctx = container_of(base, struct iso_context, base);
1673
1674 if (base->type == FW_ISO_CONTEXT_TRANSMIT)
1675 return ohci_queue_iso_transmit(base, packet, buffer, payload);
1676 else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
1677 return ohci_queue_iso_receive_dualbuffer(base, packet,
1678 buffer, payload);
1679 else
1680 /* FIXME: Implement fallback for OHCI 1.0 controllers. */
1681 return -EINVAL;
1682}
1683
1684static const struct fw_card_driver ohci_driver = {
1685 .name = ohci_driver_name,
1686 .enable = ohci_enable,
1687 .update_phy_reg = ohci_update_phy_reg,
1688 .set_config_rom = ohci_set_config_rom,
1689 .send_request = ohci_send_request,
1690 .send_response = ohci_send_response,
1691 .cancel_packet = ohci_cancel_packet,
1692 .enable_phys_dma = ohci_enable_phys_dma,
1693 .get_bus_time = ohci_get_bus_time,
1694
1695 .allocate_iso_context = ohci_allocate_iso_context,
1696 .free_iso_context = ohci_free_iso_context,
1697 .queue_iso = ohci_queue_iso,
1698 .start_iso = ohci_start_iso,
1699 .stop_iso = ohci_stop_iso,
1700};
1701
1702static int software_reset(struct fw_ohci *ohci)
1703{
1704 int i;
1705
1706 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
1707
1708 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
1709 if ((reg_read(ohci, OHCI1394_HCControlSet) &
1710 OHCI1394_HCControl_softReset) == 0)
1711 return 0;
1712 msleep(1);
1713 }
1714
1715 return -EBUSY;
1716}
1717
1718static int __devinit
1719pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
1720{
1721 struct fw_ohci *ohci;
1722 u32 bus_options, max_receive, link_speed;
1723 u64 guid;
1724 int err;
1725 size_t size;
1726
1727 ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
1728 if (ohci == NULL) {
1729 fw_error("Could not malloc fw_ohci data.\n");
1730 return -ENOMEM;
1731 }
1732
1733 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
1734
1735 err = pci_enable_device(dev);
1736 if (err) {
1737 fw_error("Failed to enable OHCI hardware.\n");
1738 goto fail_put_card;
1739 }
1740
1741 pci_set_master(dev);
1742 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
1743 pci_set_drvdata(dev, ohci);
1744
1745 spin_lock_init(&ohci->lock);
1746
1747 tasklet_init(&ohci->bus_reset_tasklet,
1748 bus_reset_tasklet, (unsigned long)ohci);
1749
1750 err = pci_request_region(dev, 0, ohci_driver_name);
1751 if (err) {
1752 fw_error("MMIO resource unavailable\n");
1753 goto fail_disable;
1754 }
1755
1756 ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
1757 if (ohci->registers == NULL) {
1758 fw_error("Failed to remap registers\n");
1759 err = -ENXIO;
1760 goto fail_iomem;
1761 }
1762
1763 if (software_reset(ohci)) {
1764 fw_error("Failed to reset ohci card.\n");
1765 err = -EBUSY;
1766 goto fail_registers;
1767 }
1768
1769 /*
1770 * Now enable LPS, which we need in order to start accessing
1771 * most of the registers. In fact, on some cards (ALI M5251),
1772 * accessing registers in the SClk domain without LPS enabled
1773	 * will lock up the machine.  Wait 50 msec to make sure the
1774	 * link is fully enabled.
1775 */
1776 reg_write(ohci, OHCI1394_HCControlSet,
1777 OHCI1394_HCControl_LPS |
1778 OHCI1394_HCControl_postedWriteEnable);
1779 flush_writes(ohci);
1780 msleep(50);
1781
1782 reg_write(ohci, OHCI1394_HCControlClear,
1783 OHCI1394_HCControl_noByteSwapData);
1784
1785 reg_write(ohci, OHCI1394_LinkControlSet,
1786 OHCI1394_LinkControl_rcvSelfID |
1787 OHCI1394_LinkControl_cycleTimerEnable |
1788 OHCI1394_LinkControl_cycleMaster);
1789
1790 ar_context_init(&ohci->ar_request_ctx, ohci,
1791 OHCI1394_AsReqRcvContextControlSet);
1792
1793 ar_context_init(&ohci->ar_response_ctx, ohci,
1794 OHCI1394_AsRspRcvContextControlSet);
1795
1796 context_init(&ohci->at_request_ctx, ohci, AT_BUFFER_SIZE,
1797 OHCI1394_AsReqTrContextControlSet, handle_at_packet);
1798
1799 context_init(&ohci->at_response_ctx, ohci, AT_BUFFER_SIZE,
1800 OHCI1394_AsRspTrContextControlSet, handle_at_packet);
1801
1802 reg_write(ohci, OHCI1394_ATRetries,
1803 OHCI1394_MAX_AT_REQ_RETRIES |
1804 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
1805 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));
1806
1807	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
1808	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
1809	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
1810	size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
1811	ohci->it_context_list = kzalloc(size, GFP_KERNEL);
1812
1813	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
1814	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
1815	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
1816	size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
1817	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
1818
1819 if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
1820 fw_error("Out of memory for it/ir contexts.\n");
1821 err = -ENOMEM;
1822 goto fail_registers;
1823 }
1824
1825 /* self-id dma buffer allocation */
1826 ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
1827 SELF_ID_BUF_SIZE,
1828 &ohci->self_id_bus,
1829 GFP_KERNEL);
1830 if (ohci->self_id_cpu == NULL) {
1831 fw_error("Out of memory for self ID buffer.\n");
1832 err = -ENOMEM;
1833 goto fail_registers;
1834 }
1835
1836 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
1837 reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
1838 reg_write(ohci, OHCI1394_IntEventClear, ~0);
1839 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
1840 reg_write(ohci, OHCI1394_IntMaskSet,
1841 OHCI1394_selfIDComplete |
1842 OHCI1394_RQPkt | OHCI1394_RSPkt |
1843 OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
1844 OHCI1394_isochRx | OHCI1394_isochTx |
1845 OHCI1394_masterIntEnable |
1846 OHCI1394_cycle64Seconds);
1847
1848 bus_options = reg_read(ohci, OHCI1394_BusOptions);
1849 max_receive = (bus_options >> 12) & 0xf;
1850 link_speed = bus_options & 0x7;
1851 guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
1852 reg_read(ohci, OHCI1394_GUIDLo);
1853
1854 err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
1855 if (err < 0)
1856 goto fail_self_id;
1857
1858 ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
1859 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
1860 dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff);
1861
1862 return 0;
1863
1864 fail_self_id:
1865 dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
1866 ohci->self_id_cpu, ohci->self_id_bus);
1867 fail_registers:
1868 kfree(ohci->it_context_list);
1869 kfree(ohci->ir_context_list);
1870 pci_iounmap(dev, ohci->registers);
1871 fail_iomem:
1872 pci_release_region(dev, 0);
1873 fail_disable:
1874 pci_disable_device(dev);
1875 fail_put_card:
1876 fw_card_put(&ohci->card);
1877
1878 return err;
1879}
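The BusOptions decode in pci_probe() packs two fields we care about: per IEEE 1394, max_rec (bits 15-12) encodes the maximum asynchronous payload as 2^(max_rec+1) bytes, and the low three bits give the link speed code. A worked example with a made-up register value:

	u32 bus_options = 0xf064a002;                  /* example readout */
	u32 max_receive = (bus_options >> 12) & 0xf;   /* 0xa */
	u32 link_speed  = bus_options & 0x7;           /* 2, i.e. S400 */
	u32 max_payload = 1 << (max_receive + 1);      /* 2048 bytes */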
1880
1881static void pci_remove(struct pci_dev *dev)
1882{
1883 struct fw_ohci *ohci;
1884
1885 ohci = pci_get_drvdata(dev);
1886 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
1887 flush_writes(ohci);
1888 fw_core_remove_card(&ohci->card);
1889
1890 /*
1891 * FIXME: Fail all pending packets here, now that the upper
1892 * layers can't queue any more.
1893 */
1894
1895 software_reset(ohci);
1896 free_irq(dev->irq, ohci);
1897 dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
1898 ohci->self_id_cpu, ohci->self_id_bus);
1899 kfree(ohci->it_context_list);
1900 kfree(ohci->ir_context_list);
1901 pci_iounmap(dev, ohci->registers);
1902 pci_release_region(dev, 0);
1903 pci_disable_device(dev);
1904 fw_card_put(&ohci->card);
1905
1906 fw_notify("Removed fw-ohci device.\n");
1907}
1908
1909static struct pci_device_id pci_table[] = {
1910 { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
1911 { }
1912};
1913
1914MODULE_DEVICE_TABLE(pci, pci_table);
1915
1916static struct pci_driver fw_ohci_pci_driver = {
1917 .name = ohci_driver_name,
1918 .id_table = pci_table,
1919 .probe = pci_probe,
1920 .remove = pci_remove,
1921};
1922
1923MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
1924MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
1925MODULE_LICENSE("GPL");
1926
1927/* Provide a module alias so root-on-sbp2 initrds don't break. */
1928#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
1929MODULE_ALIAS("ohci1394");
1930#endif
1931
1932static int __init fw_ohci_init(void)
1933{
1934 return pci_register_driver(&fw_ohci_pci_driver);
1935}
1936
1937static void __exit fw_ohci_cleanup(void)
1938{
1939 pci_unregister_driver(&fw_ohci_pci_driver);
1940}
1941
1942module_init(fw_ohci_init);
1943module_exit(fw_ohci_cleanup);
diff --git a/drivers/firewire/fw-ohci.h b/drivers/firewire/fw-ohci.h
new file mode 100644
index 000000000000..fa15706397d7
--- /dev/null
+++ b/drivers/firewire/fw-ohci.h
@@ -0,0 +1,153 @@
1#ifndef __fw_ohci_h
2#define __fw_ohci_h
3
4/* OHCI register map */
5
6#define OHCI1394_Version 0x000
7#define OHCI1394_GUID_ROM 0x004
8#define OHCI1394_ATRetries 0x008
9#define OHCI1394_CSRData 0x00C
10#define OHCI1394_CSRCompareData 0x010
11#define OHCI1394_CSRControl 0x014
12#define OHCI1394_ConfigROMhdr 0x018
13#define OHCI1394_BusID 0x01C
14#define OHCI1394_BusOptions 0x020
15#define OHCI1394_GUIDHi 0x024
16#define OHCI1394_GUIDLo 0x028
17#define OHCI1394_ConfigROMmap 0x034
18#define OHCI1394_PostedWriteAddressLo 0x038
19#define OHCI1394_PostedWriteAddressHi 0x03C
20#define OHCI1394_VendorID 0x040
21#define OHCI1394_HCControlSet 0x050
22#define OHCI1394_HCControlClear 0x054
23#define OHCI1394_HCControl_BIBimageValid 0x80000000
24#define OHCI1394_HCControl_noByteSwapData 0x40000000
25#define OHCI1394_HCControl_programPhyEnable 0x00800000
26#define OHCI1394_HCControl_aPhyEnhanceEnable 0x00400000
27#define OHCI1394_HCControl_LPS 0x00080000
28#define OHCI1394_HCControl_postedWriteEnable 0x00040000
29#define OHCI1394_HCControl_linkEnable 0x00020000
30#define OHCI1394_HCControl_softReset 0x00010000
31#define OHCI1394_SelfIDBuffer 0x064
32#define OHCI1394_SelfIDCount 0x068
33#define OHCI1394_IRMultiChanMaskHiSet 0x070
34#define OHCI1394_IRMultiChanMaskHiClear 0x074
35#define OHCI1394_IRMultiChanMaskLoSet 0x078
36#define OHCI1394_IRMultiChanMaskLoClear 0x07C
37#define OHCI1394_IntEventSet 0x080
38#define OHCI1394_IntEventClear 0x084
39#define OHCI1394_IntMaskSet 0x088
40#define OHCI1394_IntMaskClear 0x08C
41#define OHCI1394_IsoXmitIntEventSet 0x090
42#define OHCI1394_IsoXmitIntEventClear 0x094
43#define OHCI1394_IsoXmitIntMaskSet 0x098
44#define OHCI1394_IsoXmitIntMaskClear 0x09C
45#define OHCI1394_IsoRecvIntEventSet 0x0A0
46#define OHCI1394_IsoRecvIntEventClear 0x0A4
47#define OHCI1394_IsoRecvIntMaskSet 0x0A8
48#define OHCI1394_IsoRecvIntMaskClear 0x0AC
49#define OHCI1394_InitialBandwidthAvailable 0x0B0
50#define OHCI1394_InitialChannelsAvailableHi 0x0B4
51#define OHCI1394_InitialChannelsAvailableLo 0x0B8
52#define OHCI1394_FairnessControl 0x0DC
53#define OHCI1394_LinkControlSet 0x0E0
54#define OHCI1394_LinkControlClear 0x0E4
55#define OHCI1394_LinkControl_rcvSelfID (1 << 9)
56#define OHCI1394_LinkControl_rcvPhyPkt (1 << 10)
57#define OHCI1394_LinkControl_cycleTimerEnable (1 << 20)
58#define OHCI1394_LinkControl_cycleMaster (1 << 21)
59#define OHCI1394_LinkControl_cycleSource (1 << 22)
60#define OHCI1394_NodeID 0x0E8
61#define OHCI1394_NodeID_idValid 0x80000000
62#define OHCI1394_PhyControl 0x0EC
63#define OHCI1394_PhyControl_Read(addr) (((addr) << 8) | 0x00008000)
64#define OHCI1394_PhyControl_ReadDone 0x80000000
65#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16)
66#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000)
67#define OHCI1394_PhyControl_WriteDone 0x00004000
68#define OHCI1394_IsochronousCycleTimer 0x0F0
69#define OHCI1394_AsReqFilterHiSet 0x100
70#define OHCI1394_AsReqFilterHiClear 0x104
71#define OHCI1394_AsReqFilterLoSet 0x108
72#define OHCI1394_AsReqFilterLoClear 0x10C
73#define OHCI1394_PhyReqFilterHiSet 0x110
74#define OHCI1394_PhyReqFilterHiClear 0x114
75#define OHCI1394_PhyReqFilterLoSet 0x118
76#define OHCI1394_PhyReqFilterLoClear 0x11C
77#define OHCI1394_PhyUpperBound 0x120
78
79#define OHCI1394_AsReqTrContextBase 0x180
80#define OHCI1394_AsReqTrContextControlSet 0x180
81#define OHCI1394_AsReqTrContextControlClear 0x184
82#define OHCI1394_AsReqTrCommandPtr 0x18C
83
84#define OHCI1394_AsRspTrContextBase 0x1A0
85#define OHCI1394_AsRspTrContextControlSet 0x1A0
86#define OHCI1394_AsRspTrContextControlClear 0x1A4
87#define OHCI1394_AsRspTrCommandPtr 0x1AC
88
89#define OHCI1394_AsReqRcvContextBase 0x1C0
90#define OHCI1394_AsReqRcvContextControlSet 0x1C0
91#define OHCI1394_AsReqRcvContextControlClear 0x1C4
92#define OHCI1394_AsReqRcvCommandPtr 0x1CC
93
94#define OHCI1394_AsRspRcvContextBase 0x1E0
95#define OHCI1394_AsRspRcvContextControlSet 0x1E0
96#define OHCI1394_AsRspRcvContextControlClear 0x1E4
97#define OHCI1394_AsRspRcvCommandPtr 0x1EC
98
99/* Isochronous transmit registers */
100#define OHCI1394_IsoXmitContextBase(n) (0x200 + 16 * (n))
101#define OHCI1394_IsoXmitContextControlSet(n) (0x200 + 16 * (n))
102#define OHCI1394_IsoXmitContextControlClear(n) (0x204 + 16 * (n))
103#define OHCI1394_IsoXmitCommandPtr(n) (0x20C + 16 * (n))
104
105/* Isochronous receive registers */
106#define OHCI1394_IsoRcvContextBase(n) (0x400 + 32 * (n))
107#define OHCI1394_IsoRcvContextControlSet(n) (0x400 + 32 * (n))
108#define OHCI1394_IsoRcvContextControlClear(n) (0x404 + 32 * (n))
109#define OHCI1394_IsoRcvCommandPtr(n) (0x40C + 32 * (n))
110#define OHCI1394_IsoRcvContextMatch(n) (0x410 + 32 * (n))
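For example, the per-context strides above work out to:

	/* IsoXmitContextControlSet(2) == 0x200 + 16 * 2 == 0x220,
	 * IsoRcvContextMatch(1)       == 0x410 + 32 * 1 == 0x430. */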
111
112/* Interrupts Mask/Events */
113#define OHCI1394_reqTxComplete 0x00000001
114#define OHCI1394_respTxComplete 0x00000002
115#define OHCI1394_ARRQ 0x00000004
116#define OHCI1394_ARRS 0x00000008
117#define OHCI1394_RQPkt 0x00000010
118#define OHCI1394_RSPkt 0x00000020
119#define OHCI1394_isochTx 0x00000040
120#define OHCI1394_isochRx 0x00000080
121#define OHCI1394_postedWriteErr 0x00000100
122#define OHCI1394_lockRespErr 0x00000200
123#define OHCI1394_selfIDComplete 0x00010000
124#define OHCI1394_busReset 0x00020000
125#define OHCI1394_phy 0x00080000
126#define OHCI1394_cycleSynch 0x00100000
127#define OHCI1394_cycle64Seconds 0x00200000
128#define OHCI1394_cycleLost 0x00400000
129#define OHCI1394_cycleInconsistent 0x00800000
130#define OHCI1394_unrecoverableError 0x01000000
131#define OHCI1394_cycleTooLong 0x02000000
132#define OHCI1394_phyRegRcvd 0x04000000
133#define OHCI1394_masterIntEnable 0x80000000
134
135#define OHCI1394_evt_no_status 0x0
136#define OHCI1394_evt_long_packet 0x2
137#define OHCI1394_evt_missing_ack 0x3
138#define OHCI1394_evt_underrun 0x4
139#define OHCI1394_evt_overrun 0x5
140#define OHCI1394_evt_descriptor_read 0x6
141#define OHCI1394_evt_data_read 0x7
142#define OHCI1394_evt_data_write 0x8
143#define OHCI1394_evt_bus_reset 0x9
144#define OHCI1394_evt_timeout 0xa
145#define OHCI1394_evt_tcode_err 0xb
146#define OHCI1394_evt_reserved_b 0xc
147#define OHCI1394_evt_reserved_c 0xd
148#define OHCI1394_evt_unknown 0xe
149#define OHCI1394_evt_flushed 0xf
150
151#define OHCI1394_phy_tcode 0xe
152
153#endif /* __fw_ohci_h */
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
new file mode 100644
index 000000000000..68300414e5f4
--- /dev/null
+++ b/drivers/firewire/fw-sbp2.c
@@ -0,0 +1,1147 @@
1/*
2 * SBP2 driver (SCSI over IEEE1394)
3 *
4 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 * The basic structure of this driver is based on the old storage driver,
23 * drivers/ieee1394/sbp2.c, originally written by
24 * James Goodwin <jamesg@filanet.com>
25 * with later contributions and ongoing maintenance from
26 * Ben Collins <bcollins@debian.org>,
27 * Stefan Richter <stefanr@s5r6.in-berlin.de>
28 * and many others.
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/mod_devicetable.h>
34#include <linux/device.h>
35#include <linux/scatterlist.h>
36#include <linux/dma-mapping.h>
37#include <linux/timer.h>
38
39#include <scsi/scsi.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/scsi_dbg.h>
42#include <scsi/scsi_device.h>
43#include <scsi/scsi_host.h>
44
45#include "fw-transaction.h"
46#include "fw-topology.h"
47#include "fw-device.h"
48
49/* I don't know why the SCSI stack doesn't define something like this... */
50typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);
51
52static const char sbp2_driver_name[] = "sbp2";
53
54struct sbp2_device {
55 struct kref kref;
56 struct fw_unit *unit;
57 struct fw_address_handler address_handler;
58 struct list_head orb_list;
59 u64 management_agent_address;
60 u64 command_block_agent_address;
61 u32 workarounds;
62 int login_id;
63
64 /*
65 * We cache these addresses and only update them once we've
66 * logged in or reconnected to the sbp2 device. That way, any
67 * IO to the device will automatically fail and get retried if
68 * it happens in a window where the device is not ready to
69 * handle it (e.g. after a bus reset but before we reconnect).
70 */
71 int node_id;
72 int address_high;
73 int generation;
74
75 int retries;
76 struct delayed_work work;
77};
78
79#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
80#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
81#define SBP2_ORB_TIMEOUT 2000 /* Timeout in ms */
82
83#define SBP2_ORB_NULL 0x80000000
84
85#define SBP2_DIRECTION_TO_MEDIA 0x0
86#define SBP2_DIRECTION_FROM_MEDIA 0x1
87
88/* Unit directory keys */
89#define SBP2_COMMAND_SET_SPECIFIER 0x38
90#define SBP2_COMMAND_SET 0x39
91#define SBP2_COMMAND_SET_REVISION 0x3b
92#define SBP2_FIRMWARE_REVISION 0x3c
93
94/* Flags for detected oddities and brokenness */
95#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
96#define SBP2_WORKAROUND_INQUIRY_36 0x2
97#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
98#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
99#define SBP2_WORKAROUND_OVERRIDE 0x100
100
101/* Management orb opcodes */
102#define SBP2_LOGIN_REQUEST 0x0
103#define SBP2_QUERY_LOGINS_REQUEST 0x1
104#define SBP2_RECONNECT_REQUEST 0x3
105#define SBP2_SET_PASSWORD_REQUEST 0x4
106#define SBP2_LOGOUT_REQUEST 0x7
107#define SBP2_ABORT_TASK_REQUEST 0xb
108#define SBP2_ABORT_TASK_SET 0xc
109#define SBP2_LOGICAL_UNIT_RESET 0xe
110#define SBP2_TARGET_RESET_REQUEST 0xf
111
112/* Offsets for command block agent registers */
113#define SBP2_AGENT_STATE 0x00
114#define SBP2_AGENT_RESET 0x04
115#define SBP2_ORB_POINTER 0x08
116#define SBP2_DOORBELL 0x10
117#define SBP2_UNSOLICITED_STATUS_ENABLE 0x14
118
119/* Status write response codes */
120#define SBP2_STATUS_REQUEST_COMPLETE 0x0
121#define SBP2_STATUS_TRANSPORT_FAILURE 0x1
122#define SBP2_STATUS_ILLEGAL_REQUEST 0x2
123#define SBP2_STATUS_VENDOR_DEPENDENT 0x3
124
125#define STATUS_GET_ORB_HIGH(v) ((v).status & 0xffff)
126#define STATUS_GET_SBP_STATUS(v) (((v).status >> 16) & 0xff)
127#define STATUS_GET_LEN(v) (((v).status >> 24) & 0x07)
128#define STATUS_GET_DEAD(v) (((v).status >> 27) & 0x01)
129#define STATUS_GET_RESPONSE(v) (((v).status >> 28) & 0x03)
130#define STATUS_GET_SOURCE(v) (((v).status >> 30) & 0x03)
131#define STATUS_GET_ORB_LOW(v) ((v).orb_low)
132#define STATUS_GET_DATA(v) ((v).data)
133
134struct sbp2_status {
135 u32 status;
136 u32 orb_low;
137 u8 data[24];
138};
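The STATUS_GET_* accessors above carve up the first two quadlets of an SBP-2 status block. A minimal dump helper as a sketch (the format string is ours):

	static void sbp2_dump_status(const struct sbp2_status *s)
	{
		printk(KERN_DEBUG "orb %04x%08x: resp %d, sbp_status 0x%02x, "
		       "len %d, dead %d, src %d\n",
		       STATUS_GET_ORB_HIGH(*s), STATUS_GET_ORB_LOW(*s),
		       STATUS_GET_RESPONSE(*s), STATUS_GET_SBP_STATUS(*s),
		       STATUS_GET_LEN(*s), STATUS_GET_DEAD(*s),
		       STATUS_GET_SOURCE(*s));
	}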
139
140struct sbp2_pointer {
141 u32 high;
142 u32 low;
143};
144
145struct sbp2_orb {
146 struct fw_transaction t;
147 dma_addr_t request_bus;
148 int rcode;
149 struct sbp2_pointer pointer;
150	void (*callback)(struct sbp2_orb *orb, struct sbp2_status *status);
151 struct list_head link;
152};
153
154#define MANAGEMENT_ORB_LUN(v) ((v))
155#define MANAGEMENT_ORB_FUNCTION(v) ((v) << 16)
156#define MANAGEMENT_ORB_RECONNECT(v) ((v) << 20)
157#define MANAGEMENT_ORB_EXCLUSIVE ((1) << 28)
158#define MANAGEMENT_ORB_REQUEST_FORMAT(v) ((v) << 29)
159#define MANAGEMENT_ORB_NOTIFY ((1) << 31)
160
161#define MANAGEMENT_ORB_RESPONSE_LENGTH(v) ((v))
162#define MANAGEMENT_ORB_PASSWORD_LENGTH(v) ((v) << 16)
163
164struct sbp2_management_orb {
165 struct sbp2_orb base;
166 struct {
167 struct sbp2_pointer password;
168 struct sbp2_pointer response;
169 u32 misc;
170 u32 length;
171 struct sbp2_pointer status_fifo;
172 } request;
173 __be32 response[4];
174 dma_addr_t response_bus;
175 struct completion done;
176 struct sbp2_status status;
177};
178
179#define LOGIN_RESPONSE_GET_LOGIN_ID(v) ((v).misc & 0xffff)
180#define LOGIN_RESPONSE_GET_LENGTH(v) (((v).misc >> 16) & 0xffff)
181
182struct sbp2_login_response {
183 u32 misc;
184 struct sbp2_pointer command_block_agent;
185 u32 reconnect_hold;
186};
187#define COMMAND_ORB_DATA_SIZE(v) ((v))
188#define COMMAND_ORB_PAGE_SIZE(v) ((v) << 16)
189#define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19)
190#define COMMAND_ORB_MAX_PAYLOAD(v) ((v) << 20)
191#define COMMAND_ORB_SPEED(v) ((v) << 24)
192#define COMMAND_ORB_DIRECTION(v) ((v) << 27)
193#define COMMAND_ORB_REQUEST_FORMAT(v) ((v) << 29)
194#define COMMAND_ORB_NOTIFY ((1) << 31)
195
196struct sbp2_command_orb {
197 struct sbp2_orb base;
198 struct {
199 struct sbp2_pointer next;
200 struct sbp2_pointer data_descriptor;
201 u32 misc;
202 u8 command_block[12];
203 } request;
204 struct scsi_cmnd *cmd;
205 scsi_done_fn_t done;
206 struct fw_unit *unit;
207
208 struct sbp2_pointer page_table[SG_ALL];
209 dma_addr_t page_table_bus;
210 dma_addr_t request_buffer_bus;
211};
212
213/*
214 * List of devices with known bugs.
215 *
216 * The firmware_revision field, masked with 0xffffff00, is the best
217 * indicator for the type of bridge chip of a device. It yields a few
218 * false positives but this did not break correctly behaving devices
219 * so far. We use ~0 as a wildcard, since the 24 bit values we get
220 * from the config rom can never match that.
221 */
222static const struct {
223 u32 firmware_revision;
224 u32 model;
225 unsigned workarounds;
226} sbp2_workarounds_table[] = {
227 /* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
228 .firmware_revision = 0x002800,
229 .model = 0x001010,
230 .workarounds = SBP2_WORKAROUND_INQUIRY_36 |
231 SBP2_WORKAROUND_MODE_SENSE_8,
232 },
233 /* Initio bridges, actually only needed for some older ones */ {
234 .firmware_revision = 0x000200,
235 .model = ~0,
236 .workarounds = SBP2_WORKAROUND_INQUIRY_36,
237 },
238 /* Symbios bridge */ {
239 .firmware_revision = 0xa0b800,
240 .model = ~0,
241 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
242 },
243
244 /*
245 * There are iPods (2nd gen, 3rd gen) with model_id == 0, but
246 * these iPods do not feature the read_capacity bug according
247 * to one report. Read_capacity behaviour as well as model_id
248 * could change due to Apple-supplied firmware updates though.
249 */
250
251 /* iPod 4th generation. */ {
252 .firmware_revision = 0x0a2700,
253 .model = 0x000021,
254 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
255 },
256 /* iPod mini */ {
257 .firmware_revision = 0x0a2700,
258 .model = 0x000023,
259 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
260 },
261 /* iPod Photo */ {
262 .firmware_revision = 0x0a2700,
263 .model = 0x00007e,
264 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
265 }
266};
267
268static void
269sbp2_status_write(struct fw_card *card, struct fw_request *request,
270 int tcode, int destination, int source,
271 int generation, int speed,
272 unsigned long long offset,
273 void *payload, size_t length, void *callback_data)
274{
275 struct sbp2_device *sd = callback_data;
276 struct sbp2_orb *orb;
277 struct sbp2_status status;
278 size_t header_size;
279 unsigned long flags;
280
281 if (tcode != TCODE_WRITE_BLOCK_REQUEST ||
282 length == 0 || length > sizeof(status)) {
283 fw_send_response(card, request, RCODE_TYPE_ERROR);
284 return;
285 }
286
287 header_size = min(length, 2 * sizeof(u32));
288 fw_memcpy_from_be32(&status, payload, header_size);
289 if (length > header_size)
290 memcpy(status.data, payload + 8, length - header_size);
291 if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
292 fw_notify("non-orb related status write, not handled\n");
293 fw_send_response(card, request, RCODE_COMPLETE);
294 return;
295 }
296
297	/* Look up the orb corresponding to this status write. */
298 spin_lock_irqsave(&card->lock, flags);
299 list_for_each_entry(orb, &sd->orb_list, link) {
300 if (STATUS_GET_ORB_HIGH(status) == 0 &&
301 STATUS_GET_ORB_LOW(status) == orb->request_bus &&
302 orb->rcode == RCODE_COMPLETE) {
303 list_del(&orb->link);
304 break;
305 }
306 }
307 spin_unlock_irqrestore(&card->lock, flags);
308
309 if (&orb->link != &sd->orb_list)
310 orb->callback(orb, &status);
311 else
312 fw_error("status write for unknown orb\n");
313
314 fw_send_response(card, request, RCODE_COMPLETE);
315}
316
317static void
318complete_transaction(struct fw_card *card, int rcode,
319 void *payload, size_t length, void *data)
320{
321 struct sbp2_orb *orb = data;
322 unsigned long flags;
323
324 orb->rcode = rcode;
325 if (rcode != RCODE_COMPLETE) {
326 spin_lock_irqsave(&card->lock, flags);
327 list_del(&orb->link);
328 spin_unlock_irqrestore(&card->lock, flags);
329 orb->callback(orb, NULL);
330 }
331}
332
333static void
334sbp2_send_orb(struct sbp2_orb *orb, struct fw_unit *unit,
335 int node_id, int generation, u64 offset)
336{
337 struct fw_device *device = fw_device(unit->device.parent);
338 struct sbp2_device *sd = unit->device.driver_data;
339 unsigned long flags;
340
341 orb->pointer.high = 0;
342 orb->pointer.low = orb->request_bus;
343 fw_memcpy_to_be32(&orb->pointer, &orb->pointer, sizeof(orb->pointer));
344
345 spin_lock_irqsave(&device->card->lock, flags);
346 list_add_tail(&orb->link, &sd->orb_list);
347 spin_unlock_irqrestore(&device->card->lock, flags);
348
349 fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
350 node_id, generation,
351 device->node->max_speed, offset,
352 &orb->pointer, sizeof(orb->pointer),
353 complete_transaction, orb);
354}
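sbp2_send_orb() relies on the in-place fw_memcpy_to_be32() idiom used throughout this file: the pointer is composed in CPU byte order, then byteswapped wholesale before the block write. Reduced to its essentials (hypothetical helper; an SBP-2 address pointer is two big-endian quadlets, the upper 32 bits including the node ID followed by the low 32 address bits):

	static void sbp2_fill_pointer(struct sbp2_pointer *p, u64 addr)
	{
		p->high = cpu_to_be32(addr >> 32);
		p->low  = cpu_to_be32(addr & 0xffffffff);
	}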
355
356static int sbp2_cancel_orbs(struct fw_unit *unit)
357{
358 struct fw_device *device = fw_device(unit->device.parent);
359 struct sbp2_device *sd = unit->device.driver_data;
360 struct sbp2_orb *orb, *next;
361 struct list_head list;
362 unsigned long flags;
363 int retval = -ENOENT;
364
365 INIT_LIST_HEAD(&list);
366 spin_lock_irqsave(&device->card->lock, flags);
367 list_splice_init(&sd->orb_list, &list);
368 spin_unlock_irqrestore(&device->card->lock, flags);
369
370 list_for_each_entry_safe(orb, next, &list, link) {
371 retval = 0;
372 if (fw_cancel_transaction(device->card, &orb->t) == 0)
373 continue;
374
375 orb->rcode = RCODE_CANCELLED;
376 orb->callback(orb, NULL);
377 }
378
379 return retval;
380}
381
382static void
383complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
384{
385 struct sbp2_management_orb *orb =
386 (struct sbp2_management_orb *)base_orb;
387
388 if (status)
389 memcpy(&orb->status, status, sizeof(*status));
390 complete(&orb->done);
391}
392
393static int
394sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
395 int function, int lun, void *response)
396{
397 struct fw_device *device = fw_device(unit->device.parent);
398 struct sbp2_device *sd = unit->device.driver_data;
399 struct sbp2_management_orb *orb;
400 int retval = -ENOMEM;
401
402 orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
403 if (orb == NULL)
404 return -ENOMEM;
405
406 /*
407 * The sbp2 device is going to send a block read request to
408 * read out the request from host memory, so map it for dma.
409 */
410 orb->base.request_bus =
411 dma_map_single(device->card->device, &orb->request,
412 sizeof(orb->request), DMA_TO_DEVICE);
413 if (dma_mapping_error(orb->base.request_bus))
414 goto out;
415
416 orb->response_bus =
417 dma_map_single(device->card->device, &orb->response,
418 sizeof(orb->response), DMA_FROM_DEVICE);
419 if (dma_mapping_error(orb->response_bus))
420 goto out;
421
422 orb->request.response.high = 0;
423 orb->request.response.low = orb->response_bus;
424
425 orb->request.misc =
426 MANAGEMENT_ORB_NOTIFY |
427 MANAGEMENT_ORB_FUNCTION(function) |
428 MANAGEMENT_ORB_LUN(lun);
429 orb->request.length =
430 MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response));
431
432 orb->request.status_fifo.high = sd->address_handler.offset >> 32;
433 orb->request.status_fifo.low = sd->address_handler.offset;
434
435 /*
436 * FIXME: Yeah, ok this isn't elegant, we hardwire exclusive
437 * login and 1 second reconnect time. The reconnect setting
438 * is probably fine, but the exclusive login should be an option.
439 */
440 if (function == SBP2_LOGIN_REQUEST) {
441 orb->request.misc |=
442 MANAGEMENT_ORB_EXCLUSIVE |
443 MANAGEMENT_ORB_RECONNECT(0);
444 }
445
446 fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
447
448 init_completion(&orb->done);
449 orb->base.callback = complete_management_orb;
450
451 sbp2_send_orb(&orb->base, unit,
452 node_id, generation, sd->management_agent_address);
453
454 wait_for_completion_timeout(&orb->done,
455 msecs_to_jiffies(SBP2_ORB_TIMEOUT));
456
457 retval = -EIO;
458 if (sbp2_cancel_orbs(unit) == 0) {
459 fw_error("orb reply timed out, rcode=0x%02x\n",
460 orb->base.rcode);
461 goto out;
462 }
463
464 if (orb->base.rcode != RCODE_COMPLETE) {
465 fw_error("management write failed, rcode 0x%02x\n",
466 orb->base.rcode);
467 goto out;
468 }
469
470 if (STATUS_GET_RESPONSE(orb->status) != 0 ||
471 STATUS_GET_SBP_STATUS(orb->status) != 0) {
472 fw_error("error status: %d:%d\n",
473 STATUS_GET_RESPONSE(orb->status),
474 STATUS_GET_SBP_STATUS(orb->status));
475 goto out;
476 }
477
478 retval = 0;
479 out:
480 dma_unmap_single(device->card->device, orb->base.request_bus,
481 sizeof(orb->request), DMA_TO_DEVICE);
482 dma_unmap_single(device->card->device, orb->response_bus,
483 sizeof(orb->response), DMA_FROM_DEVICE);
484
485 if (response)
486 fw_memcpy_from_be32(response,
487 orb->response, sizeof(orb->response));
488 kfree(orb);
489
490 return retval;
491}
492
493static void
494complete_agent_reset_write(struct fw_card *card, int rcode,
495 void *payload, size_t length, void *data)
496{
497 struct fw_transaction *t = data;
498
499 kfree(t);
500}
501
502static int sbp2_agent_reset(struct fw_unit *unit)
503{
504 struct fw_device *device = fw_device(unit->device.parent);
505 struct sbp2_device *sd = unit->device.driver_data;
506 struct fw_transaction *t;
507 static u32 zero;
508
509 t = kzalloc(sizeof(*t), GFP_ATOMIC);
510 if (t == NULL)
511 return -ENOMEM;
512
513 fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
514 sd->node_id, sd->generation, SCODE_400,
515 sd->command_block_agent_address + SBP2_AGENT_RESET,
516 &zero, sizeof(zero), complete_agent_reset_write, t);
517
518 return 0;
519}
520
521static void sbp2_reconnect(struct work_struct *work);
522static struct scsi_host_template scsi_driver_template;
523
524static void
525release_sbp2_device(struct kref *kref)
526{
527 struct sbp2_device *sd = container_of(kref, struct sbp2_device, kref);
528 struct Scsi_Host *host =
529 container_of((void *)sd, struct Scsi_Host, hostdata[0]);
530
531 sbp2_send_management_orb(sd->unit, sd->node_id, sd->generation,
532 SBP2_LOGOUT_REQUEST, sd->login_id, NULL);
533
534 scsi_remove_host(host);
535 fw_core_remove_address_handler(&sd->address_handler);
536 fw_notify("removed sbp2 unit %s\n", sd->unit->device.bus_id);
537 put_device(&sd->unit->device);
538 scsi_host_put(host);
539}
540
541static void sbp2_login(struct work_struct *work)
542{
543 struct sbp2_device *sd =
544 container_of(work, struct sbp2_device, work.work);
545 struct Scsi_Host *host =
546 container_of((void *)sd, struct Scsi_Host, hostdata[0]);
547 struct fw_unit *unit = sd->unit;
548 struct fw_device *device = fw_device(unit->device.parent);
549 struct sbp2_login_response response;
550 int generation, node_id, local_node_id, lun, retval;
551
552 /* FIXME: Make this work for multi-lun devices. */
553 lun = 0;
554
555 generation = device->card->generation;
556 node_id = device->node->node_id;
557 local_node_id = device->card->local_node->node_id;
558
559 if (sbp2_send_management_orb(unit, node_id, generation,
560 SBP2_LOGIN_REQUEST, lun, &response) < 0) {
561 if (sd->retries++ < 5) {
562 schedule_delayed_work(&sd->work, DIV_ROUND_UP(HZ, 5));
563 } else {
564 fw_error("failed to login to %s\n",
565 unit->device.bus_id);
566 kref_put(&sd->kref, release_sbp2_device);
567 }
568 return;
569 }
570
571 sd->generation = generation;
572 sd->node_id = node_id;
573 sd->address_high = local_node_id << 16;
574
575 /* Get command block agent offset and login id. */
576 sd->command_block_agent_address =
577 ((u64) (response.command_block_agent.high & 0xffff) << 32) |
578 response.command_block_agent.low;
579 sd->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response);
580
581 fw_notify("logged in to sbp2 unit %s (%d retries)\n",
582 unit->device.bus_id, sd->retries);
583 fw_notify(" - management_agent_address: 0x%012llx\n",
584 (unsigned long long) sd->management_agent_address);
585 fw_notify(" - command_block_agent_address: 0x%012llx\n",
586 (unsigned long long) sd->command_block_agent_address);
587 fw_notify(" - status write address: 0x%012llx\n",
588 (unsigned long long) sd->address_handler.offset);
589
590#if 0
591 /* FIXME: The linux1394 sbp2 does this last step. */
592 sbp2_set_busy_timeout(scsi_id);
593#endif
594
595 PREPARE_DELAYED_WORK(&sd->work, sbp2_reconnect);
596 sbp2_agent_reset(unit);
597
598 /* FIXME: Loop over luns here. */
599 lun = 0;
600 retval = scsi_add_device(host, 0, 0, lun);
601 if (retval < 0) {
602 sbp2_send_management_orb(unit, sd->node_id, sd->generation,
603 SBP2_LOGOUT_REQUEST, sd->login_id,
604 NULL);
605 /*
606 * Set this back to sbp2_login so we fall back and
607 * retry login on bus reset.
608 */
609 PREPARE_DELAYED_WORK(&sd->work, sbp2_login);
610 }
611 kref_put(&sd->kref, release_sbp2_device);
612}
613
614static int sbp2_probe(struct device *dev)
615{
616 struct fw_unit *unit = fw_unit(dev);
617 struct fw_device *device = fw_device(unit->device.parent);
618 struct sbp2_device *sd;
619 struct fw_csr_iterator ci;
620 struct Scsi_Host *host;
621 int i, key, value, err;
622 u32 model, firmware_revision;
623
624 err = -ENOMEM;
625 host = scsi_host_alloc(&scsi_driver_template, sizeof(*sd));
626 if (host == NULL)
627 goto fail;
628
629 sd = (struct sbp2_device *) host->hostdata;
630 unit->device.driver_data = sd;
631 sd->unit = unit;
632 INIT_LIST_HEAD(&sd->orb_list);
633 kref_init(&sd->kref);
634
635 sd->address_handler.length = 0x100;
636 sd->address_handler.address_callback = sbp2_status_write;
637 sd->address_handler.callback_data = sd;
638
639 err = fw_core_add_address_handler(&sd->address_handler,
640 &fw_high_memory_region);
641 if (err < 0)
642 goto fail_host;
643
644 err = fw_device_enable_phys_dma(device);
645 if (err < 0)
646 goto fail_address_handler;
647
648 err = scsi_add_host(host, &unit->device);
649 if (err < 0)
650 goto fail_address_handler;
651
652 /*
653 * Scan unit directory to get management agent address,
654	 * firmware revision and model.  Initialize firmware_revision
655	 * and model to values that won't match anything in our table.
656 */
657 firmware_revision = 0xff000000;
658 model = 0xff000000;
659 fw_csr_iterator_init(&ci, unit->directory);
660 while (fw_csr_iterator_next(&ci, &key, &value)) {
661 switch (key) {
662 case CSR_DEPENDENT_INFO | CSR_OFFSET:
663 sd->management_agent_address =
664 0xfffff0000000ULL + 4 * value;
665 break;
666 case SBP2_FIRMWARE_REVISION:
667 firmware_revision = value;
668 break;
669 case CSR_MODEL:
670 model = value;
671 break;
672 }
673 }
674
675 for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
676 if (sbp2_workarounds_table[i].firmware_revision !=
677 (firmware_revision & 0xffffff00))
678 continue;
679 if (sbp2_workarounds_table[i].model != model &&
680 sbp2_workarounds_table[i].model != ~0)
681 continue;
682 sd->workarounds |= sbp2_workarounds_table[i].workarounds;
683 break;
684 }
685
686 if (sd->workarounds)
687 fw_notify("Workarounds for node %s: 0x%x "
688 "(firmware_revision 0x%06x, model_id 0x%06x)\n",
689 unit->device.bus_id,
690 sd->workarounds, firmware_revision, model);
691
692 get_device(&unit->device);
693
694 /*
695 * We schedule work to do the login so we can easily
696 * reschedule retries. Always get the ref before scheduling
697 * work.
698 */
699 INIT_DELAYED_WORK(&sd->work, sbp2_login);
700 if (schedule_delayed_work(&sd->work, 0))
701 kref_get(&sd->kref);
702
703 return 0;
704
705 fail_address_handler:
706 fw_core_remove_address_handler(&sd->address_handler);
707 fail_host:
708 scsi_host_put(host);
709 fail:
710 return err;
711}
712
713static int sbp2_remove(struct device *dev)
714{
715 struct fw_unit *unit = fw_unit(dev);
716 struct sbp2_device *sd = unit->device.driver_data;
717
718 kref_put(&sd->kref, release_sbp2_device);
719
720 return 0;
721}
722
723static void sbp2_reconnect(struct work_struct *work)
724{
725 struct sbp2_device *sd =
726 container_of(work, struct sbp2_device, work.work);
727 struct fw_unit *unit = sd->unit;
728 struct fw_device *device = fw_device(unit->device.parent);
729 int generation, node_id, local_node_id;
730
731 generation = device->card->generation;
732 node_id = device->node->node_id;
733 local_node_id = device->card->local_node->node_id;
734
735 if (sbp2_send_management_orb(unit, node_id, generation,
736 SBP2_RECONNECT_REQUEST,
737 sd->login_id, NULL) < 0) {
738 if (sd->retries++ >= 5) {
739 fw_error("failed to reconnect to %s\n",
740 unit->device.bus_id);
741 /* Fall back and try to log in again. */
742 sd->retries = 0;
743 PREPARE_DELAYED_WORK(&sd->work, sbp2_login);
744 }
745 schedule_delayed_work(&sd->work, DIV_ROUND_UP(HZ, 5));
746 return;
747 }
748
749 sd->generation = generation;
750 sd->node_id = node_id;
751 sd->address_high = local_node_id << 16;
752
753 fw_notify("reconnected to unit %s (%d retries)\n",
754 unit->device.bus_id, sd->retries);
755 sbp2_agent_reset(unit);
756 sbp2_cancel_orbs(unit);
757 kref_put(&sd->kref, release_sbp2_device);
758}
759
760static void sbp2_update(struct fw_unit *unit)
761{
762 struct fw_device *device = fw_device(unit->device.parent);
763 struct sbp2_device *sd = unit->device.driver_data;
764
765 sd->retries = 0;
766 fw_device_enable_phys_dma(device);
767 if (schedule_delayed_work(&sd->work, 0))
768 kref_get(&sd->kref);
769}
770
771#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
772#define SBP2_SW_VERSION_ENTRY 0x00010483
773
774static const struct fw_device_id sbp2_id_table[] = {
775 {
776 .match_flags = FW_MATCH_SPECIFIER_ID | FW_MATCH_VERSION,
777 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
778 .version = SBP2_SW_VERSION_ENTRY,
779 },
780 { }
781};
782
783static struct fw_driver sbp2_driver = {
784 .driver = {
785 .owner = THIS_MODULE,
786 .name = sbp2_driver_name,
787 .bus = &fw_bus_type,
788 .probe = sbp2_probe,
789 .remove = sbp2_remove,
790 },
791 .update = sbp2_update,
792 .id_table = sbp2_id_table,
793};
794
795static unsigned int
796sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
797{
798 int sam_status;
799
800 sense_data[0] = 0x70;
801 sense_data[1] = 0x0;
802 sense_data[2] = sbp2_status[1];
803 sense_data[3] = sbp2_status[4];
804 sense_data[4] = sbp2_status[5];
805 sense_data[5] = sbp2_status[6];
806 sense_data[6] = sbp2_status[7];
807 sense_data[7] = 10;
808 sense_data[8] = sbp2_status[8];
809 sense_data[9] = sbp2_status[9];
810 sense_data[10] = sbp2_status[10];
811 sense_data[11] = sbp2_status[11];
812 sense_data[12] = sbp2_status[2];
813 sense_data[13] = sbp2_status[3];
814 sense_data[14] = sbp2_status[12];
815 sense_data[15] = sbp2_status[13];
816
817 sam_status = sbp2_status[0] & 0x3f;
818
819 switch (sam_status) {
820 case SAM_STAT_GOOD:
821 case SAM_STAT_CHECK_CONDITION:
822 case SAM_STAT_CONDITION_MET:
823 case SAM_STAT_BUSY:
824 case SAM_STAT_RESERVATION_CONFLICT:
825 case SAM_STAT_COMMAND_TERMINATED:
826 return DID_OK << 16 | sam_status;
827
828 default:
829 return DID_ERROR << 16;
830 }
831}
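The value returned here feeds scsi_cmnd->result, which packs the host byte into bits 23-16 and the SAM status into the low byte. For example:

	/* DID_OK << 16 | SAM_STAT_CHECK_CONDITION == 0x00000002: the
	 * transport succeeded but the device reported CHECK CONDITION,
	 * so the midlayer reads the sense buffer filled in above. */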
832
833static void
834complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
835{
836 struct sbp2_command_orb *orb = (struct sbp2_command_orb *)base_orb;
837 struct fw_unit *unit = orb->unit;
838 struct fw_device *device = fw_device(unit->device.parent);
839 struct scatterlist *sg;
840 int result;
841
842 if (status != NULL) {
843 if (STATUS_GET_DEAD(*status))
844 sbp2_agent_reset(unit);
845
846 switch (STATUS_GET_RESPONSE(*status)) {
847 case SBP2_STATUS_REQUEST_COMPLETE:
848 result = DID_OK << 16;
849 break;
850 case SBP2_STATUS_TRANSPORT_FAILURE:
851 result = DID_BUS_BUSY << 16;
852 break;
853 case SBP2_STATUS_ILLEGAL_REQUEST:
854 case SBP2_STATUS_VENDOR_DEPENDENT:
855 default:
856 result = DID_ERROR << 16;
857 break;
858 }
859
860 if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1)
861 result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status),
862 orb->cmd->sense_buffer);
863 } else {
864 /*
865 * If the orb completes with status == NULL, something
866 * went wrong, typically a bus reset happened mid-orb
867 * or when sending the write (less likely).
868 */
869 result = DID_BUS_BUSY << 16;
870 }
871
872 dma_unmap_single(device->card->device, orb->base.request_bus,
873 sizeof(orb->request), DMA_TO_DEVICE);
874
875 if (orb->cmd->use_sg > 0) {
876 sg = (struct scatterlist *)orb->cmd->request_buffer;
877 dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg,
878 orb->cmd->sc_data_direction);
879 }
880
881 if (orb->page_table_bus != 0)
882 dma_unmap_single(device->card->device, orb->page_table_bus,
 883			 sizeof(orb->page_table), DMA_TO_DEVICE);
884
885 if (orb->request_buffer_bus != 0)
886 dma_unmap_single(device->card->device, orb->request_buffer_bus,
 887			 sizeof(orb->request_buffer),
888 DMA_FROM_DEVICE);
889
890 orb->cmd->result = result;
891 orb->done(orb->cmd);
892 kfree(orb);
893}
894
895static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
896{
897 struct sbp2_device *sd =
898 (struct sbp2_device *)orb->cmd->device->host->hostdata;
899 struct fw_unit *unit = sd->unit;
900 struct fw_device *device = fw_device(unit->device.parent);
901 struct scatterlist *sg;
902 int sg_len, l, i, j, count;
903 size_t size;
904 dma_addr_t sg_addr;
905
906 sg = (struct scatterlist *)orb->cmd->request_buffer;
907 count = dma_map_sg(device->card->device, sg, orb->cmd->use_sg,
908 orb->cmd->sc_data_direction);
909 if (count == 0)
910 goto fail;
911
912 /*
913 * Handle the special case where there is only one element in
914 * the scatter list by converting it to an immediate block
915 * request. This is also a workaround for broken devices such
916 * as the second generation iPod which doesn't support page
917 * tables.
918 */
919 if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
920 orb->request.data_descriptor.high = sd->address_high;
921 orb->request.data_descriptor.low = sg_dma_address(sg);
922 orb->request.misc |=
923 COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
924 return 0;
925 }
926
927 /*
928 * Convert the scatterlist to an sbp2 page table. If any
929 * scatterlist entries are too big for sbp2, we split them as we
930 * go. Even if we ask the block I/O layer to not give us sg
931 * elements larger than 65535 bytes, some IOMMUs may merge sg elements
932 * during DMA mapping, and Linux currently doesn't prevent this.
933 */
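	/*
	 * For example (illustrative): an element of twice
	 * SBP2_MAX_SG_ELEMENT_LENGTH bytes becomes two page table
	 * entries, each carrying its length in the upper 16 bits of the
	 * high quadlet and its bus address in the low quadlet.
	 */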
934 for (i = 0, j = 0; i < count; i++) {
935 sg_len = sg_dma_len(sg + i);
936 sg_addr = sg_dma_address(sg + i);
937 while (sg_len) {
938 l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
939 orb->page_table[j].low = sg_addr;
940 orb->page_table[j].high = (l << 16);
941 sg_addr += l;
942 sg_len -= l;
943 j++;
944 }
945 }
946
947 size = sizeof(orb->page_table[0]) * j;
948
949 /*
950 * The data_descriptor pointer is the one case where we need
951 * to fill in the node ID part of the address. All other
 952	 * pointers assume that the data referenced resides on the
 953	 * initiator (i.e. us), but data_descriptor can refer to data
 954	 * on other nodes, so we need to put our ID in descriptor.high.
955 */
956
957 orb->page_table_bus =
958 dma_map_single(device->card->device, orb->page_table,
959 size, DMA_TO_DEVICE);
960 if (dma_mapping_error(orb->page_table_bus))
961 goto fail_page_table;
962 orb->request.data_descriptor.high = sd->address_high;
963 orb->request.data_descriptor.low = orb->page_table_bus;
964 orb->request.misc |=
965 COMMAND_ORB_PAGE_TABLE_PRESENT |
966 COMMAND_ORB_DATA_SIZE(j);
967
968 fw_memcpy_to_be32(orb->page_table, orb->page_table, size);
969
970 return 0;
971
972 fail_page_table:
973 dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg,
974 orb->cmd->sc_data_direction);
975 fail:
976 return -ENOMEM;
977}
978
979/* SCSI stack integration */
980
981static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
982{
983 struct sbp2_device *sd =
984 (struct sbp2_device *)cmd->device->host->hostdata;
985 struct fw_unit *unit = sd->unit;
986 struct fw_device *device = fw_device(unit->device.parent);
987 struct sbp2_command_orb *orb;
988
989 /*
990 * Bidirectional commands are not yet implemented, and unknown
 991	 * transfer directions are not handled.
992 */
993 if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
994 fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
995 cmd->result = DID_ERROR << 16;
996 done(cmd);
997 return 0;
998 }
999
1000 orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
1001 if (orb == NULL) {
1002 fw_notify("failed to alloc orb\n");
1003 goto fail_alloc;
1004 }
1005
1006 /* Initialize rcode to something not RCODE_COMPLETE. */
1007 orb->base.rcode = -1;
1008 orb->base.request_bus =
1009 dma_map_single(device->card->device, &orb->request,
1010 sizeof(orb->request), DMA_TO_DEVICE);
1011 if (dma_mapping_error(orb->base.request_bus))
1012 goto fail_mapping;
1013
1014 orb->unit = unit;
1015 orb->done = done;
1016 orb->cmd = cmd;
1017
1018 orb->request.next.high = SBP2_ORB_NULL;
1019 orb->request.next.low = 0x0;
1020 /*
1021 * At speed 100 we can do 512 bytes per packet, at speed 200,
1022 * 1024 bytes per packet etc. The SBP-2 max_payload field
1023 * specifies the max payload size as 2 ^ (max_payload + 2), so
1024 * if we set this to max_speed + 7, we get the right value.
1025 */
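	/*
	 * Worked example (illustrative): at S400, max_speed == 2, so
	 * max_payload == 2 + 7 == 9 and the limit is
	 * 2 ^ (9 + 2) == 2048 bytes per packet, matching the
	 * doubling-per-speed-step rule above.
	 */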
1026 orb->request.misc =
1027 COMMAND_ORB_MAX_PAYLOAD(device->node->max_speed + 7) |
1028 COMMAND_ORB_SPEED(device->node->max_speed) |
1029 COMMAND_ORB_NOTIFY;
1030
1031 if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1032 orb->request.misc |=
1033 COMMAND_ORB_DIRECTION(SBP2_DIRECTION_FROM_MEDIA);
1034 else if (cmd->sc_data_direction == DMA_TO_DEVICE)
1035 orb->request.misc |=
1036 COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
1037
1038 if (cmd->use_sg && sbp2_command_orb_map_scatterlist(orb) < 0)
1039 goto fail_map_payload;
1040
1041 fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
1042
1043 memset(orb->request.command_block,
1044 0, sizeof(orb->request.command_block));
1045 memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
1046
1047 orb->base.callback = complete_command_orb;
1048
1049 sbp2_send_orb(&orb->base, unit, sd->node_id, sd->generation,
1050 sd->command_block_agent_address + SBP2_ORB_POINTER);
1051
1052 return 0;
1053
1054 fail_map_payload:
1055 dma_unmap_single(device->card->device, orb->base.request_bus,
1056 sizeof(orb->request), DMA_TO_DEVICE);
1057 fail_mapping:
1058 kfree(orb);
1059 fail_alloc:
1060 return SCSI_MLQUEUE_HOST_BUSY;
1061}
1062
1063static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
1064{
1065 struct sbp2_device *sd = (struct sbp2_device *)sdev->host->hostdata;
1066
1067 sdev->allow_restart = 1;
1068
1069 if (sd->workarounds & SBP2_WORKAROUND_INQUIRY_36)
1070 sdev->inquiry_len = 36;
1071 return 0;
1072}
1073
1074static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
1075{
1076 struct sbp2_device *sd = (struct sbp2_device *)sdev->host->hostdata;
1077 struct fw_unit *unit = sd->unit;
1078
1079 sdev->use_10_for_rw = 1;
1080
1081 if (sdev->type == TYPE_ROM)
1082 sdev->use_10_for_ms = 1;
1083 if (sdev->type == TYPE_DISK &&
1084 sd->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
1085 sdev->skip_ms_page_8 = 1;
1086 if (sd->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) {
1087 fw_notify("setting fix_capacity for %s\n", unit->device.bus_id);
1088 sdev->fix_capacity = 1;
1089 }
1090
1091 return 0;
1092}
1093
1094/*
 1095 * Called by the SCSI stack when something has really gone wrong.
 1096 * Usually called when a command has timed out for some reason.
1097 */
1098static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
1099{
1100 struct sbp2_device *sd =
1101 (struct sbp2_device *)cmd->device->host->hostdata;
1102 struct fw_unit *unit = sd->unit;
1103
1104 fw_notify("sbp2_scsi_abort\n");
1105 sbp2_agent_reset(unit);
1106 sbp2_cancel_orbs(unit);
1107
1108 return SUCCESS;
1109}
1110
1111static struct scsi_host_template scsi_driver_template = {
1112 .module = THIS_MODULE,
1113 .name = "SBP-2 IEEE-1394",
1114 .proc_name = (char *)sbp2_driver_name,
1115 .queuecommand = sbp2_scsi_queuecommand,
1116 .slave_alloc = sbp2_scsi_slave_alloc,
1117 .slave_configure = sbp2_scsi_slave_configure,
1118 .eh_abort_handler = sbp2_scsi_abort,
1119 .this_id = -1,
1120 .sg_tablesize = SG_ALL,
1121 .use_clustering = ENABLE_CLUSTERING,
1122 .cmd_per_lun = 1,
1123 .can_queue = 1,
1124};
1125
1126MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
1127MODULE_DESCRIPTION("SCSI over IEEE1394");
1128MODULE_LICENSE("GPL");
1129MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
1130
1131/* Provide a module alias so root-on-sbp2 initrds don't break. */
1132#ifndef CONFIG_IEEE1394_SBP2_MODULE
1133MODULE_ALIAS("sbp2");
1134#endif
1135
1136static int __init sbp2_init(void)
1137{
1138 return driver_register(&sbp2_driver.driver);
1139}
1140
1141static void __exit sbp2_cleanup(void)
1142{
1143 driver_unregister(&sbp2_driver.driver);
1144}
1145
1146module_init(sbp2_init);
1147module_exit(sbp2_cleanup);
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
new file mode 100644
index 000000000000..7aebb8ae0efa
--- /dev/null
+++ b/drivers/firewire/fw-topology.c
@@ -0,0 +1,537 @@
1/*
2 * Incremental bus scan, based on bus topology
3 *
4 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/module.h>
22#include <linux/wait.h>
23#include <linux/errno.h>
24#include "fw-transaction.h"
25#include "fw-topology.h"
26
27#define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f)
28#define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01)
29#define SELF_ID_LINK_ON(q) (((q) >> 22) & 0x01)
30#define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f)
31#define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03)
32#define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01)
33#define SELF_ID_PHY_INITIATOR(q) (((q) >> 1) & 0x01)
34#define SELF_ID_MORE_PACKETS(q) (((q) >> 0) & 0x01)
35
36#define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07)
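/*
 * Illustrative example: for a self-ID quadlet q == 0x02000000,
 * SELF_ID_PHY_ID(q) == 2 and SELF_ID_MORE_PACKETS(q) == 0, i.e. PHY 2
 * sent a single self-ID packet with no extended packets following.
 */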
37
38static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
39{
40 u32 q;
41 int port_type, shift, seq;
42
43 *total_port_count = 0;
44 *child_port_count = 0;
45
46 shift = 6;
47 q = *sid;
48 seq = 0;
49
50 while (1) {
51 port_type = (q >> shift) & 0x03;
52 switch (port_type) {
53 case SELFID_PORT_CHILD:
54 (*child_port_count)++;
55 case SELFID_PORT_PARENT:
56 case SELFID_PORT_NCONN:
57 (*total_port_count)++;
58 case SELFID_PORT_NONE:
59 break;
60 }
61
62 shift -= 2;
63 if (shift == 0) {
64 if (!SELF_ID_MORE_PACKETS(q))
65 return sid + 1;
66
67 shift = 16;
68 sid++;
69 q = *sid;
70
71 /*
72 * Check that the extra packets actually are
73 * extended self ID packets and that the
74 * sequence numbers in the extended self ID
75 * packets increase as expected.
76 */
77
78 if (!SELF_ID_EXTENDED(q) ||
79 seq != SELF_ID_EXT_SEQUENCE(q))
80 return NULL;
81
82 seq++;
83 }
84 }
85}
86
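/*
 * Port type fields are two bits wide; the first self-ID quadlet
 * carries ports 0-2 starting at bit 6, and each extended quadlet
 * carries eight more starting at bit 16. Biasing the port index by 5
 * maps it straight to the right quadlet and shift.
 */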
87static int get_port_type(u32 *sid, int port_index)
88{
89 int index, shift;
90
91 index = (port_index + 5) / 8;
92 shift = 16 - ((port_index + 5) & 7) * 2;
93 return (sid[index] >> shift) & 0x03;
94}
95
96static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
97{
98 struct fw_node *node;
99
100 node = kzalloc(sizeof(*node) + port_count * sizeof(node->ports[0]),
101 GFP_ATOMIC);
102 if (node == NULL)
103 return NULL;
104
105 node->color = color;
106 node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
107 node->link_on = SELF_ID_LINK_ON(sid);
108 node->phy_speed = SELF_ID_PHY_SPEED(sid);
109 node->port_count = port_count;
110
111 atomic_set(&node->ref_count, 1);
112 INIT_LIST_HEAD(&node->link);
113
114 return node;
115}
116
117/*
 118 * Compute the maximum hop count for this node and its children. The
 119 * maximum hop count is the maximum number of connections between any
 120 * two nodes in the subtree rooted at this node. We need this for
 121 * setting the gap count. As we build the tree bottom up in
 122 * build_tree() below, this is fairly easy to do: for each node we
 123 * maintain the max hop count and the max depth, i.e. the number of
 124 * hops to the furthest leaf. Computing the max hop count breaks down
 125 * into two cases: either the path goes through this node, in which
 126 * case the hop count is the sum of the two biggest child depths plus
 127 * 2, or the max hop path is entirely contained in a child tree, in
 128 * which case the max hop count is just the max hop count of that
 129 * child.
130 */
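/*
 * Worked example (illustrative): for a node whose two deepest children
 * have max_depth 2 and 1, and whose children's max_hops is at most 3,
 * update_hop_count() sets max_depth = 2 + 1 = 3 and
 * max_hops = max(3, 2 + 1 + 2) = 5.
 */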
131static void update_hop_count(struct fw_node *node)
132{
133 int depths[2] = { -1, -1 };
134 int max_child_hops = 0;
135 int i;
136
137 for (i = 0; i < node->port_count; i++) {
138 if (node->ports[i].node == NULL)
139 continue;
140
141 if (node->ports[i].node->max_hops > max_child_hops)
142 max_child_hops = node->ports[i].node->max_hops;
143
144 if (node->ports[i].node->max_depth > depths[0]) {
145 depths[1] = depths[0];
146 depths[0] = node->ports[i].node->max_depth;
147 } else if (node->ports[i].node->max_depth > depths[1])
148 depths[1] = node->ports[i].node->max_depth;
149 }
150
151 node->max_depth = depths[0] + 1;
152 node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
153}
154
155
156/**
157 * build_tree - Build the tree representation of the topology
 158 * @card: the card for which to build the tree
 159 * @sid: array of self IDs from the latest bus reset
 160 * @self_id_count: the length of the sid array
161 *
162 * This function builds the tree representation of the topology given
163 * by the self IDs from the latest bus reset. During the construction
164 * of the tree, the function checks that the self IDs are valid and
 165 * internally consistent. On success this function returns the
 166 * fw_node corresponding to the local card, otherwise NULL.
167 */
168static struct fw_node *build_tree(struct fw_card *card,
169 u32 *sid, int self_id_count)
170{
171 struct fw_node *node, *child, *local_node, *irm_node;
172 struct list_head stack, *h;
173 u32 *next_sid, *end, q;
174 int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
175 int gap_count, topology_type;
176
177 local_node = NULL;
178 node = NULL;
179 INIT_LIST_HEAD(&stack);
180 stack_depth = 0;
181 end = sid + self_id_count;
182 phy_id = 0;
183 irm_node = NULL;
184 gap_count = SELF_ID_GAP_COUNT(*sid);
185 topology_type = 0;
186
187 while (sid < end) {
188 next_sid = count_ports(sid, &port_count, &child_port_count);
189
190 if (next_sid == NULL) {
191 fw_error("Inconsistent extended self IDs.\n");
192 return NULL;
193 }
194
195 q = *sid;
196 if (phy_id != SELF_ID_PHY_ID(q)) {
197 fw_error("PHY ID mismatch in self ID: %d != %d.\n",
198 phy_id, SELF_ID_PHY_ID(q));
199 return NULL;
200 }
201
202 if (child_port_count > stack_depth) {
203 fw_error("Topology stack underflow\n");
204 return NULL;
205 }
206
207 /*
208 * Seek back from the top of our stack to find the
209 * start of the child nodes for this node.
210 */
211 for (i = 0, h = &stack; i < child_port_count; i++)
212 h = h->prev;
213 child = fw_node(h);
214
215 node = fw_node_create(q, port_count, card->color);
216 if (node == NULL) {
 217			fw_error("Out of memory while building topology.\n");
218 return NULL;
219 }
220
221 if (phy_id == (card->node_id & 0x3f))
222 local_node = node;
223
224 if (SELF_ID_CONTENDER(q))
225 irm_node = node;
226
227 if (node->phy_speed == SCODE_BETA)
228 topology_type |= FW_TOPOLOGY_B;
229 else
230 topology_type |= FW_TOPOLOGY_A;
231
232 parent_count = 0;
233
234 for (i = 0; i < port_count; i++) {
235 switch (get_port_type(sid, i)) {
236 case SELFID_PORT_PARENT:
237 /*
 238			 * Who's your daddy? We don't know the
239 * parent node at this time, so we
240 * temporarily abuse node->color for
241 * remembering the entry in the
242 * node->ports array where the parent
243 * node should be. Later, when we
244 * handle the parent node, we fix up
245 * the reference.
246 */
247 parent_count++;
248 node->color = i;
249 break;
250
251 case SELFID_PORT_CHILD:
252 node->ports[i].node = child;
253 /*
254 * Fix up parent reference for this
255 * child node.
256 */
257 child->ports[child->color].node = node;
258 child->color = card->color;
259 child = fw_node(child->link.next);
260 break;
261 }
262 }
263
264 /*
265 * Check that the node reports exactly one parent
266 * port, except for the root, which of course should
267 * have no parents.
268 */
269 if ((next_sid == end && parent_count != 0) ||
270 (next_sid < end && parent_count != 1)) {
271 fw_error("Parent port inconsistency for node %d: "
272 "parent_count=%d\n", phy_id, parent_count);
273 return NULL;
274 }
275
276 /* Pop the child nodes off the stack and push the new node. */
277 __list_del(h->prev, &stack);
278 list_add_tail(&node->link, &stack);
279 stack_depth += 1 - child_port_count;
280
281 /*
 282		 * If the PHYs don't all report the same gap count
 283		 * setting, we fall back to 63, which will force a gap
284 * count reconfiguration and a reset.
285 */
286 if (SELF_ID_GAP_COUNT(q) != gap_count)
287 gap_count = 63;
288
289 update_hop_count(node);
290
291 sid = next_sid;
292 phy_id++;
293 }
294
295 card->root_node = node;
296 card->irm_node = irm_node;
297 card->gap_count = gap_count;
298 card->topology_type = topology_type;
299
300 return local_node;
301}
302
303typedef void (*fw_node_callback_t)(struct fw_card * card,
304 struct fw_node * node,
305 struct fw_node * parent);
306
307static void
308for_each_fw_node(struct fw_card *card, struct fw_node *root,
309 fw_node_callback_t callback)
310{
311 struct list_head list;
312 struct fw_node *node, *next, *child, *parent;
313 int i;
314
315 INIT_LIST_HEAD(&list);
316
317 fw_node_get(root);
318 list_add_tail(&root->link, &list);
319 parent = NULL;
320 list_for_each_entry(node, &list, link) {
321 node->color = card->color;
322
323 for (i = 0; i < node->port_count; i++) {
324 child = node->ports[i].node;
325 if (!child)
326 continue;
327 if (child->color == card->color)
328 parent = child;
329 else {
330 fw_node_get(child);
331 list_add_tail(&child->link, &list);
332 }
333 }
334
335 callback(card, node, parent);
336 }
337
338 list_for_each_entry_safe(node, next, &list, link)
339 fw_node_put(node);
340}
341
342static void
343report_lost_node(struct fw_card *card,
344 struct fw_node *node, struct fw_node *parent)
345{
346 fw_node_event(card, node, FW_NODE_DESTROYED);
347 fw_node_put(node);
348}
349
350static void
351report_found_node(struct fw_card *card,
352 struct fw_node *node, struct fw_node *parent)
353{
354 int b_path = (node->phy_speed == SCODE_BETA);
355
356 if (parent != NULL) {
357 /* min() macro doesn't work here with gcc 3.4 */
358 node->max_speed = parent->max_speed < node->phy_speed ?
359 parent->max_speed : node->phy_speed;
360 node->b_path = parent->b_path && b_path;
361 } else {
362 node->max_speed = node->phy_speed;
363 node->b_path = b_path;
364 }
365
366 fw_node_event(card, node, FW_NODE_CREATED);
367}
368
369void fw_destroy_nodes(struct fw_card *card)
370{
371 unsigned long flags;
372
373 spin_lock_irqsave(&card->lock, flags);
374 card->color++;
375 if (card->local_node != NULL)
376 for_each_fw_node(card, card->local_node, report_lost_node);
377 spin_unlock_irqrestore(&card->lock, flags);
378}
379
380static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
381{
382 struct fw_node *tree;
383 int i;
384
385 tree = node1->ports[port].node;
386 node0->ports[port].node = tree;
387 for (i = 0; i < tree->port_count; i++) {
388 if (tree->ports[i].node == node1) {
389 tree->ports[i].node = node0;
390 break;
391 }
392 }
393}
394
395/**
396 * update_tree - compare the old topology tree for card with the new
397 * one specified by root. Queue the nodes and mark them as either
398 * found, lost or updated. Update the nodes in the card topology tree
399 * as we go.
400 */
401static void
402update_tree(struct fw_card *card, struct fw_node *root)
403{
404 struct list_head list0, list1;
405 struct fw_node *node0, *node1;
406 int i, event;
407
408 INIT_LIST_HEAD(&list0);
409 list_add_tail(&card->local_node->link, &list0);
410 INIT_LIST_HEAD(&list1);
411 list_add_tail(&root->link, &list1);
412
413 node0 = fw_node(list0.next);
414 node1 = fw_node(list1.next);
415
416 while (&node0->link != &list0) {
417
418 /* assert(node0->port_count == node1->port_count); */
419 if (node0->link_on && !node1->link_on)
420 event = FW_NODE_LINK_OFF;
421 else if (!node0->link_on && node1->link_on)
422 event = FW_NODE_LINK_ON;
423 else
424 event = FW_NODE_UPDATED;
425
426 node0->node_id = node1->node_id;
427 node0->color = card->color;
428 node0->link_on = node1->link_on;
429 node0->initiated_reset = node1->initiated_reset;
430 node0->max_hops = node1->max_hops;
431 node1->color = card->color;
432 fw_node_event(card, node0, event);
433
434 if (card->root_node == node1)
435 card->root_node = node0;
436 if (card->irm_node == node1)
437 card->irm_node = node0;
438
439 for (i = 0; i < node0->port_count; i++) {
440 if (node0->ports[i].node && node1->ports[i].node) {
441 /*
442 * This port didn't change, queue the
443 * connected node for further
444 * investigation.
445 */
446 if (node0->ports[i].node->color == card->color)
447 continue;
448 list_add_tail(&node0->ports[i].node->link,
449 &list0);
450 list_add_tail(&node1->ports[i].node->link,
451 &list1);
452 } else if (node0->ports[i].node) {
453 /*
454 * The nodes connected here were
455 * unplugged; unref the lost nodes and
 456				 * queue FW_NODE_DESTROYED callbacks
 457				 * for them.
458 */
459
460 for_each_fw_node(card, node0->ports[i].node,
461 report_lost_node);
462 node0->ports[i].node = NULL;
463 } else if (node1->ports[i].node) {
464 /*
 465				 * One or more nodes were connected to
466 * this port. Move the new nodes into
467 * the tree and queue FW_NODE_CREATED
468 * callbacks for them.
469 */
470 move_tree(node0, node1, i);
471 for_each_fw_node(card, node0->ports[i].node,
472 report_found_node);
473 }
474 }
475
476 node0 = fw_node(node0->link.next);
477 node1 = fw_node(node1->link.next);
478 }
479}
480
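/*
 * Keep the topology map CSR contents current: quadlet 0 holds the map
 * length and CRC (filled in by fw_compute_block_crc), quadlet 1 a
 * generation count bumped on every bus reset, quadlet 2 the node and
 * self-ID counts, followed by the raw self-ID quadlets.
 */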
481static void
482update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count)
483{
484 int node_count;
485
486 card->topology_map[1]++;
487 node_count = (card->root_node->node_id & 0x3f) + 1;
488 card->topology_map[2] = (node_count << 16) | self_id_count;
489 card->topology_map[0] = (self_id_count + 2) << 16;
490 memcpy(&card->topology_map[3], self_ids, self_id_count * 4);
491 fw_compute_block_crc(card->topology_map);
492}
493
494void
495fw_core_handle_bus_reset(struct fw_card *card,
496 int node_id, int generation,
497 int self_id_count, u32 * self_ids)
498{
499 struct fw_node *local_node;
500 unsigned long flags;
501
502 fw_flush_transactions(card);
503
504 spin_lock_irqsave(&card->lock, flags);
505
506 /*
 507	 * If the new topology has a different self_id_count, the topology
 508	 * changed: either nodes were added or removed. In that case we
 509	 * reset the bus manager retry counter.
510 */
511 if (card->self_id_count != self_id_count)
512 card->bm_retries = 0;
513
514 card->node_id = node_id;
515 card->generation = generation;
516 card->reset_jiffies = jiffies;
517 schedule_delayed_work(&card->work, 0);
518
519 local_node = build_tree(card, self_ids, self_id_count);
520
521 update_topology_map(card, self_ids, self_id_count);
522
523 card->color++;
524
525 if (local_node == NULL) {
526 fw_error("topology build failed\n");
527 /* FIXME: We need to issue a bus reset in this case. */
528 } else if (card->local_node == NULL) {
529 card->local_node = local_node;
530 for_each_fw_node(card, local_node, report_found_node);
531 } else {
532 update_tree(card, local_node);
533 }
534
535 spin_unlock_irqrestore(&card->lock, flags);
536}
537EXPORT_SYMBOL(fw_core_handle_bus_reset);
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
new file mode 100644
index 000000000000..363b6cbcd0b3
--- /dev/null
+++ b/drivers/firewire/fw-topology.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#ifndef __fw_topology_h
20#define __fw_topology_h
21
22enum {
23 FW_TOPOLOGY_A = 0x01,
24 FW_TOPOLOGY_B = 0x02,
25 FW_TOPOLOGY_MIXED = 0x03,
26};
27
28enum {
29 FW_NODE_CREATED = 0x00,
30 FW_NODE_UPDATED = 0x01,
31 FW_NODE_DESTROYED = 0x02,
32 FW_NODE_LINK_ON = 0x03,
33 FW_NODE_LINK_OFF = 0x04,
34};
35
36struct fw_port {
37 struct fw_node *node;
38 unsigned speed : 3; /* S100, S200, ... S3200 */
39};
40
41struct fw_node {
42 u16 node_id;
43 u8 color;
44 u8 port_count;
45 unsigned link_on : 1;
46 unsigned initiated_reset : 1;
47 unsigned b_path : 1;
48 u8 phy_speed : 3; /* As in the self ID packet. */
49 u8 max_speed : 5; /* Minimum of all phy-speeds and port speeds on
50 * the path from the local node to this node. */
51 u8 max_depth : 4; /* Maximum depth to any leaf node */
52 u8 max_hops : 4; /* Max hops in this sub tree */
53 atomic_t ref_count;
54
55 /* For serializing node topology into a list. */
56 struct list_head link;
57
58 /* Upper layer specific data. */
59 void *data;
60
61 struct fw_port ports[0];
62};
63
64static inline struct fw_node *
65fw_node(struct list_head *l)
66{
67 return list_entry(l, struct fw_node, link);
68}
69
70static inline struct fw_node *
71fw_node_get(struct fw_node *node)
72{
73 atomic_inc(&node->ref_count);
74
75 return node;
76}
77
78static inline void
79fw_node_put(struct fw_node *node)
80{
81 if (atomic_dec_and_test(&node->ref_count))
82 kfree(node);
83}
84
85void
86fw_destroy_nodes(struct fw_card *card);
87
88int
89fw_compute_block_crc(u32 *block);
90
91
92#endif /* __fw_topology_h */
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
new file mode 100644
index 000000000000..80d0121463d0
--- /dev/null
+++ b/drivers/firewire/fw-transaction.c
@@ -0,0 +1,910 @@
1/*
2 * Core IEEE1394 transaction logic
3 *
4 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/pci.h>
26#include <linux/delay.h>
27#include <linux/poll.h>
28#include <linux/list.h>
29#include <linux/kthread.h>
30#include <asm/uaccess.h>
31#include <asm/semaphore.h>
32
33#include "fw-transaction.h"
34#include "fw-topology.h"
35#include "fw-device.h"
36
37#define HEADER_PRI(pri) ((pri) << 0)
38#define HEADER_TCODE(tcode) ((tcode) << 4)
39#define HEADER_RETRY(retry) ((retry) << 8)
40#define HEADER_TLABEL(tlabel) ((tlabel) << 10)
41#define HEADER_DESTINATION(destination) ((destination) << 16)
42#define HEADER_SOURCE(source) ((source) << 16)
43#define HEADER_RCODE(rcode) ((rcode) << 12)
44#define HEADER_OFFSET_HIGH(offset_high) ((offset_high) << 0)
45#define HEADER_DATA_LENGTH(length) ((length) << 16)
46#define HEADER_EXTENDED_TCODE(tcode) ((tcode) << 0)
47
48#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
49#define HEADER_GET_TLABEL(q) (((q) >> 10) & 0x3f)
50#define HEADER_GET_RCODE(q) (((q) >> 12) & 0x0f)
51#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
52#define HEADER_GET_SOURCE(q) (((q) >> 16) & 0xffff)
53#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
54#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
55#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
56
57#define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22))
58#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
59#define PHY_IDENTIFIER(id) ((id) << 30)
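/*
 * Illustrative example: PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
 * PHY_CONFIG_ROOT_ID(2) | PHY_CONFIG_GAP_COUNT(5) == 0x02c50000, a
 * config packet electing node 2 as root with a gap count of 5 (the
 * macros set the force-root and gap-count-valid bits).
 */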
60
61static int
62close_transaction(struct fw_transaction *transaction,
63 struct fw_card *card, int rcode,
64 u32 *payload, size_t length)
65{
66 struct fw_transaction *t;
67 unsigned long flags;
68
69 spin_lock_irqsave(&card->lock, flags);
70 list_for_each_entry(t, &card->transaction_list, link) {
71 if (t == transaction) {
72 list_del(&t->link);
73 card->tlabel_mask &= ~(1 << t->tlabel);
74 break;
75 }
76 }
77 spin_unlock_irqrestore(&card->lock, flags);
78
79 if (&t->link != &card->transaction_list) {
80 t->callback(card, rcode, payload, length, t->callback_data);
81 return 0;
82 }
83
84 return -ENOENT;
85}
86
87/*
88 * Only valid for transactions that are potentially pending (ie have
89 * been sent).
90 */
91int
92fw_cancel_transaction(struct fw_card *card,
93 struct fw_transaction *transaction)
94{
95 /*
96 * Cancel the packet transmission if it's still queued. That
97 * will call the packet transmission callback which cancels
98 * the transaction.
99 */
100
101 if (card->driver->cancel_packet(card, &transaction->packet) == 0)
102 return 0;
103
104 /*
105 * If the request packet has already been sent, we need to see
106 * if the transaction is still pending and remove it in that case.
107 */
108
109 return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0);
110}
111EXPORT_SYMBOL(fw_cancel_transaction);
112
113static void
114transmit_complete_callback(struct fw_packet *packet,
115 struct fw_card *card, int status)
116{
117 struct fw_transaction *t =
118 container_of(packet, struct fw_transaction, packet);
119
120 switch (status) {
121 case ACK_COMPLETE:
122 close_transaction(t, card, RCODE_COMPLETE, NULL, 0);
123 break;
124 case ACK_PENDING:
125 t->timestamp = packet->timestamp;
126 break;
127 case ACK_BUSY_X:
128 case ACK_BUSY_A:
129 case ACK_BUSY_B:
130 close_transaction(t, card, RCODE_BUSY, NULL, 0);
131 break;
132 case ACK_DATA_ERROR:
133 close_transaction(t, card, RCODE_DATA_ERROR, NULL, 0);
134 break;
135 case ACK_TYPE_ERROR:
136 close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0);
137 break;
138 default:
139 /*
140 * In this case the ack is really a juju specific
141 * rcode, so just forward that to the callback.
142 */
143 close_transaction(t, card, status, NULL, 0);
144 break;
145 }
146}
147
148static void
149fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
150 int node_id, int source_id, int generation, int speed,
151 unsigned long long offset, void *payload, size_t length)
152{
153 int ext_tcode;
154
155 if (tcode > 0x10) {
156 ext_tcode = tcode - 0x10;
157 tcode = TCODE_LOCK_REQUEST;
158 } else
159 ext_tcode = 0;
160
161 packet->header[0] =
162 HEADER_RETRY(RETRY_X) |
163 HEADER_TLABEL(tlabel) |
164 HEADER_TCODE(tcode) |
165 HEADER_DESTINATION(node_id);
166 packet->header[1] =
167 HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
168 packet->header[2] =
169 offset;
170
171 switch (tcode) {
172 case TCODE_WRITE_QUADLET_REQUEST:
173 packet->header[3] = *(u32 *)payload;
174 packet->header_length = 16;
175 packet->payload_length = 0;
176 break;
177
178 case TCODE_LOCK_REQUEST:
179 case TCODE_WRITE_BLOCK_REQUEST:
180 packet->header[3] =
181 HEADER_DATA_LENGTH(length) |
182 HEADER_EXTENDED_TCODE(ext_tcode);
183 packet->header_length = 16;
184 packet->payload = payload;
185 packet->payload_length = length;
186 break;
187
188 case TCODE_READ_QUADLET_REQUEST:
189 packet->header_length = 12;
190 packet->payload_length = 0;
191 break;
192
193 case TCODE_READ_BLOCK_REQUEST:
194 packet->header[3] =
195 HEADER_DATA_LENGTH(length) |
196 HEADER_EXTENDED_TCODE(ext_tcode);
197 packet->header_length = 16;
198 packet->payload_length = 0;
199 break;
200 }
201
202 packet->speed = speed;
203 packet->generation = generation;
204 packet->ack = 0;
205}
206
207/**
208 * This function provides low-level access to the IEEE1394 transaction
209 * logic. Most C programs would use either fw_read(), fw_write() or
 210 * fw_lock() instead - those functions are convenience wrappers for
211 * this function. The fw_send_request() function is primarily
 212 * provided as a flexible, one-stop entry point for language bindings
213 * and protocol bindings.
214 *
215 * FIXME: Document this function further, in particular the possible
216 * values for rcode in the callback. In short, we map ACK_COMPLETE to
217 * RCODE_COMPLETE, internal errors set errno and set rcode to
218 * RCODE_SEND_ERROR (which is out of range for standard ieee1394
219 * rcodes). All other rcodes are forwarded unchanged. For all
220 * errors, payload is NULL, length is 0.
221 *
 222 * Callers cannot expect the callback to be called before the function
 223 * returns, though it does happen in some cases (ACK_COMPLETE and
224 * errors).
225 *
226 * The payload is only used for write requests and must not be freed
227 * until the callback has been called.
228 *
229 * @param card the card from which to send the request
230 * @param tcode the tcode for this transaction. Do not use
 231 * TCODE_LOCK_REQUEST directly, instead use TCODE_LOCK_MASK_SWAP
232 * etc. to specify tcode and ext_tcode.
233 * @param node_id the destination node ID (bus ID and PHY ID concatenated)
234 * @param generation the generation for which node_id is valid
235 * @param speed the speed to use for sending the request
236 * @param offset the 48 bit offset on the destination node
237 * @param payload the data payload for the request subaction
 238 * @param length the length in bytes of the data to read or write
239 * @param callback function to be called when the transaction is completed
240 * @param callback_data pointer to arbitrary data, which will be
241 * passed to the callback
242 */
243void
244fw_send_request(struct fw_card *card, struct fw_transaction *t,
245 int tcode, int node_id, int generation, int speed,
246 unsigned long long offset,
247 void *payload, size_t length,
248 fw_transaction_callback_t callback, void *callback_data)
249{
250 unsigned long flags;
251 int tlabel, source;
252
253 /*
254 * Bump the flush timer up 100ms first of all so we
255 * don't race with a flush timer callback.
256 */
257
258 mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));
259
260 /*
261 * Allocate tlabel from the bitmap and put the transaction on
262 * the list while holding the card spinlock.
263 */
264
265 spin_lock_irqsave(&card->lock, flags);
266
267 source = card->node_id;
268 tlabel = card->current_tlabel;
269 if (card->tlabel_mask & (1 << tlabel)) {
270 spin_unlock_irqrestore(&card->lock, flags);
271 callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
272 return;
273 }
274
275 card->current_tlabel = (card->current_tlabel + 1) & 0x1f;
276 card->tlabel_mask |= (1 << tlabel);
277
278 list_add_tail(&t->link, &card->transaction_list);
279
280 spin_unlock_irqrestore(&card->lock, flags);
281
282 /* Initialize rest of transaction, fill out packet and send it. */
283 t->node_id = node_id;
284 t->tlabel = tlabel;
285 t->callback = callback;
286 t->callback_data = callback_data;
287
288 fw_fill_request(&t->packet, tcode, t->tlabel,
289 node_id, source, generation,
290 speed, offset, payload, length);
291 t->packet.callback = transmit_complete_callback;
292
293 card->driver->send_request(card, &t->packet);
294}
295EXPORT_SYMBOL(fw_send_request);
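
/*
 * Usage sketch (illustrative only; "done" and the node_id/generation
 * variables are placeholders): read the first config ROM quadlet of a
 * node. The fw_transaction must stay allocated until the callback has
 * run.
 *
 *	static void done(struct fw_card *card, int rcode, void *payload,
 *			 size_t length, void *data)
 *	{
 *		if (rcode == RCODE_COMPLETE)
 *			fw_notify("rom header: %08x\n", *(u32 *)payload);
 *	}
 *
 *	static struct fw_transaction t;
 *
 *	fw_send_request(card, &t, TCODE_READ_QUADLET_REQUEST,
 *			node_id, generation, SCODE_100,
 *			CSR_REGISTER_BASE + CSR_CONFIG_ROM, NULL, 4,
 *			done, NULL);
 */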
296
297static void
298transmit_phy_packet_callback(struct fw_packet *packet,
299 struct fw_card *card, int status)
300{
301 kfree(packet);
302}
303
304static void send_phy_packet(struct fw_card *card, u32 data, int generation)
305{
306 struct fw_packet *packet;
307
308 packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
309 if (packet == NULL)
310 return;
311
312 packet->header[0] = data;
313 packet->header[1] = ~data;
314 packet->header_length = 8;
315 packet->payload_length = 0;
316 packet->speed = SCODE_100;
317 packet->generation = generation;
318 packet->callback = transmit_phy_packet_callback;
319
320 card->driver->send_request(card, packet);
321}
322
323void fw_send_phy_config(struct fw_card *card,
324 int node_id, int generation, int gap_count)
325{
326 u32 q;
327
328 q = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
329 PHY_CONFIG_ROOT_ID(node_id) |
330 PHY_CONFIG_GAP_COUNT(gap_count);
331
332 send_phy_packet(card, q, generation);
333}
334
335void fw_flush_transactions(struct fw_card *card)
336{
337 struct fw_transaction *t, *next;
338 struct list_head list;
339 unsigned long flags;
340
341 INIT_LIST_HEAD(&list);
342 spin_lock_irqsave(&card->lock, flags);
343 list_splice_init(&card->transaction_list, &list);
344 card->tlabel_mask = 0;
345 spin_unlock_irqrestore(&card->lock, flags);
346
347 list_for_each_entry_safe(t, next, &list, link) {
348 card->driver->cancel_packet(card, &t->packet);
349
350 /*
351 * At this point cancel_packet will never call the
352 * transaction callback, since we just took all the
353 * transactions out of the list. So do it here.
354 */
355 t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
356 }
357}
358
359static struct fw_address_handler *
360lookup_overlapping_address_handler(struct list_head *list,
361 unsigned long long offset, size_t length)
362{
363 struct fw_address_handler *handler;
364
365 list_for_each_entry(handler, list, link) {
366 if (handler->offset < offset + length &&
367 offset < handler->offset + handler->length)
368 return handler;
369 }
370
371 return NULL;
372}
373
374static struct fw_address_handler *
375lookup_enclosing_address_handler(struct list_head *list,
376 unsigned long long offset, size_t length)
377{
378 struct fw_address_handler *handler;
379
380 list_for_each_entry(handler, list, link) {
381 if (handler->offset <= offset &&
382 offset + length <= handler->offset + handler->length)
383 return handler;
384 }
385
386 return NULL;
387}
388
389static DEFINE_SPINLOCK(address_handler_lock);
390static LIST_HEAD(address_handler_list);
391
392const struct fw_address_region fw_low_memory_region =
393 { .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
394const struct fw_address_region fw_high_memory_region =
395 { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, };
396const struct fw_address_region fw_private_region =
397 { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
398const struct fw_address_region fw_csr_region =
399 { .start = 0xfffff0000000ULL, .end = 0xfffff0000800ULL, };
400const struct fw_address_region fw_unit_space_region =
401 { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
402EXPORT_SYMBOL(fw_low_memory_region);
403EXPORT_SYMBOL(fw_high_memory_region);
404EXPORT_SYMBOL(fw_private_region);
405EXPORT_SYMBOL(fw_csr_region);
406EXPORT_SYMBOL(fw_unit_space_region);
407
408/**
409 * Allocate a range of addresses in the node space of the OHCI
410 * controller. When a request is received that falls within the
411 * specified address range, the specified callback is invoked. The
412 * parameters passed to the callback give the details of the
 413 * particular request.
414 */
415int
416fw_core_add_address_handler(struct fw_address_handler *handler,
417 const struct fw_address_region *region)
418{
419 struct fw_address_handler *other;
420 unsigned long flags;
421 int ret = -EBUSY;
422
423 spin_lock_irqsave(&address_handler_lock, flags);
424
425 handler->offset = region->start;
426 while (handler->offset + handler->length <= region->end) {
427 other =
428 lookup_overlapping_address_handler(&address_handler_list,
429 handler->offset,
430 handler->length);
431 if (other != NULL) {
432 handler->offset += other->length;
433 } else {
434 list_add_tail(&handler->link, &address_handler_list);
435 ret = 0;
436 break;
437 }
438 }
439
440 spin_unlock_irqrestore(&address_handler_lock, flags);
441
442 return ret;
443}
444EXPORT_SYMBOL(fw_core_add_address_handler);
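
/*
 * Usage sketch (illustrative only; "my_callback" and "my_handler" are
 * placeholder names): claim 0x100 bytes of unit space and complete
 * every request that lands there.
 *
 *	static void my_callback(struct fw_card *card,
 *				struct fw_request *request,
 *				int tcode, int destination, int source,
 *				int generation, int speed,
 *				unsigned long long offset,
 *				void *data, size_t length,
 *				void *callback_data)
 *	{
 *		fw_send_response(card, request, RCODE_COMPLETE);
 *	}
 *
 *	static struct fw_address_handler my_handler = {
 *		.length		  = 0x100,
 *		.address_callback = my_callback,
 *	};
 *
 *	if (fw_core_add_address_handler(&my_handler,
 *					&fw_unit_space_region) < 0)
 *		... no free range of that size was available ...
 */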
445
446/**
 447 * Deallocate a range of addresses allocated with fw_core_add_address_handler().
 448 * This will call the associated callback one last time with the special
449 * tcode TCODE_DEALLOCATE, to let the client destroy the registered
450 * callback data. For convenience, the callback parameters offset and
451 * length are set to the start and the length respectively for the
452 * deallocated region, payload is set to NULL.
453 */
454void fw_core_remove_address_handler(struct fw_address_handler *handler)
455{
456 unsigned long flags;
457
458 spin_lock_irqsave(&address_handler_lock, flags);
459 list_del(&handler->link);
460 spin_unlock_irqrestore(&address_handler_lock, flags);
461}
462EXPORT_SYMBOL(fw_core_remove_address_handler);
463
464struct fw_request {
465 struct fw_packet response;
466 u32 request_header[4];
467 int ack;
468 u32 length;
469 u32 data[0];
470};
471
472static void
473free_response_callback(struct fw_packet *packet,
474 struct fw_card *card, int status)
475{
476 struct fw_request *request;
477
478 request = container_of(packet, struct fw_request, response);
479 kfree(request);
480}
481
482void
483fw_fill_response(struct fw_packet *response, u32 *request_header,
484 int rcode, void *payload, size_t length)
485{
486 int tcode, tlabel, extended_tcode, source, destination;
487
488 tcode = HEADER_GET_TCODE(request_header[0]);
489 tlabel = HEADER_GET_TLABEL(request_header[0]);
490 source = HEADER_GET_DESTINATION(request_header[0]);
491 destination = HEADER_GET_SOURCE(request_header[1]);
492 extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);
493
494 response->header[0] =
495 HEADER_RETRY(RETRY_1) |
496 HEADER_TLABEL(tlabel) |
497 HEADER_DESTINATION(destination);
498 response->header[1] =
499 HEADER_SOURCE(source) |
500 HEADER_RCODE(rcode);
501 response->header[2] = 0;
502
503 switch (tcode) {
504 case TCODE_WRITE_QUADLET_REQUEST:
505 case TCODE_WRITE_BLOCK_REQUEST:
506 response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
507 response->header_length = 12;
508 response->payload_length = 0;
509 break;
510
511 case TCODE_READ_QUADLET_REQUEST:
512 response->header[0] |=
513 HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
514 if (payload != NULL)
515 response->header[3] = *(u32 *)payload;
516 else
517 response->header[3] = 0;
518 response->header_length = 16;
519 response->payload_length = 0;
520 break;
521
522 case TCODE_READ_BLOCK_REQUEST:
523 case TCODE_LOCK_REQUEST:
524 response->header[0] |= HEADER_TCODE(tcode + 2);
525 response->header[3] =
526 HEADER_DATA_LENGTH(length) |
527 HEADER_EXTENDED_TCODE(extended_tcode);
528 response->header_length = 16;
529 response->payload = payload;
530 response->payload_length = length;
531 break;
532
533 default:
534 BUG();
535 return;
536 }
537}
538EXPORT_SYMBOL(fw_fill_response);
539
540static struct fw_request *
541allocate_request(struct fw_packet *p)
542{
543 struct fw_request *request;
544 u32 *data, length;
545 int request_tcode, t;
546
547 request_tcode = HEADER_GET_TCODE(p->header[0]);
548 switch (request_tcode) {
549 case TCODE_WRITE_QUADLET_REQUEST:
550 data = &p->header[3];
551 length = 4;
552 break;
553
554 case TCODE_WRITE_BLOCK_REQUEST:
555 case TCODE_LOCK_REQUEST:
556 data = p->payload;
557 length = HEADER_GET_DATA_LENGTH(p->header[3]);
558 break;
559
560 case TCODE_READ_QUADLET_REQUEST:
561 data = NULL;
562 length = 4;
563 break;
564
565 case TCODE_READ_BLOCK_REQUEST:
566 data = NULL;
567 length = HEADER_GET_DATA_LENGTH(p->header[3]);
568 break;
569
570 default:
571 BUG();
572 return NULL;
573 }
574
575 request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
576 if (request == NULL)
577 return NULL;
578
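	/*
	 * Stamp the response 4000 cycles (0.5 s) after the request was
	 * received: the low 13 bits of the timestamp hold the 0..7999
	 * cycle count, so carry into the seconds field (bit 13 and up)
	 * when the sum wraps past 8000.
	 */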
579 t = (p->timestamp & 0x1fff) + 4000;
580 if (t >= 8000)
581 t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000;
582 else
583 t = (p->timestamp & ~0x1fff) + t;
584
585 request->response.speed = p->speed;
586 request->response.timestamp = t;
587 request->response.generation = p->generation;
588 request->response.ack = 0;
589 request->response.callback = free_response_callback;
590 request->ack = p->ack;
591 request->length = length;
592 if (data)
593 memcpy(request->data, data, length);
594
595 memcpy(request->request_header, p->header, sizeof(p->header));
596
597 return request;
598}
599
600void
601fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
602{
603 /*
604 * Broadcast packets are reported as ACK_COMPLETE, so this
 605	 * check is sufficient to ensure we don't send a response to
606 * broadcast packets or posted writes.
607 */
608 if (request->ack != ACK_PENDING)
609 return;
610
611 if (rcode == RCODE_COMPLETE)
612 fw_fill_response(&request->response, request->request_header,
613 rcode, request->data, request->length);
614 else
615 fw_fill_response(&request->response, request->request_header,
616 rcode, NULL, 0);
617
618 card->driver->send_response(card, &request->response);
619}
620EXPORT_SYMBOL(fw_send_response);
621
622void
623fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
624{
625 struct fw_address_handler *handler;
626 struct fw_request *request;
627 unsigned long long offset;
628 unsigned long flags;
629 int tcode, destination, source;
630
631 if (p->payload_length > 2048) {
632 /* FIXME: send error response. */
633 return;
634 }
635
636 if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
637 return;
638
639 request = allocate_request(p);
640 if (request == NULL) {
641 /* FIXME: send statically allocated busy packet. */
642 return;
643 }
644
645 offset =
646 ((unsigned long long)
647 HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
648 tcode = HEADER_GET_TCODE(p->header[0]);
649 destination = HEADER_GET_DESTINATION(p->header[0]);
650 source = HEADER_GET_SOURCE(p->header[0]);
651
652 spin_lock_irqsave(&address_handler_lock, flags);
653 handler = lookup_enclosing_address_handler(&address_handler_list,
654 offset, request->length);
655 spin_unlock_irqrestore(&address_handler_lock, flags);
656
657 /*
658 * FIXME: lookup the fw_node corresponding to the sender of
659 * this request and pass that to the address handler instead
660 * of the node ID. We may also want to move the address
661 * allocations to fw_node so we only do this callback if the
662 * upper layers registered it for this node.
663 */
664
665 if (handler == NULL)
666 fw_send_response(card, request, RCODE_ADDRESS_ERROR);
667 else
668 handler->address_callback(card, request,
669 tcode, destination, source,
670 p->generation, p->speed, offset,
671 request->data, request->length,
672 handler->callback_data);
673}
674EXPORT_SYMBOL(fw_core_handle_request);
675
676void
677fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
678{
679 struct fw_transaction *t;
680 unsigned long flags;
681 u32 *data;
682 size_t data_length;
683 int tcode, tlabel, destination, source, rcode;
684
685 tcode = HEADER_GET_TCODE(p->header[0]);
686 tlabel = HEADER_GET_TLABEL(p->header[0]);
687 destination = HEADER_GET_DESTINATION(p->header[0]);
688 source = HEADER_GET_SOURCE(p->header[1]);
689 rcode = HEADER_GET_RCODE(p->header[1]);
690
691 spin_lock_irqsave(&card->lock, flags);
692 list_for_each_entry(t, &card->transaction_list, link) {
693 if (t->node_id == source && t->tlabel == tlabel) {
694 list_del(&t->link);
695 card->tlabel_mask &= ~(1 << t->tlabel);
696 break;
697 }
698 }
699 spin_unlock_irqrestore(&card->lock, flags);
700
701 if (&t->link == &card->transaction_list) {
702 fw_notify("Unsolicited response (source %x, tlabel %x)\n",
703 source, tlabel);
704 return;
705 }
706
707 /*
 708	 * FIXME: sanity-check the packet: is the length correct, do the
 709	 * tcodes and addresses match?
710 */
711
712 switch (tcode) {
713 case TCODE_READ_QUADLET_RESPONSE:
714 data = (u32 *) &p->header[3];
715 data_length = 4;
716 break;
717
718 case TCODE_WRITE_RESPONSE:
719 data = NULL;
720 data_length = 0;
721 break;
722
723 case TCODE_READ_BLOCK_RESPONSE:
724 case TCODE_LOCK_RESPONSE:
725 data = p->payload;
726 data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
727 break;
728
729 default:
730 /* Should never happen, this is just to shut up gcc. */
731 data = NULL;
732 data_length = 0;
733 break;
734 }
735
736 t->callback(card, rcode, data, data_length, t->callback_data);
737}
738EXPORT_SYMBOL(fw_core_handle_response);
739
740const struct fw_address_region topology_map_region =
741 { .start = 0xfffff0001000ull, .end = 0xfffff0001400ull, };
742
743static void
744handle_topology_map(struct fw_card *card, struct fw_request *request,
745 int tcode, int destination, int source,
746 int generation, int speed,
747 unsigned long long offset,
748 void *payload, size_t length, void *callback_data)
749{
750 int i, start, end;
751 u32 *map;
752
753 if (!TCODE_IS_READ_REQUEST(tcode)) {
754 fw_send_response(card, request, RCODE_TYPE_ERROR);
755 return;
756 }
757
758 if ((offset & 3) > 0 || (length & 3) > 0) {
759 fw_send_response(card, request, RCODE_ADDRESS_ERROR);
760 return;
761 }
762
763 start = (offset - topology_map_region.start) / 4;
764 end = start + length / 4;
765 map = payload;
766
767 for (i = 0; i < length / 4; i++)
768 map[i] = cpu_to_be32(card->topology_map[start + i]);
769
770 fw_send_response(card, request, RCODE_COMPLETE);
771}
772
773static struct fw_address_handler topology_map = {
774 .length = 0x200,
775 .address_callback = handle_topology_map,
776};
777
778const struct fw_address_region registers_region =
779 { .start = 0xfffff0000000ull, .end = 0xfffff0000400ull, };
780
781static void
782handle_registers(struct fw_card *card, struct fw_request *request,
783 int tcode, int destination, int source,
784 int generation, int speed,
785 unsigned long long offset,
786 void *payload, size_t length, void *callback_data)
787{
788 int reg = offset - CSR_REGISTER_BASE;
789 unsigned long long bus_time;
790 __be32 *data = payload;
791
792 switch (reg) {
793 case CSR_CYCLE_TIME:
794 case CSR_BUS_TIME:
795 if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) {
796 fw_send_response(card, request, RCODE_TYPE_ERROR);
797 break;
798 }
799
800 bus_time = card->driver->get_bus_time(card);
801 if (reg == CSR_CYCLE_TIME)
802 *data = cpu_to_be32(bus_time);
803 else
804 *data = cpu_to_be32(bus_time >> 25);
805 fw_send_response(card, request, RCODE_COMPLETE);
806 break;
807
808 case CSR_BUS_MANAGER_ID:
809 case CSR_BANDWIDTH_AVAILABLE:
810 case CSR_CHANNELS_AVAILABLE_HI:
811 case CSR_CHANNELS_AVAILABLE_LO:
812 /*
813 * FIXME: these are handled by the OHCI hardware and
 814		 * the stack never sees these requests. If we add
815 * support for a new type of controller that doesn't
816 * handle this in hardware we need to deal with these
817 * transactions.
818 */
819 BUG();
820 break;
821
822 case CSR_BUSY_TIMEOUT:
823 /* FIXME: Implement this. */
824 default:
825 fw_send_response(card, request, RCODE_ADDRESS_ERROR);
826 break;
827 }
828}
829
830static struct fw_address_handler registers = {
831 .length = 0x400,
832 .address_callback = handle_registers,
833};
834
835MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
836MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
837MODULE_LICENSE("GPL");
838
839static const u32 vendor_textual_descriptor[] = {
840 /* textual descriptor leaf () */
841 0x00060000,
842 0x00000000,
843 0x00000000,
844 0x4c696e75, /* L i n u */
845 0x78204669, /* x F i */
846 0x72657769, /* r e w i */
847 0x72650000, /* r e */
848};
849
850static const u32 model_textual_descriptor[] = {
851 /* model descriptor leaf () */
852 0x00030000,
853 0x00000000,
854 0x00000000,
855 0x4a756a75, /* J u j u */
856};
857
858static struct fw_descriptor vendor_id_descriptor = {
859 .length = ARRAY_SIZE(vendor_textual_descriptor),
860 .immediate = 0x03d00d1e,
861 .key = 0x81000000,
862 .data = vendor_textual_descriptor,
863};
864
865static struct fw_descriptor model_id_descriptor = {
866 .length = ARRAY_SIZE(model_textual_descriptor),
867 .immediate = 0x17000001,
868 .key = 0x81000000,
869 .data = model_textual_descriptor,
870};
871
872static int __init fw_core_init(void)
873{
874 int retval;
875
876 retval = bus_register(&fw_bus_type);
877 if (retval < 0)
878 return retval;
879
880 fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
881 if (fw_cdev_major < 0) {
882 bus_unregister(&fw_bus_type);
883 return fw_cdev_major;
884 }
885
886 retval = fw_core_add_address_handler(&topology_map,
887 &topology_map_region);
888 BUG_ON(retval < 0);
889
890 retval = fw_core_add_address_handler(&registers,
891 &registers_region);
892 BUG_ON(retval < 0);
893
894 /* Add the vendor textual descriptor. */
895 retval = fw_core_add_descriptor(&vendor_id_descriptor);
896 BUG_ON(retval < 0);
897 retval = fw_core_add_descriptor(&model_id_descriptor);
898 BUG_ON(retval < 0);
899
900 return 0;
901}
902
903static void __exit fw_core_cleanup(void)
904{
905 unregister_chrdev(fw_cdev_major, "firewire");
906 bus_unregister(&fw_bus_type);
907}
908
909module_init(fw_core_init);
910module_exit(fw_core_cleanup);
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
new file mode 100644
index 000000000000..acdc3be38c61
--- /dev/null
+++ b/drivers/firewire/fw-transaction.h
@@ -0,0 +1,458 @@
1/*
2 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#ifndef __fw_transaction_h
20#define __fw_transaction_h
21
22#include <linux/device.h>
23#include <linux/timer.h>
24#include <linux/interrupt.h>
25#include <linux/list.h>
26#include <linux/fs.h>
27#include <linux/dma-mapping.h>
28#include <linux/firewire-constants.h>
29
30#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
31#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0)
32#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0)
33#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0)
34#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4)
35#define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0)
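
/*
 * E.g. TCODE_READ_BLOCK_REQUEST (5) tests as a read request and a
 * block packet; its counterpart TCODE_READ_BLOCK_RESPONSE (7) has
 * bit 1 set and so tests as a response.
 */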
36
37#define LOCAL_BUS 0xffc0
38
39#define SELFID_PORT_CHILD 0x3
40#define SELFID_PORT_PARENT 0x2
41#define SELFID_PORT_NCONN 0x1
42#define SELFID_PORT_NONE 0x0
43
44#define PHY_PACKET_CONFIG 0x0
45#define PHY_PACKET_LINK_ON 0x1
46#define PHY_PACKET_SELF_ID 0x2
47
48/* Bit fields _within_ the PHY registers. */
49#define PHY_LINK_ACTIVE 0x80
50#define PHY_CONTENDER 0x40
51#define PHY_BUS_RESET 0x40
52#define PHY_BUS_SHORT_RESET 0x40
53
54#define CSR_REGISTER_BASE 0xfffff0000000ULL
55
56/* register offsets relative to CSR_REGISTER_BASE */
57#define CSR_STATE_CLEAR 0x0
58#define CSR_STATE_SET 0x4
59#define CSR_NODE_IDS 0x8
60#define CSR_RESET_START 0xc
61#define CSR_SPLIT_TIMEOUT_HI 0x18
62#define CSR_SPLIT_TIMEOUT_LO 0x1c
63#define CSR_CYCLE_TIME 0x200
64#define CSR_BUS_TIME 0x204
65#define CSR_BUSY_TIMEOUT 0x210
66#define CSR_BUS_MANAGER_ID 0x21c
67#define CSR_BANDWIDTH_AVAILABLE 0x220
68#define CSR_CHANNELS_AVAILABLE 0x224
69#define CSR_CHANNELS_AVAILABLE_HI 0x224
70#define CSR_CHANNELS_AVAILABLE_LO 0x228
71#define CSR_BROADCAST_CHANNEL 0x234
72#define CSR_CONFIG_ROM 0x400
73#define CSR_CONFIG_ROM_END 0x800
74#define CSR_FCP_COMMAND 0xB00
75#define CSR_FCP_RESPONSE 0xD00
76#define CSR_FCP_END 0xF00
77#define CSR_TOPOLOGY_MAP 0x1000
78#define CSR_TOPOLOGY_MAP_END 0x1400
79#define CSR_SPEED_MAP 0x2000
80#define CSR_SPEED_MAP_END 0x3000
81
82#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
83#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
84#define fw_debug(s, args...) printk(KERN_DEBUG KBUILD_MODNAME ": " s, ## args)
85
86static inline void
87fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
88{
89 u32 *dst = _dst;
90 u32 *src = _src;
91 int i;
92
93 for (i = 0; i < size / 4; i++)
94 dst[i] = cpu_to_be32(src[i]);
95}
96
97static inline void
98fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
99{
100 fw_memcpy_from_be32(_dst, _src, size);
101}
102
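These helpers swap each 32-bit word on little-endian hosts and reduce to a plain copy on big-endian ones; cpu_to_be32() and be32_to_cpu() are the same operation, which is why one loop serves both directions. A minimal sketch of the effect (values invented):

	u32 wire = 0x12345678;
	u32 host;

	fw_memcpy_from_be32(&host, &wire, sizeof(wire));
	/* little-endian CPU: host == 0x78563412; big-endian: unchanged */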
103struct fw_card;
104struct fw_packet;
105struct fw_node;
106struct fw_request;
107
108struct fw_descriptor {
109 struct list_head link;
110 size_t length;
111 u32 immediate;
112 u32 key;
113 const u32 *data;
114};
115
116int fw_core_add_descriptor(struct fw_descriptor *desc);
117void fw_core_remove_descriptor(struct fw_descriptor *desc);
118
119typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
120 struct fw_card *card, int status);
121
122typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
123 void *data,
124 size_t length,
125 void *callback_data);
126
127typedef void (*fw_address_callback_t)(struct fw_card *card,
128 struct fw_request *request,
129 int tcode, int destination, int source,
130 int generation, int speed,
131 unsigned long long offset,
132 void *data, size_t length,
133 void *callback_data);
134
135typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle,
136 int node_id, int generation,
137 u32 *self_ids,
138 int self_id_count,
139 void *callback_data);
140
141struct fw_packet {
142 int speed;
143 int generation;
144 u32 header[4];
145 size_t header_length;
146 void *payload;
147 size_t payload_length;
148 u32 timestamp;
149
150 /*
151 * This callback is called when the packet transmission has
152 * completed; for successful transmission, the status code is
153 * the ack received from the destination, otherwise it's a
154 * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
155 * The callback can be called from tasklet context and thus
156 * must never block.
157 */
158 fw_packet_callback_t callback;
159 int ack;
160 struct list_head link;
161 void *driver_data;
162};
163
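A sketch of a completion callback honoring the contract stated in the comment above: positive ack codes on success, negative errno on failure, and no sleeping since it may run in tasklet context. The function name is invented; the ACK_* values come from linux/firewire-constants.h:

static void example_packet_done(struct fw_packet *packet,
				struct fw_card *card, int status)
{
	if (status < 0)
		fw_error("transmit failed: %d\n", status);
	else if (status != ACK_COMPLETE && status != ACK_PENDING)
		fw_notify("unexpected ack: %x\n", status);
	/* must not block here */
}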
164struct fw_transaction {
165 int node_id; /* The generation is implied; it is always the current one. */
166 int tlabel;
167 int timestamp;
168 struct list_head link;
169
170 struct fw_packet packet;
171
172 /*
173 * The data passed to the callback is valid only during the
174 * callback.
175 */
176 fw_transaction_callback_t callback;
177 void *callback_data;
178};
179
180static inline struct fw_packet *
181fw_packet(struct list_head *l)
182{
183 return list_entry(l, struct fw_packet, link);
184}
185
186struct fw_address_handler {
187 u64 offset;
188 size_t length;
189 fw_address_callback_t address_callback;
190 void *callback_data;
191 struct list_head link;
192};
193
194
195struct fw_address_region {
196 u64 start;
197 u64 end;
198};
199
200extern const struct fw_address_region fw_low_memory_region;
201extern const struct fw_address_region fw_high_memory_region;
202extern const struct fw_address_region fw_private_region;
203extern const struct fw_address_region fw_csr_region;
204extern const struct fw_address_region fw_unit_space_region;
205
206int fw_core_add_address_handler(struct fw_address_handler *handler,
207 const struct fw_address_region *region);
208void fw_core_remove_address_handler(struct fw_address_handler *handler);
209void fw_fill_response(struct fw_packet *response, u32 *request_header,
210 int rcode, void *payload, size_t length);
211void fw_send_response(struct fw_card *card,
212 struct fw_request *request, int rcode);
213
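How a protocol driver might use the handler API above, assuming that fw_core_add_address_handler() places the handler within the given region; all names below are invented for the sketch:

static void example_handler_cb(struct fw_card *card,
			       struct fw_request *request,
			       int tcode, int destination, int source,
			       int generation, int speed,
			       unsigned long long offset,
			       void *data, size_t length,
			       void *callback_data)
{
	/* A real driver would dispatch on tcode and offset. */
	fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler example_handler = {
	.length			= 0x100,
	.address_callback	= example_handler_cb,
};

/* in driver init:
 *	fw_core_add_address_handler(&example_handler,
 *				    &fw_unit_space_region);
 */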
214extern struct bus_type fw_bus_type;
215
216struct fw_card {
217 const struct fw_card_driver *driver;
218 struct device *device;
219 struct kref kref;
220
221 int node_id;
222 int generation;
223 /* This is the generation used for timestamping incoming requests. */
224 int request_generation;
225 int current_tlabel, tlabel_mask;
226 struct list_head transaction_list;
227 struct timer_list flush_timer;
228 unsigned long reset_jiffies;
229
230 unsigned long long guid;
231 int max_receive;
232 int link_speed;
233 int config_rom_generation;
234
235 /*
236 * We need to store up to 4 self IDs for each of at most 63
237 * devices (63 * 4 = 252 words) plus 3 words for the topology map header.
238 */
239 int self_id_count;
240 u32 topology_map[252 + 3];
241
242 spinlock_t lock; /* Take this lock when handling the lists in
243 * this struct. */
244 struct fw_node *local_node;
245 struct fw_node *root_node;
246 struct fw_node *irm_node;
247 int color;
248 int gap_count;
249 int topology_type;
250
251 int index;
252
253 struct list_head link;
254
255 /* Work struct for BM duties. */
256 struct delayed_work work;
257 int bm_retries;
258 int bm_generation;
259};
260
261struct fw_card *fw_card_get(struct fw_card *card);
262void fw_card_put(struct fw_card *card);
263
264/*
265 * The iso packet format allows for an immediate header/payload part
266 * stored in 'header' immediately after the packet info plus an
267 * indirect payload part that is pointed to by the 'payload' field.
268 * Applications can use one or the other or both to implement simple
269 * low-bandwidth streaming (e.g. audio) or more advanced
270 * scatter-gather streaming (e.g. assembling video frames automatically).
271 */
272
273struct fw_iso_packet {
274 u16 payload_length; /* Length of indirect payload. */
275 u32 interrupt : 1; /* Generate interrupt on this packet */
276 u32 skip : 1; /* Set to not send packet at all. */
277 u32 tag : 2;
278 u32 sy : 4;
279 u32 header_length : 8; /* Length of immediate header. */
280 u32 header[0];
281};
282
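A sketch of filling in one packet for fw_iso_context_queue() (declared below): an 8-byte immediate header plus 256 bytes of indirect payload at offset 0 of an fw_iso_buffer. The wrapper relies on the GNU zero-length-array extension to place storage behind header[]; the header values and the 'ctx'/'buffer' variables are assumptions of the sketch:

	static struct {
		struct fw_iso_packet packet;
		u32 immediate[2];
	} p = {
		.packet = {
			.payload_length	= 256,	/* indirect part */
			.interrupt	= 1,	/* callback on completion */
			.tag		= 1,
			.sy		= 0,
			.header_length	= 8,	/* the two quadlets below */
		},
		.immediate = { 0x12345678, 0x9abcdef0 },
	};

	/* fw_iso_context_queue(ctx, &p.packet, buffer, 0); */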
283#define FW_ISO_CONTEXT_TRANSMIT 0
284#define FW_ISO_CONTEXT_RECEIVE 1
285
286#define FW_ISO_CONTEXT_MATCH_TAG0 1
287#define FW_ISO_CONTEXT_MATCH_TAG1 2
288#define FW_ISO_CONTEXT_MATCH_TAG2 4
289#define FW_ISO_CONTEXT_MATCH_TAG3 8
290#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
291
292struct fw_iso_context;
293
294typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
295 u32 cycle,
296 size_t header_length,
297 void *header,
298 void *data);
299
300/*
301 * An iso buffer is just a set of pages mapped for DMA in the
302 * specified direction. Since the pages are to be used for DMA, they
303 * are not mapped into the kernel virtual address space. We store the
304 * DMA address in each page's private field. The helper function
305 * fw_iso_buffer_map() will map the pages into a given vma.
306 */
307
308struct fw_iso_buffer {
309 enum dma_data_direction direction;
310 struct page **pages;
311 int page_count;
312};
313
314struct fw_iso_context {
315 struct fw_card *card;
316 int type;
317 int channel;
318 int speed;
319 size_t header_size;
320 fw_iso_callback_t callback;
321 void *callback_data;
322};
323
324int
325fw_iso_buffer_init(struct fw_iso_buffer *buffer,
326 struct fw_card *card,
327 int page_count,
328 enum dma_data_direction direction);
329int
330fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
331void
332fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
333
334struct fw_iso_context *
335fw_iso_context_create(struct fw_card *card, int type,
336 int channel, int speed, size_t header_size,
337 fw_iso_callback_t callback, void *callback_data);
338
339void
340fw_iso_context_destroy(struct fw_iso_context *ctx);
341
342int
343fw_iso_context_queue(struct fw_iso_context *ctx,
344 struct fw_iso_packet *packet,
345 struct fw_iso_buffer *buffer,
346 unsigned long payload);
347
348int
349fw_iso_context_start(struct fw_iso_context *ctx,
350 int cycle, int sync, int tags);
351
352int
353fw_iso_context_stop(struct fw_iso_context *ctx);
354
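Taken together, these declarations imply the following lifecycle for, e.g., an iso receive context. It is condensed into one function for brevity (a real driver keeps the buffer alive between setup and shutdown); channel, speed, and sizes are invented:

static void example_iso_receive(struct fw_card *card,
				fw_iso_callback_t rx_callback)
{
	struct fw_iso_buffer buffer;
	struct fw_iso_context *ctx;

	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE,
				    /* channel */ 5, SCODE_400,
				    /* header_size */ 4,
				    rx_callback, NULL);
	fw_iso_buffer_init(&buffer, card, /* page_count */ 16,
			   DMA_FROM_DEVICE);
	/* queue packets with fw_iso_context_queue(), then: */
	fw_iso_context_start(ctx, /* cycle, < 0 = ASAP */ -1,
			     /* sync */ 0, FW_ISO_CONTEXT_MATCH_ALL_TAGS);

	/* ... and on shutdown: */
	fw_iso_context_stop(ctx);
	fw_iso_buffer_destroy(&buffer, card);
	fw_iso_context_destroy(ctx);
}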
355struct fw_card_driver {
356 const char *name;
357
358 /*
359 * Enable the given card with the given initial config rom.
360 * This function is expected to activate the card, and either
361 * enable the PHY or set the link_on bit and initiate a bus
362 * reset.
363 */
364 int (*enable)(struct fw_card *card, u32 *config_rom, size_t length);
365
366 int (*update_phy_reg)(struct fw_card *card, int address,
367 int clear_bits, int set_bits);
368
369 /*
370 * Update the config rom for an enabled card. This function
371 * should change the config rom that is presented on the bus
372 * and initiate a bus reset.
373 */
374 int (*set_config_rom)(struct fw_card *card,
375 u32 *config_rom, size_t length);
376
377 void (*send_request)(struct fw_card *card, struct fw_packet *packet);
378 void (*send_response)(struct fw_card *card, struct fw_packet *packet);
379 /* Calling cancel is valid once a packet has been submitted. */
380 int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);
381
382 /*
383 * Allow the specified node ID to do direct DMA out and in of
384 * host memory. The card will disable this for all nodes when
385 * a bus reset happens, so drivers need to re-enable it after
386 * each bus reset. Returns 0 on success, -ENODEV if the card
387 * doesn't support this, -ESTALE if the generation doesn't
388 * match.
389 */
390 int (*enable_phys_dma)(struct fw_card *card,
391 int node_id, int generation);
392
393 u64 (*get_bus_time)(struct fw_card *card);
394
395 struct fw_iso_context *
396 (*allocate_iso_context)(struct fw_card *card,
397 int type, size_t header_size);
398 void (*free_iso_context)(struct fw_iso_context *ctx);
399
400 int (*start_iso)(struct fw_iso_context *ctx,
401 s32 cycle, u32 sync, u32 tags);
402
403 int (*queue_iso)(struct fw_iso_context *ctx,
404 struct fw_iso_packet *packet,
405 struct fw_iso_buffer *buffer,
406 unsigned long payload);
407
408 int (*stop_iso)(struct fw_iso_context *ctx);
409};
410
411int
412fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
413
414void
415fw_send_request(struct fw_card *card, struct fw_transaction *t,
416 int tcode, int node_id, int generation, int speed,
417 unsigned long long offset,
418 void *data, size_t length,
419 fw_transaction_callback_t callback, void *callback_data);
420
421int fw_cancel_transaction(struct fw_card *card,
422 struct fw_transaction *transaction);
423
424void fw_flush_transactions(struct fw_card *card);
425
426void fw_send_phy_config(struct fw_card *card,
427 int node_id, int generation, int gap_count);
428
429/*
430 * Called by the topology code to inform the device code of node
431 * activity: nodes found, lost, or updated.
432 */
433void
434fw_node_event(struct fw_card *card, struct fw_node *node, int event);
435
436/* API used by card level drivers */
437
438void
439fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
440 struct device *device);
441int
442fw_card_add(struct fw_card *card,
443 u32 max_receive, u32 link_speed, u64 guid);
444
445void
446fw_core_remove_card(struct fw_card *card);
447
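The bring-up sequence a controller driver such as fw-ohci follows with the two functions above; 'example_driver' stands in for its struct fw_card_driver, and the link parameters are illustrative:

static int example_probe(struct pci_dev *pci_dev,
			 struct fw_card *card, u64 guid)
{
	fw_card_initialize(card, &example_driver, &pci_dev->dev);
	return fw_card_add(card, /* max_receive */ 2048,
			   /* link_speed */ SCODE_400, guid);
}

/* ... and on removal: fw_core_remove_card(card); */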
448void
449fw_core_handle_bus_reset(struct fw_card *card,
450 int node_id, int generation,
451 int self_id_count, u32 *self_ids);
452void
453fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
454
455void
456fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
457
458#endif /* __fw_transaction_h */
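A sketch of an asynchronous read via fw_send_request() as declared above: for TCODE_READ_QUADLET_REQUEST the payload pointer is NULL, the length gives the read size, and the response data is delivered to the callback. This mirrors how fw-device.c in this patch reads the config ROM; names here are invented:

static void example_read_done(struct fw_card *card, int rcode,
			      void *data, size_t length,
			      void *callback_data)
{
	if (rcode == RCODE_COMPLETE)
		fw_notify("cycle time: %08x\n", *(u32 *) data);
	/* 'data' is only valid during this callback */
}

static struct fw_transaction example_t;

/*
 * fw_send_request(card, &example_t, TCODE_READ_QUADLET_REQUEST,
 *		   node_id, generation, SCODE_400,
 *		   CSR_REGISTER_BASE + CSR_CYCLE_TIME,
 *		   NULL, 4, example_read_done, NULL);
 */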
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index f21426ad2faa..8012b3b0ce75 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -1,6 +1,8 @@
1menu "IEEE 1394 (FireWire) support" 1menu "IEEE 1394 (FireWire) support"
2 depends on PCI || BROKEN 2 depends on PCI || BROKEN
3 3
4source "drivers/firewire/Kconfig"
5
4config IEEE1394 6config IEEE1394
5 tristate "IEEE 1394 (FireWire) support" 7 tristate "IEEE 1394 (FireWire) support"
6 depends on PCI || BROKEN 8 depends on PCI || BROKEN
diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h
new file mode 100644
index 000000000000..84920f3cc83e
--- /dev/null
+++ b/include/linux/crc-itu-t.h
@@ -0,0 +1,28 @@
1/*
2 * crc-itu-t.h - CRC ITU-T V.41 routine
3 *
4 * Implements the standard CRC ITU-T V.41:
5 * Width 16
6 * Poly 0x1021 (x^16 + x^12 + x^5 + 1)
7 * Init 0
8 *
9 * This source code is licensed under the GNU General Public License,
10 * Version 2. See the file COPYING for more details.
11 */
12
13#ifndef CRC_ITU_T_H
14#define CRC_ITU_T_H
15
16#include <linux/types.h>
17
18extern u16 const crc_itu_t_table[256];
19
20extern u16 crc_itu_t(u16 crc, const u8 *buffer, size_t len);
21
22static inline u16 crc_itu_t_byte(u16 crc, const u8 data)
23{
24 return (crc << 8) ^ crc_itu_t_table[((crc >> 8) ^ data) & 0xff];
25}
26
27#endif /* CRC_ITU_T_H */
28
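Typical use of this interface, e.g. to checksum a config ROM block as the FireWire core in this patch does; the buffer contents are arbitrary:

	u8 buf[4] = { 0x04, 0x04, 0x12, 0x34 };
	u16 crc = crc_itu_t(0, buf, sizeof(buf));	/* init value 0 */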
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
new file mode 100644
index 000000000000..d4455eb2ae35
--- /dev/null
+++ b/include/linux/firewire-cdev.h
@@ -0,0 +1,229 @@
1/*
2 * Char device interface.
3 *
4 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#ifndef _LINUX_FIREWIRE_CDEV_H
22#define _LINUX_FIREWIRE_CDEV_H
23
24#include <linux/ioctl.h>
25#include <linux/types.h>
26#include <linux/firewire-constants.h>
27
28#define FW_CDEV_EVENT_BUS_RESET 0x00
29#define FW_CDEV_EVENT_RESPONSE 0x01
30#define FW_CDEV_EVENT_REQUEST 0x02
31#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
32
33/* The 'closure' fields are for user space to use. Data passed in the
34 * 'closure' field for a request will be returned in the corresponding
35 * event. It is a fixed-size 64-bit type, big enough to hold
36 * a pointer on all platforms. */
37
38struct fw_cdev_event_common {
39 __u64 closure;
40 __u32 type;
41};
42
43struct fw_cdev_event_bus_reset {
44 __u64 closure;
45 __u32 type;
46 __u32 node_id;
47 __u32 local_node_id;
48 __u32 bm_node_id;
49 __u32 irm_node_id;
50 __u32 root_node_id;
51 __u32 generation;
52};
53
54struct fw_cdev_event_response {
55 __u64 closure;
56 __u32 type;
57 __u32 rcode;
58 __u32 length;
59 __u32 data[0];
60};
61
62struct fw_cdev_event_request {
63 __u64 closure;
64 __u32 type;
65 __u32 tcode;
66 __u64 offset;
67 __u32 handle;
68 __u32 length;
69 __u32 data[0];
70};
71
72struct fw_cdev_event_iso_interrupt {
73 __u64 closure;
74 __u32 type;
75 __u32 cycle;
76 __u32 header_length; /* Length in bytes of following headers. */
77 __u32 header[0];
78};
79
80union fw_cdev_event {
81 struct fw_cdev_event_common common;
82 struct fw_cdev_event_bus_reset bus_reset;
83 struct fw_cdev_event_response response;
84 struct fw_cdev_event_request request;
85 struct fw_cdev_event_iso_interrupt iso_interrupt;
86};
87
88#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info)
89#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request)
90#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate)
91#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate)
92#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response)
93#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset)
94#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor)
95#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor)
96
97#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context)
98#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso)
99#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso)
100#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso)
101
102/* FW_CDEV_VERSION History
103 *
104 * 1 Feb 18, 2007: Initial version.
105 */
106#define FW_CDEV_VERSION 1
107
108struct fw_cdev_get_info {
109 /* The version field is just a running serial number. We
110 * never break backwards compatibility. Userspace passes in
111 * the version it expects and the kernel passes back the
112 * highest version it can provide. Even if the structs in
113 * this interface are extended in a later version, the kernel
114 * will not copy back more data than what was present in the
115 * interface version userspace expects. */
116 __u32 version;
117
118 /* If the 'rom' pointer below is non-zero, at most rom_length
119 * bytes of config rom will be copied into that user space
120 * address. In either case, rom_length is updated with the
121 * actual length of the config rom. */
122 __u32 rom_length;
123 __u64 rom;
124
125 /* If non-zero, a fw_cdev_event_bus_reset struct will be
126 * copied here with the current state of the bus. This does
127 * not cause a bus reset to happen. The value of closure in
128 * this and subsequent bus reset events is set to
129 * bus_reset_closure. */
130 __u64 bus_reset;
131 __u64 bus_reset_closure;
132
133 /* The index of the card this device belongs to. */
134 __u32 card;
135};
136
137struct fw_cdev_send_request {
138 __u32 tcode;
139 __u32 length;
140 __u64 offset;
141 __u64 closure;
142 __u64 data;
143 __u32 generation;
144};
145
146struct fw_cdev_send_response {
147 __u32 rcode;
148 __u32 length;
149 __u64 data;
150 __u32 handle;
151};
152
153struct fw_cdev_allocate {
154 __u64 offset;
155 __u64 closure;
156 __u32 length;
157 __u32 handle;
158};
159
160struct fw_cdev_deallocate {
161 __u32 handle;
162};
163
164#define FW_CDEV_LONG_RESET 0
165#define FW_CDEV_SHORT_RESET 1
166
167struct fw_cdev_initiate_bus_reset {
168 __u32 type;
169};
170
171struct fw_cdev_add_descriptor {
172 __u32 immediate;
173 __u32 key;
174 __u64 data;
175 __u32 length;
176 __u32 handle;
177};
178
179struct fw_cdev_remove_descriptor {
180 __u32 handle;
181};
182
183#define FW_CDEV_ISO_CONTEXT_TRANSMIT 0
184#define FW_CDEV_ISO_CONTEXT_RECEIVE 1
185
186#define FW_CDEV_ISO_CONTEXT_MATCH_TAG0 1
187#define FW_CDEV_ISO_CONTEXT_MATCH_TAG1 2
188#define FW_CDEV_ISO_CONTEXT_MATCH_TAG2 4
189#define FW_CDEV_ISO_CONTEXT_MATCH_TAG3 8
190#define FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS 15
191
192struct fw_cdev_create_iso_context {
193 __u32 type;
194 __u32 header_size;
195 __u32 channel;
196 __u32 speed;
197 __u64 closure;
198 __u32 handle;
199};
200
201struct fw_cdev_iso_packet {
202 __u16 payload_length; /* Length of indirect payload. */
203 __u32 interrupt : 1; /* Generate interrupt on this packet */
204 __u32 skip : 1; /* Set to not send packet at all. */
205 __u32 tag : 2;
206 __u32 sy : 4;
207 __u32 header_length : 8; /* Length of immediate header. */
208 __u32 header[0];
209};
210
211struct fw_cdev_queue_iso {
212 __u64 packets;
213 __u64 data;
214 __u32 size;
215 __u32 handle;
216};
217
218struct fw_cdev_start_iso {
219 __s32 cycle;
220 __u32 sync;
221 __u32 tags;
222 __u32 handle;
223};
224
225struct fw_cdev_stop_iso {
226 __u32 handle;
227};
228
229#endif /* _LINUX_FIREWIRE_CDEV_H */
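A userspace sketch of the version handshake and bus-state query via FW_CDEV_IOC_GET_INFO; the /dev/fw0 node name is an assumption about the local udev/hotplug setup:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

int main(void)
{
	struct fw_cdev_event_bus_reset reset;
	struct fw_cdev_get_info info;
	int fd = open("/dev/fw0", O_RDWR);

	memset(&info, 0, sizeof(info));
	info.version = FW_CDEV_VERSION;	/* version userspace expects */
	info.bus_reset = (__u64)(unsigned long) &reset;

	if (fd < 0 || ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) < 0)
		return 1;
	printf("ABI version %u, local node %u, generation %u\n",
	       info.version, reset.local_node_id, reset.generation);
	return 0;
}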
diff --git a/include/linux/firewire-constants.h b/include/linux/firewire-constants.h
new file mode 100644
index 000000000000..b316770a43fd
--- /dev/null
+++ b/include/linux/firewire-constants.h
@@ -0,0 +1,67 @@
1#ifndef _LINUX_FIREWIRE_CONSTANTS_H
2#define _LINUX_FIREWIRE_CONSTANTS_H
3
4#define TCODE_WRITE_QUADLET_REQUEST 0x0
5#define TCODE_WRITE_BLOCK_REQUEST 0x1
6#define TCODE_WRITE_RESPONSE 0x2
7#define TCODE_READ_QUADLET_REQUEST 0x4
8#define TCODE_READ_BLOCK_REQUEST 0x5
9#define TCODE_READ_QUADLET_RESPONSE 0x6
10#define TCODE_READ_BLOCK_RESPONSE 0x7
11#define TCODE_CYCLE_START 0x8
12#define TCODE_LOCK_REQUEST 0x9
13#define TCODE_STREAM_DATA 0xa
14#define TCODE_LOCK_RESPONSE 0xb
15
16#define EXTCODE_MASK_SWAP 0x1
17#define EXTCODE_COMPARE_SWAP 0x2
18#define EXTCODE_FETCH_ADD 0x3
19#define EXTCODE_LITTLE_ADD 0x4
20#define EXTCODE_BOUNDED_ADD 0x5
21#define EXTCODE_WRAP_ADD 0x6
22#define EXTCODE_VENDOR_DEPENDENT 0x7
23
24/* Juju specific tcodes */
25#define TCODE_LOCK_MASK_SWAP (0x10 | EXTCODE_MASK_SWAP)
26#define TCODE_LOCK_COMPARE_SWAP (0x10 | EXTCODE_COMPARE_SWAP)
27#define TCODE_LOCK_FETCH_ADD (0x10 | EXTCODE_FETCH_ADD)
28#define TCODE_LOCK_LITTLE_ADD (0x10 | EXTCODE_LITTLE_ADD)
29#define TCODE_LOCK_BOUNDED_ADD (0x10 | EXTCODE_BOUNDED_ADD)
30#define TCODE_LOCK_WRAP_ADD (0x10 | EXTCODE_WRAP_ADD)
31#define TCODE_LOCK_VENDOR_DEPENDENT (0x10 | EXTCODE_VENDOR_DEPENDENT)
32
33#define RCODE_COMPLETE 0x0
34#define RCODE_CONFLICT_ERROR 0x4
35#define RCODE_DATA_ERROR 0x5
36#define RCODE_TYPE_ERROR 0x6
37#define RCODE_ADDRESS_ERROR 0x7
38
39/* Juju specific rcodes */
40#define RCODE_SEND_ERROR 0x10
41#define RCODE_CANCELLED 0x11
42#define RCODE_BUSY 0x12
43#define RCODE_GENERATION 0x13
44#define RCODE_NO_ACK 0x14
45
46#define SCODE_100 0x0
47#define SCODE_200 0x1
48#define SCODE_400 0x2
49#define SCODE_800 0x3
50#define SCODE_1600 0x4
51#define SCODE_3200 0x5
52#define SCODE_BETA 0x3
53
54#define ACK_COMPLETE 0x1
55#define ACK_PENDING 0x2
56#define ACK_BUSY_X 0x4
57#define ACK_BUSY_A 0x5
58#define ACK_BUSY_B 0x6
59#define ACK_DATA_ERROR 0xd
60#define ACK_TYPE_ERROR 0xe
61
62#define RETRY_1 0x00
63#define RETRY_X 0x01
64#define RETRY_A 0x02
65#define RETRY_B 0x03
66
67#endif /* _LINUX_FIREWIRE_CONSTANTS_H */
diff --git a/lib/Kconfig b/lib/Kconfig
index 96d6e8ca8b70..2e7ae6b9215b 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -23,6 +23,14 @@ config CRC16
23 the kernel tree does. Such modules that use library CRC16 23 the kernel tree does. Such modules that use library CRC16
24 functions require M here. 24 functions require M here.
25 25
26config CRC_ITU_T
27 tristate "CRC ITU-T V.41 functions"
28 help
29 This option is provided for the case where no in-kernel-tree
30 modules require CRC ITU-T V.41 functions, but a module built outside
31 the kernel tree does. Such modules that use library CRC ITU-T V.41
32 functions require M here.
33
26config CRC32 34config CRC32
27 tristate "CRC32 functions" 35 tristate "CRC32 functions"
28 default y 36 default y
diff --git a/lib/Makefile b/lib/Makefile
index ae57f357fec0..1f65b4613e09 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -41,6 +41,7 @@ endif
41obj-$(CONFIG_BITREVERSE) += bitrev.o 41obj-$(CONFIG_BITREVERSE) += bitrev.o
42obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o 42obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
43obj-$(CONFIG_CRC16) += crc16.o 43obj-$(CONFIG_CRC16) += crc16.o
44obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
44obj-$(CONFIG_CRC32) += crc32.o 45obj-$(CONFIG_CRC32) += crc32.o
45obj-$(CONFIG_LIBCRC32C) += libcrc32c.o 46obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
46obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o 47obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
diff --git a/lib/crc-itu-t.c b/lib/crc-itu-t.c
new file mode 100644
index 000000000000..a63472b82416
--- /dev/null
+++ b/lib/crc-itu-t.c
@@ -0,0 +1,69 @@
1/*
2 * crc-itu-t.c
3 *
4 * This source code is licensed under the GNU General Public License,
5 * Version 2. See the file COPYING for more details.
6 */
7
8#include <linux/types.h>
9#include <linux/module.h>
10#include <linux/crc-itu-t.h>
11
12/** CRC table for the CRC ITU-T V.41 polynomial 0x1021 (x^16 + x^12 + x^5 + 1) */
13const u16 crc_itu_t_table[256] = {
14 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
15 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
16 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
17 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
18 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
19 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
20 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
21 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
22 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
23 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
24 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
25 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
26 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
27 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
28 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
29 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
30 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
31 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
32 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
33 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
34 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
35 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
36 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
37 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
38 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
39 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
40 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
41 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
42 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
43 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
44 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
45 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
46};
47
48EXPORT_SYMBOL(crc_itu_t_table);
49
50/**
51 * crc_itu_t - Compute the CRC-ITU-T for the data buffer
52 *
53 * @crc: previous CRC value
54 * @buffer: data pointer
55 * @len: number of bytes in the buffer
56 *
57 * Returns the updated CRC value
58 */
59u16 crc_itu_t(u16 crc, const u8 *buffer, size_t len)
60{
61 while (len--)
62 crc = crc_itu_t_byte(crc, *buffer++);
63 return crc;
64}
65EXPORT_SYMBOL(crc_itu_t);
66
67MODULE_DESCRIPTION("CRC ITU-T V.41 calculations");
68MODULE_LICENSE("GPL");
69
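A quick sanity check, not part of the patch: with a zero initial value this routine matches the common CRC-16/XMODEM parameterization, whose published check value for the ASCII string "123456789" is 0x31c3:

	u16 crc = crc_itu_t(0, (const u8 *) "123456789", 9);
	/* crc == 0x31c3 */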