author     Linus Torvalds <torvalds@linux-foundation.org>  2010-08-07 20:09:24 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-08-07 20:09:24 -0400
commit     2d53056973079e6c2ffc0d7ae3afbdd3d4f18ae3 (patch)
tree       e921596d80cd0a6434629dbd8d22c0ca3ec14b88
parent     9e50ab91d025afc17ca14a1764be2e1d0c24245d (diff)
parent     e78483c5aeb0d7fbb0e365802145f1045e62957e (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (82 commits)
firewire: core: add forgotten dummy driver methods, remove unused ones
firewire: add isochronous multichannel reception
firewire: core: small clarifications in core-cdev
firewire: core: remove unused code
firewire: ohci: release channel in error path
firewire: ohci: use memory barriers to order descriptor updates
tools/firewire: nosy-dump: increment program version
tools/firewire: nosy-dump: remove unused code
tools/firewire: nosy-dump: use linux/firewire-constants.h
tools/firewire: nosy-dump: break up a deeply nested function
tools/firewire: nosy-dump: make some symbols static or const
tools/firewire: nosy-dump: change to kernel coding style
tools/firewire: nosy-dump: work around segfault in decode_fcp
tools/firewire: nosy-dump: fix it on x86-64
tools/firewire: add userspace front-end of nosy
firewire: nosy: note ioctls in ioctl-number.txt
firewire: nosy: use generic printk macros
firewire: nosy: endianess fixes and annotations
firewire: nosy: annotate __user pointers and __iomem pointers
firewire: nosy: fix device shutdown with active client
...
31 files changed, 4303 insertions, 571 deletions
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 2ec3d7d89984..33223ff121d8 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -79,6 +79,7 @@ Code Seq#(hex) Include File Comments
79 | 0x22 all scsi/sg.h | 79 | 0x22 all scsi/sg.h |
80 | '#' 00-3F IEEE 1394 Subsystem Block for the entire subsystem | 80 | '#' 00-3F IEEE 1394 Subsystem Block for the entire subsystem |
81 | '$' 00-0F linux/perf_counter.h, linux/perf_event.h | 81 | '$' 00-0F linux/perf_counter.h, linux/perf_event.h |
82 | '&' 00-07 drivers/firewire/nosy-user.h | ||
82 | '1' 00-1F <linux/timepps.h> PPS kit from Ulrich Windl | 83 | '1' 00-1F <linux/timepps.h> PPS kit from Ulrich Windl |
83 | <ftp://ftp.de.kernel.org/pub/linux/daemons/ntp/PPS/> | 84 | <ftp://ftp.de.kernel.org/pub/linux/daemons/ntp/PPS/> |
84 | '2' 01-04 linux/i2o.h | 85 | '2' 01-04 linux/i2o.h |
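The new entry above reserves ioctl code '&', sequence numbers 00-07, for drivers/firewire/nosy-user.h. As a rough illustration of what claiming such a block looks like in a user-visible header (the NOSY_SKETCH_* names and the payload type below are placeholders for this sketch, not the actual contents of nosy-user.h), the definitions are built from the standard _IO* macros so the magic character and sequence range match what is registered here:

/* Hypothetical sketch only -- not the real drivers/firewire/nosy-user.h. */
#ifndef NOSY_SKETCH_USER_H
#define NOSY_SKETCH_USER_H

#include <linux/ioctl.h>
#include <linux/types.h>

#define NOSY_SKETCH_IOC_MAGIC   '&'     /* code registered in ioctl-number.txt */

struct nosy_sketch_stats {              /* placeholder payload */
        __u32 total_packet_count;
        __u32 lost_packet_count;
};

/* Sequence numbers must stay inside the documented 00-07 window. */
#define NOSY_SKETCH_GET_STATS   _IOR(NOSY_SKETCH_IOC_MAGIC, 0, struct nosy_sketch_stats)
#define NOSY_SKETCH_START       _IO(NOSY_SKETCH_IOC_MAGIC, 1)
#define NOSY_SKETCH_STOP        _IO(NOSY_SKETCH_IOC_MAGIC, 2)
#define NOSY_SKETCH_FILTER      _IOW(NOSY_SKETCH_IOC_MAGIC, 3, __u32)

#endif /* NOSY_SKETCH_USER_H */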
diff --git a/MAINTAINERS b/MAINTAINERS
index 1349d1cb5aca..3f49df2d977e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2324,6 +2324,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
2324 | S: Maintained | 2324 | S: Maintained |
2325 | F: drivers/firewire/ | 2325 | F: drivers/firewire/ |
2326 | F: include/linux/firewire*.h | 2326 | F: include/linux/firewire*.h |
2327 | F: tools/firewire/ | ||
2327 | 2328 | ||
2328 | FIRMWARE LOADER (request_firmware) | 2329 | FIRMWARE LOADER (request_firmware) |
2329 | S: Orphan | 2330 | S: Orphan |
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index a9371b36a9b9..fcf3ea28340b 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -66,4 +66,28 @@ config FIREWIRE_NET
66 | 66 | ||
67 | source "drivers/ieee1394/Kconfig" | 67 | source "drivers/ieee1394/Kconfig" |
68 | 68 | ||
69 | config FIREWIRE_NOSY | ||
70 | tristate "Nosy - a FireWire traffic sniffer for PCILynx cards" | ||
71 | depends on PCI | ||
72 | help | ||
73 | Nosy is an IEEE 1394 packet sniffer that is used for protocol | ||
74 | analysis and in development of IEEE 1394 drivers, applications, | ||
75 | or firmwares. | ||
76 | |||
77 | This driver lets you use a Texas Instruments PCILynx 1394 to PCI | ||
78 | link layer controller TSB12LV21/A/B as a low-budget bus analyzer. | ||
79 | PCILynx is a nowadays very rare IEEE 1394 controller which is | ||
80 | not OHCI 1394 compliant. | ||
81 | |||
82 | The following cards are known to be based on PCILynx or PCILynx-2: | ||
83 | IOI IOI-1394TT (PCI card), Unibrain Fireboard 400 PCI Lynx-2 | ||
84 | (PCI card), Newer Technology FireWire 2 Go (CardBus card), | ||
85 | Apple Power Mac G3 blue & white (onboard controller). | ||
86 | |||
87 | To compile this driver as a module, say M here: The module will be | ||
88 | called nosy. Source code of a userspace interface to nosy, called | ||
89 | nosy-dump, can be found in tools/firewire/ of the kernel sources. | ||
90 | |||
91 | If unsure, say N. | ||
92 | |||
69 | endmenu | 93 | endmenu |
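The FIREWIRE_NOSY entry above is a tristate, so nosy can be built in or, as the help text says, compiled as a module; the module name itself comes from the nosy.o object added in the Makefile hunk below. Purely as a hypothetical outline of the shape such a PCI-bound sniffer module takes (every name and the device ID here are placeholders, not code from the actual nosy driver):

/* Hypothetical outline only -- not the actual nosy driver. */
#include <linux/module.h>
#include <linux/pci.h>

#define SKETCH_PCILYNX_DEVICE_ID 0x8000         /* placeholder ID for this sketch */

static const struct pci_device_id sketch_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_TI, SKETCH_PCILYNX_DEVICE_ID) },
        { }
};
MODULE_DEVICE_TABLE(pci, sketch_id_table);

static int sketch_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        /* map the PCILynx registers and expose a character device for nosy-dump */
        return 0;
}

static void sketch_remove(struct pci_dev *dev)
{
        /* tear everything down in the reverse order of probe */
}

static struct pci_driver sketch_pci_driver = {
        .name           = "nosy",
        .id_table       = sketch_id_table,
        .probe          = sketch_probe,
        .remove         = sketch_remove,
};

static int __init sketch_init(void)
{
        return pci_register_driver(&sketch_pci_driver);
}
module_init(sketch_init);

static void __exit sketch_exit(void)
{
        pci_unregister_driver(&sketch_pci_driver);
}
module_exit(sketch_exit);

MODULE_LICENSE("GPL");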
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index a8f9bb6d9fdf..3c6a7fb20aa7 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_FIREWIRE) += firewire-core.o
12 | obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o | 12 | obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o |
13 | obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o | 13 | obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o |
14 | obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o | 14 | obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o |
15 | obj-$(CONFIG_FIREWIRE_NOSY) += nosy.o | ||
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 371713ff0266..be0492398ef9 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -204,17 +204,62 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
204 | } | 204 | } |
205 | EXPORT_SYMBOL(fw_core_remove_descriptor); | 205 | EXPORT_SYMBOL(fw_core_remove_descriptor); |
206 | 206 | ||
207 | static int reset_bus(struct fw_card *card, bool short_reset) | ||
208 | { | ||
209 | int reg = short_reset ? 5 : 1; | ||
210 | int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET; | ||
211 | |||
212 | return card->driver->update_phy_reg(card, reg, 0, bit); | ||
213 | } | ||
214 | |||
215 | void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset) | ||
216 | { | ||
217 | /* We don't try hard to sort out requests of long vs. short resets. */ | ||
218 | card->br_short = short_reset; | ||
219 | |||
220 | /* Use an arbitrary short delay to combine multiple reset requests. */ | ||
221 | fw_card_get(card); | ||
222 | if (!schedule_delayed_work(&card->br_work, | ||
223 | delayed ? DIV_ROUND_UP(HZ, 100) : 0)) | ||
224 | fw_card_put(card); | ||
225 | } | ||
226 | EXPORT_SYMBOL(fw_schedule_bus_reset); | ||
227 | |||
228 | static void br_work(struct work_struct *work) | ||
229 | { | ||
230 | struct fw_card *card = container_of(work, struct fw_card, br_work.work); | ||
231 | |||
232 | /* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */ | ||
233 | if (card->reset_jiffies != 0 && | ||
234 | time_is_after_jiffies(card->reset_jiffies + 2 * HZ)) { | ||
235 | if (!schedule_delayed_work(&card->br_work, 2 * HZ)) | ||
236 | fw_card_put(card); | ||
237 | return; | ||
238 | } | ||
239 | |||
240 | fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation, | ||
241 | FW_PHY_CONFIG_CURRENT_GAP_COUNT); | ||
242 | reset_bus(card, card->br_short); | ||
243 | fw_card_put(card); | ||
244 | } | ||
245 | |||
207 | static void allocate_broadcast_channel(struct fw_card *card, int generation) | 246 | static void allocate_broadcast_channel(struct fw_card *card, int generation) |
208 | { | 247 | { |
209 | int channel, bandwidth = 0; | 248 | int channel, bandwidth = 0; |
210 | 249 | ||
211 | fw_iso_resource_manage(card, generation, 1ULL << 31, &channel, | 250 | if (!card->broadcast_channel_allocated) { |
212 | &bandwidth, true, card->bm_transaction_data); | 251 | fw_iso_resource_manage(card, generation, 1ULL << 31, |
213 | if (channel == 31) { | 252 | &channel, &bandwidth, true, |
253 | card->bm_transaction_data); | ||
254 | if (channel != 31) { | ||
255 | fw_notify("failed to allocate broadcast channel\n"); | ||
256 | return; | ||
257 | } | ||
214 | card->broadcast_channel_allocated = true; | 258 | card->broadcast_channel_allocated = true; |
215 | device_for_each_child(card->device, (void *)(long)generation, | ||
216 | fw_device_set_broadcast_channel); | ||
217 | } | 259 | } |
260 | |||
261 | device_for_each_child(card->device, (void *)(long)generation, | ||
262 | fw_device_set_broadcast_channel); | ||
218 | } | 263 | } |
219 | 264 | ||
220 | static const char gap_count_table[] = { | 265 | static const char gap_count_table[] = { |
@@ -224,27 +269,26 @@ static const char gap_count_table[] = {
224 | void fw_schedule_bm_work(struct fw_card *card, unsigned long delay) | 269 | void fw_schedule_bm_work(struct fw_card *card, unsigned long delay) |
225 | { | 270 | { |
226 | fw_card_get(card); | 271 | fw_card_get(card); |
227 | if (!schedule_delayed_work(&card->work, delay)) | 272 | if (!schedule_delayed_work(&card->bm_work, delay)) |
228 | fw_card_put(card); | 273 | fw_card_put(card); |
229 | } | 274 | } |
230 | 275 | ||
231 | static void fw_card_bm_work(struct work_struct *work) | 276 | static void bm_work(struct work_struct *work) |
232 | { | 277 | { |
233 | struct fw_card *card = container_of(work, struct fw_card, work.work); | 278 | struct fw_card *card = container_of(work, struct fw_card, bm_work.work); |
234 | struct fw_device *root_device, *irm_device; | 279 | struct fw_device *root_device, *irm_device; |
235 | struct fw_node *root_node; | 280 | struct fw_node *root_node; |
236 | unsigned long flags; | 281 | int root_id, new_root_id, irm_id, bm_id, local_id; |
237 | int root_id, new_root_id, irm_id, local_id; | ||
238 | int gap_count, generation, grace, rcode; | 282 | int gap_count, generation, grace, rcode; |
239 | bool do_reset = false; | 283 | bool do_reset = false; |
240 | bool root_device_is_running; | 284 | bool root_device_is_running; |
241 | bool root_device_is_cmc; | 285 | bool root_device_is_cmc; |
242 | bool irm_is_1394_1995_only; | 286 | bool irm_is_1394_1995_only; |
243 | 287 | ||
244 | spin_lock_irqsave(&card->lock, flags); | 288 | spin_lock_irq(&card->lock); |
245 | 289 | ||
246 | if (card->local_node == NULL) { | 290 | if (card->local_node == NULL) { |
247 | spin_unlock_irqrestore(&card->lock, flags); | 291 | spin_unlock_irq(&card->lock); |
248 | goto out_put_card; | 292 | goto out_put_card; |
249 | } | 293 | } |
250 | 294 | ||
@@ -267,7 +311,8 @@ static void fw_card_bm_work(struct work_struct *work)
267 | 311 | ||
268 | grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8)); | 312 | grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8)); |
269 | 313 | ||
270 | if (is_next_generation(generation, card->bm_generation) || | 314 | if ((is_next_generation(generation, card->bm_generation) && |
315 | !card->bm_abdicate) || | ||
271 | (card->bm_generation != generation && grace)) { | 316 | (card->bm_generation != generation && grace)) { |
272 | /* | 317 | /* |
273 | * This first step is to figure out who is IRM and | 318 | * This first step is to figure out who is IRM and |
@@ -298,21 +343,26 @@ static void fw_card_bm_work(struct work_struct *work)
298 | card->bm_transaction_data[0] = cpu_to_be32(0x3f); | 343 | card->bm_transaction_data[0] = cpu_to_be32(0x3f); |
299 | card->bm_transaction_data[1] = cpu_to_be32(local_id); | 344 | card->bm_transaction_data[1] = cpu_to_be32(local_id); |
300 | 345 | ||
301 | spin_unlock_irqrestore(&card->lock, flags); | 346 | spin_unlock_irq(&card->lock); |
302 | 347 | ||
303 | rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, | 348 | rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, |
304 | irm_id, generation, SCODE_100, | 349 | irm_id, generation, SCODE_100, |
305 | CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, | 350 | CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, |
306 | card->bm_transaction_data, | 351 | card->bm_transaction_data, 8); |
307 | sizeof(card->bm_transaction_data)); | ||
308 | 352 | ||
309 | if (rcode == RCODE_GENERATION) | 353 | if (rcode == RCODE_GENERATION) |
310 | /* Another bus reset, BM work has been rescheduled. */ | 354 | /* Another bus reset, BM work has been rescheduled. */ |
311 | goto out; | 355 | goto out; |
312 | 356 | ||
313 | if (rcode == RCODE_COMPLETE && | 357 | bm_id = be32_to_cpu(card->bm_transaction_data[0]); |
314 | card->bm_transaction_data[0] != cpu_to_be32(0x3f)) { | ||
315 | 358 | ||
359 | spin_lock_irq(&card->lock); | ||
360 | if (rcode == RCODE_COMPLETE && generation == card->generation) | ||
361 | card->bm_node_id = | ||
362 | bm_id == 0x3f ? local_id : 0xffc0 | bm_id; | ||
363 | spin_unlock_irq(&card->lock); | ||
364 | |||
365 | if (rcode == RCODE_COMPLETE && bm_id != 0x3f) { | ||
316 | /* Somebody else is BM. Only act as IRM. */ | 366 | /* Somebody else is BM. Only act as IRM. */ |
317 | if (local_id == irm_id) | 367 | if (local_id == irm_id) |
318 | allocate_broadcast_channel(card, generation); | 368 | allocate_broadcast_channel(card, generation); |
@@ -320,7 +370,17 @@ static void fw_card_bm_work(struct work_struct *work)
320 | goto out; | 370 | goto out; |
321 | } | 371 | } |
322 | 372 | ||
323 | spin_lock_irqsave(&card->lock, flags); | 373 | if (rcode == RCODE_SEND_ERROR) { |
374 | /* | ||
375 | * We have been unable to send the lock request due to | ||
376 | * some local problem. Let's try again later and hope | ||
377 | * that the problem has gone away by then. | ||
378 | */ | ||
379 | fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); | ||
380 | goto out; | ||
381 | } | ||
382 | |||
383 | spin_lock_irq(&card->lock); | ||
324 | 384 | ||
325 | if (rcode != RCODE_COMPLETE) { | 385 | if (rcode != RCODE_COMPLETE) { |
326 | /* | 386 | /* |
@@ -339,7 +399,7 @@ static void fw_card_bm_work(struct work_struct *work)
339 | * We weren't BM in the last generation, and the last | 399 | * We weren't BM in the last generation, and the last |
340 | * bus reset is less than 125ms ago. Reschedule this job. | 400 | * bus reset is less than 125ms ago. Reschedule this job. |
341 | */ | 401 | */ |
342 | spin_unlock_irqrestore(&card->lock, flags); | 402 | spin_unlock_irq(&card->lock); |
343 | fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); | 403 | fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); |
344 | goto out; | 404 | goto out; |
345 | } | 405 | } |
@@ -362,14 +422,12 @@ static void fw_card_bm_work(struct work_struct *work)
362 | * If we haven't probed this device yet, bail out now | 422 | * If we haven't probed this device yet, bail out now |
363 | * and let's try again once that's done. | 423 | * and let's try again once that's done. |
364 | */ | 424 | */ |
365 | spin_unlock_irqrestore(&card->lock, flags); | 425 | spin_unlock_irq(&card->lock); |
366 | goto out; | 426 | goto out; |
367 | } else if (root_device_is_cmc) { | 427 | } else if (root_device_is_cmc) { |
368 | /* | 428 | /* |
369 | * FIXME: I suppose we should set the cmstr bit in the | 429 | * We will send out a force root packet for this |
370 | * STATE_CLEAR register of this node, as described in | 430 | * node as part of the gap count optimization. |
371 | * 1394-1995, 8.4.2.6. Also, send out a force root | ||
372 | * packet for this node. | ||
373 | */ | 431 | */ |
374 | new_root_id = root_id; | 432 | new_root_id = root_id; |
375 | } else { | 433 | } else { |
@@ -402,19 +460,33 @@ static void fw_card_bm_work(struct work_struct *work)
402 | (card->gap_count != gap_count || new_root_id != root_id)) | 460 | (card->gap_count != gap_count || new_root_id != root_id)) |
403 | do_reset = true; | 461 | do_reset = true; |
404 | 462 | ||
405 | spin_unlock_irqrestore(&card->lock, flags); | 463 | spin_unlock_irq(&card->lock); |
406 | 464 | ||
407 | if (do_reset) { | 465 | if (do_reset) { |
408 | fw_notify("phy config: card %d, new root=%x, gap_count=%d\n", | 466 | fw_notify("phy config: card %d, new root=%x, gap_count=%d\n", |
409 | card->index, new_root_id, gap_count); | 467 | card->index, new_root_id, gap_count); |
410 | fw_send_phy_config(card, new_root_id, generation, gap_count); | 468 | fw_send_phy_config(card, new_root_id, generation, gap_count); |
411 | fw_core_initiate_bus_reset(card, 1); | 469 | reset_bus(card, true); |
412 | /* Will allocate broadcast channel after the reset. */ | 470 | /* Will allocate broadcast channel after the reset. */ |
413 | } else { | 471 | goto out; |
414 | if (local_id == irm_id) | 472 | } |
415 | allocate_broadcast_channel(card, generation); | 473 | |
474 | if (root_device_is_cmc) { | ||
475 | /* | ||
476 | * Make sure that the cycle master sends cycle start packets. | ||
477 | */ | ||
478 | card->bm_transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR); | ||
479 | rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST, | ||
480 | root_id, generation, SCODE_100, | ||
481 | CSR_REGISTER_BASE + CSR_STATE_SET, | ||
482 | card->bm_transaction_data, 4); | ||
483 | if (rcode == RCODE_GENERATION) | ||
484 | goto out; | ||
416 | } | 485 | } |
417 | 486 | ||
487 | if (local_id == irm_id) | ||
488 | allocate_broadcast_channel(card, generation); | ||
489 | |||
418 | out: | 490 | out: |
419 | fw_node_put(root_node); | 491 | fw_node_put(root_node); |
420 | out_put_card: | 492 | out_put_card: |
@@ -432,17 +504,23 @@ void fw_card_initialize(struct fw_card *card,
432 | card->device = device; | 504 | card->device = device; |
433 | card->current_tlabel = 0; | 505 | card->current_tlabel = 0; |
434 | card->tlabel_mask = 0; | 506 | card->tlabel_mask = 0; |
507 | card->split_timeout_hi = 0; | ||
508 | card->split_timeout_lo = 800 << 19; | ||
509 | card->split_timeout_cycles = 800; | ||
510 | card->split_timeout_jiffies = DIV_ROUND_UP(HZ, 10); | ||
435 | card->color = 0; | 511 | card->color = 0; |
436 | card->broadcast_channel = BROADCAST_CHANNEL_INITIAL; | 512 | card->broadcast_channel = BROADCAST_CHANNEL_INITIAL; |
437 | 513 | ||
438 | kref_init(&card->kref); | 514 | kref_init(&card->kref); |
439 | init_completion(&card->done); | 515 | init_completion(&card->done); |
440 | INIT_LIST_HEAD(&card->transaction_list); | 516 | INIT_LIST_HEAD(&card->transaction_list); |
517 | INIT_LIST_HEAD(&card->phy_receiver_list); | ||
441 | spin_lock_init(&card->lock); | 518 | spin_lock_init(&card->lock); |
442 | 519 | ||
443 | card->local_node = NULL; | 520 | card->local_node = NULL; |
444 | 521 | ||
445 | INIT_DELAYED_WORK(&card->work, fw_card_bm_work); | 522 | INIT_DELAYED_WORK(&card->br_work, br_work); |
523 | INIT_DELAYED_WORK(&card->bm_work, bm_work); | ||
446 | } | 524 | } |
447 | EXPORT_SYMBOL(fw_card_initialize); | 525 | EXPORT_SYMBOL(fw_card_initialize); |
448 | 526 | ||
@@ -468,20 +546,22 @@ int fw_card_add(struct fw_card *card,
468 | } | 546 | } |
469 | EXPORT_SYMBOL(fw_card_add); | 547 | EXPORT_SYMBOL(fw_card_add); |
470 | 548 | ||
471 | |||
472 | /* | 549 | /* |
473 | * The next few functions implement a dummy driver that is used once a card | 550 | * The next few functions implement a dummy driver that is used once a card |
474 | * driver shuts down an fw_card. This allows the driver to cleanly unload, | 551 | * driver shuts down an fw_card. This allows the driver to cleanly unload, |
475 | * as all IO to the card will be handled (and failed) by the dummy driver | 552 | * as all IO to the card will be handled (and failed) by the dummy driver |
476 | * instead of calling into the module. Only functions for iso context | 553 | * instead of calling into the module. Only functions for iso context |
477 | * shutdown still need to be provided by the card driver. | 554 | * shutdown still need to be provided by the card driver. |
555 | * | ||
556 | * .read/write_csr() should never be called anymore after the dummy driver | ||
557 | * was bound since they are only used within request handler context. | ||
558 | * .set_config_rom() is never called since the card is taken out of card_list | ||
559 | * before switching to the dummy driver. | ||
478 | */ | 560 | */ |
479 | 561 | ||
480 | static int dummy_enable(struct fw_card *card, | 562 | static int dummy_read_phy_reg(struct fw_card *card, int address) |
481 | const __be32 *config_rom, size_t length) | ||
482 | { | 563 | { |
483 | BUG(); | 564 | return -ENODEV; |
484 | return -1; | ||
485 | } | 565 | } |
486 | 566 | ||
487 | static int dummy_update_phy_reg(struct fw_card *card, int address, | 567 | static int dummy_update_phy_reg(struct fw_card *card, int address, |
@@ -490,25 +570,14 @@ static int dummy_update_phy_reg(struct fw_card *card, int address,
490 | return -ENODEV; | 570 | return -ENODEV; |
491 | } | 571 | } |
492 | 572 | ||
493 | static int dummy_set_config_rom(struct fw_card *card, | ||
494 | const __be32 *config_rom, size_t length) | ||
495 | { | ||
496 | /* | ||
497 | * We take the card out of card_list before setting the dummy | ||
498 | * driver, so this should never get called. | ||
499 | */ | ||
500 | BUG(); | ||
501 | return -1; | ||
502 | } | ||
503 | |||
504 | static void dummy_send_request(struct fw_card *card, struct fw_packet *packet) | 573 | static void dummy_send_request(struct fw_card *card, struct fw_packet *packet) |
505 | { | 574 | { |
506 | packet->callback(packet, card, -ENODEV); | 575 | packet->callback(packet, card, RCODE_CANCELLED); |
507 | } | 576 | } |
508 | 577 | ||
509 | static void dummy_send_response(struct fw_card *card, struct fw_packet *packet) | 578 | static void dummy_send_response(struct fw_card *card, struct fw_packet *packet) |
510 | { | 579 | { |
511 | packet->callback(packet, card, -ENODEV); | 580 | packet->callback(packet, card, RCODE_CANCELLED); |
512 | } | 581 | } |
513 | 582 | ||
514 | static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet) | 583 | static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet) |
@@ -522,14 +591,40 @@ static int dummy_enable_phys_dma(struct fw_card *card,
522 | return -ENODEV; | 591 | return -ENODEV; |
523 | } | 592 | } |
524 | 593 | ||
594 | static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card, | ||
595 | int type, int channel, size_t header_size) | ||
596 | { | ||
597 | return ERR_PTR(-ENODEV); | ||
598 | } | ||
599 | |||
600 | static int dummy_start_iso(struct fw_iso_context *ctx, | ||
601 | s32 cycle, u32 sync, u32 tags) | ||
602 | { | ||
603 | return -ENODEV; | ||
604 | } | ||
605 | |||
606 | static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels) | ||
607 | { | ||
608 | return -ENODEV; | ||
609 | } | ||
610 | |||
611 | static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p, | ||
612 | struct fw_iso_buffer *buffer, unsigned long payload) | ||
613 | { | ||
614 | return -ENODEV; | ||
615 | } | ||
616 | |||
525 | static const struct fw_card_driver dummy_driver_template = { | 617 | static const struct fw_card_driver dummy_driver_template = { |
526 | .enable = dummy_enable, | 618 | .read_phy_reg = dummy_read_phy_reg, |
527 | .update_phy_reg = dummy_update_phy_reg, | 619 | .update_phy_reg = dummy_update_phy_reg, |
528 | .set_config_rom = dummy_set_config_rom, | 620 | .send_request = dummy_send_request, |
529 | .send_request = dummy_send_request, | 621 | .send_response = dummy_send_response, |
530 | .cancel_packet = dummy_cancel_packet, | 622 | .cancel_packet = dummy_cancel_packet, |
531 | .send_response = dummy_send_response, | 623 | .enable_phys_dma = dummy_enable_phys_dma, |
532 | .enable_phys_dma = dummy_enable_phys_dma, | 624 | .allocate_iso_context = dummy_allocate_iso_context, |
625 | .start_iso = dummy_start_iso, | ||
626 | .set_iso_channels = dummy_set_iso_channels, | ||
627 | .queue_iso = dummy_queue_iso, | ||
533 | }; | 628 | }; |
534 | 629 | ||
535 | void fw_card_release(struct kref *kref) | 630 | void fw_card_release(struct kref *kref) |
@@ -545,7 +640,7 @@ void fw_core_remove_card(struct fw_card *card)
545 | 640 | ||
546 | card->driver->update_phy_reg(card, 4, | 641 | card->driver->update_phy_reg(card, 4, |
547 | PHY_LINK_ACTIVE | PHY_CONTENDER, 0); | 642 | PHY_LINK_ACTIVE | PHY_CONTENDER, 0); |
548 | fw_core_initiate_bus_reset(card, 1); | 643 | fw_schedule_bus_reset(card, false, true); |
549 | 644 | ||
550 | mutex_lock(&card_mutex); | 645 | mutex_lock(&card_mutex); |
551 | list_del_init(&card->link); | 646 | list_del_init(&card->link); |
@@ -565,12 +660,3 @@ void fw_core_remove_card(struct fw_card *card)
565 | WARN_ON(!list_empty(&card->transaction_list)); | 660 | WARN_ON(!list_empty(&card->transaction_list)); |
566 | } | 661 | } |
567 | EXPORT_SYMBOL(fw_core_remove_card); | 662 | EXPORT_SYMBOL(fw_core_remove_card); |
568 | |||
569 | int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset) | ||
570 | { | ||
571 | int reg = short_reset ? 5 : 1; | ||
572 | int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET; | ||
573 | |||
574 | return card->driver->update_phy_reg(card, reg, 0, bit); | ||
575 | } | ||
576 | EXPORT_SYMBOL(fw_core_initiate_bus_reset); | ||
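One idiom worth calling out in the core-card.c changes above: fw_schedule_bus_reset() and fw_schedule_bm_work() both take a card reference before scheduling the delayed work and drop it again immediately if the work was already pending, while br_work() and bm_work() drop the reference when they finish. That keeps the card alive for exactly as long as a work item can still run. A minimal, generic sketch of the idiom (invented names, not the firewire-core types):

/* Generic sketch of the get/put-around-schedule_delayed_work idiom. */
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct sketch_card {
        struct kref kref;
        struct delayed_work work;
};

static void sketch_card_release(struct kref *kref)
{
        kfree(container_of(kref, struct sketch_card, kref));
}

static void sketch_card_put(struct sketch_card *card)
{
        kref_put(&card->kref, sketch_card_release);
}

static void sketch_schedule(struct sketch_card *card, unsigned long delay)
{
        kref_get(&card->kref);                  /* pin the card for the pending work */
        if (!schedule_delayed_work(&card->work, delay))
                sketch_card_put(card);          /* already queued: drop the extra ref */
}

static void sketch_work(struct work_struct *work)
{
        struct sketch_card *card =
                container_of(work, struct sketch_card, work.work);

        /* ... do the actual job ... */

        sketch_card_put(card);                  /* release the ref taken at schedule time */
}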
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 5bf106b9d791..14bb7b7b5dd7 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -18,6 +18,7 @@
18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/bug.h> | ||
21 | #include <linux/compat.h> | 22 | #include <linux/compat.h> |
22 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
23 | #include <linux/device.h> | 24 | #include <linux/device.h> |
@@ -33,7 +34,7 @@
33 | #include <linux/module.h> | 34 | #include <linux/module.h> |
34 | #include <linux/mutex.h> | 35 | #include <linux/mutex.h> |
35 | #include <linux/poll.h> | 36 | #include <linux/poll.h> |
36 | #include <linux/sched.h> | 37 | #include <linux/sched.h> /* required for linux/wait.h */ |
37 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
38 | #include <linux/spinlock.h> | 39 | #include <linux/spinlock.h> |
39 | #include <linux/string.h> | 40 | #include <linux/string.h> |
@@ -47,6 +48,13 @@
47 | 48 | ||
48 | #include "core.h" | 49 | #include "core.h" |
49 | 50 | ||
51 | /* | ||
52 | * ABI version history is documented in linux/firewire-cdev.h. | ||
53 | */ | ||
54 | #define FW_CDEV_KERNEL_VERSION 4 | ||
55 | #define FW_CDEV_VERSION_EVENT_REQUEST2 4 | ||
56 | #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 | ||
57 | |||
50 | struct client { | 58 | struct client { |
51 | u32 version; | 59 | u32 version; |
52 | struct fw_device *device; | 60 | struct fw_device *device; |
@@ -63,6 +71,9 @@ struct client {
63 | struct fw_iso_buffer buffer; | 71 | struct fw_iso_buffer buffer; |
64 | unsigned long vm_start; | 72 | unsigned long vm_start; |
65 | 73 | ||
74 | struct list_head phy_receiver_link; | ||
75 | u64 phy_receiver_closure; | ||
76 | |||
66 | struct list_head link; | 77 | struct list_head link; |
67 | struct kref kref; | 78 | struct kref kref; |
68 | }; | 79 | }; |
@@ -107,6 +118,7 @@ struct outbound_transaction_resource {
107 | 118 | ||
108 | struct inbound_transaction_resource { | 119 | struct inbound_transaction_resource { |
109 | struct client_resource resource; | 120 | struct client_resource resource; |
121 | struct fw_card *card; | ||
110 | struct fw_request *request; | 122 | struct fw_request *request; |
111 | void *data; | 123 | void *data; |
112 | size_t length; | 124 | size_t length; |
@@ -171,7 +183,10 @@ struct outbound_transaction_event {
171 | 183 | ||
172 | struct inbound_transaction_event { | 184 | struct inbound_transaction_event { |
173 | struct event event; | 185 | struct event event; |
174 | struct fw_cdev_event_request request; | 186 | union { |
187 | struct fw_cdev_event_request request; | ||
188 | struct fw_cdev_event_request2 request2; | ||
189 | } req; | ||
175 | }; | 190 | }; |
176 | 191 | ||
177 | struct iso_interrupt_event { | 192 | struct iso_interrupt_event { |
@@ -179,11 +194,28 @@ struct iso_interrupt_event {
179 | struct fw_cdev_event_iso_interrupt interrupt; | 194 | struct fw_cdev_event_iso_interrupt interrupt; |
180 | }; | 195 | }; |
181 | 196 | ||
197 | struct iso_interrupt_mc_event { | ||
198 | struct event event; | ||
199 | struct fw_cdev_event_iso_interrupt_mc interrupt; | ||
200 | }; | ||
201 | |||
182 | struct iso_resource_event { | 202 | struct iso_resource_event { |
183 | struct event event; | 203 | struct event event; |
184 | struct fw_cdev_event_iso_resource iso_resource; | 204 | struct fw_cdev_event_iso_resource iso_resource; |
185 | }; | 205 | }; |
186 | 206 | ||
207 | struct outbound_phy_packet_event { | ||
208 | struct event event; | ||
209 | struct client *client; | ||
210 | struct fw_packet p; | ||
211 | struct fw_cdev_event_phy_packet phy_packet; | ||
212 | }; | ||
213 | |||
214 | struct inbound_phy_packet_event { | ||
215 | struct event event; | ||
216 | struct fw_cdev_event_phy_packet phy_packet; | ||
217 | }; | ||
218 | |||
187 | static inline void __user *u64_to_uptr(__u64 value) | 219 | static inline void __user *u64_to_uptr(__u64 value) |
188 | { | 220 | { |
189 | return (void __user *)(unsigned long)value; | 221 | return (void __user *)(unsigned long)value; |
@@ -219,6 +251,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
219 | idr_init(&client->resource_idr); | 251 | idr_init(&client->resource_idr); |
220 | INIT_LIST_HEAD(&client->event_list); | 252 | INIT_LIST_HEAD(&client->event_list); |
221 | init_waitqueue_head(&client->wait); | 253 | init_waitqueue_head(&client->wait); |
254 | INIT_LIST_HEAD(&client->phy_receiver_link); | ||
222 | kref_init(&client->kref); | 255 | kref_init(&client->kref); |
223 | 256 | ||
224 | file->private_data = client; | 257 | file->private_data = client; |
@@ -309,7 +342,7 @@ static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
309 | event->generation = client->device->generation; | 342 | event->generation = client->device->generation; |
310 | event->node_id = client->device->node_id; | 343 | event->node_id = client->device->node_id; |
311 | event->local_node_id = card->local_node->node_id; | 344 | event->local_node_id = card->local_node->node_id; |
312 | event->bm_node_id = 0; /* FIXME: We don't track the BM. */ | 345 | event->bm_node_id = card->bm_node_id; |
313 | event->irm_node_id = card->irm_node->node_id; | 346 | event->irm_node_id = card->irm_node->node_id; |
314 | event->root_node_id = card->root_node->node_id; | 347 | event->root_node_id = card->root_node->node_id; |
315 | 348 | ||
@@ -340,7 +373,7 @@ static void queue_bus_reset_event(struct client *client)
340 | 373 | ||
341 | e = kzalloc(sizeof(*e), GFP_KERNEL); | 374 | e = kzalloc(sizeof(*e), GFP_KERNEL); |
342 | if (e == NULL) { | 375 | if (e == NULL) { |
343 | fw_notify("Out of memory when allocating bus reset event\n"); | 376 | fw_notify("Out of memory when allocating event\n"); |
344 | return; | 377 | return; |
345 | } | 378 | } |
346 | 379 | ||
@@ -386,6 +419,9 @@ union ioctl_arg {
386 | struct fw_cdev_allocate_iso_resource allocate_iso_resource; | 419 | struct fw_cdev_allocate_iso_resource allocate_iso_resource; |
387 | struct fw_cdev_send_stream_packet send_stream_packet; | 420 | struct fw_cdev_send_stream_packet send_stream_packet; |
388 | struct fw_cdev_get_cycle_timer2 get_cycle_timer2; | 421 | struct fw_cdev_get_cycle_timer2 get_cycle_timer2; |
422 | struct fw_cdev_send_phy_packet send_phy_packet; | ||
423 | struct fw_cdev_receive_phy_packets receive_phy_packets; | ||
424 | struct fw_cdev_set_iso_channels set_iso_channels; | ||
389 | }; | 425 | }; |
390 | 426 | ||
391 | static int ioctl_get_info(struct client *client, union ioctl_arg *arg) | 427 | static int ioctl_get_info(struct client *client, union ioctl_arg *arg) |
@@ -395,7 +431,7 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
395 | unsigned long ret = 0; | 431 | unsigned long ret = 0; |
396 | 432 | ||
397 | client->version = a->version; | 433 | client->version = a->version; |
398 | a->version = FW_CDEV_VERSION; | 434 | a->version = FW_CDEV_KERNEL_VERSION; |
399 | a->card = client->device->card->index; | 435 | a->card = client->device->card->index; |
400 | 436 | ||
401 | down_read(&fw_device_rwsem); | 437 | down_read(&fw_device_rwsem); |
@@ -554,6 +590,10 @@ static int init_request(struct client *client,
554 | (request->length > 4096 || request->length > 512 << speed)) | 590 | (request->length > 4096 || request->length > 512 << speed)) |
555 | return -EIO; | 591 | return -EIO; |
556 | 592 | ||
593 | if (request->tcode == TCODE_WRITE_QUADLET_REQUEST && | ||
594 | request->length < 4) | ||
595 | return -EINVAL; | ||
596 | |||
557 | e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL); | 597 | e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL); |
558 | if (e == NULL) | 598 | if (e == NULL) |
559 | return -ENOMEM; | 599 | return -ENOMEM; |
@@ -626,28 +666,34 @@ static void release_request(struct client *client,
626 | if (is_fcp_request(r->request)) | 666 | if (is_fcp_request(r->request)) |
627 | kfree(r->data); | 667 | kfree(r->data); |
628 | else | 668 | else |
629 | fw_send_response(client->device->card, r->request, | 669 | fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR); |
630 | RCODE_CONFLICT_ERROR); | 670 | |
671 | fw_card_put(r->card); | ||
631 | kfree(r); | 672 | kfree(r); |
632 | } | 673 | } |
633 | 674 | ||
634 | static void handle_request(struct fw_card *card, struct fw_request *request, | 675 | static void handle_request(struct fw_card *card, struct fw_request *request, |
635 | int tcode, int destination, int source, | 676 | int tcode, int destination, int source, |
636 | int generation, int speed, | 677 | int generation, unsigned long long offset, |
637 | unsigned long long offset, | ||
638 | void *payload, size_t length, void *callback_data) | 678 | void *payload, size_t length, void *callback_data) |
639 | { | 679 | { |
640 | struct address_handler_resource *handler = callback_data; | 680 | struct address_handler_resource *handler = callback_data; |
641 | struct inbound_transaction_resource *r; | 681 | struct inbound_transaction_resource *r; |
642 | struct inbound_transaction_event *e; | 682 | struct inbound_transaction_event *e; |
683 | size_t event_size0; | ||
643 | void *fcp_frame = NULL; | 684 | void *fcp_frame = NULL; |
644 | int ret; | 685 | int ret; |
645 | 686 | ||
687 | /* card may be different from handler->client->device->card */ | ||
688 | fw_card_get(card); | ||
689 | |||
646 | r = kmalloc(sizeof(*r), GFP_ATOMIC); | 690 | r = kmalloc(sizeof(*r), GFP_ATOMIC); |
647 | e = kmalloc(sizeof(*e), GFP_ATOMIC); | 691 | e = kmalloc(sizeof(*e), GFP_ATOMIC); |
648 | if (r == NULL || e == NULL) | 692 | if (r == NULL || e == NULL) { |
693 | fw_notify("Out of memory when allocating event\n"); | ||
649 | goto failed; | 694 | goto failed; |
650 | 695 | } | |
696 | r->card = card; | ||
651 | r->request = request; | 697 | r->request = request; |
652 | r->data = payload; | 698 | r->data = payload; |
653 | r->length = length; | 699 | r->length = length; |
@@ -669,15 +715,37 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
669 | if (ret < 0) | 715 | if (ret < 0) |
670 | goto failed; | 716 | goto failed; |
671 | 717 | ||
672 | e->request.type = FW_CDEV_EVENT_REQUEST; | 718 | if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) { |
673 | e->request.tcode = tcode; | 719 | struct fw_cdev_event_request *req = &e->req.request; |
674 | e->request.offset = offset; | 720 | |
675 | e->request.length = length; | 721 | if (tcode & 0x10) |
676 | e->request.handle = r->resource.handle; | 722 | tcode = TCODE_LOCK_REQUEST; |
677 | e->request.closure = handler->closure; | 723 | |
724 | req->type = FW_CDEV_EVENT_REQUEST; | ||
725 | req->tcode = tcode; | ||
726 | req->offset = offset; | ||
727 | req->length = length; | ||
728 | req->handle = r->resource.handle; | ||
729 | req->closure = handler->closure; | ||
730 | event_size0 = sizeof(*req); | ||
731 | } else { | ||
732 | struct fw_cdev_event_request2 *req = &e->req.request2; | ||
733 | |||
734 | req->type = FW_CDEV_EVENT_REQUEST2; | ||
735 | req->tcode = tcode; | ||
736 | req->offset = offset; | ||
737 | req->source_node_id = source; | ||
738 | req->destination_node_id = destination; | ||
739 | req->card = card->index; | ||
740 | req->generation = generation; | ||
741 | req->length = length; | ||
742 | req->handle = r->resource.handle; | ||
743 | req->closure = handler->closure; | ||
744 | event_size0 = sizeof(*req); | ||
745 | } | ||
678 | 746 | ||
679 | queue_event(handler->client, &e->event, | 747 | queue_event(handler->client, &e->event, |
680 | &e->request, sizeof(e->request), r->data, length); | 748 | &e->req, event_size0, r->data, length); |
681 | return; | 749 | return; |
682 | 750 | ||
683 | failed: | 751 | failed: |
@@ -687,6 +755,8 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
687 | 755 | ||
688 | if (!is_fcp_request(request)) | 756 | if (!is_fcp_request(request)) |
689 | fw_send_response(card, request, RCODE_CONFLICT_ERROR); | 757 | fw_send_response(card, request, RCODE_CONFLICT_ERROR); |
758 | |||
759 | fw_card_put(card); | ||
690 | } | 760 | } |
691 | 761 | ||
692 | static void release_address_handler(struct client *client, | 762 | static void release_address_handler(struct client *client, |
@@ -711,7 +781,11 @@ static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
711 | return -ENOMEM; | 781 | return -ENOMEM; |
712 | 782 | ||
713 | region.start = a->offset; | 783 | region.start = a->offset; |
714 | region.end = a->offset + a->length; | 784 | if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END) |
785 | region.end = a->offset + a->length; | ||
786 | else | ||
787 | region.end = a->region_end; | ||
788 | |||
715 | r->handler.length = a->length; | 789 | r->handler.length = a->length; |
716 | r->handler.address_callback = handle_request; | 790 | r->handler.address_callback = handle_request; |
717 | r->handler.callback_data = r; | 791 | r->handler.callback_data = r; |
@@ -723,6 +797,7 @@ static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
723 | kfree(r); | 797 | kfree(r); |
724 | return ret; | 798 | return ret; |
725 | } | 799 | } |
800 | a->offset = r->handler.offset; | ||
726 | 801 | ||
727 | r->resource.release = release_address_handler; | 802 | r->resource.release = release_address_handler; |
728 | ret = add_client_resource(client, &r->resource, GFP_KERNEL); | 803 | ret = add_client_resource(client, &r->resource, GFP_KERNEL); |
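The two ioctl_allocate() hunks above change the address allocation ABI in two small ways: a client that announced ABI version 4 may pass a region_end larger than offset + length, in which case the kernel looks for a free spot anywhere in [offset, region_end), and the address that was actually chosen is written back into the offset field. A hedged userspace sketch (the FW_CDEV_IOC_* names come from <linux/firewire-cdev.h>; the exact field sets of the structs and the /dev/fw0 node name are assumptions of this sketch, and error handling is trimmed):

/* Userspace sketch: let the kernel pick an address inside a window. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

int main(void)
{
        struct fw_cdev_get_info info;
        struct fw_cdev_allocate a;
        int fd = open("/dev/fw0", O_RDWR);      /* device node name is an assumption */

        if (fd < 0)
                return 1;

        memset(&info, 0, sizeof(info));
        info.version = 4;                       /* announce ABI v4 so region_end is honored */
        if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) < 0)
                return 1;

        memset(&a, 0, sizeof(a));
        a.offset     = 0xfffff0000000ULL;       /* start of the search window */
        a.region_end = 0xfffff0010000ULL;       /* end of the window */
        a.length     = 0x100;                   /* size of the range actually wanted */

        if (ioctl(fd, FW_CDEV_IOC_ALLOCATE, &a) == 0)
                /* the kernel wrote the address it picked back into a.offset */
                printf("allocated at 0x%llx, handle %u\n",
                       (unsigned long long)a.offset, a.handle);
        return 0;
}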
@@ -757,15 +832,19 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
757 | if (is_fcp_request(r->request)) | 832 | if (is_fcp_request(r->request)) |
758 | goto out; | 833 | goto out; |
759 | 834 | ||
760 | if (a->length < r->length) | 835 | if (a->length != fw_get_response_length(r->request)) { |
761 | r->length = a->length; | 836 | ret = -EINVAL; |
762 | if (copy_from_user(r->data, u64_to_uptr(a->data), r->length)) { | 837 | kfree(r->request); |
838 | goto out; | ||
839 | } | ||
840 | if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) { | ||
763 | ret = -EFAULT; | 841 | ret = -EFAULT; |
764 | kfree(r->request); | 842 | kfree(r->request); |
765 | goto out; | 843 | goto out; |
766 | } | 844 | } |
767 | fw_send_response(client->device->card, r->request, a->rcode); | 845 | fw_send_response(r->card, r->request, a->rcode); |
768 | out: | 846 | out: |
847 | fw_card_put(r->card); | ||
769 | kfree(r); | 848 | kfree(r); |
770 | 849 | ||
771 | return ret; | 850 | return ret; |
@@ -773,8 +852,9 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
773 | 852 | ||
774 | static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg) | 853 | static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg) |
775 | { | 854 | { |
776 | return fw_core_initiate_bus_reset(client->device->card, | 855 | fw_schedule_bus_reset(client->device->card, true, |
777 | arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET); | 856 | arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET); |
857 | return 0; | ||
778 | } | 858 | } |
779 | 859 | ||
780 | static void release_descriptor(struct client *client, | 860 | static void release_descriptor(struct client *client, |
@@ -845,10 +925,11 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
845 | struct client *client = data; | 925 | struct client *client = data; |
846 | struct iso_interrupt_event *e; | 926 | struct iso_interrupt_event *e; |
847 | 927 | ||
848 | e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC); | 928 | e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC); |
849 | if (e == NULL) | 929 | if (e == NULL) { |
930 | fw_notify("Out of memory when allocating event\n"); | ||
850 | return; | 931 | return; |
851 | 932 | } | |
852 | e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; | 933 | e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; |
853 | e->interrupt.closure = client->iso_closure; | 934 | e->interrupt.closure = client->iso_closure; |
854 | e->interrupt.cycle = cycle; | 935 | e->interrupt.cycle = cycle; |
@@ -858,27 +939,54 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
858 | sizeof(e->interrupt) + header_length, NULL, 0); | 939 | sizeof(e->interrupt) + header_length, NULL, 0); |
859 | } | 940 | } |
860 | 941 | ||
942 | static void iso_mc_callback(struct fw_iso_context *context, | ||
943 | dma_addr_t completed, void *data) | ||
944 | { | ||
945 | struct client *client = data; | ||
946 | struct iso_interrupt_mc_event *e; | ||
947 | |||
948 | e = kmalloc(sizeof(*e), GFP_ATOMIC); | ||
949 | if (e == NULL) { | ||
950 | fw_notify("Out of memory when allocating event\n"); | ||
951 | return; | ||
952 | } | ||
953 | e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL; | ||
954 | e->interrupt.closure = client->iso_closure; | ||
955 | e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer, | ||
956 | completed); | ||
957 | queue_event(client, &e->event, &e->interrupt, | ||
958 | sizeof(e->interrupt), NULL, 0); | ||
959 | } | ||
960 | |||
861 | static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) | 961 | static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) |
862 | { | 962 | { |
863 | struct fw_cdev_create_iso_context *a = &arg->create_iso_context; | 963 | struct fw_cdev_create_iso_context *a = &arg->create_iso_context; |
864 | struct fw_iso_context *context; | 964 | struct fw_iso_context *context; |
965 | fw_iso_callback_t cb; | ||
865 | 966 | ||
866 | /* We only support one context at this time. */ | 967 | BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT || |
867 | if (client->iso_context != NULL) | 968 | FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE || |
868 | return -EBUSY; | 969 | FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL != |
869 | 970 | FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL); | |
870 | if (a->channel > 63) | ||
871 | return -EINVAL; | ||
872 | 971 | ||
873 | switch (a->type) { | 972 | switch (a->type) { |
874 | case FW_ISO_CONTEXT_RECEIVE: | 973 | case FW_ISO_CONTEXT_TRANSMIT: |
875 | if (a->header_size < 4 || (a->header_size & 3)) | 974 | if (a->speed > SCODE_3200 || a->channel > 63) |
876 | return -EINVAL; | 975 | return -EINVAL; |
976 | |||
977 | cb = iso_callback; | ||
877 | break; | 978 | break; |
878 | 979 | ||
879 | case FW_ISO_CONTEXT_TRANSMIT: | 980 | case FW_ISO_CONTEXT_RECEIVE: |
880 | if (a->speed > SCODE_3200) | 981 | if (a->header_size < 4 || (a->header_size & 3) || |
982 | a->channel > 63) | ||
881 | return -EINVAL; | 983 | return -EINVAL; |
984 | |||
985 | cb = iso_callback; | ||
986 | break; | ||
987 | |||
988 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | ||
989 | cb = (fw_iso_callback_t)iso_mc_callback; | ||
882 | break; | 990 | break; |
883 | 991 | ||
884 | default: | 992 | default: |
@@ -886,20 +994,37 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
886 | } | 994 | } |
887 | 995 | ||
888 | context = fw_iso_context_create(client->device->card, a->type, | 996 | context = fw_iso_context_create(client->device->card, a->type, |
889 | a->channel, a->speed, a->header_size, | 997 | a->channel, a->speed, a->header_size, cb, client); |
890 | iso_callback, client); | ||
891 | if (IS_ERR(context)) | 998 | if (IS_ERR(context)) |
892 | return PTR_ERR(context); | 999 | return PTR_ERR(context); |
893 | 1000 | ||
1001 | /* We only support one context at this time. */ | ||
1002 | spin_lock_irq(&client->lock); | ||
1003 | if (client->iso_context != NULL) { | ||
1004 | spin_unlock_irq(&client->lock); | ||
1005 | fw_iso_context_destroy(context); | ||
1006 | return -EBUSY; | ||
1007 | } | ||
894 | client->iso_closure = a->closure; | 1008 | client->iso_closure = a->closure; |
895 | client->iso_context = context; | 1009 | client->iso_context = context; |
1010 | spin_unlock_irq(&client->lock); | ||
896 | 1011 | ||
897 | /* We only support one context at this time. */ | ||
898 | a->handle = 0; | 1012 | a->handle = 0; |
899 | 1013 | ||
900 | return 0; | 1014 | return 0; |
901 | } | 1015 | } |
902 | 1016 | ||
1017 | static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg) | ||
1018 | { | ||
1019 | struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels; | ||
1020 | struct fw_iso_context *ctx = client->iso_context; | ||
1021 | |||
1022 | if (ctx == NULL || a->handle != 0) | ||
1023 | return -EINVAL; | ||
1024 | |||
1025 | return fw_iso_context_set_channels(ctx, &a->channels); | ||
1026 | } | ||
1027 | |||
903 | /* Macros for decoding the iso packet control header. */ | 1028 | /* Macros for decoding the iso packet control header. */ |
904 | #define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff) | 1029 | #define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff) |
905 | #define GET_INTERRUPT(v) (((v) >> 16) & 0x01) | 1030 | #define GET_INTERRUPT(v) (((v) >> 16) & 0x01) |
@@ -913,7 +1038,7 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
913 | struct fw_cdev_queue_iso *a = &arg->queue_iso; | 1038 | struct fw_cdev_queue_iso *a = &arg->queue_iso; |
914 | struct fw_cdev_iso_packet __user *p, *end, *next; | 1039 | struct fw_cdev_iso_packet __user *p, *end, *next; |
915 | struct fw_iso_context *ctx = client->iso_context; | 1040 | struct fw_iso_context *ctx = client->iso_context; |
916 | unsigned long payload, buffer_end, header_length; | 1041 | unsigned long payload, buffer_end, transmit_header_bytes = 0; |
917 | u32 control; | 1042 | u32 control; |
918 | int count; | 1043 | int count; |
919 | struct { | 1044 | struct { |
@@ -933,7 +1058,6 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
933 | * use the indirect payload, the iso buffer need not be mapped | 1058 | * use the indirect payload, the iso buffer need not be mapped |
934 | * and the a->data pointer is ignored. | 1059 | * and the a->data pointer is ignored. |
935 | */ | 1060 | */ |
936 | |||
937 | payload = (unsigned long)a->data - client->vm_start; | 1061 | payload = (unsigned long)a->data - client->vm_start; |
938 | buffer_end = client->buffer.page_count << PAGE_SHIFT; | 1062 | buffer_end = client->buffer.page_count << PAGE_SHIFT; |
939 | if (a->data == 0 || client->buffer.pages == NULL || | 1063 | if (a->data == 0 || client->buffer.pages == NULL || |
@@ -942,8 +1066,10 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
942 | buffer_end = 0; | 1066 | buffer_end = 0; |
943 | } | 1067 | } |
944 | 1068 | ||
945 | p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets); | 1069 | if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3) |
1070 | return -EINVAL; | ||
946 | 1071 | ||
1072 | p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets); | ||
947 | if (!access_ok(VERIFY_READ, p, a->size)) | 1073 | if (!access_ok(VERIFY_READ, p, a->size)) |
948 | return -EFAULT; | 1074 | return -EFAULT; |
949 | 1075 | ||
@@ -959,31 +1085,32 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
959 | u.packet.sy = GET_SY(control); | 1085 | u.packet.sy = GET_SY(control); |
960 | u.packet.header_length = GET_HEADER_LENGTH(control); | 1086 | u.packet.header_length = GET_HEADER_LENGTH(control); |
961 | 1087 | ||
962 | if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) { | 1088 | switch (ctx->type) { |
963 | if (u.packet.header_length % 4 != 0) | 1089 | case FW_ISO_CONTEXT_TRANSMIT: |
1090 | if (u.packet.header_length & 3) | ||
1091 | return -EINVAL; | ||
1092 | transmit_header_bytes = u.packet.header_length; | ||
1093 | break; | ||
1094 | |||
1095 | case FW_ISO_CONTEXT_RECEIVE: | ||
1096 | if (u.packet.header_length == 0 || | ||
1097 | u.packet.header_length % ctx->header_size != 0) | ||
964 | return -EINVAL; | 1098 | return -EINVAL; |
965 | header_length = u.packet.header_length; | 1099 | break; |
966 | } else { | 1100 | |
967 | /* | 1101 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
968 | * We require that header_length is a multiple of | 1102 | if (u.packet.payload_length == 0 || |
969 | * the fixed header size, ctx->header_size. | 1103 | u.packet.payload_length & 3) |
970 | */ | ||
971 | if (ctx->header_size == 0) { | ||
972 | if (u.packet.header_length > 0) | ||
973 | return -EINVAL; | ||
974 | } else if (u.packet.header_length == 0 || | ||
975 | u.packet.header_length % ctx->header_size != 0) { | ||
976 | return -EINVAL; | 1104 | return -EINVAL; |
977 | } | 1105 | break; |
978 | header_length = 0; | ||
979 | } | 1106 | } |
980 | 1107 | ||
981 | next = (struct fw_cdev_iso_packet __user *) | 1108 | next = (struct fw_cdev_iso_packet __user *) |
982 | &p->header[header_length / 4]; | 1109 | &p->header[transmit_header_bytes / 4]; |
983 | if (next > end) | 1110 | if (next > end) |
984 | return -EINVAL; | 1111 | return -EINVAL; |
985 | if (__copy_from_user | 1112 | if (__copy_from_user |
986 | (u.packet.header, p->header, header_length)) | 1113 | (u.packet.header, p->header, transmit_header_bytes)) |
987 | return -EFAULT; | 1114 | return -EFAULT; |
988 | if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT && | 1115 | if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT && |
989 | u.packet.header_length + u.packet.payload_length > 0) | 1116 | u.packet.header_length + u.packet.payload_length > 0) |
@@ -1011,6 +1138,13 @@ static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
1011 | { | 1138 | { |
1012 | struct fw_cdev_start_iso *a = &arg->start_iso; | 1139 | struct fw_cdev_start_iso *a = &arg->start_iso; |
1013 | 1140 | ||
1141 | BUILD_BUG_ON( | ||
1142 | FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 || | ||
1143 | FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 || | ||
1144 | FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 || | ||
1145 | FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 || | ||
1146 | FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS); | ||
1147 | |||
1014 | if (client->iso_context == NULL || a->handle != 0) | 1148 | if (client->iso_context == NULL || a->handle != 0) |
1015 | return -EINVAL; | 1149 | return -EINVAL; |
1016 | 1150 | ||
@@ -1042,7 +1176,7 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
1042 | 1176 | ||
1043 | local_irq_disable(); | 1177 | local_irq_disable(); |
1044 | 1178 | ||
1045 | cycle_time = card->driver->get_cycle_time(card); | 1179 | cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME); |
1046 | 1180 | ||
1047 | switch (a->clk_id) { | 1181 | switch (a->clk_id) { |
1048 | case CLOCK_REALTIME: getnstimeofday(&ts); break; | 1182 | case CLOCK_REALTIME: getnstimeofday(&ts); break; |
@@ -1323,28 +1457,135 @@ static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
1323 | return init_request(client, &request, dest, a->speed); | 1457 | return init_request(client, &request, dest, a->speed); |
1324 | } | 1458 | } |
1325 | 1459 | ||
1460 | static void outbound_phy_packet_callback(struct fw_packet *packet, | ||
1461 | struct fw_card *card, int status) | ||
1462 | { | ||
1463 | struct outbound_phy_packet_event *e = | ||
1464 | container_of(packet, struct outbound_phy_packet_event, p); | ||
1465 | |||
1466 | switch (status) { | ||
1467 | /* expected: */ | ||
1468 | case ACK_COMPLETE: e->phy_packet.rcode = RCODE_COMPLETE; break; | ||
1469 | /* should never happen with PHY packets: */ | ||
1470 | case ACK_PENDING: e->phy_packet.rcode = RCODE_COMPLETE; break; | ||
1471 | case ACK_BUSY_X: | ||
1472 | case ACK_BUSY_A: | ||
1473 | case ACK_BUSY_B: e->phy_packet.rcode = RCODE_BUSY; break; | ||
1474 | case ACK_DATA_ERROR: e->phy_packet.rcode = RCODE_DATA_ERROR; break; | ||
1475 | case ACK_TYPE_ERROR: e->phy_packet.rcode = RCODE_TYPE_ERROR; break; | ||
1476 | /* stale generation; cancelled; on certain controllers: no ack */ | ||
1477 | default: e->phy_packet.rcode = status; break; | ||
1478 | } | ||
1479 | e->phy_packet.data[0] = packet->timestamp; | ||
1480 | |||
1481 | queue_event(e->client, &e->event, &e->phy_packet, | ||
1482 | sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0); | ||
1483 | client_put(e->client); | ||
1484 | } | ||
1485 | |||
1486 | static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) | ||
1487 | { | ||
1488 | struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet; | ||
1489 | struct fw_card *card = client->device->card; | ||
1490 | struct outbound_phy_packet_event *e; | ||
1491 | |||
1492 | /* Access policy: Allow this ioctl only on local nodes' device files. */ | ||
1493 | if (!client->device->is_local) | ||
1494 | return -ENOSYS; | ||
1495 | |||
1496 | e = kzalloc(sizeof(*e) + 4, GFP_KERNEL); | ||
1497 | if (e == NULL) | ||
1498 | return -ENOMEM; | ||
1499 | |||
1500 | client_get(client); | ||
1501 | e->client = client; | ||
1502 | e->p.speed = SCODE_100; | ||
1503 | e->p.generation = a->generation; | ||
1504 | e->p.header[0] = a->data[0]; | ||
1505 | e->p.header[1] = a->data[1]; | ||
1506 | e->p.header_length = 8; | ||
1507 | e->p.callback = outbound_phy_packet_callback; | ||
1508 | e->phy_packet.closure = a->closure; | ||
1509 | e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_SENT; | ||
1510 | if (is_ping_packet(a->data)) | ||
1511 | e->phy_packet.length = 4; | ||
1512 | |||
1513 | card->driver->send_request(card, &e->p); | ||
1514 | |||
1515 | return 0; | ||
1516 | } | ||
1517 | |||
1518 | static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg) | ||
1519 | { | ||
1520 | struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets; | ||
1521 | struct fw_card *card = client->device->card; | ||
1522 | |||
1523 | /* Access policy: Allow this ioctl only on local nodes' device files. */ | ||
1524 | if (!client->device->is_local) | ||
1525 | return -ENOSYS; | ||
1526 | |||
1527 | spin_lock_irq(&card->lock); | ||
1528 | |||
1529 | list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list); | ||
1530 | client->phy_receiver_closure = a->closure; | ||
1531 | |||
1532 | spin_unlock_irq(&card->lock); | ||
1533 | |||
1534 | return 0; | ||
1535 | } | ||
1536 | |||
1537 | void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p) | ||
1538 | { | ||
1539 | struct client *client; | ||
1540 | struct inbound_phy_packet_event *e; | ||
1541 | unsigned long flags; | ||
1542 | |||
1543 | spin_lock_irqsave(&card->lock, flags); | ||
1544 | |||
1545 | list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) { | ||
1546 | e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC); | ||
1547 | if (e == NULL) { | ||
1548 | fw_notify("Out of memory when allocating event\n"); | ||
1549 | break; | ||
1550 | } | ||
1551 | e->phy_packet.closure = client->phy_receiver_closure; | ||
1552 | e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED; | ||
1553 | e->phy_packet.rcode = RCODE_COMPLETE; | ||
1554 | e->phy_packet.length = 8; | ||
1555 | e->phy_packet.data[0] = p->header[1]; | ||
1556 | e->phy_packet.data[1] = p->header[2]; | ||
1557 | queue_event(client, &e->event, | ||
1558 | &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0); | ||
1559 | } | ||
1560 | |||
1561 | spin_unlock_irqrestore(&card->lock, flags); | ||
1562 | } | ||
1563 | |||
1326 | static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = { | 1564 | static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = { |
1327 | ioctl_get_info, | 1565 | [0x00] = ioctl_get_info, |
1328 | ioctl_send_request, | 1566 | [0x01] = ioctl_send_request, |
1329 | ioctl_allocate, | 1567 | [0x02] = ioctl_allocate, |
1330 | ioctl_deallocate, | 1568 | [0x03] = ioctl_deallocate, |
1331 | ioctl_send_response, | 1569 | [0x04] = ioctl_send_response, |
1332 | ioctl_initiate_bus_reset, | 1570 | [0x05] = ioctl_initiate_bus_reset, |
1333 | ioctl_add_descriptor, | 1571 | [0x06] = ioctl_add_descriptor, |
1334 | ioctl_remove_descriptor, | 1572 | [0x07] = ioctl_remove_descriptor, |
1335 | ioctl_create_iso_context, | 1573 | [0x08] = ioctl_create_iso_context, |
1336 | ioctl_queue_iso, | 1574 | [0x09] = ioctl_queue_iso, |
1337 | ioctl_start_iso, | 1575 | [0x0a] = ioctl_start_iso, |
1338 | ioctl_stop_iso, | 1576 | [0x0b] = ioctl_stop_iso, |
1339 | ioctl_get_cycle_timer, | 1577 | [0x0c] = ioctl_get_cycle_timer, |
1340 | ioctl_allocate_iso_resource, | 1578 | [0x0d] = ioctl_allocate_iso_resource, |
1341 | ioctl_deallocate_iso_resource, | 1579 | [0x0e] = ioctl_deallocate_iso_resource, |
1342 | ioctl_allocate_iso_resource_once, | 1580 | [0x0f] = ioctl_allocate_iso_resource_once, |
1343 | ioctl_deallocate_iso_resource_once, | 1581 | [0x10] = ioctl_deallocate_iso_resource_once, |
1344 | ioctl_get_speed, | 1582 | [0x11] = ioctl_get_speed, |
1345 | ioctl_send_broadcast_request, | 1583 | [0x12] = ioctl_send_broadcast_request, |
1346 | ioctl_send_stream_packet, | 1584 | [0x13] = ioctl_send_stream_packet, |
1347 | ioctl_get_cycle_timer2, | 1585 | [0x14] = ioctl_get_cycle_timer2, |
1586 | [0x15] = ioctl_send_phy_packet, | ||
1587 | [0x16] = ioctl_receive_phy_packets, | ||
1588 | [0x17] = ioctl_set_iso_channels, | ||
1348 | }; | 1589 | }; |
1349 | 1590 | ||
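With the switch to designated initializers, each array index is meant to equal the _IOC_NR() bits of the corresponding FW_CDEV_IOC_* command ('#' type, numbers 0x00-0x17), so the dispatcher can index the table directly. A minimal sketch of that pattern, relying on the file's local struct client and union ioctl_arg; it is not a verbatim copy of the driver's dispatch_ioctl(), which additionally copies the argument in and out of userspace:

static int dispatch(struct client *client, unsigned int cmd,
		    union ioctl_arg *arg)
{
	/* Reject foreign ioctl types and numbers beyond the table. */
	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
	    ioctl_handlers[_IOC_NR(cmd)] == NULL)
		return -ENOTTY;

	return ioctl_handlers[_IOC_NR(cmd)](client, arg);
}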
1350 | static int dispatch_ioctl(struct client *client, | 1591 | static int dispatch_ioctl(struct client *client, |
@@ -1452,6 +1693,10 @@ static int fw_device_op_release(struct inode *inode, struct file *file) | |||
1452 | struct client *client = file->private_data; | 1693 | struct client *client = file->private_data; |
1453 | struct event *event, *next_event; | 1694 | struct event *event, *next_event; |
1454 | 1695 | ||
1696 | spin_lock_irq(&client->device->card->lock); | ||
1697 | list_del(&client->phy_receiver_link); | ||
1698 | spin_unlock_irq(&client->device->card->lock); | ||
1699 | |||
1455 | mutex_lock(&client->device->client_list_mutex); | 1700 | mutex_lock(&client->device->client_list_mutex); |
1456 | list_del(&client->link); | 1701 | list_del(&client->link); |
1457 | mutex_unlock(&client->device->client_list_mutex); | 1702 | mutex_unlock(&client->device->client_list_mutex); |
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 4b8523f00dce..6113b896e790 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c | |||
@@ -107,11 +107,11 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size) | |||
107 | } | 107 | } |
108 | 108 | ||
109 | /** | 109 | /** |
110 | * fw_csr_string - reads a string from the configuration ROM | 110 | * fw_csr_string() - reads a string from the configuration ROM |
111 | * @directory: e.g. root directory or unit directory | 111 | * @directory: e.g. root directory or unit directory |
112 | * @key: the key of the preceding directory entry | 112 | * @key: the key of the preceding directory entry |
113 | * @buf: where to put the string | 113 | * @buf: where to put the string |
114 | * @size: size of @buf, in bytes | 114 | * @size: size of @buf, in bytes |
115 | * | 115 | * |
116 | * The string is taken from a minimal ASCII text descriptor leaf after | 116 | * The string is taken from a minimal ASCII text descriptor leaf after |
117 | * the immediate entry with @key. The string is zero-terminated. | 117 | * the immediate entry with @key. The string is zero-terminated. |
@@ -1136,6 +1136,7 @@ static void fw_device_refresh(struct work_struct *work) | |||
1136 | goto give_up; | 1136 | goto give_up; |
1137 | } | 1137 | } |
1138 | 1138 | ||
1139 | fw_device_cdev_update(device); | ||
1139 | create_units(device); | 1140 | create_units(device); |
1140 | 1141 | ||
1141 | /* Userspace may want to re-read attributes. */ | 1142 | /* Userspace may want to re-read attributes. */ |
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index 8f5aebfb29df..c003fa4e2db1 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c | |||
@@ -118,6 +118,23 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, | |||
118 | } | 118 | } |
119 | EXPORT_SYMBOL(fw_iso_buffer_destroy); | 119 | EXPORT_SYMBOL(fw_iso_buffer_destroy); |
120 | 120 | ||
121 | /* Convert DMA address to offset into virtually contiguous buffer. */ | ||
122 | size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed) | ||
123 | { | ||
124 | int i; | ||
125 | dma_addr_t address; | ||
126 | ssize_t offset; | ||
127 | |||
128 | for (i = 0; i < buffer->page_count; i++) { | ||
129 | address = page_private(buffer->pages[i]); | ||
130 | offset = (ssize_t)completed - (ssize_t)address; | ||
131 | if (offset > 0 && offset <= PAGE_SIZE) | ||
132 | return (i << PAGE_SHIFT) + offset; | ||
133 | } | ||
134 | |||
135 | return 0; | ||
136 | } | ||
137 | |||
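A hypothetical walk-through of fw_iso_buffer_lookup() above (addresses invented for illustration, PAGE_SIZE = 4096):

/*
 * pages[0] mapped at DMA address 0x10000, pages[1] at 0x2a000,
 * completed = 0x2a0c0:
 *   i = 0: offset = 0x2a0c0 - 0x10000 = 0x1a0c0  -> larger than PAGE_SIZE, skip
 *   i = 1: offset = 0x2a0c0 - 0x2a000 = 0xc0     -> match
 * result: (1 << PAGE_SHIFT) + 0xc0 = 0x10c0 bytes into the virtual buffer.
 */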
121 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, | 138 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, |
122 | int type, int channel, int speed, size_t header_size, | 139 | int type, int channel, int speed, size_t header_size, |
123 | fw_iso_callback_t callback, void *callback_data) | 140 | fw_iso_callback_t callback, void *callback_data) |
@@ -134,7 +151,7 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card, | |||
134 | ctx->channel = channel; | 151 | ctx->channel = channel; |
135 | ctx->speed = speed; | 152 | ctx->speed = speed; |
136 | ctx->header_size = header_size; | 153 | ctx->header_size = header_size; |
137 | ctx->callback = callback; | 154 | ctx->callback.sc = callback; |
138 | ctx->callback_data = callback_data; | 155 | ctx->callback_data = callback_data; |
139 | 156 | ||
140 | return ctx; | 157 | return ctx; |
@@ -143,9 +160,7 @@ EXPORT_SYMBOL(fw_iso_context_create); | |||
143 | 160 | ||
144 | void fw_iso_context_destroy(struct fw_iso_context *ctx) | 161 | void fw_iso_context_destroy(struct fw_iso_context *ctx) |
145 | { | 162 | { |
146 | struct fw_card *card = ctx->card; | 163 | ctx->card->driver->free_iso_context(ctx); |
147 | |||
148 | card->driver->free_iso_context(ctx); | ||
149 | } | 164 | } |
150 | EXPORT_SYMBOL(fw_iso_context_destroy); | 165 | EXPORT_SYMBOL(fw_iso_context_destroy); |
151 | 166 | ||
@@ -156,14 +171,17 @@ int fw_iso_context_start(struct fw_iso_context *ctx, | |||
156 | } | 171 | } |
157 | EXPORT_SYMBOL(fw_iso_context_start); | 172 | EXPORT_SYMBOL(fw_iso_context_start); |
158 | 173 | ||
174 | int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels) | ||
175 | { | ||
176 | return ctx->card->driver->set_iso_channels(ctx, channels); | ||
177 | } | ||
178 | |||
159 | int fw_iso_context_queue(struct fw_iso_context *ctx, | 179 | int fw_iso_context_queue(struct fw_iso_context *ctx, |
160 | struct fw_iso_packet *packet, | 180 | struct fw_iso_packet *packet, |
161 | struct fw_iso_buffer *buffer, | 181 | struct fw_iso_buffer *buffer, |
162 | unsigned long payload) | 182 | unsigned long payload) |
163 | { | 183 | { |
164 | struct fw_card *card = ctx->card; | 184 | return ctx->card->driver->queue_iso(ctx, packet, buffer, payload); |
165 | |||
166 | return card->driver->queue_iso(ctx, packet, buffer, payload); | ||
167 | } | 185 | } |
168 | EXPORT_SYMBOL(fw_iso_context_queue); | 186 | EXPORT_SYMBOL(fw_iso_context_queue); |
169 | 187 | ||
@@ -279,7 +297,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id, | |||
279 | } | 297 | } |
280 | 298 | ||
281 | /** | 299 | /** |
282 | * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth | 300 | * fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth |
283 | * | 301 | * |
284 | * In parameters: card, generation, channels_mask, bandwidth, allocate | 302 | * In parameters: card, generation, channels_mask, bandwidth, allocate |
285 | * Out parameters: channel, bandwidth | 303 | * Out parameters: channel, bandwidth |
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c index 93ec64cdeef7..09be1a635505 100644 --- a/drivers/firewire/core-topology.c +++ b/drivers/firewire/core-topology.c | |||
@@ -174,12 +174,7 @@ static inline struct fw_node *fw_node(struct list_head *l) | |||
174 | return list_entry(l, struct fw_node, link); | 174 | return list_entry(l, struct fw_node, link); |
175 | } | 175 | } |
176 | 176 | ||
177 | /** | 177 | /* |
178 | * build_tree - Build the tree representation of the topology | ||
179 | * @self_ids: array of self IDs to create the tree from | ||
180 | * @self_id_count: the length of the self_ids array | ||
181 | * @local_id: the node ID of the local node | ||
182 | * | ||
183 | * This function builds the tree representation of the topology given | 178 | * This function builds the tree representation of the topology given |
184 | * by the self IDs from the latest bus reset. During the construction | 179 | * by the self IDs from the latest bus reset. During the construction |
185 | * of the tree, the function checks that the self IDs are valid and | 180 | * of the tree, the function checks that the self IDs are valid and |
@@ -420,11 +415,10 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port) | |||
420 | } | 415 | } |
421 | } | 416 | } |
422 | 417 | ||
423 | /** | 418 | /* |
424 | * update_tree - compare the old topology tree for card with the new | 419 | * Compare the old topology tree for card with the new one specified by root. |
425 | * one specified by root. Queue the nodes and mark them as either | 420 | * Queue the nodes and mark them as either found, lost or updated. |
426 | * found, lost or updated. Update the nodes in the card topology tree | 421 | * Update the nodes in the card topology tree as we go. |
427 | * as we go. | ||
428 | */ | 422 | */ |
429 | static void update_tree(struct fw_card *card, struct fw_node *root) | 423 | static void update_tree(struct fw_card *card, struct fw_node *root) |
430 | { | 424 | { |
@@ -524,7 +518,7 @@ static void update_topology_map(struct fw_card *card, | |||
524 | } | 518 | } |
525 | 519 | ||
526 | void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, | 520 | void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, |
527 | int self_id_count, u32 *self_ids) | 521 | int self_id_count, u32 *self_ids, bool bm_abdicate) |
528 | { | 522 | { |
529 | struct fw_node *local_node; | 523 | struct fw_node *local_node; |
530 | unsigned long flags; | 524 | unsigned long flags; |
@@ -543,7 +537,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, | |||
543 | 537 | ||
544 | spin_lock_irqsave(&card->lock, flags); | 538 | spin_lock_irqsave(&card->lock, flags); |
545 | 539 | ||
546 | card->broadcast_channel_allocated = false; | 540 | card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated; |
547 | card->node_id = node_id; | 541 | card->node_id = node_id; |
548 | /* | 542 | /* |
549 | * Update node_id before generation to prevent anybody from using | 543 | * Update node_id before generation to prevent anybody from using |
@@ -552,6 +546,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, | |||
552 | smp_wmb(); | 546 | smp_wmb(); |
553 | card->generation = generation; | 547 | card->generation = generation; |
554 | card->reset_jiffies = jiffies; | 548 | card->reset_jiffies = jiffies; |
549 | card->bm_node_id = 0xffff; | ||
550 | card->bm_abdicate = bm_abdicate; | ||
555 | fw_schedule_bm_work(card, 0); | 551 | fw_schedule_bm_work(card, 0); |
556 | 552 | ||
557 | local_node = build_tree(card, self_ids, self_id_count); | 553 | local_node = build_tree(card, self_ids, self_id_count); |
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index fdc33ff06dc1..ca7ca56661e0 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c | |||
@@ -246,7 +246,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, | |||
246 | break; | 246 | break; |
247 | 247 | ||
248 | default: | 248 | default: |
249 | WARN(1, KERN_ERR "wrong tcode %d", tcode); | 249 | WARN(1, "wrong tcode %d", tcode); |
250 | } | 250 | } |
251 | common: | 251 | common: |
252 | packet->speed = speed; | 252 | packet->speed = speed; |
@@ -273,43 +273,52 @@ static int allocate_tlabel(struct fw_card *card) | |||
273 | } | 273 | } |
274 | 274 | ||
275 | /** | 275 | /** |
276 | * This function provides low-level access to the IEEE1394 transaction | 276 | * fw_send_request() - submit a request packet for transmission |
277 | * logic. Most C programs would use either fw_read(), fw_write() or | 277 | * @card: interface to send the request at |
278 | * fw_lock() instead - those function are convenience wrappers for | 278 | * @t: transaction instance to which the request belongs |
279 | * this function. The fw_send_request() function is primarily | 279 | * @tcode: transaction code |
280 | * provided as a flexible, one-stop entry point for languages bindings | 280 | * @destination_id: destination node ID, consisting of bus_ID and phy_ID |
281 | * and protocol bindings. | 281 | * @generation: bus generation in which request and response are valid |
282 | * @speed: transmission speed | ||
283 | * @offset: 48bit wide offset into destination's address space | ||
284 | * @payload: data payload for the request subaction | ||
285 | * @length: length of the payload, in bytes | ||
286 | * @callback: function to be called when the transaction is completed | ||
287 | * @callback_data: data to be passed to the transaction completion callback | ||
282 | * | 288 | * |
283 | * FIXME: Document this function further, in particular the possible | 289 | * Submit a request packet into the asynchronous request transmission queue. |
284 | * values for rcode in the callback. In short, we map ACK_COMPLETE to | 290 | * Can be called from atomic context. If you prefer a blocking API, use |
285 | * RCODE_COMPLETE, internal errors set errno and set rcode to | 291 | * fw_run_transaction() in a context that can sleep. |
286 | * RCODE_SEND_ERROR (which is out of range for standard ieee1394 | ||
287 | * rcodes). All other rcodes are forwarded unchanged. For all | ||
288 | * errors, payload is NULL, length is 0. | ||
289 | * | 292 | * |
290 | * Can not expect the callback to be called before the function | 293 | * In case of lock requests, specify one of the firewire-core specific %TCODE_ |
291 | * returns, though this does happen in some cases (ACK_COMPLETE and | 294 | * constants instead of %TCODE_LOCK_REQUEST in @tcode. |
292 | * errors). | ||
293 | * | 295 | * |
294 | * The payload is only used for write requests and must not be freed | 296 | * Make sure that the value in @destination_id is not older than the one in |
295 | * until the callback has been called. | 297 | * @generation. Otherwise the request is in danger of being sent to the wrong node.
296 | * | 298 | * |
297 | * @param card the card from which to send the request | 299 | * In case of asynchronous stream packets i.e. %TCODE_STREAM_DATA, the caller |
298 | * @param tcode the tcode for this transaction. Do not use | ||
299 | * TCODE_LOCK_REQUEST directly, instead use TCODE_LOCK_MASK_SWAP | ||
300 | * etc. to specify tcode and ext_tcode. | ||
301 | * @param node_id the destination node ID (bus ID and PHY ID concatenated) | ||
302 | * @param generation the generation for which node_id is valid | ||
303 | * @param speed the speed to use for sending the request | ||
304 | * @param offset the 48 bit offset on the destination node | ||
305 | * @param payload the data payload for the request subaction | ||
306 | * @param length the length in bytes of the data to read | ||
307 | * @param callback function to be called when the transaction is completed | ||
308 | * @param callback_data pointer to arbitrary data, which will be | ||
309 | * passed to the callback | ||
310 | * | ||
311 | * In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller | ||
312 | * needs to synthesize @destination_id with fw_stream_packet_destination_id(). | 300 | * needs to synthesize @destination_id with fw_stream_packet_destination_id(). |
301 | * It will contain tag, channel, and sy data instead of a node ID then. | ||
302 | * | ||
303 | * The payload buffer @payload is going to be DMA-mapped except in case of | ||
304 | * quadlet-sized payload or of local (loopback) requests. Hence make sure that | ||
305 | * the buffer complies with the restrictions for DMA-mapped memory. The | ||
306 | * @payload must not be freed before the @callback is called. | ||
307 | * | ||
308 | * In case of request types without payload, @payload is NULL and @length is 0. | ||
309 | * | ||
310 | * After the transaction is completed successfully or unsuccessfully, the | ||
311 | * @callback will be called. Among its parameters is the response code which | ||
312 | * is either one of the rcodes per IEEE 1394 or, in case of internal errors, | ||
313 | * the firewire-core specific %RCODE_SEND_ERROR. The other firewire-core | ||
314 | * specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION, | ||
315 | * %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request | ||
316 | * generation, or missing ACK respectively. | ||
317 | * | ||
318 | * Note some timing corner cases: fw_send_request() may complete much earlier | ||
319 | * than when the request packet actually hits the wire. On the other hand, | ||
320 | * transaction completion and hence execution of @callback may happen even | ||
321 | * before fw_send_request() returns. | ||
313 | */ | 322 | */ |
314 | void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, | 323 | void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, |
315 | int destination_id, int generation, int speed, | 324 | int destination_id, int generation, int speed, |
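A hedged caller-side sketch of the semantics documented above: an asynchronous quadlet read whose result is picked up in the completion callback. The names quadlet_read and start_quadlet_read are invented for illustration, and device reference counting plus generation/node_id ordering are glossed over; only the parameters described in the kernel-doc are exercised.

#include <linux/firewire.h>
#include <linux/slab.h>

struct quadlet_read {
	struct fw_transaction t;
	__be32 value;
};

static void quadlet_read_done(struct fw_card *card, int rcode,
			      void *payload, size_t length, void *data)
{
	struct quadlet_read *r = data;

	/* payload/length are only meaningful for RCODE_COMPLETE */
	if (rcode == RCODE_COMPLETE && length >= 4)
		r->value = *(__be32 *)payload;
	kfree(r);
}

static void start_quadlet_read(struct fw_device *device, u64 offset)
{
	struct quadlet_read *r = kmalloc(sizeof(*r), GFP_KERNEL);

	if (!r)
		return;
	/* A read request carries no payload, hence NULL/0 here. */
	fw_send_request(device->card, &r->t, TCODE_READ_QUADLET_REQUEST,
			device->node_id, device->generation, device->max_speed,
			offset, NULL, 0, quadlet_read_done, r);
}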
@@ -339,7 +348,8 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, | |||
339 | setup_timer(&t->split_timeout_timer, | 348 | setup_timer(&t->split_timeout_timer, |
340 | split_transaction_timeout_callback, (unsigned long)t); | 349 | split_transaction_timeout_callback, (unsigned long)t); |
341 | /* FIXME: start this timer later, relative to t->timestamp */ | 350 | /* FIXME: start this timer later, relative to t->timestamp */ |
342 | mod_timer(&t->split_timeout_timer, jiffies + DIV_ROUND_UP(HZ, 10)); | 351 | mod_timer(&t->split_timeout_timer, |
352 | jiffies + card->split_timeout_jiffies); | ||
343 | t->callback = callback; | 353 | t->callback = callback; |
344 | t->callback_data = callback_data; | 354 | t->callback_data = callback_data; |
345 | 355 | ||
@@ -374,9 +384,11 @@ static void transaction_callback(struct fw_card *card, int rcode, | |||
374 | } | 384 | } |
375 | 385 | ||
376 | /** | 386 | /** |
377 | * fw_run_transaction - send request and sleep until transaction is completed | 387 | * fw_run_transaction() - send request and sleep until transaction is completed |
378 | * | 388 | * |
379 | * Returns the RCODE. | 389 | * Returns the RCODE. See fw_send_request() for parameter documentation. |
390 | * Unlike fw_send_request(), @payload points to the payload of the request and/or | ||
391 | * to the payload of the response. | ||
380 | */ | 392 | */ |
381 | int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, | 393 | int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, |
382 | int generation, int speed, unsigned long long offset, | 394 | int generation, int speed, unsigned long long offset, |
@@ -417,9 +429,21 @@ void fw_send_phy_config(struct fw_card *card, | |||
417 | int node_id, int generation, int gap_count) | 429 | int node_id, int generation, int gap_count) |
418 | { | 430 | { |
419 | long timeout = DIV_ROUND_UP(HZ, 10); | 431 | long timeout = DIV_ROUND_UP(HZ, 10); |
420 | u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) | | 432 | u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG); |
421 | PHY_CONFIG_ROOT_ID(node_id) | | 433 | |
422 | PHY_CONFIG_GAP_COUNT(gap_count); | 434 | if (node_id != FW_PHY_CONFIG_NO_NODE_ID) |
435 | data |= PHY_CONFIG_ROOT_ID(node_id); | ||
436 | |||
437 | if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) { | ||
438 | gap_count = card->driver->read_phy_reg(card, 1); | ||
439 | if (gap_count < 0) | ||
440 | return; | ||
441 | |||
442 | gap_count &= 63; | ||
443 | if (gap_count == 63) | ||
444 | return; | ||
445 | } | ||
446 | data |= PHY_CONFIG_GAP_COUNT(gap_count); | ||
423 | 447 | ||
424 | mutex_lock(&phy_config_mutex); | 448 | mutex_lock(&phy_config_mutex); |
425 | 449 | ||
@@ -494,9 +518,9 @@ static bool is_in_fcp_region(u64 offset, size_t length) | |||
494 | } | 518 | } |
495 | 519 | ||
496 | /** | 520 | /** |
497 | * fw_core_add_address_handler - register for incoming requests | 521 | * fw_core_add_address_handler() - register for incoming requests |
498 | * @handler: callback | 522 | * @handler: callback |
499 | * @region: region in the IEEE 1212 node space address range | 523 | * @region: region in the IEEE 1212 node space address range |
500 | * | 524 | * |
501 | * region->start, ->end, and handler->length have to be quadlet-aligned. | 525 | * region->start, ->end, and handler->length have to be quadlet-aligned. |
502 | * | 526 | * |
@@ -519,8 +543,8 @@ int fw_core_add_address_handler(struct fw_address_handler *handler, | |||
519 | int ret = -EBUSY; | 543 | int ret = -EBUSY; |
520 | 544 | ||
521 | if (region->start & 0xffff000000000003ULL || | 545 | if (region->start & 0xffff000000000003ULL || |
522 | region->end & 0xffff000000000003ULL || | ||
523 | region->start >= region->end || | 546 | region->start >= region->end || |
547 | region->end > 0x0001000000000000ULL || | ||
524 | handler->length & 3 || | 548 | handler->length & 3 || |
525 | handler->length == 0) | 549 | handler->length == 0) |
526 | return -EINVAL; | 550 | return -EINVAL; |
@@ -551,7 +575,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler, | |||
551 | EXPORT_SYMBOL(fw_core_add_address_handler); | 575 | EXPORT_SYMBOL(fw_core_add_address_handler); |
552 | 576 | ||
553 | /** | 577 | /** |
554 | * fw_core_remove_address_handler - unregister an address handler | 578 | * fw_core_remove_address_handler() - unregister an address handler |
555 | */ | 579 | */ |
556 | void fw_core_remove_address_handler(struct fw_address_handler *handler) | 580 | void fw_core_remove_address_handler(struct fw_address_handler *handler) |
557 | { | 581 | { |
@@ -580,6 +604,41 @@ static void free_response_callback(struct fw_packet *packet, | |||
580 | kfree(request); | 604 | kfree(request); |
581 | } | 605 | } |
582 | 606 | ||
607 | int fw_get_response_length(struct fw_request *r) | ||
608 | { | ||
609 | int tcode, ext_tcode, data_length; | ||
610 | |||
611 | tcode = HEADER_GET_TCODE(r->request_header[0]); | ||
612 | |||
613 | switch (tcode) { | ||
614 | case TCODE_WRITE_QUADLET_REQUEST: | ||
615 | case TCODE_WRITE_BLOCK_REQUEST: | ||
616 | return 0; | ||
617 | |||
618 | case TCODE_READ_QUADLET_REQUEST: | ||
619 | return 4; | ||
620 | |||
621 | case TCODE_READ_BLOCK_REQUEST: | ||
622 | data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]); | ||
623 | return data_length; | ||
624 | |||
625 | case TCODE_LOCK_REQUEST: | ||
626 | ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]); | ||
627 | data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]); | ||
628 | switch (ext_tcode) { | ||
629 | case EXTCODE_FETCH_ADD: | ||
630 | case EXTCODE_LITTLE_ADD: | ||
631 | return data_length; | ||
632 | default: | ||
633 | return data_length / 2; | ||
634 | } | ||
635 | |||
636 | default: | ||
637 | WARN(1, "wrong tcode %d", tcode); | ||
638 | return 0; | ||
639 | } | ||
640 | } | ||
641 | |||
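The halving for the remaining lock extended tcodes in fw_get_response_length() reflects IEEE 1394 lock semantics rather than anything driver-specific; a short illustration:

/*
 * A 32-bit COMPARE_SWAP request carries arg_value and data_value
 * (data_length = 8), but its response returns only the old value
 * (4 bytes) -- hence data_length / 2.  FETCH_ADD and LITTLE_ADD
 * requests carry just the addend, so request and response payloads
 * have the same size and data_length is returned unchanged.
 */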
583 | void fw_fill_response(struct fw_packet *response, u32 *request_header, | 642 | void fw_fill_response(struct fw_packet *response, u32 *request_header, |
584 | int rcode, void *payload, size_t length) | 643 | int rcode, void *payload, size_t length) |
585 | { | 644 | { |
@@ -631,18 +690,35 @@ void fw_fill_response(struct fw_packet *response, u32 *request_header, | |||
631 | break; | 690 | break; |
632 | 691 | ||
633 | default: | 692 | default: |
634 | WARN(1, KERN_ERR "wrong tcode %d", tcode); | 693 | WARN(1, "wrong tcode %d", tcode); |
635 | } | 694 | } |
636 | 695 | ||
637 | response->payload_mapped = false; | 696 | response->payload_mapped = false; |
638 | } | 697 | } |
639 | EXPORT_SYMBOL(fw_fill_response); | 698 | EXPORT_SYMBOL(fw_fill_response); |
640 | 699 | ||
641 | static struct fw_request *allocate_request(struct fw_packet *p) | 700 | static u32 compute_split_timeout_timestamp(struct fw_card *card, |
701 | u32 request_timestamp) | ||
702 | { | ||
703 | unsigned int cycles; | ||
704 | u32 timestamp; | ||
705 | |||
706 | cycles = card->split_timeout_cycles; | ||
707 | cycles += request_timestamp & 0x1fff; | ||
708 | |||
709 | timestamp = request_timestamp & ~0x1fff; | ||
710 | timestamp += (cycles / 8000) << 13; | ||
711 | timestamp |= cycles % 8000; | ||
712 | |||
713 | return timestamp; | ||
714 | } | ||
715 | |||
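The value returned by compute_split_timeout_timestamp() uses the cycle timestamp layout, seconds in bits 13 and up and the cycle count (0...7999) in the low 13 bits, so the addition has to carry across the 8000-cycle boundary by hand. A worked example:

/*
 * split_timeout_cycles = 800 (100 ms), request_timestamp with
 * seconds = 2 and cycle = 7500, i.e. (2 << 13) | 7500:
 *   cycles     = 800 + 7500 = 8300
 *   timestamp  = (2 << 13) + ((8300 / 8000) << 13)   -> seconds become 3
 *   timestamp |= 8300 % 8000                          -> cycle count 300
 * Result: seconds = 3, cycle = 300, i.e. 800 cycles after the request.
 */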
716 | static struct fw_request *allocate_request(struct fw_card *card, | ||
717 | struct fw_packet *p) | ||
642 | { | 718 | { |
643 | struct fw_request *request; | 719 | struct fw_request *request; |
644 | u32 *data, length; | 720 | u32 *data, length; |
645 | int request_tcode, t; | 721 | int request_tcode; |
646 | 722 | ||
647 | request_tcode = HEADER_GET_TCODE(p->header[0]); | 723 | request_tcode = HEADER_GET_TCODE(p->header[0]); |
648 | switch (request_tcode) { | 724 | switch (request_tcode) { |
@@ -677,14 +753,9 @@ static struct fw_request *allocate_request(struct fw_packet *p) | |||
677 | if (request == NULL) | 753 | if (request == NULL) |
678 | return NULL; | 754 | return NULL; |
679 | 755 | ||
680 | t = (p->timestamp & 0x1fff) + 4000; | ||
681 | if (t >= 8000) | ||
682 | t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000; | ||
683 | else | ||
684 | t = (p->timestamp & ~0x1fff) + t; | ||
685 | |||
686 | request->response.speed = p->speed; | 756 | request->response.speed = p->speed; |
687 | request->response.timestamp = t; | 757 | request->response.timestamp = |
758 | compute_split_timeout_timestamp(card, p->timestamp); | ||
688 | request->response.generation = p->generation; | 759 | request->response.generation = p->generation; |
689 | request->response.ack = 0; | 760 | request->response.ack = 0; |
690 | request->response.callback = free_response_callback; | 761 | request->response.callback = free_response_callback; |
@@ -713,7 +784,8 @@ void fw_send_response(struct fw_card *card, | |||
713 | 784 | ||
714 | if (rcode == RCODE_COMPLETE) | 785 | if (rcode == RCODE_COMPLETE) |
715 | fw_fill_response(&request->response, request->request_header, | 786 | fw_fill_response(&request->response, request->request_header, |
716 | rcode, request->data, request->length); | 787 | rcode, request->data, |
788 | fw_get_response_length(request)); | ||
717 | else | 789 | else |
718 | fw_fill_response(&request->response, request->request_header, | 790 | fw_fill_response(&request->response, request->request_header, |
719 | rcode, NULL, 0); | 791 | rcode, NULL, 0); |
@@ -731,9 +803,11 @@ static void handle_exclusive_region_request(struct fw_card *card, | |||
731 | unsigned long flags; | 803 | unsigned long flags; |
732 | int tcode, destination, source; | 804 | int tcode, destination, source; |
733 | 805 | ||
734 | tcode = HEADER_GET_TCODE(p->header[0]); | ||
735 | destination = HEADER_GET_DESTINATION(p->header[0]); | 806 | destination = HEADER_GET_DESTINATION(p->header[0]); |
736 | source = HEADER_GET_SOURCE(p->header[1]); | 807 | source = HEADER_GET_SOURCE(p->header[1]); |
808 | tcode = HEADER_GET_TCODE(p->header[0]); | ||
809 | if (tcode == TCODE_LOCK_REQUEST) | ||
810 | tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]); | ||
737 | 811 | ||
738 | spin_lock_irqsave(&address_handler_lock, flags); | 812 | spin_lock_irqsave(&address_handler_lock, flags); |
739 | handler = lookup_enclosing_address_handler(&address_handler_list, | 813 | handler = lookup_enclosing_address_handler(&address_handler_list, |
@@ -753,7 +827,7 @@ static void handle_exclusive_region_request(struct fw_card *card, | |||
753 | else | 827 | else |
754 | handler->address_callback(card, request, | 828 | handler->address_callback(card, request, |
755 | tcode, destination, source, | 829 | tcode, destination, source, |
756 | p->generation, p->speed, offset, | 830 | p->generation, offset, |
757 | request->data, request->length, | 831 | request->data, request->length, |
758 | handler->callback_data); | 832 | handler->callback_data); |
759 | } | 833 | } |
@@ -791,8 +865,8 @@ static void handle_fcp_region_request(struct fw_card *card, | |||
791 | if (is_enclosing_handler(handler, offset, request->length)) | 865 | if (is_enclosing_handler(handler, offset, request->length)) |
792 | handler->address_callback(card, NULL, tcode, | 866 | handler->address_callback(card, NULL, tcode, |
793 | destination, source, | 867 | destination, source, |
794 | p->generation, p->speed, | 868 | p->generation, offset, |
795 | offset, request->data, | 869 | request->data, |
796 | request->length, | 870 | request->length, |
797 | handler->callback_data); | 871 | handler->callback_data); |
798 | } | 872 | } |
@@ -809,7 +883,12 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) | |||
809 | if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE) | 883 | if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE) |
810 | return; | 884 | return; |
811 | 885 | ||
812 | request = allocate_request(p); | 886 | if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) { |
887 | fw_cdev_handle_phy_packet(card, p); | ||
888 | return; | ||
889 | } | ||
890 | |||
891 | request = allocate_request(card, p); | ||
813 | if (request == NULL) { | 892 | if (request == NULL) { |
814 | /* FIXME: send statically allocated busy packet. */ | 893 | /* FIXME: send statically allocated busy packet. */ |
815 | return; | 894 | return; |
@@ -832,13 +911,12 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) | |||
832 | unsigned long flags; | 911 | unsigned long flags; |
833 | u32 *data; | 912 | u32 *data; |
834 | size_t data_length; | 913 | size_t data_length; |
835 | int tcode, tlabel, destination, source, rcode; | 914 | int tcode, tlabel, source, rcode; |
836 | 915 | ||
837 | tcode = HEADER_GET_TCODE(p->header[0]); | 916 | tcode = HEADER_GET_TCODE(p->header[0]); |
838 | tlabel = HEADER_GET_TLABEL(p->header[0]); | 917 | tlabel = HEADER_GET_TLABEL(p->header[0]); |
839 | destination = HEADER_GET_DESTINATION(p->header[0]); | 918 | source = HEADER_GET_SOURCE(p->header[1]); |
840 | source = HEADER_GET_SOURCE(p->header[1]); | 919 | rcode = HEADER_GET_RCODE(p->header[1]); |
841 | rcode = HEADER_GET_RCODE(p->header[1]); | ||
842 | 920 | ||
843 | spin_lock_irqsave(&card->lock, flags); | 921 | spin_lock_irqsave(&card->lock, flags); |
844 | list_for_each_entry(t, &card->transaction_list, link) { | 922 | list_for_each_entry(t, &card->transaction_list, link) { |
@@ -903,8 +981,8 @@ static const struct fw_address_region topology_map_region = | |||
903 | 981 | ||
904 | static void handle_topology_map(struct fw_card *card, struct fw_request *request, | 982 | static void handle_topology_map(struct fw_card *card, struct fw_request *request, |
905 | int tcode, int destination, int source, int generation, | 983 | int tcode, int destination, int source, int generation, |
906 | int speed, unsigned long long offset, | 984 | unsigned long long offset, void *payload, size_t length, |
907 | void *payload, size_t length, void *callback_data) | 985 | void *callback_data) |
908 | { | 986 | { |
909 | int start; | 987 | int start; |
910 | 988 | ||
@@ -933,19 +1011,97 @@ static const struct fw_address_region registers_region = | |||
933 | { .start = CSR_REGISTER_BASE, | 1011 | { .start = CSR_REGISTER_BASE, |
934 | .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; | 1012 | .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; |
935 | 1013 | ||
1014 | static void update_split_timeout(struct fw_card *card) | ||
1015 | { | ||
1016 | unsigned int cycles; | ||
1017 | |||
1018 | cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19); | ||
1019 | |||
1020 | cycles = max(cycles, 800u); /* minimum as per the spec */ | ||
1021 | cycles = min(cycles, 3u * 8000u); /* maximum OHCI timeout */ | ||
1022 | |||
1023 | card->split_timeout_cycles = cycles; | ||
1024 | card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000); | ||
1025 | } | ||
1026 | |||
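SPLIT_TIMEOUT_HI counts seconds and SPLIT_TIMEOUT_LO keeps the cycle count in its top 13 bits, which is why the low register is shifted right by 19 in update_split_timeout(). For example:

/*
 * split_timeout_hi = 0, split_timeout_lo = 800 << 19 (= 0x19000000):
 *   cycles = 0 * 8000 + 800 = 800   (already at the 800-cycle minimum)
 *   split_timeout_jiffies = DIV_ROUND_UP(800 * HZ, 8000)
 *                         = DIV_ROUND_UP(HZ, 10),
 * i.e. a 100 ms split-transaction timeout.
 */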
936 | static void handle_registers(struct fw_card *card, struct fw_request *request, | 1027 | static void handle_registers(struct fw_card *card, struct fw_request *request, |
937 | int tcode, int destination, int source, int generation, | 1028 | int tcode, int destination, int source, int generation, |
938 | int speed, unsigned long long offset, | 1029 | unsigned long long offset, void *payload, size_t length, |
939 | void *payload, size_t length, void *callback_data) | 1030 | void *callback_data) |
940 | { | 1031 | { |
941 | int reg = offset & ~CSR_REGISTER_BASE; | 1032 | int reg = offset & ~CSR_REGISTER_BASE; |
942 | __be32 *data = payload; | 1033 | __be32 *data = payload; |
943 | int rcode = RCODE_COMPLETE; | 1034 | int rcode = RCODE_COMPLETE; |
1035 | unsigned long flags; | ||
944 | 1036 | ||
945 | switch (reg) { | 1037 | switch (reg) { |
1038 | case CSR_PRIORITY_BUDGET: | ||
1039 | if (!card->priority_budget_implemented) { | ||
1040 | rcode = RCODE_ADDRESS_ERROR; | ||
1041 | break; | ||
1042 | } | ||
1043 | /* else fall through */ | ||
1044 | |||
1045 | case CSR_NODE_IDS: | ||
1046 | /* | ||
1047 | * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8 | ||
1048 | * and 9.6, but interoperable with IEEE 1394.1-2004 bridges | ||
1049 | */ | ||
1050 | /* fall through */ | ||
1051 | |||
1052 | case CSR_STATE_CLEAR: | ||
1053 | case CSR_STATE_SET: | ||
946 | case CSR_CYCLE_TIME: | 1054 | case CSR_CYCLE_TIME: |
947 | if (TCODE_IS_READ_REQUEST(tcode) && length == 4) | 1055 | case CSR_BUS_TIME: |
948 | *data = cpu_to_be32(card->driver->get_cycle_time(card)); | 1056 | case CSR_BUSY_TIMEOUT: |
1057 | if (tcode == TCODE_READ_QUADLET_REQUEST) | ||
1058 | *data = cpu_to_be32(card->driver->read_csr(card, reg)); | ||
1059 | else if (tcode == TCODE_WRITE_QUADLET_REQUEST) | ||
1060 | card->driver->write_csr(card, reg, be32_to_cpu(*data)); | ||
1061 | else | ||
1062 | rcode = RCODE_TYPE_ERROR; | ||
1063 | break; | ||
1064 | |||
1065 | case CSR_RESET_START: | ||
1066 | if (tcode == TCODE_WRITE_QUADLET_REQUEST) | ||
1067 | card->driver->write_csr(card, CSR_STATE_CLEAR, | ||
1068 | CSR_STATE_BIT_ABDICATE); | ||
1069 | else | ||
1070 | rcode = RCODE_TYPE_ERROR; | ||
1071 | break; | ||
1072 | |||
1073 | case CSR_SPLIT_TIMEOUT_HI: | ||
1074 | if (tcode == TCODE_READ_QUADLET_REQUEST) { | ||
1075 | *data = cpu_to_be32(card->split_timeout_hi); | ||
1076 | } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) { | ||
1077 | spin_lock_irqsave(&card->lock, flags); | ||
1078 | card->split_timeout_hi = be32_to_cpu(*data) & 7; | ||
1079 | update_split_timeout(card); | ||
1080 | spin_unlock_irqrestore(&card->lock, flags); | ||
1081 | } else { | ||
1082 | rcode = RCODE_TYPE_ERROR; | ||
1083 | } | ||
1084 | break; | ||
1085 | |||
1086 | case CSR_SPLIT_TIMEOUT_LO: | ||
1087 | if (tcode == TCODE_READ_QUADLET_REQUEST) { | ||
1088 | *data = cpu_to_be32(card->split_timeout_lo); | ||
1089 | } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) { | ||
1090 | spin_lock_irqsave(&card->lock, flags); | ||
1091 | card->split_timeout_lo = | ||
1092 | be32_to_cpu(*data) & 0xfff80000; | ||
1093 | update_split_timeout(card); | ||
1094 | spin_unlock_irqrestore(&card->lock, flags); | ||
1095 | } else { | ||
1096 | rcode = RCODE_TYPE_ERROR; | ||
1097 | } | ||
1098 | break; | ||
1099 | |||
1100 | case CSR_MAINT_UTILITY: | ||
1101 | if (tcode == TCODE_READ_QUADLET_REQUEST) | ||
1102 | *data = card->maint_utility_register; | ||
1103 | else if (tcode == TCODE_WRITE_QUADLET_REQUEST) | ||
1104 | card->maint_utility_register = *data; | ||
949 | else | 1105 | else |
950 | rcode = RCODE_TYPE_ERROR; | 1106 | rcode = RCODE_TYPE_ERROR; |
951 | break; | 1107 | break; |
@@ -975,12 +1131,6 @@ static void handle_registers(struct fw_card *card, struct fw_request *request, | |||
975 | BUG(); | 1131 | BUG(); |
976 | break; | 1132 | break; |
977 | 1133 | ||
978 | case CSR_BUSY_TIMEOUT: | ||
979 | /* FIXME: Implement this. */ | ||
980 | |||
981 | case CSR_BUS_TIME: | ||
982 | /* Useless without initialization by the bus manager. */ | ||
983 | |||
984 | default: | 1134 | default: |
985 | rcode = RCODE_ADDRESS_ERROR; | 1135 | rcode = RCODE_ADDRESS_ERROR; |
986 | break; | 1136 | break; |
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index 0ecfcd95f4c5..e6239f971be6 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h | |||
@@ -38,6 +38,9 @@ struct fw_packet; | |||
38 | #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) | 38 | #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) |
39 | #define BROADCAST_CHANNEL_VALID (1 << 30) | 39 | #define BROADCAST_CHANNEL_VALID (1 << 30) |
40 | 40 | ||
41 | #define CSR_STATE_BIT_CMSTR (1 << 8) | ||
42 | #define CSR_STATE_BIT_ABDICATE (1 << 10) | ||
43 | |||
41 | struct fw_card_driver { | 44 | struct fw_card_driver { |
42 | /* | 45 | /* |
43 | * Enable the given card with the given initial config rom. | 46 | * Enable the given card with the given initial config rom. |
@@ -48,6 +51,7 @@ struct fw_card_driver { | |||
48 | int (*enable)(struct fw_card *card, | 51 | int (*enable)(struct fw_card *card, |
49 | const __be32 *config_rom, size_t length); | 52 | const __be32 *config_rom, size_t length); |
50 | 53 | ||
54 | int (*read_phy_reg)(struct fw_card *card, int address); | ||
51 | int (*update_phy_reg)(struct fw_card *card, int address, | 55 | int (*update_phy_reg)(struct fw_card *card, int address, |
52 | int clear_bits, int set_bits); | 56 | int clear_bits, int set_bits); |
53 | 57 | ||
@@ -75,7 +79,8 @@ struct fw_card_driver { | |||
75 | int (*enable_phys_dma)(struct fw_card *card, | 79 | int (*enable_phys_dma)(struct fw_card *card, |
76 | int node_id, int generation); | 80 | int node_id, int generation); |
77 | 81 | ||
78 | u32 (*get_cycle_time)(struct fw_card *card); | 82 | u32 (*read_csr)(struct fw_card *card, int csr_offset); |
83 | void (*write_csr)(struct fw_card *card, int csr_offset, u32 value); | ||
79 | 84 | ||
80 | struct fw_iso_context * | 85 | struct fw_iso_context * |
81 | (*allocate_iso_context)(struct fw_card *card, | 86 | (*allocate_iso_context)(struct fw_card *card, |
@@ -85,6 +90,8 @@ struct fw_card_driver { | |||
85 | int (*start_iso)(struct fw_iso_context *ctx, | 90 | int (*start_iso)(struct fw_iso_context *ctx, |
86 | s32 cycle, u32 sync, u32 tags); | 91 | s32 cycle, u32 sync, u32 tags); |
87 | 92 | ||
93 | int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels); | ||
94 | |||
88 | int (*queue_iso)(struct fw_iso_context *ctx, | 95 | int (*queue_iso)(struct fw_iso_context *ctx, |
89 | struct fw_iso_packet *packet, | 96 | struct fw_iso_packet *packet, |
90 | struct fw_iso_buffer *buffer, | 97 | struct fw_iso_buffer *buffer, |
@@ -98,8 +105,8 @@ void fw_card_initialize(struct fw_card *card, | |||
98 | int fw_card_add(struct fw_card *card, | 105 | int fw_card_add(struct fw_card *card, |
99 | u32 max_receive, u32 link_speed, u64 guid); | 106 | u32 max_receive, u32 link_speed, u64 guid); |
100 | void fw_core_remove_card(struct fw_card *card); | 107 | void fw_core_remove_card(struct fw_card *card); |
101 | int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset); | ||
102 | int fw_compute_block_crc(__be32 *block); | 108 | int fw_compute_block_crc(__be32 *block); |
109 | void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset); | ||
103 | void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); | 110 | void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); |
104 | 111 | ||
105 | static inline struct fw_card *fw_card_get(struct fw_card *card) | 112 | static inline struct fw_card *fw_card_get(struct fw_card *card) |
@@ -123,6 +130,7 @@ extern const struct file_operations fw_device_ops; | |||
123 | 130 | ||
124 | void fw_device_cdev_update(struct fw_device *device); | 131 | void fw_device_cdev_update(struct fw_device *device); |
125 | void fw_device_cdev_remove(struct fw_device *device); | 132 | void fw_device_cdev_remove(struct fw_device *device); |
133 | void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p); | ||
126 | 134 | ||
127 | 135 | ||
128 | /* -device */ | 136 | /* -device */ |
@@ -192,7 +200,7 @@ static inline void fw_node_put(struct fw_node *node) | |||
192 | } | 200 | } |
193 | 201 | ||
194 | void fw_core_handle_bus_reset(struct fw_card *card, int node_id, | 202 | void fw_core_handle_bus_reset(struct fw_card *card, int node_id, |
195 | int generation, int self_id_count, u32 *self_ids); | 203 | int generation, int self_id_count, u32 *self_ids, bool bm_abdicate); |
196 | void fw_destroy_nodes(struct fw_card *card); | 204 | void fw_destroy_nodes(struct fw_card *card); |
197 | 205 | ||
198 | /* | 206 | /* |
@@ -209,6 +217,7 @@ static inline bool is_next_generation(int new_generation, int old_generation) | |||
209 | 217 | ||
210 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) | 218 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) |
211 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) | 219 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) |
220 | #define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == 0xe) | ||
212 | #define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0) | 221 | #define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0) |
213 | #define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0) | 222 | #define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0) |
214 | #define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4) | 223 | #define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4) |
@@ -218,9 +227,18 @@ static inline bool is_next_generation(int new_generation, int old_generation) | |||
218 | 227 | ||
219 | void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); | 228 | void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); |
220 | void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); | 229 | void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); |
230 | int fw_get_response_length(struct fw_request *request); | ||
221 | void fw_fill_response(struct fw_packet *response, u32 *request_header, | 231 | void fw_fill_response(struct fw_packet *response, u32 *request_header, |
222 | int rcode, void *payload, size_t length); | 232 | int rcode, void *payload, size_t length); |
233 | |||
234 | #define FW_PHY_CONFIG_NO_NODE_ID -1 | ||
235 | #define FW_PHY_CONFIG_CURRENT_GAP_COUNT -1 | ||
223 | void fw_send_phy_config(struct fw_card *card, | 236 | void fw_send_phy_config(struct fw_card *card, |
224 | int node_id, int generation, int gap_count); | 237 | int node_id, int generation, int gap_count); |
225 | 238 | ||
239 | static inline bool is_ping_packet(u32 *data) | ||
240 | { | ||
241 | return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1]; | ||
242 | } | ||
243 | |||
226 | #endif /* _FIREWIRE_CORE_H */ | 244 | #endif /* _FIREWIRE_CORE_H */ |
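is_ping_packet() recognizes the two-quadlet PHY ping packet: the second quadlet must be the bitwise inverse of the first, and only the 6-bit phy_ID field (bits 29-24) of the first quadlet may be non-zero. For instance:

/*
 * Ping to phy_ID 3:
 *   data[0] = 0x03000000      data[0] & 0xc0ffffff == 0
 *   data[1] = 0xfcffffff      ~data[0] == data[1]
 * so is_ping_packet(data) returns true.
 */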
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 7142eeec8074..da17d409a244 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c | |||
@@ -806,8 +806,8 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, | |||
806 | 806 | ||
807 | static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, | 807 | static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, |
808 | int tcode, int destination, int source, int generation, | 808 | int tcode, int destination, int source, int generation, |
809 | int speed, unsigned long long offset, void *payload, | 809 | unsigned long long offset, void *payload, size_t length, |
810 | size_t length, void *callback_data) | 810 | void *callback_data) |
811 | { | 811 | { |
812 | struct fwnet_device *dev = callback_data; | 812 | struct fwnet_device *dev = callback_data; |
813 | int rcode; | 813 | int rcode; |
diff --git a/drivers/firewire/nosy-user.h b/drivers/firewire/nosy-user.h new file mode 100644 index 000000000000..e48aa6200c72 --- /dev/null +++ b/drivers/firewire/nosy-user.h | |||
@@ -0,0 +1,25 @@ | |||
1 | #ifndef __nosy_user_h | ||
2 | #define __nosy_user_h | ||
3 | |||
4 | #include <linux/ioctl.h> | ||
5 | #include <linux/types.h> | ||
6 | |||
7 | #define NOSY_IOC_GET_STATS _IOR('&', 0, struct nosy_stats) | ||
8 | #define NOSY_IOC_START _IO('&', 1) | ||
9 | #define NOSY_IOC_STOP _IO('&', 2) | ||
10 | #define NOSY_IOC_FILTER _IOW('&', 2, __u32) | ||
11 | |||
12 | struct nosy_stats { | ||
13 | __u32 total_packet_count; | ||
14 | __u32 lost_packet_count; | ||
15 | }; | ||
16 | |||
17 | /* | ||
18 | * Format of packets returned from the kernel driver: | ||
19 | * | ||
20 | * quadlet with timestamp (microseconds, CPU endian) | ||
21 | * quadlet-padded packet data... (little endian) | ||
22 | * quadlet with ack (little endian) | ||
23 | */ | ||
24 | |||
25 | #endif /* __nosy_user_h */ | ||
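A minimal userspace reader, sketched under two assumptions: the misc device node shows up as /dev/nosy, and each read() returns exactly one record laid out as in the comment above (timestamp quadlet, quadlet-padded packet data, ack quadlet). Byte-swapping of the little-endian quadlets is omitted, so this is only correct as-is on little-endian hosts.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "nosy-user.h"

int main(void)
{
	uint32_t buf[8192];
	ssize_t len;
	int fd = open("/dev/nosy", O_RDONLY);

	if (fd < 0 || ioctl(fd, NOSY_IOC_START) < 0)
		return 1;
	/* Optionally narrow the capture: ioctl(fd, NOSY_IOC_FILTER, 1u << tcode) */

	while ((len = read(fd, buf, sizeof(buf))) > 0)
		printf("%u us: %zd-byte record, ack quadlet 0x%08x\n",
		       buf[0], len, buf[len / 4 - 1]);

	ioctl(fd, NOSY_IOC_STOP);
	close(fd);
	return 0;
}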
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c new file mode 100644 index 000000000000..8528b10763ed --- /dev/null +++ b/drivers/firewire/nosy.c | |||
@@ -0,0 +1,721 @@ | |||
1 | /* | ||
2 | * nosy - Snoop mode driver for TI PCILynx 1394 controllers | ||
3 | * Copyright (C) 2002-2007 Kristian Høgsberg | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software Foundation, | ||
17 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
18 | */ | ||
19 | |||
20 | #include <linux/device.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/fs.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/io.h> | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/kref.h> | ||
28 | #include <linux/miscdevice.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/mutex.h> | ||
31 | #include <linux/pci.h> | ||
32 | #include <linux/poll.h> | ||
33 | #include <linux/sched.h> /* required for linux/wait.h */ | ||
34 | #include <linux/slab.h> | ||
35 | #include <linux/spinlock.h> | ||
36 | #include <linux/timex.h> | ||
37 | #include <linux/uaccess.h> | ||
38 | #include <linux/wait.h> | ||
39 | |||
40 | #include <asm/atomic.h> | ||
41 | #include <asm/byteorder.h> | ||
42 | |||
43 | #include "nosy.h" | ||
44 | #include "nosy-user.h" | ||
45 | |||
46 | #define TCODE_PHY_PACKET 0x10 | ||
47 | #define PCI_DEVICE_ID_TI_PCILYNX 0x8000 | ||
48 | |||
49 | static char driver_name[] = KBUILD_MODNAME; | ||
50 | |||
51 | /* this is the physical layout of a PCL; its size is 128 bytes */ | ||
52 | struct pcl { | ||
53 | __le32 next; | ||
54 | __le32 async_error_next; | ||
55 | u32 user_data; | ||
56 | __le32 pcl_status; | ||
57 | __le32 remaining_transfer_count; | ||
58 | __le32 next_data_buffer; | ||
59 | struct { | ||
60 | __le32 control; | ||
61 | __le32 pointer; | ||
62 | } buffer[13]; | ||
63 | }; | ||
64 | |||
65 | struct packet { | ||
66 | unsigned int length; | ||
67 | char data[0]; | ||
68 | }; | ||
69 | |||
70 | struct packet_buffer { | ||
71 | char *data; | ||
72 | size_t capacity; | ||
73 | long total_packet_count, lost_packet_count; | ||
74 | atomic_t size; | ||
75 | struct packet *head, *tail; | ||
76 | wait_queue_head_t wait; | ||
77 | }; | ||
78 | |||
79 | struct pcilynx { | ||
80 | struct pci_dev *pci_device; | ||
81 | __iomem char *registers; | ||
82 | |||
83 | struct pcl *rcv_start_pcl, *rcv_pcl; | ||
84 | __le32 *rcv_buffer; | ||
85 | |||
86 | dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus; | ||
87 | |||
88 | spinlock_t client_list_lock; | ||
89 | struct list_head client_list; | ||
90 | |||
91 | struct miscdevice misc; | ||
92 | struct list_head link; | ||
93 | struct kref kref; | ||
94 | }; | ||
95 | |||
96 | static inline struct pcilynx * | ||
97 | lynx_get(struct pcilynx *lynx) | ||
98 | { | ||
99 | kref_get(&lynx->kref); | ||
100 | |||
101 | return lynx; | ||
102 | } | ||
103 | |||
104 | static void | ||
105 | lynx_release(struct kref *kref) | ||
106 | { | ||
107 | kfree(container_of(kref, struct pcilynx, kref)); | ||
108 | } | ||
109 | |||
110 | static inline void | ||
111 | lynx_put(struct pcilynx *lynx) | ||
112 | { | ||
113 | kref_put(&lynx->kref, lynx_release); | ||
114 | } | ||
115 | |||
116 | struct client { | ||
117 | struct pcilynx *lynx; | ||
118 | u32 tcode_mask; | ||
119 | struct packet_buffer buffer; | ||
120 | struct list_head link; | ||
121 | }; | ||
122 | |||
123 | static DEFINE_MUTEX(card_mutex); | ||
124 | static LIST_HEAD(card_list); | ||
125 | |||
126 | static int | ||
127 | packet_buffer_init(struct packet_buffer *buffer, size_t capacity) | ||
128 | { | ||
129 | buffer->data = kmalloc(capacity, GFP_KERNEL); | ||
130 | if (buffer->data == NULL) | ||
131 | return -ENOMEM; | ||
132 | buffer->head = (struct packet *) buffer->data; | ||
133 | buffer->tail = (struct packet *) buffer->data; | ||
134 | buffer->capacity = capacity; | ||
135 | buffer->lost_packet_count = 0; | ||
136 | atomic_set(&buffer->size, 0); | ||
137 | init_waitqueue_head(&buffer->wait); | ||
138 | |||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | static void | ||
143 | packet_buffer_destroy(struct packet_buffer *buffer) | ||
144 | { | ||
145 | kfree(buffer->data); | ||
146 | } | ||
147 | |||
148 | static int | ||
149 | packet_buffer_get(struct client *client, char __user *data, size_t user_length) | ||
150 | { | ||
151 | struct packet_buffer *buffer = &client->buffer; | ||
152 | size_t length; | ||
153 | char *end; | ||
154 | |||
155 | if (wait_event_interruptible(buffer->wait, | ||
156 | atomic_read(&buffer->size) > 0) || | ||
157 | list_empty(&client->lynx->link)) | ||
158 | return -ERESTARTSYS; | ||
159 | |||
160 | if (atomic_read(&buffer->size) == 0) | ||
161 | return -ENODEV; | ||
162 | |||
163 | /* FIXME: Check length <= user_length. */ | ||
164 | |||
165 | end = buffer->data + buffer->capacity; | ||
166 | length = buffer->head->length; | ||
167 | |||
168 | if (&buffer->head->data[length] < end) { | ||
169 | if (copy_to_user(data, buffer->head->data, length)) | ||
170 | return -EFAULT; | ||
171 | buffer->head = (struct packet *) &buffer->head->data[length]; | ||
172 | } else { | ||
173 | size_t split = end - buffer->head->data; | ||
174 | |||
175 | if (copy_to_user(data, buffer->head->data, split)) | ||
176 | return -EFAULT; | ||
177 | if (copy_to_user(data + split, buffer->data, length - split)) | ||
178 | return -EFAULT; | ||
179 | buffer->head = (struct packet *) &buffer->data[length - split]; | ||
180 | } | ||
181 | |||
182 | /* | ||
183 | * Decrease buffer->size as the last thing, since this is what | ||
184 | * keeps the interrupt from overwriting the packet we are | ||
185 | * retrieving from the buffer. | ||
186 | */ | ||
187 | atomic_sub(sizeof(struct packet) + length, &buffer->size); | ||
188 | |||
189 | return length; | ||
190 | } | ||
191 | |||
192 | static void | ||
193 | packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length) | ||
194 | { | ||
195 | char *end; | ||
196 | |||
197 | buffer->total_packet_count++; | ||
198 | |||
199 | if (buffer->capacity < | ||
200 | atomic_read(&buffer->size) + sizeof(struct packet) + length) { | ||
201 | buffer->lost_packet_count++; | ||
202 | return; | ||
203 | } | ||
204 | |||
205 | end = buffer->data + buffer->capacity; | ||
206 | buffer->tail->length = length; | ||
207 | |||
208 | if (&buffer->tail->data[length] < end) { | ||
209 | memcpy(buffer->tail->data, data, length); | ||
210 | buffer->tail = (struct packet *) &buffer->tail->data[length]; | ||
211 | } else { | ||
212 | size_t split = end - buffer->tail->data; | ||
213 | |||
214 | memcpy(buffer->tail->data, data, split); | ||
215 | memcpy(buffer->data, data + split, length - split); | ||
216 | buffer->tail = (struct packet *) &buffer->data[length - split]; | ||
217 | } | ||
218 | |||
219 | /* Finally, adjust buffer size and wake up userspace reader. */ | ||
220 | |||
221 | atomic_add(sizeof(struct packet) + length, &buffer->size); | ||
222 | wake_up_interruptible(&buffer->wait); | ||
223 | } | ||
224 | |||
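The buffer is a classic single-producer/single-consumer ring: the interrupt handler only writes tail and the reading process only writes head, so the atomic size counter is the sole piece of shared state that both sides modify.

/*
 * Producer (IRQ):  check capacity, copy at tail, advance tail,
 *                  atomic_add(size), wake the reader.
 * Consumer (read): wait for size > 0, copy out from head, advance head,
 *                  atomic_sub(size) last -- so the producer can never
 *                  overwrite a record that is still being copied out.
 */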
225 | static inline void | ||
226 | reg_write(struct pcilynx *lynx, int offset, u32 data) | ||
227 | { | ||
228 | writel(data, lynx->registers + offset); | ||
229 | } | ||
230 | |||
231 | static inline u32 | ||
232 | reg_read(struct pcilynx *lynx, int offset) | ||
233 | { | ||
234 | return readl(lynx->registers + offset); | ||
235 | } | ||
236 | |||
237 | static inline void | ||
238 | reg_set_bits(struct pcilynx *lynx, int offset, u32 mask) | ||
239 | { | ||
240 | reg_write(lynx, offset, (reg_read(lynx, offset) | mask)); | ||
241 | } | ||
242 | |||
243 | /* | ||
244 | * Maybe the pcl programs could be set up to just append data instead | ||
245 | * of using a whole packet. | ||
246 | */ | ||
247 | static inline void | ||
248 | run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus, | ||
249 | int dmachan) | ||
250 | { | ||
251 | reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus); | ||
252 | reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20, | ||
253 | DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK); | ||
254 | } | ||
255 | |||
256 | static int | ||
257 | set_phy_reg(struct pcilynx *lynx, int addr, int val) | ||
258 | { | ||
259 | if (addr > 15) { | ||
260 | dev_err(&lynx->pci_device->dev, | ||
261 | "PHY register address %d out of range\n", addr); | ||
262 | return -1; | ||
263 | } | ||
264 | if (val > 0xff) { | ||
265 | dev_err(&lynx->pci_device->dev, | ||
266 | "PHY register value %d out of range\n", val); | ||
267 | return -1; | ||
268 | } | ||
269 | reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | | ||
270 | LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val)); | ||
271 | |||
272 | return 0; | ||
273 | } | ||
274 | |||
275 | static int | ||
276 | nosy_open(struct inode *inode, struct file *file) | ||
277 | { | ||
278 | int minor = iminor(inode); | ||
279 | struct client *client; | ||
280 | struct pcilynx *tmp, *lynx = NULL; | ||
281 | |||
282 | mutex_lock(&card_mutex); | ||
283 | list_for_each_entry(tmp, &card_list, link) | ||
284 | if (tmp->misc.minor == minor) { | ||
285 | lynx = lynx_get(tmp); | ||
286 | break; | ||
287 | } | ||
288 | mutex_unlock(&card_mutex); | ||
289 | if (lynx == NULL) | ||
290 | return -ENODEV; | ||
291 | |||
292 | client = kmalloc(sizeof *client, GFP_KERNEL); | ||
293 | if (client == NULL) | ||
294 | goto fail; | ||
295 | |||
296 | client->tcode_mask = ~0; | ||
297 | client->lynx = lynx; | ||
298 | INIT_LIST_HEAD(&client->link); | ||
299 | |||
300 | if (packet_buffer_init(&client->buffer, 128 * 1024) < 0) | ||
301 | goto fail; | ||
302 | |||
303 | file->private_data = client; | ||
304 | |||
305 | return 0; | ||
306 | fail: | ||
307 | kfree(client); | ||
308 | lynx_put(lynx); | ||
309 | |||
310 | return -ENOMEM; | ||
311 | } | ||
312 | |||
313 | static int | ||
314 | nosy_release(struct inode *inode, struct file *file) | ||
315 | { | ||
316 | struct client *client = file->private_data; | ||
317 | struct pcilynx *lynx = client->lynx; | ||
318 | |||
319 | spin_lock_irq(&lynx->client_list_lock); | ||
320 | list_del_init(&client->link); | ||
321 | spin_unlock_irq(&lynx->client_list_lock); | ||
322 | |||
323 | packet_buffer_destroy(&client->buffer); | ||
324 | kfree(client); | ||
325 | lynx_put(lynx); | ||
326 | |||
327 | return 0; | ||
328 | } | ||
329 | |||
330 | static unsigned int | ||
331 | nosy_poll(struct file *file, poll_table *pt) | ||
332 | { | ||
333 | struct client *client = file->private_data; | ||
334 | unsigned int ret = 0; | ||
335 | |||
336 | poll_wait(file, &client->buffer.wait, pt); | ||
337 | |||
338 | if (atomic_read(&client->buffer.size) > 0) | ||
339 | ret = POLLIN | POLLRDNORM; | ||
340 | |||
341 | if (list_empty(&client->lynx->link)) | ||
342 | ret |= POLLHUP; | ||
343 | |||
344 | return ret; | ||
345 | } | ||
346 | |||
347 | static ssize_t | ||
348 | nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset) | ||
349 | { | ||
350 | struct client *client = file->private_data; | ||
351 | |||
352 | return packet_buffer_get(client, buffer, count); | ||
353 | } | ||
354 | |||
355 | static long | ||
356 | nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
357 | { | ||
358 | struct client *client = file->private_data; | ||
359 | spinlock_t *client_list_lock = &client->lynx->client_list_lock; | ||
360 | struct nosy_stats stats; | ||
361 | |||
362 | switch (cmd) { | ||
363 | case NOSY_IOC_GET_STATS: | ||
364 | spin_lock_irq(client_list_lock); | ||
365 | stats.total_packet_count = client->buffer.total_packet_count; | ||
366 | stats.lost_packet_count = client->buffer.lost_packet_count; | ||
367 | spin_unlock_irq(client_list_lock); | ||
368 | |||
369 | if (copy_to_user((void __user *) arg, &stats, sizeof stats)) | ||
370 | return -EFAULT; | ||
371 | else | ||
372 | return 0; | ||
373 | |||
374 | case NOSY_IOC_START: | ||
375 | spin_lock_irq(client_list_lock); | ||
376 | list_add_tail(&client->link, &client->lynx->client_list); | ||
377 | spin_unlock_irq(client_list_lock); | ||
378 | |||
379 | return 0; | ||
380 | |||
381 | case NOSY_IOC_STOP: | ||
382 | spin_lock_irq(client_list_lock); | ||
383 | list_del_init(&client->link); | ||
384 | spin_unlock_irq(client_list_lock); | ||
385 | |||
386 | return 0; | ||
387 | |||
388 | case NOSY_IOC_FILTER: | ||
389 | spin_lock_irq(client_list_lock); | ||
390 | client->tcode_mask = arg; | ||
391 | spin_unlock_irq(client_list_lock); | ||
392 | |||
393 | return 0; | ||
394 | |||
395 | default: | ||
396 | return -EINVAL; | ||
397 | /* TODO: flush buffer, configure filter. */ | ||
398 | } | ||
399 | } | ||
400 | |||
401 | static const struct file_operations nosy_ops = { | ||
402 | .owner = THIS_MODULE, | ||
403 | .read = nosy_read, | ||
404 | .unlocked_ioctl = nosy_ioctl, | ||
405 | .poll = nosy_poll, | ||
406 | .open = nosy_open, | ||
407 | .release = nosy_release, | ||
408 | }; | ||
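
The read() path plus these four ioctls are the whole user-space interface of the
snooper. A minimal client could look like the sketch below; it assumes that
NOSY_IOC_* and struct nosy_stats come from the nosy-user.h header registered in
ioctl-number.txt, and that the misc device shows up as /dev/nosy (both are
assumptions for illustration, not guaranteed by this patch).

/* Minimal nosy client sketch (assumed: /dev/nosy node, nosy-user.h header). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include "nosy-user.h"  /* assumed to define NOSY_IOC_* and struct nosy_stats */

int main(void)
{
        unsigned char buf[16 * 1024];
        struct nosy_stats stats;
        ssize_t len;
        int fd, i;

        fd = open("/dev/nosy", O_RDONLY);
        if (fd < 0)
                return 1;

        ioctl(fd, NOSY_IOC_FILTER, ~0u);  /* tcode mask is passed by value */
        ioctl(fd, NOSY_IOC_START, 0);     /* join the card's client list */

        for (i = 0; i < 10; i++) {
                len = read(fd, buf, sizeof buf);  /* one snooped record */
                if (len < 0)
                        break;
                printf("captured %zd bytes\n", len);
        }

        ioctl(fd, NOSY_IOC_STOP, 0);
        if (ioctl(fd, NOSY_IOC_GET_STATS, &stats) == 0)
                printf("total %u, lost %u\n",
                       stats.total_packet_count, stats.lost_packet_count);

        close(fd);
        return 0;
}
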
409 | |||
410 | #define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */ | ||
411 | |||
412 | static void | ||
413 | packet_irq_handler(struct pcilynx *lynx) | ||
414 | { | ||
415 | struct client *client; | ||
416 | u32 tcode_mask, tcode; | ||
417 | size_t length; | ||
418 | struct timeval tv; | ||
419 | |||
420 | /* FIXME: Also report rcv_speed. */ | ||
421 | |||
422 | length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff; | ||
423 | tcode = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf; | ||
424 | |||
425 | do_gettimeofday(&tv); | ||
426 | lynx->rcv_buffer[0] = (__force __le32)tv.tv_usec; | ||
427 | |||
428 | if (length == PHY_PACKET_SIZE) | ||
429 | tcode_mask = 1 << TCODE_PHY_PACKET; | ||
430 | else | ||
431 | tcode_mask = 1 << tcode; | ||
432 | |||
433 | spin_lock(&lynx->client_list_lock); | ||
434 | |||
435 | list_for_each_entry(client, &lynx->client_list, link) | ||
436 | if (client->tcode_mask & tcode_mask) | ||
437 | packet_buffer_put(&client->buffer, | ||
438 | lynx->rcv_buffer, length + 4); | ||
439 | |||
440 | spin_unlock(&lynx->client_list_lock); | ||
441 | } | ||
442 | |||
443 | static void | ||
444 | bus_reset_irq_handler(struct pcilynx *lynx) | ||
445 | { | ||
446 | struct client *client; | ||
447 | struct timeval tv; | ||
448 | |||
449 | do_gettimeofday(&tv); | ||
450 | |||
451 | spin_lock(&lynx->client_list_lock); | ||
452 | |||
453 | list_for_each_entry(client, &lynx->client_list, link) | ||
454 | packet_buffer_put(&client->buffer, &tv.tv_usec, 4); | ||
455 | |||
456 | spin_unlock(&lynx->client_list_lock); | ||
457 | } | ||
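
Both interrupt handlers queue records of the same shape into each listening
client's buffer: a 32-bit microsecond timestamp (the quadlet packet_irq_handler
overwrites in rcv_buffer[0]), followed for packets by the captured quadlets,
while a bus reset is delivered as a bare 4-byte timestamp. A hedged sketch of
the matching user-space decode, assuming one record per read() and a
little-endian host:

/* Sketch only: decode a record as queued by the two handlers above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

void decode_record(const unsigned char *rec, size_t len)
{
        uint32_t timestamp, q1;

        memcpy(&timestamp, rec, 4);          /* tv_usec written by the driver */
        if (len == 4) {
                printf("%u us: bus reset\n", timestamp);
                return;
        }
        memcpy(&q1, rec + 4, 4);             /* first quadlet of the packet */
        printf("%u us: packet, tcode 0x%x, %zu bytes\n",
               timestamp, (q1 >> 4) & 0xf, len - 4);
}
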
458 | |||
459 | static irqreturn_t | ||
460 | irq_handler(int irq, void *device) | ||
461 | { | ||
462 | struct pcilynx *lynx = device; | ||
463 | u32 pci_int_status; | ||
464 | |||
465 | pci_int_status = reg_read(lynx, PCI_INT_STATUS); | ||
466 | |||
467 | if (pci_int_status == ~0) | ||
468 | /* Card was ejected. */ | ||
469 | return IRQ_NONE; | ||
470 | |||
471 | if ((pci_int_status & PCI_INT_INT_PEND) == 0) | ||
472 | /* Not our interrupt, bail out quickly. */ | ||
473 | return IRQ_NONE; | ||
474 | |||
475 | if ((pci_int_status & PCI_INT_P1394_INT) != 0) { | ||
476 | u32 link_int_status; | ||
477 | |||
478 | link_int_status = reg_read(lynx, LINK_INT_STATUS); | ||
479 | reg_write(lynx, LINK_INT_STATUS, link_int_status); | ||
480 | |||
481 | if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0) | ||
482 | bus_reset_irq_handler(lynx); | ||
483 | } | ||
484 | |||
485 | /* Clear the PCI_INT_STATUS register only after clearing the | ||
486 | * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will | ||
487 | * be set again immediately. */ | ||
488 | |||
489 | reg_write(lynx, PCI_INT_STATUS, pci_int_status); | ||
490 | |||
491 | if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) { | ||
492 | packet_irq_handler(lynx); | ||
493 | run_pcl(lynx, lynx->rcv_start_pcl_bus, 0); | ||
494 | } | ||
495 | |||
496 | return IRQ_HANDLED; | ||
497 | } | ||
498 | |||
499 | static void | ||
500 | remove_card(struct pci_dev *dev) | ||
501 | { | ||
502 | struct pcilynx *lynx = pci_get_drvdata(dev); | ||
503 | struct client *client; | ||
504 | |||
505 | mutex_lock(&card_mutex); | ||
506 | list_del_init(&lynx->link); | ||
507 | misc_deregister(&lynx->misc); | ||
508 | mutex_unlock(&card_mutex); | ||
509 | |||
510 | reg_write(lynx, PCI_INT_ENABLE, 0); | ||
511 | free_irq(lynx->pci_device->irq, lynx); | ||
512 | |||
513 | spin_lock_irq(&lynx->client_list_lock); | ||
514 | list_for_each_entry(client, &lynx->client_list, link) | ||
515 | wake_up_interruptible(&client->buffer.wait); | ||
516 | spin_unlock_irq(&lynx->client_list_lock); | ||
517 | |||
518 | pci_free_consistent(lynx->pci_device, sizeof(struct pcl), | ||
519 | lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus); | ||
520 | pci_free_consistent(lynx->pci_device, sizeof(struct pcl), | ||
521 | lynx->rcv_pcl, lynx->rcv_pcl_bus); | ||
522 | pci_free_consistent(lynx->pci_device, PAGE_SIZE, | ||
523 | lynx->rcv_buffer, lynx->rcv_buffer_bus); | ||
524 | |||
525 | iounmap(lynx->registers); | ||
526 | pci_disable_device(dev); | ||
527 | lynx_put(lynx); | ||
528 | } | ||
529 | |||
530 | #define RCV_BUFFER_SIZE (16 * 1024) | ||
531 | |||
532 | static int __devinit | ||
533 | add_card(struct pci_dev *dev, const struct pci_device_id *unused) | ||
534 | { | ||
535 | struct pcilynx *lynx; | ||
536 | u32 p, end; | ||
537 | int ret, i; | ||
538 | |||
539 | if (pci_set_dma_mask(dev, 0xffffffff)) { | ||
540 | dev_err(&dev->dev, | ||
541 | "DMA address limits not supported for PCILynx hardware\n"); | ||
542 | return -ENXIO; | ||
543 | } | ||
544 | if (pci_enable_device(dev)) { | ||
545 | dev_err(&dev->dev, "Failed to enable PCILynx hardware\n"); | ||
546 | return -ENXIO; | ||
547 | } | ||
548 | pci_set_master(dev); | ||
549 | |||
550 | lynx = kzalloc(sizeof *lynx, GFP_KERNEL); | ||
551 | if (lynx == NULL) { | ||
552 | dev_err(&dev->dev, "Failed to allocate control structure\n"); | ||
553 | ret = -ENOMEM; | ||
554 | goto fail_disable; | ||
555 | } | ||
556 | lynx->pci_device = dev; | ||
557 | pci_set_drvdata(dev, lynx); | ||
558 | |||
559 | spin_lock_init(&lynx->client_list_lock); | ||
560 | INIT_LIST_HEAD(&lynx->client_list); | ||
561 | kref_init(&lynx->kref); | ||
562 | |||
563 | lynx->registers = ioremap_nocache(pci_resource_start(dev, 0), | ||
564 | PCILYNX_MAX_REGISTER); | ||
565 | |||
566 | lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device, | ||
567 | sizeof(struct pcl), &lynx->rcv_start_pcl_bus); | ||
568 | lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device, | ||
569 | sizeof(struct pcl), &lynx->rcv_pcl_bus); | ||
570 | lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device, | ||
571 | RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus); | ||
572 | if (lynx->rcv_start_pcl == NULL || | ||
573 | lynx->rcv_pcl == NULL || | ||
574 | lynx->rcv_buffer == NULL) { | ||
575 | dev_err(&dev->dev, "Failed to allocate receive buffer\n"); | ||
576 | ret = -ENOMEM; | ||
577 | goto fail_deallocate; | ||
578 | } | ||
579 | lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus); | ||
580 | lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID); | ||
581 | lynx->rcv_pcl->async_error_next = cpu_to_le32(PCL_NEXT_INVALID); | ||
582 | |||
583 | lynx->rcv_pcl->buffer[0].control = | ||
584 | cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044); | ||
585 | lynx->rcv_pcl->buffer[0].pointer = | ||
586 | cpu_to_le32(lynx->rcv_buffer_bus + 4); | ||
587 | p = lynx->rcv_buffer_bus + 2048; | ||
588 | end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE; | ||
589 | for (i = 1; p < end; i++, p += 2048) { | ||
590 | lynx->rcv_pcl->buffer[i].control = | ||
591 | cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048); | ||
592 | lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p); | ||
593 | } | ||
594 | lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF); | ||
595 | |||
596 | reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET); | ||
597 | /* Fix buggy cards with autoboot pin not tied low: */ | ||
598 | reg_write(lynx, DMA0_CHAN_CTRL, 0); | ||
599 | reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24); | ||
600 | |||
601 | #if 0 | ||
602 | /* now, looking for PHY register set */ | ||
603 | if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) { | ||
604 | lynx->phyic.reg_1394a = 1; | ||
605 | PRINT(KERN_INFO, lynx->id, | ||
606 | "found 1394a conform PHY (using extended register set)"); | ||
607 | lynx->phyic.vendor = get_phy_vendorid(lynx); | ||
608 | lynx->phyic.product = get_phy_productid(lynx); | ||
609 | } else { | ||
610 | lynx->phyic.reg_1394a = 0; | ||
611 | PRINT(KERN_INFO, lynx->id, "found old 1394 PHY"); | ||
612 | } | ||
613 | #endif | ||
614 | |||
615 | /* Setup the general receive FIFO max size. */ | ||
616 | reg_write(lynx, FIFO_SIZES, 255); | ||
617 | |||
618 | reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL); | ||
619 | |||
620 | reg_write(lynx, LINK_INT_ENABLE, | ||
621 | LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD | | ||
622 | LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK | | ||
623 | LINK_INT_AT_STUCK | LINK_INT_SNTRJ | | ||
624 | LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW | | ||
625 | LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW); | ||
626 | |||
627 | /* Disable the L flag in self ID packets. */ | ||
628 | set_phy_reg(lynx, 4, 0); | ||
629 | |||
630 | /* Put this baby into snoop mode */ | ||
631 | reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE); | ||
632 | |||
633 | run_pcl(lynx, lynx->rcv_start_pcl_bus, 0); | ||
634 | |||
635 | if (request_irq(dev->irq, irq_handler, IRQF_SHARED, | ||
636 | driver_name, lynx)) { | ||
637 | dev_err(&dev->dev, | ||
638 | "Failed to allocate shared interrupt %d\n", dev->irq); | ||
639 | ret = -EIO; | ||
640 | goto fail_deallocate; | ||
641 | } | ||
642 | |||
643 | lynx->misc.parent = &dev->dev; | ||
644 | lynx->misc.minor = MISC_DYNAMIC_MINOR; | ||
645 | lynx->misc.name = "nosy"; | ||
646 | lynx->misc.fops = &nosy_ops; | ||
647 | |||
648 | mutex_lock(&card_mutex); | ||
649 | ret = misc_register(&lynx->misc); | ||
650 | if (ret) { | ||
651 | dev_err(&dev->dev, "Failed to register misc char device\n"); | ||
652 | mutex_unlock(&card_mutex); | ||
653 | goto fail_free_irq; | ||
654 | } | ||
655 | list_add_tail(&lynx->link, &card_list); | ||
656 | mutex_unlock(&card_mutex); | ||
657 | |||
658 | dev_info(&dev->dev, | ||
659 | "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq); | ||
660 | |||
661 | return 0; | ||
662 | |||
663 | fail_free_irq: | ||
664 | reg_write(lynx, PCI_INT_ENABLE, 0); | ||
665 | free_irq(lynx->pci_device->irq, lynx); | ||
666 | |||
667 | fail_deallocate: | ||
668 | if (lynx->rcv_start_pcl) | ||
669 | pci_free_consistent(lynx->pci_device, sizeof(struct pcl), | ||
670 | lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus); | ||
671 | if (lynx->rcv_pcl) | ||
672 | pci_free_consistent(lynx->pci_device, sizeof(struct pcl), | ||
673 | lynx->rcv_pcl, lynx->rcv_pcl_bus); | ||
674 | if (lynx->rcv_buffer) | ||
675 | pci_free_consistent(lynx->pci_device, PAGE_SIZE, | ||
676 | lynx->rcv_buffer, lynx->rcv_buffer_bus); | ||
677 | iounmap(lynx->registers); | ||
678 | kfree(lynx); | ||
679 | |||
680 | fail_disable: | ||
681 | pci_disable_device(dev); | ||
682 | |||
683 | return ret; | ||
684 | } | ||
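
The receive PCL that add_card() builds carves the 16 KiB buffer into one
2044-byte scatter entry starting 4 bytes in (the first quadlet is reserved for
the timestamp) plus further 2048-byte entries, and marks the final one with
PCL_LAST_BUFF. The standalone sketch below just replays that arithmetic to show
the resulting layout; it is not driver code.

/* Sketch: scatter layout programmed into rcv_pcl for the 16 KiB buffer. */
#include <stdio.h>

#define RCV_BUFFER_SIZE (16 * 1024)

int main(void)
{
        unsigned int p, i, capacity = 2044;

        printf("buffer[0]: offset 4, 2044 bytes\n");
        for (i = 1, p = 2048; p < RCV_BUFFER_SIZE; i++, p += 2048) {
                printf("buffer[%u]: offset %u, 2048 bytes\n", i, p);
                capacity += 2048;
        }
        /* 8 entries, 16380 capture bytes; buffer[i - 1] gets PCL_LAST_BUFF. */
        printf("%u entries, %u capture bytes\n", i, capacity);
        return 0;
}
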
685 | |||
686 | static struct pci_device_id pci_table[] __devinitdata = { | ||
687 | { | ||
688 | .vendor = PCI_VENDOR_ID_TI, | ||
689 | .device = PCI_DEVICE_ID_TI_PCILYNX, | ||
690 | .subvendor = PCI_ANY_ID, | ||
691 | .subdevice = PCI_ANY_ID, | ||
692 | }, | ||
693 | { } /* Terminating entry */ | ||
694 | }; | ||
695 | |||
696 | static struct pci_driver lynx_pci_driver = { | ||
697 | .name = driver_name, | ||
698 | .id_table = pci_table, | ||
699 | .probe = add_card, | ||
700 | .remove = remove_card, | ||
701 | }; | ||
702 | |||
703 | MODULE_AUTHOR("Kristian Hoegsberg"); | ||
704 | MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers"); | ||
705 | MODULE_LICENSE("GPL"); | ||
706 | MODULE_DEVICE_TABLE(pci, pci_table); | ||
707 | |||
708 | static int __init nosy_init(void) | ||
709 | { | ||
710 | return pci_register_driver(&lynx_pci_driver); | ||
711 | } | ||
712 | |||
713 | static void __exit nosy_cleanup(void) | ||
714 | { | ||
715 | pci_unregister_driver(&lynx_pci_driver); | ||
716 | |||
717 | pr_info("Unloaded %s\n", driver_name); | ||
718 | } | ||
719 | |||
720 | module_init(nosy_init); | ||
721 | module_exit(nosy_cleanup); | ||
diff --git a/drivers/firewire/nosy.h b/drivers/firewire/nosy.h
new file mode 100644
index 000000000000..078ff27f4756
--- /dev/null
+++ b/drivers/firewire/nosy.h
@@ -0,0 +1,237 @@ | |||
1 | /* | ||
2 | * Chip register definitions for PCILynx chipset. Based on pcilynx.h | ||
3 | * from the Linux 1394 drivers, but modified a bit so the names here | ||
4 | * match the specification exactly (even though they have weird names, | ||
5 | * like xxx_OVER_FLOW, or arbitrary abbreviations like SNTRJ for "sent | ||
6 | * reject" etc.) | ||
7 | */ | ||
8 | |||
9 | #define PCILYNX_MAX_REGISTER 0xfff | ||
10 | #define PCILYNX_MAX_MEMORY 0xffff | ||
11 | |||
12 | #define PCI_LATENCY_CACHELINE 0x0c | ||
13 | |||
14 | #define MISC_CONTROL 0x40 | ||
15 | #define MISC_CONTROL_SWRESET (1<<0) | ||
16 | |||
17 | #define SERIAL_EEPROM_CONTROL 0x44 | ||
18 | |||
19 | #define PCI_INT_STATUS 0x48 | ||
20 | #define PCI_INT_ENABLE 0x4c | ||
21 | /* status and enable have identical bit numbers */ | ||
22 | #define PCI_INT_INT_PEND (1<<31) | ||
23 | #define PCI_INT_FRC_INT (1<<30) | ||
24 | #define PCI_INT_SLV_ADR_PERR (1<<28) | ||
25 | #define PCI_INT_SLV_DAT_PERR (1<<27) | ||
26 | #define PCI_INT_MST_DAT_PERR (1<<26) | ||
27 | #define PCI_INT_MST_DEV_TO (1<<25) | ||
28 | #define PCI_INT_INT_SLV_TO (1<<23) | ||
29 | #define PCI_INT_AUX_TO (1<<18) | ||
30 | #define PCI_INT_AUX_INT (1<<17) | ||
31 | #define PCI_INT_P1394_INT (1<<16) | ||
32 | #define PCI_INT_DMA4_PCL (1<<9) | ||
33 | #define PCI_INT_DMA4_HLT (1<<8) | ||
34 | #define PCI_INT_DMA3_PCL (1<<7) | ||
35 | #define PCI_INT_DMA3_HLT (1<<6) | ||
36 | #define PCI_INT_DMA2_PCL (1<<5) | ||
37 | #define PCI_INT_DMA2_HLT (1<<4) | ||
38 | #define PCI_INT_DMA1_PCL (1<<3) | ||
39 | #define PCI_INT_DMA1_HLT (1<<2) | ||
40 | #define PCI_INT_DMA0_PCL (1<<1) | ||
41 | #define PCI_INT_DMA0_HLT (1<<0) | ||
42 | /* all DMA interrupts combined: */ | ||
43 | #define PCI_INT_DMA_ALL 0x3ff | ||
44 | |||
45 | #define PCI_INT_DMA_HLT(chan) (1 << (chan * 2)) | ||
46 | #define PCI_INT_DMA_PCL(chan) (1 << (chan * 2 + 1)) | ||
47 | |||
48 | #define LBUS_ADDR 0xb4 | ||
49 | #define LBUS_ADDR_SEL_RAM (0x0<<16) | ||
50 | #define LBUS_ADDR_SEL_ROM (0x1<<16) | ||
51 | #define LBUS_ADDR_SEL_AUX (0x2<<16) | ||
52 | #define LBUS_ADDR_SEL_ZV (0x3<<16) | ||
53 | |||
54 | #define GPIO_CTRL_A 0xb8 | ||
55 | #define GPIO_CTRL_B 0xbc | ||
56 | #define GPIO_DATA_BASE 0xc0 | ||
57 | |||
58 | #define DMA_BREG(base, chan) (base + chan * 0x20) | ||
59 | #define DMA_SREG(base, chan) (base + chan * 0x10) | ||
60 | |||
61 | #define PCL_NEXT_INVALID (1<<0) | ||
62 | |||
63 | /* transfer commands */ | ||
64 | #define PCL_CMD_RCV (0x1<<24) | ||
65 | #define PCL_CMD_RCV_AND_UPDATE (0xa<<24) | ||
66 | #define PCL_CMD_XMT (0x2<<24) | ||
67 | #define PCL_CMD_UNFXMT (0xc<<24) | ||
68 | #define PCL_CMD_PCI_TO_LBUS (0x8<<24) | ||
69 | #define PCL_CMD_LBUS_TO_PCI (0x9<<24) | ||
70 | |||
71 | /* aux commands */ | ||
72 | #define PCL_CMD_NOP (0x0<<24) | ||
73 | #define PCL_CMD_LOAD (0x3<<24) | ||
74 | #define PCL_CMD_STOREQ (0x4<<24) | ||
75 | #define PCL_CMD_STORED (0xb<<24) | ||
76 | #define PCL_CMD_STORE0 (0x5<<24) | ||
77 | #define PCL_CMD_STORE1 (0x6<<24) | ||
78 | #define PCL_CMD_COMPARE (0xe<<24) | ||
79 | #define PCL_CMD_SWAP_COMPARE (0xf<<24) | ||
80 | #define PCL_CMD_ADD (0xd<<24) | ||
81 | #define PCL_CMD_BRANCH (0x7<<24) | ||
82 | |||
83 | /* BRANCH condition codes */ | ||
84 | #define PCL_COND_DMARDY_SET (0x1<<20) | ||
85 | #define PCL_COND_DMARDY_CLEAR (0x2<<20) | ||
86 | |||
87 | #define PCL_GEN_INTR (1<<19) | ||
88 | #define PCL_LAST_BUFF (1<<18) | ||
89 | #define PCL_LAST_CMD (PCL_LAST_BUFF) | ||
90 | #define PCL_WAITSTAT (1<<17) | ||
91 | #define PCL_BIGENDIAN (1<<16) | ||
92 | #define PCL_ISOMODE (1<<12) | ||
93 | |||
94 | #define DMA0_PREV_PCL 0x100 | ||
95 | #define DMA1_PREV_PCL 0x120 | ||
96 | #define DMA2_PREV_PCL 0x140 | ||
97 | #define DMA3_PREV_PCL 0x160 | ||
98 | #define DMA4_PREV_PCL 0x180 | ||
99 | #define DMA_PREV_PCL(chan) (DMA_BREG(DMA0_PREV_PCL, chan)) | ||
100 | |||
101 | #define DMA0_CURRENT_PCL 0x104 | ||
102 | #define DMA1_CURRENT_PCL 0x124 | ||
103 | #define DMA2_CURRENT_PCL 0x144 | ||
104 | #define DMA3_CURRENT_PCL 0x164 | ||
105 | #define DMA4_CURRENT_PCL 0x184 | ||
106 | #define DMA_CURRENT_PCL(chan) (DMA_BREG(DMA0_CURRENT_PCL, chan)) | ||
107 | |||
108 | #define DMA0_CHAN_STAT 0x10c | ||
109 | #define DMA1_CHAN_STAT 0x12c | ||
110 | #define DMA2_CHAN_STAT 0x14c | ||
111 | #define DMA3_CHAN_STAT 0x16c | ||
112 | #define DMA4_CHAN_STAT 0x18c | ||
113 | #define DMA_CHAN_STAT(chan) (DMA_BREG(DMA0_CHAN_STAT, chan)) | ||
114 | /* CHAN_STATUS registers share bits */ | ||
115 | #define DMA_CHAN_STAT_SELFID (1<<31) | ||
116 | #define DMA_CHAN_STAT_ISOPKT (1<<30) | ||
117 | #define DMA_CHAN_STAT_PCIERR (1<<29) | ||
118 | #define DMA_CHAN_STAT_PKTERR (1<<28) | ||
119 | #define DMA_CHAN_STAT_PKTCMPL (1<<27) | ||
120 | #define DMA_CHAN_STAT_SPECIALACK (1<<14) | ||
121 | |||
122 | #define DMA0_CHAN_CTRL 0x110 | ||
123 | #define DMA1_CHAN_CTRL 0x130 | ||
124 | #define DMA2_CHAN_CTRL 0x150 | ||
125 | #define DMA3_CHAN_CTRL 0x170 | ||
126 | #define DMA4_CHAN_CTRL 0x190 | ||
127 | #define DMA_CHAN_CTRL(chan) (DMA_BREG(DMA0_CHAN_CTRL, chan)) | ||
128 | /* CHAN_CTRL registers share bits */ | ||
129 | #define DMA_CHAN_CTRL_ENABLE (1<<31) | ||
130 | #define DMA_CHAN_CTRL_BUSY (1<<30) | ||
131 | #define DMA_CHAN_CTRL_LINK (1<<29) | ||
132 | |||
133 | #define DMA0_READY 0x114 | ||
134 | #define DMA1_READY 0x134 | ||
135 | #define DMA2_READY 0x154 | ||
136 | #define DMA3_READY 0x174 | ||
137 | #define DMA4_READY 0x194 | ||
138 | #define DMA_READY(chan) (DMA_BREG(DMA0_READY, chan)) | ||
139 | |||
140 | #define DMA_GLOBAL_REGISTER 0x908 | ||
141 | |||
142 | #define FIFO_SIZES 0xa00 | ||
143 | |||
144 | #define FIFO_CONTROL 0xa10 | ||
145 | #define FIFO_CONTROL_GRF_FLUSH (1<<4) | ||
146 | #define FIFO_CONTROL_ITF_FLUSH (1<<3) | ||
147 | #define FIFO_CONTROL_ATF_FLUSH (1<<2) | ||
148 | |||
149 | #define FIFO_XMIT_THRESHOLD 0xa14 | ||
150 | |||
151 | #define DMA0_WORD0_CMP_VALUE 0xb00 | ||
152 | #define DMA1_WORD0_CMP_VALUE 0xb10 | ||
153 | #define DMA2_WORD0_CMP_VALUE 0xb20 | ||
154 | #define DMA3_WORD0_CMP_VALUE 0xb30 | ||
155 | #define DMA4_WORD0_CMP_VALUE 0xb40 | ||
156 | #define DMA_WORD0_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD0_CMP_VALUE, chan)) | ||
157 | |||
158 | #define DMA0_WORD0_CMP_ENABLE 0xb04 | ||
159 | #define DMA1_WORD0_CMP_ENABLE 0xb14 | ||
160 | #define DMA2_WORD0_CMP_ENABLE 0xb24 | ||
161 | #define DMA3_WORD0_CMP_ENABLE 0xb34 | ||
162 | #define DMA4_WORD0_CMP_ENABLE 0xb44 | ||
163 | #define DMA_WORD0_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD0_CMP_ENABLE, chan)) | ||
164 | |||
165 | #define DMA0_WORD1_CMP_VALUE 0xb08 | ||
166 | #define DMA1_WORD1_CMP_VALUE 0xb18 | ||
167 | #define DMA2_WORD1_CMP_VALUE 0xb28 | ||
168 | #define DMA3_WORD1_CMP_VALUE 0xb38 | ||
169 | #define DMA4_WORD1_CMP_VALUE 0xb48 | ||
170 | #define DMA_WORD1_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD1_CMP_VALUE, chan)) | ||
171 | |||
172 | #define DMA0_WORD1_CMP_ENABLE 0xb0c | ||
173 | #define DMA1_WORD1_CMP_ENABLE 0xb1c | ||
174 | #define DMA2_WORD1_CMP_ENABLE 0xb2c | ||
175 | #define DMA3_WORD1_CMP_ENABLE 0xb3c | ||
176 | #define DMA4_WORD1_CMP_ENABLE 0xb4c | ||
177 | #define DMA_WORD1_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD1_CMP_ENABLE, chan)) | ||
178 | /* word 1 compare enable flags */ | ||
179 | #define DMA_WORD1_CMP_MATCH_OTHERBUS (1<<15) | ||
180 | #define DMA_WORD1_CMP_MATCH_BROADCAST (1<<14) | ||
181 | #define DMA_WORD1_CMP_MATCH_BUS_BCAST (1<<13) | ||
182 | #define DMA_WORD1_CMP_MATCH_LOCAL_NODE (1<<12) | ||
183 | #define DMA_WORD1_CMP_MATCH_EXACT (1<<11) | ||
184 | #define DMA_WORD1_CMP_ENABLE_SELF_ID (1<<10) | ||
185 | #define DMA_WORD1_CMP_ENABLE_MASTER (1<<8) | ||
186 | |||
187 | #define LINK_ID 0xf00 | ||
188 | #define LINK_ID_BUS(id) (id<<22) | ||
189 | #define LINK_ID_NODE(id) (id<<16) | ||
190 | |||
191 | #define LINK_CONTROL 0xf04 | ||
192 | #define LINK_CONTROL_BUSY (1<<29) | ||
193 | #define LINK_CONTROL_TX_ISO_EN (1<<26) | ||
194 | #define LINK_CONTROL_RX_ISO_EN (1<<25) | ||
195 | #define LINK_CONTROL_TX_ASYNC_EN (1<<24) | ||
196 | #define LINK_CONTROL_RX_ASYNC_EN (1<<23) | ||
197 | #define LINK_CONTROL_RESET_TX (1<<21) | ||
198 | #define LINK_CONTROL_RESET_RX (1<<20) | ||
199 | #define LINK_CONTROL_CYCMASTER (1<<11) | ||
200 | #define LINK_CONTROL_CYCSOURCE (1<<10) | ||
201 | #define LINK_CONTROL_CYCTIMEREN (1<<9) | ||
202 | #define LINK_CONTROL_RCV_CMP_VALID (1<<7) | ||
203 | #define LINK_CONTROL_SNOOP_ENABLE (1<<6) | ||
204 | |||
205 | #define CYCLE_TIMER 0xf08 | ||
206 | |||
207 | #define LINK_PHY 0xf0c | ||
208 | #define LINK_PHY_READ (1<<31) | ||
209 | #define LINK_PHY_WRITE (1<<30) | ||
210 | #define LINK_PHY_ADDR(addr) (addr<<24) | ||
211 | #define LINK_PHY_WDATA(data) (data<<16) | ||
212 | #define LINK_PHY_RADDR(addr) (addr<<8) | ||
213 | |||
214 | #define LINK_INT_STATUS 0xf14 | ||
215 | #define LINK_INT_ENABLE 0xf18 | ||
216 | /* status and enable have identical bit numbers */ | ||
217 | #define LINK_INT_LINK_INT (1<<31) | ||
218 | #define LINK_INT_PHY_TIME_OUT (1<<30) | ||
219 | #define LINK_INT_PHY_REG_RCVD (1<<29) | ||
220 | #define LINK_INT_PHY_BUSRESET (1<<28) | ||
221 | #define LINK_INT_TX_RDY (1<<26) | ||
222 | #define LINK_INT_RX_DATA_RDY (1<<25) | ||
223 | #define LINK_INT_IT_STUCK (1<<20) | ||
224 | #define LINK_INT_AT_STUCK (1<<19) | ||
225 | #define LINK_INT_SNTRJ (1<<17) | ||
226 | #define LINK_INT_HDR_ERR (1<<16) | ||
227 | #define LINK_INT_TC_ERR (1<<15) | ||
228 | #define LINK_INT_CYC_SEC (1<<11) | ||
229 | #define LINK_INT_CYC_STRT (1<<10) | ||
230 | #define LINK_INT_CYC_DONE (1<<9) | ||
231 | #define LINK_INT_CYC_PEND (1<<8) | ||
232 | #define LINK_INT_CYC_LOST (1<<7) | ||
233 | #define LINK_INT_CYC_ARB_FAILED (1<<6) | ||
234 | #define LINK_INT_GRF_OVER_FLOW (1<<5) | ||
235 | #define LINK_INT_ITF_UNDER_FLOW (1<<4) | ||
236 | #define LINK_INT_ATF_UNDER_FLOW (1<<3) | ||
237 | #define LINK_INT_IARB_FAILED (1<<0) | ||
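
The per-channel register blocks listed above repeat at fixed strides, which is
what DMA_BREG/DMA_SREG and the "dmachan * 0x20" arithmetic in run_pcl() rely on.
A small C11 sanity sketch, not part of the patch, assuming this nosy.h is on the
include path:

/* Compile-time checks that the stride macros match the explicit offsets. */
#include "nosy.h"

_Static_assert(DMA_BREG(DMA0_CHAN_CTRL, 1) == DMA1_CHAN_CTRL,
               "0x20 stride for the DMA channel control block");
_Static_assert(DMA_BREG(DMA0_CURRENT_PCL, 4) == DMA4_CURRENT_PCL,
               "0x20 stride for the current PCL pointers");
_Static_assert(DMA_SREG(DMA0_WORD0_CMP_VALUE, 2) == DMA2_WORD0_CMP_VALUE,
               "0x10 stride for the word compare registers");
_Static_assert(PCI_INT_DMA_HLT(0) == PCI_INT_DMA0_HLT &&
               PCI_INT_DMA_PCL(4) == PCI_INT_DMA4_PCL,
               "per-channel halt/PCL interrupt bits");
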
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9f627e758cfc..7f03540cabe8 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -18,6 +18,7 @@ | |||
18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/bug.h> | ||
21 | #include <linux/compiler.h> | 22 | #include <linux/compiler.h> |
22 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
23 | #include <linux/device.h> | 24 | #include <linux/device.h> |
@@ -32,11 +33,13 @@ | |||
32 | #include <linux/mm.h> | 33 | #include <linux/mm.h> |
33 | #include <linux/module.h> | 34 | #include <linux/module.h> |
34 | #include <linux/moduleparam.h> | 35 | #include <linux/moduleparam.h> |
36 | #include <linux/mutex.h> | ||
35 | #include <linux/pci.h> | 37 | #include <linux/pci.h> |
36 | #include <linux/pci_ids.h> | 38 | #include <linux/pci_ids.h> |
37 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
38 | #include <linux/spinlock.h> | 40 | #include <linux/spinlock.h> |
39 | #include <linux/string.h> | 41 | #include <linux/string.h> |
42 | #include <linux/time.h> | ||
40 | 43 | ||
41 | #include <asm/byteorder.h> | 44 | #include <asm/byteorder.h> |
42 | #include <asm/page.h> | 45 | #include <asm/page.h> |
@@ -170,6 +173,10 @@ struct fw_ohci { | |||
170 | int generation; | 173 | int generation; |
171 | int request_generation; /* for timestamping incoming requests */ | 174 | int request_generation; /* for timestamping incoming requests */ |
172 | unsigned quirks; | 175 | unsigned quirks; |
176 | unsigned int pri_req_max; | ||
177 | u32 bus_time; | ||
178 | bool is_root; | ||
179 | bool csr_state_setclear_abdicate; | ||
173 | 180 | ||
174 | /* | 181 | /* |
175 | * Spinlock for accessing fw_ohci data. Never call out of | 182 | * Spinlock for accessing fw_ohci data. Never call out of |
@@ -177,16 +184,20 @@ struct fw_ohci { | |||
177 | */ | 184 | */ |
178 | spinlock_t lock; | 185 | spinlock_t lock; |
179 | 186 | ||
187 | struct mutex phy_reg_mutex; | ||
188 | |||
180 | struct ar_context ar_request_ctx; | 189 | struct ar_context ar_request_ctx; |
181 | struct ar_context ar_response_ctx; | 190 | struct ar_context ar_response_ctx; |
182 | struct context at_request_ctx; | 191 | struct context at_request_ctx; |
183 | struct context at_response_ctx; | 192 | struct context at_response_ctx; |
184 | 193 | ||
185 | u32 it_context_mask; | 194 | u32 it_context_mask; /* unoccupied IT contexts */ |
186 | struct iso_context *it_context_list; | 195 | struct iso_context *it_context_list; |
187 | u64 ir_context_channels; | 196 | u64 ir_context_channels; /* unoccupied channels */ |
188 | u32 ir_context_mask; | 197 | u32 ir_context_mask; /* unoccupied IR contexts */ |
189 | struct iso_context *ir_context_list; | 198 | struct iso_context *ir_context_list; |
199 | u64 mc_channels; /* channels in use by the multichannel IR context */ | ||
200 | bool mc_allocated; | ||
190 | 201 | ||
191 | __be32 *config_rom; | 202 | __be32 *config_rom; |
192 | dma_addr_t config_rom_bus; | 203 | dma_addr_t config_rom_bus; |
@@ -231,12 +242,14 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card) | |||
231 | 242 | ||
232 | static char ohci_driver_name[] = KBUILD_MODNAME; | 243 | static char ohci_driver_name[] = KBUILD_MODNAME; |
233 | 244 | ||
245 | #define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 | ||
234 | #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 | 246 | #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 |
235 | 247 | ||
236 | #define QUIRK_CYCLE_TIMER 1 | 248 | #define QUIRK_CYCLE_TIMER 1 |
237 | #define QUIRK_RESET_PACKET 2 | 249 | #define QUIRK_RESET_PACKET 2 |
238 | #define QUIRK_BE_HEADERS 4 | 250 | #define QUIRK_BE_HEADERS 4 |
239 | #define QUIRK_NO_1394A 8 | 251 | #define QUIRK_NO_1394A 8 |
252 | #define QUIRK_NO_MSI 16 | ||
240 | 253 | ||
241 | /* In case of multiple matches in ohci_quirks[], only the first one is used. */ | 254 | /* In case of multiple matches in ohci_quirks[], only the first one is used. */ |
242 | static const struct { | 255 | static const struct { |
@@ -247,6 +260,7 @@ static const struct { | |||
247 | QUIRK_NO_1394A}, | 260 | QUIRK_NO_1394A}, |
248 | {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, | 261 | {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, |
249 | {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, | 262 | {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, |
263 | {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI}, | ||
250 | {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, | 264 | {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, |
251 | {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, | 265 | {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, |
252 | {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, | 266 | {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, |
@@ -260,6 +274,7 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" | |||
260 | ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) | 274 | ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) |
261 | ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) | 275 | ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) |
262 | ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A) | 276 | ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A) |
277 | ", disable MSI = " __stringify(QUIRK_NO_MSI) | ||
263 | ")"); | 278 | ")"); |
264 | 279 | ||
265 | #define OHCI_PARAM_DEBUG_AT_AR 1 | 280 | #define OHCI_PARAM_DEBUG_AT_AR 1 |
@@ -288,7 +303,7 @@ static void log_irqs(u32 evt) | |||
288 | !(evt & OHCI1394_busReset)) | 303 | !(evt & OHCI1394_busReset)) |
289 | return; | 304 | return; |
290 | 305 | ||
291 | fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, | 306 | fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, |
292 | evt & OHCI1394_selfIDComplete ? " selfID" : "", | 307 | evt & OHCI1394_selfIDComplete ? " selfID" : "", |
293 | evt & OHCI1394_RQPkt ? " AR_req" : "", | 308 | evt & OHCI1394_RQPkt ? " AR_req" : "", |
294 | evt & OHCI1394_RSPkt ? " AR_resp" : "", | 309 | evt & OHCI1394_RSPkt ? " AR_resp" : "", |
@@ -298,6 +313,7 @@ static void log_irqs(u32 evt) | |||
298 | evt & OHCI1394_isochTx ? " IT" : "", | 313 | evt & OHCI1394_isochTx ? " IT" : "", |
299 | evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", | 314 | evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", |
300 | evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", | 315 | evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", |
316 | evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "", | ||
301 | evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", | 317 | evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", |
302 | evt & OHCI1394_regAccessFail ? " regAccessFail" : "", | 318 | evt & OHCI1394_regAccessFail ? " regAccessFail" : "", |
303 | evt & OHCI1394_busReset ? " busReset" : "", | 319 | evt & OHCI1394_busReset ? " busReset" : "", |
@@ -305,7 +321,8 @@ static void log_irqs(u32 evt) | |||
305 | OHCI1394_RSPkt | OHCI1394_reqTxComplete | | 321 | OHCI1394_RSPkt | OHCI1394_reqTxComplete | |
306 | OHCI1394_respTxComplete | OHCI1394_isochRx | | 322 | OHCI1394_respTxComplete | OHCI1394_isochRx | |
307 | OHCI1394_isochTx | OHCI1394_postedWriteErr | | 323 | OHCI1394_isochTx | OHCI1394_postedWriteErr | |
308 | OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent | | 324 | OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds | |
325 | OHCI1394_cycleInconsistent | | ||
309 | OHCI1394_regAccessFail | OHCI1394_busReset) | 326 | OHCI1394_regAccessFail | OHCI1394_busReset) |
310 | ? " ?" : ""); | 327 | ? " ?" : ""); |
311 | } | 328 | } |
@@ -470,12 +487,17 @@ static int read_phy_reg(struct fw_ohci *ohci, int addr) | |||
470 | int i; | 487 | int i; |
471 | 488 | ||
472 | reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); | 489 | reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); |
473 | for (i = 0; i < 10; i++) { | 490 | for (i = 0; i < 3 + 100; i++) { |
474 | val = reg_read(ohci, OHCI1394_PhyControl); | 491 | val = reg_read(ohci, OHCI1394_PhyControl); |
475 | if (val & OHCI1394_PhyControl_ReadDone) | 492 | if (val & OHCI1394_PhyControl_ReadDone) |
476 | return OHCI1394_PhyControl_ReadData(val); | 493 | return OHCI1394_PhyControl_ReadData(val); |
477 | 494 | ||
478 | msleep(1); | 495 | /* |
496 | * Try a few times without waiting. Sleeping is necessary | ||
497 | * only when the link/PHY interface is busy. | ||
498 | */ | ||
499 | if (i >= 3) | ||
500 | msleep(1); | ||
479 | } | 501 | } |
480 | fw_error("failed to read phy reg\n"); | 502 | fw_error("failed to read phy reg\n"); |
481 | 503 | ||
@@ -488,25 +510,23 @@ static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val) | |||
488 | 510 | ||
489 | reg_write(ohci, OHCI1394_PhyControl, | 511 | reg_write(ohci, OHCI1394_PhyControl, |
490 | OHCI1394_PhyControl_Write(addr, val)); | 512 | OHCI1394_PhyControl_Write(addr, val)); |
491 | for (i = 0; i < 100; i++) { | 513 | for (i = 0; i < 3 + 100; i++) { |
492 | val = reg_read(ohci, OHCI1394_PhyControl); | 514 | val = reg_read(ohci, OHCI1394_PhyControl); |
493 | if (!(val & OHCI1394_PhyControl_WritePending)) | 515 | if (!(val & OHCI1394_PhyControl_WritePending)) |
494 | return 0; | 516 | return 0; |
495 | 517 | ||
496 | msleep(1); | 518 | if (i >= 3) |
519 | msleep(1); | ||
497 | } | 520 | } |
498 | fw_error("failed to write phy reg\n"); | 521 | fw_error("failed to write phy reg\n"); |
499 | 522 | ||
500 | return -EBUSY; | 523 | return -EBUSY; |
501 | } | 524 | } |
502 | 525 | ||
503 | static int ohci_update_phy_reg(struct fw_card *card, int addr, | 526 | static int update_phy_reg(struct fw_ohci *ohci, int addr, |
504 | int clear_bits, int set_bits) | 527 | int clear_bits, int set_bits) |
505 | { | 528 | { |
506 | struct fw_ohci *ohci = fw_ohci(card); | 529 | int ret = read_phy_reg(ohci, addr); |
507 | int ret; | ||
508 | |||
509 | ret = read_phy_reg(ohci, addr); | ||
510 | if (ret < 0) | 530 | if (ret < 0) |
511 | return ret; | 531 | return ret; |
512 | 532 | ||
@@ -524,13 +544,38 @@ static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr) | |||
524 | { | 544 | { |
525 | int ret; | 545 | int ret; |
526 | 546 | ||
527 | ret = ohci_update_phy_reg(&ohci->card, 7, PHY_PAGE_SELECT, page << 5); | 547 | ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5); |
528 | if (ret < 0) | 548 | if (ret < 0) |
529 | return ret; | 549 | return ret; |
530 | 550 | ||
531 | return read_phy_reg(ohci, addr); | 551 | return read_phy_reg(ohci, addr); |
532 | } | 552 | } |
533 | 553 | ||
554 | static int ohci_read_phy_reg(struct fw_card *card, int addr) | ||
555 | { | ||
556 | struct fw_ohci *ohci = fw_ohci(card); | ||
557 | int ret; | ||
558 | |||
559 | mutex_lock(&ohci->phy_reg_mutex); | ||
560 | ret = read_phy_reg(ohci, addr); | ||
561 | mutex_unlock(&ohci->phy_reg_mutex); | ||
562 | |||
563 | return ret; | ||
564 | } | ||
565 | |||
566 | static int ohci_update_phy_reg(struct fw_card *card, int addr, | ||
567 | int clear_bits, int set_bits) | ||
568 | { | ||
569 | struct fw_ohci *ohci = fw_ohci(card); | ||
570 | int ret; | ||
571 | |||
572 | mutex_lock(&ohci->phy_reg_mutex); | ||
573 | ret = update_phy_reg(ohci, addr, clear_bits, set_bits); | ||
574 | mutex_unlock(&ohci->phy_reg_mutex); | ||
575 | |||
576 | return ret; | ||
577 | } | ||
578 | |||
534 | static int ar_context_add_page(struct ar_context *ctx) | 579 | static int ar_context_add_page(struct ar_context *ctx) |
535 | { | 580 | { |
536 | struct device *dev = ctx->ohci->card.device; | 581 | struct device *dev = ctx->ohci->card.device; |
@@ -553,6 +598,7 @@ static int ar_context_add_page(struct ar_context *ctx) | |||
553 | ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset); | 598 | ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset); |
554 | ab->descriptor.branch_address = 0; | 599 | ab->descriptor.branch_address = 0; |
555 | 600 | ||
601 | wmb(); /* finish init of new descriptors before branch_address update */ | ||
556 | ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1); | 602 | ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1); |
557 | ctx->last_buffer->next = ab; | 603 | ctx->last_buffer->next = ab; |
558 | ctx->last_buffer = ab; | 604 | ctx->last_buffer = ab; |
@@ -940,6 +986,8 @@ static void context_append(struct context *ctx, | |||
940 | d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); | 986 | d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); |
941 | 987 | ||
942 | desc->used += (z + extra) * sizeof(*d); | 988 | desc->used += (z + extra) * sizeof(*d); |
989 | |||
990 | wmb(); /* finish init of new descriptors before branch_address update */ | ||
943 | ctx->prev->branch_address = cpu_to_le32(d_bus | z); | 991 | ctx->prev->branch_address = cpu_to_le32(d_bus | z); |
944 | ctx->prev = find_branch_descriptor(d, z); | 992 | ctx->prev = find_branch_descriptor(d, z); |
945 | 993 | ||
@@ -1026,6 +1074,9 @@ static int at_context_queue_packet(struct context *ctx, | |||
1026 | header[1] = cpu_to_le32(packet->header[0]); | 1074 | header[1] = cpu_to_le32(packet->header[0]); |
1027 | header[2] = cpu_to_le32(packet->header[1]); | 1075 | header[2] = cpu_to_le32(packet->header[1]); |
1028 | d[0].req_count = cpu_to_le16(12); | 1076 | d[0].req_count = cpu_to_le16(12); |
1077 | |||
1078 | if (is_ping_packet(packet->header)) | ||
1079 | d[0].control |= cpu_to_le16(DESCRIPTOR_PING); | ||
1029 | break; | 1080 | break; |
1030 | 1081 | ||
1031 | case 4: | 1082 | case 4: |
@@ -1311,6 +1362,78 @@ static void at_context_transmit(struct context *ctx, struct fw_packet *packet) | |||
1311 | 1362 | ||
1312 | } | 1363 | } |
1313 | 1364 | ||
1365 | static u32 cycle_timer_ticks(u32 cycle_timer) | ||
1366 | { | ||
1367 | u32 ticks; | ||
1368 | |||
1369 | ticks = cycle_timer & 0xfff; | ||
1370 | ticks += 3072 * ((cycle_timer >> 12) & 0x1fff); | ||
1371 | ticks += (3072 * 8000) * (cycle_timer >> 25); | ||
1372 | |||
1373 | return ticks; | ||
1374 | } | ||
1375 | |||
1376 | /* | ||
1377 | * Some controllers exhibit one or more of the following bugs when updating the | ||
1378 | * iso cycle timer register: | ||
1379 | * - When the lowest six bits are wrapping around to zero, a read that happens | ||
1380 | * at the same time will return garbage in the lowest ten bits. | ||
1381 | * - When the cycleOffset field wraps around to zero, the cycleCount field is | ||
1382 | * not incremented for about 60 ns. | ||
1383 | * - Occasionally, the entire register reads zero. | ||
1384 | * | ||
1385 | * To catch these, we read the register three times and ensure that the | ||
1386 | * difference between each two consecutive reads is approximately the same, i.e. | ||
1387 | * less than twice the other. Furthermore, any negative difference indicates an | ||
1388 | * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to | ||
1389 | * execute, so we have enough precision to compute the ratio of the differences.) | ||
1390 | */ | ||
1391 | static u32 get_cycle_time(struct fw_ohci *ohci) | ||
1392 | { | ||
1393 | u32 c0, c1, c2; | ||
1394 | u32 t0, t1, t2; | ||
1395 | s32 diff01, diff12; | ||
1396 | int i; | ||
1397 | |||
1398 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); | ||
1399 | |||
1400 | if (ohci->quirks & QUIRK_CYCLE_TIMER) { | ||
1401 | i = 0; | ||
1402 | c1 = c2; | ||
1403 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); | ||
1404 | do { | ||
1405 | c0 = c1; | ||
1406 | c1 = c2; | ||
1407 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); | ||
1408 | t0 = cycle_timer_ticks(c0); | ||
1409 | t1 = cycle_timer_ticks(c1); | ||
1410 | t2 = cycle_timer_ticks(c2); | ||
1411 | diff01 = t1 - t0; | ||
1412 | diff12 = t2 - t1; | ||
1413 | } while ((diff01 <= 0 || diff12 <= 0 || | ||
1414 | diff01 / diff12 >= 2 || diff12 / diff01 >= 2) | ||
1415 | && i++ < 20); | ||
1416 | } | ||
1417 | |||
1418 | return c2; | ||
1419 | } | ||
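
For reference, cycle_timer_ticks() flattens the three CYCLE_TIME fields into
ticks of the 24.576 MHz clock (3072 ticks per 125 us isochronous cycle, 8000
cycles per second). A standalone sketch of that decomposition, not driver code:

/* Sketch: unpack a cycle timer value the way cycle_timer_ticks() consumes it. */
#include <stdint.h>
#include <stdio.h>

void dump_cycle_time(uint32_t c)
{
        uint32_t seconds = c >> 25;             /* bits 31-25 */
        uint32_t count   = (c >> 12) & 0x1fff;  /* bits 24-12, 0..7999 */
        uint32_t offset  = c & 0xfff;           /* bits 11-0,  0..3071 */
        uint64_t ticks   = offset + 3072ull * count + 3072ull * 8000 * seconds;

        printf("%us %uc %uo -> %llu ticks\n",
               seconds, count, offset, (unsigned long long)ticks);
}
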
1420 | |||
1421 | /* | ||
1422 | * This function has to be called at least every 64 seconds. The bus_time | ||
1423 | * field stores not only the upper 25 bits of the BUS_TIME register but also | ||
1424 | * the most significant bit of the cycle timer in bit 6 so that we can detect | ||
1425 | * changes in this bit. | ||
1426 | */ | ||
1427 | static u32 update_bus_time(struct fw_ohci *ohci) | ||
1428 | { | ||
1429 | u32 cycle_time_seconds = get_cycle_time(ohci) >> 25; | ||
1430 | |||
1431 | if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40)) | ||
1432 | ohci->bus_time += 0x40; | ||
1433 | |||
1434 | return ohci->bus_time | cycle_time_seconds; | ||
1435 | } | ||
1436 | |||
1314 | static void bus_reset_tasklet(unsigned long data) | 1437 | static void bus_reset_tasklet(unsigned long data) |
1315 | { | 1438 | { |
1316 | struct fw_ohci *ohci = (struct fw_ohci *)data; | 1439 | struct fw_ohci *ohci = (struct fw_ohci *)data; |
@@ -1319,6 +1442,7 @@ static void bus_reset_tasklet(unsigned long data) | |||
1319 | unsigned long flags; | 1442 | unsigned long flags; |
1320 | void *free_rom = NULL; | 1443 | void *free_rom = NULL; |
1321 | dma_addr_t free_rom_bus = 0; | 1444 | dma_addr_t free_rom_bus = 0; |
1445 | bool is_new_root; | ||
1322 | 1446 | ||
1323 | reg = reg_read(ohci, OHCI1394_NodeID); | 1447 | reg = reg_read(ohci, OHCI1394_NodeID); |
1324 | if (!(reg & OHCI1394_NodeID_idValid)) { | 1448 | if (!(reg & OHCI1394_NodeID_idValid)) { |
@@ -1332,6 +1456,12 @@ static void bus_reset_tasklet(unsigned long data) | |||
1332 | ohci->node_id = reg & (OHCI1394_NodeID_busNumber | | 1456 | ohci->node_id = reg & (OHCI1394_NodeID_busNumber | |
1333 | OHCI1394_NodeID_nodeNumber); | 1457 | OHCI1394_NodeID_nodeNumber); |
1334 | 1458 | ||
1459 | is_new_root = (reg & OHCI1394_NodeID_root) != 0; | ||
1460 | if (!(ohci->is_root && is_new_root)) | ||
1461 | reg_write(ohci, OHCI1394_LinkControlSet, | ||
1462 | OHCI1394_LinkControl_cycleMaster); | ||
1463 | ohci->is_root = is_new_root; | ||
1464 | |||
1335 | reg = reg_read(ohci, OHCI1394_SelfIDCount); | 1465 | reg = reg_read(ohci, OHCI1394_SelfIDCount); |
1336 | if (reg & OHCI1394_SelfIDCount_selfIDError) { | 1466 | if (reg & OHCI1394_SelfIDCount_selfIDError) { |
1337 | fw_notify("inconsistent self IDs\n"); | 1467 | fw_notify("inconsistent self IDs\n"); |
@@ -1439,7 +1569,9 @@ static void bus_reset_tasklet(unsigned long data) | |||
1439 | self_id_count, ohci->self_id_buffer); | 1569 | self_id_count, ohci->self_id_buffer); |
1440 | 1570 | ||
1441 | fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, | 1571 | fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, |
1442 | self_id_count, ohci->self_id_buffer); | 1572 | self_id_count, ohci->self_id_buffer, |
1573 | ohci->csr_state_setclear_abdicate); | ||
1574 | ohci->csr_state_setclear_abdicate = false; | ||
1443 | } | 1575 | } |
1444 | 1576 | ||
1445 | static irqreturn_t irq_handler(int irq, void *data) | 1577 | static irqreturn_t irq_handler(int irq, void *data) |
@@ -1515,6 +1647,12 @@ static irqreturn_t irq_handler(int irq, void *data) | |||
1515 | fw_notify("isochronous cycle inconsistent\n"); | 1647 | fw_notify("isochronous cycle inconsistent\n"); |
1516 | } | 1648 | } |
1517 | 1649 | ||
1650 | if (event & OHCI1394_cycle64Seconds) { | ||
1651 | spin_lock(&ohci->lock); | ||
1652 | update_bus_time(ohci); | ||
1653 | spin_unlock(&ohci->lock); | ||
1654 | } | ||
1655 | |||
1518 | return IRQ_HANDLED; | 1656 | return IRQ_HANDLED; |
1519 | } | 1657 | } |
1520 | 1658 | ||
@@ -1577,7 +1715,7 @@ static int configure_1394a_enhancements(struct fw_ohci *ohci) | |||
1577 | clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; | 1715 | clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; |
1578 | set = 0; | 1716 | set = 0; |
1579 | } | 1717 | } |
1580 | ret = ohci_update_phy_reg(&ohci->card, 5, clear, set); | 1718 | ret = update_phy_reg(ohci, 5, clear, set); |
1581 | if (ret < 0) | 1719 | if (ret < 0) |
1582 | return ret; | 1720 | return ret; |
1583 | 1721 | ||
@@ -1599,7 +1737,7 @@ static int ohci_enable(struct fw_card *card, | |||
1599 | { | 1737 | { |
1600 | struct fw_ohci *ohci = fw_ohci(card); | 1738 | struct fw_ohci *ohci = fw_ohci(card); |
1601 | struct pci_dev *dev = to_pci_dev(card->device); | 1739 | struct pci_dev *dev = to_pci_dev(card->device); |
1602 | u32 lps; | 1740 | u32 lps, seconds, version, irqs; |
1603 | int i, ret; | 1741 | int i, ret; |
1604 | 1742 | ||
1605 | if (software_reset(ohci)) { | 1743 | if (software_reset(ohci)) { |
@@ -1635,17 +1773,34 @@ static int ohci_enable(struct fw_card *card, | |||
1635 | OHCI1394_HCControl_noByteSwapData); | 1773 | OHCI1394_HCControl_noByteSwapData); |
1636 | 1774 | ||
1637 | reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); | 1775 | reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); |
1638 | reg_write(ohci, OHCI1394_LinkControlClear, | ||
1639 | OHCI1394_LinkControl_rcvPhyPkt); | ||
1640 | reg_write(ohci, OHCI1394_LinkControlSet, | 1776 | reg_write(ohci, OHCI1394_LinkControlSet, |
1641 | OHCI1394_LinkControl_rcvSelfID | | 1777 | OHCI1394_LinkControl_rcvSelfID | |
1778 | OHCI1394_LinkControl_rcvPhyPkt | | ||
1642 | OHCI1394_LinkControl_cycleTimerEnable | | 1779 | OHCI1394_LinkControl_cycleTimerEnable | |
1643 | OHCI1394_LinkControl_cycleMaster); | 1780 | OHCI1394_LinkControl_cycleMaster); |
1644 | 1781 | ||
1645 | reg_write(ohci, OHCI1394_ATRetries, | 1782 | reg_write(ohci, OHCI1394_ATRetries, |
1646 | OHCI1394_MAX_AT_REQ_RETRIES | | 1783 | OHCI1394_MAX_AT_REQ_RETRIES | |
1647 | (OHCI1394_MAX_AT_RESP_RETRIES << 4) | | 1784 | (OHCI1394_MAX_AT_RESP_RETRIES << 4) | |
1648 | (OHCI1394_MAX_PHYS_RESP_RETRIES << 8)); | 1785 | (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) | |
1786 | (200 << 16)); | ||
1787 | |||
1788 | seconds = lower_32_bits(get_seconds()); | ||
1789 | reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25); | ||
1790 | ohci->bus_time = seconds & ~0x3f; | ||
1791 | |||
1792 | version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; | ||
1793 | if (version >= OHCI_VERSION_1_1) { | ||
1794 | reg_write(ohci, OHCI1394_InitialChannelsAvailableHi, | ||
1795 | 0xfffffffe); | ||
1796 | card->broadcast_channel_auto_allocated = true; | ||
1797 | } | ||
1798 | |||
1799 | /* Get implemented bits of the priority arbitration request counter. */ | ||
1800 | reg_write(ohci, OHCI1394_FairnessControl, 0x3f); | ||
1801 | ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f; | ||
1802 | reg_write(ohci, OHCI1394_FairnessControl, 0); | ||
1803 | card->priority_budget_implemented = ohci->pri_req_max != 0; | ||
1649 | 1804 | ||
1650 | ar_context_run(&ohci->ar_request_ctx); | 1805 | ar_context_run(&ohci->ar_request_ctx); |
1651 | ar_context_run(&ohci->ar_response_ctx); | 1806 | ar_context_run(&ohci->ar_response_ctx); |
@@ -1653,16 +1808,6 @@ static int ohci_enable(struct fw_card *card, | |||
1653 | reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000); | 1808 | reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000); |
1654 | reg_write(ohci, OHCI1394_IntEventClear, ~0); | 1809 | reg_write(ohci, OHCI1394_IntEventClear, ~0); |
1655 | reg_write(ohci, OHCI1394_IntMaskClear, ~0); | 1810 | reg_write(ohci, OHCI1394_IntMaskClear, ~0); |
1656 | reg_write(ohci, OHCI1394_IntMaskSet, | ||
1657 | OHCI1394_selfIDComplete | | ||
1658 | OHCI1394_RQPkt | OHCI1394_RSPkt | | ||
1659 | OHCI1394_reqTxComplete | OHCI1394_respTxComplete | | ||
1660 | OHCI1394_isochRx | OHCI1394_isochTx | | ||
1661 | OHCI1394_postedWriteErr | OHCI1394_cycleTooLong | | ||
1662 | OHCI1394_cycleInconsistent | OHCI1394_regAccessFail | | ||
1663 | OHCI1394_masterIntEnable); | ||
1664 | if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) | ||
1665 | reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); | ||
1666 | 1811 | ||
1667 | ret = configure_1394a_enhancements(ohci); | 1812 | ret = configure_1394a_enhancements(ohci); |
1668 | if (ret < 0) | 1813 | if (ret < 0) |
@@ -1719,26 +1864,38 @@ static int ohci_enable(struct fw_card *card, | |||
1719 | 1864 | ||
1720 | reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); | 1865 | reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); |
1721 | 1866 | ||
1867 | if (!(ohci->quirks & QUIRK_NO_MSI)) | ||
1868 | pci_enable_msi(dev); | ||
1722 | if (request_irq(dev->irq, irq_handler, | 1869 | if (request_irq(dev->irq, irq_handler, |
1723 | IRQF_SHARED, ohci_driver_name, ohci)) { | 1870 | pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, |
1724 | fw_error("Failed to allocate shared interrupt %d.\n", | 1871 | ohci_driver_name, ohci)) { |
1725 | dev->irq); | 1872 | fw_error("Failed to allocate interrupt %d.\n", dev->irq); |
1873 | pci_disable_msi(dev); | ||
1726 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | 1874 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
1727 | ohci->config_rom, ohci->config_rom_bus); | 1875 | ohci->config_rom, ohci->config_rom_bus); |
1728 | return -EIO; | 1876 | return -EIO; |
1729 | } | 1877 | } |
1730 | 1878 | ||
1879 | irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete | | ||
1880 | OHCI1394_RQPkt | OHCI1394_RSPkt | | ||
1881 | OHCI1394_isochTx | OHCI1394_isochRx | | ||
1882 | OHCI1394_postedWriteErr | | ||
1883 | OHCI1394_selfIDComplete | | ||
1884 | OHCI1394_regAccessFail | | ||
1885 | OHCI1394_cycle64Seconds | | ||
1886 | OHCI1394_cycleInconsistent | OHCI1394_cycleTooLong | | ||
1887 | OHCI1394_masterIntEnable; | ||
1888 | if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) | ||
1889 | irqs |= OHCI1394_busReset; | ||
1890 | reg_write(ohci, OHCI1394_IntMaskSet, irqs); | ||
1891 | |||
1731 | reg_write(ohci, OHCI1394_HCControlSet, | 1892 | reg_write(ohci, OHCI1394_HCControlSet, |
1732 | OHCI1394_HCControl_linkEnable | | 1893 | OHCI1394_HCControl_linkEnable | |
1733 | OHCI1394_HCControl_BIBimageValid); | 1894 | OHCI1394_HCControl_BIBimageValid); |
1734 | flush_writes(ohci); | 1895 | flush_writes(ohci); |
1735 | 1896 | ||
1736 | /* | 1897 | /* We are ready to go, reset bus to finish initialization. */ |
1737 | * We are ready to go, initiate bus reset to finish the | 1898 | fw_schedule_bus_reset(&ohci->card, false, true); |
1738 | * initialization. | ||
1739 | */ | ||
1740 | |||
1741 | fw_core_initiate_bus_reset(&ohci->card, 1); | ||
1742 | 1899 | ||
1743 | return 0; | 1900 | return 0; |
1744 | } | 1901 | } |
@@ -1813,7 +1970,7 @@ static int ohci_set_config_rom(struct fw_card *card, | |||
1813 | * takes effect. | 1970 | * takes effect. |
1814 | */ | 1971 | */ |
1815 | if (ret == 0) | 1972 | if (ret == 0) |
1816 | fw_core_initiate_bus_reset(&ohci->card, 1); | 1973 | fw_schedule_bus_reset(&ohci->card, true, true); |
1817 | else | 1974 | else |
1818 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | 1975 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
1819 | next_config_rom, next_config_rom_bus); | 1976 | next_config_rom, next_config_rom_bus); |
@@ -1903,61 +2060,117 @@ static int ohci_enable_phys_dma(struct fw_card *card, | |||
1903 | #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ | 2060 | #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ |
1904 | } | 2061 | } |
1905 | 2062 | ||
1906 | static u32 cycle_timer_ticks(u32 cycle_timer) | 2063 | static u32 ohci_read_csr(struct fw_card *card, int csr_offset) |
1907 | { | 2064 | { |
1908 | u32 ticks; | 2065 | struct fw_ohci *ohci = fw_ohci(card); |
2066 | unsigned long flags; | ||
2067 | u32 value; | ||
2068 | |||
2069 | switch (csr_offset) { | ||
2070 | case CSR_STATE_CLEAR: | ||
2071 | case CSR_STATE_SET: | ||
2072 | if (ohci->is_root && | ||
2073 | (reg_read(ohci, OHCI1394_LinkControlSet) & | ||
2074 | OHCI1394_LinkControl_cycleMaster)) | ||
2075 | value = CSR_STATE_BIT_CMSTR; | ||
2076 | else | ||
2077 | value = 0; | ||
2078 | if (ohci->csr_state_setclear_abdicate) | ||
2079 | value |= CSR_STATE_BIT_ABDICATE; | ||
1909 | 2080 | ||
1910 | ticks = cycle_timer & 0xfff; | 2081 | return value; |
1911 | ticks += 3072 * ((cycle_timer >> 12) & 0x1fff); | ||
1912 | ticks += (3072 * 8000) * (cycle_timer >> 25); | ||
1913 | 2082 | ||
1914 | return ticks; | 2083 | case CSR_NODE_IDS: |
2084 | return reg_read(ohci, OHCI1394_NodeID) << 16; | ||
2085 | |||
2086 | case CSR_CYCLE_TIME: | ||
2087 | return get_cycle_time(ohci); | ||
2088 | |||
2089 | case CSR_BUS_TIME: | ||
2090 | /* | ||
2091 | * We might be called just after the cycle timer has wrapped | ||
2092 | * around but just before the cycle64Seconds handler, so we | ||
2093 | * better check here, too, if the bus time needs to be updated. | ||
2094 | */ | ||
2095 | spin_lock_irqsave(&ohci->lock, flags); | ||
2096 | value = update_bus_time(ohci); | ||
2097 | spin_unlock_irqrestore(&ohci->lock, flags); | ||
2098 | return value; | ||
2099 | |||
2100 | case CSR_BUSY_TIMEOUT: | ||
2101 | value = reg_read(ohci, OHCI1394_ATRetries); | ||
2102 | return (value >> 4) & 0x0ffff00f; | ||
2103 | |||
2104 | case CSR_PRIORITY_BUDGET: | ||
2105 | return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) | | ||
2106 | (ohci->pri_req_max << 8); | ||
2107 | |||
2108 | default: | ||
2109 | WARN_ON(1); | ||
2110 | return 0; | ||
2111 | } | ||
1915 | } | 2112 | } |
1916 | 2113 | ||
1917 | /* | 2114 | static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) |
1918 | * Some controllers exhibit one or more of the following bugs when updating the | ||
1919 | * iso cycle timer register: | ||
1920 | * - When the lowest six bits are wrapping around to zero, a read that happens | ||
1921 | * at the same time will return garbage in the lowest ten bits. | ||
1922 | * - When the cycleOffset field wraps around to zero, the cycleCount field is | ||
1923 | * not incremented for about 60 ns. | ||
1924 | * - Occasionally, the entire register reads zero. | ||
1925 | * | ||
1926 | * To catch these, we read the register three times and ensure that the | ||
1927 | * difference between each two consecutive reads is approximately the same, i.e. | ||
1928 | * less than twice the other. Furthermore, any negative difference indicates an | ||
1929 | * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to | ||
1930 | * execute, so we have enough precision to compute the ratio of the differences.) | ||
1931 | */ | ||
1932 | static u32 ohci_get_cycle_time(struct fw_card *card) | ||
1933 | { | 2115 | { |
1934 | struct fw_ohci *ohci = fw_ohci(card); | 2116 | struct fw_ohci *ohci = fw_ohci(card); |
1935 | u32 c0, c1, c2; | 2117 | unsigned long flags; |
1936 | u32 t0, t1, t2; | ||
1937 | s32 diff01, diff12; | ||
1938 | int i; | ||
1939 | 2118 | ||
1940 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); | 2119 | switch (csr_offset) { |
2120 | case CSR_STATE_CLEAR: | ||
2121 | if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { | ||
2122 | reg_write(ohci, OHCI1394_LinkControlClear, | ||
2123 | OHCI1394_LinkControl_cycleMaster); | ||
2124 | flush_writes(ohci); | ||
2125 | } | ||
2126 | if (value & CSR_STATE_BIT_ABDICATE) | ||
2127 | ohci->csr_state_setclear_abdicate = false; | ||
2128 | break; | ||
1941 | 2129 | ||
1942 | if (ohci->quirks & QUIRK_CYCLE_TIMER) { | 2130 | case CSR_STATE_SET: |
1943 | i = 0; | 2131 | if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { |
1944 | c1 = c2; | 2132 | reg_write(ohci, OHCI1394_LinkControlSet, |
1945 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); | 2133 | OHCI1394_LinkControl_cycleMaster); |
1946 | do { | 2134 | flush_writes(ohci); |
1947 | c0 = c1; | 2135 | } |
1948 | c1 = c2; | 2136 | if (value & CSR_STATE_BIT_ABDICATE) |
1949 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); | 2137 | ohci->csr_state_setclear_abdicate = true; |
1950 | t0 = cycle_timer_ticks(c0); | 2138 | break; |
1951 | t1 = cycle_timer_ticks(c1); | ||
1952 | t2 = cycle_timer_ticks(c2); | ||
1953 | diff01 = t1 - t0; | ||
1954 | diff12 = t2 - t1; | ||
1955 | } while ((diff01 <= 0 || diff12 <= 0 || | ||
1956 | diff01 / diff12 >= 2 || diff12 / diff01 >= 2) | ||
1957 | && i++ < 20); | ||
1958 | } | ||
1959 | 2139 | ||
1960 | return c2; | 2140 | case CSR_NODE_IDS: |
2141 | reg_write(ohci, OHCI1394_NodeID, value >> 16); | ||
2142 | flush_writes(ohci); | ||
2143 | break; | ||
2144 | |||
2145 | case CSR_CYCLE_TIME: | ||
2146 | reg_write(ohci, OHCI1394_IsochronousCycleTimer, value); | ||
2147 | reg_write(ohci, OHCI1394_IntEventSet, | ||
2148 | OHCI1394_cycleInconsistent); | ||
2149 | flush_writes(ohci); | ||
2150 | break; | ||
2151 | |||
2152 | case CSR_BUS_TIME: | ||
2153 | spin_lock_irqsave(&ohci->lock, flags); | ||
2154 | ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f); | ||
2155 | spin_unlock_irqrestore(&ohci->lock, flags); | ||
2156 | break; | ||
2157 | |||
2158 | case CSR_BUSY_TIMEOUT: | ||
2159 | value = (value & 0xf) | ((value & 0xf) << 4) | | ||
2160 | ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4); | ||
2161 | reg_write(ohci, OHCI1394_ATRetries, value); | ||
2162 | flush_writes(ohci); | ||
2163 | break; | ||
2164 | |||
2165 | case CSR_PRIORITY_BUDGET: | ||
2166 | reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f); | ||
2167 | flush_writes(ohci); | ||
2168 | break; | ||
2169 | |||
2170 | default: | ||
2171 | WARN_ON(1); | ||
2172 | break; | ||
2173 | } | ||
1961 | } | 2174 | } |
1962 | 2175 | ||
1963 | static void copy_iso_headers(struct iso_context *ctx, void *p) | 2176 | static void copy_iso_headers(struct iso_context *ctx, void *p) |
@@ -1992,10 +2205,9 @@ static int handle_ir_packet_per_buffer(struct context *context, | |||
1992 | __le32 *ir_header; | 2205 | __le32 *ir_header; |
1993 | void *p; | 2206 | void *p; |
1994 | 2207 | ||
1995 | for (pd = d; pd <= last; pd++) { | 2208 | for (pd = d; pd <= last; pd++) |
1996 | if (pd->transfer_status) | 2209 | if (pd->transfer_status) |
1997 | break; | 2210 | break; |
1998 | } | ||
1999 | if (pd > last) | 2211 | if (pd > last) |
2000 | /* Descriptor(s) not done yet, stop iteration */ | 2212 | /* Descriptor(s) not done yet, stop iteration */ |
2001 | return 0; | 2213 | return 0; |
@@ -2005,16 +2217,38 @@ static int handle_ir_packet_per_buffer(struct context *context, | |||
2005 | 2217 | ||
2006 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { | 2218 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { |
2007 | ir_header = (__le32 *) p; | 2219 | ir_header = (__le32 *) p; |
2008 | ctx->base.callback(&ctx->base, | 2220 | ctx->base.callback.sc(&ctx->base, |
2009 | le32_to_cpu(ir_header[0]) & 0xffff, | 2221 | le32_to_cpu(ir_header[0]) & 0xffff, |
2010 | ctx->header_length, ctx->header, | 2222 | ctx->header_length, ctx->header, |
2011 | ctx->base.callback_data); | 2223 | ctx->base.callback_data); |
2012 | ctx->header_length = 0; | 2224 | ctx->header_length = 0; |
2013 | } | 2225 | } |
2014 | 2226 | ||
2015 | return 1; | 2227 | return 1; |
2016 | } | 2228 | } |
2017 | 2229 | ||
2230 | /* d == last because each descriptor block is only a single descriptor. */ | ||
2231 | static int handle_ir_buffer_fill(struct context *context, | ||
2232 | struct descriptor *d, | ||
2233 | struct descriptor *last) | ||
2234 | { | ||
2235 | struct iso_context *ctx = | ||
2236 | container_of(context, struct iso_context, context); | ||
2237 | |||
2238 | if (!last->transfer_status) | ||
2239 | /* Descriptor(s) not done yet, stop iteration */ | ||
2240 | return 0; | ||
2241 | |||
2242 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) | ||
2243 | ctx->base.callback.mc(&ctx->base, | ||
2244 | le32_to_cpu(last->data_address) + | ||
2245 | le16_to_cpu(last->req_count) - | ||
2246 | le16_to_cpu(last->res_count), | ||
2247 | ctx->base.callback_data); | ||
2248 | |||
2249 | return 1; | ||
2250 | } | ||
2251 | |||
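handle_ir_buffer_fill() above reports how far the controller has filled the buffer: res_count starts at req_count and counts down as data arrives, so the completed address is data_address plus the bytes already written. A small sketch of that arithmetic (struct and function names are illustrative, not from the patch):

#include <stdint.h>

struct example_descriptor {
        uint32_t data_address;  /* bus address this descriptor fills */
        uint16_t req_count;     /* bytes requested for this descriptor */
        uint16_t res_count;     /* bytes not yet filled; counts down to 0 */
};

/* Address just past the last byte the controller has written so far. */
static uint32_t completed_offset(const struct example_descriptor *d)
{
        return d->data_address + d->req_count - d->res_count;
}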
2018 | static int handle_it_packet(struct context *context, | 2252 | static int handle_it_packet(struct context *context, |
2019 | struct descriptor *d, | 2253 | struct descriptor *d, |
2020 | struct descriptor *last) | 2254 | struct descriptor *last) |
@@ -2040,71 +2274,118 @@ static int handle_it_packet(struct context *context, | |||
2040 | ctx->header_length += 4; | 2274 | ctx->header_length += 4; |
2041 | } | 2275 | } |
2042 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { | 2276 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { |
2043 | ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count), | 2277 | ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count), |
2044 | ctx->header_length, ctx->header, | 2278 | ctx->header_length, ctx->header, |
2045 | ctx->base.callback_data); | 2279 | ctx->base.callback_data); |
2046 | ctx->header_length = 0; | 2280 | ctx->header_length = 0; |
2047 | } | 2281 | } |
2048 | return 1; | 2282 | return 1; |
2049 | } | 2283 | } |
2050 | 2284 | ||
2285 | static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels) | ||
2286 | { | ||
2287 | u32 hi = channels >> 32, lo = channels; | ||
2288 | |||
2289 | reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi); | ||
2290 | reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo); | ||
2291 | reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi); | ||
2292 | reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo); | ||
2293 | mmiowb(); | ||
2294 | ohci->mc_channels = channels; | ||
2295 | } | ||
2296 | |||
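set_multichannel_mask() above uses the OHCI convention of paired Set/Clear registers: 1-bits written to the Clear register are cleared, 1-bits written to the Set register are set, and 0-bits leave the register alone. Clearing ~mask and then setting mask therefore installs exactly the requested channel mask in each 32-bit register half, regardless of its previous contents. A purely illustrative sketch, modelling one register half as a plain variable:

#include <stdint.h>

static uint32_t write_clear(uint32_t reg, uint32_t bits) { return reg & ~bits; }
static uint32_t write_set(uint32_t reg, uint32_t bits)   { return reg | bits; }

static uint32_t install_mask(uint32_t reg, uint32_t requested)
{
        reg = write_clear(reg, ~requested);     /* drop everything not requested */
        reg = write_set(reg, requested);        /* enable the requested channels */
        return reg;                             /* always equals "requested" */
}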
2051 | static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, | 2297 | static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, |
2052 | int type, int channel, size_t header_size) | 2298 | int type, int channel, size_t header_size) |
2053 | { | 2299 | { |
2054 | struct fw_ohci *ohci = fw_ohci(card); | 2300 | struct fw_ohci *ohci = fw_ohci(card); |
2055 | struct iso_context *ctx, *list; | 2301 | struct iso_context *uninitialized_var(ctx); |
2056 | descriptor_callback_t callback; | 2302 | descriptor_callback_t uninitialized_var(callback); |
2057 | u64 *channels, dont_care = ~0ULL; | 2303 | u64 *uninitialized_var(channels); |
2058 | u32 *mask, regs; | 2304 | u32 *uninitialized_var(mask), uninitialized_var(regs); |
2059 | unsigned long flags; | 2305 | unsigned long flags; |
2060 | int index, ret = -ENOMEM; | 2306 | int index, ret = -EBUSY; |
2307 | |||
2308 | spin_lock_irqsave(&ohci->lock, flags); | ||
2061 | 2309 | ||
2062 | if (type == FW_ISO_CONTEXT_TRANSMIT) { | 2310 | switch (type) { |
2063 | channels = &dont_care; | 2311 | case FW_ISO_CONTEXT_TRANSMIT: |
2064 | mask = &ohci->it_context_mask; | 2312 | mask = &ohci->it_context_mask; |
2065 | list = ohci->it_context_list; | ||
2066 | callback = handle_it_packet; | 2313 | callback = handle_it_packet; |
2067 | } else { | 2314 | index = ffs(*mask) - 1; |
2315 | if (index >= 0) { | ||
2316 | *mask &= ~(1 << index); | ||
2317 | regs = OHCI1394_IsoXmitContextBase(index); | ||
2318 | ctx = &ohci->it_context_list[index]; | ||
2319 | } | ||
2320 | break; | ||
2321 | |||
2322 | case FW_ISO_CONTEXT_RECEIVE: | ||
2068 | channels = &ohci->ir_context_channels; | 2323 | channels = &ohci->ir_context_channels; |
2069 | mask = &ohci->ir_context_mask; | 2324 | mask = &ohci->ir_context_mask; |
2070 | list = ohci->ir_context_list; | ||
2071 | callback = handle_ir_packet_per_buffer; | 2325 | callback = handle_ir_packet_per_buffer; |
2072 | } | 2326 | index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; |
2327 | if (index >= 0) { | ||
2328 | *channels &= ~(1ULL << channel); | ||
2329 | *mask &= ~(1 << index); | ||
2330 | regs = OHCI1394_IsoRcvContextBase(index); | ||
2331 | ctx = &ohci->ir_context_list[index]; | ||
2332 | } | ||
2333 | break; | ||
2073 | 2334 | ||
2074 | spin_lock_irqsave(&ohci->lock, flags); | 2335 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
2075 | index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; | 2336 | mask = &ohci->ir_context_mask; |
2076 | if (index >= 0) { | 2337 | callback = handle_ir_buffer_fill; |
2077 | *channels &= ~(1ULL << channel); | 2338 | index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1; |
2078 | *mask &= ~(1 << index); | 2339 | if (index >= 0) { |
2340 | ohci->mc_allocated = true; | ||
2341 | *mask &= ~(1 << index); | ||
2342 | regs = OHCI1394_IsoRcvContextBase(index); | ||
2343 | ctx = &ohci->ir_context_list[index]; | ||
2344 | } | ||
2345 | break; | ||
2346 | |||
2347 | default: | ||
2348 | index = -1; | ||
2349 | ret = -ENOSYS; | ||
2079 | } | 2350 | } |
2351 | |||
2080 | spin_unlock_irqrestore(&ohci->lock, flags); | 2352 | spin_unlock_irqrestore(&ohci->lock, flags); |
2081 | 2353 | ||
2082 | if (index < 0) | 2354 | if (index < 0) |
2083 | return ERR_PTR(-EBUSY); | 2355 | return ERR_PTR(ret); |
2084 | |||
2085 | if (type == FW_ISO_CONTEXT_TRANSMIT) | ||
2086 | regs = OHCI1394_IsoXmitContextBase(index); | ||
2087 | else | ||
2088 | regs = OHCI1394_IsoRcvContextBase(index); | ||
2089 | 2356 | ||
2090 | ctx = &list[index]; | ||
2091 | memset(ctx, 0, sizeof(*ctx)); | 2357 | memset(ctx, 0, sizeof(*ctx)); |
2092 | ctx->header_length = 0; | 2358 | ctx->header_length = 0; |
2093 | ctx->header = (void *) __get_free_page(GFP_KERNEL); | 2359 | ctx->header = (void *) __get_free_page(GFP_KERNEL); |
2094 | if (ctx->header == NULL) | 2360 | if (ctx->header == NULL) { |
2361 | ret = -ENOMEM; | ||
2095 | goto out; | 2362 | goto out; |
2096 | 2363 | } | |
2097 | ret = context_init(&ctx->context, ohci, regs, callback); | 2364 | ret = context_init(&ctx->context, ohci, regs, callback); |
2098 | if (ret < 0) | 2365 | if (ret < 0) |
2099 | goto out_with_header; | 2366 | goto out_with_header; |
2100 | 2367 | ||
2368 | if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) | ||
2369 | set_multichannel_mask(ohci, 0); | ||
2370 | |||
2101 | return &ctx->base; | 2371 | return &ctx->base; |
2102 | 2372 | ||
2103 | out_with_header: | 2373 | out_with_header: |
2104 | free_page((unsigned long)ctx->header); | 2374 | free_page((unsigned long)ctx->header); |
2105 | out: | 2375 | out: |
2106 | spin_lock_irqsave(&ohci->lock, flags); | 2376 | spin_lock_irqsave(&ohci->lock, flags); |
2377 | |||
2378 | switch (type) { | ||
2379 | case FW_ISO_CONTEXT_RECEIVE: | ||
2380 | *channels |= 1ULL << channel; | ||
2381 | break; | ||
2382 | |||
2383 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | ||
2384 | ohci->mc_allocated = false; | ||
2385 | break; | ||
2386 | } | ||
2107 | *mask |= 1 << index; | 2387 | *mask |= 1 << index; |
2388 | |||
2108 | spin_unlock_irqrestore(&ohci->lock, flags); | 2389 | spin_unlock_irqrestore(&ohci->lock, flags); |
2109 | 2390 | ||
2110 | return ERR_PTR(ret); | 2391 | return ERR_PTR(ret); |
@@ -2115,10 +2396,11 @@ static int ohci_start_iso(struct fw_iso_context *base, | |||
2115 | { | 2396 | { |
2116 | struct iso_context *ctx = container_of(base, struct iso_context, base); | 2397 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
2117 | struct fw_ohci *ohci = ctx->context.ohci; | 2398 | struct fw_ohci *ohci = ctx->context.ohci; |
2118 | u32 control, match; | 2399 | u32 control = IR_CONTEXT_ISOCH_HEADER, match; |
2119 | int index; | 2400 | int index; |
2120 | 2401 | ||
2121 | if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) { | 2402 | switch (ctx->base.type) { |
2403 | case FW_ISO_CONTEXT_TRANSMIT: | ||
2122 | index = ctx - ohci->it_context_list; | 2404 | index = ctx - ohci->it_context_list; |
2123 | match = 0; | 2405 | match = 0; |
2124 | if (cycle >= 0) | 2406 | if (cycle >= 0) |
@@ -2128,9 +2410,13 @@ static int ohci_start_iso(struct fw_iso_context *base, | |||
2128 | reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index); | 2410 | reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index); |
2129 | reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); | 2411 | reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); |
2130 | context_run(&ctx->context, match); | 2412 | context_run(&ctx->context, match); |
2131 | } else { | 2413 | break; |
2414 | |||
2415 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | ||
2416 | control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE; | ||
2417 | /* fall through */ | ||
2418 | case FW_ISO_CONTEXT_RECEIVE: | ||
2132 | index = ctx - ohci->ir_context_list; | 2419 | index = ctx - ohci->ir_context_list; |
2133 | control = IR_CONTEXT_ISOCH_HEADER; | ||
2134 | match = (tags << 28) | (sync << 8) | ctx->base.channel; | 2420 | match = (tags << 28) | (sync << 8) | ctx->base.channel; |
2135 | if (cycle >= 0) { | 2421 | if (cycle >= 0) { |
2136 | match |= (cycle & 0x07fff) << 12; | 2422 | match |= (cycle & 0x07fff) << 12; |
@@ -2141,6 +2427,7 @@ static int ohci_start_iso(struct fw_iso_context *base, | |||
2141 | reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); | 2427 | reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); |
2142 | reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); | 2428 | reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); |
2143 | context_run(&ctx->context, control); | 2429 | context_run(&ctx->context, control); |
2430 | break; | ||
2144 | } | 2431 | } |
2145 | 2432 | ||
2146 | return 0; | 2433 | return 0; |
@@ -2152,12 +2439,17 @@ static int ohci_stop_iso(struct fw_iso_context *base) | |||
2152 | struct iso_context *ctx = container_of(base, struct iso_context, base); | 2439 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
2153 | int index; | 2440 | int index; |
2154 | 2441 | ||
2155 | if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) { | 2442 | switch (ctx->base.type) { |
2443 | case FW_ISO_CONTEXT_TRANSMIT: | ||
2156 | index = ctx - ohci->it_context_list; | 2444 | index = ctx - ohci->it_context_list; |
2157 | reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index); | 2445 | reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index); |
2158 | } else { | 2446 | break; |
2447 | |||
2448 | case FW_ISO_CONTEXT_RECEIVE: | ||
2449 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | ||
2159 | index = ctx - ohci->ir_context_list; | 2450 | index = ctx - ohci->ir_context_list; |
2160 | reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index); | 2451 | reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index); |
2452 | break; | ||
2161 | } | 2453 | } |
2162 | flush_writes(ohci); | 2454 | flush_writes(ohci); |
2163 | context_stop(&ctx->context); | 2455 | context_stop(&ctx->context); |
@@ -2178,24 +2470,65 @@ static void ohci_free_iso_context(struct fw_iso_context *base) | |||
2178 | 2470 | ||
2179 | spin_lock_irqsave(&ohci->lock, flags); | 2471 | spin_lock_irqsave(&ohci->lock, flags); |
2180 | 2472 | ||
2181 | if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) { | 2473 | switch (base->type) { |
2474 | case FW_ISO_CONTEXT_TRANSMIT: | ||
2182 | index = ctx - ohci->it_context_list; | 2475 | index = ctx - ohci->it_context_list; |
2183 | ohci->it_context_mask |= 1 << index; | 2476 | ohci->it_context_mask |= 1 << index; |
2184 | } else { | 2477 | break; |
2478 | |||
2479 | case FW_ISO_CONTEXT_RECEIVE: | ||
2185 | index = ctx - ohci->ir_context_list; | 2480 | index = ctx - ohci->ir_context_list; |
2186 | ohci->ir_context_mask |= 1 << index; | 2481 | ohci->ir_context_mask |= 1 << index; |
2187 | ohci->ir_context_channels |= 1ULL << base->channel; | 2482 | ohci->ir_context_channels |= 1ULL << base->channel; |
2483 | break; | ||
2484 | |||
2485 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | ||
2486 | index = ctx - ohci->ir_context_list; | ||
2487 | ohci->ir_context_mask |= 1 << index; | ||
2488 | ohci->ir_context_channels |= ohci->mc_channels; | ||
2489 | ohci->mc_channels = 0; | ||
2490 | ohci->mc_allocated = false; | ||
2491 | break; | ||
2188 | } | 2492 | } |
2189 | 2493 | ||
2190 | spin_unlock_irqrestore(&ohci->lock, flags); | 2494 | spin_unlock_irqrestore(&ohci->lock, flags); |
2191 | } | 2495 | } |
2192 | 2496 | ||
2193 | static int ohci_queue_iso_transmit(struct fw_iso_context *base, | 2497 | static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels) |
2194 | struct fw_iso_packet *packet, | 2498 | { |
2195 | struct fw_iso_buffer *buffer, | 2499 | struct fw_ohci *ohci = fw_ohci(base->card); |
2196 | unsigned long payload) | 2500 | unsigned long flags; |
2501 | int ret; | ||
2502 | |||
2503 | switch (base->type) { | ||
2504 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | ||
2505 | |||
2506 | spin_lock_irqsave(&ohci->lock, flags); | ||
2507 | |||
2508 | /* Don't allow multichannel to grab other contexts' channels. */ | ||
2509 | if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) { | ||
2510 | *channels = ohci->ir_context_channels; | ||
2511 | ret = -EBUSY; | ||
2512 | } else { | ||
2513 | set_multichannel_mask(ohci, *channels); | ||
2514 | ret = 0; | ||
2515 | } | ||
2516 | |||
2517 | spin_unlock_irqrestore(&ohci->lock, flags); | ||
2518 | |||
2519 | break; | ||
2520 | default: | ||
2521 | ret = -EINVAL; | ||
2522 | } | ||
2523 | |||
2524 | return ret; | ||
2525 | } | ||
2526 | |||
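The -EBUSY check in ohci_set_iso_channels() above rejects any requested channel that is neither still free (ir_context_channels) nor already owned by the multichannel context (mc_channels), i.e. a channel held by another single-channel context. A sketch of the same test with 8-bit masks for readability (the driver uses 64-bit masks; names are illustrative):

#include <stdbool.h>
#include <stdint.h>

static bool multichannel_request_ok(uint8_t free_channels,  /* like ir_context_channels */
                                    uint8_t mc_channels,    /* already in the multichannel mask */
                                    uint8_t requested)
{
        /* Any requested bit that is neither free nor already ours is
         * claimed by another context. */
        return (~free_channels & ~mc_channels & requested) == 0;
}

/* Example: channels 1 and 2 requested; 1 is free, 2 is already ours -> OK:
 *   multichannel_request_ok(0x02, 0x04, 0x06) == true
 * Channel 3 is held by another context -> busy:
 *   multichannel_request_ok(0x02, 0x04, 0x08) == false */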
2527 | static int queue_iso_transmit(struct iso_context *ctx, | ||
2528 | struct fw_iso_packet *packet, | ||
2529 | struct fw_iso_buffer *buffer, | ||
2530 | unsigned long payload) | ||
2197 | { | 2531 | { |
2198 | struct iso_context *ctx = container_of(base, struct iso_context, base); | ||
2199 | struct descriptor *d, *last, *pd; | 2532 | struct descriptor *d, *last, *pd; |
2200 | struct fw_iso_packet *p; | 2533 | struct fw_iso_packet *p; |
2201 | __le32 *header; | 2534 | __le32 *header; |
@@ -2291,14 +2624,12 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base, | |||
2291 | return 0; | 2624 | return 0; |
2292 | } | 2625 | } |
2293 | 2626 | ||
2294 | static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, | 2627 | static int queue_iso_packet_per_buffer(struct iso_context *ctx, |
2295 | struct fw_iso_packet *packet, | 2628 | struct fw_iso_packet *packet, |
2296 | struct fw_iso_buffer *buffer, | 2629 | struct fw_iso_buffer *buffer, |
2297 | unsigned long payload) | 2630 | unsigned long payload) |
2298 | { | 2631 | { |
2299 | struct iso_context *ctx = container_of(base, struct iso_context, base); | ||
2300 | struct descriptor *d, *pd; | 2632 | struct descriptor *d, *pd; |
2301 | struct fw_iso_packet *p = packet; | ||
2302 | dma_addr_t d_bus, page_bus; | 2633 | dma_addr_t d_bus, page_bus; |
2303 | u32 z, header_z, rest; | 2634 | u32 z, header_z, rest; |
2304 | int i, j, length; | 2635 | int i, j, length; |
@@ -2308,14 +2639,14 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, | |||
2308 | * The OHCI controller puts the isochronous header and trailer in the | 2639 | * The OHCI controller puts the isochronous header and trailer in the |
2309 | * buffer, so we need at least 8 bytes. | 2640 | * buffer, so we need at least 8 bytes. |
2310 | */ | 2641 | */ |
2311 | packet_count = p->header_length / ctx->base.header_size; | 2642 | packet_count = packet->header_length / ctx->base.header_size; |
2312 | header_size = max(ctx->base.header_size, (size_t)8); | 2643 | header_size = max(ctx->base.header_size, (size_t)8); |
2313 | 2644 | ||
2314 | /* Get header size in number of descriptors. */ | 2645 | /* Get header size in number of descriptors. */ |
2315 | header_z = DIV_ROUND_UP(header_size, sizeof(*d)); | 2646 | header_z = DIV_ROUND_UP(header_size, sizeof(*d)); |
2316 | page = payload >> PAGE_SHIFT; | 2647 | page = payload >> PAGE_SHIFT; |
2317 | offset = payload & ~PAGE_MASK; | 2648 | offset = payload & ~PAGE_MASK; |
2318 | payload_per_buffer = p->payload_length / packet_count; | 2649 | payload_per_buffer = packet->payload_length / packet_count; |
2319 | 2650 | ||
2320 | for (i = 0; i < packet_count; i++) { | 2651 | for (i = 0; i < packet_count; i++) { |
2321 | /* d points to the header descriptor */ | 2652 | /* d points to the header descriptor */ |
@@ -2327,7 +2658,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, | |||
2327 | 2658 | ||
2328 | d->control = cpu_to_le16(DESCRIPTOR_STATUS | | 2659 | d->control = cpu_to_le16(DESCRIPTOR_STATUS | |
2329 | DESCRIPTOR_INPUT_MORE); | 2660 | DESCRIPTOR_INPUT_MORE); |
2330 | if (p->skip && i == 0) | 2661 | if (packet->skip && i == 0) |
2331 | d->control |= cpu_to_le16(DESCRIPTOR_WAIT); | 2662 | d->control |= cpu_to_le16(DESCRIPTOR_WAIT); |
2332 | d->req_count = cpu_to_le16(header_size); | 2663 | d->req_count = cpu_to_le16(header_size); |
2333 | d->res_count = d->req_count; | 2664 | d->res_count = d->req_count; |
@@ -2360,7 +2691,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, | |||
2360 | pd->control = cpu_to_le16(DESCRIPTOR_STATUS | | 2691 | pd->control = cpu_to_le16(DESCRIPTOR_STATUS | |
2361 | DESCRIPTOR_INPUT_LAST | | 2692 | DESCRIPTOR_INPUT_LAST | |
2362 | DESCRIPTOR_BRANCH_ALWAYS); | 2693 | DESCRIPTOR_BRANCH_ALWAYS); |
2363 | if (p->interrupt && i == packet_count - 1) | 2694 | if (packet->interrupt && i == packet_count - 1) |
2364 | pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); | 2695 | pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); |
2365 | 2696 | ||
2366 | context_append(&ctx->context, d, z, header_z); | 2697 | context_append(&ctx->context, d, z, header_z); |
@@ -2369,6 +2700,58 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, | |||
2369 | return 0; | 2700 | return 0; |
2370 | } | 2701 | } |
2371 | 2702 | ||
2703 | static int queue_iso_buffer_fill(struct iso_context *ctx, | ||
2704 | struct fw_iso_packet *packet, | ||
2705 | struct fw_iso_buffer *buffer, | ||
2706 | unsigned long payload) | ||
2707 | { | ||
2708 | struct descriptor *d; | ||
2709 | dma_addr_t d_bus, page_bus; | ||
2710 | int page, offset, rest, z, i, length; | ||
2711 | |||
2712 | page = payload >> PAGE_SHIFT; | ||
2713 | offset = payload & ~PAGE_MASK; | ||
2714 | rest = packet->payload_length; | ||
2715 | |||
2716 | /* We need one descriptor for each page in the buffer. */ | ||
2717 | z = DIV_ROUND_UP(offset + rest, PAGE_SIZE); | ||
2718 | |||
2719 | if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count)) | ||
2720 | return -EFAULT; | ||
2721 | |||
2722 | for (i = 0; i < z; i++) { | ||
2723 | d = context_get_descriptors(&ctx->context, 1, &d_bus); | ||
2724 | if (d == NULL) | ||
2725 | return -ENOMEM; | ||
2726 | |||
2727 | d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | | ||
2728 | DESCRIPTOR_BRANCH_ALWAYS); | ||
2729 | if (packet->skip && i == 0) | ||
2730 | d->control |= cpu_to_le16(DESCRIPTOR_WAIT); | ||
2731 | if (packet->interrupt && i == z - 1) | ||
2732 | d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); | ||
2733 | |||
2734 | if (offset + rest < PAGE_SIZE) | ||
2735 | length = rest; | ||
2736 | else | ||
2737 | length = PAGE_SIZE - offset; | ||
2738 | d->req_count = cpu_to_le16(length); | ||
2739 | d->res_count = d->req_count; | ||
2740 | d->transfer_status = 0; | ||
2741 | |||
2742 | page_bus = page_private(buffer->pages[page]); | ||
2743 | d->data_address = cpu_to_le32(page_bus + offset); | ||
2744 | |||
2745 | rest -= length; | ||
2746 | offset = 0; | ||
2747 | page++; | ||
2748 | |||
2749 | context_append(&ctx->context, d, 1, 0); | ||
2750 | } | ||
2751 | |||
2752 | return 0; | ||
2753 | } | ||
2754 | |||
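queue_iso_buffer_fill() above emits one INPUT_MORE descriptor per page touched by the request, so the descriptor count z is the payload length plus the starting page offset, rounded up to whole pages. A worked sketch of that computation (assumes 4 KiB pages; names are illustrative):

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096

/* Same computation as z = DIV_ROUND_UP(offset + rest, PAGE_SIZE) above. */
static int descriptors_needed(int offset, int rest)
{
        return (offset + rest + EXAMPLE_PAGE_SIZE - 1) / EXAMPLE_PAGE_SIZE;
}

int main(void)
{
        /* 10000 bytes starting 512 bytes into a page span three pages. */
        printf("%d\n", descriptors_needed(512, 10000));  /* prints 3 */
        return 0;
}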
2372 | static int ohci_queue_iso(struct fw_iso_context *base, | 2755 | static int ohci_queue_iso(struct fw_iso_context *base, |
2373 | struct fw_iso_packet *packet, | 2756 | struct fw_iso_packet *packet, |
2374 | struct fw_iso_buffer *buffer, | 2757 | struct fw_iso_buffer *buffer, |
@@ -2376,14 +2759,20 @@ static int ohci_queue_iso(struct fw_iso_context *base, | |||
2376 | { | 2759 | { |
2377 | struct iso_context *ctx = container_of(base, struct iso_context, base); | 2760 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
2378 | unsigned long flags; | 2761 | unsigned long flags; |
2379 | int ret; | 2762 | int ret = -ENOSYS; |
2380 | 2763 | ||
2381 | spin_lock_irqsave(&ctx->context.ohci->lock, flags); | 2764 | spin_lock_irqsave(&ctx->context.ohci->lock, flags); |
2382 | if (base->type == FW_ISO_CONTEXT_TRANSMIT) | 2765 | switch (base->type) { |
2383 | ret = ohci_queue_iso_transmit(base, packet, buffer, payload); | 2766 | case FW_ISO_CONTEXT_TRANSMIT: |
2384 | else | 2767 | ret = queue_iso_transmit(ctx, packet, buffer, payload); |
2385 | ret = ohci_queue_iso_receive_packet_per_buffer(base, packet, | 2768 | break; |
2386 | buffer, payload); | 2769 | case FW_ISO_CONTEXT_RECEIVE: |
2770 | ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload); | ||
2771 | break; | ||
2772 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | ||
2773 | ret = queue_iso_buffer_fill(ctx, packet, buffer, payload); | ||
2774 | break; | ||
2775 | } | ||
2387 | spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); | 2776 | spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); |
2388 | 2777 | ||
2389 | return ret; | 2778 | return ret; |
@@ -2391,16 +2780,19 @@ static int ohci_queue_iso(struct fw_iso_context *base, | |||
2391 | 2780 | ||
2392 | static const struct fw_card_driver ohci_driver = { | 2781 | static const struct fw_card_driver ohci_driver = { |
2393 | .enable = ohci_enable, | 2782 | .enable = ohci_enable, |
2783 | .read_phy_reg = ohci_read_phy_reg, | ||
2394 | .update_phy_reg = ohci_update_phy_reg, | 2784 | .update_phy_reg = ohci_update_phy_reg, |
2395 | .set_config_rom = ohci_set_config_rom, | 2785 | .set_config_rom = ohci_set_config_rom, |
2396 | .send_request = ohci_send_request, | 2786 | .send_request = ohci_send_request, |
2397 | .send_response = ohci_send_response, | 2787 | .send_response = ohci_send_response, |
2398 | .cancel_packet = ohci_cancel_packet, | 2788 | .cancel_packet = ohci_cancel_packet, |
2399 | .enable_phys_dma = ohci_enable_phys_dma, | 2789 | .enable_phys_dma = ohci_enable_phys_dma, |
2400 | .get_cycle_time = ohci_get_cycle_time, | 2790 | .read_csr = ohci_read_csr, |
2791 | .write_csr = ohci_write_csr, | ||
2401 | 2792 | ||
2402 | .allocate_iso_context = ohci_allocate_iso_context, | 2793 | .allocate_iso_context = ohci_allocate_iso_context, |
2403 | .free_iso_context = ohci_free_iso_context, | 2794 | .free_iso_context = ohci_free_iso_context, |
2795 | .set_iso_channels = ohci_set_iso_channels, | ||
2404 | .queue_iso = ohci_queue_iso, | 2796 | .queue_iso = ohci_queue_iso, |
2405 | .start_iso = ohci_start_iso, | 2797 | .start_iso = ohci_start_iso, |
2406 | .stop_iso = ohci_stop_iso, | 2798 | .stop_iso = ohci_stop_iso, |
@@ -2465,6 +2857,7 @@ static int __devinit pci_probe(struct pci_dev *dev, | |||
2465 | pci_set_drvdata(dev, ohci); | 2857 | pci_set_drvdata(dev, ohci); |
2466 | 2858 | ||
2467 | spin_lock_init(&ohci->lock); | 2859 | spin_lock_init(&ohci->lock); |
2860 | mutex_init(&ohci->phy_reg_mutex); | ||
2468 | 2861 | ||
2469 | tasklet_init(&ohci->bus_reset_tasklet, | 2862 | tasklet_init(&ohci->bus_reset_tasklet, |
2470 | bus_reset_tasklet, (unsigned long)ohci); | 2863 | bus_reset_tasklet, (unsigned long)ohci); |
@@ -2625,6 +3018,7 @@ static void pci_remove(struct pci_dev *dev) | |||
2625 | context_release(&ohci->at_response_ctx); | 3018 | context_release(&ohci->at_response_ctx); |
2626 | kfree(ohci->it_context_list); | 3019 | kfree(ohci->it_context_list); |
2627 | kfree(ohci->ir_context_list); | 3020 | kfree(ohci->ir_context_list); |
3021 | pci_disable_msi(dev); | ||
2628 | pci_iounmap(dev, ohci->registers); | 3022 | pci_iounmap(dev, ohci->registers); |
2629 | pci_release_region(dev, 0); | 3023 | pci_release_region(dev, 0); |
2630 | pci_disable_device(dev); | 3024 | pci_disable_device(dev); |
@@ -2642,6 +3036,7 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state) | |||
2642 | 3036 | ||
2643 | software_reset(ohci); | 3037 | software_reset(ohci); |
2644 | free_irq(dev->irq, ohci); | 3038 | free_irq(dev->irq, ohci); |
3039 | pci_disable_msi(dev); | ||
2645 | err = pci_save_state(dev); | 3040 | err = pci_save_state(dev); |
2646 | if (err) { | 3041 | if (err) { |
2647 | fw_error("pci_save_state failed\n"); | 3042 | fw_error("pci_save_state failed\n"); |
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h index 3bc9a5d744eb..0e6c5a466908 100644 --- a/drivers/firewire/ohci.h +++ b/drivers/firewire/ohci.h | |||
@@ -60,6 +60,7 @@ | |||
60 | #define OHCI1394_LinkControl_cycleSource (1 << 22) | 60 | #define OHCI1394_LinkControl_cycleSource (1 << 22) |
61 | #define OHCI1394_NodeID 0x0E8 | 61 | #define OHCI1394_NodeID 0x0E8 |
62 | #define OHCI1394_NodeID_idValid 0x80000000 | 62 | #define OHCI1394_NodeID_idValid 0x80000000 |
63 | #define OHCI1394_NodeID_root 0x40000000 | ||
63 | #define OHCI1394_NodeID_nodeNumber 0x0000003f | 64 | #define OHCI1394_NodeID_nodeNumber 0x0000003f |
64 | #define OHCI1394_NodeID_busNumber 0x0000ffc0 | 65 | #define OHCI1394_NodeID_busNumber 0x0000ffc0 |
65 | #define OHCI1394_PhyControl 0x0EC | 66 | #define OHCI1394_PhyControl 0x0EC |
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index ca264f2fdf0c..9f76171717e5 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c | |||
@@ -410,8 +410,7 @@ static void free_orb(struct kref *kref) | |||
410 | 410 | ||
411 | static void sbp2_status_write(struct fw_card *card, struct fw_request *request, | 411 | static void sbp2_status_write(struct fw_card *card, struct fw_request *request, |
412 | int tcode, int destination, int source, | 412 | int tcode, int destination, int source, |
413 | int generation, int speed, | 413 | int generation, unsigned long long offset, |
414 | unsigned long long offset, | ||
415 | void *payload, size_t length, void *callback_data) | 414 | void *payload, size_t length, void *callback_data) |
416 | { | 415 | { |
417 | struct sbp2_logical_unit *lu = callback_data; | 416 | struct sbp2_logical_unit *lu = callback_data; |
@@ -508,8 +507,7 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, | |||
508 | 507 | ||
509 | fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST, | 508 | fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST, |
510 | node_id, generation, device->max_speed, offset, | 509 | node_id, generation, device->max_speed, offset, |
511 | &orb->pointer, sizeof(orb->pointer), | 510 | &orb->pointer, 8, complete_transaction, orb); |
512 | complete_transaction, orb); | ||
513 | } | 511 | } |
514 | 512 | ||
515 | static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) | 513 | static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) |
@@ -654,7 +652,7 @@ static void sbp2_agent_reset(struct sbp2_logical_unit *lu) | |||
654 | fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, | 652 | fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, |
655 | lu->tgt->node_id, lu->generation, device->max_speed, | 653 | lu->tgt->node_id, lu->generation, device->max_speed, |
656 | lu->command_block_agent_address + SBP2_AGENT_RESET, | 654 | lu->command_block_agent_address + SBP2_AGENT_RESET, |
657 | &d, sizeof(d)); | 655 | &d, 4); |
658 | } | 656 | } |
659 | 657 | ||
660 | static void complete_agent_reset_write_no_wait(struct fw_card *card, | 658 | static void complete_agent_reset_write_no_wait(struct fw_card *card, |
@@ -676,7 +674,7 @@ static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) | |||
676 | fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, | 674 | fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, |
677 | lu->tgt->node_id, lu->generation, device->max_speed, | 675 | lu->tgt->node_id, lu->generation, device->max_speed, |
678 | lu->command_block_agent_address + SBP2_AGENT_RESET, | 676 | lu->command_block_agent_address + SBP2_AGENT_RESET, |
679 | &d, sizeof(d), complete_agent_reset_write_no_wait, t); | 677 | &d, 4, complete_agent_reset_write_no_wait, t); |
680 | } | 678 | } |
681 | 679 | ||
682 | static inline void sbp2_allow_block(struct sbp2_logical_unit *lu) | 680 | static inline void sbp2_allow_block(struct sbp2_logical_unit *lu) |
@@ -866,8 +864,7 @@ static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu) | |||
866 | 864 | ||
867 | fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, | 865 | fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, |
868 | lu->tgt->node_id, lu->generation, device->max_speed, | 866 | lu->tgt->node_id, lu->generation, device->max_speed, |
869 | CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, | 867 | CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, &d, 4); |
870 | &d, sizeof(d)); | ||
871 | } | 868 | } |
872 | 869 | ||
873 | static void sbp2_reconnect(struct work_struct *work); | 870 | static void sbp2_reconnect(struct work_struct *work); |
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c index adaefabc40e9..c5a031b79d03 100644 --- a/drivers/ieee1394/dv1394.c +++ b/drivers/ieee1394/dv1394.c | |||
@@ -172,7 +172,7 @@ static DEFINE_SPINLOCK(dv1394_cards_lock); | |||
172 | 172 | ||
173 | static inline struct video_card* file_to_video_card(struct file *file) | 173 | static inline struct video_card* file_to_video_card(struct file *file) |
174 | { | 174 | { |
175 | return (struct video_card*) file->private_data; | 175 | return file->private_data; |
176 | } | 176 | } |
177 | 177 | ||
178 | /*** FRAME METHODS *********************************************************/ | 178 | /*** FRAME METHODS *********************************************************/ |
@@ -610,7 +610,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame) | |||
610 | } else { | 610 | } else { |
611 | 611 | ||
612 | u32 transmit_sec, transmit_cyc; | 612 | u32 transmit_sec, transmit_cyc; |
613 | u32 ts_cyc, ts_off; | 613 | u32 ts_cyc; |
614 | 614 | ||
615 | /* DMA is stopped, so this is the very first frame */ | 615 | /* DMA is stopped, so this is the very first frame */ |
616 | video->active_frame = this_frame; | 616 | video->active_frame = this_frame; |
@@ -636,7 +636,6 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame) | |||
636 | transmit_sec += transmit_cyc/8000; | 636 | transmit_sec += transmit_cyc/8000; |
637 | transmit_cyc %= 8000; | 637 | transmit_cyc %= 8000; |
638 | 638 | ||
639 | ts_off = ct_off; | ||
640 | ts_cyc = transmit_cyc + 3; | 639 | ts_cyc = transmit_cyc + 3; |
641 | ts_cyc %= 8000; | 640 | ts_cyc %= 8000; |
642 | 641 | ||
@@ -1784,7 +1783,7 @@ static int dv1394_open(struct inode *inode, struct file *file) | |||
1784 | struct video_card *video = NULL; | 1783 | struct video_card *video = NULL; |
1785 | 1784 | ||
1786 | if (file->private_data) { | 1785 | if (file->private_data) { |
1787 | video = (struct video_card*) file->private_data; | 1786 | video = file->private_data; |
1788 | 1787 | ||
1789 | } else { | 1788 | } else { |
1790 | /* look up the card by ID */ | 1789 | /* look up the card by ID */ |
@@ -2004,7 +2003,7 @@ static void ir_tasklet_func(unsigned long data) | |||
2004 | 2003 | ||
2005 | int sof=0; /* start-of-frame flag */ | 2004 | int sof=0; /* start-of-frame flag */ |
2006 | struct frame *f; | 2005 | struct frame *f; |
2007 | u16 packet_length, packet_time; | 2006 | u16 packet_length; |
2008 | int i, dbc=0; | 2007 | int i, dbc=0; |
2009 | struct DMA_descriptor_block *block = NULL; | 2008 | struct DMA_descriptor_block *block = NULL; |
2010 | u16 xferstatus; | 2009 | u16 xferstatus; |
@@ -2024,11 +2023,6 @@ static void ir_tasklet_func(unsigned long data) | |||
2024 | sizeof(struct packet)); | 2023 | sizeof(struct packet)); |
2025 | 2024 | ||
2026 | packet_length = le16_to_cpu(p->data_length); | 2025 | packet_length = le16_to_cpu(p->data_length); |
2027 | packet_time = le16_to_cpu(p->timestamp); | ||
2028 | |||
2029 | irq_printk("received packet %02d, timestamp=%04x, length=%04x, sof=%02x%02x\n", video->current_packet, | ||
2030 | packet_time, packet_length, | ||
2031 | p->data[0], p->data[1]); | ||
2032 | 2026 | ||
2033 | /* get the descriptor based on packet_buffer cursor */ | 2027 | /* get the descriptor based on packet_buffer cursor */ |
2034 | f = video->frames[video->current_packet / MAX_PACKETS]; | 2028 | f = video->frames[video->current_packet / MAX_PACKETS]; |
@@ -2320,7 +2314,6 @@ static void dv1394_add_host(struct hpsb_host *host) | |||
2320 | 2314 | ||
2321 | static void dv1394_host_reset(struct hpsb_host *host) | 2315 | static void dv1394_host_reset(struct hpsb_host *host) |
2322 | { | 2316 | { |
2323 | struct ti_ohci *ohci; | ||
2324 | struct video_card *video = NULL, *tmp_vid; | 2317 | struct video_card *video = NULL, *tmp_vid; |
2325 | unsigned long flags; | 2318 | unsigned long flags; |
2326 | 2319 | ||
@@ -2328,9 +2321,6 @@ static void dv1394_host_reset(struct hpsb_host *host) | |||
2328 | if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME)) | 2321 | if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME)) |
2329 | return; | 2322 | return; |
2330 | 2323 | ||
2331 | ohci = (struct ti_ohci *)host->hostdata; | ||
2332 | |||
2333 | |||
2334 | /* find the corresponding video_cards */ | 2324 | /* find the corresponding video_cards */ |
2335 | spin_lock_irqsave(&dv1394_cards_lock, flags); | 2325 | spin_lock_irqsave(&dv1394_cards_lock, flags); |
2336 | list_for_each_entry(tmp_vid, &dv1394_cards, list) { | 2326 | list_for_each_entry(tmp_vid, &dv1394_cards, list) { |
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c index a4e9dcb6d4a9..bc289e367e30 100644 --- a/drivers/ieee1394/eth1394.c +++ b/drivers/ieee1394/eth1394.c | |||
@@ -1258,7 +1258,6 @@ static void ether1394_iso(struct hpsb_iso *iso) | |||
1258 | char *buf; | 1258 | char *buf; |
1259 | struct eth1394_host_info *hi; | 1259 | struct eth1394_host_info *hi; |
1260 | struct net_device *dev; | 1260 | struct net_device *dev; |
1261 | struct eth1394_priv *priv; | ||
1262 | unsigned int len; | 1261 | unsigned int len; |
1263 | u32 specifier_id; | 1262 | u32 specifier_id; |
1264 | u16 source_id; | 1263 | u16 source_id; |
@@ -1288,8 +1287,6 @@ static void ether1394_iso(struct hpsb_iso *iso) | |||
1288 | (be32_to_cpu(data[1]) & 0xff000000) >> 24; | 1287 | (be32_to_cpu(data[1]) & 0xff000000) >> 24; |
1289 | source_id = be32_to_cpu(data[0]) >> 16; | 1288 | source_id = be32_to_cpu(data[0]) >> 16; |
1290 | 1289 | ||
1291 | priv = netdev_priv(dev); | ||
1292 | |||
1293 | if (info->channel != (iso->host->csr.broadcast_channel & 0x3f) | 1290 | if (info->channel != (iso->host->csr.broadcast_channel & 0x3f) |
1294 | || specifier_id != ETHER1394_GASP_SPECIFIER_ID) { | 1291 | || specifier_id != ETHER1394_GASP_SPECIFIER_ID) { |
1295 | /* This packet is not for us */ | 1292 | /* This packet is not for us */ |
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c index b563d5e9fa2e..f3401427404c 100644 --- a/drivers/ieee1394/raw1394.c +++ b/drivers/ieee1394/raw1394.c | |||
@@ -440,7 +440,7 @@ static struct pending_request *next_complete_req(struct file_info *fi) | |||
440 | static ssize_t raw1394_read(struct file *file, char __user * buffer, | 440 | static ssize_t raw1394_read(struct file *file, char __user * buffer, |
441 | size_t count, loff_t * offset_is_ignored) | 441 | size_t count, loff_t * offset_is_ignored) |
442 | { | 442 | { |
443 | struct file_info *fi = (struct file_info *)file->private_data; | 443 | struct file_info *fi = file->private_data; |
444 | struct pending_request *req; | 444 | struct pending_request *req; |
445 | ssize_t ret; | 445 | ssize_t ret; |
446 | 446 | ||
@@ -1015,7 +1015,7 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid, | |||
1015 | struct arm_addr *arm_addr = NULL; | 1015 | struct arm_addr *arm_addr = NULL; |
1016 | struct arm_request *arm_req = NULL; | 1016 | struct arm_request *arm_req = NULL; |
1017 | struct arm_response *arm_resp = NULL; | 1017 | struct arm_response *arm_resp = NULL; |
1018 | int found = 0, size = 0, rcode = -1, length_conflict = 0; | 1018 | int found = 0, size = 0, rcode = -1; |
1019 | struct arm_request_response *arm_req_resp = NULL; | 1019 | struct arm_request_response *arm_req_resp = NULL; |
1020 | 1020 | ||
1021 | DBGMSG("arm_write called by node: %X " | 1021 | DBGMSG("arm_write called by node: %X " |
@@ -1054,7 +1054,6 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid, | |||
1054 | } | 1054 | } |
1055 | if (arm_addr->rec_length < length) { | 1055 | if (arm_addr->rec_length < length) { |
1056 | DBGMSG("arm_write blocklength too big -> rcode_data_error"); | 1056 | DBGMSG("arm_write blocklength too big -> rcode_data_error"); |
1057 | length_conflict = 1; | ||
1058 | rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */ | 1057 | rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */ |
1059 | } | 1058 | } |
1060 | if (rcode == -1) { | 1059 | if (rcode == -1) { |
@@ -2245,7 +2244,7 @@ static int state_connected(struct file_info *fi, struct pending_request *req) | |||
2245 | static ssize_t raw1394_write(struct file *file, const char __user * buffer, | 2244 | static ssize_t raw1394_write(struct file *file, const char __user * buffer, |
2246 | size_t count, loff_t * offset_is_ignored) | 2245 | size_t count, loff_t * offset_is_ignored) |
2247 | { | 2246 | { |
2248 | struct file_info *fi = (struct file_info *)file->private_data; | 2247 | struct file_info *fi = file->private_data; |
2249 | struct pending_request *req; | 2248 | struct pending_request *req; |
2250 | ssize_t retval = -EBADFD; | 2249 | ssize_t retval = -EBADFD; |
2251 | 2250 | ||
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 4565cb5d3d1a..d6e251a300ce 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c | |||
@@ -1350,12 +1350,11 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu, | |||
1350 | struct csr1212_keyval *kv; | 1350 | struct csr1212_keyval *kv; |
1351 | struct csr1212_dentry *dentry; | 1351 | struct csr1212_dentry *dentry; |
1352 | u64 management_agent_addr; | 1352 | u64 management_agent_addr; |
1353 | u32 unit_characteristics, firmware_revision, model; | 1353 | u32 firmware_revision, model; |
1354 | unsigned workarounds; | 1354 | unsigned workarounds; |
1355 | int i; | 1355 | int i; |
1356 | 1356 | ||
1357 | management_agent_addr = 0; | 1357 | management_agent_addr = 0; |
1358 | unit_characteristics = 0; | ||
1359 | firmware_revision = SBP2_ROM_VALUE_MISSING; | 1358 | firmware_revision = SBP2_ROM_VALUE_MISSING; |
1360 | model = ud->flags & UNIT_DIRECTORY_MODEL_ID ? | 1359 | model = ud->flags & UNIT_DIRECTORY_MODEL_ID ? |
1361 | ud->model_id : SBP2_ROM_VALUE_MISSING; | 1360 | ud->model_id : SBP2_ROM_VALUE_MISSING; |
@@ -1372,17 +1371,15 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu, | |||
1372 | lu->lun = ORB_SET_LUN(kv->value.immediate); | 1371 | lu->lun = ORB_SET_LUN(kv->value.immediate); |
1373 | break; | 1372 | break; |
1374 | 1373 | ||
1375 | case SBP2_UNIT_CHARACTERISTICS_KEY: | ||
1376 | /* FIXME: This is ignored so far. | ||
1377 | * See SBP-2 clause 7.4.8. */ | ||
1378 | unit_characteristics = kv->value.immediate; | ||
1379 | break; | ||
1380 | 1374 | ||
1381 | case SBP2_FIRMWARE_REVISION_KEY: | 1375 | case SBP2_FIRMWARE_REVISION_KEY: |
1382 | firmware_revision = kv->value.immediate; | 1376 | firmware_revision = kv->value.immediate; |
1383 | break; | 1377 | break; |
1384 | 1378 | ||
1385 | default: | 1379 | default: |
1380 | /* FIXME: Check for SBP2_UNIT_CHARACTERISTICS_KEY | ||
1381 | * mgt_ORB_timeout and ORB_size, SBP-2 clause 7.4.8. */ | ||
1382 | |||
1386 | /* FIXME: Check for SBP2_DEVICE_TYPE_AND_LUN_KEY. | 1383 | /* FIXME: Check for SBP2_DEVICE_TYPE_AND_LUN_KEY. |
1387 | * Its "ordered" bit has consequences for command ORB | 1384 | * Its "ordered" bit has consequences for command ORB |
1388 | * list handling. See SBP-2 clauses 4.6, 7.4.11, 10.2 */ | 1385 | * list handling. See SBP-2 clauses 4.6, 7.4.11, 10.2 */ |
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c index a42bd6893bcf..5c74f796d7f1 100644 --- a/drivers/ieee1394/video1394.c +++ b/drivers/ieee1394/video1394.c | |||
@@ -720,7 +720,7 @@ static inline unsigned video1394_buffer_state(struct dma_iso_ctx *d, | |||
720 | static long video1394_ioctl(struct file *file, | 720 | static long video1394_ioctl(struct file *file, |
721 | unsigned int cmd, unsigned long arg) | 721 | unsigned int cmd, unsigned long arg) |
722 | { | 722 | { |
723 | struct file_ctx *ctx = (struct file_ctx *)file->private_data; | 723 | struct file_ctx *ctx = file->private_data; |
724 | struct ti_ohci *ohci = ctx->ohci; | 724 | struct ti_ohci *ohci = ctx->ohci; |
725 | unsigned long flags; | 725 | unsigned long flags; |
726 | void __user *argp = (void __user *)arg; | 726 | void __user *argp = (void __user *)arg; |
@@ -1045,14 +1045,9 @@ static long video1394_ioctl(struct file *file, | |||
1045 | if (get_user(qv, &p->packet_sizes)) | 1045 | if (get_user(qv, &p->packet_sizes)) |
1046 | return -EFAULT; | 1046 | return -EFAULT; |
1047 | 1047 | ||
1048 | psizes = kmalloc(buf_size, GFP_KERNEL); | 1048 | psizes = memdup_user(qv, buf_size); |
1049 | if (!psizes) | 1049 | if (IS_ERR(psizes)) |
1050 | return -ENOMEM; | 1050 | return PTR_ERR(psizes); |
1051 | |||
1052 | if (copy_from_user(psizes, qv, buf_size)) { | ||
1053 | kfree(psizes); | ||
1054 | return -EFAULT; | ||
1055 | } | ||
1056 | } | 1051 | } |
1057 | 1052 | ||
1058 | spin_lock_irqsave(&d->lock,flags); | 1053 | spin_lock_irqsave(&d->lock,flags); |
@@ -1177,7 +1172,7 @@ static long video1394_ioctl(struct file *file, | |||
1177 | 1172 | ||
1178 | static int video1394_mmap(struct file *file, struct vm_area_struct *vma) | 1173 | static int video1394_mmap(struct file *file, struct vm_area_struct *vma) |
1179 | { | 1174 | { |
1180 | struct file_ctx *ctx = (struct file_ctx *)file->private_data; | 1175 | struct file_ctx *ctx = file->private_data; |
1181 | 1176 | ||
1182 | if (ctx->current_ctx == NULL) { | 1177 | if (ctx->current_ctx == NULL) { |
1183 | PRINT(KERN_ERR, ctx->ohci->host->id, | 1178 | PRINT(KERN_ERR, ctx->ohci->host->id, |
@@ -1244,7 +1239,7 @@ static int video1394_open(struct inode *inode, struct file *file) | |||
1244 | 1239 | ||
1245 | static int video1394_release(struct inode *inode, struct file *file) | 1240 | static int video1394_release(struct inode *inode, struct file *file) |
1246 | { | 1241 | { |
1247 | struct file_ctx *ctx = (struct file_ctx *)file->private_data; | 1242 | struct file_ctx *ctx = file->private_data; |
1248 | struct ti_ohci *ohci = ctx->ohci; | 1243 | struct ti_ohci *ohci = ctx->ohci; |
1249 | struct list_head *lh, *next; | 1244 | struct list_head *lh, *next; |
1250 | u64 mask; | 1245 | u64 mask; |
diff --git a/drivers/media/dvb/firewire/firedtv-fw.c b/drivers/media/dvb/firewire/firedtv-fw.c index 75afe4f81e33..7424b0493f9d 100644 --- a/drivers/media/dvb/firewire/firedtv-fw.c +++ b/drivers/media/dvb/firewire/firedtv-fw.c | |||
@@ -194,8 +194,8 @@ static const struct firedtv_backend backend = { | |||
194 | 194 | ||
195 | static void handle_fcp(struct fw_card *card, struct fw_request *request, | 195 | static void handle_fcp(struct fw_card *card, struct fw_request *request, |
196 | int tcode, int destination, int source, int generation, | 196 | int tcode, int destination, int source, int generation, |
197 | int speed, unsigned long long offset, | 197 | unsigned long long offset, void *payload, size_t length, |
198 | void *payload, size_t length, void *callback_data) | 198 | void *callback_data) |
199 | { | 199 | { |
200 | struct firedtv *f, *fdtv = NULL; | 200 | struct firedtv *f, *fdtv = NULL; |
201 | struct fw_device *device; | 201 | struct fw_device *device; |
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 68f883b30a53..68c642d8843d 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h | |||
@@ -30,12 +30,18 @@ | |||
30 | #include <linux/types.h> | 30 | #include <linux/types.h> |
31 | #include <linux/firewire-constants.h> | 31 | #include <linux/firewire-constants.h> |
32 | 32 | ||
33 | #define FW_CDEV_EVENT_BUS_RESET 0x00 | 33 | #define FW_CDEV_EVENT_BUS_RESET 0x00 |
34 | #define FW_CDEV_EVENT_RESPONSE 0x01 | 34 | #define FW_CDEV_EVENT_RESPONSE 0x01 |
35 | #define FW_CDEV_EVENT_REQUEST 0x02 | 35 | #define FW_CDEV_EVENT_REQUEST 0x02 |
36 | #define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 | 36 | #define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 |
37 | #define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04 | 37 | #define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04 |
38 | #define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05 | 38 | #define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05 |
39 | |||
40 | /* available since kernel version 2.6.36 */ | ||
41 | #define FW_CDEV_EVENT_REQUEST2 0x06 | ||
42 | #define FW_CDEV_EVENT_PHY_PACKET_SENT 0x07 | ||
43 | #define FW_CDEV_EVENT_PHY_PACKET_RECEIVED 0x08 | ||
44 | #define FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL 0x09 | ||
39 | 45 | ||
40 | /** | 46 | /** |
41 | * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types | 47 | * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types |
@@ -68,6 +74,10 @@ struct fw_cdev_event_common { | |||
68 | * This event is sent when the bus the device belongs to goes through a bus | 74 | * This event is sent when the bus the device belongs to goes through a bus |
69 | * reset. It provides information about the new bus configuration, such as | 75 | * reset. It provides information about the new bus configuration, such as |
70 | * new node ID for this device, new root ID, and others. | 76 | * new node ID for this device, new root ID, and others. |
77 | * | ||
78 | * If @bm_node_id is 0xffff right after bus reset, it can be reread by an | ||
79 | * %FW_CDEV_IOC_GET_INFO ioctl after bus manager selection has finished. | ||
80 | * Kernels with ABI version < 4 do not set @bm_node_id. | ||
71 | */ | 81 | */ |
72 | struct fw_cdev_event_bus_reset { | 82 | struct fw_cdev_event_bus_reset { |
73 | __u64 closure; | 83 | __u64 closure; |
@@ -82,8 +92,9 @@ struct fw_cdev_event_bus_reset { | |||
82 | 92 | ||
83 | /** | 93 | /** |
84 | * struct fw_cdev_event_response - Sent when a response packet was received | 94 | * struct fw_cdev_event_response - Sent when a response packet was received |
85 | * @closure: See &fw_cdev_event_common; | 95 | * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_REQUEST |
86 | * set by %FW_CDEV_IOC_SEND_REQUEST ioctl | 96 | * or %FW_CDEV_IOC_SEND_BROADCAST_REQUEST |
97 | * or %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl | ||
87 | * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_RESPONSE | 98 | * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_RESPONSE |
88 | * @rcode: Response code returned by the remote node | 99 | * @rcode: Response code returned by the remote node |
89 | * @length: Data length, i.e. the response's payload size in bytes | 100 | * @length: Data length, i.e. the response's payload size in bytes |
@@ -93,6 +104,11 @@ struct fw_cdev_event_bus_reset { | |||
93 | * sent by %FW_CDEV_IOC_SEND_REQUEST ioctl. The payload data for responses | 104 | * sent by %FW_CDEV_IOC_SEND_REQUEST ioctl. The payload data for responses |
94 | * carrying data (read and lock responses) follows immediately and can be | 105 | * carrying data (read and lock responses) follows immediately and can be |
95 | * accessed through the @data field. | 106 | * accessed through the @data field. |
107 | * | ||
108 | * The event is also generated after conclusions of transactions that do not | ||
109 | * involve response packets. This includes unified write transactions, | ||
110 | * broadcast write transactions, and transmission of asynchronous stream | ||
111 | * packets. @rcode indicates success or failure of such transmissions. | ||
96 | */ | 112 | */ |
97 | struct fw_cdev_event_response { | 113 | struct fw_cdev_event_response { |
98 | __u64 closure; | 114 | __u64 closure; |
@@ -103,11 +119,46 @@ struct fw_cdev_event_response { | |||
103 | }; | 119 | }; |
104 | 120 | ||
105 | /** | 121 | /** |
106 | * struct fw_cdev_event_request - Sent on incoming request to an address region | 122 | * struct fw_cdev_event_request - Old version of &fw_cdev_event_request2 |
107 | * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl | 123 | * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl |
108 | * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST | 124 | * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST |
125 | * @tcode: See &fw_cdev_event_request2 | ||
126 | * @offset: See &fw_cdev_event_request2 | ||
127 | * @handle: See &fw_cdev_event_request2 | ||
128 | * @length: See &fw_cdev_event_request2 | ||
129 | * @data: See &fw_cdev_event_request2 | ||
130 | * | ||
131 | * This event is sent instead of &fw_cdev_event_request2 if the kernel or | ||
132 | * the client implements ABI version <= 3. | ||
133 | * | ||
134 | * Unlike &fw_cdev_event_request2, the sender identity cannot be established, | ||
135 | * broadcast write requests cannot be distinguished from unicast writes, and | ||
136 | * @tcode of lock requests is %TCODE_LOCK_REQUEST. | ||
137 | * | ||
138 | * Requests to the FCP_REQUEST or FCP_RESPONSE register are responded to as | ||
139 | * with &fw_cdev_event_request2, except in kernel 2.6.32 and older, which send | ||
140 | * the response packet of the client's %FW_CDEV_IOC_SEND_RESPONSE ioctl. | ||
141 | */ | ||
142 | struct fw_cdev_event_request { | ||
143 | __u64 closure; | ||
144 | __u32 type; | ||
145 | __u32 tcode; | ||
146 | __u64 offset; | ||
147 | __u32 handle; | ||
148 | __u32 length; | ||
149 | __u32 data[0]; | ||
150 | }; | ||
151 | |||
152 | /** | ||
153 | * struct fw_cdev_event_request2 - Sent on incoming request to an address region | ||
154 | * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl | ||
155 | * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST2 | ||
109 | * @tcode: Transaction code of the incoming request | 156 | * @tcode: Transaction code of the incoming request |
110 | * @offset: The offset into the 48-bit per-node address space | 157 | * @offset: The offset into the 48-bit per-node address space |
158 | * @source_node_id: Sender node ID | ||
159 | * @destination_node_id: Destination node ID | ||
160 | * @card: The index of the card from which the request came | ||
161 | * @generation: Bus generation in which the request is valid | ||
111 | * @handle: Reference to the kernel-side pending request | 162 | * @handle: Reference to the kernel-side pending request |
112 | * @length: Data length, i.e. the request's payload size in bytes | 163 | * @length: Data length, i.e. the request's payload size in bytes |
113 | * @data: Incoming data, if any | 164 | * @data: Incoming data, if any |
@@ -120,12 +171,42 @@ struct fw_cdev_event_response { | |||
120 | * | 171 | * |
121 | * The payload data for requests carrying data (write and lock requests) | 172 | * The payload data for requests carrying data (write and lock requests) |
122 | * follows immediately and can be accessed through the @data field. | 173 | * follows immediately and can be accessed through the @data field. |
174 | * | ||
175 | * Unlike &fw_cdev_event_request, @tcode of lock requests is one of the | ||
176 | * firewire-core specific %TCODE_LOCK_MASK_SWAP...%TCODE_LOCK_VENDOR_DEPENDENT, | ||
177 | * i.e. encodes the extended transaction code. | ||
178 | * | ||
179 | * @card may differ from &fw_cdev_get_info.card because requests are received | ||
180 | * from all cards of the Linux host. @source_node_id, @destination_node_id, and | ||
181 | * @generation pertain to that card. Destination node ID and bus generation may | ||
182 | * therefore differ from the corresponding fields of the last | ||
183 | * &fw_cdev_event_bus_reset. | ||
184 | * | ||
185 | * @destination_node_id may also differ from the current node ID because of a | ||
186 | * non-local bus ID part or in case of a broadcast write request. Note, a | ||
187 | * client must call an %FW_CDEV_IOC_SEND_RESPONSE ioctl even in case of a | ||
188 | * broadcast write request; the kernel will then release the kernel-side pending | ||
189 | * request but will not actually send a response packet. | ||
190 | * | ||
191 | * In case of a write request to FCP_REQUEST or FCP_RESPONSE, the kernel already | ||
192 | * sent a write response immediately after the request was received; in this | ||
193 | * case the client must still call an %FW_CDEV_IOC_SEND_RESPONSE ioctl to | ||
194 | * release the kernel-side pending request, though another response won't be | ||
195 | * sent. | ||
196 | * | ||
197 | * If the client subsequently needs to initiate requests to the sender node of | ||
198 | * an &fw_cdev_event_request2, it needs to use a device file with matching | ||
199 | * card index, node ID, and generation for outbound requests. | ||
123 | */ | 200 | */ |
124 | struct fw_cdev_event_request { | 201 | struct fw_cdev_event_request2 { |
125 | __u64 closure; | 202 | __u64 closure; |
126 | __u32 type; | 203 | __u32 type; |
127 | __u32 tcode; | 204 | __u32 tcode; |
128 | __u64 offset; | 205 | __u64 offset; |
206 | __u32 source_node_id; | ||
207 | __u32 destination_node_id; | ||
208 | __u32 card; | ||
209 | __u32 generation; | ||
129 | __u32 handle; | 210 | __u32 handle; |
130 | __u32 length; | 211 | __u32 length; |
131 | __u32 data[0]; | 212 | __u32 data[0]; |
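As the comment above notes, every &fw_cdev_event_request2 must be answered with an %FW_CDEV_IOC_SEND_RESPONSE ioctl so the kernel can release the pending request, even for broadcast and FCP writes where no response packet goes out on the wire. A minimal userspace sketch of that step, assuming the event has already been read() from the device file into buf and omitting error handling:

#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>
#include <linux/firewire-constants.h>

static void complete_request2(int fd, const void *buf)
{
        const struct fw_cdev_event_request2 *req = buf;
        struct fw_cdev_send_response rsp = {
                .rcode  = RCODE_COMPLETE,       /* report success */
                .length = 0,                    /* write responses carry no payload */
                .data   = 0,
                .handle = req->handle,          /* kernel-side pending request */
        };

        ioctl(fd, FW_CDEV_IOC_SEND_RESPONSE, &rsp);
}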
@@ -141,26 +222,43 @@ struct fw_cdev_event_request { | |||
141 | * @header: Stripped headers, if any | 222 | * @header: Stripped headers, if any |
142 | * | 223 | * |
143 | * This event is sent when the controller has completed an &fw_cdev_iso_packet | 224 | * This event is sent when the controller has completed an &fw_cdev_iso_packet |
144 | * with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers | 225 | * with the %FW_CDEV_ISO_INTERRUPT bit set. |
145 | * stripped of all packets up until and including the interrupt packet are | ||
146 | * returned in the @header field. The amount of header data per packet is as | ||
147 | * specified at iso context creation by &fw_cdev_create_iso_context.header_size. | ||
148 | * | 226 | * |
149 | * In version 1 of this ABI, header data consisted of the 1394 isochronous | 227 | * Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT): |
150 | * packet header, followed by quadlets from the packet payload if | ||
151 | * &fw_cdev_create_iso_context.header_size > 4. | ||
152 | * | 228 | * |
153 | * In version 2 of this ABI, header data consist of the 1394 isochronous | 229 | * In version 3 and some implementations of version 2 of the ABI, &header_length |
154 | * packet header, followed by a timestamp quadlet if | 230 | * is a multiple of 4 and &header contains timestamps of all packets up until |
155 | * &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the | 231 | * the interrupt packet. The format of the timestamps is as described below for |
156 | * packet payload if &fw_cdev_create_iso_context.header_size > 8. | 232 | * isochronous reception. In version 1 of the ABI, &header_length was 0. |
157 | * | 233 | * |
158 | * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2. | 234 | * Isochronous receive events (context type %FW_CDEV_ISO_CONTEXT_RECEIVE): |
235 | * | ||
236 | * The headers stripped of all packets up until and including the interrupt | ||
237 | * packet are returned in the @header field. The amount of header data per | ||
238 | * packet is as specified at iso context creation by | ||
239 | * &fw_cdev_create_iso_context.header_size. | ||
240 | * | ||
241 | * Hence, _interrupt.header_length / _context.header_size is the number of | ||
242 | * packets received in this interrupt event. The client can now iterate | ||
243 | * through the mmap()'ed DMA buffer according to this number of packets and | ||
244 | * to the buffer sizes as the client specified in &fw_cdev_queue_iso. | ||
245 | * | ||
246 | * Since version 2 of this ABI, the portion for each packet in _interrupt.header | ||
247 | * consists of the 1394 isochronous packet header, followed by a timestamp | ||
248 | * quadlet if &fw_cdev_create_iso_context.header_size > 4, followed by quadlets | ||
249 | * from the packet payload if &fw_cdev_create_iso_context.header_size > 8. | ||
159 | * | 250 | * |
160 | * Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel, | 251 | * Format of 1394 iso packet header: 16 bits data_length, 2 bits tag, 6 bits |
161 | * 4 bits tcode, 4 bits sy, in big endian byte order. Format of timestamp: | 252 | * channel, 4 bits tcode, 4 bits sy, in big endian byte order. |
162 | * 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte | 253 | * data_length is the actual received size of the packet without the four |
163 | * order. | 254 | * 1394 iso packet header bytes. |
255 | * | ||
256 | * Format of timestamp: 16 bits invalid, 3 bits cycleSeconds, 13 bits | ||
257 | * cycleCount, in big endian byte order. | ||
258 | * | ||
259 | * In version 1 of the ABI, no timestamp quadlet was inserted; instead, payload | ||
260 | * data followed directly after the 1394 iso header if header_size > 4. | ||
261 | * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2. | ||
164 | */ | 262 | */ |
165 | struct fw_cdev_event_iso_interrupt { | 263 | struct fw_cdev_event_iso_interrupt { |
166 | __u64 closure; | 264 | __u64 closure; |
@@ -171,6 +269,43 @@ struct fw_cdev_event_iso_interrupt { | |||
171 | }; | 269 | }; |
172 | 270 | ||
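A minimal sketch of the header iteration described above, for a single-channel receive context; header_size is assumed to be the value that was passed to %FW_CDEV_IOC_CREATE_ISO_CONTEXT, and the helper name is invented here.

#include <arpa/inet.h>          /* ntohl(): stripped headers are big endian */
#include <stdio.h>
#include <linux/firewire-cdev.h>

static void dump_rx_headers(const struct fw_cdev_event_iso_interrupt *e,
                            unsigned int header_size)
{
        unsigned int i, packets = e->header_length / header_size;

        for (i = 0; i < packets; i++) {
                /* first quadlet of each per-packet header: 1394 iso header */
                __u32 h = ntohl(e->header[i * header_size / 4]);
                unsigned int data_length = h >> 16;
                unsigned int channel     = (h >> 8) & 0x3f;

                printf("packet %u: channel %u, %u payload bytes\n",
                       i, channel, data_length);
        }
}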
173 | /** | 271 | /** |
272 | * struct fw_cdev_event_iso_interrupt_mc - An iso buffer chunk was completed | ||
273 | * @closure: See &fw_cdev_event_common; | ||
274 | * set by %FW_CDEV_CREATE_ISO_CONTEXT ioctl | ||
275 | * @type: %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL | ||
276 | * @completed: Offset into the receive buffer; data before this offset is valid | ||
277 | * | ||
278 | * This event is sent in multichannel contexts (context type | ||
279 | * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL) for &fw_cdev_iso_packet buffer | ||
280 | * chunks that have the %FW_CDEV_ISO_INTERRUPT bit set. Whether this happens | ||
281 | * when a packet is completed and/or when a buffer chunk is completed depends | ||
282 | * on the hardware implementation. | ||
283 | * | ||
284 | * The buffer is continuously filled with the following data, per packet: | ||
285 | * - the 1394 iso packet header as described at &fw_cdev_event_iso_interrupt, | ||
286 | * but in little endian byte order, | ||
287 | * - packet payload (as many bytes as specified in the data_length field of | ||
288 | * the 1394 iso packet header) in big endian byte order, | ||
289 | * - 0...3 padding bytes as needed to align the following trailer quadlet, | ||
290 | * - trailer quadlet, containing the reception timestamp as described at | ||
291 | * &fw_cdev_event_iso_interrupt, but in little endian byte order. | ||
292 | * | ||
293 | * Hence the per-packet size is data_length (rounded up to a multiple of 4) + 8. | ||
294 | * When processing the data, stop before a packet that would cross the | ||
295 | * @completed offset. | ||
296 | * | ||
297 | * A packet near the end of a buffer chunk will typically spill over into the | ||
298 | * next queued buffer chunk. It is the responsibility of the client to check | ||
299 | * for this condition, assemble a broken-up packet from its parts, and not to | ||
300 | * re-queue any buffer chunks in which as yet unread packet parts reside. | ||
301 | */ | ||
302 | struct fw_cdev_event_iso_interrupt_mc { | ||
303 | __u64 closure; | ||
304 | __u32 type; | ||
305 | __u32 completed; | ||
306 | }; | ||
307 | |||
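A sketch of the parsing rule above. buf is assumed to point at the start of the mmap()'ed buffer, pos is the client's current read offset, and completed comes from this event; packets crossing @completed are left for later reassembly by the caller.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static size_t consume_mc_packets(const uint8_t *buf, size_t pos, size_t completed)
{
        while (pos + 4 <= completed) {
                /* per-packet header quadlet is stored in little endian order */
                uint32_t h = buf[pos] | buf[pos + 1] << 8 |
                             buf[pos + 2] << 16 | (uint32_t)buf[pos + 3] << 24;
                size_t data_length = h >> 16;
                size_t packet_size = ((data_length + 3) & ~(size_t)3) + 8;

                if (pos + packet_size > completed)
                        break;  /* packet spills into the next buffer chunk */

                printf("channel %u, %zu payload bytes\n",
                       (unsigned int)((h >> 8) & 0x3f), data_length);
                pos += packet_size;
        }
        return pos;     /* new read offset */
}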
308 | /** | ||
174 | * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed | 309 | * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed |
175 | * @closure: See &fw_cdev_event_common; | 310 | * @closure: See &fw_cdev_event_common; |
176 | * set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl | 311 | * set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl |
@@ -200,15 +335,45 @@ struct fw_cdev_event_iso_resource { | |||
200 | }; | 335 | }; |
201 | 336 | ||
202 | /** | 337 | /** |
338 | * struct fw_cdev_event_phy_packet - A PHY packet was transmitted or received | ||
339 | * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_PHY_PACKET | ||
340 | * or %FW_CDEV_IOC_RECEIVE_PHY_PACKETS ioctl | ||
341 | * @type: %FW_CDEV_EVENT_PHY_PACKET_SENT or %..._RECEIVED | ||
342 | * @rcode: %RCODE_..., indicates success or failure of transmission | ||
343 | * @length: Data length in bytes | ||
344 | * @data: Incoming data | ||
345 | * | ||
346 | * If @type is %FW_CDEV_EVENT_PHY_PACKET_SENT, @length is 0 and @data empty, | ||
347 | * except in case of a ping packet: Then, @length is 4, and @data[0] is the | ||
348 | * ping time in 49.152MHz clocks if @rcode is %RCODE_COMPLETE. | ||
349 | * | ||
350 | * If @type is %FW_CDEV_EVENT_PHY_PACKET_RECEIVED, @length is 8 and @data | ||
351 | * consists of the two PHY packet quadlets, in host byte order. | ||
352 | */ | ||
353 | struct fw_cdev_event_phy_packet { | ||
354 | __u64 closure; | ||
355 | __u32 type; | ||
356 | __u32 rcode; | ||
357 | __u32 length; | ||
358 | __u32 data[0]; | ||
359 | }; | ||
360 | |||
361 | /** | ||
203 | * union fw_cdev_event - Convenience union of fw_cdev_event_ types | 362 | * union fw_cdev_event - Convenience union of fw_cdev_event_ types |
204 | * @common: Valid for all types | 363 | * @common: Valid for all types |
205 | * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET | 364 | * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET |
206 | * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE | 365 | * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE |
207 | * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST | 366 | * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST |
208 | * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT | 367 | * @request2: Valid if @common.type == %FW_CDEV_EVENT_REQUEST2 |
209 | * @iso_resource: Valid if @common.type == | 368 | * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT |
369 | * @iso_interrupt_mc: Valid if @common.type == | ||
370 | * %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL | ||
371 | * @iso_resource: Valid if @common.type == | ||
210 | * %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or | 372 | * %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or |
211 | * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED | 373 | * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED |
374 | * @phy_packet: Valid if @common.type == | ||
375 | * %FW_CDEV_EVENT_PHY_PACKET_SENT or | ||
376 | * %FW_CDEV_EVENT_PHY_PACKET_RECEIVED | ||
212 | * | 377 | * |
213 | * Convenience union for userspace use. Events could be read(2) into an | 378 | * Convenience union for userspace use. Events could be read(2) into an |
214 | * appropriately aligned char buffer and then cast to this union for further | 379 | * appropriately aligned char buffer and then cast to this union for further |
@@ -223,8 +388,11 @@ union fw_cdev_event { | |||
223 | struct fw_cdev_event_bus_reset bus_reset; | 388 | struct fw_cdev_event_bus_reset bus_reset; |
224 | struct fw_cdev_event_response response; | 389 | struct fw_cdev_event_response response; |
225 | struct fw_cdev_event_request request; | 390 | struct fw_cdev_event_request request; |
391 | struct fw_cdev_event_request2 request2; /* added in 2.6.36 */ | ||
226 | struct fw_cdev_event_iso_interrupt iso_interrupt; | 392 | struct fw_cdev_event_iso_interrupt iso_interrupt; |
227 | struct fw_cdev_event_iso_resource iso_resource; | 393 | struct fw_cdev_event_iso_interrupt_mc iso_interrupt_mc; /* added in 2.6.36 */ |
394 | struct fw_cdev_event_iso_resource iso_resource; /* added in 2.6.30 */ | ||
395 | struct fw_cdev_event_phy_packet phy_packet; /* added in 2.6.36 */ | ||
228 | }; | 396 | }; |
229 | 397 | ||
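A sketch of the read(2)-and-cast pattern suggested here; the buffer size and the handled event types are arbitrary choices of this example.

#include <stdio.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

static int handle_one_event(int fd)
{
        char buf[16 * 1024] __attribute__((aligned(8)));
        const union fw_cdev_event *ev = (const union fw_cdev_event *)buf;
        ssize_t len = read(fd, buf, sizeof(buf));

        if (len < (ssize_t)sizeof(struct fw_cdev_event_common))
                return -1;

        switch (ev->common.type) {
        case FW_CDEV_EVENT_BUS_RESET:
                printf("bus reset, generation %u\n", ev->bus_reset.generation);
                break;
        case FW_CDEV_EVENT_REQUEST2:
                printf("request from node 0x%x\n", ev->request2.source_node_id);
                break;
        case FW_CDEV_EVENT_ISO_INTERRUPT:
                printf("iso interrupt, %u header bytes\n",
                       ev->iso_interrupt.header_length);
                break;
        default:
                break;
        }
        return 0;
}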
230 | /* available since kernel version 2.6.22 */ | 398 | /* available since kernel version 2.6.22 */ |
@@ -256,23 +424,46 @@ union fw_cdev_event { | |||
256 | /* available since kernel version 2.6.34 */ | 424 | /* available since kernel version 2.6.34 */ |
257 | #define FW_CDEV_IOC_GET_CYCLE_TIMER2 _IOWR('#', 0x14, struct fw_cdev_get_cycle_timer2) | 425 | #define FW_CDEV_IOC_GET_CYCLE_TIMER2 _IOWR('#', 0x14, struct fw_cdev_get_cycle_timer2) |
258 | 426 | ||
427 | /* available since kernel version 2.6.36 */ | ||
428 | #define FW_CDEV_IOC_SEND_PHY_PACKET _IOWR('#', 0x15, struct fw_cdev_send_phy_packet) | ||
429 | #define FW_CDEV_IOC_RECEIVE_PHY_PACKETS _IOW('#', 0x16, struct fw_cdev_receive_phy_packets) | ||
430 | #define FW_CDEV_IOC_SET_ISO_CHANNELS _IOW('#', 0x17, struct fw_cdev_set_iso_channels) | ||
431 | |||
259 | /* | 432 | /* |
260 | * FW_CDEV_VERSION History | 433 | * ABI version history |
261 | * 1 (2.6.22) - initial version | 434 | * 1 (2.6.22) - initial version |
435 | * (2.6.24) - added %FW_CDEV_IOC_GET_CYCLE_TIMER | ||
262 | * 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if | 436 | * 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if |
263 | * &fw_cdev_create_iso_context.header_size is 8 or more | 437 | * &fw_cdev_create_iso_context.header_size is 8 or more |
438 | * - added %FW_CDEV_IOC_*_ISO_RESOURCE*, | ||
439 | * %FW_CDEV_IOC_GET_SPEED, %FW_CDEV_IOC_SEND_BROADCAST_REQUEST, | ||
440 | * %FW_CDEV_IOC_SEND_STREAM_PACKET | ||
264 | * (2.6.32) - added time stamp to xmit &fw_cdev_event_iso_interrupt | 441 | * (2.6.32) - added time stamp to xmit &fw_cdev_event_iso_interrupt |
265 | * (2.6.33) - IR has always packet-per-buffer semantics now, not one of | 442 | * (2.6.33) - IR has always packet-per-buffer semantics now, not one of |
266 | * dual-buffer or packet-per-buffer depending on hardware | 443 | * dual-buffer or packet-per-buffer depending on hardware |
444 | * - shared use and auto-response for FCP registers | ||
267 | * 3 (2.6.34) - made &fw_cdev_get_cycle_timer reliable | 445 | * 3 (2.6.34) - made &fw_cdev_get_cycle_timer reliable |
446 | * - added %FW_CDEV_IOC_GET_CYCLE_TIMER2 | ||
447 | * 4 (2.6.36) - added %FW_CDEV_EVENT_REQUEST2, %FW_CDEV_EVENT_PHY_PACKET_*, | ||
448 | * and &fw_cdev_allocate.region_end | ||
449 | * - implemented &fw_cdev_event_bus_reset.bm_node_id | ||
450 | * - added %FW_CDEV_IOC_SEND_PHY_PACKET, _RECEIVE_PHY_PACKETS | ||
451 | * - added %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL, | ||
452 | * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL, and | ||
453 | * %FW_CDEV_IOC_SET_ISO_CHANNELS | ||
268 | */ | 454 | */ |
269 | #define FW_CDEV_VERSION 3 | 455 | #define FW_CDEV_VERSION 3 /* Meaningless; don't use this macro. */ |
270 | 456 | ||
271 | /** | 457 | /** |
272 | * struct fw_cdev_get_info - General purpose information ioctl | 458 | * struct fw_cdev_get_info - General purpose information ioctl |
273 | * @version: The version field is just a running serial number. | 459 | * @version: The version field is just a running serial number. It is both an |
274 | * We never break backwards compatibility, but may add more | 460 | * input parameter (ABI version implemented by the client) and |
275 | * structs and ioctls in later revisions. | 461 | * output parameter (ABI version implemented by the kernel). |
462 | * A client must not fill in the %FW_CDEV_VERSION defined in an | ||
463 | * included kernel header file, but rather the actual version for which | ||
464 | * the client was implemented. This is necessary for forward | ||
465 | * compatibility. We never break backwards compatibility, but | ||
466 | * may add more structs, events, and ioctls in later revisions. | ||
276 | * @rom_length: If @rom is non-zero, at most rom_length bytes of configuration | 467 | * @rom_length: If @rom is non-zero, at most rom_length bytes of configuration |
277 | * ROM will be copied into that user space address. In either | 468 | * ROM will be copied into that user space address. In either |
278 | * case, @rom_length is updated with the actual length of the | 469 | * case, @rom_length is updated with the actual length of the |
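A sketch of the version handshake described above; the device path and the client ABI version 4 are assumptions of this example.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

static int open_fw_device(const char *path /* e.g. "/dev/fw0" */)
{
        struct fw_cdev_get_info info;
        int fd = open(path, O_RDWR);

        if (fd < 0)
                return -1;

        memset(&info, 0, sizeof(info));
        info.version = 4;       /* ABI version this client was written for */

        if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) < 0) {
                close(fd);
                return -1;
        }
        printf("kernel implements ABI version %u, card index %u\n",
               info.version, info.card);
        return fd;
}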
@@ -339,28 +530,48 @@ struct fw_cdev_send_response { | |||
339 | }; | 530 | }; |
340 | 531 | ||
341 | /** | 532 | /** |
342 | * struct fw_cdev_allocate - Allocate a CSR address range | 533 | * struct fw_cdev_allocate - Allocate a CSR in an address range |
343 | * @offset: Start offset of the address range | 534 | * @offset: Start offset of the address range |
344 | * @closure: To be passed back to userspace in request events | 535 | * @closure: To be passed back to userspace in request events |
345 | * @length: Length of the address range, in bytes | 536 | * @length: Length of the CSR, in bytes |
346 | * @handle: Handle to the allocation, written by the kernel | 537 | * @handle: Handle to the allocation, written by the kernel |
538 | * @region_end: First address above the address range (added in ABI v4, 2.6.36) | ||
347 | * | 539 | * |
348 | * Allocate an address range in the 48-bit address space on the local node | 540 | * Allocate an address range in the 48-bit address space on the local node |
349 | * (the controller). This allows userspace to listen for requests with an | 541 | * (the controller). This allows userspace to listen for requests with an |
350 | * offset within that address range. When the kernel receives a request | 542 | * offset within that address range. Every time the kernel receives a |
351 | * within the range, an &fw_cdev_event_request event will be written back. | 543 | * request within the range, an &fw_cdev_event_request2 event will be emitted. |
352 | * The @closure field is passed back to userspace in the response event. | 544 | * (If the kernel or the client implements ABI version <= 3, an |
545 | * &fw_cdev_event_request will be generated instead.) | ||
546 | * | ||
547 | * The @closure field is passed back to userspace in these request events. | ||
353 | * The @handle field is an out parameter, returning a handle to the allocated | 548 | * The @handle field is an out parameter, returning a handle to the allocated |
354 | * range to be used for later deallocation of the range. | 549 | * range to be used for later deallocation of the range. |
355 | * | 550 | * |
356 | * The address range is allocated on all local nodes. The address allocation | 551 | * The address range is allocated on all local nodes. The address allocation |
357 | * is exclusive except for the FCP command and response registers. | 552 | * is exclusive except for the FCP command and response registers. If an |
553 | * exclusive address region is already in use, the ioctl fails with errno set | ||
554 | * to %EBUSY. | ||
555 | * | ||
556 | * If kernel and client implement ABI version >= 4, the kernel looks up a free | ||
557 | * spot of size @length inside [@offset..@region_end) and, if found, writes | ||
558 | * the start address of the new CSR back in @offset. I.e. @offset is an | ||
559 | * in and out parameter. If this automatic placement of a CSR in a bigger | ||
560 | * address range is not desired, the client simply needs to set @region_end | ||
561 | * = @offset + @length. | ||
562 | * | ||
563 | * If the kernel or the client implements ABI version <= 3, @region_end is | ||
564 | * ignored and effectively assumed to be @offset + @length. | ||
565 | * | ||
566 | * @region_end is only present in a kernel header >= 2.6.36. If necessary, | ||
567 | * this can for example be tested by #ifdef FW_CDEV_EVENT_REQUEST2. | ||
358 | */ | 568 | */ |
359 | struct fw_cdev_allocate { | 569 | struct fw_cdev_allocate { |
360 | __u64 offset; | 570 | __u64 offset; |
361 | __u64 closure; | 571 | __u64 closure; |
362 | __u32 length; | 572 | __u32 length; |
363 | __u32 handle; | 573 | __u32 handle; |
574 | __u64 region_end; /* available since kernel version 2.6.36 */ | ||
364 | }; | 575 | }; |
365 | 576 | ||
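A sketch of an ABI v4 allocation with automatic placement inside a window; the address window and CSR size below are arbitrary example values, and the helper name is invented.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

static int allocate_csr(int fd, __u64 *offset_out, __u32 *handle_out)
{
        struct fw_cdev_allocate a;

        memset(&a, 0, sizeof(a));
        a.offset     = 0x0000f0000000ULL;  /* start of the search window */
        a.region_end = 0x0000f0010000ULL;  /* first address above the window */
        a.length     = 0x800;              /* CSR size in bytes */
        a.closure    = 0;                  /* passed back in request events */

        if (ioctl(fd, FW_CDEV_IOC_ALLOCATE, &a) < 0)
                return -1;                 /* e.g. EBUSY: window occupied */

        *offset_out = a.offset;            /* placement chosen by the kernel */
        *handle_out = a.handle;            /* for FW_CDEV_IOC_DEALLOCATE */
        return 0;
}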
366 | /** | 577 | /** |
@@ -382,9 +593,14 @@ struct fw_cdev_deallocate { | |||
382 | * Initiate a bus reset for the bus this device is on. The bus reset can be | 593 | * Initiate a bus reset for the bus this device is on. The bus reset can be |
383 | * either the original (long) bus reset or the arbitrated (short) bus reset | 594 | * either the original (long) bus reset or the arbitrated (short) bus reset |
384 | * introduced in 1394a-2000. | 595 | * introduced in 1394a-2000. |
596 | * | ||
597 | * The ioctl returns immediately. A subsequent &fw_cdev_event_bus_reset | ||
598 | * indicates when the reset actually happened. Since ABI v4, this may be | ||
599 | * considerably later than the ioctl because the kernel ensures a grace period | ||
600 | * between subsequent bus resets as per the IEEE 1394 bus management specification. | ||
385 | */ | 601 | */ |
386 | struct fw_cdev_initiate_bus_reset { | 602 | struct fw_cdev_initiate_bus_reset { |
387 | __u32 type; /* FW_CDEV_SHORT_RESET or FW_CDEV_LONG_RESET */ | 603 | __u32 type; |
388 | }; | 604 | }; |
389 | 605 | ||
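For example, a client could request an arbitrated (short) reset as sketched below and then wait for the next &fw_cdev_event_bus_reset.

#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

static int request_short_reset(int fd)
{
        struct fw_cdev_initiate_bus_reset r = { .type = FW_CDEV_SHORT_RESET };

        return ioctl(fd, FW_CDEV_IOC_INITIATE_BUS_RESET, &r);
}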
390 | /** | 606 | /** |
@@ -408,9 +624,10 @@ struct fw_cdev_initiate_bus_reset { | |||
408 | * | 624 | * |
409 | * @immediate, @key, and @data array elements are CPU-endian quadlets. | 625 | * @immediate, @key, and @data array elements are CPU-endian quadlets. |
410 | * | 626 | * |
411 | * If successful, the kernel adds the descriptor and writes back a handle to the | 627 | * If successful, the kernel adds the descriptor and writes back a @handle to |
412 | * kernel-side object to be used for later removal of the descriptor block and | 628 | * the kernel-side object to be used for later removal of the descriptor block |
413 | * immediate key. | 629 | * and immediate key. The kernel will also generate a bus reset to signal the |
630 | * change of the configuration ROM to other nodes. | ||
414 | * | 631 | * |
415 | * This ioctl affects the configuration ROMs of all local nodes. | 632 | * This ioctl affects the configuration ROMs of all local nodes. |
416 | * The ioctl only succeeds on device files which represent a local node. | 633 | * The ioctl only succeeds on device files which represent a local node. |
@@ -429,38 +646,50 @@ struct fw_cdev_add_descriptor { | |||
429 | * descriptor was added | 646 | * descriptor was added |
430 | * | 647 | * |
431 | * Remove a descriptor block and accompanying immediate key from the local | 648 | * Remove a descriptor block and accompanying immediate key from the local |
432 | * nodes' configuration ROMs. | 649 | * nodes' configuration ROMs. The kernel will also generate a bus reset to |
650 | * signal the change of the configuration ROM to other nodes. | ||
433 | */ | 651 | */ |
434 | struct fw_cdev_remove_descriptor { | 652 | struct fw_cdev_remove_descriptor { |
435 | __u32 handle; | 653 | __u32 handle; |
436 | }; | 654 | }; |
437 | 655 | ||
438 | #define FW_CDEV_ISO_CONTEXT_TRANSMIT 0 | 656 | #define FW_CDEV_ISO_CONTEXT_TRANSMIT 0 |
439 | #define FW_CDEV_ISO_CONTEXT_RECEIVE 1 | 657 | #define FW_CDEV_ISO_CONTEXT_RECEIVE 1 |
658 | #define FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2 /* added in 2.6.36 */ | ||
440 | 659 | ||
441 | /** | 660 | /** |
442 | * struct fw_cdev_create_iso_context - Create a context for isochronous IO | 661 | * struct fw_cdev_create_iso_context - Create a context for isochronous I/O |
443 | * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE | 662 | * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE or |
444 | * @header_size: Header size to strip for receive contexts | 663 | * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL |
445 | * @channel: Channel to bind to | 664 | * @header_size: Header size to strip in single-channel reception |
446 | * @speed: Speed for transmit contexts | 665 | * @channel: Channel to bind to in single-channel reception or transmission |
447 | * @closure: To be returned in &fw_cdev_event_iso_interrupt | 666 | * @speed: Transmission speed |
667 | * @closure: To be returned in &fw_cdev_event_iso_interrupt or | ||
668 | * &fw_cdev_event_iso_interrupt_multichannel | ||
448 | * @handle: Handle to context, written back by kernel | 669 | * @handle: Handle to context, written back by kernel |
449 | * | 670 | * |
450 | * Prior to sending or receiving isochronous I/O, a context must be created. | 671 | * Prior to sending or receiving isochronous I/O, a context must be created. |
451 | * The context records information about the transmit or receive configuration | 672 | * The context records information about the transmit or receive configuration |
452 | * and typically maps to an underlying hardware resource. A context is set up | 673 | * and typically maps to an underlying hardware resource. A context is set up |
453 | * for either sending or receiving. It is bound to a specific isochronous | 674 | * for either sending or receiving. It is bound to a specific isochronous |
454 | * channel. | 675 | * @channel. |
676 | * | ||
677 | * In case of multichannel reception, @header_size and @channel are ignored | ||
678 | * and the channels are selected by %FW_CDEV_IOC_SET_ISO_CHANNELS. | ||
679 | * | ||
680 | * For %FW_CDEV_ISO_CONTEXT_RECEIVE contexts, @header_size must be at least 4 | ||
681 | * and must be a multiple of 4. It is ignored in other context types. | ||
682 | * | ||
683 | * @speed is ignored in receive context types. | ||
455 | * | 684 | * |
456 | * If a context was successfully created, the kernel writes back a handle to the | 685 | * If a context was successfully created, the kernel writes back a handle to the |
457 | * context, which must be passed in for subsequent operations on that context. | 686 | * context, which must be passed in for subsequent operations on that context. |
458 | * | 687 | * |
459 | * For receive contexts, @header_size must be at least 4 and must be a multiple | 688 | * Limitations: |
460 | * of 4. | 689 | * No more than one iso context can be created per fd. |
461 | * | 690 | * The total number of contexts that all userspace and kernelspace drivers can |
462 | * Note that the effect of a @header_size > 4 depends on | 691 | * create on a card at a time is a hardware limit, typically 4 or 8 contexts per |
463 | * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt. | 692 | * direction, and of them at most one multichannel receive context. |
464 | */ | 693 | */ |
465 | struct fw_cdev_create_iso_context { | 694 | struct fw_cdev_create_iso_context { |
466 | __u32 type; | 695 | __u32 type; |
@@ -471,6 +700,22 @@ struct fw_cdev_create_iso_context { | |||
471 | __u32 handle; | 700 | __u32 handle; |
472 | }; | 701 | }; |
473 | 702 | ||
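A sketch of single-channel receive context creation; the channel number and header_size are example values (header_size 8 = 1394 iso header plus timestamp quadlet per packet).

#include <string.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

static int create_rx_context(int fd, __u32 *handle_out)
{
        struct fw_cdev_create_iso_context c;

        memset(&c, 0, sizeof(c));
        c.type        = FW_CDEV_ISO_CONTEXT_RECEIVE;
        c.channel     = 5;      /* example channel */
        c.header_size = 8;      /* iso header + timestamp per packet */
        c.closure     = 0;      /* returned in iso interrupt events */

        if (ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &c) < 0)
                return -1;

        *handle_out = c.handle;
        return 0;
}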
703 | /** | ||
704 | * struct fw_cdev_set_iso_channels - Select channels in multichannel reception | ||
705 | * @channels: Bitmask of channels to listen to | ||
706 | * @handle: Handle of the multichannel receive context | ||
707 | * | ||
708 | * @channels is the bitwise or of 1ULL << n for each channel n to listen to. | ||
709 | * | ||
710 | * The ioctl fails with errno %EBUSY if there is already another receive context | ||
711 | * on a channel in @channels. In that case, the bitmask of all unoccupied | ||
712 | * channels is returned in @channels. | ||
713 | */ | ||
714 | struct fw_cdev_set_iso_channels { | ||
715 | __u64 channels; | ||
716 | __u32 handle; | ||
717 | }; | ||
718 | |||
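A sketch of channel selection for a context of type %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL; the channel numbers are examples.

#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

static int select_channels(int fd, __u32 ctx_handle)
{
        struct fw_cdev_set_iso_channels sic = {
                .channels = (1ULL << 1) | (1ULL << 2) | (1ULL << 40),
                .handle   = ctx_handle,
        };

        /* On EBUSY, sic.channels is rewritten with the unoccupied channels. */
        return ioctl(fd, FW_CDEV_IOC_SET_ISO_CHANNELS, &sic);
}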
474 | #define FW_CDEV_ISO_PAYLOAD_LENGTH(v) (v) | 719 | #define FW_CDEV_ISO_PAYLOAD_LENGTH(v) (v) |
475 | #define FW_CDEV_ISO_INTERRUPT (1 << 16) | 720 | #define FW_CDEV_ISO_INTERRUPT (1 << 16) |
476 | #define FW_CDEV_ISO_SKIP (1 << 17) | 721 | #define FW_CDEV_ISO_SKIP (1 << 17) |
@@ -481,42 +726,72 @@ struct fw_cdev_create_iso_context { | |||
481 | 726 | ||
482 | /** | 727 | /** |
483 | * struct fw_cdev_iso_packet - Isochronous packet | 728 | * struct fw_cdev_iso_packet - Isochronous packet |
484 | * @control: Contains the header length (8 uppermost bits), the sy field | 729 | * @control: Contains the header length (8 uppermost bits), |
485 | * (4 bits), the tag field (2 bits), a sync flag (1 bit), | 730 | * the sy field (4 bits), the tag field (2 bits), a sync flag |
486 | * a skip flag (1 bit), an interrupt flag (1 bit), and the | 731 | * or a skip flag (1 bit), an interrupt flag (1 bit), and the |
487 | * payload length (16 lowermost bits) | 732 | * payload length (16 lowermost bits) |
488 | * @header: Header and payload | 733 | * @header: Header and payload in case of a transmit context. |
489 | * | 734 | * |
490 | * &struct fw_cdev_iso_packet is used to describe isochronous packet queues. | 735 | * &struct fw_cdev_iso_packet is used to describe isochronous packet queues. |
491 | * | ||
492 | * Use the FW_CDEV_ISO_ macros to fill in @control. | 736 | * Use the FW_CDEV_ISO_ macros to fill in @control. |
737 | * The @header array is empty in case of receive contexts. | ||
738 | * | ||
739 | * Context type %FW_CDEV_ISO_CONTEXT_TRANSMIT: | ||
740 | * | ||
741 | * @control.HEADER_LENGTH must be a multiple of 4. It specifies the numbers of | ||
742 | * bytes in @header that will be prepended to the packet's payload. These bytes | ||
743 | * are copied into the kernel and will not be accessed after the ioctl has | ||
744 | * returned. | ||
745 | * | ||
746 | * The @control.SY and TAG fields are copied to the iso packet header. These | ||
747 | * fields are specified by IEEE 1394a and IEC 61883-1. | ||
748 | * | ||
749 | * The @control.SKIP flag specifies that no packet is to be sent in a frame. | ||
750 | * When using this, all other fields except @control.INTERRUPT must be zero. | ||
751 | * | ||
752 | * When a packet with the @control.INTERRUPT flag set has been completed, an | ||
753 | * &fw_cdev_event_iso_interrupt event will be sent. | ||
754 | * | ||
755 | * Context type %FW_CDEV_ISO_CONTEXT_RECEIVE: | ||
756 | * | ||
757 | * @control.HEADER_LENGTH must be a multiple of the context's header_size. | ||
758 | * If the HEADER_LENGTH is larger than the context's header_size, multiple | ||
759 | * packets are queued for this entry. | ||
760 | * | ||
761 | * The @control.SY and TAG fields are ignored. | ||
762 | * | ||
763 | * If the @control.SYNC flag is set, the context drops all packets until a | ||
764 | * packet with a sy field is received which matches &fw_cdev_start_iso.sync. | ||
765 | * | ||
766 | * @control.PAYLOAD_LENGTH defines how many payload bytes can be received for | ||
767 | * one packet (in addition to payload quadlets that have been defined as headers | ||
768 | * and are stripped and returned in the &fw_cdev_event_iso_interrupt structure). | ||
769 | * If more bytes are received, the additional bytes are dropped. If fewer bytes | ||
770 | * are received, the remaining bytes in this part of the payload buffer will not | ||
771 | * be written to, not even by the next packet. I.e., packets received in | ||
772 | * consecutive frames will not necessarily be consecutive in memory. If an | ||
773 | * entry has queued multiple packets, the PAYLOAD_LENGTH is divided equally | ||
774 | * among them. | ||
493 | * | 775 | * |
494 | * For transmit packets, the header length must be a multiple of 4 and specifies | 776 | * When a packet with the @control.INTERRUPT flag set has been completed, an |
495 | * the numbers of bytes in @header that will be prepended to the packet's | ||
496 | * payload; these bytes are copied into the kernel and will not be accessed | ||
497 | * after the ioctl has returned. The sy and tag fields are copied to the iso | ||
498 | * packet header (these fields are specified by IEEE 1394a and IEC 61883-1). | ||
499 | * The skip flag specifies that no packet is to be sent in a frame; when using | ||
500 | * this, all other fields except the interrupt flag must be zero. | ||
501 | * | ||
502 | * For receive packets, the header length must be a multiple of the context's | ||
503 | * header size; if the header length is larger than the context's header size, | ||
504 | * multiple packets are queued for this entry. The sy and tag fields are | ||
505 | * ignored. If the sync flag is set, the context drops all packets until | ||
506 | * a packet with a matching sy field is received (the sync value to wait for is | ||
507 | * specified in the &fw_cdev_start_iso structure). The payload length defines | ||
508 | * how many payload bytes can be received for one packet (in addition to payload | ||
509 | * quadlets that have been defined as headers and are stripped and returned in | ||
510 | * the &fw_cdev_event_iso_interrupt structure). If more bytes are received, the | ||
511 | * additional bytes are dropped. If less bytes are received, the remaining | ||
512 | * bytes in this part of the payload buffer will not be written to, not even by | ||
513 | * the next packet, i.e., packets received in consecutive frames will not | ||
514 | * necessarily be consecutive in memory. If an entry has queued multiple | ||
515 | * packets, the payload length is divided equally among them. | ||
516 | * | ||
517 | * When a packet with the interrupt flag set has been completed, the | ||
518 | * &fw_cdev_event_iso_interrupt event will be sent. An entry that has queued | 777 | * &fw_cdev_event_iso_interrupt event will be sent. An entry that has queued |
519 | * multiple receive packets is completed when its last packet is completed. | 778 | * multiple receive packets is completed when its last packet is completed. |
779 | * | ||
780 | * Context type %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | ||
781 | * | ||
782 | * Here, &fw_cdev_iso_packet would be more aptly named _iso_buffer_chunk since | ||
783 | * it specifies a chunk of the mmap()'ed buffer, while the number and alignment | ||
784 | * of packets to be placed into the buffer chunk are not known beforehand. | ||
785 | * | ||
786 | * @control.PAYLOAD_LENGTH is the size of the buffer chunk and specifies room | ||
787 | * for header, payload, padding, and trailer bytes of one or more packets. | ||
788 | * It must be a multiple of 4. | ||
789 | * | ||
790 | * @control.HEADER_LENGTH, TAG and SY are ignored. SYNC is treated as described | ||
791 | * for single-channel reception. | ||
792 | * | ||
793 | * When a buffer chunk with the @control.INTERRUPT flag set has been filled | ||
794 | * entirely, an &fw_cdev_event_iso_interrupt_mc event will be sent. | ||
520 | */ | 795 | */ |
521 | struct fw_cdev_iso_packet { | 796 | struct fw_cdev_iso_packet { |
522 | __u32 control; | 797 | __u32 control; |
@@ -525,9 +800,9 @@ struct fw_cdev_iso_packet { | |||
525 | 800 | ||
526 | /** | 801 | /** |
527 | * struct fw_cdev_queue_iso - Queue isochronous packets for I/O | 802 | * struct fw_cdev_queue_iso - Queue isochronous packets for I/O |
528 | * @packets: Userspace pointer to packet data | 803 | * @packets: Userspace pointer to an array of &fw_cdev_iso_packet |
529 | * @data: Pointer into mmap()'ed payload buffer | 804 | * @data: Pointer into mmap()'ed payload buffer |
530 | * @size: Size of packet data in bytes | 805 | * @size: Size of the @packets array, in bytes |
531 | * @handle: Isochronous context handle | 806 | * @handle: Isochronous context handle |
532 | * | 807 | * |
533 | * Queue a number of isochronous packets for reception or transmission. | 808 | * Queue a number of isochronous packets for reception or transmission. |
@@ -540,6 +815,9 @@ struct fw_cdev_iso_packet { | |||
540 | * The kernel may or may not queue all packets, but will write back updated | 815 | * The kernel may or may not queue all packets, but will write back updated |
541 | * values of the @packets, @data and @size fields, so the ioctl can be | 816 | * values of the @packets, @data and @size fields, so the ioctl can be |
542 | * resubmitted easily. | 817 | * resubmitted easily. |
818 | * | ||
819 | * In case of a multichannel receive context, @data must be quadlet-aligned | ||
820 | * relative to the buffer start. | ||
543 | */ | 821 | */ |
544 | struct fw_cdev_queue_iso { | 822 | struct fw_cdev_queue_iso { |
545 | __u64 packets; | 823 | __u64 packets; |
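A sketch of queueing one entry that describes several receive packets and then starting the context. The sizes assume the header_size of 8 used in the earlier context example; payload_offset is the caller's chosen offset into the mmap()'ed buffer and must leave room for 16 KiB of payload.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

static int queue_and_start(int fd, __u32 ctx_handle, __u64 payload_offset)
{
        struct fw_cdev_iso_packet p;
        struct fw_cdev_queue_iso q;
        struct fw_cdev_start_iso s;

        /* One entry for 16 packets: 16 * 8 bytes of stripped headers,
         * 16 KiB of payload split into 1024 bytes per packet. */
        memset(&p, 0, sizeof(p));
        p.control = FW_CDEV_ISO_HEADER_LENGTH(16 * 8) |
                    FW_CDEV_ISO_PAYLOAD_LENGTH(16 * 1024) |
                    FW_CDEV_ISO_INTERRUPT;

        memset(&q, 0, sizeof(q));
        q.packets = (__u64)(unsigned long)&p;
        q.size    = sizeof(p);
        q.data    = payload_offset;
        q.handle  = ctx_handle;
        if (ioctl(fd, FW_CDEV_IOC_QUEUE_ISO, &q) < 0)
                return -1;

        memset(&s, 0, sizeof(s));
        s.cycle  = -1;          /* start as soon as possible */
        s.tags   = FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS;
        s.sync   = 0;
        s.handle = ctx_handle;
        return ioctl(fd, FW_CDEV_IOC_START_ISO, &s);
}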
@@ -698,4 +976,39 @@ struct fw_cdev_send_stream_packet { | |||
698 | __u32 speed; | 976 | __u32 speed; |
699 | }; | 977 | }; |
700 | 978 | ||
979 | /** | ||
980 | * struct fw_cdev_send_phy_packet - send a PHY packet | ||
981 | * @closure: Passed back to userspace in the PHY-packet-sent event | ||
982 | * @data: First and second quadlet of the PHY packet | ||
983 | * @generation: The bus generation where packet is valid | ||
984 | * | ||
985 | * The %FW_CDEV_IOC_SEND_PHY_PACKET ioctl sends a PHY packet to all nodes | ||
986 | * on the same card as this device. After transmission, an | ||
987 | * %FW_CDEV_EVENT_PHY_PACKET_SENT event is generated. | ||
988 | * | ||
989 | * The payload @data[] shall be specified in host byte order. Usually, | ||
990 | * @data[1] needs to be the bitwise inverse of @data[0]. VersaPHY packets | ||
991 | * are an exception to this rule. | ||
992 | * | ||
993 | * The ioctl is only permitted on device files which represent a local node. | ||
994 | */ | ||
995 | struct fw_cdev_send_phy_packet { | ||
996 | __u64 closure; | ||
997 | __u32 data[2]; | ||
998 | __u32 generation; | ||
999 | }; | ||
1000 | |||
1001 | /** | ||
1002 | * struct fw_cdev_receive_phy_packets - start reception of PHY packets | ||
1003 | * @closure: Passed back to userspace in phy packet events | ||
1004 | * | ||
1005 | * This ioctl activates issuing of %FW_CDEV_EVENT_PHY_PACKET_RECEIVED due to | ||
1006 | * incoming PHY packets from any node on the same bus as the device. | ||
1007 | * | ||
1008 | * The ioctl is only permitted on device files which represent a local node. | ||
1009 | */ | ||
1010 | struct fw_cdev_receive_phy_packets { | ||
1011 | __u64 closure; | ||
1012 | }; | ||
1013 | |||
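A sketch of the two PHY packet ioctls; the quadlet value is only a placeholder, and generation is expected to come from the most recent &fw_cdev_event_bus_reset.

#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

static int phy_packet_demo(int fd, __u32 generation)
{
        struct fw_cdev_send_phy_packet send = {
                .closure    = 0,
                .data       = { 0x00148000u, ~0x00148000u }, /* 2nd quadlet = inverse */
                .generation = generation,
        };
        struct fw_cdev_receive_phy_packets recv = { .closure = 0 };

        if (ioctl(fd, FW_CDEV_IOC_SEND_PHY_PACKET, &send) < 0)
                return -1;
        /* From now on, FW_CDEV_EVENT_PHY_PACKET_RECEIVED events are delivered. */
        return ioctl(fd, FW_CDEV_IOC_RECEIVE_PHY_PACKETS, &recv);
}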
701 | #endif /* _LINUX_FIREWIRE_CDEV_H */ | 1014 | #endif /* _LINUX_FIREWIRE_CDEV_H */ |
diff --git a/include/linux/firewire.h b/include/linux/firewire.h index 72e2b8ac2a5a..1cd637ef62d2 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h | |||
@@ -32,11 +32,13 @@ | |||
32 | #define CSR_CYCLE_TIME 0x200 | 32 | #define CSR_CYCLE_TIME 0x200 |
33 | #define CSR_BUS_TIME 0x204 | 33 | #define CSR_BUS_TIME 0x204 |
34 | #define CSR_BUSY_TIMEOUT 0x210 | 34 | #define CSR_BUSY_TIMEOUT 0x210 |
35 | #define CSR_PRIORITY_BUDGET 0x218 | ||
35 | #define CSR_BUS_MANAGER_ID 0x21c | 36 | #define CSR_BUS_MANAGER_ID 0x21c |
36 | #define CSR_BANDWIDTH_AVAILABLE 0x220 | 37 | #define CSR_BANDWIDTH_AVAILABLE 0x220 |
37 | #define CSR_CHANNELS_AVAILABLE 0x224 | 38 | #define CSR_CHANNELS_AVAILABLE 0x224 |
38 | #define CSR_CHANNELS_AVAILABLE_HI 0x224 | 39 | #define CSR_CHANNELS_AVAILABLE_HI 0x224 |
39 | #define CSR_CHANNELS_AVAILABLE_LO 0x228 | 40 | #define CSR_CHANNELS_AVAILABLE_LO 0x228 |
41 | #define CSR_MAINT_UTILITY 0x230 | ||
40 | #define CSR_BROADCAST_CHANNEL 0x234 | 42 | #define CSR_BROADCAST_CHANNEL 0x234 |
41 | #define CSR_CONFIG_ROM 0x400 | 43 | #define CSR_CONFIG_ROM 0x400 |
42 | #define CSR_CONFIG_ROM_END 0x800 | 44 | #define CSR_CONFIG_ROM_END 0x800 |
@@ -89,6 +91,11 @@ struct fw_card { | |||
89 | struct list_head transaction_list; | 91 | struct list_head transaction_list; |
90 | unsigned long reset_jiffies; | 92 | unsigned long reset_jiffies; |
91 | 93 | ||
94 | u32 split_timeout_hi; | ||
95 | u32 split_timeout_lo; | ||
96 | unsigned int split_timeout_cycles; | ||
97 | unsigned int split_timeout_jiffies; | ||
98 | |||
92 | unsigned long long guid; | 99 | unsigned long long guid; |
93 | unsigned max_receive; | 100 | unsigned max_receive; |
94 | int link_speed; | 101 | int link_speed; |
@@ -104,18 +111,28 @@ struct fw_card { | |||
104 | bool beta_repeaters_present; | 111 | bool beta_repeaters_present; |
105 | 112 | ||
106 | int index; | 113 | int index; |
107 | |||
108 | struct list_head link; | 114 | struct list_head link; |
109 | 115 | ||
110 | /* Work struct for BM duties. */ | 116 | struct list_head phy_receiver_list; |
111 | struct delayed_work work; | 117 | |
118 | struct delayed_work br_work; /* bus reset job */ | ||
119 | bool br_short; | ||
120 | |||
121 | struct delayed_work bm_work; /* bus manager job */ | ||
112 | int bm_retries; | 122 | int bm_retries; |
113 | int bm_generation; | 123 | int bm_generation; |
114 | __be32 bm_transaction_data[2]; | 124 | __be32 bm_transaction_data[2]; |
125 | int bm_node_id; | ||
126 | bool bm_abdicate; | ||
127 | |||
128 | bool priority_budget_implemented; /* controller feature */ | ||
129 | bool broadcast_channel_auto_allocated; /* controller feature */ | ||
115 | 130 | ||
116 | bool broadcast_channel_allocated; | 131 | bool broadcast_channel_allocated; |
117 | u32 broadcast_channel; | 132 | u32 broadcast_channel; |
118 | __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; | 133 | __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; |
134 | |||
135 | __be32 maint_utility_register; | ||
119 | }; | 136 | }; |
120 | 137 | ||
121 | struct fw_attribute_group { | 138 | struct fw_attribute_group { |
@@ -252,7 +269,7 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, | |||
252 | typedef void (*fw_address_callback_t)(struct fw_card *card, | 269 | typedef void (*fw_address_callback_t)(struct fw_card *card, |
253 | struct fw_request *request, | 270 | struct fw_request *request, |
254 | int tcode, int destination, int source, | 271 | int tcode, int destination, int source, |
255 | int generation, int speed, | 272 | int generation, |
256 | unsigned long long offset, | 273 | unsigned long long offset, |
257 | void *data, size_t length, | 274 | void *data, size_t length, |
258 | void *callback_data); | 275 | void *callback_data); |
@@ -269,10 +286,10 @@ struct fw_packet { | |||
269 | u32 timestamp; | 286 | u32 timestamp; |
270 | 287 | ||
271 | /* | 288 | /* |
272 | * This callback is called when the packet transmission has | 289 | * This callback is called when the packet transmission has completed. |
273 | * completed; for successful transmission, the status code is | 290 | * For successful transmission, the status code is the ack received |
274 | * the ack received from the destination, otherwise it's a | 291 | * from the destination. Otherwise it is one of the juju-specific |
275 | * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO. | 292 | * rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK. |
276 | * The callback can be called from tasklet context and thus | 293 | * The callback can be called from tasklet context and thus |
277 | * must never block. | 294 | * must never block. |
278 | */ | 295 | */ |
@@ -355,17 +372,19 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc); | |||
355 | * scatter-gather streaming (e.g. assembling video frame automatically). | 372 | * scatter-gather streaming (e.g. assembling video frame automatically). |
356 | */ | 373 | */ |
357 | struct fw_iso_packet { | 374 | struct fw_iso_packet { |
358 | u16 payload_length; /* Length of indirect payload. */ | 375 | u16 payload_length; /* Length of indirect payload */ |
359 | u32 interrupt:1; /* Generate interrupt on this packet */ | 376 | u32 interrupt:1; /* Generate interrupt on this packet */ |
360 | u32 skip:1; /* Set to not send packet at all. */ | 377 | u32 skip:1; /* tx: Set to not send packet at all */ |
361 | u32 tag:2; | 378 | /* rx: Sync bit, wait for matching sy */ |
362 | u32 sy:4; | 379 | u32 tag:2; /* tx: Tag in packet header */ |
363 | u32 header_length:8; /* Length of immediate header. */ | 380 | u32 sy:4; /* tx: Sy in packet header */ |
364 | u32 header[0]; | 381 | u32 header_length:8; /* Length of immediate header */ |
382 | u32 header[0]; /* tx: Top of 1394 isoch. data_block */ | ||
365 | }; | 383 | }; |
366 | 384 | ||
367 | #define FW_ISO_CONTEXT_TRANSMIT 0 | 385 | #define FW_ISO_CONTEXT_TRANSMIT 0 |
368 | #define FW_ISO_CONTEXT_RECEIVE 1 | 386 | #define FW_ISO_CONTEXT_RECEIVE 1 |
387 | #define FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2 | ||
369 | 388 | ||
370 | #define FW_ISO_CONTEXT_MATCH_TAG0 1 | 389 | #define FW_ISO_CONTEXT_MATCH_TAG0 1 |
371 | #define FW_ISO_CONTEXT_MATCH_TAG1 2 | 390 | #define FW_ISO_CONTEXT_MATCH_TAG1 2 |
@@ -389,24 +408,31 @@ struct fw_iso_buffer { | |||
389 | int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, | 408 | int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, |
390 | int page_count, enum dma_data_direction direction); | 409 | int page_count, enum dma_data_direction direction); |
391 | void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); | 410 | void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); |
411 | size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed); | ||
392 | 412 | ||
393 | struct fw_iso_context; | 413 | struct fw_iso_context; |
394 | typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, | 414 | typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, |
395 | u32 cycle, size_t header_length, | 415 | u32 cycle, size_t header_length, |
396 | void *header, void *data); | 416 | void *header, void *data); |
417 | typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context, | ||
418 | dma_addr_t completed, void *data); | ||
397 | struct fw_iso_context { | 419 | struct fw_iso_context { |
398 | struct fw_card *card; | 420 | struct fw_card *card; |
399 | int type; | 421 | int type; |
400 | int channel; | 422 | int channel; |
401 | int speed; | 423 | int speed; |
402 | size_t header_size; | 424 | size_t header_size; |
403 | fw_iso_callback_t callback; | 425 | union { |
426 | fw_iso_callback_t sc; | ||
427 | fw_iso_mc_callback_t mc; | ||
428 | } callback; | ||
404 | void *callback_data; | 429 | void *callback_data; |
405 | }; | 430 | }; |
406 | 431 | ||
407 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, | 432 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, |
408 | int type, int channel, int speed, size_t header_size, | 433 | int type, int channel, int speed, size_t header_size, |
409 | fw_iso_callback_t callback, void *callback_data); | 434 | fw_iso_callback_t callback, void *callback_data); |
435 | int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels); | ||
410 | int fw_iso_context_queue(struct fw_iso_context *ctx, | 436 | int fw_iso_context_queue(struct fw_iso_context *ctx, |
411 | struct fw_iso_packet *packet, | 437 | struct fw_iso_packet *packet, |
412 | struct fw_iso_buffer *buffer, | 438 | struct fw_iso_buffer *buffer, |
diff --git a/tools/firewire/Makefile b/tools/firewire/Makefile new file mode 100644 index 000000000000..81767adaae7d --- /dev/null +++ b/tools/firewire/Makefile | |||
@@ -0,0 +1,19 @@ | |||
1 | prefix = /usr | ||
2 | nosy-dump-version = 0.4 | ||
3 | |||
4 | CC = gcc | ||
5 | |||
6 | all : nosy-dump | ||
7 | |||
8 | nosy-dump : CFLAGS = -Wall -O2 -g | ||
9 | nosy-dump : CPPFLAGS = -DVERSION=\"$(nosy-dump-version)\" -I../../drivers/firewire | ||
10 | nosy-dump : LDFLAGS = -g | ||
11 | nosy-dump : LDLIBS = -lpopt | ||
12 | |||
13 | nosy-dump : nosy-dump.o decode-fcp.o | ||
14 | |||
15 | clean : | ||
16 | rm -rf *.o nosy-dump | ||
17 | |||
18 | install : | ||
19 | install nosy-dump $(prefix)/bin/nosy-dump | ||
diff --git a/tools/firewire/decode-fcp.c b/tools/firewire/decode-fcp.c new file mode 100644 index 000000000000..e41223b6a4c8 --- /dev/null +++ b/tools/firewire/decode-fcp.c | |||
@@ -0,0 +1,213 @@ | |||
1 | #include <linux/firewire-constants.h> | ||
2 | #include <stdio.h> | ||
3 | #include <stdlib.h> | ||
4 | |||
5 | #include "list.h" | ||
6 | #include "nosy-dump.h" | ||
7 | |||
8 | #define CSR_FCP_COMMAND 0xfffff0000b00ull | ||
9 | #define CSR_FCP_RESPONSE 0xfffff0000d00ull | ||
10 | |||
11 | static const char * const ctype_names[] = { | ||
12 | [0x0] = "control", [0x8] = "not implemented", | ||
13 | [0x1] = "status", [0x9] = "accepted", | ||
14 | [0x2] = "specific inquiry", [0xa] = "rejected", | ||
15 | [0x3] = "notify", [0xb] = "in transition", | ||
16 | [0x4] = "general inquiry", [0xc] = "stable", | ||
17 | [0x5] = "(reserved 0x05)", [0xd] = "changed", | ||
18 | [0x6] = "(reserved 0x06)", [0xe] = "(reserved 0x0e)", | ||
19 | [0x7] = "(reserved 0x07)", [0xf] = "interim", | ||
20 | }; | ||
21 | |||
22 | static const char * const subunit_type_names[] = { | ||
23 | [0x00] = "monitor", [0x10] = "(reserved 0x10)", | ||
24 | [0x01] = "audio", [0x11] = "(reserved 0x11)", | ||
25 | [0x02] = "printer", [0x12] = "(reserved 0x12)", | ||
26 | [0x03] = "disc", [0x13] = "(reserved 0x13)", | ||
27 | [0x04] = "tape recorder/player",[0x14] = "(reserved 0x14)", | ||
28 | [0x05] = "tuner", [0x15] = "(reserved 0x15)", | ||
29 | [0x06] = "ca", [0x16] = "(reserved 0x16)", | ||
30 | [0x07] = "camera", [0x17] = "(reserved 0x17)", | ||
31 | [0x08] = "(reserved 0x08)", [0x18] = "(reserved 0x18)", | ||
32 | [0x09] = "panel", [0x19] = "(reserved 0x19)", | ||
33 | [0x0a] = "bulletin board", [0x1a] = "(reserved 0x1a)", | ||
34 | [0x0b] = "camera storage", [0x1b] = "(reserved 0x1b)", | ||
35 | [0x0c] = "(reserved 0x0c)", [0x1c] = "vendor unique", | ||
36 | [0x0d] = "(reserved 0x0d)", [0x1d] = "all subunit types", | ||
37 | [0x0e] = "(reserved 0x0e)", [0x1e] = "subunit_type extended to next byte", | ||
38 | [0x0f] = "(reserved 0x0f)", [0x1f] = "unit", | ||
39 | }; | ||
40 | |||
41 | struct avc_enum { | ||
42 | int value; | ||
43 | const char *name; | ||
44 | }; | ||
45 | |||
46 | struct avc_field { | ||
47 | const char *name; /* Short name for field. */ | ||
48 | int offset; /* Location of field, specified in bits; */ | ||
49 | /* negative means from end of packet. */ | ||
50 | int width; /* Width of field, 0 means use data_length. */ | ||
51 | struct avc_enum *names; | ||
52 | }; | ||
53 | |||
54 | struct avc_opcode_info { | ||
55 | const char *name; | ||
56 | struct avc_field fields[8]; | ||
57 | }; | ||
58 | |||
59 | struct avc_enum power_field_names[] = { | ||
60 | { 0x70, "on" }, | ||
61 | { 0x60, "off" }, | ||
62 | { } | ||
63 | }; | ||
64 | |||
65 | static const struct avc_opcode_info opcode_info[256] = { | ||
66 | |||
67 | /* TA Document 1999026 */ | ||
68 | /* AV/C Digital Interface Command Set General Specification 4.0 */ | ||
69 | [0xb2] = { "power", { | ||
70 | { "state", 0, 8, power_field_names } | ||
71 | } | ||
72 | }, | ||
73 | [0x30] = { "unit info", { | ||
74 | { "foo", 0, 8 }, | ||
75 | { "unit_type", 8, 5 }, | ||
76 | { "unit", 13, 3 }, | ||
77 | { "company id", 16, 24 }, | ||
78 | } | ||
79 | }, | ||
80 | [0x31] = { "subunit info" }, | ||
81 | [0x01] = { "reserve" }, | ||
82 | [0xb0] = { "version" }, | ||
83 | [0x00] = { "vendor dependent" }, | ||
84 | [0x02] = { "plug info" }, | ||
85 | [0x12] = { "channel usage" }, | ||
86 | [0x24] = { "connect" }, | ||
87 | [0x20] = { "connect av" }, | ||
88 | [0x22] = { "connections" }, | ||
89 | [0x11] = { "digital input" }, | ||
90 | [0x10] = { "digital output" }, | ||
91 | [0x25] = { "disconnect" }, | ||
92 | [0x21] = { "disconnect av" }, | ||
93 | [0x19] = { "input plug signal format" }, | ||
94 | [0x18] = { "output plug signal format" }, | ||
95 | [0x1f] = { "general bus setup" }, | ||
96 | |||
97 | /* TA Document 1999025 */ | ||
98 | /* AV/C Descriptor Mechanism Specification Version 1.0 */ | ||
99 | [0x0c] = { "create descriptor" }, | ||
100 | [0x08] = { "open descriptor" }, | ||
101 | [0x09] = { "read descriptor" }, | ||
102 | [0x0a] = { "write descriptor" }, | ||
103 | [0x05] = { "open info block" }, | ||
104 | [0x06] = { "read info block" }, | ||
105 | [0x07] = { "write info block" }, | ||
106 | [0x0b] = { "search descriptor" }, | ||
107 | [0x0d] = { "object number select" }, | ||
108 | |||
109 | /* TA Document 1999015 */ | ||
110 | /* AV/C Command Set for Rate Control of Isochronous Data Flow 1.0 */ | ||
111 | [0xb3] = { "rate", { | ||
112 | { "subfunction", 0, 8 }, | ||
113 | { "result", 8, 8 }, | ||
114 | { "plug_type", 16, 8 }, | ||
115 | { "plug_id", 16, 8 }, | ||
116 | } | ||
117 | }, | ||
118 | |||
119 | /* TA Document 1999008 */ | ||
120 | /* AV/C Audio Subunit Specification 1.0 */ | ||
121 | [0xb8] = { "function block" }, | ||
122 | |||
123 | /* TA Document 2001001 */ | ||
124 | /* AV/C Panel Subunit Specification 1.1 */ | ||
125 | [0x7d] = { "gui update" }, | ||
126 | [0x7e] = { "push gui data" }, | ||
127 | [0x7f] = { "user action" }, | ||
128 | [0x7c] = { "pass through" }, | ||
129 | |||
130 | /* */ | ||
131 | [0x26] = { "asynchronous connection" }, | ||
132 | }; | ||
133 | |||
134 | struct avc_frame { | ||
135 | uint32_t operand0:8; | ||
136 | uint32_t opcode:8; | ||
137 | uint32_t subunit_id:3; | ||
138 | uint32_t subunit_type:5; | ||
139 | uint32_t ctype:4; | ||
140 | uint32_t cts:4; | ||
141 | }; | ||
142 | |||
143 | static void | ||
144 | decode_avc(struct link_transaction *t) | ||
145 | { | ||
146 | struct avc_frame *frame = | ||
147 | (struct avc_frame *) t->request->packet.write_block.data; | ||
148 | const struct avc_opcode_info *info; | ||
149 | const char *name; | ||
150 | char buffer[32]; | ||
151 | int i; | ||
152 | |||
153 | info = &opcode_info[frame->opcode]; | ||
154 | if (info->name == NULL) { | ||
155 | snprintf(buffer, sizeof(buffer), | ||
156 | "(unknown opcode 0x%02x)", frame->opcode); | ||
157 | name = buffer; | ||
158 | } else { | ||
159 | name = info->name; | ||
160 | } | ||
161 | |||
162 | printf("av/c %s, subunit_type=%s, subunit_id=%d, opcode=%s", | ||
163 | ctype_names[frame->ctype], subunit_type_names[frame->subunit_type], | ||
164 | frame->subunit_id, name); | ||
165 | |||
166 | for (i = 0; info->fields[i].name != NULL; i++) | ||
167 | printf(", %s", info->fields[i].name); | ||
168 | |||
169 | printf("\n"); | ||
170 | } | ||
171 | |||
172 | int | ||
173 | decode_fcp(struct link_transaction *t) | ||
174 | { | ||
175 | struct avc_frame *frame = | ||
176 | (struct avc_frame *) t->request->packet.write_block.data; | ||
177 | unsigned long long offset = | ||
178 | ((unsigned long long) t->request->packet.common.offset_high << 32) | | ||
179 | t->request->packet.common.offset_low; | ||
180 | |||
181 | if (t->request->packet.common.tcode != TCODE_WRITE_BLOCK_REQUEST) | ||
182 | return 0; | ||
183 | |||
184 | if (offset == CSR_FCP_COMMAND || offset == CSR_FCP_RESPONSE) { | ||
185 | switch (frame->cts) { | ||
186 | case 0x00: | ||
187 | decode_avc(t); | ||
188 | break; | ||
189 | case 0x01: | ||
190 | printf("cal fcp frame (cts=0x01)\n"); | ||
191 | break; | ||
192 | case 0x02: | ||
193 | printf("ehs fcp frame (cts=0x02)\n"); | ||
194 | break; | ||
195 | case 0x03: | ||
196 | printf("havi fcp frame (cts=0x03)\n"); | ||
197 | break; | ||
198 | case 0x0e: | ||
199 | printf("vendor specific fcp frame (cts=0x0e)\n"); | ||
200 | break; | ||
201 | case 0x0f: | ||
202 | printf("extended cts\n"); | ||
203 | break; | ||
204 | default: | ||
205 | printf("reserved fcp frame (ctx=0x%02x)\n", frame->cts); | ||
206 | break; | ||
207 | } | ||
208 | return 1; | ||
209 | } | ||
210 | |||
211 | return 0; | ||
212 | } | ||
213 | |||
diff --git a/tools/firewire/list.h b/tools/firewire/list.h new file mode 100644 index 000000000000..41f4bdadf634 --- /dev/null +++ b/tools/firewire/list.h | |||
@@ -0,0 +1,62 @@ | |||
1 | struct list { | ||
2 | struct list *next, *prev; | ||
3 | }; | ||
4 | |||
5 | static inline void | ||
6 | list_init(struct list *list) | ||
7 | { | ||
8 | list->next = list; | ||
9 | list->prev = list; | ||
10 | } | ||
11 | |||
12 | static inline int | ||
13 | list_empty(struct list *list) | ||
14 | { | ||
15 | return list->next == list; | ||
16 | } | ||
17 | |||
18 | static inline void | ||
19 | list_insert(struct list *link, struct list *new_link) | ||
20 | { | ||
21 | new_link->prev = link->prev; | ||
22 | new_link->next = link; | ||
23 | new_link->prev->next = new_link; | ||
24 | new_link->next->prev = new_link; | ||
25 | } | ||
26 | |||
27 | static inline void | ||
28 | list_append(struct list *list, struct list *new_link) | ||
29 | { | ||
30 | list_insert((struct list *)list, new_link); | ||
31 | } | ||
32 | |||
33 | static inline void | ||
34 | list_prepend(struct list *list, struct list *new_link) | ||
35 | { | ||
36 | list_insert(list->next, new_link); | ||
37 | } | ||
38 | |||
39 | static inline void | ||
40 | list_remove(struct list *link) | ||
41 | { | ||
42 | link->prev->next = link->next; | ||
43 | link->next->prev = link->prev; | ||
44 | } | ||
45 | |||
46 | #define list_entry(link, type, member) \ | ||
47 | ((type *)((char *)(link)-(unsigned long)(&((type *)0)->member))) | ||
48 | |||
49 | #define list_head(list, type, member) \ | ||
50 | list_entry((list)->next, type, member) | ||
51 | |||
52 | #define list_tail(list, type, member) \ | ||
53 | list_entry((list)->prev, type, member) | ||
54 | |||
55 | #define list_next(elm, member) \ | ||
56 | list_entry((elm)->member.next, typeof(*elm), member) | ||
57 | |||
58 | #define list_for_each_entry(pos, list, member) \ | ||
59 | for (pos = list_head(list, typeof(*pos), member); \ | ||
60 | &pos->member != (list); \ | ||
61 | pos = list_next(pos, member)) | ||
62 | |||
diff --git a/tools/firewire/nosy-dump.c b/tools/firewire/nosy-dump.c new file mode 100644 index 000000000000..f93b776370b6 --- /dev/null +++ b/tools/firewire/nosy-dump.c | |||
@@ -0,0 +1,1031 @@ | |||
1 | /* | ||
2 | * nosy-dump - Interface to snoop mode driver for TI PCILynx 1394 controllers | ||
3 | * Copyright (C) 2002-2006 Kristian Høgsberg | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software Foundation, | ||
17 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
18 | */ | ||
19 | |||
20 | #include <byteswap.h> | ||
21 | #include <endian.h> | ||
22 | #include <fcntl.h> | ||
23 | #include <linux/firewire-constants.h> | ||
24 | #include <poll.h> | ||
25 | #include <popt.h> | ||
26 | #include <signal.h> | ||
27 | #include <stdio.h> | ||
28 | #include <stdlib.h> | ||
29 | #include <string.h> | ||
30 | #include <sys/ioctl.h> | ||
31 | #include <sys/time.h> | ||
32 | #include <termios.h> | ||
33 | #include <unistd.h> | ||
34 | |||
35 | #include "list.h" | ||
36 | #include "nosy-dump.h" | ||
37 | #include "nosy-user.h" | ||
38 | |||
39 | enum { | ||
40 | PACKET_FIELD_DETAIL = 0x01, | ||
41 | PACKET_FIELD_DATA_LENGTH = 0x02, | ||
42 | /* Marks the fields we print in transaction view. */ | ||
43 | PACKET_FIELD_TRANSACTION = 0x04, | ||
44 | }; | ||
45 | |||
46 | static void print_packet(uint32_t *data, size_t length); | ||
47 | static void decode_link_packet(struct link_packet *packet, size_t length, | ||
48 | int include_flags, int exclude_flags); | ||
49 | static int run = 1; | ||
50 | sig_t sys_sigint_handler; | ||
51 | |||
52 | static char *option_nosy_device = "/dev/nosy"; | ||
53 | static char *option_view = "packet"; | ||
54 | static char *option_output; | ||
55 | static char *option_input; | ||
56 | static int option_hex; | ||
57 | static int option_iso; | ||
58 | static int option_cycle_start; | ||
59 | static int option_version; | ||
60 | static int option_verbose; | ||
61 | |||
62 | enum { | ||
63 | VIEW_TRANSACTION, | ||
64 | VIEW_PACKET, | ||
65 | VIEW_STATS, | ||
66 | }; | ||
67 | |||
68 | static const struct poptOption options[] = { | ||
69 | { | ||
70 | .longName = "device", | ||
71 | .shortName = 'd', | ||
72 | .argInfo = POPT_ARG_STRING, | ||
73 | .arg = &option_nosy_device, | ||
74 | .descrip = "Path to nosy device.", | ||
75 | .argDescrip = "DEVICE" | ||
76 | }, | ||
77 | { | ||
78 | .longName = "view", | ||
79 | .argInfo = POPT_ARG_STRING, | ||
80 | .arg = &option_view, | ||
81 | .descrip = "Specify view of bus traffic: packet, transaction or stats.", | ||
82 | .argDescrip = "VIEW" | ||
83 | }, | ||
84 | { | ||
85 | .longName = "hex", | ||
86 | .shortName = 'x', | ||
87 | .argInfo = POPT_ARG_NONE, | ||
88 | .arg = &option_hex, | ||
89 | .descrip = "Print each packet in hex.", | ||
90 | }, | ||
91 | { | ||
92 | .longName = "iso", | ||
93 | .argInfo = POPT_ARG_NONE, | ||
94 | .arg = &option_iso, | ||
95 | .descrip = "Print iso packets.", | ||
96 | }, | ||
97 | { | ||
98 | .longName = "cycle-start", | ||
99 | .argInfo = POPT_ARG_NONE, | ||
100 | .arg = &option_cycle_start, | ||
101 | .descrip = "Print cycle start packets.", | ||
102 | }, | ||
103 | { | ||
104 | .longName = "verbose", | ||
105 | .shortName = 'v', | ||
106 | .argInfo = POPT_ARG_NONE, | ||
107 | .arg = &option_verbose, | ||
108 | .descrip = "Verbose packet view.", | ||
109 | }, | ||
110 | { | ||
111 | .longName = "output", | ||
112 | .shortName = 'o', | ||
113 | .argInfo = POPT_ARG_STRING, | ||
114 | .arg = &option_output, | ||
115 | .descrip = "Log to output file.", | ||
116 | .argDescrip = "FILENAME" | ||
117 | }, | ||
118 | { | ||
119 | .longName = "input", | ||
120 | .shortName = 'i', | ||
121 | .argInfo = POPT_ARG_STRING, | ||
122 | .arg = &option_input, | ||
123 | .descrip = "Decode log from file.", | ||
124 | .argDescrip = "FILENAME" | ||
125 | }, | ||
126 | { | ||
127 | .longName = "version", | ||
128 | .argInfo = POPT_ARG_NONE, | ||
129 | .arg = &option_version, | ||
130 | .descrip = "Specify print version info.", | ||
131 | }, | ||
132 | POPT_AUTOHELP | ||
133 | POPT_TABLEEND | ||
134 | }; | ||
135 | |||
136 | /* Allow all ^C except the first to interrupt the program in the usual way. */ | ||
137 | static void | ||
138 | sigint_handler(int signal_num) | ||
139 | { | ||
140 | if (run == 1) { | ||
141 | run = 0; | ||
142 | signal(SIGINT, SIG_DFL); | ||
143 | } | ||
144 | } | ||
145 | |||
146 | static struct subaction * | ||
147 | subaction_create(uint32_t *data, size_t length) | ||
148 | { | ||
149 | struct subaction *sa; | ||
150 | |||
151 | /* We put the ack in the subaction struct for easy access. */ | ||
152 | sa = malloc(sizeof *sa - sizeof sa->packet + length); | ||
153 | sa->ack = data[length / 4 - 1]; | ||
154 | sa->length = length; | ||
155 | memcpy(&sa->packet, data, length); | ||
156 | |||
157 | return sa; | ||
158 | } | ||
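The odd-looking size above is the usual trailing-member trick: struct subaction ends with its packet member (see nosy-dump.h), so the allocation swaps sizeof(packet) for the actual record length, ack quadlet included. A sketch of the same computation spelled out with offsetof(); subaction_alloc() is a made-up name, and the equivalence assumes packet is the last member with no trailing padding (true for the layout in this patch):

#include <stddef.h>
#include <stdlib.h>

static struct subaction *subaction_alloc(size_t length)
{
        /* Space for ack/length/link plus 'length' bytes of captured packet. */
        return malloc(offsetof(struct subaction, packet) + length);
}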
159 | |||
160 | static void | ||
161 | subaction_destroy(struct subaction *sa) | ||
162 | { | ||
163 | free(sa); | ||
164 | } | ||
165 | |||
166 | static struct list pending_transaction_list = { | ||
167 | &pending_transaction_list, &pending_transaction_list | ||
168 | }; | ||
169 | |||
170 | static struct link_transaction * | ||
171 | link_transaction_lookup(int request_node, int response_node, int tlabel) | ||
172 | { | ||
173 | struct link_transaction *t; | ||
174 | |||
175 | list_for_each_entry(t, &pending_transaction_list, link) { | ||
176 | if (t->request_node == request_node && | ||
177 | t->response_node == response_node && | ||
178 | t->tlabel == tlabel) | ||
179 | return t; | ||
180 | } | ||
181 | |||
182 | t = malloc(sizeof *t); | ||
183 | t->request_node = request_node; | ||
184 | t->response_node = response_node; | ||
185 | t->tlabel = tlabel; | ||
186 | list_init(&t->request_list); | ||
187 | list_init(&t->response_list); | ||
188 | |||
189 | list_append(&pending_transaction_list, &t->link); | ||
190 | |||
191 | return t; | ||
192 | } | ||
193 | |||
194 | static void | ||
195 | link_transaction_destroy(struct link_transaction *t) | ||
196 | { | ||
197 | struct subaction *sa; | ||
198 | |||
199 | while (!list_empty(&t->request_list)) { | ||
200 | sa = list_head(&t->request_list, struct subaction, link); | ||
201 | list_remove(&sa->link); | ||
202 | subaction_destroy(sa); | ||
203 | } | ||
204 | while (!list_empty(&t->response_list)) { | ||
205 | sa = list_head(&t->response_list, struct subaction, link); | ||
206 | list_remove(&sa->link); | ||
207 | subaction_destroy(sa); | ||
208 | } | ||
209 | free(t); | ||
210 | } | ||
211 | |||
212 | struct protocol_decoder { | ||
213 | const char *name; | ||
214 | int (*decode)(struct link_transaction *t); | ||
215 | }; | ||
216 | |||
217 | static const struct protocol_decoder protocol_decoders[] = { | ||
218 | { "FCP", decode_fcp } | ||
219 | }; | ||
220 | |||
221 | static void | ||
222 | handle_transaction(struct link_transaction *t) | ||
223 | { | ||
224 | struct subaction *sa; | ||
225 | int i; | ||
226 | |||
227 | if (!t->request) { | ||
228 | printf("BUG in handle_transaction\n"); | ||
229 | return; | ||
230 | } | ||
231 | |||
232 | for (i = 0; i < array_length(protocol_decoders); i++) | ||
233 | if (protocol_decoders[i].decode(t)) | ||
234 | break; | ||
235 | |||
236 | /* HACK: decode only FCP for now -- return early and skip the generic transaction dump below. */ | ||
237 | return; | ||
238 | |||
239 | decode_link_packet(&t->request->packet, t->request->length, | ||
240 | PACKET_FIELD_TRANSACTION, 0); | ||
241 | if (t->response) | ||
242 | decode_link_packet(&t->response->packet, t->response->length, | ||
243 | PACKET_FIELD_TRANSACTION, 0); | ||
244 | else | ||
245 | printf("[no response]"); | ||
246 | |||
247 | if (option_verbose) { | ||
248 | list_for_each_entry(sa, &t->request_list, link) | ||
249 | print_packet((uint32_t *) &sa->packet, sa->length); | ||
250 | list_for_each_entry(sa, &t->response_list, link) | ||
251 | print_packet((uint32_t *) &sa->packet, sa->length); | ||
252 | } | ||
253 | printf("\r\n"); | ||
254 | |||
255 | link_transaction_destroy(t); | ||
256 | } | ||
257 | |||
258 | static void | ||
259 | clear_pending_transaction_list(void) | ||
260 | { | ||
261 | struct link_transaction *t; | ||
262 | |||
263 | while (!list_empty(&pending_transaction_list)) { | ||
264 | t = list_head(&pending_transaction_list, | ||
265 | struct link_transaction, link); | ||
266 | list_remove(&t->link); | ||
267 | link_transaction_destroy(t); | ||
268 | /* TODO: print unfinished transactions. */ | ||
269 | } | ||
270 | } | ||
271 | |||
272 | static const char * const tcode_names[] = { | ||
273 | [0x0] = "write_quadlet_request", [0x6] = "read_quadlet_response", | ||
274 | [0x1] = "write_block_request", [0x7] = "read_block_response", | ||
275 | [0x2] = "write_response", [0x8] = "cycle_start", | ||
276 | [0x3] = "reserved", [0x9] = "lock_request", | ||
277 | [0x4] = "read_quadlet_request", [0xa] = "iso_data", | ||
278 | [0x5] = "read_block_request", [0xb] = "lock_response", | ||
279 | }; | ||
280 | |||
281 | static const char * const ack_names[] = { | ||
282 | [0x0] = "no ack", [0x8] = "reserved (0x08)", | ||
283 | [0x1] = "ack_complete", [0x9] = "reserved (0x09)", | ||
284 | [0x2] = "ack_pending", [0xa] = "reserved (0x0a)", | ||
285 | [0x3] = "reserved (0x03)", [0xb] = "reserved (0x0b)", | ||
286 | [0x4] = "ack_busy_x", [0xc] = "reserved (0x0c)", | ||
287 | [0x5] = "ack_busy_a", [0xd] = "ack_data_error", | ||
288 | [0x6] = "ack_busy_b", [0xe] = "ack_type_error", | ||
289 | [0x7] = "reserved (0x07)", [0xf] = "reserved (0x0f)", | ||
290 | }; | ||
291 | |||
292 | static const char * const rcode_names[] = { | ||
293 | [0x0] = "complete", [0x4] = "conflict_error", | ||
294 | [0x1] = "reserved (0x01)", [0x5] = "data_error", | ||
295 | [0x2] = "reserved (0x02)", [0x6] = "type_error", | ||
296 | [0x3] = "reserved (0x03)", [0x7] = "address_error", | ||
297 | }; | ||
298 | |||
299 | static const char * const retry_names[] = { | ||
300 | [0x0] = "retry_1", | ||
301 | [0x1] = "retry_x", | ||
302 | [0x2] = "retry_a", | ||
303 | [0x3] = "retry_b", | ||
304 | }; | ||
305 | |||
306 | enum { | ||
307 | PACKET_RESERVED, | ||
308 | PACKET_REQUEST, | ||
309 | PACKET_RESPONSE, | ||
310 | PACKET_OTHER, | ||
311 | }; | ||
312 | |||
313 | struct packet_info { | ||
314 | const char *name; | ||
315 | int type; | ||
316 | int response_tcode; | ||
317 | const struct packet_field *fields; | ||
318 | int field_count; | ||
319 | }; | ||
320 | |||
321 | struct packet_field { | ||
322 | const char *name; /* Short name for field. */ | ||
323 | int offset; /* Location of field, specified in bits; */ | ||
324 | /* negative means from end of packet. */ | ||
325 | int width; /* Width of field, 0 means use data_length. */ | ||
326 | int flags; /* Show options. */ | ||
327 | const char * const *value_names; | ||
328 | }; | ||
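To make the offset convention concrete, this is the computation decode_link_packet() applies further down, factored into a stand-alone helper; field_bit_offset() is a made-up name and length is the size of the whole captured record in bytes:

/* Positive offsets are bit positions from the start of the 1394 header;
 * negative offsets count back from the end of the record, which finishes
 * with the snoop-mode ack quadlet. The "- 32" compensates for the leading
 * timestamp quadlet, which positive offsets skip via the "+ 1" in
 * get_bits(). A width of 0 means the size comes from the field flagged
 * PACKET_FIELD_DATA_LENGTH. */
static int field_bit_offset(const struct packet_field *f, size_t length)
{
        return f->offset < 0 ? (int) length * 8 + f->offset - 32 : f->offset;
}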
329 | |||
330 | #define COMMON_REQUEST_FIELDS \ | ||
331 | { "dest", 0, 16, PACKET_FIELD_TRANSACTION }, \ | ||
332 | { "tl", 16, 6 }, \ | ||
333 | { "rt", 22, 2, PACKET_FIELD_DETAIL, retry_names }, \ | ||
334 | { "tcode", 24, 4, PACKET_FIELD_TRANSACTION, tcode_names }, \ | ||
335 | { "pri", 28, 4, PACKET_FIELD_DETAIL }, \ | ||
336 | { "src", 32, 16, PACKET_FIELD_TRANSACTION }, \ | ||
337 | { "offs", 48, 48, PACKET_FIELD_TRANSACTION } | ||
338 | |||
339 | #define COMMON_RESPONSE_FIELDS \ | ||
340 | { "dest", 0, 16 }, \ | ||
341 | { "tl", 16, 6 }, \ | ||
342 | { "rt", 22, 2, PACKET_FIELD_DETAIL, retry_names }, \ | ||
343 | { "tcode", 24, 4, 0, tcode_names }, \ | ||
344 | { "pri", 28, 4, PACKET_FIELD_DETAIL }, \ | ||
345 | { "src", 32, 16 }, \ | ||
346 | { "rcode", 48, 4, PACKET_FIELD_TRANSACTION, rcode_names } | ||
347 | |||
348 | static const struct packet_field read_quadlet_request_fields[] = { | ||
349 | COMMON_REQUEST_FIELDS, | ||
350 | { "crc", 96, 32, PACKET_FIELD_DETAIL }, | ||
351 | { "ack", 156, 4, 0, ack_names }, | ||
352 | }; | ||
353 | |||
354 | static const struct packet_field read_quadlet_response_fields[] = { | ||
355 | COMMON_RESPONSE_FIELDS, | ||
356 | { "data", 96, 32, PACKET_FIELD_TRANSACTION }, | ||
357 | { "crc", 128, 32, PACKET_FIELD_DETAIL }, | ||
358 | { "ack", 188, 4, 0, ack_names }, | ||
359 | }; | ||
360 | |||
361 | static const struct packet_field read_block_request_fields[] = { | ||
362 | COMMON_REQUEST_FIELDS, | ||
363 | { "data_length", 96, 16, PACKET_FIELD_TRANSACTION }, | ||
364 | { "extended_tcode", 112, 16 }, | ||
365 | { "crc", 128, 32, PACKET_FIELD_DETAIL }, | ||
366 | { "ack", 188, 4, 0, ack_names }, | ||
367 | }; | ||
368 | |||
369 | static const struct packet_field block_response_fields[] = { | ||
370 | COMMON_RESPONSE_FIELDS, | ||
371 | { "data_length", 96, 16, PACKET_FIELD_DATA_LENGTH }, | ||
372 | { "extended_tcode", 112, 16 }, | ||
373 | { "crc", 128, 32, PACKET_FIELD_DETAIL }, | ||
374 | { "data", 160, 0, PACKET_FIELD_TRANSACTION }, | ||
375 | { "crc", -64, 32, PACKET_FIELD_DETAIL }, | ||
376 | { "ack", -4, 4, 0, ack_names }, | ||
377 | }; | ||
378 | |||
379 | static const struct packet_field write_quadlet_request_fields[] = { | ||
380 | COMMON_REQUEST_FIELDS, | ||
381 | { "data", 96, 32, PACKET_FIELD_TRANSACTION }, | ||
382 | { "ack", -4, 4, 0, ack_names }, | ||
383 | }; | ||
384 | |||
385 | static const struct packet_field block_request_fields[] = { | ||
386 | COMMON_REQUEST_FIELDS, | ||
387 | { "data_length", 96, 16, PACKET_FIELD_DATA_LENGTH | PACKET_FIELD_TRANSACTION }, | ||
388 | { "extended_tcode", 112, 16, PACKET_FIELD_TRANSACTION }, | ||
389 | { "crc", 128, 32, PACKET_FIELD_DETAIL }, | ||
390 | { "data", 160, 0, PACKET_FIELD_TRANSACTION }, | ||
391 | { "crc", -64, 32, PACKET_FIELD_DETAIL }, | ||
392 | { "ack", -4, 4, 0, ack_names }, | ||
393 | }; | ||
394 | |||
395 | static const struct packet_field write_response_fields[] = { | ||
396 | COMMON_RESPONSE_FIELDS, | ||
397 | { "reserved", 64, 32, PACKET_FIELD_DETAIL }, | ||
398 | { "ack", -4, 4, 0, ack_names }, | ||
399 | }; | ||
400 | |||
401 | static const struct packet_field iso_data_fields[] = { | ||
402 | { "data_length", 0, 16, PACKET_FIELD_DATA_LENGTH }, | ||
403 | { "tag", 16, 2 }, | ||
404 | { "channel", 18, 6 }, | ||
405 | { "tcode", 24, 4, 0, tcode_names }, | ||
406 | { "sy", 28, 4 }, | ||
407 | { "crc", 32, 32, PACKET_FIELD_DETAIL }, | ||
408 | { "data", 64, 0 }, | ||
409 | { "crc", -64, 32, PACKET_FIELD_DETAIL }, | ||
410 | { "ack", -4, 4, 0, ack_names }, | ||
411 | }; | ||
412 | |||
413 | static const struct packet_info packet_info[] = { | ||
414 | { | ||
415 | .name = "write_quadlet_request", | ||
416 | .type = PACKET_REQUEST, | ||
417 | .response_tcode = TCODE_WRITE_RESPONSE, | ||
418 | .fields = write_quadlet_request_fields, | ||
419 | .field_count = array_length(write_quadlet_request_fields) | ||
420 | }, | ||
421 | { | ||
422 | .name = "write_block_request", | ||
423 | .type = PACKET_REQUEST, | ||
424 | .response_tcode = TCODE_WRITE_RESPONSE, | ||
425 | .fields = block_request_fields, | ||
426 | .field_count = array_length(block_request_fields) | ||
427 | }, | ||
428 | { | ||
429 | .name = "write_response", | ||
430 | .type = PACKET_RESPONSE, | ||
431 | .fields = write_response_fields, | ||
432 | .field_count = array_length(write_response_fields) | ||
433 | }, | ||
434 | { | ||
435 | .name = "reserved", | ||
436 | .type = PACKET_RESERVED, | ||
437 | }, | ||
438 | { | ||
439 | .name = "read_quadlet_request", | ||
440 | .type = PACKET_REQUEST, | ||
441 | .response_tcode = TCODE_READ_QUADLET_RESPONSE, | ||
442 | .fields = read_quadlet_request_fields, | ||
443 | .field_count = array_length(read_quadlet_request_fields) | ||
444 | }, | ||
445 | { | ||
446 | .name = "read_block_request", | ||
447 | .type = PACKET_REQUEST, | ||
448 | .response_tcode = TCODE_READ_BLOCK_RESPONSE, | ||
449 | .fields = read_block_request_fields, | ||
450 | .field_count = array_length(read_block_request_fields) | ||
451 | }, | ||
452 | { | ||
453 | .name = "read_quadlet_response", | ||
454 | .type = PACKET_RESPONSE, | ||
455 | .fields = read_quadlet_response_fields, | ||
456 | .field_count = array_length(read_quadlet_response_fields) | ||
457 | }, | ||
458 | { | ||
459 | .name = "read_block_response", | ||
460 | .type = PACKET_RESPONSE, | ||
461 | .fields = block_response_fields, | ||
462 | .field_count = array_length(block_response_fields) | ||
463 | }, | ||
464 | { | ||
465 | .name = "cycle_start", | ||
466 | .type = PACKET_OTHER, | ||
467 | .fields = write_quadlet_request_fields, | ||
468 | .field_count = array_length(write_quadlet_request_fields) | ||
469 | }, | ||
470 | { | ||
471 | .name = "lock_request", | ||
472 | .type = PACKET_REQUEST, | ||
473 | .fields = block_request_fields, | ||
474 | .field_count = array_length(block_request_fields) | ||
475 | }, | ||
476 | { | ||
477 | .name = "iso_data", | ||
478 | .type = PACKET_OTHER, | ||
479 | .fields = iso_data_fields, | ||
480 | .field_count = array_length(iso_data_fields) | ||
481 | }, | ||
482 | { | ||
483 | .name = "lock_response", | ||
484 | .type = PACKET_RESPONSE, | ||
485 | .fields = block_response_fields, | ||
486 | .field_count = array_length(block_response_fields) | ||
487 | }, | ||
488 | }; | ||
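Decoding is table-driven: the 4-bit tcode of the first header quadlet indexes packet_info[] directly (entries 0x0 through 0xb, mirroring tcode_names), and response_tcode lets handle_response_packet() check that a response matches its request. A small lookup sketch; describe_tcode() is a made-up name:

static const char *describe_tcode(uint32_t *data)
{
        struct link_packet *p = (struct link_packet *) data;

        /* The table only covers tcodes 0x0..0xb. */
        if (p->common.tcode >= array_length(packet_info))
                return "unknown";
        return packet_info[p->common.tcode].name;
}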
489 | |||
490 | static int | ||
491 | handle_request_packet(uint32_t *data, size_t length) | ||
492 | { | ||
493 | struct link_packet *p = (struct link_packet *) data; | ||
494 | struct subaction *sa, *prev; | ||
495 | struct link_transaction *t; | ||
496 | |||
497 | t = link_transaction_lookup(p->common.source, p->common.destination, | ||
498 | p->common.tlabel); | ||
499 | sa = subaction_create(data, length); | ||
500 | t->request = sa; | ||
501 | |||
502 | if (!list_empty(&t->request_list)) { | ||
503 | prev = list_tail(&t->request_list, | ||
504 | struct subaction, link); | ||
505 | |||
506 | if (!ACK_BUSY(prev->ack)) { | ||
507 | /* | ||
508 | * error, we should only see ack_busy_* before the | ||
509 | * ack_pending/ack_complete -- this is an ack_pending | ||
510 | * instead (ack_complete would have finished the | ||
511 | * transaction). | ||
512 | */ | ||
513 | } | ||
514 | |||
515 | if (prev->packet.common.tcode != sa->packet.common.tcode || | ||
516 | prev->packet.common.tlabel != sa->packet.common.tlabel) { | ||
517 | /* memcmp() ? */ | ||
518 | /* error, these should match for retries. */ | ||
519 | } | ||
520 | } | ||
521 | |||
522 | list_append(&t->request_list, &sa->link); | ||
523 | |||
524 | switch (sa->ack) { | ||
525 | case ACK_COMPLETE: | ||
526 | if (p->common.tcode != TCODE_WRITE_QUADLET_REQUEST && | ||
527 | p->common.tcode != TCODE_WRITE_BLOCK_REQUEST) | ||
528 | /* error, unified transactions only allowed for write */; | ||
529 | list_remove(&t->link); | ||
530 | handle_transaction(t); | ||
531 | break; | ||
532 | |||
533 | case ACK_NO_ACK: | ||
534 | case ACK_DATA_ERROR: | ||
535 | case ACK_TYPE_ERROR: | ||
536 | list_remove(&t->link); | ||
537 | handle_transaction(t); | ||
538 | break; | ||
539 | |||
540 | case ACK_PENDING: | ||
541 | /* request subaction phase over, wait for response. */ | ||
542 | break; | ||
543 | |||
544 | case ACK_BUSY_X: | ||
545 | case ACK_BUSY_A: | ||
546 | case ACK_BUSY_B: | ||
547 | /* ok, wait for retry. */ | ||
548 | /* check that retry protocol is respected. */ | ||
549 | break; | ||
550 | } | ||
551 | |||
552 | return 1; | ||
553 | } | ||
554 | |||
555 | static int | ||
556 | handle_response_packet(uint32_t *data, size_t length) | ||
557 | { | ||
558 | struct link_packet *p = (struct link_packet *) data; | ||
559 | struct subaction *sa, *prev; | ||
560 | struct link_transaction *t; | ||
561 | |||
562 | t = link_transaction_lookup(p->common.destination, p->common.source, | ||
563 | p->common.tlabel); | ||
564 | if (list_empty(&t->request_list)) { | ||
565 | /* unsolicited response */ | ||
566 | } | ||
567 | |||
568 | sa = subaction_create(data, length); | ||
569 | t->response = sa; | ||
570 | |||
571 | if (!list_empty(&t->response_list)) { | ||
572 | prev = list_tail(&t->response_list, struct subaction, link); | ||
573 | |||
574 | if (!ACK_BUSY(prev->ack)) { | ||
575 | /* | ||
576 | * error, we should only see ack_busy_* before the | ||
577 | * ack_pending/ack_complete | ||
578 | */ | ||
579 | } | ||
580 | |||
581 | if (prev->packet.common.tcode != sa->packet.common.tcode || | ||
582 | prev->packet.common.tlabel != sa->packet.common.tlabel) { | ||
583 | /* use memcmp() instead? */ | ||
584 | /* error, these should match for retries. */ | ||
585 | } | ||
586 | } else { | ||
587 | prev = list_tail(&t->request_list, struct subaction, link); | ||
588 | if (prev->ack != ACK_PENDING) { | ||
589 | /* | ||
590 | * error, should not get response unless last request got | ||
591 | * ack_pending. | ||
592 | */ | ||
593 | } | ||
594 | |||
595 | if (packet_info[prev->packet.common.tcode].response_tcode != | ||
596 | sa->packet.common.tcode) { | ||
597 | /* error, tcode mismatch */ | ||
598 | } | ||
599 | } | ||
600 | |||
601 | list_append(&t->response_list, &sa->link); | ||
602 | |||
603 | switch (sa->ack) { | ||
604 | case ACK_COMPLETE: | ||
605 | case ACK_NO_ACK: | ||
606 | case ACK_DATA_ERROR: | ||
607 | case ACK_TYPE_ERROR: | ||
608 | list_remove(&t->link); | ||
609 | handle_transaction(t); | ||
610 | /* transaction complete, remove t from pending list. */ | ||
611 | break; | ||
612 | |||
613 | case ACK_PENDING: | ||
614 | /* error for responses. */ | ||
615 | break; | ||
616 | |||
617 | case ACK_BUSY_X: | ||
618 | case ACK_BUSY_A: | ||
619 | case ACK_BUSY_B: | ||
620 | /* no problem, wait for next retry */ | ||
621 | break; | ||
622 | } | ||
623 | |||
624 | return 1; | ||
625 | } | ||
626 | |||
627 | static int | ||
628 | handle_packet(uint32_t *data, size_t length) | ||
629 | { | ||
630 | if (length == 0) { | ||
631 | printf("bus reset\r\n"); | ||
632 | clear_pending_transaction_list(); | ||
633 | } else if (length > sizeof(struct phy_packet)) { | ||
634 | struct link_packet *p = (struct link_packet *) data; | ||
635 | |||
636 | switch (packet_info[p->common.tcode].type) { | ||
637 | case PACKET_REQUEST: | ||
638 | return handle_request_packet(data, length); | ||
639 | |||
640 | case PACKET_RESPONSE: | ||
641 | return handle_response_packet(data, length); | ||
642 | |||
643 | case PACKET_OTHER: | ||
644 | case PACKET_RESERVED: | ||
645 | return 0; | ||
646 | } | ||
647 | } | ||
648 | |||
649 | return 1; | ||
650 | } | ||
651 | |||
652 | static unsigned int | ||
653 | get_bits(struct link_packet *packet, int offset, int width) | ||
654 | { | ||
655 | uint32_t *data = (uint32_t *) packet; | ||
656 | uint32_t index, shift, mask; | ||
657 | |||
658 | index = offset / 32 + 1; | ||
659 | shift = 32 - (offset & 31) - width; | ||
660 | mask = width == 32 ? ~0 : (1 << width) - 1; | ||
661 | |||
662 | return (data[index] >> shift) & mask; | ||
663 | } | ||
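A worked example of the arithmetic, using the tcode spec from COMMON_REQUEST_FIELDS (offset 24, width 4); the "+ 1" skips the timestamp quadlet that nosy prepends to every record. get_bits_example() is a made-up name and assumes it sits in the same file as get_bits():

#include <assert.h>
#include <stdint.h>

static void get_bits_example(void)
{
        /* data[0] = timestamp, data[1] = first header quadlet with
         * tcode 0xa (iso_data) in bits 4..7. */
        uint32_t record[2] = { 0, 0x000000a0 };

        /* index = 24/32 + 1 = 1, shift = 32 - 24 - 4 = 4, mask = 0xf */
        assert(get_bits((struct link_packet *) record, 24, 4) == 0xa);
}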
664 | |||
665 | #if __BYTE_ORDER == __LITTLE_ENDIAN | ||
666 | #define byte_index(i) ((i) ^ 3) | ||
667 | #elif __BYTE_ORDER == __BIG_ENDIAN | ||
668 | #define byte_index(i) (i) | ||
669 | #else | ||
670 | #error unsupported byte order. | ||
671 | #endif | ||
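In other words, on a little-endian host the macro reverses the bytes within each quadlet so that dump_data() below prints them most-significant-byte first, matching how the quadlet fields are decoded elsewhere. A quick sanity check; byte_index_example() is a made-up name:

#include <assert.h>

static void byte_index_example(void)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
        assert(byte_index(0) == 3 && byte_index(3) == 0);
#else
        assert(byte_index(0) == 0 && byte_index(3) == 3);
#endif
}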
672 | |||
673 | static void | ||
674 | dump_data(unsigned char *data, int length) | ||
675 | { | ||
676 | int i, print_length; | ||
677 | |||
678 | if (length > 128) | ||
679 | print_length = 128; | ||
680 | else | ||
681 | print_length = length; | ||
682 | |||
683 | for (i = 0; i < print_length; i++) | ||
684 | printf("%s%02hhx", | ||
685 | (i % 4 == 0 && i != 0) ? " " : "", | ||
686 | data[byte_index(i)]); | ||
687 | |||
688 | if (print_length < length) | ||
689 | printf(" (%d more bytes)", length - print_length); | ||
690 | } | ||
691 | |||
692 | static void | ||
693 | decode_link_packet(struct link_packet *packet, size_t length, | ||
694 | int include_flags, int exclude_flags) | ||
695 | { | ||
696 | const struct packet_info *pi; | ||
697 | int data_length = 0; | ||
698 | int i; | ||
699 | |||
700 | pi = &packet_info[packet->common.tcode]; | ||
701 | |||
702 | for (i = 0; i < pi->field_count; i++) { | ||
703 | const struct packet_field *f = &pi->fields[i]; | ||
704 | int offset; | ||
705 | |||
706 | if (f->flags & exclude_flags) | ||
707 | continue; | ||
708 | if (include_flags && !(f->flags & include_flags)) | ||
709 | continue; | ||
710 | |||
711 | if (f->offset < 0) | ||
712 | offset = length * 8 + f->offset - 32; | ||
713 | else | ||
714 | offset = f->offset; | ||
715 | |||
716 | if (f->value_names != NULL) { | ||
717 | uint32_t bits; | ||
718 | |||
719 | bits = get_bits(packet, offset, f->width); | ||
720 | printf("%s", f->value_names[bits]); | ||
721 | } else if (f->width == 0) { | ||
722 | printf("%s=[", f->name); | ||
723 | dump_data((unsigned char *) packet + (offset / 8 + 4), data_length); | ||
724 | printf("]"); | ||
725 | } else { | ||
726 | unsigned long long bits; | ||
727 | int high_width, low_width; | ||
728 | |||
729 | if ((offset & ~31) != ((offset + f->width - 1) & ~31)) { | ||
730 | /* Bit field spans quadlet boundary. */ | ||
731 | high_width = ((offset + 31) & ~31) - offset; | ||
732 | low_width = f->width - high_width; | ||
733 | |||
734 | bits = get_bits(packet, offset, high_width); | ||
735 | bits = (bits << low_width) | | ||
736 | get_bits(packet, offset + high_width, low_width); | ||
737 | } else { | ||
738 | bits = get_bits(packet, offset, f->width); | ||
739 | } | ||
740 | |||
741 | printf("%s=0x%0*llx", f->name, (f->width + 3) / 4, bits); | ||
742 | |||
743 | if (f->flags & PACKET_FIELD_DATA_LENGTH) | ||
744 | data_length = bits; | ||
745 | } | ||
746 | |||
747 | if (i < pi->field_count - 1) | ||
748 | printf(", "); | ||
749 | } | ||
750 | } | ||
751 | |||
752 | static void | ||
753 | print_packet(uint32_t *data, size_t length) | ||
754 | { | ||
755 | int i; | ||
756 | |||
757 | printf("%6u ", data[0]); | ||
758 | |||
759 | if (length == 4) { | ||
760 | printf("bus reset"); | ||
761 | } else if (length < sizeof(struct phy_packet)) { | ||
762 | printf("short packet: "); | ||
763 | for (i = 1; i < length / 4; i++) | ||
764 | printf("%s%08x", i == 0 ? "[" : " ", data[i]); | ||
765 | printf("]"); | ||
766 | |||
767 | } else if (length == sizeof(struct phy_packet) && data[1] == ~data[2]) { | ||
768 | struct phy_packet *pp = (struct phy_packet *) data; | ||
769 | |||
770 | /* phy packets are 3 quadlets: the 1-quadlet payload, | ||
771 | * the bitwise inverse of the payload, and the snoop | ||
772 | * mode ack */ | ||
773 | |||
774 | switch (pp->common.identifier) { | ||
775 | case PHY_PACKET_CONFIGURATION: | ||
776 | if (!pp->phy_config.set_root && !pp->phy_config.set_gap_count) { | ||
777 | printf("ext phy config: phy_id=%02x", pp->phy_config.root_id); | ||
778 | } else { | ||
779 | printf("phy config:"); | ||
780 | if (pp->phy_config.set_root) | ||
781 | printf(" set_root_id=%02x", pp->phy_config.root_id); | ||
782 | if (pp->phy_config.set_gap_count) | ||
783 | printf(" set_gap_count=%d", pp->phy_config.gap_count); | ||
784 | } | ||
785 | break; | ||
786 | |||
787 | case PHY_PACKET_LINK_ON: | ||
788 | printf("link-on packet, phy_id=%02x", pp->link_on.phy_id); | ||
789 | break; | ||
790 | |||
791 | case PHY_PACKET_SELF_ID: | ||
792 | if (pp->self_id.extended) { | ||
793 | printf("extended self id: phy_id=%02x, seq=%d", | ||
794 | pp->ext_self_id.phy_id, pp->ext_self_id.sequence); | ||
795 | } else { | ||
796 | static const char * const speed_names[] = { | ||
797 | "S100", "S200", "S400", "BETA" | ||
798 | }; | ||
799 | printf("self id: phy_id=%02x, link %s, gap_count=%d, speed=%s%s%s", | ||
800 | pp->self_id.phy_id, | ||
801 | (pp->self_id.link_active ? "active" : "not active"), | ||
802 | pp->self_id.gap_count, | ||
803 | speed_names[pp->self_id.phy_speed], | ||
804 | (pp->self_id.contender ? ", irm contender" : ""), | ||
805 | (pp->self_id.initiated_reset ? ", initiator" : "")); | ||
806 | } | ||
807 | break; | ||
808 | default: | ||
809 | printf("unknown phy packet: "); | ||
810 | for (i = 1; i < length / 4; i++) | ||
811 | printf("%s%08x", i == 0 ? "[" : " ", data[i]); | ||
812 | printf("]"); | ||
813 | break; | ||
814 | } | ||
815 | } else { | ||
816 | struct link_packet *packet = (struct link_packet *) data; | ||
817 | |||
818 | decode_link_packet(packet, length, 0, | ||
819 | option_verbose ? 0 : PACKET_FIELD_DETAIL); | ||
820 | } | ||
821 | |||
822 | if (option_hex) { | ||
823 | printf(" ["); | ||
824 | dump_data((unsigned char *) data + 4, length - 4); | ||
825 | printf("]"); | ||
826 | } | ||
827 | |||
828 | printf("\r\n"); | ||
829 | } | ||
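For reference, the PHY-packet branch above depends on the fixed 16-byte record layout that makes the data[1] == ~data[2] test meaningful; a comment-style sketch of the quadlets in the captured record:

/* data[0]  timestamp prepended by nosy
 * data[1]  PHY packet payload
 * data[2]  bitwise inverse of data[1]
 * data[3]  snoop-mode ack
 * total: 16 bytes == sizeof(struct phy_packet) */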
830 | |||
831 | #define HIDE_CURSOR "\033[?25l" | ||
832 | #define SHOW_CURSOR "\033[?25h" | ||
833 | #define CLEAR "\033[H\033[2J" | ||
834 | |||
835 | static void | ||
836 | print_stats(uint32_t *data, size_t length) | ||
837 | { | ||
838 | static int bus_reset_count, short_packet_count, phy_packet_count; | ||
839 | static int tcode_count[16]; | ||
840 | static struct timeval last_update; | ||
841 | struct timeval now; | ||
842 | int i; | ||
843 | |||
844 | if (length == 0) | ||
845 | bus_reset_count++; | ||
846 | else if (length < sizeof(struct phy_packet)) | ||
847 | short_packet_count++; | ||
848 | else if (length == sizeof(struct phy_packet) && data[1] == ~data[2]) | ||
849 | phy_packet_count++; | ||
850 | else { | ||
851 | struct link_packet *packet = (struct link_packet *) data; | ||
852 | tcode_count[packet->common.tcode]++; | ||
853 | } | ||
854 | |||
855 | gettimeofday(&now, NULL); | ||
856 | if (now.tv_sec <= last_update.tv_sec && | ||
857 | now.tv_usec < last_update.tv_usec + 500000) | ||
858 | return; | ||
859 | |||
860 | last_update = now; | ||
861 | printf(CLEAR HIDE_CURSOR | ||
862 | " bus resets : %8d\n" | ||
863 | " short packets : %8d\n" | ||
864 | " phy packets : %8d\n", | ||
865 | bus_reset_count, short_packet_count, phy_packet_count); | ||
866 | |||
867 | for (i = 0; i < array_length(packet_info); i++) | ||
868 | if (packet_info[i].type != PACKET_RESERVED) | ||
869 | printf(" %-24s: %8d\n", packet_info[i].name, tcode_count[i]); | ||
870 | printf(SHOW_CURSOR "\n"); | ||
871 | } | ||
872 | |||
873 | static struct termios saved_attributes; | ||
874 | |||
875 | static void | ||
876 | reset_input_mode(void) | ||
877 | { | ||
878 | tcsetattr(STDIN_FILENO, TCSANOW, &saved_attributes); | ||
879 | } | ||
880 | |||
881 | static void | ||
882 | set_input_mode(void) | ||
883 | { | ||
884 | struct termios tattr; | ||
885 | |||
886 | /* Make sure stdin is a terminal. */ | ||
887 | if (!isatty(STDIN_FILENO)) { | ||
888 | fprintf(stderr, "Not a terminal.\n"); | ||
889 | exit(EXIT_FAILURE); | ||
890 | } | ||
891 | |||
892 | /* Save the terminal attributes so we can restore them later. */ | ||
893 | tcgetattr(STDIN_FILENO, &saved_attributes); | ||
894 | atexit(reset_input_mode); | ||
895 | |||
896 | /* Set the funny terminal modes. */ | ||
897 | tcgetattr(STDIN_FILENO, &tattr); | ||
898 | tattr.c_lflag &= ~(ICANON|ECHO); /* Clear ICANON and ECHO. */ | ||
899 | tattr.c_cc[VMIN] = 1; | ||
900 | tattr.c_cc[VTIME] = 0; | ||
901 | tcsetattr(STDIN_FILENO, TCSAFLUSH, &tattr); | ||
902 | } | ||
903 | |||
904 | int main(int argc, const char *argv[]) | ||
905 | { | ||
906 | uint32_t buf[128 * 1024]; | ||
907 | uint32_t filter; | ||
908 | int length, retval, view; | ||
909 | int fd = -1; | ||
910 | FILE *output = NULL, *input = NULL; | ||
911 | poptContext con; | ||
912 | char c; | ||
913 | struct pollfd pollfds[2]; | ||
914 | |||
915 | sys_sigint_handler = signal(SIGINT, sigint_handler); | ||
916 | |||
917 | con = poptGetContext(NULL, argc, argv, options, 0); | ||
918 | retval = poptGetNextOpt(con); | ||
919 | if (retval < -1) { | ||
920 | poptPrintUsage(con, stdout, 0); | ||
921 | return -1; | ||
922 | } | ||
923 | |||
924 | if (option_version) { | ||
925 | printf("dump tool for nosy sniffer, version %s\n", VERSION); | ||
926 | return 0; | ||
927 | } | ||
928 | |||
929 | if (__BYTE_ORDER != __LITTLE_ENDIAN) | ||
930 | fprintf(stderr, "warning: nosy has only been tested on little " | ||
931 | "endian machines\n"); | ||
932 | |||
933 | if (option_input != NULL) { | ||
934 | input = fopen(option_input, "r"); | ||
935 | if (input == NULL) { | ||
936 | fprintf(stderr, "Could not open %s, %m\n", option_input); | ||
937 | return -1; | ||
938 | } | ||
939 | } else { | ||
940 | fd = open(option_nosy_device, O_RDWR); | ||
941 | if (fd < 0) { | ||
942 | fprintf(stderr, "Could not open %s, %m\n", option_nosy_device); | ||
943 | return -1; | ||
944 | } | ||
945 | set_input_mode(); | ||
946 | } | ||
947 | |||
948 | if (strcmp(option_view, "transaction") == 0) | ||
949 | view = VIEW_TRANSACTION; | ||
950 | else if (strcmp(option_view, "stats") == 0) | ||
951 | view = VIEW_STATS; | ||
952 | else | ||
953 | view = VIEW_PACKET; | ||
954 | |||
955 | if (option_output) { | ||
956 | output = fopen(option_output, "w"); | ||
957 | if (output == NULL) { | ||
958 | fprintf(stderr, "Could not open %s, %m\n", option_output); | ||
959 | return -1; | ||
960 | } | ||
961 | } | ||
962 | |||
963 | setvbuf(stdout, NULL, _IOLBF, BUFSIZ); | ||
964 | |||
965 | filter = ~0; | ||
966 | if (!option_iso) | ||
967 | filter &= ~(1 << TCODE_STREAM_DATA); | ||
968 | if (!option_cycle_start) | ||
969 | filter &= ~(1 << TCODE_CYCLE_START); | ||
970 | if (view == VIEW_STATS) | ||
971 | filter = ~(1 << TCODE_CYCLE_START); | ||
972 | |||
973 | ioctl(fd, NOSY_IOC_FILTER, filter); | ||
974 | |||
975 | ioctl(fd, NOSY_IOC_START); | ||
976 | |||
977 | pollfds[0].fd = fd; | ||
978 | pollfds[0].events = POLLIN; | ||
979 | pollfds[1].fd = STDIN_FILENO; | ||
980 | pollfds[1].events = POLLIN; | ||
981 | |||
982 | while (run) { | ||
983 | if (input != NULL) { | ||
984 | if (fread(&length, sizeof length, 1, input) != 1) | ||
985 | return 0; | ||
986 | fread(buf, 1, length, input); | ||
987 | } else { | ||
988 | poll(pollfds, 2, -1); | ||
989 | if (pollfds[1].revents) { | ||
990 | read(STDIN_FILENO, &c, sizeof c); | ||
991 | switch (c) { | ||
992 | case 'q': | ||
993 | if (output != NULL) | ||
994 | fclose(output); | ||
995 | return 0; | ||
996 | } | ||
997 | } | ||
998 | |||
999 | if (pollfds[0].revents) | ||
1000 | length = read(fd, buf, sizeof buf); | ||
1001 | else | ||
1002 | continue; | ||
1003 | } | ||
1004 | |||
1005 | if (output != NULL) { | ||
1006 | fwrite(&length, sizeof length, 1, output); | ||
1007 | fwrite(buf, 1, length, output); | ||
1008 | } | ||
1009 | |||
1010 | switch (view) { | ||
1011 | case VIEW_TRANSACTION: | ||
1012 | handle_packet(buf, length); | ||
1013 | break; | ||
1014 | case VIEW_PACKET: | ||
1015 | print_packet(buf, length); | ||
1016 | break; | ||
1017 | case VIEW_STATS: | ||
1018 | print_stats(buf, length); | ||
1019 | break; | ||
1020 | } | ||
1021 | } | ||
1022 | |||
1023 | if (output != NULL) | ||
1024 | fclose(output); | ||
1025 | |||
1026 | close(fd); | ||
1027 | |||
1028 | poptFreeContext(con); | ||
1029 | |||
1030 | return 0; | ||
1031 | } | ||
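A note on the trace format used by --output and --input: each record is written as a native-endian int length followed by the raw bytes, exactly as the fread/fwrite pairs in the main loop show. A minimal stand-alone reader under that assumption; read_trace() is a made-up name:

#include <stdint.h>
#include <stdio.h>

static void read_trace(const char *path)
{
        uint32_t buf[128 * 1024];
        int length;
        FILE *f = fopen(path, "r");

        if (f == NULL)
                return;
        /* Record framing: native-endian 'int' length, then 'length' bytes. */
        while (fread(&length, sizeof length, 1, f) == 1 &&
               fread(buf, 1, length, f) == (size_t) length)
                printf("record: %d bytes\n", length);
        fclose(f);
}

For example, a capture made with "nosy-dump --output=trace.nosy" (file name illustrative) can later be replayed with "nosy-dump --input=trace.nosy --hex".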
diff --git a/tools/firewire/nosy-dump.h b/tools/firewire/nosy-dump.h new file mode 100644 index 000000000000..3a4b5b33ba5d --- /dev/null +++ b/tools/firewire/nosy-dump.h | |||
@@ -0,0 +1,173 @@ | |||
1 | #ifndef __nosy_dump_h__ | ||
2 | #define __nosy_dump_h__ | ||
3 | |||
4 | #define array_length(array) (sizeof(array) / sizeof(array[0])) | ||
5 | |||
6 | #define ACK_NO_ACK 0x0 | ||
7 | #define ACK_DONE(a) ((a >> 2) == 0) | ||
8 | #define ACK_BUSY(a) ((a >> 2) == 1) | ||
9 | #define ACK_ERROR(a) ((a >> 2) == 3) | ||
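These predicates classify the 4-bit ack code by its upper two bits; compare ack_names in nosy-dump.c. A few worked cases:

/* ACK_DONE(0x2)  : 0x2 >> 2 == 0 -> true   (ack_pending: request accepted)
 * ACK_BUSY(0x5)  : 0x5 >> 2 == 1 -> true   (ack_busy_a: expect a retry)
 * ACK_ERROR(0xe) : 0xe >> 2 == 3 -> true   (ack_type_error)
 * ACK_BUSY(0x1)  : 0x1 >> 2 == 0 -> false  (ack_complete is not busy) */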
10 | |||
11 | #include <stdint.h> | ||
12 | |||
13 | struct phy_packet { | ||
14 | uint32_t timestamp; | ||
15 | union { | ||
16 | struct { | ||
17 | uint32_t zero:24; | ||
18 | uint32_t phy_id:6; | ||
19 | uint32_t identifier:2; | ||
20 | } common, link_on; | ||
21 | |||
22 | struct { | ||
23 | uint32_t zero:16; | ||
24 | uint32_t gap_count:6; | ||
25 | uint32_t set_gap_count:1; | ||
26 | uint32_t set_root:1; | ||
27 | uint32_t root_id:6; | ||
28 | uint32_t identifier:2; | ||
29 | } phy_config; | ||
30 | |||
31 | struct { | ||
32 | uint32_t more_packets:1; | ||
33 | uint32_t initiated_reset:1; | ||
34 | uint32_t port2:2; | ||
35 | uint32_t port1:2; | ||
36 | uint32_t port0:2; | ||
37 | uint32_t power_class:3; | ||
38 | uint32_t contender:1; | ||
39 | uint32_t phy_delay:2; | ||
40 | uint32_t phy_speed:2; | ||
41 | uint32_t gap_count:6; | ||
42 | uint32_t link_active:1; | ||
43 | uint32_t extended:1; | ||
44 | uint32_t phy_id:6; | ||
45 | uint32_t identifier:2; | ||
46 | } self_id; | ||
47 | |||
48 | struct { | ||
49 | uint32_t more_packets:1; | ||
50 | uint32_t reserved1:1; | ||
51 | uint32_t porth:2; | ||
52 | uint32_t portg:2; | ||
53 | uint32_t portf:2; | ||
54 | uint32_t porte:2; | ||
55 | uint32_t portd:2; | ||
56 | uint32_t portc:2; | ||
57 | uint32_t portb:2; | ||
58 | uint32_t porta:2; | ||
59 | uint32_t reserved0:2; | ||
60 | uint32_t sequence:3; | ||
61 | uint32_t extended:1; | ||
62 | uint32_t phy_id:6; | ||
63 | uint32_t identifier:2; | ||
64 | } ext_self_id; | ||
65 | }; | ||
66 | uint32_t inverted; | ||
67 | uint32_t ack; | ||
68 | }; | ||
69 | |||
70 | #define TCODE_PHY_PACKET 0x10 | ||
71 | |||
72 | #define PHY_PACKET_CONFIGURATION 0x00 | ||
73 | #define PHY_PACKET_LINK_ON 0x01 | ||
74 | #define PHY_PACKET_SELF_ID 0x02 | ||
75 | |||
76 | struct link_packet { | ||
77 | uint32_t timestamp; | ||
78 | union { | ||
79 | struct { | ||
80 | uint32_t priority:4; | ||
81 | uint32_t tcode:4; | ||
82 | uint32_t rt:2; | ||
83 | uint32_t tlabel:6; | ||
84 | uint32_t destination:16; | ||
85 | |||
86 | uint32_t offset_high:16; | ||
87 | uint32_t source:16; | ||
88 | |||
89 | uint32_t offset_low; | ||
90 | } common; | ||
91 | |||
92 | struct { | ||
93 | uint32_t common[3]; | ||
94 | uint32_t crc; | ||
95 | } read_quadlet; | ||
96 | |||
97 | struct { | ||
98 | uint32_t common[3]; | ||
99 | uint32_t data; | ||
100 | uint32_t crc; | ||
101 | } read_quadlet_response; | ||
102 | |||
103 | struct { | ||
104 | uint32_t common[3]; | ||
105 | uint32_t extended_tcode:16; | ||
106 | uint32_t data_length:16; | ||
107 | uint32_t crc; | ||
108 | } read_block; | ||
109 | |||
110 | struct { | ||
111 | uint32_t common[3]; | ||
112 | uint32_t extended_tcode:16; | ||
113 | uint32_t data_length:16; | ||
114 | uint32_t crc; | ||
115 | uint32_t data[0]; | ||
116 | /* crc and ack follow. */ | ||
117 | } read_block_response; | ||
118 | |||
119 | struct { | ||
120 | uint32_t common[3]; | ||
121 | uint32_t data; | ||
122 | uint32_t crc; | ||
123 | } write_quadlet; | ||
124 | |||
125 | struct { | ||
126 | uint32_t common[3]; | ||
127 | uint32_t extended_tcode:16; | ||
128 | uint32_t data_length:16; | ||
129 | uint32_t crc; | ||
130 | uint32_t data[0]; | ||
131 | /* crc and ack follow. */ | ||
132 | } write_block; | ||
133 | |||
134 | struct { | ||
135 | uint32_t common[3]; | ||
136 | uint32_t crc; | ||
137 | } write_response; | ||
138 | |||
139 | struct { | ||
140 | uint32_t common[3]; | ||
141 | uint32_t data; | ||
142 | uint32_t crc; | ||
143 | } cycle_start; | ||
144 | |||
145 | struct { | ||
146 | uint32_t sy:4; | ||
147 | uint32_t tcode:4; | ||
148 | uint32_t channel:6; | ||
149 | uint32_t tag:2; | ||
150 | uint32_t data_length:16; | ||
151 | |||
152 | uint32_t crc; | ||
153 | } iso_data; | ||
154 | }; | ||
155 | }; | ||
156 | |||
157 | struct subaction { | ||
158 | uint32_t ack; | ||
159 | size_t length; | ||
160 | struct list link; | ||
161 | struct link_packet packet; | ||
162 | }; | ||
163 | |||
164 | struct link_transaction { | ||
165 | int request_node, response_node, tlabel; | ||
166 | struct subaction *request, *response; | ||
167 | struct list request_list, response_list; | ||
168 | struct list link; | ||
169 | }; | ||
170 | |||
171 | int decode_fcp(struct link_transaction *t); | ||
172 | |||
173 | #endif /* __nosy_dump_h__ */ | ||
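One caveat worth spelling out: these bit-field layouts rely on the compiler packing bit-fields LSB-first within each 32-bit quadlet, which holds for GCC on little-endian hosts and is why main() in nosy-dump.c warns on big-endian machines. A small consistency check under that assumption; layout_check() is a made-up name:

#include <assert.h>
#include <stdint.h>
#include <string.h>

static void layout_check(void)
{
        struct link_packet p;
        uint32_t q;

        memset(&p, 0, sizeof p);
        p.common.tcode = 0x4;           /* read_quadlet_request */
        p.common.destination = 0xffc0;  /* node 0 on the local bus */

        /* First quadlet after the timestamp, read the same way get_bits() does. */
        q = ((uint32_t *) &p)[1];
        assert(((q >> 4) & 0xf) == 0x4);        /* "tcode": offset 24, width 4 */
        assert((q >> 16) == 0xffc0);            /* "dest": offset 0, width 16 */
}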