author     Linus Torvalds <torvalds@linux-foundation.org>   2012-03-22 23:31:15 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-03-22 23:31:15 -0400
commit     34699403e9916060af8ae23f5e4705a6c078e79d
tree       e149ca6354171caf61132d80508ad878b00878c9  /drivers/firewire
parent     7fc86a7908a4e9eb2da4b6498f86193d113842d3
parent     d1bbd20972936b9b178fda3eb1ec417cb27fdc01
Merge tag 'firewire-updates' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394
Pull IEEE 1394 (FireWire) subsystem updates post v3.3 from Stefan Richter:
- Some SBP-2 initiator fixes, a side product of ongoing work on a target.
- Reintroduction of an isochronous I/O feature of the older ieee1394 driver
  stack (flush buffer completions); it was evidently rarely used, but not
  actually unused. Matching libraw1394 code is already available. (A minimal
  userspace sketch of the new ioctl follows the shortlog below.)
- Prefix all kernel log messages with the device name or card name, plus
  other logging-related cleanups.
- Miscellaneous other small cleanups, among them a small API change that
  also affects sound/firewire/; Clemens Ladisch is aware of it.
* tag 'firewire-updates' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394: (26 commits)
firewire: allow explicit flushing of iso packet completions
firewire: prevent dropping of completed iso packet header data
firewire: ohci: factor out iso completion flushing code
firewire: ohci: simplify iso header pointer arithmetic
firewire: ohci: optimize control bit checks
firewire: ohci: remove unused excess_bytes field
firewire: ohci: copy_iso_headers(): make comment match the code
firewire: cdev: fix IR multichannel event documentation
firewire: ohci: fix too-early completion of IR multichannel buffers
firewire: ohci: move runtime debug facility out of #ifdef
firewire: tone down some diagnostic log messages
firewire: sbp2: replace a GFP_ATOMIC allocation
firewire: sbp2: Fix SCSI sense data mangling
firewire: sbp2: Ignore SBP-2 targets on the local node
firewire: sbp2: Take into account Unit_Unique_ID
firewire: nosy: Use the macro DMA_BIT_MASK().
firewire: core: convert AR-req handler lock from _irqsave to _bh
firewire: core: fix race at address_handler unregistration
firewire: core: remove obsolete comment
firewire: core: prefix log messages with card name
...
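The flush-buffer-completions feature described above is exposed to userspace as a new character-device ioctl (dispatch index 0x18, struct fw_cdev_flush_iso, cdev ABI version 5 in the core-cdev.c hunks below). The following is a minimal, hedged sketch of how a libraw1394-style client might call it; the FW_CDEV_IOC_FLUSH_ISO macro name and the surrounding setup are assumed from <linux/firewire-cdev.h> rather than shown in this diff, and error handling is omitted.

/*
 * Hedged sketch: ask the kernel to deliver pending isochronous
 * completion events now instead of waiting for the next hardware
 * interrupt.  Assumes an iso context (handle 0) was already created
 * on this file descriptor and buffers were queued.
 */
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

static int flush_iso_completions(int fd)
{
	struct fw_cdev_flush_iso flush = { .handle = 0 };

	return ioctl(fd, FW_CDEV_IOC_FLUSH_ISO, &flush);  /* 0 on success */
}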
Diffstat (limited to 'drivers/firewire')
 drivers/firewire/Kconfig             |   5
 drivers/firewire/core-card.c         |  34
 drivers/firewire/core-cdev.c         |  24
 drivers/firewire/core-device.c       |  62
 drivers/firewire/core-iso.c          |   6
 drivers/firewire/core-topology.c     |  17
 drivers/firewire/core-transaction.c  |  44
 drivers/firewire/core.h              |  21
 drivers/firewire/net.c               |  43
 drivers/firewire/nosy.c              |   4
 drivers/firewire/ohci.c              | 350
 drivers/firewire/sbp2.c              | 130
 12 files changed, 442 insertions(+), 298 deletions(-)
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 2be6f4520772..7224533e8ca6 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -28,11 +28,6 @@ config FIREWIRE_OHCI | |||
28 | To compile this driver as a module, say M here: The module will be | 28 | To compile this driver as a module, say M here: The module will be |
29 | called firewire-ohci. | 29 | called firewire-ohci. |
30 | 30 | ||
31 | config FIREWIRE_OHCI_DEBUG | ||
32 | bool | ||
33 | depends on FIREWIRE_OHCI | ||
34 | default y | ||
35 | |||
36 | config FIREWIRE_SBP2 | 31 | config FIREWIRE_SBP2 |
37 | tristate "Storage devices (SBP-2 protocol)" | 32 | tristate "Storage devices (SBP-2 protocol)" |
38 | depends on FIREWIRE && SCSI | 33 | depends on FIREWIRE && SCSI |
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 85661b060ed7..cc595eba7ba9 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -37,6 +37,22 @@ | |||
37 | 37 | ||
38 | #include "core.h" | 38 | #include "core.h" |
39 | 39 | ||
40 | #define define_fw_printk_level(func, kern_level) \ | ||
41 | void func(const struct fw_card *card, const char *fmt, ...) \ | ||
42 | { \ | ||
43 | struct va_format vaf; \ | ||
44 | va_list args; \ | ||
45 | \ | ||
46 | va_start(args, fmt); \ | ||
47 | vaf.fmt = fmt; \ | ||
48 | vaf.va = &args; \ | ||
49 | printk(kern_level KBUILD_MODNAME " %s: %pV", \ | ||
50 | dev_name(card->device), &vaf); \ | ||
51 | va_end(args); \ | ||
52 | } | ||
53 | define_fw_printk_level(fw_err, KERN_ERR); | ||
54 | define_fw_printk_level(fw_notice, KERN_NOTICE); | ||
55 | |||
40 | int fw_compute_block_crc(__be32 *block) | 56 | int fw_compute_block_crc(__be32 *block) |
41 | { | 57 | { |
42 | int length; | 58 | int length; |
@@ -260,7 +276,7 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation) | |||
260 | fw_iso_resource_manage(card, generation, 1ULL << 31, | 276 | fw_iso_resource_manage(card, generation, 1ULL << 31, |
261 | &channel, &bandwidth, true); | 277 | &channel, &bandwidth, true); |
262 | if (channel != 31) { | 278 | if (channel != 31) { |
263 | fw_notify("failed to allocate broadcast channel\n"); | 279 | fw_notice(card, "failed to allocate broadcast channel\n"); |
264 | return; | 280 | return; |
265 | } | 281 | } |
266 | card->broadcast_channel_allocated = true; | 282 | card->broadcast_channel_allocated = true; |
@@ -343,14 +359,14 @@ static void bm_work(struct work_struct *work) | |||
343 | 359 | ||
344 | if (!card->irm_node->link_on) { | 360 | if (!card->irm_node->link_on) { |
345 | new_root_id = local_id; | 361 | new_root_id = local_id; |
346 | fw_notify("%s, making local node (%02x) root.\n", | 362 | fw_notice(card, "%s, making local node (%02x) root\n", |
347 | "IRM has link off", new_root_id); | 363 | "IRM has link off", new_root_id); |
348 | goto pick_me; | 364 | goto pick_me; |
349 | } | 365 | } |
350 | 366 | ||
351 | if (irm_is_1394_1995_only && !keep_this_irm) { | 367 | if (irm_is_1394_1995_only && !keep_this_irm) { |
352 | new_root_id = local_id; | 368 | new_root_id = local_id; |
353 | fw_notify("%s, making local node (%02x) root.\n", | 369 | fw_notice(card, "%s, making local node (%02x) root\n", |
354 | "IRM is not 1394a compliant", new_root_id); | 370 | "IRM is not 1394a compliant", new_root_id); |
355 | goto pick_me; | 371 | goto pick_me; |
356 | } | 372 | } |
@@ -405,7 +421,7 @@ static void bm_work(struct work_struct *work) | |||
405 | * root, and thus, IRM. | 421 | * root, and thus, IRM. |
406 | */ | 422 | */ |
407 | new_root_id = local_id; | 423 | new_root_id = local_id; |
408 | fw_notify("%s, making local node (%02x) root.\n", | 424 | fw_notice(card, "%s, making local node (%02x) root\n", |
409 | "BM lock failed", new_root_id); | 425 | "BM lock failed", new_root_id); |
410 | goto pick_me; | 426 | goto pick_me; |
411 | } | 427 | } |
@@ -478,8 +494,8 @@ static void bm_work(struct work_struct *work) | |||
478 | spin_unlock_irq(&card->lock); | 494 | spin_unlock_irq(&card->lock); |
479 | 495 | ||
480 | if (do_reset) { | 496 | if (do_reset) { |
481 | fw_notify("phy config: card %d, new root=%x, gap_count=%d\n", | 497 | fw_notice(card, "phy config: new root=%x, gap_count=%d\n", |
482 | card->index, new_root_id, gap_count); | 498 | new_root_id, gap_count); |
483 | fw_send_phy_config(card, new_root_id, generation, gap_count); | 499 | fw_send_phy_config(card, new_root_id, generation, gap_count); |
484 | reset_bus(card, true); | 500 | reset_bus(card, true); |
485 | /* Will allocate broadcast channel after the reset. */ | 501 | /* Will allocate broadcast channel after the reset. */ |
@@ -634,6 +650,11 @@ static void dummy_flush_queue_iso(struct fw_iso_context *ctx) | |||
634 | { | 650 | { |
635 | } | 651 | } |
636 | 652 | ||
653 | static int dummy_flush_iso_completions(struct fw_iso_context *ctx) | ||
654 | { | ||
655 | return -ENODEV; | ||
656 | } | ||
657 | |||
637 | static const struct fw_card_driver dummy_driver_template = { | 658 | static const struct fw_card_driver dummy_driver_template = { |
638 | .read_phy_reg = dummy_read_phy_reg, | 659 | .read_phy_reg = dummy_read_phy_reg, |
639 | .update_phy_reg = dummy_update_phy_reg, | 660 | .update_phy_reg = dummy_update_phy_reg, |
@@ -646,6 +667,7 @@ static const struct fw_card_driver dummy_driver_template = { | |||
646 | .set_iso_channels = dummy_set_iso_channels, | 667 | .set_iso_channels = dummy_set_iso_channels, |
647 | .queue_iso = dummy_queue_iso, | 668 | .queue_iso = dummy_queue_iso, |
648 | .flush_queue_iso = dummy_flush_queue_iso, | 669 | .flush_queue_iso = dummy_flush_queue_iso, |
670 | .flush_iso_completions = dummy_flush_iso_completions, | ||
649 | }; | 671 | }; |
650 | 672 | ||
651 | void fw_card_release(struct kref *kref) | 673 | void fw_card_release(struct kref *kref) |
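The define_fw_printk_level() macro added above generates fw_err() and fw_notice(), card-aware replacements for the old fw_error()/fw_notify() helpers; every message is prefixed with KBUILD_MODNAME and the card's device name. As a rough illustration (the function and message below are invented, not part of this diff), a caller that has a struct fw_card at hand now writes:

/*
 * Hypothetical core-internal caller; the message text is made up.
 * The output appears as "firewire_core <device name>: ...".
 */
static void example_report_gap_count(struct fw_card *card, int expected, int found)
{
	if (found != expected)
		fw_notice(card, "gap count mismatch (%d != %d)\n",
			  found, expected);
}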
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 4799393247c8..22c6df5f136d 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -51,7 +51,7 @@ | |||
51 | /* | 51 | /* |
52 | * ABI version history is documented in linux/firewire-cdev.h. | 52 | * ABI version history is documented in linux/firewire-cdev.h. |
53 | */ | 53 | */ |
54 | #define FW_CDEV_KERNEL_VERSION 4 | 54 | #define FW_CDEV_KERNEL_VERSION 5 |
55 | #define FW_CDEV_VERSION_EVENT_REQUEST2 4 | 55 | #define FW_CDEV_VERSION_EVENT_REQUEST2 4 |
56 | #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 | 56 | #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 |
57 | 57 | ||
@@ -389,7 +389,7 @@ static void queue_bus_reset_event(struct client *client) | |||
389 | 389 | ||
390 | e = kzalloc(sizeof(*e), GFP_KERNEL); | 390 | e = kzalloc(sizeof(*e), GFP_KERNEL); |
391 | if (e == NULL) { | 391 | if (e == NULL) { |
392 | fw_notify("Out of memory when allocating event\n"); | 392 | fw_notice(client->device->card, "out of memory when allocating event\n"); |
393 | return; | 393 | return; |
394 | } | 394 | } |
395 | 395 | ||
@@ -438,6 +438,7 @@ union ioctl_arg { | |||
438 | struct fw_cdev_send_phy_packet send_phy_packet; | 438 | struct fw_cdev_send_phy_packet send_phy_packet; |
439 | struct fw_cdev_receive_phy_packets receive_phy_packets; | 439 | struct fw_cdev_receive_phy_packets receive_phy_packets; |
440 | struct fw_cdev_set_iso_channels set_iso_channels; | 440 | struct fw_cdev_set_iso_channels set_iso_channels; |
441 | struct fw_cdev_flush_iso flush_iso; | ||
441 | }; | 442 | }; |
442 | 443 | ||
443 | static int ioctl_get_info(struct client *client, union ioctl_arg *arg) | 444 | static int ioctl_get_info(struct client *client, union ioctl_arg *arg) |
@@ -691,7 +692,7 @@ static void handle_request(struct fw_card *card, struct fw_request *request, | |||
691 | r = kmalloc(sizeof(*r), GFP_ATOMIC); | 692 | r = kmalloc(sizeof(*r), GFP_ATOMIC); |
692 | e = kmalloc(sizeof(*e), GFP_ATOMIC); | 693 | e = kmalloc(sizeof(*e), GFP_ATOMIC); |
693 | if (r == NULL || e == NULL) { | 694 | if (r == NULL || e == NULL) { |
694 | fw_notify("Out of memory when allocating event\n"); | 695 | fw_notice(card, "out of memory when allocating event\n"); |
695 | goto failed; | 696 | goto failed; |
696 | } | 697 | } |
697 | r->card = card; | 698 | r->card = card; |
@@ -928,7 +929,7 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle, | |||
928 | 929 | ||
929 | e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC); | 930 | e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC); |
930 | if (e == NULL) { | 931 | if (e == NULL) { |
931 | fw_notify("Out of memory when allocating event\n"); | 932 | fw_notice(context->card, "out of memory when allocating event\n"); |
932 | return; | 933 | return; |
933 | } | 934 | } |
934 | e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; | 935 | e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; |
@@ -948,7 +949,7 @@ static void iso_mc_callback(struct fw_iso_context *context, | |||
948 | 949 | ||
949 | e = kmalloc(sizeof(*e), GFP_ATOMIC); | 950 | e = kmalloc(sizeof(*e), GFP_ATOMIC); |
950 | if (e == NULL) { | 951 | if (e == NULL) { |
951 | fw_notify("Out of memory when allocating event\n"); | 952 | fw_notice(context->card, "out of memory when allocating event\n"); |
952 | return; | 953 | return; |
953 | } | 954 | } |
954 | e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL; | 955 | e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL; |
@@ -1168,6 +1169,16 @@ static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg) | |||
1168 | return fw_iso_context_stop(client->iso_context); | 1169 | return fw_iso_context_stop(client->iso_context); |
1169 | } | 1170 | } |
1170 | 1171 | ||
1172 | static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg) | ||
1173 | { | ||
1174 | struct fw_cdev_flush_iso *a = &arg->flush_iso; | ||
1175 | |||
1176 | if (client->iso_context == NULL || a->handle != 0) | ||
1177 | return -EINVAL; | ||
1178 | |||
1179 | return fw_iso_context_flush_completions(client->iso_context); | ||
1180 | } | ||
1181 | |||
1171 | static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg) | 1182 | static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg) |
1172 | { | 1183 | { |
1173 | struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2; | 1184 | struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2; |
@@ -1548,7 +1559,7 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p) | |||
1548 | list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) { | 1559 | list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) { |
1549 | e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC); | 1560 | e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC); |
1550 | if (e == NULL) { | 1561 | if (e == NULL) { |
1551 | fw_notify("Out of memory when allocating event\n"); | 1562 | fw_notice(card, "out of memory when allocating event\n"); |
1552 | break; | 1563 | break; |
1553 | } | 1564 | } |
1554 | e->phy_packet.closure = client->phy_receiver_closure; | 1565 | e->phy_packet.closure = client->phy_receiver_closure; |
@@ -1589,6 +1600,7 @@ static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = { | |||
1589 | [0x15] = ioctl_send_phy_packet, | 1600 | [0x15] = ioctl_send_phy_packet, |
1590 | [0x16] = ioctl_receive_phy_packets, | 1601 | [0x16] = ioctl_receive_phy_packets, |
1591 | [0x17] = ioctl_set_iso_channels, | 1602 | [0x17] = ioctl_set_iso_channels, |
1603 | [0x18] = ioctl_flush_iso, | ||
1592 | }; | 1604 | }; |
1593 | 1605 | ||
1594 | static int dispatch_ioctl(struct client *client, | 1606 | static int dispatch_ioctl(struct client *client, |
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index f3b890da1e87..afa7c83bd114 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -485,6 +485,7 @@ static int read_rom(struct fw_device *device, | |||
485 | */ | 485 | */ |
486 | static int read_config_rom(struct fw_device *device, int generation) | 486 | static int read_config_rom(struct fw_device *device, int generation) |
487 | { | 487 | { |
488 | struct fw_card *card = device->card; | ||
488 | const u32 *old_rom, *new_rom; | 489 | const u32 *old_rom, *new_rom; |
489 | u32 *rom, *stack; | 490 | u32 *rom, *stack; |
490 | u32 sp, key; | 491 | u32 sp, key; |
@@ -529,12 +530,12 @@ static int read_config_rom(struct fw_device *device, int generation) | |||
529 | */ | 530 | */ |
530 | if ((rom[2] & 0x7) < device->max_speed || | 531 | if ((rom[2] & 0x7) < device->max_speed || |
531 | device->max_speed == SCODE_BETA || | 532 | device->max_speed == SCODE_BETA || |
532 | device->card->beta_repeaters_present) { | 533 | card->beta_repeaters_present) { |
533 | u32 dummy; | 534 | u32 dummy; |
534 | 535 | ||
535 | /* for S1600 and S3200 */ | 536 | /* for S1600 and S3200 */ |
536 | if (device->max_speed == SCODE_BETA) | 537 | if (device->max_speed == SCODE_BETA) |
537 | device->max_speed = device->card->link_speed; | 538 | device->max_speed = card->link_speed; |
538 | 539 | ||
539 | while (device->max_speed > SCODE_100) { | 540 | while (device->max_speed > SCODE_100) { |
540 | if (read_rom(device, generation, 0, &dummy) == | 541 | if (read_rom(device, generation, 0, &dummy) == |
@@ -576,9 +577,9 @@ static int read_config_rom(struct fw_device *device, int generation) | |||
576 | * a firmware bug. Ignore this whole block, i.e. | 577 | * a firmware bug. Ignore this whole block, i.e. |
577 | * simply set a fake block length of 0. | 578 | * simply set a fake block length of 0. |
578 | */ | 579 | */ |
579 | fw_error("skipped invalid ROM block %x at %llx\n", | 580 | fw_err(card, "skipped invalid ROM block %x at %llx\n", |
580 | rom[i], | 581 | rom[i], |
581 | i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM); | 582 | i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM); |
582 | rom[i] = 0; | 583 | rom[i] = 0; |
583 | end = i; | 584 | end = i; |
584 | } | 585 | } |
@@ -604,9 +605,10 @@ static int read_config_rom(struct fw_device *device, int generation) | |||
604 | * the ROM don't have to check offsets all the time. | 605 | * the ROM don't have to check offsets all the time. |
605 | */ | 606 | */ |
606 | if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) { | 607 | if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) { |
607 | fw_error("skipped unsupported ROM entry %x at %llx\n", | 608 | fw_err(card, |
608 | rom[i], | 609 | "skipped unsupported ROM entry %x at %llx\n", |
609 | i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM); | 610 | rom[i], |
611 | i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM); | ||
610 | rom[i] = 0; | 612 | rom[i] = 0; |
611 | continue; | 613 | continue; |
612 | } | 614 | } |
@@ -641,6 +643,7 @@ static void fw_unit_release(struct device *dev) | |||
641 | { | 643 | { |
642 | struct fw_unit *unit = fw_unit(dev); | 644 | struct fw_unit *unit = fw_unit(dev); |
643 | 645 | ||
646 | fw_device_put(fw_parent_device(unit)); | ||
644 | kfree(unit); | 647 | kfree(unit); |
645 | } | 648 | } |
646 | 649 | ||
@@ -672,7 +675,7 @@ static void create_units(struct fw_device *device) | |||
672 | */ | 675 | */ |
673 | unit = kzalloc(sizeof(*unit), GFP_KERNEL); | 676 | unit = kzalloc(sizeof(*unit), GFP_KERNEL); |
674 | if (unit == NULL) { | 677 | if (unit == NULL) { |
675 | fw_error("failed to allocate memory for unit\n"); | 678 | fw_err(device->card, "out of memory for unit\n"); |
676 | continue; | 679 | continue; |
677 | } | 680 | } |
678 | 681 | ||
@@ -692,6 +695,7 @@ static void create_units(struct fw_device *device) | |||
692 | if (device_register(&unit->device) < 0) | 695 | if (device_register(&unit->device) < 0) |
693 | goto skip_unit; | 696 | goto skip_unit; |
694 | 697 | ||
698 | fw_device_get(device); | ||
695 | continue; | 699 | continue; |
696 | 700 | ||
697 | skip_unit: | 701 | skip_unit: |
@@ -873,7 +877,7 @@ static int lookup_existing_device(struct device *dev, void *data) | |||
873 | smp_wmb(); /* update node_id before generation */ | 877 | smp_wmb(); /* update node_id before generation */ |
874 | old->generation = card->generation; | 878 | old->generation = card->generation; |
875 | old->config_rom_retries = 0; | 879 | old->config_rom_retries = 0; |
876 | fw_notify("rediscovered device %s\n", dev_name(dev)); | 880 | fw_notice(card, "rediscovered device %s\n", dev_name(dev)); |
877 | 881 | ||
878 | PREPARE_DELAYED_WORK(&old->work, fw_device_update); | 882 | PREPARE_DELAYED_WORK(&old->work, fw_device_update); |
879 | fw_schedule_device_work(old, 0); | 883 | fw_schedule_device_work(old, 0); |
@@ -954,6 +958,7 @@ static void fw_device_init(struct work_struct *work) | |||
954 | { | 958 | { |
955 | struct fw_device *device = | 959 | struct fw_device *device = |
956 | container_of(work, struct fw_device, work.work); | 960 | container_of(work, struct fw_device, work.work); |
961 | struct fw_card *card = device->card; | ||
957 | struct device *revived_dev; | 962 | struct device *revived_dev; |
958 | int minor, ret; | 963 | int minor, ret; |
959 | 964 | ||
@@ -970,16 +975,16 @@ static void fw_device_init(struct work_struct *work) | |||
970 | fw_schedule_device_work(device, RETRY_DELAY); | 975 | fw_schedule_device_work(device, RETRY_DELAY); |
971 | } else { | 976 | } else { |
972 | if (device->node->link_on) | 977 | if (device->node->link_on) |
973 | fw_notify("giving up on config rom for node id %x\n", | 978 | fw_notice(card, "giving up on Config ROM for node id %x\n", |
974 | device->node_id); | 979 | device->node_id); |
975 | if (device->node == device->card->root_node) | 980 | if (device->node == card->root_node) |
976 | fw_schedule_bm_work(device->card, 0); | 981 | fw_schedule_bm_work(card, 0); |
977 | fw_device_release(&device->device); | 982 | fw_device_release(&device->device); |
978 | } | 983 | } |
979 | return; | 984 | return; |
980 | } | 985 | } |
981 | 986 | ||
982 | revived_dev = device_find_child(device->card->device, | 987 | revived_dev = device_find_child(card->device, |
983 | device, lookup_existing_device); | 988 | device, lookup_existing_device); |
984 | if (revived_dev) { | 989 | if (revived_dev) { |
985 | put_device(revived_dev); | 990 | put_device(revived_dev); |
@@ -1002,7 +1007,7 @@ static void fw_device_init(struct work_struct *work) | |||
1002 | 1007 | ||
1003 | device->device.bus = &fw_bus_type; | 1008 | device->device.bus = &fw_bus_type; |
1004 | device->device.type = &fw_device_type; | 1009 | device->device.type = &fw_device_type; |
1005 | device->device.parent = device->card->device; | 1010 | device->device.parent = card->device; |
1006 | device->device.devt = MKDEV(fw_cdev_major, minor); | 1011 | device->device.devt = MKDEV(fw_cdev_major, minor); |
1007 | dev_set_name(&device->device, "fw%d", minor); | 1012 | dev_set_name(&device->device, "fw%d", minor); |
1008 | 1013 | ||
@@ -1014,7 +1019,7 @@ static void fw_device_init(struct work_struct *work) | |||
1014 | &device->attribute_group); | 1019 | &device->attribute_group); |
1015 | 1020 | ||
1016 | if (device_add(&device->device)) { | 1021 | if (device_add(&device->device)) { |
1017 | fw_error("Failed to add device.\n"); | 1022 | fw_err(card, "failed to add device\n"); |
1018 | goto error_with_cdev; | 1023 | goto error_with_cdev; |
1019 | } | 1024 | } |
1020 | 1025 | ||
@@ -1035,18 +1040,10 @@ static void fw_device_init(struct work_struct *work) | |||
1035 | PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); | 1040 | PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); |
1036 | fw_schedule_device_work(device, SHUTDOWN_DELAY); | 1041 | fw_schedule_device_work(device, SHUTDOWN_DELAY); |
1037 | } else { | 1042 | } else { |
1038 | if (device->config_rom_retries) | 1043 | fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n", |
1039 | fw_notify("created device %s: GUID %08x%08x, S%d00, " | 1044 | dev_name(&device->device), |
1040 | "%d config ROM retries\n", | 1045 | device->config_rom[3], device->config_rom[4], |
1041 | dev_name(&device->device), | 1046 | 1 << device->max_speed); |
1042 | device->config_rom[3], device->config_rom[4], | ||
1043 | 1 << device->max_speed, | ||
1044 | device->config_rom_retries); | ||
1045 | else | ||
1046 | fw_notify("created device %s: GUID %08x%08x, S%d00\n", | ||
1047 | dev_name(&device->device), | ||
1048 | device->config_rom[3], device->config_rom[4], | ||
1049 | 1 << device->max_speed); | ||
1050 | device->config_rom_retries = 0; | 1047 | device->config_rom_retries = 0; |
1051 | 1048 | ||
1052 | set_broadcast_channel(device, device->generation); | 1049 | set_broadcast_channel(device, device->generation); |
@@ -1058,8 +1055,8 @@ static void fw_device_init(struct work_struct *work) | |||
1058 | * just end up running the IRM work a couple of extra times - | 1055 | * just end up running the IRM work a couple of extra times - |
1059 | * pretty harmless. | 1056 | * pretty harmless. |
1060 | */ | 1057 | */ |
1061 | if (device->node == device->card->root_node) | 1058 | if (device->node == card->root_node) |
1062 | fw_schedule_bm_work(device->card, 0); | 1059 | fw_schedule_bm_work(card, 0); |
1063 | 1060 | ||
1064 | return; | 1061 | return; |
1065 | 1062 | ||
@@ -1163,12 +1160,13 @@ static void fw_device_refresh(struct work_struct *work) | |||
1163 | FW_DEVICE_RUNNING) == FW_DEVICE_GONE) | 1160 | FW_DEVICE_RUNNING) == FW_DEVICE_GONE) |
1164 | goto gone; | 1161 | goto gone; |
1165 | 1162 | ||
1166 | fw_notify("refreshed device %s\n", dev_name(&device->device)); | 1163 | fw_notice(card, "refreshed device %s\n", dev_name(&device->device)); |
1167 | device->config_rom_retries = 0; | 1164 | device->config_rom_retries = 0; |
1168 | goto out; | 1165 | goto out; |
1169 | 1166 | ||
1170 | give_up: | 1167 | give_up: |
1171 | fw_notify("giving up on refresh of device %s\n", dev_name(&device->device)); | 1168 | fw_notice(card, "giving up on refresh of device %s\n", |
1169 | dev_name(&device->device)); | ||
1172 | gone: | 1170 | gone: |
1173 | atomic_set(&device->state, FW_DEVICE_GONE); | 1171 | atomic_set(&device->state, FW_DEVICE_GONE); |
1174 | PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); | 1172 | PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); |
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 0f90e0071875..d1565828ae2c 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -192,6 +192,12 @@ void fw_iso_context_queue_flush(struct fw_iso_context *ctx) | |||
192 | } | 192 | } |
193 | EXPORT_SYMBOL(fw_iso_context_queue_flush); | 193 | EXPORT_SYMBOL(fw_iso_context_queue_flush); |
194 | 194 | ||
195 | int fw_iso_context_flush_completions(struct fw_iso_context *ctx) | ||
196 | { | ||
197 | return ctx->card->driver->flush_iso_completions(ctx); | ||
198 | } | ||
199 | EXPORT_SYMBOL(fw_iso_context_flush_completions); | ||
200 | |||
195 | int fw_iso_context_stop(struct fw_iso_context *ctx) | 201 | int fw_iso_context_stop(struct fw_iso_context *ctx) |
196 | { | 202 | { |
197 | return ctx->card->driver->stop_iso(ctx); | 203 | return ctx->card->driver->stop_iso(ctx); |
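fw_iso_context_flush_completions() is also exported for in-kernel users; this is the small API addition that sound/firewire/ has to follow, as noted in the pull message. Below is a hedged sketch of how a client driver might use it to make its position bookkeeping current without waiting for the next interrupt; the struct and function names other than the exported call are invented for illustration.

#include <linux/firewire.h>

/* Hypothetical stream state; buffer_position is advanced from the
 * iso completion callback. */
struct example_stream {
	struct fw_iso_context *iso_ctx;
	unsigned int buffer_position;
};

static unsigned int example_current_position(struct example_stream *s)
{
	/* Run any pending completion callbacks right now ... */
	fw_iso_context_flush_completions(s->iso_ctx);

	/* ... so the value they maintain is up to date when we read it. */
	return s->buffer_position;
}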
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 94d3b494ddfb..255646ffc352 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -205,19 +205,19 @@ static struct fw_node *build_tree(struct fw_card *card, | |||
205 | next_sid = count_ports(sid, &port_count, &child_port_count); | 205 | next_sid = count_ports(sid, &port_count, &child_port_count); |
206 | 206 | ||
207 | if (next_sid == NULL) { | 207 | if (next_sid == NULL) { |
208 | fw_error("Inconsistent extended self IDs.\n"); | 208 | fw_err(card, "inconsistent extended self IDs\n"); |
209 | return NULL; | 209 | return NULL; |
210 | } | 210 | } |
211 | 211 | ||
212 | q = *sid; | 212 | q = *sid; |
213 | if (phy_id != SELF_ID_PHY_ID(q)) { | 213 | if (phy_id != SELF_ID_PHY_ID(q)) { |
214 | fw_error("PHY ID mismatch in self ID: %d != %d.\n", | 214 | fw_err(card, "PHY ID mismatch in self ID: %d != %d\n", |
215 | phy_id, SELF_ID_PHY_ID(q)); | 215 | phy_id, SELF_ID_PHY_ID(q)); |
216 | return NULL; | 216 | return NULL; |
217 | } | 217 | } |
218 | 218 | ||
219 | if (child_port_count > stack_depth) { | 219 | if (child_port_count > stack_depth) { |
220 | fw_error("Topology stack underflow\n"); | 220 | fw_err(card, "topology stack underflow\n"); |
221 | return NULL; | 221 | return NULL; |
222 | } | 222 | } |
223 | 223 | ||
@@ -235,7 +235,7 @@ static struct fw_node *build_tree(struct fw_card *card, | |||
235 | 235 | ||
236 | node = fw_node_create(q, port_count, card->color); | 236 | node = fw_node_create(q, port_count, card->color); |
237 | if (node == NULL) { | 237 | if (node == NULL) { |
238 | fw_error("Out of memory while building topology.\n"); | 238 | fw_err(card, "out of memory while building topology\n"); |
239 | return NULL; | 239 | return NULL; |
240 | } | 240 | } |
241 | 241 | ||
@@ -284,8 +284,8 @@ static struct fw_node *build_tree(struct fw_card *card, | |||
284 | */ | 284 | */ |
285 | if ((next_sid == end && parent_count != 0) || | 285 | if ((next_sid == end && parent_count != 0) || |
286 | (next_sid < end && parent_count != 1)) { | 286 | (next_sid < end && parent_count != 1)) { |
287 | fw_error("Parent port inconsistency for node %d: " | 287 | fw_err(card, "parent port inconsistency for node %d: " |
288 | "parent_count=%d\n", phy_id, parent_count); | 288 | "parent_count=%d\n", phy_id, parent_count); |
289 | return NULL; | 289 | return NULL; |
290 | } | 290 | } |
291 | 291 | ||
@@ -530,7 +530,6 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, | |||
530 | */ | 530 | */ |
531 | if (!is_next_generation(generation, card->generation) && | 531 | if (!is_next_generation(generation, card->generation) && |
532 | card->local_node != NULL) { | 532 | card->local_node != NULL) { |
533 | fw_notify("skipped bus generations, destroying all nodes\n"); | ||
534 | fw_destroy_nodes(card); | 533 | fw_destroy_nodes(card); |
535 | card->bm_retries = 0; | 534 | card->bm_retries = 0; |
536 | } | 535 | } |
@@ -557,7 +556,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, | |||
557 | card->color++; | 556 | card->color++; |
558 | 557 | ||
559 | if (local_node == NULL) { | 558 | if (local_node == NULL) { |
560 | fw_error("topology build failed\n"); | 559 | fw_err(card, "topology build failed\n"); |
561 | /* FIXME: We need to issue a bus reset in this case. */ | 560 | /* FIXME: We need to issue a bus reset in this case. */ |
562 | } else if (card->local_node == NULL) { | 561 | } else if (card->local_node == NULL) { |
563 | card->local_node = local_node; | 562 | card->local_node = local_node; |
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 855ab3f5936f..dea2dcc9310d 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -565,7 +565,6 @@ int fw_core_add_address_handler(struct fw_address_handler *handler, | |||
565 | const struct fw_address_region *region) | 565 | const struct fw_address_region *region) |
566 | { | 566 | { |
567 | struct fw_address_handler *other; | 567 | struct fw_address_handler *other; |
568 | unsigned long flags; | ||
569 | int ret = -EBUSY; | 568 | int ret = -EBUSY; |
570 | 569 | ||
571 | if (region->start & 0xffff000000000003ULL || | 570 | if (region->start & 0xffff000000000003ULL || |
@@ -575,7 +574,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler, | |||
575 | handler->length == 0) | 574 | handler->length == 0) |
576 | return -EINVAL; | 575 | return -EINVAL; |
577 | 576 | ||
578 | spin_lock_irqsave(&address_handler_lock, flags); | 577 | spin_lock_bh(&address_handler_lock); |
579 | 578 | ||
580 | handler->offset = region->start; | 579 | handler->offset = region->start; |
581 | while (handler->offset + handler->length <= region->end) { | 580 | while (handler->offset + handler->length <= region->end) { |
@@ -594,7 +593,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler, | |||
594 | } | 593 | } |
595 | } | 594 | } |
596 | 595 | ||
597 | spin_unlock_irqrestore(&address_handler_lock, flags); | 596 | spin_unlock_bh(&address_handler_lock); |
598 | 597 | ||
599 | return ret; | 598 | return ret; |
600 | } | 599 | } |
@@ -602,14 +601,15 @@ EXPORT_SYMBOL(fw_core_add_address_handler); | |||
602 | 601 | ||
603 | /** | 602 | /** |
604 | * fw_core_remove_address_handler() - unregister an address handler | 603 | * fw_core_remove_address_handler() - unregister an address handler |
604 | * | ||
605 | * When fw_core_remove_address_handler() returns, @handler->callback() is | ||
606 | * guaranteed to not run on any CPU anymore. | ||
605 | */ | 607 | */ |
606 | void fw_core_remove_address_handler(struct fw_address_handler *handler) | 608 | void fw_core_remove_address_handler(struct fw_address_handler *handler) |
607 | { | 609 | { |
608 | unsigned long flags; | 610 | spin_lock_bh(&address_handler_lock); |
609 | |||
610 | spin_lock_irqsave(&address_handler_lock, flags); | ||
611 | list_del(&handler->link); | 611 | list_del(&handler->link); |
612 | spin_unlock_irqrestore(&address_handler_lock, flags); | 612 | spin_unlock_bh(&address_handler_lock); |
613 | } | 613 | } |
614 | EXPORT_SYMBOL(fw_core_remove_address_handler); | 614 | EXPORT_SYMBOL(fw_core_remove_address_handler); |
615 | 615 | ||
@@ -770,7 +770,7 @@ static struct fw_request *allocate_request(struct fw_card *card, | |||
770 | break; | 770 | break; |
771 | 771 | ||
772 | default: | 772 | default: |
773 | fw_error("ERROR - corrupt request received - %08x %08x %08x\n", | 773 | fw_notice(card, "ERROR - corrupt request received - %08x %08x %08x\n", |
774 | p->header[0], p->header[1], p->header[2]); | 774 | p->header[0], p->header[1], p->header[2]); |
775 | return NULL; | 775 | return NULL; |
776 | } | 776 | } |
@@ -826,7 +826,6 @@ static void handle_exclusive_region_request(struct fw_card *card, | |||
826 | unsigned long long offset) | 826 | unsigned long long offset) |
827 | { | 827 | { |
828 | struct fw_address_handler *handler; | 828 | struct fw_address_handler *handler; |
829 | unsigned long flags; | ||
830 | int tcode, destination, source; | 829 | int tcode, destination, source; |
831 | 830 | ||
832 | destination = HEADER_GET_DESTINATION(p->header[0]); | 831 | destination = HEADER_GET_DESTINATION(p->header[0]); |
@@ -835,27 +834,19 @@ static void handle_exclusive_region_request(struct fw_card *card, | |||
835 | if (tcode == TCODE_LOCK_REQUEST) | 834 | if (tcode == TCODE_LOCK_REQUEST) |
836 | tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]); | 835 | tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]); |
837 | 836 | ||
838 | spin_lock_irqsave(&address_handler_lock, flags); | 837 | spin_lock_bh(&address_handler_lock); |
839 | handler = lookup_enclosing_address_handler(&address_handler_list, | 838 | handler = lookup_enclosing_address_handler(&address_handler_list, |
840 | offset, request->length); | 839 | offset, request->length); |
841 | spin_unlock_irqrestore(&address_handler_lock, flags); | 840 | if (handler) |
842 | |||
843 | /* | ||
844 | * FIXME: lookup the fw_node corresponding to the sender of | ||
845 | * this request and pass that to the address handler instead | ||
846 | * of the node ID. We may also want to move the address | ||
847 | * allocations to fw_node so we only do this callback if the | ||
848 | * upper layers registered it for this node. | ||
849 | */ | ||
850 | |||
851 | if (handler == NULL) | ||
852 | fw_send_response(card, request, RCODE_ADDRESS_ERROR); | ||
853 | else | ||
854 | handler->address_callback(card, request, | 841 | handler->address_callback(card, request, |
855 | tcode, destination, source, | 842 | tcode, destination, source, |
856 | p->generation, offset, | 843 | p->generation, offset, |
857 | request->data, request->length, | 844 | request->data, request->length, |
858 | handler->callback_data); | 845 | handler->callback_data); |
846 | spin_unlock_bh(&address_handler_lock); | ||
847 | |||
848 | if (!handler) | ||
849 | fw_send_response(card, request, RCODE_ADDRESS_ERROR); | ||
859 | } | 850 | } |
860 | 851 | ||
861 | static void handle_fcp_region_request(struct fw_card *card, | 852 | static void handle_fcp_region_request(struct fw_card *card, |
@@ -864,7 +855,6 @@ static void handle_fcp_region_request(struct fw_card *card, | |||
864 | unsigned long long offset) | 855 | unsigned long long offset) |
865 | { | 856 | { |
866 | struct fw_address_handler *handler; | 857 | struct fw_address_handler *handler; |
867 | unsigned long flags; | ||
868 | int tcode, destination, source; | 858 | int tcode, destination, source; |
869 | 859 | ||
870 | if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) && | 860 | if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) && |
@@ -886,7 +876,7 @@ static void handle_fcp_region_request(struct fw_card *card, | |||
886 | return; | 876 | return; |
887 | } | 877 | } |
888 | 878 | ||
889 | spin_lock_irqsave(&address_handler_lock, flags); | 879 | spin_lock_bh(&address_handler_lock); |
890 | list_for_each_entry(handler, &address_handler_list, link) { | 880 | list_for_each_entry(handler, &address_handler_list, link) { |
891 | if (is_enclosing_handler(handler, offset, request->length)) | 881 | if (is_enclosing_handler(handler, offset, request->length)) |
892 | handler->address_callback(card, NULL, tcode, | 882 | handler->address_callback(card, NULL, tcode, |
@@ -896,7 +886,7 @@ static void handle_fcp_region_request(struct fw_card *card, | |||
896 | request->length, | 886 | request->length, |
897 | handler->callback_data); | 887 | handler->callback_data); |
898 | } | 888 | } |
899 | spin_unlock_irqrestore(&address_handler_lock, flags); | 889 | spin_unlock_bh(&address_handler_lock); |
900 | 890 | ||
901 | fw_send_response(card, request, RCODE_COMPLETE); | 891 | fw_send_response(card, request, RCODE_COMPLETE); |
902 | } | 892 | } |
@@ -960,7 +950,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) | |||
960 | 950 | ||
961 | if (&t->link == &card->transaction_list) { | 951 | if (&t->link == &card->transaction_list) { |
962 | timed_out: | 952 | timed_out: |
963 | fw_notify("Unsolicited response (source %x, tlabel %x)\n", | 953 | fw_notice(card, "unsolicited response (source %x, tlabel %x)\n", |
964 | source, tlabel); | 954 | source, tlabel); |
965 | return; | 955 | return; |
966 | } | 956 | } |
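Besides dropping the IRQ-disabling variants, the _bh conversion above runs the exclusive-region callback under address_handler_lock, which is what backs the newly documented guarantee of fw_core_remove_address_handler(): once it returns, the callback no longer runs on any CPU. A hedged sketch of the register/unregister pattern a protocol driver follows; the region choice, callback body, and names are illustrative only.

#include <linux/firewire.h>
#include <linux/firewire-constants.h>

static void example_callback(struct fw_card *card, struct fw_request *request,
			     int tcode, int destination, int source,
			     int generation, unsigned long long offset,
			     void *payload, size_t length, void *callback_data)
{
	/* Runs in atomic (bottom-half) context; must not sleep. */
	fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler example_handler = {
	.length           = 0x100,
	.address_callback = example_callback,
};

static int example_register(void)
{
	return fw_core_add_address_handler(&example_handler,
					   &fw_high_memory_region);
}

static void example_unregister(void)
{
	/* After this returns, example_callback() is guaranteed not to run. */
	fw_core_remove_address_handler(&example_handler);
}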
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index b45be5767529..9047f5547d98 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -1,6 +1,8 @@ | |||
1 | #ifndef _FIREWIRE_CORE_H | 1 | #ifndef _FIREWIRE_CORE_H |
2 | #define _FIREWIRE_CORE_H | 2 | #define _FIREWIRE_CORE_H |
3 | 3 | ||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/device.h> | ||
4 | #include <linux/fs.h> | 6 | #include <linux/fs.h> |
5 | #include <linux/list.h> | 7 | #include <linux/list.h> |
6 | #include <linux/idr.h> | 8 | #include <linux/idr.h> |
@@ -23,6 +25,11 @@ struct fw_packet; | |||
23 | 25 | ||
24 | /* -card */ | 26 | /* -card */ |
25 | 27 | ||
28 | extern __printf(2, 3) | ||
29 | void fw_err(const struct fw_card *card, const char *fmt, ...); | ||
30 | extern __printf(2, 3) | ||
31 | void fw_notice(const struct fw_card *card, const char *fmt, ...); | ||
32 | |||
26 | /* bitfields within the PHY registers */ | 33 | /* bitfields within the PHY registers */ |
27 | #define PHY_LINK_ACTIVE 0x80 | 34 | #define PHY_LINK_ACTIVE 0x80 |
28 | #define PHY_CONTENDER 0x40 | 35 | #define PHY_CONTENDER 0x40 |
@@ -99,6 +106,8 @@ struct fw_card_driver { | |||
99 | 106 | ||
100 | void (*flush_queue_iso)(struct fw_iso_context *ctx); | 107 | void (*flush_queue_iso)(struct fw_iso_context *ctx); |
101 | 108 | ||
109 | int (*flush_iso_completions)(struct fw_iso_context *ctx); | ||
110 | |||
102 | int (*stop_iso)(struct fw_iso_context *ctx); | 111 | int (*stop_iso)(struct fw_iso_context *ctx); |
103 | }; | 112 | }; |
104 | 113 | ||
@@ -141,6 +150,18 @@ extern struct rw_semaphore fw_device_rwsem; | |||
141 | extern struct idr fw_device_idr; | 150 | extern struct idr fw_device_idr; |
142 | extern int fw_cdev_major; | 151 | extern int fw_cdev_major; |
143 | 152 | ||
153 | static inline struct fw_device *fw_device_get(struct fw_device *device) | ||
154 | { | ||
155 | get_device(&device->device); | ||
156 | |||
157 | return device; | ||
158 | } | ||
159 | |||
160 | static inline void fw_device_put(struct fw_device *device) | ||
161 | { | ||
162 | put_device(&device->device); | ||
163 | } | ||
164 | |||
144 | struct fw_device *fw_device_get_by_devt(dev_t devt); | 165 | struct fw_device *fw_device_get_by_devt(dev_t devt); |
145 | int fw_device_set_broadcast_channel(struct device *dev, void *gen); | 166 | int fw_device_set_broadcast_channel(struct device *dev, void *gen); |
146 | void fw_node_event(struct fw_card *card, struct fw_node *node, int event); | 167 | void fw_node_event(struct fw_card *card, struct fw_node *node, int event); |
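The fw_device_get()/fw_device_put() inlines added to core.h are thin wrappers around get_device()/put_device() on the embedded struct device; create_units() takes a reference per child unit and fw_unit_release() drops it, so a unit can no longer outlive its parent fw_device. The same pairing, reduced to a hedged core-internal sketch (the child structure and function names are invented):

#include <linux/slab.h>
#include "core.h"	/* fw_device_get()/fw_device_put() are core-internal */

struct example_child {
	struct fw_device *parent;
};

static struct example_child *example_child_create(struct fw_device *device)
{
	struct example_child *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (c == NULL)
		return NULL;

	c->parent = fw_device_get(device);	/* pin the parent device */
	return c;
}

static void example_child_destroy(struct example_child *c)
{
	fw_device_put(c->parent);		/* drop the reference on teardown */
	kfree(c);
}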
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index a20f45b1e7e5..08c674957af8 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -256,8 +256,8 @@ static int fwnet_header_rebuild(struct sk_buff *skb) | |||
256 | if (get_unaligned_be16(&h->h_proto) == ETH_P_IP) | 256 | if (get_unaligned_be16(&h->h_proto) == ETH_P_IP) |
257 | return arp_find((unsigned char *)&h->h_dest, skb); | 257 | return arp_find((unsigned char *)&h->h_dest, skb); |
258 | 258 | ||
259 | fw_notify("%s: unable to resolve type %04x addresses\n", | 259 | dev_notice(&skb->dev->dev, "unable to resolve type %04x addresses\n", |
260 | skb->dev->name, be16_to_cpu(h->h_proto)); | 260 | be16_to_cpu(h->h_proto)); |
261 | return 0; | 261 | return 0; |
262 | } | 262 | } |
263 | 263 | ||
@@ -369,7 +369,7 @@ static struct fwnet_fragment_info *fwnet_frag_new( | |||
369 | 369 | ||
370 | new = kmalloc(sizeof(*new), GFP_ATOMIC); | 370 | new = kmalloc(sizeof(*new), GFP_ATOMIC); |
371 | if (!new) { | 371 | if (!new) { |
372 | fw_error("out of memory\n"); | 372 | dev_err(&pd->skb->dev->dev, "out of memory\n"); |
373 | return NULL; | 373 | return NULL; |
374 | } | 374 | } |
375 | 375 | ||
@@ -414,7 +414,7 @@ fail_w_fi: | |||
414 | fail_w_new: | 414 | fail_w_new: |
415 | kfree(new); | 415 | kfree(new); |
416 | fail: | 416 | fail: |
417 | fw_error("out of memory\n"); | 417 | dev_err(&net->dev, "out of memory\n"); |
418 | 418 | ||
419 | return NULL; | 419 | return NULL; |
420 | } | 420 | } |
@@ -554,7 +554,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net, | |||
554 | sspd = arp1394->sspd; | 554 | sspd = arp1394->sspd; |
555 | /* Sanity check. OS X 10.3 PPC reportedly sends 131. */ | 555 | /* Sanity check. OS X 10.3 PPC reportedly sends 131. */ |
556 | if (sspd > SCODE_3200) { | 556 | if (sspd > SCODE_3200) { |
557 | fw_notify("sspd %x out of range\n", sspd); | 557 | dev_notice(&net->dev, "sspd %x out of range\n", sspd); |
558 | sspd = SCODE_3200; | 558 | sspd = SCODE_3200; |
559 | } | 559 | } |
560 | max_payload = fwnet_max_payload(arp1394->max_rec, sspd); | 560 | max_payload = fwnet_max_payload(arp1394->max_rec, sspd); |
@@ -574,8 +574,9 @@ static int fwnet_finish_incoming_packet(struct net_device *net, | |||
574 | spin_unlock_irqrestore(&dev->lock, flags); | 574 | spin_unlock_irqrestore(&dev->lock, flags); |
575 | 575 | ||
576 | if (!peer) { | 576 | if (!peer) { |
577 | fw_notify("No peer for ARP packet from %016llx\n", | 577 | dev_notice(&net->dev, |
578 | (unsigned long long)peer_guid); | 578 | "no peer for ARP packet from %016llx\n", |
579 | (unsigned long long)peer_guid); | ||
579 | goto no_peer; | 580 | goto no_peer; |
580 | } | 581 | } |
581 | 582 | ||
@@ -691,7 +692,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, | |||
691 | 692 | ||
692 | skb = dev_alloc_skb(len + net->hard_header_len + 15); | 693 | skb = dev_alloc_skb(len + net->hard_header_len + 15); |
693 | if (unlikely(!skb)) { | 694 | if (unlikely(!skb)) { |
694 | fw_error("out of memory\n"); | 695 | dev_err(&net->dev, "out of memory\n"); |
695 | net->stats.rx_dropped++; | 696 | net->stats.rx_dropped++; |
696 | 697 | ||
697 | return -ENOMEM; | 698 | return -ENOMEM; |
@@ -814,7 +815,7 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, | |||
814 | rcode = RCODE_TYPE_ERROR; | 815 | rcode = RCODE_TYPE_ERROR; |
815 | else if (fwnet_incoming_packet(dev, payload, length, | 816 | else if (fwnet_incoming_packet(dev, payload, length, |
816 | source, generation, false) != 0) { | 817 | source, generation, false) != 0) { |
817 | fw_error("Incoming packet failure\n"); | 818 | dev_err(&dev->netdev->dev, "incoming packet failure\n"); |
818 | rcode = RCODE_CONFLICT_ERROR; | 819 | rcode = RCODE_CONFLICT_ERROR; |
819 | } else | 820 | } else |
820 | rcode = RCODE_COMPLETE; | 821 | rcode = RCODE_COMPLETE; |
@@ -881,7 +882,7 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, | |||
881 | if (retval >= 0) | 882 | if (retval >= 0) |
882 | fw_iso_context_queue_flush(dev->broadcast_rcv_context); | 883 | fw_iso_context_queue_flush(dev->broadcast_rcv_context); |
883 | else | 884 | else |
884 | fw_error("requeue failed\n"); | 885 | dev_err(&dev->netdev->dev, "requeue failed\n"); |
885 | } | 886 | } |
886 | 887 | ||
887 | static struct kmem_cache *fwnet_packet_task_cache; | 888 | static struct kmem_cache *fwnet_packet_task_cache; |
@@ -936,9 +937,10 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) | |||
936 | case RFC2374_HDR_LASTFRAG: | 937 | case RFC2374_HDR_LASTFRAG: |
937 | case RFC2374_HDR_UNFRAG: | 938 | case RFC2374_HDR_UNFRAG: |
938 | default: | 939 | default: |
939 | fw_error("Outstanding packet %x lf %x, header %x,%x\n", | 940 | dev_err(&dev->netdev->dev, |
940 | ptask->outstanding_pkts, lf, ptask->hdr.w0, | 941 | "outstanding packet %x lf %x, header %x,%x\n", |
941 | ptask->hdr.w1); | 942 | ptask->outstanding_pkts, lf, ptask->hdr.w0, |
943 | ptask->hdr.w1); | ||
942 | BUG(); | 944 | BUG(); |
943 | 945 | ||
944 | case RFC2374_HDR_FIRSTFRAG: | 946 | case RFC2374_HDR_FIRSTFRAG: |
@@ -1010,8 +1012,9 @@ static void fwnet_write_complete(struct fw_card *card, int rcode, | |||
1010 | fwnet_transmit_packet_failed(ptask); | 1012 | fwnet_transmit_packet_failed(ptask); |
1011 | 1013 | ||
1012 | if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) { | 1014 | if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) { |
1013 | fw_error("fwnet_write_complete: " | 1015 | dev_err(&ptask->dev->netdev->dev, |
1014 | "failed: %x (skipped %d)\n", rcode, errors_skipped); | 1016 | "fwnet_write_complete failed: %x (skipped %d)\n", |
1017 | rcode, errors_skipped); | ||
1015 | 1018 | ||
1016 | errors_skipped = 0; | 1019 | errors_skipped = 0; |
1017 | last_rcode = rcode; | 1020 | last_rcode = rcode; |
@@ -1539,14 +1542,12 @@ static int fwnet_probe(struct device *_dev) | |||
1539 | put_unaligned_be64(card->guid, net->dev_addr); | 1542 | put_unaligned_be64(card->guid, net->dev_addr); |
1540 | put_unaligned_be64(~0ULL, net->broadcast); | 1543 | put_unaligned_be64(~0ULL, net->broadcast); |
1541 | ret = register_netdev(net); | 1544 | ret = register_netdev(net); |
1542 | if (ret) { | 1545 | if (ret) |
1543 | fw_error("Cannot register the driver\n"); | ||
1544 | goto out; | 1546 | goto out; |
1545 | } | ||
1546 | 1547 | ||
1547 | list_add_tail(&dev->dev_link, &fwnet_device_list); | 1548 | list_add_tail(&dev->dev_link, &fwnet_device_list); |
1548 | fw_notify("%s: IPv4 over FireWire on device %016llx\n", | 1549 | dev_notice(&net->dev, "IPv4 over IEEE 1394 on card %s\n", |
1549 | net->name, (unsigned long long)card->guid); | 1550 | dev_name(card->device)); |
1550 | have_dev: | 1551 | have_dev: |
1551 | ret = fwnet_add_peer(dev, unit, device); | 1552 | ret = fwnet_add_peer(dev, unit, device); |
1552 | if (ret && allocated_netdev) { | 1553 | if (ret && allocated_netdev) { |
@@ -1648,7 +1649,7 @@ static const struct ieee1394_device_id fwnet_id_table[] = { | |||
1648 | static struct fw_driver fwnet_driver = { | 1649 | static struct fw_driver fwnet_driver = { |
1649 | .driver = { | 1650 | .driver = { |
1650 | .owner = THIS_MODULE, | 1651 | .owner = THIS_MODULE, |
1651 | .name = "net", | 1652 | .name = KBUILD_MODNAME, |
1652 | .bus = &fw_bus_type, | 1653 | .bus = &fw_bus_type, |
1653 | .probe = fwnet_probe, | 1654 | .probe = fwnet_probe, |
1654 | .remove = fwnet_remove, | 1655 | .remove = fwnet_remove, |
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 763626b739d1..a7c4422a688e 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -36,7 +36,7 @@ | |||
36 | #include <linux/timex.h> | 36 | #include <linux/timex.h> |
37 | #include <linux/uaccess.h> | 37 | #include <linux/uaccess.h> |
38 | #include <linux/wait.h> | 38 | #include <linux/wait.h> |
39 | 39 | #include <linux/dma-mapping.h> | |
40 | #include <linux/atomic.h> | 40 | #include <linux/atomic.h> |
41 | #include <asm/byteorder.h> | 41 | #include <asm/byteorder.h> |
42 | 42 | ||
@@ -536,7 +536,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused) | |||
536 | u32 p, end; | 536 | u32 p, end; |
537 | int ret, i; | 537 | int ret, i; |
538 | 538 | ||
539 | if (pci_set_dma_mask(dev, 0xffffffff)) { | 539 | if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) { |
540 | dev_err(&dev->dev, | 540 | dev_err(&dev->dev, |
541 | "DMA address limits not supported for PCILynx hardware\n"); | 541 | "DMA address limits not supported for PCILynx hardware\n"); |
542 | return -ENXIO; | 542 | return -ENXIO; |
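For reference, DMA_BIT_MASK(32) from <linux/dma-mapping.h> expands to the same 0xffffffff value that was previously open-coded here; the macro just makes the intended mask width explicit. Its definition is essentially:

/* A mask with the low n bits set; 64 is special-cased to avoid the
 * undefined behaviour of shifting a 64-bit value by 64. */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Hence DMA_BIT_MASK(32) == 0x00000000ffffffffULL. */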
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 7f5f0da726da..187b3f2e797e 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -170,10 +170,12 @@ struct context { | |||
170 | struct iso_context { | 170 | struct iso_context { |
171 | struct fw_iso_context base; | 171 | struct fw_iso_context base; |
172 | struct context context; | 172 | struct context context; |
173 | int excess_bytes; | ||
174 | void *header; | 173 | void *header; |
175 | size_t header_length; | 174 | size_t header_length; |
176 | 175 | unsigned long flushing_completions; | |
176 | u32 mc_buffer_bus; | ||
177 | u16 mc_completed; | ||
178 | u16 last_timestamp; | ||
177 | u8 sync; | 179 | u8 sync; |
178 | u8 tags; | 180 | u8 tags; |
179 | }; | 181 | }; |
@@ -338,8 +340,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" | |||
338 | #define OHCI_PARAM_DEBUG_IRQS 4 | 340 | #define OHCI_PARAM_DEBUG_IRQS 4 |
339 | #define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */ | 341 | #define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */ |
340 | 342 | ||
341 | #ifdef CONFIG_FIREWIRE_OHCI_DEBUG | ||
342 | |||
343 | static int param_debug; | 343 | static int param_debug; |
344 | module_param_named(debug, param_debug, int, 0644); | 344 | module_param_named(debug, param_debug, int, 0644); |
345 | MODULE_PARM_DESC(debug, "Verbose logging (default = 0" | 345 | MODULE_PARM_DESC(debug, "Verbose logging (default = 0" |
@@ -349,7 +349,7 @@ MODULE_PARM_DESC(debug, "Verbose logging (default = 0" | |||
349 | ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS) | 349 | ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS) |
350 | ", or a combination, or all = -1)"); | 350 | ", or a combination, or all = -1)"); |
351 | 351 | ||
352 | static void log_irqs(u32 evt) | 352 | static void log_irqs(struct fw_ohci *ohci, u32 evt) |
353 | { | 353 | { |
354 | if (likely(!(param_debug & | 354 | if (likely(!(param_debug & |
355 | (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS)))) | 355 | (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS)))) |
@@ -359,7 +359,8 @@ static void log_irqs(u32 evt) | |||
359 | !(evt & OHCI1394_busReset)) | 359 | !(evt & OHCI1394_busReset)) |
360 | return; | 360 | return; |
361 | 361 | ||
362 | fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, | 362 | dev_notice(ohci->card.device, |
363 | "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, | ||
363 | evt & OHCI1394_selfIDComplete ? " selfID" : "", | 364 | evt & OHCI1394_selfIDComplete ? " selfID" : "", |
364 | evt & OHCI1394_RQPkt ? " AR_req" : "", | 365 | evt & OHCI1394_RQPkt ? " AR_req" : "", |
365 | evt & OHCI1394_RSPkt ? " AR_resp" : "", | 366 | evt & OHCI1394_RSPkt ? " AR_resp" : "", |
@@ -398,24 +399,29 @@ static char _p(u32 *s, int shift) | |||
398 | return port[*s >> shift & 3]; | 399 | return port[*s >> shift & 3]; |
399 | } | 400 | } |
400 | 401 | ||
401 | static void log_selfids(int node_id, int generation, int self_id_count, u32 *s) | 402 | static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count) |
402 | { | 403 | { |
404 | u32 *s; | ||
405 | |||
403 | if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS))) | 406 | if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS))) |
404 | return; | 407 | return; |
405 | 408 | ||
406 | fw_notify("%d selfIDs, generation %d, local node ID %04x\n", | 409 | dev_notice(ohci->card.device, |
407 | self_id_count, generation, node_id); | 410 | "%d selfIDs, generation %d, local node ID %04x\n", |
411 | self_id_count, generation, ohci->node_id); | ||
408 | 412 | ||
409 | for (; self_id_count--; ++s) | 413 | for (s = ohci->self_id_buffer; self_id_count--; ++s) |
410 | if ((*s & 1 << 23) == 0) | 414 | if ((*s & 1 << 23) == 0) |
411 | fw_notify("selfID 0: %08x, phy %d [%c%c%c] " | 415 | dev_notice(ohci->card.device, |
416 | "selfID 0: %08x, phy %d [%c%c%c] " | ||
412 | "%s gc=%d %s %s%s%s\n", | 417 | "%s gc=%d %s %s%s%s\n", |
413 | *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2), | 418 | *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2), |
414 | speed[*s >> 14 & 3], *s >> 16 & 63, | 419 | speed[*s >> 14 & 3], *s >> 16 & 63, |
415 | power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "", | 420 | power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "", |
416 | *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : ""); | 421 | *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : ""); |
417 | else | 422 | else |
418 | fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n", | 423 | dev_notice(ohci->card.device, |
424 | "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n", | ||
419 | *s, *s >> 24 & 63, | 425 | *s, *s >> 24 & 63, |
420 | _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10), | 426 | _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10), |
421 | _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2)); | 427 | _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2)); |
@@ -451,7 +457,8 @@ static const char *tcodes[] = { | |||
451 | [0xe] = "link internal", [0xf] = "-reserved-", | 457 | [0xe] = "link internal", [0xf] = "-reserved-", |
452 | }; | 458 | }; |
453 | 459 | ||
454 | static void log_ar_at_event(char dir, int speed, u32 *header, int evt) | 460 | static void log_ar_at_event(struct fw_ohci *ohci, |
461 | char dir, int speed, u32 *header, int evt) | ||
455 | { | 462 | { |
456 | int tcode = header[0] >> 4 & 0xf; | 463 | int tcode = header[0] >> 4 & 0xf; |
457 | char specific[12]; | 464 | char specific[12]; |
@@ -463,8 +470,9 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt) | |||
463 | evt = 0x1f; | 470 | evt = 0x1f; |
464 | 471 | ||
465 | if (evt == OHCI1394_evt_bus_reset) { | 472 | if (evt == OHCI1394_evt_bus_reset) { |
466 | fw_notify("A%c evt_bus_reset, generation %d\n", | 473 | dev_notice(ohci->card.device, |
467 | dir, (header[2] >> 16) & 0xff); | 474 | "A%c evt_bus_reset, generation %d\n", |
475 | dir, (header[2] >> 16) & 0xff); | ||
468 | return; | 476 | return; |
469 | } | 477 | } |
470 | 478 | ||
@@ -483,39 +491,35 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt) | |||
483 | 491 | ||
484 | switch (tcode) { | 492 | switch (tcode) { |
485 | case 0xa: | 493 | case 0xa: |
486 | fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]); | 494 | dev_notice(ohci->card.device, |
495 | "A%c %s, %s\n", | ||
496 | dir, evts[evt], tcodes[tcode]); | ||
487 | break; | 497 | break; |
488 | case 0xe: | 498 | case 0xe: |
489 | fw_notify("A%c %s, PHY %08x %08x\n", | 499 | dev_notice(ohci->card.device, |
490 | dir, evts[evt], header[1], header[2]); | 500 | "A%c %s, PHY %08x %08x\n", |
501 | dir, evts[evt], header[1], header[2]); | ||
491 | break; | 502 | break; |
492 | case 0x0: case 0x1: case 0x4: case 0x5: case 0x9: | 503 | case 0x0: case 0x1: case 0x4: case 0x5: case 0x9: |
493 | fw_notify("A%c spd %x tl %02x, " | 504 | dev_notice(ohci->card.device, |
494 | "%04x -> %04x, %s, " | 505 | "A%c spd %x tl %02x, " |
495 | "%s, %04x%08x%s\n", | 506 | "%04x -> %04x, %s, " |
496 | dir, speed, header[0] >> 10 & 0x3f, | 507 | "%s, %04x%08x%s\n", |
497 | header[1] >> 16, header[0] >> 16, evts[evt], | 508 | dir, speed, header[0] >> 10 & 0x3f, |
498 | tcodes[tcode], header[1] & 0xffff, header[2], specific); | 509 | header[1] >> 16, header[0] >> 16, evts[evt], |
510 | tcodes[tcode], header[1] & 0xffff, header[2], specific); | ||
499 | break; | 511 | break; |
500 | default: | 512 | default: |
501 | fw_notify("A%c spd %x tl %02x, " | 513 | dev_notice(ohci->card.device, |
502 | "%04x -> %04x, %s, " | 514 | "A%c spd %x tl %02x, " |
503 | "%s%s\n", | 515 | "%04x -> %04x, %s, " |
504 | dir, speed, header[0] >> 10 & 0x3f, | 516 | "%s%s\n", |
505 | header[1] >> 16, header[0] >> 16, evts[evt], | 517 | dir, speed, header[0] >> 10 & 0x3f, |
506 | tcodes[tcode], specific); | 518 | header[1] >> 16, header[0] >> 16, evts[evt], |
519 | tcodes[tcode], specific); | ||
507 | } | 520 | } |
508 | } | 521 | } |
509 | 522 | ||
510 | #else | ||
511 | |||
512 | #define param_debug 0 | ||
513 | static inline void log_irqs(u32 evt) {} | ||
514 | static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {} | ||
515 | static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {} | ||
516 | |||
517 | #endif /* CONFIG_FIREWIRE_OHCI_DEBUG */ | ||
518 | |||
519 | static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data) | 523 | static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data) |
520 | { | 524 | { |
521 | writel(data, ohci->registers + offset); | 525 | writel(data, ohci->registers + offset); |
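With the #else stubs above removed, log_irqs(), log_selfids() and log_ar_at_event() are now always compiled in; whether they actually print anything is decided at runtime by the driver's `debug` module parameter rather than by CONFIG_FIREWIRE_OHCI_DEBUG. A minimal sketch of that runtime gate, with the parameter-bit name taken as an assumption (this hunk does not show those definitions):

    /* Sketch only -- OHCI_PARAM_DEBUG_IRQS is assumed, not shown in this hunk. */
    static void log_irqs(struct fw_ohci *ohci, u32 evt)
    {
            if (likely(!(param_debug & OHCI_PARAM_DEBUG_IRQS)))
                    return;  /* runtime check replaces the old compile-time #ifdef */

            dev_notice(ohci->card.device, "IRQ %08x\n", evt);
    }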
@@ -559,7 +563,7 @@ static int read_phy_reg(struct fw_ohci *ohci, int addr) | |||
559 | if (i >= 3) | 563 | if (i >= 3) |
560 | msleep(1); | 564 | msleep(1); |
561 | } | 565 | } |
562 | fw_error("failed to read phy reg\n"); | 566 | dev_err(ohci->card.device, "failed to read phy reg\n"); |
563 | 567 | ||
564 | return -EBUSY; | 568 | return -EBUSY; |
565 | } | 569 | } |
@@ -581,7 +585,7 @@ static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val) | |||
581 | if (i >= 3) | 585 | if (i >= 3) |
582 | msleep(1); | 586 | msleep(1); |
583 | } | 587 | } |
584 | fw_error("failed to write phy reg\n"); | 588 | dev_err(ohci->card.device, "failed to write phy reg\n"); |
585 | 589 | ||
586 | return -EBUSY; | 590 | return -EBUSY; |
587 | } | 591 | } |
@@ -680,11 +684,14 @@ static void ar_context_release(struct ar_context *ctx) | |||
680 | 684 | ||
681 | static void ar_context_abort(struct ar_context *ctx, const char *error_msg) | 685 | static void ar_context_abort(struct ar_context *ctx, const char *error_msg) |
682 | { | 686 | { |
683 | if (reg_read(ctx->ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) { | 687 | struct fw_ohci *ohci = ctx->ohci; |
684 | reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); | 688 | |
685 | flush_writes(ctx->ohci); | 689 | if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) { |
690 | reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); | ||
691 | flush_writes(ohci); | ||
686 | 692 | ||
687 | fw_error("AR error: %s; DMA stopped\n", error_msg); | 693 | dev_err(ohci->card.device, "AR error: %s; DMA stopped\n", |
694 | error_msg); | ||
688 | } | 695 | } |
689 | /* FIXME: restart? */ | 696 | /* FIXME: restart? */ |
690 | } | 697 | } |
@@ -854,7 +861,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) | |||
854 | p.timestamp = status & 0xffff; | 861 | p.timestamp = status & 0xffff; |
855 | p.generation = ohci->request_generation; | 862 | p.generation = ohci->request_generation; |
856 | 863 | ||
857 | log_ar_at_event('R', p.speed, p.header, evt); | 864 | log_ar_at_event(ohci, 'R', p.speed, p.header, evt); |
858 | 865 | ||
859 | /* | 866 | /* |
860 | * Several controllers, notably from NEC and VIA, forget to | 867 | * Several controllers, notably from NEC and VIA, forget to |
@@ -1226,21 +1233,22 @@ static void context_append(struct context *ctx, | |||
1226 | 1233 | ||
1227 | static void context_stop(struct context *ctx) | 1234 | static void context_stop(struct context *ctx) |
1228 | { | 1235 | { |
1236 | struct fw_ohci *ohci = ctx->ohci; | ||
1229 | u32 reg; | 1237 | u32 reg; |
1230 | int i; | 1238 | int i; |
1231 | 1239 | ||
1232 | reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); | 1240 | reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); |
1233 | ctx->running = false; | 1241 | ctx->running = false; |
1234 | 1242 | ||
1235 | for (i = 0; i < 1000; i++) { | 1243 | for (i = 0; i < 1000; i++) { |
1236 | reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs)); | 1244 | reg = reg_read(ohci, CONTROL_SET(ctx->regs)); |
1237 | if ((reg & CONTEXT_ACTIVE) == 0) | 1245 | if ((reg & CONTEXT_ACTIVE) == 0) |
1238 | return; | 1246 | return; |
1239 | 1247 | ||
1240 | if (i) | 1248 | if (i) |
1241 | udelay(10); | 1249 | udelay(10); |
1242 | } | 1250 | } |
1243 | fw_error("Error: DMA context still active (0x%08x)\n", reg); | 1251 | dev_err(ohci->card.device, "DMA context still active (0x%08x)\n", reg); |
1244 | } | 1252 | } |
1245 | 1253 | ||
1246 | struct driver_data { | 1254 | struct driver_data { |
@@ -1420,7 +1428,7 @@ static int handle_at_packet(struct context *context, | |||
1420 | evt = le16_to_cpu(last->transfer_status) & 0x1f; | 1428 | evt = le16_to_cpu(last->transfer_status) & 0x1f; |
1421 | packet->timestamp = le16_to_cpu(last->res_count); | 1429 | packet->timestamp = le16_to_cpu(last->res_count); |
1422 | 1430 | ||
1423 | log_ar_at_event('T', packet->speed, packet->header, evt); | 1431 | log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt); |
1424 | 1432 | ||
1425 | switch (evt) { | 1433 | switch (evt) { |
1426 | case OHCI1394_evt_timeout: | 1434 | case OHCI1394_evt_timeout: |
@@ -1549,7 +1557,7 @@ static void handle_local_lock(struct fw_ohci *ohci, | |||
1549 | goto out; | 1557 | goto out; |
1550 | } | 1558 | } |
1551 | 1559 | ||
1552 | fw_error("swap not done (CSR lock timeout)\n"); | 1560 | dev_err(ohci->card.device, "swap not done (CSR lock timeout)\n"); |
1553 | fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); | 1561 | fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); |
1554 | 1562 | ||
1555 | out: | 1563 | out: |
@@ -1623,15 +1631,10 @@ static void detect_dead_context(struct fw_ohci *ohci, | |||
1623 | u32 ctl; | 1631 | u32 ctl; |
1624 | 1632 | ||
1625 | ctl = reg_read(ohci, CONTROL_SET(regs)); | 1633 | ctl = reg_read(ohci, CONTROL_SET(regs)); |
1626 | if (ctl & CONTEXT_DEAD) { | 1634 | if (ctl & CONTEXT_DEAD) |
1627 | #ifdef CONFIG_FIREWIRE_OHCI_DEBUG | 1635 | dev_err(ohci->card.device, |
1628 | fw_error("DMA context %s has stopped, error code: %s\n", | 1636 | "DMA context %s has stopped, error code: %s\n", |
1629 | name, evts[ctl & 0x1f]); | 1637 | name, evts[ctl & 0x1f]); |
1630 | #else | ||
1631 | fw_error("DMA context %s has stopped, error code: %#x\n", | ||
1632 | name, ctl & 0x1f); | ||
1633 | #endif | ||
1634 | } | ||
1635 | } | 1638 | } |
1636 | 1639 | ||
1637 | static void handle_dead_contexts(struct fw_ohci *ohci) | 1640 | static void handle_dead_contexts(struct fw_ohci *ohci) |
@@ -1781,7 +1784,8 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count) | |||
1781 | 1784 | ||
1782 | reg = reg_read(ohci, OHCI1394_NodeID); | 1785 | reg = reg_read(ohci, OHCI1394_NodeID); |
1783 | if (!(reg & OHCI1394_NodeID_idValid)) { | 1786 | if (!(reg & OHCI1394_NodeID_idValid)) { |
1784 | fw_notify("node ID not valid, new bus reset in progress\n"); | 1787 | dev_notice(ohci->card.device, |
1788 | "node ID not valid, new bus reset in progress\n"); | ||
1785 | return -EBUSY; | 1789 | return -EBUSY; |
1786 | } | 1790 | } |
1787 | self_id |= ((reg & 0x3f) << 24); /* phy ID */ | 1791 | self_id |= ((reg & 0x3f) << 24); /* phy ID */ |
@@ -1827,11 +1831,12 @@ static void bus_reset_work(struct work_struct *work) | |||
1827 | 1831 | ||
1828 | reg = reg_read(ohci, OHCI1394_NodeID); | 1832 | reg = reg_read(ohci, OHCI1394_NodeID); |
1829 | if (!(reg & OHCI1394_NodeID_idValid)) { | 1833 | if (!(reg & OHCI1394_NodeID_idValid)) { |
1830 | fw_notify("node ID not valid, new bus reset in progress\n"); | 1834 | dev_notice(ohci->card.device, |
1835 | "node ID not valid, new bus reset in progress\n"); | ||
1831 | return; | 1836 | return; |
1832 | } | 1837 | } |
1833 | if ((reg & OHCI1394_NodeID_nodeNumber) == 63) { | 1838 | if ((reg & OHCI1394_NodeID_nodeNumber) == 63) { |
1834 | fw_notify("malconfigured bus\n"); | 1839 | dev_notice(ohci->card.device, "malconfigured bus\n"); |
1835 | return; | 1840 | return; |
1836 | } | 1841 | } |
1837 | ohci->node_id = reg & (OHCI1394_NodeID_busNumber | | 1842 | ohci->node_id = reg & (OHCI1394_NodeID_busNumber | |
@@ -1845,7 +1850,7 @@ static void bus_reset_work(struct work_struct *work) | |||
1845 | 1850 | ||
1846 | reg = reg_read(ohci, OHCI1394_SelfIDCount); | 1851 | reg = reg_read(ohci, OHCI1394_SelfIDCount); |
1847 | if (reg & OHCI1394_SelfIDCount_selfIDError) { | 1852 | if (reg & OHCI1394_SelfIDCount_selfIDError) { |
1848 | fw_notify("inconsistent self IDs\n"); | 1853 | dev_notice(ohci->card.device, "inconsistent self IDs\n"); |
1849 | return; | 1854 | return; |
1850 | } | 1855 | } |
1851 | /* | 1856 | /* |
@@ -1857,7 +1862,7 @@ static void bus_reset_work(struct work_struct *work) | |||
1857 | self_id_count = (reg >> 3) & 0xff; | 1862 | self_id_count = (reg >> 3) & 0xff; |
1858 | 1863 | ||
1859 | if (self_id_count > 252) { | 1864 | if (self_id_count > 252) { |
1860 | fw_notify("inconsistent self IDs\n"); | 1865 | dev_notice(ohci->card.device, "inconsistent self IDs\n"); |
1861 | return; | 1866 | return; |
1862 | } | 1867 | } |
1863 | 1868 | ||
@@ -1875,11 +1880,13 @@ static void bus_reset_work(struct work_struct *work) | |||
1875 | */ | 1880 | */ |
1876 | if (cond_le32_to_cpu(ohci->self_id_cpu[i]) | 1881 | if (cond_le32_to_cpu(ohci->self_id_cpu[i]) |
1877 | == 0xffff008f) { | 1882 | == 0xffff008f) { |
1878 | fw_notify("ignoring spurious self IDs\n"); | 1883 | dev_notice(ohci->card.device, |
1884 | "ignoring spurious self IDs\n"); | ||
1879 | self_id_count = j; | 1885 | self_id_count = j; |
1880 | break; | 1886 | break; |
1881 | } else { | 1887 | } else { |
1882 | fw_notify("inconsistent self IDs\n"); | 1888 | dev_notice(ohci->card.device, |
1889 | "inconsistent self IDs\n"); | ||
1883 | return; | 1890 | return; |
1884 | } | 1891 | } |
1885 | } | 1892 | } |
@@ -1890,13 +1897,14 @@ static void bus_reset_work(struct work_struct *work) | |||
1890 | if (ohci->quirks & QUIRK_TI_SLLZ059) { | 1897 | if (ohci->quirks & QUIRK_TI_SLLZ059) { |
1891 | self_id_count = find_and_insert_self_id(ohci, self_id_count); | 1898 | self_id_count = find_and_insert_self_id(ohci, self_id_count); |
1892 | if (self_id_count < 0) { | 1899 | if (self_id_count < 0) { |
1893 | fw_notify("could not construct local self ID\n"); | 1900 | dev_notice(ohci->card.device, |
1901 | "could not construct local self ID\n"); | ||
1894 | return; | 1902 | return; |
1895 | } | 1903 | } |
1896 | } | 1904 | } |
1897 | 1905 | ||
1898 | if (self_id_count == 0) { | 1906 | if (self_id_count == 0) { |
1899 | fw_notify("inconsistent self IDs\n"); | 1907 | dev_notice(ohci->card.device, "inconsistent self IDs\n"); |
1900 | return; | 1908 | return; |
1901 | } | 1909 | } |
1902 | rmb(); | 1910 | rmb(); |
@@ -1917,8 +1925,8 @@ static void bus_reset_work(struct work_struct *work) | |||
1917 | 1925 | ||
1918 | new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; | 1926 | new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; |
1919 | if (new_generation != generation) { | 1927 | if (new_generation != generation) { |
1920 | fw_notify("recursive bus reset detected, " | 1928 | dev_notice(ohci->card.device, |
1921 | "discarding self ids\n"); | 1929 | "new bus reset, discarding self ids\n"); |
1922 | return; | 1930 | return; |
1923 | } | 1931 | } |
1924 | 1932 | ||
@@ -1989,8 +1997,7 @@ static void bus_reset_work(struct work_struct *work) | |||
1989 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | 1997 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
1990 | free_rom, free_rom_bus); | 1998 | free_rom, free_rom_bus); |
1991 | 1999 | ||
1992 | log_selfids(ohci->node_id, generation, | 2000 | log_selfids(ohci, generation, self_id_count); |
1993 | self_id_count, ohci->self_id_buffer); | ||
1994 | 2001 | ||
1995 | fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, | 2002 | fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, |
1996 | self_id_count, ohci->self_id_buffer, | 2003 | self_id_count, ohci->self_id_buffer, |
@@ -2015,7 +2022,7 @@ static irqreturn_t irq_handler(int irq, void *data) | |||
2015 | */ | 2022 | */ |
2016 | reg_write(ohci, OHCI1394_IntEventClear, | 2023 | reg_write(ohci, OHCI1394_IntEventClear, |
2017 | event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr)); | 2024 | event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr)); |
2018 | log_irqs(event); | 2025 | log_irqs(ohci, event); |
2019 | 2026 | ||
2020 | if (event & OHCI1394_selfIDComplete) | 2027 | if (event & OHCI1394_selfIDComplete) |
2021 | queue_work(fw_workqueue, &ohci->bus_reset_work); | 2028 | queue_work(fw_workqueue, &ohci->bus_reset_work); |
@@ -2057,8 +2064,7 @@ static irqreturn_t irq_handler(int irq, void *data) | |||
2057 | } | 2064 | } |
2058 | 2065 | ||
2059 | if (unlikely(event & OHCI1394_regAccessFail)) | 2066 | if (unlikely(event & OHCI1394_regAccessFail)) |
2060 | fw_error("Register access failure - " | 2067 | dev_err(ohci->card.device, "register access failure\n"); |
2061 | "please notify linux1394-devel@lists.sf.net\n"); | ||
2062 | 2068 | ||
2063 | if (unlikely(event & OHCI1394_postedWriteErr)) { | 2069 | if (unlikely(event & OHCI1394_postedWriteErr)) { |
2064 | reg_read(ohci, OHCI1394_PostedWriteAddressHi); | 2070 | reg_read(ohci, OHCI1394_PostedWriteAddressHi); |
@@ -2066,12 +2072,13 @@ static irqreturn_t irq_handler(int irq, void *data) | |||
2066 | reg_write(ohci, OHCI1394_IntEventClear, | 2072 | reg_write(ohci, OHCI1394_IntEventClear, |
2067 | OHCI1394_postedWriteErr); | 2073 | OHCI1394_postedWriteErr); |
2068 | if (printk_ratelimit()) | 2074 | if (printk_ratelimit()) |
2069 | fw_error("PCI posted write error\n"); | 2075 | dev_err(ohci->card.device, "PCI posted write error\n"); |
2070 | } | 2076 | } |
2071 | 2077 | ||
2072 | if (unlikely(event & OHCI1394_cycleTooLong)) { | 2078 | if (unlikely(event & OHCI1394_cycleTooLong)) { |
2073 | if (printk_ratelimit()) | 2079 | if (printk_ratelimit()) |
2074 | fw_notify("isochronous cycle too long\n"); | 2080 | dev_notice(ohci->card.device, |
2081 | "isochronous cycle too long\n"); | ||
2075 | reg_write(ohci, OHCI1394_LinkControlSet, | 2082 | reg_write(ohci, OHCI1394_LinkControlSet, |
2076 | OHCI1394_LinkControl_cycleMaster); | 2083 | OHCI1394_LinkControl_cycleMaster); |
2077 | } | 2084 | } |
@@ -2084,7 +2091,8 @@ static irqreturn_t irq_handler(int irq, void *data) | |||
2084 | * them at least two cycles later. (FIXME?) | 2091 | * them at least two cycles later. (FIXME?) |
2085 | */ | 2092 | */ |
2086 | if (printk_ratelimit()) | 2093 | if (printk_ratelimit()) |
2087 | fw_notify("isochronous cycle inconsistent\n"); | 2094 | dev_notice(ohci->card.device, |
2095 | "isochronous cycle inconsistent\n"); | ||
2088 | } | 2096 | } |
2089 | 2097 | ||
2090 | if (unlikely(event & OHCI1394_unrecoverableError)) | 2098 | if (unlikely(event & OHCI1394_unrecoverableError)) |
@@ -2211,7 +2219,7 @@ static int ohci_enable(struct fw_card *card, | |||
2211 | int i, ret; | 2219 | int i, ret; |
2212 | 2220 | ||
2213 | if (software_reset(ohci)) { | 2221 | if (software_reset(ohci)) { |
2214 | fw_error("Failed to reset ohci card.\n"); | 2222 | dev_err(card->device, "failed to reset ohci card\n"); |
2215 | return -EBUSY; | 2223 | return -EBUSY; |
2216 | } | 2224 | } |
2217 | 2225 | ||
@@ -2235,7 +2243,7 @@ static int ohci_enable(struct fw_card *card, | |||
2235 | } | 2243 | } |
2236 | 2244 | ||
2237 | if (!lps) { | 2245 | if (!lps) { |
2238 | fw_error("Failed to set Link Power Status\n"); | 2246 | dev_err(card->device, "failed to set Link Power Status\n"); |
2239 | return -EIO; | 2247 | return -EIO; |
2240 | } | 2248 | } |
2241 | 2249 | ||
@@ -2244,7 +2252,7 @@ static int ohci_enable(struct fw_card *card, | |||
2244 | if (ret < 0) | 2252 | if (ret < 0) |
2245 | return ret; | 2253 | return ret; |
2246 | if (ret) | 2254 | if (ret) |
2247 | fw_notify("local TSB41BA3D phy\n"); | 2255 | dev_notice(card->device, "local TSB41BA3D phy\n"); |
2248 | else | 2256 | else |
2249 | ohci->quirks &= ~QUIRK_TI_SLLZ059; | 2257 | ohci->quirks &= ~QUIRK_TI_SLLZ059; |
2250 | } | 2258 | } |
@@ -2344,7 +2352,8 @@ static int ohci_enable(struct fw_card *card, | |||
2344 | if (request_irq(dev->irq, irq_handler, | 2352 | if (request_irq(dev->irq, irq_handler, |
2345 | pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, | 2353 | pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, |
2346 | ohci_driver_name, ohci)) { | 2354 | ohci_driver_name, ohci)) { |
2347 | fw_error("Failed to allocate interrupt %d.\n", dev->irq); | 2355 | dev_err(card->device, "failed to allocate interrupt %d\n", |
2356 | dev->irq); | ||
2348 | pci_disable_msi(dev); | 2357 | pci_disable_msi(dev); |
2349 | 2358 | ||
2350 | if (config_rom) { | 2359 | if (config_rom) { |
@@ -2509,7 +2518,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) | |||
2509 | dma_unmap_single(ohci->card.device, packet->payload_bus, | 2518 | dma_unmap_single(ohci->card.device, packet->payload_bus, |
2510 | packet->payload_length, DMA_TO_DEVICE); | 2519 | packet->payload_length, DMA_TO_DEVICE); |
2511 | 2520 | ||
2512 | log_ar_at_event('T', packet->speed, packet->header, 0x20); | 2521 | log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20); |
2513 | driver_data->packet = NULL; | 2522 | driver_data->packet = NULL; |
2514 | packet->ack = RCODE_CANCELLED; | 2523 | packet->ack = RCODE_CANCELLED; |
2515 | packet->callback(packet, &ohci->card, packet->ack); | 2524 | packet->callback(packet, &ohci->card, packet->ack); |
@@ -2674,25 +2683,35 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) | |||
2674 | } | 2683 | } |
2675 | } | 2684 | } |
2676 | 2685 | ||
2677 | static void copy_iso_headers(struct iso_context *ctx, void *p) | 2686 | static void flush_iso_completions(struct iso_context *ctx) |
2678 | { | 2687 | { |
2679 | int i = ctx->header_length; | 2688 | ctx->base.callback.sc(&ctx->base, ctx->last_timestamp, |
2689 | ctx->header_length, ctx->header, | ||
2690 | ctx->base.callback_data); | ||
2691 | ctx->header_length = 0; | ||
2692 | } | ||
2680 | 2693 | ||
2681 | if (i + ctx->base.header_size > PAGE_SIZE) | 2694 | static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr) |
2682 | return; | 2695 | { |
2696 | u32 *ctx_hdr; | ||
2697 | |||
2698 | if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) | ||
2699 | flush_iso_completions(ctx); | ||
2700 | |||
2701 | ctx_hdr = ctx->header + ctx->header_length; | ||
2702 | ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]); | ||
2683 | 2703 | ||
2684 | /* | 2704 | /* |
2685 | * The iso header is byteswapped to little endian by | 2705 | * The two iso header quadlets are byteswapped to little |
2686 | * the controller, but the remaining header quadlets | 2706 | * endian by the controller, but we want to present them |
2687 | * are big endian. We want to present all the headers | 2707 | * as big endian for consistency with the bus endianness. |
2688 | * as big endian, so we have to swap the first quadlet. | ||
2689 | */ | 2708 | */ |
2690 | if (ctx->base.header_size > 0) | 2709 | if (ctx->base.header_size > 0) |
2691 | *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); | 2710 | ctx_hdr[0] = swab32(dma_hdr[1]); /* iso packet header */ |
2692 | if (ctx->base.header_size > 4) | 2711 | if (ctx->base.header_size > 4) |
2693 | *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p); | 2712 | ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */ |
2694 | if (ctx->base.header_size > 8) | 2713 | if (ctx->base.header_size > 8) |
2695 | memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8); | 2714 | memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8); |
2696 | ctx->header_length += ctx->base.header_size; | 2715 | ctx->header_length += ctx->base.header_size; |
2697 | } | 2716 | } |
2698 | 2717 | ||
@@ -2704,8 +2723,6 @@ static int handle_ir_packet_per_buffer(struct context *context, | |||
2704 | container_of(context, struct iso_context, context); | 2723 | container_of(context, struct iso_context, context); |
2705 | struct descriptor *pd; | 2724 | struct descriptor *pd; |
2706 | u32 buffer_dma; | 2725 | u32 buffer_dma; |
2707 | __le32 *ir_header; | ||
2708 | void *p; | ||
2709 | 2726 | ||
2710 | for (pd = d; pd <= last; pd++) | 2727 | for (pd = d; pd <= last; pd++) |
2711 | if (pd->transfer_status) | 2728 | if (pd->transfer_status) |
@@ -2724,17 +2741,10 @@ static int handle_ir_packet_per_buffer(struct context *context, | |||
2724 | DMA_FROM_DEVICE); | 2741 | DMA_FROM_DEVICE); |
2725 | } | 2742 | } |
2726 | 2743 | ||
2727 | p = last + 1; | 2744 | copy_iso_headers(ctx, (u32 *) (last + 1)); |
2728 | copy_iso_headers(ctx, p); | ||
2729 | 2745 | ||
2730 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { | 2746 | if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) |
2731 | ir_header = (__le32 *) p; | 2747 | flush_iso_completions(ctx); |
2732 | ctx->base.callback.sc(&ctx->base, | ||
2733 | le32_to_cpu(ir_header[0]) & 0xffff, | ||
2734 | ctx->header_length, ctx->header, | ||
2735 | ctx->base.callback_data); | ||
2736 | ctx->header_length = 0; | ||
2737 | } | ||
2738 | 2748 | ||
2739 | return 1; | 2749 | return 1; |
2740 | } | 2750 | } |
@@ -2746,29 +2756,51 @@ static int handle_ir_buffer_fill(struct context *context, | |||
2746 | { | 2756 | { |
2747 | struct iso_context *ctx = | 2757 | struct iso_context *ctx = |
2748 | container_of(context, struct iso_context, context); | 2758 | container_of(context, struct iso_context, context); |
2759 | unsigned int req_count, res_count, completed; | ||
2749 | u32 buffer_dma; | 2760 | u32 buffer_dma; |
2750 | 2761 | ||
2751 | if (!last->transfer_status) | 2762 | req_count = le16_to_cpu(last->req_count); |
2763 | res_count = le16_to_cpu(ACCESS_ONCE(last->res_count)); | ||
2764 | completed = req_count - res_count; | ||
2765 | buffer_dma = le32_to_cpu(last->data_address); | ||
2766 | |||
2767 | if (completed > 0) { | ||
2768 | ctx->mc_buffer_bus = buffer_dma; | ||
2769 | ctx->mc_completed = completed; | ||
2770 | } | ||
2771 | |||
2772 | if (res_count != 0) | ||
2752 | /* Descriptor(s) not done yet, stop iteration */ | 2773 | /* Descriptor(s) not done yet, stop iteration */ |
2753 | return 0; | 2774 | return 0; |
2754 | 2775 | ||
2755 | buffer_dma = le32_to_cpu(last->data_address); | ||
2756 | dma_sync_single_range_for_cpu(context->ohci->card.device, | 2776 | dma_sync_single_range_for_cpu(context->ohci->card.device, |
2757 | buffer_dma & PAGE_MASK, | 2777 | buffer_dma & PAGE_MASK, |
2758 | buffer_dma & ~PAGE_MASK, | 2778 | buffer_dma & ~PAGE_MASK, |
2759 | le16_to_cpu(last->req_count), | 2779 | completed, DMA_FROM_DEVICE); |
2760 | DMA_FROM_DEVICE); | ||
2761 | 2780 | ||
2762 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) | 2781 | if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) { |
2763 | ctx->base.callback.mc(&ctx->base, | 2782 | ctx->base.callback.mc(&ctx->base, |
2764 | le32_to_cpu(last->data_address) + | 2783 | buffer_dma + completed, |
2765 | le16_to_cpu(last->req_count) - | ||
2766 | le16_to_cpu(last->res_count), | ||
2767 | ctx->base.callback_data); | 2784 | ctx->base.callback_data); |
2785 | ctx->mc_completed = 0; | ||
2786 | } | ||
2768 | 2787 | ||
2769 | return 1; | 2788 | return 1; |
2770 | } | 2789 | } |
2771 | 2790 | ||
2791 | static void flush_ir_buffer_fill(struct iso_context *ctx) | ||
2792 | { | ||
2793 | dma_sync_single_range_for_cpu(ctx->context.ohci->card.device, | ||
2794 | ctx->mc_buffer_bus & PAGE_MASK, | ||
2795 | ctx->mc_buffer_bus & ~PAGE_MASK, | ||
2796 | ctx->mc_completed, DMA_FROM_DEVICE); | ||
2797 | |||
2798 | ctx->base.callback.mc(&ctx->base, | ||
2799 | ctx->mc_buffer_bus + ctx->mc_completed, | ||
2800 | ctx->base.callback_data); | ||
2801 | ctx->mc_completed = 0; | ||
2802 | } | ||
2803 | |||
2772 | static inline void sync_it_packet_for_cpu(struct context *context, | 2804 | static inline void sync_it_packet_for_cpu(struct context *context, |
2773 | struct descriptor *pd) | 2805 | struct descriptor *pd) |
2774 | { | 2806 | { |
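The multichannel receive path above now derives progress from the descriptor's res_count instead of transfer_status, so a partially filled buffer chunk can still be reported, either at the next IRQ_ALWAYS descriptor or via flush_ir_buffer_fill(). A worked example with illustrative numbers, following the arithmetic shown in the hunk:

    /*
     * Illustrative only:
     *   req_count = 4096, res_count = 1024  =>  completed = 3072
     * mc_buffer_bus/mc_completed record that state; the mc callback then
     * reports buffer_dma + 3072 as the completion point.  With the old
     * transfer_status test, nothing would have been reported until the
     * whole 4096-byte chunk had filled and its status was written back.
     */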
@@ -2812,8 +2844,8 @@ static int handle_it_packet(struct context *context, | |||
2812 | { | 2844 | { |
2813 | struct iso_context *ctx = | 2845 | struct iso_context *ctx = |
2814 | container_of(context, struct iso_context, context); | 2846 | container_of(context, struct iso_context, context); |
2815 | int i; | ||
2816 | struct descriptor *pd; | 2847 | struct descriptor *pd; |
2848 | __be32 *ctx_hdr; | ||
2817 | 2849 | ||
2818 | for (pd = d; pd <= last; pd++) | 2850 | for (pd = d; pd <= last; pd++) |
2819 | if (pd->transfer_status) | 2851 | if (pd->transfer_status) |
@@ -2824,20 +2856,19 @@ static int handle_it_packet(struct context *context, | |||
2824 | 2856 | ||
2825 | sync_it_packet_for_cpu(context, d); | 2857 | sync_it_packet_for_cpu(context, d); |
2826 | 2858 | ||
2827 | i = ctx->header_length; | 2859 | if (ctx->header_length + 4 > PAGE_SIZE) |
2828 | if (i + 4 < PAGE_SIZE) { | 2860 | flush_iso_completions(ctx); |
2829 | /* Present this value as big-endian to match the receive code */ | 2861 | |
2830 | *(__be32 *)(ctx->header + i) = cpu_to_be32( | 2862 | ctx_hdr = ctx->header + ctx->header_length; |
2831 | ((u32)le16_to_cpu(pd->transfer_status) << 16) | | 2863 | ctx->last_timestamp = le16_to_cpu(last->res_count); |
2832 | le16_to_cpu(pd->res_count)); | 2864 | /* Present this value as big-endian to match the receive code */ |
2833 | ctx->header_length += 4; | 2865 | *ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) | |
2834 | } | 2866 | le16_to_cpu(pd->res_count)); |
2835 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { | 2867 | ctx->header_length += 4; |
2836 | ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count), | 2868 | |
2837 | ctx->header_length, ctx->header, | 2869 | if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) |
2838 | ctx->base.callback_data); | 2870 | flush_iso_completions(ctx); |
2839 | ctx->header_length = 0; | 2871 | |
2840 | } | ||
2841 | return 1; | 2872 | return 1; |
2842 | } | 2873 | } |
2843 | 2874 | ||
@@ -2924,8 +2955,10 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, | |||
2924 | if (ret < 0) | 2955 | if (ret < 0) |
2925 | goto out_with_header; | 2956 | goto out_with_header; |
2926 | 2957 | ||
2927 | if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) | 2958 | if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) { |
2928 | set_multichannel_mask(ohci, 0); | 2959 | set_multichannel_mask(ohci, 0); |
2960 | ctx->mc_completed = 0; | ||
2961 | } | ||
2929 | 2962 | ||
2930 | return &ctx->base; | 2963 | return &ctx->base; |
2931 | 2964 | ||
@@ -3387,6 +3420,39 @@ static void ohci_flush_queue_iso(struct fw_iso_context *base) | |||
3387 | reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); | 3420 | reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); |
3388 | } | 3421 | } |
3389 | 3422 | ||
3423 | static int ohci_flush_iso_completions(struct fw_iso_context *base) | ||
3424 | { | ||
3425 | struct iso_context *ctx = container_of(base, struct iso_context, base); | ||
3426 | int ret = 0; | ||
3427 | |||
3428 | tasklet_disable(&ctx->context.tasklet); | ||
3429 | |||
3430 | if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) { | ||
3431 | context_tasklet((unsigned long)&ctx->context); | ||
3432 | |||
3433 | switch (base->type) { | ||
3434 | case FW_ISO_CONTEXT_TRANSMIT: | ||
3435 | case FW_ISO_CONTEXT_RECEIVE: | ||
3436 | if (ctx->header_length != 0) | ||
3437 | flush_iso_completions(ctx); | ||
3438 | break; | ||
3439 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | ||
3440 | if (ctx->mc_completed != 0) | ||
3441 | flush_ir_buffer_fill(ctx); | ||
3442 | break; | ||
3443 | default: | ||
3444 | ret = -ENOSYS; | ||
3445 | } | ||
3446 | |||
3447 | clear_bit_unlock(0, &ctx->flushing_completions); | ||
3448 | smp_mb__after_clear_bit(); | ||
3449 | } | ||
3450 | |||
3451 | tasklet_enable(&ctx->context.tasklet); | ||
3452 | |||
3453 | return ret; | ||
3454 | } | ||
3455 | |||
3390 | static const struct fw_card_driver ohci_driver = { | 3456 | static const struct fw_card_driver ohci_driver = { |
3391 | .enable = ohci_enable, | 3457 | .enable = ohci_enable, |
3392 | .read_phy_reg = ohci_read_phy_reg, | 3458 | .read_phy_reg = ohci_read_phy_reg, |
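ohci_flush_iso_completions() is the controller-side half of the "explicit flushing" feature in this series: it runs the context tasklet by hand (with the tasklet disabled around it) and then delivers whatever headers or buffer-fill progress have accumulated but not yet been reported. A hedged sketch of an in-kernel caller, assuming the core-side wrapper introduced by this series is named fw_iso_context_flush_completions() (not shown in this diff):

    /* Sketch only -- the wrapper name is an assumption, not shown here. */
    static void drain_completions(struct fw_iso_context *ctx)
    {
            int err = fw_iso_context_flush_completions(ctx);

            if (err)        /* e.g. -ENOSYS for unsupported context types */
                    pr_warn("iso completion flush failed: %d\n", err);
    }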
@@ -3404,6 +3470,7 @@ static const struct fw_card_driver ohci_driver = { | |||
3404 | .set_iso_channels = ohci_set_iso_channels, | 3470 | .set_iso_channels = ohci_set_iso_channels, |
3405 | .queue_iso = ohci_queue_iso, | 3471 | .queue_iso = ohci_queue_iso, |
3406 | .flush_queue_iso = ohci_flush_queue_iso, | 3472 | .flush_queue_iso = ohci_flush_queue_iso, |
3473 | .flush_iso_completions = ohci_flush_iso_completions, | ||
3407 | .start_iso = ohci_start_iso, | 3474 | .start_iso = ohci_start_iso, |
3408 | .stop_iso = ohci_stop_iso, | 3475 | .stop_iso = ohci_stop_iso, |
3409 | }; | 3476 | }; |
@@ -3463,7 +3530,7 @@ static int __devinit pci_probe(struct pci_dev *dev, | |||
3463 | 3530 | ||
3464 | err = pci_enable_device(dev); | 3531 | err = pci_enable_device(dev); |
3465 | if (err) { | 3532 | if (err) { |
3466 | fw_error("Failed to enable OHCI hardware\n"); | 3533 | dev_err(&dev->dev, "failed to enable OHCI hardware\n"); |
3467 | goto fail_free; | 3534 | goto fail_free; |
3468 | } | 3535 | } |
3469 | 3536 | ||
@@ -3478,13 +3545,13 @@ static int __devinit pci_probe(struct pci_dev *dev, | |||
3478 | 3545 | ||
3479 | err = pci_request_region(dev, 0, ohci_driver_name); | 3546 | err = pci_request_region(dev, 0, ohci_driver_name); |
3480 | if (err) { | 3547 | if (err) { |
3481 | fw_error("MMIO resource unavailable\n"); | 3548 | dev_err(&dev->dev, "MMIO resource unavailable\n"); |
3482 | goto fail_disable; | 3549 | goto fail_disable; |
3483 | } | 3550 | } |
3484 | 3551 | ||
3485 | ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE); | 3552 | ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE); |
3486 | if (ohci->registers == NULL) { | 3553 | if (ohci->registers == NULL) { |
3487 | fw_error("Failed to remap registers\n"); | 3554 | dev_err(&dev->dev, "failed to remap registers\n"); |
3488 | err = -ENXIO; | 3555 | err = -ENXIO; |
3489 | goto fail_iomem; | 3556 | goto fail_iomem; |
3490 | } | 3557 | } |
@@ -3573,9 +3640,10 @@ static int __devinit pci_probe(struct pci_dev *dev, | |||
3573 | goto fail_contexts; | 3640 | goto fail_contexts; |
3574 | 3641 | ||
3575 | version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; | 3642 | version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; |
3576 | fw_notify("Added fw-ohci device %s, OHCI v%x.%x, " | 3643 | dev_notice(&dev->dev, |
3644 | "added OHCI v%x.%x device as card %d, " | ||
3577 | "%d IR + %d IT contexts, quirks 0x%x\n", | 3645 | "%d IR + %d IT contexts, quirks 0x%x\n", |
3578 | dev_name(&dev->dev), version >> 16, version & 0xff, | 3646 | version >> 16, version & 0xff, ohci->card.index, |
3579 | ohci->n_ir, ohci->n_it, ohci->quirks); | 3647 | ohci->n_ir, ohci->n_it, ohci->quirks); |
3580 | 3648 | ||
3581 | return 0; | 3649 | return 0; |
@@ -3604,7 +3672,7 @@ static int __devinit pci_probe(struct pci_dev *dev, | |||
3604 | pmac_ohci_off(dev); | 3672 | pmac_ohci_off(dev); |
3605 | fail: | 3673 | fail: |
3606 | if (err == -ENOMEM) | 3674 | if (err == -ENOMEM) |
3607 | fw_error("Out of memory\n"); | 3675 | dev_err(&dev->dev, "out of memory\n"); |
3608 | 3676 | ||
3609 | return err; | 3677 | return err; |
3610 | } | 3678 | } |
@@ -3648,7 +3716,7 @@ static void pci_remove(struct pci_dev *dev) | |||
3648 | kfree(ohci); | 3716 | kfree(ohci); |
3649 | pmac_ohci_off(dev); | 3717 | pmac_ohci_off(dev); |
3650 | 3718 | ||
3651 | fw_notify("Removed fw-ohci device.\n"); | 3719 | dev_notice(&dev->dev, "removed fw-ohci device\n"); |
3652 | } | 3720 | } |
3653 | 3721 | ||
3654 | #ifdef CONFIG_PM | 3722 | #ifdef CONFIG_PM |
@@ -3662,12 +3730,12 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state) | |||
3662 | pci_disable_msi(dev); | 3730 | pci_disable_msi(dev); |
3663 | err = pci_save_state(dev); | 3731 | err = pci_save_state(dev); |
3664 | if (err) { | 3732 | if (err) { |
3665 | fw_error("pci_save_state failed\n"); | 3733 | dev_err(&dev->dev, "pci_save_state failed\n"); |
3666 | return err; | 3734 | return err; |
3667 | } | 3735 | } |
3668 | err = pci_set_power_state(dev, pci_choose_state(dev, state)); | 3736 | err = pci_set_power_state(dev, pci_choose_state(dev, state)); |
3669 | if (err) | 3737 | if (err) |
3670 | fw_error("pci_set_power_state failed with %d\n", err); | 3738 | dev_err(&dev->dev, "pci_set_power_state failed with %d\n", err); |
3671 | pmac_ohci_off(dev); | 3739 | pmac_ohci_off(dev); |
3672 | 3740 | ||
3673 | return 0; | 3741 | return 0; |
@@ -3683,7 +3751,7 @@ static int pci_resume(struct pci_dev *dev) | |||
3683 | pci_restore_state(dev); | 3751 | pci_restore_state(dev); |
3684 | err = pci_enable_device(dev); | 3752 | err = pci_enable_device(dev); |
3685 | if (err) { | 3753 | if (err) { |
3686 | fw_error("pci_enable_device failed\n"); | 3754 | dev_err(&dev->dev, "pci_enable_device failed\n"); |
3687 | return err; | 3755 | return err; |
3688 | } | 3756 | } |
3689 | 3757 | ||
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 80e95aa3bf14..000a29ffedae 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c | |||
@@ -125,8 +125,6 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" | |||
125 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) | 125 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) |
126 | ", or a combination)"); | 126 | ", or a combination)"); |
127 | 127 | ||
128 | static const char sbp2_driver_name[] = "sbp2"; | ||
129 | |||
130 | /* | 128 | /* |
131 | * We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry | 129 | * We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry |
132 | * and one struct scsi_device per sbp2_logical_unit. | 130 | * and one struct scsi_device per sbp2_logical_unit. |
@@ -165,7 +163,6 @@ static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay) | |||
165 | */ | 163 | */ |
166 | struct sbp2_target { | 164 | struct sbp2_target { |
167 | struct fw_unit *unit; | 165 | struct fw_unit *unit; |
168 | const char *bus_id; | ||
169 | struct list_head lu_list; | 166 | struct list_head lu_list; |
170 | 167 | ||
171 | u64 management_agent_address; | 168 | u64 management_agent_address; |
@@ -181,11 +178,21 @@ struct sbp2_target { | |||
181 | int blocked; /* ditto */ | 178 | int blocked; /* ditto */ |
182 | }; | 179 | }; |
183 | 180 | ||
184 | static struct fw_device *target_device(struct sbp2_target *tgt) | 181 | static struct fw_device *target_parent_device(struct sbp2_target *tgt) |
185 | { | 182 | { |
186 | return fw_parent_device(tgt->unit); | 183 | return fw_parent_device(tgt->unit); |
187 | } | 184 | } |
188 | 185 | ||
186 | static const struct device *tgt_dev(const struct sbp2_target *tgt) | ||
187 | { | ||
188 | return &tgt->unit->device; | ||
189 | } | ||
190 | |||
191 | static const struct device *lu_dev(const struct sbp2_logical_unit *lu) | ||
192 | { | ||
193 | return &lu->tgt->unit->device; | ||
194 | } | ||
195 | |||
189 | /* Impossible login_id, to detect logout attempt before successful login */ | 196 | /* Impossible login_id, to detect logout attempt before successful login */ |
190 | #define INVALID_LOGIN_ID 0x10000 | 197 | #define INVALID_LOGIN_ID 0x10000 |
191 | 198 | ||
@@ -211,6 +218,7 @@ static struct fw_device *target_device(struct sbp2_target *tgt) | |||
211 | #define SBP2_CSR_UNIT_CHARACTERISTICS 0x3a | 218 | #define SBP2_CSR_UNIT_CHARACTERISTICS 0x3a |
212 | #define SBP2_CSR_FIRMWARE_REVISION 0x3c | 219 | #define SBP2_CSR_FIRMWARE_REVISION 0x3c |
213 | #define SBP2_CSR_LOGICAL_UNIT_NUMBER 0x14 | 220 | #define SBP2_CSR_LOGICAL_UNIT_NUMBER 0x14 |
221 | #define SBP2_CSR_UNIT_UNIQUE_ID 0x8d | ||
214 | #define SBP2_CSR_LOGICAL_UNIT_DIRECTORY 0xd4 | 222 | #define SBP2_CSR_LOGICAL_UNIT_DIRECTORY 0xd4 |
215 | 223 | ||
216 | /* Management orb opcodes */ | 224 | /* Management orb opcodes */ |
@@ -430,7 +438,8 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request, | |||
430 | memcpy(status.data, payload + 8, length - 8); | 438 | memcpy(status.data, payload + 8, length - 8); |
431 | 439 | ||
432 | if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) { | 440 | if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) { |
433 | fw_notify("non-orb related status write, not handled\n"); | 441 | dev_notice(lu_dev(lu), |
442 | "non-ORB related status write, not handled\n"); | ||
434 | fw_send_response(card, request, RCODE_COMPLETE); | 443 | fw_send_response(card, request, RCODE_COMPLETE); |
435 | return; | 444 | return; |
436 | } | 445 | } |
@@ -451,7 +460,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request, | |||
451 | orb->callback(orb, &status); | 460 | orb->callback(orb, &status); |
452 | kref_put(&orb->kref, free_orb); /* orb callback reference */ | 461 | kref_put(&orb->kref, free_orb); /* orb callback reference */ |
453 | } else { | 462 | } else { |
454 | fw_error("status write for unknown orb\n"); | 463 | dev_err(lu_dev(lu), "status write for unknown ORB\n"); |
455 | } | 464 | } |
456 | 465 | ||
457 | fw_send_response(card, request, RCODE_COMPLETE); | 466 | fw_send_response(card, request, RCODE_COMPLETE); |
@@ -492,7 +501,7 @@ static void complete_transaction(struct fw_card *card, int rcode, | |||
492 | static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, | 501 | static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, |
493 | int node_id, int generation, u64 offset) | 502 | int node_id, int generation, u64 offset) |
494 | { | 503 | { |
495 | struct fw_device *device = target_device(lu->tgt); | 504 | struct fw_device *device = target_parent_device(lu->tgt); |
496 | struct sbp2_pointer orb_pointer; | 505 | struct sbp2_pointer orb_pointer; |
497 | unsigned long flags; | 506 | unsigned long flags; |
498 | 507 | ||
@@ -513,7 +522,7 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, | |||
513 | 522 | ||
514 | static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) | 523 | static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) |
515 | { | 524 | { |
516 | struct fw_device *device = target_device(lu->tgt); | 525 | struct fw_device *device = target_parent_device(lu->tgt); |
517 | struct sbp2_orb *orb, *next; | 526 | struct sbp2_orb *orb, *next; |
518 | struct list_head list; | 527 | struct list_head list; |
519 | unsigned long flags; | 528 | unsigned long flags; |
@@ -552,7 +561,7 @@ static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
552 | int generation, int function, | 561 | int generation, int function, |
553 | int lun_or_login_id, void *response) | 562 | int lun_or_login_id, void *response) |
554 | { | 563 | { |
555 | struct fw_device *device = target_device(lu->tgt); | 564 | struct fw_device *device = target_parent_device(lu->tgt); |
556 | struct sbp2_management_orb *orb; | 565 | struct sbp2_management_orb *orb; |
557 | unsigned int timeout; | 566 | unsigned int timeout; |
558 | int retval = -ENOMEM; | 567 | int retval = -ENOMEM; |
@@ -560,7 +569,7 @@ static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
560 | if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device)) | 569 | if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device)) |
561 | return 0; | 570 | return 0; |
562 | 571 | ||
563 | orb = kzalloc(sizeof(*orb), GFP_ATOMIC); | 572 | orb = kzalloc(sizeof(*orb), GFP_NOIO); |
564 | if (orb == NULL) | 573 | if (orb == NULL) |
565 | return -ENOMEM; | 574 | return -ENOMEM; |
566 | 575 | ||
@@ -612,20 +621,20 @@ static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
612 | 621 | ||
613 | retval = -EIO; | 622 | retval = -EIO; |
614 | if (sbp2_cancel_orbs(lu) == 0) { | 623 | if (sbp2_cancel_orbs(lu) == 0) { |
615 | fw_error("%s: orb reply timed out, rcode=0x%02x\n", | 624 | dev_err(lu_dev(lu), "ORB reply timed out, rcode 0x%02x\n", |
616 | lu->tgt->bus_id, orb->base.rcode); | 625 | orb->base.rcode); |
617 | goto out; | 626 | goto out; |
618 | } | 627 | } |
619 | 628 | ||
620 | if (orb->base.rcode != RCODE_COMPLETE) { | 629 | if (orb->base.rcode != RCODE_COMPLETE) { |
621 | fw_error("%s: management write failed, rcode 0x%02x\n", | 630 | dev_err(lu_dev(lu), "management write failed, rcode 0x%02x\n", |
622 | lu->tgt->bus_id, orb->base.rcode); | 631 | orb->base.rcode); |
623 | goto out; | 632 | goto out; |
624 | } | 633 | } |
625 | 634 | ||
626 | if (STATUS_GET_RESPONSE(orb->status) != 0 || | 635 | if (STATUS_GET_RESPONSE(orb->status) != 0 || |
627 | STATUS_GET_SBP_STATUS(orb->status) != 0) { | 636 | STATUS_GET_SBP_STATUS(orb->status) != 0) { |
628 | fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id, | 637 | dev_err(lu_dev(lu), "error status: %d:%d\n", |
629 | STATUS_GET_RESPONSE(orb->status), | 638 | STATUS_GET_RESPONSE(orb->status), |
630 | STATUS_GET_SBP_STATUS(orb->status)); | 639 | STATUS_GET_SBP_STATUS(orb->status)); |
631 | goto out; | 640 | goto out; |
@@ -648,7 +657,7 @@ static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
648 | 657 | ||
649 | static void sbp2_agent_reset(struct sbp2_logical_unit *lu) | 658 | static void sbp2_agent_reset(struct sbp2_logical_unit *lu) |
650 | { | 659 | { |
651 | struct fw_device *device = target_device(lu->tgt); | 660 | struct fw_device *device = target_parent_device(lu->tgt); |
652 | __be32 d = 0; | 661 | __be32 d = 0; |
653 | 662 | ||
654 | fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, | 663 | fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, |
@@ -665,7 +674,7 @@ static void complete_agent_reset_write_no_wait(struct fw_card *card, | |||
665 | 674 | ||
666 | static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) | 675 | static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) |
667 | { | 676 | { |
668 | struct fw_device *device = target_device(lu->tgt); | 677 | struct fw_device *device = target_parent_device(lu->tgt); |
669 | struct fw_transaction *t; | 678 | struct fw_transaction *t; |
670 | static __be32 d; | 679 | static __be32 d; |
671 | 680 | ||
@@ -704,7 +713,7 @@ static inline void sbp2_allow_block(struct sbp2_logical_unit *lu) | |||
704 | static void sbp2_conditionally_block(struct sbp2_logical_unit *lu) | 713 | static void sbp2_conditionally_block(struct sbp2_logical_unit *lu) |
705 | { | 714 | { |
706 | struct sbp2_target *tgt = lu->tgt; | 715 | struct sbp2_target *tgt = lu->tgt; |
707 | struct fw_card *card = target_device(tgt)->card; | 716 | struct fw_card *card = target_parent_device(tgt)->card; |
708 | struct Scsi_Host *shost = | 717 | struct Scsi_Host *shost = |
709 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | 718 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); |
710 | unsigned long flags; | 719 | unsigned long flags; |
@@ -728,7 +737,7 @@ static void sbp2_conditionally_block(struct sbp2_logical_unit *lu) | |||
728 | static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) | 737 | static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) |
729 | { | 738 | { |
730 | struct sbp2_target *tgt = lu->tgt; | 739 | struct sbp2_target *tgt = lu->tgt; |
731 | struct fw_card *card = target_device(tgt)->card; | 740 | struct fw_card *card = target_parent_device(tgt)->card; |
732 | struct Scsi_Host *shost = | 741 | struct Scsi_Host *shost = |
733 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | 742 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); |
734 | unsigned long flags; | 743 | unsigned long flags; |
@@ -753,7 +762,7 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) | |||
753 | */ | 762 | */ |
754 | static void sbp2_unblock(struct sbp2_target *tgt) | 763 | static void sbp2_unblock(struct sbp2_target *tgt) |
755 | { | 764 | { |
756 | struct fw_card *card = target_device(tgt)->card; | 765 | struct fw_card *card = target_parent_device(tgt)->card; |
757 | struct Scsi_Host *shost = | 766 | struct Scsi_Host *shost = |
758 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | 767 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); |
759 | unsigned long flags; | 768 | unsigned long flags; |
@@ -794,7 +803,7 @@ static int sbp2_lun2int(u16 lun) | |||
794 | */ | 803 | */ |
795 | static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu) | 804 | static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu) |
796 | { | 805 | { |
797 | struct fw_device *device = target_device(lu->tgt); | 806 | struct fw_device *device = target_parent_device(lu->tgt); |
798 | __be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT); | 807 | __be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT); |
799 | 808 | ||
800 | fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, | 809 | fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, |
@@ -809,7 +818,7 @@ static void sbp2_login(struct work_struct *work) | |||
809 | struct sbp2_logical_unit *lu = | 818 | struct sbp2_logical_unit *lu = |
810 | container_of(work, struct sbp2_logical_unit, work.work); | 819 | container_of(work, struct sbp2_logical_unit, work.work); |
811 | struct sbp2_target *tgt = lu->tgt; | 820 | struct sbp2_target *tgt = lu->tgt; |
812 | struct fw_device *device = target_device(tgt); | 821 | struct fw_device *device = target_parent_device(tgt); |
813 | struct Scsi_Host *shost; | 822 | struct Scsi_Host *shost; |
814 | struct scsi_device *sdev; | 823 | struct scsi_device *sdev; |
815 | struct sbp2_login_response response; | 824 | struct sbp2_login_response response; |
@@ -833,8 +842,8 @@ static void sbp2_login(struct work_struct *work) | |||
833 | if (lu->retries++ < 5) { | 842 | if (lu->retries++ < 5) { |
834 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); | 843 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); |
835 | } else { | 844 | } else { |
836 | fw_error("%s: failed to login to LUN %04x\n", | 845 | dev_err(tgt_dev(tgt), "failed to login to LUN %04x\n", |
837 | tgt->bus_id, lu->lun); | 846 | lu->lun); |
838 | /* Let any waiting I/O fail from now on. */ | 847 | /* Let any waiting I/O fail from now on. */ |
839 | sbp2_unblock(lu->tgt); | 848 | sbp2_unblock(lu->tgt); |
840 | } | 849 | } |
@@ -851,8 +860,8 @@ static void sbp2_login(struct work_struct *work) | |||
851 | << 32) | be32_to_cpu(response.command_block_agent.low); | 860 | << 32) | be32_to_cpu(response.command_block_agent.low); |
852 | lu->login_id = be32_to_cpu(response.misc) & 0xffff; | 861 | lu->login_id = be32_to_cpu(response.misc) & 0xffff; |
853 | 862 | ||
854 | fw_notify("%s: logged in to LUN %04x (%d retries)\n", | 863 | dev_notice(tgt_dev(tgt), "logged in to LUN %04x (%d retries)\n", |
855 | tgt->bus_id, lu->lun, lu->retries); | 864 | lu->lun, lu->retries); |
856 | 865 | ||
857 | /* set appropriate retry limit(s) in BUSY_TIMEOUT register */ | 866 | /* set appropriate retry limit(s) in BUSY_TIMEOUT register */ |
858 | sbp2_set_busy_timeout(lu); | 867 | sbp2_set_busy_timeout(lu); |
@@ -919,7 +928,7 @@ static void sbp2_reconnect(struct work_struct *work) | |||
919 | struct sbp2_logical_unit *lu = | 928 | struct sbp2_logical_unit *lu = |
920 | container_of(work, struct sbp2_logical_unit, work.work); | 929 | container_of(work, struct sbp2_logical_unit, work.work); |
921 | struct sbp2_target *tgt = lu->tgt; | 930 | struct sbp2_target *tgt = lu->tgt; |
922 | struct fw_device *device = target_device(tgt); | 931 | struct fw_device *device = target_parent_device(tgt); |
923 | int generation, node_id, local_node_id; | 932 | int generation, node_id, local_node_id; |
924 | 933 | ||
925 | if (fw_device_is_shutdown(device)) | 934 | if (fw_device_is_shutdown(device)) |
@@ -943,7 +952,7 @@ static void sbp2_reconnect(struct work_struct *work) | |||
943 | smp_rmb(); /* get current card generation */ | 952 | smp_rmb(); /* get current card generation */ |
944 | if (generation == device->card->generation || | 953 | if (generation == device->card->generation || |
945 | lu->retries++ >= 5) { | 954 | lu->retries++ >= 5) { |
946 | fw_error("%s: failed to reconnect\n", tgt->bus_id); | 955 | dev_err(tgt_dev(tgt), "failed to reconnect\n"); |
947 | lu->retries = 0; | 956 | lu->retries = 0; |
948 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | 957 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); |
949 | } | 958 | } |
@@ -957,8 +966,8 @@ static void sbp2_reconnect(struct work_struct *work) | |||
957 | smp_wmb(); /* node IDs must not be older than generation */ | 966 | smp_wmb(); /* node IDs must not be older than generation */ |
958 | lu->generation = generation; | 967 | lu->generation = generation; |
959 | 968 | ||
960 | fw_notify("%s: reconnected to LUN %04x (%d retries)\n", | 969 | dev_notice(tgt_dev(tgt), "reconnected to LUN %04x (%d retries)\n", |
961 | tgt->bus_id, lu->lun, lu->retries); | 970 | lu->lun, lu->retries); |
962 | 971 | ||
963 | sbp2_agent_reset(lu); | 972 | sbp2_agent_reset(lu); |
964 | sbp2_cancel_orbs(lu); | 973 | sbp2_cancel_orbs(lu); |
@@ -997,6 +1006,13 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) | |||
997 | return 0; | 1006 | return 0; |
998 | } | 1007 | } |
999 | 1008 | ||
1009 | static void sbp2_get_unit_unique_id(struct sbp2_target *tgt, | ||
1010 | const u32 *leaf) | ||
1011 | { | ||
1012 | if ((leaf[0] & 0xffff0000) == 0x00020000) | ||
1013 | tgt->guid = (u64)leaf[1] << 32 | leaf[2]; | ||
1014 | } | ||
1015 | |||
1000 | static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, | 1016 | static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, |
1001 | const u32 *directory) | 1017 | const u32 *directory) |
1002 | { | 1018 | { |
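sbp2_get_unit_unique_id() above implements the "Take into account Unit_Unique_ID" change: when the unit directory points at a Unit_Unique_ID leaf (key 0x8d, handled further down in sbp2_scan_unit_dir), the EUI-64 stored in that leaf is used as tgt->guid in preference to the bus-info-block value assigned earlier in sbp2_probe(). An illustrative decoding of such a leaf, with made-up values:

    /*
     * Illustrative leaf contents (values are examples only):
     *   leaf[0] = 0x0002abcd   upper 16 bits: length = 2 quadlets; lower: CRC
     *   leaf[1] = 0x00112233   EUI-64, most significant quadlet
     *   leaf[2] = 0x44556677   EUI-64, least significant quadlet
     * => tgt->guid = 0x0011223344556677
     */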
@@ -1048,6 +1064,10 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory, | |||
1048 | return -ENOMEM; | 1064 | return -ENOMEM; |
1049 | break; | 1065 | break; |
1050 | 1066 | ||
1067 | case SBP2_CSR_UNIT_UNIQUE_ID: | ||
1068 | sbp2_get_unit_unique_id(tgt, ci.p - 1 + value); | ||
1069 | break; | ||
1070 | |||
1051 | case SBP2_CSR_LOGICAL_UNIT_DIRECTORY: | 1071 | case SBP2_CSR_LOGICAL_UNIT_DIRECTORY: |
1052 | /* Adjust for the increment in the iterator */ | 1072 | /* Adjust for the increment in the iterator */ |
1053 | if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0) | 1073 | if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0) |
@@ -1068,8 +1088,8 @@ static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt) | |||
1068 | unsigned int timeout = tgt->mgt_orb_timeout; | 1088 | unsigned int timeout = tgt->mgt_orb_timeout; |
1069 | 1089 | ||
1070 | if (timeout > 40000) | 1090 | if (timeout > 40000) |
1071 | fw_notify("%s: %ds mgt_ORB_timeout limited to 40s\n", | 1091 | dev_notice(tgt_dev(tgt), "%ds mgt_ORB_timeout limited to 40s\n", |
1072 | tgt->bus_id, timeout / 1000); | 1092 | timeout / 1000); |
1073 | 1093 | ||
1074 | tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000); | 1094 | tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000); |
1075 | } | 1095 | } |
@@ -1081,9 +1101,9 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, | |||
1081 | unsigned int w = sbp2_param_workarounds; | 1101 | unsigned int w = sbp2_param_workarounds; |
1082 | 1102 | ||
1083 | if (w) | 1103 | if (w) |
1084 | fw_notify("Please notify linux1394-devel@lists.sourceforge.net " | 1104 | dev_notice(tgt_dev(tgt), |
1085 | "if you need the workarounds parameter for %s\n", | 1105 | "Please notify linux1394-devel@lists.sf.net " |
1086 | tgt->bus_id); | 1106 | "if you need the workarounds parameter\n"); |
1087 | 1107 | ||
1088 | if (w & SBP2_WORKAROUND_OVERRIDE) | 1108 | if (w & SBP2_WORKAROUND_OVERRIDE) |
1089 | goto out; | 1109 | goto out; |
@@ -1103,9 +1123,9 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, | |||
1103 | } | 1123 | } |
1104 | out: | 1124 | out: |
1105 | if (w) | 1125 | if (w) |
1106 | fw_notify("Workarounds for %s: 0x%x " | 1126 | dev_notice(tgt_dev(tgt), "workarounds 0x%x " |
1107 | "(firmware_revision 0x%06x, model_id 0x%06x)\n", | 1127 | "(firmware_revision 0x%06x, model_id 0x%06x)\n", |
1108 | tgt->bus_id, w, firmware_revision, model); | 1128 | w, firmware_revision, model); |
1109 | tgt->workarounds = w; | 1129 | tgt->workarounds = w; |
1110 | } | 1130 | } |
1111 | 1131 | ||
@@ -1121,6 +1141,10 @@ static int sbp2_probe(struct device *dev) | |||
1121 | struct Scsi_Host *shost; | 1141 | struct Scsi_Host *shost; |
1122 | u32 model, firmware_revision; | 1142 | u32 model, firmware_revision; |
1123 | 1143 | ||
1144 | /* cannot (or should not) handle targets on the local node */ | ||
1145 | if (device->is_local) | ||
1146 | return -ENODEV; | ||
1147 | |||
1124 | if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE) | 1148 | if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE) |
1125 | BUG_ON(dma_set_max_seg_size(device->card->device, | 1149 | BUG_ON(dma_set_max_seg_size(device->card->device, |
1126 | SBP2_MAX_SEG_SIZE)); | 1150 | SBP2_MAX_SEG_SIZE)); |
@@ -1133,7 +1157,6 @@ static int sbp2_probe(struct device *dev) | |||
1133 | dev_set_drvdata(&unit->device, tgt); | 1157 | dev_set_drvdata(&unit->device, tgt); |
1134 | tgt->unit = unit; | 1158 | tgt->unit = unit; |
1135 | INIT_LIST_HEAD(&tgt->lu_list); | 1159 | INIT_LIST_HEAD(&tgt->lu_list); |
1136 | tgt->bus_id = dev_name(&unit->device); | ||
1137 | tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; | 1160 | tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; |
1138 | 1161 | ||
1139 | if (fw_device_enable_phys_dma(device) < 0) | 1162 | if (fw_device_enable_phys_dma(device) < 0) |
@@ -1239,7 +1262,7 @@ static int sbp2_remove(struct device *dev) | |||
1239 | kfree(lu); | 1262 | kfree(lu); |
1240 | } | 1263 | } |
1241 | scsi_remove_host(shost); | 1264 | scsi_remove_host(shost); |
1242 | fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no); | 1265 | dev_notice(dev, "released target %d:0:0\n", shost->host_no); |
1243 | 1266 | ||
1244 | scsi_host_put(shost); | 1267 | scsi_host_put(shost); |
1245 | return 0; | 1268 | return 0; |
@@ -1261,7 +1284,7 @@ static const struct ieee1394_device_id sbp2_id_table[] = { | |||
1261 | static struct fw_driver sbp2_driver = { | 1284 | static struct fw_driver sbp2_driver = { |
1262 | .driver = { | 1285 | .driver = { |
1263 | .owner = THIS_MODULE, | 1286 | .owner = THIS_MODULE, |
1264 | .name = sbp2_driver_name, | 1287 | .name = KBUILD_MODNAME, |
1265 | .bus = &fw_bus_type, | 1288 | .bus = &fw_bus_type, |
1266 | .probe = sbp2_probe, | 1289 | .probe = sbp2_probe, |
1267 | .remove = sbp2_remove, | 1290 | .remove = sbp2_remove, |
@@ -1286,10 +1309,19 @@ static void sbp2_unmap_scatterlist(struct device *card_device, | |||
1286 | static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) | 1309 | static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) |
1287 | { | 1310 | { |
1288 | int sam_status; | 1311 | int sam_status; |
1312 | int sfmt = (sbp2_status[0] >> 6) & 0x03; | ||
1313 | |||
1314 | if (sfmt == 2 || sfmt == 3) { | ||
1315 | /* | ||
1316 | * Reserved for future standardization (2) or | ||
1317 | * Status block format vendor-dependent (3) | ||
1318 | */ | ||
1319 | return DID_ERROR << 16; | ||
1320 | } | ||
1289 | 1321 | ||
1290 | sense_data[0] = 0x70; | 1322 | sense_data[0] = 0x70 | sfmt | (sbp2_status[1] & 0x80); |
1291 | sense_data[1] = 0x0; | 1323 | sense_data[1] = 0x0; |
1292 | sense_data[2] = sbp2_status[1]; | 1324 | sense_data[2] = ((sbp2_status[1] << 1) & 0xe0) | (sbp2_status[1] & 0x0f); |
1293 | sense_data[3] = sbp2_status[4]; | 1325 | sense_data[3] = sbp2_status[4]; |
1294 | sense_data[4] = sbp2_status[5]; | 1326 | sense_data[4] = sbp2_status[5]; |
1295 | sense_data[5] = sbp2_status[6]; | 1327 | sense_data[5] = sbp2_status[6]; |
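This is the "Fix SCSI sense data mangling" change: the status block's sfmt field now selects between current (0x70) and deferred (0x71) fixed-format sense and rejects the reserved and vendor-dependent formats outright, the VALID bit is carried into byte 0, and byte 2 repositions the mark/EOM/ILI flags next to the sense key instead of copying the SBP-2 byte verbatim. A worked example, following the arithmetic in the hunk (input value chosen for illustration):

    /*
     * Example: sfmt = 0 (current errors), sbp2_status[1] = 0xc5
     *   sense_data[0] = 0x70 | 0 | (0xc5 & 0x80)            = 0xf0  (VALID set)
     *   sense_data[2] = ((0xc5 << 1) & 0xe0) | (0xc5 & 0x0f)
     *                 = 0x80 | 0x05                          = 0x85
     *     -> filemark flag set, sense key 0x5 (ILLEGAL REQUEST)
     */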
@@ -1325,7 +1357,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb, | |||
1325 | { | 1357 | { |
1326 | struct sbp2_command_orb *orb = | 1358 | struct sbp2_command_orb *orb = |
1327 | container_of(base_orb, struct sbp2_command_orb, base); | 1359 | container_of(base_orb, struct sbp2_command_orb, base); |
1328 | struct fw_device *device = target_device(orb->lu->tgt); | 1360 | struct fw_device *device = target_parent_device(orb->lu->tgt); |
1329 | int result; | 1361 | int result; |
1330 | 1362 | ||
1331 | if (status != NULL) { | 1363 | if (status != NULL) { |
@@ -1433,7 +1465,7 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost, | |||
1433 | struct scsi_cmnd *cmd) | 1465 | struct scsi_cmnd *cmd) |
1434 | { | 1466 | { |
1435 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | 1467 | struct sbp2_logical_unit *lu = cmd->device->hostdata; |
1436 | struct fw_device *device = target_device(lu->tgt); | 1468 | struct fw_device *device = target_parent_device(lu->tgt); |
1437 | struct sbp2_command_orb *orb; | 1469 | struct sbp2_command_orb *orb; |
1438 | int generation, retval = SCSI_MLQUEUE_HOST_BUSY; | 1470 | int generation, retval = SCSI_MLQUEUE_HOST_BUSY; |
1439 | 1471 | ||
@@ -1442,7 +1474,7 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost, | |||
1442 | * transfer direction not handled. | 1474 | * transfer direction not handled. |
1443 | */ | 1475 | */ |
1444 | if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) { | 1476 | if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) { |
1445 | fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n"); | 1477 | dev_err(lu_dev(lu), "cannot handle bidirectional command\n"); |
1446 | cmd->result = DID_ERROR << 16; | 1478 | cmd->result = DID_ERROR << 16; |
1447 | cmd->scsi_done(cmd); | 1479 | cmd->scsi_done(cmd); |
1448 | return 0; | 1480 | return 0; |
@@ -1450,7 +1482,7 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost, | |||
1450 | 1482 | ||
1451 | orb = kzalloc(sizeof(*orb), GFP_ATOMIC); | 1483 | orb = kzalloc(sizeof(*orb), GFP_ATOMIC); |
1452 | if (orb == NULL) { | 1484 | if (orb == NULL) { |
1453 | fw_notify("failed to alloc orb\n"); | 1485 | dev_notice(lu_dev(lu), "failed to alloc ORB\n"); |
1454 | return SCSI_MLQUEUE_HOST_BUSY; | 1486 | return SCSI_MLQUEUE_HOST_BUSY; |
1455 | } | 1487 | } |
1456 | 1488 | ||
@@ -1550,7 +1582,7 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd) | |||
1550 | { | 1582 | { |
1551 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | 1583 | struct sbp2_logical_unit *lu = cmd->device->hostdata; |
1552 | 1584 | ||
1553 | fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id); | 1585 | dev_notice(lu_dev(lu), "sbp2_scsi_abort\n"); |
1554 | sbp2_agent_reset(lu); | 1586 | sbp2_agent_reset(lu); |
1555 | sbp2_cancel_orbs(lu); | 1587 | sbp2_cancel_orbs(lu); |
1556 | 1588 | ||
@@ -1590,7 +1622,7 @@ static struct device_attribute *sbp2_scsi_sysfs_attrs[] = { | |||
1590 | static struct scsi_host_template scsi_driver_template = { | 1622 | static struct scsi_host_template scsi_driver_template = { |
1591 | .module = THIS_MODULE, | 1623 | .module = THIS_MODULE, |
1592 | .name = "SBP-2 IEEE-1394", | 1624 | .name = "SBP-2 IEEE-1394", |
1593 | .proc_name = sbp2_driver_name, | 1625 | .proc_name = "sbp2", |
1594 | .queuecommand = sbp2_scsi_queuecommand, | 1626 | .queuecommand = sbp2_scsi_queuecommand, |
1595 | .slave_alloc = sbp2_scsi_slave_alloc, | 1627 | .slave_alloc = sbp2_scsi_slave_alloc, |
1596 | .slave_configure = sbp2_scsi_slave_configure, | 1628 | .slave_configure = sbp2_scsi_slave_configure, |