author		Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
commit		8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree		a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/firewire
parent		406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'drivers/firewire')
-rw-r--r--	drivers/firewire/Kconfig		|	5
-rw-r--r--	drivers/firewire/core-card.c		|	37
-rw-r--r--	drivers/firewire/core-cdev.c		|	80
-rw-r--r--	drivers/firewire/core-device.c		|	185
-rw-r--r--	drivers/firewire/core-iso.c		|	89
-rw-r--r--	drivers/firewire/core-topology.c	|	18
-rw-r--r--	drivers/firewire/core-transaction.c	|	123
-rw-r--r--	drivers/firewire/core.h			|	43
-rw-r--r--	drivers/firewire/init_ohci1394_dma.c	|	4
-rw-r--r--	drivers/firewire/net.c			|	73
-rw-r--r--	drivers/firewire/nosy.c			|	28
-rw-r--r--	drivers/firewire/ohci.c			|	694
-rw-r--r--	drivers/firewire/sbp2.c			|	411
13 files changed, 662 insertions(+), 1128 deletions(-)
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 7224533e8ca..2be6f452077 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -28,6 +28,11 @@ config FIREWIRE_OHCI
 	  To compile this driver as a module, say M here: The module will be
 	  called firewire-ohci.
 
+config FIREWIRE_OHCI_DEBUG
+	bool
+	depends on FIREWIRE_OHCI
+	default y
+
 config FIREWIRE_SBP2
 	tristate "Storage devices (SBP-2 protocol)"
 	depends on FIREWIRE && SCSI
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 57ea7f46417..85661b060ed 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -37,22 +37,6 @@
 
 #include "core.h"
 
-#define define_fw_printk_level(func, kern_level) \
-void func(const struct fw_card *card, const char *fmt, ...) \
-{ \
-	struct va_format vaf; \
-	va_list args; \
- \
-	va_start(args, fmt); \
-	vaf.fmt = fmt; \
-	vaf.va = &args; \
-	printk(kern_level KBUILD_MODNAME " %s: %pV", \
-	       dev_name(card->device), &vaf); \
-	va_end(args); \
-}
-define_fw_printk_level(fw_err, KERN_ERR);
-define_fw_printk_level(fw_notice, KERN_NOTICE);
-
 int fw_compute_block_crc(__be32 *block)
 {
 	int length;
@@ -276,7 +260,7 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation)
 		fw_iso_resource_manage(card, generation, 1ULL << 31,
 				       &channel, &bandwidth, true);
 		if (channel != 31) {
-			fw_notice(card, "failed to allocate broadcast channel\n");
+			fw_notify("failed to allocate broadcast channel\n");
 			return;
 		}
 		card->broadcast_channel_allocated = true;
@@ -359,14 +343,14 @@ static void bm_work(struct work_struct *work)
 
 		if (!card->irm_node->link_on) {
 			new_root_id = local_id;
-			fw_notice(card, "%s, making local node (%02x) root\n",
+			fw_notify("%s, making local node (%02x) root.\n",
 				  "IRM has link off", new_root_id);
 			goto pick_me;
 		}
 
 		if (irm_is_1394_1995_only && !keep_this_irm) {
 			new_root_id = local_id;
-			fw_notice(card, "%s, making local node (%02x) root\n",
+			fw_notify("%s, making local node (%02x) root.\n",
 				  "IRM is not 1394a compliant", new_root_id);
 			goto pick_me;
 		}
@@ -421,8 +405,8 @@ static void bm_work(struct work_struct *work)
 			 * root, and thus, IRM.
 			 */
 			new_root_id = local_id;
-			fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
-				  fw_rcode_string(rcode), new_root_id);
+			fw_notify("%s, making local node (%02x) root.\n",
+				  "BM lock failed", new_root_id);
 			goto pick_me;
 		}
 	} else if (card->bm_generation != generation) {
@@ -494,8 +478,8 @@ static void bm_work(struct work_struct *work)
 	spin_unlock_irq(&card->lock);
 
 	if (do_reset) {
-		fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
-			  new_root_id, gap_count);
+		fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
+			  card->index, new_root_id, gap_count);
 		fw_send_phy_config(card, new_root_id, generation, gap_count);
 		reset_bus(card, true);
 		/* Will allocate broadcast channel after the reset. */
@@ -650,11 +634,6 @@ static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
 {
 }
 
-static int dummy_flush_iso_completions(struct fw_iso_context *ctx)
-{
-	return -ENODEV;
-}
-
 static const struct fw_card_driver dummy_driver_template = {
 	.read_phy_reg = dummy_read_phy_reg,
 	.update_phy_reg = dummy_update_phy_reg,
@@ -667,7 +646,6 @@ static const struct fw_card_driver dummy_driver_template = {
 	.set_iso_channels = dummy_set_iso_channels,
 	.queue_iso = dummy_queue_iso,
 	.flush_queue_iso = dummy_flush_queue_iso,
-	.flush_iso_completions = dummy_flush_iso_completions,
 };
 
 void fw_card_release(struct kref *kref)
@@ -676,7 +654,6 @@ void fw_card_release(struct kref *kref)
 
 	complete(&card->done);
 }
-EXPORT_SYMBOL_GPL(fw_card_release);
 
 void fw_core_remove_card(struct fw_card *card)
 {
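A note on the logging change above: the removed define_fw_printk_level() macro generated card-aware helpers (fw_err(), fw_notice()) that prefix each message with dev_name(card->device). The fw_notify()/fw_error() calls this patch falls back to were plain printk() wrappers; a minimal sketch of those older helpers as they appeared in the pre-3.4 core.h (reconstructed from memory, not part of this diff):

/* Sketch of the older helpers; assumed pre-3.4 core.h definitions. */
#define fw_notify(s, args...)	printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
#define fw_error(s, args...)	printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)

Losing the per-card prefix is why the "phy config:" message above regains an explicit "card %d" argument.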
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index f8d22872d75..4799393247c 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -22,7 +22,6 @@
 #include <linux/compat.h>
 #include <linux/delay.h>
 #include <linux/device.h>
-#include <linux/dma-mapping.h>
 #include <linux/errno.h>
 #include <linux/firewire.h>
 #include <linux/firewire-cdev.h>
@@ -45,13 +44,14 @@
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 
+#include <asm/system.h>
 
 #include "core.h"
 
 /*
  * ABI version history is documented in linux/firewire-cdev.h.
  */
-#define FW_CDEV_KERNEL_VERSION 5
+#define FW_CDEV_KERNEL_VERSION 4
 #define FW_CDEV_VERSION_EVENT_REQUEST2 4
 #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
 
@@ -71,7 +71,6 @@ struct client {
 	u64 iso_closure;
 	struct fw_iso_buffer buffer;
 	unsigned long vm_start;
-	bool buffer_is_mapped;
 
 	struct list_head phy_receiver_link;
 	u64 phy_receiver_closure;
@@ -390,7 +389,7 @@ static void queue_bus_reset_event(struct client *client)
 
 	e = kzalloc(sizeof(*e), GFP_KERNEL);
 	if (e == NULL) {
-		fw_notice(client->device->card, "out of memory when allocating event\n");
+		fw_notify("Out of memory when allocating event\n");
 		return;
 	}
 
@@ -439,7 +438,6 @@ union ioctl_arg {
 	struct fw_cdev_send_phy_packet send_phy_packet;
 	struct fw_cdev_receive_phy_packets receive_phy_packets;
 	struct fw_cdev_set_iso_channels set_iso_channels;
-	struct fw_cdev_flush_iso flush_iso;
 };
 
 static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
@@ -473,8 +471,8 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
 	client->bus_reset_closure = a->bus_reset_closure;
 	if (a->bus_reset != 0) {
 		fill_bus_reset_event(&bus_reset, client);
-		/* unaligned size of bus_reset is 36 bytes */
-		ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
+		ret = copy_to_user(u64_to_uptr(a->bus_reset),
+				   &bus_reset, sizeof(bus_reset));
 	}
 	if (ret == 0 && list_empty(&client->link))
 		list_add_tail(&client->link, &client->device->client_list);
@@ -693,7 +691,7 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
 	r = kmalloc(sizeof(*r), GFP_ATOMIC);
 	e = kmalloc(sizeof(*e), GFP_ATOMIC);
 	if (r == NULL || e == NULL) {
-		fw_notice(card, "out of memory when allocating event\n");
+		fw_notify("Out of memory when allocating event\n");
 		goto failed;
 	}
 	r->card = card;
@@ -930,7 +928,7 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
 
 	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
 	if (e == NULL) {
-		fw_notice(context->card, "out of memory when allocating event\n");
+		fw_notify("Out of memory when allocating event\n");
 		return;
 	}
 	e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
@@ -950,7 +948,7 @@ static void iso_mc_callback(struct fw_iso_context *context,
 
 	e = kmalloc(sizeof(*e), GFP_ATOMIC);
 	if (e == NULL) {
-		fw_notice(context->card, "out of memory when allocating event\n");
+		fw_notify("Out of memory when allocating event\n");
 		return;
 	}
 	e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
@@ -961,20 +959,11 @@ static void iso_mc_callback(struct fw_iso_context *context,
 			    sizeof(e->interrupt), NULL, 0);
 }
 
-static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
-{
-	if (context->type == FW_ISO_CONTEXT_TRANSMIT)
-		return DMA_TO_DEVICE;
-	else
-		return DMA_FROM_DEVICE;
-}
-
 static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 {
 	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
 	struct fw_iso_context *context;
 	fw_iso_callback_t cb;
-	int ret;
 
 	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
 		     FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE ||
@@ -1015,21 +1004,8 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 	if (client->iso_context != NULL) {
 		spin_unlock_irq(&client->lock);
 		fw_iso_context_destroy(context);
-
 		return -EBUSY;
 	}
-	if (!client->buffer_is_mapped) {
-		ret = fw_iso_buffer_map_dma(&client->buffer,
-					    client->device->card,
-					    iso_dma_direction(context));
-		if (ret < 0) {
-			spin_unlock_irq(&client->lock);
-			fw_iso_context_destroy(context);
-
-			return ret;
-		}
-		client->buffer_is_mapped = true;
-	}
 	client->iso_closure = a->closure;
 	client->iso_context = context;
 	spin_unlock_irq(&client->lock);
@@ -1192,16 +1168,6 @@ static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
 	return fw_iso_context_stop(client->iso_context);
 }
 
-static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
-{
-	struct fw_cdev_flush_iso *a = &arg->flush_iso;
-
-	if (client->iso_context == NULL || a->handle != 0)
-		return -EINVAL;
-
-	return fw_iso_context_flush_completions(client->iso_context);
-}
-
 static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
 {
 	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
@@ -1582,7 +1548,7 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
 	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
 		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
 		if (e == NULL) {
-			fw_notice(card, "out of memory when allocating event\n");
+			fw_notify("Out of memory when allocating event\n");
 			break;
 		}
 		e->phy_packet.closure = client->phy_receiver_closure;
@@ -1623,7 +1589,6 @@ static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
 	[0x15] = ioctl_send_phy_packet,
 	[0x16] = ioctl_receive_phy_packets,
 	[0x17] = ioctl_set_iso_channels,
-	[0x18] = ioctl_flush_iso,
 };
 
 static int dispatch_ioctl(struct client *client,
@@ -1675,6 +1640,7 @@ static long fw_device_op_compat_ioctl(struct file *file,
 static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct client *client = file->private_data;
+	enum dma_data_direction direction;
 	unsigned long size;
 	int page_count, ret;
 
@@ -1697,28 +1663,20 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
 	if (size & ~PAGE_MASK)
 		return -EINVAL;
 
-	ret = fw_iso_buffer_alloc(&client->buffer, page_count);
-	if (ret < 0)
-		return ret;
+	if (vma->vm_flags & VM_WRITE)
+		direction = DMA_TO_DEVICE;
+	else
+		direction = DMA_FROM_DEVICE;
 
-	spin_lock_irq(&client->lock);
-	if (client->iso_context) {
-		ret = fw_iso_buffer_map_dma(&client->buffer,
-					    client->device->card,
-					    iso_dma_direction(client->iso_context));
-		client->buffer_is_mapped = (ret == 0);
-	}
-	spin_unlock_irq(&client->lock);
+	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
+				 page_count, direction);
 	if (ret < 0)
-		goto fail;
+		return ret;
 
-	ret = fw_iso_buffer_map_vma(&client->buffer, vma);
+	ret = fw_iso_buffer_map(&client->buffer, vma);
 	if (ret < 0)
-		goto fail;
+		fw_iso_buffer_destroy(&client->buffer, client->device->card);
 
-	return 0;
- fail:
-	fw_iso_buffer_destroy(&client->buffer, client->device->card);
 	return ret;
 }
 
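A note on the mmap hunk above: with the reverted code, the DMA direction is fixed at mmap time from the VMA's protection bits, rather than deferred until an isochronous context with a known type exists. A hypothetical user-space sketch of what that implies (device path and buffer size are illustrative, not taken from this patch):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/fw0", O_RDWR);	/* typical firewire-cdev node */
	if (fd < 0)
		return 1;

	/*
	 * PROT_WRITE sets VM_WRITE, which the reverted mmap handler maps
	 * to DMA_TO_DEVICE; a buffer meant for a transmit context must
	 * therefore be mapped writable, a receive buffer read-only.
	 */
	void *buf = mmap(NULL, 64 * 4096, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED)
		return 1;

	/* ... FW_CDEV_IOC_CREATE_ISO_CONTEXT, queue packets, start ... */

	munmap(buf, 64 * 4096);
	close(fd);
	return 0;
}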
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 3873d535b28..f3b890da1e8 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -32,7 +32,6 @@
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
-#include <linux/random.h>
 #include <linux/rwsem.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
@@ -41,6 +40,7 @@
 
 #include <linux/atomic.h>
 #include <asm/byteorder.h>
+#include <asm/system.h>
 
 #include "core.h"
 
@@ -399,14 +399,6 @@ static ssize_t guid_show(struct device *dev,
 	return ret;
 }
 
-static ssize_t is_local_show(struct device *dev,
-			     struct device_attribute *attr, char *buf)
-{
-	struct fw_device *device = fw_device(dev);
-
-	return sprintf(buf, "%u\n", device->is_local);
-}
-
 static int units_sprintf(char *buf, const u32 *directory)
 {
 	struct fw_csr_iterator ci;
@@ -456,7 +448,6 @@ static ssize_t units_show(struct device *dev,
 static struct device_attribute fw_device_attributes[] = {
 	__ATTR_RO(config_rom),
 	__ATTR_RO(guid),
-	__ATTR_RO(is_local),
 	__ATTR_RO(units),
 	__ATTR_NULL,
 };
@@ -491,15 +482,13 @@ static int read_rom(struct fw_device *device,
  * generation changes under us, read_config_rom will fail and get retried.
  * It's better to start all over in this case because the node from which we
  * are reading the ROM may have changed the ROM during the reset.
- * Returns either a result code or a negative error code.
  */
 static int read_config_rom(struct fw_device *device, int generation)
 {
-	struct fw_card *card = device->card;
 	const u32 *old_rom, *new_rom;
 	u32 *rom, *stack;
 	u32 sp, key;
-	int i, end, length, ret;
+	int i, end, length, ret = -1;
 
 	rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE +
 		      sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL);
@@ -513,21 +502,18 @@ static int read_config_rom(struct fw_device *device, int generation)
 
 	/* First read the bus info block. */
 	for (i = 0; i < 5; i++) {
-		ret = read_rom(device, generation, i, &rom[i]);
-		if (ret != RCODE_COMPLETE)
+		if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
 			goto out;
 		/*
-		 * As per IEEE1212 7.2, during initialization, devices can
+		 * As per IEEE1212 7.2, during power-up, devices can
		 * reply with a 0 for the first quadlet of the config
 		 * rom to indicate that they are booting (for example,
 		 * if the firmware is on the disk of a external
 		 * harddisk). In that case we just fail, and the
 		 * retry mechanism will try again later.
 		 */
-		if (i == 0 && rom[i] == 0) {
-			ret = RCODE_BUSY;
+		if (i == 0 && rom[i] == 0)
 			goto out;
-		}
 	}
 
 	device->max_speed = device->node->max_speed;
@@ -543,12 +529,12 @@ static int read_config_rom(struct fw_device *device, int generation)
 	 */
 	if ((rom[2] & 0x7) < device->max_speed ||
 	    device->max_speed == SCODE_BETA ||
-	    card->beta_repeaters_present) {
+	    device->card->beta_repeaters_present) {
 		u32 dummy;
 
 		/* for S1600 and S3200 */
 		if (device->max_speed == SCODE_BETA)
-			device->max_speed = card->link_speed;
+			device->max_speed = device->card->link_speed;
 
 		while (device->max_speed > SCODE_100) {
 			if (read_rom(device, generation, 0, &dummy) ==
@@ -577,14 +563,11 @@ static int read_config_rom(struct fw_device *device, int generation)
 		 */
 		key = stack[--sp];
 		i = key & 0xffffff;
-		if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE)) {
-			ret = -ENXIO;
+		if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE))
 			goto out;
-		}
 
 		/* Read header quadlet for the block to get the length. */
-		ret = read_rom(device, generation, i, &rom[i]);
-		if (ret != RCODE_COMPLETE)
+		if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
 			goto out;
 		end = i + (rom[i] >> 16) + 1;
 		if (end > MAX_CONFIG_ROM_SIZE) {
@@ -593,9 +576,9 @@ static int read_config_rom(struct fw_device *device, int generation)
 			 * a firmware bug. Ignore this whole block, i.e.
 			 * simply set a fake block length of 0.
 			 */
-			fw_err(card, "skipped invalid ROM block %x at %llx\n",
-			       rom[i],
-			       i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
+			fw_error("skipped invalid ROM block %x at %llx\n",
+				 rom[i],
+				 i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
 			rom[i] = 0;
 			end = i;
 		}
@@ -607,8 +590,8 @@ static int read_config_rom(struct fw_device *device, int generation)
 		 * it references another block, and push it in that case.
 		 */
 		for (; i < end; i++) {
-			ret = read_rom(device, generation, i, &rom[i]);
-			if (ret != RCODE_COMPLETE)
+			if (read_rom(device, generation, i, &rom[i]) !=
+			    RCODE_COMPLETE)
 				goto out;
 
 			if ((key >> 30) != 3 || (rom[i] >> 30) < 2)
@@ -621,10 +604,9 @@ static int read_config_rom(struct fw_device *device, int generation)
 			 * the ROM don't have to check offsets all the time.
 			 */
 			if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) {
-				fw_err(card,
-				       "skipped unsupported ROM entry %x at %llx\n",
-				       rom[i],
-				       i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
+				fw_error("skipped unsupported ROM entry %x at %llx\n",
+					 rom[i],
+					 i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
 				rom[i] = 0;
 				continue;
 			}
@@ -636,10 +618,8 @@ static int read_config_rom(struct fw_device *device, int generation)
 
 	old_rom = device->config_rom;
 	new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
-	if (new_rom == NULL) {
-		ret = -ENOMEM;
+	if (new_rom == NULL)
 		goto out;
-	}
 
 	down_write(&fw_device_rwsem);
 	device->config_rom = new_rom;
@@ -647,7 +627,7 @@ static int read_config_rom(struct fw_device *device, int generation)
 	up_write(&fw_device_rwsem);
 
 	kfree(old_rom);
-	ret = RCODE_COMPLETE;
+	ret = 0;
 	device->max_rec = rom[2] >> 12 & 0xf;
 	device->cmc = rom[2] >> 30 & 1;
 	device->irmc = rom[2] >> 31 & 1;
@@ -661,7 +641,6 @@ static void fw_unit_release(struct device *dev)
 {
 	struct fw_unit *unit = fw_unit(dev);
 
-	fw_device_put(fw_parent_device(unit));
 	kfree(unit);
 }
 
@@ -693,7 +672,7 @@ static void create_units(struct fw_device *device)
 		 */
 		unit = kzalloc(sizeof(*unit), GFP_KERNEL);
 		if (unit == NULL) {
-			fw_err(device->card, "out of memory for unit\n");
+			fw_error("failed to allocate memory for unit\n");
 			continue;
 		}
 
@@ -713,7 +692,6 @@ static void create_units(struct fw_device *device)
 		if (device_register(&unit->device) < 0)
 			goto skip_unit;
 
-		fw_device_get(device);
 		continue;
 
 	skip_unit:
@@ -895,7 +873,7 @@ static int lookup_existing_device(struct device *dev, void *data)
 		smp_wmb();  /* update node_id before generation */
 		old->generation = card->generation;
 		old->config_rom_retries = 0;
-		fw_notice(card, "rediscovered device %s\n", dev_name(dev));
+		fw_notify("rediscovered device %s\n", dev_name(dev));
 
 		PREPARE_DELAYED_WORK(&old->work, fw_device_update);
 		fw_schedule_device_work(old, 0);
@@ -976,7 +954,6 @@ static void fw_device_init(struct work_struct *work)
 {
 	struct fw_device *device =
 		container_of(work, struct fw_device, work.work);
-	struct fw_card *card = device->card;
 	struct device *revived_dev;
 	int minor, ret;
 
@@ -986,25 +963,23 @@ static void fw_device_init(struct work_struct *work)
 	 * device.
 	 */
 
-	ret = read_config_rom(device, device->generation);
-	if (ret != RCODE_COMPLETE) {
+	if (read_config_rom(device, device->generation) < 0) {
 		if (device->config_rom_retries < MAX_RETRIES &&
 		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
 			device->config_rom_retries++;
 			fw_schedule_device_work(device, RETRY_DELAY);
 		} else {
 			if (device->node->link_on)
-				fw_notice(card, "giving up on node %x: reading config rom failed: %s\n",
-					  device->node_id,
-					  fw_rcode_string(ret));
-			if (device->node == card->root_node)
-				fw_schedule_bm_work(card, 0);
+				fw_notify("giving up on config rom for node id %x\n",
+					  device->node_id);
+			if (device->node == device->card->root_node)
+				fw_schedule_bm_work(device->card, 0);
 			fw_device_release(&device->device);
 		}
 		return;
 	}
 
-	revived_dev = device_find_child(card->device,
+	revived_dev = device_find_child(device->card->device,
 					device, lookup_existing_device);
 	if (revived_dev) {
 		put_device(revived_dev);
@@ -1027,7 +1002,7 @@ static void fw_device_init(struct work_struct *work)
 
 	device->device.bus = &fw_bus_type;
 	device->device.type = &fw_device_type;
-	device->device.parent = card->device;
+	device->device.parent = device->card->device;
 	device->device.devt = MKDEV(fw_cdev_major, minor);
 	dev_set_name(&device->device, "fw%d", minor);
 
@@ -1039,7 +1014,7 @@ static void fw_device_init(struct work_struct *work)
 			   &device->attribute_group);
 
 	if (device_add(&device->device)) {
-		fw_err(card, "failed to add device\n");
+		fw_error("Failed to add device.\n");
 		goto error_with_cdev;
 	}
 
@@ -1060,15 +1035,21 @@ static void fw_device_init(struct work_struct *work)
 		PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
 		fw_schedule_device_work(device, SHUTDOWN_DELAY);
 	} else {
-		fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
-			  dev_name(&device->device),
-			  device->config_rom[3], device->config_rom[4],
-			  1 << device->max_speed);
+		if (device->config_rom_retries)
+			fw_notify("created device %s: GUID %08x%08x, S%d00, "
+				  "%d config ROM retries\n",
+				  dev_name(&device->device),
+				  device->config_rom[3], device->config_rom[4],
+				  1 << device->max_speed,
+				  device->config_rom_retries);
+		else
+			fw_notify("created device %s: GUID %08x%08x, S%d00\n",
+				  dev_name(&device->device),
+				  device->config_rom[3], device->config_rom[4],
+				  1 << device->max_speed);
 		device->config_rom_retries = 0;
 
 		set_broadcast_channel(device, device->generation);
-
-		add_device_randomness(&device->config_rom[3], 8);
 	}
 
 	/*
@@ -1077,8 +1058,8 @@ static void fw_device_init(struct work_struct *work)
 	 * just end up running the IRM work a couple of extra times -
 	 * pretty harmless.
 	 */
-	if (device->node == card->root_node)
-		fw_schedule_bm_work(card, 0);
+	if (device->node == device->card->root_node)
+		fw_schedule_bm_work(device->card, 0);
 
 	return;
 
@@ -1092,30 +1073,31 @@ static void fw_device_init(struct work_struct *work)
 	put_device(&device->device);	/* our reference */
 }
 
+enum {
+	REREAD_BIB_ERROR,
+	REREAD_BIB_GONE,
+	REREAD_BIB_UNCHANGED,
+	REREAD_BIB_CHANGED,
+};
+
 /* Reread and compare bus info block and header of root directory */
-static int reread_config_rom(struct fw_device *device, int generation,
-			     bool *changed)
+static int reread_config_rom(struct fw_device *device, int generation)
 {
 	u32 q;
-	int i, rcode;
+	int i;
 
 	for (i = 0; i < 6; i++) {
-		rcode = read_rom(device, generation, i, &q);
-		if (rcode != RCODE_COMPLETE)
-			return rcode;
+		if (read_rom(device, generation, i, &q) != RCODE_COMPLETE)
+			return REREAD_BIB_ERROR;
 
 		if (i == 0 && q == 0)
-			/* inaccessible (see read_config_rom); retry later */
-			return RCODE_BUSY;
+			return REREAD_BIB_GONE;
 
-		if (q != device->config_rom[i]) {
-			*changed = true;
-			return RCODE_COMPLETE;
-		}
+		if (q != device->config_rom[i])
+			return REREAD_BIB_CHANGED;
 	}
 
-	*changed = false;
-	return RCODE_COMPLETE;
+	return REREAD_BIB_UNCHANGED;
 }
 
 static void fw_device_refresh(struct work_struct *work)
@@ -1123,14 +1105,23 @@ static void fw_device_refresh(struct work_struct *work)
 	struct fw_device *device =
 		container_of(work, struct fw_device, work.work);
 	struct fw_card *card = device->card;
-	int ret, node_id = device->node_id;
-	bool changed;
+	int node_id = device->node_id;
 
-	ret = reread_config_rom(device, device->generation, &changed);
-	if (ret != RCODE_COMPLETE)
-		goto failed_config_rom;
+	switch (reread_config_rom(device, device->generation)) {
+	case REREAD_BIB_ERROR:
+		if (device->config_rom_retries < MAX_RETRIES / 2 &&
+		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
+			device->config_rom_retries++;
+			fw_schedule_device_work(device, RETRY_DELAY / 2);
 
-	if (!changed) {
+			return;
+		}
+		goto give_up;
+
+	case REREAD_BIB_GONE:
+		goto gone;
+
+	case REREAD_BIB_UNCHANGED:
 		if (atomic_cmpxchg(&device->state,
 				   FW_DEVICE_INITIALIZING,
 				   FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
@@ -1139,6 +1130,9 @@ static void fw_device_refresh(struct work_struct *work)
 		fw_device_update(work);
 		device->config_rom_retries = 0;
 		goto out;
+
+	case REREAD_BIB_CHANGED:
+		break;
 	}
 
 	/*
@@ -1147,9 +1141,16 @@ static void fw_device_refresh(struct work_struct *work)
 	 */
 	device_for_each_child(&device->device, NULL, shutdown_unit);
 
-	ret = read_config_rom(device, device->generation);
-	if (ret != RCODE_COMPLETE)
-		goto failed_config_rom;
+	if (read_config_rom(device, device->generation) < 0) {
+		if (device->config_rom_retries < MAX_RETRIES &&
+		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
+			device->config_rom_retries++;
+			fw_schedule_device_work(device, RETRY_DELAY);
+
+			return;
+		}
+		goto give_up;
+	}
 
 	fw_device_cdev_update(device);
 	create_units(device);
@@ -1162,20 +1163,12 @@ static void fw_device_refresh(struct work_struct *work)
 			   FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
 		goto gone;
 
-	fw_notice(card, "refreshed device %s\n", dev_name(&device->device));
+	fw_notify("refreshed device %s\n", dev_name(&device->device));
 	device->config_rom_retries = 0;
 	goto out;
 
-failed_config_rom:
-	if (device->config_rom_retries < MAX_RETRIES &&
-	    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
-		device->config_rom_retries++;
-		fw_schedule_device_work(device, RETRY_DELAY);
-		return;
-	}
-
-	fw_notice(card, "giving up on refresh of device %s: %s\n",
-		  dev_name(&device->device), fw_rcode_string(ret));
+give_up:
+	fw_notify("giving up on refresh of device %s\n", dev_name(&device->device));
 gone:
	atomic_set(&device->state, FW_DEVICE_GONE);
 	PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
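For orientation, read_config_rom() above reads the bus info block (the first five quadlets) before walking the root directory, and the fields it extracts at the end (max_rec, cmc, irmc, the GUID halves printed by the "created device" message) all come from those quadlets. An illustrative layout per IEEE 1212/IEEE 1394 follows; the struct and field names are descriptive only, since the driver itself just indexes a raw u32 rom[] array:

#include <linux/types.h>

/* Illustrative layout of the bus info block; not a driver structure. */
struct bus_info_block {
	u32 crc_info;	/* rom[0]: info_length, crc_length, CRC */
	u32 bus_name;	/* rom[1]: 0x31333934, ASCII "1394" */
	u32 caps;	/* rom[2]: irmc bit 31, cmc bit 30,
			 * max_rec bits 15..12, link speed bits 2..0 */
	u32 guid_hi;	/* rom[3]: upper half of the 64-bit GUID */
	u32 guid_lo;	/* rom[4]: lower half of the 64-bit GUID */
};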
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 38c0aa60b2c..57c3973093a 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -29,7 +29,6 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
-#include <linux/export.h>
 
 #include <asm/byteorder.h>
 
@@ -39,73 +38,52 @@
 * Isochronous DMA context management
 */
 
-int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+		       int page_count, enum dma_data_direction direction)
 {
-	int i;
+	int i, j;
+	dma_addr_t address;
+
+	buffer->page_count = page_count;
+	buffer->direction = direction;
 
-	buffer->page_count = 0;
-	buffer->page_count_mapped = 0;
 	buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
 				GFP_KERNEL);
 	if (buffer->pages == NULL)
-		return -ENOMEM;
+		goto out;
 
-	for (i = 0; i < page_count; i++) {
+	for (i = 0; i < buffer->page_count; i++) {
 		buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
 		if (buffer->pages[i] == NULL)
-			break;
-	}
-	buffer->page_count = i;
-	if (i < page_count) {
-		fw_iso_buffer_destroy(buffer, NULL);
-		return -ENOMEM;
-	}
+			goto out_pages;
 
-	return 0;
-}
-
-int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
-			  enum dma_data_direction direction)
-{
-	dma_addr_t address;
-	int i;
-
-	buffer->direction = direction;
-
-	for (i = 0; i < buffer->page_count; i++) {
 		address = dma_map_page(card->device, buffer->pages[i],
 				       0, PAGE_SIZE, direction);
-		if (dma_mapping_error(card->device, address))
-			break;
-
+		if (dma_mapping_error(card->device, address)) {
+			__free_page(buffer->pages[i]);
+			goto out_pages;
+		}
 		set_page_private(buffer->pages[i], address);
 	}
-	buffer->page_count_mapped = i;
-	if (i < buffer->page_count)
-		return -ENOMEM;
 
 	return 0;
-}
-
-int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
-		       int page_count, enum dma_data_direction direction)
-{
-	int ret;
 
-	ret = fw_iso_buffer_alloc(buffer, page_count);
-	if (ret < 0)
-		return ret;
-
-	ret = fw_iso_buffer_map_dma(buffer, card, direction);
-	if (ret < 0)
-		fw_iso_buffer_destroy(buffer, card);
+ out_pages:
+	for (j = 0; j < i; j++) {
+		address = page_private(buffer->pages[j]);
+		dma_unmap_page(card->device, address,
+			       PAGE_SIZE, direction);
+		__free_page(buffer->pages[j]);
+	}
+	kfree(buffer->pages);
+ out:
+	buffer->pages = NULL;
 
-	return ret;
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(fw_iso_buffer_init);
 
-int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
-			  struct vm_area_struct *vma)
+int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
 {
 	unsigned long uaddr;
 	int i, err;
@@ -128,25 +106,22 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
 	int i;
 	dma_addr_t address;
 
-	for (i = 0; i < buffer->page_count_mapped; i++) {
+	for (i = 0; i < buffer->page_count; i++) {
 		address = page_private(buffer->pages[i]);
 		dma_unmap_page(card->device, address,
 			       PAGE_SIZE, buffer->direction);
-	}
-	for (i = 0; i < buffer->page_count; i++)
 		__free_page(buffer->pages[i]);
+	}
 
 	kfree(buffer->pages);
 	buffer->pages = NULL;
-	buffer->page_count = 0;
-	buffer->page_count_mapped = 0;
 }
 EXPORT_SYMBOL(fw_iso_buffer_destroy);
 
 /* Convert DMA address to offset into virtually contiguous buffer. */
 size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
 {
-	size_t i;
+	int i;
 	dma_addr_t address;
 	ssize_t offset;
 
@@ -216,12 +191,6 @@ void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
 }
 EXPORT_SYMBOL(fw_iso_context_queue_flush);
 
-int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
-{
-	return ctx->card->driver->flush_iso_completions(ctx);
-}
-EXPORT_SYMBOL(fw_iso_context_flush_completions);
-
 int fw_iso_context_stop(struct fw_iso_context *ctx)
 {
 	return ctx->card->driver->stop_iso(ctx);
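Both the split (upstream) and combined (reverted) allocation paths above rely on the same bookkeeping trick: each page's DMA address is stashed in the page's private field with set_page_private(), so teardown and completion handling can recover it without a separate array. A simplified sketch of how fw_iso_buffer_lookup(), left in place by this patch, consumes that field (details trimmed; treat as illustrative, not the exact driver code):

#include <linux/firewire.h>
#include <linux/mm.h>

static size_t iso_buffer_lookup(struct fw_iso_buffer *buffer,
				dma_addr_t completed)
{
	int i;

	for (i = 0; i < buffer->page_count; i++) {
		dma_addr_t address = page_private(buffer->pages[i]);
		ssize_t offset = (ssize_t)completed - (ssize_t)address;

		/* 'completed' points just past the last byte filled. */
		if (offset > 0 && offset <= PAGE_SIZE)
			return (i << PAGE_SHIFT) + offset;
	}

	return 0;
}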
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 0de83508f32..94d3b494ddf 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -31,6 +31,7 @@
 
 #include <linux/atomic.h>
 #include <asm/byteorder.h>
+#include <asm/system.h>
 
 #include "core.h"
 
@@ -204,19 +205,19 @@ static struct fw_node *build_tree(struct fw_card *card,
 		next_sid = count_ports(sid, &port_count, &child_port_count);
 
 		if (next_sid == NULL) {
-			fw_err(card, "inconsistent extended self IDs\n");
+			fw_error("Inconsistent extended self IDs.\n");
 			return NULL;
 		}
 
 		q = *sid;
 		if (phy_id != SELF_ID_PHY_ID(q)) {
-			fw_err(card, "PHY ID mismatch in self ID: %d != %d\n",
-			       phy_id, SELF_ID_PHY_ID(q));
+			fw_error("PHY ID mismatch in self ID: %d != %d.\n",
+				 phy_id, SELF_ID_PHY_ID(q));
 			return NULL;
 		}
 
 		if (child_port_count > stack_depth) {
-			fw_err(card, "topology stack underflow\n");
+			fw_error("Topology stack underflow\n");
 			return NULL;
 		}
 
@@ -234,7 +235,7 @@ static struct fw_node *build_tree(struct fw_card *card,
 
 		node = fw_node_create(q, port_count, card->color);
 		if (node == NULL) {
-			fw_err(card, "out of memory while building topology\n");
+			fw_error("Out of memory while building topology.\n");
 			return NULL;
 		}
 
@@ -283,8 +284,8 @@ static struct fw_node *build_tree(struct fw_card *card,
 		 */
 		if ((next_sid == end && parent_count != 0) ||
 		    (next_sid < end && parent_count != 1)) {
-			fw_err(card, "parent port inconsistency for node %d: "
-			       "parent_count=%d\n", phy_id, parent_count);
+			fw_error("Parent port inconsistency for node %d: "
+				 "parent_count=%d\n", phy_id, parent_count);
 			return NULL;
 		}
 
@@ -529,6 +530,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
 	 */
 	if (!is_next_generation(generation, card->generation) &&
 	    card->local_node != NULL) {
+		fw_notify("skipped bus generations, destroying all nodes\n");
 		fw_destroy_nodes(card);
 		card->bm_retries = 0;
 	}
@@ -555,7 +557,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
 	card->color++;
 
 	if (local_node == NULL) {
-		fw_err(card, "topology build failed\n");
+		fw_error("topology build failed\n");
 		/* FIXME: We need to issue a bus reset in this case. */
 	} else if (card->local_node == NULL) {
 		card->local_node = local_node;
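build_tree() above sanity-checks each self-ID quadlet with accessors such as SELF_ID_PHY_ID(). For reference, a sketch of the bit fields of a first (non-extended) self-ID packet as the driver decodes them; this macro set is reconstructed from memory of core-topology.c and should be treated as illustrative:

/* Self-ID quadlet fields, IEEE 1394 style (illustrative). */
#define SELF_ID_PHY_ID(q)	(((q) >> 24) & 0x3f)	/* sender's PHY number */
#define SELF_ID_EXTENDED(q)	(((q) >> 23) & 0x01)	/* more quadlets follow */
#define SELF_ID_LINK_ON(q)	(((q) >> 22) & 0x01)
#define SELF_ID_GAP_COUNT(q)	(((q) >> 16) & 0x3f)
#define SELF_ID_PHY_SPEED(q)	(((q) >> 14) & 0x03)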
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 28a94c7ec6e..334b82a3542 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -31,7 +31,6 @@
31#include <linux/kernel.h> 31#include <linux/kernel.h>
32#include <linux/list.h> 32#include <linux/list.h>
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/rculist.h>
35#include <linux/slab.h> 34#include <linux/slab.h>
36#include <linux/spinlock.h> 35#include <linux/spinlock.h>
37#include <linux/string.h> 36#include <linux/string.h>
@@ -490,7 +489,7 @@ static struct fw_address_handler *lookup_overlapping_address_handler(
490{ 489{
491 struct fw_address_handler *handler; 490 struct fw_address_handler *handler;
492 491
493 list_for_each_entry_rcu(handler, list, link) { 492 list_for_each_entry(handler, list, link) {
494 if (handler->offset < offset + length && 493 if (handler->offset < offset + length &&
495 offset < handler->offset + handler->length) 494 offset < handler->offset + handler->length)
496 return handler; 495 return handler;
@@ -511,7 +510,7 @@ static struct fw_address_handler *lookup_enclosing_address_handler(
511{ 510{
512 struct fw_address_handler *handler; 511 struct fw_address_handler *handler;
513 512
514 list_for_each_entry_rcu(handler, list, link) { 513 list_for_each_entry(handler, list, link) {
515 if (is_enclosing_handler(handler, offset, length)) 514 if (is_enclosing_handler(handler, offset, length))
516 return handler; 515 return handler;
517 } 516 }
@@ -519,17 +518,16 @@ static struct fw_address_handler *lookup_enclosing_address_handler(
519 return NULL; 518 return NULL;
520} 519}
521 520
522static DEFINE_SPINLOCK(address_handler_list_lock); 521static DEFINE_SPINLOCK(address_handler_lock);
523static LIST_HEAD(address_handler_list); 522static LIST_HEAD(address_handler_list);
524 523
525const struct fw_address_region fw_high_memory_region = 524const struct fw_address_region fw_high_memory_region =
526 { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, }; 525 { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, };
527EXPORT_SYMBOL(fw_high_memory_region); 526EXPORT_SYMBOL(fw_high_memory_region);
528 527
529static const struct fw_address_region low_memory_region =
530 { .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
531
532#if 0 528#if 0
529const struct fw_address_region fw_low_memory_region =
530 { .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
533const struct fw_address_region fw_private_region = 531const struct fw_address_region fw_private_region =
534 { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, }; 532 { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
535const struct fw_address_region fw_csr_region = 533const struct fw_address_region fw_csr_region =
@@ -556,7 +554,6 @@ static bool is_in_fcp_region(u64 offset, size_t length)
556 * the specified callback is invoked. The parameters passed to the callback 554 * the specified callback is invoked. The parameters passed to the callback
557 * give the details of the particular request. 555 * give the details of the particular request.
558 * 556 *
559 * To be called in process context.
560 * Return value: 0 on success, non-zero otherwise. 557 * Return value: 0 on success, non-zero otherwise.
561 * 558 *
562 * The start offset of the handler's address region is determined by 559 * The start offset of the handler's address region is determined by
@@ -568,6 +565,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
568 const struct fw_address_region *region) 565 const struct fw_address_region *region)
569{ 566{
570 struct fw_address_handler *other; 567 struct fw_address_handler *other;
568 unsigned long flags;
571 int ret = -EBUSY; 569 int ret = -EBUSY;
572 570
573 if (region->start & 0xffff000000000003ULL || 571 if (region->start & 0xffff000000000003ULL ||
@@ -577,7 +575,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
577 handler->length == 0) 575 handler->length == 0)
578 return -EINVAL; 576 return -EINVAL;
579 577
580 spin_lock(&address_handler_list_lock); 578 spin_lock_irqsave(&address_handler_lock, flags);
581 579
582 handler->offset = region->start; 580 handler->offset = region->start;
583 while (handler->offset + handler->length <= region->end) { 581 while (handler->offset + handler->length <= region->end) {
@@ -590,13 +588,13 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
590 if (other != NULL) { 588 if (other != NULL) {
591 handler->offset += other->length; 589 handler->offset += other->length;
592 } else { 590 } else {
593 list_add_tail_rcu(&handler->link, &address_handler_list); 591 list_add_tail(&handler->link, &address_handler_list);
594 ret = 0; 592 ret = 0;
595 break; 593 break;
596 } 594 }
597 } 595 }
598 596
599 spin_unlock(&address_handler_list_lock); 597 spin_unlock_irqrestore(&address_handler_lock, flags);
600 598
601 return ret; 599 return ret;
602} 600}
@@ -604,18 +602,14 @@ EXPORT_SYMBOL(fw_core_add_address_handler);
604 602
605/** 603/**
606 * fw_core_remove_address_handler() - unregister an address handler 604 * fw_core_remove_address_handler() - unregister an address handler
607 *
608 * To be called in process context.
609 *
610 * When fw_core_remove_address_handler() returns, @handler->callback() is
611 * guaranteed to not run on any CPU anymore.
612 */ 605 */
613void fw_core_remove_address_handler(struct fw_address_handler *handler) 606void fw_core_remove_address_handler(struct fw_address_handler *handler)
614{ 607{
615 spin_lock(&address_handler_list_lock); 608 unsigned long flags;
616 list_del_rcu(&handler->link); 609
617 spin_unlock(&address_handler_list_lock); 610 spin_lock_irqsave(&address_handler_lock, flags);
618 synchronize_rcu(); 611 list_del(&handler->link);
612 spin_unlock_irqrestore(&address_handler_lock, flags);
619} 613}
620EXPORT_SYMBOL(fw_core_remove_address_handler); 614EXPORT_SYMBOL(fw_core_remove_address_handler);
621 615
@@ -776,7 +770,7 @@ static struct fw_request *allocate_request(struct fw_card *card,
776 break; 770 break;
777 771
778 default: 772 default:
779 fw_notice(card, "ERROR - corrupt request received - %08x %08x %08x\n", 773 fw_error("ERROR - corrupt request received - %08x %08x %08x\n",
780 p->header[0], p->header[1], p->header[2]); 774 p->header[0], p->header[1], p->header[2]);
781 return NULL; 775 return NULL;
782 } 776 }
@@ -826,21 +820,13 @@ void fw_send_response(struct fw_card *card,
826} 820}
827EXPORT_SYMBOL(fw_send_response); 821EXPORT_SYMBOL(fw_send_response);
828 822
829/**
830 * fw_get_request_speed() - returns speed at which the @request was received
831 */
832int fw_get_request_speed(struct fw_request *request)
833{
834 return request->response.speed;
835}
836EXPORT_SYMBOL(fw_get_request_speed);
837
838static void handle_exclusive_region_request(struct fw_card *card, 823static void handle_exclusive_region_request(struct fw_card *card,
839 struct fw_packet *p, 824 struct fw_packet *p,
840 struct fw_request *request, 825 struct fw_request *request,
841 unsigned long long offset) 826 unsigned long long offset)
842{ 827{
843 struct fw_address_handler *handler; 828 struct fw_address_handler *handler;
829 unsigned long flags;
844 int tcode, destination, source; 830 int tcode, destination, source;
845 831
846 destination = HEADER_GET_DESTINATION(p->header[0]); 832 destination = HEADER_GET_DESTINATION(p->header[0]);
@@ -849,19 +835,27 @@ static void handle_exclusive_region_request(struct fw_card *card,
849 if (tcode == TCODE_LOCK_REQUEST) 835 if (tcode == TCODE_LOCK_REQUEST)
850 tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]); 836 tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]);
851 837
852 rcu_read_lock(); 838 spin_lock_irqsave(&address_handler_lock, flags);
853 handler = lookup_enclosing_address_handler(&address_handler_list, 839 handler = lookup_enclosing_address_handler(&address_handler_list,
854 offset, request->length); 840 offset, request->length);
855 if (handler) 841 spin_unlock_irqrestore(&address_handler_lock, flags);
842
843 /*
844 * FIXME: lookup the fw_node corresponding to the sender of
845 * this request and pass that to the address handler instead
846 * of the node ID. We may also want to move the address
847 * allocations to fw_node so we only do this callback if the
848 * upper layers registered it for this node.
849 */
850
851 if (handler == NULL)
852 fw_send_response(card, request, RCODE_ADDRESS_ERROR);
853 else
856 handler->address_callback(card, request, 854 handler->address_callback(card, request,
857 tcode, destination, source, 855 tcode, destination, source,
858 p->generation, offset, 856 p->generation, offset,
859 request->data, request->length, 857 request->data, request->length,
860 handler->callback_data); 858 handler->callback_data);
861 rcu_read_unlock();
862
863 if (!handler)
864 fw_send_response(card, request, RCODE_ADDRESS_ERROR);
865} 859}
866 860
867static void handle_fcp_region_request(struct fw_card *card, 861static void handle_fcp_region_request(struct fw_card *card,
@@ -870,6 +864,7 @@ static void handle_fcp_region_request(struct fw_card *card,
870 unsigned long long offset) 864 unsigned long long offset)
871{ 865{
872 struct fw_address_handler *handler; 866 struct fw_address_handler *handler;
867 unsigned long flags;
873 int tcode, destination, source; 868 int tcode, destination, source;
874 869
875 if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) && 870 if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
@@ -891,8 +886,8 @@ static void handle_fcp_region_request(struct fw_card *card,
891 return; 886 return;
892 } 887 }
893 888
894 rcu_read_lock(); 889 spin_lock_irqsave(&address_handler_lock, flags);
895 list_for_each_entry_rcu(handler, &address_handler_list, link) { 890 list_for_each_entry(handler, &address_handler_list, link) {
896 if (is_enclosing_handler(handler, offset, request->length)) 891 if (is_enclosing_handler(handler, offset, request->length))
897 handler->address_callback(card, NULL, tcode, 892 handler->address_callback(card, NULL, tcode,
898 destination, source, 893 destination, source,
@@ -901,7 +896,7 @@ static void handle_fcp_region_request(struct fw_card *card,
901 request->length, 896 request->length,
902 handler->callback_data); 897 handler->callback_data);
903 } 898 }
904 rcu_read_unlock(); 899 spin_unlock_irqrestore(&address_handler_lock, flags);
905 900
906 fw_send_response(card, request, RCODE_COMPLETE); 901 fw_send_response(card, request, RCODE_COMPLETE);
907} 902}
@@ -965,7 +960,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
965 960
966 if (&t->link == &card->transaction_list) { 961 if (&t->link == &card->transaction_list) {
967 timed_out: 962 timed_out:
968 fw_notice(card, "unsolicited response (source %x, tlabel %x)\n", 963 fw_notify("Unsolicited response (source %x, tlabel %x)\n",
969 source, tlabel); 964 source, tlabel);
970 return; 965 return;
971 } 966 }
@@ -1009,32 +1004,6 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
1009} 1004}
1010EXPORT_SYMBOL(fw_core_handle_response); 1005EXPORT_SYMBOL(fw_core_handle_response);
1011 1006
1012/**
1013 * fw_rcode_string - convert a firewire result code to an error description
1014 * @rcode: the result code
1015 */
1016const char *fw_rcode_string(int rcode)
1017{
1018 static const char *const names[] = {
1019 [RCODE_COMPLETE] = "no error",
1020 [RCODE_CONFLICT_ERROR] = "conflict error",
1021 [RCODE_DATA_ERROR] = "data error",
1022 [RCODE_TYPE_ERROR] = "type error",
1023 [RCODE_ADDRESS_ERROR] = "address error",
1024 [RCODE_SEND_ERROR] = "send error",
1025 [RCODE_CANCELLED] = "timeout",
1026 [RCODE_BUSY] = "busy",
1027 [RCODE_GENERATION] = "bus reset",
1028 [RCODE_NO_ACK] = "no ack",
1029 };
1030
1031 if ((unsigned int)rcode < ARRAY_SIZE(names) && names[rcode])
1032 return names[rcode];
1033 else
1034 return "unknown";
1035}
1036EXPORT_SYMBOL(fw_rcode_string);
1037
1038static const struct fw_address_region topology_map_region = 1007static const struct fw_address_region topology_map_region =
1039 { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP, 1008 { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
1040 .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, }; 1009 .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
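
The fw_rcode_string() helper deleted above is a designated-initializer lookup table with a range-and-hole check. A hypothetical caller, to show the intended use; the callback shape follows fw_transaction_callback_t, and the message text is illustrative:

static void write_done(struct fw_card *card, int rcode,
		       void *payload, size_t length, void *callback_data)
{
	if (rcode != RCODE_COMPLETE)
		printk(KERN_NOTICE "block write failed: %s\n",
		       fw_rcode_string(rcode));
}
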
@@ -1077,8 +1046,8 @@ static void update_split_timeout(struct fw_card *card)
1077 1046
1078 cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19); 1047 cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);
1079 1048
1080 /* minimum per IEEE 1394, maximum which doesn't overflow OHCI */ 1049 cycles = max(cycles, 800u); /* minimum as per the spec */
1081 cycles = clamp(cycles, 800u, 3u * 8000u); 1050 cycles = min(cycles, 3u * 8000u); /* maximum OHCI timeout */
1082 1051
1083 card->split_timeout_cycles = cycles; 1052 card->split_timeout_cycles = cycles;
1084 card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000); 1053 card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
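
The left column's clamp() and the right column's max()/min() pair are equivalent: clamp(val, lo, hi) is min(max(val, lo), hi), with the constraint that all three arguments share one type. Spelled out with the values in play (bus cycles are 125 µs, 8000 per second):

/* 800 cycles = 100 ms, the IEEE 1394 minimum split timeout;
 * 3 * 8000 cycles = 3 s, the largest value that does not
 * overflow the OHCI timeout hardware */
cycles = clamp(cycles, 800u, 3u * 8000u);
/* identical effect: */
cycles = max(cycles, 800u);
cycles = min(cycles, 3u * 8000u);
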
@@ -1204,23 +1173,6 @@ static struct fw_address_handler registers = {
1204 .address_callback = handle_registers, 1173 .address_callback = handle_registers,
1205}; 1174};
1206 1175
1207static void handle_low_memory(struct fw_card *card, struct fw_request *request,
1208 int tcode, int destination, int source, int generation,
1209 unsigned long long offset, void *payload, size_t length,
1210 void *callback_data)
1211{
1212 /*
1213 * This catches requests not handled by the physical DMA unit,
1214 * i.e., wrong transaction types or unauthorized source nodes.
1215 */
1216 fw_send_response(card, request, RCODE_TYPE_ERROR);
1217}
1218
1219static struct fw_address_handler low_memory = {
1220 .length = 0x000100000000ULL,
1221 .address_callback = handle_low_memory,
1222};
1223
1224MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); 1176MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
1225MODULE_DESCRIPTION("Core IEEE1394 transaction logic"); 1177MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
1226MODULE_LICENSE("GPL"); 1178MODULE_LICENSE("GPL");
@@ -1282,7 +1234,6 @@ static int __init fw_core_init(void)
1282 1234
1283 fw_core_add_address_handler(&topology_map, &topology_map_region); 1235 fw_core_add_address_handler(&topology_map, &topology_map_region);
1284 fw_core_add_address_handler(&registers, &registers_region); 1236 fw_core_add_address_handler(&registers, &registers_region);
1285 fw_core_add_address_handler(&low_memory, &low_memory_region);
1286 fw_core_add_descriptor(&vendor_id_descriptor); 1237 fw_core_add_descriptor(&vendor_id_descriptor);
1287 fw_core_add_descriptor(&model_id_descriptor); 1238 fw_core_add_descriptor(&model_id_descriptor);
1288 1239
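
The deleted low_memory handler is a catch-all: it claims the whole low 4 GB (the OHCI physical-DMA window) so that requests the physical DMA unit rejects still receive a well-formed RCODE_TYPE_ERROR response. A reduced sketch of the registration pattern, with illustrative names; the callback signature matches fw_address_callback_t:

static void reject_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	fw_send_response(card, request, RCODE_TYPE_ERROR);
}

static const struct fw_address_region example_region =
	{ .start = 0x000000000000ULL, .end = 0x000100000000ULL, };

static struct fw_address_handler example_handler = {
	.length           = 0x000100000000ULL,
	.address_callback = reject_request,
};

/* at init time: fw_core_add_address_handler(&example_handler, &example_region); */
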
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 515a42c786d..b45be576752 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -1,9 +1,6 @@
1#ifndef _FIREWIRE_CORE_H 1#ifndef _FIREWIRE_CORE_H
2#define _FIREWIRE_CORE_H 2#define _FIREWIRE_CORE_H
3 3
4#include <linux/compiler.h>
5#include <linux/device.h>
6#include <linux/dma-mapping.h>
7#include <linux/fs.h> 4#include <linux/fs.h>
8#include <linux/list.h> 5#include <linux/list.h>
9#include <linux/idr.h> 6#include <linux/idr.h>
@@ -26,11 +23,6 @@ struct fw_packet;
26 23
27/* -card */ 24/* -card */
28 25
29extern __printf(2, 3)
30void fw_err(const struct fw_card *card, const char *fmt, ...);
31extern __printf(2, 3)
32void fw_notice(const struct fw_card *card, const char *fmt, ...);
33
34/* bitfields within the PHY registers */ 26/* bitfields within the PHY registers */
35#define PHY_LINK_ACTIVE 0x80 27#define PHY_LINK_ACTIVE 0x80
36#define PHY_CONTENDER 0x40 28#define PHY_CONTENDER 0x40
@@ -107,8 +99,6 @@ struct fw_card_driver {
107 99
108 void (*flush_queue_iso)(struct fw_iso_context *ctx); 100 void (*flush_queue_iso)(struct fw_iso_context *ctx);
109 101
110 int (*flush_iso_completions)(struct fw_iso_context *ctx);
111
112 int (*stop_iso)(struct fw_iso_context *ctx); 102 int (*stop_iso)(struct fw_iso_context *ctx);
113}; 103};
114 104
@@ -121,6 +111,21 @@ int fw_compute_block_crc(__be32 *block);
121void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset); 111void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset);
122void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); 112void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
123 113
114static inline struct fw_card *fw_card_get(struct fw_card *card)
115{
116 kref_get(&card->kref);
117
118 return card;
119}
120
121void fw_card_release(struct kref *kref);
122
123static inline void fw_card_put(struct fw_card *card)
124{
125 kref_put(&card->kref, fw_card_release);
126}
127
128
124/* -cdev */ 129/* -cdev */
125 130
126extern const struct file_operations fw_device_ops; 131extern const struct file_operations fw_device_ops;
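
The fw_card_get()/fw_card_put() inlines added here wrap the stock kref idiom: the release callback runs exactly once, when the last reference is dropped. The shape, reduced to a self-contained sketch with generic names:

#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref kref;
	/* payload ... */
};

static void obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct obj, kref));
}

/* take a reference:  kref_get(&o->kref);
 * drop a reference:  kref_put(&o->kref, obj_release);
 *                    frees the object when the count hits zero */
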
@@ -136,18 +141,6 @@ extern struct rw_semaphore fw_device_rwsem;
136extern struct idr fw_device_idr; 141extern struct idr fw_device_idr;
137extern int fw_cdev_major; 142extern int fw_cdev_major;
138 143
139static inline struct fw_device *fw_device_get(struct fw_device *device)
140{
141 get_device(&device->device);
142
143 return device;
144}
145
146static inline void fw_device_put(struct fw_device *device)
147{
148 put_device(&device->device);
149}
150
151struct fw_device *fw_device_get_by_devt(dev_t devt); 144struct fw_device *fw_device_get_by_devt(dev_t devt);
152int fw_device_set_broadcast_channel(struct device *dev, void *gen); 145int fw_device_set_broadcast_channel(struct device *dev, void *gen);
153void fw_node_event(struct fw_card *card, struct fw_node *node, int event); 146void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
@@ -155,11 +148,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
155 148
156/* -iso */ 149/* -iso */
157 150
158int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count); 151int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
159int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
160 enum dma_data_direction direction);
161int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
162 struct vm_area_struct *vma);
163 152
164 153
165/* -topology */ 154/* -topology */
diff --git a/drivers/firewire/init_ohci1394_dma.c b/drivers/firewire/init_ohci1394_dma.c
index 2cc89ce745c..a9a347adb35 100644
--- a/drivers/firewire/init_ohci1394_dma.c
+++ b/drivers/firewire/init_ohci1394_dma.c
@@ -149,10 +149,10 @@ static inline void __init init_ohci1394_initialize(struct ohci *ohci)
149 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff); 149 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
150 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff); 150 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
151 151
152 /* Accept asynchronous transfer requests from all nodes for now */ 152 /* Accept asyncronous transfer requests from all nodes for now */
153 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); 153 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
154 154
155 /* Specify asynchronous transfer retries */ 155 /* Specify asyncronous transfer retries */
156 reg_write(ohci, OHCI1394_ATRetries, 156 reg_write(ohci, OHCI1394_ATRetries,
157 OHCI1394_MAX_AT_REQ_RETRIES | 157 OHCI1394_MAX_AT_REQ_RETRIES |
158 (OHCI1394_MAX_AT_RESP_RETRIES<<4) | 158 (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index e7a711f53a6..03a7a85d042 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -256,8 +256,8 @@ static int fwnet_header_rebuild(struct sk_buff *skb)
256 if (get_unaligned_be16(&h->h_proto) == ETH_P_IP) 256 if (get_unaligned_be16(&h->h_proto) == ETH_P_IP)
257 return arp_find((unsigned char *)&h->h_dest, skb); 257 return arp_find((unsigned char *)&h->h_dest, skb);
258 258
259 dev_notice(&skb->dev->dev, "unable to resolve type %04x addresses\n", 259 fw_notify("%s: unable to resolve type %04x addresses\n",
260 be16_to_cpu(h->h_proto)); 260 skb->dev->name, be16_to_cpu(h->h_proto));
261 return 0; 261 return 0;
262} 262}
263 263
@@ -369,7 +369,7 @@ static struct fwnet_fragment_info *fwnet_frag_new(
369 369
370 new = kmalloc(sizeof(*new), GFP_ATOMIC); 370 new = kmalloc(sizeof(*new), GFP_ATOMIC);
371 if (!new) { 371 if (!new) {
372 dev_err(&pd->skb->dev->dev, "out of memory\n"); 372 fw_error("out of memory\n");
373 return NULL; 373 return NULL;
374 } 374 }
375 375
@@ -414,7 +414,7 @@ fail_w_fi:
414fail_w_new: 414fail_w_new:
415 kfree(new); 415 kfree(new);
416fail: 416fail:
417 dev_err(&net->dev, "out of memory\n"); 417 fw_error("out of memory\n");
418 418
419 return NULL; 419 return NULL;
420} 420}
@@ -502,7 +502,11 @@ static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev,
502static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed) 502static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed)
503{ 503{
504 max_rec = min(max_rec, speed + 8); 504 max_rec = min(max_rec, speed + 8);
505 max_rec = clamp(max_rec, 8U, 11U); /* 512...4096 */ 505 max_rec = min(max_rec, 0xbU); /* <= 4096 */
506 if (max_rec < 8) {
507 fw_notify("max_rec %x out of range\n", max_rec);
508 max_rec = 8;
509 }
506 510
507 return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE; 511 return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE;
508} 512}
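
Both variants bound max_rec to [8, 11] before deriving the payload size; the left does it in one clamp(), the right open-codes the upper bound and logs out-of-range lower values. The resulting sizes, assuming RFC2374_FRAG_HDR_SIZE is the two-quadlet (8-byte) fragment header used elsewhere in this driver:

/* payload = 2^(max_rec + 1) - frag_hdr:
 *   max_rec =  8  ->  512 - 8 =  504 bytes
 *   max_rec = 11  -> 4096 - 8 = 4088 bytes
 * the speed + 8 cap makes slower links use smaller packets */
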
@@ -554,7 +558,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
554 sspd = arp1394->sspd; 558 sspd = arp1394->sspd;
555 /* Sanity check. OS X 10.3 PPC reportedly sends 131. */ 559 /* Sanity check. OS X 10.3 PPC reportedly sends 131. */
556 if (sspd > SCODE_3200) { 560 if (sspd > SCODE_3200) {
557 dev_notice(&net->dev, "sspd %x out of range\n", sspd); 561 fw_notify("sspd %x out of range\n", sspd);
558 sspd = SCODE_3200; 562 sspd = SCODE_3200;
559 } 563 }
560 max_payload = fwnet_max_payload(arp1394->max_rec, sspd); 564 max_payload = fwnet_max_payload(arp1394->max_rec, sspd);
@@ -574,9 +578,8 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
574 spin_unlock_irqrestore(&dev->lock, flags); 578 spin_unlock_irqrestore(&dev->lock, flags);
575 579
576 if (!peer) { 580 if (!peer) {
577 dev_notice(&net->dev, 581 fw_notify("No peer for ARP packet from %016llx\n",
578 "no peer for ARP packet from %016llx\n", 582 (unsigned long long)peer_guid);
579 (unsigned long long)peer_guid);
580 goto no_peer; 583 goto no_peer;
581 } 584 }
582 585
@@ -692,7 +695,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
692 695
693 skb = dev_alloc_skb(len + net->hard_header_len + 15); 696 skb = dev_alloc_skb(len + net->hard_header_len + 15);
694 if (unlikely(!skb)) { 697 if (unlikely(!skb)) {
695 dev_err(&net->dev, "out of memory\n"); 698 fw_error("out of memory\n");
696 net->stats.rx_dropped++; 699 net->stats.rx_dropped++;
697 700
698 return -ENOMEM; 701 return -ENOMEM;
@@ -815,7 +818,7 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
815 rcode = RCODE_TYPE_ERROR; 818 rcode = RCODE_TYPE_ERROR;
816 else if (fwnet_incoming_packet(dev, payload, length, 819 else if (fwnet_incoming_packet(dev, payload, length,
817 source, generation, false) != 0) { 820 source, generation, false) != 0) {
818 dev_err(&dev->netdev->dev, "incoming packet failure\n"); 821 fw_error("Incoming packet failure\n");
819 rcode = RCODE_CONFLICT_ERROR; 822 rcode = RCODE_CONFLICT_ERROR;
820 } else 823 } else
821 rcode = RCODE_COMPLETE; 824 rcode = RCODE_COMPLETE;
@@ -828,6 +831,7 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
828{ 831{
829 struct fwnet_device *dev; 832 struct fwnet_device *dev;
830 struct fw_iso_packet packet; 833 struct fw_iso_packet packet;
834 struct fw_card *card;
831 __be16 *hdr_ptr; 835 __be16 *hdr_ptr;
832 __be32 *buf_ptr; 836 __be32 *buf_ptr;
833 int retval; 837 int retval;
@@ -839,6 +843,7 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
839 unsigned long flags; 843 unsigned long flags;
840 844
841 dev = data; 845 dev = data;
846 card = dev->card;
842 hdr_ptr = header; 847 hdr_ptr = header;
843 length = be16_to_cpup(hdr_ptr); 848 length = be16_to_cpup(hdr_ptr);
844 849
@@ -859,8 +864,8 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
859 if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) { 864 if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
860 buf_ptr += 2; 865 buf_ptr += 2;
861 length -= IEEE1394_GASP_HDR_SIZE; 866 length -= IEEE1394_GASP_HDR_SIZE;
862 fwnet_incoming_packet(dev, buf_ptr, length, source_node_id, 867 fwnet_incoming_packet(dev, buf_ptr, length,
863 context->card->generation, true); 868 source_node_id, -1, true);
864 } 869 }
865 870
866 packet.payload_length = dev->rcv_buffer_size; 871 packet.payload_length = dev->rcv_buffer_size;
@@ -880,7 +885,7 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
880 if (retval >= 0) 885 if (retval >= 0)
881 fw_iso_context_queue_flush(dev->broadcast_rcv_context); 886 fw_iso_context_queue_flush(dev->broadcast_rcv_context);
882 else 887 else
883 dev_err(&dev->netdev->dev, "requeue failed\n"); 888 fw_error("requeue failed\n");
884} 889}
885 890
886static struct kmem_cache *fwnet_packet_task_cache; 891static struct kmem_cache *fwnet_packet_task_cache;
@@ -935,10 +940,9 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
935 case RFC2374_HDR_LASTFRAG: 940 case RFC2374_HDR_LASTFRAG:
936 case RFC2374_HDR_UNFRAG: 941 case RFC2374_HDR_UNFRAG:
937 default: 942 default:
938 dev_err(&dev->netdev->dev, 943 fw_error("Outstanding packet %x lf %x, header %x,%x\n",
939 "outstanding packet %x lf %x, header %x,%x\n", 944 ptask->outstanding_pkts, lf, ptask->hdr.w0,
940 ptask->outstanding_pkts, lf, ptask->hdr.w0, 945 ptask->hdr.w1);
941 ptask->hdr.w1);
942 BUG(); 946 BUG();
943 947
944 case RFC2374_HDR_FIRSTFRAG: 948 case RFC2374_HDR_FIRSTFRAG:
@@ -956,12 +960,7 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
956 break; 960 break;
957 } 961 }
958 962
959 if (ptask->dest_node == IEEE1394_ALL_NODES) { 963 skb_pull(skb, ptask->max_payload);
960 skb_pull(skb,
961 ptask->max_payload + IEEE1394_GASP_HDR_SIZE);
962 } else {
963 skb_pull(skb, ptask->max_payload);
964 }
965 if (ptask->outstanding_pkts > 1) { 964 if (ptask->outstanding_pkts > 1) {
966 fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG, 965 fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
967 dg_size, fg_off, datagram_label); 966 dg_size, fg_off, datagram_label);
@@ -1015,9 +1014,8 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
1015 fwnet_transmit_packet_failed(ptask); 1014 fwnet_transmit_packet_failed(ptask);
1016 1015
1017 if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) { 1016 if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
1018 dev_err(&ptask->dev->netdev->dev, 1017 fw_error("fwnet_write_complete: "
1019 "fwnet_write_complete failed: %x (skipped %d)\n", 1018 "failed: %x (skipped %d)\n", rcode, errors_skipped);
1020 rcode, errors_skipped);
1021 1019
1022 errors_skipped = 0; 1020 errors_skipped = 0;
1023 last_rcode = rcode; 1021 last_rcode = rcode;
@@ -1065,7 +1063,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
1065 smp_rmb(); 1063 smp_rmb();
1066 node_id = dev->card->node_id; 1064 node_id = dev->card->node_id;
1067 1065
1068 p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE); 1066 p = skb_push(ptask->skb, 8);
1069 put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p); 1067 put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
1070 put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24 1068 put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
1071 | RFC2734_SW_VERSION, &p[4]); 1069 | RFC2734_SW_VERSION, &p[4]);
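
The 8 hardcoded on the right is IEEE1394_GASP_HDR_SIZE on the left: the two-quadlet GASP header that prefixes broadcast stream payloads. An annotated view of what the two put_unaligned_be32() calls build, given IANA_SPECIFIER_ID = 0x00005e and RFC2734_SW_VERSION = 0x000001:

/* quadlet 0: source node ID (16 bits) | specifier ID bits 23..8
 * quadlet 1: specifier ID bits 7..0   | version (24 bits)
 * e.g. for node 0xffc0: ffc00000 5e000001 on the wire */
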
@@ -1127,12 +1125,17 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
1127 unsigned u; 1125 unsigned u;
1128 1126
1129 if (dev->local_fifo == FWNET_NO_FIFO_ADDR) { 1127 if (dev->local_fifo == FWNET_NO_FIFO_ADDR) {
1128 /* outside OHCI posted write area? */
1129 static const struct fw_address_region region = {
1130 .start = 0xffff00000000ULL,
1131 .end = CSR_REGISTER_BASE,
1132 };
1133
1130 dev->handler.length = 4096; 1134 dev->handler.length = 4096;
1131 dev->handler.address_callback = fwnet_receive_packet; 1135 dev->handler.address_callback = fwnet_receive_packet;
1132 dev->handler.callback_data = dev; 1136 dev->handler.callback_data = dev;
1133 1137
1134 retval = fw_core_add_address_handler(&dev->handler, 1138 retval = fw_core_add_address_handler(&dev->handler, &region);
1135 &fw_high_memory_region);
1136 if (retval < 0) 1139 if (retval < 0)
1137 goto failed_initial; 1140 goto failed_initial;
1138 1141
@@ -1545,12 +1548,14 @@ static int fwnet_probe(struct device *_dev)
1545 put_unaligned_be64(card->guid, net->dev_addr); 1548 put_unaligned_be64(card->guid, net->dev_addr);
1546 put_unaligned_be64(~0ULL, net->broadcast); 1549 put_unaligned_be64(~0ULL, net->broadcast);
1547 ret = register_netdev(net); 1550 ret = register_netdev(net);
1548 if (ret) 1551 if (ret) {
1552 fw_error("Cannot register the driver\n");
1549 goto out; 1553 goto out;
1554 }
1550 1555
1551 list_add_tail(&dev->dev_link, &fwnet_device_list); 1556 list_add_tail(&dev->dev_link, &fwnet_device_list);
1552 dev_notice(&net->dev, "IPv4 over IEEE 1394 on card %s\n", 1557 fw_notify("%s: IPv4 over FireWire on device %016llx\n",
1553 dev_name(card->device)); 1558 net->name, (unsigned long long)card->guid);
1554 have_dev: 1559 have_dev:
1555 ret = fwnet_add_peer(dev, unit, device); 1560 ret = fwnet_add_peer(dev, unit, device);
1556 if (ret && allocated_netdev) { 1561 if (ret && allocated_netdev) {
@@ -1652,7 +1657,7 @@ static const struct ieee1394_device_id fwnet_id_table[] = {
1652static struct fw_driver fwnet_driver = { 1657static struct fw_driver fwnet_driver = {
1653 .driver = { 1658 .driver = {
1654 .owner = THIS_MODULE, 1659 .owner = THIS_MODULE,
1655 .name = KBUILD_MODNAME, 1660 .name = "net",
1656 .bus = &fw_bus_type, 1661 .bus = &fw_bus_type,
1657 .probe = fwnet_probe, 1662 .probe = fwnet_probe,
1658 .remove = fwnet_remove, 1663 .remove = fwnet_remove,
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 76b2d390f6e..763626b739d 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -36,7 +36,7 @@
36#include <linux/timex.h> 36#include <linux/timex.h>
37#include <linux/uaccess.h> 37#include <linux/uaccess.h>
38#include <linux/wait.h> 38#include <linux/wait.h>
39#include <linux/dma-mapping.h> 39
40#include <linux/atomic.h> 40#include <linux/atomic.h>
41#include <asm/byteorder.h> 41#include <asm/byteorder.h>
42 42
@@ -529,14 +529,14 @@ remove_card(struct pci_dev *dev)
529 529
530#define RCV_BUFFER_SIZE (16 * 1024) 530#define RCV_BUFFER_SIZE (16 * 1024)
531 531
532static int 532static int __devinit
533add_card(struct pci_dev *dev, const struct pci_device_id *unused) 533add_card(struct pci_dev *dev, const struct pci_device_id *unused)
534{ 534{
535 struct pcilynx *lynx; 535 struct pcilynx *lynx;
536 u32 p, end; 536 u32 p, end;
537 int ret, i; 537 int ret, i;
538 538
539 if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) { 539 if (pci_set_dma_mask(dev, 0xffffffff)) {
540 dev_err(&dev->dev, 540 dev_err(&dev->dev,
541 "DMA address limits not supported for PCILynx hardware\n"); 541 "DMA address limits not supported for PCILynx hardware\n");
542 return -ENXIO; 542 return -ENXIO;
@@ -683,7 +683,7 @@ fail_disable:
683 return ret; 683 return ret;
684} 684}
685 685
686static struct pci_device_id pci_table[] = { 686static struct pci_device_id pci_table[] __devinitdata = {
687 { 687 {
688 .vendor = PCI_VENDOR_ID_TI, 688 .vendor = PCI_VENDOR_ID_TI,
689 .device = PCI_DEVICE_ID_TI_PCILYNX, 689 .device = PCI_DEVICE_ID_TI_PCILYNX,
@@ -693,8 +693,6 @@ static struct pci_device_id pci_table[] = {
693 { } /* Terminating entry */ 693 { } /* Terminating entry */
694}; 694};
695 695
696MODULE_DEVICE_TABLE(pci, pci_table);
697
698static struct pci_driver lynx_pci_driver = { 696static struct pci_driver lynx_pci_driver = {
699 .name = driver_name, 697 .name = driver_name,
700 .id_table = pci_table, 698 .id_table = pci_table,
@@ -702,8 +700,22 @@ static struct pci_driver lynx_pci_driver = {
702 .remove = remove_card, 700 .remove = remove_card,
703}; 701};
704 702
705module_pci_driver(lynx_pci_driver);
706
707MODULE_AUTHOR("Kristian Hoegsberg"); 703MODULE_AUTHOR("Kristian Hoegsberg");
708MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers"); 704MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers");
709MODULE_LICENSE("GPL"); 705MODULE_LICENSE("GPL");
706MODULE_DEVICE_TABLE(pci, pci_table);
707
708static int __init nosy_init(void)
709{
710 return pci_register_driver(&lynx_pci_driver);
711}
712
713static void __exit nosy_cleanup(void)
714{
715 pci_unregister_driver(&lynx_pci_driver);
716
717 pr_info("Unloaded %s\n", driver_name);
718}
719
720module_init(nosy_init);
721module_exit(nosy_cleanup);
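
The left column's module_pci_driver() is shorthand for essentially the init/exit boilerplate restored on the right (minus the extra pr_info in the revert); it expands to roughly:

static int __init lynx_pci_driver_init(void)
{
	return pci_register_driver(&lynx_pci_driver);
}
module_init(lynx_pci_driver_init);

static void __exit lynx_pci_driver_exit(void)
{
	pci_unregister_driver(&lynx_pci_driver);
}
module_exit(lynx_pci_driver_exit);
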
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 6ce6e07c38c..fd7170a9ad2 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -42,10 +42,10 @@
42#include <linux/string.h> 42#include <linux/string.h>
43#include <linux/time.h> 43#include <linux/time.h>
44#include <linux/vmalloc.h> 44#include <linux/vmalloc.h>
45#include <linux/workqueue.h>
46 45
47#include <asm/byteorder.h> 46#include <asm/byteorder.h>
48#include <asm/page.h> 47#include <asm/page.h>
48#include <asm/system.h>
49 49
50#ifdef CONFIG_PPC_PMAC 50#ifdef CONFIG_PPC_PMAC
51#include <asm/pmac_feature.h> 51#include <asm/pmac_feature.h>
@@ -125,7 +125,6 @@ struct context {
125 struct fw_ohci *ohci; 125 struct fw_ohci *ohci;
126 u32 regs; 126 u32 regs;
127 int total_allocation; 127 int total_allocation;
128 u32 current_bus;
129 bool running; 128 bool running;
130 bool flushing; 129 bool flushing;
131 130
@@ -169,12 +168,10 @@ struct context {
169struct iso_context { 168struct iso_context {
170 struct fw_iso_context base; 169 struct fw_iso_context base;
171 struct context context; 170 struct context context;
171 int excess_bytes;
172 void *header; 172 void *header;
173 size_t header_length; 173 size_t header_length;
174 unsigned long flushing_completions; 174
175 u32 mc_buffer_bus;
176 u16 mc_completed;
177 u16 last_timestamp;
178 u8 sync; 175 u8 sync;
179 u8 tags; 176 u8 tags;
180}; 177};
@@ -191,7 +188,6 @@ struct fw_ohci {
191 unsigned quirks; 188 unsigned quirks;
192 unsigned int pri_req_max; 189 unsigned int pri_req_max;
193 u32 bus_time; 190 u32 bus_time;
194 bool bus_time_running;
195 bool is_root; 191 bool is_root;
196 bool csr_state_setclear_abdicate; 192 bool csr_state_setclear_abdicate;
197 int n_ir; 193 int n_ir;
@@ -230,7 +226,7 @@ struct fw_ohci {
230 226
231 __le32 *self_id_cpu; 227 __le32 *self_id_cpu;
232 dma_addr_t self_id_bus; 228 dma_addr_t self_id_bus;
233 struct work_struct bus_reset_work; 229 struct tasklet_struct bus_reset_tasklet;
234 230
235 u32 self_id_buffer[512]; 231 u32 self_id_buffer[512];
236}; 232};
@@ -265,11 +261,8 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
265static char ohci_driver_name[] = KBUILD_MODNAME; 261static char ohci_driver_name[] = KBUILD_MODNAME;
266 262
267#define PCI_DEVICE_ID_AGERE_FW643 0x5901 263#define PCI_DEVICE_ID_AGERE_FW643 0x5901
268#define PCI_DEVICE_ID_CREATIVE_SB1394 0x4001
269#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 264#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
270#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 265#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
271#define PCI_DEVICE_ID_TI_TSB12LV26 0x8020
272#define PCI_DEVICE_ID_TI_TSB82AA2 0x8025
273#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd 266#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
274 267
275#define QUIRK_CYCLE_TIMER 1 268#define QUIRK_CYCLE_TIMER 1
@@ -277,7 +270,6 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
277#define QUIRK_BE_HEADERS 4 270#define QUIRK_BE_HEADERS 4
278#define QUIRK_NO_1394A 8 271#define QUIRK_NO_1394A 8
279#define QUIRK_NO_MSI 16 272#define QUIRK_NO_MSI 16
280#define QUIRK_TI_SLLZ059 32
281 273
282/* In case of multiple matches in ohci_quirks[], only the first one is used. */ 274/* In case of multiple matches in ohci_quirks[], only the first one is used. */
283static const struct { 275static const struct {
@@ -292,9 +284,6 @@ static const struct {
292 {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, 284 {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
293 QUIRK_NO_MSI}, 285 QUIRK_NO_MSI},
294 286
295 {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
296 QUIRK_RESET_PACKET},
297
298 {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID, 287 {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
299 QUIRK_NO_MSI}, 288 QUIRK_NO_MSI},
300 289
@@ -305,17 +294,11 @@ static const struct {
305 QUIRK_NO_MSI}, 294 QUIRK_NO_MSI},
306 295
307 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, 296 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
308 QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, 297 QUIRK_CYCLE_TIMER},
309 298
310 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID, 299 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
311 QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A}, 300 QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},
312 301
313 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
314 QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},
315
316 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
317 QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},
318
319 {PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID, 302 {PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
320 QUIRK_RESET_PACKET}, 303 QUIRK_RESET_PACKET},
321 304
@@ -332,7 +315,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
332 ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) 315 ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
333 ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A) 316 ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
334 ", disable MSI = " __stringify(QUIRK_NO_MSI) 317 ", disable MSI = " __stringify(QUIRK_NO_MSI)
335 ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
336 ")"); 318 ")");
337 319
338#define OHCI_PARAM_DEBUG_AT_AR 1 320#define OHCI_PARAM_DEBUG_AT_AR 1
@@ -340,6 +322,8 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
340#define OHCI_PARAM_DEBUG_IRQS 4 322#define OHCI_PARAM_DEBUG_IRQS 4
341#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */ 323#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
342 324
325#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
326
343static int param_debug; 327static int param_debug;
344module_param_named(debug, param_debug, int, 0644); 328module_param_named(debug, param_debug, int, 0644);
345MODULE_PARM_DESC(debug, "Verbose logging (default = 0" 329MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
@@ -349,7 +333,7 @@ MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
349 ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS) 333 ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
350 ", or a combination, or all = -1)"); 334 ", or a combination, or all = -1)");
351 335
352static void log_irqs(struct fw_ohci *ohci, u32 evt) 336static void log_irqs(u32 evt)
353{ 337{
354 if (likely(!(param_debug & 338 if (likely(!(param_debug &
355 (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS)))) 339 (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
@@ -359,8 +343,7 @@ static void log_irqs(struct fw_ohci *ohci, u32 evt)
359 !(evt & OHCI1394_busReset)) 343 !(evt & OHCI1394_busReset))
360 return; 344 return;
361 345
362 dev_notice(ohci->card.device, 346 fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
363 "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
364 evt & OHCI1394_selfIDComplete ? " selfID" : "", 347 evt & OHCI1394_selfIDComplete ? " selfID" : "",
365 evt & OHCI1394_RQPkt ? " AR_req" : "", 348 evt & OHCI1394_RQPkt ? " AR_req" : "",
366 evt & OHCI1394_RSPkt ? " AR_resp" : "", 349 evt & OHCI1394_RSPkt ? " AR_resp" : "",
@@ -399,29 +382,24 @@ static char _p(u32 *s, int shift)
399 return port[*s >> shift & 3]; 382 return port[*s >> shift & 3];
400} 383}
401 384
402static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count) 385static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
403{ 386{
404 u32 *s;
405
406 if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS))) 387 if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
407 return; 388 return;
408 389
409 dev_notice(ohci->card.device, 390 fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
410 "%d selfIDs, generation %d, local node ID %04x\n", 391 self_id_count, generation, node_id);
411 self_id_count, generation, ohci->node_id);
412 392
413 for (s = ohci->self_id_buffer; self_id_count--; ++s) 393 for (; self_id_count--; ++s)
414 if ((*s & 1 << 23) == 0) 394 if ((*s & 1 << 23) == 0)
415 dev_notice(ohci->card.device, 395 fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
416 "selfID 0: %08x, phy %d [%c%c%c] "
417 "%s gc=%d %s %s%s%s\n", 396 "%s gc=%d %s %s%s%s\n",
418 *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2), 397 *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
419 speed[*s >> 14 & 3], *s >> 16 & 63, 398 speed[*s >> 14 & 3], *s >> 16 & 63,
420 power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "", 399 power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
421 *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : ""); 400 *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
422 else 401 else
423 dev_notice(ohci->card.device, 402 fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
424 "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
425 *s, *s >> 24 & 63, 403 *s, *s >> 24 & 63,
426 _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10), 404 _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
427 _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2)); 405 _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
@@ -457,8 +435,7 @@ static const char *tcodes[] = {
457 [0xe] = "link internal", [0xf] = "-reserved-", 435 [0xe] = "link internal", [0xf] = "-reserved-",
458}; 436};
459 437
460static void log_ar_at_event(struct fw_ohci *ohci, 438static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
461 char dir, int speed, u32 *header, int evt)
462{ 439{
463 int tcode = header[0] >> 4 & 0xf; 440 int tcode = header[0] >> 4 & 0xf;
464 char specific[12]; 441 char specific[12];
@@ -470,9 +447,8 @@ static void log_ar_at_event(struct fw_ohci *ohci,
470 evt = 0x1f; 447 evt = 0x1f;
471 448
472 if (evt == OHCI1394_evt_bus_reset) { 449 if (evt == OHCI1394_evt_bus_reset) {
473 dev_notice(ohci->card.device, 450 fw_notify("A%c evt_bus_reset, generation %d\n",
474 "A%c evt_bus_reset, generation %d\n", 451 dir, (header[2] >> 16) & 0xff);
475 dir, (header[2] >> 16) & 0xff);
476 return; 452 return;
477 } 453 }
478 454
@@ -491,35 +467,39 @@ static void log_ar_at_event(struct fw_ohci *ohci,
491 467
492 switch (tcode) { 468 switch (tcode) {
493 case 0xa: 469 case 0xa:
494 dev_notice(ohci->card.device, 470 fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
495 "A%c %s, %s\n",
496 dir, evts[evt], tcodes[tcode]);
497 break; 471 break;
498 case 0xe: 472 case 0xe:
499 dev_notice(ohci->card.device, 473 fw_notify("A%c %s, PHY %08x %08x\n",
500 "A%c %s, PHY %08x %08x\n", 474 dir, evts[evt], header[1], header[2]);
501 dir, evts[evt], header[1], header[2]);
502 break; 475 break;
503 case 0x0: case 0x1: case 0x4: case 0x5: case 0x9: 476 case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
504 dev_notice(ohci->card.device, 477 fw_notify("A%c spd %x tl %02x, "
505 "A%c spd %x tl %02x, " 478 "%04x -> %04x, %s, "
506 "%04x -> %04x, %s, " 479 "%s, %04x%08x%s\n",
507 "%s, %04x%08x%s\n", 480 dir, speed, header[0] >> 10 & 0x3f,
508 dir, speed, header[0] >> 10 & 0x3f, 481 header[1] >> 16, header[0] >> 16, evts[evt],
509 header[1] >> 16, header[0] >> 16, evts[evt], 482 tcodes[tcode], header[1] & 0xffff, header[2], specific);
510 tcodes[tcode], header[1] & 0xffff, header[2], specific);
511 break; 483 break;
512 default: 484 default:
513 dev_notice(ohci->card.device, 485 fw_notify("A%c spd %x tl %02x, "
514 "A%c spd %x tl %02x, " 486 "%04x -> %04x, %s, "
515 "%04x -> %04x, %s, " 487 "%s%s\n",
516 "%s%s\n", 488 dir, speed, header[0] >> 10 & 0x3f,
517 dir, speed, header[0] >> 10 & 0x3f, 489 header[1] >> 16, header[0] >> 16, evts[evt],
518 header[1] >> 16, header[0] >> 16, evts[evt], 490 tcodes[tcode], specific);
519 tcodes[tcode], specific);
520 } 491 }
521} 492}
522 493
494#else
495
496#define param_debug 0
497static inline void log_irqs(u32 evt) {}
498static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {}
499static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {}
500
501#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
502
523static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data) 503static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
524{ 504{
525 writel(data, ohci->registers + offset); 505 writel(data, ohci->registers + offset);
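
The restored #else branch is the usual config-gated logging pattern: empty static inlines keep every call site compiling (and type-checked) while the compiler discards the calls, and defining param_debug to 0 lets the "if (param_debug & ...)" tests constant-fold away. The shape, reduced:

#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
static void log_irqs(u32 evt) { /* real printk-based logging */ }
#else
#define param_debug 0				/* folds the debug checks away */
static inline void log_irqs(u32 evt) {}		/* call sites compile to nothing */
#endif
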
@@ -563,7 +543,7 @@ static int read_phy_reg(struct fw_ohci *ohci, int addr)
563 if (i >= 3) 543 if (i >= 3)
564 msleep(1); 544 msleep(1);
565 } 545 }
566 dev_err(ohci->card.device, "failed to read phy reg\n"); 546 fw_error("failed to read phy reg\n");
567 547
568 return -EBUSY; 548 return -EBUSY;
569} 549}
@@ -585,7 +565,7 @@ static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
585 if (i >= 3) 565 if (i >= 3)
586 msleep(1); 566 msleep(1);
587 } 567 }
588 dev_err(ohci->card.device, "failed to write phy reg\n"); 568 fw_error("failed to write phy reg\n");
589 569
590 return -EBUSY; 570 return -EBUSY;
591} 571}
@@ -684,14 +664,11 @@ static void ar_context_release(struct ar_context *ctx)
684 664
685static void ar_context_abort(struct ar_context *ctx, const char *error_msg) 665static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
686{ 666{
687 struct fw_ohci *ohci = ctx->ohci; 667 if (reg_read(ctx->ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
688 668 reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
689 if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) { 669 flush_writes(ctx->ohci);
690 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
691 flush_writes(ohci);
692 670
693 dev_err(ohci->card.device, "AR error: %s; DMA stopped\n", 671 fw_error("AR error: %s; DMA stopped\n", error_msg);
694 error_msg);
695 } 672 }
696 /* FIXME: restart? */ 673 /* FIXME: restart? */
697} 674}
@@ -861,7 +838,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
861 p.timestamp = status & 0xffff; 838 p.timestamp = status & 0xffff;
862 p.generation = ohci->request_generation; 839 p.generation = ohci->request_generation;
863 840
864 log_ar_at_event(ohci, 'R', p.speed, p.header, evt); 841 log_ar_at_event('R', p.speed, p.header, evt);
865 842
866 /* 843 /*
867 * Several controllers, notably from NEC and VIA, forget to 844 * Several controllers, notably from NEC and VIA, forget to
@@ -882,7 +859,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
882 * 859 *
883 * Alas some chips sometimes emit bus reset packets with a 860 * Alas some chips sometimes emit bus reset packets with a
884 * wrong generation. We set the correct generation for these 861 * wrong generation. We set the correct generation for these
885 * at a slightly incorrect time (in bus_reset_work). 862 * at a slightly incorrect time (in bus_reset_tasklet).
886 */ 863 */
887 if (evt == OHCI1394_evt_bus_reset) { 864 if (evt == OHCI1394_evt_bus_reset) {
888 if (!(ohci->quirks & QUIRK_RESET_PACKET)) 865 if (!(ohci->quirks & QUIRK_RESET_PACKET))
@@ -1069,7 +1046,6 @@ static void context_tasklet(unsigned long data)
1069 address = le32_to_cpu(last->branch_address); 1046 address = le32_to_cpu(last->branch_address);
1070 z = address & 0xf; 1047 z = address & 0xf;
1071 address &= ~0xf; 1048 address &= ~0xf;
1072 ctx->current_bus = address;
1073 1049
1074 /* If the branch address points to a buffer outside of the 1050 /* If the branch address points to a buffer outside of the
1075 * current buffer, advance to the next buffer. */ 1051 * current buffer, advance to the next buffer. */
@@ -1233,22 +1209,21 @@ static void context_append(struct context *ctx,
1233 1209
1234static void context_stop(struct context *ctx) 1210static void context_stop(struct context *ctx)
1235{ 1211{
1236 struct fw_ohci *ohci = ctx->ohci;
1237 u32 reg; 1212 u32 reg;
1238 int i; 1213 int i;
1239 1214
1240 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); 1215 reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
1241 ctx->running = false; 1216 ctx->running = false;
1242 1217
1243 for (i = 0; i < 1000; i++) { 1218 for (i = 0; i < 1000; i++) {
1244 reg = reg_read(ohci, CONTROL_SET(ctx->regs)); 1219 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
1245 if ((reg & CONTEXT_ACTIVE) == 0) 1220 if ((reg & CONTEXT_ACTIVE) == 0)
1246 return; 1221 return;
1247 1222
1248 if (i) 1223 if (i)
1249 udelay(10); 1224 udelay(10);
1250 } 1225 }
1251 dev_err(ohci->card.device, "DMA context still active (0x%08x)\n", reg); 1226 fw_error("Error: DMA context still active (0x%08x)\n", reg);
1252} 1227}
1253 1228
1254struct driver_data { 1229struct driver_data {
@@ -1281,7 +1256,7 @@ static int at_context_queue_packet(struct context *ctx,
1281 d[0].res_count = cpu_to_le16(packet->timestamp); 1256 d[0].res_count = cpu_to_le16(packet->timestamp);
1282 1257
1283 /* 1258 /*
1284 * The DMA format for asynchronous link packets is different 1259 * The DMA format for asyncronous link packets is different
1285 * from the IEEE1394 layout, so shift the fields around 1260 * from the IEEE1394 layout, so shift the fields around
1286 * accordingly. 1261 * accordingly.
1287 */ 1262 */
@@ -1428,7 +1403,7 @@ static int handle_at_packet(struct context *context,
1428 evt = le16_to_cpu(last->transfer_status) & 0x1f; 1403 evt = le16_to_cpu(last->transfer_status) & 0x1f;
1429 packet->timestamp = le16_to_cpu(last->res_count); 1404 packet->timestamp = le16_to_cpu(last->res_count);
1430 1405
1431 log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt); 1406 log_ar_at_event('T', packet->speed, packet->header, evt);
1432 1407
1433 switch (evt) { 1408 switch (evt) {
1434 case OHCI1394_evt_timeout: 1409 case OHCI1394_evt_timeout:
@@ -1557,7 +1532,7 @@ static void handle_local_lock(struct fw_ohci *ohci,
1557 goto out; 1532 goto out;
1558 } 1533 }
1559 1534
1560 dev_err(ohci->card.device, "swap not done (CSR lock timeout)\n"); 1535 fw_error("swap not done (CSR lock timeout)\n");
1561 fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); 1536 fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
1562 1537
1563 out: 1538 out:
@@ -1631,10 +1606,15 @@ static void detect_dead_context(struct fw_ohci *ohci,
1631 u32 ctl; 1606 u32 ctl;
1632 1607
1633 ctl = reg_read(ohci, CONTROL_SET(regs)); 1608 ctl = reg_read(ohci, CONTROL_SET(regs));
1634 if (ctl & CONTEXT_DEAD) 1609 if (ctl & CONTEXT_DEAD) {
1635 dev_err(ohci->card.device, 1610#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
1636 "DMA context %s has stopped, error code: %s\n", 1611 fw_error("DMA context %s has stopped, error code: %s\n",
1637 name, evts[ctl & 0x1f]); 1612 name, evts[ctl & 0x1f]);
1613#else
1614 fw_error("DMA context %s has stopped, error code: %#x\n",
1615 name, ctl & 0x1f);
1616#endif
1617 }
1638} 1618}
1639 1619
1640static void handle_dead_contexts(struct fw_ohci *ohci) 1620static void handle_dead_contexts(struct fw_ohci *ohci)
@@ -1727,148 +1707,29 @@ static u32 update_bus_time(struct fw_ohci *ohci)
1727{ 1707{
1728 u32 cycle_time_seconds = get_cycle_time(ohci) >> 25; 1708 u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
1729 1709
1730 if (unlikely(!ohci->bus_time_running)) {
1731 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
1732 ohci->bus_time = (lower_32_bits(get_seconds()) & ~0x7f) |
1733 (cycle_time_seconds & 0x40);
1734 ohci->bus_time_running = true;
1735 }
1736
1737 if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40)) 1710 if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
1738 ohci->bus_time += 0x40; 1711 ohci->bus_time += 0x40;
1739 1712
1740 return ohci->bus_time | cycle_time_seconds; 1713 return ohci->bus_time | cycle_time_seconds;
1741} 1714}
1742 1715
1743static int get_status_for_port(struct fw_ohci *ohci, int port_index) 1716static void bus_reset_tasklet(unsigned long data)
1744{ 1717{
1745 int reg; 1718 struct fw_ohci *ohci = (struct fw_ohci *)data;
1746 1719 int self_id_count, i, j, reg;
1747 mutex_lock(&ohci->phy_reg_mutex); 1720 int generation, new_generation;
1748 reg = write_phy_reg(ohci, 7, port_index); 1721 unsigned long flags;
1749 if (reg >= 0)
1750 reg = read_phy_reg(ohci, 8);
1751 mutex_unlock(&ohci->phy_reg_mutex);
1752 if (reg < 0)
1753 return reg;
1754
1755 switch (reg & 0x0f) {
1756 case 0x06:
1757 return 2; /* is child node (connected to parent node) */
1758 case 0x0e:
1759 return 3; /* is parent node (connected to child node) */
1760 }
1761 return 1; /* not connected */
1762}
1763
1764static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
1765 int self_id_count)
1766{
1767 int i;
1768 u32 entry;
1769
1770 for (i = 0; i < self_id_count; i++) {
1771 entry = ohci->self_id_buffer[i];
1772 if ((self_id & 0xff000000) == (entry & 0xff000000))
1773 return -1;
1774 if ((self_id & 0xff000000) < (entry & 0xff000000))
1775 return i;
1776 }
1777 return i;
1778}
1779
1780static int initiated_reset(struct fw_ohci *ohci)
1781{
1782 int reg;
1783 int ret = 0;
1784
1785 mutex_lock(&ohci->phy_reg_mutex);
1786 reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
1787 if (reg >= 0) {
1788 reg = read_phy_reg(ohci, 8);
1789 reg |= 0x40;
1790 reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */
1791 if (reg >= 0) {
1792 reg = read_phy_reg(ohci, 12); /* read register 12 */
1793 if (reg >= 0) {
1794 if ((reg & 0x08) == 0x08) {
1795 /* bit 3 indicates "initiated reset" */
1796 ret = 0x2;
1797 }
1798 }
1799 }
1800 }
1801 mutex_unlock(&ohci->phy_reg_mutex);
1802 return ret;
1803}
1804
1805/*
1806 * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
1807 * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
1808 * Construct the selfID from phy register contents.
1809 */
1810static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
1811{
1812 int reg, i, pos, status;
1813 /* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
1814 u32 self_id = 0x8040c800;
1815
1816 reg = reg_read(ohci, OHCI1394_NodeID);
1817 if (!(reg & OHCI1394_NodeID_idValid)) {
1818 dev_notice(ohci->card.device,
1819 "node ID not valid, new bus reset in progress\n");
1820 return -EBUSY;
1821 }
1822 self_id |= ((reg & 0x3f) << 24); /* phy ID */
1823
1824 reg = ohci_read_phy_reg(&ohci->card, 4);
1825 if (reg < 0)
1826 return reg;
1827 self_id |= ((reg & 0x07) << 8); /* power class */
1828
1829 reg = ohci_read_phy_reg(&ohci->card, 1);
1830 if (reg < 0)
1831 return reg;
1832 self_id |= ((reg & 0x3f) << 16); /* gap count */
1833
1834 for (i = 0; i < 3; i++) {
1835 status = get_status_for_port(ohci, i);
1836 if (status < 0)
1837 return status;
1838 self_id |= ((status & 0x3) << (6 - (i * 2)));
1839 }
1840
1841 self_id |= initiated_reset(ohci);
1842
1843 pos = get_self_id_pos(ohci, self_id, self_id_count);
1844 if (pos >= 0) {
1845 memmove(&(ohci->self_id_buffer[pos+1]),
1846 &(ohci->self_id_buffer[pos]),
1847 (self_id_count - pos) * sizeof(*ohci->self_id_buffer));
1848 ohci->self_id_buffer[pos] = self_id;
1849 self_id_count++;
1850 }
1851 return self_id_count;
1852}
1853
1854static void bus_reset_work(struct work_struct *work)
1855{
1856 struct fw_ohci *ohci =
1857 container_of(work, struct fw_ohci, bus_reset_work);
1858 int self_id_count, generation, new_generation, i, j;
1859 u32 reg;
1860 void *free_rom = NULL; 1722 void *free_rom = NULL;
1861 dma_addr_t free_rom_bus = 0; 1723 dma_addr_t free_rom_bus = 0;
1862 bool is_new_root; 1724 bool is_new_root;
1863 1725
1864 reg = reg_read(ohci, OHCI1394_NodeID); 1726 reg = reg_read(ohci, OHCI1394_NodeID);
1865 if (!(reg & OHCI1394_NodeID_idValid)) { 1727 if (!(reg & OHCI1394_NodeID_idValid)) {
1866 dev_notice(ohci->card.device, 1728 fw_notify("node ID not valid, new bus reset in progress\n");
1867 "node ID not valid, new bus reset in progress\n");
1868 return; 1729 return;
1869 } 1730 }
1870 if ((reg & OHCI1394_NodeID_nodeNumber) == 63) { 1731 if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
1871 dev_notice(ohci->card.device, "malconfigured bus\n"); 1732 fw_notify("malconfigured bus\n");
1872 return; 1733 return;
1873 } 1734 }
1874 ohci->node_id = reg & (OHCI1394_NodeID_busNumber | 1735 ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
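
The left column handles self-ID completion in a workqueue item, the right in a tasklet. The difference matters here: bus_reset_work() can sleep (it takes phy_reg_mutex in the helpers removed above), which a tasklet, running in softirq context, must never do. A sketch of the two hand-off styles from the interrupt handler:

/* tasklet: softirq context, may not sleep */
tasklet_init(&ohci->bus_reset_tasklet, bus_reset_tasklet,
	     (unsigned long)ohci);
/* ...in the IRQ handler: */
tasklet_schedule(&ohci->bus_reset_tasklet);

/* workqueue: process context, may sleep (mutexes, msleep, ...) */
INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
/* ...in the IRQ handler: */
queue_work(fw_workqueue, &ohci->bus_reset_work);
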
@@ -1882,7 +1743,7 @@ static void bus_reset_work(struct work_struct *work)
1882 1743
1883 reg = reg_read(ohci, OHCI1394_SelfIDCount); 1744 reg = reg_read(ohci, OHCI1394_SelfIDCount);
1884 if (reg & OHCI1394_SelfIDCount_selfIDError) { 1745 if (reg & OHCI1394_SelfIDCount_selfIDError) {
1885 dev_notice(ohci->card.device, "inconsistent self IDs\n"); 1746 fw_notify("inconsistent self IDs\n");
1886 return; 1747 return;
1887 } 1748 }
1888 /* 1749 /*
@@ -1892,53 +1753,21 @@ static void bus_reset_work(struct work_struct *work)
1892 * bit extra to get the actual number of self IDs. 1753 * bit extra to get the actual number of self IDs.
1893 */ 1754 */
1894 self_id_count = (reg >> 3) & 0xff; 1755 self_id_count = (reg >> 3) & 0xff;
1895 1756 if (self_id_count == 0 || self_id_count > 252) {
1896 if (self_id_count > 252) { 1757 fw_notify("inconsistent self IDs\n");
1897 dev_notice(ohci->card.device, "inconsistent self IDs\n");
1898 return; 1758 return;
1899 } 1759 }
1900
1901 generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; 1760 generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
1902 rmb(); 1761 rmb();
1903 1762
1904 for (i = 1, j = 0; j < self_id_count; i += 2, j++) { 1763 for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
1905 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) { 1764 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
1906 /* 1765 fw_notify("inconsistent self IDs\n");
1907 * If the invalid data looks like a cycle start packet, 1766 return;
1908 * it's likely to be the result of the cycle master
1909 * having a wrong gap count. In this case, the self IDs
1910 * so far are valid and should be processed so that the
1911 * bus manager can then correct the gap count.
1912 */
1913 if (cond_le32_to_cpu(ohci->self_id_cpu[i])
1914 == 0xffff008f) {
1915 dev_notice(ohci->card.device,
1916 "ignoring spurious self IDs\n");
1917 self_id_count = j;
1918 break;
1919 } else {
1920 dev_notice(ohci->card.device,
1921 "inconsistent self IDs\n");
1922 return;
1923 }
1924 } 1767 }
1925 ohci->self_id_buffer[j] = 1768 ohci->self_id_buffer[j] =
1926 cond_le32_to_cpu(ohci->self_id_cpu[i]); 1769 cond_le32_to_cpu(ohci->self_id_cpu[i]);
1927 } 1770 }
1928
1929 if (ohci->quirks & QUIRK_TI_SLLZ059) {
1930 self_id_count = find_and_insert_self_id(ohci, self_id_count);
1931 if (self_id_count < 0) {
1932 dev_notice(ohci->card.device,
1933 "could not construct local self ID\n");
1934 return;
1935 }
1936 }
1937
1938 if (self_id_count == 0) {
1939 dev_notice(ohci->card.device, "inconsistent self IDs\n");
1940 return;
1941 }
1942 rmb(); 1771 rmb();
1943 1772
1944 /* 1773 /*
@@ -1957,19 +1786,19 @@ static void bus_reset_work(struct work_struct *work)
1957 1786
1958 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; 1787 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
1959 if (new_generation != generation) { 1788 if (new_generation != generation) {
1960 dev_notice(ohci->card.device, 1789 fw_notify("recursive bus reset detected, "
1961 "new bus reset, discarding self ids\n"); 1790 "discarding self ids\n");
1962 return; 1791 return;
1963 } 1792 }
1964 1793
1965 /* FIXME: Document how the locking works. */ 1794 /* FIXME: Document how the locking works. */
1966 spin_lock_irq(&ohci->lock); 1795 spin_lock_irqsave(&ohci->lock, flags);
1967 1796
1968 ohci->generation = -1; /* prevent AT packet queueing */ 1797 ohci->generation = -1; /* prevent AT packet queueing */
1969 context_stop(&ohci->at_request_ctx); 1798 context_stop(&ohci->at_request_ctx);
1970 context_stop(&ohci->at_response_ctx); 1799 context_stop(&ohci->at_response_ctx);
1971 1800
1972 spin_unlock_irq(&ohci->lock); 1801 spin_unlock_irqrestore(&ohci->lock, flags);
1973 1802
1974 /* 1803 /*
1975 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent 1804 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
@@ -1979,7 +1808,7 @@ static void bus_reset_work(struct work_struct *work)
1979 at_context_flush(&ohci->at_request_ctx); 1808 at_context_flush(&ohci->at_request_ctx);
1980 at_context_flush(&ohci->at_response_ctx); 1809 at_context_flush(&ohci->at_response_ctx);
1981 1810
1982 spin_lock_irq(&ohci->lock); 1811 spin_lock_irqsave(&ohci->lock, flags);
1983 1812
1984 ohci->generation = generation; 1813 ohci->generation = generation;
1985 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); 1814 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
@@ -2023,13 +1852,14 @@ static void bus_reset_work(struct work_struct *work)
2023 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0); 1852 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
2024#endif 1853#endif
2025 1854
2026 spin_unlock_irq(&ohci->lock); 1855 spin_unlock_irqrestore(&ohci->lock, flags);
2027 1856
2028 if (free_rom) 1857 if (free_rom)
2029 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1858 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2030 free_rom, free_rom_bus); 1859 free_rom, free_rom_bus);
2031 1860
2032 log_selfids(ohci, generation, self_id_count); 1861 log_selfids(ohci->node_id, generation,
1862 self_id_count, ohci->self_id_buffer);
2033 1863
2034 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, 1864 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
2035 self_id_count, ohci->self_id_buffer, 1865 self_id_count, ohci->self_id_buffer,
@@ -2054,10 +1884,10 @@ static irqreturn_t irq_handler(int irq, void *data)
2054 */ 1884 */
2055 reg_write(ohci, OHCI1394_IntEventClear, 1885 reg_write(ohci, OHCI1394_IntEventClear,
2056 event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr)); 1886 event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
2057 log_irqs(ohci, event); 1887 log_irqs(event);
2058 1888
2059 if (event & OHCI1394_selfIDComplete) 1889 if (event & OHCI1394_selfIDComplete)
2060 queue_work(fw_workqueue, &ohci->bus_reset_work); 1890 tasklet_schedule(&ohci->bus_reset_tasklet);
2061 1891
2062 if (event & OHCI1394_RQPkt) 1892 if (event & OHCI1394_RQPkt)
2063 tasklet_schedule(&ohci->ar_request_ctx.tasklet); 1893 tasklet_schedule(&ohci->ar_request_ctx.tasklet);
@@ -2096,21 +1926,20 @@ static irqreturn_t irq_handler(int irq, void *data)
2096 } 1926 }
2097 1927
2098 if (unlikely(event & OHCI1394_regAccessFail)) 1928 if (unlikely(event & OHCI1394_regAccessFail))
2099 dev_err(ohci->card.device, "register access failure\n"); 1929 fw_error("Register access failure - "
1930 "please notify linux1394-devel@lists.sf.net\n");
2100 1931
2101 if (unlikely(event & OHCI1394_postedWriteErr)) { 1932 if (unlikely(event & OHCI1394_postedWriteErr)) {
2102 reg_read(ohci, OHCI1394_PostedWriteAddressHi); 1933 reg_read(ohci, OHCI1394_PostedWriteAddressHi);
2103 reg_read(ohci, OHCI1394_PostedWriteAddressLo); 1934 reg_read(ohci, OHCI1394_PostedWriteAddressLo);
2104 reg_write(ohci, OHCI1394_IntEventClear, 1935 reg_write(ohci, OHCI1394_IntEventClear,
2105 OHCI1394_postedWriteErr); 1936 OHCI1394_postedWriteErr);
2106 if (printk_ratelimit()) 1937 fw_error("PCI posted write error\n");
2107 dev_err(ohci->card.device, "PCI posted write error\n");
2108 } 1938 }
2109 1939
2110 if (unlikely(event & OHCI1394_cycleTooLong)) { 1940 if (unlikely(event & OHCI1394_cycleTooLong)) {
2111 if (printk_ratelimit()) 1941 if (printk_ratelimit())
2112 dev_notice(ohci->card.device, 1942 fw_notify("isochronous cycle too long\n");
2113 "isochronous cycle too long\n");
2114 reg_write(ohci, OHCI1394_LinkControlSet, 1943 reg_write(ohci, OHCI1394_LinkControlSet,
2115 OHCI1394_LinkControl_cycleMaster); 1944 OHCI1394_LinkControl_cycleMaster);
2116 } 1945 }
@@ -2123,8 +1952,7 @@ static irqreturn_t irq_handler(int irq, void *data)
2123 * them at least two cycles later. (FIXME?) 1952 * them at least two cycles later. (FIXME?)
2124 */ 1953 */
2125 if (printk_ratelimit()) 1954 if (printk_ratelimit())
2126 dev_notice(ohci->card.device, 1955 fw_notify("isochronous cycle inconsistent\n");
2127 "isochronous cycle inconsistent\n");
2128 } 1956 }
2129 1957
2130 if (unlikely(event & OHCI1394_unrecoverableError)) 1958 if (unlikely(event & OHCI1394_unrecoverableError))
@@ -2220,38 +2048,16 @@ static int configure_1394a_enhancements(struct fw_ohci *ohci)
2220 return 0; 2048 return 0;
2221} 2049}
2222 2050
2223static int probe_tsb41ba3d(struct fw_ohci *ohci)
2224{
2225 /* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */
2226 static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
2227 int reg, i;
2228
2229 reg = read_phy_reg(ohci, 2);
2230 if (reg < 0)
2231 return reg;
2232 if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
2233 return 0;
2234
2235 for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) {
2236 reg = read_paged_phy_reg(ohci, 1, i + 10);
2237 if (reg < 0)
2238 return reg;
2239 if (reg != id[i])
2240 return 0;
2241 }
2242 return 1;
2243}
2244
2245static int ohci_enable(struct fw_card *card, 2051static int ohci_enable(struct fw_card *card,
2246 const __be32 *config_rom, size_t length) 2052 const __be32 *config_rom, size_t length)
2247{ 2053{
2248 struct fw_ohci *ohci = fw_ohci(card); 2054 struct fw_ohci *ohci = fw_ohci(card);
2249 struct pci_dev *dev = to_pci_dev(card->device); 2055 struct pci_dev *dev = to_pci_dev(card->device);
2250 u32 lps, version, irqs; 2056 u32 lps, seconds, version, irqs;
2251 int i, ret; 2057 int i, ret;
2252 2058
2253 if (software_reset(ohci)) { 2059 if (software_reset(ohci)) {
2254 dev_err(card->device, "failed to reset ohci card\n"); 2060 fw_error("Failed to reset ohci card.\n");
2255 return -EBUSY; 2061 return -EBUSY;
2256 } 2062 }
2257 2063
@@ -2275,20 +2081,10 @@ static int ohci_enable(struct fw_card *card,
2275 } 2081 }
2276 2082
2277 if (!lps) { 2083 if (!lps) {
2278 dev_err(card->device, "failed to set Link Power Status\n"); 2084 fw_error("Failed to set Link Power Status\n");
2279 return -EIO; 2085 return -EIO;
2280 } 2086 }
2281 2087
2282 if (ohci->quirks & QUIRK_TI_SLLZ059) {
2283 ret = probe_tsb41ba3d(ohci);
2284 if (ret < 0)
2285 return ret;
2286 if (ret)
2287 dev_notice(card->device, "local TSB41BA3D phy\n");
2288 else
2289 ohci->quirks &= ~QUIRK_TI_SLLZ059;
2290 }
2291
2292 reg_write(ohci, OHCI1394_HCControlClear, 2088 reg_write(ohci, OHCI1394_HCControlClear,
2293 OHCI1394_HCControl_noByteSwapData); 2089 OHCI1394_HCControl_noByteSwapData);
2294 2090
@@ -2303,12 +2099,9 @@ static int ohci_enable(struct fw_card *card,
2303 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) | 2099 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
2304 (200 << 16)); 2100 (200 << 16));
2305 2101
2306 ohci->bus_time_running = false; 2102 seconds = lower_32_bits(get_seconds());
2307 2103 reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25);
2308 for (i = 0; i < 32; i++) 2104 ohci->bus_time = seconds & ~0x3f;
2309 if (ohci->ir_context_support & (1 << i))
2310 reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i),
2311 IR_CONTEXT_MULTI_CHANNEL_MODE);
2312 2105
2313 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 2106 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2314 if (version >= OHCI_VERSION_1_1) { 2107 if (version >= OHCI_VERSION_1_1) {
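The restored code above seeds the cycle timer from wall-clock seconds. A minimal sketch of the register layout it assumes, per the usual OHCI 1394 field packing (unpack_cycle_timer is a hypothetical helper, not driver code):

	/* Hypothetical helper, assuming the standard OHCI 1394 packing
	 * of the isochronous cycle timer register. */
	static void unpack_cycle_timer(u32 t)
	{
		u32 seconds = t >> 25;            /* cycleSeconds, 0..127 */
		u32 count   = (t >> 12) & 0x1fff; /* cycleCount, 0..7999 */
		u32 offset  = t & 0xfff;          /* cycleOffset ticks */

		pr_debug("cycle timer: %us, cycle %u, offset %u\n",
			 seconds, count, offset);
	}

Writing seconds << 25 therefore lands the low seven bits of wall-clock time in cycleSeconds, which the re-enabled cycle64Seconds interrupt then keeps in step with ohci->bus_time.
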
@@ -2387,8 +2180,7 @@ static int ohci_enable(struct fw_card *card,
2387 if (request_irq(dev->irq, irq_handler, 2180 if (request_irq(dev->irq, irq_handler,
2388 pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, 2181 pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
2389 ohci_driver_name, ohci)) { 2182 ohci_driver_name, ohci)) {
2390 dev_err(card->device, "failed to allocate interrupt %d\n", 2183 fw_error("Failed to allocate interrupt %d.\n", dev->irq);
2391 dev->irq);
2392 pci_disable_msi(dev); 2184 pci_disable_msi(dev);
2393 2185
2394 if (config_rom) { 2186 if (config_rom) {
@@ -2406,6 +2198,7 @@ static int ohci_enable(struct fw_card *card,
2406 OHCI1394_postedWriteErr | 2198 OHCI1394_postedWriteErr |
2407 OHCI1394_selfIDComplete | 2199 OHCI1394_selfIDComplete |
2408 OHCI1394_regAccessFail | 2200 OHCI1394_regAccessFail |
2201 OHCI1394_cycle64Seconds |
2409 OHCI1394_cycleInconsistent | 2202 OHCI1394_cycleInconsistent |
2410 OHCI1394_unrecoverableError | 2203 OHCI1394_unrecoverableError |
2411 OHCI1394_cycleTooLong | 2204 OHCI1394_cycleTooLong |
@@ -2437,6 +2230,7 @@ static int ohci_set_config_rom(struct fw_card *card,
2437 const __be32 *config_rom, size_t length) 2230 const __be32 *config_rom, size_t length)
2438{ 2231{
2439 struct fw_ohci *ohci; 2232 struct fw_ohci *ohci;
2233 unsigned long flags;
2440 __be32 *next_config_rom; 2234 __be32 *next_config_rom;
2441 dma_addr_t uninitialized_var(next_config_rom_bus); 2235 dma_addr_t uninitialized_var(next_config_rom_bus);
2442 2236
@@ -2466,7 +2260,7 @@ static int ohci_set_config_rom(struct fw_card *card,
2466 * then set up the real values for the two registers. 2260 * then set up the real values for the two registers.
2467 * 2261 *
2468 * We use ohci->lock to avoid racing with the code that sets 2262 * We use ohci->lock to avoid racing with the code that sets
2469 * ohci->next_config_rom to NULL (see bus_reset_work). 2263 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
2470 */ 2264 */
2471 2265
2472 next_config_rom = 2266 next_config_rom =
@@ -2475,7 +2269,7 @@ static int ohci_set_config_rom(struct fw_card *card,
2475 if (next_config_rom == NULL) 2269 if (next_config_rom == NULL)
2476 return -ENOMEM; 2270 return -ENOMEM;
2477 2271
2478 spin_lock_irq(&ohci->lock); 2272 spin_lock_irqsave(&ohci->lock, flags);
2479 2273
2480 /* 2274 /*
2481 * If there is not an already pending config_rom update, 2275 * If there is not an already pending config_rom update,
@@ -2501,7 +2295,7 @@ static int ohci_set_config_rom(struct fw_card *card,
2501 2295
2502 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); 2296 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2503 2297
2504 spin_unlock_irq(&ohci->lock); 2298 spin_unlock_irqrestore(&ohci->lock, flags);
2505 2299
2506 /* If we didn't use the DMA allocation, delete it. */ 2300 /* If we didn't use the DMA allocation, delete it. */
2507 if (next_config_rom != NULL) 2301 if (next_config_rom != NULL)
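This hunk trades spin_lock_irq() for spin_lock_irqsave(); both are standard idioms, contrasted in the sketch below (locking_idioms is illustrative only):

	/* Illustrative only: the two spinlock idioms this hunk trades. */
	static void locking_idioms(struct fw_ohci *ohci)
	{
		unsigned long flags;

		/* Process context, interrupts known enabled on entry: */
		spin_lock_irq(&ohci->lock);
		/* ... critical section ... */
		spin_unlock_irq(&ohci->lock);	/* re-enables IRQs unconditionally */

		/* Callable whatever the IRQ state; saves and restores it: */
		spin_lock_irqsave(&ohci->lock, flags);
		/* ... critical section ... */
		spin_unlock_irqrestore(&ohci->lock, flags);
	}
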
@@ -2551,7 +2345,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
2551 dma_unmap_single(ohci->card.device, packet->payload_bus, 2345 dma_unmap_single(ohci->card.device, packet->payload_bus,
2552 packet->payload_length, DMA_TO_DEVICE); 2346 packet->payload_length, DMA_TO_DEVICE);
2553 2347
2554 log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20); 2348 log_ar_at_event('T', packet->speed, packet->header, 0x20);
2555 driver_data->packet = NULL; 2349 driver_data->packet = NULL;
2556 packet->ack = RCODE_CANCELLED; 2350 packet->ack = RCODE_CANCELLED;
2557 packet->callback(packet, &ohci->card, packet->ack); 2351 packet->callback(packet, &ohci->card, packet->ack);
@@ -2694,8 +2488,7 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
2694 2488
2695 case CSR_BUS_TIME: 2489 case CSR_BUS_TIME:
2696 spin_lock_irqsave(&ohci->lock, flags); 2490 spin_lock_irqsave(&ohci->lock, flags);
2697 ohci->bus_time = (update_bus_time(ohci) & 0x40) | 2491 ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f);
2698 (value & ~0x7f);
2699 spin_unlock_irqrestore(&ohci->lock, flags); 2492 spin_unlock_irqrestore(&ohci->lock, flags);
2700 break; 2493 break;
2701 2494
@@ -2717,35 +2510,25 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
2717 } 2510 }
2718} 2511}
2719 2512
2720static void flush_iso_completions(struct iso_context *ctx) 2513static void copy_iso_headers(struct iso_context *ctx, void *p)
2721{
2722 ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
2723 ctx->header_length, ctx->header,
2724 ctx->base.callback_data);
2725 ctx->header_length = 0;
2726}
2727
2728static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
2729{ 2514{
2730 u32 *ctx_hdr; 2515 int i = ctx->header_length;
2731 2516
2732 if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) 2517 if (i + ctx->base.header_size > PAGE_SIZE)
2733 flush_iso_completions(ctx); 2518 return;
2734
2735 ctx_hdr = ctx->header + ctx->header_length;
2736 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
2737 2519
2738 /* 2520 /*
2739 * The two iso header quadlets are byteswapped to little 2521 * The iso header is byteswapped to little endian by
2740 * endian by the controller, but we want to present them 2522 * the controller, but the remaining header quadlets
2741 * as big endian for consistency with the bus endianness. 2523 * are big endian. We want to present all the headers
2524 * as big endian, so we have to swap the first quadlet.
2742 */ 2525 */
2743 if (ctx->base.header_size > 0) 2526 if (ctx->base.header_size > 0)
2744 ctx_hdr[0] = swab32(dma_hdr[1]); /* iso packet header */ 2527 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
2745 if (ctx->base.header_size > 4) 2528 if (ctx->base.header_size > 4)
2746 ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */ 2529 *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
2747 if (ctx->base.header_size > 8) 2530 if (ctx->base.header_size > 8)
2748 memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8); 2531 memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
2749 ctx->header_length += ctx->base.header_size; 2532 ctx->header_length += ctx->base.header_size;
2750} 2533}
2751 2534
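Both variants of copy_iso_headers() perform the same endian fix-up on the first two header quadlets; condensed below (present_iso_header is a hypothetical helper):

	/* Hypothetical helper: the controller DMAs the first two header
	 * quadlets little-endian; an in-memory byte swap presents them
	 * big-endian, matching the remaining bus-order quadlets. */
	static void present_iso_header(u32 *out, const u32 *dma_hdr)
	{
		out[0] = swab32(dma_hdr[1]);	/* iso packet header */
		out[1] = swab32(dma_hdr[0]);	/* timestamp */
	}
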
@@ -2756,7 +2539,8 @@ static int handle_ir_packet_per_buffer(struct context *context,
2756 struct iso_context *ctx = 2539 struct iso_context *ctx =
2757 container_of(context, struct iso_context, context); 2540 container_of(context, struct iso_context, context);
2758 struct descriptor *pd; 2541 struct descriptor *pd;
2759 u32 buffer_dma; 2542 __le32 *ir_header;
2543 void *p;
2760 2544
2761 for (pd = d; pd <= last; pd++) 2545 for (pd = d; pd <= last; pd++)
2762 if (pd->transfer_status) 2546 if (pd->transfer_status)
@@ -2765,20 +2549,17 @@ static int handle_ir_packet_per_buffer(struct context *context,
2765 /* Descriptor(s) not done yet, stop iteration */ 2549 /* Descriptor(s) not done yet, stop iteration */
2766 return 0; 2550 return 0;
2767 2551
2768 while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) { 2552 p = last + 1;
2769 d++; 2553 copy_iso_headers(ctx, p);
2770 buffer_dma = le32_to_cpu(d->data_address);
2771 dma_sync_single_range_for_cpu(context->ohci->card.device,
2772 buffer_dma & PAGE_MASK,
2773 buffer_dma & ~PAGE_MASK,
2774 le16_to_cpu(d->req_count),
2775 DMA_FROM_DEVICE);
2776 }
2777 2554
2778 copy_iso_headers(ctx, (u32 *) (last + 1)); 2555 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
2779 2556 ir_header = (__le32 *) p;
2780 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) 2557 ctx->base.callback.sc(&ctx->base,
2781 flush_iso_completions(ctx); 2558 le32_to_cpu(ir_header[0]) & 0xffff,
2559 ctx->header_length, ctx->header,
2560 ctx->base.callback_data);
2561 ctx->header_length = 0;
2562 }
2782 2563
2783 return 1; 2564 return 1;
2784} 2565}
@@ -2790,96 +2571,29 @@ static int handle_ir_buffer_fill(struct context *context,
2790{ 2571{
2791 struct iso_context *ctx = 2572 struct iso_context *ctx =
2792 container_of(context, struct iso_context, context); 2573 container_of(context, struct iso_context, context);
2793 unsigned int req_count, res_count, completed;
2794 u32 buffer_dma;
2795
2796 req_count = le16_to_cpu(last->req_count);
2797 res_count = le16_to_cpu(ACCESS_ONCE(last->res_count));
2798 completed = req_count - res_count;
2799 buffer_dma = le32_to_cpu(last->data_address);
2800
2801 if (completed > 0) {
2802 ctx->mc_buffer_bus = buffer_dma;
2803 ctx->mc_completed = completed;
2804 }
2805 2574
2806 if (res_count != 0) 2575 if (!last->transfer_status)
2807 /* Descriptor(s) not done yet, stop iteration */ 2576 /* Descriptor(s) not done yet, stop iteration */
2808 return 0; 2577 return 0;
2809 2578
2810 dma_sync_single_range_for_cpu(context->ohci->card.device, 2579 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
2811 buffer_dma & PAGE_MASK,
2812 buffer_dma & ~PAGE_MASK,
2813 completed, DMA_FROM_DEVICE);
2814
2815 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
2816 ctx->base.callback.mc(&ctx->base, 2580 ctx->base.callback.mc(&ctx->base,
2817 buffer_dma + completed, 2581 le32_to_cpu(last->data_address) +
2582 le16_to_cpu(last->req_count) -
2583 le16_to_cpu(last->res_count),
2818 ctx->base.callback_data); 2584 ctx->base.callback_data);
2819 ctx->mc_completed = 0;
2820 }
2821 2585
2822 return 1; 2586 return 1;
2823} 2587}
2824 2588
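The bookkeeping removed above derives how many bytes the controller actually filled and syncs only that range back to the CPU; a self-contained sketch of the pattern (completed_bytes is illustrative, not driver code):

	/* Illustrative: filled byte count from a descriptor, syncing
	 * just that range, split into page base plus in-page offset. */
	static unsigned int completed_bytes(struct device *dev,
					    const struct descriptor *last)
	{
		unsigned int req  = le16_to_cpu(last->req_count);
		unsigned int res  = le16_to_cpu(last->res_count);
		unsigned int done = req - res;	/* bytes actually written */
		u32 bus = le32_to_cpu(last->data_address);

		dma_sync_single_range_for_cpu(dev, bus & PAGE_MASK,
					      bus & ~PAGE_MASK, done,
					      DMA_FROM_DEVICE);
		return done;
	}
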
2825static void flush_ir_buffer_fill(struct iso_context *ctx)
2826{
2827 dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
2828 ctx->mc_buffer_bus & PAGE_MASK,
2829 ctx->mc_buffer_bus & ~PAGE_MASK,
2830 ctx->mc_completed, DMA_FROM_DEVICE);
2831
2832 ctx->base.callback.mc(&ctx->base,
2833 ctx->mc_buffer_bus + ctx->mc_completed,
2834 ctx->base.callback_data);
2835 ctx->mc_completed = 0;
2836}
2837
2838static inline void sync_it_packet_for_cpu(struct context *context,
2839 struct descriptor *pd)
2840{
2841 __le16 control;
2842 u32 buffer_dma;
2843
2844 /* only packets beginning with OUTPUT_MORE* have data buffers */
2845 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
2846 return;
2847
2848 /* skip over the OUTPUT_MORE_IMMEDIATE descriptor */
2849 pd += 2;
2850
2851 /*
2852 * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's
2853 * data buffer is in the context program's coherent page and must not
2854 * be synced.
2855 */
2856 if ((le32_to_cpu(pd->data_address) & PAGE_MASK) ==
2857 (context->current_bus & PAGE_MASK)) {
2858 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
2859 return;
2860 pd++;
2861 }
2862
2863 do {
2864 buffer_dma = le32_to_cpu(pd->data_address);
2865 dma_sync_single_range_for_cpu(context->ohci->card.device,
2866 buffer_dma & PAGE_MASK,
2867 buffer_dma & ~PAGE_MASK,
2868 le16_to_cpu(pd->req_count),
2869 DMA_TO_DEVICE);
2870 control = pd->control;
2871 pd++;
2872 } while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)));
2873}
2874
2875static int handle_it_packet(struct context *context, 2589static int handle_it_packet(struct context *context,
2876 struct descriptor *d, 2590 struct descriptor *d,
2877 struct descriptor *last) 2591 struct descriptor *last)
2878{ 2592{
2879 struct iso_context *ctx = 2593 struct iso_context *ctx =
2880 container_of(context, struct iso_context, context); 2594 container_of(context, struct iso_context, context);
2595 int i;
2881 struct descriptor *pd; 2596 struct descriptor *pd;
2882 __be32 *ctx_hdr;
2883 2597
2884 for (pd = d; pd <= last; pd++) 2598 for (pd = d; pd <= last; pd++)
2885 if (pd->transfer_status) 2599 if (pd->transfer_status)
@@ -2888,21 +2602,20 @@ static int handle_it_packet(struct context *context,
2888 /* Descriptor(s) not done yet, stop iteration */ 2602 /* Descriptor(s) not done yet, stop iteration */
2889 return 0; 2603 return 0;
2890 2604
2891 sync_it_packet_for_cpu(context, d); 2605 i = ctx->header_length;
2892 2606 if (i + 4 < PAGE_SIZE) {
2893 if (ctx->header_length + 4 > PAGE_SIZE) 2607 /* Present this value as big-endian to match the receive code */
2894 flush_iso_completions(ctx); 2608 *(__be32 *)(ctx->header + i) = cpu_to_be32(
2895 2609 ((u32)le16_to_cpu(pd->transfer_status) << 16) |
2896 ctx_hdr = ctx->header + ctx->header_length; 2610 le16_to_cpu(pd->res_count));
2897 ctx->last_timestamp = le16_to_cpu(last->res_count); 2611 ctx->header_length += 4;
2898 /* Present this value as big-endian to match the receive code */ 2612 }
2899 *ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) | 2613 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
2900 le16_to_cpu(pd->res_count)); 2614 ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count),
2901 ctx->header_length += 4; 2615 ctx->header_length, ctx->header,
2902 2616 ctx->base.callback_data);
2903 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) 2617 ctx->header_length = 0;
2904 flush_iso_completions(ctx); 2618 }
2905
2906 return 1; 2619 return 1;
2907} 2620}
2908 2621
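handle_it_packet() above packs the transmit status and residual count into one big-endian quadlet; a worked example with illustrative values:

	/* Illustrative values only: transfer_status = 0x0011 and
	 * res_count = 0x0004 produce the quadlet 0x00110004. */
	u32 quadlet = ((u32)0x0011 << 16) | 0x0004;	/* 0x00110004 */
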
@@ -2926,9 +2639,10 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
2926 descriptor_callback_t uninitialized_var(callback); 2639 descriptor_callback_t uninitialized_var(callback);
2927 u64 *uninitialized_var(channels); 2640 u64 *uninitialized_var(channels);
2928 u32 *uninitialized_var(mask), uninitialized_var(regs); 2641 u32 *uninitialized_var(mask), uninitialized_var(regs);
2642 unsigned long flags;
2929 int index, ret = -EBUSY; 2643 int index, ret = -EBUSY;
2930 2644
2931 spin_lock_irq(&ohci->lock); 2645 spin_lock_irqsave(&ohci->lock, flags);
2932 2646
2933 switch (type) { 2647 switch (type) {
2934 case FW_ISO_CONTEXT_TRANSMIT: 2648 case FW_ISO_CONTEXT_TRANSMIT:
@@ -2972,7 +2686,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
2972 ret = -ENOSYS; 2686 ret = -ENOSYS;
2973 } 2687 }
2974 2688
2975 spin_unlock_irq(&ohci->lock); 2689 spin_unlock_irqrestore(&ohci->lock, flags);
2976 2690
2977 if (index < 0) 2691 if (index < 0)
2978 return ERR_PTR(ret); 2692 return ERR_PTR(ret);
@@ -2988,17 +2702,15 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
2988 if (ret < 0) 2702 if (ret < 0)
2989 goto out_with_header; 2703 goto out_with_header;
2990 2704
2991 if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) { 2705 if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
2992 set_multichannel_mask(ohci, 0); 2706 set_multichannel_mask(ohci, 0);
2993 ctx->mc_completed = 0;
2994 }
2995 2707
2996 return &ctx->base; 2708 return &ctx->base;
2997 2709
2998 out_with_header: 2710 out_with_header:
2999 free_page((unsigned long)ctx->header); 2711 free_page((unsigned long)ctx->header);
3000 out: 2712 out:
3001 spin_lock_irq(&ohci->lock); 2713 spin_lock_irqsave(&ohci->lock, flags);
3002 2714
3003 switch (type) { 2715 switch (type) {
3004 case FW_ISO_CONTEXT_RECEIVE: 2716 case FW_ISO_CONTEXT_RECEIVE:
@@ -3011,7 +2723,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
3011 } 2723 }
3012 *mask |= 1 << index; 2724 *mask |= 1 << index;
3013 2725
3014 spin_unlock_irq(&ohci->lock); 2726 spin_unlock_irqrestore(&ohci->lock, flags);
3015 2727
3016 return ERR_PTR(ret); 2728 return ERR_PTR(ret);
3017} 2729}
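The error path above returns the context index to a free-bitmask; the full allocate/free pair looks roughly like the sketch below, assuming the driver's convention that a set bit means "free" (the helper names are hypothetical):

	/* Hypothetical helpers: bitmask context allocation, done under
	 * ohci->lock; a set bit in *mask marks a free context. */
	static int alloc_ctx_index(u32 *mask)
	{
		int index = *mask ? ffs(*mask) - 1 : -1;  /* first free bit */

		if (index >= 0)
			*mask &= ~(1 << index);		  /* claim it */
		return index;
	}

	static void free_ctx_index(u32 *mask, int index)
	{
		*mask |= 1 << index;			  /* release it */
	}
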
@@ -3259,10 +2971,6 @@ static int queue_iso_transmit(struct iso_context *ctx,
3259 page_bus = page_private(buffer->pages[page]); 2971 page_bus = page_private(buffer->pages[page]);
3260 pd[i].data_address = cpu_to_le32(page_bus + offset); 2972 pd[i].data_address = cpu_to_le32(page_bus + offset);
3261 2973
3262 dma_sync_single_range_for_device(ctx->context.ohci->card.device,
3263 page_bus, offset, length,
3264 DMA_TO_DEVICE);
3265
3266 payload_index += length; 2974 payload_index += length;
3267 } 2975 }
3268 2976
@@ -3287,7 +2995,6 @@ static int queue_iso_packet_per_buffer(struct iso_context *ctx,
3287 struct fw_iso_buffer *buffer, 2995 struct fw_iso_buffer *buffer,
3288 unsigned long payload) 2996 unsigned long payload)
3289{ 2997{
3290 struct device *device = ctx->context.ohci->card.device;
3291 struct descriptor *d, *pd; 2998 struct descriptor *d, *pd;
3292 dma_addr_t d_bus, page_bus; 2999 dma_addr_t d_bus, page_bus;
3293 u32 z, header_z, rest; 3000 u32 z, header_z, rest;
@@ -3342,10 +3049,6 @@ static int queue_iso_packet_per_buffer(struct iso_context *ctx,
3342 page_bus = page_private(buffer->pages[page]); 3049 page_bus = page_private(buffer->pages[page]);
3343 pd->data_address = cpu_to_le32(page_bus + offset); 3050 pd->data_address = cpu_to_le32(page_bus + offset);
3344 3051
3345 dma_sync_single_range_for_device(device, page_bus,
3346 offset, length,
3347 DMA_FROM_DEVICE);
3348
3349 offset = (offset + length) & ~PAGE_MASK; 3052 offset = (offset + length) & ~PAGE_MASK;
3350 rest -= length; 3053 rest -= length;
3351 if (offset == 0) 3054 if (offset == 0)
@@ -3405,10 +3108,6 @@ static int queue_iso_buffer_fill(struct iso_context *ctx,
3405 page_bus = page_private(buffer->pages[page]); 3108 page_bus = page_private(buffer->pages[page]);
3406 d->data_address = cpu_to_le32(page_bus + offset); 3109 d->data_address = cpu_to_le32(page_bus + offset);
3407 3110
3408 dma_sync_single_range_for_device(ctx->context.ohci->card.device,
3409 page_bus, offset, length,
3410 DMA_FROM_DEVICE);
3411
3412 rest -= length; 3111 rest -= length;
3413 offset = 0; 3112 offset = 0;
3414 page++; 3113 page++;
@@ -3453,39 +3152,6 @@ static void ohci_flush_queue_iso(struct fw_iso_context *base)
3453 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); 3152 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
3454} 3153}
3455 3154
3456static int ohci_flush_iso_completions(struct fw_iso_context *base)
3457{
3458 struct iso_context *ctx = container_of(base, struct iso_context, base);
3459 int ret = 0;
3460
3461 tasklet_disable(&ctx->context.tasklet);
3462
3463 if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
3464 context_tasklet((unsigned long)&ctx->context);
3465
3466 switch (base->type) {
3467 case FW_ISO_CONTEXT_TRANSMIT:
3468 case FW_ISO_CONTEXT_RECEIVE:
3469 if (ctx->header_length != 0)
3470 flush_iso_completions(ctx);
3471 break;
3472 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3473 if (ctx->mc_completed != 0)
3474 flush_ir_buffer_fill(ctx);
3475 break;
3476 default:
3477 ret = -ENOSYS;
3478 }
3479
3480 clear_bit_unlock(0, &ctx->flushing_completions);
3481 smp_mb__after_clear_bit();
3482 }
3483
3484 tasklet_enable(&ctx->context.tasklet);
3485
3486 return ret;
3487}
3488
3489static const struct fw_card_driver ohci_driver = { 3155static const struct fw_card_driver ohci_driver = {
3490 .enable = ohci_enable, 3156 .enable = ohci_enable,
3491 .read_phy_reg = ohci_read_phy_reg, 3157 .read_phy_reg = ohci_read_phy_reg,
@@ -3503,7 +3169,6 @@ static const struct fw_card_driver ohci_driver = {
3503 .set_iso_channels = ohci_set_iso_channels, 3169 .set_iso_channels = ohci_set_iso_channels,
3504 .queue_iso = ohci_queue_iso, 3170 .queue_iso = ohci_queue_iso,
3505 .flush_queue_iso = ohci_flush_queue_iso, 3171 .flush_queue_iso = ohci_flush_queue_iso,
3506 .flush_iso_completions = ohci_flush_iso_completions,
3507 .start_iso = ohci_start_iso, 3172 .start_iso = ohci_start_iso,
3508 .stop_iso = ohci_stop_iso, 3173 .stop_iso = ohci_stop_iso,
3509}; 3174};
@@ -3537,7 +3202,7 @@ static inline void pmac_ohci_on(struct pci_dev *dev) {}
3537static inline void pmac_ohci_off(struct pci_dev *dev) {} 3202static inline void pmac_ohci_off(struct pci_dev *dev) {}
3538#endif /* CONFIG_PPC_PMAC */ 3203#endif /* CONFIG_PPC_PMAC */
3539 3204
3540static int pci_probe(struct pci_dev *dev, 3205static int __devinit pci_probe(struct pci_dev *dev,
3541 const struct pci_device_id *ent) 3206 const struct pci_device_id *ent)
3542{ 3207{
3543 struct fw_ohci *ohci; 3208 struct fw_ohci *ohci;
@@ -3563,7 +3228,7 @@ static int pci_probe(struct pci_dev *dev,
3563 3228
3564 err = pci_enable_device(dev); 3229 err = pci_enable_device(dev);
3565 if (err) { 3230 if (err) {
3566 dev_err(&dev->dev, "failed to enable OHCI hardware\n"); 3231 fw_error("Failed to enable OHCI hardware\n");
3567 goto fail_free; 3232 goto fail_free;
3568 } 3233 }
3569 3234
@@ -3574,24 +3239,18 @@ static int pci_probe(struct pci_dev *dev,
3574 spin_lock_init(&ohci->lock); 3239 spin_lock_init(&ohci->lock);
3575 mutex_init(&ohci->phy_reg_mutex); 3240 mutex_init(&ohci->phy_reg_mutex);
3576 3241
3577 INIT_WORK(&ohci->bus_reset_work, bus_reset_work); 3242 tasklet_init(&ohci->bus_reset_tasklet,
3578 3243 bus_reset_tasklet, (unsigned long)ohci);
3579 if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
3580 pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
3581 dev_err(&dev->dev, "invalid MMIO resource\n");
3582 err = -ENXIO;
3583 goto fail_disable;
3584 }
3585 3244
3586 err = pci_request_region(dev, 0, ohci_driver_name); 3245 err = pci_request_region(dev, 0, ohci_driver_name);
3587 if (err) { 3246 if (err) {
3588 dev_err(&dev->dev, "MMIO resource unavailable\n"); 3247 fw_error("MMIO resource unavailable\n");
3589 goto fail_disable; 3248 goto fail_disable;
3590 } 3249 }
3591 3250
3592 ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE); 3251 ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
3593 if (ohci->registers == NULL) { 3252 if (ohci->registers == NULL) {
3594 dev_err(&dev->dev, "failed to remap registers\n"); 3253 fw_error("Failed to remap registers\n");
3595 err = -ENXIO; 3254 err = -ENXIO;
3596 goto fail_iomem; 3255 goto fail_iomem;
3597 } 3256 }
@@ -3680,10 +3339,9 @@ static int pci_probe(struct pci_dev *dev,
3680 goto fail_contexts; 3339 goto fail_contexts;
3681 3340
3682 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 3341 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
3683 dev_notice(&dev->dev, 3342 fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
3684 "added OHCI v%x.%x device as card %d, "
3685 "%d IR + %d IT contexts, quirks 0x%x\n", 3343 "%d IR + %d IT contexts, quirks 0x%x\n",
3686 version >> 16, version & 0xff, ohci->card.index, 3344 dev_name(&dev->dev), version >> 16, version & 0xff,
3687 ohci->n_ir, ohci->n_it, ohci->quirks); 3345 ohci->n_ir, ohci->n_it, ohci->quirks);
3688 3346
3689 return 0; 3347 return 0;
@@ -3712,7 +3370,7 @@ static int pci_probe(struct pci_dev *dev,
3712 pmac_ohci_off(dev); 3370 pmac_ohci_off(dev);
3713 fail: 3371 fail:
3714 if (err == -ENOMEM) 3372 if (err == -ENOMEM)
3715 dev_err(&dev->dev, "out of memory\n"); 3373 fw_error("Out of memory\n");
3716 3374
3717 return err; 3375 return err;
3718} 3376}
@@ -3724,7 +3382,6 @@ static void pci_remove(struct pci_dev *dev)
3724 ohci = pci_get_drvdata(dev); 3382 ohci = pci_get_drvdata(dev);
3725 reg_write(ohci, OHCI1394_IntMaskClear, ~0); 3383 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
3726 flush_writes(ohci); 3384 flush_writes(ohci);
3727 cancel_work_sync(&ohci->bus_reset_work);
3728 fw_core_remove_card(&ohci->card); 3385 fw_core_remove_card(&ohci->card);
3729 3386
3730 /* 3387 /*
@@ -3756,7 +3413,7 @@ static void pci_remove(struct pci_dev *dev)
3756 kfree(ohci); 3413 kfree(ohci);
3757 pmac_ohci_off(dev); 3414 pmac_ohci_off(dev);
3758 3415
3759 dev_notice(&dev->dev, "removed fw-ohci device\n"); 3416 fw_notify("Removed fw-ohci device.\n");
3760} 3417}
3761 3418
3762#ifdef CONFIG_PM 3419#ifdef CONFIG_PM
@@ -3770,12 +3427,12 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state)
3770 pci_disable_msi(dev); 3427 pci_disable_msi(dev);
3771 err = pci_save_state(dev); 3428 err = pci_save_state(dev);
3772 if (err) { 3429 if (err) {
3773 dev_err(&dev->dev, "pci_save_state failed\n"); 3430 fw_error("pci_save_state failed\n");
3774 return err; 3431 return err;
3775 } 3432 }
3776 err = pci_set_power_state(dev, pci_choose_state(dev, state)); 3433 err = pci_set_power_state(dev, pci_choose_state(dev, state));
3777 if (err) 3434 if (err)
3778 dev_err(&dev->dev, "pci_set_power_state failed with %d\n", err); 3435 fw_error("pci_set_power_state failed with %d\n", err);
3779 pmac_ohci_off(dev); 3436 pmac_ohci_off(dev);
3780 3437
3781 return 0; 3438 return 0;
@@ -3791,7 +3448,7 @@ static int pci_resume(struct pci_dev *dev)
3791 pci_restore_state(dev); 3448 pci_restore_state(dev);
3792 err = pci_enable_device(dev); 3449 err = pci_enable_device(dev);
3793 if (err) { 3450 if (err) {
3794 dev_err(&dev->dev, "pci_enable_device failed\n"); 3451 fw_error("pci_enable_device failed\n");
3795 return err; 3452 return err;
3796 } 3453 }
3797 3454
@@ -3830,8 +3487,6 @@ static struct pci_driver fw_ohci_pci_driver = {
3830#endif 3487#endif
3831}; 3488};
3832 3489
3833module_pci_driver(fw_ohci_pci_driver);
3834
3835MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); 3490MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
3836MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers"); 3491MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
3837MODULE_LICENSE("GPL"); 3492MODULE_LICENSE("GPL");
@@ -3840,3 +3495,16 @@ MODULE_LICENSE("GPL");
3840#ifndef CONFIG_IEEE1394_OHCI1394_MODULE 3495#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
3841MODULE_ALIAS("ohci1394"); 3496MODULE_ALIAS("ohci1394");
3842#endif 3497#endif
3498
3499static int __init fw_ohci_init(void)
3500{
3501 return pci_register_driver(&fw_ohci_pci_driver);
3502}
3503
3504static void __exit fw_ohci_cleanup(void)
3505{
3506 pci_unregister_driver(&fw_ohci_pci_driver);
3507}
3508
3509module_init(fw_ohci_init);
3510module_exit(fw_ohci_cleanup);
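The init/exit pair restored above is what the removed module_pci_driver() line expands to; its definition in <linux/pci.h> is approximately:

	/* Approximate definition, for reference: */
	#define module_pci_driver(__pci_driver) \
		module_driver(__pci_driver, pci_register_driver, \
			      pci_unregister_driver)
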
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 1162d6b3bf8..17cef864506 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -52,6 +52,7 @@
52#include <linux/workqueue.h> 52#include <linux/workqueue.h>
53 53
54#include <asm/byteorder.h> 54#include <asm/byteorder.h>
55#include <asm/system.h>
55 56
56#include <scsi/scsi.h> 57#include <scsi/scsi.h>
57#include <scsi/scsi_cmnd.h> 58#include <scsi/scsi_cmnd.h>
@@ -65,7 +66,7 @@
65 * 66 *
66 * Concurrent logins are useful together with cluster filesystems. 67 * Concurrent logins are useful together with cluster filesystems.
67 */ 68 */
68static bool sbp2_param_exclusive_login = 1; 69static int sbp2_param_exclusive_login = 1;
69module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644); 70module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644);
70MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " 71MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
71 "(default = Y, use N for concurrent initiators)"); 72 "(default = Y, use N for concurrent initiators)");
@@ -124,6 +125,8 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
124 ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) 125 ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
125 ", or a combination)"); 126 ", or a combination)");
126 127
128static const char sbp2_driver_name[] = "sbp2";
129
127/* 130/*
128 * We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry 131 * We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry
129 * and one struct scsi_device per sbp2_logical_unit. 132 * and one struct scsi_device per sbp2_logical_unit.
@@ -151,17 +154,14 @@ struct sbp2_logical_unit {
151 bool blocked; 154 bool blocked;
152}; 155};
153 156
154static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
155{
156 queue_delayed_work(fw_workqueue, &lu->work, delay);
157}
158
159/* 157/*
160 * We create one struct sbp2_target per IEEE 1212 Unit Directory 158 * We create one struct sbp2_target per IEEE 1212 Unit Directory
161 * and one struct Scsi_Host per sbp2_target. 159 * and one struct Scsi_Host per sbp2_target.
162 */ 160 */
163struct sbp2_target { 161struct sbp2_target {
162 struct kref kref;
164 struct fw_unit *unit; 163 struct fw_unit *unit;
164 const char *bus_id;
165 struct list_head lu_list; 165 struct list_head lu_list;
166 166
167 u64 management_agent_address; 167 u64 management_agent_address;
@@ -177,21 +177,11 @@ struct sbp2_target {
177 int blocked; /* ditto */ 177 int blocked; /* ditto */
178}; 178};
179 179
180static struct fw_device *target_parent_device(struct sbp2_target *tgt) 180static struct fw_device *target_device(struct sbp2_target *tgt)
181{ 181{
182 return fw_parent_device(tgt->unit); 182 return fw_parent_device(tgt->unit);
183} 183}
184 184
185static const struct device *tgt_dev(const struct sbp2_target *tgt)
186{
187 return &tgt->unit->device;
188}
189
190static const struct device *lu_dev(const struct sbp2_logical_unit *lu)
191{
192 return &lu->tgt->unit->device;
193}
194
195/* Impossible login_id, to detect logout attempt before successful login */ 185/* Impossible login_id, to detect logout attempt before successful login */
196#define INVALID_LOGIN_ID 0x10000 186#define INVALID_LOGIN_ID 0x10000
197 187
@@ -207,8 +197,9 @@ static const struct device *lu_dev(const struct sbp2_logical_unit *lu)
207#define SBP2_MAX_CDB_SIZE 16 197#define SBP2_MAX_CDB_SIZE 16
208 198
209/* 199/*
210 * The maximum SBP-2 data buffer size is 0xffff. We quadlet-align this 200 * The default maximum s/g segment size of a FireWire controller is
211 * for compatibility with earlier versions of this driver. 201 * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
202 * be quadlet-aligned, we set the length limit to 0xffff & ~3.
212 */ 203 */
213#define SBP2_MAX_SEG_SIZE 0xfffc 204#define SBP2_MAX_SEG_SIZE 0xfffc
214 205
@@ -216,7 +207,6 @@ static const struct device *lu_dev(const struct sbp2_logical_unit *lu)
216#define SBP2_CSR_UNIT_CHARACTERISTICS 0x3a 207#define SBP2_CSR_UNIT_CHARACTERISTICS 0x3a
217#define SBP2_CSR_FIRMWARE_REVISION 0x3c 208#define SBP2_CSR_FIRMWARE_REVISION 0x3c
218#define SBP2_CSR_LOGICAL_UNIT_NUMBER 0x14 209#define SBP2_CSR_LOGICAL_UNIT_NUMBER 0x14
219#define SBP2_CSR_UNIT_UNIQUE_ID 0x8d
220#define SBP2_CSR_LOGICAL_UNIT_DIRECTORY 0xd4 210#define SBP2_CSR_LOGICAL_UNIT_DIRECTORY 0xd4
221 211
222/* Management orb opcodes */ 212/* Management orb opcodes */
@@ -436,8 +426,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
436 memcpy(status.data, payload + 8, length - 8); 426 memcpy(status.data, payload + 8, length - 8);
437 427
438 if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) { 428 if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
439 dev_notice(lu_dev(lu), 429 fw_notify("non-orb related status write, not handled\n");
440 "non-ORB related status write, not handled\n");
441 fw_send_response(card, request, RCODE_COMPLETE); 430 fw_send_response(card, request, RCODE_COMPLETE);
442 return; 431 return;
443 } 432 }
@@ -458,7 +447,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
458 orb->callback(orb, &status); 447 orb->callback(orb, &status);
459 kref_put(&orb->kref, free_orb); /* orb callback reference */ 448 kref_put(&orb->kref, free_orb); /* orb callback reference */
460 } else { 449 } else {
461 dev_err(lu_dev(lu), "status write for unknown ORB\n"); 450 fw_error("status write for unknown orb\n");
462 } 451 }
463 452
464 fw_send_response(card, request, RCODE_COMPLETE); 453 fw_send_response(card, request, RCODE_COMPLETE);
@@ -499,7 +488,7 @@ static void complete_transaction(struct fw_card *card, int rcode,
499static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, 488static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
500 int node_id, int generation, u64 offset) 489 int node_id, int generation, u64 offset)
501{ 490{
502 struct fw_device *device = target_parent_device(lu->tgt); 491 struct fw_device *device = target_device(lu->tgt);
503 struct sbp2_pointer orb_pointer; 492 struct sbp2_pointer orb_pointer;
504 unsigned long flags; 493 unsigned long flags;
505 494
@@ -520,7 +509,7 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
520 509
521static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) 510static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
522{ 511{
523 struct fw_device *device = target_parent_device(lu->tgt); 512 struct fw_device *device = target_device(lu->tgt);
524 struct sbp2_orb *orb, *next; 513 struct sbp2_orb *orb, *next;
525 struct list_head list; 514 struct list_head list;
526 unsigned long flags; 515 unsigned long flags;
@@ -559,7 +548,7 @@ static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
559 int generation, int function, 548 int generation, int function,
560 int lun_or_login_id, void *response) 549 int lun_or_login_id, void *response)
561{ 550{
562 struct fw_device *device = target_parent_device(lu->tgt); 551 struct fw_device *device = target_device(lu->tgt);
563 struct sbp2_management_orb *orb; 552 struct sbp2_management_orb *orb;
564 unsigned int timeout; 553 unsigned int timeout;
565 int retval = -ENOMEM; 554 int retval = -ENOMEM;
@@ -567,7 +556,7 @@ static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
567 if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device)) 556 if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device))
568 return 0; 557 return 0;
569 558
570 orb = kzalloc(sizeof(*orb), GFP_NOIO); 559 orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
571 if (orb == NULL) 560 if (orb == NULL)
572 return -ENOMEM; 561 return -ENOMEM;
573 562
@@ -619,20 +608,20 @@ static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
619 608
620 retval = -EIO; 609 retval = -EIO;
621 if (sbp2_cancel_orbs(lu) == 0) { 610 if (sbp2_cancel_orbs(lu) == 0) {
622 dev_err(lu_dev(lu), "ORB reply timed out, rcode 0x%02x\n", 611 fw_error("%s: orb reply timed out, rcode=0x%02x\n",
623 orb->base.rcode); 612 lu->tgt->bus_id, orb->base.rcode);
624 goto out; 613 goto out;
625 } 614 }
626 615
627 if (orb->base.rcode != RCODE_COMPLETE) { 616 if (orb->base.rcode != RCODE_COMPLETE) {
628 dev_err(lu_dev(lu), "management write failed, rcode 0x%02x\n", 617 fw_error("%s: management write failed, rcode 0x%02x\n",
629 orb->base.rcode); 618 lu->tgt->bus_id, orb->base.rcode);
630 goto out; 619 goto out;
631 } 620 }
632 621
633 if (STATUS_GET_RESPONSE(orb->status) != 0 || 622 if (STATUS_GET_RESPONSE(orb->status) != 0 ||
634 STATUS_GET_SBP_STATUS(orb->status) != 0) { 623 STATUS_GET_SBP_STATUS(orb->status) != 0) {
635 dev_err(lu_dev(lu), "error status: %d:%d\n", 624 fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id,
636 STATUS_GET_RESPONSE(orb->status), 625 STATUS_GET_RESPONSE(orb->status),
637 STATUS_GET_SBP_STATUS(orb->status)); 626 STATUS_GET_SBP_STATUS(orb->status));
638 goto out; 627 goto out;
@@ -655,7 +644,7 @@ static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
655 644
656static void sbp2_agent_reset(struct sbp2_logical_unit *lu) 645static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
657{ 646{
658 struct fw_device *device = target_parent_device(lu->tgt); 647 struct fw_device *device = target_device(lu->tgt);
659 __be32 d = 0; 648 __be32 d = 0;
660 649
661 fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, 650 fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
@@ -672,7 +661,7 @@ static void complete_agent_reset_write_no_wait(struct fw_card *card,
672 661
673static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) 662static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
674{ 663{
675 struct fw_device *device = target_parent_device(lu->tgt); 664 struct fw_device *device = target_device(lu->tgt);
676 struct fw_transaction *t; 665 struct fw_transaction *t;
677 static __be32 d; 666 static __be32 d;
678 667
@@ -711,7 +700,7 @@ static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
711static void sbp2_conditionally_block(struct sbp2_logical_unit *lu) 700static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
712{ 701{
713 struct sbp2_target *tgt = lu->tgt; 702 struct sbp2_target *tgt = lu->tgt;
714 struct fw_card *card = target_parent_device(tgt)->card; 703 struct fw_card *card = target_device(tgt)->card;
715 struct Scsi_Host *shost = 704 struct Scsi_Host *shost =
716 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); 705 container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
717 unsigned long flags; 706 unsigned long flags;
@@ -735,7 +724,7 @@ static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
735static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) 724static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
736{ 725{
737 struct sbp2_target *tgt = lu->tgt; 726 struct sbp2_target *tgt = lu->tgt;
738 struct fw_card *card = target_parent_device(tgt)->card; 727 struct fw_card *card = target_device(tgt)->card;
739 struct Scsi_Host *shost = 728 struct Scsi_Host *shost =
740 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); 729 container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
741 unsigned long flags; 730 unsigned long flags;
@@ -760,7 +749,7 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
760 */ 749 */
761static void sbp2_unblock(struct sbp2_target *tgt) 750static void sbp2_unblock(struct sbp2_target *tgt)
762{ 751{
763 struct fw_card *card = target_parent_device(tgt)->card; 752 struct fw_card *card = target_device(tgt)->card;
764 struct Scsi_Host *shost = 753 struct Scsi_Host *shost =
765 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); 754 container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
766 unsigned long flags; 755 unsigned long flags;
@@ -783,6 +772,71 @@ static int sbp2_lun2int(u16 lun)
783 return scsilun_to_int(&eight_bytes_lun); 772 return scsilun_to_int(&eight_bytes_lun);
784} 773}
785 774
775static void sbp2_release_target(struct kref *kref)
776{
777 struct sbp2_target *tgt = container_of(kref, struct sbp2_target, kref);
778 struct sbp2_logical_unit *lu, *next;
779 struct Scsi_Host *shost =
780 container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
781 struct scsi_device *sdev;
782 struct fw_device *device = target_device(tgt);
783
784 /* prevent deadlocks */
785 sbp2_unblock(tgt);
786
787 list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
788 sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
789 if (sdev) {
790 scsi_remove_device(sdev);
791 scsi_device_put(sdev);
792 }
793 if (lu->login_id != INVALID_LOGIN_ID) {
794 int generation, node_id;
795 /*
796 * tgt->node_id may be obsolete here if we failed
797 * during initial login or after a bus reset where
798 * the topology changed.
799 */
800 generation = device->generation;
801 smp_rmb(); /* node_id vs. generation */
802 node_id = device->node_id;
803 sbp2_send_management_orb(lu, node_id, generation,
804 SBP2_LOGOUT_REQUEST,
805 lu->login_id, NULL);
806 }
807 fw_core_remove_address_handler(&lu->address_handler);
808 list_del(&lu->link);
809 kfree(lu);
810 }
811 scsi_remove_host(shost);
812 fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no);
813
814 fw_unit_put(tgt->unit);
815 scsi_host_put(shost);
816 fw_device_put(device);
817}
818
819static void sbp2_target_get(struct sbp2_target *tgt)
820{
821 kref_get(&tgt->kref);
822}
823
824static void sbp2_target_put(struct sbp2_target *tgt)
825{
826 kref_put(&tgt->kref, sbp2_release_target);
827}
828
829/*
 830 * Always get the target's kref when scheduling work on one of its units.
 831 * Each workqueue job is responsible for calling sbp2_target_put() upon return.
832 */
833static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
834{
835 sbp2_target_get(lu->tgt);
836 if (!queue_delayed_work(fw_workqueue, &lu->work, delay))
837 sbp2_target_put(lu->tgt);
838}
839
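sbp2_queue_work() above pins the target for the lifetime of the queued job, relying on queue_delayed_work() returning false when the work is already pending. The generic shape of the pattern, with hypothetical obj_get()/obj_put() and struct my_obj standing in for the sbp2 types:

	/* Generic sketch of the pin-across-deferred-work pattern;
	 * obj_get()/obj_put() stand in for sbp2_target_get()/_put(). */
	static void schedule_pinned_work(struct my_obj *obj,
					 unsigned long delay)
	{
		obj_get(obj);			/* reference owned by the job */
		if (!queue_delayed_work(fw_workqueue, &obj->work, delay))
			obj_put(obj);		/* already queued: drop it */
	}
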
786/* 840/*
787 * Write retransmit retry values into the BUSY_TIMEOUT register. 841 * Write retransmit retry values into the BUSY_TIMEOUT register.
788 * - The single-phase retry protocol is supported by all SBP-2 devices, but the 842 * - The single-phase retry protocol is supported by all SBP-2 devices, but the
@@ -801,7 +855,7 @@ static int sbp2_lun2int(u16 lun)
801 */ 855 */
802static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu) 856static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
803{ 857{
804 struct fw_device *device = target_parent_device(lu->tgt); 858 struct fw_device *device = target_device(lu->tgt);
805 __be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT); 859 __be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);
806 860
807 fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, 861 fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
@@ -816,14 +870,14 @@ static void sbp2_login(struct work_struct *work)
816 struct sbp2_logical_unit *lu = 870 struct sbp2_logical_unit *lu =
817 container_of(work, struct sbp2_logical_unit, work.work); 871 container_of(work, struct sbp2_logical_unit, work.work);
818 struct sbp2_target *tgt = lu->tgt; 872 struct sbp2_target *tgt = lu->tgt;
819 struct fw_device *device = target_parent_device(tgt); 873 struct fw_device *device = target_device(tgt);
820 struct Scsi_Host *shost; 874 struct Scsi_Host *shost;
821 struct scsi_device *sdev; 875 struct scsi_device *sdev;
822 struct sbp2_login_response response; 876 struct sbp2_login_response response;
823 int generation, node_id, local_node_id; 877 int generation, node_id, local_node_id;
824 878
825 if (fw_device_is_shutdown(device)) 879 if (fw_device_is_shutdown(device))
826 return; 880 goto out;
827 881
828 generation = device->generation; 882 generation = device->generation;
829 smp_rmb(); /* node IDs must not be older than generation */ 883 smp_rmb(); /* node IDs must not be older than generation */
@@ -840,12 +894,12 @@ static void sbp2_login(struct work_struct *work)
840 if (lu->retries++ < 5) { 894 if (lu->retries++ < 5) {
841 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); 895 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
842 } else { 896 } else {
843 dev_err(tgt_dev(tgt), "failed to login to LUN %04x\n", 897 fw_error("%s: failed to login to LUN %04x\n",
844 lu->lun); 898 tgt->bus_id, lu->lun);
845 /* Let any waiting I/O fail from now on. */ 899 /* Let any waiting I/O fail from now on. */
846 sbp2_unblock(lu->tgt); 900 sbp2_unblock(lu->tgt);
847 } 901 }
848 return; 902 goto out;
849 } 903 }
850 904
851 tgt->node_id = node_id; 905 tgt->node_id = node_id;
@@ -858,8 +912,8 @@ static void sbp2_login(struct work_struct *work)
858 << 32) | be32_to_cpu(response.command_block_agent.low); 912 << 32) | be32_to_cpu(response.command_block_agent.low);
859 lu->login_id = be32_to_cpu(response.misc) & 0xffff; 913 lu->login_id = be32_to_cpu(response.misc) & 0xffff;
860 914
861 dev_notice(tgt_dev(tgt), "logged in to LUN %04x (%d retries)\n", 915 fw_notify("%s: logged in to LUN %04x (%d retries)\n",
862 lu->lun, lu->retries); 916 tgt->bus_id, lu->lun, lu->retries);
863 917
864 /* set appropriate retry limit(s) in BUSY_TIMEOUT register */ 918 /* set appropriate retry limit(s) in BUSY_TIMEOUT register */
865 sbp2_set_busy_timeout(lu); 919 sbp2_set_busy_timeout(lu);
@@ -871,8 +925,7 @@ static void sbp2_login(struct work_struct *work)
871 if (lu->has_sdev) { 925 if (lu->has_sdev) {
872 sbp2_cancel_orbs(lu); 926 sbp2_cancel_orbs(lu);
873 sbp2_conditionally_unblock(lu); 927 sbp2_conditionally_unblock(lu);
874 928 goto out;
875 return;
876 } 929 }
877 930
878 if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) 931 if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
@@ -904,8 +957,7 @@ static void sbp2_login(struct work_struct *work)
904 lu->has_sdev = true; 957 lu->has_sdev = true;
905 scsi_device_put(sdev); 958 scsi_device_put(sdev);
906 sbp2_allow_block(lu); 959 sbp2_allow_block(lu);
907 960 goto out;
908 return;
909 961
910 out_logout_login: 962 out_logout_login:
911 smp_rmb(); /* generation may have changed */ 963 smp_rmb(); /* generation may have changed */
@@ -919,57 +971,8 @@ static void sbp2_login(struct work_struct *work)
919 * lu->work already. Reset the work from reconnect to login. 971 * lu->work already. Reset the work from reconnect to login.
920 */ 972 */
921 PREPARE_DELAYED_WORK(&lu->work, sbp2_login); 973 PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
922} 974 out:
923 975 sbp2_target_put(tgt);
924static void sbp2_reconnect(struct work_struct *work)
925{
926 struct sbp2_logical_unit *lu =
927 container_of(work, struct sbp2_logical_unit, work.work);
928 struct sbp2_target *tgt = lu->tgt;
929 struct fw_device *device = target_parent_device(tgt);
930 int generation, node_id, local_node_id;
931
932 if (fw_device_is_shutdown(device))
933 return;
934
935 generation = device->generation;
936 smp_rmb(); /* node IDs must not be older than generation */
937 node_id = device->node_id;
938 local_node_id = device->card->node_id;
939
940 if (sbp2_send_management_orb(lu, node_id, generation,
941 SBP2_RECONNECT_REQUEST,
942 lu->login_id, NULL) < 0) {
943 /*
944 * If reconnect was impossible even though we are in the
945 * current generation, fall back and try to log in again.
946 *
947 * We could check for "Function rejected" status, but
 948 * looking at the bus generation is simpler and more general.
949 */
950 smp_rmb(); /* get current card generation */
951 if (generation == device->card->generation ||
952 lu->retries++ >= 5) {
953 dev_err(tgt_dev(tgt), "failed to reconnect\n");
954 lu->retries = 0;
955 PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
956 }
957 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
958
959 return;
960 }
961
962 tgt->node_id = node_id;
963 tgt->address_high = local_node_id << 16;
964 smp_wmb(); /* node IDs must not be older than generation */
965 lu->generation = generation;
966
967 dev_notice(tgt_dev(tgt), "reconnected to LUN %04x (%d retries)\n",
968 lu->lun, lu->retries);
969
970 sbp2_agent_reset(lu);
971 sbp2_cancel_orbs(lu);
972 sbp2_conditionally_unblock(lu);
973} 976}
974 977
975static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) 978static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
@@ -1004,13 +1007,6 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
1004 return 0; 1007 return 0;
1005} 1008}
1006 1009
1007static void sbp2_get_unit_unique_id(struct sbp2_target *tgt,
1008 const u32 *leaf)
1009{
1010 if ((leaf[0] & 0xffff0000) == 0x00020000)
1011 tgt->guid = (u64)leaf[1] << 32 | leaf[2];
1012}
1013
1014static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, 1010static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt,
1015 const u32 *directory) 1011 const u32 *directory)
1016{ 1012{
@@ -1062,10 +1058,6 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory,
1062 return -ENOMEM; 1058 return -ENOMEM;
1063 break; 1059 break;
1064 1060
1065 case SBP2_CSR_UNIT_UNIQUE_ID:
1066 sbp2_get_unit_unique_id(tgt, ci.p - 1 + value);
1067 break;
1068
1069 case SBP2_CSR_LOGICAL_UNIT_DIRECTORY: 1061 case SBP2_CSR_LOGICAL_UNIT_DIRECTORY:
1070 /* Adjust for the increment in the iterator */ 1062 /* Adjust for the increment in the iterator */
1071 if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0) 1063 if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0)
@@ -1086,8 +1078,8 @@ static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt)
1086 unsigned int timeout = tgt->mgt_orb_timeout; 1078 unsigned int timeout = tgt->mgt_orb_timeout;
1087 1079
1088 if (timeout > 40000) 1080 if (timeout > 40000)
1089 dev_notice(tgt_dev(tgt), "%ds mgt_ORB_timeout limited to 40s\n", 1081 fw_notify("%s: %ds mgt_ORB_timeout limited to 40s\n",
1090 timeout / 1000); 1082 tgt->bus_id, timeout / 1000);
1091 1083
1092 tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000); 1084 tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000);
1093} 1085}
@@ -1099,9 +1091,9 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
1099 unsigned int w = sbp2_param_workarounds; 1091 unsigned int w = sbp2_param_workarounds;
1100 1092
1101 if (w) 1093 if (w)
1102 dev_notice(tgt_dev(tgt), 1094 fw_notify("Please notify linux1394-devel@lists.sourceforge.net "
1103 "Please notify linux1394-devel@lists.sf.net " 1095 "if you need the workarounds parameter for %s\n",
1104 "if you need the workarounds parameter\n"); 1096 tgt->bus_id);
1105 1097
1106 if (w & SBP2_WORKAROUND_OVERRIDE) 1098 if (w & SBP2_WORKAROUND_OVERRIDE)
1107 goto out; 1099 goto out;
@@ -1121,14 +1113,13 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
1121 } 1113 }
1122 out: 1114 out:
1123 if (w) 1115 if (w)
1124 dev_notice(tgt_dev(tgt), "workarounds 0x%x " 1116 fw_notify("Workarounds for %s: 0x%x "
1125 "(firmware_revision 0x%06x, model_id 0x%06x)\n", 1117 "(firmware_revision 0x%06x, model_id 0x%06x)\n",
1126 w, firmware_revision, model); 1118 tgt->bus_id, w, firmware_revision, model);
1127 tgt->workarounds = w; 1119 tgt->workarounds = w;
1128} 1120}
1129 1121
1130static struct scsi_host_template scsi_driver_template; 1122static struct scsi_host_template scsi_driver_template;
1131static int sbp2_remove(struct device *dev);
1132 1123
1133static int sbp2_probe(struct device *dev) 1124static int sbp2_probe(struct device *dev)
1134{ 1125{
@@ -1139,10 +1130,6 @@ static int sbp2_probe(struct device *dev)
1139 struct Scsi_Host *shost; 1130 struct Scsi_Host *shost;
1140 u32 model, firmware_revision; 1131 u32 model, firmware_revision;
1141 1132
1142 /* cannot (or should not) handle targets on the local node */
1143 if (device->is_local)
1144 return -ENODEV;
1145
1146 if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE) 1133 if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
1147 BUG_ON(dma_set_max_seg_size(device->card->device, 1134 BUG_ON(dma_set_max_seg_size(device->card->device,
1148 SBP2_MAX_SEG_SIZE)); 1135 SBP2_MAX_SEG_SIZE));
@@ -1154,7 +1141,9 @@ static int sbp2_probe(struct device *dev)
1154 tgt = (struct sbp2_target *)shost->hostdata; 1141 tgt = (struct sbp2_target *)shost->hostdata;
1155 dev_set_drvdata(&unit->device, tgt); 1142 dev_set_drvdata(&unit->device, tgt);
1156 tgt->unit = unit; 1143 tgt->unit = unit;
1144 kref_init(&tgt->kref);
1157 INIT_LIST_HEAD(&tgt->lu_list); 1145 INIT_LIST_HEAD(&tgt->lu_list);
1146 tgt->bus_id = dev_name(&unit->device);
1158 tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; 1147 tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
1159 1148
1160 if (fw_device_enable_phys_dma(device) < 0) 1149 if (fw_device_enable_phys_dma(device) < 0)
@@ -1162,10 +1151,12 @@ static int sbp2_probe(struct device *dev)
1162 1151
1163 shost->max_cmd_len = SBP2_MAX_CDB_SIZE; 1152 shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
1164 1153
1165 if (scsi_add_host_with_dma(shost, &unit->device, 1154 if (scsi_add_host(shost, &unit->device) < 0)
1166 device->card->device) < 0)
1167 goto fail_shost_put; 1155 goto fail_shost_put;
1168 1156
1157 fw_device_get(device);
1158 fw_unit_get(unit);
1159
1169 /* implicit directory ID */ 1160 /* implicit directory ID */
1170 tgt->directory_id = ((unit->directory - device->config_rom) * 4 1161 tgt->directory_id = ((unit->directory - device->config_rom) * 4
1171 + CSR_CONFIG_ROM) & 0xffffff; 1162 + CSR_CONFIG_ROM) & 0xffffff;
@@ -1175,7 +1166,7 @@ static int sbp2_probe(struct device *dev)
1175 1166
1176 if (sbp2_scan_unit_dir(tgt, unit->directory, &model, 1167 if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
1177 &firmware_revision) < 0) 1168 &firmware_revision) < 0)
1178 goto fail_remove; 1169 goto fail_tgt_put;
1179 1170
1180 sbp2_clamp_management_orb_timeout(tgt); 1171 sbp2_clamp_management_orb_timeout(tgt);
1181 sbp2_init_workarounds(tgt, model, firmware_revision); 1172 sbp2_init_workarounds(tgt, model, firmware_revision);
@@ -1186,17 +1177,16 @@ static int sbp2_probe(struct device *dev)
1186 * specifies the max payload size as 2 ^ (max_payload + 2), so 1177 * specifies the max payload size as 2 ^ (max_payload + 2), so
1187 * if we set this to max_speed + 7, we get the right value. 1178 * if we set this to max_speed + 7, we get the right value.
1188 */ 1179 */
1189 tgt->max_payload = min3(device->max_speed + 7, 10U, 1180 tgt->max_payload = min(device->max_speed + 7, 10U);
1190 device->card->max_receive - 1); 1181 tgt->max_payload = min(tgt->max_payload, device->card->max_receive - 1);
1191 1182
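min3() and the nested min() pair above compute the same clamp; a worked example, assuming SCODE_400 == 2 as in the firewire constants:

	/* Worked example (assumption: device->max_speed == SCODE_400 == 2):
	 * max_payload = min(2 + 7, 10) = 9, and SBP-2 encodes the limit
	 * as 2 ^ (max_payload + 2) = 2 ^ 11 = 2048 bytes, the S400 max. */
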
1192 /* Do the login in a workqueue so we can easily reschedule retries. */ 1183 /* Do the login in a workqueue so we can easily reschedule retries. */
1193 list_for_each_entry(lu, &tgt->lu_list, link) 1184 list_for_each_entry(lu, &tgt->lu_list, link)
1194 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); 1185 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
1195
1196 return 0; 1186 return 0;
1197 1187
1198 fail_remove: 1188 fail_tgt_put:
1199 sbp2_remove(dev); 1189 sbp2_target_put(tgt);
1200 return -ENOMEM; 1190 return -ENOMEM;
1201 1191
1202 fail_shost_put: 1192 fail_shost_put:
@@ -1204,6 +1194,71 @@ static int sbp2_probe(struct device *dev)
1204 return -ENOMEM; 1194 return -ENOMEM;
1205} 1195}
1206 1196
1197static int sbp2_remove(struct device *dev)
1198{
1199 struct fw_unit *unit = fw_unit(dev);
1200 struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
1201 struct sbp2_logical_unit *lu;
1202
1203 list_for_each_entry(lu, &tgt->lu_list, link)
1204 cancel_delayed_work_sync(&lu->work);
1205
1206 sbp2_target_put(tgt);
1207 return 0;
1208}
1209
1210static void sbp2_reconnect(struct work_struct *work)
1211{
1212 struct sbp2_logical_unit *lu =
1213 container_of(work, struct sbp2_logical_unit, work.work);
1214 struct sbp2_target *tgt = lu->tgt;
1215 struct fw_device *device = target_device(tgt);
1216 int generation, node_id, local_node_id;
1217
1218 if (fw_device_is_shutdown(device))
1219 goto out;
1220
1221 generation = device->generation;
1222 smp_rmb(); /* node IDs must not be older than generation */
1223 node_id = device->node_id;
1224 local_node_id = device->card->node_id;
1225
1226 if (sbp2_send_management_orb(lu, node_id, generation,
1227 SBP2_RECONNECT_REQUEST,
1228 lu->login_id, NULL) < 0) {
1229 /*
1230 * If reconnect was impossible even though we are in the
1231 * current generation, fall back and try to log in again.
1232 *
1233 * We could check for "Function rejected" status, but
1234 * looking at the bus generation is simpler and more general.
1235 */
1236 smp_rmb(); /* get current card generation */
1237 if (generation == device->card->generation ||
1238 lu->retries++ >= 5) {
1239 fw_error("%s: failed to reconnect\n", tgt->bus_id);
1240 lu->retries = 0;
1241 PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
1242 }
1243 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
1244 goto out;
1245 }
1246
1247 tgt->node_id = node_id;
1248 tgt->address_high = local_node_id << 16;
1249 smp_wmb(); /* node IDs must not be older than generation */
1250 lu->generation = generation;
1251
1252 fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
1253 tgt->bus_id, lu->lun, lu->retries);
1254
1255 sbp2_agent_reset(lu);
1256 sbp2_cancel_orbs(lu);
1257 sbp2_conditionally_unblock(lu);
1258 out:
1259 sbp2_target_put(tgt);
1260}
1261
1207static void sbp2_update(struct fw_unit *unit) 1262static void sbp2_update(struct fw_unit *unit)
1208{ 1263{
1209 struct sbp2_target *tgt = dev_get_drvdata(&unit->device); 1264 struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
@@ -1222,51 +1277,6 @@ static void sbp2_update(struct fw_unit *unit)
 	}
 }
 
-static int sbp2_remove(struct device *dev)
-{
-	struct fw_unit *unit = fw_unit(dev);
-	struct fw_device *device = fw_parent_device(unit);
-	struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
-	struct sbp2_logical_unit *lu, *next;
-	struct Scsi_Host *shost =
-		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
-	struct scsi_device *sdev;
-
-	/* prevent deadlocks */
-	sbp2_unblock(tgt);
-
-	list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
-		cancel_delayed_work_sync(&lu->work);
-		sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
-		if (sdev) {
-			scsi_remove_device(sdev);
-			scsi_device_put(sdev);
-		}
-		if (lu->login_id != INVALID_LOGIN_ID) {
-			int generation, node_id;
-			/*
-			 * tgt->node_id may be obsolete here if we failed
-			 * during initial login or after a bus reset where
-			 * the topology changed.
-			 */
-			generation = device->generation;
-			smp_rmb(); /* node_id vs. generation */
-			node_id = device->node_id;
-			sbp2_send_management_orb(lu, node_id, generation,
-						 SBP2_LOGOUT_REQUEST,
-						 lu->login_id, NULL);
-		}
-		fw_core_remove_address_handler(&lu->address_handler);
-		list_del(&lu->link);
-		kfree(lu);
-	}
-	scsi_remove_host(shost);
-	dev_notice(dev, "released target %d:0:0\n", shost->host_no);
-
-	scsi_host_put(shost);
-	return 0;
-}
-
 #define SBP2_UNIT_SPEC_ID_ENTRY	0x0000609e
 #define SBP2_SW_VERSION_ENTRY	0x00010483
 
@@ -1283,7 +1293,7 @@ static const struct ieee1394_device_id sbp2_id_table[] = {
 static struct fw_driver sbp2_driver = {
 	.driver   = {
 		.owner	= THIS_MODULE,
-		.name	= KBUILD_MODNAME,
+		.name	= sbp2_driver_name,
 		.bus	= &fw_bus_type,
 		.probe	= sbp2_probe,
 		.remove	= sbp2_remove,
@@ -1295,7 +1305,10 @@ static struct fw_driver sbp2_driver = {
 static void sbp2_unmap_scatterlist(struct device *card_device,
 				   struct sbp2_command_orb *orb)
 {
-	scsi_dma_unmap(orb->cmd);
+	if (scsi_sg_count(orb->cmd))
+		dma_unmap_sg(card_device, scsi_sglist(orb->cmd),
+			     scsi_sg_count(orb->cmd),
+			     orb->cmd->sc_data_direction);
 
 	if (orb->request.misc & cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT))
 		dma_unmap_single(card_device, orb->page_table_bus,
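This hunk open-codes against the FireWire card's struct device what the newer code did through the scsi_dma_unmap() wrapper. For orientation, the wrapper pair reduces to roughly the following here (a sketch only; the real scsi-ml helpers map against the SCSI host's DMA device and scsi_dma_map() returns a negative errno on failure, which is why the mapping check later in this patch changes from "n <= 0" to "n == 0"):

/* Sketch: what scsi_dma_map()/scsi_dma_unmap() boil down to for
 * this driver, with dma_dev standing in for the card's device.
 * dma_map_sg() returns the mapped segment count, or 0 on failure. */
static int sbp2_map_cmd(struct device *dma_dev, struct scsi_cmnd *cmd)
{
	if (!scsi_sg_count(cmd))
		return 0;	/* nothing to map for zero-length transfers */

	return dma_map_sg(dma_dev, scsi_sglist(cmd),
			  scsi_sg_count(cmd), cmd->sc_data_direction);
}

static void sbp2_unmap_cmd(struct device *dma_dev, struct scsi_cmnd *cmd)
{
	if (scsi_sg_count(cmd))
		dma_unmap_sg(dma_dev, scsi_sglist(cmd),
			     scsi_sg_count(cmd), cmd->sc_data_direction);
}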
@@ -1305,19 +1318,10 @@ static void sbp2_unmap_scatterlist(struct device *card_device,
 static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
 {
 	int sam_status;
-	int sfmt = (sbp2_status[0] >> 6) & 0x03;
-
-	if (sfmt == 2 || sfmt == 3) {
-		/*
-		 * Reserved for future standardization (2) or
-		 * Status block format vendor-dependent (3)
-		 */
-		return DID_ERROR << 16;
-	}
 
-	sense_data[0] = 0x70 | sfmt | (sbp2_status[1] & 0x80);
+	sense_data[0] = 0x70;
 	sense_data[1] = 0x0;
-	sense_data[2] = ((sbp2_status[1] << 1) & 0xe0) | (sbp2_status[1] & 0x0f);
+	sense_data[2] = sbp2_status[1];
 	sense_data[3] = sbp2_status[4];
 	sense_data[4] = sbp2_status[5];
 	sense_data[5] = sbp2_status[6];
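sbp2_status_to_sense_data() converts the sense fields of an SBP-2 status block into SPC fixed-format sense data: the removed lines also honoured the status-block format bits (sfmt) and remapped the VALID/FILEMARK/EOM/ILI flags, while the restored lines copy the key byte through verbatim. For reference, a standalone sketch of the fixed-format layout the converter writes (byte offsets per SPC, not taken from this driver):

/* SPC fixed-format sense data, as produced above.  The removed
 * code folded sfmt and a VALID flag into byte 0 and rearranged
 * the flag bits in byte 2; the restored code writes 0x70 and the
 * raw status byte instead. */
u8 sense_data[18] = {
	[0]  = 0x70,	/* response code: current error, fixed format */
	[2]  = 0x00,	/* bits 3:0 sense key; bits 7:5 FILEMARK/EOM/ILI */
	[7]  = 10,	/* additional sense length: bytes 8..17 follow */
	[12] = 0x00,	/* additional sense code (ASC) */
	[13] = 0x00,	/* additional sense code qualifier (ASCQ) */
};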
@@ -1353,7 +1357,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
 {
 	struct sbp2_command_orb *orb =
 		container_of(base_orb, struct sbp2_command_orb, base);
-	struct fw_device *device = target_parent_device(orb->lu->tgt);
+	struct fw_device *device = target_device(orb->lu->tgt);
 	int result;
 
 	if (status != NULL) {
@@ -1401,8 +1405,9 @@ static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
 	struct scatterlist *sg = scsi_sglist(orb->cmd);
 	int i, n;
 
-	n = scsi_dma_map(orb->cmd);
-	if (n <= 0)
+	n = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
+		       orb->cmd->sc_data_direction);
+	if (n == 0)
 		goto fail;
 
 	/*
@@ -1448,7 +1453,8 @@ static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
 	return 0;
 
  fail_page_table:
-	scsi_dma_unmap(orb->cmd);
+	dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
+		     scsi_sg_count(orb->cmd), orb->cmd->sc_data_direction);
 fail:
 	return -ENOMEM;
 }
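In the elided middle of sbp2_map_scatterlist(), between the two hunks above, the mapped segments are packed into an SBP-2 unrestricted page table whenever more than one remains (hence COMMAND_ORB_PAGE_TABLE_PRESENT and page_table_bus earlier in this patch). A sketch of that packing, under the assumption that each element is a pair of big-endian quadlets with the segment length in the upper 16 bits of the first; struct sbp2_pointer is reconstructed here for illustration:

#include <linux/scatterlist.h>

/* Assumed element layout: 16-bit segment length in the top half
 * of the first quadlet, 32-bit segment base in the second. */
struct sbp2_pointer {
	__be32 high;
	__be32 low;
};

static void fill_page_table(struct sbp2_pointer *pt,
			    struct scatterlist *sglist, int n_mapped)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, n_mapped, i) {
		pt[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
		pt[i].low  = cpu_to_be32(sg_dma_address(sg));
	}
}

The 16-bit length field is the reason no scatterlist segment may exceed 64 KiB; see the queue limit added in slave_configure below.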
@@ -1459,7 +1465,7 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
 				  struct scsi_cmnd *cmd)
 {
 	struct sbp2_logical_unit *lu = cmd->device->hostdata;
-	struct fw_device *device = target_parent_device(lu->tgt);
+	struct fw_device *device = target_device(lu->tgt);
 	struct sbp2_command_orb *orb;
 	int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
 
@@ -1468,7 +1474,7 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
 	 * transfer direction not handled.
 	 */
 	if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
-		dev_err(lu_dev(lu), "cannot handle bidirectional command\n");
+		fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n");
 		cmd->result = DID_ERROR << 16;
 		cmd->scsi_done(cmd);
 		return 0;
@@ -1476,7 +1482,7 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
 
 	orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
 	if (orb == NULL) {
-		dev_notice(lu_dev(lu), "failed to alloc ORB\n");
+		fw_notify("failed to alloc orb\n");
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
@@ -1529,10 +1535,7 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
 
 	sdev->allow_restart = 1;
 
-	/*
-	 * SBP-2 does not require any alignment, but we set it anyway
-	 * for compatibility with earlier versions of this driver.
-	 */
+	/* SBP-2 requires quadlet alignment of the data buffers. */
 	blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
 
 	if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
@@ -1566,6 +1569,8 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
 	if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
 		blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
 
+	blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
+
 	return 0;
 }
 
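The added blk_queue_max_segment_size() call pairs with the quadlet alignment configured in slave_alloc above: together they guarantee that every scatterlist segment the block layer produces fits one page-table element. Condensed into one place as a sketch (SBP2_MAX_SEG_SIZE is defined elsewhere in the driver; 0xfffc, the largest quadlet-aligned value that fits the 16-bit length field, is assumed here only for illustration):

/* Sketch: the request-queue limits this driver configures.
 * 0xfffc stands in for SBP2_MAX_SEG_SIZE (an assumption). */
static void sbp2_set_queue_limits(struct scsi_device *sdev)
{
	/* DMA buffers must start on quadlet (4-byte) boundaries ... */
	blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);

	/* ... and no segment may overflow a page-table length field. */
	blk_queue_max_segment_size(sdev->request_queue, 0xfffc);
}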
@@ -1577,7 +1582,7 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
 {
 	struct sbp2_logical_unit *lu = cmd->device->hostdata;
 
-	dev_notice(lu_dev(lu), "sbp2_scsi_abort\n");
+	fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id);
 	sbp2_agent_reset(lu);
 	sbp2_cancel_orbs(lu);
 
@@ -1617,7 +1622,7 @@ static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
 static struct scsi_host_template scsi_driver_template = {
 	.module			= THIS_MODULE,
 	.name			= "SBP-2 IEEE-1394",
-	.proc_name		= "sbp2",
+	.proc_name		= sbp2_driver_name,
 	.queuecommand		= sbp2_scsi_queuecommand,
 	.slave_alloc		= sbp2_scsi_slave_alloc,
 	.slave_configure	= sbp2_scsi_slave_configure,